diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 00000000000..75a99abd784
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,22 @@
+// For format details, see https://aka.ms/devcontainer.json. For config options, see the
+// README at: https://github.com/devcontainers/templates/tree/main/src/go
+{
+ "name": "Go",
+ // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
+ "image": "mcr.microsoft.com/devcontainers/go:1.23-bookworm",
+ // Features to add to the dev container. More info: https://containers.dev/features.
+ "features": {
+ "ghcr.io/devcontainers/features/docker-in-docker:2": {},
+ "ghcr.io/devcontainers/features/github-cli:1": {},
+ "ghcr.io/devcontainers/features/kubectl-helm-minikube:1": {}
+ },
+ // Use 'forwardPorts' to make a list of ports inside the container available locally.
+ "forwardPorts": [
+ 2379,
+ 2380
+ ],
+ // Use 'postCreateCommand' to run commands after the container is created.
+ "postCreateCommand": "make build"
+ // Configure tool-specific properties.
+ // "customizations": {},
+}
\ No newline at end of file
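
The dev container above can be used either from an editor that supports the Dev Containers spec or from the `@devcontainers/cli` package. As a minimal sketch (assuming the CLI is installed and the commands are run from the repository root, neither of which the diff itself states):

```console
# Build and start the container described by .devcontainer/devcontainer.json
$ devcontainer up --workspace-folder .

# Run a command inside it, e.g. the same build that postCreateCommand performs
$ devcontainer exec --workspace-folder . make build
```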
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
deleted file mode 100644
index 3a3c247b5bd..00000000000
--- a/.github/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,2 +0,0 @@
-
-Please read https://etcd.io/docs/latest/reporting_bugs/
diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml
new file mode 100644
index 00000000000..5ba33bc16bc
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug-report.yml
@@ -0,0 +1,102 @@
+---
+name: Bug Report
+description: Report a bug encountered while operating etcd
+labels:
+ - type/bug
+body:
+ - type: checkboxes
+ id: confirmations
+ attributes:
+ label: Bug report criteria
+ description: Please confirm this bug report meets the following criteria.
+ options:
+ - label: This bug report is not security related, security issues should be disclosed privately via [etcd maintainers](mailto:etcd-maintainers@googlegroups.com).
+ - label: This is not a support request or question, support requests or questions should be raised in the etcd [discussion forums](https://github.com/etcd-io/etcd/discussions).
+ - label: You have read the etcd [bug reporting guidelines](https://github.com/etcd-io/etcd/blob/main/Documentation/contributor-guide/reporting_bugs.md).
+ - label: Existing open issues along with etcd [frequently asked questions](https://etcd.io/docs/latest/faq) have been checked and this is not a duplicate.
+
+ - type: markdown
+ attributes:
+ value: |
+ Please fill the form below and provide as much information as possible.
+ Not doing so may result in your bug not being addressed in a timely manner.
+
+ - type: textarea
+ id: problem
+ attributes:
+ label: What happened?
+ validations:
+ required: true
+
+ - type: textarea
+ id: expected
+ attributes:
+ label: What did you expect to happen?
+ validations:
+ required: true
+
+ - type: textarea
+ id: repro
+ attributes:
+ label: How can we reproduce it (as minimally and precisely as possible)?
+ validations:
+ required: true
+
+ - type: textarea
+ id: additional
+ attributes:
+ label: Anything else we need to know?
+
+ - type: textarea
+ id: etcdVersion
+ attributes:
+ label: Etcd version (please run commands below)
+ value: |
+
+
+ ```console
+ $ etcd --version
+ # paste output here
+
+ $ etcdctl version
+ # paste output here
+ ```
+
+
+ validations:
+ required: true
+
+ - type: textarea
+ id: config
+ attributes:
+ label: Etcd configuration (command line flags or environment variables)
+ value: |
+
+
+ # paste your configuration here
+
+
+
+ - type: textarea
+ id: etcdDebugInformation
+ attributes:
+ label: Etcd debug information (please run commands below, feel free to obfuscate the IP address or FQDN in the output)
+ value: |
+
+
+ ```console
+ $ etcdctl member list -w table
+ # paste output here
+
+ $ etcdctl --endpoints= endpoint status -w table
+ # paste output here
+ ```
+
+
+
+ - type: textarea
+ id: logs
+ attributes:
+ label: Relevant log output
+ description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
+ render: Shell
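
For reference, the version block this form asks reporters to paste typically looks like the output below; the version numbers and SHA are purely illustrative:

```console
$ etcd --version
etcd Version: 3.5.17
Git SHA: 123abc4
Go Version: go1.22.9
Go OS/Arch: linux/amd64

$ etcdctl version
etcdctl version: 3.5.17
API version: 3.5
```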
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 00000000000..b48f29b9061
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,6 @@
+---
+blank_issues_enabled: false
+contact_links:
+ - name: Question
+ url: https://github.com/etcd-io/etcd/discussions
+ about: Question relating to Etcd
diff --git a/.github/ISSUE_TEMPLATE/distributors-application.md b/.github/ISSUE_TEMPLATE/distributors-application.md
deleted file mode 100644
index 2f65c6afa7f..00000000000
--- a/.github/ISSUE_TEMPLATE/distributors-application.md
+++ /dev/null
@@ -1,28 +0,0 @@
----
-name: Distributors Application
-title: Distributors Application for
-about: Apply for membership of security@etcd.io
----
-
-
-
-**Actively monitored security email alias for our project:**
-
-**1. Have a user base not limited to your own organization.**
-
-**2. Have a publicly verifiable track record up to present day of fixing security issues.**
-
-**3. Not be a downstream or rebuild of another distribution.**
-
-**4. Be a participant and active contributor in the community.**
-
-**5. Accept the Embargo Policy.**
-
-
-**6. Be willing to contribute back.**
-
-
-**7. Have someone already on the list vouch for the person requesting membership on behalf of your distribution.**
diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml
new file mode 100644
index 00000000000..d18dec24567
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature-request.yml
@@ -0,0 +1,19 @@
+---
+name: Feature request
+description: Provide an idea for a new feature
+labels:
+ - type/feature
+body:
+ - type: textarea
+ id: feature
+ attributes:
+ label: What would you like to be added?
+ validations:
+ required: true
+
+ - type: textarea
+ id: rationale
+ attributes:
+ label: Why is this needed?
+ validations:
+ required: true
diff --git a/.github/ISSUE_TEMPLATE/test-flake.yml b/.github/ISSUE_TEMPLATE/test-flake.yml
new file mode 100644
index 00000000000..6ecf32392a4
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/test-flake.yml
@@ -0,0 +1,35 @@
+---
+name: Flaking Test
+description: Report flaky tests
+labels:
+ - type/flake
+ - area/testing
+body:
+ - type: textarea
+ id: workflows
+ attributes:
+ label: Which Github Action / Prow Jobs are flaking?
+ validations:
+ required: true
+
+ - type: textarea
+ id: tests
+ attributes:
+ label: Which tests are flaking?
+ validations:
+ required: true
+
+ - type: input
+ id: link
+ attributes:
+ label: Github Action / Prow Job link
+
+ - type: textarea
+ id: reason
+ attributes:
+ label: Reason for failure (if possible)
+
+ - type: textarea
+ id: additional
+ attributes:
+ label: Anything else we need to know?
diff --git a/.github/OWNERS b/.github/OWNERS
new file mode 100644
index 00000000000..efdfed26735
--- /dev/null
+++ b/.github/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+ - ivanvc # Ivan Valdes
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000000..6931f34066e
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,38 @@
+---
+version: 2
+updates:
+ - package-ecosystem: github-actions
+ directory: /
+ schedule:
+ interval: weekly
+
+ - package-ecosystem: gomod
+ directory: /
+ schedule:
+ interval: weekly
+ allow:
+ - dependency-type: all
+
+ - package-ecosystem: gomod
+ directory: /tools/mod # Not linked from /go.mod
+ schedule:
+ interval: weekly
+ allow:
+ - dependency-type: direct
+
+ - package-ecosystem: docker
+ directory: /
+ schedule:
+ interval: weekly
+
+ - package-ecosystem: docker
+ directory: /
+ target-branch: "release-3.4"
+ schedule:
+ interval: monthly
+
+ - package-ecosystem: docker
+ directory: /
+ target-branch: "release-3.5"
+ schedule:
+ interval: monthly
diff --git a/.github/stale.yml b/.github/stale.yml
index b85f1f45720..1c04a756b9a 100644
--- a/.github/stale.yml
+++ b/.github/stale.yml
@@ -1,3 +1,4 @@
+---
# Configuration for probot-stale - https://github.com/probot/stale
# Number of days of inactivity before an Issue or Pull Request becomes stale
@@ -12,8 +13,7 @@ onlyLabels: []
# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
- - "area/security"
- - "Investigating"
+ - "stage/tracked"
# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false
@@ -28,11 +28,7 @@ exemptAssignees: false
staleLabel: stale
# Comment to post when marking as stale. Set to `false` to disable
-markComment: >
- This issue has been automatically marked as stale because it has not had
- recent activity. It will be closed after 21 days if no further activity
- occurs. Thank you for your contributions.
-
+markComment: This issue has been automatically marked as stale because it has not had recent activity. It will be closed after 21 days if no further activity occurs. Thank you for your contributions.
# Comment to post when removing the stale label.
# unmarkComment: >
# Your comment here.
diff --git a/.github/workflows/OWNERS b/.github/workflows/OWNERS
new file mode 100644
index 00000000000..0dbba06bbad
--- /dev/null
+++ b/.github/workflows/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+ - github_actions
diff --git a/.github/workflows/asset-transparency.yaml b/.github/workflows/asset-transparency.yaml
deleted file mode 100644
index 6f4f2bf0cfd..00000000000
--- a/.github/workflows/asset-transparency.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-name: Publish Release Assets to Asset Transparency Log
-
-on:
- release:
- types: [published, created, edited, released]
-
-jobs:
- github_release_asset_transparency_log_publish_job:
- runs-on: ubuntu-latest
- name: Publish GitHub release asset digests to https://beta-asset.transparencylog.net
- steps:
- - name: Gather URLs from GitHub release and publish
- id: asset-transparency
- uses: transparencylog/github-releases-asset-transparency-verify-action@v11
- - name: List verified and published URLs
- run: echo "Verified URLs ${{ steps.asset-transparency.outputs.verified }}"
- - name: List failed URLs
- run: echo "Failed URLs ${{ steps.asset-transparency.outputs.failed }}"
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 76e8a31e370..a270f436ac4 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -1,3 +1,4 @@
+---
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
@@ -10,58 +11,45 @@
# supported CodeQL languages.
#
name: "CodeQL"
-
on:
push:
- branches: [ main, release-0.4, release-2.0, release-2.1, release-2.2, release-2.3, release-3.0, release-3.1 ]
+ branches: [main, release-3.4, release-3.5, release-3.6]
pull_request:
# The branches below must be a subset of the branches above
- branches: [ main ]
+ branches: [main]
schedule:
- cron: '20 14 * * 5'
-
+permissions: read-all
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
-
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
strategy:
fail-fast: false
matrix:
- language: [ 'go' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
-
+ language: ['go']
steps:
- - name: Checkout repository
- uses: actions/checkout@v2
-
- # Initializes the CodeQL tools for scanning.
- - name: Initialize CodeQL
- uses: github/codeql-action/init@v1
- with:
- languages: ${{ matrix.language }}
- # If you wish to specify custom queries, you can do so here or in a config file.
- # By default, queries listed here will override any specified in a config file.
- # Prefix the list here with "+" to use these queries and those in the config file.
- # queries: ./path/to/local/query, your-org/your-repo/queries@main
-
- # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
- # If this step fails, then you should remove it and run the build manually (see below)
- - name: Autobuild
- uses: github/codeql-action/autobuild@v1
-
- # ℹ️ Command-line programs to run using the OS shell.
- # 📚 https://git.io/JvXDl
-
- # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
- # and modify them (or add more) to build your code if your project
- # uses a compiled language
-
- #- run: |
- # make bootstrap
- # make release
-
- - name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v1
+ - name: Checkout repository
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0
+ with:
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+ # queries: ./path/to/local/query, your-org/your-repo/queries@main
+ languages: ${{ matrix.language }}
+ # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0
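
Note that the reworked workflow pins each action to a full commit SHA and keeps the human-readable tag in a trailing comment. When refreshing such a pin by hand, the tag can be resolved to a commit with plain git; a sketch (output omitted; for annotated tags the peeled `^{}` entry in the output is the commit to pin):

```console
# Resolve the commit behind the actions/checkout v4.2.2 tag before pinning it
$ git ls-remote https://github.com/actions/checkout refs/tags/v4.2.2
```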
diff --git a/.github/workflows/contrib.yaml b/.github/workflows/contrib.yaml
new file mode 100644
index 00000000000..fc3f040c66e
--- /dev/null
+++ b/.github/workflows/contrib.yaml
@@ -0,0 +1,18 @@
+---
+name: Test contrib/mixin
+on: [push, pull_request]
+permissions: read-all
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - id: goversion
+ run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT"
+ - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0
+ with:
+ go-version: ${{ steps.goversion.outputs.goversion }}
+ - run: |
+ set -euo pipefail
+
+ make -C contrib/mixin tools test
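
Several of the new workflows share one pattern: read the Go toolchain version from the `.go-version` file at the repository root, publish it as a step output through `$GITHUB_OUTPUT`, and hand it to `actions/setup-go`. Outside of Actions the mechanism reduces to appending a `key=value` line to a file; the version shown is illustrative:

```console
$ cat .go-version
1.23.4
# Inside a workflow step, GITHUB_OUTPUT points at a file that later steps read as key=value pairs
$ echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT"
```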
diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml
new file mode 100644
index 00000000000..69ec34868d1
--- /dev/null
+++ b/.github/workflows/coverage.yaml
@@ -0,0 +1,34 @@
+---
+name: Coverage
+on: [push, pull_request]
+permissions: read-all
+jobs:
+ coverage:
+ # this prevents the job from running on forked projects
+ if: github.repository == 'etcd-io/etcd'
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ target:
+ - linux-amd64-coverage
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - id: goversion
+ run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT"
+ - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0
+ with:
+ go-version: ${{ steps.goversion.outputs.goversion }}
+ - env:
+ TARGET: ${{ matrix.target }}
+ run: |
+ mkdir "${TARGET}"
+ case "${TARGET}" in
+ linux-amd64-coverage)
+ GOARCH=amd64 ./scripts/codecov_upload.sh
+ ;;
+ *)
+ echo "Failed to find target"
+ exit 1
+ ;;
+ esac
diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml
deleted file mode 100644
index 4ec326b52ed..00000000000
--- a/.github/workflows/e2e.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-name: E2E
-on: [push, pull_request]
-jobs:
- test:
- runs-on: ubuntu-latest
- strategy:
- fail-fast: true
- matrix:
- target:
- - linux-amd64-e2e
- - linux-386-e2e
- steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-go@v2
- with:
- go-version: "^1.16"
- - run: date
- - env:
- TARGET: ${{ matrix.target }}
- run: |
- echo "${TARGET}"
- case "${TARGET}" in
- linux-amd64-e2e)
- PASSES='build release e2e' MANUAL_VER=v3.4.7 CPU='4' EXPECT_DEBUG='true' COVER='false' RACE='true' ./test.sh 2>&1 | tee test.log
- ! egrep "(--- FAIL:|DATA RACE|panic: test timed out|appears to have leaked)" -B50 -A10 test.log
- ;;
- linux-386-e2e)
- GOARCH=386 PASSES='build e2e' CPU='4' EXPECT_DEBUG='true' COVER='false' RACE='true' ./test.sh 2>&1 | tee test.log
- ! egrep "(--- FAIL:|DATA RACE|panic: test timed out|appears to have leaked)" -B50 -A10 test.log
- ;;
- *)
- echo "Failed to find target"
- exit 1
- ;;
- esac
diff --git a/.github/workflows/functional.yaml b/.github/workflows/functional.yaml
deleted file mode 100644
index 0e3a46fc688..00000000000
--- a/.github/workflows/functional.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-name: functional-tests
-on: [push, pull_request]
-jobs:
- test:
- runs-on: ubuntu-latest
- strategy:
- fail-fast: true
- matrix:
- target:
- - linux-amd64-functional
- steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-go@v2
- with:
- go-version: "^1.16"
- - run: date
- - env:
- TARGET: ${{ matrix.target }}
- run: |
- echo "${TARGET}"
- case "${TARGET}" in
- linux-amd64-functional)
- GO_BUILD_FLAGS='-v -mod=readonly' ./build && GOARCH=amd64 PASSES='functional' ./test
- ;;
- *)
- echo "Failed to find target"
- exit 1
- ;;
- esac
diff --git a/.github/workflows/fuzzing.yaml b/.github/workflows/fuzzing.yaml
new file mode 100644
index 00000000000..8ab8c374adb
--- /dev/null
+++ b/.github/workflows/fuzzing.yaml
@@ -0,0 +1,26 @@
+---
+name: Fuzzing v3rpc
+on: [push, pull_request]
+permissions: read-all
+jobs:
+ fuzzing:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ env:
+ TARGET_PATH: ./server/etcdserver/api/v3rpc
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - id: goversion
+ run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT"
+ - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0
+ with:
+ go-version: ${{ steps.goversion.outputs.goversion }}
+ - run: |
+ set -euo pipefail
+
+ GOARCH=amd64 CPU=4 make fuzz
+ - uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
+ if: failure()
+ with:
+ path: "${{env.TARGET_PATH}}/testdata/fuzz/**/*"
diff --git a/.github/workflows/gh-workflow-approve.yaml b/.github/workflows/gh-workflow-approve.yaml
new file mode 100644
index 00000000000..1f988637677
--- /dev/null
+++ b/.github/workflows/gh-workflow-approve.yaml
@@ -0,0 +1,45 @@
+---
+name: Approve GitHub Workflows
+permissions: read-all
+
+on:
+ pull_request_target:
+ types:
+ - labeled
+ - synchronize
+ branches:
+ - main
+ - release-3.5
+ - release-3.4
+
+jobs:
+ approve:
+ name: Approve ok-to-test
+ if: contains(github.event.pull_request.labels.*.name, 'ok-to-test')
+ runs-on: ubuntu-latest
+ permissions:
+ actions: write
+ steps:
+ - name: Update PR
+ uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
+ continue-on-error: true
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ debug: ${{ secrets.ACTIONS_RUNNER_DEBUG == 'true' }}
+ script: |
+ const result = await github.rest.actions.listWorkflowRunsForRepo({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ event: "pull_request",
+ status: "action_required",
+ head_sha: context.payload.pull_request.head.sha,
+ per_page: 100
+ });
+
+ for (var run of result.data.workflow_runs) {
+ await github.rest.actions.approveWorkflowRun({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ run_id: run.id
+ });
+ }
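
The `github-script` step lists the PR's workflow runs that are stuck in `action_required` and approves them, gated on a maintainer applying the `ok-to-test` label. With the GitHub CLI, that gating step looks roughly like this (the PR number is illustrative):

```console
# Label a contributor PR so its pending workflow runs get approved on the next labeled/synchronize event
$ gh pr edit 12345 --add-label ok-to-test
```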
diff --git a/.github/workflows/grpcproxy.yaml b/.github/workflows/grpcproxy.yaml
index 081d8be328f..e15a37bc358 100644
--- a/.github/workflows/grpcproxy.yaml
+++ b/.github/workflows/grpcproxy.yaml
@@ -1,30 +1,38 @@
+---
name: grpcProxy-tests
on: [push, pull_request]
+permissions: read-all
jobs:
test:
runs-on: ubuntu-latest
strategy:
- fail-fast: true
+ fail-fast: false
matrix:
target:
- - linux-amd64-grpcproxy
+ - linux-amd64-grpcproxy-integration
+ - linux-amd64-grpcproxy-e2e
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-go@v2
- with:
- go-version: "^1.16"
- - run: date
- - env:
- TARGET: ${{ matrix.target }}
- run: |
- echo "${TARGET}"
- case "${TARGET}" in
- linux-amd64-grpcproxy)
- PASSES='build grpcproxy' CPU='4' COVER='false' RACE='true' ./test.sh 2>&1 | tee test.log
- ! egrep "(--- FAIL:|DATA RACE|panic: test timed out|appears to have leaked)" -B50 -A10 test.log
- ;;
- *)
- echo "Failed to find target"
- exit 1
- ;;
- esac
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - id: goversion
+ run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT"
+ - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0
+ with:
+ go-version: ${{ steps.goversion.outputs.goversion }}
+ - env:
+ TARGET: ${{ matrix.target }}
+ run: |
+ set -euo pipefail
+
+ echo "${TARGET}"
+ case "${TARGET}" in
+ linux-amd64-grpcproxy-integration)
+ GOOS=linux GOARCH=amd64 CPU=4 make test-grpcproxy-integration
+ ;;
+ linux-amd64-grpcproxy-e2e)
+ GOOS=linux GOARCH=amd64 CPU=4 make test-grpcproxy-e2e
+ ;;
+ *)
+ echo "Failed to find target"
+ exit 1
+ ;;
+ esac
diff --git a/.github/workflows/measure-testgrid-flakiness.yaml b/.github/workflows/measure-testgrid-flakiness.yaml
new file mode 100644
index 00000000000..97ecf0c95bb
--- /dev/null
+++ b/.github/workflows/measure-testgrid-flakiness.yaml
@@ -0,0 +1,26 @@
+---
+name: Measure TestGrid Flakiness
+
+on:
+ schedule:
+ - cron: "0 0 * * *" # run every day at midnight
+
+permissions: read-all
+
+jobs:
+ measure-testgrid-flakiness:
+ name: Measure TestGrid Flakiness
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - id: goversion
+ run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT"
+ - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0
+ with:
+ go-version: ${{ steps.goversion.outputs.goversion }}
+ - env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ set -euo pipefail
+
+ ./scripts/measure-testgrid-flakiness.sh
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
new file mode 100644
index 00000000000..3f370fb57c0
--- /dev/null
+++ b/.github/workflows/release.yaml
@@ -0,0 +1,77 @@
+---
+name: Release
+on: [push, pull_request]
+permissions: read-all
+jobs:
+ main:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - id: goversion
+ run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT"
+ - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0
+ with:
+ go-version: ${{ steps.goversion.outputs.goversion }}
+ - name: release
+ run: |
+ set -euo pipefail
+
+ git config --global user.email "github-action@etcd.io"
+ git config --global user.name "Github Action"
+ gpg --batch --gen-key <
- case "${TARGET}" in
- linux-amd64-coverage)
- sudo HOST_TMP_DIR=/tmp TEST_OPTS="VERBOSE='1'" make docker-test-coverage
- ;;
- linux-amd64-fmt-unit-go-tip-2-cpu)
- GOARCH=amd64 PASSES='fmt unit' CPU='2' RACE='false' ./test.sh -p=2
- ;;
- esac
diff --git a/.words b/.words
deleted file mode 100644
index da36ba44ba5..00000000000
--- a/.words
+++ /dev/null
@@ -1,116 +0,0 @@
-accessors
-addrConns
-args
-atomics
-backoff
-BackoffFunc
-BackoffLinearWithJitter
-Balancer
-BidiStreams
-blackhole
-blackholed
-CallOptions
-cancelable
-cancelation
-ccBalancerWrapper
-clientURLs
-clusterName
-cluster_proxy
-consistentIndex
-ConsistentIndexGetter
-DefaultMaxRequestBytes
-defragment
-defragmenting
-deleter
-dev
-/dev/null
-dev/null
-DNS
-errClientDisconnected
-ErrCodeEnhanceYourCalm
-ErrConnClosing
-ErrRequestTooLarge
-ErrTimeout
-etcd
-FIXME
-github
-GoAway
-goroutine
-goroutines
-gRPC
-grpcAddr
-hasleader
-healthcheck
-hostname
-iff
-inflight
-InfoLevel
-jitter
-jitter
-jitter
-keepalive
-Keepalive
-KeepAlive
-keepalives
-keyspace
-lexically
-lexicographically
-linearizable
-linearization
-linearized
-liveness
-localhost
-__lostleader
-MaxRequestBytes
-MiB
-middleware
-mutators
-mutex
-nils
-nondeterministically
-nop
-OutputWALDir
-parsedTarget
-passthrough
-PermitWithoutStream
-prefetching
-prometheus
-protobuf
-racey
-rafthttp
-rebalanced
-reconnection
-repin
-ResourceExhausted
-retriable
-retriable
-rpc
-RPC
-RPCs
-saveWALAndSnap
-serializable
-ServerStreams
-SHA
-SRV
-statusError
-subConn
-subconns
-SubConns
-teardown
-TestBalancerDoNotBlockOnClose
-todo
-too_many_pings
-transactional
-transferee
-transientFailure
-unbuffered
-uncontended
-unfreed
-unlisting
-unprefixed
-WatchProgressNotifyInterval
-WAL
-WithBackoff
-WithDialer
-WithMax
-WithRequireLeader
diff --git a/ADOPTERS.md b/ADOPTERS.md
index c6c294637d3..5c6874c609f 100644
--- a/ADOPTERS.md
+++ b/ADOPTERS.md
@@ -212,7 +212,7 @@ At [Branch][branch], we use kubernetes heavily as our core microservice platform
## Baidu Waimai
- *Application*: SkyDNS, Kubernetes, UDC, CMDB and other distributed systems
-- *Launched*: April. 2016
+- *Launched*: April 2016
- *Cluster Size*: 3 clusters of 5 members
- *Order of Data Size*: several gigabytes
- *Operator*: Baidu Waimai Operations Department
@@ -248,3 +248,13 @@ At [Branch][branch], we use kubernetes heavily as our core microservice platform
- *Operator*: Trasnwarp Operating System
- *Environment*: Bare Metal, Container
- *Backups*: backup scripts
+
+## Cyberfusion
+
+- *Application*: cluster configuration management
+- *Launched*: February 2023
+- *Cluster Size*: single cluster, 3 nodes
+- *Order of Data Size*: kilobytes
+- *Operator*: Cyberfusion
+- *Environment*: Debian on VMs
+- *Backups*: periodic `etcdctl snapshot save` + rotation in cron. More about our setup: https://cyberfusion.io/articles/building-hosting-infrastructure-in-2024-configuration-management-part-1
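
The backup scheme mentioned in the new entry, periodic `etcdctl snapshot save` with rotation from cron, could be sketched as a crontab line like the one below; the endpoint, paths, and retention are illustrative assumptions rather than details taken from the adopter:

```console
# Illustrative crontab entry: nightly snapshot at 02:00, pruning copies older than 7 days
0 2 * * * etcdctl --endpoints=https://127.0.0.1:2379 snapshot save /var/backups/etcd/snapshot-$(date +\%F).db && find /var/backups/etcd -name 'snapshot-*.db' -mtime +7 -delete
```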
diff --git a/CHANGELOG-3.5.md b/CHANGELOG-3.5.md
deleted file mode 100644
index b60a08e5c79..00000000000
--- a/CHANGELOG-3.5.md
+++ /dev/null
@@ -1,296 +0,0 @@
-
-
-Previous change logs can be found at [CHANGELOG-3.4](https://github.com/etcd-io/etcd/blob/main/CHANGELOG-3.4.md).
-
-
-The minimum recommended etcd versions to run in **production** are 3.2.28+, 3.3.18+, and 3.4.2+.
-
-
-
-
-
-## v3.5.0 (2021-06)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0...v3.5.0) and [v3.5 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_5/) for any breaking changes.
-
-- [v3.5.0](https://github.com/etcd-io/etcd/releases/tag/v3.5.0) (2020 TBD), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-rc.1...v3.5.0).
-- [v3.5.0-rc.1](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-rc.1) (2020 TBD), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-rc.0...v3.5.0-rc.1).
-- [v3.5.0-rc.0](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-rc.0) (2020 TBD), see [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0...v3.5.0-rc.0).
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.5 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_5/).**
-
-### Breaking Changes
-
-- `go.etcd.io/etcd` Go packages have moved to `go.etcd.io/etcd/{api,pkg,raft,client,etcdctl,server,raft,tests}/v3` to follow the [Go modules](https://github.com/golang/go/wiki/Modules) conventions
-- `go.etcd.io/clientv3/snapshot` SnapshotManager class have moved to `go.etcd.io/clientv3/etcdctl`.
- The method `snapshot.Save` to download a snapshot from the remote server was preserved in 'go.etcd.io/clientv3/snapshot`.
-- `go.etcd.io/client' package got migrated to 'go.etcd.io/client/v2'.
-- Changed behavior of clienv3 API [MemberList](https://github.com/etcd-io/etcd/pull/11639).
- - Previously, it is directly served with server's local data, which could be stale.
- - Now, it is served with linearizable guarantee. If the server is disconnected from quorum, `MemberList` call will fail.
-- [gRPC gateway](https://github.com/grpc-ecosystem/grpc-gateway) only supports [`/v3`](TODO) endpoint.
- - Deprecated [`/v3beta`](https://github.com/etcd-io/etcd/pull/9298).
- - `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` does work in v3.5. Use `curl -L http://localhost:2379/v3/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` instead.
-- **`etcd --experimental-enable-v2v3` flag remains experimental and to be deprecated.**
- - v2 storage emulation feature will be deprecated in the next release.
- - etcd 3.5 is the last version that supports V2 API. Flags `--enable-v2` and `--experimental-enable-v2v3` [are now deprecated](https://github.com/etcd-io/etcd/pull/) and will be removed in etcd v3.6 release.
-- **`etcd --experimental-backend-bbolt-freelist-type` flag has been deprecated.** Use **`etcd --backend-bbolt-freelist-type`** instead. The default type is hashmap and it is stable now.
-- **`etcd --debug` flag has been deprecated.** Use **`etcd --log-level=debug`** instead.
-- Remove [`embed.Config.Debug`](https://github.com/etcd-io/etcd/pull/10947).
-- **`etcd --log-output` flag has been deprecated.** Use **`etcd --log-outputs`** instead.
-- **`etcd --logger=zap --log-outputs=stderr`** is now the default.
-- **`etcd --logger=capnslog` flag value has been deprecated.**
-- **`etcd --logger=zap --log-outputs=default` flag value is not supported.**.
- - Use `etcd --logger=zap --log-outputs=stderr`.
- - Or, use `etcd --logger=zap --log-outputs=systemd/journal` to send logs to the local systemd journal.
- - Previously, if etcd parent process ID (PPID) is 1 (e.g. run with systemd), `etcd --logger=capnslog --log-outputs=default` redirects server logs to local systemd journal. And if write to journald fails, it writes to `os.Stderr` as a fallback.
- - However, even with PPID 1, it can fail to dial systemd journal (e.g. run embedded etcd with Docker container). Then, [every single log write will fail](https://github.com/etcd-io/etcd/pull/9729) and fall back to `os.Stderr`, which is inefficient.
- - To avoid this problem, systemd journal logging must be configured manually.
-- **`etcd --log-outputs=stderr`** is now the default.
-- **`etcd --log-package-levels` flag for `capnslog` has been deprecated.** Now, **`etcd --logger=zap --log-outputs=stderr`** is the default.
-- **`[CLIENT-URL]/config/local/log` endpoint has been deprecated, as is `etcd --log-package-levels` flag.**
- - `curl http://127.0.0.1:2379/config/local/log -XPUT -d '{"Level":"DEBUG"}'` won't work.
- - Please use `etcd --logger=zap --log-outputs=stderr` instead.
-- Deprecated `etcd_debugging_mvcc_db_total_size_in_bytes` Prometheus metric. Use `etcd_mvcc_db_total_size_in_bytes` instead.
-- Deprecated `etcd_debugging_mvcc_put_total` Prometheus metric. Use `etcd_mvcc_put_total` instead.
-- Deprecated `etcd_debugging_mvcc_delete_total` Prometheus metric. Use `etcd_mvcc_delete_total` instead.
-- Deprecated `etcd_debugging_mvcc_txn_total` Prometheus metric. Use `etcd_mvcc_txn_total` instead.
-- Deprecated `etcd_debugging_mvcc_range_total` Prometheus metric. Use `etcd_mvcc_range_total` instead.
-- Main branch `/version` outputs `3.5.0-pre`, instead of `3.4.0+git`.
-- Changed `proxy` package function signature to [support structured logger](https://github.com/etcd-io/etcd/pull/11614).
- - Previously, `NewClusterProxy(c *clientv3.Client, advaddr string, prefix string) (pb.ClusterServer, <-chan struct{})`, now `NewClusterProxy(lg *zap.Logger, c *clientv3.Client, advaddr string, prefix string) (pb.ClusterServer, <-chan struct{})`.
- - Previously, `Register(c *clientv3.Client, prefix string, addr string, ttl int)`, now `Register(lg *zap.Logger, c *clientv3.Client, prefix string, addr string, ttl int) <-chan struct{}`.
- - Previously, `NewHandler(t *http.Transport, urlsFunc GetProxyURLs, failureWait time.Duration, refreshInterval time.Duration) http.Handler`, now `NewHandler(lg *zap.Logger, t *http.Transport, urlsFunc GetProxyURLs, failureWait time.Duration, refreshInterval time.Duration) http.Handler`.
-- Changed `pkg/flags` function signature to [support structured logger](https://github.com/etcd-io/etcd/pull/11616).
- - Previously, `SetFlagsFromEnv(prefix string, fs *flag.FlagSet) error`, now `SetFlagsFromEnv(lg *zap.Logger, prefix string, fs *flag.FlagSet) error`.
- - Previously, `SetPflagsFromEnv(prefix string, fs *pflag.FlagSet) error`, now `SetPflagsFromEnv(lg *zap.Logger, prefix string, fs *pflag.FlagSet) error`.
-- ClientV3 supports [grpc resolver API](https://github.com/etcd-io/etcd/blob/main/client/v3/naming/resolver/resolver.go).
- - Endpoints can be managed using [endpoints.Manager](https://github.com/etcd-io/etcd/blob/main/client/v3/naming/endpoints/endpoints.go)
- - Previously supported [GRPCResolver was decomissioned](https://github.com/etcd-io/etcd/pull/12675). Use [resolver](https://github.com/etcd-io/etcd/blob/main/client/v3/naming/resolver/resolver.go) instead.
-- Turned on [--pre-vote by default](https://github.com/etcd-io/etcd/pull/12770). Should prevent disrupting RAFT leader by an individual member.
-- [ETCD_CLIENT_DEBUG env](https://github.com/etcd-io/etcd/pull/12786): Now supports log levels (debug, info, warn, error, dpanic, panic, fatal). Only when set, overrides application-wide grpc logging settings.
-- [Embed Etcd.Close()](https://github.com/etcd-io/etcd/pull/12828) needs to called exactly once and closes Etcd.Err() stream.
-- [Embed Etcd does not override global/grpc logger](https://github.com/etcd-io/etcd/pull/12861) be default any longer. If desired, please call `embed.Config::SetupGlobalLoggers()` explicitly.
-- [Embed Etcd custom logger should be configured using simpler builder `NewZapLoggerBuilder`](https://github.com/etcd-io/etcd/pull/12973).
-- Client errors of `context cancelled` or `context deadline exceeded` are exposed as `codes.Canceled` and `codes.DeadlineExceeded`, instead of `codes.Unknown`.
-
-
-### Storage format changes
-- [WAL log's snapshots persists raftpb.ConfState](https://github.com/etcd-io/etcd/pull/12735)
-- [Backend persists raftpb.ConfState](https://github.com/etcd-io/etcd/pull/12962) in the `meta` bucket `confState` key.
-- [Backend persists applied term](https://github.com/etcd-io/etcd/pull/) in the `meta` bucket.
-- Backend persists `downgrade` in the `cluster` bucket
-
-### Security
-
-- Add [`TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256` and `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256` to `etcd --cipher-suites`](https://github.com/etcd-io/etcd/pull/11864).
-- Changed [the format of WAL entries related to auth for not keeping password as a plain text](https://github.com/etcd-io/etcd/pull/11943).
-- Add third party [Security Audit Report](https://github.com/etcd-io/etcd/pull/12201).
-- A [log warning](https://github.com/etcd-io/etcd/pull/12242) is added when etcd uses any existing directory that has a permission different than 700 on Linux and 777 on Windows.
-
-### Metrics, Monitoring
-
-See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Deprecated `etcd_debugging_mvcc_db_total_size_in_bytes` Prometheus metric. Use `etcd_mvcc_db_total_size_in_bytes` instead.
-- Deprecated `etcd_debugging_mvcc_put_total` Prometheus metric. Use `etcd_mvcc_put_total` instead.
-- Deprecated `etcd_debugging_mvcc_delete_total` Prometheus metric. Use `etcd_mvcc_delete_total` instead.
-- Deprecated `etcd_debugging_mvcc_txn_total` Prometheus metric. Use `etcd_mvcc_txn_total` instead.
-- Deprecated `etcd_debugging_mvcc_range_total` Prometheus metric. Use `etcd_mvcc_range_total` instead.
-- Add [`etcd_debugging_mvcc_current_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric.
-- Add [`etcd_debugging_mvcc_compact_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric.
-- Change [`etcd_cluster_version`](https://github.com/etcd-io/etcd/pull/11254) Prometheus metrics to include only major and minor version.
-- Add [`etcd_debugging_mvcc_total_put_size_in_bytes`](https://github.com/etcd-io/etcd/pull/11374) Prometheus metric.
-- Add [`etcd_server_client_requests_total` with `"type"` and `"client_api_version"` labels](https://github.com/etcd-io/etcd/pull/11687).
-- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738).
-- Add [`etcd_debugging_auth_revision`](https://github.com/etcd-io/etcd/commit/f14d2a087f7b0fd6f7980b95b5e0b945109c95f3).
-- Add [`os_fd_used` and `os_fd_limit` to monitor current OS file descriptors](https://github.com/etcd-io/etcd/pull/12214).
-
-### etcd server
-
- - Add [`TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256` and `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256` to `etcd --cipher-suites`](https://github.com/etcd-io/etcd/pull/11864).
- - Automatically [create parent directory if it does not exist](https://github.com/etcd-io/etcd/pull/9626) (fix [issue#9609](https://github.com/etcd-io/etcd/issues/9609)).
- - v4.0 will configure `etcd --enable-v2=true --enable-v2v3=/aaa` to enable v2 API server that is backed by **v3 storage**.
-- [`etcd --backend-bbolt-freelist-type`] flag is now stable.
- - `etcd --experimental-backend-bbolt-freelist-type` has been deprecated.
-- Support [downgrade API](https://github.com/etcd-io/etcd/pull/11715).
-- Deprecate v2 apply on cluster version. [Use v3 request to set cluster version and recover cluster version from v3 backend](https://github.com/etcd-io/etcd/pull/11427).
-- [Fix corruption bug in defrag](https://github.com/etcd-io/etcd/pull/11613).
-- Fix [quorum protection logic when promoting a learner](https://github.com/etcd-io/etcd/pull/11640).
-- Improve [peer corruption checker](https://github.com/etcd-io/etcd/pull/11621) to work when peer mTLS is enabled.
-- Log [`[CLIENT-PORT]/health` check in server side](https://github.com/etcd-io/etcd/pull/11704).
-- Log [successful etcd server-side health check in debug level](https://github.com/etcd-io/etcd/pull/12677).
-- Improve [compaction performance when latest index is greater than 1-million](https://github.com/etcd-io/etcd/pull/11734).
-- [Refactor consistentindex](https://github.com/etcd-io/etcd/pull/11699).
-- [Add log when etcdserver failed to apply command](https://github.com/etcd-io/etcd/pull/11670).
-- Improve [count-only range performance](https://github.com/etcd-io/etcd/pull/11771).
-- Remove [redundant storage restore operation to shorten the startup time](https://github.com/etcd-io/etcd/pull/11779).
- - With 40 million key test data,it can shorten the startup time from 5 min to 2.5 min.
-- [Fix deadlock bug in mvcc](https://github.com/etcd-io/etcd/pull/11817).
-- Fix [inconsistency between WAL and server snapshot](https://github.com/etcd-io/etcd/pull/11888).
- - Previously, server restore fails if it had crashed after persisting raft hard state but before saving snapshot.
- - See https://github.com/etcd-io/etcd/issues/10219 for more.
- - Add [missing CRC checksum check in WAL validate method otherwise causes panic](https://github.com/etcd-io/etcd/pull/11924).
- - See https://github.com/etcd-io/etcd/issues/11918.
-- Improve logging around snapshot send and receive.
-- [Push down RangeOptions.limit argv into index tree to reduce memory overhead](https://github.com/etcd-io/etcd/pull/11990).
-- Add [reason field for /health response](https://github.com/etcd-io/etcd/pull/11983).
-- Add [exclude alarms from health check conditionally](https://github.com/etcd-io/etcd/pull/12880).
-- Add [`etcd --unsafe-no-fsync`](https://github.com/etcd-io/etcd/pull/11946) flag.
- - Setting the flag disables all uses of fsync, which is unsafe and will cause data loss. This flag makes it possible to run an etcd node for testing and development without placing lots of load on the file system.
-- Add [`etcd --auth-token-ttl`](https://github.com/etcd-io/etcd/pull/11980) flag to customize `simpleTokenTTL` settings.
-- Improve [`runtime.FDUsage` call pattern to reduce objects malloc of Memory Usage and CPU Usage](https://github.com/etcd-io/etcd/pull/11986).
-- Improve [mvcc.watchResponse channel Memory Usage](https://github.com/etcd-io/etcd/pull/11987).
-- Log [expensive request info in UnaryInterceptor](https://github.com/etcd-io/etcd/pull/12086).
-- [Fix invalid Go type in etcdserverpb](https://github.com/etcd-io/etcd/pull/12000).
-- [Improve healthcheck by using v3 range request and its corresponding timeout](https://github.com/etcd-io/etcd/pull/12195).
-- Add [`etcd --experimental-watch-progress-notify-interval`](https://github.com/etcd-io/etcd/pull/12216) flag to make watch progress notify interval configurable.
-- Fix [server panic in slow writes warnings](https://github.com/etcd-io/etcd/issues/12197).
- - Fixed via [PR#12238](https://github.com/etcd-io/etcd/pull/12238).
-- [Fix server panic](https://github.com/etcd-io/etcd/pull/12288) when force-new-cluster flag is enabled in a cluster which had learner node.
-- Add [`etcd --self-signed-cert-validity`](https://github.com/etcd-io/etcd/pull/12429) flag to support setting certificate expiration time.
- - Notice, certificates generated by etcd are valid for 1 year by default when specifying the auto-tls or peer-auto-tls option.
-- Add [`etcd --experimental-warning-apply-duration`](https://github.com/etcd-io/etcd/pull/12448) flag which allows apply duration threshold to be configurable.
-- Add [`etcd --experimental-memory-mlock`](https://github.com/etcd-io/etcd/pull/TODO) flag which prevents etcd memory pages to be swapped out.
-- Add [`etcd --socket-reuse-port`](https://github.com/etcd-io/etcd/pull/12702) flag
- - Setting this flag enables `SO_REUSEPORT` which allows rebind of a port already in use. User should take caution when using this flag to ensure flock is properly enforced.
-- Add [`etcd --socket-reuse-address`](https://github.com/etcd-io/etcd/pull/12702) flag
- - Setting this flag enables `SO_REUSEADDR` which allows binding to an address in `TIME_WAIT` state, improving etcd restart time.
-- Reduce [around 30% memory allocation by logging range response size without marshal](https://github.com/etcd-io/etcd/pull/12871).
-- `ETCD_VERIFY="all"` environment triggers [additional verification of consistency](https://github.com/etcd-io/etcd/pull/) of etcd data-dir files.
-- Add [`etcd --enable-log-rotation`](https://github.com/etcd-io/etcd/pull/12774) boolean flag which enables log rotation if true.
-- Add [`etcd --log-rotation-config-json`](https://github.com/etcd-io/etcd/pull/12774) flag which allows passthrough of JSON config to configure log rotation for a file output target.
-- Add experimental distributed tracing boolean flag [`--experimental-enable-distributed-tracing`](https://github.com/etcd-io/etcd/pull/12919) which enables tracing.
-- Add [`etcd --experimental-distributed-tracing-address`](https://github.com/etcd-io/etcd/pull/12919) string flag which allows configuring the OpenTelemetry collector address.
-- Add [`etcd --experimental-distributed-tracing-service-name`](https://github.com/etcd-io/etcd/pull/12919) string flag which allows changing the default "etcd" service name.
-- Add [`etcd --experimental-distributed-tracing-instance-id`](https://github.com/etcd-io/etcd/pull/12919) string flag which configures an instance ID, which must be unique per etcd instance.
-
-### Package `runtime`
-
-- Optimize [`runtime.FDUsage` by removing unnecessary sorting](https://github.com/etcd-io/etcd/pull/12214).
-
-### Package `embed`
-
-- Remove [`embed.Config.Debug`](https://github.com/etcd-io/etcd/pull/10947).
- - Use `embed.Config.LogLevel` instead.
-- Add [`embed.Config.ZapLoggerBuilder`](https://github.com/etcd-io/etcd/pull/11147) to allow creating a custom zap logger.
-- Replace [global `*zap.Logger` with etcd server logger object](https://github.com/etcd-io/etcd/pull/12212).
-- Add [`embed.Config.EnableLogRotation`](https://github.com/etcd-io/etcd/pull/12774) which enables log rotation if true.
-- Add [`embed.Config.LogRotationConfigJSON`](https://github.com/etcd-io/etcd/pull/12774) to allow passthrough of JSON config to configure log rotation for a file output target.
-- Add [`embed.Config.ExperimentalEnableDistributedTracing`](https://github.com/etcd-io/etcd/pull/12919) which enables experimental distributed tracing if true.
-- Add [`embed.Config.ExperimentalDistributedTracingAddress`](https://github.com/etcd-io/etcd/pull/12919) which allows overriding default collector address.
-- Add [`embed.Config.ExperimentalDistributedTracingServiceName`](https://github.com/etcd-io/etcd/pull/12919) which allows overriding default "etcd" service name.
-- Add [`embed.Config.ExperimentalDistributedTracingServiceInstanceID`](https://github.com/etcd-io/etcd/pull/12919) which allows configuring an instance ID, which must be uniquer per etcd instance.
-
-### Package `clientv3`
-
-- Remove [excessive watch cancel logging messages](https://github.com/etcd-io/etcd/pull/12187).
- - See [kubernetes/kubernetes#93450](https://github.com/kubernetes/kubernetes/issues/93450).
-- Add [`TryLock`](https://github.com/etcd-io/etcd/pull/11104) method to `clientv3/concurrency/Mutex`. A non-blocking method on `Mutex` which does not wait to get lock on the Mutex, returns immediately if Mutex is locked by another session.
-- Fix [client balancer failover against multiple endpoints](https://github.com/etcd-io/etcd/pull/11184).
- - Fix [`"kube-apiserver: failover on multi-member etcd cluster fails certificate check on DNS mismatch"`](https://github.com/kubernetes/kubernetes/issues/83028).
-- Fix [IPv6 endpoint parsing in client](https://github.com/etcd-io/etcd/pull/11211).
- - Fix ["1.16: etcd client does not parse IPv6 addresses correctly when members are joining" (kubernetes#83550)](https://github.com/kubernetes/kubernetes/issues/83550).
-- Fix [errors caused by grpc changing balancer/resolver API](https://github.com/etcd-io/etcd/pull/11564). This change is compatible with grpc >= [v1.26.0](https://github.com/grpc/grpc-go/releases/tag/v1.26.0), but is not compatible with < v1.26.0 version.
-- Use [ServerName as the authority](https://github.com/etcd-io/etcd/pull/11574) after bumping to grpc v1.26.0. Remove workaround in [#11184](https://github.com/etcd-io/etcd/pull/11184).
-- Fix [`"hasleader"` metadata embedding](https://github.com/etcd-io/etcd/pull/11687).
- - Previously, `clientv3.WithRequireLeader(ctx)` was overwriting existing context keys.
-- Fix [watch leak caused by lazy cancellation](https://github.com/etcd-io/etcd/pull/11850). When clients cancel their watches, a cancel request will now be immediately sent to the server instead of waiting for the next watch event.
-- Make sure [save snapshot downloads checksum for integrity checks](https://github.com/etcd-io/etcd/pull/11896).
-- Fix [auth token invalid after watch reconnects](https://github.com/etcd-io/etcd/pull/12264). Get AuthToken automatically when clientConn is ready.
-- Improve [clientv3:get AuthToken gracefully without extra connection](https://github.com/etcd-io/etcd/pull/12165).
-- Changed [clientv3 dialing code](https://github.com/etcd-io/etcd/pull/12671) to use grpc resolver API instead of custom balancer.
- - Endpoints self identify now as `etcd-endpoints://{id}/#initially={list of endpoints}` e.g. `etcd-endpoints://0xc0009d8540/#initially=[localhost:2079]`
-- Make sure [save snapshot downloads checksum for integrity checks](https://github.com/etcd-io/etcd/pull/11896).
-
-### Package `lease`
-
-- Fix [memory leak in follower nodes](https://github.com/etcd-io/etcd/pull/11731).
- - https://github.com/etcd-io/etcd/issues/11495
- - https://github.com/etcd-io/etcd/issues/11730
-- Make sure [grant/revoke won't be applied repeatedly after restarting etcd](https://github.com/etcd-io/etcd/pull/11935).
-
-### Package `wal`
-
-- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738).
-- Handle [out-of-range slice bound in `ReadAll` and entry limit in `decodeRecord`](https://github.com/etcd-io/etcd/pull/11793).
-
-### etcdctl v3
-
-- Fix `etcdctl member add` command to prevent potential timeout. ([PR#11194](https://github.com/etcd-io/etcd/pull/11194) and [PR#11638](https://github.com/etcd-io/etcd/pull/11638))
-- Add [`etcdctl watch --progress-notify`](https://github.com/etcd-io/etcd/pull/11462) flag.
-- Add [`etcdctl auth status`](https://github.com/etcd-io/etcd/pull/11536) command to check if authentication is enabled
-- Add [`etcdctl get --count-only`](https://github.com/etcd-io/etcd/pull/11743) flag for output type `fields`.
-- Add [`etcdctl member list -w=json --hex`](https://github.com/etcd-io/etcd/pull/11812) flag to print memberListResponse in hex format json.
-- Changed [`etcdctl lock exec-command`](https://github.com/etcd-io/etcd/pull/12829) to return exit code of exec-command.
-- [New tool: `etcdutl`](https://github.com/etcd-io/etcd/pull/12971) incorporated functionality of: `etcdctl snapshot status|restore`, `etcdctl backup`, `etcdctl defrag --data-dir ...`.
-- [ETCDCTL_API=2 `etcdctl migrate`](https://github.com/etcd-io/etcd/pull/12971) has been decomissioned. Use etcd <=v3.4 to restore v2 storage.
-
-### gRPC gateway
-
-- [gRPC gateway](https://github.com/grpc-ecosystem/grpc-gateway) only supports [`/v3`](TODO) endpoint.
- - Deprecated [`/v3beta`](https://github.com/etcd-io/etcd/pull/9298).
- - `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` does work in v3.5. Use `curl -L http://localhost:2379/v3/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` instead.
-
-### gRPC Proxy
-
-- Fix [`panic on error`](https://github.com/etcd-io/etcd/pull/11694) for metrics handler.
-- Add [gRPC keepalive related flags](https://github.com/etcd-io/etcd/pull/11711) `grpc-keepalive-min-time`, `grpc-keepalive-interval` and `grpc-keepalive-timeout`.
-- [Fix grpc watch proxy hangs when failed to cancel a watcher](https://github.com/etcd-io/etcd/pull/12030) .
-- Add [metrics handler for grpcproxy self](https://github.com/etcd-io/etcd/pull/12107).
-- Add [health handler for grpcproxy self](https://github.com/etcd-io/etcd/pull/12114).
-
-### Auth
-
-- Fix [NoPassword check when adding user through GRPC gateway](https://github.com/etcd-io/etcd/pull/11418) ([issue#11414](https://github.com/etcd-io/etcd/issues/11414))
-- Fix bug where [some auth related messages are logged at wrong level](https://github.com/etcd-io/etcd/pull/11586)
-- [Fix a data corruption bug by saving consistent index](https://github.com/etcd-io/etcd/pull/11652).
-- [Improve checkPassword performance](https://github.com/etcd-io/etcd/pull/11735).
-- [Add authRevision field in AuthStatus](https://github.com/etcd-io/etcd/pull/11659).
-
-### API
-
-- Add [`/v3/auth/status`](https://github.com/etcd-io/etcd/pull/11536) endpoint to check if authentication is enabled
-- [Add `Linearizable` field to `etcdserverpb.MemberListRequest`](https://github.com/etcd-io/etcd/pull/11639).
-- [Learner support Snapshot RPC](https://github.com/etcd-io/etcd/pull/12890/).
-
-### Package `netutil`
-
-- Remove [`netutil.DropPort/RecoverPort/SetLatency/RemoveLatency`](https://github.com/etcd-io/etcd/pull/12491).
- - These are not used anymore. They were only used for older versions of functional testing.
- - Removed to adhere to best security practices, minimize arbitrary shell invocation.
-
-### `tools/etcd-dump-metrics`
-
-- Implement [input validation to prevent arbitrary shell invocation](https://github.com/etcd-io/etcd/pull/12491).
-
-### Dependency
-
-- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) from [**`v1.23.0`**](https://github.com/grpc/grpc-go/releases/tag/v1.23.0) to [**`v1.37.0`**](https://github.com/grpc/grpc-go/releases/tag/v1.37.0).
-- Upgrade [`go.uber.org/zap`](https://github.com/uber-go/zap/releases) from [**`v1.14.1`**](https://github.com/uber-go/zap/releases/tag/v1.14.1) to [**`v1.16.0`**](https://github.com/uber-go/zap/releases/tag/v1.16.0).
-
-### Platforms
-
-- etcd now [officially supports `arm64`](https://github.com/etcd-io/etcd/pull/12929).
- - See https://github.com/etcd-io/etcd/pull/12928 for adding automated tests with `arm64` EC2 instances (Graviton 2).
- - See https://github.com/etcd-io/website/pull/273 for new platform support tier policies.
-
-### Release
-
-- Add s390x build support ([PR#11548](https://github.com/etcd-io/etcd/pull/11548) and [PR#11358](https://github.com/etcd-io/etcd/pull/11358))
-
-### Go
-
-- Require [*Go 1.16+*](https://github.com/etcd-io/etcd/pull/11110).
-- Compile with [*Go 1.16+*](https://golang.org/doc/devel/release.html#go1.16)
-- etcd uses [go modules](https://github.com/etcd-io/etcd/pull/12279) (instead of vendor dir) to track dependencies.
-
-### Project Governance
-
-- The etcd team has added, a well defined and openly discussed, project [governance](https://github.com/etcd-io/etcd/pull/11175).
-
-
-
-
diff --git a/CHANGELOG-2.3.md b/CHANGELOG/CHANGELOG-2.3.md
similarity index 100%
rename from CHANGELOG-2.3.md
rename to CHANGELOG/CHANGELOG-2.3.md
diff --git a/CHANGELOG-3.0.md b/CHANGELOG/CHANGELOG-3.0.md
similarity index 100%
rename from CHANGELOG-3.0.md
rename to CHANGELOG/CHANGELOG-3.0.md
diff --git a/CHANGELOG-3.1.md b/CHANGELOG/CHANGELOG-3.1.md
similarity index 99%
rename from CHANGELOG-3.1.md
rename to CHANGELOG/CHANGELOG-3.1.md
index 18765392ace..0c97517a7e2 100644
--- a/CHANGELOG-3.1.md
+++ b/CHANGELOG/CHANGELOG-3.1.md
@@ -1,10 +1,6 @@
-Previous change logs can be found at [CHANGELOG-3.0](https://github.com/etcd-io/etcd/blob/main/CHANGELOG-3.0.md).
-
-
-The minimum recommended etcd versions to run in **production** are 3.1.11+, 3.2.26+, and 3.3.11+.
-
+Previous change logs can be found at [CHANGELOG-3.0](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.0.md).
diff --git a/CHANGELOG-3.2.md b/CHANGELOG/CHANGELOG-3.2.md
similarity index 99%
rename from CHANGELOG-3.2.md
rename to CHANGELOG/CHANGELOG-3.2.md
index b6b2d2a39cb..095ff6e9f2a 100644
--- a/CHANGELOG-3.2.md
+++ b/CHANGELOG/CHANGELOG-3.2.md
@@ -1,10 +1,8 @@
-Previous change logs can be found at [CHANGELOG-3.1](https://github.com/etcd-io/etcd/blob/main/CHANGELOG-3.1.md).
+Previous change logs can be found at [CHANGELOG-3.1](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.1.md).
-The minimum recommended etcd versions to run in **production** are 3.2.28+, 3.3.18+, and 3.4.2+.
-
## v3.2.33 (TBD)
diff --git a/CHANGELOG-3.3.md b/CHANGELOG/CHANGELOG-3.3.md
similarity index 97%
rename from CHANGELOG-3.3.md
rename to CHANGELOG/CHANGELOG-3.3.md
index 1f914fad776..8addba112f6 100644
--- a/CHANGELOG-3.3.md
+++ b/CHANGELOG/CHANGELOG-3.3.md
@@ -1,15 +1,42 @@
-Previous change logs can be found at [CHANGELOG-3.2](https://github.com/etcd-io/etcd/blob/main/CHANGELOG-3.2.md).
+Previous change logs can be found at [CHANGELOG-3.2](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.2.md).
+
+
+## v3.3.27 (2021-10-15)
-The minimum recommended etcd versions to run in **production** are 3.2.28+, 3.3.18+, and 3.4.2+.
+See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.26...v3.3.27) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
+
+### Other
+- Updated [base image](https://github.com/etcd-io/etcd/pull/13386) from `debian:buster-v1.4.0` to `debian:bullseye-20210927` to fix the following critical CVEs:
+ - [CVE-2021-3711](https://nvd.nist.gov/vuln/detail/CVE-2021-3711): miscalculation of a buffer size in openssl's SM2 decryption
+ - [CVE-2021-35942](https://nvd.nist.gov/vuln/detail/CVE-2021-35942): integer overflow flaw in glibc
+ - [CVE-2019-9893](https://nvd.nist.gov/vuln/detail/CVE-2019-9893): incorrect syscall argument generation in libseccomp
+ - [CVE-2021-36159](https://nvd.nist.gov/vuln/detail/CVE-2021-36159): libfetch in apk-tools mishandles numeric strings in FTP and HTTP protocols to allow out of bound reads.
+## v3.3.26 (2021-10-03)
+
+See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.25...v3.3.26) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
+
+### Package `clientv3`
+
+- Fix [auth token invalid after watch reconnects](https://github.com/etcd-io/etcd/pull/12264). Get AuthToken automatically when clientConn is ready.
+
+### Package `fileutil`
+
+- Fix [constant](https://github.com/etcd-io/etcd/pull/12440) for linux locking.
+
+### Go
+
+- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
+
+
-## v3.3.25 (2020 TBD)
+## v3.3.25 (2020-08-24)
See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.23...v3.3.25) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
@@ -730,7 +757,7 @@ See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.2...v3.3.3) and
- For every compaction period or 1-hour, compactor uses the last revision that was fetched before compaction period, to discard historical data.
- The retention window of compaction period moves for every given compaction period or hour.
- For instance, when hourly writes are 100 and `etcd --auto-compaction-mode=periodic --auto-compaction-retention=24h`, `v3.2.x`, `v3.3.0`, `v3.3.1`, and `v3.3.2` compact revision 2400, 2640, and 2880 for every 2.4-hour, while `v3.3.3` *or later* compacts revision 2400, 2500, 2600 for every 1-hour.
- - Futhermore, when `etcd --auto-compaction-mode=periodic --auto-compaction-retention=30m` and writes per minute are about 1000, `v3.3.0`, `v3.3.1`, and `v3.3.2` compact revision 30000, 33000, and 36000, for every 3-minute, while `v3.3.3` *or later* compacts revision 30000, 60000, and 90000, for every 30-minute.
+ - Furthermore, when `etcd --auto-compaction-mode=periodic --auto-compaction-retention=30m` and writes per minute are about 1000, `v3.3.0`, `v3.3.1`, and `v3.3.2` compact revision 30000, 33000, and 36000, for every 3-minute, while `v3.3.3` *or later* compacts revision 30000, 60000, and 90000, for every 30-minute.
### Metrics, Monitoring
@@ -933,7 +960,7 @@ See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more deta
- Periodic compactor continues to record latest revisions for every 1/10 of given compaction period (e.g. 1-hour when `etcd --auto-compaction-mode=periodic --auto-compaction-retention=10h`).
- For every 1/10 of given compaction period, compactor uses the last revision that was fetched before compaction period, to discard historical data.
- The retention window of compaction period moves for every 1/10 of given compaction period.
- - For instance, when hourly writes are 100 and `--auto-compaction-retention=10`, v3.1 compacts revision 1000, 2000, and 3000 for every 10-hour, while v3.2.x, v3.3.0, v3.3.1, and v3.3.2 compact revision 1000, 1100, and 1200 for every 1-hour. Futhermore, when writes per minute are 1000, v3.3.0, v3.3.1, and v3.3.2 with `--auto-compaction-mode=periodic --auto-compaction-retention=30m` compact revision 30000, 33000, and 36000, for every 3-minute with more finer granularity.
+ - For instance, when hourly writes are 100 and `--auto-compaction-retention=10`, v3.1 compacts revision 1000, 2000, and 3000 for every 10-hour, while v3.2.x, v3.3.0, v3.3.1, and v3.3.2 compact revision 1000, 1100, and 1200 for every 1-hour. Furthermore, when writes per minute are 1000, v3.3.0, v3.3.1, and v3.3.2 with `--auto-compaction-mode=periodic --auto-compaction-retention=30m` compact revision 30000, 33000, and 36000, for every 3-minute with finer granularity.
- Whether compaction succeeds or not, this process repeats for every 1/10 of given compaction period. If compaction succeeds, it just removes compacted revision from historical revision records.
- Add [`etcd --grpc-keepalive-min-time`, `etcd --grpc-keepalive-interval`, `etcd --grpc-keepalive-timeout`](https://github.com/etcd-io/etcd/pull/8535) flags to configure server-side keepalive policies.
- Serve [`/health` endpoint as unhealthy](https://github.com/etcd-io/etcd/pull/8272) when [alarm (e.g. `NOSPACE`) is raised or there's no leader](https://github.com/etcd-io/etcd/issues/8207).
diff --git a/CHANGELOG-3.4.md b/CHANGELOG/CHANGELOG-3.4.md
similarity index 82%
rename from CHANGELOG-3.4.md
rename to CHANGELOG/CHANGELOG-3.4.md
index 63926da1eea..6619cc2805d 100644
--- a/CHANGELOG-3.4.md
+++ b/CHANGELOG/CHANGELOG-3.4.md
@@ -1,9 +1,359 @@
-Previous change logs can be found at [CHANGELOG-3.3](https://github.com/etcd-io/etcd/blob/main/CHANGELOG-3.3.md).
+Previous change logs can be found at [CHANGELOG-3.3](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.3.md).
+
+
+## v3.4.36 (TBC)
+
+### Package `clientv3`
+- Fix [runtime panic that occurs when KeepAlive is called with a Context implemented by an uncomparable type](https://github.com/etcd-io/etcd/pull/18936)
+
+### Dependencies
+- Compile binaries using [go 1.22.10](https://github.com/etcd-io/etcd/pull/19005)
+
+
+
+## v3.4.35 (2024-11-12)
+
+### etcd server
+- Fix [watchserver related goroutine leakage](https://github.com/etcd-io/etcd/pull/18785)
+- Fix [panicking occurred due to improper error handling during defragmentation](https://github.com/etcd-io/etcd/pull/18843)
+- Fix [close temp file(s) in case an error happens during defragmentation](https://github.com/etcd-io/etcd/pull/18855)
+
+### Dependencies
+- Compile binaries using [go 1.22.9](https://github.com/etcd-io/etcd/pull/18850).
+
+
+
+## v3.4.34 (2024-09-11)
+
+### etcd server
+- Fix [performance regression issue caused by the `ensureLeadership` in lease renew](https://github.com/etcd-io/etcd/pull/18440).
+- [Keep the tombstone during compaction if it happens to be the compaction revision](https://github.com/etcd-io/etcd/pull/18475)
+
+### Package clientv3
+- [Print gRPC metadata in guaranteed order using the official go fmt pkg](https://github.com/etcd-io/etcd/pull/18311).
-The minimum recommended etcd versions to run in **production** are 3.2.28+, 3.3.18+, and 3.4.2+.
+
+### Dependencies
+- Compile binaries using [go 1.22.7](https://github.com/etcd-io/etcd/pull/18549).
+- Upgrade [bbolt to 1.3.11](https://github.com/etcd-io/etcd/pull/18488).
+
+
+
+## v3.4.33 (2024-06-13)
+
+### etcd grpc-proxy
+- Fix [Memberlist results not updated when proxy node down](https://github.com/etcd-io/etcd/pull/17896).
+
+### Dependencies
+- Compile binaries using go [1.21.11](https://github.com/etcd-io/etcd/pull/18130).
+- Upgrade [bbolt to 1.3.10](https://github.com/etcd-io/etcd/pull/17945).
+
+
+
+## v3.4.32 (2024-04-25)
+
+### etcd server
+- Fix [LeaseTimeToLive returns error if leader changed](https://github.com/etcd-io/etcd/pull/17705).
+- Fix [ignore raft messages if member id mismatch](https://github.com/etcd-io/etcd/pull/17814).
+- Update [the compaction log when bootstrap](https://github.com/etcd-io/etcd/pull/17831).
+- [Allow new server to join 3.5 cluster if `next-cluster-version-compatible=true`](https://github.com/etcd-io/etcd/pull/17665)
+- [Allow updating the cluster version when downgrading from 3.5](https://github.com/etcd-io/etcd/pull/17821).
+- Fix [Revision decreasing after panic during compaction](https://github.com/etcd-io/etcd/pull/17864)
+
+### Package `clientv3`
+- Add [requests retry when receiving ErrGPRCNotSupportedForLearner and endpoints > 1](https://github.com/etcd-io/etcd/pull/17692).
+- Fix [initialization for epMu in client context](https://github.com/etcd-io/etcd/pull/17714).
+
+### Dependencies
+- Compile binaries using [go 1.21.9](https://github.com/etcd-io/etcd/pull/17709).
+
+
+
+## v3.4.31 (2024-03-21)
+
+### etcd server
+- Add [mvcc: print backend database size and size in use in compaction logs](https://github.com/etcd-io/etcd/pull/17436).
+- Fix leases wrongly revoked by the leader by [ignoring old leader's leases revoking request](https://github.com/etcd-io/etcd/pull/17465).
+- Fix [no progress notification being sent for watch that doesn't get any events](https://github.com/etcd-io/etcd/pull/17567).
+- Fix [watch event loss after compaction](https://github.com/etcd-io/etcd/pull/17610).
+- Add `next-cluster-version-compatible` flag to [allow downgrade from 3.5](https://github.com/etcd-io/etcd/pull/17330).
+
+### Package `clientv3`
+- Add [client backoff and retry config options](https://github.com/etcd-io/etcd/pull/17369).
+
+### Dependencies
+- Upgrade [bbolt to 1.3.9](https://github.com/etcd-io/etcd/pull/17484).
+- Compile binaries using [go 1.21.8](https://github.com/etcd-io/etcd/pull/17538).
+- Upgrade [google.golang.org/protobuf to v1.33.0 to address CVE-2024-24786](https://github.com/etcd-io/etcd/pull/17554).
+- Upgrade github.com/sirupsen/logrus to v1.9.3 to address [PRISMA-2023-0056](https://github.com/etcd-io/etcd/pull/17580).
+
+### Others
+- [Make CGO_ENABLED configurable](https://github.com/etcd-io/etcd/pull/17422).
+
+
+
+## v3.4.30 (2024-01-31)
+
+### etcd server
+- Fix [nil pointer panicking due to using the wrong log library](https://github.com/etcd-io/etcd/pull/17270)
+
+### Dependencies
+- Compile binaries using go [1.20.13](https://github.com/etcd-io/etcd/pull/17276).
+- Upgrade [golang.org/x/crypto to v0.17+ to address CVE-2023-48795](https://github.com/etcd-io/etcd/pull/17347).
+
+
+
+## v3.4.29 (2024-01-09)
+
+### etcd server
+- [Disable following HTTP redirects in peer communication](https://github.com/etcd-io/etcd/pull/17112)
+- [Add livez/readyz HTTP endpoints](https://github.com/etcd-io/etcd/pull/17128)
+- Fix [Check if be is nil to avoid panic when be is overridden with nil](https://github.com/etcd-io/etcd/pull/17154)
+- Fix [Add missing experimental-enable-lease-checkpoint-persist flag in etcd help](https://github.com/etcd-io/etcd/pull/17189)
+- Fix [Don't flock snapshot files](https://github.com/etcd-io/etcd/pull/17208)
+
+### Dependencies
+- Compile binaries using go [1.20.12](https://github.com/etcd-io/etcd/pull/17076).
+
+
+
+## v3.4.28 (2023-11-23)
+
+### etcd server
+- Improve [Skip getting authInfo from incoming context when auth is disabled](https://github.com/etcd-io/etcd/pull/16240)
+- Use [the default write scheduler](https://github.com/etcd-io/etcd/pull/16782) since golang.org/x/net@v0.11.0 started using round-robin scheduler.
+- Add [cluster ID check during data corruption detection to prevent false alarm](https://github.com/etcd-io/etcd/issues/15548).
+- Add [Learner support Snapshot RPC](https://github.com/etcd-io/etcd/pull/16990/).
+
+### Package `clientv3`
+- Fix [Reset auth token when failing to authenticate due to auth being disabled](https://github.com/etcd-io/etcd/pull/16240).
+- [Simplify grpc dialer usage](https://github.com/etcd-io/etcd/issues/11519).
+- [Replace balancer with upstream grpc solution](https://github.com/etcd-io/etcd/pull/16844).
+- Fix [race condition when accessing cfg.Endpoints in dial()](https://github.com/etcd-io/etcd/pull/16857).
+- Fix [invalid authority header issue in single endpoint scenario](https://github.com/etcd-io/etcd/pull/16988).
+
+### Dependencies
+- Compile binaries using [go 1.20.11](https://github.com/etcd-io/etcd/pull/16916).
+- Upgrade [bbolt to 1.3.8](https://github.com/etcd-io/etcd/pull/16834).
+- Upgrade gRPC to 1.58.3 in https://github.com/etcd-io/etcd/pull/16997 and https://github.com/etcd-io/etcd/pull/16999. Note that gRPC server will reject requests with connection header (refer to https://github.com/grpc/grpc-go/pull/4803).
+
+
+
+## v3.4.27 (2023-07-11)
+
+### etcd server
+- Fix [corruption check may get a `ErrCompacted` error when server has just been compacted](https://github.com/etcd-io/etcd/pull/16047)
+- Improve [Lease put performance for the case that auth is disabled or the user is admin](https://github.com/etcd-io/etcd/pull/16020)
+- Fix [embed: nil pointer dereference when stopServer](https://github.com/etcd-io/etcd/pull/16195)
+
+### etcdctl v3
+- Add [optional --bump-revision and --mark-compacted flag to etcdctl snapshot restore operation](https://github.com/etcd-io/etcd/pull/16193).
+
+### Dependencies
+- Compile binaries using [go 1.19.10](https://github.com/etcd-io/etcd/pull/16038).
+
+
+
+## v3.4.26 (2023-05-12)
+
+### etcd server
+- Fix [LeaseTimeToLive API may return keys to clients which have no read permission on the keys](https://github.com/etcd-io/etcd/pull/15814).
+
+
+### Dependencies
+- Compile binaries using [go 1.19.9](https://github.com/etcd-io/etcd/pull/15823)
+
+
+
+## v3.4.25 (2023-04-14)
+
+### etcd server
+- Add [`etcd --tls-min-version --tls-max-version`](https://github.com/etcd-io/etcd/pull/15486) to enable support for TLS 1.3.
+- Add [`etcd --listen-client-http-urls`](https://github.com/etcd-io/etcd/pull/15620) flag to support separating http server from grpc one, thus giving full immunity to [watch stream starvation under high read load](https://github.com/etcd-io/etcd/issues/15402).
+- Change [http2 frame scheduler to random algorithm](https://github.com/etcd-io/etcd/pull/15478)
+- Fix [server/embed: fix data race when starting both secure & insecure gRPC servers on the same address](https://github.com/etcd-io/etcd/pull/15518)
+- Fix [server/auth: disallow creating empty permission ranges](https://github.com/etcd-io/etcd/pull/15621)
+- Fix [wsproxy did not print log in JSON format](https://github.com/etcd-io/etcd/pull/15662).
+- Fix [CVE-2021-28235](https://nvd.nist.gov/vuln/detail/CVE-2021-28235) by [clearing password after authenticating the user](https://github.com/etcd-io/etcd/pull/15655).
+- Fix [etcdserver may panic when parsing a JWT token without username or revision](https://github.com/etcd-io/etcd/pull/15677).
+- Fix [Watch response traveling back in time when reconnecting member downloads snapshot from the leader](https://github.com/etcd-io/etcd/pull/15520).
+- Fix [Requested watcher progress notifications are not synchronised with stream](https://github.com/etcd-io/etcd/pull/15697).
+
+### Package `clientv3`
+- Reverted the fix to [auth invalid token and old revision errors in watch](https://github.com/etcd-io/etcd/pull/15542).
+
+### Dependencies
+- Recommend [Go 1.19+](https://github.com/etcd-io/etcd/pull/15337).
+- Compile binaries using [Go 1.19.8](https://github.com/etcd-io/etcd/pull/15652).
+- Upgrade [golang.org/x/net to v0.7.0](https://github.com/etcd-io/etcd/pull/15333).
+
+### Docker image
+- Fix [etcd docker images all tagged with amd64 architecture](https://github.com/etcd-io/etcd/pull/15681)
+
+
+
+## v3.4.24 (2023-02-16)
+
+### etcd server
+- Fix [etcdserver might promote a non-started learner](https://github.com/etcd-io/etcd/pull/15097).
+- Improve [mvcc: reduce count-only range overhead](https://github.com/etcd-io/etcd/pull/15099)
+- Improve [mvcc: push down RangeOptions.limit argv into index tree to reduce memory overhead](https://github.com/etcd-io/etcd/pull/15137)
+- Improve [server: set multiple concurrentReadTx instances share one txReadBuffer](https://github.com/etcd-io/etcd/pull/15195)
+- Fix [aligning zap log timestamp resolution to microseconds](https://github.com/etcd-io/etcd/pull/15241). Etcd now uses zap timestamp format: `2006-01-02T15:04:05.999999Z0700` (microsecond instead of milliseconds precision).
+- Fix [consistently format IPv6 addresses for comparison](https://github.com/etcd-io/etcd/pull/15188)
+
+### Package `clientv3`
+- Fix [etcd might send duplicated events to watch clients](https://github.com/etcd-io/etcd/pull/15275).
+
+### Dependencies
+- Upgrade [bbolt to v1.3.7](https://github.com/etcd-io/etcd/pull/15223).
+- Upgrade [github.com/grpc-ecosystem/grpc-gateway](https://github.com/grpc-ecosystem/grpc-gateway/releases) from [v1.9.5](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.9.5) to [v1.11.0](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.11.0).
+
+### Docker image
+- Updated [base image from base-debian11 to static-debian11 and removed dependency on busybox](https://github.com/etcd-io/etcd/pull/15038).
+
+
+
+## v3.4.23 (2022-12-21)
+
+### etcd server
+- Fix [Remove memberID from data corrupt alarm](https://github.com/etcd-io/etcd/pull/14853).
+- Fix [nil pointer panic for readonly txn due to nil response](https://github.com/etcd-io/etcd/pull/14900).
+- Bumped [some dependencies](https://github.com/etcd-io/etcd/pull/15019) to address some HIGH Vulnerabilities.
+
+### Package `clientv3`
+- Fix [Refreshing token on CommonName based authentication causes segmentation violation in client](https://github.com/etcd-io/etcd/pull/14792).
+
+### Dependencies
+- Recommend [Go 1.17+](https://github.com/etcd-io/etcd/pull/15019).
+- Compile binaries using [Go 1.17.13](https://github.com/etcd-io/etcd/pull/15019).
+
+### Docker image
+- Use [distroless base image](https://github.com/etcd-io/etcd/pull/15017) to address critical Vulnerabilities.
+
+
+
+## v3.4.22 (2022-11-02)
+
+### etcd server
+- Fix [memberID equals zero in corruption alarm](https://github.com/etcd-io/etcd/pull/14530)
+- Fix [auth invalid token and old revision errors in watch](https://github.com/etcd-io/etcd/pull/14548)
+- Fix [avoid closing a watch with ID 0 incorrectly](https://github.com/etcd-io/etcd/pull/14562)
+- Fix [auth: fix data consistency issue caused by recovery from snapshot](https://github.com/etcd-io/etcd/pull/14649)
+
+### Package `netutil`
+- Fix [netutil: add url comparison without resolver to URLStringsEqual](https://github.com/etcd-io/etcd/pull/14577)
+
+### Package `clientv3`
+- Fix [Add backoff before retry when watch stream returns unavailable](https://github.com/etcd-io/etcd/pull/14581).
+
+### etcd grpc-proxy
+- Add [`etcd grpc-proxy start --listen-cipher-suites`](https://github.com/etcd-io/etcd/pull/14601) flag to support adding configurable cipher list.
+
+
+
+## v3.4.21 (2022-09-15)
+
+### etcd server
+- Fix [Durability API guarantee broken in single node cluster](https://github.com/etcd-io/etcd/pull/14423)
+- Fix [Panic due to nil log object](https://github.com/etcd-io/etcd/pull/14420)
+- Fix [authentication data not loaded on member startup](https://github.com/etcd-io/etcd/pull/14410)
+
+### etcdctl v3
+
+- Fix [etcdctl move-leader may fail for multiple endpoints](https://github.com/etcd-io/etcd/pull/14441)
+
+
+
+## v3.4.20 (2022-08-06)
+
+### Package `clientv3`
+
+- Fix [filter learners members during autosync](https://github.com/etcd-io/etcd/pull/14236).
+
+### etcd server
+- Add [`etcd --max-concurrent-streams`](https://github.com/etcd-io/etcd/pull/14251) flag to configure the max concurrent streams each client can open at a time, and defaults to math.MaxUint32.
+- Add [`etcd --experimental-enable-lease-checkpoint-persist`](https://github.com/etcd-io/etcd/pull/14253) flag to enable checkpoint persisting.
+- Fix [Lease checkpoints don't prevent to reset ttl on leader change](https://github.com/etcd-io/etcd/pull/14253), requires enabling checkpoint persisting.
+- Fix [Protect rangePermCache with a RW lock correctly](https://github.com/etcd-io/etcd/pull/14230)
+- Fix [raft: postpone MsgReadIndex until first commit in the term](https://github.com/etcd-io/etcd/pull/14258)
+- Fix [etcdserver: resend ReadIndex request on empty apply request](https://github.com/etcd-io/etcd/pull/14269)
+- Fix [remove temp files in snap dir when etcdserver starting](https://github.com/etcd-io/etcd/pull/14246)
+- Fix [Etcdserver is still in progress of processing LeaseGrantRequest when it receives a LeaseKeepAliveRequest on the same leaseID](https://github.com/etcd-io/etcd/pull/14177)
+- Fix [Grant lease with negative ID can possibly cause db out of sync](https://github.com/etcd-io/etcd/pull/14239)
+- Fix [Allow non mutating requests pass through quotaKVServer when NOSPACE](https://github.com/etcd-io/etcd/pull/14254)
+
+
+
+## v3.4.19 (2022-07-12)
+
+See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.18...v3.4.19) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
+
+### etcd server
+- Fix [exclude the same alarm type activated by multiple peers](https://github.com/etcd-io/etcd/pull/13475).
+- Fix [Defrag unsets backend options](https://github.com/etcd-io/etcd/pull/13713).
+- Fix [lease leak issue due to tokenProvider isn't enabled when restoring auth store from a snapshot](https://github.com/etcd-io/etcd/pull/13206).
+- Fix [the race condition between goroutine and channel on the same leases to be revoked](https://github.com/etcd-io/etcd/pull/14150).
+- Fix [lessor may continue to schedule checkpoint after stepping down leader role](https://github.com/etcd-io/etcd/pull/14150).
+
+### Package `clientv3`
+- Fix [a bug of not refreshing expired tokens](https://github.com/etcd-io/etcd/pull/13999).
+
+### Dependency
+- Upgrade [go.etcd.io/bbolt](https://github.com/etcd-io/bbolt/releases) from [v1.3.3](https://github.com/etcd-io/bbolt/releases/tag/v1.3.3) to [v1.3.6](https://github.com/etcd-io/bbolt/releases/tag/v1.3.6).
+
+### Security
+- Upgrade [golang.org/x/crypto](https://github.com/etcd-io/etcd/pull/14179) to v0.0.0-20220411220226-7b82a4e95df4 to address [CVE-2022-27191 ](https://github.com/advisories/GHSA-8c26-wmh5-6g9v).
+- Upgrade [gopkg.in/yaml.v2](https://github.com/etcd-io/etcd/pull/14192) to v2.4.0 to address [CVE-2019-11254](https://github.com/advisories/GHSA-wxc4-f4m6-wwqv).
+
+### Go
+- Require [Go 1.16+](https://github.com/etcd-io/etcd/pull/14136).
+- Compile with [Go 1.16+](https://go.dev/doc/devel/release#go1.16).
+- etcd uses [go modules](https://github.com/etcd-io/etcd/pull/14136) (instead of vendor dir) to track dependencies.
+
+
+
+## v3.4.18 (2021-10-15)
+
+See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.17...v3.4.18) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
+
+### Metrics, Monitoring
+
+See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release.
+
+- Add [`etcd_disk_defrag_inflight`](https://github.com/etcd-io/etcd/pull/13397).
+
+### Other
+
+- Updated [base image](https://github.com/etcd-io/etcd/pull/13386) from `debian:buster-v1.4.0` to `debian:bullseye-20210927` to fix the following critical CVEs:
+ - [CVE-2021-3711](https://nvd.nist.gov/vuln/detail/CVE-2021-3711): miscalculation of a buffer size in openssl's SM2 decryption
+ - [CVE-2021-35942](https://nvd.nist.gov/vuln/detail/CVE-2021-35942): integer overflow flaw in glibc
+ - [CVE-2019-9893](https://nvd.nist.gov/vuln/detail/CVE-2019-9893): incorrect syscall argument generation in libseccomp
+ - [CVE-2021-36159](https://nvd.nist.gov/vuln/detail/CVE-2021-36159): libfetch in apk-tools mishandles numeric strings in FTP and HTTP protocols to allow out-of-bounds reads.
+
+
+
+## v3.4.17 (2021-10-03)
+
+See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.16...v3.4.17) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
+
+### `etcdctl`
+
+- Fix [etcdctl check datascale command](https://github.com/etcd-io/etcd/pull/11896) to work with https endpoints.
+
+### gRPC gateway
+
+- Add [`MaxCallRecvMsgSize`](https://github.com/etcd-io/etcd/pull/13077) support for http client.
+
+### Dependency
+
+- Replace [`github.com/dgrijalva/jwt-go` with `github.com/golang-jwt/jwt`](https://github.com/etcd-io/etcd/pull/13378).
+
+### Go
+
+- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
@@ -141,6 +491,7 @@ See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.10...v3.4.11) an
### Metrics, Monitoring
- Add [`os_fd_used` and `os_fd_limit` to monitor current OS file descriptors](https://github.com/etcd-io/etcd/pull/12214).
+- Add [`etcd_disk_defrag_inflight`](https://github.com/etcd-io/etcd/pull/13397).
### Go
@@ -492,7 +843,7 @@ See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.0...v3.4.0) and
- For every compaction period or 1-hour, compactor uses the last revision that was fetched before compaction period, to discard historical data.
- The retention window of compaction period moves for every given compaction period or hour.
- For instance, when hourly writes are 100 and `etcd --auto-compaction-mode=periodic --auto-compaction-retention=24h`, `v3.2.x`, `v3.3.0`, `v3.3.1`, and `v3.3.2` compact revision 2400, 2640, and 2880 for every 2.4-hour, while `v3.3.3` *or later* compacts revision 2400, 2500, 2600 for every 1-hour.
- - Futhermore, when `etcd --auto-compaction-mode=periodic --auto-compaction-retention=30m` and writes per minute are about 1000, `v3.3.0`, `v3.3.1`, and `v3.3.2` compact revision 30000, 33000, and 36000, for every 3-minute, while `v3.3.3` *or later* compacts revision 30000, 60000, and 90000, for every 30-minute.
+ - Furthermore, when `etcd --auto-compaction-mode=periodic --auto-compaction-retention=30m` and writes per minute are about 1000, `v3.3.0`, `v3.3.1`, and `v3.3.2` compact revision 30000, 33000, and 36000, for every 3-minute, while `v3.3.3` *or later* compacts revision 30000, 60000, and 90000, for every 30-minute.
- Improve [lease expire/revoke operation performance](https://github.com/etcd-io/etcd/pull/9418), address [lease scalability issue](https://github.com/etcd-io/etcd/issues/9496).
- Make [Lease `Lookup` non-blocking with concurrent `Grant`/`Revoke`](https://github.com/etcd-io/etcd/pull/9229).
- Make etcd server return `raft.ErrProposalDropped` on internal Raft proposal drop in [v3 applier](https://github.com/etcd-io/etcd/pull/9549) and [v2 applier](https://github.com/etcd-io/etcd/pull/9558).
@@ -838,7 +1189,7 @@ See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more deta
- If watch response events exceed this server-side request limit and watch request is created with `fragment` field `true`, the server will split watch events into a set of chunks, each of which is a subset of watch events below server-side request limit.
- Useful when client-side has limited bandwidths.
- For example, watch response contains 10 events, where each event is 1 MiB. And server `etcd --max-request-bytes` flag value is 1 MiB. Then, server will send 10 separate fragmented events to the client.
- - For example, watch response contains 5 events, where each event is 2 MiB. And server `etcd --max-request-bytes` flag value is 1 MiB and `clientv3.Config.MaxCallRecvMsgSize` is 1 MiB. Then, server will try to send 5 separate fragmented events to the client, and the client will error with `"code = ResourceExhausted desc = grpc: received message larger than max (...)"`.
+ - For example, watch response contains 5 events, where each event is 2 MiB. And server `etcd --max-request-bytes` flag value is 1 MiB and `clientv3.Config.MaxCallRecvMsgSize` is 1 MiB. Then, server will try to send 5 separate fragmented events to the client, and the client will error with `"code = ResourceExhausted desc = grpc: received message larger than max (...)"`. (See the illustrative `clientv3` sketch below.)
- Client must implement fragmented watch event merge (which `clientv3` does in etcd v3.4).
- Add [`raftAppliedIndex` field to `etcdserverpb.StatusResponse`](https://github.com/etcd-io/etcd/pull/9176) for current Raft applied index.
- Add [`errors` field to `etcdserverpb.StatusResponse`](https://github.com/etcd-io/etcd/pull/9206) for server-side error.
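+
+Illustrative only (not part of the upstream release notes): a minimal `clientv3` sketch of the fragmentation settings discussed above, assuming a placeholder endpoint `localhost:2379`, a placeholder key prefix `foo`, and the pre-module import path `go.etcd.io/etcd/clientv3` used by the v3.4 line:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"go.etcd.io/etcd/clientv3"
+)
+
+func main() {
+	// Cap the client's gRPC receive size at 1 MiB; responses larger than this
+	// must be fragmented by the server before they can be delivered.
+	cli, err := clientv3.New(clientv3.Config{
+		Endpoints:          []string{"localhost:2379"}, // placeholder endpoint
+		DialTimeout:        5 * time.Second,
+		MaxCallRecvMsgSize: 1 * 1024 * 1024,
+	})
+	if err != nil {
+		panic(err)
+	}
+	defer cli.Close()
+
+	// WithFragment asks the server to split oversized watch responses into
+	// chunks that fit the limit; clientv3 (v3.4+) reassembles them for the caller.
+	for resp := range cli.Watch(context.Background(), "foo", clientv3.WithPrefix(), clientv3.WithFragment()) {
+		for _, ev := range resp.Events {
+			fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
+		}
+	}
+}
+```
+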
@@ -972,7 +1323,7 @@ Note: **v3.5 will deprecate `etcd --log-package-levels` flag for `capnslog`**; `
- Now [`(r *raft) Step` returns `raft.ErrProposalDropped`](https://github.com/etcd-io/etcd/pull/9137) if a proposal has been ignored.
- e.g. a node is removed from cluster, or [`raftpb.MsgProp` arrives at current leader while there is an ongoing leadership transfer](https://github.com/etcd-io/etcd/issues/8975).
- Improve [Raft `becomeLeader` and `stepLeader`](https://github.com/etcd-io/etcd/pull/9073) by keeping track of latest `pb.EntryConfChange` index.
- - Previously record `pendingConf` boolean field scanning the entire tail of the log, which can delay hearbeat send.
+ - Previously record `pendingConf` boolean field scanning the entire tail of the log, which can delay heartbeat send.
- Fix [missing learner nodes on `(n *node) ApplyConfChange`](https://github.com/etcd-io/etcd/pull/9116).
- Add [`raft.Config.MaxUncommittedEntriesSize`](https://github.com/etcd-io/etcd/pull/10167) to limit the total size of the uncommitted entries in bytes.
- Once exceeded, raft returns `raft.ErrProposalDropped` error.
diff --git a/CHANGELOG/CHANGELOG-3.5.md b/CHANGELOG/CHANGELOG-3.5.md
new file mode 100644
index 00000000000..4512e588002
--- /dev/null
+++ b/CHANGELOG/CHANGELOG-3.5.md
@@ -0,0 +1,666 @@
+
+
+Previous change logs can be found at [CHANGELOG-3.4](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.4.md).
+
+
+## v3.5.18 (TBC)
+
+### etcd server
+- [Print warning messages if any of the deprecated v2store related flags is set](https://github.com/etcd-io/etcd/pull/18999)
+
+### Package `clientv3`
+- Fix [runtime panic that occurs when KeepAlive is called with a Context implemented by an uncomparable type](https://github.com/etcd-io/etcd/pull/18937)
+
+
+### etcd grpc-proxy
+- Add [`tls min/max version to grpc proxy`](https://github.com/etcd-io/etcd/pull/18829) to support setting TLS min and max version.
+
+### Dependencies
+- Bump [golang-jwt/jwt to 4.5.1 to address GO-2024-3250](https://github.com/etcd-io/etcd/pull/18899).
+- Compile binaries using [go 1.22.10](https://github.com/etcd-io/etcd/pull/19004).
+
+
+
+## v3.5.17 (2024-11-12)
+
+### etcd server
+- Fix [watchserver related goroutine leakage](https://github.com/etcd-io/etcd/pull/18784)
+- Fix [risk of a partial write txn being applied](https://github.com/etcd-io/etcd/pull/18799)
+- Fix [panicking occurred due to improper error handling during defragmentation](https://github.com/etcd-io/etcd/pull/18842)
+- Fix [close temp file(s) in case an error happens during defragmentation](https://github.com/etcd-io/etcd/pull/18854)
+
+### Dependencies
+- Compile binaries using [go 1.22.9](https://github.com/etcd-io/etcd/pull/18849).
+
+
+
+## v3.5.16 (2024-09-10)
+
+### etcd server
+- Fix [performance regression issue caused by the `ensureLeadership` in lease renew](https://github.com/etcd-io/etcd/pull/18439).
+- [Keep the tombstone during compaction if it happens to be the compaction revision](https://github.com/etcd-io/etcd/pull/18474)
+- Add [`etcd --experimental-compaction-sleep-interval`](https://github.com/etcd-io/etcd/pull/18514) flag to control the sleep interval between each compaction batch.
+
+### Dependencies
+- Compile binaries using [go 1.22.7](https://github.com/etcd-io/etcd/pull/18550).
+- Upgrade [bbolt to v1.3.11](https://github.com/etcd-io/etcd/pull/18489).
+
+
+
+## v3.5.15 (2024-07-19)
+
+### etcd server
+- Fix [add prometheus metric registration for metric `etcd_disk_wal_write_duration_seconds`](https://github.com/etcd-io/etcd/pull/18174).
+- Add [Support multiple values for allowed client and peer TLS identities](https://github.com/etcd-io/etcd/pull/18160)
+- Fix [noisy logs from simple auth token expiration by reducing log level to debug](https://github.com/etcd-io/etcd/pull/18245)
+- [Differentiate the warning message for rejected client and peer connections](https://github.com/etcd-io/etcd/pull/18319)
+
+### Package clientv3
+- [Print gRPC metadata in guaranteed order using the official go fmt pkg](https://github.com/etcd-io/etcd/pull/18312).
+
+### Dependencies
+- Compile binaries using [go 1.21.12](https://github.com/etcd-io/etcd/pull/18271).
+- [Fully address CVE-2023-45288 and fix govulncheck CI check](https://github.com/etcd-io/etcd/pull/18170)
+
+## v3.5.14 (2024-05-29)
+
+### etcd server
+- Fix [LeaseTimeToLive returns error if leader changed](https://github.com/etcd-io/etcd/pull/17704).
+- Add [metrics `etcd_disk_wal_write_duration_seconds`](https://github.com/etcd-io/etcd/pull/17616).
+- Fix [ignore raft messages if member id mismatch](https://github.com/etcd-io/etcd/pull/17813).
+- Update [the compaction log when bootstrap](https://github.com/etcd-io/etcd/pull/17830).
+- Fix [Revision decreasing after panic during compaction](https://github.com/etcd-io/etcd/pull/17865)
+- Add [`etcd --experimental-stop-grpc-service-on-defrag`](https://github.com/etcd-io/etcd/pull/17914) to enable client failover on defrag.
+- Add [support for `AllowedCN` and `AllowedHostname` through config file](https://github.com/etcd-io/etcd/pull/18063)
+
+### etcdutl v3
+- Add [`--initial-memory-map-size` to `snapshot restore` to avoid memory allocation issues](https://github.com/etcd-io/etcd/pull/17977)
+
+### Package `clientv3`
+- Add [requests retry when receiving ErrGPRCNotSupportedForLearner and endpoints > 1](https://github.com/etcd-io/etcd/pull/17641).
+- Fix [initialization for mu in client context](https://github.com/etcd-io/etcd/pull/17699).
+
+### Dependencies
+- Compile binaries using [go 1.21.10](https://github.com/etcd-io/etcd/pull/17980).
+- Upgrade [bbolt to v1.3.10](https://github.com/etcd-io/etcd/pull/17943).
+
+
+
+## v3.5.13 (2024-03-29)
+
+### etcd server
+- Fix leases wrongly revoked by the leader by [ignoring old leader's leases revoking request](https://github.com/etcd-io/etcd/pull/17425).
+- Fix [no progress notification being sent for watch that doesn't get any events](https://github.com/etcd-io/etcd/pull/17566).
+- Fix [watch event loss after compaction](https://github.com/etcd-io/etcd/pull/17612).
+
+### Package `clientv3`
+- Add [client backoff and retry config options](https://github.com/etcd-io/etcd/pull/17363).
+- [Ignore SetKeepAlivePeriod errors on OpenBSD](https://github.com/etcd-io/etcd/pull/17387).
+- [Support unix/unixs socket in client or peer URLs](https://github.com/etcd-io/etcd/pull/15940)
+
+### gRPC Proxy
+- Add [three flags (see below) for grpc-proxy](https://github.com/etcd-io/etcd/pull/17447)
+ - `--dial-keepalive-time`
+ - `--dial-keepalive-timeout`
+ - `--permit-without-stream`
+
+### Dependencies
+- Upgrade [bbolt to v1.3.9](https://github.com/etcd-io/etcd/pull/17483).
+- Compile binaries using [go 1.21.8](https://github.com/etcd-io/etcd/pull/17537).
+- Upgrade [google.golang.org/protobuf to v1.33.0 to address CVE-2024-24786](https://github.com/etcd-io/etcd/pull/17553).
+- Upgrade github.com/sirupsen/logrus to v1.9.3 to address [PRISMA-2023-0056](https://github.com/etcd-io/etcd/pull/17482).
+
+### Others
+- [Make CGO_ENABLED configurable](https://github.com/etcd-io/etcd/pull/17421).
+
+
+
+## v3.5.12 (2024-01-31)
+
+### etcd server
+- Fix [not validating database consistent index, and panicking on nil backend](https://github.com/etcd-io/etcd/pull/17151)
+- Document [`experimental-enable-lease-checkpoint-persist` flag in etcd help](https://github.com/etcd-io/etcd/pull/17190)
+- Fix [needlessly flocking snapshot files when deleting](https://github.com/etcd-io/etcd/pull/17206)
+- Add [digest for etcd base image](https://github.com/etcd-io/etcd/pull/17205)
+- Fix [delete inconsistencies in read buffer](https://github.com/etcd-io/etcd/pull/17230)
+- Add [mvcc: print backend database size and size in use in compaction logs](https://github.com/etcd-io/etcd/pull/17291)
+
+### Dependencies
+- Compile binaries using [go 1.20.13](https://github.com/etcd-io/etcd/pull/17275)
+- Upgrade [golang.org/x/crypto to v0.17+ to address CVE-2023-48795](https://github.com/etcd-io/etcd/pull/17346)
+
+## v3.5.11 (2023-12-07)
+
+### etcd server
+- Fix distributed tracing by ensuring `--experimental-distributed-tracing-sampling-rate` configuration option is available to [set tracing sample rate](https://github.com/etcd-io/etcd/pull/16951).
+- Fix [url redirects while checking peer urls during new member addition](https://github.com/etcd-io/etcd/pull/16986)
+- Add [livez/readyz HTTP endpoints](https://github.com/etcd-io/etcd/pull/17039)
+
+### Dependencies
+- Compile binaries using [go 1.20.12](https://github.com/etcd-io/etcd/pull/17077)
+- Fix [CVE-2023-47108](https://github.com/advisories/GHSA-8pgv-569h-w5rw) by [bumping go.opentelemetry.io/otel to 1.20.0 and go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc to 0.46.0](https://github.com/etcd-io/etcd/pull/16946).
+
+
+
+## v3.5.10 (2023-10-27)
+
+### etcd server
+- Fix [`--socket-reuse-port` and `--socket-reuse-address` not able to be set in configuration file](https://github.com/etcd-io/etcd/pull/16435).
+- Fix [corruption check may get a `ErrCompacted` error when server has just been compacted](https://github.com/etcd-io/etcd/pull/16048)
+- Improve [Lease put performance for the case that auth is disabled or the user is admin](https://github.com/etcd-io/etcd/pull/16019)
+- Improve [Skip getting authInfo from incoming context when auth is disabled](https://github.com/etcd-io/etcd/pull/16241)
+- Fix [Hash and HashKV have duplicated RESTful API](https://github.com/etcd-io/etcd/pull/16490)
+
+### etcdutl v3
+- Add [optional --bump-revision and --mark-compacted flag to etcdutl snapshot restore operation](https://github.com/etcd-io/etcd/pull/16165).
+
+### etcdctl v3
+- Add [optional --bump-revision and --mark-compacted flag to etcdctl snapshot restore operation](https://github.com/etcd-io/etcd/pull/16165).
+
+### etcd grpc-proxy
+- Fix [Memberlist results not updated when proxy node down](https://github.com/etcd-io/etcd/pull/15907).
+
+### Package `clientv3`
+- Fix [Multiple endpoints with same prefix got mixed up](https://github.com/etcd-io/etcd/pull/15939)
+- Fix [Unexpected blocking when barrier waits on a nonexistent key](https://github.com/etcd-io/etcd/pull/16188)
+- Fix [Reset auth token when failing to authenticate due to auth being disabled](https://github.com/etcd-io/etcd/pull/16241)
+- Fix [panic in etcd validate secure endpoints](https://github.com/etcd-io/etcd/pull/16565)
+
+### Dependencies
+- Compile binaries using [go 1.20.10](https://github.com/etcd-io/etcd/pull/16745).
+- Upgrade gRPC to 1.58.3 in https://github.com/etcd-io/etcd/pull/16625, https://github.com/etcd-io/etcd/pull/16781 and https://github.com/etcd-io/etcd/pull/16790. Note that gRPC server will reject requests with connection header (refer to https://github.com/grpc/grpc-go/pull/4803).
+- Upgrade [bbolt to v1.3.8](https://github.com/etcd-io/etcd/pull/16833)
+
+
+
+## v3.5.9 (2023-05-11)
+
+### etcd server
+- Fix [LeaseTimeToLive API may return keys to clients which have no read permission on the keys](https://github.com/etcd-io/etcd/pull/15815).
+
+### Dependencies
+- Compile binaries using [go 1.19.9](https://github.com/etcd-io/etcd/pull/15822).
+
+
+
+## v3.5.8 (2023-04-13)
+
+### etcd server
+- Add [`etcd --tls-min-version --tls-max-version`](https://github.com/etcd-io/etcd/pull/15483) to enable support for TLS 1.3.
+- Add [`etcd --listen-client-http-urls`](https://github.com/etcd-io/etcd/pull/15589) flag to support separating http server from grpc one, thus giving full immunity to [watch stream starvation under high read load](https://github.com/etcd-io/etcd/issues/15402).
+- Change [http2 frame scheduler to random algorithm](https://github.com/etcd-io/etcd/pull/15452)
+- Fix [Watch response traveling back in time when reconnecting member downloads snapshot from the leader](https://github.com/etcd-io/etcd/pull/15515)
+- Fix [race when starting both secure & insecure gRPC servers on the same address](https://github.com/etcd-io/etcd/pull/15517)
+- Fix [server/auth: disallow creating empty permission ranges](https://github.com/etcd-io/etcd/pull/15619)
+- Fix [aligning zap log timestamp resolution to microseconds](https://github.com/etcd-io/etcd/pull/15240). Etcd now uses zap timestamp format: `2006-01-02T15:04:05.999999Z0700` (microsecond instead of milliseconds precision).
+- Fix [wsproxy did not print log in JSON format](https://github.com/etcd-io/etcd/pull/15661).
+- Fix [CVE-2021-28235](https://nvd.nist.gov/vuln/detail/CVE-2021-28235) by [clearing password after authenticating the user](https://github.com/etcd-io/etcd/pull/15653).
+- Fix [etcdserver may panic when parsing a JWT token without username or revision](https://github.com/etcd-io/etcd/pull/15676).
+- Fix [Requested watcher progress notifications are not synchronised with stream](https://github.com/etcd-io/etcd/pull/15695).
+
+### Package `netutil`
+- Fix [consistently format IPv6 addresses for comparison](https://github.com/etcd-io/etcd/pull/15187).
+
+### Package `clientv3`
+- Fix [etcd might send duplicated events to watch clients](https://github.com/etcd-io/etcd/pull/15274).
+
+### Dependencies
+- Recommend [Go 1.19+](https://github.com/etcd-io/etcd/pull/15337).
+- Compile binaries using [go 1.19.8](https://github.com/etcd-io/etcd/pull/15651)
+- Upgrade [golang.org/x/net to v0.7.0](https://github.com/etcd-io/etcd/pull/15337)
+- Upgrade [bbolt to v1.3.7](https://github.com/etcd-io/etcd/pull/15222).
+
+### Docker image
+- [Remove nsswitch.conf from docker image](https://github.com/etcd-io/etcd/pull/15161)
+- Fix [etcd docker images all tagged with amd64 architecture](https://github.com/etcd-io/etcd/pull/15612)
+
+
+
+## v3.5.7 (2023-01-20)
+
+### etcd server
+- Fix [Remove memberID from data corrupt alarm](https://github.com/etcd-io/etcd/pull/14852).
+- Fix [Allow non mutating requests pass through quotaKVServer when NOSPACE](https://github.com/etcd-io/etcd/pull/14884).
+- Fix [nil pointer panic for readonly txn due to nil response](https://github.com/etcd-io/etcd/pull/14899).
+- Fix [The last record which was partially synced to disk isn't automatically repaired](https://github.com/etcd-io/etcd/pull/15069).
+- Fix [etcdserver might promote a non-started learner](https://github.com/etcd-io/etcd/pull/15096).
+
+### Package `clientv3`
+- Reverted the fix to [auth invalid token and old revision errors in watch](https://github.com/etcd-io/etcd/pull/14995).
+
+### Dependencies
+- Recommend [Go 1.17+](https://github.com/etcd-io/etcd/pull/15019).
+- Compile binaries using [Go 1.17.13](https://github.com/etcd-io/etcd/pull/15019)
+- Bumped [some dependencies](https://github.com/etcd-io/etcd/pull/15018) to address some HIGH Vulnerabilities.
+
+### Docker image
+- Use [distroless base image](https://github.com/etcd-io/etcd/pull/15016) to address critical Vulnerabilities.
+- Updated [base image from base-debian11 to static-debian11 and removed dependency on busybox](https://github.com/etcd-io/etcd/pull/15037).
+
+
+
+## v3.5.6 (2022-11-21)
+
+### etcd server
+- Fix [auth invalid token and old revision errors in watch](https://github.com/etcd-io/etcd/pull/14547)
+- Fix [avoid closing a watch with ID 0 incorrectly](https://github.com/etcd-io/etcd/pull/14563)
+- Fix [auth: fix data consistency issue caused by recovery from snapshot](https://github.com/etcd-io/etcd/pull/14648)
+- Fix [revision might be inconsistency between members when etcd crashes during processing defragmentation operation](https://github.com/etcd-io/etcd/pull/14733)
+- Fix [timestamp in inconsistent format](https://github.com/etcd-io/etcd/pull/14799)
+- Fix [Failed resolving host due to lost DNS record](https://github.com/etcd-io/etcd/pull/14573)
+
+### Package `clientv3`
+- Fix [Add backoff before retry when watch stream returns unavailable](https://github.com/etcd-io/etcd/pull/14582).
+- Fix [stack overflow error in double barrier](https://github.com/etcd-io/etcd/pull/14658)
+- Fix [Refreshing token on CommonName based authentication causes segmentation violation in client](https://github.com/etcd-io/etcd/pull/14790).
+
+### etcd grpc-proxy
+- Add [`etcd grpc-proxy start --listen-cipher-suites`](https://github.com/etcd-io/etcd/pull/14500) flag to support adding configurable cipher list.
+
+
+
+## v3.5.5 (2022-09-15)
+
+### Deprecations
+- Deprecated [SetKeepAlive and SetKeepAlivePeriod in limitListenerConn](https://github.com/etcd-io/etcd/pull/14366).
+
+### Package `clientv3`
+- Fix [do not overwrite authTokenBundle on dial](https://github.com/etcd-io/etcd/pull/14132).
+- Fix [IsOptsWithPrefix returns false even if WithPrefix() is included](https://github.com/etcd-io/etcd/pull/14187).
+
+### etcd server
+- [Build official darwin/arm64 artifacts](https://github.com/etcd-io/etcd/pull/14436).
+- Add [`etcd --max-concurrent-streams`](https://github.com/etcd-io/etcd/pull/14219) flag to configure the max concurrent streams each client can open at a time, and defaults to math.MaxUint32.
+- Add [`etcd --experimental-compact-hash-check-enabled --experimental-compact-hash-check-time`](https://github.com/etcd-io/etcd/issues/14039) flags to support enabling reliable corruption detection on compacted revisions.
+- Fix [unexpected error during txn](https://github.com/etcd-io/etcd/issues/14110).
+- Fix [lease leak issue due to tokenProvider isn't enabled when restoring auth store from a snapshot](https://github.com/etcd-io/etcd/pull/13205).
+- Fix [the race condition between goroutine and channel on the same leases to be revoked](https://github.com/etcd-io/etcd/pull/14087).
+- Fix [lessor may continue to schedule checkpoint after stepping down leader role](https://github.com/etcd-io/etcd/pull/14087).
+- Fix [Restrict the max size of each WAL entry to the remaining size of the WAL file](https://github.com/etcd-io/etcd/pull/14127).
+- Fix [Protect rangePermCache with a RW lock correctly](https://github.com/etcd-io/etcd/pull/14227)
+- Fix [memberID equals zero in corruption alarm](https://github.com/etcd-io/etcd/pull/14272)
+- Fix [Durability API guarantee broken in single node cluster](https://github.com/etcd-io/etcd/pull/14424)
+- Fix [etcd fails to start after performing alarm list operation and then power off/on](https://github.com/etcd-io/etcd/pull/14429)
+- Fix [authentication data not loaded on member startup](https://github.com/etcd-io/etcd/pull/14409)
+
+### etcdctl v3
+
+- Fix [etcdctl move-leader may fail for multiple endpoints](https://github.com/etcd-io/etcd/pull/14434)
+
+
+### Other
+- [Bump golang.org/x/crypto to latest version](https://github.com/etcd-io/etcd/pull/13996) to address [CVE-2022-27191](https://github.com/advisories/GHSA-8c26-wmh5-6g9v).
+- [Bump OpenTelemetry to 1.0.1 and gRPC to 1.41.0](https://github.com/etcd-io/etcd/pull/14312).
+
+
+
+## v3.5.4 (2022-04-24)
+
+### etcd server
+- Fix [etcd panic on startup (auth enabled)](https://github.com/etcd-io/etcd/pull/13946)
+
+### package `client/pkg/v3`
+
+- [Revert the change of trimming the trailing dot from SRV.Target](https://github.com/etcd-io/etcd/pull/13950) returned by DNS lookup
+
+
+
+
+## v3.5.3 (2022-04-13)
+
+### etcd server
+- Fix [Provide a better liveness probe for when etcd runs as a Kubernetes pod](https://github.com/etcd-io/etcd/pull/13706)
+- Fix [inconsistent log format](https://github.com/etcd-io/etcd/pull/13864)
+- Fix [Inconsistent revision and data occurs](https://github.com/etcd-io/etcd/pull/13908)
+- Fix [Etcdserver is still in progress of processing LeaseGrantRequest when it receives a LeaseKeepAliveRequest on the same leaseID](https://github.com/etcd-io/etcd/pull/13932)
+- Fix [consistent_index coming from snapshot is overwritten by the old local value](https://github.com/etcd-io/etcd/pull/13933)
+- [Update container base image snapshot](https://github.com/etcd-io/etcd/pull/13862)
+- Fix [Defrag unsets backend options](https://github.com/etcd-io/etcd/pull/13701).
+
+### package `client/pkg/v3`
+
+- [Trim the suffix dot from the target](https://github.com/etcd-io/etcd/pull/13714) in SRV records returned by DNS lookup
+
+### etcdctl v3
+
+- [Always print the raft_term in decimal](https://github.com/etcd-io/etcd/pull/13727) when displaying member list in json.
+
+
+
+## [v3.5.2](https://github.com/etcd-io/etcd/releases/tag/v3.5.2) (2022-02-01)
+
+See [code changes](https://github.com/etcd-io/etcd/compare/v3.5.1...v3.5.2) and [v3.5 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_5/) for any breaking changes.
+
+### etcd server
+- Fix [exclude the same alarm type activated by multiple peers](https://github.com/etcd-io/etcd/pull/13476).
+- Add [`etcd --experimental-enable-lease-checkpoint-persist`](https://github.com/etcd-io/etcd/pull/13508) flag to enable checkpoint persisting.
+- Fix [Lease checkpoints don't prevent to reset ttl on leader change](https://github.com/etcd-io/etcd/pull/13508), requires enabling checkpoint persisting.
+- Fix [assertion failed due to tx closed when recovering v3 backend from a snapshot db](https://github.com/etcd-io/etcd/pull/13501)
+- Fix [segmentation violation(SIGSEGV) error due to premature unlocking of watchableStore](https://github.com/etcd-io/etcd/pull/13541)
+
+
+
+## [v3.5.1](https://github.com/etcd-io/etcd/releases/tag/v3.5.1) (2021-10-15)
+
+See [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0...v3.5.1) and [v3.5 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_5/) for any breaking changes.
+
+### etcd server
+
+- Fix [self-signed-cert-validity parameter cannot be specified in the config file](https://github.com/etcd-io/etcd/pull/13237).
+- Fix [ensure that cluster members stored in v2store and backend are in sync](https://github.com/etcd-io/etcd/pull/13348)
+
+### etcd client
+
+- [Fix etcd client sends invalid :authority header](https://github.com/etcd-io/etcd/issues/13192)
+
+### package clientv3
+
+- Endpoints self identify now as `etcd-endpoints://{id}/{authority}` where authority is based on first endpoint passed, for example `etcd-endpoints://0xc0009d8540/localhost:2079`
+
+### Other
+
+- Updated [base image](https://github.com/etcd-io/etcd/pull/13386) from `debian:buster-v1.4.0` to `debian:bullseye-20210927` to fix the following critical CVEs:
+ - [CVE-2021-3711](https://nvd.nist.gov/vuln/detail/CVE-2021-3711): miscalculation of a buffer size in openssl's SM2 decryption
+ - [CVE-2021-35942](https://nvd.nist.gov/vuln/detail/CVE-2021-35942): integer overflow flaw in glibc
+ - [CVE-2019-9893](https://nvd.nist.gov/vuln/detail/CVE-2019-9893): incorrect syscall argument generation in libseccomp
+ - [CVE-2021-36159](https://nvd.nist.gov/vuln/detail/CVE-2021-36159): libfetch in apk-tools mishandles numeric strings in FTP and HTTP protocols to allow out-of-bounds reads.
+
+
+
+## v3.5.0 (2021-06)
+
+See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0...v3.5.0) and [v3.5 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_5/) for any breaking changes.
+
+- [v3.5.0](https://github.com/etcd-io/etcd/releases/tag/v3.5.0) (2021-06), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-rc.1...v3.5.0).
+- [v3.5.0-rc.1](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-rc.1) (2021-06-10), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-rc.0...v3.5.0-rc.1).
+- [v3.5.0-rc.0](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-rc.0) (2021-06-04), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-beta.4...v3.5.0-rc.0).
+- [v3.5.0-beta.4](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-beta.4) (2021-05-26), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-beta.3...v3.5.0-beta.4).
+- [v3.5.0-beta.3](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-beta.3) (2021-05-18), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-beta.2...v3.5.0-beta.3).
+- [v3.5.0-beta.2](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-beta.2) (2021-05-18), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-beta.1...v3.5.0-beta.2).
+- [v3.5.0-beta.1](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-beta.1) (2021-05-18), see [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0...v3.5.0-beta.1).
+
+**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.5 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_5/).**
+
+### Breaking Changes
+
+- `go.etcd.io/etcd` Go packages have moved to `go.etcd.io/etcd/{api,pkg,raft,client,etcdctl,server,tests}/v3` to follow the [Go modules](https://github.com/golang/go/wiki/Modules) conventions (see the illustrative sketch after this list).
+- `go.etcd.io/clientv3/snapshot` SnapshotManager class has moved to `go.etcd.io/clientv3/etcdctl`.
+ The method `snapshot.Save` to download a snapshot from the remote server was preserved in `go.etcd.io/clientv3/snapshot`.
+- `go.etcd.io/client` package was migrated to `go.etcd.io/client/v2`.
+- Changed behavior of clientv3 API [MemberList](https://github.com/etcd-io/etcd/pull/11639).
+ - Previously, it was directly served with the server's local data, which could be stale.
+ - Now, it is served with linearizable guarantee. If the server is disconnected from quorum, `MemberList` call will fail.
+- [gRPC gateway](https://github.com/grpc-ecosystem/grpc-gateway) only supports [`/v3`](TODO) endpoint.
+ - Deprecated [`/v3beta`](https://github.com/etcd-io/etcd/pull/9298).
+ - `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` doesn't work in v3.5. Use `curl -L http://localhost:2379/v3/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` instead.
+- **`etcd --experimental-enable-v2v3` flag remains experimental and to be deprecated.**
+ - v2 storage emulation feature will be deprecated in the next release.
+ - etcd 3.5 is the last version that supports V2 API. Flags `--enable-v2` and `--experimental-enable-v2v3` [are now deprecated](https://github.com/etcd-io/etcd/pull/12940) and will be removed in etcd v3.6 release.
+- **`etcd --experimental-backend-bbolt-freelist-type` flag has been deprecated.** Use **`etcd --backend-bbolt-freelist-type`** instead. The default type is hashmap and it is stable now.
+- **`etcd --debug` flag has been deprecated.** Use **`etcd --log-level=debug`** instead.
+- Remove [`embed.Config.Debug`](https://github.com/etcd-io/etcd/pull/10947).
+- **`etcd --log-output` flag has been deprecated.** Use **`etcd --log-outputs`** instead.
+- **`etcd --logger=zap --log-outputs=stderr`** is now the default.
+- **`etcd --logger=capnslog` flag value has been deprecated.**
+- **`etcd --logger=zap --log-outputs=default` flag value is not supported.**
+ - Use `etcd --logger=zap --log-outputs=stderr`.
+ - Or, use `etcd --logger=zap --log-outputs=systemd/journal` to send logs to the local systemd journal.
+ - Previously, if etcd parent process ID (PPID) is 1 (e.g. run with systemd), `etcd --logger=capnslog --log-outputs=default` redirects server logs to local systemd journal. And if write to journald fails, it writes to `os.Stderr` as a fallback.
+ - However, even with PPID 1, it can fail to dial systemd journal (e.g. run embedded etcd with Docker container). Then, [every single log write will fail](https://github.com/etcd-io/etcd/pull/9729) and fall back to `os.Stderr`, which is inefficient.
+ - To avoid this problem, systemd journal logging must be configured manually.
+- **`etcd --log-outputs=stderr`** is now the default.
+- **`etcd --log-package-levels` flag for `capnslog` has been deprecated.** Now, **`etcd --logger=zap --log-outputs=stderr`** is the default.
+- **`[CLIENT-URL]/config/local/log` endpoint has been deprecated, as is `etcd --log-package-levels` flag.**
+ - `curl http://127.0.0.1:2379/config/local/log -XPUT -d '{"Level":"DEBUG"}'` won't work.
+ - Please use `etcd --logger=zap --log-outputs=stderr` instead.
+- Deprecated `etcd_debugging_mvcc_db_total_size_in_bytes` Prometheus metric. Use `etcd_mvcc_db_total_size_in_bytes` instead.
+- Deprecated `etcd_debugging_mvcc_put_total` Prometheus metric. Use `etcd_mvcc_put_total` instead.
+- Deprecated `etcd_debugging_mvcc_delete_total` Prometheus metric. Use `etcd_mvcc_delete_total` instead.
+- Deprecated `etcd_debugging_mvcc_txn_total` Prometheus metric. Use `etcd_mvcc_txn_total` instead.
+- Deprecated `etcd_debugging_mvcc_range_total` Prometheus metric. Use `etcd_mvcc_range_total` instead.
+- Main branch `/version` outputs `3.5.0-pre`, instead of `3.4.0+git`.
+- Changed `proxy` package function signature to [support structured logger](https://github.com/etcd-io/etcd/pull/11614).
+ - Previously, `NewClusterProxy(c *clientv3.Client, advaddr string, prefix string) (pb.ClusterServer, <-chan struct{})`, now `NewClusterProxy(lg *zap.Logger, c *clientv3.Client, advaddr string, prefix string) (pb.ClusterServer, <-chan struct{})`.
+ - Previously, `Register(c *clientv3.Client, prefix string, addr string, ttl int)`, now `Register(lg *zap.Logger, c *clientv3.Client, prefix string, addr string, ttl int) <-chan struct{}`.
+ - Previously, `NewHandler(t *http.Transport, urlsFunc GetProxyURLs, failureWait time.Duration, refreshInterval time.Duration) http.Handler`, now `NewHandler(lg *zap.Logger, t *http.Transport, urlsFunc GetProxyURLs, failureWait time.Duration, refreshInterval time.Duration) http.Handler`.
+- Changed `pkg/flags` function signature to [support structured logger](https://github.com/etcd-io/etcd/pull/11616).
+ - Previously, `SetFlagsFromEnv(prefix string, fs *flag.FlagSet) error`, now `SetFlagsFromEnv(lg *zap.Logger, prefix string, fs *flag.FlagSet) error`.
+ - Previously, `SetPflagsFromEnv(prefix string, fs *pflag.FlagSet) error`, now `SetPflagsFromEnv(lg *zap.Logger, prefix string, fs *pflag.FlagSet) error`.
+- ClientV3 supports [grpc resolver API](https://github.com/etcd-io/etcd/blob/main/client/v3/naming/resolver/resolver.go).
+ - Endpoints can be managed using [endpoints.Manager](https://github.com/etcd-io/etcd/blob/main/client/v3/naming/endpoints/endpoints.go)
+ - Previously supported [GRPCResolver was decommissioned](https://github.com/etcd-io/etcd/pull/12675). Use [resolver](https://github.com/etcd-io/etcd/blob/main/client/v3/naming/resolver/resolver.go) instead (see the illustrative sketch after this list).
+- Turned on [--pre-vote by default](https://github.com/etcd-io/etcd/pull/12770). Should prevent disrupting RAFT leader by an individual member.
+- [ETCD_CLIENT_DEBUG env](https://github.com/etcd-io/etcd/pull/12786): Now supports log levels (debug, info, warn, error, dpanic, panic, fatal). Only when set, overrides application-wide grpc logging settings.
+- [Embed Etcd.Close()](https://github.com/etcd-io/etcd/pull/12828) needs to be called exactly once and closes Etcd.Err() stream.
+- [Embed Etcd does not override global/grpc logger](https://github.com/etcd-io/etcd/pull/12861) by default any longer. If desired, please call `embed.Config::SetupGlobalLoggers()` explicitly.
+- [Embed Etcd custom logger should be configured using simpler builder `NewZapLoggerBuilder`](https://github.com/etcd-io/etcd/pull/12973).
+- Client errors of `context cancelled` or `context deadline exceeded` are exposed as `codes.Canceled` and `codes.DeadlineExceeded`, instead of `codes.Unknown`.
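+
+Below is a minimal sketch of the new naming API (assumptions: a reachable etcd at `127.0.0.1:2379` and a `grpc-go` version providing `grpc.WithResolvers`; the `my-service` target and backend address are illustrative only):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	clientv3 "go.etcd.io/etcd/client/v3"
+	"go.etcd.io/etcd/client/v3/naming/endpoints"
+	"go.etcd.io/etcd/client/v3/naming/resolver"
+	"google.golang.org/grpc"
+)
+
+func main() {
+	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer cli.Close()
+
+	// Register a backend address under the "my-service" target.
+	em, err := endpoints.NewManager(cli, "my-service")
+	if err != nil {
+		log.Fatal(err)
+	}
+	if err := em.AddEndpoint(context.TODO(), "my-service/node-1", endpoints.Endpoint{Addr: "10.0.0.1:8080"}); err != nil {
+		log.Fatal(err)
+	}
+
+	// Dial through the etcd-backed gRPC resolver (replaces the removed GRPCResolver).
+	b, err := resolver.NewBuilder(cli)
+	if err != nil {
+		log.Fatal(err)
+	}
+	conn, err := grpc.Dial("etcd:///my-service", grpc.WithResolvers(b), grpc.WithInsecure())
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer conn.Close()
+}
+```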
+
+
+### Storage format changes
+- [WAL snapshots persist raftpb.ConfState](https://github.com/etcd-io/etcd/pull/12735).
+- [Backend persists raftpb.ConfState](https://github.com/etcd-io/etcd/pull/12962) in the `meta` bucket `confState` key.
+- [Backend persists applied term](https://github.com/etcd-io/etcd/pull/) in the `meta` bucket.
+- Backend persists `downgrade` in the `cluster` bucket.
+
+### Security
+
+- Add [`TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256` and `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256` to `etcd --cipher-suites`](https://github.com/etcd-io/etcd/pull/11864).
+- Changed [the format of WAL entries related to auth for not keeping password as a plain text](https://github.com/etcd-io/etcd/pull/11943).
+- Add third party [Security Audit Report](https://github.com/etcd-io/etcd/pull/12201).
+- A [log warning](https://github.com/etcd-io/etcd/pull/12242) is added when etcd uses any existing directory that has a permission different than 700 on Linux and 777 on Windows.
+- Add optional [`ClientCertFile` and `ClientKeyFile`](https://github.com/etcd-io/etcd/pull/12705) options for peer and client TLS configuration when split certificates are used.
+
+### Metrics, Monitoring
+
+See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release.
+
+Note that any `etcd_debugging_*` metrics are experimental and subject to change.
+
+- Deprecated `etcd_debugging_mvcc_db_total_size_in_bytes` Prometheus metric. Use `etcd_mvcc_db_total_size_in_bytes` instead.
+- Deprecated `etcd_debugging_mvcc_put_total` Prometheus metric. Use `etcd_mvcc_put_total` instead.
+- Deprecated `etcd_debugging_mvcc_delete_total` Prometheus metric. Use `etcd_mvcc_delete_total` instead.
+- Deprecated `etcd_debugging_mvcc_txn_total` Prometheus metric. Use `etcd_mvcc_txn_total` instead.
+- Deprecated `etcd_debugging_mvcc_range_total` Prometheus metric. Use `etcd_mvcc_range_total` instead.
+- Add [`etcd_debugging_mvcc_current_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric.
+- Add [`etcd_debugging_mvcc_compact_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric.
+- Change [`etcd_cluster_version`](https://github.com/etcd-io/etcd/pull/11254) Prometheus metrics to include only major and minor version.
+- Add [`etcd_debugging_mvcc_total_put_size_in_bytes`](https://github.com/etcd-io/etcd/pull/11374) Prometheus metric.
+- Add [`etcd_server_client_requests_total` with `"type"` and `"client_api_version"` labels](https://github.com/etcd-io/etcd/pull/11687).
+- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738).
+- Add [`etcd_debugging_auth_revision`](https://github.com/etcd-io/etcd/commit/f14d2a087f7b0fd6f7980b95b5e0b945109c95f3).
+- Add [`os_fd_used` and `os_fd_limit` to monitor current OS file descriptors](https://github.com/etcd-io/etcd/pull/12214).
+- Add [`etcd_disk_defrag_inflight`](https://github.com/etcd-io/etcd/pull/13395).
+
+### etcd server
+
+- Add check to [not attempt to grant nil permission to a role](https://github.com/etcd-io/etcd/pull/13086).
+- Add check to [not activate alarms with missing AlarmType](https://github.com/etcd-io/etcd/pull/13084).
+- Add [`TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256` and `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256` to `etcd --cipher-suites`](https://github.com/etcd-io/etcd/pull/11864).
+- Automatically [create parent directory if it does not exist](https://github.com/etcd-io/etcd/pull/9626) (fix [issue#9609](https://github.com/etcd-io/etcd/issues/9609)).
+- v4.0 will configure `etcd --enable-v2=true --enable-v2v3=/aaa` to enable the v2 API server backed by **v3 storage**.
+- The `etcd --backend-bbolt-freelist-type` flag is now stable.
+  - `etcd --experimental-backend-bbolt-freelist-type` has been deprecated.
+- Support [downgrade API](https://github.com/etcd-io/etcd/pull/11715).
+- Deprecate v2 apply on cluster version. [Use v3 request to set cluster version and recover cluster version from v3 backend](https://github.com/etcd-io/etcd/pull/11427).
+- [Use v2 api to update cluster version to support mixed version cluster during upgrade](https://github.com/etcd-io/etcd/pull/12988).
+- [Fix corruption bug in defrag](https://github.com/etcd-io/etcd/pull/11613).
+- Fix [quorum protection logic when promoting a learner](https://github.com/etcd-io/etcd/pull/11640).
+- Improve [peer corruption checker](https://github.com/etcd-io/etcd/pull/11621) to work when peer mTLS is enabled.
+- Log [`[CLIENT-PORT]/health` check in server side](https://github.com/etcd-io/etcd/pull/11704).
+- Log [successful etcd server-side health check in debug level](https://github.com/etcd-io/etcd/pull/12677).
+- Improve [compaction performance when latest index is greater than 1-million](https://github.com/etcd-io/etcd/pull/11734).
+- [Refactor consistentindex](https://github.com/etcd-io/etcd/pull/11699).
+- [Add log when etcdserver failed to apply command](https://github.com/etcd-io/etcd/pull/11670).
+- Improve [count-only range performance](https://github.com/etcd-io/etcd/pull/11771).
+- Remove [redundant storage restore operation to shorten the startup time](https://github.com/etcd-io/etcd/pull/11779).
+  - With 40 million keys of test data, this shortens the startup time from 5 min to 2.5 min.
+- [Fix deadlock bug in mvcc](https://github.com/etcd-io/etcd/pull/11817).
+- Fix [inconsistency between WAL and server snapshot](https://github.com/etcd-io/etcd/pull/11888).
+  - Previously, server restore failed if it had crashed after persisting the raft hard state but before saving the snapshot.
+ - See https://github.com/etcd-io/etcd/issues/10219 for more.
+- Add [missing CRC checksum check in the WAL validate method, which otherwise causes a panic](https://github.com/etcd-io/etcd/pull/11924).
+ - See https://github.com/etcd-io/etcd/issues/11918.
+- Improve logging around snapshot send and receive.
+- [Push down RangeOptions.limit argv into index tree to reduce memory overhead](https://github.com/etcd-io/etcd/pull/11990).
+- Add [reason field for /health response](https://github.com/etcd-io/etcd/pull/11983).
+- Add option to [exclude alarms from health check conditionally](https://github.com/etcd-io/etcd/pull/12880).
+- Add [`etcd --unsafe-no-fsync`](https://github.com/etcd-io/etcd/pull/11946) flag.
+ - Setting the flag disables all uses of fsync, which is unsafe and will cause data loss. This flag makes it possible to run an etcd node for testing and development without placing lots of load on the file system.
+- Add [`etcd --auth-token-ttl`](https://github.com/etcd-io/etcd/pull/11980) flag to customize `simpleTokenTTL` settings.
+- Improve [`runtime.FDUsage` call pattern to reduce object allocations, memory usage, and CPU usage](https://github.com/etcd-io/etcd/pull/11986).
+- Improve [mvcc.watchResponse channel memory usage](https://github.com/etcd-io/etcd/pull/11987).
+- Log [expensive request info in UnaryInterceptor](https://github.com/etcd-io/etcd/pull/12086).
+- [Fix invalid Go type in etcdserverpb](https://github.com/etcd-io/etcd/pull/12000).
+- [Improve healthcheck by using v3 range request and its corresponding timeout](https://github.com/etcd-io/etcd/pull/12195).
+- Add [`etcd --experimental-watch-progress-notify-interval`](https://github.com/etcd-io/etcd/pull/12216) flag to make watch progress notify interval configurable.
+- Fix [server panic in slow writes warnings](https://github.com/etcd-io/etcd/issues/12197).
+ - Fixed via [PR#12238](https://github.com/etcd-io/etcd/pull/12238).
+- [Fix server panic](https://github.com/etcd-io/etcd/pull/12288) when force-new-cluster flag is enabled in a cluster which had learner node.
+- Add [`etcd --self-signed-cert-validity`](https://github.com/etcd-io/etcd/pull/12429) flag to support setting certificate expiration time.
+  - Note: certificates generated by etcd are valid for 1 year by default when the auto-tls or peer-auto-tls option is specified.
+- Add [`etcd --experimental-warning-apply-duration`](https://github.com/etcd-io/etcd/pull/12448) flag which allows apply duration threshold to be configurable.
+- Add [`etcd --experimental-memory-mlock`](https://github.com/etcd-io/etcd/pull/TODO) flag which prevents etcd memory pages from being swapped out.
+- Add [`etcd --socket-reuse-port`](https://github.com/etcd-io/etcd/pull/12702) flag.
+  - Setting this flag enables `SO_REUSEPORT`, which allows rebinding of a port already in use. Users should take caution when using this flag to ensure that flock is properly enforced.
+- Add [`etcd --socket-reuse-address`](https://github.com/etcd-io/etcd/pull/12702) flag.
+  - Setting this flag enables `SO_REUSEADDR`, which allows binding to an address in `TIME_WAIT` state, improving etcd restart time.
+- Reduce [around 30% memory allocation by logging range response size without marshal](https://github.com/etcd-io/etcd/pull/12871).
+- The `ETCD_VERIFY="all"` environment variable triggers [additional verification of consistency](https://github.com/etcd-io/etcd/pull/12901) of etcd data-dir files.
+- Add [`etcd --enable-log-rotation`](https://github.com/etcd-io/etcd/pull/12774) boolean flag which enables log rotation if true.
+- Add [`etcd --log-rotation-config-json`](https://github.com/etcd-io/etcd/pull/12774) flag which allows passthrough of JSON config to configure log rotation for a file output target.
+- Add experimental distributed tracing boolean flag [`--experimental-enable-distributed-tracing`](https://github.com/etcd-io/etcd/pull/12919) which enables tracing.
+- Add [`etcd --experimental-distributed-tracing-address`](https://github.com/etcd-io/etcd/pull/12919) string flag which allows configuring the OpenTelemetry collector address.
+- Add [`etcd --experimental-distributed-tracing-service-name`](https://github.com/etcd-io/etcd/pull/12919) string flag which allows changing the default "etcd" service name.
+- Add [`etcd --experimental-distributed-tracing-instance-id`](https://github.com/etcd-io/etcd/pull/12919) string flag which configures an instance ID, which must be unique per etcd instance.
+- Add [`--experimental-bootstrap-defrag-threshold-megabytes`](https://github.com/etcd-io/etcd/pull/12941), which sets a threshold on unused db size; etcdserver automatically performs defragmentation on bootstrap when the unused size exceeds this value. The functionality is disabled if the value is 0.
+
+### Package `runtime`
+
+- Optimize [`runtime.FDUsage` by removing unnecessary sorting](https://github.com/etcd-io/etcd/pull/12214).
+
+### Package `embed`
+
+- Remove [`embed.Config.Debug`](https://github.com/etcd-io/etcd/pull/10947).
+ - Use `embed.Config.LogLevel` instead.
+- Add [`embed.Config.ZapLoggerBuilder`](https://github.com/etcd-io/etcd/pull/11147) to allow creating a custom zap logger.
+- Replace [global `*zap.Logger` with etcd server logger object](https://github.com/etcd-io/etcd/pull/12212).
+- Add [`embed.Config.EnableLogRotation`](https://github.com/etcd-io/etcd/pull/12774) which enables log rotation if true.
+- Add [`embed.Config.LogRotationConfigJSON`](https://github.com/etcd-io/etcd/pull/12774) to allow passthrough of JSON config to configure log rotation for a file output target.
+- Add [`embed.Config.ExperimentalEnableDistributedTracing`](https://github.com/etcd-io/etcd/pull/12919) which enables experimental distributed tracing if true.
+- Add [`embed.Config.ExperimentalDistributedTracingAddress`](https://github.com/etcd-io/etcd/pull/12919) which allows overriding default collector address.
+- Add [`embed.Config.ExperimentalDistributedTracingServiceName`](https://github.com/etcd-io/etcd/pull/12919) which allows overriding default "etcd" service name.
+- Add [`embed.Config.ExperimentalDistributedTracingServiceInstanceID`](https://github.com/etcd-io/etcd/pull/12919) which allows configuring an instance ID that must be unique per etcd instance.
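+
+For embedded servers, the options above are plain `embed.Config` fields set before `embed.StartEtcd`. Below is a minimal sketch (assumptions: the `go.etcd.io/etcd/server/v3/embed` package; the data dir and log file paths are illustrative only):
+
+```go
+package main
+
+import (
+	"log"
+
+	"go.etcd.io/etcd/server/v3/embed"
+)
+
+func main() {
+	cfg := embed.NewConfig()
+	cfg.Dir = "default.etcd" // illustrative data dir
+	cfg.LogLevel = "info"    // replaces the removed embed.Config.Debug
+	cfg.EnableLogRotation = true
+	cfg.LogOutputs = []string{"./etcd.log"} // log rotation applies to a file output target
+
+	e, err := embed.StartEtcd(cfg)
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Close must be called exactly once; it also closes the e.Err() stream.
+	defer e.Close()
+
+	select {
+	case <-e.Server.ReadyNotify():
+		log.Println("embedded etcd is ready")
+	case err := <-e.Err():
+		log.Fatal(err)
+	}
+}
+```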
+
+### Package `clientv3`
+
+- Remove [excessive watch cancel logging messages](https://github.com/etcd-io/etcd/pull/12187).
+ - See [kubernetes/kubernetes#93450](https://github.com/kubernetes/kubernetes/issues/93450).
+- Add [`TryLock`](https://github.com/etcd-io/etcd/pull/11104) method to `clientv3/concurrency/Mutex`. This non-blocking method does not wait to acquire the lock; it returns immediately with an error if the `Mutex` is locked by another session (see the sketch after this list).
+- Fix [client balancer failover against multiple endpoints](https://github.com/etcd-io/etcd/pull/11184).
+ - Fix [`"kube-apiserver: failover on multi-member etcd cluster fails certificate check on DNS mismatch"`](https://github.com/kubernetes/kubernetes/issues/83028).
+- Fix [IPv6 endpoint parsing in client](https://github.com/etcd-io/etcd/pull/11211).
+ - Fix ["1.16: etcd client does not parse IPv6 addresses correctly when members are joining" (kubernetes#83550)](https://github.com/kubernetes/kubernetes/issues/83550).
+- Fix [errors caused by grpc changing balancer/resolver API](https://github.com/etcd-io/etcd/pull/11564). This change is compatible with grpc >= [v1.26.0](https://github.com/grpc/grpc-go/releases/tag/v1.26.0), but is not compatible with grpc versions earlier than v1.26.0.
+- Use [ServerName as the authority](https://github.com/etcd-io/etcd/pull/11574) after bumping to grpc v1.26.0. Remove workaround in [#11184](https://github.com/etcd-io/etcd/pull/11184).
+- Fix [`"hasleader"` metadata embedding](https://github.com/etcd-io/etcd/pull/11687).
+ - Previously, `clientv3.WithRequireLeader(ctx)` was overwriting existing context keys.
+- Fix [watch leak caused by lazy cancellation](https://github.com/etcd-io/etcd/pull/11850). When clients cancel their watches, a cancel request will now be immediately sent to the server instead of waiting for the next watch event.
+- Make sure [save snapshot downloads checksum for integrity checks](https://github.com/etcd-io/etcd/pull/11896).
+- Fix [auth token invalid after watch reconnects](https://github.com/etcd-io/etcd/pull/12264). Get AuthToken automatically when clientConn is ready.
+- Improve [clientv3:get AuthToken gracefully without extra connection](https://github.com/etcd-io/etcd/pull/12165).
+- Changed [clientv3 dialing code](https://github.com/etcd-io/etcd/pull/12671) to use grpc resolver API instead of custom balancer.
+  - Endpoints now self-identify as `etcd-endpoints://{id}/#initially={list of endpoints}`, e.g. `etcd-endpoints://0xc0009d8540/#initially=[localhost:2079]`.
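+
+Below is a minimal sketch of the non-blocking `TryLock` (assumptions: a reachable etcd at `127.0.0.1:2379`; the `/locks/demo` prefix is illustrative only):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	clientv3 "go.etcd.io/etcd/client/v3"
+	"go.etcd.io/etcd/client/v3/concurrency"
+)
+
+func main() {
+	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer cli.Close()
+
+	s, err := concurrency.NewSession(cli)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer s.Close()
+
+	m := concurrency.NewMutex(s, "/locks/demo") // illustrative key prefix
+	switch err := m.TryLock(context.TODO()); err {
+	case nil:
+		log.Println("acquired lock")
+		defer m.Unlock(context.TODO())
+	case concurrency.ErrLocked:
+		// Lock is held by another session; TryLock returns immediately instead of blocking.
+		log.Println("lock held by another session")
+	default:
+		log.Fatal(err)
+	}
+}
+```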
+
+### Package `lease`
+
+- Fix [memory leak in follower nodes](https://github.com/etcd-io/etcd/pull/11731).
+ - https://github.com/etcd-io/etcd/issues/11495
+ - https://github.com/etcd-io/etcd/issues/11730
+- Make sure [grant/revoke won't be applied repeatedly after restarting etcd](https://github.com/etcd-io/etcd/pull/11935).
+
+### Package `wal`
+
+- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738).
+- Handle [out-of-range slice bound in `ReadAll` and entry limit in `decodeRecord`](https://github.com/etcd-io/etcd/pull/11793).
+
+### etcdctl v3
+
+- Fix `etcdctl member add` command to prevent potential timeout. ([PR#11194](https://github.com/etcd-io/etcd/pull/11194) and [PR#11638](https://github.com/etcd-io/etcd/pull/11638))
+- Add [`etcdctl watch --progress-notify`](https://github.com/etcd-io/etcd/pull/11462) flag.
+- Add [`etcdctl auth status`](https://github.com/etcd-io/etcd/pull/11536) command to check if authentication is enabled.
+- Add [`etcdctl get --count-only`](https://github.com/etcd-io/etcd/pull/11743) flag for output type `fields`.
+- Add [`etcdctl member list -w=json --hex`](https://github.com/etcd-io/etcd/pull/11812) flag to print the member list response in hex-format JSON.
+- Changed [`etcdctl lock exec-command`](https://github.com/etcd-io/etcd/pull/12829) to return exit code of exec-command.
+- [New tool: `etcdutl`](https://github.com/etcd-io/etcd/pull/12971) incorporated functionality of: `etcdctl snapshot status|restore`, `etcdctl backup`, `etcdctl defrag --data-dir ...`.
+- [ETCDCTL_API=3 `etcdctl migrate`](https://github.com/etcd-io/etcd/pull/12971) has been decommissioned. Use etcd <=v3.4 to restore v2 storage.
+
+### gRPC gateway
+
+- [gRPC gateway](https://github.com/grpc-ecosystem/grpc-gateway) only supports the [`/v3`](TODO) endpoint.
+ - Deprecated [`/v3beta`](https://github.com/etcd-io/etcd/pull/9298).
+  - `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` does not work in v3.5. Use `curl -L http://localhost:2379/v3/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` instead.
+- Set [`enable-grpc-gateway`](https://github.com/etcd-io/etcd/pull/12297) flag to true when using a config file to keep the defaults the same as the command line configuration.
+
+### gRPC Proxy
+
+- Fix [`panic on error`](https://github.com/etcd-io/etcd/pull/11694) for metrics handler.
+- Add [gRPC keepalive related flags](https://github.com/etcd-io/etcd/pull/11711) `grpc-keepalive-min-time`, `grpc-keepalive-interval` and `grpc-keepalive-timeout`.
+- [Fix grpc watch proxy hang when it fails to cancel a watcher](https://github.com/etcd-io/etcd/pull/12030).
+- Add [metrics handler for the grpc proxy itself](https://github.com/etcd-io/etcd/pull/12107).
+- Add [health handler for the grpc proxy itself](https://github.com/etcd-io/etcd/pull/12114).
+
+### Auth
+
+- Fix [NoPassword check when adding user through the gRPC gateway](https://github.com/etcd-io/etcd/pull/11418) ([issue#11414](https://github.com/etcd-io/etcd/issues/11414)).
+- Fix bug where [some auth-related messages were logged at the wrong level](https://github.com/etcd-io/etcd/pull/11586).
+- [Fix a data corruption bug by saving consistent index](https://github.com/etcd-io/etcd/pull/11652).
+- [Improve checkPassword performance](https://github.com/etcd-io/etcd/pull/11735).
+- [Add authRevision field in AuthStatus](https://github.com/etcd-io/etcd/pull/11659).
+- Fix [a bug of not refreshing expired tokens](https://github.com/etcd-io/etcd/pull/13308).
+
+### API
+
+- Add [`/v3/auth/status`](https://github.com/etcd-io/etcd/pull/11536) endpoint to check if authentication is enabled
+- [Add `Linearizable` field to `etcdserverpb.MemberListRequest`](https://github.com/etcd-io/etcd/pull/11639).
+- [Learners now support the Snapshot RPC](https://github.com/etcd-io/etcd/pull/12890/).
+
+### Package `netutil`
+
+- Remove [`netutil.DropPort/RecoverPort/SetLatency/RemoveLatency`](https://github.com/etcd-io/etcd/pull/12491).
+ - These are not used anymore. They were only used for older versions of functional testing.
+  - Removed to adhere to security best practices and minimize arbitrary shell invocation.
+
+### `tools/etcd-dump-metrics`
+
+- Implement [input validation to prevent arbitrary shell invocation](https://github.com/etcd-io/etcd/pull/12491).
+
+### Dependency
+
+- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) from [**`v1.23.0`**](https://github.com/grpc/grpc-go/releases/tag/v1.23.0) to [**`v1.37.0`**](https://github.com/grpc/grpc-go/releases/tag/v1.37.0).
+- Upgrade [`go.uber.org/zap`](https://github.com/uber-go/zap/releases) from [**`v1.14.1`**](https://github.com/uber-go/zap/releases/tag/v1.14.1) to [**`v1.16.0`**](https://github.com/uber-go/zap/releases/tag/v1.16.0).
+
+### Platforms
+
+- etcd now [officially supports `arm64`](https://github.com/etcd-io/etcd/pull/12929).
+ - See https://github.com/etcd-io/etcd/pull/12928 for adding automated tests with `arm64` EC2 instances (Graviton 2).
+ - See https://github.com/etcd-io/website/pull/273 for new platform support tier policies.
+
+### Release
+
+- Add s390x build support ([PR#11548](https://github.com/etcd-io/etcd/pull/11548) and [PR#11358](https://github.com/etcd-io/etcd/pull/11358))
+
+### Go
+
+- Require [*Go 1.16+*](https://github.com/etcd-io/etcd/pull/11110).
+- Compile with [*Go 1.16+*](https://golang.org/doc/devel/release.html#go1.16)
+- etcd uses [go modules](https://github.com/etcd-io/etcd/pull/12279) (instead of vendor dir) to track dependencies.
+
+### Project Governance
+
+- The etcd team has added a well-defined and openly discussed project [governance](https://github.com/etcd-io/etcd/pull/11175).
+
+
+
+
diff --git a/CHANGELOG/CHANGELOG-3.6.md b/CHANGELOG/CHANGELOG-3.6.md
new file mode 100644
index 00000000000..c6569365d97
--- /dev/null
+++ b/CHANGELOG/CHANGELOG-3.6.md
@@ -0,0 +1,111 @@
+
+
+Previous change logs can be found at [CHANGELOG-3.5](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.5.md).
+
+
+
+## v3.6.0 (TBD)
+
+See [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0...v3.6.0).
+
+### Breaking Changes
+
+- `etcd` will no longer start on a data dir created by newer versions (for example, etcd v3.6 will not run on a v3.7+ data dir). To downgrade the data dir, please check out the `etcdutl migrate` command.
+- `etcd` no longer supports serving client requests on the peer listen endpoints (`--listen-peer-urls`). See [pull/13565](https://github.com/etcd-io/etcd/pull/13565).
+- `etcdctl` will sleep for 2 seconds when performing a range delete without the `--range` flag. See [pull/13747](https://github.com/etcd-io/etcd/pull/13747).
+- Applications which depend on etcd v3.6 packages must be built with Go version >= 1.18.
+
+### Deprecations
+
+- Deprecated [V2 discovery](https://etcd.io/docs/v3.5/dev-internal/discovery_protocol/).
+- Deprecated [SetKeepAlive and SetKeepAlivePeriod in limitListenerConn](https://github.com/etcd-io/etcd/pull/14356).
+- Removed [etcdctl defrag --data-dir](https://github.com/etcd-io/etcd/pull/13793).
+- Removed [etcdctl snapshot status](https://github.com/etcd-io/etcd/pull/13809).
+- Removed [etcdctl snapshot restore](https://github.com/etcd-io/etcd/pull/13809).
+- Removed [etcdutl snapshot save](https://github.com/etcd-io/etcd/pull/13809).
+
+
+### etcdctl v3
+
+- Add command to generate [shell completion](https://github.com/etcd-io/etcd/pull/13133).
+- When printing endpoint status, [show db size in use](https://github.com/etcd-io/etcd/pull/13639).
+- [Always print the raft_term in decimal](https://github.com/etcd-io/etcd/pull/13711) when displaying member list in json.
+- [Add one more field `storageVersion`](https://github.com/etcd-io/etcd/pull/13773) into the response of command `etcdctl endpoint status`.
+- Add [`--max-txn-ops`](https://github.com/etcd-io/etcd/pull/14340) flag to make-mirror command.
+- Add [`--consistency`](https://github.com/etcd-io/etcd/pull/15261) flag to member list command.
+- Display [field `hash_revision`](https://github.com/etcd-io/etcd/pull/14812) for `etcdctl endpoint hash` command.
+- Add [`--max-request-bytes` and `--max-recv-bytes`](https://github.com/etcd-io/etcd/pull/18718) global flags.
+
+### etcdutl v3
+
+- Add command to generate [shell completion](https://github.com/etcd-io/etcd/pull/13142).
+- Add `migrate` command for downgrading/upgrading etcd data dir files.
+- Add [optional --bump-revision and --mark-compacted flag to etcdutl snapshot restore operation](https://github.com/etcd-io/etcd/pull/16029).
+- Add [hashkv](https://github.com/etcd-io/etcd/pull/15965) command to print hash of keys and values up to given revision
+- Removed [legacy etcdutl backup](https://github.com/etcd-io/etcd/pull/16662)
+
+### Package `clientv3`
+
+- [Support serializable `MemberList` operation](https://github.com/etcd-io/etcd/pull/15261).
+
+### Package `server`
+
+- Package `mvcc` was moved to `storage/mvcc`
+- Package `mvcc/backend` was moved to `storage/backend`
+- Package `mvcc/buckets` was moved to `storage/schema`
+- Package `wal` was moved to `storage/wal`
+- Package `datadir` was moved to `storage/datadir`
+
+### Package `raft`
+- [Decouple raft from etcd](https://github.com/etcd-io/etcd/issues/14713). Migrated raft to a separate [repository](https://github.com/etcd-io/raft), and renamed the raft module to `go.etcd.io/raft/v3` (see the import sketch below).
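+
+For code importing these packages, the moves above translate roughly into the following import path updates (a sketch; the `go.etcd.io/etcd/server/v3` module prefix for server packages is assumed):
+
+```go
+package main
+
+import (
+	// v3.5 import paths, for comparison (not imported here):
+	//   go.etcd.io/etcd/server/v3/mvcc
+	//   go.etcd.io/etcd/server/v3/mvcc/backend
+	//   go.etcd.io/etcd/server/v3/wal
+	//   go.etcd.io/etcd/raft/v3
+	// v3.6 import paths after the package moves and the raft split:
+	_ "go.etcd.io/etcd/server/v3/storage/backend"
+	_ "go.etcd.io/etcd/server/v3/storage/mvcc"
+	_ "go.etcd.io/etcd/server/v3/storage/wal"
+	_ "go.etcd.io/raft/v3"
+)
+
+func main() {}
+```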
+
+### etcd server
+
+- Add [`etcd --log-format`](https://github.com/etcd-io/etcd/pull/13339) flag to support log format.
+- Add [`etcd --experimental-max-learners`](https://github.com/etcd-io/etcd/pull/13377) flag to allow configuration of learner max membership.
+- Add [`etcd --experimental-enable-lease-checkpoint-persist`](https://github.com/etcd-io/etcd/pull/13508) flag to handle upgrade from v3.5.2 clusters with this feature enabled.
+- Add [`etcdctl make-mirror --rev`](https://github.com/etcd-io/etcd/pull/13519) flag to support incremental mirror.
+- Add [v3 discovery](https://github.com/etcd-io/etcd/pull/13635) to bootstrap a new etcd cluster.
+- Add [field `storage`](https://github.com/etcd-io/etcd/pull/13772) into the response body of endpoint `/version`.
+- Add [`etcd --max-concurrent-streams`](https://github.com/etcd-io/etcd/pull/14169) flag to configure the max concurrent streams each client can open at a time, and defaults to math.MaxUint32.
+- Add [`etcd grpc-proxy --experimental-enable-grpc-logging`](https://github.com/etcd-io/etcd/pull/14266) flag to log all gRPC requests and responses.
+- Add [`etcd --experimental-compact-hash-check-enabled --experimental-compact-hash-check-time`](https://github.com/etcd-io/etcd/issues/14039) flags to support enabling reliable corruption detection on compacted revisions.
+- Add [Protection on maintenance request when auth is enabled](https://github.com/etcd-io/etcd/pull/14663).
+- Graduated [`--experimental-warning-unary-request-duration` to `--warning-unary-request-duration`](https://github.com/etcd-io/etcd/pull/14414). Note the experimental flag is deprecated and will be decommissioned in v3.7.
+- Add [field `hash_revision` into `HashKVResponse`](https://github.com/etcd-io/etcd/pull/14537).
+- Add [`etcd --experimental-snapshot-catch-up-entries`](https://github.com/etcd-io/etcd/pull/15033) flag to configure the number of entries for a slow follower to catch up after compacting the raft storage entries; defaults to 5k.
+- Decreased [`--snapshot-count` default value from 100,000 to 10,000](https://github.com/etcd-io/etcd/pull/15408).
+- Add [`etcd --tls-min-version --tls-max-version`](https://github.com/etcd-io/etcd/pull/15156) to enable support for TLS 1.3.
+- Add [quota to endpoint status response](https://github.com/etcd-io/etcd/pull/17877)
+- Add [`etcd --experimental-set-member-localaddr`](https://github.com/etcd-io/etcd/pull/17661) to enable using the first specified and non-loopback local address from initial-advertise-peer-urls as the local address when communicating with a peer.
+- Add [Support multiple values for allowed client and peer TLS identities](https://github.com/etcd-io/etcd/pull/18015)
+- Add [`embed.Config.GRPCAdditionalServerOptions`](https://github.com/etcd-io/etcd/pull/14066) to support updating the default internal gRPC configuration for embedded use cases.
+
+### etcd grpc-proxy
+
+- Add [`etcd grpc-proxy start --endpoints-auto-sync-interval`](https://github.com/etcd-io/etcd/pull/14354) flag to enable and configure interval of auto sync of endpoints with server.
+- Add [`etcd grpc-proxy start --listen-cipher-suites`](https://github.com/etcd-io/etcd/pull/14308) flag to support adding configurable cipher list.
+- Add [`tls min/max version to grpc proxy`](https://github.com/etcd-io/etcd/pull/18816) to support setting TLS min and max version.
+
+### tools/benchmark
+
+- [Add etcd client autoSync flag](https://github.com/etcd-io/etcd/pull/13416)
+
+### Metrics, Monitoring
+
+See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release.
+
+- Add [`etcd_disk_defrag_inflight`](https://github.com/etcd-io/etcd/pull/13371).
+- Add [`etcd_debugging_server_alarms`](https://github.com/etcd-io/etcd/pull/14276).
+- Add [`etcd_server_range_duration_seconds`](https://github.com/etcd-io/etcd/pull/17983).
+
+### Go
+- Require [Go 1.23+](https://github.com/etcd-io/etcd/pull/16594).
+- Compile with [Go 1.23+](https://go.dev/doc/devel/release#go1.21.minor). Please refer to [gc-guide](https://go.dev/doc/gc-guide) to configure `GOGC` and `GOMEMLIMIT` properly.
+
+### Other
+
+- Use Distroless as the base image to make the image less vulnerable and reduce image size.
+- [Upgrade grpc-gateway from v1 to v2](https://github.com/etcd-io/etcd/pull/16595).
+
+
diff --git a/CHANGELOG-4.0.md b/CHANGELOG/CHANGELOG-4.0.md
similarity index 85%
rename from CHANGELOG-4.0.md
rename to CHANGELOG/CHANGELOG-4.0.md
index 6b15f723719..860e5efd072 100644
--- a/CHANGELOG-4.0.md
+++ b/CHANGELOG/CHANGELOG-4.0.md
@@ -1,14 +1,9 @@
-Previous change logs can be found at [CHANGELOG-3.x](https://github.com/etcd-io/etcd/blob/main/CHANGELOG-3.x.md).
-
-
-The minimum recommended etcd versions to run in **production** are 3.2.28+, 3.3.18+, and 3.4.2+.
-
+Previous change logs can be found at [CHANGELOG-3.x](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.x.md).
-
## v4.0.0 (TBD)
See [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0...v4.0.0) and [v4.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_4_0/) for any breaking changes.
@@ -18,10 +13,6 @@ See [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0...v4.0.0) and
### Breaking Changes
- [Secure etcd by default](https://github.com/etcd-io/etcd/issues/9475)?
-- Change `/health` endpoint output.
- - Previously, `{"health":"true"}`.
- - Now, `{"health":true}`.
- - Breaks [Kubernetes `kubectl get componentstatuses` command](https://github.com/kubernetes/kubernetes/issues/58240).
- Deprecate [`etcd --proxy*`](TODO) flags; **no more v2 proxy**.
- Deprecate [v2 storage backend](https://github.com/etcd-io/etcd/issues/9232); **no more v2 store**.
- v2 API is still supported via [v2 emulation](TODO).
diff --git a/CHANGELOG/README.md b/CHANGELOG/README.md
new file mode 100644
index 00000000000..8448414b9b7
--- /dev/null
+++ b/CHANGELOG/README.md
@@ -0,0 +1,21 @@
+# Change logs
+
+## Production recommendation
+
+The minimum recommended etcd versions to run in **production** are v3.4.22+ and v3.5.6+. Refer to the [versioning policy](https://etcd.io/docs/v3.5/op-guide/versioning/) for more details.
+
+### v3.5 data corruption issue
+
+Running etcd v3.5.2, v3.5.1 and v3.5.0 under high load can cause a data corruption issue.
+If the etcd process is killed, occasionally some committed transactions are not reflected on all members.
+The recommendation is to upgrade to v3.5.4+.
+
+If you have encountered data corruption, please follow instructions on https://etcd.io/docs/v3.5/op-guide/data_corruption/.
+
+## Change log rules
+1. Each patch release only includes changes against the previous patch release.
+For example, the change log of v3.5.5 should only include items which are new to v3.5.4.
+2. For the first release (e.g. 3.4.0, 3.5.0, 3.6.0, 4.0.0 etc.) for each minor or major
+version, it only includes changes which are new to the first release of previous minor
+or major version. For example, v3.5.0 should only include items which are new to v3.4.0,
+and v3.6.0 should only include items which are new to v3.5.0.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3b1d78dcfa9..5af3a32ea52 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,47 +1,164 @@
# How to contribute
-etcd is Apache 2.0 licensed and accepts contributions via GitHub pull requests. This document outlines some of the conventions on commit message formatting, contact points for developers, and other resources to help get contributions into etcd.
+etcd is Apache 2.0 licensed and accepts contributions via GitHub pull requests.
+This document outlines the basics of contributing to etcd.
-# Email and chat
+This is a rough outline of what a contributor's workflow looks like:
+* [Find something to work on](#Find-something-to-work-on)
+ * [Check for flaky tests](#Check-for-flaky-tests)
+* [Set up development environment](#Set-up-development-environment)
+* [Implement your change](#Implement-your-change)
+* [Commit your change](#Commit-your-change)
+* [Create a pull request](#Create-a-pull-request)
+* [Get your pull request reviewed](#Get-your-pull-request-reviewed)
-- Email: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev)
-- IRC: #[etcd](irc://irc.freenode.org:6667/#etcd) IRC channel on freenode.org
-- Slack: [#etcd](https://kubernetes.slack.com/messages/C3HD8ARJ5/details/)
+If you have any questions, please reach out using one of the methods listed in [contact].
-## Getting started
+[contact]: ./README.md#Contact
-- Fork the repository on GitHub
-- Read the README.md for build instructions
+## Learn more about etcd
-## Reporting bugs and creating issues
+Before making a change please look through the resources below to learn more about etcd and tools used for development.
-Reporting bugs is one of the best ways to contribute. However, a good bug report has some very specific qualities, so please read over our short document on [reporting bugs](https://etcd.io/docs/latest/reporting_bugs) before submitting a bug report. This document might contain links to known issues, another good reason to take a look there before reporting a bug.
+* Please learn about [Git](https://github.com/git-guides) version control system used in etcd.
+* Read the [etcd learning resources](https://etcd.io/docs/v3.5/learning/)
+* Read the [etcd community membership](/Documentation/contributor-guide/community-membership.md)
+* Watch [etcd deep dive](https://www.youtube.com/watch?v=D2pm6ufIt98&t=927s)
+* Watch [etcd code walkthrough](https://www.youtube.com/watch?v=H3XaSF6wF7w)
-## Contribution flow
+## Find something to work on
-This is a rough outline of what a contributor's workflow looks like:
+All the work in the etcd project is tracked in the [GitHub issue tracker].
+Issues should be properly labeled, making it easy for you to find something to work on.
-- Create a topic branch from where to base the contribution. This is usually main.
-- Make commits of logical units.
-- Make sure commit messages are in the proper format (see below).
-- Push changes in a topic branch to a personal fork of the repository.
-- Submit a pull request to etcd-io/etcd.
-- The PR must receive a LGTM from two maintainers found in the MAINTAINERS file.
+Depending on your interest and experience you should check different labels:
+* If you are just starting, check issues labeled with [good first issue].
+* When you feel more comfortable in your contributions, check out [help wanted].
+* Advanced contributors can try to help with issues labeled [priority/important] covering the most relevant work at the time.
-Thanks for contributing!
+If any of the aforementioned labels don't have unassigned issues, please [contact] one of the [maintainers] and ask them to triage more issues.
+
+[github issue tracker]: https://github.com/etcd-io/etcd/issues
+[good first issue]: https://github.com/search?type=issues&q=org%3Aetcd-io+state%3Aopen++label%3A%22good+first+issue%22
+[help wanted]: https://github.com/search?type=issues&q=org%3Aetcd-io+state%3Aopen++label%3A%22help+wanted%22
+[maintainers]: https://github.com/etcd-io/etcd/blob/main/OWNERS
+[priority/important]: https://github.com/search?type=issues&q=org%3Aetcd-io+state%3Aopen++label%3A%22priority%2Fimportant%22
+
+### Check for flaky tests
+
+The project could always use some help to deflake tests. [These](https://github.com/etcd-io/etcd/issues?q=is%3Aissue+is%3Aopen+label%3Atype%2Fflake) are the currently open flaky test issues.
+
+Additionally, because etcd uses Kubernetes' Prow infrastructure to run CI jobs, past test results can be viewed at [testgrid](https://testgrid.k8s.io/sig-etcd).
+
+| Tests | Status |
+| ----- | ------ |
+| periodics e2e-amd64 | [![sig-etcd-periodics/ci-etcd-e2e-amd64](https://testgrid.k8s.io/q/summary/sig-etcd-periodics/ci-etcd-e2e-amd64/tests_status?style=svg)](https://testgrid.k8s.io/q/summary/sig-etcd-periodics/ci-etcd-e2e-amd64) |
+| presubmit build | [![sig-etcd-presubmits/pull-etcd-build](https://testgrid.k8s.io/q/summary/sig-etcd-presubmits/pull-etcd-build/tests_status?style=svg)](https://testgrid.k8s.io/q/summary/sig-etcd-presubmits/pull-etcd-build) |
+| presubmit e2e-amd64 | [![sig-etcd-presubmits/pull-etcd-e2e-amd64](https://testgrid.k8s.io/q/summary/sig-etcd-presubmits/pull-etcd-e2e-amd64/tests_status?style=svg)](https://testgrid.k8s.io/q/summary/sig-etcd-presubmits/pull-etcd-e2e-amd64) |
+| presubmit unit-test | [![sig-etcd-presubmits/pull-etcd-unit-test](https://testgrid.k8s.io/q/summary/sig-etcd-presubmits/pull-etcd-unit-test/tests_status?style=svg)](https://testgrid.k8s.io/q/summary/sig-etcd-presubmits/pull-etcd-unit-test) |
+| presubmit verify | [![sig-etcd-presubmits/pull-etcd-verify](https://testgrid.k8s.io/q/summary/sig-etcd-presubmits/pull-etcd-verify/tests_status?style=svg)](https://testgrid.k8s.io/q/summary/sig-etcd-presubmits/pull-etcd-verify) |
+| postsubmit build | [![sig-etcd-postsubmits/post-etcd-build](https://testgrid.k8s.io/q/summary/sig-etcd-postsubmits/post-etcd-build/tests_status?style=svg)](https://testgrid.k8s.io/q/summary/sig-etcd-postsubmits/post-etcd-build) |
+
+If you find any flaky tests on testgrid, please
+
+1. Check [existing issues](https://github.com/etcd-io/etcd/issues?q=is%3Aissue+is%3Aopen+label%3Atype%2Fflake) to see if an issue has already been opened for this test. If not, open an issue with the `type/flake` label.
+2. Try to reproduce the flaky test on your machine via [`stress`](https://pkg.go.dev/golang.org/x/tools/cmd/stress), for example, to reproduce the failure of `TestPeriodicSkipRevNotChange`:
+
+```bash
+# install the stress utility
+go install golang.org/x/tools/cmd/stress@latest
+cd server/etcdserver/api/v3compactor
+# compile the test
+go test -v -c -count 1
+# run the compiled test file using stress
+stress -p=8 ./v3compactor.test -test.run '^TestPeriodicSkipRevNotChange$'
+```
+3. Fix it.
+
+## Set up development environment
+
+The etcd project supports two options for development:
+
+ 1. Manually set up the local environment.
+ 2. Automatically set up [devcontainer](https://containers.dev).
+
+For both options, the only supported architecture is `linux-amd64`. Bug reports for other environments will generally be ignored. Supporting new environments requires the introduction of proper tests and maintainer support that is currently lacking in the etcd project.
+
+If you would like etcd to support your preferred environment you can [file an issue].
+
+### Option 1 - Manually set up the local environment
+
+This is the original etcd development environment; it is the most supported and is backward compatible for developing older etcd versions.
+
+Follow the steps below to set up the environment:
+
+- [Clone the repository](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository)
+- Install Go by following [installation](https://go.dev/doc/install). Please check the minimal go version in [go.mod file](./go.mod#L3).
+- Install build tools:
+ - [`make`](https://www.gnu.org/software/make/): For Debian-based distributions
+ you can run `sudo apt-get install build-essential`
+  - [`protoc`](https://protobuf.dev/): Download it for your OS. Use version
+    [`v3.20.3`](https://github.com/protocolbuffers/protobuf/releases/tag/v3.20.3).
+  - [`yamllint`](https://www.yamllint.com/): For Debian-based distributions you
+    can run `sudo apt-get install yamllint`
+  - [`jq`](https://jqlang.github.io/jq/): For Debian-based distributions you can
+    run `sudo apt-get install jq`
+  - [`xz`](https://tukaani.org/xz/): For Debian-based distributions you can run
+    `sudo apt-get install xz-utils`
+- Verify that everything is installed by running `make build`
-### Code style
+Note: `make build` runs with `-v`. Other build flags can be added through the env var `GO_BUILD_FLAGS`, **if required**. For example:
+```console
+GO_BUILD_FLAGS="-buildmode=pie" make build
+```
+
+### Option 2 - Automatically set up devcontainer
+
+This is a more recently added environment that aims to make it faster for new contributors to get started with etcd. This option is supported for etcd versions 3.6 onwards.
+
+This option can be [used locally](https://code.visualstudio.com/docs/devcontainers/tutorial) on a system running Visual Studio Code and Docker, or in a remote cloud-based [Codespaces](https://github.com/features/codespaces) environment.
+
+To get started, create a codespace for this repository by clicking this 👇
+
+[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://github.com/codespaces/new?hide_repo_select=true&ref=main&repo=11225014)
+
+A codespace will open in a web-based version of Visual Studio Code. The [dev container](.devcontainer/devcontainer.json) is fully configured with the software needed for this project.
+
+**Note**: Dev containers is an open spec which is supported by [GitHub Codespaces](https://github.com/codespaces) and [other tools](https://containers.dev/supporting).
+
+[file an issue]: https://github.com/etcd-io/etcd/issues/new/choose
+
+## Implement your change
+
+etcd code should follow the coding style suggested by the Golang community.
+See the [style doc](https://go.dev/wiki/CodeReviewComments) for details.
-The coding style suggested by the Golang community is used in etcd. See the [style doc](https://github.com/golang/go/wiki/CodeReviewComments) for details.
+Please ensure that your change passes static analysis (requires
+[golangci-lint](https://golangci-lint.run/welcome/install/)):
+- `make verify` to verify if all checks pass.
+- `make verify-*` to verify a single check, for example, `make verify-bom` to verify if `bill-of-materials.json` file is up-to-date.
+- `make fix` to fix all checks.
+- `make fix-*` to fix a single check, for example, `make fix-bom` to update `bill-of-materials.json`.
-Please follow this style to make etcd easy to review, maintain and develop.
+Please ensure that your change passes tests.
+- `make test-unit` to run unit tests.
+- `make test-integration` to run integration tests.
+- `make test-e2e` to run e2e tests.
-### Format of the commit message
+All changes are expected to come with a unit test.
+All new features are expected to have either e2e or integration tests.
-We follow a rough convention for commit messages that is designed to answer two
-questions: what changed and why. The subject line should feature the what and
-the body of the commit should describe the why.
+## Commit your change
+etcd follows a rough convention for commit messages:
+* First line:
+ * Should start with the name of the package (for example `etcdserver`, `etcdctl`) followed by the `:` character.
+ * Describe the `what` behind the change
+* Optionally, the author might provide the `why` behind the change in the main commit message body.
+* Last line should be `Signed-off-by: firstname lastname <email address>` (can be automatically generated by providing `--signoff` to the git commit command).
+
+Example of commit message:
```
etcdserver: add grpc interceptor to log info on incoming requests
@@ -51,44 +168,30 @@ remote client info, request content (with value field redacted), request
handling latency, response size, etc. Uses zap logger if available,
otherwise uses capnslog.
-Fixes #38
+Signed-off-by: FirstName LastName <email address>
```
-The format can be described more formally as follows:
+## Create a pull request
-```
-:
-
-
-
-
-```
+Please follow the [making a pull request](https://docs.github.com/en/get-started/quickstart/contributing-to-projects#making-a-pull-request) guide.
-The first line is the subject and should be no longer than 70 characters, the second
-line is always blank, and other lines should be wrapped at 80 characters. This allows
-the message to be easier to read on GitHub as well as in various git tools.
+If you are still working on the pull request, you can convert it to a draft by clicking the `Convert to draft` link just below the list of reviewers.
-### Pull request across multiple files and packages
+Multiple small PRs are preferred over single large ones (>500 lines of code).
-If multiple files in a package are changed in a pull request for example:
+Please make sure there is an associated issue for each PR you submit. Create one if it doesn't exist yet, and close the issue
+once the PR gets merged and has been backported to previous stable releases, if necessary. If there are multiple PRs linked to
+the same issue, refrain from closing the issue until all PRs have been merged and, if needed, backported to previous stable
+releases.
-```
-etcdserver/config.go
-etcdserver/corrupt.go
-```
+## Get your pull request reviewed
-At the end of the review process if multiple commits exist for a single package they
-should be squashed/rebased into a single commit before being merged.
+Before requesting a review, please ensure that all GitHub and Prow checks are successful. In some cases your pull request may have the label `needs-ok-to-test`. If so, an `etcd-io` organisation member will leave a comment on your pull request with `/ok-to-test` to trigger all checks to be run.
-```
-etcdserver:
-[..]
-```
+It might happen that some unrelated tests on your PR fail due to their flakiness.
+In such cases please [file an issue] to deflake the problematic test and ask one of the [maintainers] to rerun the tests.
-If a pull request spans many packages these commits should be squashed/rebased into a single
-commit using message with a more generic `*:` prefix.
+If all checks are successful, feel free to request a review from people who were involved in the original discussion or from the [maintainers].
+Depending on the complexity of the PR, it might require one or two maintainers to approve your change before merging.
-```
-*:
-[..]
-```
+Thanks for contributing!
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000000..0ca9b5242bc
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,14 @@
+ARG ARCH=amd64
+FROM --platform=linux/${ARCH} gcr.io/distroless/static-debian12@sha256:5c7e2b465ac6a2a4e5f4f7f722ce43b147dabe87cb21ac6c4007ae5178a1fa58
+
+ADD etcd /usr/local/bin/
+ADD etcdctl /usr/local/bin/
+ADD etcdutl /usr/local/bin/
+
+WORKDIR /var/etcd/
+WORKDIR /var/lib/etcd/
+
+EXPOSE 2379 2380
+
+# Define default command.
+CMD ["/usr/local/bin/etcd"]
diff --git a/Dockerfile-release.amd64 b/Dockerfile-release.amd64
deleted file mode 100644
index 9bd425887cd..00000000000
--- a/Dockerfile-release.amd64
+++ /dev/null
@@ -1,18 +0,0 @@
-FROM k8s.gcr.io/build-image/debian-base:buster-v1.4.0
-
-ADD etcd /usr/local/bin/
-ADD etcdctl /usr/local/bin/
-ADD etcdutl /usr/local/bin/
-RUN mkdir -p /var/etcd/
-RUN mkdir -p /var/lib/etcd/
-
-# Alpine Linux doesn't use pam, which means that there is no /etc/nsswitch.conf,
-# but Golang relies on /etc/nsswitch.conf to check the order of DNS resolving
-# (see https://github.com/golang/go/commit/9dee7771f561cf6aee081c0af6658cc81fac3918)
-# To fix this we just create /etc/nsswitch.conf and add the following line:
-RUN echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf
-
-EXPOSE 2379 2380
-
-# Define default command.
-CMD ["/usr/local/bin/etcd"]
diff --git a/Dockerfile-release.arm64 b/Dockerfile-release.arm64
deleted file mode 100644
index d04d79041a8..00000000000
--- a/Dockerfile-release.arm64
+++ /dev/null
@@ -1,13 +0,0 @@
-FROM k8s.gcr.io/build-image/debian-base-arm64:buster-v1.4.0
-
-ADD etcd /usr/local/bin/
-ADD etcdctl /usr/local/bin/
-ADD etcdutl /usr/local/bin/
-ADD var/etcd /var/etcd
-ADD var/lib/etcd /var/lib/etcd
-ENV ETCD_UNSUPPORTED_ARCH=arm64
-
-EXPOSE 2379 2380
-
-# Define default command.
-CMD ["/usr/local/bin/etcd"]
diff --git a/Dockerfile-release.ppc64le b/Dockerfile-release.ppc64le
deleted file mode 100644
index 51adb7ae3af..00000000000
--- a/Dockerfile-release.ppc64le
+++ /dev/null
@@ -1,12 +0,0 @@
-FROM k8s.gcr.io/build-image/debian-base-ppc64le:buster-v1.4.0
-
-ADD etcd /usr/local/bin/
-ADD etcdctl /usr/local/bin/
-ADD etcdutl /usr/local/bin/
-ADD var/etcd /var/etcd
-ADD var/lib/etcd /var/lib/etcd
-
-EXPOSE 2379 2380
-
-# Define default command.
-CMD ["/usr/local/bin/etcd"]
diff --git a/Dockerfile-release.s390x b/Dockerfile-release.s390x
deleted file mode 100644
index a96d45534c0..00000000000
--- a/Dockerfile-release.s390x
+++ /dev/null
@@ -1,12 +0,0 @@
-FROM k8s.gcr.io/build-image/debian-base-s390x:buster-v1.4.0
-
-ADD etcd /usr/local/bin/
-ADD etcdctl /usr/local/bin/
-ADD etcdutl /usr/local/bin/
-ADD var/etcd /var/etcd
-ADD var/lib/etcd /var/lib/etcd
-
-EXPOSE 2379 2380
-
-# Define default command.
-CMD ["/usr/local/bin/etcd"]
diff --git a/Documentation/OWNERS b/Documentation/OWNERS
new file mode 100644
index 00000000000..f498964062e
--- /dev/null
+++ b/Documentation/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+ - area/documentation
diff --git a/Documentation/README.md b/Documentation/README.md
index 960e46016ef..5c1262f8e2e 100644
--- a/Documentation/README.md
+++ b/Documentation/README.md
@@ -1,6 +1,4 @@
-# The etcd documentation
-
-etcd is a distributed key-value store designed to reliably and quickly preserve and provide access to critical data. It enables reliable distributed coordination through distributed locking, leader elections, and write barriers. An etcd cluster is intended for high availability and permanent data storage and retrieval.
-
-These documents have moved to the [etcd-io/website repo](https://github.com/etcd-io/website/), and can be viewed live at [https://etcd.io/docs/latest/](https://etcd.io/docs/latest/).
+This directory includes etcd project internal documentation for new and existing contributors.
+For user and developer documentation please go to [etcd.io](https://etcd.io/),
+which is developed in the [website](https://github.com/etcd-io/website/) repo.
diff --git a/Documentation/contributor-guide/branch_management.md b/Documentation/contributor-guide/branch_management.md
new file mode 100644
index 00000000000..dd3420cb75d
--- /dev/null
+++ b/Documentation/contributor-guide/branch_management.md
@@ -0,0 +1,26 @@
+# Branch management
+
+## Guide
+
+* New development occurs on the [main branch][main].
+* The main branch should always have a green build!
+* Backwards-compatible bug fixes should target the main branch and subsequently be ported to stable branches.
+* Once the main branch is ready for release, it will be tagged and become the new stable branch.
+
+The etcd team has adopted a *rolling release model* and supports two stable versions of etcd.
+
+### Main branch
+
+The `main` branch is our development branch. All new features land here first.
+
+To try new and experimental features, pull `main` and play with it. Note that `main` may not be stable because new features may introduce bugs.
+
+Before the release of the next stable version, feature PRs will be frozen. A [release manager](./release.md#release-management) will be assigned to the major/minor version and will lead the etcd community in testing, bug-fix, and documentation of the release for one to two weeks.
+
+### Stable branches
+
+All branches with the prefix `release-` are considered _stable_ branches.
+
+After every minor release ([semver.org](https://semver.org/)), we will have a new stable branch for that release, managed by a [patch release manager](./release.md#release-management). We will keep fixing backward-compatible bugs for the latest two stable releases. A _patch_ release for each supported release branch, incorporating any bug fixes, will be made once every two weeks, if there are any patches.
+
+[main]: https://github.com/etcd-io/etcd/tree/main
diff --git a/Documentation/contributor-guide/community-membership.md b/Documentation/contributor-guide/community-membership.md
new file mode 100644
index 00000000000..15d5615a0ca
--- /dev/null
+++ b/Documentation/contributor-guide/community-membership.md
@@ -0,0 +1,169 @@
+# Community membership
+
+This doc outlines the various responsibilities of contributor roles in etcd.
+
+| Role | Responsibilities | Requirements | Defined by |
+|------------|----------------------------------------------|---------------------------------------------------------------|-------------------------------|
+| Member | Active contributor in the community | Sponsored by 2 reviewers and multiple contributions | etcd GitHub org member |
+| Reviewer | Review contributions from other members | History of review and authorship | [OWNERS] file reviewer entry |
+| Maintainer | Set direction and priorities for the project | Demonstrated responsibility and excellent technical judgement | [OWNERS] file approver entry |
+
+## New contributors
+
+New contributors should be welcomed to the community by existing members,
+helped with PR workflow, and directed to relevant documentation and
+communication channels.
+
+## Established community members
+
+Established community members are expected to demonstrate their adherence to the
+principles in this document, familiarity with project organization, roles,
+policies, procedures, conventions, etc., and technical and/or writing ability.
+Role-specific expectations, responsibilities, and requirements are enumerated
+below.
+
+## Member
+
+Members are continuously active contributors to the community. They can have
+issues and PRs assigned to them. Members are expected to remain active
+contributors to the community.
+
+**Defined by:** Member of the etcd GitHub organization.
+
+### Requirements
+
+- Enabled [two-factor authentication] on their GitHub account
+- Have made multiple contributions to the project or community. Contribution may include, but is not limited to:
+ - Authoring or reviewing PRs on GitHub. At least one PR must be **merged**.
+ - Filing or commenting on issues on GitHub
+ - Contributing to community discussions (e.g. meetings, Slack, email discussion
+ forums, Stack Overflow)
+- Subscribed to [etcd-dev@googlegroups.com](https://groups.google.com/g/etcd-dev)
+- Have read the [contributor guide]
+- Sponsored by two active maintainers or reviewers.
+ - Sponsors must be from multiple member companies to demonstrate integration across the community.
+ - With no objections from other maintainers
+- Open a [membership nomination] issue against the `kubernetes/org` repo
+ - Ensure your sponsors are @mentioned on the issue
+ - Make sure that the list of contributions included is representative of your work on the project.
+- Members can be removed by a supermajority of the maintainers or can resign by notifying
+ the maintainers.
+
+### Responsibilities and privileges
+
+- Responsive to issues and PRs assigned to them
+- Granted "triage access" to etcd project
+- Active owner of code they have contributed (unless ownership is explicitly transferred)
+ - Code is well-tested
+ - Tests consistently pass
+ - Addresses bugs or issues discovered after code is accepted
+
+**Note:** Members who frequently contribute code are expected to proactively
+perform code reviews and work towards becoming a *reviewer*.
+
+## Reviewers
+
+Reviewers are contributors who have demonstrated greater skill in
+reviewing the code from other contributors. They are knowledgeable about both
+the codebase and software engineering principles. Their LGTM counts towards
+merging a code change into the project. A reviewer is generally on the ladder towards
+maintainership.
+
+**Defined by:** *reviewers* entry in the [OWNERS] file.
+
+### Requirements
+
+- member for at least 3 months.
+- Primary reviewer for at least 5 PRs to the codebase.
+- Reviewed or contributed at least 20 substantial PRs to the codebase.
+- Knowledgeable about the codebase.
+- Sponsored by two active maintainers.
+ - Sponsors must be from multiple member companies to demonstrate integration across the community.
+ - With no objections from other maintainers
+- Reviewers can be removed by a supermajority of the maintainers or can resign by notifying
+ the maintainers.
+
+### Responsibilities and privileges
+
+- Code reviewer status may be a precondition to accepting large code contributions
+- Responsible for project quality control via code reviews
+ - Focus on code quality and correctness, including testing and factoring
+ - May also review for more holistic issues, but not a requirement
+- Expected to be responsive to review requests
+- Assigned PRs to review related to area of expertise
+- Assigned test bugs related to area of expertise
+- Granted "triage access" to etcd project
+
+## Maintainers
+
+Maintainers are first and foremost contributors who have shown they
+are committed to the long-term success of a project. Maintainership is about building
+trust with the current maintainers and being a person that they can
+depend on to make decisions in the best interest of the project in a consistent manner.
+
+**Defined by:** *approvers* entry in the [OWNERS] file.
+
+### Requirements
+
+- Deep understanding of the technical goals and direction of the project
+- Deep understanding of the technical domain of the project
+- Sustained contributions to design and direction by doing all of:
+ - Authoring and reviewing proposals
+ - Initiating, contributing, and resolving discussions (emails, GitHub issues, meetings)
+ - Identifying subtle or complex issues in the designs and implementation of PRs
+- Directly contributed to the project through implementation and/or review
+- Sponsored by two active maintainers and elected by supermajority
+ - Sponsors must be from multiple member companies to demonstrate integration across the community.
+- To become a maintainer send an email with your candidacy to etcd-maintainers-private@googlegroups.com
+ - Ensure your sponsors are @mentioned in the email
+ - Include a list of contributions representative of your work on the project.
+ - Existing maintainers will vote privately and respond to the email with either acceptance or feedback for suggested improvements.
+- Once your membership is approved, you are expected to:
+ - Open a PR and add an entry to the [OWNERS] file
+ - Subscribe to etcd-maintainers@googlegroups.com and etcd-maintainers-private@googlegroups.com
+ - Request to join the [etcd-maintainers team of the etcd organization on GitHub](https://github.com/orgs/etcd-io/teams/maintainers-etcd)
+ - Request to join the private Slack channel for etcd maintainers on [Kubernetes Slack](http://slack.kubernetes.io/)
+ - Request access to etcd-development GCP project where we publish releases
+ - Request access to passwords shared between maintainers
+
+### Responsibilities and privileges
+
+- Make and approve technical design decisions
+- Set technical direction and priorities
+- Define milestones and releases
+- Mentor and guide reviewers and contributors to the project.
+- Participate when called upon in the [security disclosure and release process]
+- Ensure the continued health of the project
+ - Adequate test coverage to confidently release
+ - Tests are passing reliably (i.e. not flaky) and are fixed when they fail
+- Ensure a healthy process for discussion and decision-making is in place.
+- Work with other maintainers to maintain the project's overall health and success holistically
+
+### Retiring
+
+Life priorities, interests, and passions can change. Maintainers can retire and
+move to [emeritus maintainers]. If a maintainer needs to step down, they should
+inform other maintainers and, if possible, help find someone to pick up the related
+work. At the very least, ensure the related work can be continued. Afterward,
+they can remove themselves from the list of existing maintainers.
+
+If a maintainer has not been performing their duties for 12 months,
+they can be removed by other maintainers. In that case, the inactive maintainer will
+be first notified via an email. If the situation doesn't improve, they will be
+removed. If an emeritus maintainer wants to regain an active role, they can do
+so by renewing their contributions. Active maintainers should welcome such a move.
+Retiring other maintainers or regaining the status should require the approval
+of at least two active maintainers.
+
+## Acknowledgements
+
+Contributor roles and responsibilities were written based on the [Kubernetes community membership].
+
+[OWNERS]: /OWNERS
+[contributor guide]: /CONTRIBUTING.md
+[membership nomination]: https://github.com/kubernetes/org/issues/new?assignees=&labels=area%2Fgithub-membership&projects=&template=membership.yml&title=REQUEST%3A+New+membership+for+%3Cyour-GH-handle%3E
+[Kubernetes community membership]: https://github.com/kubernetes/community/blob/master/community-membership.md
+[emeritus maintainers]: /README.md#etcd-emeritus-maintainers
+[security disclosure and release process]: /security/README.md
+[two-factor authentication]: https://docs.github.com/en/authentication/securing-your-account-with-two-factor-authentication-2fa/about-two-factor-authentication
+
diff --git a/Documentation/contributor-guide/dependency_management.md b/Documentation/contributor-guide/dependency_management.md
new file mode 100644
index 00000000000..8b022b424f8
--- /dev/null
+++ b/Documentation/contributor-guide/dependency_management.md
@@ -0,0 +1,162 @@
+# Dependency management
+
+## Table of Contents
+
+- **[Main branch](#main-branch)**
+ - [Dependencies used in workflows](#dependencies-used-in-workflows)
+ - [Bumping order](#bumping-order)
+ - [Steps to bump a dependency](#steps-to-bump-a-dependency)
+ - [Indirect dependencies](#indirect-dependencies)
+ - [Known incompatible dependency updates](#known-incompatible-dependency-updates)
+ - [arduino/setup-protoc](#arduinosetup-protoc)
+ - [Rotation worksheet](#rotation-worksheet)
+- **[Stable branches](#stable-branches)**
+- **[Golang versions](#golang-versions)**
+- **[Core dependencies mappings](#core-dependencies-mappings)**
+
+## Main branch
+
+Dependabot is enabled & [configured](https://github.com/etcd-io/etcd/blob/main/.github/dependabot.yml) to
+manage dependencies for the etcd `main` branch. However, Dependabot doesn't work well for multi-module repositories like `etcd`;
+see [dependabot-core/issues/6678](https://github.com/dependabot/dependabot-core/issues/6678).
+Usually, human intervention is required each time Dependabot automatically opens PRs to bump dependencies.
+Please see the guidance below.
+
+### Dependencies used in workflows
+
+PRs that automatically bump dependencies used in workflows (see examples below) are fine and can be approved & merged directly, as long as all checks are successful.
+
+- [build(deps): bump github/codeql-action from 2.2.11 to 2.2.12](https://github.com/etcd-io/etcd/pull/15736)
+- [build(deps): bump actions/checkout from 3.5.0 to 3.5.2](https://github.com/etcd-io/etcd/pull/15735)
+- [build(deps): bump ossf/scorecard-action from 2.1.2 to 2.1.3](https://github.com/etcd-io/etcd/pull/15607)
+
+### Bumping order
+
+When multiple etcd modules depend on the same package, please bump the package version for all the modules in the correct order. The rule is simple:
+if module A depends on module B, then bump the dependency for module B before module A. If the two modules do not depend on each other, then
+it doesn't matter which module is bumped first. For example, multiple modules depend on `github.com/spf13/cobra`, so we need to bump the dependency
+in the following order:
+
+- go.etcd.io/etcd/pkg/v3
+- go.etcd.io/etcd/server/v3
+- go.etcd.io/etcd/etcdctl/v3
+- go.etcd.io/etcd/etcdutl/v3
+- go.etcd.io/etcd/tests/v3
+- go.etcd.io/etcd/v3
+- go.etcd.io/etcd/tools/v3
+
+For more details about etcd Golang modules, please check [Golang modules](modules.md).
+
+Note that the module `go.etcd.io/etcd/tools/v3` doesn't depend on any other modules, nor is it depended on by any other modules, so it doesn't matter when its dependencies are bumped.
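+
+For instance, the order above can be applied in a loop. This is only a sketch; it assumes the usual mapping between the module paths listed above and the repository directories (`pkg/`, `server/`, `etcdctl/`, `etcdutl/`, `tests/`, the repository root, and `tools/`):
+
+```bash
+# Sketch: bump github.com/spf13/cobra across the affected modules in dependency order.
+# ETCD_ROOT_DIR points to a local checkout of the etcd repository.
+cd "${ETCD_ROOT_DIR}"
+for mod_dir in pkg server etcdctl etcdutl tests . tools; do
+    (cd "${mod_dir}" && go get github.com/spf13/cobra@v1.7.0 && go mod tidy)
+done
+./scripts/fix.sh
+```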
+
+### Steps to bump a dependency
+
+Using `github.com/spf13/cobra` as an example, follow the steps below to bump it from 1.6.1 to 1.7.0 for the module `go.etcd.io/etcd/etcdctl/v3`:
+
+```bash
+cd ${ETCD_ROOT_DIR}/etcdctl
+go get github.com/spf13/cobra@v1.7.0
+go mod tidy
+cd ..
+./scripts/fix.sh
+```
+
+Execute the same steps for all other modules. When you have finished bumping the dependency for all modules, commit the change:
+
+```bash
+git add .
+git commit --signoff -m "dependency: bump github.com/spf13/cobra from 1.6.1 to 1.7.0"
+```
+
+Please close the related PRs that were automatically opened by Dependabot.
+
+When you bump multiple dependencies in one PR, it's recommended to create a separate commit for each dependency. But it isn't a must; for example,
+you can include all dependency bumps for the module `go.etcd.io/etcd/tools/v3` in one commit.
+
+#### Troubleshooting
+
+In the event of bumping the version of protoc, protoc plugins, or grpc-gateway, it might change the `*.proto` files, which can result in the following error:
+
+```bash
+[0;31mFAIL: 'genproto' FAILED at Wed Jul 31 07:09:08 UTC 2024
+make: *** [Makefile:134: verify-genproto] Error 255
+```
+
+To fix the above error, run the following script from the root of etcd repository:
+
+```bash
+./scripts/genproto.sh
+```
+
+### Indirect dependencies
+
+Usually, we don't bump a dependency if all modules just indirectly depend on it, such as `github.com/go-logr/logr`.
+
+If an indirect dependency (e.g. `D1`) causes any CVE or bugs that affect etcd, usually the module (e.g. `M1`, not part of etcd, but used by etcd)
+which depends on it should bump the dependency (`D1`), and then etcd just needs to bump `M1`. However, if the module (`M1`) somehow doesn't
+bump the problematic dependency, then etcd can still bump it (`D1`) directly following the same steps above. As a long-term solution, etcd should
+try to remove the dependency on such modules (`M1`) that lack maintenance.
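+
+If you do need to bump an indirect dependency directly, the steps mirror the direct case. Below is a minimal sketch for one module; the version shown is only an example, not a recommendation:
+
+```bash
+# Sketch: bump the indirect dependency github.com/go-logr/logr for the server module.
+cd "${ETCD_ROOT_DIR}/server"
+go get github.com/go-logr/logr@v1.4.2   # example version only
+go mod tidy                             # keeps the requirement recorded (marked "// indirect" if nothing imports it directly)
+cd ..
+./scripts/fix.sh
+# Repeat for the other modules, following the bumping order described earlier.
+```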
+
+For mixed cases, in which some modules depend on a dependency directly while others depend on it indirectly, we have multiple options,
+
+- Bump the dependency for all modules, regardless of whether it is a direct or indirect dependency.
+- Bump the dependency only for modules that directly depend on it.
+
+We should try to follow the first approach, and temporarily fall back to the second one if we run into any issue with the first. Eventually we
+should fix the issue and ensure all modules depend on the same version of the dependency.
+
+### Known incompatible dependency updates
+
+#### arduino/setup-protoc
+
+Please refer to [build(deps): bump arduino/setup-protoc from 1.3.0 to 2.0.0](https://github.com/etcd-io/etcd/pull/16016)
+
+### Rotation worksheet
+
+Dependabot's scheduling interval is weekly, which means it will automatically raise a batch of PRs each week.
+Usually, human intervention is required each time. We have a [rotation worksheet](https://docs.google.com/spreadsheets/d/1jodHIO7Dk2VWTs1IRnfMFaRktS9IH8XRyifOnPdSY8I/edit#gid=1394774387),
+and everyone is welcome to participate; you just need to register your name in the worksheet.
+
+## Stable branches
+
+Usually, we don't proactively bump dependencies for stable releases unless there are any CVEs or bugs that affect etcd.
+
+If we have to do it, then follow the same guidance above. Note that there is no `./scripts/fix.sh` in release-3.4, so no need to
+execute it for 3.4.
+
+## Golang versions
+
+The etcd project aims to maintain a development branch that is on the latest [Go version](https://go.dev/dl); ideally, this will align with the Go version in use for Kubernetes project development. For an example of how to update etcd to a new minor release of Go, refer to issue and the linked pull requests.
+
+Suggested steps for performing a minor version upgrade for the etcd development branch:
+
+1. Carefully review new Go version release notes and potentially related blog posts for any deprecations, performance impacts, or other considerations.
+2. Create a GitHub issue to signal intent to upgrade and invite discussion, for example, .
+3. Complete the upgrade locally in your development environment by editing `.go-version` and running `make fix` (see the sketch after this list).
+4. Run performance benchmarks locally to compare before and after.
+5. Raise a pull request for the changes, for example, .
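+
+A minimal local sketch of step 3, assuming a hypothetical new Go version:
+
+```bash
+# Sketch: switch the development branch to a new minor Go version (1.23.2 is a placeholder).
+cd "${ETCD_ROOT_DIR}"
+echo "1.23.2" > .go-version
+make fix    # applies repository-wide fixes after the version change (per step 3 above)
+# Build and run the test suites and benchmarks before raising the pull request.
+```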
+
+Stable etcd release branches will be maintained to stay on the latest patch release of a supported Go version. Upgrading minor versions will be completed before the minor version currently in use is no longer supported. Refer to the [Go release policy](https://go.dev/doc/devel/release).
+
+For an example of how to update etcd to a new patch release of Go refer to issue and the linked pull requests.
+
+References:
+
+-
+
+## Core dependencies mappings
+
+[bbolt](https://github.com/etcd-io/bbolt) and [raft](https://github.com/etcd-io/raft) are two core dependencies of etcd.
+
+Both etcd 3.4.x and 3.5.x depend on bbolt 1.3.x, and etcd 3.6.x (`main` branch) depends on bbolt 1.4.x.
+
+raft is included in the etcd repository for release-3.4 and release-3.5 branches, so etcd 3.4.x and 3.5.x do not depend on any
+external raft module. We moved raft into [a separate repository](https://github.com/etcd-io/raft) starting from 3.6 (`main` branch), and the first raft
+release will be v3.6.0, so etcd 3.6.x will depend on raft 3.6.x.
+
+Please see the table below:
+
+| etcd versions | bbolt versions | raft versions |
+|---------------|----------------|---------------|
+| 3.4.x | v1.3.x | N/A |
+| 3.5.x | v1.3.x | N/A |
+| 3.6.x | v1.4.x | v3.6.x |
diff --git a/Documentation/contributor-guide/features.md b/Documentation/contributor-guide/features.md
new file mode 100644
index 00000000000..5f98391077b
--- /dev/null
+++ b/Documentation/contributor-guide/features.md
@@ -0,0 +1,83 @@
+# Features
+
+This document provides an overview of etcd features and general development guidelines for adding and deprecating them. The project maintainers can override these guidelines per the need of the project following the project governance.
+
+## Overview
+
+etcd features fall into three stages: experimental, stable, and unsafe.
+
+### Experimental
+
+Any new feature is usually added as an experimental feature. An experimental feature is characterized as below:
+- Might be buggy due to a lack of user testing. Enabling the feature may not work as expected.
+- Disabled by default when added initially.
+- Support for such a feature may be dropped at any time without notice
+ - Feature-related issues may be given lower priorities.
+ - It can be removed in the next minor or major release without following the feature deprecation policy unless it graduates to a stable feature.
+
+### Stable
+
+A stable feature is characterized as below:
+- Supported as part of the supported releases of etcd.
+- May be enabled by default.
+- Discontinuation of support must follow the feature deprecation policy.
+
+### Unsafe
+
+Unsafe features are rare and listed under the `Unsafe feature:` section in the etcd usage documentation. By default, they are disabled. They should be used with caution following documentation. An unsafe feature can be removed in the next minor or major release without following the feature deprecation policy.
+
+## Development Guidelines
+
+### Adding a new feature
+
+Any new enhancement to etcd is typically added as an experimental feature. The general development requirements are listed below. They can be somewhat flexible depending on the scope of the feature and review discussions, and will evolve over time.
+- Open an issue
+ - It must provide a clear need for the proposed feature.
+ - It should list development work items as checkboxes. There must be one work item towards future graduation to a stable feature.
+ - Label the issue with `type/feature` and `experimental`.
+ - Keep the issue open for tracking purposes until a decision is made on graduation.
+- Open a Pull Request (PR)
+ - Provide unit tests. Integration tests are also recommended where possible.
+ - Provide robust e2e test coverage. If the feature being added is complicated or urgently needed, maintainers can decide to go with e2e tests for basic coverage initially and have robust coverage added at a later time, before the feature graduates to stable.
+ - Provide logs for proper debugging.
+ - Provide metrics and benchmarks as needed.
+ - The feature should be disabled by default.
+ - Any configuration flags related to the implementation of the feature must be prefixed with `experimental`, e.g. `--experimental-feature-name` (see the sketch after this list).
+ - Add a CHANGELOG entry.
+- At least two maintainers must approve feature requirements and related code changes.
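+
+For illustration, the naming convention looks like this; `feature-name` is this document's placeholder, not a real etcd flag:
+
+```bash
+# Placeholder flag names for illustration only.
+etcd --experimental-feature-name=true   # while the feature is experimental (disabled by default)
+etcd --feature-name=true                # after graduation; both flags co-exist during the deprecation window
+```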
+
+### Graduating an Experimental feature to Stable
+
+It is important that experimental features don't get stuck in that stage. They should be revisited and moved to the stable stage following the graduation steps as described here.
+
+#### Locate graduation candidate
+Decide if an experimental feature is ready for graduation to the stable stage.
+- Find the issue that was used to enable the experimental feature initially. One way to find such issues is to search for issues with `type/feature` and `experimental` labels.
+- Fix any known open issues against the feature.
+- Make sure the feature was enabled for at least one previous release. Check the PR(s) reference from the issue to see when the feature-related code changes were merged.
+
+#### Provide implementation
+If an experimental feature is found ready for graduation to the stable stage, open a Pull Request (PR) with the following changes.
+- Add robust e2e tests if not already provided.
+- Add a new stable feature flag identical to the experimental feature flag but without the `--experimental` prefix.
+- Deprecate the experimental feature following the [feature deprecation policy](#deprecating-a-feature).
+- Implementation must ensure that both the graduated and deprecated experimental feature flags work as expected. Note that both these flags will co-exist for the timeframe described in the feature deprecation policy.
+- Enable the graduated feature by default if needed.
+
+At least two maintainers must approve the work. Patch releases should not be considered for graduation.
+
+### Deprecating a feature
+
+#### Experimental
+An experimental feature is deprecated when it graduates to the stable stage.
+- Add a deprecation message in the documentation of the experimental feature with a recommendation to use a related stable feature. e.g. `DEPRECATED. Use instead.`
+- Add a `deprecated` label in the issue that was initially used to enable the experimental feature.
+
+#### Stable
+As the project evolves, a stable feature may sometimes need to be deprecated and removed. Such a situation should be handled using the steps below:
+- Create an issue for tracking purposes.
+- Add a deprecation message in the feature usage documentation before a planned release for feature deprecation. e.g. `To be deprecated in .`. If a new feature replaces the `To be deprecated` feature, then also provide a message saying so. e.g. `Use instead.`.
+- Deprecate the feature in the planned release with a message as part of the feature usage documentation. e.g. `DEPRECATED`. If a new feature replaces the deprecated feature, then also provide a message saying so. e.g. `DEPRECATED. Use instead.`.
+- Add a `deprecated` label in the related issue.
+
+Remove the deprecated feature in the following release. Close any related issue(s). At least two maintainers must approve the work. Patch releases should not be considered for deprecation.
diff --git a/Documentation/contributor-guide/local_cluster.md b/Documentation/contributor-guide/local_cluster.md
new file mode 100644
index 00000000000..20b9de72ec2
--- /dev/null
+++ b/Documentation/contributor-guide/local_cluster.md
@@ -0,0 +1,150 @@
+# Set up the local cluster
+
+For testing and development deployments, the quickest and easiest way is to configure a local cluster. For a production deployment, refer to the [clustering][clustering] section.
+
+## Local standalone cluster
+
+### Starting a cluster
+
+Run the following to deploy an etcd cluster as a standalone cluster:
+
+```
+$ ./etcd
+...
+```
+
+If the `etcd` binary is not present in the current working directory, it might be located either at `$GOPATH/bin/etcd` or at `/usr/local/bin/etcd`. Run the command appropriately.
+
+The running etcd member listens on `localhost:2379` for client requests.
+
+### Interacting with the cluster
+
+Use `etcdctl` to interact with the running cluster:
+
+1. Store an example key-value pair in the cluster:
+
+ ```
+ $ ./etcdctl put foo bar
+ OK
+ ```
+
+ If OK is printed, storing the key-value pair is successful.
+
+2. Retrieve the value of `foo`:
+
+ ```
+ $ ./etcdctl get foo
+ bar
+ ```
+
+ If `bar` is returned, interaction with the etcd cluster is working as expected.
+
+## Local multi-member cluster
+
+### Starting a cluster
+
+A `Procfile` at the base of the etcd git repository is provided to easily configure a local multi-member cluster. To start a multi-member cluster, navigate to the root of the etcd source tree and perform the following:
+
+1. Install `goreman` to control Procfile-based applications:
+
+ ```
+ $ go install github.com/mattn/goreman@latest
+ ```
+ The installation will place the executable in `$GOPATH/bin`. If the `$GOPATH` environment variable is not set, the tool will be installed into `$HOME/go/bin`. Make sure that `$PATH` is set accordingly in your environment.
+
+2. Start a cluster with `goreman` using etcd's stock Procfile:
+
+ ```
+ $ goreman -f Procfile start
+ ```
+
+ The members start running. They listen on `localhost:2379`, `localhost:22379`, and `localhost:32379`, respectively, for client requests. A quick health check across all three members is sketched below.
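+
+ As a sanity check (a minimal sketch, assuming the stock `Procfile` client ports shown above), query the health of every member:
+
+ ```
+ $ etcdctl --endpoints=localhost:2379,localhost:22379,localhost:32379 endpoint health
+ ```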
+
+### Interacting with the cluster
+
+Use `etcdctl` to interact with the running cluster:
+
+1. Print the list of members:
+
+ ```
+ $ etcdctl --write-out=table --endpoints=localhost:2379 member list
+ ```
+ The list of etcd members is displayed as follows:
+
+ ```
+ +------------------+---------+--------+------------------------+------------------------+
+ | ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS |
+ +------------------+---------+--------+------------------------+------------------------+
+ | 8211f1d0f64f3269 | started | infra1 | http://127.0.0.1:2380 | http://127.0.0.1:2379 |
+ | 91bc3c398fb3c146 | started | infra2 | http://127.0.0.1:22380 | http://127.0.0.1:22379 |
+ | fd422379fda50e48 | started | infra3 | http://127.0.0.1:32380 | http://127.0.0.1:32379 |
+ +------------------+---------+--------+------------------------+------------------------+
+ ```
+
+2. Store an example key-value pair in the cluster:
+
+ ```
+ $ etcdctl put foo bar
+ OK
+ ```
+
+ If OK is printed, storing the key-value pair is successful.
+
+### Testing fault tolerance
+
+To exercise etcd's fault tolerance, kill a member and attempt to retrieve the key.
+
+1. Identify the process name of the member to be stopped.
+
+ The `Procfile` lists the properties of the multi-member cluster. For example, consider the member with the process name, `etcd2`.
+
+2. Stop the member:
+
+ ```
+ # kill etcd2
+ $ goreman run stop etcd2
+ ```
+
+3. Store a key:
+
+ ```
+ $ etcdctl put key hello
+ OK
+ ```
+
+4. Retrieve the key that is stored in the previous step:
+
+ ```
+ $ etcdctl get key
+ hello
+ ```
+
+5. Retrieve a key from the stopped member:
+
+ ```
+ $ etcdctl --endpoints=localhost:22379 get key
+ ```
+
+ The command should display an error caused by connection failure:
+
+ ```
+ 2017/06/18 23:07:35 grpc: Conn.resetTransport failed to create client transport: connection error: desc = "transport: dial tcp 127.0.0.1:22379: getsockopt: connection refused"; Reconnecting to "localhost:22379"
+ Error: grpc: timed out trying to connect
+ ```
+6. Restart the stopped member:
+
+ ```
+ $ goreman run restart etcd2
+ ```
+
+7. Get the key from the restarted member:
+
+ ```
+ $ etcdctl --endpoints=localhost:22379 get key
+ hello
+ ```
+
+ Restarting the member re-establishes the connection. `etcdctl` will now be able to retrieve the key successfully. To learn more about interacting with etcd, read the [interacting with etcd section][interacting].
+
+[clustering]: https://etcd.io/docs/latest/op-guide/clustering/
+[interacting]: https://etcd.io/docs/latest/dev-guide/interacting_v3/
diff --git a/Documentation/contributor-guide/logging.md b/Documentation/contributor-guide/logging.md
new file mode 100644
index 00000000000..6798ebf6e2e
--- /dev/null
+++ b/Documentation/contributor-guide/logging.md
@@ -0,0 +1,33 @@
+# Logging Conventions
+
+etcd uses the [zap][zap] library for logging application output categorized into *levels*. A log message's level is determined according to these conventions:
+
+* Debug: Everything is still fine, but even common operations may be logged, producing notices that are less helpful but much higher in volume. Usually not used in production.
+ * Examples:
+ * Send a normal message to a remote peer
+ * Write a log entry to disk
+
+* Info: Normal, working log information; everything is fine, but these are helpful notices for auditing or common operations. Should generally not be logged more frequently than once every few seconds during normal server operation.
+ * Examples:
+ * Startup configuration
+ * Start to do a snapshot
+
+* Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
+ * Examples:
+ * Failure to send a raft message to a remote peer
+ * Failure to receive heartbeat message within the configured election timeout
+
+* Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost.
+ * Examples:
+ * Failure to allocate disk space for WAL
+
+* Panic: Unrecoverable or unexpected error situation that requires stopping execution.
+ * Examples:
+ * Failure to create the database
+
+* Fatal: Unrecoverable or unexpected error situation that requires immediate exit. Mostly used in tests.
+ * Examples:
+ * Failure to find the data directory
+ * Failure to run a test function
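+
+To observe these levels in practice, the verbosity of a locally running etcd can be raised with the `--log-level` flag (available in recent etcd releases):
+
+```bash
+# Run a local etcd with debug-level logging; not recommended for production.
+etcd --log-level=debug
+```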
+
+[zap]: https://github.com/uber-go/zap
diff --git a/Documentation/contributor-guide/modules-future.svg b/Documentation/contributor-guide/modules-future.svg
new file mode 100644
index 00000000000..92d060a29fc
--- /dev/null
+++ b/Documentation/contributor-guide/modules-future.svg
@@ -0,0 +1,604 @@
+<!-- modules-future.svg: diagram of the proposed future etcd Go module layout (includes the go.etcd.io/raft/v3 node); SVG markup omitted -->
diff --git a/Documentation/contributor-guide/modules.md b/Documentation/contributor-guide/modules.md
new file mode 100644
index 00000000000..dcee156747c
--- /dev/null
+++ b/Documentation/contributor-guide/modules.md
@@ -0,0 +1,91 @@
+# Golang modules
+
+The etcd project (since version 3.5) is organized into multiple
+[golang modules](https://golang.org/ref/mod) hosted in a [single repository](https://golang.org/ref/mod#vcs-dir).
+
+![modules graph](modules.svg)
+
+There are the following modules:
+
+ - **go.etcd.io/etcd/api/v3** - contains API definitions
+ (like protos & proto-generated libraries) that define the communication protocol
+ between etcd clients and servers.
+
+ - **go.etcd.io/etcd/pkg/v3** - a collection of utility packages used by etcd
+ without being specific to etcd itself. A package belongs here
+ only if it could possibly be moved out into its own repository in the future.
+ Please avoid adding code here that has many dependencies of its own, as
+ they automatically become dependencies of the client library
+ (which we want to keep lightweight).
+
+ - **go.etcd.io/etcd/client/v3** - client library used to contact etcd over
+ the network (grpc). Recommended for all new usage of etcd.
+
+ - **go.etcd.io/raft/v3** - implementation of the Raft distributed consensus
+ protocol. Should have no etcd-specific code. Hosted in a separate repository:
+ https://github.com/etcd-io/raft.
+
+ - **go.etcd.io/etcd/server/v3** - etcd implementation.
+ The code in this package is internal to etcd and should not be consumed
+ by external projects. The package layout and API can change within the minor versions.
+
+ - **go.etcd.io/etcd/etcdctl/v3** - a command line tool to access and manage etcd.
+
+ - **go.etcd.io/etcd/tests/v3** - a module that contains all integration tests of etcd.
+ Notice: All unit tests (fast and not requiring cross-module dependencies)
+ should be kept in the local modules of the code under the test.
+
+ - **go.etcd.io/bbolt** - implementation of a persistent B-tree.
+ Hosted in a separate repository: https://github.com/etcd-io/bbolt.
+
+
+### Operations
+
+1. All etcd modules should be released in the same versions, e.g.
+ `go.etcd.io/etcd/client/v3@v3.5.10` must depend on `go.etcd.io/etcd/api/v3@v3.5.10`.
+
+ The consistent updating of versions can be performed using:
+ ```shell script
+ % DRY_RUN=false TARGET_VERSION="v3.5.10" ./scripts/release_mod.sh update_versions
+ ```
+2. The released modules should be tagged according to https://golang.org/ref/mod#vcs-version rules,
+ i.e. each module should get its own tag.
+ The tagging can be performed using:
+ ```shell script
+ % DRY_RUN=false REMOTE_REPO="origin" ./scripts/release_mod.sh push_mod_tags
+ ```
+
+3. All etcd modules should depend on the same versions of underlying dependencies.
+ This can be verified using:
+ ```shell script
+ % PASSES="dep" ./test.sh
+ ```
+
+4. The go.mod files must not contain dependencies not being used and must
+ conform to `go mod tidy` format.
+ This is being verified by:
+ ```
+ % PASSES="mod_tidy" ./test.sh
+ ```
+
+5. To trigger actions across all modules (e.g. auto-format all files), please
+ use/expand the following script:
+ ```shell script
+ % ./scripts/fix.sh
+ ```
+
+### Future
+
+As a North Star, we would like to evaluate etcd modules towards the following model:
+
+![modules graph](modules-future.svg)
+
+This assumes:
+ - Splitting etcdmigrate/etcdadm out of the etcdctl binary.
+ Thanks to this, etcdctl would clearly become a command-line wrapper
+ around the network client API,
+ while etcdmigrate/etcdadm would support direct physical operations on the
+ etcd storage files.
+ - Splitting etcd-proxy out of the ./etcd binary, as it contains more experimental code
+ and so carries additional risk & dependencies.
+ - Deprecation of support for v2 protocol.
diff --git a/Documentation/contributor-guide/modules.svg b/Documentation/contributor-guide/modules.svg
new file mode 100644
index 00000000000..5a3c3b2c39e
--- /dev/null
+++ b/Documentation/contributor-guide/modules.svg
@@ -0,0 +1,489 @@
+<!-- modules.svg: diagram of the current etcd Go module layout (includes the go.etcd.io/raft/v3 node); SVG markup omitted -->
diff --git a/Documentation/contributor-guide/release.md b/Documentation/contributor-guide/release.md
new file mode 100644
index 00000000000..266b500d1c4
--- /dev/null
+++ b/Documentation/contributor-guide/release.md
@@ -0,0 +1,129 @@
+# Release
+
+This guide describes how to release a new version of etcd.
+
+The procedure includes some manual steps for sanity checking, but it can probably be further scripted. Please keep this document up-to-date if making changes to the release process.
+
+## Release management
+
+The following pool of release candidates manages the release of each etcd major/minor version as well as patches
+to each stable release branch. They are responsible for communicating the timelines and status of each release and
+for ensuring the stability of the release branch.
+
+- Benjamin Wang [@ahrtr](https://github.com/ahrtr)
+- James Blair [@jmhbnz](https://github.com/jmhbnz)
+- Marek Siarkowicz [@serathius](https://github.com/serathius)
+- Sahdev Zala [@spzala](https://github.com/spzala)
+- Wenjia Zhang [@wenjiaswe](https://github.com/wenjiaswe)
+
+All release version numbers follow the format of [semantic versioning 2.0.0](http://semver.org/).
+
+### Major, minor version release, or its pre-release
+
+- Ensure the relevant [milestone](https://github.com/etcd-io/etcd/milestones) on GitHub is complete. All referenced issues should be closed or moved elsewhere.
+- Ensure the latest [upgrade documentation](https://etcd.io/docs/next/upgrades) is available.
+- Bump [hardcoded MinClusterVersion in the repository](https://github.com/etcd-io/etcd/blob/v3.4.15/version/version.go#L29), if necessary.
+- Add feature capability maps for the new version, if necessary.
+
+### Patch version release
+
+- To request a backport, developers submit cherry-pick PRs targeting the release branch (a manual cherry-pick sketch follows this list). The commits should not include merge commits. The commits should be restricted to bug fixes and security patches.
+- The cherrypick PRs should target the appropriate release branch (`base:release--`). The k8s infra cherry pick robot `/cherrypick ` PR chatops command may be used to automatically generate cherrypick PRs.
+- The release patch manager reviews the cherrypick PRs. Please discuss carefully what is backported to the patch release. Each patch release should be strictly better than its predecessor.
+- The release patch manager will cherry-pick these commits, starting from the oldest one, into the stable branch.
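+
+When the cherry-pick robot is not used, a manual sketch looks like the following; the commit SHA, the remote name `upstream`, and the target branch `release-3.5` are placeholders:
+
+```bash
+# Manual cherry-pick sketch; <commit-sha> is the fix commit on main.
+git fetch upstream
+git checkout -b cherry-pick-<commit-sha> upstream/release-3.5
+git cherry-pick -x <commit-sha>
+git push origin cherry-pick-<commit-sha>
+# Then open a PR against the release-3.5 branch.
+```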
+
+## Write a release note
+
+- Write an introduction for the new release. For example, what major bugs we fix, what new features we introduce, or what performance improvements we make.
+- Put `[GH XXXX]` at the head of the change line to reference the Pull Request that introduces the change. Moreover, add a link to it so readers can jump to the Pull Request.
+- Find PRs with the `release-note` label and explain them in the `NEWS` file, as a straightforward summary of changes for end-users.
+
+## Patch release criteria
+
+The etcd project aims to release a new patch version if any of the following conditions are met:
+
+- Fixed one or more major CVEs (>=7.5).
+- Fixed one or more critical bugs.
+- Fixed three or more major bugs.
+- Fixed five or more minor bugs.
+
+## Release guide
+
+### Prerequisites
+
+There are some prerequisites that should be completed before the release process. These are one-time operations
+that don't need to be executed before releasing each version. A quick verification sketch follows the list.
+1. Generate a GPG key and add it to your GitHub account. Refer to the links on [settings](https://github.com/settings/keys).
+2. Ensure you have a Linux machine, on which git, Golang, and Docker have been installed.
+ - Ensure the Golang version matches the version defined in `.go-version` file.
+ - Ensure non-privileged users can run docker commands, refer to the [Linux postinstall](https://docs.docker.com/engine/install/linux-postinstall/).
+ - Ensure there is at least 5GB of free space on your Linux machine.
+3. Install gsutil; refer to [gsutil_install](https://cloud.google.com/storage/docs/gsutil_install). When asked which cloud project to use, pick `etcd-development`.
+4. Authenticate the image registry, refer to [Authentication methods](https://cloud.google.com/container-registry/docs/advanced-authentication).
+ - `gcloud auth login`
+ - `gcloud auth configure-docker`
+5. Install gh, refer to [GitHub's documentation](https://github.com/cli/cli#installation). Ensure that running
+ `gh auth login` succeeds for the GitHub account you use to contribute to etcd,
+ and that `gh auth status` has a clean exit and doesn't show any issues.
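+
+The following sketch can be used to verify the prerequisites are in place; the exact commands are suggestions rather than a required checklist:
+
+```bash
+# Quick sanity checks for the one-time release prerequisites.
+git --version
+go version                  # should match the version in the .go-version file
+docker run --rm hello-world # confirms non-privileged docker access
+gsutil version
+gcloud auth list
+gh auth status
+gpg --list-secret-keys      # the key should also be added to your GitHub account
+```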
+
+### Release steps
+
+1. Raise an issue to publish the release plan, e.g. [issues/17350](https://github.com/etcd-io/etcd/issues/17350).
+2. Raise a `kubernetes/org` pull request to temporarily elevate permissions for the GitHub release team.
+3. Once permissions are elevated, temporarily relax [branch protections](https://github.com/etcd-io/etcd/settings/branches) to allow pushing changes directly to `release-*` branches in GitHub.
+4. Verify you can pass the authentication to the image registries,
+ - `docker login gcr.io`
+ - `docker login quay.io`
+ - If the release person doesn't have access to 1password, one of the owners (@ahrtr, @ivanvc, @jmhbnz, @serathius) needs to share the password with them per [this guide](https://support.1password.com/share-items/). See rough steps below,
+ - [Sign in](https://team-etcd.1password.com/home) to your account on 1password.com.
+ - Click `Your Vault Items` on the right side.
+ - Select `Password of quay.io`.
+ - Click `Share` on the top right, set the expiration to `1 hour`, and make it available only to the release person via their email.
+ - Click `Copy Link` then send the link to the release person via slack or email.
+5. Clone the etcd repository and checkout the target branch,
+ - `git clone --branch release-3.X git@github.com:etcd-io/etcd.git`
+6. Run the release script under the repository's root directory, replacing `${VERSION}` with a value without the `v` prefix, e.g. `3.5.13`.
+ - `DRY_RUN=false ./scripts/release.sh ${VERSION}`
+ - **NOTE:** When doing a pre-release (i.e., a version from the main branch, such as `3.6.0-alpha.2`), you will need to explicitly set the branch to main:
+ ```
+ DRY_RUN=false BRANCH=main ./scripts/release.sh ${VERSION}
+ ```
+
+ It generates all release binaries under the directory `/tmp/etcd-release-${VERSION}/etcd/release/` and images. Binaries are pushed to the Google Cloud bucket
+ under project `etcd-development`, and images are pushed to `quay.io` and `gcr.io`.
+7. Publish the release page on GitHub
+ - Open the **draft** release URL shown by the release script
+ - Click the pen button at the top right to edit the release
+ - Review that it looks correct, checking that the bottom checkboxes are set appropriately for the
+ release version (v3.4: no checkboxes; v3.5: the "Set as the latest release" checkbox checked;
+ v3.6: the "Set as a pre-release" checkbox checked)
+ - Then, publish the release
+8. Announce to the etcd-dev googlegroup
+
+ Follow the format of previous release emails sent to etcd-dev@googlegroups.com; see an example below. After sending out the email, ask one of the mailing list maintainers to approve the email from the pending list. Additionally, label the release email as `Release`.
+
+```text
+Hello,
+
+etcd v3.4.30 is now public!
+
+https://github.com/etcd-io/etcd/releases/tag/v3.4.30
+
+Thanks to everyone who contributed to the release!
+
+etcd team
+```
+
+9. Update the changelog to reflect the correct release date.
+10. Paste the release link to the issue raised in Step 1 and close the issue.
+11. Restore standard branch protection settings and raise a follow-up `kubernetes/org` pull request to return to least privilege permissions.
+12. Create a new stable branch through `git push origin release-${VERSION_MAJOR}.${VERSION_MINOR}` if this is a new major or minor stable release.
+13. Re-generate a new password for quay.io if needed (e.g. if it was shared with a contributor who isn't in the release team; we should rotate the password at least once every 3 months).
+
+#### Release known issues
+
+1. Timeouts pushing binaries - If binaries fail to fully upload to Google Cloud storage, the script must be re-run using the same command. Any artifacts that are already pushed will be overwritten to ensure they are correct. The storage bucket does not use object versioning so incorrect files cannot remain.
+
+2. Timeouts pushing images - It is rare, although possible for connection timeouts to occur when publishing etcd release images to `quay.io` or `gcr.io`. If this occurs, it is known to be safe to rerun the release script command appending the `--no-upload` flag, and image uploads will gracefully resume.
+
+3. GPG vs SSH signing - The release scripts assume that git tags will be signed with a GPG key. Since 2022 GitHub has supported [signing commits and tags using ssh](https://github.blog/changelog/2022-08-23-ssh-commit-verification-now-supported). Until further release script updates are completed you will need to disable this feature in your `~/.gitconfig` and revert to signing via GPG to perform etcd releases.
diff --git a/Documentation/contributor-guide/reporting_bugs.md b/Documentation/contributor-guide/reporting_bugs.md
new file mode 100644
index 00000000000..12de922feab
--- /dev/null
+++ b/Documentation/contributor-guide/reporting_bugs.md
@@ -0,0 +1,45 @@
+# Reporting bugs
+
+If any part of the etcd project has bugs or documentation mistakes, please let us know by [opening an issue][etcd-issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist.
+
+To make the bug report accurate and easy to understand, please try to create bug reports that are:
+
+- Specific. Include as many details as possible: which version, what environment, what configuration, etc. If the bug is related to running the etcd server, please attach the etcd log (the starting log with the etcd configuration is especially important).
+
+- Reproducible. Include the steps to reproduce the problem. We understand some issues might be hard to reproduce, please include the steps that might lead to the problem. If possible, please attach the affected etcd data dir and stack trace to the bug report.
+
+- Isolated. Please try to isolate and reproduce the bug with minimum dependencies. It would significantly slow down the speed to fix a bug if too many dependencies are involved in a bug report. Debugging external systems that rely on etcd is out of scope, but we are happy to provide guidance in the right direction or help with using etcd itself.
+
+- Unique. Do not duplicate existing bug reports.
+
+- Scoped. One bug per report. Do not follow up with another bug inside one report.
+
+It may be worthwhile to read [Elika Etemad's article on filing good bug reports][filing-good-bugs] before creating a bug report.
+
+We might ask for further information to locate a bug. A duplicated bug report will be closed.
+
+## Frequently asked questions
+
+### How to get a stack trace
+
+``` bash
+$ kill -QUIT $PID
+```
+
+### How to get the etcd version
+
+``` bash
+$ etcd --version
+```
+
+### How to get etcd configuration and log when it runs as systemd service "etcd2.service"
+
+``` bash
+$ sudo systemctl cat etcd2
+$ sudo journalctl -u etcd2
+```
+
+Due to an upstream systemd bug, journald may miss the last few log lines when its processes exit. If journalctl says etcd stopped without a fatal or panic message, try `sudo journalctl -f -t etcd2` to get the full log.
+
+[etcd-issue]: https://github.com/etcd-io/etcd/issues/new
+[filing-good-bugs]: http://fantasai.inkedblade.net/style/talks/filing-good-bugs/
diff --git a/Documentation/contributor-guide/roadmap.md b/Documentation/contributor-guide/roadmap.md
new file mode 100644
index 00000000000..c036e2eb45c
--- /dev/null
+++ b/Documentation/contributor-guide/roadmap.md
@@ -0,0 +1,50 @@
+# Roadmap
+
+etcd uses GitHub milestones to track all tasks in each major or minor release. The `roadmap.md` file only records the
+most important tasks for each release. The list is based on the current maintainer capacity that may shift over time.
+Proposed milestones are what we think we can deliver with the people we have. If we have more support on the important
+stuff, we could pick up more items from the backlog. Note that etcd will continue to mainly focus on technical debt over
+the next few major or minor releases.
+
+Each item has an assigned priority. Refer to [priority definitions](https://github.com/etcd-io/etcd/blob/main/Documentation/contributor-guide/triage_issues.md#step-5---prioritise-the-issue).
+
+## v3.6.0
+
+For a full list of tasks in `v3.6.0`, please see [milestone etcd-v3.6](https://github.com/etcd-io/etcd/milestone/38).
+
+| Title | Priority | Status | Note |
+|--------------------------------------------------------------------------------------------------------------------|----------|-------------|--------------------------------------------------------------------------------------------------------------|
+| [Support downgrade](https://github.com/etcd-io/etcd/issues/11716) | priority/important-soon | In progress | etcd will support downgrade starting from 3.6.0. But it will also support offline downgrade from 3.5 to 3.4. |
+| [StoreV2 deprecation](https://github.com/etcd-io/etcd/issues/12913) | priority/important-soon | In progress | This task will be covered in both 3.6 and 3.7. |
+| [Release raft 3.6.0](https://github.com/etcd-io/raft/issues/89) | priority/important-soon | Not started | etcd 3.6.0 will depend on raft 3.6.0 |
+| [Release bbolt 1.4.0](https://github.com/etcd-io/bbolt/issues/553) | priority/important-soon | Not started | etcd 3.6.0 will depend on bbolt 1.4.0 |
+| [Support /livez and /readyz endpoints](https://github.com/etcd-io/etcd/issues/16007) | priority/important-longterm | In progress | It provides clearer APIs, and can also work around the stalled writes issue |
+| [Bump gRPC](https://github.com/etcd-io/etcd/issues/16290) | priority/important-longterm | Completed | It isn't guaranteed to be resolved in 3.6, and might be postponed to 3.7 depending on the effort and risk. |
+| [Deprecate grpc-gateway or bump it](https://github.com/etcd-io/etcd/issues/14499) | priority/important-longterm | Completed | It isn't guaranteed to be resolved in 3.6, and might be postponed to 3.7 depending on the effort and risk. |
+| [bbolt: Add logger into bbolt](https://github.com/etcd-io/bbolt/issues/509) | priority/important-longterm | Completed | It's important to diagnose bbolt issues |
+| [bbolt: Add surgery commands](https://github.com/etcd-io/bbolt/issues/370) | priority/important-longterm | Completed | Surgery commands are important for fixing corrupted db files |
+| [Evaluate and (graduate or deprecate/remove) experimental features](https://github.com/etcd-io/etcd/issues/16292) | priority/backlog | Not started | This task will be covered in both 3.6 and 3.7. |
+
+## v3.7.0
+
+For a full list of tasks in `v3.7.0`, please see [milestone etcd-v3.7](https://github.com/etcd-io/etcd/milestone/39).
+
+| Title | Priority | Note |
+|-------------------------------------------------------------------------------------------------------------------|----------|-----------------------------------------------------------------------------------|
+| [StoreV2 deprecation](https://github.com/etcd-io/etcd/issues/12913) | P0 | Finish the remaining tasks in 3.7. |
+| [Refactor lease: Lease might be revoked by mistake by old leader](https://github.com/etcd-io/etcd/issues/15247) | P1 | to be investigated & discussed |
+| [Integrate raft's new feature (async write) into etcd](https://github.com/etcd-io/etcd/issues/16291) | P1 | It should improve the performance |
+| [bbolt: Support customizing the bbolt rebalance threshold](https://github.com/etcd-io/bbolt/issues/422) | P2 | It may get rid of etcd's defragmentation. Both bbolt and etcd need to be changed. |
+| [Evaluate and (graduate or deprecate/remove) experimental features](https://github.com/etcd-io/etcd/issues/16292) | P2 | Finish the remaining tasks in 3.7. |
+
+## Backlog (future releases)
+
+| Title | Priority | Note |
+|----------------------------------------------------------------------------------------------------------|----------|------|
+| [Remove the dependency on grpc-go's experimental API](https://github.com/etcd-io/etcd/issues/15145) | | |
+| [Protobuf: cleanup both golang/protobuf and gogo/protobuf](https://github.com/etcd-io/etcd/issues/14533) | | |
+| [Proposals should include a merkle root](https://github.com/etcd-io/etcd/issues/13839) | | |
+| [Add Distributed Tracing using OpenTelemetry](https://github.com/etcd-io/etcd/issues/12460) | | |
+| [Support CA rotation](https://github.com/etcd-io/etcd/issues/11555) | | |
+| [bbolt: Migrate all commands to cobra style commands](https://github.com/etcd-io/bbolt/issues/472) | | |
+| [raft: enhance the configuration change validation](https://github.com/etcd-io/raft/issues/80) | | |
diff --git a/Documentation/contributor-guide/triage_issues.md b/Documentation/contributor-guide/triage_issues.md
new file mode 100644
index 00000000000..4faa47c720d
--- /dev/null
+++ b/Documentation/contributor-guide/triage_issues.md
@@ -0,0 +1,190 @@
+# Issue triage guidelines
+
+## Purpose
+
+Speed up issue management.
+
+The `etcd` issues are listed at and are identified with labels. For example, an issue that is identified as a bug will be set to the label `type/bug`.
+
+The etcd project uses labels to indicate common attributes such as `area`, `type`, and `priority` of incoming issues.
+
+New issues will often start without any labels, but typically `etcd` maintainers, reviewers, and members will add labels by following these triage guidelines. The detailed list of labels can be found at .
+
+## Scope
+
+This document serves as the primary guidelines for triaging incoming issues in `etcd`.
+
+All contributors are encouraged and welcome to help manage issues, which will help reduce the burden on project maintainers. That said, the work and responsibilities discussed in this document are written with `etcd` project reviewers and members in mind, as these individuals have triage access to the etcd project, which is a requirement for actions like applying labels or closing issues.
+
+Refer to [etcd community membership](https://github.com/etcd-io/etcd/blob/main/Documentation/contributor-guide/community-membership.md) for guidance on becoming an etcd project member or reviewer.
+
+## Step 1 - Find an issue to triage
+
+To get started you can use the following recommended issue searches to identify issues that are in need of triage:
+
+* [Issues that have no labels](https://github.com/etcd-io/etcd/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated+no%3Alabel)
+* [Issues created recently](https://github.com/etcd-io/etcd/issues?q=is%3Aissue+is%3Aopen+)
+* [Issues not assigned but linked pr](https://github.com/etcd-io/etcd/issues?q=is%3Aopen+is%3Aissue+no%3Aassignee+linked%3Apr)
+* [Issues with no comments](https://github.com/etcd-io/etcd/issues?q=is%3Aopen+is%3Aissue+comments%3A0+)
+* [Issues with help wanted](https://github.com/etcd-io/etcd/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22+)
+
+## Step 2 - Check the issue is valid
+
+Before we start adding labels or trying to work out a priority, our first triage step needs to be working out if the issue actually belongs to the etcd project and is not a duplicate.
+
+### Issues that don't belong to etcd
+
+Sometimes issues are reported that belong to other projects that `etcd` uses, for example, `grpc` or `golang` issues. Such issues should be addressed by asking the reporter to open them in the appropriate project.
+
+These issues can generally be closed unless a maintainer and the issue reporter see a need to keep them open for tracking purposes. If you have triage permissions, please close the issue; alternatively, mention the @etcd-io/members group to request a member with triage access to close it.
+
+### Duplicate issues
+
+If an issue is a duplicate, add a comment stating so along with a reference to the original issue. If you have triage permissions, please close it; alternatively, mention the @etcd-io/members group to request a member with triage access to close the issue.
+
+## Step 3 - Apply the appropriate type of label
+
+Adding a `type` label to an issue helps create visibility on the health of the project and helps contributors identify potential priorities, i.e. addressing existing bugs or test flakes before implementing new features.
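+
+For contributors with triage access, labels can also be applied with the GitHub CLI; a small sketch (the issue number is a placeholder):
+
+```bash
+# Apply a type label (and, if known, an area label) to an issue; 12345 is a placeholder.
+gh issue edit 12345 --repo etcd-io/etcd --add-label "type/bug" --add-label "area/documentation"
+```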
+
+### Support requests
+
+As a general rule, the focus for etcd support is to address common themes in a broad way that helps all users, i.e. through channels like known issues, frequently asked questions, and high-quality documentation. To make the best use of project members' time, we should avoid providing 1:1 support if a broad approach is available.
+
+Some people mistakenly use our GitHub bug report or feature request templates to file support requests. Usually, they are asking for help operating or configuring some aspect of etcd. Support requests for etcd should instead be raised as [discussions](https://github.com/etcd-io/etcd/discussions).
+
+Common types of support requests are:
+
+ 1. Questions about configuring or operating existing well-documented etcd features, for example, . Note - If an existing feature is not well documented please apply the `area/documentation` label and propose documentation improvements that would prevent future users from stumbling on the problem again.
+
+ 2. Bug reports or questions about unsupported versions of etcd, for example . When responding to these issues please refer to our [supported versions documentation](https://etcd.io/docs/latest/op-guide/versioning) and encourage the reporter to upgrade to a recent patch release of a supported version as soon as possible. We should limit the effort supporting users that do not make the effort to run a supported version of etcd or ensure their version is patched.
+
+ 3. Bug reports that do not provide a complete list of steps to reproduce the issue and/or contributors are not able to reproduce the issue, for example, . We should limit the effort we put into reproducing issues ourselves and motivate users to provide the necessary information to accept the bug report.
+
+ 4. General questions that are filed using feature request or bug report issue templates, for example, . Note - These types of requests may surface good additions to our [frequently asked questions](https://etcd.io/docs/v3.5/faq).
+
+If you identify that an issue is a support request please:
+
+ 1. Add the `type/support` or `type/question` label.
+
+ 2. Add the following comment to inform the issue creator that discussions should be used instead and that this issue will be converted to a discussion.
+
+ > Thank you for your question, this support issue will be moved to our [Discussion Forums](https://github.com/etcd-io/etcd/discussions).
+ >
+ > We are trying to consolidate the channels to which questions for help/support are posted so that we can improve our efficiency in responding to your requests, and make it easier for you to find answers to frequently asked questions and how to address common use cases.
+ >
+ > We regularly see messages posted in multiple forums, with the full response thread only in one place or, worse, spread across multiple forums. Also, the large volume of support issues on GitHub is making it difficult for us to use issues to identify real bugs.
+ >
+ > Members of the etcd community use Discussion Forums to field support requests. Before posting a new question, please search these for answers to similar questions, and also familiarize yourself with:
+ >
+ > 1. [user documentation](https://etcd.io/docs/latest)
+ > 2. [frequently asked questions](https://etcd.io/docs/v3.5/faq)
+ >
+ > Again, thanks for using etcd and raising this question.
+ >
+ > The etcd team
+
+ 3. Finally, click `Convert to discussion` on the right-hand panel, selecting the appropriate discussion category.
+
+### Bug reports
+
+If an issue has been raised as a bug it should already have the `type/bug` label, however, if this is missing for an issue you determine to be a bug please add the label manually.
+
+The next step is to validate if the issue is indeed a bug. If not, add a comment with the findings and close the trivial issue. For non-trivial issues, wait to hear back from the issue reporter and see if there is any objection. If the issue reporter does not reply in 30 days, close the issue.
+
+If the problem can not be reproduced or requires more information, leave a comment for the issue reporter as soon as possible, while the issue is still fresh for them.
+
+### Feature requests
+
+New feature requests should be created via the etcd feature request template and in theory already have the `type/feature` label, however, if this is missing for an issue you determine to be a feature please add the label manually.
+
+### Test flakes
+
+Test flakes are a specific type of bug that the etcd project tracks separately as these are a priority to address. These should be created via the test flake template and in theory already have the `type/flake` label, however, if this is missing for an issue you determine to be related to a flaking test please add the label manually.
+
+## Step 4 - Define the areas impacted
+
+Adding an `area` label to an issue helps create visibility on which areas of the etcd project require attention and helps contributors find issues to work on relating to their particular skills or knowledge of the etcd codebase.
+
+If an issue crosses multiple domains please add additional `area` labels to reflect that.
+
+Below is a brief summary of the area labels in active use by the etcd project along with any notes on their use:
+
+| Label | Notes |
+| --- | --- |
+| area/external | Tracking label for issues raised that are external to etcd. |
+| area/community | |
+| area/raft | |
+| area/clientv3 | |
+| area/performance | |
+| area/security | |
+| area/tls | |
+| area/auth | |
+| area/etcdctl | |
+| area/etcdutl | |
+| area/contrib | Not to be confused with `area/community` this label is specifically used for issues relating to community-maintained scripts or files in the `contrib/` directory which aren't part of the core etcd project. |
+| area/documentation | |
+| area/tooling | Generally used in relation to the third party / external utilities or tools that are used in various stages of the etcd build, test, or release process, for example, tooling to create sboms. |
+| area/testing | |
+| area/robustness-testing | |
+
+## Step 5 - Prioritise the issue
+
+If an issue lacks a priority label it has not been formally prioritized yet.
+
+Adding a `priority` label helps the etcd project understand what is important and should be worked on now, and conversely, what is not as important and is on the project backlog.
+
+|Priority label|What it means|Examples|
+|---|---|---|
+| `priority/critical-urgent` | Maintainers are responsible for making sure that these issues (in their area) are being actively worked on, i.e., drop what you're doing. The stuff is burning. These should be fixed before the next release. | User-visible critical bugs in core features, broken builds or tests on tier1 supported platforms, and critical security issues |
+| `priority/important-soon` | Must be staffed and worked on either currently or very soon, ideally in time for the next release. | |
+| `priority/important-longterm` | Important over the long term, but may not be currently staffed and/or may require multiple releases to complete. | |
+| `priority/backlog` | General agreement that this is a nice-to-have, but no one's available to work on it anytime soon. Community contributions would be most welcome in the meantime, though it might take a while to get them reviewed if reviewers are fully occupied with higher-priority issuesâfor example, immediately before a release.| |
+| `priority/awaiting-more-evidence` | Possibly useful, but not yet enough support to actually get it done. | Mostly placeholders for potentially good ideas, so that they don't get completely forgotten, and can be referenced or deduped every time they come up |
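+
+One way to find issues that have not yet been prioritized is to exclude every priority label in a search. The following is only a sketch using GitHub search syntax via the `gh` CLI (assuming it is installed and authenticated):
+
+```console
+# run from a clone of the etcd repository so gh picks up the right repo
+$ gh issue list --search 'is:open -label:"priority/critical-urgent" -label:"priority/important-soon" -label:"priority/important-longterm" -label:"priority/backlog" -label:"priority/awaiting-more-evidence"'
+```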
+
+## Step 6 - Support new contributors
+
+As part of the `etcd` triage process, once the `type` and `area` have been determined, please consider whether the issue would be suitable for a less experienced contributor. The `good first issue` label is a subset of the `help wanted` label, indicating that members have committed to providing extra assistance for new contributors. All `good first issue` items also have the `help wanted` label.
+
+### Help wanted
+
+Items marked with the `help wanted` label need to meet these criteria:
+
+* **Low Barrier to Entry** - It should be easy for new contributors.
+
+* **Clear** - The task is agreed upon and does not require further discussions in the community.
+
+* **Goldilocks priority** - The priority should not be so high that a core contributor should do it, but not so low that it isn't useful enough for a core contributor to spend time reviewing it, answering questions, helping get it into a release, etc.
+
+### Good first issue
+
+Items marked with `good first issue` are intended for first-time contributors. The label indicates that members will keep an eye out for these pull requests and shepherd them through our processes.
+
+New contributors should not be left to find an approver, ping for reviews, decipher test commands, or identify that their build failed due to a flake. It is important to make new contributors feel welcome and valued. We should assure them that they will have an extra level of help with their first contribution.
+
+After a contributor has successfully completed one or two `good first issue` items, they should be ready to move on to `help wanted` items.
+
+* **No Barrier to Entry** - The task is something that a new contributor can tackle without advanced setup or domain knowledge.
+
+* **Solution Explained** - The recommended solution is clearly described in the issue.
+
+* **Gives Examples** - Link to examples of similar implementations so new contributors have a reference guide for their changes.
+
+* **Identifies Relevant Code** - The relevant code and tests to be changed should be linked in the issue.
+
+* **Ready to Test** - There should be existing tests that can be modified, or existing test cases fit to be copied. If the area of code doesn't have tests, before labeling the issue, add a test fixture. This prep often makes a great help wanted task!
+
+## Step 7 - Follow up
+
+Once initial triage has been completed, issues need to be re-evaluated over time to ensure they are not incorrectly marked as stale.
+
+### Track important issues
+
+If an issue is at risk of being closed by the stale bot in the future, but is an important issue for the etcd project, then please apply the `stage/tracked` label and remove any `stale` labels that exist. This will ensure the project does not lose sight of the issue.
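+
+As a sketch, both steps can be done in one GitHub CLI command (assuming `gh` is installed and authenticated):
+
+```console
+# 12345 is a hypothetical issue number used for illustration
+$ gh issue edit 12345 --add-label "stage/tracked" --remove-label "stale"
+```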
+
+### Close incomplete issues
+
+Issues that lack enough information from the issue reporter should be closed if the issue reporter does not provide information in 30 days. Issues can always be re-opened at a later date if new information is provided.
+
+### Check for incomplete work
+
+If an issue owned by a developer has no pull request created within 30 days, contact the issue owner and kindly ask about the status of their work, or ask them to release ownership of the issue if needed.
diff --git a/Documentation/contributor-guide/triage_prs.md b/Documentation/contributor-guide/triage_prs.md
new file mode 100644
index 00000000000..ffa0f7a1d8d
--- /dev/null
+++ b/Documentation/contributor-guide/triage_prs.md
@@ -0,0 +1,39 @@
+# PR management
+
+## Purpose
+
+Speed up PR management.
+
+The `etcd` PRs are listed at https://github.com/etcd-io/etcd/pulls.
+A PR can have various labels, milestones, reviewers, etc. The detailed list of labels can be found at
+https://github.com/kubernetes/kubernetes/labels
+
+Following are a few example PR searches for convenience:
+* [Open PRs for milestone etcd-v3.6](https://github.com/etcd-io/etcd/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+milestone%3Aetcd-v3.6)
+* [PRs under investigation](https://github.com/etcd-io/etcd/labels/Investigating)
+
+## Scope
+
+These guidelines serve as a primary document for managing PRs in `etcd`. Everyone is welcome to help manage PRs but the work and responsibilities discussed in this document are created with `etcd` maintainers and active contributors in mind.
+
+## Ensure tests are run
+
+The etcd project uses Kubernetes Prow and GitHub Actions to run tests. To ensure all required tests run, if a pull request is ready for testing and still has the `needs-ok-to-test` label, please comment `/ok-to-test` on the pull request.
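+
+As a sketch, assuming the GitHub CLI `gh` is installed and authenticated, the comment can also be posted from the command line:
+
+```console
+# 12345 is a hypothetical PR number used for illustration
+$ gh pr comment 12345 --body "/ok-to-test"
+```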
+
+## Handle inactive PRs
+Poke the PR owner if review comments are not addressed within 15 days. If the PR owner does not reply within 90 days, update the PR with a new commit if possible. If that is not possible, the inactive PR should be closed after 180 days.
+
+## Poke reviewer if needed
+
+Reviewers are usually responsive in a timely fashion, but considering everyone is busy, give them some time after requesting a review if a quick response is not provided. If a response is not provided within 10 days, feel free to contact them by adding a comment to the PR or by sending an email or Slack message.
+
+## Verify important labels are in place
+
+Make sure that appropriate reviewers are added to the PR. Also, make sure that a milestone is identified. If any of these or other important labels are missing, add them. If the correct label cannot be decided, leave a comment so the maintainers can add it as needed.
diff --git a/Documentation/dev-guide/apispec/swagger/rpc.swagger.json b/Documentation/dev-guide/apispec/swagger/rpc.swagger.json
index b873790f380..b2e0a00b270 100644
--- a/Documentation/dev-guide/apispec/swagger/rpc.swagger.json
+++ b/Documentation/dev-guide/apispec/swagger/rpc.swagger.json
@@ -1,33 +1,40 @@
{
+ "swagger": "2.0",
+ "info": {
+ "title": "api/etcdserverpb/rpc.proto",
+ "version": "version not set"
+ },
+ "tags": [
+ {
+ "name": "KV"
+ },
+ {
+ "name": "Watch"
+ },
+ {
+ "name": "Lease"
+ },
+ {
+ "name": "Cluster"
+ },
+ {
+ "name": "Maintenance"
+ },
+ {
+ "name": "Auth"
+ }
+ ],
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
- "swagger": "2.0",
- "info": {
- "title": "api/etcdserverpb/rpc.proto",
- "version": "version not set"
- },
"paths": {
"/v3/auth/authenticate": {
"post": {
- "tags": [
- "Auth"
- ],
"summary": "Authenticate processes an authenticate request.",
"operationId": "Auth_Authenticate",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthenticateRequest"
- }
- }
- ],
"responses": {
"200": {
"description": "A successful response.",
@@ -36,31 +43,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/auth/disable": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "AuthDisable disables authentication.",
- "operationId": "Auth_AuthDisable",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbAuthDisableRequest"
+ "$ref": "#/definitions/etcdserverpbAuthenticateRequest"
}
}
],
+ "tags": [
+ "Auth"
+ ]
+ }
+ },
+ "/v3/auth/disable": {
+ "post": {
+ "summary": "AuthDisable disables authentication.",
+ "operationId": "Auth_AuthDisable",
"responses": {
"200": {
"description": "A successful response.",
@@ -69,31 +76,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/auth/enable": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "AuthEnable enables authentication.",
- "operationId": "Auth_AuthEnable",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbAuthEnableRequest"
+ "$ref": "#/definitions/etcdserverpbAuthDisableRequest"
}
}
],
+ "tags": [
+ "Auth"
+ ]
+ }
+ },
+ "/v3/auth/enable": {
+ "post": {
+ "summary": "AuthEnable enables authentication.",
+ "operationId": "Auth_AuthEnable",
"responses": {
"200": {
"description": "A successful response.",
@@ -102,31 +109,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/auth/role/add": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "RoleAdd adds a new role. Role name cannot be empty.",
- "operationId": "Auth_RoleAdd",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbAuthRoleAddRequest"
+ "$ref": "#/definitions/etcdserverpbAuthEnableRequest"
}
}
],
+ "tags": [
+ "Auth"
+ ]
+ }
+ },
+ "/v3/auth/role/add": {
+ "post": {
+ "summary": "RoleAdd adds a new role. Role name cannot be empty.",
+ "operationId": "Auth_RoleAdd",
"responses": {
"200": {
"description": "A successful response.",
@@ -135,31 +142,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/auth/role/delete": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "RoleDelete deletes a specified role.",
- "operationId": "Auth_RoleDelete",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbAuthRoleDeleteRequest"
+ "$ref": "#/definitions/etcdserverpbAuthRoleAddRequest"
}
}
],
+ "tags": [
+ "Auth"
+ ]
+ }
+ },
+ "/v3/auth/role/delete": {
+ "post": {
+ "summary": "RoleDelete deletes a specified role.",
+ "operationId": "Auth_RoleDelete",
"responses": {
"200": {
"description": "A successful response.",
@@ -168,31 +175,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/auth/role/get": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "RoleGet gets detailed role information.",
- "operationId": "Auth_RoleGet",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbAuthRoleGetRequest"
+ "$ref": "#/definitions/etcdserverpbAuthRoleDeleteRequest"
}
}
],
+ "tags": [
+ "Auth"
+ ]
+ }
+ },
+ "/v3/auth/role/get": {
+ "post": {
+ "summary": "RoleGet gets detailed role information.",
+ "operationId": "Auth_RoleGet",
"responses": {
"200": {
"description": "A successful response.",
@@ -201,31 +208,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/auth/role/grant": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "RoleGrantPermission grants a permission of a specified key or range to a specified role.",
- "operationId": "Auth_RoleGrantPermission",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbAuthRoleGrantPermissionRequest"
+ "$ref": "#/definitions/etcdserverpbAuthRoleGetRequest"
}
}
],
+ "tags": [
+ "Auth"
+ ]
+ }
+ },
+ "/v3/auth/role/grant": {
+ "post": {
+ "summary": "RoleGrantPermission grants a permission of a specified key or range to a specified role.",
+ "operationId": "Auth_RoleGrantPermission",
"responses": {
"200": {
"description": "A successful response.",
@@ -234,31 +241,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/auth/role/list": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "RoleList gets lists of all roles.",
- "operationId": "Auth_RoleList",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbAuthRoleListRequest"
+ "$ref": "#/definitions/etcdserverpbAuthRoleGrantPermissionRequest"
}
}
],
+ "tags": [
+ "Auth"
+ ]
+ }
+ },
+ "/v3/auth/role/list": {
+ "post": {
+ "summary": "RoleList gets lists of all roles.",
+ "operationId": "Auth_RoleList",
"responses": {
"200": {
"description": "A successful response.",
@@ -267,31 +274,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/auth/role/revoke": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "RoleRevokePermission revokes a key or range permission of a specified role.",
- "operationId": "Auth_RoleRevokePermission",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbAuthRoleRevokePermissionRequest"
+ "$ref": "#/definitions/etcdserverpbAuthRoleListRequest"
}
}
],
+ "tags": [
+ "Auth"
+ ]
+ }
+ },
+ "/v3/auth/role/revoke": {
+ "post": {
+ "summary": "RoleRevokePermission revokes a key or range permission of a specified role.",
+ "operationId": "Auth_RoleRevokePermission",
"responses": {
"200": {
"description": "A successful response.",
@@ -300,31 +307,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/auth/status": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "AuthStatus displays authentication status.",
- "operationId": "Auth_AuthStatus",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbAuthStatusRequest"
+ "$ref": "#/definitions/etcdserverpbAuthRoleRevokePermissionRequest"
}
}
],
+ "tags": [
+ "Auth"
+ ]
+ }
+ },
+ "/v3/auth/status": {
+ "post": {
+ "summary": "AuthStatus displays authentication status.",
+ "operationId": "Auth_AuthStatus",
"responses": {
"200": {
"description": "A successful response.",
@@ -333,31 +340,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/auth/user/add": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "UserAdd adds a new user. User name cannot be empty.",
- "operationId": "Auth_UserAdd",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserAddRequest"
+ "$ref": "#/definitions/etcdserverpbAuthStatusRequest"
}
}
],
+ "tags": [
+ "Auth"
+ ]
+ }
+ },
+ "/v3/auth/user/add": {
+ "post": {
+ "summary": "UserAdd adds a new user. User name cannot be empty.",
+ "operationId": "Auth_UserAdd",
"responses": {
"200": {
"description": "A successful response.",
@@ -366,31 +373,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/auth/user/changepw": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "UserChangePassword changes the password of a specified user.",
- "operationId": "Auth_UserChangePassword",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserChangePasswordRequest"
+ "$ref": "#/definitions/etcdserverpbAuthUserAddRequest"
}
}
],
+ "tags": [
+ "Auth"
+ ]
+ }
+ },
+ "/v3/auth/user/changepw": {
+ "post": {
+ "summary": "UserChangePassword changes the password of a specified user.",
+ "operationId": "Auth_UserChangePassword",
"responses": {
"200": {
"description": "A successful response.",
@@ -399,31 +406,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/auth/user/delete": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "UserDelete deletes a specified user.",
- "operationId": "Auth_UserDelete",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserDeleteRequest"
+ "$ref": "#/definitions/etcdserverpbAuthUserChangePasswordRequest"
}
}
],
+ "tags": [
+ "Auth"
+ ]
+ }
+ },
+ "/v3/auth/user/delete": {
+ "post": {
+ "summary": "UserDelete deletes a specified user.",
+ "operationId": "Auth_UserDelete",
"responses": {
"200": {
"description": "A successful response.",
@@ -432,31 +439,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/auth/user/get": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "UserGet gets detailed user information.",
- "operationId": "Auth_UserGet",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserGetRequest"
+ "$ref": "#/definitions/etcdserverpbAuthUserDeleteRequest"
}
}
],
+ "tags": [
+ "Auth"
+ ]
+ }
+ },
+ "/v3/auth/user/get": {
+ "post": {
+ "summary": "UserGet gets detailed user information.",
+ "operationId": "Auth_UserGet",
"responses": {
"200": {
"description": "A successful response.",
@@ -465,31 +472,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/auth/user/grant": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "UserGrant grants a role to a specified user.",
- "operationId": "Auth_UserGrantRole",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserGrantRoleRequest"
+ "$ref": "#/definitions/etcdserverpbAuthUserGetRequest"
}
}
],
+ "tags": [
+ "Auth"
+ ]
+ }
+ },
+ "/v3/auth/user/grant": {
+ "post": {
+ "summary": "UserGrant grants a role to a specified user.",
+ "operationId": "Auth_UserGrantRole",
"responses": {
"200": {
"description": "A successful response.",
@@ -498,31 +505,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/auth/user/list": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "UserList gets a list of all users.",
- "operationId": "Auth_UserList",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserListRequest"
+ "$ref": "#/definitions/etcdserverpbAuthUserGrantRoleRequest"
}
}
],
+ "tags": [
+ "Auth"
+ ]
+ }
+ },
+ "/v3/auth/user/list": {
+ "post": {
+ "summary": "UserList gets a list of all users.",
+ "operationId": "Auth_UserList",
"responses": {
"200": {
"description": "A successful response.",
@@ -531,31 +538,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/auth/user/revoke": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "UserRevokeRole revokes a role of specified user.",
- "operationId": "Auth_UserRevokeRole",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserRevokeRoleRequest"
+ "$ref": "#/definitions/etcdserverpbAuthUserListRequest"
}
}
],
+ "tags": [
+ "Auth"
+ ]
+ }
+ },
+ "/v3/auth/user/revoke": {
+ "post": {
+ "summary": "UserRevokeRole revokes a role of specified user.",
+ "operationId": "Auth_UserRevokeRole",
"responses": {
"200": {
"description": "A successful response.",
@@ -564,31 +571,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/cluster/member/add": {
- "post": {
- "tags": [
- "Cluster"
- ],
- "summary": "MemberAdd adds a member into the cluster.",
- "operationId": "Cluster_MemberAdd",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbMemberAddRequest"
+ "$ref": "#/definitions/etcdserverpbAuthUserRevokeRoleRequest"
}
}
],
+ "tags": [
+ "Auth"
+ ]
+ }
+ },
+ "/v3/cluster/member/add": {
+ "post": {
+ "summary": "MemberAdd adds a member into the cluster.",
+ "operationId": "Cluster_MemberAdd",
"responses": {
"200": {
"description": "A successful response.",
@@ -597,31 +604,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/cluster/member/list": {
- "post": {
- "tags": [
- "Cluster"
- ],
- "summary": "MemberList lists all the members in the cluster.",
- "operationId": "Cluster_MemberList",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbMemberListRequest"
+ "$ref": "#/definitions/etcdserverpbMemberAddRequest"
}
}
],
+ "tags": [
+ "Cluster"
+ ]
+ }
+ },
+ "/v3/cluster/member/list": {
+ "post": {
+ "summary": "MemberList lists all the members in the cluster.",
+ "operationId": "Cluster_MemberList",
"responses": {
"200": {
"description": "A successful response.",
@@ -630,31 +637,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/cluster/member/promote": {
- "post": {
- "tags": [
- "Cluster"
- ],
- "summary": "MemberPromote promotes a member from raft learner (non-voting) to raft voting member.",
- "operationId": "Cluster_MemberPromote",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbMemberPromoteRequest"
+ "$ref": "#/definitions/etcdserverpbMemberListRequest"
}
}
],
+ "tags": [
+ "Cluster"
+ ]
+ }
+ },
+ "/v3/cluster/member/promote": {
+ "post": {
+ "summary": "MemberPromote promotes a member from raft learner (non-voting) to raft voting member.",
+ "operationId": "Cluster_MemberPromote",
"responses": {
"200": {
"description": "A successful response.",
@@ -663,31 +670,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/cluster/member/remove": {
- "post": {
- "tags": [
- "Cluster"
- ],
- "summary": "MemberRemove removes an existing member from the cluster.",
- "operationId": "Cluster_MemberRemove",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbMemberRemoveRequest"
+ "$ref": "#/definitions/etcdserverpbMemberPromoteRequest"
}
}
],
+ "tags": [
+ "Cluster"
+ ]
+ }
+ },
+ "/v3/cluster/member/remove": {
+ "post": {
+ "summary": "MemberRemove removes an existing member from the cluster.",
+ "operationId": "Cluster_MemberRemove",
"responses": {
"200": {
"description": "A successful response.",
@@ -696,31 +703,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/cluster/member/update": {
- "post": {
- "tags": [
- "Cluster"
- ],
- "summary": "MemberUpdate updates the member configuration.",
- "operationId": "Cluster_MemberUpdate",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbMemberUpdateRequest"
+ "$ref": "#/definitions/etcdserverpbMemberRemoveRequest"
}
}
],
+ "tags": [
+ "Cluster"
+ ]
+ }
+ },
+ "/v3/cluster/member/update": {
+ "post": {
+ "summary": "MemberUpdate updates the member configuration.",
+ "operationId": "Cluster_MemberUpdate",
"responses": {
"200": {
"description": "A successful response.",
@@ -729,31 +736,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/kv/compaction": {
- "post": {
- "tags": [
- "KV"
- ],
- "summary": "Compact compacts the event history in the etcd key-value store. The key-value\nstore should be periodically compacted or the event history will continue to grow\nindefinitely.",
- "operationId": "KV_Compact",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbCompactionRequest"
+ "$ref": "#/definitions/etcdserverpbMemberUpdateRequest"
}
}
],
+ "tags": [
+ "Cluster"
+ ]
+ }
+ },
+ "/v3/kv/compaction": {
+ "post": {
+ "summary": "Compact compacts the event history in the etcd key-value store. The key-value\nstore should be periodically compacted or the event history will continue to grow\nindefinitely.",
+ "operationId": "KV_Compact",
"responses": {
"200": {
"description": "A successful response.",
@@ -762,31 +769,32 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/kv/deleterange": {
- "post": {
- "tags": [
- "KV"
- ],
- "summary": "DeleteRange deletes the given range from the key-value store.\nA delete request increments the revision of the key-value store\nand generates a delete event in the event history for every deleted key.",
- "operationId": "KV_DeleteRange",
+ },
"parameters": [
{
"name": "body",
+ "description": "CompactionRequest compacts the key-value store up to a given revision. All superseded keys\nwith a revision less than the compaction revision will be removed.",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbDeleteRangeRequest"
+ "$ref": "#/definitions/etcdserverpbCompactionRequest"
}
}
],
+ "tags": [
+ "KV"
+ ]
+ }
+ },
+ "/v3/kv/deleterange": {
+ "post": {
+ "summary": "DeleteRange deletes the given range from the key-value store.\nA delete request increments the revision of the key-value store\nand generates a delete event in the event history for every deleted key.",
+ "operationId": "KV_DeleteRange",
"responses": {
"200": {
"description": "A successful response.",
@@ -795,31 +803,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/kv/lease/leases": {
- "post": {
- "tags": [
- "Lease"
- ],
- "summary": "LeaseLeases lists all existing leases.",
- "operationId": "Lease_LeaseLeases2",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbLeaseLeasesRequest"
+ "$ref": "#/definitions/etcdserverpbDeleteRangeRequest"
}
}
],
+ "tags": [
+ "KV"
+ ]
+ }
+ },
+ "/v3/kv/lease/leases": {
+ "post": {
+ "summary": "LeaseLeases lists all existing leases.",
+ "operationId": "Lease_LeaseLeases2",
"responses": {
"200": {
"description": "A successful response.",
@@ -828,31 +836,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/kv/lease/revoke": {
- "post": {
- "tags": [
- "Lease"
- ],
- "summary": "LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.",
- "operationId": "Lease_LeaseRevoke2",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbLeaseRevokeRequest"
+ "$ref": "#/definitions/etcdserverpbLeaseLeasesRequest"
}
}
],
+ "tags": [
+ "Lease"
+ ]
+ }
+ },
+ "/v3/kv/lease/revoke": {
+ "post": {
+ "summary": "LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.",
+ "operationId": "Lease_LeaseRevoke2",
"responses": {
"200": {
"description": "A successful response.",
@@ -861,31 +869,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/kv/lease/timetolive": {
- "post": {
- "tags": [
- "Lease"
- ],
- "summary": "LeaseTimeToLive retrieves lease information.",
- "operationId": "Lease_LeaseTimeToLive2",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbLeaseTimeToLiveRequest"
+ "$ref": "#/definitions/etcdserverpbLeaseRevokeRequest"
}
}
],
+ "tags": [
+ "Lease"
+ ]
+ }
+ },
+ "/v3/kv/lease/timetolive": {
+ "post": {
+ "summary": "LeaseTimeToLive retrieves lease information.",
+ "operationId": "Lease_LeaseTimeToLive2",
"responses": {
"200": {
"description": "A successful response.",
@@ -894,31 +902,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/kv/put": {
- "post": {
- "tags": [
- "KV"
- ],
- "summary": "Put puts the given key into the key-value store.\nA put request increments the revision of the key-value store\nand generates one event in the event history.",
- "operationId": "KV_Put",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbPutRequest"
+ "$ref": "#/definitions/etcdserverpbLeaseTimeToLiveRequest"
}
}
],
+ "tags": [
+ "Lease"
+ ]
+ }
+ },
+ "/v3/kv/put": {
+ "post": {
+ "summary": "Put puts the given key into the key-value store.\nA put request increments the revision of the key-value store\nand generates one event in the event history.",
+ "operationId": "KV_Put",
"responses": {
"200": {
"description": "A successful response.",
@@ -927,31 +935,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/kv/range": {
- "post": {
- "tags": [
- "KV"
- ],
- "summary": "Range gets the keys in the range from the key-value store.",
- "operationId": "KV_Range",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbRangeRequest"
+ "$ref": "#/definitions/etcdserverpbPutRequest"
}
}
],
+ "tags": [
+ "KV"
+ ]
+ }
+ },
+ "/v3/kv/range": {
+ "post": {
+ "summary": "Range gets the keys in the range from the key-value store.",
+ "operationId": "KV_Range",
"responses": {
"200": {
"description": "A successful response.",
@@ -960,31 +968,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/kv/txn": {
- "post": {
- "tags": [
- "KV"
- ],
- "summary": "Txn processes multiple requests in a single transaction.\nA txn request increments the revision of the key-value store\nand generates events with the same revision for every completed request.\nIt is not allowed to modify the same key several times within one txn.",
- "operationId": "KV_Txn",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbTxnRequest"
+ "$ref": "#/definitions/etcdserverpbRangeRequest"
}
}
],
+ "tags": [
+ "KV"
+ ]
+ }
+ },
+ "/v3/kv/txn": {
+ "post": {
+ "summary": "Txn processes multiple requests in a single transaction.\nA txn request increments the revision of the key-value store\nand generates events with the same revision for every completed request.\nIt is not allowed to modify the same key several times within one txn.",
+ "operationId": "KV_Txn",
"responses": {
"200": {
"description": "A successful response.",
@@ -993,31 +1001,32 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/lease/grant": {
- "post": {
- "tags": [
- "Lease"
- ],
- "summary": "LeaseGrant creates a lease which expires if the server does not receive a keepAlive\nwithin a given time to live period. All keys attached to the lease will be expired and\ndeleted if the lease expires. Each expired key generates a delete event in the event history.",
- "operationId": "Lease_LeaseGrant",
+ },
"parameters": [
{
"name": "body",
+ "description": "From google paxosdb paper:\nOur implementation hinges around a powerful primitive which we call MultiOp. All other database\noperations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically\nand consists of three components:\n1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check\nfor the absence or presence of a value, or compare with a given value. Two different tests in the guard\nmay apply to the same or different entries in the database. All tests in the guard are applied and\nMultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise\nit executes f op (see item 3 below).\n2. A list of database operations called t op. Each operation in the list is either an insert, delete, or\nlookup operation, and applies to a single database entry. Two different operations in the list may apply\nto the same or different entries in the database. These operations are executed\nif guard evaluates to\ntrue.\n3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbLeaseGrantRequest"
+ "$ref": "#/definitions/etcdserverpbTxnRequest"
}
}
],
+ "tags": [
+ "KV"
+ ]
+ }
+ },
+ "/v3/lease/grant": {
+ "post": {
+ "summary": "LeaseGrant creates a lease which expires if the server does not receive a keepAlive\nwithin a given time to live period. All keys attached to the lease will be expired and\ndeleted if the lease expires. Each expired key generates a delete event in the event history.",
+ "operationId": "Lease_LeaseGrant",
"responses": {
"200": {
"description": "A successful response.",
@@ -1026,74 +1035,74 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/lease/keepalive": {
- "post": {
- "tags": [
- "Lease"
- ],
- "summary": "LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client\nto the server and streaming keep alive responses from the server to the client.",
- "operationId": "Lease_LeaseKeepAlive",
+ },
"parameters": [
{
- "description": " (streaming inputs)",
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbLeaseKeepAliveRequest"
+ "$ref": "#/definitions/etcdserverpbLeaseGrantRequest"
}
}
],
+ "tags": [
+ "Lease"
+ ]
+ }
+ },
+ "/v3/lease/keepalive": {
+ "post": {
+ "summary": "LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client\nto the server and streaming keep alive responses from the server to the client.",
+ "operationId": "Lease_LeaseKeepAlive",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"type": "object",
- "title": "Stream result of etcdserverpbLeaseKeepAliveResponse",
"properties": {
- "error": {
- "$ref": "#/definitions/runtimeStreamError"
- },
"result": {
"$ref": "#/definitions/etcdserverpbLeaseKeepAliveResponse"
+ },
+ "error": {
+ "$ref": "#/definitions/googlerpcStatus"
}
- }
+ },
+ "title": "Stream result of etcdserverpbLeaseKeepAliveResponse"
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/lease/leases": {
- "post": {
- "tags": [
- "Lease"
- ],
- "summary": "LeaseLeases lists all existing leases.",
- "operationId": "Lease_LeaseLeases",
+ },
"parameters": [
{
"name": "body",
+ "description": " (streaming inputs)",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbLeaseLeasesRequest"
+ "$ref": "#/definitions/etcdserverpbLeaseKeepAliveRequest"
}
}
],
+ "tags": [
+ "Lease"
+ ]
+ }
+ },
+ "/v3/lease/leases": {
+ "post": {
+ "summary": "LeaseLeases lists all existing leases.",
+ "operationId": "Lease_LeaseLeases",
"responses": {
"200": {
"description": "A successful response.",
@@ -1102,21 +1111,45 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
+ },
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/etcdserverpbLeaseLeasesRequest"
+ }
+ }
+ ],
+ "tags": [
+ "Lease"
+ ]
}
},
"/v3/lease/revoke": {
"post": {
- "tags": [
- "Lease"
- ],
"summary": "LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.",
"operationId": "Lease_LeaseRevoke",
+ "responses": {
+ "200": {
+ "description": "A successful response.",
+ "schema": {
+ "$ref": "#/definitions/etcdserverpbLeaseRevokeResponse"
+ }
+ },
+ "default": {
+ "description": "An unexpected error response.",
+ "schema": {
+ "$ref": "#/definitions/googlerpcStatus"
+ }
+ }
+ },
"parameters": [
{
"name": "body",
@@ -1127,29 +1160,29 @@
}
}
],
+ "tags": [
+ "Lease"
+ ]
+ }
+ },
+ "/v3/lease/timetolive": {
+ "post": {
+ "summary": "LeaseTimeToLive retrieves lease information.",
+ "operationId": "Lease_LeaseTimeToLive",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
- "$ref": "#/definitions/etcdserverpbLeaseRevokeResponse"
+ "$ref": "#/definitions/etcdserverpbLeaseTimeToLiveResponse"
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/lease/timetolive": {
- "post": {
- "tags": [
- "Lease"
- ],
- "summary": "LeaseTimeToLive retrieves lease information.",
- "operationId": "Lease_LeaseTimeToLive",
+ },
"parameters": [
{
"name": "body",
@@ -1160,29 +1193,29 @@
}
}
],
+ "tags": [
+ "Lease"
+ ]
+ }
+ },
+ "/v3/maintenance/alarm": {
+ "post": {
+ "summary": "Alarm activates, deactivates, and queries alarms regarding cluster health.",
+ "operationId": "Maintenance_Alarm",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
- "$ref": "#/definitions/etcdserverpbLeaseTimeToLiveResponse"
+ "$ref": "#/definitions/etcdserverpbAlarmResponse"
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/maintenance/alarm": {
- "post": {
- "tags": [
- "Maintenance"
- ],
- "summary": "Alarm activates, deactivates, and queries alarms regarding cluster health.",
- "operationId": "Maintenance_Alarm",
+ },
"parameters": [
{
"name": "body",
@@ -1193,29 +1226,29 @@
}
}
],
+ "tags": [
+ "Maintenance"
+ ]
+ }
+ },
+ "/v3/maintenance/defragment": {
+ "post": {
+ "summary": "Defragment defragments a member's backend database to recover storage space.",
+ "operationId": "Maintenance_Defragment",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
- "$ref": "#/definitions/etcdserverpbAlarmResponse"
+ "$ref": "#/definitions/etcdserverpbDefragmentResponse"
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/maintenance/defragment": {
- "post": {
- "tags": [
- "Maintenance"
- ],
- "summary": "Defragment defragments a member's backend database to recover storage space.",
- "operationId": "Maintenance_Defragment",
+ },
"parameters": [
{
"name": "body",
@@ -1226,29 +1259,29 @@
}
}
],
+ "tags": [
+ "Maintenance"
+ ]
+ }
+ },
+ "/v3/maintenance/downgrade": {
+ "post": {
+ "summary": "Downgrade requests downgrades, verifies feasibility or cancels downgrade\non the cluster version.\nSupported since etcd 3.5.",
+ "operationId": "Maintenance_Downgrade",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
- "$ref": "#/definitions/etcdserverpbDefragmentResponse"
+ "$ref": "#/definitions/etcdserverpbDowngradeResponse"
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/maintenance/downgrade": {
- "post": {
- "tags": [
- "Maintenance"
- ],
- "summary": "Downgrade requests downgrades, verifies feasibility or cancels downgrade\non the cluster version.\nSupported since etcd 3.5.",
- "operationId": "Maintenance_Downgrade",
+ },
"parameters": [
{
"name": "body",
@@ -1259,39 +1292,48 @@
}
}
],
+ "tags": [
+ "Maintenance"
+ ]
+ }
+ },
+ "/v3/maintenance/hash": {
+ "post": {
+ "summary": "Hash computes the hash of whole backend keyspace,\nincluding key, lease, and other buckets in storage.\nThis is designed for testing ONLY!\nDo not rely on this in production with ongoing transactions,\nsince Hash operation does not hold MVCC locks.\nUse \"HashKV\" API instead for \"key\" bucket consistency checks.",
+ "operationId": "Maintenance_Hash",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
- "$ref": "#/definitions/etcdserverpbDowngradeResponse"
+ "$ref": "#/definitions/etcdserverpbHashResponse"
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/maintenance/hash": {
- "post": {
- "tags": [
- "Maintenance"
- ],
- "summary": "HashKV computes the hash of all MVCC keys up to a given revision.\nIt only iterates \"key\" bucket in backend storage.",
- "operationId": "Maintenance_HashKV",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbHashKVRequest"
+ "$ref": "#/definitions/etcdserverpbHashRequest"
}
}
],
+ "tags": [
+ "Maintenance"
+ ]
+ }
+ },
+ "/v3/maintenance/hashkv": {
+ "post": {
+ "summary": "HashKV computes the hash of all MVCC keys up to a given revision.\nIt only iterates \"key\" bucket in backend storage.",
+ "operationId": "Maintenance_HashKV",
"responses": {
"200": {
"description": "A successful response.",
@@ -1300,73 +1342,73 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/maintenance/snapshot": {
- "post": {
- "tags": [
- "Maintenance"
- ],
- "summary": "Snapshot sends a snapshot of the entire backend from a member over a stream to a client.",
- "operationId": "Maintenance_Snapshot",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbSnapshotRequest"
+ "$ref": "#/definitions/etcdserverpbHashKVRequest"
}
}
],
+ "tags": [
+ "Maintenance"
+ ]
+ }
+ },
+ "/v3/maintenance/snapshot": {
+ "post": {
+ "summary": "Snapshot sends a snapshot of the entire backend from a member over a stream to a client.",
+ "operationId": "Maintenance_Snapshot",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"type": "object",
- "title": "Stream result of etcdserverpbSnapshotResponse",
"properties": {
- "error": {
- "$ref": "#/definitions/runtimeStreamError"
- },
"result": {
"$ref": "#/definitions/etcdserverpbSnapshotResponse"
+ },
+ "error": {
+ "$ref": "#/definitions/googlerpcStatus"
}
- }
+ },
+ "title": "Stream result of etcdserverpbSnapshotResponse"
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/maintenance/status": {
- "post": {
- "tags": [
- "Maintenance"
- ],
- "summary": "Status gets the status of the member.",
- "operationId": "Maintenance_Status",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbStatusRequest"
+ "$ref": "#/definitions/etcdserverpbSnapshotRequest"
}
}
],
+ "tags": [
+ "Maintenance"
+ ]
+ }
+ },
+ "/v3/maintenance/status": {
+ "post": {
+ "summary": "Status gets the status of the member.",
+ "operationId": "Maintenance_Status",
"responses": {
"200": {
"description": "A successful response.",
@@ -1375,31 +1417,31 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/maintenance/transfer-leadership": {
- "post": {
- "tags": [
- "Maintenance"
- ],
- "summary": "MoveLeader requests current leader node to transfer its leadership to transferee.",
- "operationId": "Maintenance_MoveLeader",
+ },
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbMoveLeaderRequest"
+ "$ref": "#/definitions/etcdserverpbStatusRequest"
}
}
],
+ "tags": [
+ "Maintenance"
+ ]
+ }
+ },
+ "/v3/maintenance/transfer-leadership": {
+ "post": {
+ "summary": "MoveLeader requests current leader node to transfer its leadership to transferee.",
+ "operationId": "Maintenance_MoveLeader",
"responses": {
"200": {
"description": "A successful response.",
@@ -1408,181 +1450,194 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
- }
- },
- "/v3/watch": {
- "post": {
- "tags": [
- "Watch"
- ],
- "summary": "Watch watches for events happening or that have happened. Both input and output\nare streams; the input stream is for creating and canceling watchers and the output\nstream sends events. One watch RPC can watch on multiple key ranges, streaming events\nfor several watches at once. The entire event history can be watched starting from the\nlast compaction revision.",
- "operationId": "Watch_Watch",
+ },
"parameters": [
{
- "description": " (streaming inputs)",
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/etcdserverpbWatchRequest"
+ "$ref": "#/definitions/etcdserverpbMoveLeaderRequest"
}
}
],
+ "tags": [
+ "Maintenance"
+ ]
+ }
+ },
+ "/v3/watch": {
+ "post": {
+ "summary": "Watch watches for events happening or that have happened. Both input and output\nare streams; the input stream is for creating and canceling watchers and the output\nstream sends events. One watch RPC can watch on multiple key ranges, streaming events\nfor several watches at once. The entire event history can be watched starting from the\nlast compaction revision.",
+ "operationId": "Watch_Watch",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"type": "object",
- "title": "Stream result of etcdserverpbWatchResponse",
"properties": {
- "error": {
- "$ref": "#/definitions/runtimeStreamError"
- },
"result": {
"$ref": "#/definitions/etcdserverpbWatchResponse"
+ },
+ "error": {
+ "$ref": "#/definitions/googlerpcStatus"
}
- }
+ },
+ "title": "Stream result of etcdserverpbWatchResponse"
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/googlerpcStatus"
}
}
- }
+ },
+ "parameters": [
+ {
+ "name": "body",
+ "description": " (streaming inputs)",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/etcdserverpbWatchRequest"
+ }
+ }
+ ],
+ "tags": [
+ "Watch"
+ ]
}
}
},
"definitions": {
"AlarmRequestAlarmAction": {
"type": "string",
- "default": "GET",
"enum": [
"GET",
"ACTIVATE",
"DEACTIVATE"
- ]
+ ],
+ "default": "GET"
},
"CompareCompareResult": {
"type": "string",
- "default": "EQUAL",
"enum": [
"EQUAL",
"GREATER",
"LESS",
"NOT_EQUAL"
- ]
+ ],
+ "default": "EQUAL"
},
"CompareCompareTarget": {
"type": "string",
- "default": "VERSION",
"enum": [
"VERSION",
"CREATE",
"MOD",
"VALUE",
"LEASE"
- ]
+ ],
+ "default": "VERSION"
},
"DowngradeRequestDowngradeAction": {
"type": "string",
- "default": "VALIDATE",
"enum": [
"VALIDATE",
"ENABLE",
"CANCEL"
- ]
+ ],
+ "default": "VALIDATE"
},
"EventEventType": {
"type": "string",
- "default": "PUT",
"enum": [
"PUT",
"DELETE"
- ]
+ ],
+ "default": "PUT"
},
"RangeRequestSortOrder": {
"type": "string",
- "default": "NONE",
"enum": [
"NONE",
"ASCEND",
"DESCEND"
- ]
+ ],
+ "default": "NONE",
+ "title": "- NONE: default, no sorting\n - ASCEND: lowest target value first\n - DESCEND: highest target value first"
},
"RangeRequestSortTarget": {
"type": "string",
- "default": "KEY",
"enum": [
"KEY",
"VERSION",
"CREATE",
"MOD",
"VALUE"
- ]
+ ],
+ "default": "KEY"
},
"WatchCreateRequestFilterType": {
- "description": " - NOPUT: filter out put event.\n - NODELETE: filter out delete event.",
"type": "string",
- "default": "NOPUT",
"enum": [
"NOPUT",
"NODELETE"
- ]
+ ],
+ "default": "NOPUT",
+ "description": " - NOPUT: filter out put event.\n - NODELETE: filter out delete event."
},
"authpbPermission": {
"type": "object",
- "title": "Permission is a single entity",
"properties": {
+ "permType": {
+ "$ref": "#/definitions/authpbPermissionType"
+ },
"key": {
"type": "string",
"format": "byte"
},
- "permType": {
- "$ref": "#/definitions/authpbPermissionType"
- },
"range_end": {
"type": "string",
"format": "byte"
}
- }
+ },
+ "title": "Permission is a single entity"
},
"authpbPermissionType": {
"type": "string",
- "default": "READ",
"enum": [
"READ",
"WRITE",
"READWRITE"
- ]
+ ],
+ "default": "READ"
},
"authpbUserAddOptions": {
"type": "object",
"properties": {
"no_password": {
- "type": "boolean",
- "format": "boolean"
+ "type": "boolean"
}
}
},
"etcdserverpbAlarmMember": {
"type": "object",
"properties": {
- "alarm": {
- "description": "alarm is the type of alarm which has been raised.",
- "$ref": "#/definitions/etcdserverpbAlarmType"
- },
"memberID": {
- "description": "memberID is the ID of the member associated with the raised alarm.",
"type": "string",
- "format": "uint64"
+ "format": "uint64",
+ "description": "memberID is the ID of the member associated with the raised alarm."
+ },
+ "alarm": {
+ "$ref": "#/definitions/etcdserverpbAlarmType",
+ "description": "alarm is the type of alarm which has been raised."
}
}
},
@@ -1590,43 +1645,45 @@
"type": "object",
"properties": {
"action": {
- "description": "action is the kind of alarm request to issue. The action\nmay GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a\nraised alarm.",
- "$ref": "#/definitions/AlarmRequestAlarmAction"
- },
- "alarm": {
- "description": "alarm is the type of alarm to consider for this request.",
- "$ref": "#/definitions/etcdserverpbAlarmType"
+ "$ref": "#/definitions/AlarmRequestAlarmAction",
+ "description": "action is the kind of alarm request to issue. The action\nmay GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a\nraised alarm."
},
"memberID": {
- "description": "memberID is the ID of the member associated with the alarm. If memberID is 0, the\nalarm request covers all members.",
"type": "string",
- "format": "uint64"
+ "format": "uint64",
+ "description": "memberID is the ID of the member associated with the alarm. If memberID is 0, the\nalarm request covers all members."
+ },
+ "alarm": {
+ "$ref": "#/definitions/etcdserverpbAlarmType",
+ "description": "alarm is the type of alarm to consider for this request."
}
}
},
"etcdserverpbAlarmResponse": {
"type": "object",
"properties": {
+ "header": {
+ "$ref": "#/definitions/etcdserverpbResponseHeader"
+ },
"alarms": {
- "description": "alarms is a list of alarms associated with the alarm request.",
"type": "array",
"items": {
+ "type": "object",
"$ref": "#/definitions/etcdserverpbAlarmMember"
- }
- },
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
+ },
+ "description": "alarms is a list of alarms associated with the alarm request."
}
}
},
"etcdserverpbAlarmType": {
"type": "string",
- "default": "NONE",
"enum": [
"NONE",
"NOSPACE",
"CORRUPT"
- ]
+ ],
+ "default": "NONE",
+ "title": "- NONE: default, used to query if any alarm is active\n - NOSPACE: space quota is exhausted\n - CORRUPT: kv store corruption detected"
},
"etcdserverpbAuthDisableRequest": {
"type": "object"
@@ -1654,8 +1711,8 @@
"type": "object",
"properties": {
"name": {
- "description": "name is the name of the role to add to the authentication system.",
- "type": "string"
+ "type": "string",
+ "description": "name is the name of the role to add to the authentication system."
}
}
},
@@ -1700,6 +1757,7 @@
"perm": {
"type": "array",
"items": {
+ "type": "object",
"$ref": "#/definitions/authpbPermission"
}
}
@@ -1709,12 +1767,12 @@
"type": "object",
"properties": {
"name": {
- "description": "name is the name of the role which will be granted the permission.",
- "type": "string"
+ "type": "string",
+ "description": "name is the name of the role which will be granted the permission."
},
"perm": {
- "description": "perm is the permission to grant to the role.",
- "$ref": "#/definitions/authpbPermission"
+ "$ref": "#/definitions/authpbPermission",
+ "description": "perm is the permission to grant to the role."
}
}
},
@@ -1746,6 +1804,9 @@
"etcdserverpbAuthRoleRevokePermissionRequest": {
"type": "object",
"properties": {
+ "role": {
+ "type": "string"
+ },
"key": {
"type": "string",
"format": "byte"
@@ -1753,9 +1814,6 @@
"range_end": {
"type": "string",
"format": "byte"
- },
- "role": {
- "type": "string"
}
}
},
@@ -1773,33 +1831,32 @@
"etcdserverpbAuthStatusResponse": {
"type": "object",
"properties": {
+ "header": {
+ "$ref": "#/definitions/etcdserverpbResponseHeader"
+ },
+ "enabled": {
+ "type": "boolean"
+ },
"authRevision": {
"type": "string",
"format": "uint64",
"title": "authRevision is the current revision of auth store"
- },
- "enabled": {
- "type": "boolean",
- "format": "boolean"
- },
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
}
}
},
"etcdserverpbAuthUserAddRequest": {
"type": "object",
"properties": {
- "hashedPassword": {
+ "name": {
"type": "string"
},
- "name": {
+ "password": {
"type": "string"
},
"options": {
"$ref": "#/definitions/authpbUserAddOptions"
},
- "password": {
+ "hashedPassword": {
"type": "string"
}
}
@@ -1815,17 +1872,17 @@
"etcdserverpbAuthUserChangePasswordRequest": {
"type": "object",
"properties": {
- "hashedPassword": {
- "description": "hashedPassword is the new password for the user. Note that this field will be initialized in the API layer.",
- "type": "string"
- },
"name": {
- "description": "name is the name of the user whose password is being changed.",
- "type": "string"
+ "type": "string",
+ "description": "name is the name of the user whose password is being changed."
},
"password": {
- "description": "password is the new password for the user. Note that this field will be removed in the API layer.",
- "type": "string"
+ "type": "string",
+ "description": "password is the new password for the user. Note that this field will be removed in the API layer."
+ },
+ "hashedPassword": {
+ "type": "string",
+ "description": "hashedPassword is the new password for the user. Note that this field will be initialized in the API layer."
}
}
},
@@ -1841,8 +1898,8 @@
"type": "object",
"properties": {
"name": {
- "description": "name is the name of the user to delete.",
- "type": "string"
+ "type": "string",
+ "description": "name is the name of the user to delete."
}
}
},
@@ -1879,13 +1936,13 @@
"etcdserverpbAuthUserGrantRoleRequest": {
"type": "object",
"properties": {
- "role": {
- "description": "role is the name of the role to grant to the user.",
- "type": "string"
- },
"user": {
- "description": "user is the name of the user which should be granted a given role.",
- "type": "string"
+ "type": "string",
+ "description": "user is the name of the user which should be granted a given role."
+ },
+ "role": {
+ "type": "string",
+ "description": "role is the name of the role to grant to the user."
}
}
},
@@ -1957,20 +2014,19 @@
}
},
"etcdserverpbCompactionRequest": {
- "description": "CompactionRequest compacts the key-value store up to a given revision. All superseded keys\nwith a revision less than the compaction revision will be removed.",
"type": "object",
"properties": {
- "physical": {
- "description": "physical is set so the RPC will wait until the compaction is physically\napplied to the local database such that compacted entries are totally\nremoved from the backend database.",
- "type": "boolean",
- "format": "boolean"
- },
"revision": {
- "description": "revision is the key-value store revision for the compaction operation.",
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "revision is the key-value store revision for the compaction operation."
+ },
+ "physical": {
+ "type": "boolean",
+ "description": "physical is set so the RPC will wait until the compaction is physically\napplied to the local database such that compacted entries are totally\nremoved from the backend database."
}
- }
+ },
+ "description": "CompactionRequest compacts the key-value store up to a given revision. All superseded keys\nwith a revision less than the compaction revision will be removed."
},
"etcdserverpbCompactionResponse": {
"type": "object",
@@ -1983,48 +2039,48 @@
"etcdserverpbCompare": {
"type": "object",
"properties": {
- "create_revision": {
- "type": "string",
- "format": "int64",
- "title": "create_revision is the creation revision of the given key"
+ "result": {
+ "$ref": "#/definitions/CompareCompareResult",
+ "description": "result is logical comparison operation for this comparison."
+ },
+ "target": {
+ "$ref": "#/definitions/CompareCompareTarget",
+ "description": "target is the key-value field to inspect for the comparison."
},
"key": {
- "description": "key is the subject key for the comparison operation.",
"type": "string",
- "format": "byte"
+ "format": "byte",
+ "description": "key is the subject key for the comparison operation."
},
- "lease": {
- "description": "lease is the lease id of the given key.",
+ "version": {
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "title": "version is the version of the given key"
},
- "mod_revision": {
- "description": "mod_revision is the last modified revision of the given key.",
+ "create_revision": {
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "title": "create_revision is the creation revision of the given key"
},
- "range_end": {
- "description": "range_end compares the given target to all keys in the range [key, range_end).\nSee RangeRequest for more details on key ranges.",
+ "mod_revision": {
"type": "string",
- "format": "byte"
- },
- "result": {
- "description": "result is logical comparison operation for this comparison.",
- "$ref": "#/definitions/CompareCompareResult"
- },
- "target": {
- "description": "target is the key-value field to inspect for the comparison.",
- "$ref": "#/definitions/CompareCompareTarget"
+ "format": "int64",
+ "description": "mod_revision is the last modified revision of the given key."
},
"value": {
- "description": "value is the value of the given key, in bytes.",
"type": "string",
- "format": "byte"
+ "format": "byte",
+ "description": "value is the value of the given key, in bytes."
},
- "version": {
+ "lease": {
"type": "string",
"format": "int64",
- "title": "version is the version of the given key"
+ "description": "lease is the lease id of the given key.\n\nleave room for more target_union field tags, jump to 64"
+ },
+ "range_end": {
+ "type": "string",
+ "format": "byte",
+ "description": "range_end compares the given target to all keys in the range [key, range_end).\nSee RangeRequest for more details on key ranges.\n\nTODO: fill out with most of the rest of RangeRequest fields when needed."
}
}
},
@@ -2043,39 +2099,39 @@
"type": "object",
"properties": {
"key": {
- "description": "key is the first key to delete in the range.",
"type": "string",
- "format": "byte"
- },
- "prev_kv": {
- "description": "If prev_kv is set, etcd gets the previous key-value pairs before deleting it.\nThe previous key-value pairs will be returned in the delete response.",
- "type": "boolean",
- "format": "boolean"
+ "format": "byte",
+ "description": "key is the first key to delete in the range."
},
"range_end": {
- "description": "range_end is the key following the last key to delete for the range [key, range_end).\nIf range_end is not given, the range is defined to contain only the key argument.\nIf range_end is one bit larger than the given key, then the range is all the keys\nwith the prefix (the given key).\nIf range_end is '\\0', the range is all keys greater than or equal to the key argument.",
"type": "string",
- "format": "byte"
+ "format": "byte",
+ "description": "range_end is the key following the last key to delete for the range [key, range_end).\nIf range_end is not given, the range is defined to contain only the key argument.\nIf range_end is one bit larger than the given key, then the range is all the keys\nwith the prefix (the given key).\nIf range_end is '\\0', the range is all keys greater than or equal to the key argument."
+ },
+ "prev_kv": {
+ "type": "boolean",
+ "description": "If prev_kv is set, etcd gets the previous key-value pairs before deleting it.\nThe previous key-value pairs will be returned in the delete response."
}
}
},
"etcdserverpbDeleteRangeResponse": {
"type": "object",
"properties": {
- "deleted": {
- "description": "deleted is the number of keys deleted by the delete range request.",
- "type": "string",
- "format": "int64"
- },
"header": {
"$ref": "#/definitions/etcdserverpbResponseHeader"
},
+ "deleted": {
+ "type": "string",
+ "format": "int64",
+ "description": "deleted is the number of keys deleted by the delete range request."
+ },
"prev_kvs": {
- "description": "if prev_kv is set in the request, the previous key-value pairs will be returned.",
"type": "array",
"items": {
+ "type": "object",
"$ref": "#/definitions/mvccpbKeyValue"
- }
+ },
+ "description": "if prev_kv is set in the request, the previous key-value pairs will be returned."
}
}
},
@@ -2083,12 +2139,12 @@
"type": "object",
"properties": {
"action": {
- "description": "action is the kind of downgrade request to issue. The action may\nVALIDATE the target version, DOWNGRADE the cluster version,\nor CANCEL the current downgrading job.",
- "$ref": "#/definitions/DowngradeRequestDowngradeAction"
+ "$ref": "#/definitions/DowngradeRequestDowngradeAction",
+ "description": "action is the kind of downgrade request to issue. The action may\nVALIDATE the target version, DOWNGRADE the cluster version,\nor CANCEL the current downgrading job."
},
"version": {
- "description": "version is the target version to downgrade.",
- "type": "string"
+ "type": "string",
+ "description": "version is the target version to downgrade."
}
}
},
@@ -2099,8 +2155,8 @@
"$ref": "#/definitions/etcdserverpbResponseHeader"
},
"version": {
- "description": "version is the current cluster version.",
- "type": "string"
+ "type": "string",
+ "description": "version is the current cluster version."
}
}
},
@@ -2108,27 +2164,32 @@
"type": "object",
"properties": {
"revision": {
- "description": "revision is the key-value store revision for the hash operation.",
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "revision is the key-value store revision for the hash operation."
}
}
},
"etcdserverpbHashKVResponse": {
"type": "object",
"properties": {
- "compact_revision": {
- "description": "compact_revision is the compacted revision of key-value store when hash begins.",
- "type": "string",
- "format": "int64"
+ "header": {
+ "$ref": "#/definitions/etcdserverpbResponseHeader"
},
"hash": {
- "description": "hash is the hash value computed from the responding member's MVCC keys up to a given revision.",
"type": "integer",
- "format": "int64"
+ "format": "int64",
+ "description": "hash is the hash value computed from the responding member's MVCC keys up to a given revision."
},
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
+ "compact_revision": {
+ "type": "string",
+ "format": "int64",
+ "description": "compact_revision is the compacted revision of key-value store when hash begins."
+ },
+ "hash_revision": {
+ "type": "string",
+ "format": "int64",
+ "description": "hash_revision is the revision up to which the hash is calculated."
}
}
},
@@ -2138,49 +2199,49 @@
"etcdserverpbHashResponse": {
"type": "object",
"properties": {
- "hash": {
- "description": "hash is the hash value computed from the responding member's KV's backend.",
- "type": "integer",
- "format": "int64"
- },
"header": {
"$ref": "#/definitions/etcdserverpbResponseHeader"
+ },
+ "hash": {
+ "type": "integer",
+ "format": "int64",
+ "description": "hash is the hash value computed from the responding member's KV's backend."
}
}
},
"etcdserverpbLeaseGrantRequest": {
"type": "object",
"properties": {
- "ID": {
- "description": "ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.",
+ "TTL": {
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "TTL is the advisory time-to-live in seconds. Expired lease will return -1."
},
- "TTL": {
- "description": "TTL is the advisory time-to-live in seconds. Expired lease will return -1.",
+ "ID": {
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID."
}
}
},
"etcdserverpbLeaseGrantResponse": {
"type": "object",
"properties": {
+ "header": {
+ "$ref": "#/definitions/etcdserverpbResponseHeader"
+ },
"ID": {
- "description": "ID is the lease ID for the granted lease.",
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "ID is the lease ID for the granted lease."
},
"TTL": {
- "description": "TTL is the server chosen lease time-to-live in seconds.",
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "TTL is the server chosen lease time-to-live in seconds."
},
"error": {
"type": "string"
- },
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
}
}
},
@@ -2188,27 +2249,27 @@
"type": "object",
"properties": {
"ID": {
- "description": "ID is the lease ID for the lease to keep alive.",
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "ID is the lease ID for the lease to keep alive."
}
}
},
"etcdserverpbLeaseKeepAliveResponse": {
"type": "object",
"properties": {
+ "header": {
+ "$ref": "#/definitions/etcdserverpbResponseHeader"
+ },
"ID": {
- "description": "ID is the lease ID from the keep alive request.",
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "ID is the lease ID from the keep alive request."
},
"TTL": {
- "description": "TTL is the new time-to-live for the lease.",
"type": "string",
- "format": "int64"
- },
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
+ "format": "int64",
+ "description": "TTL is the new time-to-live for the lease."
}
}
},
@@ -2224,6 +2285,7 @@
"leases": {
"type": "array",
"items": {
+ "type": "object",
"$ref": "#/definitions/etcdserverpbLeaseStatus"
}
}
@@ -2233,9 +2295,9 @@
"type": "object",
"properties": {
"ID": {
- "description": "ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.",
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted."
}
}
},
@@ -2252,7 +2314,8 @@
"properties": {
"ID": {
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "title": "TODO: int64 TTL = 2;"
}
}
},
@@ -2260,45 +2323,44 @@
"type": "object",
"properties": {
"ID": {
- "description": "ID is the lease ID for the lease.",
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "ID is the lease ID for the lease."
},
"keys": {
- "description": "keys is true to query all the keys attached to this lease.",
"type": "boolean",
- "format": "boolean"
+ "description": "keys is true to query all the keys attached to this lease."
}
}
},
"etcdserverpbLeaseTimeToLiveResponse": {
"type": "object",
"properties": {
+ "header": {
+ "$ref": "#/definitions/etcdserverpbResponseHeader"
+ },
"ID": {
- "description": "ID is the lease ID from the keep alive request.",
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "ID is the lease ID from the keep alive request."
},
"TTL": {
- "description": "TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.",
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds."
},
"grantedTTL": {
- "description": "GrantedTTL is the initial granted time in seconds upon lease creation/renewal.",
"type": "string",
- "format": "int64"
- },
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
+ "format": "int64",
+ "description": "GrantedTTL is the initial granted time in seconds upon lease creation/renewal."
},
"keys": {
- "description": "Keys is the list of keys attached to this lease.",
"type": "array",
"items": {
"type": "string",
"format": "byte"
- }
+ },
+ "description": "Keys is the list of keys attached to this lease."
}
}
},
@@ -2306,49 +2368,47 @@
"type": "object",
"properties": {
"ID": {
- "description": "ID is the member ID for this member.",
"type": "string",
- "format": "uint64"
+ "format": "uint64",
+ "description": "ID is the member ID for this member."
+ },
+ "name": {
+ "type": "string",
+ "description": "name is the human-readable name of the member. If the member is not started, the name will be an empty string."
},
- "clientURLs": {
- "description": "clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.",
+ "peerURLs": {
"type": "array",
"items": {
"type": "string"
- }
- },
- "isLearner": {
- "description": "isLearner indicates if the member is raft learner.",
- "type": "boolean",
- "format": "boolean"
- },
- "name": {
- "description": "name is the human-readable name of the member. If the member is not started, the name will be an empty string.",
- "type": "string"
+ },
+ "description": "peerURLs is the list of URLs the member exposes to the cluster for communication."
},
- "peerURLs": {
- "description": "peerURLs is the list of URLs the member exposes to the cluster for communication.",
+ "clientURLs": {
"type": "array",
"items": {
"type": "string"
- }
+ },
+ "description": "clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty."
+ },
+ "isLearner": {
+ "type": "boolean",
+ "description": "isLearner indicates if the member is raft learner."
}
}
},
"etcdserverpbMemberAddRequest": {
"type": "object",
"properties": {
- "isLearner": {
- "description": "isLearner indicates if the added member is raft learner.",
- "type": "boolean",
- "format": "boolean"
- },
"peerURLs": {
- "description": "peerURLs is the list of URLs the added member will use to communicate with the cluster.",
"type": "array",
"items": {
"type": "string"
- }
+ },
+ "description": "peerURLs is the list of URLs the added member will use to communicate with the cluster."
+ },
+ "isLearner": {
+ "type": "boolean",
+ "description": "isLearner indicates if the added member is raft learner."
}
}
},
@@ -2359,15 +2419,16 @@
"$ref": "#/definitions/etcdserverpbResponseHeader"
},
"member": {
- "description": "member is the member information for the added member.",
- "$ref": "#/definitions/etcdserverpbMember"
+ "$ref": "#/definitions/etcdserverpbMember",
+ "description": "member is the member information for the added member."
},
"members": {
- "description": "members is a list of all members after adding the new member.",
"type": "array",
"items": {
+ "type": "object",
"$ref": "#/definitions/etcdserverpbMember"
- }
+ },
+ "description": "members is a list of all members after adding the new member."
}
}
},
@@ -2375,8 +2436,7 @@
"type": "object",
"properties": {
"linearizable": {
- "type": "boolean",
- "format": "boolean"
+ "type": "boolean"
}
}
},
@@ -2387,11 +2447,12 @@
"$ref": "#/definitions/etcdserverpbResponseHeader"
},
"members": {
- "description": "members is a list of all members associated with the cluster.",
"type": "array",
"items": {
+ "type": "object",
"$ref": "#/definitions/etcdserverpbMember"
- }
+ },
+ "description": "members is a list of all members associated with the cluster."
}
}
},
@@ -2399,9 +2460,9 @@
"type": "object",
"properties": {
"ID": {
- "description": "ID is the member ID of the member to promote.",
"type": "string",
- "format": "uint64"
+ "format": "uint64",
+ "description": "ID is the member ID of the member to promote."
}
}
},
@@ -2412,11 +2473,12 @@
"$ref": "#/definitions/etcdserverpbResponseHeader"
},
"members": {
- "description": "members is a list of all members after promoting the member.",
"type": "array",
"items": {
+ "type": "object",
"$ref": "#/definitions/etcdserverpbMember"
- }
+ },
+ "description": "members is a list of all members after promoting the member."
}
}
},
@@ -2424,9 +2486,9 @@
"type": "object",
"properties": {
"ID": {
- "description": "ID is the member ID of the member to remove.",
"type": "string",
- "format": "uint64"
+ "format": "uint64",
+ "description": "ID is the member ID of the member to remove."
}
}
},
@@ -2437,11 +2499,12 @@
"$ref": "#/definitions/etcdserverpbResponseHeader"
},
"members": {
- "description": "members is a list of all members after removing the member.",
"type": "array",
"items": {
+ "type": "object",
"$ref": "#/definitions/etcdserverpbMember"
- }
+ },
+ "description": "members is a list of all members after removing the member."
}
}
},
@@ -2449,16 +2512,16 @@
"type": "object",
"properties": {
"ID": {
- "description": "ID is the member ID of the member to update.",
"type": "string",
- "format": "uint64"
+ "format": "uint64",
+ "description": "ID is the member ID of the member to update."
},
"peerURLs": {
- "description": "peerURLs is the new list of URLs the member will use to communicate with the cluster.",
"type": "array",
"items": {
"type": "string"
- }
+ },
+ "description": "peerURLs is the new list of URLs the member will use to communicate with the cluster."
}
}
},
@@ -2469,11 +2532,12 @@
"$ref": "#/definitions/etcdserverpbResponseHeader"
},
"members": {
- "description": "members is a list of all members after updating the member.",
"type": "array",
"items": {
+ "type": "object",
"$ref": "#/definitions/etcdserverpbMember"
- }
+ },
+ "description": "members is a list of all members after updating the member."
}
}
},
@@ -2481,9 +2545,9 @@
"type": "object",
"properties": {
"targetID": {
- "description": "targetID is the node ID for the new leader.",
"type": "string",
- "format": "uint64"
+ "format": "uint64",
+ "description": "targetID is the node ID for the new leader."
}
}
},
@@ -2498,35 +2562,32 @@
"etcdserverpbPutRequest": {
"type": "object",
"properties": {
- "ignore_lease": {
- "description": "If ignore_lease is set, etcd updates the key using its current lease.\nReturns an error if the key does not exist.",
- "type": "boolean",
- "format": "boolean"
- },
- "ignore_value": {
- "description": "If ignore_value is set, etcd updates the key using its current value.\nReturns an error if the key does not exist.",
- "type": "boolean",
- "format": "boolean"
- },
"key": {
- "description": "key is the key, in bytes, to put into the key-value store.",
"type": "string",
- "format": "byte"
+ "format": "byte",
+ "description": "key is the key, in bytes, to put into the key-value store."
+ },
+ "value": {
+ "type": "string",
+ "format": "byte",
+ "description": "value is the value, in bytes, to associate with the key in the key-value store."
},
"lease": {
- "description": "lease is the lease ID to associate with the key in the key-value store. A lease\nvalue of 0 indicates no lease.",
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "lease is the lease ID to associate with the key in the key-value store. A lease\nvalue of 0 indicates no lease."
},
"prev_kv": {
- "description": "If prev_kv is set, etcd gets the previous key-value pair before changing it.\nThe previous key-value pair will be returned in the put response.",
"type": "boolean",
- "format": "boolean"
+ "description": "If prev_kv is set, etcd gets the previous key-value pair before changing it.\nThe previous key-value pair will be returned in the put response."
},
- "value": {
- "description": "value is the value, in bytes, to associate with the key in the key-value store.",
- "type": "string",
- "format": "byte"
+ "ignore_value": {
+ "type": "boolean",
+ "description": "If ignore_value is set, etcd updates the key using its current value.\nReturns an error if the key does not exist."
+ },
+ "ignore_lease": {
+ "type": "boolean",
+ "description": "If ignore_lease is set, etcd updates the key using its current lease.\nReturns an error if the key does not exist."
}
}
},
@@ -2537,115 +2598,112 @@
"$ref": "#/definitions/etcdserverpbResponseHeader"
},
"prev_kv": {
- "description": "if prev_kv is set in the request, the previous key-value pair will be returned.",
- "$ref": "#/definitions/mvccpbKeyValue"
+ "$ref": "#/definitions/mvccpbKeyValue",
+ "description": "if prev_kv is set in the request, the previous key-value pair will be returned."
}
}
},
"etcdserverpbRangeRequest": {
"type": "object",
"properties": {
- "count_only": {
- "description": "count_only when set returns only the count of the keys in the range.",
- "type": "boolean",
- "format": "boolean"
- },
"key": {
- "description": "key is the first key for the range. If range_end is not given, the request only looks up key.",
"type": "string",
- "format": "byte"
+ "format": "byte",
+ "description": "key is the first key for the range. If range_end is not given, the request only looks up key."
},
- "keys_only": {
- "description": "keys_only when set returns only the keys and not the values.",
- "type": "boolean",
- "format": "boolean"
+ "range_end": {
+ "type": "string",
+ "format": "byte",
+ "description": "range_end is the upper bound on the requested range [key, range_end).\nIf range_end is '\\0', the range is all keys \u003e= key.\nIf range_end is key plus one (e.g., \"aa\"+1 == \"ab\", \"a\\xff\"+1 == \"b\"),\nthen the range request gets all keys prefixed with key.\nIf both key and range_end are '\\0', then the range request returns all keys."
},
"limit": {
- "description": "limit is a limit on the number of keys returned for the request. When limit is set to 0,\nit is treated as no limit.",
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "limit is a limit on the number of keys returned for the request. When limit is set to 0,\nit is treated as no limit."
},
- "max_create_revision": {
- "description": "max_create_revision is the upper bound for returned key create revisions; all keys with\ngreater create revisions will be filtered away.",
+ "revision": {
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "revision is the point-in-time of the key-value store to use for the range.\nIf revision is less or equal to zero, the range is over the newest key-value store.\nIf the revision has been compacted, ErrCompacted is returned as a response."
},
- "max_mod_revision": {
- "description": "max_mod_revision is the upper bound for returned key mod revisions; all keys with\ngreater mod revisions will be filtered away.",
- "type": "string",
- "format": "int64"
+ "sort_order": {
+ "$ref": "#/definitions/RangeRequestSortOrder",
+ "description": "sort_order is the order for returned sorted results."
},
- "min_create_revision": {
- "description": "min_create_revision is the lower bound for returned key create revisions; all keys with\nlesser create revisions will be filtered away.",
- "type": "string",
- "format": "int64"
+ "sort_target": {
+ "$ref": "#/definitions/RangeRequestSortTarget",
+ "description": "sort_target is the key-value field to use for sorting."
},
- "min_mod_revision": {
- "description": "min_mod_revision is the lower bound for returned key mod revisions; all keys with\nlesser mod revisions will be filtered away.",
- "type": "string",
- "format": "int64"
+ "serializable": {
+ "type": "boolean",
+ "description": "serializable sets the range request to use serializable member-local reads.\nRange requests are linearizable by default; linearizable requests have higher\nlatency and lower throughput than serializable requests but reflect the current\nconsensus of the cluster. For better performance, in exchange for possible stale reads,\na serializable range request is served locally without needing to reach consensus\nwith other nodes in the cluster."
},
- "range_end": {
- "description": "range_end is the upper bound on the requested range [key, range_end).\nIf range_end is '\\0', the range is all keys \u003e= key.\nIf range_end is key plus one (e.g., \"aa\"+1 == \"ab\", \"a\\xff\"+1 == \"b\"),\nthen the range request gets all keys prefixed with key.\nIf both key and range_end are '\\0', then the range request returns all keys.",
- "type": "string",
- "format": "byte"
+ "keys_only": {
+ "type": "boolean",
+ "description": "keys_only when set returns only the keys and not the values."
},
- "revision": {
- "description": "revision is the point-in-time of the key-value store to use for the range.\nIf revision is less or equal to zero, the range is over the newest key-value store.\nIf the revision has been compacted, ErrCompacted is returned as a response.",
+ "count_only": {
+ "type": "boolean",
+ "description": "count_only when set returns only the count of the keys in the range."
+ },
+ "min_mod_revision": {
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "min_mod_revision is the lower bound for returned key mod revisions; all keys with\nlesser mod revisions will be filtered away."
},
- "serializable": {
- "description": "serializable sets the range request to use serializable member-local reads.\nRange requests are linearizable by default; linearizable requests have higher\nlatency and lower throughput than serializable requests but reflect the current\nconsensus of the cluster. For better performance, in exchange for possible stale reads,\na serializable range request is served locally without needing to reach consensus\nwith other nodes in the cluster.",
- "type": "boolean",
- "format": "boolean"
+ "max_mod_revision": {
+ "type": "string",
+ "format": "int64",
+ "description": "max_mod_revision is the upper bound for returned key mod revisions; all keys with\ngreater mod revisions will be filtered away."
},
- "sort_order": {
- "description": "sort_order is the order for returned sorted results.",
- "$ref": "#/definitions/RangeRequestSortOrder"
+ "min_create_revision": {
+ "type": "string",
+ "format": "int64",
+ "description": "min_create_revision is the lower bound for returned key create revisions; all keys with\nlesser create revisions will be filtered away."
},
- "sort_target": {
- "description": "sort_target is the key-value field to use for sorting.",
- "$ref": "#/definitions/RangeRequestSortTarget"
+ "max_create_revision": {
+ "type": "string",
+ "format": "int64",
+ "description": "max_create_revision is the upper bound for returned key create revisions; all keys with\ngreater create revisions will be filtered away."
}
}
},
"etcdserverpbRangeResponse": {
"type": "object",
"properties": {
- "count": {
- "description": "count is set to the number of keys within the range when requested.",
- "type": "string",
- "format": "int64"
- },
"header": {
"$ref": "#/definitions/etcdserverpbResponseHeader"
},
"kvs": {
- "description": "kvs is the list of key-value pairs matched by the range request.\nkvs is empty when count is requested.",
"type": "array",
"items": {
+ "type": "object",
"$ref": "#/definitions/mvccpbKeyValue"
- }
+ },
+ "description": "kvs is the list of key-value pairs matched by the range request.\nkvs is empty when count is requested."
},
"more": {
- "description": "more indicates if there are more keys to return in the requested range.",
"type": "boolean",
- "format": "boolean"
+ "description": "more indicates if there are more keys to return in the requested range."
+ },
+ "count": {
+ "type": "string",
+ "format": "int64",
+ "description": "count is set to the number of keys within the range when requested."
}
}
},
"etcdserverpbRequestOp": {
"type": "object",
"properties": {
- "request_delete_range": {
- "$ref": "#/definitions/etcdserverpbDeleteRangeRequest"
+ "request_range": {
+ "$ref": "#/definitions/etcdserverpbRangeRequest"
},
"request_put": {
"$ref": "#/definitions/etcdserverpbPutRequest"
},
- "request_range": {
- "$ref": "#/definitions/etcdserverpbRangeRequest"
+ "request_delete_range": {
+ "$ref": "#/definitions/etcdserverpbDeleteRangeRequest"
},
"request_txn": {
"$ref": "#/definitions/etcdserverpbTxnRequest"
@@ -2656,38 +2714,38 @@
"type": "object",
"properties": {
"cluster_id": {
- "description": "cluster_id is the ID of the cluster which sent the response.",
"type": "string",
- "format": "uint64"
+ "format": "uint64",
+ "description": "cluster_id is the ID of the cluster which sent the response."
},
"member_id": {
- "description": "member_id is the ID of the member which sent the response.",
"type": "string",
- "format": "uint64"
+ "format": "uint64",
+ "description": "member_id is the ID of the member which sent the response."
},
- "raft_term": {
- "description": "raft_term is the raft term when the request was applied.",
+ "revision": {
"type": "string",
- "format": "uint64"
+ "format": "int64",
+ "description": "revision is the key-value store revision when the request was applied, and it's\nunset (so 0) in case of calls not interacting with key-value store.\nFor watch progress responses, the header.revision indicates progress. All future events\nreceived in this stream are guaranteed to have a higher revision number than the\nheader.revision number."
},
- "revision": {
- "description": "revision is the key-value store revision when the request was applied.\nFor watch progress responses, the header.revision indicates progress. All future events\nrecieved in this stream are guaranteed to have a higher revision number than the\nheader.revision number.",
+ "raft_term": {
"type": "string",
- "format": "int64"
+ "format": "uint64",
+ "description": "raft_term is the raft term when the request was applied."
}
}
},
"etcdserverpbResponseOp": {
"type": "object",
"properties": {
- "response_delete_range": {
- "$ref": "#/definitions/etcdserverpbDeleteRangeResponse"
+ "response_range": {
+ "$ref": "#/definitions/etcdserverpbRangeResponse"
},
"response_put": {
"$ref": "#/definitions/etcdserverpbPutResponse"
},
- "response_range": {
- "$ref": "#/definitions/etcdserverpbRangeResponse"
+ "response_delete_range": {
+ "$ref": "#/definitions/etcdserverpbDeleteRangeResponse"
},
"response_txn": {
"$ref": "#/definitions/etcdserverpbTxnResponse"
@@ -2700,19 +2758,23 @@
"etcdserverpbSnapshotResponse": {
"type": "object",
"properties": {
- "blob": {
- "description": "blob contains the next chunk of the snapshot in the snapshot stream.",
- "type": "string",
- "format": "byte"
- },
"header": {
- "description": "header has the current key-value store information. The first header in the snapshot\nstream indicates the point in time of the snapshot.",
- "$ref": "#/definitions/etcdserverpbResponseHeader"
+ "$ref": "#/definitions/etcdserverpbResponseHeader",
+ "description": "header has the current key-value store information. The first header in the snapshot\nstream indicates the point in time of the snapshot."
},
"remaining_bytes": {
"type": "string",
"format": "uint64",
"title": "remaining_bytes is the number of blob bytes to be sent after this message"
+ },
+ "blob": {
+ "type": "string",
+ "format": "byte",
+ "description": "blob contains the next chunk of the snapshot in the snapshot stream."
+ },
+ "version": {
+ "type": "string",
+ "description": "local version of server that created the snapshot.\nIn cluster with binaries with different version, each cluster can return different result.\nInforms which etcd server version should be used when restoring the snapshot."
}
}
},
@@ -2722,83 +2784,94 @@
"etcdserverpbStatusResponse": {
"type": "object",
"properties": {
+ "header": {
+ "$ref": "#/definitions/etcdserverpbResponseHeader"
+ },
+ "version": {
+ "type": "string",
+ "description": "version is the cluster protocol version used by the responding member."
+ },
"dbSize": {
- "description": "dbSize is the size of the backend database physically allocated, in bytes, of the responding member.",
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "dbSize is the size of the backend database physically allocated, in bytes, of the responding member."
},
- "dbSizeInUse": {
- "description": "dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member.",
+ "leader": {
+ "type": "string",
+ "format": "uint64",
+ "description": "leader is the member ID which the responding member believes is the current leader."
+ },
+ "raftIndex": {
+ "type": "string",
+ "format": "uint64",
+ "description": "raftIndex is the current raft committed index of the responding member."
+ },
+ "raftTerm": {
+ "type": "string",
+ "format": "uint64",
+ "description": "raftTerm is the current raft term of the responding member."
+ },
+ "raftAppliedIndex": {
"type": "string",
- "format": "int64"
+ "format": "uint64",
+ "description": "raftAppliedIndex is the current raft applied index of the responding member."
},
"errors": {
- "description": "errors contains alarm/health information and status.",
"type": "array",
"items": {
"type": "string"
- }
+ },
+ "description": "errors contains alarm/health information and status."
},
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
+ "dbSizeInUse": {
+ "type": "string",
+ "format": "int64",
+ "description": "dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member."
},
"isLearner": {
- "description": "isLearner indicates if the member is raft learner.",
"type": "boolean",
- "format": "boolean"
- },
- "leader": {
- "description": "leader is the member ID which the responding member believes is the current leader.",
- "type": "string",
- "format": "uint64"
- },
- "raftAppliedIndex": {
- "description": "raftAppliedIndex is the current raft applied index of the responding member.",
- "type": "string",
- "format": "uint64"
+ "description": "isLearner indicates if the member is raft learner."
},
- "raftIndex": {
- "description": "raftIndex is the current raft committed index of the responding member.",
+ "storageVersion": {
"type": "string",
- "format": "uint64"
+ "description": "storageVersion is the version of the db file. It might be get updated with delay in relationship to the target cluster version."
},
- "raftTerm": {
- "description": "raftTerm is the current raft term of the responding member.",
+ "dbSizeQuota": {
"type": "string",
- "format": "uint64"
- },
- "version": {
- "description": "version is the cluster protocol version used by the responding member.",
- "type": "string"
+ "format": "int64",
+ "title": "dbSizeQuota is the configured etcd storage quota in bytes (the value passed to etcd instance by flag --quota-backend-bytes)"
}
}
},
"etcdserverpbTxnRequest": {
- "description": "From google paxosdb paper:\nOur implementation hinges around a powerful primitive which we call MultiOp. All other database\noperations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically\nand consists of three components:\n1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check\nfor the absence or presence of a value, or compare with a given value. Two different tests in the guard\nmay apply to the same or different entries in the database. All tests in the guard are applied and\nMultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise\nit executes f op (see item 3 below).\n2. A list of database operations called t op. Each operation in the list is either an insert, delete, or\nlookup operation, and applies to a single database entry. Two different operations in the list may apply\nto the same or different entries in the database. These operations are executed\nif guard evaluates to\ntrue.\n3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.",
"type": "object",
"properties": {
"compare": {
- "description": "compare is a list of predicates representing a conjunction of terms.\nIf the comparisons succeed, then the success requests will be processed in order,\nand the response will contain their respective responses in order.\nIf the comparisons fail, then the failure requests will be processed in order,\nand the response will contain their respective responses in order.",
"type": "array",
"items": {
+ "type": "object",
"$ref": "#/definitions/etcdserverpbCompare"
- }
+ },
+ "description": "compare is a list of predicates representing a conjunction of terms.\nIf the comparisons succeed, then the success requests will be processed in order,\nand the response will contain their respective responses in order.\nIf the comparisons fail, then the failure requests will be processed in order,\nand the response will contain their respective responses in order."
},
- "failure": {
- "description": "failure is a list of requests which will be applied when compare evaluates to false.",
+ "success": {
"type": "array",
"items": {
+ "type": "object",
"$ref": "#/definitions/etcdserverpbRequestOp"
- }
+ },
+ "description": "success is a list of requests which will be applied when compare evaluates to true."
},
- "success": {
- "description": "success is a list of requests which will be applied when compare evaluates to true.",
+ "failure": {
"type": "array",
"items": {
+ "type": "object",
"$ref": "#/definitions/etcdserverpbRequestOp"
- }
+ },
+ "description": "failure is a list of requests which will be applied when compare evaluates to false."
}
- }
+ },
+ "description": "From google paxosdb paper:\nOur implementation hinges around a powerful primitive which we call MultiOp. All other database\noperations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically\nand consists of three components:\n1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check\nfor the absence or presence of a value, or compare with a given value. Two different tests in the guard\nmay apply to the same or different entries in the database. All tests in the guard are applied and\nMultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise\nit executes f op (see item 3 below).\n2. A list of database operations called t op. Each operation in the list is either an insert, delete, or\nlookup operation, and applies to a single database entry. Two different operations in the list may apply\nto the same or different entries in the database. These operations are executed\nif guard evaluates to\ntrue.\n3. A list of database operations called f op. Like t op, but executed if guard evaluates to false."
},
"etcdserverpbTxnResponse": {
"type": "object",
@@ -2806,17 +2879,17 @@
"header": {
"$ref": "#/definitions/etcdserverpbResponseHeader"
},
+ "succeeded": {
+ "type": "boolean",
+ "description": "succeeded is set to true if the compare evaluated to true or false otherwise."
+ },
"responses": {
- "description": "responses is a list of responses corresponding to the results from applying\nsuccess if succeeded is true or failure if succeeded is false.",
"type": "array",
"items": {
+ "type": "object",
"$ref": "#/definitions/etcdserverpbResponseOp"
- }
- },
- "succeeded": {
- "description": "succeeded is set to true if the compare evaluated to true or false otherwise.",
- "type": "boolean",
- "format": "boolean"
+ },
+ "description": "responses is a list of responses corresponding to the results from applying\nsuccess if succeeded is true or failure if succeeded is false."
}
}
},
@@ -2824,72 +2897,69 @@
"type": "object",
"properties": {
"watch_id": {
- "description": "watch_id is the watcher id to cancel so that no more events are transmitted.",
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "watch_id is the watcher id to cancel so that no more events are transmitted."
}
}
},
"etcdserverpbWatchCreateRequest": {
"type": "object",
"properties": {
- "filters": {
- "description": "filters filter the events at server side before it sends back to the watcher.",
- "type": "array",
- "items": {
- "$ref": "#/definitions/WatchCreateRequestFilterType"
- }
- },
- "fragment": {
- "description": "fragment enables splitting large revisions into multiple watch responses.",
- "type": "boolean",
- "format": "boolean"
- },
"key": {
- "description": "key is the key to register for watching.",
"type": "string",
- "format": "byte"
- },
- "prev_kv": {
- "description": "If prev_kv is set, created watcher gets the previous KV before the event happens.\nIf the previous KV is already compacted, nothing will be returned.",
- "type": "boolean",
- "format": "boolean"
- },
- "progress_notify": {
- "description": "progress_notify is set so that the etcd server will periodically send a WatchResponse with\nno events to the new watcher if there are no recent events. It is useful when clients\nwish to recover a disconnected watcher starting from a recent known revision.\nThe etcd server may decide how often it will send notifications based on current load.",
- "type": "boolean",
- "format": "boolean"
+ "format": "byte",
+ "description": "key is the key to register for watching."
},
"range_end": {
- "description": "range_end is the end of the range [key, range_end) to watch. If range_end is not given,\nonly the key argument is watched. If range_end is equal to '\\0', all keys greater than\nor equal to the key argument are watched.\nIf the range_end is one bit larger than the given key,\nthen all keys with the prefix (the given key) will be watched.",
"type": "string",
- "format": "byte"
+ "format": "byte",
+ "description": "range_end is the end of the range [key, range_end) to watch. If range_end is not given,\nonly the key argument is watched. If range_end is equal to '\\0', all keys greater than\nor equal to the key argument are watched.\nIf the range_end is one bit larger than the given key,\nthen all keys with the prefix (the given key) will be watched."
},
"start_revision": {
- "description": "start_revision is an optional revision to watch from (inclusive). No start_revision is \"now\".",
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "start_revision is an optional revision to watch from (inclusive). No start_revision is \"now\"."
+ },
+ "progress_notify": {
+ "type": "boolean",
+ "description": "progress_notify is set so that the etcd server will periodically send a WatchResponse with\nno events to the new watcher if there are no recent events. It is useful when clients\nwish to recover a disconnected watcher starting from a recent known revision.\nThe etcd server may decide how often it will send notifications based on current load."
+ },
+ "filters": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/WatchCreateRequestFilterType"
+ },
+ "description": "filters filter the events at server side before it sends back to the watcher."
+ },
+ "prev_kv": {
+ "type": "boolean",
+ "description": "If prev_kv is set, created watcher gets the previous KV before the event happens.\nIf the previous KV is already compacted, nothing will be returned."
},
"watch_id": {
- "description": "If watch_id is provided and non-zero, it will be assigned to this watcher.\nSince creating a watcher in etcd is not a synchronous operation,\nthis can be used ensure that ordering is correct when creating multiple\nwatchers on the same stream. Creating a watcher with an ID already in\nuse on the stream will cause an error to be returned.",
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "If watch_id is provided and non-zero, it will be assigned to this watcher.\nSince creating a watcher in etcd is not a synchronous operation,\nthis can be used ensure that ordering is correct when creating multiple\nwatchers on the same stream. Creating a watcher with an ID already in\nuse on the stream will cause an error to be returned."
+ },
+ "fragment": {
+ "type": "boolean",
+ "description": "fragment enables splitting large revisions into multiple watch responses."
}
}
},
"etcdserverpbWatchProgressRequest": {
- "description": "Requests the a watch stream progress status be sent in the watch response stream as soon as\npossible.",
- "type": "object"
+ "type": "object",
+ "description": "Requests the a watch stream progress status be sent in the watch response stream as soon as\npossible."
},
"etcdserverpbWatchRequest": {
"type": "object",
"properties": {
- "cancel_request": {
- "$ref": "#/definitions/etcdserverpbWatchCancelRequest"
- },
"create_request": {
"$ref": "#/definitions/etcdserverpbWatchCreateRequest"
},
+ "cancel_request": {
+ "$ref": "#/definitions/etcdserverpbWatchCancelRequest"
+ },
"progress_request": {
"$ref": "#/definitions/etcdserverpbWatchProgressRequest"
}
@@ -2898,155 +2968,123 @@
"etcdserverpbWatchResponse": {
"type": "object",
"properties": {
- "cancel_reason": {
- "description": "cancel_reason indicates the reason for canceling the watcher.",
- "type": "string"
+ "header": {
+ "$ref": "#/definitions/etcdserverpbResponseHeader"
+ },
+ "watch_id": {
+ "type": "string",
+ "format": "int64",
+ "description": "watch_id is the ID of the watcher that corresponds to the response."
+ },
+ "created": {
+ "type": "boolean",
+ "description": "created is set to true if the response is for a create watch request.\nThe client should record the watch_id and expect to receive events for\nthe created watcher from the same stream.\nAll events sent to the created watcher will attach with the same watch_id."
},
"canceled": {
- "description": "canceled is set to true if the response is for a cancel watch request.\nNo further events will be sent to the canceled watcher.",
"type": "boolean",
- "format": "boolean"
+ "description": "canceled is set to true if the response is for a cancel watch request\nor if the start_revision has already been compacted.\nNo further events will be sent to the canceled watcher."
},
"compact_revision": {
- "description": "compact_revision is set to the minimum index if a watcher tries to watch\nat a compacted index.\n\nThis happens when creating a watcher at a compacted revision or the watcher cannot\ncatch up with the progress of the key-value store.\n\nThe client should treat the watcher as canceled and should not try to create any\nwatcher with the same start_revision again.",
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "compact_revision is set to the minimum index if a watcher tries to watch\nat a compacted index.\n\nThis happens when creating a watcher at a compacted revision or the watcher cannot\ncatch up with the progress of the key-value store.\n\nThe client should treat the watcher as canceled and should not try to create any\nwatcher with the same start_revision again."
},
- "created": {
- "description": "created is set to true if the response is for a create watch request.\nThe client should record the watch_id and expect to receive events for\nthe created watcher from the same stream.\nAll events sent to the created watcher will attach with the same watch_id.",
+ "cancel_reason": {
+ "type": "string",
+ "description": "cancel_reason indicates the reason for canceling the watcher."
+ },
+ "fragment": {
"type": "boolean",
- "format": "boolean"
+ "description": "framgment is true if large watch response was split over multiple responses."
},
"events": {
"type": "array",
"items": {
+ "type": "object",
"$ref": "#/definitions/mvccpbEvent"
}
+ }
+ }
+ },
+ "googlerpcStatus": {
+ "type": "object",
+ "properties": {
+ "code": {
+ "type": "integer",
+ "format": "int32"
},
- "fragment": {
- "description": "framgment is true if large watch response was split over multiple responses.",
- "type": "boolean",
- "format": "boolean"
- },
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
+ "message": {
+ "type": "string"
},
- "watch_id": {
- "description": "watch_id is the ID of the watcher that corresponds to the response.",
- "type": "string",
- "format": "int64"
+ "details": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "$ref": "#/definitions/protobufAny"
+ }
}
}
},
"mvccpbEvent": {
"type": "object",
"properties": {
+ "type": {
+ "$ref": "#/definitions/EventEventType",
+ "description": "type is the kind of event. If type is a PUT, it indicates\nnew data has been stored to the key. If type is a DELETE,\nit indicates the key was deleted."
+ },
"kv": {
- "description": "kv holds the KeyValue for the event.\nA PUT event contains current kv pair.\nA PUT event with kv.Version=1 indicates the creation of a key.\nA DELETE/EXPIRE event contains the deleted key with\nits modification revision set to the revision of deletion.",
- "$ref": "#/definitions/mvccpbKeyValue"
+ "$ref": "#/definitions/mvccpbKeyValue",
+ "description": "kv holds the KeyValue for the event.\nA PUT event contains current kv pair.\nA PUT event with kv.Version=1 indicates the creation of a key.\nA DELETE/EXPIRE event contains the deleted key with\nits modification revision set to the revision of deletion."
},
"prev_kv": {
- "description": "prev_kv holds the key-value pair before the event happens.",
- "$ref": "#/definitions/mvccpbKeyValue"
- },
- "type": {
- "description": "type is the kind of event. If type is a PUT, it indicates\nnew data has been stored to the key. If type is a DELETE,\nit indicates the key was deleted.",
- "$ref": "#/definitions/EventEventType"
+ "$ref": "#/definitions/mvccpbKeyValue",
+ "description": "prev_kv holds the key-value pair before the event happens."
}
}
},
"mvccpbKeyValue": {
"type": "object",
"properties": {
- "create_revision": {
- "description": "create_revision is the revision of last creation on this key.",
- "type": "string",
- "format": "int64"
- },
"key": {
- "description": "key is the key in bytes. An empty key is not allowed.",
"type": "string",
- "format": "byte"
+ "format": "byte",
+ "description": "key is the key in bytes. An empty key is not allowed."
},
- "lease": {
- "description": "lease is the ID of the lease that attached to key.\nWhen the attached lease expires, the key will be deleted.\nIf lease is 0, then no lease is attached to the key.",
+ "create_revision": {
"type": "string",
- "format": "int64"
+ "format": "int64",
+ "description": "create_revision is the revision of last creation on this key."
},
"mod_revision": {
- "description": "mod_revision is the revision of last modification on this key.",
- "type": "string",
- "format": "int64"
- },
- "value": {
- "description": "value is the value held by the key, in bytes.",
"type": "string",
- "format": "byte"
+ "format": "int64",
+ "description": "mod_revision is the revision of last modification on this key."
},
"version": {
- "description": "version is the version of the key. A deletion resets\nthe version to zero and any modification of the key\nincreases its version.",
"type": "string",
- "format": "int64"
- }
- }
- },
- "protobufAny": {
- "type": "object",
- "properties": {
- "type_url": {
- "type": "string"
+ "format": "int64",
+ "description": "version is the version of the key. A deletion resets\nthe version to zero and any modification of the key\nincreases its version."
},
"value": {
"type": "string",
- "format": "byte"
- }
- }
- },
- "runtimeError": {
- "type": "object",
- "properties": {
- "code": {
- "type": "integer",
- "format": "int32"
- },
- "details": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/protobufAny"
- }
- },
- "error": {
- "type": "string"
+ "format": "byte",
+ "description": "value is the value held by the key, in bytes."
},
- "message": {
- "type": "string"
+ "lease": {
+ "type": "string",
+ "format": "int64",
+ "description": "lease is the ID of the lease that attached to key.\nWhen the attached lease expires, the key will be deleted.\nIf lease is 0, then no lease is attached to the key."
}
}
},
- "runtimeStreamError": {
+ "protobufAny": {
"type": "object",
"properties": {
- "details": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/protobufAny"
- }
- },
- "grpc_code": {
- "type": "integer",
- "format": "int32"
- },
- "http_code": {
- "type": "integer",
- "format": "int32"
- },
- "http_status": {
- "type": "string"
- },
- "message": {
+ "@type": {
"type": "string"
}
- }
+ },
+ "additionalProperties": {}
}
},
"securityDefinitions": {
@@ -3061,4 +3099,4 @@
"ApiKey": []
}
]
-}
\ No newline at end of file
+}
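Note on the error-schema change above: the regenerated rpc.swagger.json replaces the removed runtimeError/runtimeStreamError definitions with the googlerpcStatus shape (code, message, details), and protobufAny now carries an "@type" discriminator instead of type_url/value. As a rough illustration only, not output captured from etcd, an error body matching the new definitions could look like the sketch below (the code, message, detail text, and the choice of google.rpc.DebugInfo as the detail type are invented for the example):

    {
      "code": 3,
      "message": "example error message",
      "details": [
        {
          "@type": "type.googleapis.com/google.rpc.DebugInfo",
          "detail": "example detail payload"
        }
      ]
    }
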
diff --git a/Documentation/dev-guide/apispec/swagger/v3election.swagger.json b/Documentation/dev-guide/apispec/swagger/v3election.swagger.json
index 3121774607e..75194fe0479 100644
--- a/Documentation/dev-guide/apispec/swagger/v3election.swagger.json
+++ b/Documentation/dev-guide/apispec/swagger/v3election.swagger.json
@@ -4,6 +4,11 @@
"title": "server/etcdserver/api/v3election/v3electionpb/v3election.proto",
"version": "version not set"
},
+ "tags": [
+ {
+ "name": "Election"
+ }
+ ],
"consumes": [
"application/json"
],
@@ -23,9 +28,9 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/rpcStatus"
}
}
},
@@ -56,9 +61,9 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/rpcStatus"
}
}
},
@@ -91,16 +96,16 @@
"$ref": "#/definitions/v3electionpbLeaderResponse"
},
"error": {
- "$ref": "#/definitions/runtimeStreamError"
+ "$ref": "#/definitions/rpcStatus"
}
},
"title": "Stream result of v3electionpbLeaderResponse"
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/rpcStatus"
}
}
},
@@ -131,9 +136,9 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/rpcStatus"
}
}
},
@@ -164,9 +169,9 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/rpcStatus"
}
}
},
@@ -203,7 +208,7 @@
"revision": {
"type": "string",
"format": "int64",
- "description": "revision is the key-value store revision when the request was applied.\nFor watch progress responses, the header.revision indicates progress. All future events\nrecieved in this stream are guaranteed to have a higher revision number than the\nheader.revision number."
+ "description": "revision is the key-value store revision when the request was applied, and it's\nunset (so 0) in case of calls not interacting with key-value store.\nFor watch progress responses, the header.revision indicates progress. All future events\nreceived in this stream are guaranteed to have a higher revision number than the\nheader.revision number."
},
"raft_term": {
"type": "string",
@@ -250,21 +255,15 @@
"protobufAny": {
"type": "object",
"properties": {
- "type_url": {
+ "@type": {
"type": "string"
- },
- "value": {
- "type": "string",
- "format": "byte"
}
- }
+ },
+ "additionalProperties": {}
},
- "runtimeError": {
+ "rpcStatus": {
"type": "object",
"properties": {
- "error": {
- "type": "string"
- },
"code": {
"type": "integer",
"format": "int32"
@@ -275,31 +274,7 @@
"details": {
"type": "array",
"items": {
- "$ref": "#/definitions/protobufAny"
- }
- }
- }
- },
- "runtimeStreamError": {
- "type": "object",
- "properties": {
- "grpc_code": {
- "type": "integer",
- "format": "int32"
- },
- "http_code": {
- "type": "integer",
- "format": "int32"
- },
- "message": {
- "type": "string"
- },
- "http_status": {
- "type": "string"
- },
- "details": {
- "type": "array",
- "items": {
+ "type": "object",
"$ref": "#/definitions/protobufAny"
}
}
diff --git a/Documentation/dev-guide/apispec/swagger/v3lock.swagger.json b/Documentation/dev-guide/apispec/swagger/v3lock.swagger.json
index 20fe3771503..10ca7c2c03c 100644
--- a/Documentation/dev-guide/apispec/swagger/v3lock.swagger.json
+++ b/Documentation/dev-guide/apispec/swagger/v3lock.swagger.json
@@ -4,6 +4,11 @@
"title": "server/etcdserver/api/v3lock/v3lockpb/v3lock.proto",
"version": "version not set"
},
+ "tags": [
+ {
+ "name": "Lock"
+ }
+ ],
"consumes": [
"application/json"
],
@@ -23,9 +28,9 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/rpcStatus"
}
}
},
@@ -56,9 +61,9 @@
}
},
"default": {
- "description": "An unexpected error response",
+ "description": "An unexpected error response.",
"schema": {
- "$ref": "#/definitions/runtimeError"
+ "$ref": "#/definitions/rpcStatus"
}
}
},
@@ -95,7 +100,7 @@
"revision": {
"type": "string",
"format": "int64",
- "description": "revision is the key-value store revision when the request was applied.\nFor watch progress responses, the header.revision indicates progress. All future events\nrecieved in this stream are guaranteed to have a higher revision number than the\nheader.revision number."
+ "description": "revision is the key-value store revision when the request was applied, and it's\nunset (so 0) in case of calls not interacting with key-value store.\nFor watch progress responses, the header.revision indicates progress. All future events\nreceived in this stream are guaranteed to have a higher revision number than the\nheader.revision number."
},
"raft_term": {
"type": "string",
@@ -107,21 +112,15 @@
"protobufAny": {
"type": "object",
"properties": {
- "type_url": {
+ "@type": {
"type": "string"
- },
- "value": {
- "type": "string",
- "format": "byte"
}
- }
+ },
+ "additionalProperties": {}
},
- "runtimeError": {
+ "rpcStatus": {
"type": "object",
"properties": {
- "error": {
- "type": "string"
- },
"code": {
"type": "integer",
"format": "int32"
@@ -132,6 +131,7 @@
"details": {
"type": "array",
"items": {
+ "type": "object",
"$ref": "#/definitions/protobufAny"
}
}
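
Both the election and lock swagger files above pick up the clarified `header.revision` wording: the field is the key-value store revision when the request was applied, and 0 for calls that do not interact with the key-value store. A minimal sketch of what that means for a JSON-gateway client follows; the response body is assumed for illustration, and note that int64 fields arrive as JSON strings, matching the `"type": "string", "format": "int64"` declarations in the spec:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// responseHeader mirrors the ResponseHeader fields touched above. The
// int64 fields are declared as strings in the swagger spec, so they are
// strings here as well.
type responseHeader struct {
	ClusterID string `json:"cluster_id"`
	MemberID  string `json:"member_id"`
	Revision  string `json:"revision"`
	RaftTerm  string `json:"raft_term"`
}

func main() {
	// Illustrative payload only; not captured from a live server.
	body := []byte(`{"cluster_id":"14841639068965178418","member_id":"10276657743932975437",` +
		`"revision":"0","raft_term":"2"}`)

	var h responseHeader
	if err := json.Unmarshal(body, &h); err != nil {
		panic(err)
	}
	// Per the updated description, revision is 0 for calls that do not
	// touch the key-value store.
	if h.Revision == "0" {
		fmt.Println("call did not interact with the key-value store")
	}
}
```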
diff --git a/Documentation/etcd-internals/diagrams/consistent_read_workflow.drawio b/Documentation/etcd-internals/diagrams/consistent_read_workflow.drawio
new file mode 100644
index 00000000000..9886b70924b
--- /dev/null
+++ b/Documentation/etcd-internals/diagrams/consistent_read_workflow.drawio
@@ -0,0 +1 @@
+[compressed draw.io payload omitted: consistent_read_workflow.drawio, a new diagram of etcd's consistent read workflow under Documentation/etcd-internals/diagrams/]
IlojdTRb7hqvk5+7yNdnwrxXzd+6HlWMVshVW7Dhy4WHw5xiEVEeX+dUCvv9oMYpJ0Qjar4c62/jfco/64JILEHJEaeM9ELrfdqsuP6bQ8IGc15q5CgcmagyLJbO/kBn0+bT3QWt29Xj45DfnZq3sxXH0Xlo5OsLAwqVQnTqloMQMRIynIDofZ0efLl4G3hqv6aVPz6OYUqgkUwzDv38eQZrUC+QzEwcicHQ0i7Y6ApPkuv4rZ0o6h5g4ii4cwuruP0ZmuVRelDJwdGy10dz3xz7X+sqnT6bdNNln/d+gll5nOU0RMFrm0Mp65ntOlQ+yH94IFDl6Lx5IhrVYh0JIcg+eyVLIQPx+zeDXW8V42VBOcbFxarYwS4f71l8vIRqPu0E7Pi6X8XRC0ckwo0QIufnaMpOMil8B1yItA2mz796wca/T8WdbNTGHYtPlE/fn/zPH7smpHKHil0MaHJul57/gCPr6DlMsqe2pGur1FlLTQSwkgaocV2R/gW/b6j2WixmyyzFhV8aRLBwSbbCS8a8hjvTxFpUo/LE2N8UaDULyWP0yADaZGXzV7YwHpvcXi4hhJFP1VpZ8p6VUpyOnIqMIIcgYSYHmbC4fwR2sngwE6JCAtlgU1OmiNSSYOSj6ksgAmujzKsCrswZKZ9G+44RCBSoMnUfI+T0fiJTaaFfKVKtzSiWrLfvzcerF6nX+rqN71ooXFZO5oQNZjRZ4+HbfRxgn6RNVj+LSSkiqcnUtx49gNFNkU4CSasTd5ysqmR3pZWgrIWwgWiPqi0bQAT7Do//9Ify8fkVuKHajwGyZp8btxQa4hmB9jN7PlN7TA1wbvFq/tVFS7E6J7HW4gs43snaTiVbjtJnzj9cpQe4ECu4Jr4a6Og+o8R2fbqEK6I8mwUBBaAE04iZ5FJy3hsyxdJxC8+oI93OYctfVxi6qOGMZIBs4Jttaw9sniP3NdNC+vjWclGitNR94pWV0JmDKwhhFUgNJrgb6+KS3R6wQsWtbFGq9CuqIPp7ouLVNuuAddHyu9/NwCKNbKrW3kF2m7TL57KkoOlMWCphDAeOiMXPZx0YUns3k+7AsCWuiEpPaGoHtWd/g/yhN2jZ8vp3cc1pSRGBtMaVKUB8qyHXetX9wzyHmATcH8Li8lmLlWypPJOis79zHhjqU6/qgsxPbjXI/qaOjL6PDgBNhPkX2tb/WMtgq8wfkOfyynltekMDRt7pYk3x1+orPXpb76WFRX6NpmoQ/hrLtwWqAk11JJi1AoaKJBjPEzb3j1a160K7+rmH2vtU92dALcaM2O8eRrpWWhZtDr0XacUgh5PmgPPQThCWcKLgRY4MLabjmvNAK8501N/XqNBi3HUu02zgEGix2OHneXWsafmrsxtSYeMDH8PSuoZQUhj7/lfkAx9SlJtrN8HdgF5Dhe8V3ANbBvZFXJDWg4skNkOr34nFLWbKHAYl4yhB7PjXSoUZeyp4m6PWv+1oFPi5O48Qx2m4fNe7VzzT1HIZpaBZ7GNtVYkQKwiMXfGDzL8e/9x88xR7u4enyqgVygZT5hKUnylYlFxwNW4NNjX+61333CxOZAj9c8fsXOT6aKlDFE2GpcgGZetN+tXMhB7swhFCZ/5XHGNayXwpSWLYUD9WxWBw5z9fsX6C3q49TL8NIT1QFtTbHneRG89jhcfQbOzKNE2VEOeLQr8/TpK1aMVAU+lKLgnJIOvTW9OXPS/0dWqNoX8JheXo/KZTsC/ZYqgdRrChyR/SLjGUVcn8uHAA3TvOdZ4/Sx3AZKIsjBWjK2GW4GduafI3hHn2Bsvt+Mjqjfw8LJ443VvLwq7zsT4ok0wjFEbgr3Bi5Dn8K7CcdH2pcU9aTpskvvsgcvbSLoqp41PZvKCng4E1w+m7Wst/P5DlKERFVExB29ohCokAHrNyXadILuf8VON0UXhTZE78qCdqFn4oCU2EFqgbN2B2jT+NnNv2zc9tTpOtLEl8HWX5H8BwVOHw5NX6Ns13xjmDHe8oC6aT67JQrVZK/gGTWtU9+qYPpjhIGEdmeon6SVymVVS8lFLwic9OC10dojDFpf4OYN2rgQH9O1t7x97jeT/Uic6G5GohhTLR8mgXIN6AtOoCCGMsrUunu6GdPURualJGapLXQHqSQ3oUPRfOLu9a3yJHP+b8gpYG3jKuZ15kB2Y9iC/hHcvBUzGRnxbHUEW0drTsWVZjEVgi20+HIaH/trSuY0epS19ixhZzvs8FLBudulf452W18FCo29T00eyvGJge8jT4Q8cNEnc3Bi47mrfK6HKbJyQEHYQqvwIxpbl6Ykx+/+j08tSJYehQQlSfeRWnQznMWX+FCsw8iH10niPSBmOb7l1cz/jHpmq1tWnSAIgzIjmaUwl56mIYUYrFrFRWzPXtr1WU9qP6Dpuclyqkk1dmrN/P7ACrk+Rc+HGRK+ptU+LMldp59Cv6GnLlGt9/jzOlKZp0JxtrNwVX9BuYu5DWuq4xC0TLSgc2AauaFQaP8dxf4A18TB3abnVIpV277I/CX4AidglKg7jmy7p9HNexGvDjfSG5x6d/od7o/XOb3CTlJuqmAiviLDqfJD9ZGhgcKa+B8+3TtgnRzLK2dkgK3gsRpuEugomLe/fJfOecm3MWH4noeyKxa1aV4z4l5PdPVxSCfdl7jYTGI57kwxDCW0iaAjMS3tWhi4Pz2jIJn1PaH4JWCnw3lU+0lw42AXswzNNpkK4Q3yyM1tA0DbqSK9L64ZnnXhyLVf/UeIHWgkCAquDVU8WdxgBVAEZJgjjUpgpevyNdpf4fAnzrtsjI0l5GHfYCSBhMiRKJnVpGhxMt87v2LQ774iy8LkZLRs9RyYtL0E3RLohixLl7+GYab9uG+niAD5OES7+fKaMliupKMTrZAmpWimDmxuoM9sqRVC1v11ZFHo0bx28vKqr2kLYZAOt4GQYd0D6oLChiuHT8GjCsfd3OJzpt5x+q+euOC4C6b7eKryfLwc4e0raPFuuAm/KRTPwpa6gzj0VnYNVd4zWZSoLe1oZAy/nFu/57X8y9Lc/1OtTXrihSlypgmu5TjCia5GyPVNSia40ZVNPgM6/1SZNvXKDG++XqaPMMg5HUMElWOnX1pyN44PRFD330Z+qkPWjw5ze8naA0nu4K8km6k4WikW4Mj7gN/TqSLDB+SlcqHptBKglDTzyBJNwk5qwddLwr9Hnb7YxGcRSPU0AiF+JTff/R3yHlTne8JKPoA8w7LFSj5Uf2MK17L7+uz+EAmXuPfQdTgv3Wy8Hr6DvjLkdZ1LrLbboq15LBHcTL3NwhJSFRFhjoTroLuF2xdXmtHwi5OBeeIX1Vu0XzZmFxBG9V9tPS9fjwNhTdxuLccRIfsYWMunZMc/5ya4+N4rdd8Rxe/ZZm8ZGvjBxtFTDCDkfXP11t3gFwfDn9RzQWXUIokn7lGfIJHfUeoYrMq7CMrDnEDr6iRgoPj/lfdxsFqditnEqWdWv+ZSlrtGqycB5gyvzT4TMtMptOsDhBo7wovLZui2KWnLsaTkLb5XflKZp/uPIfBn5Z+gBQ3ch
j/bmXzLirMX+mLDc+xe3oOxEU2Yy2mErecH164og8NwM0vxRXKUYLgVldjyhypiELxQGexdrLf9Xkf1PfqjBuEZSU/GEQaCTX4bPJeszK4bhS/8Wq/0bRF+mqzAWKkzJON892LT2TnlT4fFqamVndhkPLo2NLG6yohG8TP9Np8XyxFFjTIem4eoiwu9NBPTne1eJlEyrz+qleXYYMfSLvk+WKwtbygDbaCghsYHlYpSFmqAzRMdAnNF3WYKjVStqq2/2Rm+WTzbyaJO7Gy9LWBsyvi1KSRJcjrUM35VG6cBR21BXwc6SIXFw7dHcqNNd0KDJP3RP9NqxF9LJ6fIo7GnUhWSrm8qvS+Yp7K1BEc9EsXNeIq4RxvDuQyblV9iiBE5RCVdAGOWi4nANbSRCWCBIryegvZyY7wG3Pkxx0oX+//7qy01tqM6nL8TY2mWOLFgyr5nXzqdiimZ4axlt/h79bNbKGnMzA/88q71Zq/P0aitBV0uGSN4UbAPZcL1IXwmOY9MRhqAahFlUpKbb3PLs0SspL4B2E4wFEW3JLBfYKxfrKjydRK78+kgpxmJQdDOiD1x8XM1QO/e4J7NtJ4M4zHsk4SLcB1Rqdy9hPxlxQlxJb9rv/5GiZp5hk/Q/uvTRS3pw0ovyLpxVtZ1IBRoSFn6yhjvJEcGCTlHtJI1zQKdVhyWlDlwQZT67gQggj1siYUJfSLIGFSdq8M2qAWhyy+Fy/fETrfr6rCUHhsIzfcIJoiBUdH2uFZRdZBe7RVavkpx/XfDFwK5n4hEYZjoYQlUR8fHmTtQTAP+BqPsoQZ60b4NZ8Ej5nZkBOjsxwVG0SLNQK2PsUKXmCm5pL/MnUg2FecKGDXPTO8QB8udWgeq2ReJsy8hnENCyN1yxUgUfZZcV28aciD0LRWXiA9PrJ82HHdMA9zYxe3M//iryJFSmTOYvxwtos3bgou5xvUx/xManqCcy9Xed9oTP/md7YJq2RROljl7E4flQ1U2cLFfhxjEpgfPseMcln59yZMzvHhs95pio4mnxWnmOZfuhlCsnxLrgNWoppXKMTGE/u8SotJPOFrwGuRQ5jnFVb/E9NnYCaM55j2CKKcQs6k/RsBI2AaSUt6Tcm2CbsuSzfPqfFaoviRchn88zfWVTZ/MXqArPe5sQi1fspATWl++Wtyk6z20MtMe0YJO8wnEb/AZiq6SjEJ+QtW6XUO3z2gDOYSLM9pTw3lATgpESJD3gaTz5DWJCCiLCjTN3GrlIKPmHhSUw8GoLyv05C4DKTcF7e7nqpw0sN9vkGTBocakPz4iTbpJoozSw7zM7xGVwBuGdvB8eXVj9zL2C/Pbv0UoO5AsHJMy4tF6ermCCtNmfOR6k4R/ngbeYO7oyJfyEjGrYJqlYdKhXj/12I3AYpRKl42WdrabWcQpOYRS708ijAfIDHZKmzM11ddQd24B/jtZW4/jiyLlW+npEopK8PKdIKTRVl/uN6VNBJtM58mob9+w/G7uIiH8S9jGyTu+ACt9mWNB6kJgaDx/MN8T20GLL5Dq8KKQaYWu31g0vBrqCDAPxwHcSOlTUeA1NobiscEv36/Vl7hX0m3tOw4VOCfKDnTIYjn8p1L4+1zcXdUjdbUePz3b1YLKWc5jw6SrTzNR4RvbKl2aPaT2WFxARG+Qa11L0/VXlAj24vnJX7BnL+LBA1PSEdyHas1ETxrt8vFbD/wpQ1QPkbnT6FRJqgQpTfwAlVKwW6py74iMxpeXJ8KvMh2pHrJpulwQLc+kDPz8C74DMbdbNfzKZSDz6X0z/nbf15gGmZIp4tqdBK2l1C25b7jqNAcAtN6mb9H16W4CiQT/nU4l8EbvutkRPmkangjkK3G36/9VBg7kotgS5ZXMRO44oc1s0UEgnYcim6YEJ1iguvCH+IzGheqQozTAoD8chgSD+VxrJkQqUNNG4Nn9IHJgcTFcAAnItWjvgRRbTaIPIgP8aVr4Ko68Xleg/H1hDUV5AgpXNIQuvGgyNLJCBlnqBUu9BKXg5BLCxZ6vmPghuIIEGaBy+h6+a9A6p+ELDNd4HmnFQTfPL3bquzH/5INBW75itITGeY5vEr7F53dAoWnLFHAx3g5Whr0q8kDED78d52I3dWy775gc8gHfeCK9QGp9jLm5A2SvEQ1BmKKo8eGvlGBqHsnDWv4ijEPhZT9yeFRMbqhvi4UpyEEhCApz7Z31qp+n0n/D7MZrPilssb/iF/4FJVrCdPX9cVL6SLh1KKC8VFscIFnLd/3RVTojOKbXtXPx0qWF4KHV40vE/U9ZZVvgWUFeeh6tHAw8b6Xq6/+eidyohuoZhHUBIgS6YUDVoQzYILgjxpw+auQnvJfOpClBQLSt8D9K708aIfM3g0CZvP9OQWO6O1nPNIjMzD4NQFb885GmQomHswP2GfvZ4Npfe8R1nSyvLof2wThynZG/n//5l2Tr+AAIQdW4uKxML3X8i/eNOUC4Pf7jb/WKuneYZTWjlKqBrI1/Np//CVENl/1JDKo95Mi9nyxIHjiVBRMvettsRBWS/pDSrF/bmYHKf53k3basiSlqzwf41dQ3DCYE6gQeuE99Lcq23LjalSEef0P4uwxw53Pwm944n3GF/MCEzFwC+q/v76raNooQeY2AW7674aQ2xLqX6EBYA5RmnzVZE8hcXiFR71VHigIVpCfEeAfJs/D4n1JhsMz8o7u6Gpc0DlHrApwMYr94cY/q+OyiNRHDjwnrK3o6QgrPEWdTDOUH5pHY4N32Uw0nBro+ZuwDSQY8IjwOJWcqoJ7deJMrsU/+wKMJpzV53zZ3eDjYEs3EqHHlFJ2t4DHRYAtSCasdslvkkatScvhrEwTM78xBEBNqr3Ug2Hpcz0aAoQ1WT4VUb7doncvpGJiVZu/f69o7X8vEyceRJmcA9vnTHQQyf5+yh8i3cvKvipSA+sBYI3oVItSUQnT+u/rd6jOpJa+rYaKlIeVplgJNkv7Vuqj0txie0zwra/+E4QkcNYcPEZ/Ge08saOJq2TvW/xKmdJ9F77nXmAWs4qN+YRNsM/cFvU/Zr0ORG5ii0pivu4WTFZAzIgfsrXWXUW4tN8v2o1m1dVm1XI5CqL+Y+oxsiT2VcSK53etTL20Igtciv1MhdZ6UAWlrQSvdppUXD4kxm47W8jWVMI9I4jWiexH+J7RHF+k8FLDksRokv8zy4LLWEf5tzuNiOL+taMljoPKiMYIMWnpAy4xrIvAjtJKFxDTOH/Vl2UrmZosh1tQqkB4B8I2WWuOEJCGFy8+VWV7IMbPqg+MSBg6uNxvm3B3ogjDx/C6LH4hgP7t5gRXlNCNYLd1b73VIMQa+dH5qUw9nFkECekZATafkOgfIsg067Q8f/K0Tty1z0u3F7lIiTR/oBorFPswOdGtGZo26vgcxuhUabieq9Sm2uDpuYzrrOLen/IGZm1/myIcWZ6XizKoDtk2mK+XyrO3o1P5QTMNxJGuQeFifLKOkcof9PyGMY3xr2TkwcsG/RxBtzGjrnsj+aP5JCpnWywoE+7/Zb/Dy/YgTpaL5EsxkUw5M
IAmylkWdNVTvv+kXKjKD//VXg3HVrxF0TlsuuhoyLPhAlZKQ+GHMNsorjjzY8cIlzgRSxdsLn/Aj98Hk/Fuo8toYvCGRKjLx+MN5wFHjfHQRstvL8466DFFA8E1uc1AgKjIUffPnYBTrEDDQmInGYNFxFElHjrQnVzclO+lMnvqXi9DBIqQEkn530EXGSfPNY/rBnoAqJOHARptZPD0Gjju3cx+LS4OT6MwQ+tyuCRTOP6DNj0AXIqgJjNkDcpiPXfrme9tJ+QGWmKwu17+9fCP4+vl7RfqZFTnL8QVRYakfn3y3vTwKGx2/2S14vGYkvro38yUNMRecbRQX8GC9Uj7tPpQeFeRAAW0h+EBGbgOvOp9gF/PSsnr+J+g1jwXSJ/xU7CRPs1EipjmRRPqfpjmCZLIgZ888UKgwL9uoyBvyFMnL2/M4KPJFEs4Msk+eK0v9vNR+htapzPaAZsuTXg7zleyFCPvvQsHYbatKPzyyOZYmvPjqPDJvpBxzsVeZ6C0t7YBgPKw/Dn5bOXVp97SBlEBSgmFqC3qJ3eHV+C/wvYqSf6gDBAajF/liGjgvKJGKPOSkGEkCINfncInRxZSW/PsHv1DUwCYhsjcU4O+joYP/hCMzG0YSZFZXkhu/h1Pz22GZcgvp9g8TdOmRKpW93waFHQXp1wahFMNCadhcOOQ7ToQUbTvd9UyPzWbtUIknRZMse3dl+lmmM0glmcE8Qxir+KcXmO9N2hR3OcsOldpqtvf4PZRHciHQORrW5k7AjGnvpyLknj6G0eFnvlsOiPgwHRUq8LDbFarhQt4ZTFRHs1xmJab9V/A4luc9q/8XfjHR7MhIUBcYnUL1S7jzR6wyR1x2HIjR7Hai6Zez2P68iNX3vzbAgyXtstICDD+RByI3ZpixiKd/sWSv86vz1P1NPOR6wdRyb/5HPJe0xRvpAJnXphbcPPgDRXxD01f2WQ0ryypqXW6kPORv9GoN4TjtZwyF90PsSsB8nLVhE2dMjpAOTBF/e2sO1gsFJgg+df/uITx8lCAfoS63h5n95EH7Hcr6v2vK2wFW9UqbV1cZEc8WedwQ//IzStHoZMRKymPX6mwmt9BVWRfxqnopATAGh4vaR5KNGnP2qcIDLtjOYPPQW50jIK1nKHPJqgf3NrYiYQzJgo455IvywXWdSQ4UCju38VOIWX0C4GiOl01JE6q82xv1vc+Ci+YL/MO013Gj+g7hpaSyFkB7s6JvxefC5hdItYZ7F/g59NfLcQBUlU74A2/rxT/WNp8toqpoxFsb0j8KGNzaN0MfOFItSjyVuqk/L/RqaCgNlTjtg/BNU12xQoOU4cv/jfqLZN3IzZ1JTaZWPo2XHHn4uW6jhaEdth9vGzQR+k9sUAXNEVWfK/vKX5mZz0coCKH5ohKtGQo9yf4AKWIVn0pWiJWoLI1cm4WBpzaqqngIjPrQUwPPc49WqOxK4WtnD5EUBwTHfO3GioiA8/Eu2rB13ba+QiDCrgKMekeug+QenQTDjfHIKi6hQhtTfzbLM05PCW5em1CTexcJBVCPmxjWuOyPH++j68hP9fSn/b5Rd9uRIhTPMF3pwHJLn/S8V2keQS2s2qYT/RLjajOzoHxOJ3k1qxHMykOuIwQK5/qQqHiPTbVT2naXYjkH9XkKw1c5q8cRrYiJOMmtoTNWASb/en1WX9zBFV8ernhz1vmly89mUF+hsuyf23K77ACGO9YXoF7wI1qJOpSo2Dj1hPLgfC5ItemqcQ8k27rRRcqAmMCOeylwuZvsXQsO1ZJas4JGjYFoifSPn+IFcnafhHtuBk59Peg5GbK3cdZg1nNiQIsKyUPJrHLRMQYegEpH6AO6Qv49cZCyvLvRtgFUxVef8/73u95yaGtZ5/4eR91Kep0MGXH8FPxJO0kxQr8D2cgDCIPzVQM5xGcLj25oCYnNTyuDZxbPLWrzJWaaeggit9KGN06MtHzQzvFKFKdfDdZ3l8BgWAYsHKaHakdiDHtpfHgxF5cVACQkQMkIvJUWRhYH4uVKAQys1DT4fIx4JELUhPKPTaWPUp4TPgbKygzdzpL0/GLt8b1xzS5xNT9FqiRF+Q+sQq5DOp2tldSfi1ZuLLjE5Vaj2Lb76+EytJgSu5b4C6i3MwGrwpCBYLqdpDSvs43or+l2PvVzL2zghDqnWlHgFIkuvd3p5PyTiMeg71uPuSvEOUQM5u/D3aZqPuNyG/qFeAhN5hczeWLcpfBsF463NFWVrZzMHymRs55ltxQ9XWEhE+tq4B30xJlEyn1NWEx/RBW9jKU8KZaJyjqVrNwRYzx1ul60TLLC3u4ryimup4NiErJt4HfRqrCTHgAKGdvO7Bud4OGrIJlsdfxwPhKvxQ/2q9b2t/UgA8LyWMGJvRr3sOFSZvYjCy3QzBbcn67PRN8G2nHiYdhWVOAEJlw+ZHDTVQvz4J89K9VAm6oX/JDLkkPVRL6m4K6UTQnbXwNcx5gSbjzI6pJsxX8ZXWovHLXH8MGP/qm4ldiJlGJtw9MjYck/9oS/ovTfDSnNMj9utPvzobgf5AGXqvMXb++fXmZlPDHyQD/BvD8cvBQJyLqb+ANmkPCqK5wciKb6nH1tQcks/7FSsetVH2uwW8cIkBuqSb6jugdEWVnf+QQzDSMGrWQhpAunYll6LKZrt4x3FCCcVDPXkw+xfgQ+7SSAdGwLUuGWecoxXE22NVSdPl0a/Nj7Mwn3ker5/lTaBhUZwIlcJetX5FJNQY4DPullf3dPf0nRLPj+NTFmYpTZn/uAfIJCMJBhAFERGh1bhQmoGTVIDPdQbG/O+dOGTRogreM+nXhv0mS9XAefPld58xkyggRvAMxY+dl1tFKDyY2I9ODs3ggqyCDlXjKlm4SvZ8goroSyfB5utgIppKvsNzEbPUHfzQ9Ymb0V63ZveuAHzU6yWbe1Wat33lZS1ofSfZ2nqGPMDpm8SVag0CHP3gshxWmcN1DWfffPm16eSUGccmskV7P55JqOK/Uj/crO7UidgC875ml+gWW8C/Cj3/X5FmdJqn1PBwLEy6KMq8ptF9ZB4INPcOsAj8Z4fJbfh4G7LM3Nf15+EnD1wIcV4jlgZFQp/D5FmyIvATCEwCp+RvcKHTlRT1/XV2iLgehqPow7G1l+25qsxOTLlodpWviaYoZFV9373ogieg3C3nUzsb2S52/wabc1alZ/jdQLoR0Pkf1tC5svLXBwI9nH/tbRVxC7azHbQ0LLMWhXgrpbLrmu5qemDVgXnRBVjDGS5E651+VWJxGC39GgR/09cQUhPWMaWjwKw3Q3kmp/U+2aUUEFE7leM0Uw4T3emWkMjENh1nNa5X+N0emZ0Yibzz5D2Td5kCT8vIJ7svxBlAZ87J03E5Tq6s/nj//3d7dPfdIuSHEagb6a7uSOHctfYRmf5JaQZeAekmQff36P7sBprv/fdQej6ZImke8E5RjNAG8LFPB+ceNFlh6kWqbrhzcKOu6Tqb85/Gx8cvYB+uc/aSIYPmEA30le3K+/HbYwKmgVvWV1f1fWEU+ZcQoNilk1433YAyi
ZRQ6iPYvOeW/lvan2Lp7TU2iuacq/Luvji63C9HQhRFklqs/70Z+wooWNGu1N4/SkkIimDbnk1qzK14eUMHGhjfFVihb8Ks+LzLfn8GTkfn7qkrLGIYa2uEpsPguE2/T5KjDJGu/ro0N9jHlTy8ARpJb8GeYO+p9MfnzF8NvVqAmCHCGqqt63/rZTQFr2wzv+JHKUCDiEi5jXhg028l1vO60nmCN+bPRtWxqy9L8lK9ZuyRWlKlSE8+/1X89z5QQA4vhZ6zMpd5nvLzYPqAbC/KqeDHf6e6jwMRaop7/ErXccXxRDV5Oaj6KgVNkOTGzMCV4k5bw0h5f3SZjN1+/9V+997IG4cO6r2Ksxl47XoJgcR1kaNDEMc42wA2BTV8C3j4NRvXZlybx4RHzdR6AzxdNSN26L3Z2yTfp612buCDSBzxkQACOXcips2KPP34k8ndFGjsFjMWttYyVhz4cs93zg3rkfrK59ZDyBT+F8AAfDBVnNez8pmJK3VFLT06e/MmRtD5nSliUVwk8O4HLL4n3AxTybp8InZ0Hq8LigtZHR49VpGRbwYc7yD9HDfC5xUY6/EbdKIYmqbhQ9QlPHCrElJzkqhF1iixGVf5y3Xp4zpCHPry94hX/kWST2Zc1Gj+9Tna0GOSe2KAJ1HJnuLiEQEH+dUlQyEge9oOnsKXg6QgMFwVeaN77mbnFgov0Ged4LG2gIBEpQxUwU9u5GK5Okq1oi5h/P1EGXoNVvr84ET9Z8Ze2Q6bo/Jzh6b6QHTbC/hmB2oa7R/eSzDlTvkRzxES/PCFfiFFW8GMbgM4fLmiXIy6s5bd8f5UAqv16xB/i7hLY4qf0I4XEuUHs5ycTwFtNpEqD89j7RIMHdZW0/rUihXUwUrcVBb9aE0UhBul9DU11fl/oMy49mp/EOm2oAeWf8nMtr4eDPfOc5cEBj1+Rlj5K2m8uH5/VEH12oVEBx2Rt8/cVimvywJ4HYVYYt68ZZaTfwYmrIcefNZTg9SfOlVGyO470mIPUTgxrNd4x3eIXy+op3KaHCT1Qn8yeer4mJ3W7jCBSpH5ksJg9EiHqRyXAB/MO+um21CUQgyMFbBK/HtX1uyd8fl7jn9CuH8RwtCdPXUxYfeAvGCkJiSErg56jIAHHGqyguvMTyGGH4wyOgC//K1NMocMdfj9RoRI6oHMb8Wua7Px3lQSydO78C/qTiEv+njTtPnhdvYGnEzNL6Vm0UUx7K1GNn+0rN4Zl/QKRWnyKrLM/OL2V21TUoICxSE+dhUWZ31xQtmkcNKsHj+R/DiyHCZPgfjmaKsz90u/7qn1doJaUhMjZ7CRHbxocRb54rFT+FttjX0ZGU+jLROC6C3YEKsv6MlTy7v+hj6StUMycX/9lxrJ5KUXkczju//YarQhmkSHDmAmS4HFUNsYjPBV7iDhPxK1GAFbk0fYeeI49y45x1+0R+d2KEroB0IvN5kWZnj9HTeB8vDJIs59EBWICdNWoNtxuJZOLqAKJ7XVvGkTny7UXWFO8D+Qylo8ONL5JSv6by6UDxU2d8g8IrkSBV7Ns9dQcMBRztqLT/Pn4ieRBbAS8Xym9EogSx1AKZXuyDuvQZYcuveuI8u8Ppxh8iBmEzfRL4DEUBDFR3fo2vNyU0VsagrG+nGu57I1HnwVD91HJYErRXhlCfHw4e4kOUh8lOoP9RWzxCuHUwLpPHyUtgQJ64yN3jctjQYYIebRl+/Xzs4Y9N8yLBvnwztrOiazZzbdHVV23enDevEdqZ1Mv/c+Ef7PMJaQSvwIjbDOyn4YfG8tRpgMXTy5pQ/Vb4ReWIJiCyXVnNGGCTTDzYEeOLbOTPBNQuj3J9kEXTMrQU9Cmolet4MY4iFQ0qxVbVlRYz2KIvz43HJFdQ6LJ8u+bglAdpBfNgvxivoVCE1A5vGAc+SdByY7T/M6yBlNB7jBHeziqjGouACClVI8szAFUPQslsjsi+gR+ocnHBZxXYSDuHSE2P5i6SPnjV9s4JSecLa00Ju2wE2G439W3hj4ioEacYk46gY/jEGEzk2Dn11O6rEFMR2NHo9spFUI4Y1oWJVf/xCC64wZpiiFPEBWkNMoToEJcZohOCie+krfTLVn26N+5v8eSj67eyR279O3J+rYL8e3s9PDGhBHa6lY3u8cWO6OL1gJIz8f6ubM1TyjEHbd19IDQAKZju9MFkGjPldLNYBFQGoAIxeL41l4lo0EoQ5H8hTIthKq3Ug2meV4EJpDNShdQEleMN9+v0Nyd0OFeJTrbKaOfHNQdKFYQC4pxaXURQzLx72ml/kZbCVgJBtOCHrBiyQ5/1PXlHplXmhSSw3A3BDYIVmFPPJvYnSPlg6jH6xbYZ6Wpj8hTr8GTIwni2GkQ7sh2FT5D+As30aSp6uHfNeegaqOv9a8mtbiO+HgplQSRIkjK0LfG9sdFYfmvw5nJ0GzWVOg7lPOboxNEz86ZqMnIX7ZnvSIvRROpnqunSvFWyt3QawhxiFgLv24IsZQlAFKeqZoMPhWIE2n7Ykx7cmq7EaXG/esvVw96FQwAIY+Xt924gqB79dfAly8NOFLBqTv/vETRJ0nzQSGOI2jaLa/zJZ0nyiiI/PRG8OCXuVoESemxU05yMuwr+HhdVTJkgxTgz4kA6n2vrdQus8PfXrjmTcbbSYYgJiDzmWFGOGYu9IrcnHp+TSx9YJleCFBxkkEhOWPLC2cgYIQD/hN3ky/nhjBkW33hgPiDWwW+WTIs55j1FWuB6HQHjSxfOjao/vsClV2sCLHGyIRm0RdQ/7LaDQL+9BssG78lS0KZvqWLVxRqoCNh5v7KKABFzRDhlkd2vn9mSS8K7UtXvy3jnB1lSdrU32TZ8HbbV//ffm2DVwpXWHO8Z1zmuadFJTk1V4g/r/cCMsupDUYqkRolffWelqQ1JHpNZl35fUd+ZrKKsSUGWo+Wu3xYi00dIrdUCupPSkEe5wq6I58+3eQq+ZptcSQ08ddgUFfz5ryY/BfX5Q8Ujo1zsYb7F99BWQublOstTjsGvm76NaXRpAL0hXuQB1SKl2IabOg68IgOkNvXOCxtDeSLRTkWk1sbsjx2z0EyLmLUq90HxA2WMLWE+T3yFPBJIp05EZNRDqHh2c6BQ7VFJdib8fpmEGEzU/Enym/lr7q5p9sv7mYx9YrgiBZHcie87+A80Gbt/mLPDsa2pYsLYy6/Z1QuFwFnlPEhdfR+RkXsrpdcNUm2PL6GvUYNE+0r9j/zRe9k/8J+Duc0Bm/9X8epmyw/KUhNxnirx+Qc5nyiF1zz9UwTyNyp7vl3Q5HKdnDqg5wg6fBtnVkM9MqIZSGV3v0klHE5gVrJXXOSZUeknzJYeX2x0HF+EXP/4Z4fUKCwAsQbf4Tm4t/lkOyz+8ygMF6gnGwT2f5Er/71BRcRDmR0lD8dRJeZ709MinJCoi0BzqFyY4sWkDtdgmJxPRhm2GnmXziVlcC24w6f2NZzYn3vMuuTQjLbRn3GYkZVfnZBnYhxeh4HZgx+AFtgACcnE0DaQ4Ddyym8qMvhXs6GSYH84dpX+gBbX5P+Ovb
s777jeN1FlL2M34jdF+2Wi6b4gvyF4azs2PoJ9gBH02CRhxXKYxEO6p0Mu3FHHg0vjuhIy8xoGWH1GbjnNQtM1AVmEmrGotuwDi7EyFNL56PnkDpvNgjQichC/IKP+AeWKN4vOSoNIEMV9eGcUnkjVtPSItU+KxLBaz0WFveqaJp31b/BEtDp4F39w5mtHGecIJRuEN1anHT9745k1hr3nnhqRdm1MKIWArjLcyV2XWsTJY1T/gh/wAlsO32SLqy5ij0VX0UoxDMEZwgxXvpaaAVVYARELRc1X1kYLj908TIm3jubS8/Ucm7t2/k67Q13r3roK5oRTkLAuyvLWPqCiYXC3M2YMVCHFTAJWmDxuQAXIKBN0Tjz9VHi9Vx3/SS6DaTBQ+AEFQTLCqfTHnhFyJidJIibwO15L0MPbXMZphAkaHgOCkArMKAcpFwaQ60lKEUWkr55MNtApFysTWFilnZusLqcDhBtY4vKT9CxAQ9zaIUsiczsehcVKjXNXwkGL1MFyjUPp97waXmKVdbF66So0o2jneLULGt2cj+n1MDn7695pe3X4+f0SxxPlR/0VSAu4kykbyvZdPZS2WK67vWQrtvWeSsbJfRx57kLdstAhCmRwEepcoUePbftKQ/jl22zHlSHjUls2mA2gBh1C8IJiuoH/DAdcX2tikq/rM2BnHJ53KgNxmRncSRXw5f7EDpHfrcJ+CL5iARbmUeov5EwEa8E+/NCrdlXeE7aoVAICh6chA9bSaOIWS34bmOj3EioxWk0u4yjv8+Xuf8AzpfYTuOVRuJfVjE9+C+mhWnBr2fwTPzoul+lRARP3AsefU/GqDHq6VpaRXnUvGQotjaaliyYQumBXP+alYDoZx78TPIVAmlvhN3dtYqL7c+c1L+5RznVq6k5Pyphtift2HVdgYzl6T8JC8Ll+frZyiHYf1D1MD+SWodlUknjNzMBTQcNXq4oNN0P62c0g5gjCVRxZRILiC3do7kq6xKu1azatwWmWAm4G0aj5+4u52NrVtl0PNbK2s21T/s/osYJVku/ypf8iM7YrzkomGfp6ft9ZVFD5DekMrmumWH5Zx7Hglw//9Xw6I7lujngvY8Bh6iEQ4DoGx9UAwZQuiI9oc+ZWgL5fnb7zX9VKyi3FHcTrZt0L2VEfwEdYE8WYDXW5RdgblRzEx/zNzYiJjegC5eY5cixsd8fY/5ML1zu3b9T/gYPMc/tq6Jm8dBnXQL1ErcmE8XinjW++LmKndMXNJkdiKBCKn0a7OhV/R7qQ3ytuyELOMeFmoynewn9xMOVUF3iJjrCAlN+OhF+JLC0NhxFaX7mU3HzyYJDk9HQzveLlZ0oB/AAk96F4Oer2xu67Uuf2iDcIRWB1Vs8rVbX3nNgEjY98UuxaWvKBtL5LY2RqMNLj16PD3jPFhuvXCQ9U1fNT5eflOk9KsKsQ7ITlS3Yr0Z7X+/zrGjUwQs4x0LRIRr1l770e61p50LtnwJnP/sWbA7U475hYcpfWbbT/43ujlAviZhu4hCO2jngQjQXfcVB6WbuT+xrsmzikOTrGmgMUEMBPD3MUWhLoZFGEHQ+8Qm9phhJGcY0/0St6ETj+wLk+BdtWp3YMzYtFCBNeXGJLA8BeMEEzUIPmiKfxUqDClKpOma5LmALA0rg4P2PUP69BZQtLHV5ef9+efsbdUFZT8Z3mZkBqGMHhfQgPpK1M6pGLJyQ08VJQVHywO1B7ZCYirD284PknMAFC4bvZIS/e9yMedSSAfCd3BcDf93fHEOJXFTDv2y/SgfEFOzwjVBVxi073UTyLxMREhK4ftk2v3KZu6Ef/zjHv767jxgtywalsdRVH3UlH6DeoG6x5L6tcaY6uuQ54HI8G2DWWX9F9l88nqQF6jHKClxXmkGvyJfclc2laNVB0jGMqP30oWpZ7fbog4JanG28jZC8v79chLRspktZXNPk7owDu254JvMi91fmRbhKntn8B8VLpG7qtd6Es3VosNG2S/n7YkxIdadIcCuxInL1i11Rq8lAu5WQtzgyQetXJ5b93QSS//uM80utS3NIO7ZzxvUFKnAIjp3FOl1kEav0NuTb3TntWuX1LVAth5fQw+hTpQmQD7Jvzxy60q3Nlxddn0O6Bm5NS0rmwzWY1J8FBKK8fp5g2rO9NA4rArEB8WQJJkj4B9c8qAPfbivpLDJ1Q8jZewYD68TK+xef4ddKA9daxUQ2xjHQAjg3F2WorzR1fiOZHXkJ4hoJWZJ3DB0zbvHoQm+m7UnUllM93LG/kH3fsYc/nTTbmgIdZrwGXhmLswuVqT9jtd/2/HCfE4ihhmpTmIn382V597bnC0s8Oytmq/wF+kaGB8VVqYNsubqAmOpaaoe45A155FTeo/v5xTY52XoDOtRIYtvfEtGmQPu0/HxCdo7d9LhIHXgwAcf8AFcKZn25QjrsO7q/yh2evj8etfI9BjdBWcg1dleZtq5Jq4DXiBMIRVGi7uIkzr+WSINeszJBTfNMlo/6Sc9XJTV+6Ou1/cf5xeLUMFBl4Bl7Qi2Tl04Z5a0gMb389U1PoeOXOwX3+RzHwq3e3u5Sct+LQxndMmehG76H7lruFO/sRDAGtON35OrIPTtSrj81Ys/bhb4R9wF7lsTDtCev2KEwPoLgJCuMH2vdNdtjq9MFsF2eKm4Olj+ItY1PsfaouT48HS6jNItdDsRihfvTVbfaAS0izcCdpQqckUZlK0vo8LE8fssAa4xB8M2LQShCAbYXpJ2hnDGMNX6KeI9hNjZNyDiYbCgyM9L3ZarPXJsdyfcI9cI8lxHyNP89X9ofJMluh4QaCvEUxa/rrUDvgxIcVksfS/9yc7pdjvlDL2ADHvilSJfU+Y4qouFIPueHXBHhJ/28tV4uaXWuaNSawD+J47vdYzSqSNMCn6cCFgumCyiOUUV95GbI1+0IynV6e8qs6eXQxLOTEcLHwzc5N3QjKs29SLIN/m7rgrHWYidaAwqqgYJnSv0Oted5MDnEskIVzR3UG5Ue+nLgvViv+V+pMEjSWeCA92TKKJXn5eNXdeRHr4ei5QN0jxntEkAN18ElPvJLSjWV/fTwQpL9IWFTf9ECvLtIXZkxV44+at14hcaIrgjkEjSODm6oPd6fB0IVUKeDocyGKhEytt16zvvdi1QB4pMqON5ioNVKNxFo5MH5DqHElfi2UeSGglM8QvtWyGjc/PFwdvlUbBXJYCfVNtdrwoljmWbzVxZC2sbveOTCRAO1rJ1GYnHkgQxeog2KSWRCeUp4RvbRTeQa/MNiMeuFCpkv27GPNqWFS6E7PjV78rYx5CsU7AX/awhWgyvG1wt03ow+iH6Yf9djpEjwAVFffVTeKD/bktc71lpZ4QfQmhC6QShOPUxSc9endQcLqss2G68U/Cf/ETMIU3ygpVTGRClr8cCKCgNRH/2UUy9l/GsX3OexI33rzaPGIxtbwe4qMqFA6Kc9FAVaNDIo/iJUMxGA3aX0P91sa9vINx1COBLtQY3U7Xjxw0l98Sw7POcXqv351BQxFc4oXMy0ulKM89K6wxv1LP
SsxfgeL2XaEWfp6xNkh24IJTw4wF8t8UKG68C9zUq2t962lBr5TXyEtJmBUjwR3BW2nHuxmRxpVfqzJHsKdzZqPcMTaf7U2P/lFZW2pV0fV0672U4yQP/O534cVxrZWTDRbPba6njHWFOADIHJKlxCA2sCoZCWiu+DUt2DTidwh0fsuDr6LhEAiGcCd+dNpLTcERi8Kis+KLxo9Q4t4bYO/HoseiI+x8oZ0Vm4XliDQKjymlLYzYQTArVVnDOZLftYFJIARmrzWuophSEb4KLNDq+tR2urc0BZUG1nYmQSsuel+0r8vwBsYkpjbHP+OL0+lgIhe3JDfUdUIQtb8IInEOz7fPDSRDU6ZAt3wl2BdmW6x3oemmEy4j7dF7xG+Zf418aEKv/ujbjFnqx/nfyKeT6wb0EpxBxdLhcLQKsmOEp6ABGkZYJxEGCiKuwKoDUirTL+p1PIHWUPGJNFwhF+39gWAPkrqqPYI0a0GkbKdhMRRjpcRCXiFn7Eu8/jEdVHA6QN4NYIst6ZXigdYYkOEr+Q8P4DLeehb0HTpNzwX+sZDG0xwOOgQrTqCe4BEBH7jIB/WwlSJzMvDWESnvluyzGjLcVWdTqf67C6vhTOmDNED4tdWZA2MZJ/X74kbDC+jq3C3ft0Cwh2RjVSL/GWKCEewdmk8qGrl+4woMCrZf/P0nVsS4rswK95e6AwxRLvvWeHL7y3X//I27OYmTN9qm9xSSkUISmljpZXyc54+wzBQ6qvxHA9WUm1627Or75s5j6pVMTHPl9x3CFUMao2u+wWNrnGv9k9kCmmPzWmf3oF+fkNmn7Xu67hr9we5NyFv5JCEnUY0A74ZQlySHu8NdY43KNoHnpaorBW5kYdU5mEQH93EDytoU88iyzZqi0mM7bv1hVN9Y9/8378lzwojWhKzaMSITPOnPrJagCWa33ytxRgpaenjlPcczSVKj5/Xok9B/05ZmX20XH3vA10f1Uhw5WYbf5NgsrZeAvtSZhSV5oFWjNtIgevr6f5nLy3E9UTsF4I0ApVf4TkWbn5b/Jn96nzjcwLvnRcTzSCDeQ1Du+bbS0UZ0jAyjoF4Otah4/r6N3DI6TDlk8MH7o4hanU63N6fCzCkjz1YCLQ/rkRs1cNnizo3H745sPKrNV9dtP/xRjHqsF6XPe3OFrlHkz7CK2XdIgxkBxTmNPefiy/FMK46GA98q+zJCI9tKcEfzREAol4KZngAVsuHO3GsMVZyihL1vSSAsS9aRHu34ew6g0k29E4IPQjCP6kQeAARvYU9fEx5kbtN28iv9C8RKmb2RvSb/mHTM65NirxdFwsUA4fR2p+DzyTeJEfDrSZtJBXqZTHC9QujGCuA2jsTNug7JNf08x9KshFUiVmjSEqV/rH49SCoQSj+KsDIVIpMcRXtUN7t/MHzg4d1+8nJxTyPV+avsPOnaNE6L/sx64bzV7RyoMZt35/Ll8zr2XvJpTfIj82ltTXpAkT4qPC0hzfPmsF0QCZpL58y4sAB71Ua8bSIkfhc19If/W6Iu8Ihi6JahABjg3Rz02qL/GPOhzMfJTrwm57QtZgGwKwBIqlm68wp8qrhIqV3xagI3+4jOkz9+WLbLa/f238YzaFvPwD5Hodc6lavd6BFuUmSUIW8h5VjTwQCqM7yBY8HD0QJNqSD94FS2zgmFrajkHsaL0H61KI3li2XPvVVV2sR+mXsnpm9GRBYOIcEvZHF0pQIuEJGdlKZ3hctABEiVe1cM70apKv/UMhigeGlPIaxIY9PuzWFoLLUTz4szlMslkgdDpEzIuxJpXoIfADZo9uWqiv+543QHlQ7iUk+E2OTWHpGwImJgYk1H71qKrqw0IWnPUq+FHMz0Ymps1gA5v6JMfxmIgKvovLn6L82PR+VvNzatobTviF7tUyE4wW9L7SEfRStw2cQEh2p0qvsIRq6NcsD/v4xMtXWJJkGknfEHXdqMDtfHLyI9zn+c3j55OVJTbc7yIpZuexZkfVsdA1u9g4zJfTA3fQgzSHz1kUjU+KO3U19skJs+GJpbjvGUmsN3FDJOqy4V2kpfcs0uTfqNciR8j9DVxWIm3TmFx2WgjlEcafu7v7s0vuYsoIPoytRk3ZcbJ1LkUHL4uZxPEZ2+U0K95KN8C+6n9Bfc2Ngnm/x0zqRsonpRYcz1ld69M5LyTHuDLYnJrYatCcxxvKq3z/WoUlzUstKj8s1fQPAXDopsOAKCo50nlTEtdL/+uTj7NszyhulC1qUCfIvgFSm8GXXrgHhUZ6cYwLIDhIZwNfTf56erNmDstVadqDd7LZzwNnYsdL8zTiQY0ft3PZptyYZ7lCIKl/OxMUdF/b+6vnyp0T8V/BGAg4Lr7RQjaSmbcUedOVLNAJ6T0kvC8uYfh3HfC2xpSD4ijTGiSb6QPMD6DTl7Y9m//11xWa6/K0CyP0LLwnyMMHxISpXiLcVxayNYHjPKiEseZUjkp1toxM45mCVLzYhh97WKCyaDSIsy8Tg6p9vPQYX/4ue4BfV+wGnpMk9an9jtBxKfhby1IW5JcWv138OZCyL5DFB4VFErykndwNTaEro6fNH0M8dYRtlMw4AfQTTyGqaQqnrMxRMUrTwhxRiTl5lsnncDJH+o5I1wdQ3E8RI3EQIDEMTQievxLU3Pv2k3cYAKgc+y8v9Tcop31ccGPi8rZ+k5hdt+t0EreIVl+Q3RAtNPqudoxvlmkcZQo+XBjYq+fLa6TDtEei55JZNL91AgfxMKIp9g3f/XfO+BWbVkw2QPAFqQrJ1+Te1vPbTFMSZczUqHHXX+Bj8bGPOtqfTtEWuWbU5+V4j6c5/H1q3lq08AR5RDm2mdmY4D3l3bQJgvspP8/zao7xAwQ/79rMttGZiR64c1fs+gxSK1S4zNhOp0OkTgufhrCEA3s037gPBRKUjyVPncc0/PLK1wJqkkLgfXOFKlV59h/tX2n302pm8x7ynkCf/FWxlMZ37vNUlEmdoLGhxCiqztf0lB/R/nwjg6otCbYeVlo5eppmWV0jgamOu40vt5npbgnXKxA+ovU3XcM0L5rXaEK/0LgR0bUpG3RDjup1dW32x6dnKNq8pkIsSQeTKEuF76eD8eHbAfRX05W8i408ExBVXkn2d0kUFN2q8i+KILa0YY3GJFqsqo73odviDP3wNho1MqkIse9ut7UKl6IfScEjnsGg/Bb3xsI8+1Qnm+pCiSk77PyhJZ0eEUiqteDctM7cLtr5BFltYYtXyhuPFg69G63VfAcx4h2ppH9T2J4784lBpYXT1MkhYM7FEAmSytx1qchqecTo/a4W85/sm86a0dRj45xM9bRFVcVwkY8ImHEfPxCkNstCjgndTdG201IUgfdHakyW+14YT9xaLbkQY2Ff3ONggqaeqoK9KafYlmg6/R/RPV+MR6cgbhwLw1pSAOG22G7ow1isB6r95BeHvrppFr38b/MIfy3DN662m17mC70z83zfXe8zX/tjJga+qzXHG/XvJ0L7HKovDcIv2Fc8YiixLDb7aOxpAQraE8LgO+sxtP22QxXpvyfuQFm4v
rFmQPyWVPdmryodK/s0bmbGyPVX9OjVvfdGfxL2KbFBisr+VJM98UAXkanXtWLnASrzB1SxoBN8jO5XLy2Hi+GB9YYiiOr2mb9kLBxNFaXKGF04258GajR6NMRVbwkiGvEOR61wV998OnXuh77HCrSG2a8/vP8xLnZVHsun6GEN5LO4ghD4uB8y4EOONNTPWtVtpx6VUP+RMvo0iNJE7xpgDWS1DZSvPS0Jpj9MW3clLzAQajy6g7zJxge9Ehztg/z2FYW711dW/PVvB97n9xUqUreLTT4z1Nw/TJ1Ee/FGLC/8jrLOoYD0cDkf7wIH8VNM6TAJNO0sbLFJoquTl757fNsEootnK9rUsxeLm3g/CQrezgZCgJZQB4jC+FVqKWKeA2vjZQ6kX/WHZL2KSfxB+X2nJ1yD9hIIZdysviuweWTEMD1Y1f2iZa/mAArwSoPg6ot+yl+Vw4AFI1Kn0YMeKBUwv892CCQCSNhr5xTF1H8ijqEVuHfJ8gOXVqLJeKKziPk5QlSHV9qoVdf7IT3AUATWU2t62tXKMy1aXEM3nZG0IgmZfyxjRw8XZZ+upxfd8vyg6naHhMYP4TNsDrMTRzU5ZxEIBVXkZr0y++/q5M6LGE6Lamk2mPbymT4BD/XZ4TcmkmQQemrXbPHG6k6OzeEenVxff110ozYsDSHZVQ1JwL+AAFtUKXHWNcc9UzKam33sL4R7Qs5K0yA9Yu6I66Z0Tfez4E3VLcOw1JazYqp1QL/EG1tBlLep3gkKj0PHi6t38VsifviGQMikVNXQN7LX7LHs4NwdGh5TXXHGbvE2DbDuia9NkKQ5tN/NJ2K6OYdhr8Fd3IuKfQZZLofGCgmcHIJwsEyltdbVS4ixYdJkIiX98HMoavRH7M6x7i+Hup6iTx7Kyz/+JLkf+rLNm5KoYRRkqP7WRtGM0Pt1vyYK/Z2pZ3XVndbXPCplpOXJNku+usyHai5lezL0yxX5WyMt6L0qnfIQ8n9DQhHWnVy1+prHVEi9b2wNkc7nJBPCDlPMyMoDiR5jm1B/KoqP5EVYP5/y7Fkyfb1OW7+8Q0kt2/wIrdmR47PAC74ivWBnjoaq/WDDH9EbnuiqJ+gfnFmxowIGJPaystcgYMRj4AsNF5Ks7olVLc74Vgo/ubbJY2Y7Je7Z+gBFQlc/M+FHGHa3qf7o+pJkL4WG9VYfpZ/UncU7ztAOeOBvnlyiVaahqe1Xy9q5e7BnfKG99Hnp6DDuNoK7hiptoUGSiQ8B5rD9gsoO0E2esMPD5d/f1HH3+/KjUfkeYvOKgC76jSj+NQf/VTy1I0+sboXxpZZ3kquDZTS6Lim/okkzZhnRMzTOePL/NtA87ZSjIEfg78b4sjNwFJv3/TA3HXswe2Mg92NvH8j8GcHucqtjYCaMXx9teflpSRBdfqri5zbEtkIo/D5eVKVr0HdMPyWkximBioyWcGJJJFhKp7NuJuJ5Tm5z1fzUuWJaWyNFKKwk2hFDq2kjxbUgakkT2xDG5nNj0ch69yud4Gcz900bbNaZ3F0o7SuClk5mGo84wHN48V02gd0JfN9LZWhD+5Bbl+mh2ZeKE8uF7GVhG8cbm+oTfT8OLlxkr2gEQs4Hglrx62BEOZRvHJHzfVPxNWpSwtqsOZmbEro/LsSkbFN/x+vhi7hrHTjyVq26xWHYo/WvH07j4KbUyuJim9gPq44AVLJxjRQteSyHoBIR/Ss5DSrDyqRIXp+2GChtQTmEZ6InO2ZiLeHhgzrwcSHvF4h8LAwM5y5imHtCW5ujK3j51+Z7nTuFKlf6OYzVXlm/V5C8oRaLQc5mSWOJpmZCQMgI/7369VR3RK8pQG6bgJVfKva5wqoFYuGNpVc+BMcsnUyCnBhJYse9+rS0jTMIY6yLP1jLGlsZNDFS1VFuHbOjX4C3IstkD9iSxcHsgUTpeqGl25QwbsZ7ovd2Gnv860TxrsixSKXR86OGE/9bD+RTY632Sej+kA+SV0yVBKL11w2mcWZTECU5wrAQ3CvSHFfJ9SB1/SHJUFFpikG9cyIAslmZIh7tBJdzR0sg5jHXwCQP95LZBKTFxR3KbEaGP1t7/jrk+4peDaKtVoYNjKMmvdo6zrB1jQ0pUWFTFC+xr9cPDplXa904L+8EMrH5+Pd0w0aDY2gM8UCsGbcyHuo3F3Han6H9fb09ZcPd+7KC9Q4A3JrkS0IILYYq5TTgD8CVIJpNKbxFHglHusFVUvIF8FEyYfO0yUwIn0r0IwTqptmVBuPl8JWKsQvmbrjK2PQ8FN+rbBi5UpWoVUDEJY1MAZfsSb/xF9FZ1AURA84XZIeViqrQz7OQkVQFsgqkD7IRjnuboyZI3/VFJZPwc4TJQGN5u/C7GGmM/r7fVxF1jT2x0Y8P3DFhx1WHfhD0+8XCT3mEbzmDs2d7iJSQZhdmkCi9zgNVZFUkCx+DDeHILH4lXxDzy70AaeKF6znUFS2DDoIOEfWrcLu+2srlVRxAKmSFiV0txiOQbHbTLJWeU4s6ZvvJ7yYBAm3hL6oZWabd8MsjyeBBj7iQb7A2ibT/eJrbCo7c0V1IVe6rbtrWb3/UvTpak70+GbsTkz0t70DLqxhfkusviC5RJ3OdP0zTK+EA50MFHEvFbUNhINR/zbLCTYxmbOLXqjO47sDVdoUKVbQytP4GxMWFGpl69ecLEInaDKblxYqbJM65eD3dfbyQDAFHN/RPar8K2CP9Q7e3+7s80hoQayxKh8AM/bq6uvFSTMcowo+OOU5GkQ/TH+Aelt6RW1kY6PF0lF3z9MtAkspc2HBH3AUEFCXOpd5Zn7TZPBeaS9EyG99Q1FJLyBM2oY9ErC8kI5zGBBdiQZ7RS1U2cUv9g2OaBRdXHH+147/eFm2pWP/puiqiGCoyF5U4M1NhkA0pIbKQX+laWnn8HKjePqg73vzo1FuukNV29a2WPoXifR0Wlx0s8YCep4uzeQZhI2lTh0Zoc9XZ/BvJkXtHH1eRsARuqJyqpmp3yFR7bYlF3E8+SBCE8Cggu53C2je/0VKihEyP/kpCk6Q7vnJxHay6sw0iJGSOhfraL4esYNowTYsgDBgnJArnKOFaHszGPaZROTxHaRXWp3Z+XR8aT/CFWiq4n15WjfLmCKl61Tq02/FSOolQthoSEuWH1t6/C+Ki+MG0k2MekgOZm0jfcTHaGS+N5Db6rV8EG/jN6Co6vjlsSbXrJLYRPlzvxazcu79DD0YlZuxtVq8PAvwC6SMgSnugqCr5fayU29SLp/gIM1VTFdXBj1pn/niLkIb0GmwllbwC0c88TDfyhTOrMQY5CjgZprxO8Sn3GoJqeYVK12/ZjnrSSl2VM99s67+DMdQVJRzOHE+Pi7vur4S5rd7kzgkxRUJVVKuTodfXSlrxHlgEP/e9uMtp8wkoZHnl7qK7oWttVxr37sDG5nOY3Ddcv1m0U/1sD/BHTfbrgEQXn9xgGe6jtU7ub6owej9pO2QHuvb1oz84K9hqcrzyzvMJjJFswX8DkYeJ/T07q0Gbjul2YpD2ZH6nCqsEEteZfvpBAuOHDAYT
tn/TzyTMilj7JiEZNbiNsSi0qdeX1Tpfv/7MLtbHl8yV5Q+aBjuz7/qN35sZJtMWz2HaxnNUy7fBCzsRUUEoHSrfVIry3FRMBYlG51t9LrjgGebD37QDizTS2WCdKf3wlGXYuU/AWXJIKs3R77lXd7RiDzB8qfqbqLOav9kcJoti4o6KIIH+m+htdlQxhjPzKHaHBUrbWqDErioNZl5KMzhTgv3uFn95IZeSqT7EjxRnAp0WcdWUjYaWCPRtDKSBsBzH+U4sh6i+2/7inLVFLn3hvcD1GeKP9pfxR4EoJ4Xx41ycV+D4IxG1zaeSW9oZwuaUOMZj3Eou7RDysV9lQToIw27NkEcDkgBIGQHoeG6LwjhLrvLaiwK58zshPO3dKBZrpSQja8Fn/xaXdmA5Kv1pPFFcGd4CoYdWmQMcV7N3t8u3wqt3FQY3qAITLJRvkz1BY93Qvzi0yZrrz3a/Jz/2kZ+oiqK7o/O2PRqzs7DlFcY+4DHgp0b1ZWmnOrG/wz+43XbKkh0YjaV63c0wtx4+Cs7YdNVGU9emEIuS2j7/XX6pFWY2qLL7+QNEYoGEsu7wInoDVuMYaW5O386USmdSePlvUREARwh3HDaYyxMN1QqzlRgTSr8dHN7kZ9DxopwrPktHhy8BwKRKry0kP1x355tVEKRo4lonK5tyVwthbVwaE1zXf6nYuF1QP0UovFU+bJsUZR58U3NM9fUxIdsfGbHGHpgcfAr9TXNCgG9ghy3tP5P+/RuSWPL90zvviXt/M5RghQASwbC+9GjtGKVfILyBv/GJ9htjNsXrBqMPZar9eV78c3+LjqbqqEBPe6JcpqCbt6NZj9ayr5bzpKwDTa4Lw9t80frBoWgZVcEO0YELS7TgKFYst9n9dTTma7huJQEDammpLiR0WFOZ2zlaaMuaLsesI2YRysfHAgGVriwRSb+HH2tF8ULDXwKa+HtWfuPA/zRz0HiX+6tNppcvCJqfkuUA03sh4JuJPSUtS4Rb6aaR+DmhEX7p2f1il1m+zkfvJVA2c+2apCCFD9VCTRMfx+REK63Cxx5SlttOhlltjZKlO65DHV+VD3mt9v5Bvo93gmTZFqNqrH8W8mczlKpAkoZi2o8Pl/Xiz8sSqDNQk+ECtY0g7VSdrsY8BXxYfwMFMf2N4cu7tUJ+ZvPRboY20Yxp5Buit8QmHFFPzlLJqi/t6FvpmJB3ggQ+2Y6gPsAHBCyv8e9aFS4P9lr5rtMEuwiyOpTGq5xckbhpg6ZCKhuNBrx504teVYkmY/asdnqKxNYlQWKMka2XFS0MEg8jja0c4CIhPR4srpApbHfm5dYWhasGdzxa21a/AyF2oa/LSmy0j9aimhlIYrw2L0I1vJUmdnHOzA11HRDPjuZODglFgoyAK45kDLbPOQcEZPQrGPpqtmKl+ytB1Rt2VqN6VvWXIsBQa9qWNHpETSF8ddGF1mbKvKHp76KBuPCCL1cv/hOeGdttdl357lgUJbi7S3PUAn7hkHjoxNNjGNiN9c986NEsGfTv0XzBPga56lV9aLZp1xKtNVETQK7horYzZKBBgA80SLE1Uf/GKg1DB/zygFnKsb9rwvtP6hDjpqlTM4yrd/b+eZ7sRhNLKj+LAXplc7OPOI4eqVhQv3OA3X73ssas3X0QnkOvP0QwqYH6g5KGFifoaJzr+ZkC1kIBaQitbX9Y2z6gy34t0D599aXpyi/p6SeAPBqvjxtGH89GD/wvOzbMTkafkrKW+zOkD2zZmBpGjfoKYc7y+CIA4Vshh5cPoqlfYVDrxO4F6hqNDOreAONDrqVRHNXpyPgWnk+dlNVKfdDV5xsErl21qMrZX6F4naCPnR5Q64yaRdQNMK7guFUQ6ehvNfUxq217LWHxq+ECTABr9vBfwpG97WelOXswRiGma+iH0+BUNFbVmK1xoZE1HIddu8mDLZsETJbTGlU0bYVRYkbGFLi5DEhwYro8z/VtIw686cKstrMXSmZRp1DLNtczGfHV7xnFlX1RuxDltT8iHf3aKDaYafaBaUFX0eWJ/u4YV5ifvI4hYn8z5cqENgzqpZE4NuOx5GWOmiWux1Djnspk6Bwb/TcfgYBemouKVQmDrmb+YE7xSZJn80bgo5jHU+E2O0iVJLo6MUD2X6vp388w2DDLc3/FmZkCFoAcSJqHG/xX+f3uzlnh0MrKyqlRhYyGWfOaGeAI1EBkASXRmVyXbpzt7GSdAiNx1NZ8YOZvVhGAeu0+BDlJt4uOKvBkXwqF9+qWvSgX6g+RAQXCG9uR2rX81IuiTj7WX/eXVGSriJKltBLkhjQnS0zHnv16s+8CX5XVtX6yDnzE1p7scnj8w6WKrLH0a1DuK3MM1SGp5j34l4xNToOswcs0mk1ZvnuxxJgbgW4887VHK7mCO89bTVeJNUi6+wrOw81I3KdZ2f7Ae/8NOIFIfA4SXHJpYQonv2osPHk4kB019R0J19IbDe4u2iDMEgroK9D/AhoPKULBQSYzyyS5U2aKtZnT3qDECLv1wU0B/5e39q3du5bQHjTq5zm9ibx0xC+7sPm7u/gR1J+EMK/kP+geu6GDDSnoO9rV1gXbhuR7IIXnyFgYFbLbX6LDSjh7guABmMZ+0QJ/PpchDcC1r/7KO3lmFlkupAibqvrVPcb08kQaVVCYRSocvyFHgl4H+fi/7CfnJBsKxCeuw1HsjIgGd5vpEBMC694IvmkFzdWgnqBNaz+E0sg3Q4gZoR6lU4ul6mvSKLjgLbZp+VXsfbC/9j1WhILOUitVZHYpKlpWnW5f/JWpd6nK93voZgJ4jcyt+IfU+eLV+neqWyUrDdyRUbUBoRv70AMA8fICWLFCGLj3Q2tHlOy6GliF8bu17kQ1FtH20TmwxLy3pMo/lRPfBPy3kRy2Bc5jh5eZKUb0JYqo0TrhgD1y+s7x/tqHHqMobMuwvA0fIavfj7D+XOz3kdVU0Uo5B/XcSlAgl8Mt2MuUGf5c9s8gURZLJWyqmh355D62Jtr1+5Fid375EkyJ5JNXpRpeO/cjuforMThik/xc7XquXBWSV35vQiunn/uHNcivWTKsNpDbi9dDf2Pz36hSUYeJjd40xlLS+ZsFX/hXF5kB6hUFkM0dceTc4a6QsJmOdhoDgu6NoYyj1D+7COnjc480eR7j5WuCO+9SpvwEiIwei+S8k6fGh2unX/U17PYpxqcdiOVJC4eFNFJE6H42Io4wgNhL6cDn/sa1mIO3zULgdCslEN1XvIT0Ome//KkNK3xfDW4kjwcmQNBFNBvCyfUDkMO6++cScVgbnQWjvyJQSu/7/a3IthYj6mHBCzXnqz2WH8dL2gF6tM1DnHBNaAr1CAqkGY7l7vig5QbChIZ1feLY+EQISSYhdOAKQRV/Dr9L9vIziKBLEg1qKlckiekO1/bGYyzIhVTjl/XMlCpYeYqx4qRDnVMAKfz3V2MHSbAwqUbvuflra4urcVQnmCVAfv6rVl+S3UoiOr+SBhggnkTjuEdIeXUhACrLDA9AGWASfmldgGVGW4hNlvwKqEiEvgLTRHhdEFw
NRdV79fPrE7sdt4KelyWxOXe9rEe8NWvVHetaDfaiixoLi1b5cRaUn9Bvrvh4/mQHb7xCEEC8TV4QvzK9Kn3pXbtNf2qrYcLDaNawfb1+cgKUECMauLnLiVbZETkEn9ez8uVy0YVs/GdUOkTBRvyH/LS/ZeSYl0WHEf/QE1Opa288VQl3waPz+4BUXUqnk+DU0LcLSATeKfwKWprJzDaCM/4D7G0qRrdKoX+Tl/lNVHLh7xZamJijIlWgFJnwiwqcCgEIZhThCYWZRB2xvKEPhWhYjgptHaDauv3y3aZnLQoanqAgdPzwY0wpJhbxEv1l5rqcNZnfwC5awPn7b4X+iUed9Lo+Ie0+2njMH0aF7aEFlKW1MsDIvlm5kDFkzbmKLuj1BYiBZ0AsYVa+Gxv6wAJpSVBrLvl4mvOL2ynqeNzpIL6rCvznhOq+Wglr2FZwhi8NqJVsGXgbRhR5GRIZ9xAZ30nXHQq0iLCm2V5jEqr5RHh2X3WlJPL0kR2/5UMM08LAO4NCurMkn2NBsnncAsZ88nLQGGCrCN3l2dYEFQ7YpuQPf4sOqLrbmfCTpKCd6FW7l9MB6RJ4uaDq06DuwotvRWs6suI15hIyZ23JzMKapprZhbrWJsgQXG/A/oYxhzQ38mA1CZgC9p1Idb0+JqfFQh6DKzwaw7VKFYfWQ/t5S/UFZGeZV7Mj436XyKMMGjvqCzRygqQgI+48RlMeO/8NW6+/y//+Te2O0Hiist4VhfDl5PMB6bHvmlMnzLPyK3GvOcS4r/uuW/fpqw3U5fF7YR3KEyfciRjJYF4BW+Fp/5K8sv55K5nlrtjPfJyS+Zd+oPCN1r+2NFro8nb6qvhhpKm0fx0i7y8TATkCx5i5WnnAI4kd3hRvSCMZkP/4o8YOTSu0t+HsC+yrcJrgsfOeo+jqanobvYMZ9610dtednq8RdPvTSNl82PYFWrLdTkS7vKBmAaPvXe/4ondOTc+HSBSqDSePhM2UJ2Hyr2nuUunXLfq/Yj5Em2KPuxk8KseBpcJ6hIOas/1f6w5OVZBPVNUH02DkFldZJiq1olD951AcW++u9HMsBdqZxkgk+Q00t1geryPyKO74IH2dSr/ju21x6Dm6HzzH9Uxb3vz+DuevLDNz8oSpN2aAIxxgefn5HTQrZ+A9ag25HD7Ol2aSN7SdiTkgSw4xS1CsNAXXjhc/CCRjP7BI9jOy1SFafpxYU7nP9ySvy+/5k3qvRa62qasFwygnXNwsn7htVjfMYDotiBmxc+z76hk/GiV0Sqi907hzfF2eJP3u01zQh4UDOk7vvsp5dXV9pV9HvzHBa9YWXKB7IaYEnssxAp2ZnKDTweBPajcRNi7fIIP6EEdmuDZgrgb5K1Ry/QNsW49bIgkTMBefV3SAE0NcF8Gnwf+4nFZkH0W36Pl92733KCny2/t8c5CUaqmTEw23BOakQCVJRZrtB0VhfxHzgJK7E2TF7k6ve8MhmjNGNruRyi14OOJGsTV0sq/xlQS7lPiX64KRmsH413cObIJFvtMoC+hG5mH4/QbL4+n6EJkeqpaRLn5rTeGwJaQz0R8SkFiilpYxtAC/OkwAjUfj4RmhBENZ5wSOwVR7Vl6922suu0wE3E2K6/Q3R1vzBjNk7gwP4Sxs1AGr1s75SZ1WeAXk74RBvifuvLmdn+CuNgLXpo7mM+J3+GlJuI5SIL6dVFNtidVCob/WRYwP1HhwDprgU/M1e4cUhYBH4p2pEDGnYNrfqXUWTEu05lF5fTITtRvpX5WB9BsyQ++//uYo6mdaJ+101/5Pue/5IoMAWZYR1e/z1t3zEmAjaHvuFIy/JRnfon9fpU9eaMfleJ2sVxdrAnru/uz+KJCuoqSOUiOBkmk2ZAURY0TXxqBvUyadGz01Ciaa0SIygKPH1ZEUTdtmn7X8HIsD836nwJBcFyPpguz5GOkCgsv+6wcKbkafTlbGXw3SZS1xQWNb0BPeoDGvfRpr3WC9/8tDdvMzelgcfT6fXro5gf6bMut7rhVsEUxaIIUhUVNgs1Vbzr38CNDaBGHNN15n0XW00Z32G9e6CXC4bDs2TNCh72zGKnJopErVwo/c7MYI/Nhz9+iJb3SSx8LsQJModuVEYW8n5At5Dl+BkdeXbLMAhb6jsK0iitQ9F8QipoJeM3bMm9CZMw8FJc947Z9ZrM1XzAgMMjwQgfWWQeB1xbUNyPqBjNd6IvlCmByvuSgJ/lBOHzbByhBX7j3vjskvoRU1KeRvmEL8gnV/QooIzQR7eX+bZ+sjzFZ8+LOAivEnX3YXtsk/nJmwukg4TqIzVnRfc9pzEzaoLxBmJOhlrww9PPM1OeVdl4ttevwIob6sPGetXZk/yPv+qp8IIwKYW0ST9NGaR3ROsqPNf0DoBkloB5wo/FRTQo4mUIrXRQdK6ECjRcqLW6nDRRA1CbTdZ98RNzT83TIzB2WkeqqbUHuexEWw9i7yzlTzlIjFMF608LQrHDconSxqCpd7FAf/Qe8ZnAOmBm70NUmcvZPu51bRLKM5qPzS3ktHxSLNynEJBgcFHCfksT9FeY+Ao3Z7Sv+wX2KLLiUx7dZKwr1yD3gTZHcr2abbQfzT2xR/5WWs4I0FwO0z4c+oP0uJRIMYLH3Sja0G3kdH+/3LWkP8EFTFg6ceELqeEmdx+jj58U2UwN+T3/Xx7Wh6HBl/A697JV0vGYFfVuqC2HjmTX/V3pFj9FF8ItzZv9JH8SilRkxy8PXn4t9wTjTI5RLx4DAw4wwQp/XEnWiBTPUbsH6HhJrMy0X0MqPAh7mFAEsZeCtje2XJ2Q5LaI+VyMG2kGakLDlpQwaXKSNpOMWQ42P6mCaW8uzn1qBg1x/oeObqnj42ZKNe/Yv7yl2Bn/+afptqtMN7zlDsnzNO913VZzI+/PR4H1l+kSrUTpT9Y6rocC0i+oF7W36N1Rz3myIae5ZLE7V0aw1ipLRhjg76Dooda/psyRpTsX9EQivGm1g20lKkdvTkZfHUXPZMJkYx9N9kU1XbQSWOCw2j+Nqi8A00n6OoDYTv4nywPsJnYQA9AbYGS7crKlE8jTRnJz6LPjqkF/aDZZTVVi2d2TnsBVCK5U7fq8SAim10ZbjmhgsUwwokNt+FJN0mU9dNcJEE62kN5JEMbDHhOCMio8htR1W9RuZlUIxW/e+XNQRC9+PJsAd1FTYeKt43Eaxb5vkGqMTJej2bSRCoyCKQLdMtfG3C60NS7RFGa3+XL/Ept6V2TiiPiGz0nWF0L2id/aUkRYpiDIvsA8pivpZoQ38LUM5aUmgNvAG/dnKb3PZS4QBrjYcty64XrNU2Zn9P5C945fxGQpZfct52PbxSC7RU/i/vR1WbhROJXwx3nDzEyAlPun/liOw7sShiNsigkJqaujLFiSz79ItyjW6knbDsc1/o+9rwriBwRAqUjdjR5WLuGy8VsG+F7jwmlhETIY4Q0wwS5lZNq/WQpCYRp36R43AVN5L7HVE3ut2MJd4oRfna4k8r/ssko3xPEap/wzhIp5cdZ5yUUdfhaZQ80gnc6MHcdaZwuk
MxF8oMVWmkesj371317jCeKRejoxL1QyAeIEU6WnwjN7UIRKXcu0RLC/MUUMrAiwmPZ4CHkWCqG/0daddKnFsnPsHSzlvLCG+Q5UGTGP161/Adw/gs1IzVJkETmWhmHq52RA0ucFG+PR0bAUPRLXQ2dNzaao+yNFR93PysRmZ7KK36ujdD17oS1H+X5v9JAF6ME9ptqxeeRNRu2sRzhDLKeS/whH6elAAR/uZMv2F8UJhxTiKoeyLiTzPCsLA5JWDhMFNfhxGCM2wwzdMbl2ZhGtJ+98quC5zzBWyJ46//kW/8kZK1lHkpq+NOoX9sfNTt4U1LJU/wHrVa1g+0w4922JRdXVRN9Hxv/EgbyxxZ4T618QHvsoYRxxjQ428fffaXAfeixoqypSZFx8d7ZxO+m1GMXQob4X4FUhvl7MhTjuLKVYD4dCEU9W7Dm4eV92tE8nOSOCSGJ1sL14R4X5v3Nq+QC659fWe+NVixgJn2Vfar2J9wkbH2s91UuGIot+4YR+u/ZlhoEZ7PwylU092Lc3cJQnZ9jj0+Cjt4PyxCEpYAxJu4qniNaFAmikNDOzzJyURHtThK5gkD0anH14sbom+x0nrtkbmXtB0+ibFJ9lMc3UIc04HBIhHR5wPPIh9CHvUfFBHd1ppwzjBQltHt9rcCncvNNvyXCokNN+jBc34EywM1ZP6NG9CT5TaktFEtP9DyevAOutc+MOy6zECsRYtlC6shwtZU5skkE5Y8598wj7box5ZypHYcCXE45lf/49IDYByW6I6Pl3RvCylIeVsd1bF2G1xDFWoN5RkUwWb1hLM9BfjhLJ8Py8FW32Eq8ejDEzHBJ5iExEcFM/gKqtjTDaLkhy7hK5nXp6h/kV9oYCpb3hl22maorXoBB9DnpB8EV+VlPfK/K+5CdzTZSOFZCX5t0fFufsiAqnKNSGxSsWqcpEm4bIm2F+u0tXNu3jHtUee49luA7B6gWSS4NAjaM2h7k+DOkZS4u/onGKfj+MBTacAgRxhGtK0rW8evf9s5Y88UhiO8rhpN+FxpFsjn5M/G2i56SciCM6Dfn3auwNa8SV9tuYkEOBYr5VVMSaa2C6y1NBUk41JJYlQi2pE8XLwqez8INhw4NqZHtIKyA0rVQpYa44/38qYaJJ0iqQ+B7/U3M9ov8U1G5WpLVILih+SDVH5dbZswXqMpS3/k2aD7hlCZRpbxk7MlLFZZz42Ke9S6H0Au3qI77m/z04WdBHVtrtuSLx08kTI0lyqcItWLcdeKI36/zjbwOOjQzsCnmBm8sU+wV+laO0NatZRkmsI5QxzF2DSlH7ieAVRtDvUfHFHYy7SqLEhHe9r89ZKFvMvtKF/8uYAAWh5oo6slFQ0f3h78MfZIw6R/P+rmMPPM+C0O7DZaA29rn77e4Vh3D2roXnHzm5VsstZbbzyR51rtLL6IYOpjAxLTGmdlFNZN+VSDE+eD7dWG7/lHTwS3m0Rmh/oYvn1c0vaQcrqEEEEtZpQR2xQOxGfipinoTLcU3ZqJUHTMRuVb159qKoYyQeG+1D/TsbE44oH6Z1I9wVzfDrAuOhZJ4TlECjddo/6b8R53KUumdNoYL2n6VnwLqCcVTZO5s/xXh4+fyMEkujD0UsEURaHPMnbNG/fIeLZgEgFlrY4kaQJQf32sEZ2rcAwkwZEjnp1QuA9lZvU7pyBpTeJN7aI0cNdg9dNtsWn0qSFyPC3kulXW9OfLs9P7I3ItTBe9/GmdqKJ73W0VZs5JycPwwEXdx/9EH3tO216rtDPS8OJe8VHqsjCuySVEphb9kJjprv3YLVu9QLRl8zAmjvBXLg4S6OwSDSwQwkH3/KWgnGWvsZ8qaqrB8aR9Kb4yrQDUeWx5Jsmn3LS2bp/p70o+kqaS02728Ut8bnOhgQLpByPfd0dollKKQjGpGs65tqqf2rNtM8HaruIvYumf0fxY9q8LX3Eut+FWRVSr2OtX3gkaYly8+Zxfs7ABgF6HlA9OnUkfqZwfhH1PLSrfN6bPKb7P0Je8YCnY3yAWKZ5ebVhvD73STTUUwCEzh8b1Zd0cwx2ZGGbd1q+XvKw3yq23oKAgiClsxv7jQpPCa0oyGTpI6rJ3HAWfRcyEbvkJHbwuJuiyjJeBNj0Vo6sPN7Lh945Z9NIjcH00L/V2gUCkJEpLyHa95nu3N41m3zc0uu3yT+ll4M44T0oZ+JIegqhfCVKZHp1UEsxQCvjM1G1RmAxrXaW02026Kg11riiuup5goNDnj+UMotVtzVZnXmMbMTGDtFBOzUM/bNCdvDGcTAJsEziLqdv3S6n9VlT5LgRUIZImHlaTb75H5xmC4KvIPgjOXtKhj6RCRZ9VzbHfPfp2slRJmvvM7PWwevAvg0mlqEfVNWie25UJJZ59f50X6bLj1Wr2TxQS0zgImkIJIW6nbZxc75Nv/t+4//z8i4nPXc0p2KBCa7U6/PTP80GYMebsP1q2jDauaU4rtjIjfwWN2BmUu/76SOAQiyDuJ/Gy6O9woEFhAbpZ8Qo7iGtLOao+P6/++Cja0PdDPaXSKGMnX39sDWS9JiGzDai16A2vbebm/7pYdlta/Hb4a8N8zgbM3qcpe/wRvzodExjt6mScn1rYOCo1dFEYuzd4xMax5WLfyC0a7dolu7j4/OWNb5MYhevj0ah4Z4b9tbjiD21LHOPRv37M9CtKT/EttKAtL7D5ss8kkwraWxSZ4NpfdDGwFQ5mPcH/5cAMAoHZDw4St9Rn6OyuGGF1WEQZZlowq+qv94N5URz0XRCmiCdLgSaDwrJtLz0eNCMSs/vChHvG9gnoStMU0TMaXgjNz1qsqc882cskOpHrGKrB3coxrL6YyIX8xvHOgoM2bVlANKnUhyZQ0XpX1hXsfKHVZLYGqUpTpMHpM2MJ47rsruysD7inUP4tlsDX5mTil+0IsXzOS68KLy8WQhbZj7937Vmxy+u+MgZnI9g9wS32Nkuh1kqZtEjImYlPolS7xYMP8wfXLUIUN8onhcQfKAaA56Ce4K7DyQiIXvCTUlfUeq6LmB1m2fqpnPuVoFaajKL6kvTLMuzLzWOaZwd1X4QUMK9ss04s/jRnV8kszag6wQ7bP3v34VYsv+5TqmJ9JCumRPbM6SA/GI8gTGg7OX6cyPVeOb71vp0PkqP69uOniTUllmCLSOF9kdw3cE2Pr4TWwitt45dEp9Ye+LH2N3FUph1sv0z9z4vbH4ecDA8zM/MbR4rqeM4aJ7SJ/VujhtTndc2z/gbLC/pE2Hdfw9iaReaBWMcr+zJkx2tF+UgoWFNyBiqNvIg3RlnRYxNda7eoL5Mrez84kCbiVziY8rg0pvzlyS9893wlHUzTIutvJ3GgcNX98kER/JFFG2aGoe/fzkdwuY88QP6Yj3fi93N+CgNe2FyDfBeLWmr116fZOm09wRNoOj6bwgBcnMZ3xGKtqBL424XlUTMC86FO2nbWEMeHAgVLbvvvPRri19EBcpD8+aOBOUl4+VW1BlMNFvW5X5FnvGYm+O3WVIV/T2YQ2l0fsS5oLuteksxjqC7UG3uYP
jsvk9RlcPsW/KtvNf59weSzNiOak1w/MHeW7C8+KGVaaXjeafZ2FlzWfRfd7XB/KXKNUdIwKPEEbr5/M2jk+Wyb7vebch5fgm53XKdtf0Iw6W68YqT98ezYsg1gLguNyLaanwr7D8ti1B7rsZV+o76vSLpiJXedFwJyPCPoPyn8ZVu66GaQdvKnEb21DZIY03eHcFviMYcwbMC+FFoliDVxZ0Uk1SqbsRD3mxQJt/zcsZNzo7vqKt4HzT60hjd2ZL2fDTD5X6WV/CkoYCUWrS3HtPLbSDgvP8ujL2G1HFw8JbXTNreLpuqlA+pvVcDsjHGkqyyK4LwMja+vM0K2x+Qso2AtcIMc9ZHsn+s0NUi13amDolaMUNTLst9/JOUKNHxemI4PA/Sq+bkfkBfCxivsMX7wQCKBSXziEDHtjdV1+aFAccrSG4FG8irPXI+L7+ZVbUEsNmp8IZFYWft/t9qtqriiPPCwHxghunVix5A/jTvDxGQl4iuXxEtigbzGut/fxQkJAGuqUL1HdrSQFVJEFCnRlM5WCE1444a1uksJQX9DUcOvFYseEqYhr+m1Py00XxkVQ9UWN3u2/o9ILFMu+yCqh7Rmdlaqxb9cnBArRGnz9UHHER/O82D6qF6TwxWq2/Ef1EUzYx3VVSkQbKxMGyCqgMheJQxuW30LHPfX+ohxC09OgUIsS+edj/iNvJgKnLTS3epPLWmV2H0prYHd2H3p4TrIGU2p0qmlPrYokSRoZQ9o6RtOlhX7XZmYxZBLlgd7oiWZ+qtPCfaKw+QmFiH1o7INxTaV+BAQ2M1NG1JCVQyLpO/RnGyLuNFOyW+o687x97cmhMDzSiwebyh60/nMEncArSknsBfItSzgRFmlCKF0L6+tCvXjqbD86emEs55mum5kbw7K/1Ijoj0CXSc2t6UmjKIQitto0HdWTFGVw68m68cwRJ0/LUUo/5lFJY0/8aXRj2cb/dOvrJnVXGw5IKGv8DoH5Se484l5EnmlIaTOOaU3EGJJWypJro+MvZ1uum9ufD/ONeJdyDnZ5kfjYuinp4+kTczviRqZy/hA8jGvgJsFVtD5TqJnBbeTLxoNDUiV1EtvZlrS14UO7wiXx/ap0I9aClp7d5fftn0i6D2QC3NeRN75ykHIo4cQIWDbCjBbCDVmkluXML2QdbTnkqnmLAbgVpmpNJAe8j/6i63uP2Mfz9+DjpUOd5Ivk/IdnyTm+pAMqPg0FT/62gnZmCb+jcmgI85ltPtqoBMDYDwPsVN184bbSjy1AcvCBDYJBHIUNS4mosts9P6PXVjtax+EU1SGooSUFyJVhDrOk9TMS/MqLyp9MGaQLyYxku+86k8IzLzrdS20bnlOxg0YAboh0b+nLpLeHeKYZTvYdvpkhfVu8e7pDC6MOVRBv7NflA+ZPqYGQdexsfXy7zx76cKtJVFEg7h/v//zdB3LjiM58Gv2Tm+O9N6TornRG0n0/uuXpde7ETMTE2q1JFahgMwECrC/K/z877FMs5n47PKVVtX7eqdmp8ZEk7K9rfyXjzNhEYB/cz44WZQaZ7vbDQWc6yGEVEPDyI+riD1oxEQKm4036iSy78RheTAHw8r2O7KqebsxQZD2AH6Ut/EwJlEAKD/fgzPRv4Trf/5WCZVZPWn6yG2ypKIWfaFZBzRAZjM4u8bzpmBHdmA+Uaj57hCg8Vyzcj746HA83DKVeX1whMxneTlp1r2ZIAaQLFI6/V5pMmrWUoFsg5VX/aT5pYi82Vc7/RpeEQTn9UccIxlZhHDBRSylo66+0xSYXi26j8sGbK8zNFhEffryvSNs+l8tGdnOdEOh5sGAq2rS9dIttzaY4csxEDg9bPEL85pyW1xeS1XGS7xmRxhWFdki62RHCCFC5m2stTo3ay0xQMQwis8qVgwjcF6Q7OhZA2i8Yl0qoetAK5xDBfl9BVC9nc4SNIpHWyqB11TjfouYa4SGPMmcU6h2jttDoDrLE9MwFowgeqn8O9y/R6BQ1hCjVGg5vvYcMzvEtcIJSEvRAU9hd82EwwWJ3ThbQLQVIeQJ0QZj4q/RdIk4VBWuulhIf7+UTtEC1e4/y9sQ2XeDvJYJv/oqwDzOgPuT7u443F9wt0rxm06/HZp+vu1UoUqQKo1xOCWnbDgNJbhYO9D6PRhAK840G0J5/gsIreo9cWP4jb2hNyp84zJRnXrCcuTgXswtU/SfCalUzR3JkaKXZD1hI+h4aCwlmgfiWYkyfB1Zi6Fi6yhdfSTzXvLmI6eFwThDkZo4kPR6aJ1nrHAco7uFhag833rTmA/SaCD1oVjBLFP+sa1V1sHMupbWlHB4m1GwGiqmzELKvRTZNk3CdfTvrF11pzOOmAcn1+DserNo4nAUibdVPxLrDoFPxxHMXuWlX2/uu3vvfufC7+s4OlBuMEF/mXzwz0f1bNwD21NZJ43/+kIhtelFI17n1+PEaCdMWvM9qMZoDo9b8D5ZKOD5B+aU+WIVKSmirins51+5H6r5ppmkRZcXm2KS1Z5f4dkouDBi8pWViRcWds2DYjjSqqvxbwRsCHNwt/jmsKmcvF0QbNYvmd4KaQjBId7lo+UYJmYjNJvR70iNIzT+a6EnxstJ8N6i0yMPcsq2emKYsSjh965hSDiusTc4oQcy/YkWEXhq93YwJlKPBelyjuEYIsIeRPmnvi1jVTXWdQXjYOQ407oq+JKUxmkHxsgMQt+KYU5p/6UjTSljg4GJQwYR9lv9RhsPGFI1ebU2yxMMmFpEPL2IxIcvLF/ehAgxzYFJxTR7Suo3H3GQu/sNO6j7hctIh0JyWM2+UNZ+0PCByezB3WqD8sPdVfbCmLF0DqC6JK3HTC5WIwyoT2f4o3Z9GmH/UU3XKdUXYjaCk5R0/hlpNUzfAZjS8NXuT9vdDgmEoiKEFoqqFGlM2Obok8/dUJ9fN46Ugr/N6z3S4u2JEqzoqQFQU/4SzK60oVHwx3kAFXl+B7UuhVSllq/aOF7GrHtR/qaMsKO/D7lCiwctFFxMt0sHozeEkGJ7sDqf8ritY5T90I86APs4aVyrMPa+nZxs7wN8MUlX7qAK3VuYQ3Edw24L6Nl9kt4DhgNdEDLdqljonmWHdRacGrTdptWT+sg99WM3JrgpArPeezDsvaJekzZMLlc74cwl3e2/4ESIw8QNP6Ko/wY7mm79u1x5craNQuWO3phUdTTjDTYhgSPjtKroKAX6Uhx4g6s7Gl+nqrzzl/zEAe2FrU6zTSWAsAQD3m8VZ3OLKJiIw75Zx840voxwWLxgIYg1Kwm/NBaGUaP6hq/KM6m85fUsu/1aYJsbUFJ9Z7DTo/cCtfFWOKSH8sw26WfsLK9X2o6Au0TSIjmfz9y3c4W+GcV5EBrBPlhUufXpZAW1qHmubt029BQmQ82oyZyC4YlmqhyEF5k64Bfr29eXNCDQu+nDQAj92FaC3sF9Zf9BUm+xnDmGE//9rn1oTTC/GOmFxPGNXOU1x2ayTCmd3KbCsvvShwzk+VFPzLQ1KYAUdgDVlNL6D7KUSGYd5jG++1a0tjsDcZjVmPa4
u8+JqcBR8tSOMZx776Jz5/S5fc5irzZ7Lc1I77CUHOXQbLmbz/i4wUCFbBtJRq93GRxLc9o4Npn2/Oz72/4sTaW7XAzU8qiW/pw5G+EHgTqTfOBxLXuja4GfRqCvl6YFOIiI0dq98zQ/oWLJ8Yo8iTYEnIXVf4cjeTuY5By15KwuGWdp/FaTqaHFxz2KagjGd9acWmUt/O0blAxjnndyBOTP1bqj3oPAMldHrnjYbF0jFwk558TIQVCEXXSvf2ytNyXqcJWEymBY3WxxDwtcL7yOF6BfN4rQnqexxKG4Tdqhjaek8S5vgctUMKCrSx5813N4Sbg383CT6+gu593OXOp7vu+7hqNEkm9FpCMlW1qU5pJe9Befz5OAPOAAL8n97i3ycp3avldTLW6PxcZFhtFNziPxXuRD5sksIfNiehdpuyEpjAfsm7aiso9ejGUjQ7RIC1RtpwLWveooIXRS9N66wUoN9zEtEF7KFqx0V+xpzTqllqhR87LzOXzQPVBg7+8bINdcJw/jnSeCSRXGkVfQS7Q7EnkXZT3wzBwXvm1PGZsqfIbn5i7fX/bPzKosPl36S93aDPJrEMz+m22Z7vuxkGsSbl4yVV9ZM1hw0tPqlnWc5kD7PsXplCtk6kGc3flDAC0JEeSyn8P62CL26gqfOCtQe3BscKufDscqjrKtQJB5yetDtpyr17xec/3m0NkWCho9lpMqJg+Xl85v5kD3pnUb5hZyYA9E0XdmIhs2FyihVMe+xNRsr7yU/PUKAhznWihZRjg4yLaMR6K/70JqozJHmsyK59tAPpP7whBFK6AWAJlF4tyP5UPyBIX+FMXz5NHfKbp3EAFCarWXvE9ukSMQmrK/PBd8OlPl2iQPkIc6MGT8YTTBezvmHP8QOpEmFKlSFksmoZgwGwTlksgwrwymuiHCZcgsLes4UD0G5yi2/fsijWC2AO9nzTgaLYAnZIGgCQm72UMdQIsnLuAYVsP6v0twPerjdczgF6kSaMA5liJ+9Wex7eo3cKe2jIksdegb91VUF+mf/iqGNHVciFN/dc5k41sZlTcGHK/d55Jd7Uo8umL3b6pgDOSbPlDBuEKMgmbpI8S+l1G6WdmKJP8GgBrBx1y3AbcmLC+Rl3SSIrpLDY0HVO5M+ZIU+qQLUEUINUgmC8JrgF+huNlhiBYtuZ2a2MSD8r4uDcvJxtXLdQfytWnsMvSDEWDmQ/c8lxUnAOWXZ2/bBWV0VBnl2nHBH3Ewy+snPN7Bp4wbu3hQvAJDr8M62KX4eqynBjvhuSpK/2DaNAwPkAqGSBoEhzl7Y4hP8/bAx4vCGNwFjpZw0HQpL43jEy6O48bNh4thWtiPkrA7Hweim2X+1c/GCb9uUpLIwf3pUIjzALeCvoeWqlkELx3dfNCHD88dOMJuSvkIUi/r/nI1SB0hBpLcRp1iOfuEmOFvC1q8hW++0R7YlN/l4sfu85PSZhQz1GKGR7jJqA9zcOAPy4rnyzCTGS7PcKGZfxP/vvl9YoLByjbSd5RBPYAZhL1gawQtxkEB52+oBFjcZqxWXagcUegGv6a+DPxT0Gy7BUGFko+L9UxWiFPn1/LD057nB/mhvAagiXri2ZFTZxq1N1dVnvqbdW/tN8S1dLj8OnnEoshm33fFWfn+eliPtSz7PN5z+Sk8Ytiwj2boO3cBUUxhLRTNlAw+6dURRZdvUAJ3JxQ2Brd/7K25v9DYYBS9OHjPo5ETuPpHlDDlzbqHwJ5RnY1oY9v1e3K39TlMZ0+IFwkLa7CF1K4N33uZPDgxl4QX32/ZabXUwsoxo8y6BSrieiHTt1khvr0yneu06PMqaish/d4obQp5rEeHN09ktBi0u9Tt3Kb5NAJoVE/dkjGghd8/hWoRw2p/4DTSk+8GTeQYM2GhS3LxWb8mhbx/Db/doflQMZovG4t81xK8lBFA7/Lxc/KLe9WhvbYze6hMg0//0ttsMXbRUtkRo7CLUdHUJKc3g3H3HekYq0EZTtEs1tV5Vda6Z30pSahmoIM7ssI6j78zqvJGEXx8cNB0zowZndM/RkC0v9L+X1hXOKNT46kELAIuj99g9yjKi0NHUZmLetEZBJhCOkOdzIcFjwdR/X3IVDXGO1rOF09lcTWJ/APY0q4x0gKaJ9iGfDhmLXhWWK49l5D/jeA+qHmxA2yu4oePgFVtP8NsP/iVY7BcABDC0hsA9uVlgaDji6HBC+U+HrZ4BDVfgWil7mV6ExXeh4qDXlZirny7QW6z3zUv5mwiyBjbKl9+43bU9PlCGUfBRaYMOEVDkZyoSdWqJAiqPye5sQOk0mH84l7Lwjvb4xqRfX8tw4/23R/ysy4T9+7gnLMYxs6MKhbqTxhB0I54FwgYdi3jvOvjH2MsL7cZ9uPPn1YbBO6+QS8irdMo49eXP5VFenCJWt57wXKYZzrm/a5bxfH9gr7fxgOa8xz7GUyYM6xF9ge9oOXxqeUqZljl+Mp2a5MXTyIP3w5fFoBbAywSeAirVQepNMGYrCUPGvhmFEp9wbF4UBffaSDIU2n9uJ3FfC/BS65R9ZDdx9NjGrzvLCyq/tthKBEOffp9TxdZHk5VWnyFyWxchBsizoZbPAyxWgPJP3fXWvew+Sk93gtmv2YzaOfDNNoURXVNd1tE91CjYQNLF5oieU75eD9sUyrkxIg7YETAznuqOG8/cVxD3IwgZu3n0SxI403gu4vvzHXwbGR+NRO0fEKLshW4sZARmY5C9UnSj/VrbuBj8trt0vhBMJh/Cc3nux/gpJI3c2MvtXo3XG7B+/aBqy4rocpFusXr37d7YedLlGL/zpwH4wWlfexZ0qB0TPj4TZ53pgO9UVOm9wa+CJ0H56sT2Peiqr+ksvjW5cShn12uyMxK3929zNyXGPtXXxLNnijDUDeWychm23Ip5vfo5eTnB18U53mbMRtnSDwH1qYpD+cidRExxjHYt7uCOYbicINCGrQ+/CWN3PIS2VePEU1VTWtiJAU8HCFzYcDJ2/ubH3PUNLqkZjl9L+q/QV/s7W3HA0TgU2MV/7vQ0XEIq+4jgK/+msi2ohd8z5PFoGS3kFfjOo1uPtjKGu5/IvR7oKMvCHRgT3I9UF2CxhYb+wa7FXsttp3wAzPUCJJrL+lG3Hr/Zhr0Jzji3SYNmZlN8uUYSpKTofBO9lGR7HkRv6Ht6uHGmlZkHS2o7BSRfntbGvZwpVZd6XDF7Qvmkl+6dKpSfLeNnGKl6FhnGToes3pJyNA0ijZWd4x/GkqwLTF86A/EPVaUxAjy1ui1z6Pugd/WaW1SlSwUqfta1G90hgIyUgbfACwhQivZuGzWRR0xa046DOnEqeL6X8Vl4pbQe1A8Q23AASdkIqXwdq3iYcahIbZYzonrrwHh1LPth7UqzayGxw0HXC2yIBNt9cP+hXekPJjDWPX3N6B7llUFFailxsDwETlVhX10doJ8gWpFrv/Upy4VSRvzme7C7sAkSdq679vnTC9u1S+OHbpJExhYIbBfNPKLUe6cO5rJu8+Z9e8He8k0Tuk0NB52nJD9ffFZPJfx56FoyFs
FpQ0QRMInFoAv5prZeKjhDhwmJf/Kss7opdwo6wC/MhIsKHDTysn2aLN/2bHe+7Nf0z8JZNSoVextTnPilUHdo7y/Wv4s/pjSHIbMu/yK0w1TGjf97nN72clEn10FanrWb3JnDW2ZCwRm3IpQugVna9zhZ6s8vFKEGjUxJsgSUIcSiFrvVAFdeJjEHJT/APCM95nkE4ALny/T1xnP0aWk+7JFq+fxvAaZ+4vqIBtUq3cmZkIiTTWEkgRdiJdVh3XWOT1ICn+KsYWh0VNvdnpYikpba3p2tPqRKq8Fd84g59dYLCPWE1lGJPOn6/04o6PA4Zlss7wJYsoBjJZne1ziofPvLAr9ZZk2357N7L+UlnFWqkKzakWTJWVYN994X28ztZuBdCTPrNagdvfhKQMyTvlF5X/m8JuziVHF47VdGufK5vimUppn8/nrQA20hDiVR6ImK5M4vNsFPWwnwUaVhczWXRf7hA+Gd9o5GXXcf7+MGR8UmkevLXk3+oLapyJJyzsg9W9S+12FBpaHBZZTcYyvthHayDQhbPPgtYbsNM0XDeYnhHfnBlw6SRZUdRI0Rf2G/wStO0hJ9ILeQqTeTFO1l9LlOP+w9blANtht5DB26BFkf3L+AUfF9wG+XYA5Mdwdtf62KrvnRClpOoaUiTyRJLKElg6Y4rGgajg+lCtTbs0fs+s3YJnQ84LRg/f0kO90dn8tiYzyQRlfASUUji/i1Pyu5MOviuF9rh0kqxBaXUzwYAR4ycj8Ft2J+Qa1d2r73kkv0rEiGKMeKy8npN4ny8HL8HFz7ieBd1FW4+Zt/M4pT5NzPa2ECP0RMFyVAqaU+i73nYPhOA2bC4nuXCoWDb7WFD1LrEsyxjw9Hrevfs/f4NQduIsqQSHrcoUSe5/mswTITFI6koUhwUeU0TgYKC5gBUA0xJrNCj4Gw3VEM1ni6M8KSFPjUN0tmOoUUrrV1vocUNLeW+dYg/J30BMMMM0TKpHjq9E5FD67TnzuVkgXn4mbITePO/ocaBRllE2iu3Jq6rm9ciFowlt7KL3xsMhj6Bp4H3uoUdX6TI4Y12/j1aBFGHAeBUutqseoLlGsP8Krmo4h/foQYY0P8Ad63L4GVzmdH3A1+J6H7JrERyI7v56gT82i13JSYt2Scs3cHDEd0JF1dqCr9BOlqaqzjAb5BjP1cA9izl/Pnj4EFs+6anIZeDJEK1UMCbiracvE9MkUisJGSmlEXfdeTMsEIE80sdpri26V9m/xPi2fJXAOyBUUWN89VHPZHn49hb6uAyAB8nkXNCqbD5Ee9OezWXDZVNSjN9Mw4m9WMhGS/ONpH27mQSgBGFb+eLfy7wwPGVWAVGflblw00xvOU4T5Nq138ELe7u6bZSVing2XYMIkWxIfa00k7IC/kI6rQl459EIBLX/xKgtLf/I6Sz4AeM3FiXlC80JcTQHUYeuhs2RLldYkwoPzBsTnpO0QYEJ0t7YNlVE0zh2qztHG4XS442O0SxkSJX9EwEX4vDQbEBZYH3mcDH1mcpnS0TUPEDvfogZijm2ZtLfDcMM1lB8vOQTfI8harGGCiyvoyFJ8nDDoPtOaxhrCG8nj6sUlTyw0xKFiT24DNo9fWE1f2bvXy6Hqx1uc0myjdEvt7Dl5eH6Iv4BP7bYUE15yyQmLoF/SfKwEb8e22xa/xCApR36wne8NPAEcnUhcg/QVOI48hEQP77Jn3/EVAT5uKZvfUQKTqN40/BG9VEo3AoZlvBH4WpOcSLJBQ/d6XaMI4TR6gJc58ZMGd8S8Db67RPAKb36WlpjJBWnvnT9A8Zi4syUadAWaDedV9FNglYTkvnCKssSffpeh/S1HnW3BOIfloxTXTcdud7+jQ4q83taoRcmAH5JxJB/V/4DYOn91yl75IE4s4UGqAzEMJSSFtpQu/f18ke8xRHXmgHF5VvCvSJr9yjyaLpp0q/3kTqfjEUVflhXAZqNW9pjhja9POL7jNWOR7LDqSvJiFZ0q6OVo7pvYFUx0fi0M32mfZymwZHwqbFRoJ3uIjphTHJFtMueQnNmRV5B6WAiEdUaV6liCGx0jZeQANRI6W8ewku5etdxl1M6Wdkt6bNaLQRX8BRHki2xrwNtAb17aHwI89XbNkFxnyeeA9iuEx5n31T5IbPsNEgMHKMBC7RjvA2eu/WpDco+w6cJgw1LenPW4Em/Otuvzpuu8Jcsh/h7wx1EawyvZGd/w8RNbFGl1oT5SBDRFh2h9AUAY3vUTMJG9BEDE2ivVNJiKwvgU60vOcN88EG43n7jnsoyAkoQuiILbY9879JEmfcirjYX87QB4a1xxhCIOBRQn/sfmz468JNOQO+V+ix+fPNqIjhQCxoGYR59/1sqiPTrwfENMPlQ7q0l0sGk+7BQgjw6/Bf7v41eH5CbbulP8tpJO7ooUIbkVoETCe4eakT/meWC0eZti+gV6jwfBwDZkkv7dWJjhEtjjPHs7r+4otMpcH/plRA2ESgaFNADnsnxYUrzdG9Se0cW7gjCXn1nGJz34gDkwLVQssQn7rFWLCzj+uXjFlX8+zxwIdyzfu9FDgv27X3qpv8a1H75VSOuAPqffKxcE8+4Q88jrO3qapHKpmcYflWH2tZjvgoHWyAKtstiynGJwj6eCO843eqkBWhLMh8KRiaNsjmW1vrpMWAuIOKCHzix9t1dOmH+oHYsnC4olrRoBbdY4ko1Ulu2Creo6kdwGuKGTvv4AF5OGOOg97AlMoypzgz/7vbGf4zuSd8N9UQk0HRcR/vun34fQt9BZ8Y7DBzuY2Bz2m4SNIINO8msoS4T3RK408UFVthdMuWxxRmPvhSc9rrGTwPpCkzrMGnyHJErrq2GmJgW3uBaz911CAJ6z0+KklnQr52woXAbY/Od7SJzV7mSfdDCGCdZ+LA+aTR1FNePBIHaE+lfNJtnwD5xLkiVnDCph1b7zaEZpW58EyoNbvtStpKx/4wVn/QWo/fWr7o5Q2YiqHbxYahzPB6EzYfMvM87HD2FugrkLePJ35Rm+bWeYjd4nq9HcofM3FOVw2D8sxE5iNIXnEULm9nE/NqgMYplf+4t7puUYYQNxmpRLgSS3F9Yy2aZpfPDYb+DbV1Insmmsid4+H/nTBeRuE0R0Bq8zbIZWsT9lhGvcbAwgKI5plBykHgb6TA1v7YPK+DmJ+cvhwN5i1LFjF3Db1aBS8BeDVAMitFiBhd/QWTip3o4ayeIbQw3E/xRRd9JjC20CwbCNmxeTaAZ8Up3J7EmnkekMKoBQvtzuCNcrq8RX8EXfXz5QE05d0o3qj+enkb82nSY3EvgTE7d9s0gwWm47IilPy68JeptKfHstuI7pSSLIjdf4Jih9MiaIxJdCVfoONZUMFMR813y3KhiTm+YBZwRYxdrQ2VhgBpk899+oNIr43jXHgF3MQuJtibwQA25kvWzMCBzVMinbXwJhFufiem3bV5R72+obRAgoC8xlFh2xiDw7uJRQWYHB9Vf1iTKx62cUuCjOqRAYZkGt92+kvaWEEYoioDsGQ0ENdEowogNn1aOv0op3kT
AIpP9aJ9UJs0QE13JrRBmX1WtuYqaDaS/CEKWoxBiL1wf7dbxJNv9ald3gP11C0hT3rx3JjBSwl+SxAcAP69RFWWhcOSztS2+XM7n5mW7EgjK9xdmm+22bUS8TyrA/9sev2xa9I+I1Bq/S8SdNgvbXYMVghsQwRLbqJ1BrpTgZ9UNs/qZuMX8/A0eBbtrcYFGLIaIfyMRs7aZgecuacmzF38qss4c6dgzwtG5CFTmajJqb5atGhT6JMoeEG9vDmHN79Dr8WbIz9qf14c93Ar9vy5YVuEtPRWk919xT5KplbBvNzGUwwenmh0VVjBvSq6CgYc7VikcmcvkOPhc8iq7V77ZcvETN6PVQ1L9NCkcK63OLsxYfpXeKCmdtSEdM3vt7HmBd8ZSePiujwp1G0mVgKo98vntxP54lYCwLBzDqXLDcCme09gacLU8CXSumrxvtQf1BdfRyX69anolua0rDIcs+mdXu72pfZ2XUmi84RqW9vWNE++VhxzClDTkR7iJg+vMbKbN6BndXxa6uE4yL0hEON0bL8j1yUF66XZIDMetApC739JXKhGHp16Hepq7s55ZkgRRHLyEKVXbB//o7vVC5JCJ7R1+rye/2mJxDc8rukhKuCsKIccMPWObMS3Mr1HAyFwIJFvadH1SScMHlM07sZk0efdo1WW9keDwsga/bSe9lwZSbRWsMqh6Qt+W3w5UqNCuqosuRMW6GJXM1C5KfeA58XJ4jGGRWwfdm0sNTC4LB4u87wScXn81dU5/wm3mgrmfbabJp01baf0VLUU871VhmU9ZJ876JAdCFx59UIxxmN7Jm7xjuYFjct/KDQadOr59hQpzKPHs8Ft2pOVd6sXKBs2koDo9Qc7OoW7e0APyJ7/zb812JsCjQ3YR3N3Je5KpIGExEm4Dl2HnwbXkK5ix3cCmY5F5n5SC1+c//6998VxueidFKh6sChsS0AYezLqtqX5FT3gg5IiWeWYlbrJeBqzUtpEje4itRkT+Cyk3erxf2pK2sGVc7zVR37WJUe/pSZSRt3huBTKLUZ55BcQSLpWBurGass+IA+s/OisWjMM68jPPbLAZiehFsPrF/jRkqtnLwlIfKN0fgpOcWmQqo8wLMx/ZDmb1xTD8PCyFSJyChcDxliEVlLOCRn95H/1g4wSNWxU6l6TxMn9TM7SKZsnh+YmNgxQVun8mAoFWOfUluwi6kN8wAG/D4TWLJ7+LqbwKoWIBhuuw5LEUtMLUyE16go3ZN8lBM/doMAnmV+Amyd/69JNXoEPQ59NIwdP09Z128lsewhUe9pLJV8lzPe1rD2CzBtC+rfDBHI7TOPKclws8PHl7jq8gw2ZWNU7s4/aitA0blAgr9/QOQCVWN1CYn+QkJ1+DMgRiRXrHVhnEokv1eVCIpRs5JpPQ1Dx+NTIcpwu3D1EBAYgWiSgk+gpbB49Jztsi4OEnDrFyjadU4Cu/wMDr9OFYYBwOx2YCpOZcANNsJ9JHbdLAw3XHMXUT5H+BS55P4FKV7ihLKxTAnd9Tk7G9KD787/k5EmsTpeT1wGJI9zh0W+F4oi0yjN/7QGvF7/LmyX78RT/gqcfumv8L3l7v0YqxOZ/uQWfEhBTY75Wmbo1zeifoS1BXlFOd5olhum5oTiO44wZDhmtlLtKwKBMHDKjucYKT1yJTEm+H8woQVtJcVBaqXfm1V+rsVxvNGB2eMs6yUB8o2aQT7zRn9jU6qugumKQnNdl0rH3wBRIDvPtPIB38fUm/P9QxOGgrgWqvD5PT+fi1Y2Ov8XQ8aTEpAb63IF+9h3jpUdDdnxDIohtaGPNN7t5puULlNbNmkOPtis1v2P1dbVvvBqvrU2BVIwQ/3e/woLd9WRb+bUWD+YutzMClIldQqir8t3XwBtg7t3fTR/gAbZkUxmf4GsvQTtF5eUbQa6HQkruB0peW7Ou+HlFCcy93Kg2v57fEnQBMA4/iAr/p+ZMTzP0fzCWKHVPQBtR2G2XhpyRtVFpljyLI3E6LFLVpGZcDnB48+w8RtKjgV5IhCEfWb7leOtIPCQvbQ8nsT5/sL+9WY187y0ZMuNtTBNLnqE+PCXBPxlXeq/zY3v5sL4p/6WrLTE6+7yV4eJMj9gdL4pil0YDeaN07smOz0fsG8EeCiBrukCe/bsTTZxQPlxzNZh+eEdhipyiLu7HJ7sd/OyVrz9PmUo+QukoU1HMIIqvRGcRgM2lOOJiaTGjLkdJf4DKWYf/WypgazwVeAzGGY/V8I7NWPomKiVZX8P8RcAPCkNNrmGq7rfvWtN6yHq7z8njIvZvpgAsd4e2QuVyzQFEWw3EuJImNIl/l1ltWJUb0z975hHlLre3S1zx8tkF0LqCQvaMwJXzHObXAxifY/POjp6dP3OMIal8hTtQYfq2j8kLzeXxANAMj8S5N85G5YP0NrG0LPtu8vAGC4jx7+HYEtD5r+BD70+Djf0AB/hRM2wAV85mAs1HqwOCrHdNuTB/HLYnY7SESDsL1L0wu++vJs8DoogJ86WKmZygDkSLrrl3HCDvBZI519bn9KYO71OHKyqt9/61qGQBYFoEIsD8L0fRgKsAR2pqm1PYu/AgZOIxBJb06AAUF1JdfbDcV1XglhUQqI21saU/RAfBLz/Br7Koxo3AEcLcodCZJBtHC9LQRPf/3HKFw4ea5rHqs314xsffiya/hdf+Io3e/4nWC5dKQ3cANDZy0kxJFfX4ElABBFHGf3A3KK0qH1MAPCwxM4E5MZwdcwIA1W//KA1EC+k/Dl7auB4vJF25bdhzMLGb4xTHE2b0TpmINopuNj8CCNbmtdoOw8h80PtxlcVgWoXDo7FoNILH0Vn+ehG4pY5ktYkvxWhQhHorprDXPUIQCQ/HnZvbLIeiC0PBEfbJCs8q9IsILg2OEnivx6QPbffvHStj+qmuKIn04l/qbKH2KDk9kcUSId7lTd1YIlq11OMyr3hsxzambK0y8O3Y89/FptZWWWwf+Q1clE1RPWLP5NRIlHoxZZZV239s/Z0icBNNUT3cTwRbwhKBIzbCxcwRodwIBokuopDfjtigFuS9xOmzIx2x4cXST3MaFT03Bg1ZKv+rPqTjIs7gkRiDDR9CwHZDm17AB0pUFTbB5XLu8IRzelepe4sEa9SlOaG+4IeQ+5co6psDrq0Z1O213HED2Z+seN1lx+Pga59bl2/9TjURyJeQ4HUvzAxO13nilLO1cAkfowXfArHzibkDMUH4Lgjp4DQBENlnw/vLWqRk/Y2Hji3IuEhDd4zCTgmBDI0zCefYPVyTcs/fUhN4UM3T5m6KNmdzen+Jivad8dCg4FCVg8KqdXH1awQETlZ4eMlgtZiyWcA6coYPuCnV3Yh1Q6tXNKin7AEWS7FVtoNbBgC+7ujnrr/sBfK6+18V4QP08FtRDMuVif9Mmv+RWg17/0zzGm7iqxfYU7k564/ay1kTYjHDIgC8w44MaBeMIkMytjuWYpEJKijgY6Ed26D4TpfC0v60rWCoU0k6zYbJpE32NWdSI74VJ7fP/FflOtLOW2CdWoaXirH2LsuAntC
PfqXUisNKHEMplO3JOqxDlx268L5HhH3/corBLzGDsJmq5DOGHMcnqlvfhpebInILQzDWl3Mtl72xyLOgyD4q2YdUtNeG4JL/yV6hA9dHicxgfWGbv8JPaqCd1EdJiyNA3vjwXWPiWBAhTBxt2cp+ftYfzw3zooEl1lnbORE1u5F7RNV1qwfx5j1CO5t66UsTJmeZiwsldgVG2GZEPw0VE4tRpgOKJ5a0Ifut8AuL40zO/HZnNKGcjdPzYEeOLTKTOONQuj3JJiELKmTIyWlT0atWcKNfiFA0q+VbhlcYz6Lxvzk3Xzy7hkQTxUpOQaoO0otmgauYbaHQBFIOK2hHrAQo2TGK3RnGoH+QO8zRHo4qrZoLAEgh1SMLdYBUz0KB6I6IOgEvNPm4gHjlBvzeYXzzg6mLlD99tY1Tcn6ypRXGpB12PAz3+yfXkATuTeS/ijnpODaOQ4TOdIKesqd0WQObjsaMRreTKgR/jWlZlFz9M4PIjhmEyYcsjv8gpVGeAOHiMoN1gjuxlbidbsmyR5fn/h5LNrp6J3fs0rcnS24XXO7s9PDGhOba361udo8udkYVrQWQno31c2dqFlfwO27r6AGpAVRHd6cLIN6ef0o3g5eAUABESAbDtvYqaQ1CaJJgL4RuIUS9ld9gmueFoxzRrFQBJfGP9ub7NZq7Ezrf14nOdkrr5xfqDgQt8AVBv+nvwodkYt9oJQVQFuLQUgPvFdQZS2b4k66v9si80iTh/PPphsAGySr0iWcTvXO4fGD1eGmBeVaKlHiWfDc8MRIgj50G4Q5vV+HTuL98J4owVT38O+Yc/NpItv71pBbXER+vpBIg4u/eL+rWmP64SDSvOoyeDM1mTIW6QzG/v1QC69k54zUR+cv2rFfkpUgi1PPv+aVYK+Ru6DU4P0SMhV03BFvKEgArT/+a7HMq0Jen7Is27cmp7YYXGvdvvlw96L9gAAh5vLrtxhQY2X9/A3zZ0vhEKoi6848lij5JGgmBvl+cotzyOl/ReSK0AotPbwQPdpmrhROkHjvlJCbD/neJoa4qGbxBCuBzPIB632t/apfZYbUXrnkT8XYSIcgJiGxmmBGGmgu1wvdXPWUTTZ+PSC046DjJoJCY0eWFM5AwwoD+ibvJF3ODG7KtvjAg/MGpAt8saebrmPUVawHvdAcFLzIVG2Qvv0BlFyuMrzE8IVkkA+lf/nYD/0j99hGNasmSUKRu4WIVhRyoiJu/f20UQKJmMHeLIzPflVlSi0L5wtVvyzhnR1kSNgnaGIrwdtvX/99+bYOvFK4fzfGecZnnnuKV5NRcLpZe9gI2y6kNWijhGiF89Z6WpDUEak1mXankkZ3p7EfbAg2tR/u9/I8WmzpEbKkQ1FJKQt7X5XRHPH2qyVXi3bbFkVD434BBXc2b86LzKq7LCjSOjXOxhruM7aCthUnK9eanHQWPm8qmMJpkgLxwD+qASvFKTIMJXeczIgPk9jX2EbYG8vmiHIvJrQ1RHLvnIGgXNurV7gP8Bq8wtbj5DXkScBJPZU5EZ6SDa1i2f0FQbRG48ZMfLzmDcJueij9Tfit/3c091cqYm8Xka4Ijih+JHffkwXmgzdr9xZ4dlGlLF+PGXHxjVCwXDqOV8SF05H5Ghe+uV1w1SbY8voa+m/qDt6/Zl+aL2on+hf38k1PoZ+v/Jk7dRCmloDQZY60eE3OYs4lefBvZM01gc6e6Z98FhX+2g5ESfIKig9w6Mx/oPyMWuVR41xNXxuUEbiV3zUkUHZ56ymBl9cVCxvlFzL3CPD8gQWMFyDdWuOZi8nII9tlJM2iM50gn23imP5Grf7ngwsOBiI6y0kF2mZYrPinKCY62BJDDz40tioPvdAmKxfU+H5qZZvaFU1EJbDvusIlpPSfW9y6zpBQSmTbqMwY1fqW0c+qEj9PzOB/aYAewBAYgOREH1h4C6l5MP4u6HO7lbKgQiNK3fa0P2Otr0l/Hnv2ddxyvu4iyV/Ebsfui3XJRJFsQVRjOyo6uUrAHGJIGizisUB7zn6DeibAbd/jRsOKIjrTMjJbmVp/+9KxmrQcC7DEfasai2x9w2SMjTi2Vj55D6KzZwMAnwgteBRL/B5YI1i85IgygQhX14ZySecP/pqWFf/usCDir9WhY3Kuiad5VV4PFIdPBurr0NVsxzr4cV7pBdGtx0vXVHYmMNe49/tSKsmthRC44oMtzxXddaxMljVP2CCtAAttOnYT70VzFngpZ4Qr+DEEMwcYrXwutIAsUh8jlIucrC8OlQhYvo+O9s7/pmVrOrcmdr1PecPeqh7ymGf4KMPjuyjKWPmeiITd3M2oM5GEFdIIUaHwugAI4pCkaZ74kJV7PdddPvNtAGTwEJKjAaFY4nfZ8VpiImUmAvhM4Pe9lyKFtLk0XnAANz0ECaAUbKAcll8ZQawFK4YWgbhbc+sGTLtqmH3wW9u9gdTkVwNrGFD8/QcYGfJhDKUSJZ2bXuwj3U9P8tWCfZfq75uxw6g2blqdYRZ2/TpIs3Tjaya+aZc1O7OeUGtgsV81rbWWPnVMZP55fflBXAbuwMxG+rWTT2Qtli+q610O6blvnrWwk18ed5y7oLQITpkQcG6XKFXrU3LanOIwy02Y96A4bk9i0wd0AfNQt8JdTVD9gh+mI62tVVOpVbQ7klMvjRm0wJjuDwbkavtoH17+EvE2Ai8Qj4mxlHqH+hsOEvxL0j4Vas/9hOWGHXMEpWHDi/sdKGoXPas53Gxv5jrhanEazixhSSTJ9/wGcLzCdxiqNwL6qYnqwKqa4acGuZ/BM7Oi66qdEOIvfCxbJJ23UKPl0LaUiLGJeIhRbG0UJ1odEqIFY/4aVgOxnHlQm8RqBtDfC7u5axUX3Z07qau6Rr+rV5JwfP262J+3YdV2BjOXppYQB6fJ8lbZyCPYK+j10RZDrsEwqYVQzHVBU0GDlikDT/TB+RtGwORLAFf9MfAG5pXs0V2VdwvU3q/ZtwfC34zA3jEbP3d2vj67Zz6bisVbWbq59yq/wGsMZLZUVmZB4Z+zXHDTMM9Qky68tavD8hlQ61zUzLP+2x7HAV+W/Hh7Z0Vw3B6z3UUCISjgEsL6xwW9AAUr/CI/rc7oWQL2f2ar5r2sF+S7F3UTrJtxLGVEykAPMyQCsRrv8AsqNbG5cMqux4VGxAVO4+CyHj42RK9qsTC9c7t2/U/YGH2Kem6wiZvFQZ10C9xK3Jh3F/J41Pi9dxf7VFySZHQgnQzJ9GvToVf0e6oN/d3dDFJ8c42oinu4l9BMPU0J1iZvoCAtUqXQ8lATwau1PFKX5mU/FzSYLBk1GQzmyjJYdLwaf4UN4F4ydr29vqLYvfXKDMIdQOEZvsfS3uvaegy1hUxO7FJu2pkwgnHJpjHgdXnr0Mj7QPVtsvHaR8ExdNaUuP0nTe1SYXodkx382Z78e7f160rMiUfdZQBxzRQdr5F/50u+1pp0LtX8KjJH2LdgcqMd8w0KVv7Zspwf8UUaIl0R0N33hL7n/XQipuchrDko3cyu+r4myiUOCrWvgMUAPxd9lvV8SaUkk0n
Ccyic2odYUJUjDmOaK14qON+QXIMe/bNPqxJ6xaSEHacqLS0R5cIAFEyQLPWiKfAYtDTJIhd8xi3XxsVDgBA7Wl7jy71tA2cKQl5f378PbctQFZT0Z8jLTA3DHDgLpQXwka2f8Gr5wwq/OTwqCEAdmD2oHx2SEtlIFiTmOcdbncyfjR97jZsyjlggAd35lcGsXf8s5iuA5r4Z/1X6VCvAp2D83TP6y77JTTSRWGQ8TEPftl23zfy59N9TjH+f4N3f34aNl2aA0FrqfpK7EA9wb1C2W2Lc1Rv+OLnmOTzmeDdjWWX9F9l8+nqA48jHKHziuNINZka+4K5tL0X4HQcUfWO0niaxFtdsjCQG9ONt4GyFxy1XOQ1o2U6XIr2lyd8aBXvdnJvIi91f6RbifODN5BcVLpG7qtd64s3VIsFG2S/r7Ykzw707h4FZihf/WL3ZFrSYC71ZC3uKIOKVfHV/2dxMIfiWN8yutS3NIO6ZzxvUFKhAEx86gnc4zsFV6Gyx3d065VnnJBaLlnyX0UOpUKRzUg+zbM4eudGvz1UWXdAjX8F3TkhTZcA0mtbKAQRRX6QmmPdtL47AikBvgTwang4R9MM2DOvB0W0llkakbXM7cMwxW7uf9y8+w608Dx1r5RDTGMdCCT24uylBfaepUI5EdeQnyGglREncMHTNmschCbabtCeSWk/2nY6qQeb9j/5E6YbY1BTrMeA28MuZnFypTf0Zrv+3Z4T4nkEMN1aYwE6/yRXH3tkf+CCwzK2ar/CX6RvrfPc4OvOXqAnKqa6kd/JI3xJGTeY/sp4xuYrL1BnSokcC01RJRJkf5lPhIITPHbnpchA4YjMNQP8CUgl5frZAO+47sr3P/THLFIla+x+AkKAO5xu4q09Y16S9gNfwERpEXyLs48fNvJNKg14yIk9M8E+WjSun5uqTGD329tv80P1+cGgq6DDxjT8hl8tIpI70VFKaXv7npKXRUuVN8Jek4lu/q7e0uJPe9OKTRLXMWuuEbdNdyp1hnJ5wxIB27w1dH7NmRfvtTw/e8Xagbdh+wZkk8THvymh0SZSPok2SFUTHWXTM9ujpd8LHLU8XMwfIHvraxKdYeNdeHp8NEhGLQy4EYtHArXXV/O5BFhBm4s/ADMdKozM/iOmwsj2oZPhpt4GzzYhACk0DtBWlnKGf8QRs/hb3HMBubwkUMB7xDz3Dfl6k+f9vsSOQj1AvzXEbI0/w3vrQ/SBLdDg41BGJJkl3XW4HeD0qwj1r6aPpXm9PtcswfagEL8HxeiXQJne+oPBKOxHNKxApzlVB5a71cwupc0ag1gX/ih7zdYzSqcNMCzlOBigW3CyiO8Yv6yM1g2e1w0nV6e8qs6dXQ+LMTEczGg5ycG7LhP829CKIN/k7r3qCm0fHWgIBuoOCZUr9D7HkezC9sWaGK5A7ijUoPyV/wvRiv+f9WYVCks0CA90RKKz/Py0dZdcRHr4eiZQNkj2nt4kAP1/FNfLhKSjUV/fTwQoKp4LCpZaQA350nr8yYf44+at14hcaIrDDk4hSGDG6oPd4fAyEK6NNBEXpDlAge22495/3uebIA+UkVhDcfaLXSTTgSeZ98hxD8SnzbKHJDwUgWpnwrpLXvLHkYs0g/5heJYCXVNtdr3IljkWLy1xZC2sbuWOR+8AZqGTuN+OLIAxF8iTYoJp4OxSlhadFHNv7bYBKDxowXKkS+bMc+2qQWLoXu+OTsidtGE69RsBfsbyBYDY4YXy/QeTPywPph/h2PESLOB0J99RFxI/1sS152rLXyhx3Aa0LIBiEY+dBJ/b2k1h0sqC7bbLxS8I/8h88gVPGBl1JpEyGtxQNvlBvw+uinnHwl49+44D6PHUGuN48cj2xsObv7EQkJUj/toSjQohFB8ZehmvEArC6p//lmW9tGtulg3BEoD2qEbseKCiP0xbPs8JxfqPbnU1P4lDujcDHT35WiXy+tO6xRz0LPWpTtsVKkHH4WZB8nOmSDSe7BAP5qiRfS3w6c2/yJ9tbbllLD1cRGcJsZCMniwf1Dl3MvNvNLWD/9WZI9/XQ2Yj3DE2n+1Nj/1RWVtqVcH1NOu9lOIkD+4nM/jiuN7CyYKCZ79+p4x2hTgAqBySjfhAK7CaRCWjK+D1J1DyqdwBkevvvWkbxEACCeCZydN+HSckew4VVR8UHjRat3SPlp68Cvx6LH43P8OSMyc9cLaxBIVV5T+nEz7oRAb9XXmcyWeSwSToAitVkt9ZTCEA1w0Gb/rK1HaatzQFnw287EyAR4z0v3tfh/CdjEFMbY/vrj9HIsCVL2xIb4Dq9CFrpgBYvDqPxIWGkiGhUyhTthLke5ItWjPQvNHyL6Sp0Mvkb5V/jXxoQs/86NuMWerH+T/Ip5PlC5IBV8ji73G3PAqyYYQngAEYRl+mAgwUT+0CuA1oiwyvifTyF2hDk+qMjjDlfJsc0B8Vf8jmKPaN5qaCHbTZgbqXDhlei7sCPWSY+H/yQNiDaAWyOoemd6oXS4xTtw/ELC+weUmIe+BU2Tcn/+Rs+gSIsCHQcVvFVPnx4AEb7PMPizFSd0IvPS8EN8ZrbbctRoS75Vnc7/dmhdX8rXmDNYD4tdWeA2MZJ/D1/iNri+jvmFuyd1C0h2RjVcL/GWKCEWfbJJZUNXL91hQACrZR0jr5Kd8fYJ7t/l1ddiuJ6spNp1NyepL5u5Tyod8bHPVxx3CFWMqs0uu4VNrfFvdg94ihmkxnSkV+Cf36Apud51/SHl9qDmLvyVNJyow4B2IC5LkEPa462xxuEeRfPQ0xL9aGVu1DGdSTD0dwbB0xrmxLPIkq3a+mbGRm5d0VT/9Dfvx3/Jg9KIptQ8KhEy48ypn6wGYLnWJ39LAVZ6euo4xT1HU6niM/Ja7DnozzErM0TH3fM20P11hV+uxGzzbxJUzsZbaE/ClLrSLDCaaRM5eH09w+fUvZ2onoDrhYCsUPVHSJ6Vm/8mf3ZInW9UXvCl43qiEWwgr3F4ZLa1UJzBASvrNICvax0Q19G7h4cphy2f+HPo4hSmUq/P6YFYhCV56vGNQPvnRsxeNXiyoHP74ZsPK7NWh+ym/4sxjlWD9bhusjha5R5M+witV3SIMbAcU5gz3n4svxTCuOhgPeqvsySiPLSnBX80RAKOeCmZPgO2XDjajWGLs7RRlqzpJQXgvWkR7h9CWPUGku1oHBD6EQR/1iBwgCJ7ivpAjLlR+82bKBKalyh1M3uD+y1HqOSca6MST8fFAuXwcbjm98AziRf5P4E2Uxb8OpXyeIHa/cCY6wAZOzM2KPvk1zRzSAW5cKrErDFE5cr8eJxeMJT4Kv7qQLBUSl+CVO3Q3u38+WSHjuv3kxMK9a4vw9xh585RIvQki9h1o9krWnmfr1u/v5evv+/O3k0ov0V+bCyprynzQ4iP+pHm+PZZK4gGyKT0hSwvAiz0Uq0Zy4gcjc99If3V64q8I75MSVSDCHBsiH5uUoHb0wGSHt/5KNeF3faEqsFtCGAn0CzTkMKcKq8TKlZ+W4CP/OEyps8cyRfZbJN/bfxjNoW8/APie
h1zqVq93oEW5aYoQhbyHlWNPBAKozuoFjwcMxAU2lIP3gVLbOCYWtqOQexovQfrUojeWLZcS+qqLtaj9EtZPTN6qiAwcQ4JG9GFEpRIeEKGt9IZHhctgFDiVS2cM72a5GtHaFjxwJBSXoPYsMeH3dpCcDiKB382h0k2C4TOhLB5fa1JJXoI/ILZY5oW6uu+5w1QHpR7CQ5+k2PTWPpSwPSNgQi1Xz+qqvqwUAVnvQ5+FPOzkYlpM9jAppHkOB4TVsFncflTlIjN7Gc1P6emvXTCL0yvlplgtKD3lYmgV7ptYAVCqjtVZv1IqIaSZnnYBxIvpLAkyTRSviHqulGB0/nU5Ee4z/Obx88nK0tsuN9FUszOY82OqmOha3axcZivpgfhoAdp/jlnUTSQFHfqauyT88OGJ5bivmcksd7EDZGoy4Z3kZbes8hQf6Neixym9pe4rETapjG57LQQyiOMkbu7+7NL7mLKCD6MrUZN2XGydS5FBy+Lv4njf22X06x4K90AI9X/SH3NjeL7fo6Z1I2UT0otOJ6zuhbSOS8kx7gy2Jya2GrQnMdL5VW+k1ZhSfNSi8oPSzUdIQAO3UwYEEUlRzpvSuJ66X998nGW7RnNjbJFD+oE2TdAajMgmYV7UGhkFse4AIKDdDaI1eSvpzdr5rBclaY9eCeb/TxwJna8NE8jHtT4cTuXbcqNeZYrBJL6d2eCgu5re5N6rtw5Ef8VjIGB4+IbLWQjmXlLkTddyQKdkN5FwvviEoZ/xwFva0w5KI4yrYGzmTnA/AAmfWXbs/mkv67QXJenXRihZ+E9QR0+ECbf6hXCfWXBWxM4zoNKGGtO5ahUZ/uVGTxT4IoX2xCxhwUqi0aDOPsyMajax0uP8eXvsAf4umI38JwkqU/td4SOS8HftSxlQZGMSHYxcsBlX8CLDwqLFHhJO7UbmsJURs+Yvy/x1BG20fLXCaCfeApRzdA4bWWOitGaFuawSszJs0w+h1M53HdEuj5A4iJFDMdBAMcfaILx/LWg5t63SN5hAKBy7L+81N+gnPZxwYmJy9v6Tfruul2nk7hFjPqC7AZrodF3tWOQWaZxtCn4n8LAXj9fXiMTpj0cPZfMovmtEzjgw4ih2Ze+e3LO+BWbVkw2APmCVIXka3Jv6/ltpimFfs3UqHHXXz7H4mOIOtpIp2iLXH/V59V4j6c5/H1q3lq0nwnyiHJsM7MxwXvKu2kTBBcpked5PceIAMPPu/Z325jMRA/cuSt2fQapFSpc/tpOp0OUzghIQ1jCgT2ab9yHAgkKYslT530bfnntawE1SSHwvrlClao8+4/xr7T7afV38x7qnkCf/FWxtMZ37vNUtEmfoLGhxGi6ztf0lB/RRsjIoGtL+lgPK60cM02zrK6R8K2Ou40vt5mZbgnXKxAQ0fqbrmGaF8NrDKFfaNyI6NqUDbrBR/WGujb749N/aca8pkIsKQeTaEv93E/3wQeyA+ivpit1Fxt1JoBVXkv2d0gUFN2q8o9FYFvasEb7Jlqsqo6HMG1xhn54G40amXQE23e321qFS9GPoj8jnn1A+S3ujeX77FOdbKoLJabssDPCSDozwpBUa8G5aZ25XYyDBFltYYtXyhuPFg6zG63VkIMY8Y5UMr8pbM/9i8Sg0sJp6uQQH87FYAmSytx16chqedjo/a4W85/sm86aMfRj45xM94xFV8VwUY8IlHEfPxCkNstCjQnTTdG2M1IUgfdHad8s970wnri1WnIhxsK+uMfBBE09VfXxppxmW6Lp9H9C93wxHp2CuHEsDGspAdBtsd0Q8rVYD1T7KRKHSN00i17+d/MIfy0DGVfbzSzzhd6Zeb7vrve/pI2YiYHvas3xRv37idA+h+org/Dr4yseMZRYFpt9NPaMAAXtCWGfO+sxtCXboYr03xN3oCxc31gzwH5LqXuzV5WOlX0aN/PXyPXX9OjVvfdGfxL2KbFBisr+VFM98UAXkanXtWLnASrzB1SxoBN8jO7XLy2Hi+GB9VIRRHf7zF8yFo6mitJljC6c7U8DPRo9GuKqtwQRA3uHo1a4q28+kzr3w9xjBVrD7Dce3v8YF7sqj+XTzLAG8llcQQhi3A+/4IccaaiftarbTj0qof4TZcxpEKWJ3jXAGshqGyhfe0YSTH+Ytu5KXmAg1Hh0B3mTDQS9Ehztg/z2FYW719dW/PVvBx7yI4WK0u1ik88MNXfkWyfRXryM5YXkKOscCkQPl/PxLnAQP8W0/qGAp52FLTYpdHXy0ncPsk0gpni2ok09e7G4ifeToODtbCAEaAl1gChfv0otRcxzsNt4mQPpV/2hWK/6Jv6g/MjpCdegvQRCGTer7wpsHr9imB6s6pJo2as5gAK80qBPRaJI+avyD1DBsNRpzKAHSgW2H7IdAgUDEfbuc5r+1n8m7sson96lSuRTWokm44nOwiZyhKj+WRmjVl3vB/cAQ+GPnlrT065WnmnR4hq66YyUFUnw/GO/dvRwUYZ0PbPolucHVbc7FDQihP9l8w87cXSTcxYB01BFbdZrs/+OTu68iOGMqJZmg2mvnukT8FDI/nk5kaKC0FO7Zos3VndybA736OT6mnTRjd6wNIRkVzUkASeBALboUuKsa477b/nV3AyxSQj3hJyVpkF6xNwR103pmu5nfTZVtwzDUlvOiunWAf0SL7cClrfp3gkKj0PHi6t3kSxhP3wpEDJpVTX0jeo1eyy7T+4ODY+prjhjt3ibBrjuia9NkKQ5tN/NJ2K6OYdhr8Fd3IuKIYMsl0NjhQRODUE4WKbSWuvqJcTYfNNkoiT98HMoavRH7M6x7i+Hvp6iTx7ayxF/klyEuWzzpiV6GAUZqsnaKJoRej/u10Shv3/rWV11p/U1j06/0vJkmyVfXeZDNZeyPRX65Qr/XSMt6L0qnfIQ8n9DQmHWnVy1Is1jKqTeN7aGSOdzkglh/9DfkZUHCj3GNqH/XBQfyYuwIkh59iyVvlGnrSTv0FLLNj9Ca3b4QJbPgq9wL9iZo6FqP9gfRPSGJ7rqCfoHZ1bsqEABib2s7DUgjHgMfKHhQorVPbGqxRnfSuEn1zZ1zGynxD1bH6BI6OpnJvwIw+421R9dX5LspdCw3uqjFEndWbzjDO1ABP7mySVaZRqa2n69rJ27B3vGF9pLyCtHh3G3Ydw1VGkLDYpKfAgoh+0XVHaAbvKEHR4u//6mjrvkq49GhTzE5jUBXfQbUZw0B/91PLUjT6xuhfGllneSq4NlNLouKb+iSbPvMqJnaJzx5P/dQPO0U46CHIG/G+OrzsBSbB6JfG8m9j7sjYHcj70hkPkzgt3lVsfAzA9+Idry6tOSILr8VEXkNsS2gmn8Pl5UZWrQd8w8JaTGKYGKXy3hxJJIsJRJZ91MxPOc3Oaq+alzxbS2RppQWEm0oy+jpo0U14KoJU1sQxibz43FwOvdr0yCn83cN22wWWdyd6G0rzBaOplpPOLwmcOL77IJ3J3A971Uhja0D7l1mR6akXScWC5kLwvbON7YVEhEIg4uXFSvaARMzQeMWvEbYEQ5lC+PyPm+
qfgaNSlhbdaczE0J3YgLfVO2qcnxevgi7lrnE3mrVt3iMOzR+tcPp3GfptTK4mKb2A+rjgBSsnGNFC15LIegEhb9KzkNOsPKpEjemLa+UNqCcgj/jZ7smIm1/AwI6nyOC34/QORjYfhy7iKGuSe0tTm6gpeTNt/r3ClUudLPYaz2ykpeQfJSLRaDnM2SxhJDz4QAUxH+e/3rqe6wXtNA3DYBK79SDLnCqgVm4eXSKx+CY5bObwKfGEVhx736jLSNM6Ax1sUfrGWNrQyaGK7qKLeO2dEvoFvhZbIHbMniYPZAonS90NJtyg9uxnui93Yae/wbRPGuyLFIp9Hzo4cT/7seyKfHWu2T0P3BCJxX3yoJROuvG0zjzKYgSmr8fITgXuHmuEquB6lrhKJCRWXoL+qdEwGQzcoU8WinTzl3jAQ473sN3+ThXjGbgLS4uEOZ/ZU/yNaevw4mX9OrQYzVyh8D4+hJr7aOM2xdY0NaVNgUxUuM9PrBofJqrRvn1Z3AJjaIf0/3x2hwDI0hHpg141bGQyVzEWf8Gdrf19vT9qd7X1aw3gGAW5N6RQihxVClnMYHAbgSRLMphbfIw+HINLhKSb4AfpRK2DxtMhPCpxJFhEDdNLvSPng5kFIxdsHcDVcZm56H4nuVDSNXqhK9CrC4pJEp4JI96Tf+IjqLuoAxPvkC7x+loisUeRYqkqpAVoH1gTfCcW9z1ASJXF9UMgk/h78ZaCxvF34XI+2rv+/3dURdY09s9OMDd0zYcdWhHwT9frHwUx6BLGew9mwPURLc7MIMEqXXeaCKrIpU4WMfQzgyi1+pF8T8ci9Amnjheg51RctggqCDRf0q3K6vtnJ5HQewCllhYleL8TAkm900S6Xn1KKO2X7yuymAQFv4i+qvLDNuSPJwMnjQIy7US9YmkfaIp7mt4Mgd04V05b7upm399kffq6M12RuTsTt9s6flHWh5HeMrcv0F1iX6/F7nD9P0SjjA+tABx9Jx29AYoHrSLCvcxJivTfxadQbHHbjarlChitYvo7+EuLhQI9Ov/3wBIlGbwbS8WHGTxDkXr2c6xAupEGh0Q0dS+3XAHuUfur3d5PJIa0CssSgdwnfo19XVjVdiOkYRIjrmOBlNPd/+AOew9I7aysJAj6ej7ZpnXgWSVObChjvsLoBQlDiXemd90mbzXGguRctsfENRSy2hzo8JIRKxvpAMc9o3uGAL8oxeqrKJW+rfJ2ZYcHDF8Vc7/utt0ZaK9Z+uqyL6S0fmohJnZipfeINLiCrk17qWVh4/B6q3D+qONz869ZYrVLVdfaulT6F4pMPisoMlHvDzTHE2zyBsFGPq0AhtrjqbfyM5cu/o4yoSlsANlVPVVO0Ov9VeW2IR95MPEgThZxTg3U4/GpnfaCnRQqZHfyWhSdIdX7m47qO6sw0YEjLHQn33LwevYNoww4iABowTEoVzlHAtD2bjHtOoHJ6jtAoLqZ1f14fGE5BQSwf308uqUd4cIVWvW4d2O15KJxHKVoNDokQY7f23gBdFBNNO7vtQHMjcRPqOi9H+9dJIbqPfSsLYwG9GVzHxzWFLql0nsY2fw/VezMq9mxx6MCoxY2+zemMQ4BdIHwFT2gNHVcnvY6Xcpl48zUeYqZqqqA5+1Doz4i1CGjJrsJV08hpEP/Mw3cgXzqzGGOQoPskw5XWKT7nXEHTLK3S6kmU76kkrdVX+JbOtJwdjqCtaOJw5nh4Xd91f+eG2epM7J8QUCVVRrU6GXl8racV7sCP4ue/FXU4bJKDh5bW7i+6GrrVdady7Axubz2FyZLiSWbTT/WwPH0RN9uuARBef3GAZ7qO1Tu5vqjB6P2k7ZAe69vWjPzgr2GpyvPbO8wnsK9mC/xKRh4n9PTurwZiO6XZikPZUfqcKqwQS15l+isCB8YMH4xu2f9PPJMyKWPumIBk1uO1r0WhTr6+qdUi/RmYX6+NL5sryB02Dndl3/fL3ZobJtMVzmLbxHNXybfDCTkR0EEqHyjeVojw3HdNBojH5Vp8LLniG+fA343xEBu5scJ0p8/C0Zdi5T3yy5JBUhmPeda/uaMUesPGl6m+izmr+ZnOYLPobd3QECczfRG+zo4sxnL+PYndYoLStBUrsqtJg5qU0gzMl2O9u8VcXcimV6kP8SHEmMGkRV03ZaGgJQ2RjwA2E5TjOd2I5RPXd9hfnrC186QvvBa7/Jf5kfxkjCkQ76Qc/zsV5DY4/ElHbIJXcMs4QNqfEfb2vW8mlHUI+9qssSAc07NZf6mhAEgAuIwAdz23RGGfJVV57USB3fieEp70bxWKttGRkLfjZv4tLO3A5KoM0niiuX94C1MOo3wMsV7N3t8u3wut3lS9u0AUmWCjfJnuCxrqhkzi0yZrrz3a/Jz/2kZ+oiqK7Y/K2PRqzs7DlNcY+0DHgt0b1ZWmnOrG/wz+43XbKkh2+Gkv3upthbj0gCv61maqNpq5NIRaltH3+O/xSK9/ZoMvu5w8QhQUSyrrDi+gNuBrHSHNzIjtTKp1J4eW/i4oAOEK447DBXJ5oqFaYrcSYUPrt4PAmP4OOF+Vc8Vk6OnwJACZVem3B+eG6O9+sgiBFE9c6WdmUu1oIa+MymOC6/ivFxu2C+ilCP1vlf2yTps2Db2ruW5E+JmT7I8PW2IMt9zmF/mY4IcA3cIct4z+TTv4NSSz5/umdd8W9vxlKH4UAFsGwSGa0dozWL0Bv4F8g0X5j303xusHoQ5luf54X/9zfoqOpOirQ054olyno5u1o1qO17KvlPCnrwFDr8uVtvmj94FC0jK4+DtGBA0uM4ChWLLfZTTralzRct5LABmoZqS4kdFhTmds5RmjLminHrCNmEcrHxwKEylSWCKfk4cdaUbzQ8JeAJv6eld848D/NHDTe5f5q89vLFwTNT8lyQOm9EEBmYk9LyxLhVrppFH5OaIRfena/2GWWb/AxewmczVy7JiVI4UO3UNPExzE50cqon2MPacttJ8OstkbJ0h3XoY6vyoe6VntHYPLxTpAs22JUjXVkoX72l1YVSNJQTPvx4bJe/HlZAn0GajJcoLYRpJ2qM9WYp0AP6y9RENPfGL68Wyv4ZzaIdn8ZE82+jXxDzJbYhCPqyVkqWUUyjr6Vjgl5J0jgU+0I6gN8QHzkNf5dq8LlwV4r5DpNHxeGV4fWeJWTKwo3bdBUSGej0YA3b3rR6yrRZMye1U5Pkdi6JEiMMbL1smKEQeI/cGMrBzhIyIwHiytU+rE783Jri8ZVgzserW2r3wETu9DXZSU2GqK1qGYGkhivzYtQDW+liV2c8/eGug6YZ0dzJ4eCIkGGwRFHKga3zzkHBGz0axj6arZipfsrQdUbdlajelY1SRNgqDVjSxozoqYQvr7oQmsz/b7U9HfQQFx4wZerF/8Jz4ztNruufHcsmhbc3WU4egFfOCQeJvH0+AP2jfVv+zCjWX7Rv0fzBfsY5KpX9aHZpl1LtNZETQC5hovazpCBBgE+0CDF1kSdjFXmAx2fVwfMUo79HRPef1IHGzdDn5phXL2z98/zZDeaWFKJLAb
olc3NPuI4ZqRjQSXnALv97lWNWbv7gJ5Drz9EMKmB/oOShhEn6Gic6/mZAtZCAWUIrW0jrG0f0GW/O9A+ffWV6cov6ZkngDwGr4/7gz6ejR74X3ZsmJ2MOSVlLfdnSJ+PZWNqGDXqa4Q5y+OLANC3Qg2vHkRTv8Kg1ondC9Q1GhnUvQHGh1zLoDiqM5FBFp5Pn7TVSn3Q1edLAteuWnTl7K9RvE7Qx84MqHVGzSLqBhhXcNwqYDqGrKY+ZrVtryUsfj1cgAngmj38l3BUb/tZac7eB6Nh0zX0w2lwOhqraszWuNCo+hOHXbvJgy2bxIcqpzWqGMYKo8SMjClwcxmI4MR0eZ7r20YceNP9sNrOXiiVRZ1CL9tcz1TEV79nFFf2Re1ClNf+iHSUtFFsMNMM+TCCrqLLE/2dMa4wP3kDQ8T+ZsqVCWMY9CsjcWzGY8nLHDVLXO9Lj3sqU6FzbMzffAQCemUuKlblB3Q188f3FJ8keTZvBDGKeTwdbrMDV0miq9MX2P5rNf37GQb7w/LcX3FmpsEOgA84zcPt81f5JXfnrHBoZWXl1OhCRsOsebcZ0Aj0QGQBLTGZXJdunO3sZJ3CV+LorUE+379ZRQDqtfsQ5CTdLiaqwJORNPrZq1v2olyoESIDDoQ3tiO1a/mpF0WdfKy/bpJSZKuIkqW0EviGNCdLTMee/Xqz7wJfldW1frIOYsTWnuxyeBzhUkXWWObdUO5rcwzVoejmXfhXjE1OA6/BqzSaTVnIvVhizI1AN5757kcruYI7z1tNV4k1SLr7Cs7DzSjcZ1jZRj57TwacQCQ+BwkutbQfGqdINRaePByojp76jvrU0ssGdxdtEGYJBUQKzD9C4yFFKDjI/M4yRe20mWJt5rQ3KDF+3PrgpoD/y1v71u5dS2gPGv3znN6EXznil13Y/J1dRAT1J8Hf1/IfTI/d0MGGNESOdrV1wbbB+R5I4Tl+LYwO2e0v0WElnD1BnwFsjf1iBP58LkMaQGhf/ZV38vxdZLmQImyq6tf3GNOrExlUQT8sXOH4DTkS9AYI4v+yn5xTbCgQSFyHo9gZEQPONjMhJgTWvRF80wqaq0E9wZjWfgilkW+GEH+FepROLZYq0mRQcMBbbNOSVOx9sEn7HitCQWeplSoquxQVLatOty/+ytS7VOX7XXQzAbpG5lYcoXS+eL3+nepWyUoDd2R0bUDoxj7MAEC8vABWrBAGzv0w2hElu64GVmH8bq07UY2FtX10Diwx7y2pcqRy4pv4/N1I/rEFzmOHV5kpRkQSRdRonXB8PGoi53h/94ceo+jHlj/yNiBCVr8/wvpzsd9HVtNFK+Uc1HMrQYNcDrdgr1L+8ueyI4NEWyydsKlqdtST+9iaaNfvR4ndSfIlmBLJJ69LNbx27kdq9VdicMQm+bna9Vy5KiSv/d6EVk6R+4c18K9ZMqw24NuL10N/uflvVKmof4iN2bSvpaQzmQXk51cXmQHqFQWwzR1x5NzhrpCwmY52GgOM7o2hjKPUP7sI6eNzjwx1HuPla4I771Km/ASIih6L4ryTp8eHa6dfRRp2+xTj0w7E8qSFw0IaJcJMPxsRRxjA7KVM4HN/41rMwdtmIXC6lRaIjhQvIb3O2S9/asMK5OvBjeTxwAQIpohmQzi5fgB2WHf/QiIOa6OzPuivCJTSI8nfCm9rMaIeFrxQc77eY/lxvKQdoEfbPMQJ14SmUI+ggJvhWO6OD1puIExoWNcnjg0kgikqCaEDVwi6+Av4XbKXn0EEXZJoUFO5IkVMd7i2Nx5jQS6kGr+sZ6ZUwcrTXytOOtQ5BZDCf78aO0iChUk1es/NX1tbXI2jOn1YAuTnSbUiKXYriegkJQ0oQDyJxnGP4PLqQgBUlhkeQDJ8qM8r6wIsM9pCbLLkV0BFIvQVmCbC64Lgaiiq3qufX0jsdtwKel6WxObc9bIe8dasVXesazXYiylqLCxa5cdZUH5Cv7ni4xnJDt54jSCAeJu6IH799qpEMrt2m/7UVsOEh9GsYft6/eQEOKGvaODmLidaZUfUECBvZOXL5aIL1fjPqHSwgo34D/5pf5eRY14WHUb8Q09Mpa+98VQl3AWPye8DUnUpnU6CU0PfLiARRKfwKxhppjLbCM74D7C3qRjdKoX+TV7mN1HJhb9TaGFijopUgVJkwi8qCCoYIJhRhCcUZhJ9xPKGPjSsYTkqtHWAauv2y3ebmbUoaHiChtAR4ceYVkws4iWG/M51OWsyv4G7aIHm78kK/TOPOuV1fULZfbTxmD+MCttDCyhLa2WAUX2zcuHXkDXnKrqg1xdgBp4BtoRZITc29MEOZCRBrbkE8TTnF7dT1PG400F8VxX4zwnVfbUS1rCt4AxfGVAr2TLw9gdW5GVIZNyDZXynXHco0CLCmmZ7N5NQzSfMs/uqKyWRp4/s+C0fYpgWBt4ZFNKdJfkcC5LN4xbYzCcvB40BbhVhujzbmqDCgdqU/OHvogO67vZviCQpaCd63e7ldMC6BF4uqPo0qLvw4lvRmo6seI25hN+ztuTvwpqmmtmFutYmyBBcL2GTYczBzQ0/WE0BpYCRE6WuF2JyWizkMTjCo325Vqni0HoYP2/pvoDsLPNqdvy65BJ5tMFgR32BRk6QFPyKO48xtMfOf8PWa3L5v39TuyM0nuisd0UhfDX5fEB67Lvm1AnzrPxK3GsOMe7rvuvWfSK1gb48fi+sQ3nihDthIxnMK2ArPO1fkVfWP2+lstwV+5mPUyonmQcKX7b+taXRQpe3M1fFDyNDp/0bEHl/mTDIETjGzNXKAx5J7PCmeCmN+oL8x580dhhGYbwNZ19gX4XTBI+d9xzNVFfT2+gdzLhvpbO77sx8jaDbn4HLBmHbF2ipdjth7fKCmgWKvne9g0TvnJ4ehEgUug0nj/qYKU99qL+muUtl3rDo/4r5EGOKPe5mn1E5DiwV1iMc1Jzt/1p3cLqCfKKqEEz7wLe4yjJRqRWN6j+H5th6d6WfYynQ/m2MRJJfornF8ngDkUdxxwfp61T6HeS2xaHn6H7wHNczbXnz+1ucv7LMzMkTpt6YAZZw+MjLz++gWTkD71FryOXwcb40k7qh7UzMAV5y6LsExcrQn9rx4geGZOwHLpJFRrY6RMuPE2sq9/me5HX5PX9W792Rq23qavH9KuenuFk+cdusbr6D6bSAM2Ln2PfVM34MSui0UHuncef4ujxJSu7TXDCHhQM5zuy+ynl1dZHSr2NeTvCatQUH6F6IKUHkcl+ByUxO0Jlg8Ce1mwgbl2+QQX2IIzNcGyhXg/oVKrX+Abatxy2RhAmYi88rOsCJIa6LAGnwPy2nFRmi6BYzv2+79x4lhX97n28OnNItfXKi4ZZgOylQSdGRZvtBUdgkbB5QcneCrNjd6XUvHaL518hmN1K5BQ9H3Ci2hkn2Nb6SYJcS/3JdMFIzGP/6zsGeYGFyGmUB3ag8DEkyWB5P14fI9FC1jHTxfyxd17KjSAz9JXJ4JJhoMpjwRs4ZTPj6oX2namdqy+s1dndLOudILVG1pr7wJWBT6T
PEQFhilpYzNJ84O1wEhUfj1zMCGYbSzvEdgyv3tDh7t9dcfplIuJtU1+mvF2vNG8zRmTPcpLPwYQdOtXbMd+K04kMgqwMGek/UeXM73/5VbiShTR0rpGT1/SQF6TpqjnzsuJxqSyoXBqtaFzFQqPHgDBTBJ+Zz7B1aEn0BiXauRKSMgdnPzqyzaFqSNY/qY5OppF1I/7AMpN+QGXr++vVR1I+kjtvpqj+Vel3zSfs+siwjpl/HpbvHKcKG3/avQzR+QzKovH+W8kOfWPfKiDpezy7SROzYP7NbMUCuYuSOeYcio7B8wIsSzkmujUNUU8SdG941BjqasRIygK0n3iMtmbbN32uBfhcHFj6dCkNKnY+0C9TzMdRFhFA+jx2ohBminaKOVQ3kspY8obHN2YlosEjQ0MZaN1jvfzpkN9+jh0chiqK9fL1E9tdl9uO5lr+FMG0BCUNmJt/my7aYe+UWobXxg1povM5i63BjO60a17rxCbhoOz6IsaHvbM7KM2hkirdFfDOzG0PwscfusZPQ6LSAB+kXi8PIVWKVv5xAyJU5eAhGVp+KzQMvRI3itkoYUvcvP5LwN6g148esCZw59TCQ8ozW/p6l2nzIjMghww2ReG8ZJFGXr7YBqh9QvNYDyRbSfAmai9HgRSW5+RgvAkK99qz7Tp8CWjGTQX7NFKLHWfcHpErQTPKn95s8W3+DdCWG3wkouc/0UdyFbzL0Zca8LpGOE+ucFV7nnPSvCR/ejyNMaVDLXhp6cGRrfCi7ruTbdH9ChKF4ZU5buzQryKOqspJgRAR9i1ia/bbmNzwmxdHmnyN0/Tiw/ZckVm9TRr6Nr+aPiQ6M2IFCi0SQtkKHcz9sYmi7jr4jL2j43TIzB3VkeqabMHuepEW09i70jkTz1JDHcUGyiKTLHdcvnDRscvd1qw5RQc8eHAP+9t2QMmmCv+KucstwVrAMZH5Z74GjUp6kxbj4g4MBjBMI+I9RXiPAqN2esBVexbbkMjLXbq0sXuvrBitBd5eabrrtR5XeJsRDLyOVaCzg3NCJuEf9XgokHCR/6eNubDWwHh376R/UGhBf8a168NQDQNcz0ixNqJN9qVj1P3tcnejHDqfbUYgn8Lpn3PWy4X+K8r0gNpF60y/bO744fZTukHB2SkZVj1FrxKSHj36fwhPOyQY5XTIaHA7mnAF6aT15xZqvMP0GTr9DQ03qZRJ2mqH/gV8LCYYyCFbK9+qS8R0esx4v04NtIc3IWErcBhyhMEbcvFRDib4Tapp4IvDopUH+rt/Q957La0JtyMa8uor60l2BnVdNv0011hH9y1Dtyhmn6yrrIx5vYbo9VFEeTxVoB8b/kCo2nIuEoXBvK89hNcf9YsjGnpXCxCzdWv0IKWz4xfp9B0WONaFbvEZM9PmGYitFm1Q08pIndnhnRX7Xr/SeTJzh2F9n07e2g0zcKzCMnLIlkfK1z4thNhC+8+PG+5CYxQHUBNgaLF+upIbRNLIvO/7w2K1Dem7feMpYbdmyqZ3Bng8leOb0/ZscMKkNz5TQ3GCBIliFpIZaaNpt0ve6iS4S4z2rAR3JwBcTjlIyNPLMdt5vr1EEBSSj3x+K4g2R1D/RZNjDexU3AcqflfDXLfU+BsjEKXo9m7Hvv5FFpFuuW4TahNebZtpvEK79VTzAp9iW2jmgLCTT8eMMo3tC6/xZClpiGM6w6N5nLI6yJBv6DUA5alllNbACn9rJbHrbC/UFUGs0bGl6Ps763UZ8dYefhSidaiQV5QHnbdfDK7NAS/mpsn58a7N4INHjwx0nC3B6IuLuLx2RUhOPIWaDDCqtvRNXYV4Sz9/9op6jG2oHrHxeFEQ9Z3hXETikRcZG7PB0cfeJlyqYt8J2HhcpiImQ3wDXDBp+rZpW6wHNTBLBVKHjvMrXSO9XyFzYdnGWdGEM89GWz7QSVSobxbOLUF0N4yAfXvo9orgIu45IwviWD2BGN+6uM0OwHYa7UGq81Uauh2ynrrJ3h/FIXhE2qmE/+NIXSKSjJTRKU0uAVCq9S7asOE8+ow6CFAtECnAYDbq6sdTIulbsXDqJ+ks7by0nPkFWAEVi7GNdAzUG0ZG/U16bRE3iwpm7X7UjaXBOSMrl6fgIEIpuYbOhE9ZWe4ylYe/bzY5y5Lab0UrKvTi21lW//l2a/6MAghTFrNuWj3uSMLtpY88RizATPN8T+3lSfUT89Zl+wvigcuMch1B3h+SPM8KwuDkFQOEwV59fIwB72OCapzcuy8MspFXXyq8LnAk5bElj1Vf0E3/keC0UQU7rqFPZio++dfv1pqVUJngPWy3tB9YRRjtoiq7Oyya8qYv4Jo1ljrx4Hdp4g7WsYcQxBuz7m0ef/hRwL2ysMF1qWnI+RO9sIrUZ+dglsBHspy+3YcaPAuOorlL6yIfNxbzebXjz8OJ6DpFyHzQBScHB1+I5IR5lC97m5Ur+ah/bmS8NVi1wTPsyrUq+Ek860irbTcQzgjLrigis/hXDQot4o/dLZZruWpyrixG66zP8/mCwQ/TDIsZBAZx4E5WloJENxoVRYGhfT3ZSyXlbL0YRSAPRmfuj5xfEXlKp9dqtvB7Q9v3QOB+nleroFuKYDgwGiUgfwfcs+iaVUa+gkOy21oQzjoPSlG233wj0V2a2wZ8UEhmu34PviYqWB3LIwhM3oDvNbEhtw1q5oeWx4B1Ur6Ew7LrcQK55i6cLryHi1pTmwcUTHt/Hr5lHm/djyzhyO46kNHznh/8T8g3cOCyznRAtyd7msp8I9nt8j7XbEBqmMmugzCAJNr8PON0T4D+cBUX5F2z1Hf4mb324Q85H/UmMP5ho+pT4lnq2QdTsq8vESmf1IekUUgUG/uaLK8UP2wy0Vc9hH0IP9kaIt7Ks3+x3xV3svk06MkRagJ8tOd4lDClgVa4RSk0ilY0TN/ErXcLt8XXa2jmX4Jj2qL9eLZUDdQ/ALBpcGgTlGay9yXDnyGrUnf3tj9P3i8JTYcBAIwxC1tbVrRPW33TOyDPF4RucZ43FQqY2C/R5KejG2y52yshCcKDen3VO39a8SV9tpQlFOJJK9WFMcfpuF1hrWcaPx6WUpbBAtG98v6JV3ftBtGHfsXE9ZFWMHzCmFtPEGCvBy5pykHWGZlCS2GsqNVqKpOJRPdsCk6HopgU/UR5T2yZc0FjG0m9lNti+Id9coyjE8bJlPHrznhvm16h1FfBcgsV2r9/kpxM/SObcXLelHzh4IEVgLmUwhW8vIlwrCoX9PFrfe0Ff7fA/DDeDFUP9vUzW2hmSsmVk0xSPGXoxnM0y+pfQU+BVm+/7zx0x+IO0ytRPRnvaPuupiFmX2WG2fOYcAt7yizX6u2DCARXs4TNGHm2YbFUx1ws3j1TYIt9uw9X3tvbu6x2OdPfLDN1DbqpZTSdrvfTGk4RXqx05hYimPjZAmNZeVsrg3ZRNNdhxwd8ebvjsf3iHcLvJdPp938bH/p7ydtNKsgQQySxmmJLbFAwkOr2mye9Mt5DcmgsxbExHlaprtJzyoYgxuC90dPpuPIF4IP8Zl7c/15cDT
hcbSbR4fyWGMF2j/vV4j7qEpxM2aYwHNFGl0ALoyYTTZO68QOnwt5JeMI0tHLuUMMMw2L2MXfPEPTqaLZhGQFqro2mWBNBfH2tEf5UEDkRw5BvNTiBeX3Xm9StjIHmNo+3dhYnvrv76SbbFZrG7hujxsJDzevPmZz49O7lQ6dXCbN4raOuEJdvrbqtyc0bLHk74LubeHzRE7Tlpe63UjlAj8mslRrlLg6imlwCZWgylcdNd+7FbtnqBWMsWYFwaYUrJvzTg2QXmWyCEg+r5U8Velr1Gn0R9JxocTRrFCKVp+SDPYyszTd/FprV1e0+/K/lIkshOu9nfKv68NhcaGCA/GNm+O2KzFHIYSHHZvJxzK/upPdo2Fa3tzH8RS0dHE7Xsqgsecq60wVaGTKvaK6XsJAtxLtGgB2XmNnCg51fOBqdOZVQu5hvhn10Li2fF9Dkh9hmi6BOW/f0JYqHq6eWG9/bQq91UQz4ccHNgnBTvZjjhKOQw67Z+PuBlvbDXeokqBoKYyqf8HxaaVEFT48nQgajLX1Hoo4uUit1SiR28LiaosoyWgTW9N86W6GvkA+qKeOzUQ3B9NCv0doFApCQLS0x3vRZ6tzeNZt83LLzs4sf0UnBnXKDlFDykhyCmKoCU6bFxKcMco4L3TN0WBvGw1mXCut2kv+WhzlTVfa8HaCiE/lDOIFnd1mx16jW2EZEzkIUyZh76YYOu+InhdOzjm/iyuLp9Hsrsl/pWrlzEVDJuomE1hYb6dp4hip83sg+isxds8EESsWSPsn7x1B5SnSKXsubeM3/evO7/KZhMgnlMXYPiuV2dMPLe98d4kS79PlzNriQxNo0vyTIYKUbttI2T66HZ9vm1+8+OX0y8r3JOwAQVVqvfQ6WjN4pwY/Syf7BsGW1C05xWahVOoUSN3Dnsdf7qSOAAD6FXJQuK9NlhX4OCHFSzEiX+Jc8teTH1gT78A1W1oe+HekrkUcEPoUZtDahek5jaBtRa7EbUNncJvyqW3ZaXTzv8yjDvowG991nGHiuyqpMxhrGujsf5rsXtxSSGLolj9wSPyPhumdQ3SouFu3YqLiHdP934MslRPFGPxaQrNWzKeuU/b1sQuID96jETSpLvnMo1vy1OMPmyT2WT8dtLkjj/3B/vYuAr7M96TPxpYAaJwDxKAOGWQYfO7vIRfg+LpMBcC3pV/Wo/uMeLg7oL0pSIeMmxeFB5vu3l24NmROb2jzgRnrGhPltqmip5RiOIgYmu+Zp8uDt9kEQnvTqOaQi3dAyrzyd6oako2nmw0aatiIgmF/rQ+G+s3tV1BTNf2Hc8W4NcJgnSEOyR8qRxnnZXdBYK7ikUv8ESxNocXPSgHTFSjnnp3+KDi8WAR/bvb609K3IF/aOO/tGIdk++Fnub5UBr5VReZORIpTtWy90SwJuF76tbxDBqVDSBpAokA8D3YG7/qoPJ8Mle/MSFrr7ruc4jfpgVq3q/XEqGWnky8pKi2Qdl2KebRazAD+99EROAvNLNOvAIbY6uVHiWe+skP2x/5/0Dt1JBuXfxlupvvOJqaM8vHeiD0QjChLbTI+qErvfQ8a3/2NkgO++PfX+S2JpiS7QlJPcoJPsYhKZHZ8xqwZm00QOiE2v3P5H26ziqsA6+n6b+s+K2eiEHJ8DczFXjyDCd8LLGCWuiz6UxQ/IRdM2zfo3lRX0i7auvYXxNQ/OLWN+H9qXITtSqisoYGFNy+G8WeTzeGKZ5j09srV2Svkyu4lWwL09klTu4erssrv508pPYvY+aDKZp0TXVyS+QuOqqbFDFz8hjDTfDEPWb+Qgu99FfoB8L0U5WlVOpHFiwuQZ6F49Z7/JXp9k6bT3BEyg6PprcAFicJXbE4q2wFIXLhZVRM3zzZg7WdtaAIIYcA0Nue+oaDYlydOA5aOGoWHCcZKKg3lqDvw0e+7yqPEsFzYyJy62ZkqAObhDbXR/xzm9O61ri1OOYLtAbe5jQXVBo5jRe++b/5beaz3XC9L02I5bRr37grjTeH/+gFkmpEVmn2duRv9KOWnS3Iz5LnmmcmgR+QcRwQ/160Cjz0TZdVU2ZQCx+tzuu07aV6E+6G604baOeHVm2AY7LwiKK/c4Olf/zZRFmj/XYytWo7yuSrHjxOo8TARrPCOpP8s+yLV14cUg7faYRu7QNkjnz4w7BtkRjBuH4gFMMVsaINb2OkozLVTEjMeo3ORQv5b4iJ3uN7qq/id5v9qE1vLGj6/1owJGvSq0QDlEFI7FYbflOq7CNpPPgsyykSKt9wfldMDtrv3bJfHvJgH220ud2zvgmqyJJYL8MTajPI0S223xZRs5b4AY59kHSP9NpaiC1XYmDYVaEMMyDsp8/snr6GjEvXCcEPnbWwtwPyOPCxjPocWHwgJDAxR/yK+HaE6vrAmVAcsrSG5FFsjJLXe8VXc3D2vxIat7RiYRSae3/b7VbZX6Gme/hFWghunVSx9GV9jqC2ORlklIK8gGxgF7jXfW7OCEDx5qoTO/RHSumuRySeUI2hbPlYhNchGGt7lJA0K8pakBZkeQhQRIIml5/poUVSqPkmNp6zZ6t/wGJZcqUD4jqAauZnZVoUZVJE2IFGGs+Nug40v3yPJj9ls+RI1Sm24kK6sKZs77lWaoQbKxc6yNvEVG8UhzctqRygvis9TciLCI+RAaxLF1wUIkKvYjxnaTU3fLHlrRS6ihGa2A3ch94uA5KyjJv+dCSD76ooSxqRQ9g6RNOlhWvzlRKI8iliy9/YAWdfNYPI9orAdOblAdMxaQbhm9vEiUhMJubNeSYKTkeSZ6tOfgWccOdUZ5Q1x1j9RsTQhJZKeW3N+S96aCz/PoCrqnEsOcrtSISZFEmCKl2D64t8zfqvWEF7dn4Zd3NdF7I3nyZD8WMiHaLbB3bry0xYQyDMMLG/L6zIoYpHWE1+U8EQ8xRaQnCfO5ZUpMIjU6NvT3b6O9+5c20fkWWAwR9VdBfUHaAO5+4J9NnEkDvOWP0BkIseUtk2f0gY28nm/4xN6Ef5xrxTuSYbBPVXhFU6cktaxNX3WGjvFLBlz+4l8PNAqvYfMXhvYLbySeLBQb0lt+n3sysrK8LG1whoYztXWLouxC19urOT9v2saj3gC7MWR56x0MHIY8dAoSEbcvHbTHQuElpXdL0At7R7lNhmiMfgFmlptpAeiBULIWv7t9hH4/qxsZShzv5o9DKFR007n4gBUDxacor9txJxZgm4YnJoCLO5bTrbKADB854HiKn7OaNsNVoan2eh0l8Eknkm9eEFEsut7H7H7qw2ud8kE5eGqoaMF6AlCHmOHdccw/MK72w+IA2g0I+SaFyZWV/QKDnXa9rgXUpczxu4BBgGxL+fes87t0hini+g22nj1dY7xbvmg7/xLnvW9SvtAqzIdXHxCDZOjK2XqmOo5dPwlpiVTLIq6rMfoOffz3WedEjl117cVOc3jlVM9ZmmpTMfeN7Pkxe6wv4N6vDySxXOdPeb8jjbAchxBIaJ37aBOxBIzqSmWy4UyeR9DOHpd7ijRs7fJFNSeud8bx4APAjv7WH
MQkvgPLTr3dG756w3e5vlVCJfUfVENhVEhXU+l5p1gINkNkETq7pvCnYkiyYj2RquRsEaDzXIp8PPjosBzd0edkeHCHxSZrPqnHvOogBJIvk1vAtVAnVSzFD9tFIi2FW3VxAWvZTz7+GVwTBOcMRhkhCZj6ccQFLvVH7/aUpML1asB+XDdheo6mwgLr05TqHXw2/WjKyXuiKQvWDAVfVxOvzNuxSY8aeYyBgPWz2C/OqfBtcWopFwou8agYYVmTJKr3Jhnj5CJnWoVq/uUWtiREixkl4VrFgmBfneNEXPUsAjTesiUV0G2mZsygvvS8PKvfTWr1KdmhDIfCSquw+C7nqVZEnmXIyVS9hfbyoxnCE2A9fmhd8FL71v/3hyZQxhijlG5arPmZm+riaWR5pyG/AU9ivqsP+ioR2mKwg2goQ8oRojdHxz6TbROgrMldcLPRuP3Ijq55iDt3aagLbVshnnfFrKDzM4TR4OOnmDv3vB242MWzpuG/QuOvruUBlL5Yr7bByTt5xGopwobSgrT8YQCvOOBl9afkLCLXiPHFj/I29oXfKb3GJKM53xHLkaF/MLVH03xFSqJI7oiNGL9F4wobX8NCUizQPxLMcZfgyMFZNwbZJvIZA4p2o5QOrhsE4Q4GaOZD0emido21wGKJfA/NRabnfVaU/SKOClIdieYtEuce+FUkDM9uWG3PE4XVCwYov6xILyfeaJfs8v65jaJN6e1uNdoQ8sFyNM8vdoInDkkXeVNxAKBsEPi3rpQ8KL/56c99N+3UbG26v42hAucEM/WXywT+d4pi4A7anME4a//WFQkrdCSa8TK/HidGWH9V6OyrapI+PW3C6xH/haQdz8nKxshhlQVNl5vNHGsZiuWkmqtH1w8aYaNRn/3o2Cs60kPwkeeT4mVnyoBiONMpi+hsB68Mc3KyuPu4KJ+0XBOvlR6L3TBx9YMRf6ag5hgnZAE0WtJ+oaYKm/y30hHA9Cd5Z3/TEg5yyqZwYpq2y398lDL2Oaxo07jUAmf5EswD8avu2MCZQjhVpUo7hGCLAHkT5p76tU1FUxnV506ilOFPbCnhITOO0BWNkAqGtrOlzPPR0oMp5qDEwcUggwvbFb7TxiCFFlRZbtT7BgCkFxHlngfDwhbXndYgQ4hQcqZBmT1Hp0wkHubvfsINyWLmEtCgkhZWkh5K6Q/0HJrMHdysVyo93U5gro4fiOYLqkricEinbNN+jukZzJ/Xqqtf3RzVtK1c+iF69rCin026iFT9uPTCloVfvrm5uiwRCUeZDK0UVsjhFbHUMUXdXVPfrxhFTcF992okWbkcQYfkdawA1pZ+X3uQmNL3caRlBRZ7bQLVNIUWupps6TZe2vJ0gbSnNb+j+IVdo9qCFjAvpem1g9IYQUqgP9s3HPG6+Mcp86EfpgX2cVa6WGfO7n5xkfkf4YqIm/4IqdGdlDtm2NLPOoGf3SfrrMRzogpC8jYKF7kWyWGvFqVH9mrRyUp00UD92o4ObIjDrtKNmfgvqM6vjbHOl5S9c1NzuB45eoR/ZficI799gR90uf5crT840USj/ojcmFg3NOKNJiMBkrFoRLDlDP7IF73BxB9PnVOQ2/UhPHFA/2GZV+5wDCEsw4P1Gdla3gIKJOGzLWmai8nmAw8IFv7xQNSK/pzHfDyrF1VxFWki5lbYzb77XCpvciJJKm8DWgN4rVId7ZpEOyjP7/D5Da/184noC3CUQV9HqumWolwJtGdl6EBrBPlhUvt/zyb6UrOS5srZr35GZBNWDKrEyhiequbAQXmBKj1+NfigvcUSgthp87+W7oSl7g4W78vcHSZ3VsJYQjty2LV1oizA3m+iVxPGd3KQtxRYyj6k3uc+ZYQ65C2nI86WemGmqogfJ7AiqKcXtP2TJkcQ49GNqh1ow9jsBcZhVmfq4m+7EFOAoeeqLMZx9fwXrTulz787sW+zmluvBu8FicpJ8veZuPuHDCgMVsnUgasO7SeBQXOLKMsl44BfX3b/P0hRvmwuBWh6U4p8zZwP8IFBrlg48LCVnsg3w1Qj081FVDwcRMdiaNo3TE8rWFC/Ik6h9wFnY9884otbCROsoRWuzyTCJw1aJ5ooWHvcoKD4Y31lySpHUcD9UKOmHPG+lCMifK2VDteOLZa6G3HC/2ptKyiJySYmJg6AAu+jh3ZnqoIvUYcsRlcCwspvC18/wd+Y0/Av6daPwzWWechwK66ge63COKudyVjiPXxp0NdGD7wYOzwn7Zh5uch3NZbX1wsWu47qurVlyILpGQFpitMdZrq/xRff4cp4E5AAHeIl2/62Rj22V5r3pSnY7LDatEozuUhoI9yodEk8mEZlmc5vF9Y7EMO6xLW0E+RB8GMNExmAVV6jYTxmse9FQL9+K0XtvRiPW7OdogfCS12Clm+wbl6yVq5ESVB8zXfwH3QMF9u5bgFzTN3lobRq9dCrTjrSAPoLZkEib5eXIM0uYuaY5J2ws8wme6l/p7tm/Y1Yk4WnTPXWrC8ivQTD7f7Zl/P0eK7lF/u5Ec9FLqsYCS4+LW3rjNAfa98lWI18+U47CYi8dAbQk5CXlw+KXxx6wV5O5xFmA2oNjh+v3aXGsbMn7BgSZj7Q9ZMu6BtUZVNutjjdbQ171DqWoCMnD5sWzTyzo3tVmx+xM8syRyIZGjyTN5DzZF8vQFZmSHeSPnH4+nofjXA1F6wR7B1nn4UQM952JdZCnSJUY4XJrSDfbHwyR1QyqAZBZRc7uDBeSZsh35yBcZofu5+D+ggjgU5u5pkN0CxyB0JTZ85zXNbrC1VHqIQ91YMiwY9SX01r6Ev4QOhFHFKlQBktGvhAxOwSlosAwnwSmmjHAJUjPDeM40HcI7Cg03fsiNW8xAO9n9TCYDIAnpBdBEyJ2s4cyghZPnMcxrIoNf5fgBtTFy5DBL1IhUI+zDFno389im8Vv4E5paDOZv6E+HIqgzOI//VXwaeq4EKvs35zOhrc8yS0GHK85pKJZfOVwsoXm/1TBEMg3g6eAcYUYBS1i9wpdJ6HeemHKovQbAKp5nb7tI27MWJojH/EkBfQrVjTuUak1p2uUvef3CyqIVwmSya/XZ4Q/vrCbvo9mNbmfqlCFo9xel4qlZGW/8+0L5Gtd+0rQD0aAmQ/N87uMMAIoPz8H08woraHyIFWPC+6EUc+vn/B4e10eVmb2oHgZhj6HcbBr1juso3hfwrEVlP7BtHkcHyDljYE4vizmHLQxPPXbAR8vvCbvznA0h72qiXlxmp5wcRw3rj9cDFP9YRJfX6uzILpal1/9bBjx2y5GkeTdXYNCnAO4FdQfaqwkAbw2dNWhDx9eGmDCdky5CFKu2/djq5AyQQwk2pUyh1LS+Zjm7iuata8+3WkHbMrvcvFz7tOTUhcU05RsgSe4SqiOOTjwH/OC53M/kRguTfBXtfwm/vXpfWIvjZVMZGgojXoAMwh73l691BAHBZy/oRJgcaup2N6vwhJezeiWVM/APwXNNGsQVCjpuFhHZ19hbP1afjjq8/tBfigtAWiinnh2pNQZB/XNFYWj/Gb
dG98b4mraX3+dPEJBYJO+LTgj/X4e1mOs63eZ7iXvMocYd6xTtfeXu4AoJrMGiiZyAp/0ZgmCzVcogdszCmujPTznrbp7aKowil4tfODRwPLsdyeImNyy9vFiz6BMJrQyzbKd7X17jOkcCOEi4dfm7T71Vcf+XmcHjvQ14oW2laxajQ0snxJKL2ugIm4XMvfVBvH1lby5Rg26T1YaEekOWm5SyHN63vDuCIwagnaXbzM1aT4OABp9x3bOaNDKf7tMMYhxMzs4Dt5Rv0MzOYWMn71FKeu2XqeQ9tfw2x6rjgrRdN1ZpN9y8FJCAL3Lxc/Zze7tDX1LMzHHQtf4+C+9zWZTE6yFGTAyu2oFTc1SfDMYd9/BG2NVKMEpmsWaMi3y8u0YPSW+igXo4JYks9bj77Qiv1EEnx4cNJ8Lowfn/J8REPWvtP8X1mVOa5RwzgGLgPPjN9g9CNLseKOoxAWDYI0vmEIaTZn1hwVPB1H8fchcVFobrOeHp5KwmAX+AWxxU2lxBi0zbEIuHLIGvMgsV5+rz/9GcB/UspoethThw0fAqtbduJgPfuUYLH0BCGG8KwD2pXWFoKPHUO+Dcp2DrQ5BLZcnGLF96c5M+feh4KCXlZDKfTNKdfK75sWcVQBpU12k62/cjhI/D5RwFFxkSoBT1GTRCqpYKXKCoIZzlirTQ4o3jF/cZ115a39cI/L9ftbxR/vujuy2debaBk45g2HMRCvCV9n5AQR9EecCAcMsJZy3XbzTpvyyq/F7/PnTYofA3TfoQ8RlHCT89nHnPIsPLlLy+5uxHOboln63ZS1brpvRd6s9oDlNsd+B8VOGNcjhoFc0P7pSKkKGlY9eMmuTvHgSefi2/zEA3BphgcB9WCkaSKEJRmcNaVTBk1Eodl+WwYO6+EYFQZ6Ky8ftrHq7eh+pRJVDsh9Pj6nw98vCguK2FkMJsO/S7T1fZH5YRW7wBSaxYebviLBodvYwxGLzRPf82sb29auf0uN8YLbXq1E9H6ZRxyj6Vt92jbwdVKtYz3i/qix6rHy6H7YpZlKkhQ04ROCcD1R23m5k2Zqwa17Ims9PMyCV14HvzvqFa+BFS9xiIWjphFZ5z3BtJQMynl5FF8Wd8Wtu4GLS1nzFqUMwmP+8qq7/HsBSyZu5sY9StBWXGvB37+CiSXKosJFmdYb2ti/s/Ahi6N6J9WA8LzePbxJVKB0SLn6T5528gd6oynO7gwehy2j1bwLrL6r4SyoL7VuKLPrZ5YJMjLht7nXhemIaPkNOVN9IHseyMnRG0uuaizF3QC8rPTt8la3nbdqinT7xGKxJUw7OBcoqYIylsa29gTmGwniDQhq0PNw1Duz8EtjPgBFVUcxbpEUZPB4+c2HAyZvflp9SVNeaqGS59zcr/wZ9sbezHw8QgU+Vld1+pYPjeG1vFwF89ddEthYcrz9PFoOir4F8Ktuq3vqDrYzx/i9CtyMd9CDQgT1J355iEzS2mljvfY3QqbH9hB+YoQSQVDpRM+FG+5tpMJzAxJtdHBM9maXL0uQoJf1XG30nWTSXVeh90377O6sbgXHUoLJTQIa9NVTs4Uq1stH+hpsXzEW/dOlcxPjX1FKKFYNjWyToeI7VR0TGqpLVqbhDvKuol2kI/kN/IO45RVGIIK1Kb0MaNA/8Nk5jF4topci3qwbDTicoICO513tgCRFaTqZ1Ny7qCFl9fsPQmzgV/P1XcRnZOdSOsqMpFTBwQiJiCq+3IhwXHBpDg+WssOw1CKeebT+MTa4WxT9u2ONKgQWZaGMYvz38RfKDObTt3fYePbCs8lKAWqqNDB+Qc5GZR2NGSA9UK3L7rz41sUCamMs0F3Z7OknSxn3fLqc7Ya30OHa8dZrAwAqB/aKRX4yyl9RSdd5+bNa9H+wl0Tj1pqHpMMOIHO6LT8IlD7uHoiGtAkobIIiET8wDD+aqRXuo4Rc4TEr6lWWdwUe+UdYCfmUiWFDgpuaz6dD68DHD9+Aubkn/JJBJpTZhMDnVCjcGtY/87tX0WfwppjkMWb7SJ4x3TK7suP8u9WVGM302Bajp2froTira0FcIzLgVoHj3zlq7/W4vHLyQXyWqY4yXRKAOxRPUwSo8OnMwkTko9wHgCe8yUeeBC58f3X0zjvUWo6Zns/qdhsvmJfYvqoNsUKnciZC8InEuIZQk6Ey4jNIvk8YaQFK4y6YahiZHudn5YSkKbWzx2dBKJxZODe6cQdavsVhCbCeyTkjizlf7OKMjw+GFrJO08kLKAoyWZwdc5KHzzxZfw2XoJl+f1eJ+5JqxNqpAk2JDozVmWDvdefddJ0qzAOlIWli1Qs2m4ykN0k7pQ6V/x+E3ZxOjssdr2zTO5dXRx2KcJsv560ANtIQwliaiJAudOJzbBj1s55eJyiuZbN+3MES8N7ZxYyXUcf99M2Z6UGgafPaord4rap6yKK6tR777qHSbAvUMB/MMq+AYV6kDtJJo4rUvo1NrklVVPeotTwhvzh24dJLMqOIkaIr6Df/xansUo+ADta9AuZmqqC+5SXH+YetLhuywXUl+aNETyP6k/AOOsv4Bvo2HWSHcHOW7NQpz4AQxqhqGlIg0EkUyh9YGHMVjRRV/eihXIt+qOyXXb8Ay8U4z5u2180O+48X+tSTS8gdl9C+UkDk+C2O938iHX2Vje24NJCkQWlyM92AEeE3I9Bbsmem90jnV77cRP6RlBDBGPac8n5HyOxsWnvuPm7O7CP4KkhJWrfazU54ml3LeCAH6I2C4InpMLg5N6loHw3EqtmQi3dhUKGh8qcrvJDIuUZvS+HjcvtKfv8GpX+AuigiFjMt+5Vh76s8SIAtJvZHE9wk+oLTKwkBxAfsCREMo2STjQzBcR9CjNQz+TgGpqxz6tjOmOF8xXatbeY4oaX5r69i8/GfoEQaY5gnlyNGrdAr5z64T3V2/4tVlwmpM9eMOugMNgoQySfQrn6py7p/05VX+rT6UXntY5DE2FfydBqhSlPKMjhB/39qnQjPf4xwKFmvlHaJvkWLdCd6UePLpT0f4JT7CHfS4fRUuUjo94GJ0HQf5qiIfCOzyeYI+tQhOzYmRcYvytXBLwDRAR36zI13EXRDHyptlVMjVmHmABxBz/nr2DD448aytRJeGR2OwUdkYgbuapkTMXSJTFDZRciW8386HqRkP5IlmVv3swa3Q7i3cp+GyBM4BuYIC6/v1lVQyx19Pod62ACRAujajUUl/iPT4fj6bBZdNhXfQMhUj/GYlEz7JP5724WYOhBKAYaWPd8v/bHhMqAykOgt754KF3nGeIvRWN1rvg7T219XzQsAcE87BhEk2Jzpji0TsgHvojSuvtLDolQJa/uoUBhb/5HWWfADwlgoz84TmlbiqDKjDxkNnyZrKjVmAR6sFxOekTR9gQvRr7DsqoWiYWlSZopXFveGGD9EmZkiU/BEBG+HTXK9AWGBd5HEy9JlIeUwH1zJC7HILKog5pqHTzheGK66i3HBNIfieQNZi8yNc2EBHlqyzfK/p5i0OVYTXosfVC2saGaiPQ9k3ujVYP35hNf4k7f
DOx2KYbmGOk516G0pjLtHD8338A3xqs8fY6yPl3Gt9vS9xOTaCN0PTrrNfYpCUAtfbz3YHvwAOTiQsQfoKmCMPIcHDu8zFtVz5BR+3mCxtEMEk+q4q/gg+CvXWPIZlnAn4Wp2cSbJCffv6XJMA4TR6gJc5oYu9O2BajW8uAbzC691aEwu5IvX95Q9QPCZ82Rz1mgxNxvPKhtkzckK0PzhFGcJPv0vQ4ZaCxjRgnMPSSQzLqmH3e/iiY4x8WmNSg2jED1E7ok5xOxBbl/5NmRvvhZHxepDqSIxjDom+KcbrcD8Pch2GKM4UMC7H8P4XSbO9xKPxqoq3Msz2fFoOkQ15XgBsNqn5gGnO9On8qQ23hEWSwygL0QkVdC6gj6XaLfGVMcH6tTBs4yFNYnCS8Tkz0Vc9m2NwhJxsCWyVWIdoLZa0gdTDSiCsNSlUwxLcZGkxI3moFtHJNvmFeA+KYa+Teta0ndNTtV0MKuMfiCA/ZF0C3gZ689Lu6OGx81U10bbWdPFot0B4nGmv+kFi+2+QGDAgD/PVY7oPnLm+V+2T3wCbLwzWDLnljMeVOEuyX11Ll2lN5mPYH3BnyZXm5OyC7/jUhQZFGo3/nigCmoNDMHoAEMa2fAIm8s0BEDG+haJrTEFhfIwNOafZLQ+E290l7iXPA6AkoSsi4+Y0DBZ9xNHg80plIH87AN4aFhwhC2MGhZHbmfzZkJeoa1Ij363QueRRB3QgEzAOxDz6/DutLDqgI89XxOxCpbXpRAPr+sNOAfJo8PvF/338ZpHcbBp3jN9G1EhNFiMktwGUSDitr2rpczwPjNZvXYh7oPc4EAzOhkTSvxsLC5yD87gszpdXvii0Sdzgu3lAjYRCepk4Aueydiwp3PYNas/orC0gzOYXlnFJBz5gDkwLFXJsxrqtqPEXjncXL9vSz+fpI2FPefvVBuhl/u6XXsqvcW3H1zJpHFB3uoN8QTBvjyGPfPrJUUWFi/U47BSG+W7ZcmcMtAUGaJXF5vkcgns8BdxwrjaIFdCSYN5/HYkwSfqUF9unSV5bBhEH9NCZdWi+heWnHfXFwtmAQlEtJkCbVY5kA4VlG28vmkYg9xGu6GgoO+BiYh8HvYedF1Mp8lLhz37vbHf0E3lXXI+KoOm4gPD9n37vQ332ZoU79B/soGOLP+wiNoEMOslvviQSzhO54sgFVdmON6eSwWmV+c0c8XGNjQjWF5qVcVHh2ydR+r1peqxTcI2rIXvfOQTgOTuvVmyIt3wumswlgM13/SFyRv0lh6iBMexlfI/1QbOxJSt6OGrEF6H+V7OJJvwD56JoSAmDiljx/fJoQqn7EHnyg1t66pZj1r3xjDP+AtT386vuDlBJC4oveDFXOZ73fGvGll9mnA8fwlx5S+Px5O/KM3yb1rhog0sWk/6Fzt9QlMNi/7AQOwvB7J+HD+l7Z3cmqAximV/7i3uhpRBhPWGe5UuGRHt4bXm0z/P04LHfwLdeVGayqoyZ3rtO6hqP/JoEEZze5/SrsZbNLg9wlVu0EQTFKQ6ig3z73nuhxlbtUAk/ZyH9WBzYW4w6vtgF3HYxKhTcY5CiQYQayvDrN3QWjorWUgJJaDFUQ9wuC5qTnmpofxEMW9lpNgu6x0fFGS2OeGrJm0FfIJSvtz3B5cbK4eX1aNvznhJxyhrv1HA8X438tenUuYnAn5i4f3eDBKPl9iMQ0zjvddDbVOTra8Xf2DuKXlLlVK4OSp+0GSLxNVPkoUF1OQEFMf2Wfo0CxqSqesAZAVax1N5s+GJGiTy/v1FpFNHfJceAXUx8ojUE/hUCbmR8TEzzLMXQKdNdvdciLNn12fdekAbTGCrk5VEGmMssWEIWOKZ3yb68gQM3XEUXJEIzLChwUZxVIDDMglrv30h7Q/YDFEVAdwyGgiroFGHkDZzVgH5yI/wKhEYgQ2+cVPNaRMK71lsl8jAvPksVMg1MOwGGyFkhhFi4Pdiv4XWy+t+q7AZ/NRFJU9z/diQLksFOlIYaAD+sVWZ5pnL5uNafd72e0c0vdCVklO6s1j7frakHg0TI4/c5f/y270EbEJ/J++SWO6si9P2MRghmSIxjYCpuBNVGjJPBMIb6b+oW8/c1cBToptUNFjUbA/qBTMxe7zKW1qwuhUbYF3qZPNSxYYCntSMqS9FoUu0k3VTKd0mUOURc2x/GnJqT0+DPkp2hO28Pf74juL0NU5LhJj5luXZs/RsjVylh+6QnNoO9rGZ5WFTB2D69vWTUT7lSdshIyluvu+BJsI3ha0rZR1C14e0L776K4UBmXW61tqyTBysrcNaE3ojOO3+/B5yucI5Pl5XQ1x0H4qVhCo90/Te7H8/iMYaBAxh1rlhq+AtaOiPO5ieBbgUzlJX6oH6vOAZpKDc1TQS71sXxkCSXTEr7d7WvMRJqS1cco+LB/GJE3fOwpenijpwIdxEw3f1GymyOxt1F9lW2GcYF8fDHG6Ml6Z44KM3tJkqBmHUgYpM6741KXuM6bGO5z00+LDXJAimOXn0UKsyM//V3+qBSTgTmF/1sOv81p+gcq1Oy15iwFRBGtBt+wDKnX6pdoJqV2BBIsLBtelBRxHmXy1ihnVRp0NVbtN3I+HhYAt/2k/7mGZPvBq0yqHJAzp7eFpcr0CIr8lsKtGnXDIkrWZD8xFPg49IUwSC98PqbiQ9HyQgGC/s2wmcbX/SvqjzhN3FAXc/+pcmqjmvx+ytaCgbaKqY8mZNGXL674AFdePpJNa9DbyZWHyzNHjWD6wvXG9/U6QwLTAhzniaPx6IbJeVyJ5QvYJuabPEItVSrsjdrDcCf0Kb9wDc5wqJAd3u1zcQ5ga0gvjcTdQSW48uDp6UxmLPcwPlLJ79lko9inf78/7tPv0rFMyFavOEigyEhroBxlnlRfDfklHZCCkiRZzbiFsp15EpV9SmSN/hCkKXupXCz8+uFPasbq4fFl2aKu7Qxqj5dsdCiOh00TyJRqlsWUBzBYjGYG6tq2yJbgP6zi2zwKIwzH+3sq1VDdCeA9Sf2byFDhUYKfuWh8NXhWfG5B7oM6rwA8zFdX2JvHHufh4EQseWRkD+dEsSiEubxyE/vo38snOARo2DnXLcepk+q+n6RTJ49X7HSsOwCt88kQNAKy7xEO2JX0hkXgA14/Cax6Hdx9TcBVMjAMF32HNesfDGlvBCO90bNkuShkPq1GQTyKvETZO+0v0RFaxD0MXpxHJvhXpIm3PJj3P2jXGPJyHlu4B21YkyWYOqPkT+Yo3rV1rLEOcIvDx7ewitLMMmWtFO9uPdRGgeMShnku98OIBOqmKhditITel2jtXhCQDrZXmraIYtmuypElE2cFYnxZxk7lYzHOcDNQ1dBQGJfRBETfACto8PF52KQYXaSml7YWlUrYeDf/qE17+PYYBwMxGY9puRsAtBsy3tP3P4GC9Mcx9IElNsBl7qcRJfl9imIKBfCnNRQs/Vtqbfff/E2EmgSp5ftwGFIcjh7XOF7pQwyDlr8oTVCf/y5sl+/EefVy2Hd0v2r/+UunRAr48U8JFZ4SIHJzmlcpyiXN
sJ79cqCsrLzPFEsNXXV8gR7mmFIs/XkIxhGAYLgYeQNTjDidiRy5CxwemGvDbSXFV7UIP7aqgx3/ZrOGx2tKUySXBopU6cR7Ddn9Dc6qWgumKZENPm+1fzBF0AE6L8LjXR4e4iDuZQLsDQUwLX6DZNz2/cG/PqWaVuOKkyKQG8tyA/vYM42FnSzJMQ6yppa+zwzOLcS71C+z2xexTj7YZNbcrurzovvwSrvuTILkIIf73bq5Jqvi2z46oGn/2LrY5gUpIhKEYR9TVc9wNa++dVddDjAhhlBSMa/gSzDDG2Xk2W1CjodCRuwrjhvi/N+SAnF2dwtP7iW3x9/AjQBMI4P+Kq+kxDH7Y6q80KLlN8jaloMs/PimlaKJDDHmCQt46PZLRhaocFnhwfdOHO7AqyCnFAooH7T/fKJtlD4lTy0/N6F5e5ht5jS0lq7d9SEmjLqOld0If5aSiK80kZxW313myUj/quvOTs/8bqZzfVBgtwfKA1vmkJHdqd57cSO2YzvD8xrHi6osE3q8Hc/1iq5eKD8ODpr8dyrHieqMIg7uexBGPZzNrY0fj7lyLmLZGEVhzCCyp1JGEeNduSjCsmohDQp/op8glLM/3pZXYVZr39B+jgu7i8EDkonK5hgFDn/HzFnADzJlbrbmm3b/XsfNOPhKh93oPSLmTvsxTHON9DXK3zRFEWw3EcOAm2M1+Vz5sWJUYO1DK6mH2LtOnTxXTrVk2wDqCQfaEoJV9bOfbQxkXY7HvT0dOl7mmCVi6S52LzOyCrXJ6+2B9EAgMy/NEknNePWjbWpvQa2bnsAwHAXPdw7AFvuVcMJfOjRWb2vgf+Fe+2AC7jMwRio8WBxVArpeiAP4pfFbL4gEQ3C9lecP/A15GeFl14G/NTBitWceyBH0ly/jBN2gM+a6KT7x9NVLLqqZNGv6TkuQ9zdmaEBkuD+9U3l3O7Bm5yXm0DVlrW23v6Uwlz4GHKyfr3/zrWKQFgUgAqxOgjT92EowFLYmabW9iz+Chg4i4EnvTkBBgTVlVxvNxTXCVPCohTgt7csoeiB+KTm+TX2VRjRpAM4WpQ7EiSDaOF6Wwie/eaPUbhw8lzXPFJvrjnZ+vBlv+D365PE2X4n7xQrpCO7gRkYOmshIY78+gosAYAo4ji7H5BTVg6tRzkIPDyOMzWZEfwMA9Jgr18ekBrIdxqF3r4aKC5ftG3ZfTSzkOEbw5Tk80ZUjjmIZjY+Ag/S6LbWBcrOc9j8cJvBZVWAyqWzYzGIxLKw/Dwv3VDEMl/Ckha3KsQ4Er+61jBHHQIAyZ+X3avKvAeBlsfjgwuSVT6MBSsIjh1+vMhvBmT/7Rcva/ujflEc8YtTib+t8ofY4GQ+x5RIRzv16l6CJatdQTMq94bMc2pmytMvDt2PPfpabW3llsH/kNXJxPXj1iz+TcSpR6MWWeddt/aPbumTAIbqiW5q+CLeEBSJGTYWreCMDiBANEn1lAbsds0AsyVup02ZmG0Pji6S+5jSmWk4sGrJ1+uz6k46LO4JEYgw0fQsB2Q1tewA4kqDptg8rlzeEY1uRvUucWGNelWmNDfcEfEechUcU2OvuEd3Omt3HUP0dOofM/riivMRyK0vtPsXPR7FkZjnaCDFD0zcfueZsrRzJQhSH6YLnvKBsyk5Q8khCO7oOQAU0eDI98Nb63r0hI1NJs69SEh4g9dMA46JQHgaxvNvsDrFhmW/OeSmkKPbx4x81Ozu5hQf8TXtu0OBUpCAxaNydvVRDQtEXH12yGi5iLVYwjlwigKyL9j5hX1IpVM7p6LoBxxBtluzpfYCEmzB3d1Rb90f+GvltTbZS+JnqaAWgjkX69M+/Q2/AvT6l/45xsxdJbavcWfSU7eftTbWZoRDBmSBGQd0HIgnTDKzMlZrnoFAUtzRIE5Et+4DYTpfK6pXLWulQpppXm42TaLvMa87kZ1wqT2+/3y/qdaWctuEarxoeHs9xNhxU9oTEFiYmpdjPZL2r8DxcTfMVDweXgl1cSqtFTe4GHcb4QogmC18T2nouCFqECyAugD3to4PN3ku6hztHwswxzWtuzr5shxXsGuLC39znto+0ZgfPOd/kwAxkZfqIj0XYlXC5Sv3iZRLTf3whz+4wtJvax92n3C32RfMAOT8xL2qmX8pAv2LCkJrstmcnZ7ujAvWYN2YcC7P0F1DPvaZiB53diSMTjqG7vexE71gI1CT7/LGtrf/8jp6sc2LFz/ja/u+E8L2MaqeJvDj76oNGX4sEb/Y9FJaH6zJCjn9G8BavUvj9cpYpa8JMsMWN5b893wHm90jSF+wRi1aBy+JmzCYj/6+Hw34oqe9HUIRUvMKzo05f2WL3g/53UChUjZNHjRdmdy5fnqFXY0gySw50/KcbtWA58+0WfLiz9Gwf2gYHBKNdqiqiNsX94QZvWoTI3TgLsbr4n8bq4T6IzyqDQS8psSc3CkI6rQbS7+lK48SN/+2h/92B7RZbZ1Ev85hQMCp+HWh8GM334OqhbSzGoOh3UAsQjjujekldW6bTUniYeNdyO4LDZQm1cwa21R5ty9g48R4Jt8j//KO1X1lJaxOZftJGTbF+eG0MOjUd4QkPhk0M8gHgKhCzylWnQoTSaLXmXkRb8/vgaItngzEq+hz5n2wZ/mrazqifpXDwxE8bkbtfqsvoVbhkcFx/4vWTcpkuEJC9KjvLl2cOnZIB4JhKMPPXNbln0w69J0ZCHgXhR+LmwIXLbrT2nLBgM21skXYwl6I5WaFwZT0kEP1A7KJYX3VzO1kkZBTzLD+G53g4tcnoctd8dIP8nc/UrEYf5ouNsUOzp91MuB3U+Kr6Oz926Dqn0zo4CkdHQpw4YJ3SsQq5N58Mq8xlx77cEPA74m5NWAKmdOUOvnUBd+tXvzMOR3M9zImxSjma5JQL4Dpq04xdZJak2rMu+E2Ei7ed7KsbP4X3wvzGD3seBfjnFLR359udJeZXyS0kAQ32xr077U2tE5iF1IHeTijMQ8kPu5eEwhPq31DWIRJfobvW2U3FZ5jRKq5vXdJBhWlzv+eMI2TDB3lb/r3GxPuKIpLxigR8LK+FHc2du81wWW8CdLOJ7MgrW+kFZMIDJz7pezwbWj7ipd944g4ZVKyf3GMwEl3ePtYHKNl1iuNR/ZmwgUawOT2S6skrH2RSsxD/SzOl0X0VK28zKIXg+36IhMaTAawIT3WIBXIufgy2+KFPMAAxrVsgj14f4+dtysukacgH8ADvkZsDU0ZyOveBSR2dmh4csgRWaW9te+7uBx35vZBAXWKvgkyoEYXQ7E8qOMLEN4JJhHze4OrZDtw2/dDZ2Av24NhlB/g9O67xboeiw74Um1WWZvH4QF6G751vmYSUhm99GZLrLaleYfUb9iOXxWrD8ijwfkCiQOkizBPQB7rCzgO7FfNTdM9Jj/01fse/69v/Fxv6Ycqr0AVqxBPucNgXwJdV+bptPeChW5I4ABwnQIKGQb/Wiyd2cfBBBlShAxHaUEjko1omnLhQ5gQh02/ryXnRZWxkuVB4QjxUqTUgT+23dgkJOJeRHIf
QPZH6TVR+qfXqxmBfpGcP2n7z68rkqafixdRwhFx5vXgXTu8Xgfrvt8v+VHhyx3tieJvwp0yRRGaHCSg8mrEVxIyE1LY4t/WngmYnwuSsz5gh+pBcGQNB7S/0/vccYoOMhF0wViT1+sDQbRGYwwPg/LH8KNgsEyA3TVIThbVJJgVvCmwVnY7feD2e3T2mwp9PrY8ziD3c/zY9VHza7m+uO+9th7TgrC1Zjrw8ohn5yC63dYOU7CQzfBQDQoY8u67kzflq26Mi7+oB/oFMmat0hsTsN88cUDKWB61bIkoUapY6RsPRnT5Va1ZhPelSDPaXTStOBe1RhwjfmwQCEG2ZUKIVwzHdRvX6kFPGghzGJ1R6g72MlTL2a+WATUfgkQRakKrXFSQ9lknTfLSLM0CJq12KildtrFe35gElrJYFS/1R3Px8FkZOHrGLph7zeLZvjsfkU1XH4PR632twCn145Udx95bK19ATdUbOV11XNt7yINt2V8HHTbPlPqx9wbfPndxctYrGa0G3QKBqUg0Wz53SdEKPKknnqcwETLAInlWWX9HWgWXSyjvuPzKeKGUWRZQRW4dyXdYSwEYiVq5IayLUUS6TxPiff+XgXuEsZB7Kkl4uW5JMZ3y2kPNREQg3ayl/Q1BNDGjwizaDk+LwzGAFQ2iw/lNFhyPkePCR0B21KDRUtPEyf10zdJZz59HK39bmmd2oWZ6ufaA4UAiyZOWL2bfD/mq9l9EHKT868YIJ0dII7UkIfIFRqGJwUl6578JAxryYB609Rzv0ZBuyYJOv6YaJO3MyvbUAu3k6O+Tgii2ZOAtkR6lX8vocIpRZdthPR1cAaJzRmdHodtXssGYME6chKJ4HxzSqJajuIKE9t0evF8q/jyThdNvR38orfN1d4FyoclcAkwU0sP4Vy6jWbCU/Fkakbryd9JV3SiBPglgaTB/pDcwE+Jxjm/6E3rS3ye3ED0k/gVDodoQj7zeaFBq5VDG7Jnm28uGWp//s0os4HrbX+CdZb6fUYtlqw3i8mY++V0kH44eCy3lBs04JWC/6SRSXfk6VIiHiHbILyZk7LSHu9/EHxNZvfB8/CXAQCelYLv7cR/Pk1lc91K2YdmxEhVxi1G04RuHGOdl3zjuNmeoprOrawIE5upPNjFd07RDEmyvqlTMebpr+EKv1EfvRQ23g3qO0Xpgw4Wu/kyL9MwwXTsvMao/QOf9w103faKvIox/s5er8CFRXlp8Qn1WlwsKrxlvWgZLGaHloIZVPO/VCmDPl/ObVfbyHU/4Dlmj+JTivJ2qMxKMJnLd2C49XCMSHkeC+E1ooJCtETNyQlIYLazeh7scXXGUxKG7QGqLEGBadmTHYrCzDMMmHoS3lral7cvFpEJbcqp4bp5Xy2rjdxu1oi7turHBTCott+0lKUMcUYdB4TH+xaafpfDZhNwlLI4zW1VFra7k+LeGxyGwr77itntz0rV4/Sra5wN/SlIlwSs5UUpIVGWCrSHfxzWoyY0tccesnc56SBb7/L/+nBdiMHL56yYWXaEjc5UoRLqEEa11Tf/AQOBqUCmqg8vqt4EiJ/sDqEdS6pNYCCAkwx6n+RrWK355Dk6JBpv8+jqp1Q3juF71WK47kMLhEzA07FGpukLfCCE7KZl1A8wyTbLehsbb5OHxdWgKWbUbGY3K/JyOvkaT74BlhUHJpQf8BY+I5fJd8A+Z5xbdqAlv+Ch3T19LYJm7JPUdDjeq6ZNJMTSKwgXa2vXzB3FCAUp6rD4/SO2Q72/rG16pTefApfHD4fux3eXfOCNRTN6+BdM1QqT7iZPce3BcGRAqpYr517ZVH2/bXC2vH3Ab6iYcyfFFYBD9ge331Ag+UzEi5nl91tuQ3QapLSnTRcj34IXINI2tL4+ejHmd9fyzuqMN9UX4v8ZmZlW/d3RSo36h0nwSmJB8R6nmyVjvoG1dmw7reekCH+Xgb9SynNNLeKgg8/xtGzzL+Gke4MZI3+Jp5MlVpZgtY60fZrMUIWsgVtu97TAwlIouQmb8reXdSOWwfCyrv3jvq3oP2rGuxtc2kqzWgTlFODpO0dPsXvwZhodlw83xAfYvPjx4BBfKKFt8My9/9bUKobvhAKhcrLCQoIxaQNrPsn/nUJbPD+kphDxMagnxJx9f4UuSIFALgPbNbJyajy3tS16G/VxxUIIiyhQIktKuVbGfGF6sbjVzGDdZZkwejJDzPIyAYVLifZmFLyKFYeyVrOYrcEW9NS/VH2Rjs+XRbQjYqSAIE1rTKOvjBR6ngFBo7779k/S9Bc+kLg1hvh0/wMPJ6hGozgOGD3YUdTy374z7Okd6dnRnmXgjVDs7RNnW5+upuWFw1lQitN2sxLErRPuMkTd8tSOAHU0ZY8Q5Yutpy/O81A9bsV5FvLsgzCLHWEGITDTdTWVKjMK4GSzKOA1n9yFXcDDScgqPWC6/BcfM4xFVbgW5M8X/1BwuobLI55Vo26ShA0AjwqnAlI8HJQHko1jv+DE59MGd0vWpuyrNFud7twSdv3v/k7myRiFvPToYgKixil9SnrKBiFwB7jEsZ4ccbVBW+Bma1LhK0HISKIVOYV1vXPiHw+KGNZQi6fBBEmnVw14CbGoWiVRI5/u4RwBAniwgPoGTr9BB2gj7vg/OW/apAHIEEECjOk3jiN77N7ZQEeZ3ipROCTVdvUz8paQp9PXazMlg+tvGiEwh1m/Es7WHdNnCG+gHl5nQLvEftENa2e4cHginoKNYqDDyCpOgnb2ulCsEcY4pR479Dl9QYQi05RWa1ppv31wiRNolEVOJpA8go7HcR69ggUfFMvEeH+szSl2ARgSzQhRQkba90yx9nVoOwUvDuxGzUHSTFcOr4z5U+lsEhDqUPfvvzSLy/eJP6UOir28QdpVw+7GFGEbAOSDRwzaxtnwzW5PAq+XNy57Y+a2CyH+f82wZjnSWQljQrnx/RVjxiAH8a7QUczfVio+yzKYs1OgIJtWCTlKWAXX4v+4brNH8Jerwac3lxwPXh42vPf2Ay+/JcWO2RytKIGZ3SmGDkNNDmAUuWi9Gm7RmklhHDbs0H9homF65EArKppUvbtNs/SMFD9u+EtddrAP5epGr1lJDu3UuEYp/BXncHs0uaSrbF7js/0qmAXp/4aRfvqDE3c3Ac9Bt22ibCYXHeD//08Bl6weagnuEqNcNint+NWw4JE+oR/hdC4+AWjkfSENgOveVw1SxbS5Mmq8Jq5v13AUhB5t8zBzxv4QH+o4D8uIN5lSxnrt2BZFmdBvyQRoIdYDN7j0MvKZQde+04kIag02h8yLsU+aVO0LKSTSP2ZnzQKMQtYizJfIv35WnwYaYoHbY7uFrD8M+hXYsM46pSUY5rXbbpJWjZZB8tW80CYYJ+RqZfcAPHd2ADyaY9tunCrkbMwzQT0gCAcsMmdFfdPyyQTC8y99dUVrvyyw9Ec6KvPUEwnKAn/V5Cn/B9aYzIiohy7axdPE10H1u3nSzFrWrfuH7VPLwKmJJzb2qOxYOgIQpOxfxWIVBazO0cssGl50SVVENAutIWKGpjVt
B0zS1Xl+mZCHfwBI+3U2Uw3TUZoMxZaKFyuusGYvQIMN9T7N34yyxr/jcmRocgmLKzg050kndCS62DdphoaUHQdEwvxE4diptoVh4bRovt6rWrwuGY4ijFRiq+jbH1YkBoDMq4AwoDZ+yA+pBFCfaQX8Ki+LcZWms+5FyAJHd4N1qULvq0NygVP5ckuEJNKUcD4QaGva1fLtgfLPOphGOTFly5OC+oDkjF2k+Y1i6jkOkNPXflAljRpM2c/glInh3R2QGGqnbMvZYWKeztFwwM3+L3r+140THf+Q8+ZTozBQCRPoDt3jlCviUyo5yh0gAf4+wwGhck6/s+Z5xqnhAKln1M8dwPeHyhR86JiQnrPTB2fykHwOlcbtgi49xQMX2t1kGEJzTQVfU4R+Ft84sjjEOIUlsNpA2JGUYJETLl/H5chy2ymEe+AyNTYFQzvJbA7z2syjtFRFxTQx5GJlFnqe60RbGkPybwxR5j31RChwcsoPuu/8Rw2lftyR5XgKn0V25gUuu0Pq4Hnl/6DkzNWvIXuZvVAB6od0ApaVKrHNKGMmDJH/YFr073nyA4DF9vlje9DhFfsIh9PHL8Xa9zAEnySmZy7/dBV6VEOIXLS3mVn+XQcvz7mV/00933I7DDGmihn0RD+ubKLh34X0aYSayeTXPF0XtyW/qdTADR2D9Wgzohxzn7PZ4qQtK6rP3T0Sn8PILAiOSTYWkS472WW3Gr1sbGhzJukc6TFbk9RAI4Ba7DFRiRB+wOE90vCp6F/kpuNGHZ1cuwENGpXTkBD8svpHOBSe9yztBuw6HCUECSreEFawzY0Ejb4xTtq7C7OBBLCe/hL36NBK+qQv15b/jIFmS4r99ESfGKm8v7UGIXszHQbEUHwx7FyiWrLZNeSd4ph67Sgg9QLr7fjHLVig4N9MEexNk9KC6Xk3lPseuuPdvG9LFNZMOSzwaEA4vsUxcbrsxWWBsFb8pH+DbUdeYUBZVeHjCF4wMAAo/f+9ryGNPfvs2f5Do9mDJd2CGnDJ3r2Z/GD2JWN9m6NV6/+mr8GCcqagSe+p1en47jMjVAvgVe1JBBIUlTRhSPtTiG59E4rJ2z6m0A4cLbCvRz+mneaXg/WSnTivf7WnSQYbkNlQ/DDyvGqz4H/t1IRboYH2c/LaHjFCSecmyIu7Hj3oinZi65JC9ki8+KTNxFb4biNHoHjW8dnFpD8e7zvlF6K/HyTujEhIv6cgtIsDsXzfymY0a83os9vACyTJWYUAA8+qpmTR3tE0fMxyM/UlSI4XJ/SB9Cmh4NY8RUFBai3oPnz3B1UeCM0nr4YXEQ7wANhUbYrrxRQRVwCELB6IPuBWb4HOd4aww4gAF8XlRFjCu8RcG3jVFtCGsOsxj13Eixo9cl4jB+8F5HBJxDypiSM5bcF1jNbq86/WPz9tVMNdstXOlr06VLDg8/+Y9vPZ9kecMzysV5G0vHvDJtnV5kb2iIb/i3YFsS77Eux246rLthhxT9oU/xwJXOGEokjGCAML3beN1P2/8qc0F2VIln+xioxiyv3HrSpbejmF7TBSNvRIs8tsJzWn4N8e8EyY+tSNVrYfDxH1f9jCr2dXCw+M7+TZdKhmLe9YnC4qrARJg26Luxce8IP1CuUJyYMGHLt1v+rB+D1Y3XFuSN/m1uGEP9cklpbIAuk10Y9kQC9e2B9DAnaYYsuwUxK4PQFB48RREF8AKfdVI/u2enmUxCst7KVUB+/fK+iCCO5I+9+JgOr2ac9Rl3zGq9PNy0KYJ8ZF0nUeitsLKxaJCuKqB25YzaI5ZqV9zsT7Gd8SlA8p8laick6iglmWjkA5F8jcOgRamtO8GNJuSqwFOonwfcxssClp/eWPrMHra3LWMp0HPHwhfi8A4rkULR79aCCUDz8lDoH/or5U2gmiBnIb5prTMrC15tgVbyUAw6+t09m+IMv35DLN59+VcH7C5EBYO4xiPRvC9MncZ9ux7Ce7nzidEZ05zyaZNU6x67vvN4aW6VnXjhVUaYQ4u7/ISkmPCefxGGGLQrF6xl0OMqMd3N3kS9aO1QQLWurNe2YhL+JrPb9kaxLWllmvGIvS5bbrzpTbvCZJDZSgU47e9E5Xa+e9+kE0Qrqe47/q+qHnG+yxhIcc5c8pxGYF8/dbjftZ53RY52KjB326XIfsKjk1QZYqYETnoznk47M6xyN5ZUBJEQaEV9q5XqeETof5Fz1d7gBjMQ0Y1AKd1Dqk6FRm8QRebcSo+giO2hBaSwsetO9PVUa8YP7ZLEHKDR0ZGUP4038kZakglljr5dh2w3FLc3vTXgNpl1iUsm/XwrZ8eZSmG9IHinjRNcDhsHJeF8y44vXJ6fSWs9LmNDiMGBDhzEiCl8t0Fgxq8NG6GF1CFOIZ6fW7z3eGBs20CuS3ENUkqVDSkEaebZKDC8T67LxEr4nObmjj6DbRzxxsCSPjXeGVE/lIlH8Xdytqccztv3wiKf3jKWZJ/YcWHC2QXKdPbvrMgHzUL6bE1L6LRVCbEC05RDi6bAMFZruPFgI0S4gNS1UGF6PBAJtuJ8uhWefmOEh1qS4PMOVCoI8Jw0JHMYRXM6/n4ZaDmad+F5gl585WhCdkKlTWKOzRND5QhcVeEN39tpSzNq7v5PCJ1soxgkFepA/0VbhA0rUNKjXV41NYKX3egDCATkpWKKZRhKAwGrDxEnKQVjqb2H8G98U37BaCxqEuC0z4gu7Z3hGluzYIEDUytfI7BG69iSkFlwu6fSSgxv7ZaoFEt1l9kWGOP/Qn0T+FSSDAxUnAn+v35uh2S3ioY9c6eJfk2K1A+aZBU8h6q/Ffq1E6D9/KbHqpdRseYDYjeoMPaCSBHhnW0OipQTkT2TpJ7n5i3NlzGZ8f7iURnSi4NuZYuZWEyFQjvjiBmDWA9trhCkXkExshjPwxT+2J+C7USyLbpS9wMB7BCGwXNEax180Am2KsyZawOBGSgq9qK4bd+bxO/8ggVlJOoHXx/hC9LgD1m3jY7bE++WZSyHEjGDs0EclxfmvA7bJzvqMAK15SXwugO2Dvwb26pvStXNj1eWu/MKNO5aI9vYJVB/qyFv7t97TfCnV9dnODwfJe6dEIwJzqOehVE3S4pXQAsHo2wJlz3b205sOkIaL3ctGsAdLCcrsfaM0585baMQVt+l/D2feHC1W+0dWyoWCcnHJ389+EoOcVZGSsZ7hcjiS7V3effRHwpee8RYcO8TVL6oQnihSvlnsSfkXkX6xYdX2gPZTbCKidcESA7blizurC+wB5VVpZbD0IWEcsita4h7iZWxNHVmcwX+10JzMFE8IQo6PMI3VqQUJSiLF7bPK5mU/53UBcNkjfW35BaEeKndvR/hasXIQuJgWJXrhAYoeG/xRkGXr4fuVBfzPnNXKsF+HJWLhtDv0FIuzLPVIjLcAZ8CxxkL8cuVPJxTPF1FlcDh6YKSS8DILP6t902/xA4UwuK8fjnpmLcKIK/du8vuqapB5VEuleJ+2aA+JA4W5cBvQQi+42vPSLVywUs+rRaH29W8pXycI0l8or0cTUw6ciwuR
b8SsECEnmo2SQiII7Nj4eL8WcPeSYPY2pDvTIJ6pgl7WE/I7ZPQGRoR98uVpsbRbIT9V6vn9NrA9EK+86IYC57vrh2zgWVCyOMQxNLAX3VM5jaAwgr7cXXVIJ8novs0yy/3Q1sFXztv+gmCIHz9S734taOv+TeneN+O5jvMomMORHl36qXZn1j4xkWqLzHaQlg23waxg5CM5Hj7u3L2BGTTWKsBjYHLRHpSw/v6gGbDNqdU2JIm5FCkf2yXKfSsOJFfQROP+cWO28DmIjW1wqUTedQ6BhC+CT2yAQqjm4jPor2qSZvZ5FEX/6qLWn6YU5Nb6xC6fUq2Pas7cOA0PiBdmiCJSDQRsLEVrjMoBR0MCiq5UnVOoVlgJbPYSrnJUddE94j2ELV/oh5ZvkJ9MVnBLWaWQBoYj8u09Gn610k5wOc7/d7esV4OtRFWwaseLr9bcwrsJAKGlf94dnrSLT8qLitju2Wx+cAvrOff5vuG1NPKbw/bRqFc/PYu/aBbK7AGB2jXX1rPcfVn4PeDYYvHPQdQMbYa5K0j7Mcd+rjrYwzK+DEgqerYqHQI2xE9z95U4mCSNRT/KErzx/4WqXtm6EUorY2e8l6kHZQxIpMpykpei5LTApYkdSJPkluur22CBZSDggvyePuzyU2jihLSU58A3WQRtXHIAOEVfvF7qUDd6CNyxbUlFCWyYBlk8P5Qvq9QfK1uTmqsj+IvB2c/rnVuZQY3w3Hyv+X3jtRBQ+stQ1f/ljF8LQ3clz20i0oUT9qtpF91dmVWOfqspMDZL/LCMbIaNM5z94wojKlp1/UU7FhkW1T6vv+zfcBYDYxJyoSijP9vGe1Dn8ZbiqXpagltsmwt4OouUYpsxFDY7asINm6KComBrFdwR5SdvvQxTEGO6GoDv9aOc3Cn2OtUwGftv3oxrcA+P3nVNRW9XEEvbYHkgqTodizu2CU+rGWF6I3XSkNXFpwe8RnvvlZy6CST09qkKp82I+gmrNrhmZAFNYnNK0thh+GE8ibD4h394ht6xpfvs3u9pCB4LzlUPxIVfhe13Ayd1McPaFVEkdi9s+UKCv1yZ/v8MKGJB8GVxNIboGCoGZcJ3VoLLbrHV2ELl2xDeACNRlTzRO54NIpNEco69gEjaci36cvtIRmRVaxS/PQ1kgY9kC4XqUquYz4pV2s15RJewuw0TkU5PgIAak8otpjLtZcaLudmy3RFYQX7PkxAyzCbZppVZYLX4eC2VsBVyDGkdy95LkNMOeY+hEd2OXJCEIDRW7LKfwQwp4CvPuBjxFeJnRFjgBN4lK5C26wFpEB9b41ctb77a6G9YWK4OOI2/L2jsXXtXNUjNlD0wDP7z3OAJ+A8njww2zSbqrq/fMVPNIjyKECBUh3/nXdTt93a7hqFDrBmVi+Q6CRW9xs6ocgcy4nefW8ATC5O1MSItSMvnKEBfJQdLbBDtqYLVnTdqN2h9NS5+cBLtZchsTVnIL9ZRIrlMRAqvdOq+MMYQdZqfWBGuvaKp001/VmOch5i18suZPky+MxWHtejDTGGH7iSd8BL7mAw9SAy/ltuUL72P8QmEgVTCYyIcHDse+VR19AyuS6Ehm1eEKgDwKjhNbaJgVEW/n82jmv2jYGEic650bjtUAkFe8xbT6XA/TNN1B+yXUCp6J80o6BoCYmbkqHthKq4oMAmNbMEA+ZH9FhYa9ev7PCEY2yKzRjmU5HM3Us9zPdU070LQF3Mqkexk8BzaK3RTXXNOGwWx0ctHCc70WMzMjo2/h2rKprVmJc8/JYR9UzoqCTmeJurlkFlG3tdXxmBfo9PHZsm6Ai7ygJAZ3h7Pqzryy4HxRhBZTvzXea1BBbSRMCs2fIattDbi0DTVREW9ejquEmcErvYZbLcCRU/f5+szuiastELxTUsGdOCL0CRrOFinN/iyB0jac1U60u7u0N2oIVRrEHMf99hGS8fzli8lTA6s30MT6xTufucAw1gSJvkYJHaZvlNGDuJp46cdgUn85XUOX2opwcYF8bode3VFsSvtZREJRlcun5pBjMl2058tzTLkj7hUSStNZbOuzvJJgarJx8/BpGzjrwYP06oimt38f1jOV5FbH+5rEvl8Ty9bokkdXVbzeMdnIwgRXZME3PjYF/jSRyOAcrqrosJMxcEWX68dRqPAGA+OggheCFyal1Roh7SOb5Mhvf+2bC5ETX/UWRPqO1xPoyHl/mnYPYnXa1Kn1CbS99Ms73HN/kOb4vfBwrS1PUF7egy6OSdAqyMwLjXIg29GpDbp5v8X4icTF291ot9a9GTy0+8bdHxAu7Ubgy6kqS65uod4G99xD8ZMqXrz+aqx75h7bSpqOIcZQQC4589Y2sy9YahoH0jb5Ee29qFC2lYch7MvBbmkd9z8H5bGT6JvbyYaQbf7EfCeFcSvKFG12pWYt+QyH9X123gIfmDEpORC5Mj/Q2lvAx1nhE0RRWAA+vE1P2cc0gcB7sPWLNgDz+tpmc9IOj1M8F3qA+XmTIpBUm3fzuAPCmnvx8xgRux2CrQgjp0BkGgh2mcDzlzpszpc2jbg9y/s08LCNxN7S6J137jcAyfL61q8V2V15j/BlO3UW6x05ppINnvuWDBTe7bdXg6ocCxuZwODncuKG3LjWGfvPt/iExHybXgt3SqlXvW2YLskbQQ1rLd1hZM7gmo0dqYdBMoYWnV/Kuw2L/vuWSMYONZgKnUgmL5/NFXzx2IGLIHiWLkjuVpcPlXH/dWY0g6zOWpZ/Pd5lx2ncYKCs3RT1458OfFqGBdXGh6JZJMVm5826mOyocWf5l36e25Z34wZh8W8nqlNkeqL6LykiVuznWaMx1fl1nFDQmW98QrizUB/3CJfgXlWHDJdMVIrPK9IL+rKbLCJwah3GMCt8ueJTxu1xtzj8USFS/gIJEvx6azErxHcfgm2ZpmFrXUuI7BPPbiXSpW2vIXh8/aptY5ceZUM0ewodvuABwEZL6pgLKI0ravvJ0ueXzXVVugbx9Nr9IjMYfpDh8/Rot3YTF7GbF7PmL3U3Z3AUiv6FbxJL3IyfJgiE5D0XhKoUDCX0OS56WXr/8bk/o33YIQy+qJHRpfJbmgPILKizMrvfTj294h6FCeQeRwof7dOwHCc6O49TZVEJnN+8Qx2yeeGhyMuC0R3uHCn0ZYFBPdQEVvr9GzXHyBUTnzpzrQ+JEOebWjTyhAG/VNS6P370nbcD0DsggzY8r8VXSaHrkbl3acwcs4iYwGk+k3q4jtA/au76vuSID/0zz8kVdkO10Vho5f/dSA6rBPqQUMkS63eGP2FO/TXeF3RH5dsKdF+gAKZknj85DkVKaDy8rvB8zSu+HMX6QDPV65ihz0eE6EiU7ylzIrmP90xFCMSCjyPvk0BGqSSOC4xMeEoDdXJjJZnDqyspO0ZJn1JkY0/LZJNHmAxq5hoOp53Lj9+fk3/ZxSdmNNWxbADyu7+hktMMa1qOjCJuNjRBATc2uKVtnuUQtazvprl/rpWNUf/SBliSFMhkkeUL31zO8pN+LSauxCt5RuTK0BjWgcLAvlBjCb
r/yfvE+qdUf1K/hcSdy6tdJ8lvdYWYD6cXA/bx7Pjs9pozhsCoZJwydsI9yuZP9Y9XZ6Zqyhe3j82IySXVfAoZ6Aiq/C+3qGoPpaiGq0gOLwAxylrQflwCibCz7rToLma+pdcZI5XKp/lDxQb05DS/M3nZacDkQrf4bEKADVT/rxyA+PpJ7g6mLgp9dgSt/PhFV5jmA7hJt4TgbPdx8RK1dRxo2+apGxTgvRGr7FeG3BHxrc75DUHhDlGeqPn6iOqFxWiS8Z1yU50I5GiwWObrwI3YJTbXnZHw6BcTWqH7/8sZUIgaje1aDcoN/QNJNmBiZxi87gJr6TpBvCmIMi0Oat4tHQsmpIOOcLo/5wTZt9gtfQYgSJrtBEhmwiOOhwa8PWweP3og8NkxLYs7B4oq3YbsvWNqNr+WyQURqxQkqoFnmcaqvL1k2kcep+TvD8pqAVCNnVOyV1O+CNtJdEBd5n9ULdaDDrvvd1FhcA4vs5Tej0fU42wREErvLcIIhxqw4xG5VGv1bO8EaZTDQkB3tqydaf/rlPRUuCAJlgVz/E7PbuTq/SOm81D3iElI2e/fbPpmjTRnwsiEsMeLw3S59J6ZVcCSNage93Zy9uYqXNF5T8yVGe5yzlzPTpAgZ8g99N4TghKWPLiR/R5V+bffw1R5QLE39FDIM/7BzYqISGuGh4ytkpo9dRlZZA5yTpUQ7M4kefUvVZr0PuQc1U+KzFzW/8qKwKpBbslcSmB+5/8VTRRDZo7cz//yEK6Hmd9J+PAL9wEPCIdkZXwZDxTLCPWiiW/R5RIAUdOU2EP6QfSF7n2oBfV9d2wvBbUifxRvqar+6Y2LWiIM9x5T5d53c6ObJoorINoioE8rrLCvB9ZGk+BIMKTllQsl0sve5ZAgK06jQnSN0/n0vUavv+7BSFim9H+A3P/Q4IosY/AcxuSZI8uIyYl6e7j8r1x4n6R+FrpfpEiNxEgdVQjT0pTBwHlsv6VxU0foQWKuuoWTWpAeb4kv5Fp/lABVNFao4/KN1jCD7wtn+Ip41vR1CFMcJkiD66+W47EHBkJ0sWpBr7cRMe9gpIKWGJKj3Hkdr+rAEVN/zywzbbmtoD1eJaP9NvS3ZkSLxjoli9vNNH1uq0Wux3T/6e7gLiuTE87qIaCtBoCtj3RMNlxEwd2MvdaltHp3YLT7A6CuWQca0il7F564HnQR+wCZAW6o4nWgZ9jAtxUn00JdendzfzrpCrPg3JHjTJr/Ivp8JvfRDUD4Vza7NOoZ/ml2EL0g62sJMNd31vqTO7lnJ5RGKXYxKXkuyRqN1/TQToyXop4Ttj7rt+snvikIHAND2p5uew7LXziWjzpXbDnjvH7s3OwGrAcSTcZNGaYATm+JeOMoYtmA5rVWMg5plGEdCNj1gfi2xmlf9VoA4PJg33nEnvVyBaQ7XDvWPECYNFCkv2V12xDwvN1US6XVsyjS1miKxsWfa3MY2F4ANPbls2xQx0ufVxYdU7liOiw9T6RQOkomNW69zfz/Ug1ouvr+BA8CYb4M2qu4iiLz3qQaz708aHWuJLL4C0BPlEraOukWpoNlrS8vKfXi8J3dZ/ELXPLRIc7xzhvFWLNtxmMVm7pyOaWxGVfOS93ctct4eybpCIPy3VB4yxYe3tFWZ7TUW1TZ5VmQ3LmuE0cqRVlamypTh9vSpMXy62gPwVMmYEKU9G4QpqvO7lnbyAXTbisqtvO874na26PLfHTJAuUhHpyHs/2oXCI9VhXeZx/gxOVgl7Q5FdZmp34QMsv3Brdc3szCs8EBhKmUiEJRE+Gbj6FpWIvM30J9TfUr/FRkS3SJ2Q+1Kg7bxP6xh7q2UIqLJch332/AO/A69nGGUEa8XB76ODPIdinnP8t6lTz+MeHyvSHNGv+bWQnenlOCyqQ3sNQuSo2KCVikR5mhgZOhzr8XqADUxi3896GA/xBSJQcRa5aWpHQDRoiSUwicph6pzeZmfyYVgkXMhVhFbPIDTCMRnN2jHsHto+vYlV1tJ79gHTtBkqZo37f2m2LFYo9N7rRzrba/72wGU5bHLhbf/GuS8MXNaf6chTvMpGZEBnX64h+RHBYidUdI1QDY4K9j4dad2RcdIkhH+HKR9J3uQpkdpCmlmnCUbklfTnd8lBYF2sar/Tbg7AnVa8dOgY6fifhM0GRU2E+y4ywL5caiLIlg3TZ1lGzivU0H9BT1xD8tb7JgrXEf8jWrr+0BEuwcwpHHp3zllVoh0RXmzfeodqkCFJtud1O7issu//Ru7NxRt/H9NpYKJYjtX6Uljy6s50eovMO9GhtwZ7rLScn1bqjJghV4hqK3wFaIaHMirBaxe4yuLrRmQO8xQEApuSS7zMxids1AszD2XrC6fEd3fvwZWzBq4mlcpfA+P207JLPCygw7TPhorPnWfYs+AVezf8K1WAgFm0UdZOP/6Pc2HbljXuGdHNUjukfrdPNjCDxaDgvFXIsEkSR8ZwHpL8s2mgli2Ym5ATkRczzcSXa90+K0LQcVwkB5yMLRXZ/NG3g9XqrhBxKlpuu/dZb1GeHnzDOEy5ytpE3QQTx7x8ejI0bHVj0fYgXyov7kA8i2cS4Tac96ACEh10qWBJsmmNgQ+Q3IQ1uj9qcv7vlgTIzZ6T9Z5YUMm+JYORx2r5qnt1OMX8igMTZqYCO5kMTEUfWXSd5ZBdGnKIp8CofX0y86gWEgsFsVRbx7up5331BIYwYcngOPK3E8RWCXN/BZsVgjm+CFNWDNQG/qDzGaVICUNKPCvsdW/CrItu8cYNK95AAM0Vt5Rcut4fOH2F4CnoohL12y//aA1jhJMARVBMYtY9Q8F9F+dkZxrZaEk3fwUA2bPSPm+MW3kofgbH5yvzy5pTbWxtedLtORvK4z0mkiqPh6mCaglr24d5E43nwkDy8pI774qiOQmbhbGrK5Dcu5pZEs7AGwurHA+eCP96U8KKty/cEM1lhfBtN/7y1ZyoecQTPc5KMU5t3rsA1iDW0Fi5ubq20OzAziGP9Ydw32nt8MMBgOy7a8m09g/dPBhQ0hmrZtTF3EUWp7KCeOnKzda1Fztrx2poNKbMM6XTJI7tHKfbG2CsN3rRVZxIx6cvWWxg3Ht1VmQgIchSkcQXp4HJ5D1x1SffEFHwoy0DDuTBMIEQGxphAS+6eH3ybQUDVJWlUJrF6TQvBtvXm66X19a4cJwRCZ4RIbgUkSNItZnKhvVZ+LiJ7rXqZdI4DZ6W2IavCJb1u5pDTThCnv0uNuqhzC0xb4T1gdh8vj3BV5+4+T2wqx0ZEtA7gV6ycvKBsBG6n4D6m84RoCCX7kPbPRAcMa1oRWSsg5yZ6v2JRkfuGzdklDDFsjn82fEezVfH5rZCe2c3+biGhvN1LOpuWv10D+N4sWBcPaDtfyKqvUhqklWf94r51TEtplg6qzvA4wAfMq3xhnNzxaPFt5uBiN4rG7sO/0Ju0F5GXEquqb31lkfGC5uEUOZzIvfshiAvGWftrsbIoz9qOyjc9uaSR+43cep6Ud2Enl7HYP37j7okY992reYtmFU9zFhI1l2VMqx
uySQR2aLb2iHX2UYWNSmG2A7HwcntQMNKWUDyZjdv83T82OqBgoi+SGkufjfSAmo3l/RbOc5jMLpgw7Ik7RMUHIIOgPFQAzddn1u/CXi212yImO4gk69AfSNoY6Qu1+eupdTrSTjd78Au3CsdkTFuBpyG7JHvsIj6WPAmUE188QtVwWSK9MTYLFEIdd9oVOvIrPSU3mAHDfacOcD5Si04IovYEHgnJ0ccaqqczgsN3CHFBo6N/qacMe9zf+Mgc0gnKtbvLhrCV4XgGQI+M4qLxPenFB02YigMfhdXPzKXon8PJEBf4OgsPrXFWj1uijpliuTMK69HvhKpxJX9RVBlQVpOTvKmaU7jByyhK+9qr7FfC3r9C1wzsTgbx1B/vrFSFnLH87ST4pE8rl8xdCuEI5YHDXdTzpdUqEXWrL5jrVmFh8cloKUM28AXE3KfloVr//NmPb6RJalZjSVma7fBG6woljprkV0LkpyvcCFIR95FJ+ZG6oWonHVLcmEqcuoYP77ISu1tk15lMtktyaoKffgOLZfBexGWMNAs0GLz58HXN0xAHR8MseAgH4fi8kITIL+9P2dSgw/seNX2GFANapO/HqB3rR8AvaViZehkENyCF8mcSPbtfdO+Tke6Bdt4km8klP/YUpMoEE+iV3CCrGmXp2V8wYgAxTgUCl0PN4R7lknZd2y5gMSaQLXn6xo9ArXv0msJOzYogdjc56vqnWQxIyBAewMNX15AOgzd3RUITZ9fad04E5FQI2NOL+BakPsO8b3LlFo1RFOiptu5Mtze2wt9tJYAfGrmP0NhntBwfG+tEnb3kncV/DP/rJMHenj9iBJ96281LVNjPdEtFuOYDBtJLSuEQOP5dSAhYSptObnsUpojQIFL+BciYZMdZiLlxPpEAT/Ma4pUzVLL9zteLbPuaQptdkJNFsOjdnyUfdl4moorGIQOsi2ed5MCSwH2wZwzjfCKXFYNdcCnX2ncgM1DuwOy8FAoqyQ36b2Ex7d994dUUTX+TCILGe//iB9MqCkTEn2nGfErBOAeAHIK+bfS7Ioa9LmDYpl+ay432KGLfm8C/23WowaZuJV7fyLkR/chAYv56X60fqbpIIDzcuZVBdRgoAhWqTl5DfSx6eDg/SoV/QimMh1HILkvaK4vCg1D/eLfWXHgjrqtzUL+oR1aL80jS9RwrCG6upJkvxRfhADJpQBDg0U0LzxZfOVtV/vETiqkThfLMF4oVrGNl96b/PdMEhyFKTdnQvMm5+OKx8iBDPL+RVAfRCQ4ZXGaKvaB91buuHF2JCfo2T67tCFsvf7jQNi4eCniJCALeAQf8L6rgDAwaSidw/5cRhxjdsSKIpAsq7wBcTmUACORAHKzM9co5aUqWKCdhNogOHvCxHfs4y0aqK0llfa4RCGK3NkGT+qebVlr8lBkBrRdLkgP9JvWMidtG481W+Sk9xPKQz0hYC5TeIKXmEr8uVKPgqmh8vNIaYjtM0pWS0YnYLL7bQR6+t58ESbmQJOt7KjAIwoXSsOvreFeCr7lpYailmZaxcZGuLaZsgCi2ftdMMMHgZ7VltSE/CPOCJ2dyxIW6ypE8mtNhRL0J+/PZgF89n2ygAJWYsA4PV6fNJoi6bkTWz28oeVkj/UplLDmKF27bREO2fHMKuVEXA1EZTWTZMzEhBmPY7TPn8MpAQExeSTaNLZRvOTXdJjEpK46bz2YLqIcpI9yJcka/xnJ42HCthZhcmhB6eyrr8XfSK4hX7fmmwGPGc/B7dDp3dHOmrqupjksrNCOIp4oF1Kbdo8hlucel3QnFTW7CS+JIfwBaW2ZPsQ8gbehx7ytBD7tt53QlbR6ksbS4s6DmfFPcBG60peiuSRrysmeT9QmIkZ5Pvh91YYzl/yxXZc9w3LMJOSe11hTL4gSmMVTEvP8wsd3PNEC5JONgkcLq7bJLUCDPxbM/txFEaTjop7JcV/abquZUmRHfhL0PhHvPfQDbzhvfd8/aXO7I2N2FgzQVOFSspMqaR8D4lmOYCe1hwLimnAfTIkZ3/z63p9DpinJchEqE0kzI4zC00tNgsT5UR1pTfXN8L1pEAOLETLX4qT0WsAzipJmuyAn/MbGYjnziA6G5ZkRdmvaG3BO8xE/pfKOU/8oC67RRmXra7Af40v9OLMVXLstmbn9Mn3jfrcPTc2IoIfM3kcIz7GU58bBEbJzJmi/XNV7XKI8MMmlwzaTEcIzC0TAiIJKEfxW/PDuuoQNBQV10V2hh+rGAcJZG/IelDWjwTEMv+vDHUuLiuJwGcUwF7z3zHY22MVRolaiMk8At1aCNR7iRpF+xBWn1au2c7B/J6P7SZIOFoWNQsgnf2XPbpl1vC+Zs8+ve0UszHe3MtRxTn6ax5izRPoaSaRw0NWbfRSYJD081ThfnldbvnAbHmiD6PfDVARBV6T+h2V1Nir9sve1zqeg4iRcLeE8nWmGzuqtPcU6wWKVwFWK0lgSga4ZIEnB6yc7XmtxAB4G8dcxE50KlKIkXTuEowSY4Cgcn29uL4R9zyrwtiHfo3+c6RflQmz8ZOebbWoadLeOEb5gSB7pBph9BfU4zEBJ5q/3dF/0E7Z0khu990fk0HgLHnIqM8Za5L/VsvtZmPLi487ed2CuGXTdmBdyQz4jUlS1Y4EyDlEWF9+kZ/8EksGwzlA7W5tQ729s/f9ZLxAPIIpJpQAIAP0F5voR5bMdRAU/jgv8q8j6okgsghQHtv/V7/GrbLVpNB8W/pJCGBwAge++oQaFIZF13Zj0MuVgShxM6AuqdPrNp7HF82Brik/Cc2/En3aBrAd8983NnrLGnrrJeWpw0M4xJkW+MlovggCHaCoxKc+rRz4M6z4n5ADUNnfuEeKlIixkV2H0VzCbLa/5zHE4LdKapfms3Jk30Pf50eezXfA8r+Y+IE6sDTEzP8liBHq0e9IF1+U5h/LDyfyMGWbQ5EI7F9De8b+DhR50JbWkWcZUnlT/dIfma5DgOe/e4cMxsyeMW/IhOR6Chc+8HC3xcH/voreVXLzng+er2ebt3KMWadept84CiuHL3sgr6DaHwsp9AzcxHmtxwCMWh0z3eOV+IW1+0GMhPinFi0B9HfdBwRnNNXOMQg57ZfLT/aZkYsLbGbR8/0kv77ssFv7J6lSXtZppLSDXcOZSNLt9heGZ8nTvuemE3AxL3n2rV1V5Ef/IADzykJYTKUrLnWmIDNSHe6FGy0c/k68empyiVn09MayqnbhZVXty1Hma7+IzaR+GhANgBraMw00mw581RoN78XnMS2RSuuzYRSTAYexBk6aGfID/Rs+ndR2EwdN1TpyYKZWeMPEmh/MBMufohCWoUlj1HBzo1y/f/fgNGbJOAMekkYRtSxC1iuHQxHaVHdUW+qg8j8piPkaNnEcS55ZfzcWjuQj2GvQKLPDEqgex7n1cLx1QuuxkHo2IHM4uu39hi/g9A86W561Z86HtpEAtuarLQqt9BICh5X8WcGxywg/2VbNnoLF/bp4O8n2GuPGKTGwOqkhek0rub++pvVaL8T137zlC3UotWq9EDp
zyUzXnAFe35j8hfVmXjfr/HYFCxNjv1IVbzYoRrtp8RplzC8FnKAJsy0Rof/XznrZnVuoEoCOEfW5Ajd06ZVqEIx38d0OvOwHlIuTKIE+x5hJ9pEsDHyj4WHipLPup/zONvCTf1NxC9pvaONDo2SU5AY2746r6kNdm9EoBg5jDw1kYfG6lxsE+gUzyXjs9PzyjTBUcLlk7+Xzr52Z8FdTCKKw0xLZ9XVZTsf0KNSFYOjx16q0fMSVg/NTCkGD75FUOolIqesdCMklxA1XqeYb04ql64+1A5n0ClbvUXXjnvPvTq881hp0UqPaj8F0C51iCasQ+1fE9nxV/xx6DBtlwVq3ne4C6Wk7rGoBzlg5Ql04IS8+wEiOAJZ6t2lUZ8NfwZVHpbm4Zcu48F9S//UW95AfjjpVsCr0/b7YnvZclL6ccj2faEgJhnRaPQIXboXtKI4QR3M+J3LcYHgICQEUNznagYQgLIpozce1UsIZmkcy3QOkiazO7ZYHTMZ00JQZPY/GRtH1uxfcCjbs2dCOPGfe/9xD+PpN8nrvH8s/VnndPLhWINgzNJF4rp8/LcXzXOilNVMhzZLfTXUULMEP1uU/QcJE+gM1i6GMAW6qw5/OkY93fcThhVu8t+WX4a15kQe4AcHNA2jp1P2uhqYfyPKoyIDJDzhBsV4JWplwTnhiqW5zTsIbVze98Uwv+xuWF7ZhNikrnFzNqZAZy198Z2rpzfMYiz8lgzAeS9ynxyBC2NSu0YPXVHatIWhp8h7N1hyEpQxQrZMk65S2Iue8G39BUmBoVWpVbvAVpaN0rHBbIHqCsK//eGzcfAUNtvwsX7YFsej5b/RwkxpDcNbefETzOkkm+TAYIjT4TaxV3r1h2rGaHGmp4l0vZATyE0HqB9QfPbNsVC44ETjaZihKIsZXHSqqKE+18RkYviijSswb9b7RGO94L9YaA0XFT2+M82bTEEmyivhv8vSBMJ3jw6z+udqkSa6yQ9EI+D5qhHeTI6sv027fI9/Idg2Pj0hDnv8CkhV1Rp1ow/QoolxgIawlxgNqTtfhn/A4rf9q9pVnthK0NdNkOOLHoz5qnRseg0J4aO4kVspdmYZxLC73aEjTONY+mVtAnzOkEFflsdqsyZmtC+jjdf0bXkrM1245CM3fdX7+wdHhBesBb+Oe63oYPvwliRuMr0j+TxM2PZRg1md3fk5CyTz9y91SRoiHMuMHrWX7/rzuMtuqhFiW0tn7qq4HgmOVJ+L4gEvh7zNRiMrytKWDOlzhLNZ6fSOQI4j0Xzvd90dFWZ40zcW46wozGTnQl6y01vObpAiBjEaGma4L5LW95hDqlNWps26sHBU0Xo9+dCWVRBK44nW9Phgy9RP6g+SMf1VeIr08CJfYz2wL3aT5ixa3JXn1MtwShTN6MRMWZSfdfgK7b+hPVRiBGO/u69Cch+dvahQqAodUbdWsDUvJSXrGYah/8jvz+Q80Ub+bYPd4e/YPzH8wk85AeEkawKMpyrZukx2ezw4NHoI4ZX2vAYd60KNJZ4+S8PAn6KP5Qctmm3tg0wXls55zvAF5VRKu0YIS5G8Qq26fbocf0idfBxlKlq7nJtMLkz1NRR2cqhJ9HS7Ivw5InbxcBCXEDauKHI0+lrTtdzltNvSMBY+hmJjIOsBKrdb29zn+NGw2Qe4WJzHNk78KtevCXXKa8xUj4CCfxD8/VIjNZWgcuDL6jC2sifDJl/pIP71uFp7874aOwLeu3sl2f1ebWBiBr+trPjjLKSJLeUCKUHxlkUXrOWQ4bL5BXeizI1yNN9v5+sy5Z1whK+ikKj1axg9N3BPjo4+yTZPnn6BS4STcQ+iOgzWzWlC6ckm3ZvRAPbHNPGDHL0VbxIlblojpIH9/3PbLK7z8pTeFcVTxI53JDSHOY5IMKg5+rTCY3pZiGO/QNZ36ceX6WsIqUGLY2/kuvkJ+mvHSAiwN51j9g3bTrUkNmQ1s3bkyR7io8aTwrgntX1lXkad2q2h1S1lmChsHyLctJ2VZh1+DQZRfFsV/szoM97WQg8fUAmXZ7aR6PE/w99+4UZRZTRrews8YuytTZ+ZXkaW4opUStiP0s9sGkN6DCmsfBdOqBwNpPWme18xYRzEr0DsZqdx8fcgNhJzTATiRZfwoHc7baJBTeSnlp1rlz89jG1CjXVyh/Zds0wyhnoMYaYK/qTaYVnu8eYYrmdjI7WuJkckFcBYF7CbNp9ooOwMuJVHcQFFXLO/5ACDw/XNcUVU8fXhsFYm/8QLRPmMjUcRQTq5djzpioV5MOh3lLGPEVCy0IXyHJe+KykcxvCzUek8fVmO3Nd0S0k4bclNmJVYtyw7bH6b/IAsvZadLGqWu17Tuybww3XOLxaQiz4u1O470EOeLFBu+mMnt2/pe2B/qWlTDdmpibnFVj3vFG2F4FOeLYJT0543P4PCp1rzg/GjlMAgc1FVYjSLtT/z1K36ufzNfM+mv2msqD1bjLL/YL+Wbj+F/Y2vR/zoaxeLLFIJCH6Yvg1HMDG5qMrVqS268qn91mQ//NxMJkFkqQNUnlLKNIGaIjqLR5XXR3PvkzP+mDEyaLf4NIolfZpJ7pPU7tDRHlmZaID4EKSsd3KkHpPavNdPSIWc11WLqEeq9DcCf2FIsSJb1GolY5mbXaahFaIEG651D/jCQU5RK55s5m8yCkyznB4VT82ciFwz6a7nlvCEM+uuHU1sHmdGHqD8SbYiiz/j8TMtWRehMCDTg/dyrbbil/sV8I9w0Y/QCy6B1i8wjfCrkHHDlB/LjwRk0n+vWwXj/QQc1XTrs+NZXrxzBD3i4cyror1J5lGFfIFWYpi8uRuoLhv/I4pRRhdcLKOXnyvBRypaFyRQxpOokM0ZJturTV9reK02adWhd82yjxQsUlHmtKWwH/V29CkXZiNZHPTNLgDAdz8rZa019oglygEUgfndqPQPNJD0A/xW9LKhniVNWbC1tTCtbAkQLVLAkUeENb0xh8u4xibg28soPOIHY+7OKjRsA3kKM4BNZN5pm9AveSAsBjmxs8GqP7fBxZvhZ8dZbiC27CPdwvK/4WCY0zEUlUkUHDc5XCKKSuBcC/WY/vWdk7PlwUHS8HjMegwTDkMQENxs+tB/SXmk2wGy+hRwuPUix+dUiwVsuzi8UUg7no2fXjhR17pJSQgdSqE6zE8eXL3WBaRvkKVMLfPAnx332MhrVoTcH02WiJa7VYJ3wPt1s/afvv0r1yt8Rw55pMo0pjsiVcs2ymuxUfcR9OdEr6JEqO9YzSscQkZKnikoosi/fzbHL1fbOHeXH6OHICTpBAvG6E9xvmQcf6BgodJ59Wwnt5eT8Ox/+qs9bgwOTL9dcYYWYI2Nrksctqp6wuIDzzg8FvquPDJueJIsgZ4tJzroXVADxzuP8XebBjOSxmNPYvs39NxOOXu+ct8zkbzBUy6bmEABbzSdpVc5y5R6Vxbez+LtPTj/Kd8mFRa3C3lu/46XUsY2GkYrjRzkk3ISXU4Oxl6bFpTyNUPZFRomLzMylmk/RFameWCBB9ALRdU7nyHrjMh
81TOYZS24jPrmztk5fg771Dt2y73nY0nIvyjBcp5dNYhc7xXE6vrtdVvkXYhgQ0x1v99lJuChUCvyZuyZy5mQoPs+JOAZXIdnItDe4xeNK/kHp1235RkN/uYWW5R3KvATwIqY9Nyd8nVkm+Fp3ByXrdixdot2K5/Whccvp50m5fi5V0tdn9oy/zrYh8rWjMIjT5jrFhtarmADny8yW4V/q4z9qC7F6In1qYaiqQwzaNmpuBCgybOM2+K4hLQT3mrRcGdSEx9Xh4ocv61IdxYMAn5X5nL/1XNmN/hIU9VL3v/EkQLhrpQf9tsc1NArMcDpVmI1ub7ZykkIZNVPkOxABsZ8ENSCqqC0kQGwssMtlMh5Wp//uryZy2N9Aszt6haJe/ksfPk9zFYF1+F8raGbgElu4pBdaNLk+1V9ag4dSq8tbgyEIprS7thW77bVz/wmwwUtwbi5mkNiYnJUpyabded0ZbXTA9whFXb5oW2rPXKJr0XUc8zaGj/6XYwNRIVkiqfla9iT7cqkuz1Qb6o1ltd16zmTyizqaXq/Ipu2ypWxgAE5xEkahVZ43n78uI5s3Wm15Ux2nJa/l/yV2i3AXEbL2l5ueNNnrreKNonQ6fnk6JM59/9jCshD1Yhju73qBAu0z1blpqUVyBf5TG4Z2536Rh+k2+8iSRjA5nggoq+0V1QpeAAK2MtR/mM7poPAQEwSaOS9V2XM9djuJTr6XWxwWon66HMgXsJ/pW3297LLfNlTmspQE0INFxuWrhQOGRqzZWmlOmXq328HLVf/6j5L0HotqX2aUXAeR/d27k9K6laCxKF9tIelNS7O/gp8IAr6fllU6DiFcWfB3bX8v/AVjEuXc5pL2CMRijJ9vZgNx2HHZp0SuF9RxgWrrhUXRkjiYHY9p4gqYb3T9tfoGqSbU5f9upTk+UyRDLV4AQSVWSnmMX7dVuwwauYm/wG+oyk3raRkdHABI0h1CEFI2C/IYXNMCKaoqZmQjcBAM4h7UC2vo161XTgmCDl9Lx9jqbEglKq/Q3dd6pIFyYucfEaljqNdd+WurWzD167kHd7ji6YvPxnP9hfq9c3OZj4mgbH4EGOa7/pC/m/fD/PxBXUQVp6hmhBISajY9Gl9fs3XO3gnwbR2ISkqMUuG54DakLbtqu1+o9Q+3MOTHES01RG7Ke/3sh4ARfSwZJ+TUhDRC4q9aHTkQMu15Y7vgJACDRLzLb7Hka/aqDrKiS5v4SQEV5o3hVXgz7MMVp+2+MOm7/n3o7RlBm4H3m9CWXTv7X0tjFRrZoViKNR6rx4Ca0Wj13khRX/jrGv6dfuKmjt+pKZEf3MbirDZ3XurX9WyP0oO4ntROF6FQZd+/Uct1M/7MSfr6HQYL1dqWY3jUaB903Wcg9loR7CuOB26alfYEDGCGoetVQKv5fkYROxwVw+tIi0ryCzxSjdrcD1Ju2mF9tSAm0CXyRX9jnBv1GB8tmjSYJcBJA7KMgC8yaMIIj0Z2etWCZNkbQTTmp5CDqC7LmFX+YXF9Na/SrfG/hjLzmh2FFx//0JjsqG4VPu/J3pnU7n7pIw5zcy7DBTzvyDtKkFr+G/wANpsAOD/3e51+jnTYem3rKV2XwOUozOvRgHlaZ+QpOOg560ievmYVtDr+pYfErGTkjQiMh0fuaTZPZ1TIPNui+ITZpUdGBTPRz6eMrhmBr5aT4CGO2X/9vTiPAoaABhjwcwnh51m54AvkqrVGTlymmWOochuzLKTV0F1PSyisc5NDCJ5LLmv7v/gIXr4KBGL5Mrx3mWAmOpNUpxUgPG4scP0+7MUyd+ytMZHP6R2lHt/Q0ofWg9UCjl1Bk2OwodMpcbOFvvxZewJ6KDuS1hz/Vx+f//ZGGanlhU5nxLKyOENfkRnnyuceavmW4U8jF/YxW0+XJt/Sz20K6tgr/264lfvrVDHSWud7NBRG60r1J92zuBwfSf+JXtCieaKq9nH1Kz40TMviTi3UIJndTKakdU9ch2Zj2fjyC+74aree+/8AXraXqUJJNf+FHKtcQxsPsCN22TAfdtXXZsHAJQFLA4t0/imgej/TUZ82CmrAZidcOEjWKEEAXcr2nM3XNYiNUELUKrCjU4nQigjgdeiv9sEyqtQjoISblduWTodz96drKcb3uPvkr+FvVrf1I3vDpyg0i1L/Z9e04J4VaIIN88qJWtXvNaOsSqTxVtIRk4Ro/gYX3YRfoYu4NYacznWCu8GcksLwZla3ZVwLI2qEeu3qEC1SyEYn61o77UBXj8QSB600HB+eUGRfhIz1FsI3X2CyUKMMgJo0fO9BB7dcBMk5kSJlHGdEXFCNKPUBCE8pFF7sa91c5pzJuIAxejTjwJFl1KqRmjBWSbaNe6dx9SwNNSX/Pusd6hyVLS2rbAWCZsstKoptmo9c02o52SaDen8tG5jLygobeqpkrz+m+cYRLK/pHVJs/zgYP3Bm1bVfn8bBTpSuxsRyRPy8oRJQ+d8Cz5/WLlhCboYSpDgFSJQECmL6Fqv55a+3Bh9FYueTXwngnV35zXQ3hSFFHAGulsdzHvVe/qWenDiZdKEyFBuYFk7Cqf1fT/DxcyCQoauBUQ6SRrTMFei/U3VHz3ClMQuI7xNs5Pu1fjZ50xsriRoykJJZu0klRCcEOY3j2yH90ql6XVaWlyKyiYu0/7G7yvFrpsp6s7qgqIXA3Z/8mKotNiF2R8xAqTDAUFbYNvrk1pK7fkGOqP8uSsJLv6okgdYVBYUdwhobHClWKDURtFqqb4YAqU7HCLAk0gD+EzfZwt4xYHQIfVpOy4ijzFX5osQlIU1ltw+FlNL2Ic8TmoHjY84PGZjd4emWOxU/EEs6eKdUMdLT32mC634Xw2gFx8p04BurfcGi5FJA0JFb9mIgguGP7+yz4tbTvT8z3q4QSxGbh5pzZNCPDBHdMKMG2nc27nvwZrrtxQJl9flj38BGC6PD/rKcG2UThVQpKgdaAgmWFaAM6E8IBDS6jbDapeMD2pT9Sb9hDUzBsovBAYlQsEEH8h5mmOp9NLekL8RfPcuCz1yrEnUwigFJ3jaicyrmqTUR/h0r4FdB0KQ1riuVa5Wjz7bUQvsV3C3xePNdLa8IqqNqTU1XxZWtip9OL5tGfnFn7mTq0B2kwQdHjPLax4firoWdfsXEqOr0I83qXLIZbA8IbTmmIBDciXlkmRbFAKlPNf5w5IMWQo0nDQFGYL6/6msl/P2SgceQ19lZBzE8/p9y8dfPXa4JE8rFDfsAMUYszgYhUOvKhucntgX4by4WNhGHHj1sZJ1nfWx+BIDEPqzC/uLQfwotBW3YYVhjaLK0IPG2+hXI0lcox/dBSBEw3SBCd93GOuIPnfy2YgvfMl0yk9aXB/SzS0AEV5Nkv27CKzoNZ140YzX2fmtvivnLrOJ6+uoo8pFPvgnFPSykp7mtDy6LshdlpFV2zXV7IMIv63ES1aWm5p5GdEladvbPsXOpSdU3SBAz1+pAwOnSw+409SP8dQfaAGKjjRu0WwfnRf6IcrJistrjULqkn8wU4biNFqYIgSwIFi4pu
fNMGihFW5t3y8qVFAS+PYL4Qc6NSC+rfl+8XEDmnTniUZ/6cGBkUnv/oPfavy2nfndOJCnO+MVTPGqU1mt0jvwd9mV6yFC8PyAWUzipidYlISibU92FV14kjXPtjwHtZVRWn4f5O5kM9sEGFG11hNUpQe5dL5r9byaPkC8wzHxhh73oj7WQJ+RiH4wnvWy/Yun9Xvqq/sFNpJF1krmf3yyOOJA7Z6z3qLYS6aL1ZuSvX7gedUZDvE5WTryV4ZioPVPJzu2NetwE4q1vaY1lnkZDA1utzWw358RxS7KmMLDQX60XLWxfN3Xk0hK/iPgig/aSryLbD5p+1scs73S2zSssRzd+ajMSBTAglcvy4gdxDxM6ZcVSry9FXicvNaTK9BfFPqGneLpZjP5BVGTyeJGPgQSIKiHfNGGrPPqbC3t2VAqpsC5GIjeKZWfT7l870EbihRwXYrBmIMdzD4dqW2b1zuCRzrcDuhKzB+ZA66J8iuwu1KyFfDR2jsxvbiKwl9WQGZIgPAr9Os2NUowi+Ldh8pelBWkIUAzFcP5A6X8D0qqaMAzCin4MYDpAnChqvaFpg6GtsCg6qazyLXJdmrOvTKH1gkmeqEEBwMqBGS84jEvi1DpGJdi+8MUtEH0ssUm1rsNfOmWzCWJPmR8LwGBnuqbv2KmgwyX1p7DWuG+IgsMWjlXkxckeJZsQVPfbEec6CjHuuffUjP8rJ9F1CQzyZM7CGuHrevecZZLXu9MnnUiretXO/at60fa2lBnBC1YGtZ6Rk/UYh1EwyZV131YBQ8TKKDZkatVUz4NMcLgGbh+VcPpRSyhSTWOAvpREFNO7eEiWLusMwDAQ42+8KrhyaGN9TObuMZ1rGRmUaiHzhv1N/e6ySbvyENT0CvaQtEM/MzN+7N/d0w9JSYoEr1LcrP+6zgKQUg7s/gw2IzI1zmRd6/RXZuR/N1rol61vq/Q4SxIneud8pYp8A9xP/4SWu3JmoPdhjqe3X2le7OTs/eFLx/6W4HtPSZpr6KkZveAIkwsMJ0HwkwVq6oqga1ihv1h9l7nEMUs9yfu0NSJnHGV9RJk9xqMUGuPOJxvk9McSIBY9mIYzgYnop3yWTA2MhyxVncw2WGgDrYzFpNW1ru3Mz7J+W4pAReg24Ixem5JINNwwgvMbfcfSnuRbdT5J/9Pv3HSECCKjilJL3jTPX88iDImkO/QyUZYcD5l67e8xmoS4zymMC1fe11FVbzrbECmVhuB6+NrRv7UkhSGnIDBjsMw/z/jYPKY7ufDdYCV7XfyP3n8FRz9Z5TI0RvSZEJi9ds1gyNYY/J74qK2E4bHi0UZebziIk6dRHsTm0ZFFtxLg2Egu9enAELYDiIXltLEh36pfh0iupvfj4XKZz+t6XMjh77SVlyuEClAOBO9bNJZ/1Z1MehraQgRh4cUFEg8R5n+DrH49U5Q2moX2zUVODjD0nf/8VPFy1Q8/GiMqguTLyNejua+uZC2q8OUMYU76C2Hwa5P20vTgQKAfIKmsnFPSon05pUehXTRZWBaF+q0zpznveXnfscwD0c4zbnCR/h/JL6kVS9NRbmmM3mDf+13Ttnlx13UOnhRCOGt7TogvZvCET88+P7dumw89Nx8JHXx4hw0fPqlhLOdznIS/VshMw5f7wWYF63IYTc5vGKFDHFSrIIE/AVY4BAesP3+VTpn2V/znZ1WU4/oiipRg3zALEUUPbjlJ6ff8/DjZoklj4s83yullPF3QcRrNhmMiFF4w+guVfi1ymZNtlybsnkuSltVc+EaFpxjBOdg3B9NDrVqe4eFBXxABu2Vomm2BUbbwb2RO9SVAtZ15u39jXOY494w8Kp1SZ94Y5APV4QoP0kIAPgEpNaYMzXKLDQf2Q05AoSReUz3MAnrgj4ykExEDWUqpfzjNig6YDD499uv9Ru9VCfMXAk3/Jhlgog4r44dxIykxH4ohUKkbT31QEaX3qgZxxHAMhiT8y941VpoZoSCzzBhkTD32pC1+QvP7ZfzOx5wK0RwKBJEkoH0p1MY4VPa1pQNPQop6anFdXfxVzr2iYM0b8rq73Jx+0eVldF4060ZuUd6Uhjmb0ZZAm5E32Bu84EP/ZaFgWeJgrBUgaHvJcc2MIanYCfO6GJofd0eyB1/wR7R4/ZnCO49QeTQCC5wiAflBLn2HLS9bllcGWc+9CfPTCD9oWOhm873dUWh2EsXjOtlQ9SsWhrN+UI6uY6dB8I8DKJENU8AmnskOFJ7QPwbfKp8q34UpP9Ev2dzFa+H0e9qpDUZGi8952zQwfZtrTZdWktQcmGaIwoXruR0doNDJY2IGAq+syWmtVBklauJdsPXzHjZwbli/fjfdNyjTIkwKRf37cvqLlGDJQQlPaCmpM6K8IDigqwsnDv9cjigSL3iiB+FY/UWau8/2OsjM4MKLZglf52hDYZeq/LvnWv4+sYHe/Ki3jlJuwVKtw1BmQ0PHpHgf6oh5MvXFadHLcJThq9cEO8Jz2ImNjIrkepVtGGvAxeIFteL0wE7rbQ25VtHh6+HfKJ5OOJlUGFf2/WHd2kvgTkA/VC/3JDdpLUO0gpK33y0rTy6mz01Ya/DhNWTOPbOQ7qH4m7atCjelQMuHfNKo10qLC43LBmS0wAt+lQMUOUOkwfs11i3SBNNBgYI6j5VrKqDhmoDRHU3EappdPyt5SL704fI9QSIzxnn7/ToVnWxrJH2OogfpzDIga0AliMEY2Kh5P+qpczYISWRxifjfdK3tWYHAWDkiLYexWHl98v0CTazjzOjDO/379PeM1jkF1KqTFLYtA4oeT5CoVd0k6AsQ2QX+nr6Rnbq/n6Av7rFqTZlrdV+w/Zo9S7T5cJyrYlA+R1oAJhbCPP3fu+8/oLqbeeeh3xzBW2/864jGYfLkD+yH+H2zQBtLWv97uoLEBaZH8wyAObTQYAMpV5ZCq6LEwKUqn5SjhpceVQlp8Pax2CzAKzWO1d7bM2ETT3IWGFvKNOrXzAB+m/jpFoM69wIarFes1uaDfGZhqkG9PeCGQ6Nb5zq5NHkpg5HI4fP3Orx78xntz0mFI/X1bmKSCPicgriAHPN87SjkOl5g/3tqxx3Ix2UncUJWhQ+biqdpEUOX26j8KLTvfyu0r6NZ5mXetaYgNC+MT8ymDbNoBXWOqT57Qfy/fS5VhpvZOUbYEg7IVQyv0nsR6IuZjMOvfthQ+288Bn+UtSPtrw8shT8hzwsiClfeu6Ox/JeA+uTwuhw6eKauKAxSEO24ztivyJ/zxeuMyuB2ARyQABwjVLvtv+fKV0nApD3SDctRldywNBuq7PyXVcH6GZ80VrTBRw5tOy4IPapn8D+DZiM+w7YafBjSKYthxAhzHjU4J/+3MplEcJKioS1p54iMuuR1YZxOAYEgViZwLqyHUQgLrn7CGw7FqhRFpuXczLMZX7IpQuKJ6xYZJ4dCbyvoT/6xLQN748djFWNvOE0lNsJsAjvP900cYtsb1VrjKvDzlIY4C+VAtqHbf801w9gTmjb7iOo3fD4zJkvV88mTB0183bvKeq3HvHpZSxHJ
8nxH5I2SZE3LHsCp554LdFBpG3JESJ6XMV/4P9xOKhiVa5+Bcdewl+UL7UNPIgpmxGCcoBpCd9N1tuQzHEnUtrlq6nja9ZkwGDJ04ZyK7skNb0zVHuHfWGRG4mguzQi2pstYskZbHf0mSNurlqo7Np1YnXJIkJItYyZLIBtM/faDn/GnzRh3IZa2KWXYBDR7o/lr5ykRun1W5yeG7Szofvc2/A2vNxs7bGgVlj9gcrnw0ZcFCAll8f9fls2OXmKiqsmhWb0XCPrFQeZWA5J5bHLVYZtRxmM8wAcYCsGV0R3JgLIXP5AU43vf71zzjcOlKs4w//QxC65tzIjSDesmjSJr7FxmH73WZVgxFCkT/k0JXvU3gvDHicmQwF8mfkjXRtF12/HeSaxxZxseChXAsSRI5w9AtJC+s1/TQfnjw19j5/08E30hDAxLz/Og9IiaqX5FLOJr6pC12XTmTeJImP34fh7gEWAUqcmpwpsccfblA6hOwT9v0FY3YZjT0KkAo/iEj6H8VWP0Oj8yOjRH2GP4iNf7tE1D+sF/ru+VY1OLMbi4gjs223tSgA6dBq2VHtkbLMWSddCLD6qcfqCHSpuEFPYauOTA+x4YwNRRLNmGTIUCVV+hz4gE8GuUCeUC6g459uEvgLWETlxjqd2US7vRnseLncB3PiIhYxdLGCV7L/7xCo2mZO8LcDSLkYJLI5rygzNgbZ6o7Eb7UzLWGRkFn99LITca9u4vgoGI83rfb6opjfE3hca9xJADmNOmfftCRDQjtZCb/lxjwivBAyIPu/xirrGs0xGBqwVGlXVcDbwzOJ5isoAkraQzDykesUF1ZaboSFPYzKzf0dFN381OfsaLpFDPVrY4aaDIbj/ceCI75cMfQUHt4ZT4TLTGoAlNHf1xNYAaZbWqTI+DCE9uwlwD99Y5FLFecMLZIZ2DTAyGq9MXMVlaCS3gmXWYk4MsO3+/yYXN1N+5G921Uo9jWe4GQoPYQwQeXms74jXZAdE7sXzDgjQLfXDEUj2P1BV1UQx0AYgs8ZSe6zNokkxMkg2G18q7PBmozPlqnVkGEU9nBq32t9MDE+tM3c51SXB8ooD3sby6QvEzzClPyp49IWMJlJYxZmu9cU+3tmOFpN/2O5ggxm/oR/Oo5jifL3u5nQ8p5CqFIjdKlZNdn1vS8qUpQFidq/CzqWiS6wf2e4nUQd0kvGfDn5lZDMtzB6reT0GPRyGvCsPxZ8boLzJGuSyFLetL8KDbNVOM3vsIcgD7TbUEOH2NPDcL0GtJHKOo+DcphrkvBgzde7Z0Lt5CM05n/UX7BSNmHjPhdcnxAOSvd9k8GD/QzykM5bmr8tg0GO/wzOWcIp8ODyo4EaBjtSFrSIRA/YYT/uRTXex+cpLESKJFhF9ZdT6Kzy6CASDCvAtgt0kkKGJBwAKfA/z9v2IMW2mUIRI/E0iMuGJv09EU5KFuWRMb7KGHFm+ItSGEpmv4E00k5cYVtlu80so/5XLZn/RCJlpHuxf500um+R9kjlzssHTMlS9WVFVRemoetPb52u056e9aOe1v1CbH0+WuH6593rTQdrCmfdJa+qa/GlKX3m7NntXfEJjhD+RB0hf9JPnLPmRSjVzx/UC2fv7dON5SXmSQbAa9Wwi/Z+BxHVaXn81GQCqtoNMoxO4PV6O0f4ffu6pTfhElLeTvp6M/V9B1ENWXTiv2GcovaHpO9qw/WAmC244WsZL/QhZ0Fc5hUtOy5CqnYRauijb7bW7/0WMsUwqI9EN3+5tYGOLGn4I2RYJMQpdRuxjK60bffkqn6FwuP9giTcoG0HGhbUWrrGefF2U21Yct+1za3gEZ3NKJYEqVSG9dAecO0qHq+7hu9wd78w+zXP1CpJ1kHzJsYoDHbCTGpMRjs5r9sq3AXXj660yVJDt1JYfJjp1yhBIfKftKVcsrEpbZVfMzb3IxKEkDNapI+rKzP42waJSql/gXTtD1JDDR05QrlUzPQn7U6EIDoSp5BMOYF/b5LuK2HnMZysHMWohYqsOfc0TVlDpuvc2VgNjU9iymqdyJhq2MD2T3CXTTo+q6vBNNJp9hYY5Wn0VVvKufMJ1tuWMXO+KDZr2x37/Cv+SKJyo8R5ZjpN2/e6jgjEk+dzcaUo0mY90pHHVd0u6cD4vX31DyZSeH63flL3Oq0NAvl6CbgvIM42/yHH/XAVYTzaTK/7rpy0ETopiAgCkQpuMEqC/5X3iKdAtvH9kF4EDnal1BFpw0kFFlokXSjK/yU3jVdzuH54LUNzrGxQ+Nz7g6S4FiIBjVl66gRvfOxCRpotplueyFeMUGOBOSUtMngBT05SE5ocu/Miv3IS9XCluIPzn4VCXzzR/UwWgtdMyr1rynwkOBXBpPQE2GXOT4pidiIsi0sx/cI2j7XCrvF1cg2ny1VvFqm6KmsQ9bthAK1F9mfAWYykC41XY+zXiCwn9h+3F6ZBMIj41XggxoU+a32fiP3DhKUkryPLUx3sbnYP4cvbPTksbkIE4cIDl8HyP94lVbvoAoy6nCFa9iv+Ttr+743JC1rVZup/HKwj6jru4B1ADvuSyrarif98CtlMmtxeUwcmKWKMqh5we6ALhQCs5ZK4Xce/R65i7UTeNxoVkp/il5+wk8HL8PAutuzmXtuvR5BMee2WFyRC3Q2qM5QMjlKoLrzlj0uY1ptRAOdDvKlhDyqoaPMHN5KWUz+Z7b22gNmq2IKjf5SozO4sahUtq6zAtI/PY0UnUr3V2Ig689pVw6bvInS74HWstPPizhj+tEO+dmvqZw8LsPKpHHEt0I+rkfeLckdB0m3tunztsO2TX7tQqojfBHEnoZ1ncEF4ybaHsKrfroIQ4xB2fg9QT9bE0R43jN+F4AlHRbyEp7ETmOH9XkevMFN5iLbgTa46RUYBvRuQ89Kt4HcIoQCJRWRa43cTtlQ0YlLV90gfZyDGLpr+rt32nYb5gWlV90MOxZkRG5dPtf16DP5EduNwbfKyIOm5yu1YyMOUW0+ZSUz0b/zlEjD5BByFqGjKOO8mryKAVYHExX6fO60PocHMedRHZLZLtPkes/yd9K9PxVttG/riVEvoOaSXpSMKDYZufqGmhpSAJXSJApnCfTjG+5UP3CZj7oysGUBtYdXKqUTN4cd1s6gu8dwYV+Tql2ZqD6VTJGjEVBqNks2kIWAf+1T49gX9TKypDbVcLH0df2abxrTAk7i8vcHfQnz0ZexPG3Rd4Ac/axQYt/kgJ3UC3d8g9pbR5IUPabXdIwY3PiAxGEHVr7i6mUMXi49VuNeBXSDEzsw4YpbwQ9Pn0dEf6H3ebBm8uEwXW5Hr4dCB7xxEzad1uTnwwC2o+wT4waSexvOugDh2sqyeO0rux1cDaqsY9ey4UlyXW7iMy802l2nQaW2yTRYKQkep/0akjOyfk4QfOkhFP3t7Q2oPguarkj8oYwM0XA08dJ9QWl3P4KOXT7y5cjcsZqUfB63l1mzMjgRorA6z9tRATop8TPBc6tYfxcPYw/PRrcB3Plvy5nU7JRV60Q2QmCc1WR+CtZCf77nd1p+GQ
HY9m59DcMK/I9QqwxRUDpmTS5n6W59E20d5vqN8J3gQaODSAHjhFf8E6w1yXnzw0dji7DXw40uJTVg2vjYUjhvfgbWTfm0ovLNxtDBONXTpwdCzyoYuVU8qXN0xCo1xqDcoS+JP8MS8gRoLijFtvOJ8P6u+BuqHhoQFxnIwdFmPrRdPdogr9+SAPxIgKh4tu6TZv6r2OCXNDEXj6nnr7/4/WoANHcN5njJDjC3IiZOfOI2vmSVDm2canHNku2fJk2bNJw1r8Sepw4pHQR5YufDZrzGdVtLsiSTwKs6XCUy0tx729yZbfh3T1T2AaZu27MidEgnoBIDDmpOnu363WvgswQ7AxwQEgeg8Dja4ywv075qIB8hiaq4bvUXEMPDSIq8joyuKwMLpoIBv6zXSZd15XaSvKgkfszXy/Mhr+GWi0gSe7daS6FfTMsxtT/9fBXgUaY+WWtINj9N2aVWeehScaotFVjiK3Fd/Tzi5qpxarJiYyOh3tPdBWI1qhq9fzqhDx+aX0kJR6SYImBJIiZzdx1nETd5xzVn/fubtyPty2/NKvWQffP57Plfwq86/ksu5L0wYVQw65480wR+vxSDzmwYfrmCY53GOkO5N7OQ8SGcTbUd34WTW7hT22FA39X7aAPJIDs0Ud1NVR9XIO774Ju97n+K5NzeHDHiAEWIGn2mvNkMyvQzKagHvNZqOxIL6WTngSXd3s2mhy1HXgYddGNY02F2wSG4L+Bm0VWJM0cy82PulLVW/gladTo0JzFPv9mn5iZm09KGbVeDHfQ5s9t8OG8YZSHlRzNPZlumMJozzcnCyg2ZRO4icd1ozZ1wvScON/cn8nqg7N4tLREE1AulpenFudi+ctjO/sr/O/wj50M6MlI5EOiTQuWUaTPDUelLxIKccVF6CvDnqlD86jFaeAjol83OrrX+lelToL8s6IzKr+wZxoZd6MiUI4zVSo245/Sm9W3bnaIFsfIwsfBSUekBFNB4Fn/qhIYAGzFVAcgtyAPTk/DJlpl/K80RZ9ogSm2j7n5rP7w7kRKDRAittReSJ53mOsvhczcbfWXxd0579LiiWZUYWJPn03aAUzoKHlMMQrq/Cs13HVHuZt+bbOJwufG37VlvhMrQBbUoW2kQvYsOKh16EgKo6q675tkddLmu/9N2yREPF7e73sNmvIel4IG6BDqlHA6BuT4fRgC4+OIZlXk/JsxxuzCSmPelnLv+/Z9avRViDeKfYqJNgdng/nfp1pITzxzUZcHD72j9nPZp1ObLtohG/BNBSXBRUCI/rytzh+sB6cDBP31Y9ZZ64B32KDS0EvmfRg7xoziZpyYGd2iG15aWOaoz3kKXAy3UXE8nDAGblsKfroBIkZSr2v6/rxw2MoOvTznyj9F75iL6FfkThJ1pkkze0vDSSFlya/tC/5gY9P8Qwxp7FoT/KfUkW0HXxaFdVvF0PxHy9Z1K95vwD7i6fnnWGhHUHPg4nXvGFzCVdhj1KDbDew8D+iPDXO5PKRmI2bU8zdJfqZey5IlGcByA957MZkD0tgaL3cAL8XqcNOyT4Um6G/ObJtwdlv8oBS8++uUrpINLVU/L3pXEHEFFRcRS1UMNA9mOFvNCPGEuxArly4vSjrPQ+mVViRt+JvQgIlWdv5NH1CeQKb1a9OWshRMlFi/IyN/bHZd9w9OV7k3x/0RObsx0OwsyGiGqeB9f+CYi62CNetH6R6JNCeCLjNRwWHD3+07xRGNUeiMxlgNzYnP6Zkvk7NmeLhp+KY5nbtxYhHJczgcbRb5m86G5m9yKEfrh7X8TuYH9nJ8SXfFkIBYQKT7jIjoY6pZDypR9REQNhoZ0KJxOf3Ds8X3L3DGrY1cBU25bgMsEfm1TNXTuR+QiMkqcjpQ/FjedznI7EJGUit0w7zIl+Kxjocd124k421gn8eZ+Z+B5vVf48UxYPyKonOb4b8jUOLY+OLRRZB1nnt3/KGtN6AlBfaCYRd2Wg4Cx2OmP3mOOZKiGsX8nHH5V4yqg7QdAX0sc1q/2GlxoCkUHj98pvCD2CbCI9OOEHNgZS9SyEKaK3aPjpphAD/sSiD3tFuwQWedmJVG32gfH/9LsWDqbS4v5leYtAnCs/cxfGW8vz5XufTUknssyO+vJ6+rMI81/6UOA7Zh07zRbDGp7ffr0DYth/shgUyMYLGcin9dhnyhIE04lgkHxaN0QAhwkEPRJCjsLvGiAx4l/4ZqujmIoK0SzphY7OFKOZYCC68NgOEgQlfaFQ2x6UPrlqJtwF7VXfCZD5SRnL7aU8kKe6HIYd562sHBXb3+jUrkFFt09SsnuVTvX55iZU+c06tilzF98++5zMjWs94NOF4j4Kz1RYJFJYJW5lj9oaUmLgjmek2bDbto182b0u5px8EC0eMlOy0tV14fsu9Lmt8umPM8gV4OzT+G3kKX0ONcYx4RntnjJjRe/+ei2ENUWHevEw+VboYB++bfns8vTeqxv0JhDHvhGGjyVODF1YiEqSuwVSTxoly5aTxm1i48Oy8UxGl5l8zLtkF7irWHJh2r2A9/Hp4xWCf5KvY9smb+SCX5MxW4K5eePg9abS3JHs3fUCnRquE4bQ9f61PFXPLZGtDkxpVfP1K/S+D7h2TuFcAs9H5Khx1uWAYQjmneP2xdjV4U/hdVZ+taD28vBQ6biwdjHik5M2KtJu4lEGU8832+WVCstjRfRv3IGvNDRO2sAgtR3TELmCcs8o3qxu+pN0FF5vHt97knT6KUPqZwWWvwzRThpfxgB6X1Pafq1JEvx8zAKW3psEqmxEbIgL1yhPUYubOXUahiR44C5e6xHM5gxZXvDX/x0UjHTWzAdAjRvGYgw7NSBbmRhqH9Mo7/Zxu0TEWwXbB0QW22Fj7bDXkaZ/9SOiw2gC36l6NmgQ97i1JqbcrsY0CXn8KH/lfel3U5iiNt/5o+38xF1WFfLtmMMRiMMcbmjs2AzWZ28+tfCWdWZTnd1VndVfP1TPucXMCSQIpHEU9ECNE4LqdWdCBet8f85ha3Yk9HsjN27UoSwQxdHVc6xbb7UKNEKnQ4wV4u0gH6VvRAy2PTmDtbnV+pZwCCdNlKfjpyeyFnYH6lh9yAag5I4CTzu3RzSqvnAM1WVz2683Mq1k80ovECqbS75kQyA1QbjDQgh1VsHtRojbMV3Uw+jdOEX11P2q0BRKVBWQrnlKEklFzOThsqv4x5u5+wANmJ3MKgWxTpHKo9K6R7xFkDo5PMSriBu0TdNpbF3YasHKpcW5a+6OJyv9KjiGip4HSslHn1Ixt2Q90HRlFO+1XIcXkMuM2xyY/kNZugzhbdi83n/dbFzuIu5+CDHXWpEots3phajnsyQ454bVwuW2DRpaJLKv9QUdZG3ttNeT5zV3rB3zBt1zS8Rk610J+jYZKZVZfz1ZhR3FkPMjXGo7XuDEpfaJe1H+0zZmoH47Q4HS1AFNetWkxhKx3TFe/sDL8gHaVf6pbcx1KiSiLHb7n1DhILVe7QhlPV27ygbjfauno2r8Y60JE91ewQ6tJL7KHEz6Tz8rx/7gJvYCUcRb
cZlMRKOeATTFEzCs5+cTATpDCzVUZ5jjfLXhZWImWo2lr0j/C4MyfVsa6etSSuiF5Q8gLGdnnzeJlvZxGs1FMgV0AlkhxgHAx6a7zDGY+BA9pY7ma2bsaA1Iy/AXJT9wgTr4zVaLmctBKI/HaID+LkVOPOto8HwttGtEptnbUvFihrpVRqVvpmdamLUFoLvOK6Hrqdl5GuYhvqn4VwTrLGo3cap1WOKk4q3yYmcLZY5HyCD/l0HBUyNHnYZ8dY5uiUDWKZSpZWznsr1ThmjDReBAOd3FNYRZU7cQfmciwy1IFZn5NTI44O2RDFX2gZBmPcKJqk3PWBUw391Epf2KoTKEBtdGF+m805ct7OiUR6rInLMr6dSrEqT7bkulK42TW9m3FcP/ttc1LucGFIqr0gVUGvgI0xnA0R7tQFXFA8VaMKh/B27LIsunh0RqGiGLT6aX6l+Agnpb49LyDbXq9o/pRqYWQZJg4s+kYkLTvrNPW6NIIebhG68CwFOGY9KLfWRMB9tBUtnY1Sy8W1Y27qlhH2Lr3ZJIyqqOy5JRWHkHetoazZg340KL+WFoKzE/Y95ZA+O6BU1EUue1uvyNjfOxMM5VjOiehX64OytuaXEpFYPUV5oSEWscpih+3ZEpeQ1mCX/Z4kKnfTLnusWFS7NHYu3c3V1XnLeOADY7LmOcFxV+IkmpSyQgJOBCnzkhjT0+3Gw5XWXb3CCO7Q9hUp0OtWkIC9vBz2MC0pbt19r9/2o7KXyd5c78/GHrmJchILrjggPWQml+LkEieGC3UB1RRAdy5rh/cu8qWDMZNB1k64eIVjLOZUNYlDJKz7Rhch7/Mhtqj20JULz95y6kqhHELDF1f0BPvdi8CiIvrURXmAxOu4Zt3pjLHSdrsJEaJo6z0i+8c5p7Mm3IXW6OR0tE5hf0qu5WbJ6JZa7Qqy70rkjO37BScSh1CTCBQPO83ronYVAdglBqSUeNAthSAzXaMnctzQg4met+kKAdUOcFtkNvwgnUprfnDR5mhuqSo9tWQSnK+OZTrVFTMV+WTU/CLd5oTYn5ntOBwpVhD2N5epOnp3E64EFXBLU1/timvvunBK8MPBRc+KsyDX5gqXgxAfBbGS50VeNNQN4/7k21OyUw72/ckr20Y0i9hdnQbmYHn9whnHuHTE896DBA1ufsvbVZmTthiX7jEw9vWUlytiojW70fScFBEWY50d2tDHtRht1v4WI0KMO7JunYcargVnhS0zITwGW+E4jPuNyMiDBrn/9ewYhE6KBg+14sbc5EQywZSBeFMWOMycn0O8hkK9xNUmYa/rQ0+xpRkpK88qlfXA37IVe0asRaQdxlD180NEJIjeQAtidguTWIv1WPSt0bgB9GvlKhqrKaLEw3KYzjh89rJckbrY2FYnk/hq8rtNZDUrjddHPJaWK7Tp3NZubuZpIBaE0m22fVbQhZGfq3E9ZfShH+CaAbm8nlCdPmY7P1VZPkuP9IFRAM3CGI0ftMI2khUc1xXh8HGMpBmdnnPkyFtTL+gSKa3OLIorpbLY7hTcI6JNVKLn/uYMQcgkB211FnHaQWD6fDy2WLVW97d9TDKVseb2He4lkAdtRhe93FYbmjg2xHjDl8NoOOMJarOr3q+CitEXeRMUU+EloSmBwTltdD9DxV5xyWsuIByAe1LQ2DKIOg5GwC/odKvZ2qOnXStz9hFO9BNFOGYAMEwCHtNai0GkHWVc9P5KVqJGFmHYrIXW2p5urXyozAp4/Z3DK1Z7wxr+dnKyUisYbwwny9BIRiHI7Fjm6a4ZFZXSAnEVGx1bZIO3v1RodaJsI8SOR7kz8E1BDIBUch1f5Ka69PSx57lwLFmZIJIVGCaj4JEVNENQC2WBkrdDj9f7VK22BK40F3sJ19AihAonaLkvdD9hTaEDcNChcjqmmROJeHleH8Z6z0nnYVoK6VXb5DsYvES1JKkVa35N2G0pt9pJIoRyQzjr+VkbxWPW216OhtvSDjLX7q2DTwtNddzBdTg+fjJPtw6NOdPlOAWFi1r8cLSmMbLxc1Jra+Ks8NTR9zun2dtryT0t1zESSoa+lLO1DHMRKalV+olx8TIdWSfLfGEqd5NdjtHtzikIFxOqcUJll++IrUXBYG0lV4exOazb7RpdZmt1ONlMlR1ydcfJqAznRETA2QIRlKGdJXXIOBrVeYAhJDgrEcPrbQVmoHNqzQNbsWHn96f7ZK0ZanNMh6iKDuL5mg5srRVW3abdxS1F0TInkYszzdxjPrVbnAbAumHvBeWM7lscD+YXEm4pPSBYtYKbNadndV/dMmhEYY54UTCMtyfocOFP5lZdKBzwbJTcqyMY8UdpPGKpROIqATHNil9IiEgXK1HIe8nurk7R+lxMmYppelwuetqhbcnKI6EbSKkkvrGvucexvJmmF11CKuCqpGkUifAh0XTBC4Cbj+txydE+9HvTcul3XuAxJWJtBUFVYyefOh6f1F1EHawNO+2wWgi3wlLheGHa9kUfpkOjHqD3nc4bw3a+2hi+Q0qSZLvA0o2VxmhL4+ix0KMVzwe6R9RUkoGvpd5Gb9EvF6YP5j0fHqjDemHYS0mCnqicF2Is4xIfAm972LEoKaU8dD3zk+3LcM1TdlyeyiSjEl/34o0iCoJGe6tqh6wDi7nuL0M9VsxqRGtyXc41MzCDNuwwQg8izq/+rrnIwo1XA4MtWKU6BBqMC/Aoi3sscOGFiyQD60nPO/wiKHl0sXLtwfhyc5SVa5uD+1nIZL3R3E0/WW0qA5rBajM38lZmxfEKdzWQK0YDm06y0IP05sf3DZe9xCuztMb1IJyY0svsKVoMF7m/CPeRUdSCap1QRlHohe76IIB6ar+wjmeT5+GzpIuhW1AmcJeLwVR3kE+gFyuB62p46ySdzuL50oZAy2sHMcAGTQ1q2REAQYGPi3HarYxoZ1Chc6kA13CJaHoN6M786hw+QA67xgf3v1WUZDR41kfy0FsHy8ERdnDtvRbcx2aV0MqgtYXbrd3NqqvzvjB2vYhdPC6LGYuZ/KG78RZ5FS1MNraLni0s3hJEjhvn7eQW0Db6UwaMN49M0yEchnyhR7kycOs9I/TA3z16ByFNms029GaCp97H1TiGFYyGNF46tPBt1byYLcK66xbYmQPjlO5ypS6NKs9Nu3BV1We2wm7mqUQmQH883ViyOdbLFXnunM60F6q655DdtqXM9U6MgkW25/p0XENTQEVtcdhcRA6IRhByRaA0v+33PAx314ecpUheMeNClW6Cet6YQz/uhEvlErfSYjqabXGHsJIUSobflcvRj1hRA/pmTUvXjiJvOU+kAmTuaC13PGYybroldgLwIfy8Iy5HM4bBkpHDqgjH1/Ddufz1nOdyk/AWEpVcWetQzx9t3sty3upzzh2ziEKngNkyyqyGj4lXwd3ReAbVADzmjeRu0z5kp0Q2pfIGet5BoHWU2HfdBVEiu66xmDlTyblVZIgRaXmJ9aqNJmJZn2m5cqq8O8sYt13o0lZ0xc3VI/NpqRLmPnFgksta5RVcFNdvvIKKxllj8NLye
j429catogvucfOuf4d9ywdHTVxJriIfE0psIby1kvfcS1X5Sx1JoN2hcrKOyrOkBDEHNATfOGefLhv0CjjPSDcIvreXShA6ksu5inr0WQ4RkbOV2W2c5ssrVVGHkdtxzoqQgJbnJDvibZodWcPfXMaxABx0F141XecUZC2tcrGP3Zxa+uYeqVPd80aXbufNn9vS3yKXeJbKdlstdh21TJcyW6ONCvNCxEj3zBncR8YJxGla0N1+EuskuKxzwIwMP3G1TRgHfo8s57kuyErHVPMrOL160xkJdvY3Wi4cjGNO4Ta+NwVbPnM4nJoZfL7rZBnbnVuvDuah3+4NtcJzMd7eTBOIGs4MN6Uj5ECxUFEltBzw9KEV9sDR4vTYQeQbz9knRizyXt85jSJdpu5iw8B/Xm0O5bD288IyrbktLl3esgjTO4C8ISAYiTBkx/IOF0cu9VjYFCEEnq7eNOGQmcHBVZFaPeAOrveIn9rmHXuX2qbbis4qrentmyoMnEso9X5dBb7Ee/ZFEdKlojL8qUegmc4pZt7QxLXs45GI7IveZeUJSzEGKrmzuK8vqenM96fELsxQChv4lOpiZyFlT8ruVSKNa74zgmG3CwXGX+7WC8JOZTfOsu6yO3MIsScMJVRX1yUXkRakKGbtwb1ZgbA2IlwzstywzFGhJSWOB850rNO8z6vZns5rDaMtfYM21KKsL9dLbBglASyGYWum7ZOlkJ+pY9DAnOWJP5/88mrnR1VaymIQ7vmxdW0Ynt+Xgtfr3FrnjTiRZr2iAgoGrlGfJ+mCH/SzJqPTxkAnwXDtSVkF5NAG2hFVZNs2NvJRo3RZj/nTeTIrvMfb2BVSbltgyXKFcVI26WgApyG/UXtXDQ2D2tXXWphnsmRrdoe1c87fXmNW4+/XOuqOqnusqnY/nPEmkW9sUojHZXjhbYJdr4jBSxAFE/CjgecDXI+IltYQqjzO9SQR+thWss/yLhYbWd2iO0UJE22LideDRHYWWR9PlZbe593am/OEoKsYpjuOd7rKGbNbkwRtB961SOvFaoWKgp5NS03Nwwgnu9X8XiFmKZ+G5ALYwmTPb0Ot+81QRKvJ5mljK87LK09rkpKS6VSQGL5T0qEe3Iu33ypsetVPjhnzTRbytG9A2r9AWwonhT1ZzgqWX0reXsbDhlxUzQVujg58I2V17Y4XqcnstRIfzJGJHfW2VCp6cHc7knTVzpSOsWgSuUDsEfLGTY006kmTFEoYqXZTeTtpsHHZSRZKbmgRcbko5SK3A9uKMVrZUkmc4N2RqJuV6coDumJURJTJzkBaOwHSiuFPKqjSZTAlc8tJkOWBeSko3Kx/eAn8QHv2UnY9q0bwA2denM4/wO5wc12IsrkuFMNLXb7k7nUBD+TnukAlWS91Ifa3c92JoBmB48TfXvfoYvOMsjOduyoc/EjZYnexOjMXhN9wIFsgL6QCBLZof4NLKeCT4PBUH9VtNL45hUu/4UI+ylGZRy18ISHy+i2NfIabfsFat/upTwyK3E8Madgm95Ms+xkn72eTKI2Tl+uxxOeXs15zPxN/ucS8Wcp8YRieHIUoy17vY/4fQ9LwXodb458K91OexP2OLW/H3VXWPmHUS1+8DNjMudxvGJWBC/NhCjgKFcN/0c+giFIEwOODr5BGaq+II/g3unZR077WgMskvlZ60k6StkDFIVEbhOBPvN2Am0aaqAYj+Vrarx/rP7YKb7dpgSq/3y4Qwdx0m4OOiyj418vSuAD/B0BgoGGch4JKAy/jXr7I0zCE1fk6atLJ8+emEHBclWnRzkNM8r+RImyra0tQJnpp+lQWrfVySPwcbHyiUfoBGdR7ZKAM8h4XBPKLUIGjH0EFDlGxvUMAirWrPrXlp9CDmf0PAqKOvFApQjBSGHIq6/eQ+ufBgUUe0IA8QQP1H0UD8QQND8MeFSFX1+UAxznzmiYNvhVC09blJXJeugD6z0dj2h7gQH8mUPTl+AhLf8bIl0NxfBHEfHB7c7CJ6hR0Dorzfq4AHX1p7V4dnoDNfUI+Iyj+euZri/PR7e3RY5u/K8ym7Oog+t6AvSjV1qvjqP1eQeZeMArj6LvgeCN78onoX8/VUea1aR99c7vP8PByhQ0E+BvsoTj5gD4Ce0DVvfMvFb8C631bOPPQFE4/NHUfnndNzRj90ve/AFvyj2H7BqGh1yRR+CL7AViqCPCdWcxD7VVwsqdZJpRZWc9VcYyhSQn7Au0335zmzzv9wQMkwmCwgHymcPgHqhRMmL9A59PYw1n26dm5iceS7O80TM+1wfdPGkEfzmHMQ9lZ5TXA24ajkI8xGIfk86Xzo7oA86T5nAZlAbtZK/M/4t2u/3UVSNL4Z4z9Fj0Uib2SoDeTAfAqEn8/HyjslxEm/D1hgt3+9Mpi/tfMEfk4jUn2CW99opMeFcefkUDLVMx2ssXyZsecf5QWJFN9Qtk/ntd12QFW8Tqby7pNyrgsvEwry+plsM5R295eRguO4x/Zq8fBfWNySJJ6a3LQP29tHgzBU+H9sVWhnov0r1oHEn/U6I+NfNg4kK/K+YudQf7DxoF5B6L1Hrh7j0BqhjTPvCJ6RIdXv4IBg9JLyjqdAEC81xJBkmah5t3KDkqqraPo9eBN2R04/SL+x7kewAJpEdW726x927noE1i+sz7wM1ufB3uFIAH4gPNAjYcpANUzi/VTNAZLPsLkCYPFnqgMlPyOzni54DYKWuB9ZtHvU2YUeaKi6A+oKC8D87EA/gsP1UfzS3D3AeU1kw/rW/LxBnwPcg29iDkFz5BABUzkP5fq9+fEh2VNMJ/pb4VNvB957Isx/tnm4WkvXu/gJ44wGTEh8WyEGczHKepXjjD2AO0nQaNnM+nXje6zyMCjLwiMmPVyGGV+OUhfT/DzCfDFK+v5duS/tds/OKp/7JexH7Sgrxj6Q7/swyb1rw069r8EaebvBmn8HwFp7G8FafIZpN+FBzEYYRTqCJhkGGD0wE9XpNfuRwKMczhRARd4CTC2tRdc7mFsUMjL4US5//5wo35WBvDdzQjwemEw/NTO49ZUZdFE/9zIJUGyn5lHt51B33O/Z9FL6ldNb/LZ9H4nUgYijWtmqd4dgTtcPirLdw22CWSol+j2ab5w8+GKQVnfoRTeQQo80x+75kNy5vM/BX4U/eCeEiz1Hnv4E+w9BiZ/HvaeRc7fZ9eQeyLlVX/cM2LhP0VuJPogN4Z84i8+C3v/MkpAfiB0/AtCTF9TIi8RpZeUCMxhvH7/AzmRN5zl6+29JS5BN+Psfju/K8o/5CCvYvhDDoKRP5uDPA8voSzymabYrx/mW4DRFPaZQmgCRRCaZDGW/vYCvxPF4urau70p9jJZfj9ehrHE43XRB2je2/ypEQaS/r7OeYNh6tqV7cus/nSf5RwogBLV+PXLR3sDC//5Vgio6rTIC2GsHCi6OcwCJrVXtz6ged81uPcrf9gO///tKDTbgEJkWTlENezjv+YzQVmc0jr/b+3Vff1GNsuvSdIK4gxutoeAcnWX//u/tV9eET6IC0LzGTL/23r21S15
xd6dVqZ/fa79jxER7JGHENR73wVDnvAQ9pfxkPdZiif8kYQ61XpRpW9W1HzQa/jX3DACfFovm1GS52nbRj/YSnmCDGB2QLJX9V6UYfTv78LsfxlPn2iY8H/zoR7YAPoso/2fdY6pDwTLQTNp1cBR/aMY45sxhPT1Z8Qcf3yp5UPE/BP7zJt4lrBmftkgv4+Zf2FAOpgifx/ks6/HL/fy+0noHxAI+ZAOZLEnqH/mluO/TCAfWdD2k1B/Op2w4Gl6LqR8ivxJqP+Eo8hnFP12oJ+uIyX+o+rlvR+9eOFYfzvsU78A+5/giu7HkCjzJEf6FP+/LLxBfygkSn2NSs3MYkjB3f5gKPLN8t47xfb+Qkg1SLyiiLKZZ0BCjrzG4T9YvyvaNPvKUl74DiQp/6/5C7f1perXCO4Lg3rThP+1NALX/6Q/EhH22gCMPNJVX6TwhWP9U7kV8bjIhPyosvuSb/z5C9M+lNOi4bRyvLsnfU9JeXPbD7D4MzmHdG7o7az7geyBD2fFOzB7VZWlvwvlOaZxz5d8CHdfrerrstIgK7vwx6ws8YRbfnQRzDuIPQHidxg99UAuqScu4lNyif6MTOpT1GHPKPw74bKfvwgKenhV1/4lpMHoAfKvF11016dFOXxcE795UgQBf7x/f7jq3YzUUdvVRTPHMT5/vB9/Py341/D4bkUm8WQxxVMt+CdSXvAZxRLGlr5Gq+GC8DWwn7DE/wE=
\ No newline at end of file
diff --git a/Documentation/etcd-internals/diagrams/consistent_read_workflow.png b/Documentation/etcd-internals/diagrams/consistent_read_workflow.png
new file mode 100644
index 00000000000..501d505fe15
Binary files /dev/null and b/Documentation/etcd-internals/diagrams/consistent_read_workflow.png differ
diff --git a/Documentation/etcd-internals/diagrams/etcd_internal_parts.drawio b/Documentation/etcd-internals/diagrams/etcd_internal_parts.drawio
new file mode 100644
index 00000000000..2521cd62bf3
--- /dev/null
+++ b/Documentation/etcd-internals/diagrams/etcd_internal_parts.drawio
@@ -0,0 +1 @@
+3LxXk+xIlib2a9qMfJg2AAH5CK1FQAMva9Baa/x6Anmruqu6i7vD5YwZjXlvZiA83B3uR5/vOOJvH7o7+TkaS3VIs/ZvEJCef/swf4MgGIXR5+VtuX61QBAO/Gop5ir91Qb+s8Gq7uy3xt+7bVWaLX/quA5Du1bjnxuToe+zZP1TWzTPw/HnbvnQ/vmuY1Rk/9ZgJVH7761ela7lr1YcAf7ZLmRVUf5+ZxD47ZMu+r3zbw1LGaXD8YemD/u3Dz0Pw/rrqjvprH2p9ztdfo3j/m8+/cfC5qxf/zMDKG7j8nsfWwsOP6BgOiU1/QdE/Jpmj9rttx3/ttr1+p0Ew7a2VZ/R/6Aw8LcPlQ/9Sg/tMP/0+Tz/uPe2VDFHaZX987N+6LO3e9W2f+jO4CgKfp72ZZ2HJvuXzmm0lFn62432bF6rhx9KFGetMSzVWg3981k8rOvQ/aED2VbF+8E6jE9r9Nu75FlL9sxNlWvXPu/B39b+m6SB0O/vf9vve8toGX9tNK/Odx3UOFTvLOz+TLb8NsnDzvEd0J3FK/p/j44F/nvVL2vUJ9k767/z5zeWvQvOzj80/cYvPhu6bJ2vp8tvn/4Hhv4mPNfvDTj6d+RX0/FPccR+F7ryD6L4j8boNxUo/jH/P6XkufhNUP4fCM0H+F8Lze/kSa5HdtIfDhxltWbWGCVv+/EQ7c9ciYft6Zgq8T8aoqQp5rdV/yWBv1P+R2Z+18a/EKNHwPIcBwDgv4gLKIz/HUL+zAjwdwP1BzbAxF+w4YP9t7EB/Dc2ePND4qeJLLMofV6VoXj+/h8eqfyf/8ahZ/frnznwbzrzr6rVVWn6DqfmbKnuKP6HxvwoyM/+EOpvCPPOta2Prt6/8+yPKof9F7EFBqG/Y58/sQUk0H9w6g+MAf+SMf9tfIH+1+rxR6r/q7X5S5v4D+8B/GcY9csG/ovR/VA4w/2V6SqSEfr7XmXH/2irZf2vYM0HQ//+Oxl+Zw30F4YLhMG/Y8i/8wb8vfG/njmf/7ztSqM1WtZhzv7XxuvfDNDnQxB5/m/WCv0XtvzuHv8b1APB8b8Df1aPv2AA9FfUx//Bq/96+sP/efpX3U9wRv28kr+ryV966P/NKOEv/EaMYMBfO5l/USfg5+cvmPfbsn/E528f8tdbiBv7xxbTlUvp5gHIfDGQz49mOSXrFM+V+v5hRJoMnld69WQ0ezvQPiV6vvpcYezzRz9J3pUPOH5HVE7Lfl0ThrZPmkOli5wfAbfTcROtL/VtrGKoRItuLatqLZP9Ai1HsVbVVM4wwLILhKfpmXbzlZ1BZFmrt55Oo1hJXFV+TdpxUNrD5qp0z950RrsRhy74fiWlWC5MfPbTb/nSr9ejEBCFI8R99/3DfSrnzlNW7Q7JQuy+/Z+mEv+y5p//NamE2I33pcMPSW+UfzDXMdCiWZgZecOCcEiHVlc5GXr0XDW5jJHkQV+3t5nhgnOm43IiVEIHqYgRrBRsKtfiWAFC3HOTmuiguzhNOJNiJxaxIPZMnm4mR08L/eGLUlfuZ1UXu2jYCjA7OS+UEpC5NW5XuNfKNxIe30kB+qYnEUwdh/PsVgdvmJztQJIlxsgO8i7PovtCPSayDOMiwpaR+eo+HSWpRW/q5uHlnF/BpjIIZvshxve+LxTXY0KO9XE+EQ6bZQhtxIiCNcQP9gyl+KjzK9O260WsGxcTbuYWnzkpMHsJ/nFNyJL8RTTOPkCFQZ0V+2NQdW28RJ6t6jtrBWMzg1Qza7lgWB+m8xjvaUK4wdp8v8QBgoKNTjQRPgTgDlmPCGwaJ4kH0kNohcMXws+E25s+F/k755hV5ordjVwXqRPiuqErc7+hQ2al6qxxT25HCXsUrXlCq8+EyVkim54PT/NkabNZUgMJro9DoQofD5+XkjyQjmmufVWMg1b1o9IzjxamBrVWaoCjHLcycK2Qm32k+JkwKIlGGVihCIeNSlYsGw9Z1bsxYr45brQQRD/55jR6cLpq+CUEPC83UqvlC3/5siwZPsPMncyUm6NBJiR6oaKcrfEs5PmfPPO/ohvk3jJ6tHX5jhmHVOHIrAEN3g3kqR5spr5PdryLTNFh0mK8A4juixOfQk9dxFQ8UgX96w5R+OVQnA/Dc2Pc3mlkJv06BhErPlWAhaEd6rdF3IF2FrhqnsbksOsSdkSVDr/5ISS777nu8sFdDnjJN+xe8sGFD4ud4M8y6bbysUSh/JPCp0MBSVSN9+eW975Hz0ul6/kagtO63jfBRNjirFlhNU2Ap1ST1V+DW9LU3jP9woh7A4zNxJ2XTySoGfP00IP7Khz5NAzxKK+nfk9ZC90CKjx/H5UL9wP+RDclSgQr0IfJdo6OAolMMHvx+gHu+dXuT2rWjyWZTO2ZaH08CCVHdFE5RhxLnB5jM1Hv6Jfc9L3A3xHOYElMKn9NIY7IZMDGb2L5mnP4BEZuyweVuTqUv8w4Gqjiz91spTFD3+g78lWKWyua5xpC3Q9/e4JoYa+u+KS0Hgvge2DskDn/fXuAE10iwMt4DQaY80PcAohytC2W34MrX4cGUZQQyzkG1dr+AaXUMVwQfnfwqRvAsgqKKg/QRmQYW4eCl2lUeud91Q4hLkgTjC0woJotUupWI0HDWsI2eIsPGdC2O/WVt3EI7nIszueyz5/+pA+/lKOZ+Du/GqI2Tg4L8Nv1/qFOoeGSQbwsxlR5i1eiy/F3RBGHVY3Yr7jNo2elOFYTpZw8pKCOGAEFzONenOMr4Y+r5epMCAt8b/BXSiS0jhR3RxNby/s6flomJxVT7Kqb2LdUsBf6EPiAq8MBS0bfnazBD0VCky/zGttfRZ7R7MotmnrJKvXpayelLsj0BoOgo4JK5qPkJrOYFqRfRRY7/fBu6KBntQIb346QeGIWD9ye3b55EdfgzLXG39tevkML8/NOAIYaLLxLBtwlP56zV4RPMjpjXvI3VqwRBNItFQAtPn3qcSVVnOqVEjh9CEcOkKWQr+CHJbqqeKS/NjzZjYpJgJkPvSNfciv7YJ2qv7RaD4elq562kzH7vkTvzzsJgyEnbt+HV+Rmpk8oz2YGd67CEKuwBWj4cI8jd9gta/Jyhzx19nXr+9ubP7CFmgcY5vtlbG0nE9/vtHWvCVsVsCcl+SWDblEidbMKZeTRNzC2bno6IIDG9bpuY7uHY9srVqhI+n4V7k7fCq9qj3gJ1YKm1OKSNx5ukphWzlk78Kh1r9OUOFc2M0yldNo7mFxF9wOFOjRmbaKOQLuR+mubpM51JZWlOtXKq6DJPV/+Xt3CLlnIWhAroPLHe9frN5GoEPOjdZQRYKiQjl/XltLzCXm46NZcFoYDtMcEV6H3Pre0ft6HFGop99v24SufMguLFy/fFLOD7Mfz+37qCAjUX48Tvirls1srBbY1O/krUqpO3nvTw1Qw2tqzBKqu0DoXbZ6wPiHE4JiThcYy5h1+f03ldSM4Rr90CneXf1eM4lnjyzPADdBxWbEHcHHA4aQyc08wlDVCKMzMp8DOFXq8S0wwBX8ztKZEPvFac8Opo92tyg4X4lTv0wapOC
+sFmmFL6hzLpb3dOnHdxnC682HDDwLGP4o0s68OmUkDME7rylghKI/0StHbh0x3zCZiPnPncXLvTyf8iXp4FNMnu2IZ69FHx7TXMyXvcS56LFkPlDEXS4tXOlEbOffkHhdqoshFKrX5BxjBsDtwhUYbtlOuVoaTyxOiSkuelpZwhux7KhhEx+Na2e/H2/RkSiiDlerxpr8E36w3HymU9P6Ry/mNwCidTuMJI1+Sa6AHDzZEG5iKBHHiMYLuUxuwRY658122GdU4FfX311hhFSLIhR5bXKB5WsAFiKcu5eWnwmi+eJJUNLH3lJe1A9n9n21wAksWmVMDhfa1FfeWVAMOkLhjOEzq6lbXlTw8WbU9gqQPZnlCjfeqYvzILV8rTe+h1eJqbTxGH3v8rt8VWVzQBbzAOd7ba/13pF8SU/EXp3F4YFXwW2X9O7riWQzsLACpWYfqyuEvKU13d5WFGm0nGuhO3olsH7S1uYlp0yOvMdkSher5OJ3qbu7vjF9ywhfCmjFAh8zclSDAhBHrMnTlelkwQigzxQBrRVexQmuUu5SZ5CjPl85mwxQgfTpGlo5SG+hvGuUIRMrfvIADu90nYQsY2ppa6UF4Y7OOWKhTNIvpSdjYX0il2Y2QURQzQGUZavdEmkk9hpeJ6/pLWp4SU/0rQeM4wSyg5XeV//6cTgNLVpCLy3eYoPuhw12QKfHfLIpXyk1RHSAoiDN8DHRUUaRnUpak4rqD58zel8a24ciNbVDXaXAKz2Zpn4b3hOaMa4XruzGgQ2xWNUy72wdYypiJsK12nMHOEd66FdOJa8XTpOu+whe1fmyvb23VV8BedRsArR91KErHUJ099bAtdxxY+F3SBFNzoU8EQwrxDAuuCpecnw3X2dj1GVfhK5njzzLwAnu2+hzy+FomnFaJEgNHNOVvDWyMO+rvmFN9eWB1mxH9TXY4oGMwLfh3FY/d9LJ2McQ8r0c1TJPJTciWPJ4+eJVtvokNG3s1jr4Aene1xTve0e2al+PqnGWbLq8c/KJuuDyNJmr7J3P3Wj8jZpcB0RXY7qjl7WhfsxFjVOGHJ9vZFUIuPNukM3o+RVM+HF1RKqtHOhH22kFxrvIUT79GjYcrX4Npfshj6jJT1MKYT8RKO3rCZTaKuoQNDO6kusJc3Qo+yF5Sr6bHAexVEWEujpTbPJdNY2byq9tiuBSC0HL1PK2/sJKVcCj1GgHd0jFyxNlFUb6W5f+sZHw9JqEr6r9oqL1pH0cdVjglLQ0jBhqrhJiA2gBI5pt5cB2NcCSOJds4QuVKDHDUl7v6uqG+yZs0ZTAYglq4giU5DI3ZeIqFZAiOk2T4wrIt9BJdO6e68dbJ2vj6PWgT2HXu/sgCufBuckTRxvzHpHe8TrBcbZeAdpaSv8J17jXFlOf46UvwD9OLA/rooPULGMJThE6sNhMq1Nh5g0Ufbbpklw6i0TfETKZgLxj4ULXI3IFPbzpk559MpEQpzYu+jQfc1S1PvaPnPeqNpJh/qCfa75qPRkm/QTr9Zzyo8F78mafD6XsO6iTU7UyEz5ehEopNVFVuXiGKgP5BIyMiK5yK45NB8FXSsKMd5qJM5ufEqnfXI1eqwYerQrKmkKX0MKBrrZgJkq/eSY4ZdvktibRUJ03XzEkX3M987InbgMpqIH0bPv7ceCuQXcgOGxevyx4d5qCICpul+wiI/F3BC37otENp07BFAq6uNOZRMcL4aQKJ8tVPTzqj+NB6IS5yZ5tKqer5kpAgKP/qFI1mce+LEHPJCQYzZD8VcHSWCElwogBqJ1i8MXCFgTn7FiEszKxPzqUfzkERtMlm+o8fNV6w48DJ6sx5sNE2sr43TZ/oAW1lMWmdlKZ7ZbcqOW30mOYy7jQc+woX537K9cdfjxmlrO61H9ViIbnjf4u00AadyDW06MRj2Rf3Zs0Fj+fF2uryZwkf8KS4AHhsUo9JXquj4E3ZWHxKLlSRCSwu8tMwZLvnG/yTuKnjU8eao1kGOXyp/QOi6tACZ4zNn7Gy0uYdUgxJa5AS++AXuVsiPXkYBClRS9TFCvoR1cvTsqDPcQYeoFNKb7u0sHHjfx60cBsZJC4Pi0qSqI1kOabpDIYLGn+K0LCmg/HDfiV/rjg33RVilT9RYJI0nJc3ZQROhDFF4z6LwIMsX8DDPG/BG2Bv8IMgf82xBD5N8SQGtqVof7/V9FAUODvyH+qooH9NxU0/gdg+DxD3o+/TW1NNrTjP+z/+E/UM7K0yH4voQ7zWg7F0Ect+89Wav5V3vuNvP/sowwvEvtD0jpb1+s3mr7U/jMvk23ef8b/ew3wn+XWdx1/ov5fMGQZtjnJ/mcSh/3quEZzka3/k444/tf8fLKzaK32P6/kr9jy21DjFbY/VrY+wN8hjPjHD/YvxUcUgf9OEPA/O/z5Dr/W/duk/8L7f6zy/4VCYv+fhvD/31Ve/vfAe2d5oXr3F3jPmIRWgs+FPPMt8wUp6QuohSNIe9i1S/h9zLfYue2ROi+M1ViFQWAAoNHWCwOtBB2KEkw7AKv7TB3SbEU7PClSpyhzp+km9DVUA11dLun6Y1JGS8RUw0STucN6pOemilZ+MzOBKSHPf/LMVLvfvKB7t6H3PvHmYy9ymOheQpNSL7NFZQVeQzp7PR4dubvg2LjMV9OeKHEgC7IhPUgRoqigWAcRE7zweyL99DmFkmGepo20Rr9N+dxtFxejHGeWKkTxW29es7jq1xigxYUuSqUA0qXd6smfVA6/jzSwxu9kkiX1k0gbR2Lj54HRVw3E6psiJ9xG02p/SFqg8CxO0t/0ZDElUJM6ceqNlvZTt48XYVqKO2FHyUtAilcbhaHInEzEG8+684N7L1wA7qoncCpXrdktaH1nerh+EVqjnhH+IbLz1/q9lrfNnISJUGKtUsOoSgoFbHthO6AbczXXavJQ+i7JQiJ8hrSUxH0g5roB4qoQ5ImQ9sR92Knj0t7rEtw759H3ZoPexAHUCIVwE6xzWyaz2HAiY9P6ajFbxWBDpw4/IRSOifa3EnodFh3hWWfkDPVevDlNDNGwQwX7jLwgC+FT+ZUNs9GPCvTxW8Q2hIwQze+L6gmh993BN2To1DIsxyZTIp07P/Xh5arygiaiBro9u30F2oaQK8SqDBx5S3hsVi9SWHdoQImXcLP0KIVQG0aSb2BVGucgkkhM0mPBeDCT2MzwiV4MPaY8dKa8jd2Nik2pgUFtcauTtu8utPLCMvRI8QCwRxJxO4YwnaDpnlfW0hoK+rgc4U2bRFm7v3oBmuXYBqqi1cAEDFhJSvccBBvL+zrU+QHNsyTrPIF1K5IoQAb8QZoBSe0FyV3fqgdJkSw4xyPbQFTZhpRZkdIXEllJoSrI7SdZc0hFPlzuS8Io06Ak15OefzD+l85Q0t9JkRGpEqcktTDSgh7EQmAtXpbIJyYUxu8SfSnHEnqaNC4Sl8tJLni6yOBGrSIKAihrIumJtVCydF7rCtB98JDZzop4KyiNrJ5M7lXz+yZlaXkX+vGKM2dSqXgRwmdCnBxgoUG5FzPikoxME4bLyVf+q
DcDopSEznP65QelZJTIOZQ4ZOG3uwqTM7GuJFvBnJjKYRWHagN8+rZewbfUOpGY46dfMipvkA5AziFIYCX7iMwgjWn0G4j6gcnqFaUk4LG5Yf51Ce6GxL55lfNAyHMqy7pmIL4Kevbk1Uv1u0BsDncs2ptLdYVPIvyE5BNvS2KmPx5/bpydWNvpYYKwfKGgIqoN4xiIbRJmylgVDxC4kE6FO1ClID6kkKjpIfjFufN5Yiow4x9cXxo5Y2U18ZOv4ooPJz+o9hus12+k39HaIxsBi4uSQIrRUWB06LHaIq1HTZVVVTIgnaBso0tVYmonL1Uv7dgJt4VzDiy2lCK2T0yLDjHWHNqqMlULvBxeslEzZFteYnIrGWVxFTXHjkcxayjUuiKbSIoN4ctL7xthC0O8y6+3Tsjxj3Fvpiu0G0sPxbYZIOvFBMR9FCtLb2U1Mj2rcxXO6dJmHlbNQrRGi+x01DrrlY2vMr5+QSSA3bBaprF5nw5NqUmbzbnS+wuFDldl84tJPjk3ImitDUwZ2EKVKFfRY0Zl/pa5ZmgRa2w7cWrRVg9B1pweFwGvwYKKVjKHl+MrY9K61/ixO+TRDm+X3kTRsZE4l33C2ptfBnGB5YhsdGrCKG1w+NJ8sUxmGKaquiRffcGe04k5H49Vy05aRnbacQVlBSi1dlVkE2+V1tZd5gkN3BP0BrmAp1hy8TZUJwZ0S9cy3EDzwKlJ3fhZ6jR1cxY1Ekiig+8GsYukfgpyhvsWlea8RQhb1Cs4simAqObEEWrKgfRzIlsAHkcXKuxumqLPE7/flh754Wh7+xgF7hSv5gT2XWGvWR8l6Yit6QZKny+QGicIbr2D3rcjZbWVmhrwitsKoQL2BYhMAbRtAlHxM70YyXATtgG+0L2fQyep9SRkMd6gKmVfMV7DX0Eo1BYeHN/YM5dHgJzoY0d87UFqlgRCHS9+YMahkon71NZODIjPiLLpNCtmpFl+3fMUxnW4qj4Eenx6ErqSVvfi5bd+a80CTbeJhTQyY90L5ULx/eL+IsQ1X/+Jleyqr/wQWNXrxBQXnz13MX0fHRk39zZ5fd0D6G7P3lAcWp/4ojuiWHWXFDrrWP0p1uqjk5kaoe/P/hdfBy+MxV4Pn0ufJXhLYjn6yoaL61KBSEzv6AOwA+2H9l0aDpPVvEKmyRLzPIa653QKhPie8VI+QNz7NfQJwUFwXDpbcBwM1Uk1r6suinRzo/jSmhjxHm4oAWzbZCR7et+feMFD5kMb7ID3/ThlSYiB/apnXIvNuYc+jDmJ3c+w9GO4vH51sa/SJhH3vxUK3c8p7dULR0gxZry0uvFSQJ6tcvNro+sXjn0NCp5QWyHghRSSrFqoH8qAJfZdscz4A4j6wehwioUkL9Z/B4Zf5gnriJJnutYLSYkv1sS4wFuiqGpVVA/HLbHHBsL4zkQ4iZtYyXPsVzery/DNT8jLTn4O7VQnqVACrm82+IK3Gxhr8/1krH2Gz/CrX9V2uesZTPFxZZmHuK48e/Kqnx5ViZ0yBTN6kG3jzmobhN0ZYqQd1zGUv2ZHqY+1V/hMRV9zJX54PQnc8wknOF7H/b06Tzlh4cE6+3bbH4eCAvZ+GgrLAB1lpa4E4I15BcD4Es8KWkbDPR9+DRo+TkTqhhO2cE9QgB8OPeXZqx4g2ZUzRk6MM8ZNGkV24/EDuFQkwT5BPauO8G3VL6lCOOz6pbPeOgbXfcCyWqE47CRtuX9V6DMQVfxlzT6zpqWrV2dwP7+w30WGw1uxu3iivNdeuxI4ADMeWiD2MUBxPq4L0XfH8tnXxlyiZHUvAgpe3NBiwOOV/yyUkOOnBjJ/kfj6EOy7oxpbvC1G4oCBMKz9AVx2bixcjnJxKMGyfFd7/KeqnSMvHpTVEZprd6Nji7LD25slQPqvpW9y+B1vGk6g7b3v+Gn22PtlT5/AGXM3CHMesXEVNN/eRiBHmWDGarBQUcr/vmF7S1sOIUVoW0feFIbri+euRyPoB4IwyhCE15rLNP6GCUIW5PuLE7Eo0F6lC3IAJyzAiznX97Uzcv3iYqNBpJ7sJq4rQCvSJgZI2r4vd+AcQauDfmLvLer61UsVoa7e6my6ZfDPhjjBo0adF+HPVX+d/iB7szJflTmN20oJ5fNW6N5zCx0PYFN/IcNg573mGHqEDJ33+FgKnGoPq9+oPiTmWNCmKK99Ys2nBVl7K/kofTrCfb/ghPEB3toGrjRmInC4s58tHEcu9rrXrTxfDqSoO7oZOu9jtvuYhhPz4yhjocw2HXy8GJctCnEiVUyEzdbbj+VUsjW245+InYidMDXcXNuhdFvS7V7Xj7Oh74mYBCbAC0U/2AeqP93+8LzQsG1/rQT1iw4f5U028u21AahhND8LNcbyXPTRPJc8OLHeQF6WOrvpEF7PYfBrO7UDCxjpiNOfKI+w7gaK1JJYGcUglF+zM/I7KXLjO1+yAWpCIK8AmFT3kAT4Fwir+2nhPTW/xxhAo7sxyacm4hccTj0dUEi6xL4EnxgJeCsu0A666Ct0Hx1tP1j4RHkvT5MRQ39uJBCvcWzru8rREf4YLILwj8y+BQoFd3MmfufIc4Mg+gIN/TE6KPkKm4TiFKXYmIL8tF/my4YdvQwpKdYK0sWTFwivAytyld1Lw2DSzw0PGql4Yld2BFkHtKNH9E0zYtPE3s1VbM12xxi58J38ovCCGWjNFvkbqTMH9soLkGK/dvk4osN+bgO/ze+pCQSPKvkliJ3jSrE67GvPl3Z84lySpLzQLaWwATWS/snXRZLkhehLsj8Y6k+Xn/b3XJ1VFqTwXrFvPs9axZP4/rOfsxXk8Yf3kvPr0tUYy2neiezgI7Whh9wRTwAQE5zR2y8NR5crAY8FVb3T9thCypAnqtBC4Nj/7dbMCet9uSY82KY8W2Q8uMS9imYMUAWeuQedg77vY88FAguvRKFAnz5Hyi+E2JaaQ1NM/NxbZIJNpeFDrGFZpMnCEKQmrEfLZIN/zJl0ZmdY0pAK5qFX+J5+0kfvklvpiCu88CeXbhDlJi/lFq/HFzdhBd6PwwECv1if8fXvc/9hfjb0tTrp2mc97R5X1BXyARp40p76X0J8QtLf+//++/taQr8cwxqoIsEEEmbYFQh51sE1MSS1yg+tiDbj20652WdfeGUUwx/nwX+NH7+RX7Yx17KRB7Zxr40x9Jos1f4iBv1rBr2lxox3F88xx4eKWwI9K/3DTn52w7tw5AWow7dw6J2BybfrDzc9YjNq9VbtY//zHct/uSNd/WGfzPnbnd079CUmhsB3NsBoX1q576gh/nxxBfp1V9fX2qR59vwz4/ePszHH/nJNb8w2gbQr8ingZ02/qIErn39w+R1VH3vGjI9sUWvom6VRTHLYkeEjn49M/zos+uR7KZWQrYn4e3TdUsiLdnrlG9ZYJKyjx6bfdDDvIvyr3E+Vnl8euUy2CX0pygRT3EJbQSVctM0aoaZ+OKDn/V9Fuc8bGFJ5tntY/JppA9md9wSTv2KjZL857OxM2TyhXW/kb71fcGFr5LloeYS4O9mPTcTm66vo
4fts9VrFb2Wd3a7o1nRxbYfmbrUVo9rd8Gpree6SiBgIPf+utbMO3zaNaTpyoZtHubYRBpqsJWHmpVazJ3EH9HEyqgnA94jU6EyRJl2XYfOYkJCNG1YxQe9ROAeRMunLF8XId+soU24st9qjAmFF+kI3ZU3cvnEl5ijMFa22QvAZz4KH5qPEKBqkPc6UREqiVQAN08Noly4GJlRt2KS2HRCC1BIfIBnHACLuBY1I+TZxyXYtYQm75Uht/T32QUVJQzQ2B9ZeTJeBZdYnTV1nmbPJAn9lQ4tHu2XU8OoKVxjnINkYsuv1xdtf7028qWGnf3l/vIRNwOtERnyeX3VDPr/hcC4+i5UR9kXCM3Iun6tRcq/G69HF0zH5Z/9ruO5HeF/lrvjsMYA3zfIY1EWP4M/UYCG++0le0eCpAOghut2Zmpc+UBlEZln8hFLRZBq1vEuSRpuNouUaIXOiGPFn2Sa1eo4SaBoruzuhg83v+TubjECzuNgdbuS1G7aI5lLNSM1mfIzJ1TLfTpjZ5vgJ58ero6svfptJHAJ0pMkW555VeEwKqpVuKIbgk/gbrh6PoGYpOyV/aV0/mSdWVrfiOt3rcVQfwY1ElbBWQAz977Uvyas+1EaOO99iRfglwjv4JKY4Qswx1ZOavWADVwvmDSB2ikD4NvpKp44DLD+Sh7RLxYH38HioXvdOHIHMTvxu/ZNc8iagWjc9nS6qxcrWRcFhy4KkTCBkmuUuYqPKqRCgTFZUt0j9ppVNfVWvt6OLRG2/BOvU4tmpupfYngGAw5vW3C7BQB8BAloZcMZW4T5BkiDLpg7szZyOat+G8/Fzl0ZGTHYolwOdJypsubjk/XpSRt6hoIWez7QDt2/NxJ3iiFlt09M3/T5ZH+qCXsaVfEckH/MNhL+e7Ahaj2NUcQTFNfLy3EwYIUcW6ZYy4p220usObuPwy6Hos6wgaJJfMaxX9Ot+S+xqWQdgPfPd30mFrhbCE6Bb984jLxwTnfA0U1PZmaIaFkgPx53vBJXXFIm2E7E8FiL7rcZh0p+4AXBC14KPfTknPnRQLqfmXN3ca6wrXwBGM/b2aYBBJ0veYN+qUBUewFNCjfCaGbo+ASmvTf9UHDZWustzEu+3NeAAn6/f+0nds96O5QAoCAq3PkihFf4kDNKnM/dyKRBmma94KXMbUZwervpR4lFH+MaLYPl4M5KmT1SFROgu1wIuU23uEth9FGkt1RvfzVmiaMGCWsdrrnfAN5+oRd+jrFDlmNEX2pUzKek9u9nKH9SbUwp1Ej40OojH42yyuU+iKreCVi6T2q6+58U0n8AEOFdMlg392E21PKh5GAQS1lTVlgAFoWXizs83oguOtlfZo6J7plO2BV9/6vSh+ZkpWzE0W3ysXtPmnhxFyDffQxwXZKSmQ/kkhCkprdRqkS9x1ipQp5ov+Q0wraPu5ueOKWj7WHmZFdUejc4AF8pmpswb3KvmSYRb0NWhYgW0NtVSYeJlX20i1LnbkU7VIjLoH2ylP+EASY5Q4DkOaTD+Q9SKVE4WOz6J853x8p7ySghfkvAN2+rjq5/L/7ryRiasxbgDU5qlj4EOlPSRdE627u4FyX8/ookMhEivqkQ1alxkXHex2DynfH1TA8pd5VH0qYx6p6nO9kgKOk+NRH4QmKDK8NW3ifwi8Jr5oifoeLPhC0YBCZB1rvrkdp/mdMgPycSVoeNdsGqEcIYtZRNflJbfwgLAiFwRMgerSqpe6pV+t7Yzjr7t8aH1PbkBU0C0ve1DUxkLHzilqxluxjj7nLfBuTxwNDnXVo6tAYf3uNvoUnRQyBwXwUByHnKuafB35bW41df3c1avDQbiZuqCp+FeyC4TEZ35jJ7X6dLCaTGIrh8opxT+HA7O9Iav+9ld9AmQvJrqhcBouiqItxWdL8gLVJP0FHhpPUT9wT6gqXMQy50aO/Bs2feUsMAhjspgukFRXU5oEBiiYYYh4NhXrCk1M3TGyiOwJnblEJhjtWkrMNDfkGGILzqlXZhSjLdQ+jbdardGo4n0g7Cp0RdtJyJQOn16LJOvTKlzaXYUqRtxe7Ck7FPabie3E4llgE7dC/gd+6dzZvxo9U28zTOobO2ID5FG1h/mBbnsUCk4J7y9NIgSBO7M53rK5zUGG8oo+cLG2wCkrYvZfnKglVJcI74HMg1eIRDwUnQj3bSEJ4TgbtpXhikuuEggey7xGLiBGnpkshXUuydFDeOFvlELTESKv2PU27xHXkpI8rppr5xKXT+ZYIU2WJalSagvcoTJIg/WJe9eoPWmq2NNNUf+LQu9pdlCLcTvQtZo112UuFG57mUS6ZRvPlTFNe1KTLB/RZRapouHvalOzmUqCXVCV4GeX7y8YPhEOj6ZNMNXAhRzzKd9HYZ7DX/dvYcfd6GnHJ/zrnAIhf0xWnqSC3Hj3UyoHqumTiOdDYrw9aVREbRkgo8+y8/xJdRIdD0KOIT4CglpIdo5LaT9/Q41BIqHW3dF0uVVSvJ4b3al/cHVz8oUVQ/egCk+sszHlllK4nvsdaJ4FfCWgnd5jX1dihpJY7qom2nzhF2relJ81EfiGNMx9zb2SJmmgegkPNB6D8ZP5iVXaBgFSG+jgGGAxqriuPXGRZCkgEDP3u61OxTpfJA1syrDbzNo0c6P+o0wh8DaD/2iZ/bqfVK1yuRBUgt6aiYPccpIfgQqnhWPYwFuOQhDXNei6hLVMJnHe8j2a0/vC/MHAYNSNXyhZG5eFNsqIReyZUSV8MFpnuA01ntV2OrGk3VxzhH26LZ1I4uKMJCXU+PNHB9O5GZSFDiT3B+Pctpn82a4sItSETIZIV1eMba8Qd+tmf6TUa4mo1wIlbQ9nyqf2ZyjyvYTdjnseLYog+6/PHhZPIy+iIPBTNdKM9kO4MRqrUnrpDYS6QkVyC5oy64vcDq6zZ0Xgs3ALJfzJFKEjbLJGR4W8yIPXiXNx2SDK5kecyDaMgW0rM1lvjUjD1kwpb1g6QeNeI3gT2AvNzvkfL93TZcLmZ7DnJbNgPB1kXjfVCGphPbwb0nIvSkQuILn2EcGZnVRZ64Boq8tDsXX3Vb4NqaIoZILKm1+0NcRGeDoZX1KL0+61Xr9Juu3OZuT7rqgw8h5cDlSIGq0ilU/GN6KcPcdWiAfBP41cVJGDtC3Y180sqarQ6bMltUQnSb2VI0gD07DF+s5/LY6M0EKVt8cu4/vGgdcQHKJjZz79VquIWYZlH1U/lzySR1SdkLDuI6qaZA6kw0U4Jp1KUraqyI2hKM1uChcH2e6YPNxO4uEatXlJ3+2NAw7x8aQXWSW5bJmeFdctip0VkAZlzhLbqGlNuNLar3qMUHV6n5yl1VxdFpeACyw7TS5v6qrElyiIfO2PEmKvEQUdD9S0wuf8OsZc3OUVCNLSZJ1VLAaHB8knSXFct11a9kcDtc86aJwBshNEW7DaIMZh5XuKdbE2C9MhyDT6DMV4/cSXq93R4hrA+UEu0XUPmirHWweVJqT6A7OUU/TMqtB83iOtbkBCcsCfFnnDHZ6QlEhuFDD+clRM9PXkI+bC86
NWMowgD1kLbdDKcD3iDVK5jMGZcumEQNmZHFCvOaLo7d8r6kgFWwbyFNnPS0mcXKDr/JFHxALV34eUnzBV7+lLQIVeeGSFp4WYbMkXeZiScnxXG/K6oR8S3CR6Td5Es0207zyIPivrINfD4tI4z1GP6/RVoBKHHDy5Te3XsSJ2XJlX5UH6rmuth2Vw7vrIBPgBKBVN6gG8wRmNd9l+ucyaOnoY3FpQzcSdkCd4tYFa94U4XRQpF0ySTd411vxAPotj7grfeFJqD1nYBC5zNUJf2twoh81r+JvIxZgcaxDnXBWMK5NIHFlYQ2ilMrzJB9+DOfOCN89SkfhBpvDa5jo1MFd5gRGwmWLsXxs0JWwtF7VKLcXI2CyoDnR19dcxtMXT0CfIt1tvsrqpLnj1DKlO3U9voGqkqEG4M7t1iFhRyXHx6xlR62P3OUDUhshF1yGhIM0dLihErob3tmucX6460mO41J1PA+5MV6SO5HOudQ0YtO4xZr3ZcZOH3H7nDKf7hsABl84vPBUV25jZ7/gBqdc95s3foOGOH0xwOu86+kFGQxYGDtGRt/YOkncCMbEILYWTXlUfDItFlA0ISk4sfQADuTA/PTn19HmsS63XBspVW0D0Qq8CGTfc1VtmPr3ibmfjKWeBrHkGCDB+Cd9VX6ynV//0So1Un4QuzBp8PAdihlQY/+Uj9CvgISBK4MDPuE9y2q0O3ivm4NmOhyUjP4wO0iORiMIvVPzh9IfGxMYBrOVsP0E7DLWcpWsZebgn9H3lzFt38PADkKW/bdMNq98VCqE7ZpYmycQnyFoD8hLmLZhcgqU9yfMr2QC1237APNfUZa7d2i7JHiGSx//rZVR3+RNLSWucPUFZpOYlkFJcc719sRog2TQTdToPI/w52hCLiBxkgjaQriuNVU/eesShRxoXJamXst4WnUWmMVu8Dwm5MZH9SjUHkt/iuNh8z+C0mwLliDB+dYSv/vdGy41Zq+P+NYsItsaN70kilvE941v2BmslMVBy2Uub3Ceq3xKH4HDBYM6rVI08HuP/GPWdvYIXGFLDxVx1cAqgLcs0nt5nqhbGbHLB2JXVUjfY+GZkfen/bLuwPtu5QE6e5KCajaAUV9DuLKecOcetAMQmppqx4HkEBCxaQA2oe16cXVzmmA3vG8YsDXdOcabIArQmKw1SzVDc8wJMbWd8pLeCvjP66LrUvF3WsN5yfrMmQmxM79a46W6uPx83AgpT3anIG8Ea22OQXIChHvlEjHXQZNwL8RrvYw9pFa6DHTH9JYN3prLt3oRNhHhODd8q5uDfsOgjvZrsL0QWz4S7cHR50zlr8/hgSHO0B7zAOp+H/GtZadCUDlB7M7J5pGAhqt9xSP7eVTrwMHZHJCvHfixxMe1g7RRV76il+ecAd9935cF0E6QYxkCI3knyPbzxCRDa5lW4KfjJLxwXjuCoDz66jqu9Q0qx7UGsvaF+SW+CcW9W3hV3sfL8IahiI/vAz/P6Eqq8OlmhYGOfBNOxO7RcN3q3nWsyKFvCK2twd/Whn43aUp4ZGH6DmJUKvl5x/Y6vkhAMWSc/UgPHdMm8MZU3konAJOl1lY0MWdTCM7QkQ5vr/gqvEtbM7vTUWS7q2yhABqBYeAn2JMPNgiIXvzw/RQHc62Ijyta/IFKbUDA68NkjfY4k66P+2JuXNJDn2CEfSVPcm8v53vN/ZYNzLbYSbgyjkts9JZQ1eFMSDC0ik9uBCXg0rKVaq9FsUTtcLNctw0cBxGsRD6etyxqd7byE0h4rBRMGHCEy1sMxRIfg1Hd80ufOfJOQE4345jXfWCfw35zQU/ZnhDgCNX3+6G487hkfwD6CHS6131gdkF5fDNdIvRVh51uWkz9DJTZ01N9A6CcjZATIDiSStf5ChBnBaI5Il+oCp80If0esFpfc9/cear2FgP5sNimLHJqu2VRDs6LgTnGfMQRonnDMXkOL2tbI4KYGK+FyDBYxgMg55RD5i0KO42LUxxYc1IwHl1piuudxqIBHnsUx2YEffzrXgyn4+FMA0Hgc3PgXMcMj2ltyWUtim4NxgU7ABuOhOYo8+zBexL9tyQVM8U97zQR56lQPrbLSV/Y2GB7W4ky7kSjmWU7JqO6HDC7yHmRbADL2bAyJRIkojmn+EaOFSscodN4TaD781glqXmIiXUeo/s8P8Ab2Bq7ZIuXa0QNUnL9NZaEP4TX+Yl4pzlgbIgROND8J2qZCEwsyAwEANO5OhIPZja8+S8VcgkULuzN77RzTbCiBwoEgseQ5ys4z3Kayg2BnPM4l9fKIne2b9W0TATij6s09JF3TUBxuqv3xsNkbvRmlm1621kObR1PHMrr0bY5cZDv6lryaDOsj0EkgyBTmaoWsyN7GLkIjAkQwVd4bQQMMJewk+Fs8NiCF9E3hJDbhrV2rgpolAozXaAmNDPKHjOyxAwFbBcfZPNWmRUgD2QbpcIeEQfKk0skrY8uEhM/85laN1Fex3MOQzX9UIvguBoQe+5h8CjouoG0obsmzHc8d/weQoC/rnL0GFwhCL59/xnQW3nruUTPlFqacTT6fiGBtJ7XCr6slR15BfKvKiTY8K3X2JGY3BERqPP2iR+7c1RQ8MxNrR6tkpN0b1oSGrZyF+VD6X3InBhvDJS/UDP9PFenI63w2oka996j6gqgI9NggZUYC809sBJErBAZPdF1JE4XhsFPVJw6AgznroWtVJoWaJ4TeVxUZ9nli7ASR7L3cUp4HGFkVijZgqVH1DVBdRS4fvuqbr7guFJrBLEI/RtSUKBKoz5NFSt71ET5nmDgmDHF0PWNCrBSirUGPXqJez1nafc9kpGY/+nHeTMESMelWq2+j4Q35McsFW0Iis+FxPHpvccWiMXIZqc2bB3IuTMDDt0wqHljdeDzEbh+01HPgwFhFNoDymsCklACAq11lR7TZLjNZ17aF+oy+zjmkEz7eZId9hmCOOpYUbKtAdpbItFellO4qoC30OyLIv14kLG9UB10+Y5/nzHdMuVYO9834QxGHadMBHicUOJ46yKkZOMz34MLu+4xktLa6G8OCr0OyU9LVaj8WRZa8kBoNw2t+dJKItoT4ihQycS7L1jo1SsZSPxQ3zF6BkG8J8nzCavcg9F4lOCGBaoVM6S7b5XxC9BZwjtjeXgaBvQtEfQ0rB6PszQRIdi4G0b3vO3fKTdeKEHHLd8zdJSYf6Q5zt5SCPZmjXlb/hy78hs2aYukqrTXKw7BzeyGXif4UtCM7HSyUTkxnn2wi4zanyfG5Vwvsu199JbLtTy2v5wmfsGv+B5PO+biIrSZzPNMkRLd1TSkv27ske3doCdly6mBzDoNFY3NpAYDCc+vymBTrniaOKTEOFn34zYktQetdKMOgHcAJuKDGhN6hL2IdmP4NM8aD0aVeT5KcS0tFQAQOFKdTTf17spq84u8cMSgAANgD0ggorGly7E0znnVvOcwXAGl39ypdQWhhHVQ68fiRbYHLs/GwtlLQtjXsnzdVSbUw5LkjICjMfdBCPDJJG06z/D8ixshYRvB+7wVFx9T5iBPgj
ZmCHytxFuWGYBP23zesw2FTMdGGwkPSWvmKGWPOl3Vv8y64HJwsqGIiZywlyUar8X3EFSu4CiuCgq2ADNFMT3xjY3v9TgB32KGWMC7q6mzF9Cp7USc7s/lcWXqDxgY0sOvIwlc+3O+E63gUKFIAITpBTt6bPCunEmzxaTg+G4tXHOZGw7qdYSbmxLK8UL6++N35t5F2ufhVZ7PuA+pqAtH9UUfmpbVuoHCq1bJyWgzN8SmC4m+aqocKgjPP4dt5Ka5Wxs7xlZ9tHg2TZ2P8zwoJi68AMc8vcpH1fsD80nguyDdkTSBSS/y4LWz8sTlmwD9fHmPYkPz8KT05RmeP8+LBEK1cUwCFxqvDKJRnLl/Lbrf38PXcZNYf8hhr3emrevaNXD2nsQgDsz4QM76SuaTqIe1hGufi3uDAwaYlJByw1TDt+yGCqj8qhxTFS/H9mVhDfjHbGcCgaHFlnG3nSeHQKvMx4rSqPA6SplQQUBcyQW3kOL5Q+UZeIccboU/g4B1iPHzTRphSTWo+d2svNMMVgCWCJfTIhH4iw1RXF8wpLAuHmu7bYPVh2KqUQHazn9trYJjTp/VBNWJVlbSXL2xJ0MmpM6dC/7XMZPusULzYSP7+/U2XFj3J6fVEQUL+E4JX95TCePLVoGzxhi83pJygrJbNDUSkFTGdvTE0GGLvpJSn/DQrFrRKyCIf2Z56Is3jpB/viij1UP4sffFIA9K8TXyJ42wiXsQ44O4wgkq5G/PXl0qMD0f3p2SpTnwFr85YkZYrxMjSj02wxIBvc0uSKhQ6LENCWGFQb7SsUmNljMiYYsxy/0+rpFfgIobhl0vhKL54tFzm+weZuElIQ9+6yZzmX221deCcfD7XNDjqMVU9adW15zpPXU6TY0fSCefPlkQa5TUlRNqeLyJuSmLJYKeTMt9ho8QRRqEwC6w5z4Lh284Fmz2263uVTdugpj++htV4FxMqaqyN3a+mm1KoMjdXiOQRr9S1rc2ACeKxteujwY7Cn0ZLUrDewqMxPKx+OwahVHDsKKFal9xAwqCnWHWePsG21JC5FvTZ4LwBx577fTR2ofG0AxhZuNj0OAjji/n47PBWXft+1RJmucf7PFdvKAReIL8XzRd1dKsSLN9mv8el8sGGm3cucPdnac/1LfnTMTESOy2InPlWmm1jY71QZ3LEXMdph9yu04UDoKqO3RjKzBYOk4hn5/pwxskAHjQIBficQi3cw6GNr55YoyWE+EfigEJc00xCmR6ctjVxs8IN4KfcsNXJVjuC6L/j3GWQkgrjjjyiIm5fycwzwfnvSECwCtXjsB3tD3lVa6wf9yDhpzuxSkco03pP4GftkGAw9dI6CTR8TWl1k+m7m2kfn4M9n3s3ud10GeR5aJ5Dpkvt376A8rlXyPjnxJ8z1ydeR4t2t4ajLH9nQRzuzMMvyS8r1LXT0Q3NMvMgHkBbY4gRpmFTqnHXXp+tc3ypij2PODq44VI7/FUCC/XzBffiZWMPxgD+AEJUsFw6nnRKm968LMnuZarQRqj9X5osMzoUNJ2WZJPEGN3lcu3c+n1IG3RFmPi25hTII0iMR/TKIFyddUOfQjF6L+pl3+92a9oH9oOO1cITN9w+Dn24Lg8xv58rspNhZ59oy+PYh3BJy3BQjNtzR/AAmVp9OqsyuSKEl/VMG4n+hqDIv7XpPpQSkEuVN3OhmSjRhZZn78GjjPPO2iJY/MM7eACcoEKxRAUFYzlJ9NIvaSkqesoAGCxBwTm5xtFbVHEdBSr7UKzKf5cFj5KU6SfzA5uG57gr1+K0pdsPiS2NPDgHM1JwJQzLOhL8iJBZGCf958y5SviE67e5/NKAxU0Oievwj5+a55zH2qSMtJRqPNJWDeb05uTt2G+kXWnXzddnOJVC9uJkwbSJCelNr6qbAh0IdmMliI5nvEJhNpAAQvH6idVnjGp9jtPES0qp23KrEi10MJuAmNC0UDMO69VgkEqdmtyql5dLZtbWCZPC34J6WMwWEeFClJTbeg5xbLDWtCETscuj7207Dz8YUlCuNIT7SMq0ms2FwrajZwhVXYBI7a5OVT5Md2dP2Sjxr5rm7sFWzP1viv+4mDko4nxkkHH4bwqaZF8t5waRwbO+TWPl09MpSA2BMqX8Fd0BWEbV0NkC0o2XRzWSZJRcgCWQp+GyHgfskn8VXRHfVaqi9IFpNI5xK/6mJpNFCgFOMsFnb5Mr1QLe79p7nn5KmA4vP65+u57kdQvOQMMnruKKB8r2fMPBDGTln3WU611/OmdCUpIE/owOOCQmYYe4DP1XXfsrUQ/dx0pBPkBjdjMVRGOo5WhzuEe/7XijwVeVooALiSM/pqwxKkgi60/fMfd8MLCiV+XQQvzPdNY2s9shgj8QNH0BWTJyOhfIybzI08qBuHc8C7yRiANaSETZLUc40duGkY9IdLiqJAbyPE86e+voUvdwkLVtOZziNbQafxHCGYh6T7qnxg9j0NP4lTFu7+29gpN0eqb+JfTC/jwUahHzfNC+71HCYIueEDjsu6eINATZ+vRs9Ed9MX25tc3EmWHfxttWBGDUNGo83UvllfXgRL9Syqe8uEwB+laFoljxGOzzP9i1yQpl84Om3zVp9sPVQ8IEE+fp3nOSCJ/kbv7UHJZHQSWoighV6+ePZA48UJOqe7paaNGgV65ADqlwWYjS9+/mTHF/qNJN/yFkaAM9mMpp+hIRIOZcv7izDb/XQa5Y9UatnAFFZULrCSXEpufc8M3xe1HPd6/Q1euScHS2EnO5y/eklA7sLywj4JaWarpcozSKC4HX3SQNGQg9i6kWYIpoAsZMZJ0As8QcZyyj2bT8xzGP2EQEu0+H4jqf2w+aF3hi+kMTHLy6+ZLkGd9mGXbdmJkkvTdEqjtdPfY03lCeyZlsAbFG7Q1LHkZGOw2r/gENif21G1sIfgdSM1BLwn0O/jint51twIpgzFCGmnQBRQkmwsy67oFIj58SbWcKDSF86G5YQl4vv+iUEJwBpvYrA7Fx7Xu+nQYx1Fk2L+gz6BwBnzg1js3rrdDpb4Y6Y0XYdA7mdOIM2Iw5s6tjAs+pS3G9ws0pPCGPf9xmAkrRLvUQEuy4+kAflUI4fCaLb9Wn939a9hokChxNKNGraabvu6Y92oVWzDSZ12sYfaoIiEEHUSCv21T3NeNKlUSF2syEEysnj7MK6NExFPxiM9BhPBEbJM7oEQhs4jRpiXWVz7vatGGcdyf1T/YfNL0BY+Y5DmsaxRO8gzM9ps9opz+YHU39p2S2bzajbyH6nC+JFW9yTlxrOl7AAunEK9qKmLXRYM0iK3GTI6bPyv2fDrkA/J80fkylsKzsPvBSeQy2cfPq+5l7upkB0Yhn76Ye14MNT37zR8tIUmMzosAwSxo7WaBMk2o46yui3TxUY9hD3JJNUrHuQOx2TJvFPPolcZ10nVrwBAG++otCHCG2LD6m7cLXV8ZBLaRMQA5E4ZwOzT5GUUuAMwqiu8X5GqwvzzFJbaAQx27na9bMafrT/Rx6nsIIPfknbYJmm2YGuS2gyRWi3+Rlxe7S
vXUVxih7UZ/wzqxRb+eAbfE5SyQt7X7EcwwAjMiZJZbnOsSx89P3PKxJfHQJutxw8BHPHTTc58zThSHVgs9/G0J4ZvPSVhZ8d1x3DXkLCeQc5gSVg+giSuJWDdqtiNnbqpBBUHRCCa2jy2YwZTGeh0KoCAgqINnVb5nZatsw0iQjIn46XXUD9hTSsly34Gkqp009zUd7saE0fuwov6c7e25blNraU3s8nbmMipZ6PeM1KyK24Q4PJAPOsbKXA5LOTCrGbCDNHzoFX+p0WvseR8vEm8SId7Z3AhQ1T8Nl2YRxwWnwNVh+grxeWOkz2E/eMFM0rACzvlS9roOX3Wru7grlmTPcDpy+WNyz88OQ3zPqrcqM38LU2Y5NfzVBj/l9INKa+9eVSWl0LI6nr8dHfF6hQX2A0K9bVW1Bqocka03SDljvqQVdOqIjkMT0Q9KBIgG5RMYK8iHNhw3cmEiTj85+X5s1+gQ+in+8i/4bIOJY5LeUildpE+0L9xsfweJ467yafEQyvkenHqYYbxX5selIAc3i3rIK/7GuXDxunvOaUaCSmHw0rZv8x1PVlInDw71Jr32j8Bv164seCC4JdhuyRBzkjRRExduWJoEMBnUSHs4gEDLmt5/bI2s5iWIW5Eg6eBCbSS7yqYufSWdB0eM/cLSsCPGwZ4GxrqPDY0Nt91U7fuea4vQeEsEHKjp/h6MwbLcwHUJ/zePzqskunEklO+rm7qEmQnLSR0KjQ57UpaFRMERvLMF4NGoFnq2vgdJESFH1aHNMPkmI+JtgCJ5F2Fuog25MvX1/RQKcFUxmS9G+DbNVEwspuJOiLBbJIXO5/2PoTVQgRZScJTfkJd8a5psO25RWNmZHKyP4juKT2/xbIfdWw87eHBeA2kutA+qrZ/DywReyvUEm1xzhX1BTE+V37n3QcAkOmyIBQ9j4VKxOUG15cKzR63wqWMm7e3vO3v+AQUhWrpSVwyXOYIq7jjf+UK7xe5fv670wm+RK7LwPvk3HuvCDQ13X++Gn2o5sfdPz7F+uOyzIK6zlWVhTx+aof455dpPTe98EM8q8follQtrTaMhKv6VqLJmiQcn05jGC9Q1FMkASiUoTdgQZMGZuD8sukp6/4NIE/6ABRHM82H3kUmFv3oXMcFH4xLSh9ELBmW46F9wD86CXGkUH09EzxNGG7ZPr1MOduiSRbrXwJSgrKgmMA6RRdAvwCROgiL7XDeFFt1e9EodOIhDcWfkPd1Klq1gzpfaa/rFbmx1ah8Zc52F4HDWray2FyB1ERfy5Hx1PTdxjMxi3k9/6XXkJrcPjNv9JM+EFqk4kf5np8reOLT9tlejll/Hmki2ObVJEdMMwAONOQncHXYIimk9TWduDpvP7E1H4oq4Z4Tw37KpBccSHuepQPjBs/jNP4HdaP61SHpElQx32XH1d6LHN+jZIyPgI34fg5GNjGCCbAMI+NBpdSbh2itWhvc4xl6SSCE3CvKThzMUTF6qnbApGO+brP15da9YzmNhACvuDVBSFkXwhSPGmnyRdR4FUodcBucLWU3rdqL7woMHNaISPqz2n7QGtCx5XdLV5lyUysTeMNC7tAKiWah6/qwa+Wmk6g1Okfse90zoI0uPUdvocJqCTG3g4V6Nn4GVYo9SB1hyDGh9U1FzY9e37qDIVClZrL8HROn63kKUcT/ua7Rx0KP193QYatkBoP5mnvaSVUFCPS1HQWjW5W4lC66XbTL2epUwSu9vOv6WGEYTynm1Mr0iuXRapsff/nacBabwG6ECMfoE3+i5o2Goqo6oj8elnfx7PYJbceRSS75IY0dJmTWpJ/PWxV8b10Tz+BsAlzHH+S0EpCkfnOMyFphkt/E4dkMPR3CxVr8UNuQ8DFmYIOuMIaVRzASztUwvOREBSfEenhR1fj9GXJfMuZiCcsJharJU9qKfJw4fwBGPI3gK47u8ZPxp9aSpyTFATMP4iKLOBRdGkg/1WVbUFh9etR+Sex2Pwp89lmakjCcdz2+MVH0wbmSynIxpOAkA1Gj5AtsUVMSVXdaNZfWSfq/EPTUq8UrQhYdMqC8PqdLP8va0jn6mpWoMSGeZVUYPmVWCtdtqkPI7WznKOcRa7xdaCdS4mzUdCuT3aqoJ/IILlbFXfHcl6ZqKEkuR7VW/DrCcqPxeXMJK5i0KiGTmCeIsBQighAteSJ25yDMi+j6CZkwfHVgc3NmcfXIDjPb9dBksl+oY3H/z1twbjgeJUyhEjlP427r9/bUWOgzLr/DOuIJ/NFnlBOpgIvhzD8hfQoyAes1kaB9DjUHV2Av8ffmx1e/39U5vrH1c8/LpxK5rMNT/NqHoEer5dIp9iB0khIgAu8ZDPZzwN+a/yD/RE0si7kOx33BU8VIfBob3CQ7e+YVpXOGjAACnYfylO3iY4Fh6BPFT94cWfzo/1vyjQNyghicdhU+9WYCWuLFuJtbuLkP3N+L9JtbcqocaIrk6Ri+/7H0+n5neb+KDWmnouhjaLkhSfiKWK3XuDVGGHO1We55r8Cc3v8xwf3R7mHC4ahwJFb4crMuxBQlRQBgQ7AuBmBuyuhvFT0ZNMaT8Je8LaaXCEi+mNrWVUKbd0PmCFC3t9aL01FQfGBK57DGWqiW+fjXcHm5F7J3amKaWAqOKBqhLFjVuXixp8X+hFnayeQOe3RSV8g9rGDkHz1/99TKO6qqr99Ms+88WtSJbxsusQv2akbT2UlFUihwpadfJ7m3D2gQVVP4yynjpEp3nEIIRCy+C8/bidHgfXA2wzz1UAmQJ/ReCKd1Pnx/dM8dLwa9t2C3BO2Fl2G+Q7/+mRoKH0/GpCdKqONlrvz/SPpWLI0z2WxgQdJV4OaKCO7l25PtJN6DopxXkz+7QvI9+rgVam6Xf00ODuVt7pTpxaveHiFKEcJYkooJQHoWoCs6y5yoh+6q67sJ5Oq5I4pTHD/2OH5ljYvIvLd39SA+pmOcuGYGl8aJYWYaiTDCIonUDs+JPsLzelZBB0GfptYjbWcVkMQxgyTCjwlFxL3g3pMhJzIf3lA94BLvMf1BdWyn3gstaNS04Tl1ussKm21d4Dv3Xfr3eKFLpr/fuMdvNyPWSM+k429wF665XfLL0M4bPwC4QvOo7VSNwMFp2qUDuTnGWgTquEpUgE+zOTzz8dCq5mmoajgtvXkifua0tJyyFx57B3zcmW+7bMCe9WyCbyF/6JJTRGUfxvTaKCYeQ21xDXNO/mXgZDZx5+6qA7LWTRogPdI3Yf7/aK2Ku486f9xvpRmUR8sid5z8Xnr0fmcM9Eo0R4zgmN1LRNNjRC6YoHhd/VRvpeb/LJzmho0lixktxtB4o17tJtGSTvc5sVSfqXH0iNSwkYzgpCPDfmcOG6tiL4hBq8TDmRf/dSFrez+A1+qT3V1NjVy9trfrir3lRFKMHZZgS8fco1587Ff81zmXkzMOdAJRSV+YhId2tyAWRi0bZ9kAB7owR4tkTRc5iFv6TVAz2fUEQoZ4fdwqsm+K32pRDzFRpS/VCqdm/tD5flP10zCz9fD7cg79JeG95bqv9xNpXR5Su
+rjucStnL6NgX2R3W5PtKscEFkUBfYmgMJUYOV29zwA3YmfeDZ27wPITnpK5565+GD/5R817B7spR601kxJFcgb9EkGfnS0nniXp8kJI17+dQzd4pSM3u2sKHHBOJ4vk3RQ7+zVvKOYIaHOlSZWuMgw4jfhI4RSJZrlhoS8mMVIkpvXvxFQlor+zoE9DRRqw9HuSlPTKLtByb8h5IUKa6MI9k2Z3ASddAP5iKcrwvKhcrU1tXBaGqBUvBKst5pIbDrdewMwPHYZOjnztxqD5aI+EEbJ/taG0a0gFv92Yf0hFGUo3FNuZhMOkO/ZwzUr5yrb9DSE8OhN456onKCzzKbfs+0EzNdTb4UiwHzk0aWtM4WYDNrf+4nFnuU+BwNWiB0RnqMFXTU6/XOxmzq8YxeohRD7DIJ7IB1HlEgP7F+JQDdA5TVxeh7AKP8BtBfzrwNhI3OzBkqYtfxVnbW/njC4QQ5uu2rdIdaa9g0kZesSrDrNmHT4Hq1SVYtZimfMgqdBfY61BlReBJr2P9ynkIxYIWiRrMUlzZAC4A7s3pm7YmGdcc+ZvNMISBYd/mxSRtc3lOJ9pDKU8nSs5cGxykzqzRmYWJO/aGbjKgvkNBvKNOXMiErk3IJq1mwIdGtyJsfc9KQtldywjl+JxjqRnKVwhO49lHzb6HA0CYgpeinWAs2L33Pu+Y/2u53eaTshEwFR8i6ahtuzIXZupzdVhKe9fUPtq8qndXxWvwLU2P4zc30K100qGQx5LaJ3rio6pQHzwrH04QlFY/dX6OJFTWOlMAsfXlp59kYfTXShmvn7i5QhSs8XLzrv7wuYcud2Gmv08o7EOxaGwyzhvihvfHtbpJWtFmAyDBhVZwvoiLlaTwlN9Jx2pSaCH4F2P9dpjlyCppiK6Qtck/zWY1vepy5zblBMXuKhJyWcGXeD0OFUp7K9z3vaeQToivpLgb41ZRcU+R2f+tc5FBIPhofF+Lb49SVvsVDd2AaInSFdZhU04XLx9nR9+Z7eVkq/MrI4/hiS37uKB5JRMKyCBNv/tZIAw5SJUeRrVOkb6Omi1n2b4vRtQkyAtNvcrpfNWj0GBq/0CV3hAbRpfEd5F5i1wqxF7qPOEBdrppurYi60o3nKm1WR/55WXi9gkA6u+GagSaFUtQ1T/Rum9XPku6LlMqTwsAW3ixYojRhL79UUfo1whxl6zeHz4wiYifgPceeIfReKsydGVz02pP5QxbU3dxpUqiLEEzUGned7htu0HSXSWiQVXNFuqIIhMqKBd6s9SudJxWf9AjlSh3sAZAa/++kIlg/Rb2B925QBDzrS9oPmVyJ1z5BJ0oEguYw4zGIK7Cna0RbbIwI9zxgzc9Ykf/pWhuzLMade1xCNBJ4tJPw5A5EMqluwe6gNdsVvF+RwMJQzYpazbhSqDn/Pq98INhawTjuijcGElaJvmGSIE+2CUft5LjSE7sZ0rX6m0T51JJ+rbfh3ia2jJwBy4k+QqW459/NCW//KGqOKgblsk0NCn8zDBegDlEYTUmv+TcfN3gOoW/eoTNPitZEyDV8H80AxdgU/vA84fXDNSsBCJ2QYRD5tQvCoxyYuMRiAIbk5rBNw+Os/TWS5umXnKN7xT50liXL+W1ZOKSIx/6U/7cH4GtGcdm3upnKVymvHZaj+egcg62UEgxDMjMY+8umSVEtK9bPHqrcawu40KgyfI9w1CA6U98ednV6+edEcu082t+0lP47nW4st/9SPZ3C4i6LfTipRbwE4EIZDeh6HVaAUCy3FkvT3OG6JT9oX7PEjSkKkFdzauh7gtZ/ed0rSMhrwD6dLz89lVBalg2M7htgXjEvzvnuZE9aRbsJD9UO1iXVfUtypMGO0lTY+c5kv4gN/Ti/w28U4ETiIxvtgpPHzpO0RO5FRbsqqvoFKrKcNBFxWfKTvmIlUIB9LuiJlJ7Ty8gURX773fErXsVGzQnBR6zWHTpL40+TI+m5LppwNHD5YN+oDE2qJFtqsV1yI1y21KX8i7tN41R/rLe7iiq7JZ9StRt5/FifGD4UUmsQA/JwPd3j2mUs6aTBLG12HlryoF7HaSkoOBWIZk/7wl53EaulHfxWkS917p/5k/IPCqbrdlWlAcx6G8/x4Bmw/X5sUAEl+Nu4D1Y6inhu3NJXiAazWXuCLHz63CEz0WuD7Pby0YJLISF7QYu0vgR+ATjnJfbk1T+of72bugdIgKxv9cPCAoFIKvYAe2udjn1FyTEwhl94IJ0u9i8bcs5tUGTM7g3jemixVMSMSIjoyL9DU1aGI1iRA8SGDzxdAKJb0IbU0fjMdeOK1S1hwYJDJ6Yndcp9ZM5UeaoL9TW+vLEzaXikiaPus9L0IApBZ5bV+69K9AaOvuntxSMJoHqUbmVHv3L/+v/M77MYy1ghNFlmIjJqdvkWRpWAAx0+t6PC9BQsd5SnwjG+EG6BTyiFSK4bUx+XAIbO43yg4rjsMbl++f1x6Q5dbbAxRcb2erfjR7GnKXRvYRHjqfFGcY9tj2K7ef9u9xRnOSkELrsdSWwVG/e5j9SfE8Xxgu1oUBa6vhforcvjOTO/OzhyRhv19fegXwPSavXKBd+8KcEG8fzhXIVUJxlvcjdRL7y14ONcJG1lyYNP/sOlagh2HxeMcTn2DaiVotLHirWhz7uk+TgdgWbfXfKhwaqTZN3AA7BU7MPEfyQH9dK/qG/KheWmGC018d/kxBu5SgWvjkpsJCPhwA6KNEsTX+yQAwIkPXgzSIYu0yvc1vqI8bPX9uJHE8wdGG7RxHmZztENGYEY1R17YWFfhUWBP1mAB1KynxXBE7f39raj83+jTTNBJY/YHewFg51ISWQcLLz+c73dqsRraPHJCRxTn2N1JFyM1LGpNmL130HnSt/vF63oeyLLE4xbMe+I42fIxO3fa21H20y6Ts5Pvvu98HZ58nccGa9cHXZb68L1vi7Sj3Q8lwOJJtpzK5fERxSUFTc4C0Dvl3wdSWcAnl5MfHN2p6wMTfliCiT7E5qYZMv0kftPiIKKgZVXuahz9lVi1v6Tskz5PXVK69CKDDkVPQq8ZAoGWQKdy7ELKOrMjmSm3w0oH524Hq2udTB87p2AfIAevlox8y5o2kO/M/OoYqvIoCXj7c6aVX4BWzvfxGmWkKrgmShg5cLiSWVgiypncM22IrRhv7+6s8Yp8K7vm9Llg8brB6ijdiXXPw2qZlCvHufGNGs2/7R2ynEa1xY7iq7uMTW0fvCMoW6GuUoBa1/1wKMHCLMLSr+XhlwB32VLVrwphPblMf8GhfVdf9zv4AUjocMcj3RP1jUU6RFn368t55SVgc7aTwpsNbuP14EIO/LiTBWVCSvLKclwqgUpM4fZYSKk4C4jdVkJOdbbmNK6asI8Yd0BomuxsFmr9GLz5F//wtvtFsMtg0kBy+vuby2ByAqA6JPFjQdOE1q4xpQ0IVMoDZxQjhc1GkFEm+MDiEsn1tOkUVCo6QB3hhNpFZwWiRTHwpmpWqS1wIe8CrY/R1wAVY6jgnjhllq9JD4wsSZhNoyeGNlyABX3Pu3RtsffQ
ILcfd9Hic0aQUPdgHLNCP4ideQTSyiDuQzd/uDuTVh6gBCsXbixr0b2nmzxnT4Jf9UJpVXVsuafUTIYAk8D7w2uBlD72yHVYyRpr41+UqrV9SKPV5gJbNBXNCThHdKok3qGef49fIi2Hpgr+NzjIZtHO/BHKV+8n3hjnOaAJMz+6lcKCpqU2ek4pWoKwCeizRX2l3mj+Hd6/V59l0EQOIQuH8j5z0KOMeEqjsb4TJvZs7/Q0SVsMSI75Lvq6FssYxEU6cv4Gev52gSui/2l+zNaoqDMGrEBqTQBQ6QoU91qypReiO+AgJxg+OhZwkggXQdswGBw0A3wUyMZr1vNbBvqSIwYBNHM/9ALjhgS0mxagSY+4SK0/exGMetLUcnuen3jyL3SYeoingSO7QbUPDf3v0RIQLxBrZxD0OTocQC7ulL9NdsHkUtPIoBIKAx3vjTR7dxio39vGeDEFyf1rcjW58L00V1N2fd0rrEX+lBcL6RPDSkz88PDyJ8OzYMbHJ+HewIC26H0BcQNWY3pNFlCw4T+9vpM15JnjgEEf5Jbri/Fom9Y1Ptbgo6AJtseLQh5TSfX828LpPC7HD/8UjhdcAaStK8xrms+duP6vFmsytWqF+L3a08xT7t2bMoegrAWm1qzVguCO75WkS31/kIHBZ9OLh89MWWaRCetW+rlQYM1uEe7f7U5e6OsQf85jC8d0ITW1AE/UbddcsAlkGcI8Ak4EpYrdakr4uElXTFsZAl7+BF5w+oxedUknzLfJYpPawjHk7aLBHkxH3enhtc3kxFIYDQdOig6663zH9NAR1I/Qw8RFrkeJ1JYu4joMizLvbkqT2jSWeGqoAFkUuh8x5sKsQ64TjLoRWwDFB8bTg43EQFspamE+ZdVsabyBWjtv0+V2HbjW1aKQPSkXFcPh6Np207yPNzeC2qeeA1n3+ag3ahsVTrZuVIRuaOSsyv2Oy/bfTlXY+sRhHA6BU4rcrVtSyzqV4ARd1tcQVXZYypbOFToTCydsXewIHmhkyMTb4u+PNt5Cs1eQINWQg26+fo6dkIQ9XsRNZAmOgiS7up9dS1AMPqGH8af3DO6KiLfAv4HoJ7kryYemfOPsZWed4XwRWHAFPPLAEPUmUAPfX8Rt3FMbfICcS/K1i5OE3ylC7kJg/h221uTG2Fwcmm4GML/g1gUHnqKHR7h7R6dXJqUWBUsBDH/l3S4KvWnsq4wuCy/CjBhqc9Df4UT8P5Khfdm/BrX7shhZdbIt7icu9UNyZsdqrDHzo5qSt8wcqnhYmqSgYhhbLRW5BERhvyUZ4kF1htmmZdA4Tv5Z5fJdcPTo8q2zBdZljrmslW8Go88gwOJWa53Iy9EFoTGj7mSUUg9TQAu01/0oR//uvHZhREu1rbbuEkJEDjNhRrIdjv5ixKl9Sw/FJTLGBau728f9kXYweVb0GqOB0g0iDMD0kgaBO0cuxNUy0LseC//f/ja9c41w/D/pbPAikGH51vZESXC2MwW7x06mTz4cPLM+6YFNpqSgcp1LCLU8Uhei4aPPlnEo5gLeyP3hRDCKiC+6itOBzPz9pTZQqb2va0eU+8CbXZpSBqD/H1BrH3+2rTo+CKIyTkvjsP5qe0GdCptxT04+PgGXgt2ioO5RPxN/8s5oYI850Ay9ttBMADNPUGY3iU5aXZhlRdzlT2hMOwTLAgCUvUiylBAU6UdipuZMqLqrZ0NBOQyXxc5IK5hjypo2+Fq92hf/A2QCvu+U8cr67jSPms8BjJzweBudy8pGbac9qshyGcSIjeBbqii6FAuf7an1q0DLw0mUZ8l+GGRr3g57LV24Hd+JYX/mY8zn/ThqZIrZErc4OOtBH84PKy3katOBVNdqH1GXZE+XZHs+0RYDYpBKMbJI7XSnxlJJBL06hwzCWa4DnQAn218YNgu8NKMpCfZcq27FumObGzIsm9P2crTyYUvWTwX4nbsY2l86G6GB02Q7Vou57ehKS+LeA3USSm0vOa/xPOajIy6hxDKoHAAGcoav7YzvLqu2TJ89I1SQfheiukcJm1jjVgmJFN8bVxFLrv1XhRgphvC4PNmwcwpLKp0sG/E+YlnrFLAubk59/2133E5pPH/VjGEihjYKKd/xyfZCjWhQRH+/5gPaXKb5aNf+bEitxQTArfPL4IgC1WwMktX+hOvz4l8UJBh4FFLjDgyH/CESueNE+KntlOdtvEE1GcAdIK6oRqtw8rKNWSdGvXlOhcghQbYI9t6SPUMbxNBAo1/Df34IOn/D982OkakWdPalOWcJ0IRNOwZ3Y2Af9ao/3B2WVW7/PEjhTxm0JpkRDWfniyG3UWDWzHqadUo7wdMuG6owTzHOH+juz9Fdg7NYMvgDnBts2zTkOIJOfSgcob7NEb5dCpkqJDad31yKUzazOyF44yN3MAlVKGxgQWXYSoiAT6tIzoNtQFd8gIZJmSufkRR0H26xLUTC++6vT8I3UbbQ0bGsqfzfIMmZwP85g42ISKL8WXoc4WhsT/fAdBBGQtoZrSM82hUc4SX8SmcsFSiEo3aNpKr31I1WpYEjTHlHL+66PJoox1fDCUEDo9WKczniVvW9x+otOq/1sIujMgOkjVo0dPboCw/NnoSgxIHvS/mkhFg5N3lzIzvxotE7dy0s1PODLvUu9NNyxuayodgtATiHl6gMLTu2wEDPAjkVTc0GVQ5Tr/CThvH7G1VCZFJ0AztNJLQVJX+0kJl9GrSLQ56lm4L86dbAxTo9pMLLlbnd94GyUflOCJEAURZwXJ1Y84YyNjPOI2VJaXUKKPvd5SsaA+rG/aMCFUBRFvHaKq2tVgpKRPkSpMvjqxgcMJ1szp+jL2xZgVv8m9wGRev8Or8pfAeK9ZwMQkf+UQQyZBpRFXNnWKJBnxG1Jj3lrrv3ZYFKXxjL5rJqWlEp6kiFRf585NeBAu9AHxr2H4fYUco5ijDDRzj4JaF3SZ80gl47lzI8Hxmpuy8h9RV4TsSLIj0gKAXQqJjTK0oKPwLjyaNrzvDQ0NJ3yIxGg5o0nrPwlKPv+645dyhimoiMu1QcJw8cxoQn1UIuE10+apzKUeali0IB1IW6m8TBMDz27dzvieZ3P5SKPGuofZXn5iqlLFPxDhYtuCrFlbwx6Pb0/zYNzgt4Uo6ZDdPd21NWg6JF8g/Kx13GabSIf96ymYf6KBekE9tdD2B7EfPFM52+RQbTzAt1unOeJwrK8vtpHKM4QbldMdCVQuKqvROS8Zg0OxhFX+SC7rFgKEefA6gFjd9cbupX3u1tcEXTlPdezWN7hZefy+bAlxrAiRSkdbyFL54CU2Y9NZScizav6deDxiiE13p05DFzlfyFnWHCMojDocoQODJqBVBDz7YYurUhsaCZrBEZQZCgO5MvLU/6SsZ8g5B9MHftBcg0eGcyfP3UpaP2lJq4jjiWmJT9T2aswDpgQSEActyyJmfz3RSKjYf5iW1bEEjKZPqXG4QoduHiE/9hABzIRVN0KDEMkH31OX5wlxMaQqNxvUOyrhwV9bC9ub7W8oUqjigOXwe8zygg2EtG2d7S2Os
mGv+/DLXaNkQxeZZeP2hUUWbwwknxYvkyfKufKxSYzv+i28c8y9FSAuROcwhdOjwxlXKc9vIs6fc5GP/ODOt4nFX/qCAjufXpOAH3jFAo9yv8J2gF6AV6SeChzdBTDkx6vePb3A+v1lOrkVZi+ZjFHQ4X0DWZbAZcS8wzrEfb9UaMj01t/TfZA7111xE49soENnZ5mWeDM7wuOyi79JAuvpRjEQrnQ/mVI8fzbXCUOWEnMPVpsf49rEMnHSX+/Z4FCfiuD0dfOXwp9DtnDY7SBbu76DakPJ/qeNS7tuuQkcwhNomYW5Q7VPIrA0bjFSfISeEOQuyH4YbMnNISuQlQJLzBlQ0tlUdzOJ5frGxSaRfIC5FYF2i3C/SZ1XyEJjxAxxv2BAYMhJuVfd8OjPh3NKTTDUF3DNc7w5wxNpKvLqtEdOvlKsyxXH2FoQ65SKNdJJ5Dk//3VRpYyi4i/zYe8lu3ETnxHe6XvVbbyOc/Yebl1/gnmlrzqJpteyeioZ2bMVVenfZRzIn5XfYYR98NeWIklOOQTrLEHNo2Hr3xVkDJ3AOO1qdxWoTDc4/NOa0SXqw20uqMpFOVNl8J9m43gojDG7Cgaj9SB0euJXW5FtxImc4TDJi5rVl+OQqOFEUHkUP+IOhA380ATb7UhT8nF6MTLs3rV2hNRJ2K/rA02vmaHJ8vyJA3CNxl9muGwpfoEieXG2YG3O3pSZNOGR5mTzN53sq92tuat8zeEF1yIEdKIWpjN5fTAPsdybSuRnis8/QZyPHB4DjTZ8+uAWlKhSrsjyaI+mg5wLyh3wf2BS4iZRw/8Rg++e2sYap6gf946SlHyOxcR3kjL7tsY5FPQN6aBfx5QP4y0edG7m8ZHSmGflsv39Hn05dNtZT4eySJnvHxjYhEf0f0cOvPdg5Td6GscocW4qqCnqhXoA8y4oppG3tjE1N66Oko/0FbcJOWuC7p+n5WXLf1giHqcOzoRD4qG1CnEjhSaOEI+N2W5Ct8uP9Hbk/4yLDvnhIZo5P9J0vS3UwcR8MtV+L06QuSvySIoIVCMmBNRQAM3/JHUvL/kj+8JavvHPwlYRqD5ZpCm1Ye5ld0y44kODuDpZVuAQDiJd9uUfULzoUwMGlw19SlIqm4OmVSun+JUQe0q2He21D0i8kCebHl/FLiPjXValn/1Dr7MZC8IB3+1rZr/q2yA/MAn+9GmanXyEUSI/gzCP0Um+7RoSmmCVXj/36oiuvNsc0z63F2ewkNxvLub/V7z0fE8SOmcmBqppklRfoLPZpT+xoKDIGi29ei/6Sx9dvf8fNRTk60v79ZdDnfJk45GlicnMZTn0iJpAaElMuaEcbLOMsff3lLo+9KEqQ7+7lviguZpmUXfr5evy4p8wijyMfj94K59UfE/a2I+MOxVlzqWQjNG/SMfr2gXeNzXOPxXfzXMZXY9EteRA1RJrMSiyBYIlp/lFbo/FsvIIXv5gABfOD194EFqIajPov6DQoCmepvTiZBqTl6h+nPOQa++fz4UbKcluP+Yb8RjqOjAyEe8KWTQwYOO5+UdgIAyc4q0rI9RWJS+0hsAFyrv57UpXpnC3WvbtsncjOZTX1xWldklzDdcfg2MfEaIQrTGOfffS8xs2SNaEge1Fw4SgISgBS7V1b91gN5o0xbI/cTVDoxSN/iW/Mtwr8aX1vBGKpcaBdyswUNTw4EijErrlCvCUo7KjWPnqwhyh8S/fA1lQo2aoH1AN6HcWSkyYegBKNGPxMllh8HA0+g4xWyHVCy86NxyWLdPCj+WNQxb/BoV8GrLsPDNnFI+Nj2IWyBkXiQZ/waotslaTXSbJ8Dt7Z7eiEs9mK9yeYbRZB6D6CEVko+0wwmhis3fJ88isx0m6Q7tnHJeVVlaP2BIDZJO8QnB8JWNsoDDS4JSmLDlUZZqcWl94ow/070T2ttNIL/u0/moH5Jr5mrE9V9lCUNEnxgjnig8Q7nSA/vI+1SD8VB3pf4RDN6GtbTFTLX5DjRsVG63knGjFLKtxdYqVw39Hb6KB599IxbqrE7UsdpI/J6hpVv0/GnTLhQuTgRke1JPQr2GYZDl/GsfxfG3/YOCFuolezmdYyZDN1vzm8SuVSqD84M6I3YL4Ck0Gq/G+yo6OVsV5MyRoRBBlPVDAWtBb6ORrlMUqxNjsa/RTtlzFFobpYgKlc5h3x5PtxsZNDBz08olmuKT/EofdbpGeqgZXIJkkELDJ4Rz3/83/k6lt/YAfjUbKFBmNOk05PvRgoen2RvLu3moGtzqoNdTweAaKAowVLrdxfpMmVWDciWU5f+AZfbRzKQdGorQXAWGICtd4wZ50GdZ74o1sOlHsnQdXI3KVNiT6UuPNi9HI/UC0ETS7CHMvPYREIKMOp+qsS+ZdEdFXau9Y9R9tobZ0sC4rO2Ls8ZKbt5tCNVRnflLjYWWjFy89dg8WNZ8cPJEtzS5yfZjeyce9eT7e3peGT75J8MM6dccajI3OfpXrsGSQp7VQzDP1jO1YdcyqkQaIFual+TeJCefCHYwoDOMJy0Bo2ln1UmPu6haym6CHP8KIlhyYoNYF8KrewD+0yeZ6UNQqVh5UXxRps3zp5bacGSA+FsDUZ4K7dAYCzQY9qBHYtsQBF1ZfN1BhleoLaJSiYlbeJpWYvU5o8coosbiVVM5P6c+21zoq18t5rTtGVWjhQzEuCQfU9zbvfYjLVQDfuLyrz+NQYvv3Xk+OG6BIrQB98ZLtFAmx3Xmdvt+Jqq/ez1Q0gLPOrnobn708GXZPwqnh9+lfc77hP+kE3tzEa6+JPfvzh46Swk32qud277jp9I4UkqjvwtMquDrFCJRfI57gRgCUqCBa0T+VfUE9ENr/G/C82qIRVKNBNeLSvcaGrpH7TK2ui65LlTa65QqSX8B8nM+Iqt8icf1Y5Iv85i/h4+uko1J0hLiMxj17BROEtfxsqWK2ynoIepd/NsTrO2AwuRPBn7al/4krrrlRrAfKPnoj546i/F0UsShCmioz8WAyA5eB6rWmlxX/CczJIyKn2X/AnKqT0+BnS+FAw+bp7grYLP6Ndfy+R0zcUgOnzi89qvQPGr6X9ChA2qiaw/eMfSwRLzXj7XeMRUfktYLjbtFbEisjteXpgm3z8CWEDtRqVMhhlSUD/ba1ux/HS4OplbinPxvgI3IzxVf1K/y6ZT2Ex2eeNsdZwWT8Y3LUU/AUFcM0pSTVJ6BAPU3+OrS8UqnyIiamrji3noE0GRVDil7HHngyxtO/qZClJLjEErS0eicfsEwGBwhm3PxSyHPGMSasdxH8e2o0Y1ByjSQgXsQ601ix3Swlmf9gkFQzjfalMo3yMoXjFSbVwtuKP7t++QV95rjSYzZFurHUiG/kQNlbKX1L4P0+fxOzEidpNCuUCeLIc3yi3FgSGcLV65+CW4nM/g0x5yUrPqhUhgHj1SaUr+/FRI7I6b/oEngSklxxvJ5YCpN2KfbRBJXBLi/bJX4NIcYcDsYPWHGstL/qlzK+KJ0rJdIkIlfTuNQMJK0fFd1na4L7N1sjSRY0J43QucHPUc2/qlPK
dMwVbaNCt2rpD3J01HoziAuCnu0gpiaC23dvIWY5P3MlbD7QtdeukMyJBlcarc6ebNJaSqb6zx1KA4j7PfFeTzLWtAv6Y4mQBld5Y7bqmdco2sfcWyL3zy4TGR1NgiTz5owKJupmBVrNNhxuhCex8csx4XlJUYefWEtFPRcZiAx7Cj2idGwpiycWHJIXx8B3+tIXxzuKGalEcXry1bH7LC3k7QJJiAwo7fkwrl/RezgHRwSHmI7C65tjKj4MI4tPEQ2ZyKaPN63D7+5uY54nhNx3tqClnv0W0s/1OKJlgPpHNDQyUvDhF/98JrEb7ww3AY1jw/xCiRfL+57YRFUmTfKcUVlmyF4z8AaOj6X1CDdZLoi2vPQJRZcL82Ud8CdtREUAQ85Rvq3D8rSaZH+lk1psRHolxhpjVqk7/G7FWczkJfl8ktrg3W7vN0RuBMcemQMe7Z4oO8+uV0cpYW5tGOZCl5uCNJ+VTYiZlS53Bm/tIAwA6TWErlWGBUqdhX8JER7ev4mj99l4x/6dZtDdX1haV6PJSDxPSaW1Kt82yj+10FHvCja1enQ9iHNy1LT5WyOOtWgTcXQhnFuvkEfII9NUiNyzf3KqZBLFIXPBB4dTdjTeAkTRl9YBlWBuHuuE2hp5ls8SNqDKLPM6U2rg/+upf1Vg0eoeXEWCDo6BweKdnL52CJHT7UvaLKrEzzr5RLod+o+fyv4BhIkLqRKnQgFh2YmlWsVv1QxQY+KPa2Llo3IM0/xdzo/hloRf8j3V57qynZjqiKaumz38y5fjaTlq6CO97eoUY7tlde8qUWIKKmAVik3N+ULEVB9VGEyNQNyNtO6f54HPeGYsbU0vBvVfc0yluO6it53xmsCJDCgAOaHybXllfP+sA4lHo/NWXUZsbmUtdfpab+jJ+R3Jg5CkTuCQIFxWPGS9d4X+1mVTFysg0VfZHu6iH/g1vLrENAMUalVwtjYFztBeD4Lvv67aKCZJuJGFjXod7L4W8cSJ2hPF8eDoZIsohNp7Bxzy2lWtE2YputW9cooiiH6Ru9wnmgU+TfSxoxVqm5mGtzUVC7n7meX0ZyVFb5MAfCln7YF8DcewX6SSQ/Dv1uQn0h9OeUXB4gFiNtNUwxnxNwgcgxyehmSDEEQqaVUNx03qBVrfFP/gzTXGLdsxyKqNRjFsywYMSgLSFFEqJFnEIjDGBYcFIc16C0JsCeRYQ9nXtCKQouYmRvuYqGQR0yhBntLJ/ZHCo8RuLKXhv360LNFDv0olVTeS41NK4UNw10lWMH3ZqC8/KS8Gj12zioIh7nmFEkiu0B/mVya0CyTAd28aWo3HxlQfgJ/7f9foGgtQyOfJp/VdOSMN2BXmS9kWq6SezywHC8fyybJSKh/xGCKxJ2jCQq8hW+ugZ+yDU2iM+6/+n/evSbOkVMqnXEX1PJe0eFvT9/rpIkmWMleTuPEdCaUt0ImOzlwKe86/2gnIKbD1dnqYV0XPTEwTdXwAVWZlJGqnsSmADjb5yiKZsYXCQSNwEuBdn2jzRkiQGaGmjHEgAIv+V+R7y+RHei61sBsByXRUR1+ZWJ+Eg4MrWoISJUIIc6d1KuVGGSwo1yG4g2UmLn8O59sQAoi07zk6Q6onindBdObwSIeAUJR26LMQ31qPBkf28gQnJ2Es4WxzKGboogT+sNiioGiwdraUzlf5usLETn+2Fz7Sj6g7GC33NUGAZxMWFBymKI9JM0MCs8gWB0pH0YTVPb2L6REmAxFASXJZvIx9KJJPCjPNcFEPNw6uuEvTS5C4WhOCaYpDNeqAQ5OhMZztEHQYY+XRlmdwl1+jiFpsawhfi38TYpp/i/wMR2dnBSkhbXnMgJU77i/ZnMK03U1Aht2/upl4lMVOaUJFJ693yp9uW3Z55L8s/6PuPfadh1p0gOfpi/VC95cwoOEd4S5A0B4783TC8lzStPdI/0XktZMraram9gkkMyM/OILkxGk5muqousv10hz/Lrt7qvnBZaacPlIwu4oYDA5lrvFYJrmNPBZoGPQaWleoh53jwwmunIOn/GtdedSsKVY31kS/A0+SodC65Gmv1pkKssfIDxMU51Xa5FbgOCj+fVU5E2TjHx6Co1pbQBCNu2cp3S/fNCe/4Y6Xm2GKd+a2Oz89sF43nmPMhymA5SmQPEkTvHrz6pKhiyjZcvfodybqybd5sqICJWYmI5l2x9g6SDcxbsaG0LRCO6p9yl1vucJceNlDTH1jQSvIpji7K1FlHoQPUzTiC3JZZBNFoT0HHnWb9BvOwgVoNLIvL95+eIHFXZN7aVL3pGD9qvfNKzg5GOXaoj3PnpzKZUJ1tHjaYLhvxPlYE2qF3SO8TAYwmOg1JyToHemZeIO7lwQBAoXijYJWOaWXnDnHaCsVByDjADkoShzsn/1hGOqjB8YIA3IuKbonRr2Wcta8xq7S2Fku9ROoJ/FOAXP5R+AyfvgmrIQxiMsoa7fOVZzvwn4OFOSKJhBTld08qbXicVQpSOkDBhCAByLjrjqSQRvyqvd8u8IjLYUsSbSUfl02e+avk0+WSgKVrHLhDKPRZn+he9ILOw91tF0sZ22jV8VdXP1Xb0eRfw7D8b7WXEIDUa95q58jO19E91Tyx3qm66wgaKonW8PdpRXg+E3SMUnvyH+1TDX8bl3ZZMHDrKuDp0iq8fA+fLQSelkeqpq7YsHz/pKaEUuDxgU7KVfISGvIwfVhp1rgA5BwydjEwd8RiYCptvtepHW7lxH996aL0rsZeEN/e5XZcDRWtDAx10lQD108avGPRq0o1CiXtKjo8KSNTfrUYkz2UA2f3kUEMCKnPyumyCe+BVqHdlXC2ioPJhuFHid33T9+2W28qN9C6IW34sb9XNCnVg1fT+PzVZgb/hEKxlocdGxxONj/HxZzlrEbs5WDX7SkGVWJnAVwToCNlvfRxPlGkVoF7ziqh+XRwW+lUv5EYeBfsWyYdP07ywDn8EkgnXFIALjoA5N4INmhSzP+F9f7XbPeQ/8QicOL38HPIFTnC9g5WMEgh7FZ4RmYF+BmnBnFE4aRpUFgmpohA/3GToXiLcAjQOkTCHPEH2Up/fGGaPJ37zUlSq8XbXdAecgKwxgHwvCmTuE7+X4DLrwsbt8XQR/3u+APJY05/zomyW7F/LqAoRHy2fGVgY3dCwMEpIFQrj6z17XhyhFY04tjVyiqiVVmKYYcZ9r30tkki2KKM06VBOlUXRkg1AFR/vX6N6n9mUZ4AvY1O/3YCzNLN/mDuaHRAsKyuFz15Q+4d+BskCm97fEH7Fnj0EPVJe0GD1BPFv5XdezJESHYsQoYcUYwTBSBm68rgedw199I+4T5mEc19TBqwITmnya1ukbpTitrflz/50rLdo6mwOZ7hNVzouINhSMO/Y31z87O5oRdsulMzq3XxvcABcNwUnY+ELpdpbO9/3mGLvFtn4WgocdWWScRNlBGa1YhBDeAfNDk76i8MvcOUPrXbtuMOHndcxX712332tdjsRFmICU7o3NdcAKMBJ8U+O+yiTzh/quZQ6ti6+OrCsW3KstcQp2BSTq
GGnTYypcQNbGXamVq2n1JqW+o9w61nAX3YJtX8q3Zs7fN7W/Rgwh0QfDkUdHR8vwyyyvrZyDDkOXsk/f0AP8wSn3Xo3gdUD7TfO89KmtW87ioD9DkuV+YstDAdkXUb8wsLXTBD8dhlVyfv5ejxdxojmGAIovssLntuRtLucHmwxmwOOXvz9aaDP87zcbm6zUDp0UDcZzlGY3RfIq5fDL6HIhym6QTxIsmYQujFoVxp9liNqjpWbE+NVQj9MUt2GZnHPPOSbtlh+LY0VguLbSPUaalw5bRbGuv6POQdB+iVECHpD8V3eI+5ZGfJAAshRdgDaN6C5H5j7v9+atyoFV3fGp/Jat6gtShhdwThm5kXevv7II51wL3O3nJ9NDv8N3lhZgVHYMrb0N3sz/tEd+KMaavEoMHT9BlfVBjKBFkMd6cuJKpvBar3f3QUVx8CIqErC2+y6njmT5zpkYQYTxSTH6czKfKaMwBT92xAD2/vVaFrVfNC3jdzXRqYGzC1b3b1hT9pEbwxFwN6RBYoN9ZTGoolipzV8euQnosOrLUOCWTz3f8884dY4/fokOBk1nHWVjh769RYNFEYvQAEYMFkT5e3G9QAInTyqPiaQKLH0MGbk21K6EM5bnPZBZpu7oFWQRYVuWEOyithzlYqZaJu6vAfr9AgsQFCWW+V9JAfTM7KvUbHQdmd/opECVCNUq/KlgCOP1C2djNLeGQ3f54I45ds8Xvn+Y4VD9gpqalRGfsn23lfANzErnsxBL3wY4UMaKe7sCHm9UAf/QV2PzYmaaRyGeqbKGVlasSKHhHkqXyxbFX0cbDr2IMN+yazmJeRfqYw6RBcBACniYQhMlZjgkN79OIcF3uLc2cKcOAS/7nG/xL9sdKi75mVcgdPJaFh3wVXcvBszzizR9qTc5f1rQ/Cr0whLm0keUQY8FYebll7ToVB4TAgKgMBAA3gguUbZkWj0j+/XDHAzFs1IVLbgaIWicfmRCT6RHn+M2vl2fG2bKN0WRZq+q6IGzrl7Oq5B2wTiTj7UyYhP+PbT7K7gij1qwtJChI3VDlY1oEAcq9l3AemgCkpgEJjfWRWZSljcEL10r1N69MJCWVw+mXo79o8Drn6ZEjFQFfrVxGZ23v5ofb7JCpkjbvV2AAdl1YUx9+zJAkFyM9O1GVd0Wd79LqNo9RtEUD8qvd7P+1WkHLieibsQon8vmm3/IpHd8UQkx23FvqWGGrCxfq3JyCDlc5dR+DI472P7KtwuEpM4+o7+vgMADObxgtfUUzNJuxkkr78zWkrRp+WMxRpEo4xvqUg7XdDvAT2TsSwUAL12mYnLL+yZHzovaIYnXX1dX2jAh0x56znv5EOyEmSvPv6fFN7idBmtNOU6yt2XuXUFnOrVRPMC7y4GYjmIeUwtIcdKPKr6ow93SjoRf2OuZgRDDqImZ60kInW7ql+usmV9aAjUOSUP97Y9lBqfGw/ID2a1s0SisE2mtR+P05hPxsC2TPeT6zBWaRYNj9HpEn32TbZUAWc0FnEus3d33keVZmPPrQ+ayRnNcz8BHSZkK4ZX3ozQJJ9+BbsnN/JfIh7kRXY09ibJEi3vRydtkNoUsOidSk/Y2l0UzhwBvj6xZs+6YtSdYFqYB7kj0p76Ym8bPKCEmcWNCVSEQPvZI2c/qCh6a4CmyZbnq+5DbwxcqUyWQ8/6O+YSaKl9k7UoBVKobWds4mk/TL7tX5sQS+wskkwcWrTXMY9AfHHyfwKOB0SCvU/gRezKA7F3CPkpR7vKXRCHbTWR8pHHbCd13am2DItRf4Ewd0cfEcUlx6YqzYzBdqlNcN+51P935Mor8q+hYW/8w7R/Yp7Sxl08yo7JHxorNSo/LGu+sOKsFjfRfr+W+sUo0Vw5rKVAr1F4yZ33pX9o4k4zJeoEn400q6ANUjuWFic2nZ75GgpMyREGcgstkMV6OjTYR2Yk9dJWxEKP+i+siafQaf8R12R/HeRPTg+K6F/d2QDKry51ycX7kZyOhFE6pLlVPLzRttPcCw4PinkWTpibZm1grf0rPooFfDnXxNhO+v7Bp/9CFtIBSNJFkvOTgQOk4BHqn1e8wYGzUaKOaKuZFUOK6sh8+jDF5FG/p4IdpBFyFHCEgomxaYliAZ3wiTbsJL6AdTUuvcHGBo4Nkv78lLd1cS008Sh+kfgSOLdFd3ntQpN29J9SHxYEyAhKwwcgB8NdMzNwTSHGE56wpM6Zvf9WnkY3yNLyW7Ml8CyD54bt8H57Ag2cO7KckXyckxFlM/LlblBxFQOHqfsszitfl+Mag3WAOzKa1cRinwxy+W9G72DeXzROXgoxI15p5SVSG9JP/4WFaWqxf4reO3K+klmJjkhgc4RisS6VkVvYoioz6eFVQgnQGmXiQpdhBQ5x2euj4+OCeoAMNHmCY/m2pK0+oIPIeJiPP7rUSvuJVsgibdTyWjhm8lVSJPgPBxc32iYa8bImEhZOhEI0BkLCy7T+oB4m63BqiC6CC2WAU+1IoIABr2pvIpEOBqIA3a7SsAsePuf3l0729bbLKumksT0XstJscQymLMWdOeDtc4NesAs7aWXaeHAy0u0SInfSF3QEv7nvTiu1qQwaEvM6bPjkZdpV61ifql2G99Oaq7wSSifB5Ht++MjONpF8F0IaHEHXdrtrgMIvN9tCGFSlF5988JA7tA+NETJK/ZgBmHkCp+RJzMqxGtdW8s/ZIP3AyhZyx/VGVD0ymDnr50qaIcXzhhWgOcCkMwnieyAu721OX1/l9mZIAuXeJT4LKgtwGladbWr/RD7qK2dWlj/WKb0ehO7OxbhsNYDejFKdHlzxbK2Vz3vZ4tO+KkV1eVD6kX32cwITH5bQHy7cA6/F9MpCaIyuiNgGPcGgk1rpn220G8KG3x3EETAbJfHOmuwTB9r3LcoYqjMxcWZqiODYFsTQVk4ETeUYnybEuNIXWMTKnU3BSYvfA1XhjxY5VoD3FgyquM9GCYC7vWBESM1YyrRCDcPGJT0GHASNTxhvGNNbqkkFlYgXATvijeJ1JDy5hfIfgu5owDOk2z1YEKSHKz4S8affzGQn7aN4+KqLqPaZz3gdbNnsoUt5Zg+PQJL0Ef2WOHeYDhagHInvHBRtpIU6mXFCNn95tPVzUkInH3uvVvjAdOq6z00O16pZApVYaWwN0N2RzX351SUUdxnfzswbrPHRj0FV1wvhxkU+A9oBehqJP0FufkXCxXL2W1bTEcfAAzU2ak1IYXOdKQYGGYgH1MctD/n5Z8xKBsS62ylpxD5SjDVOhcv9H+nMFeDKJ89mpG+KYsMVSRv83hwmDCHVKzYRCo/PepY4nrVd4jPrPWAwF7HjUU5NklMbprm0gqFzyPjQKGWhJNtDzSZkxGLDeQIDI8XYeUl+NTd6Bw7WUbpKodk3utqvUL1df2qfjoWb8knzwqdjI6d5b/8U2RCNkzPn9QuWfETf7XQB8fTaFU2ZmlGX05AJwjDr6I364EgdpKapO8l5DqWVKzQ8pEgssv/R4zBAes6bw9VFlCYmQJle9jQthoE2XGJdtYWNltdt
N3UAs8DVPxrZFs+7Sa2PEyW1FXtweSz5Aspn+kh4ua75ABh5mb7X0ydpjW/1LRn2a0EHjPZbGug8BDJoDd33gm54zWUlNOBWAohaHNne8NzGtPxf8Nu6bZ5/vZQxl50G65WFCgoRZNo2/SH7B1uy3q3uOKYi9+kQOIdpU1Oee9cHknI5jkkVgouvjCu54eh4q3SzO4vjqX8gJg7oGxxvyZF2P63diH0F8y81u1bDXjWZ1ctiarNasu3nU38a983Gz1GBucu/OjwlMOPGiGeCNb9Sz4+8X46iaUV8YdWHx50ZdcSofGFa8eyGcto+EQYM/+cHa7+NUaBDyD1IlZPjoa159Oll3CThxqVrGeOSmGJodOn1b+Fc7vSD80t/4mQgiX37M0oFXGZESbybU51EXnPt1RoE4dap+J48IDGM6Eqai8kac4Uq/fkf4+ugYPLPnTqT07S47nNlwB2dL/5hqeyvVb2PUDw6aT50GAQBWpTtncGLgv9Q19dAxLgC4XA65Zr/QTJfn6BB0Zg+Wu0icztiC2X0w4Ii2VZ9FD3p1rFJJn3sWgcFFak2/WglB5RwDd8WVpB9ngK8NfukPYYN3WJKaT+tAHLFSNk9/fnGI7CMGD2pKzevZFtkxDeQdTFq+H1lbUdd8rtstDa9JCQzGblz9jSOakLx1ir5CTcUioe+XyiV10nVDSZWp/IPpcTmhtdPB6XAaIkoaMbWVdc+ndr3dcO/O2Ieg/fOyV219lMCfKPeiNHl9bfl6/bLujWXz0oehuym8WiF1sj1F6zw0CLq+uXGTK64+RPkgQsl6eGm+YHrwnTZudW6oSd3qJNFQFqOYp/u3DsHwrIPSuKJweNOJOCJbHKIYF5w1ftZOBEKC+r7i2PMtVI91b4f5un4+H+l4T1MrbeY3lfjgw3dezDrKIT4ma8a+35qghSLSvUoik2hvUPUBBMYQMwJ1pVkdq7wiXiOei0TlrKt7TNhfS5EYmn07yV8nx6Pc5IEcjc15Af9AnFARLNXVRS/n5gm13nnfcbV8AeRNiN6EHMri8DbtFi7I21iLdc4Z8TsLMXNzC9Lztf+RvngcaNDyrudl5AMd1AFmpZ0uRRQZh+Xt+x7cxyFoQSeGSxHWYolzk6aewStE5l/qJ3511vYojN/AACbv94y5oGiVmMzZSRMTkFftlk5FJt6Z/jr7j1HEDRcCXau3r4fkXVKvvX91TK+OFQ7N+ywDxm3hLgic2WtpbbfgCMAL1bULk5QughWRaE+VodkPAYlyHVyvHKQ+LXZN9zbAsAU96OfrSXBpq351zGWKITGsce5zv0FPQgk4094t2FaIPhz76/o6Eqw7/ZzDgQji+sO3irfwHq9PxXEMN0/T0O7TNW3Hjt6rmjaE+B1KZoDOJKa66UGw+IiJjM0eqZ963AydayZM99FL9zcDLFfkgttVI4TkvvL8EbB3SgLvihwr1acdgdXpGZUVyCs75e+GybLPgYqAOcPfV5lKbNJAcbDXkVPJb3CgIwY1GURpkZU6zEqiPEl1apFRYb4sPx1cg1ZOcxz9BFWSFAxQO50jdE63VA0PEFd82VnTq6KGbivLpoYpnytOeYk2BIHngxcOs20M26qhoILlcDed0Pq6lugzR2ofn4VEJXFf32TEUVuf48vXE8taih57gD1NBBPvASVZu5/dg+pzsY+4MhtTSTQelV8NUxhVr56LaIFp48Rpv3NEfGsnMQj/7pjEcjFSHr+htmsPtUbZh+R3YVrnNfK5K9bLtvnXlmpRpR7RKLgw5kE/dDFMgeNH0FeF2OL5YZqo4ItCOLoy770FRG7lLbeRCyGwFU4Wlro8VxITE8XfeMFUvEITqNNEjOs4YsDNAnAj0vQLbq1KhaFyuATowzbx1DAt60fxnEJjCRq4svSDvRNnu4WH19QUdrG0xMflNmnr9PGIopP+2CQ7AcdjffKubC+bTdzQV9J1Q/Ppm/OzSP2Ge7IzObJ/xNwu1ZeiHZBlns6OPLvTknBX/vbQQ/riylAaIN+QdbUYadh6u38zLF9ry9I+J06liQVPbFRR9E2j7rLtUervRWHs8gs3eA6ft1CNXzdPlQYCd1cL1bqJJpoOVFqbaqVaul+z4+CWUMl0/MplOoIEPfp4b6/M7miKHburyTbsSlQ6mj+obA3qtZWOMIcOV5RiX8ON4vI66OrSmpoiLnMRf9xQbngdz/FQYjd1PSKFFrIhzjmHloNBlzQ4eE1bxOGegi0x8zmTSO1GL2iHO9nQ/XiJGcn5sowMSTe/lqG8Htsbn02tU2fAS/YG/J+bUEsDLOtUgCsRP9r11i41V4ZWtLL0HNJ2jHEAUs3Y5iG6lyPrOrOMw6/BQx5udYlj4gpvk5+mD25JdV0McVzUwdetUKitCW7tPvPzUWtGJu3yaV1cTtqJxdCOIwgTtICX1LHu2V0f0yGTT6z9WAOy1dd3hWoM2+Tq9HbemwlUzcxRuqgGcZYddbimCvFKWnP9m+5LGTFIgR/eWdUsa0Yy/vCqXyyFwvHGnpZXrbiJ46A8CEWqws+Ps0PdQ2QcEmJ07L4TcqlRWUVNL3ItnKntGBiyGFdkNpExi9FF8UF4co8o6P3YaEPrFO7C9ns90kWk11MU4rtqNMfDNp/Pve+qES0peDZ7Hai1/zXbaJ6w/XxbO3TMzfQd02xhuiHWHHModzENl9jVXMpdPn5ZNKT6GR/Ua16Ojemq1WED6tRnsn8Dc3cD+lfXJMA0MpppmswQhnKVMFn+tEpiW3+H2UWf4LW+AkmeWdPskdwbwxB6Jdnu4M5EAAJ/YQRtqDX80UqrqO3h7NVrXII6gmYWoHwO9Av2JYYwu3aKI++DKeuFZVuC5zUDoK9hs2hT8nMSSJck2A/OLV8FGMiUqcwa0Fbz/BnB5rN9YpnqXIJue1zSqM2czzvzj0IV99kkxYVjejOKXpm7KrP6KfyaA+Ei44UswOmUhQbo98VehOVJTUMrjfn1cuBKFPh3qvChaQeP7QRSvb0bl8mMyfdxefbjepbAb8J8jV0Nah8YJG1yEdCvV1L/9T6Bcj0CL6LZC9HYFC06nmrziNTAItqDiAnt2TYImnhvraZOFbotC6caT2L4zj9LTu6Viag5crCQh1KtwI+29GKD4EOiDQ+0DRn9UIfjSrBtlmTD5GCn+4LSAuzMNEo6STfb/0ZCP9C6owgWhn4vhhDnvVz8XaxjR8sD3DowPG3ygR9yirzljvsbE03oEicIVFPePglftTuCi/g686XRm3wNUTgcTpiQm9dwl6Qc7DNyAa1KXPCuzUTrKl+DkkKMMgzXJU0zwPnM6hYZN5l7WRN38FwTyfZSCx/zE+NfRScMo/6y3FhilqC/Y2zsgM/lUwCuMdqi/1LO/muF20DdvdFJKyPBdQG+YWpk4gtm21KKdlEvFEqAs+4jKZPwkZCXmUQWUjNcKM4fT5Y2pSOGISknI+SdrOWQvOxM4OR5/zpnTAw9fN6T23DKqngtceVKP7qKJVz6LK3wTVRHBMFilLJf715BCi1YbXS/guVFGR7IBOPd47AMmoTTnXPbdZ
dyEo5AGrMldpvpI432SfJ7vtqVjMmv81LlIdiX5IG22nIZ4NJzWIavspWGyZ4kiUyJ26+vDRcVOqRHkR9uBnBanT7nrJIzFgrExns4ARe8PU6q+U5sD2aN5dWEyOezMOE0F6WrdtF7QpZU0qgBgRLYUBRZt96DPFfSw1RZdeHjj5gVj/EaEc16VNGFrBiHMrrOZlSZNtBu/9oSiq3fmEunbKUuXZZTfjjlhbTiBYCcZgtWHWw7ZI1GAUZsn/CkXHaCqErSG37lXWnJkkeUW9zZOvTNVsyOzxQxsDMMk7ald5eBnDGB7BC/DHjNh8cGpHeUJA16h2A0UcSpikAqlpZVKxQqCN8V7gyKiccVX6CdtZIk2N353JDWsiqk02dss3PzL0HFBJ2CgvhIP+R93shiUHdSYHr+tkHiF8tU7jFGXmub1dZoQ4XDjLcLoPf5JAATRJI/dnTSUaeuS8a9bOaXVtx/e5PBYoBEa+EvVDFmzPaFqEBYn6lrke5e36hd6mRxr2WF/pquohZm66cVcvLOfZRyXLHH3t4MleaGMrmN7faXAvFVpn1bF/BhzKAkjljYeYRubm1I5Nq1Wcrl2XFH0j53wNYWSdIWDKSDAj5AQML2C1jLJluUD+9Mo6MZU1598Sbc8v7C+uWFl/5ptkCW1pr5HDXSb6JLomWSGb9Os1AcstZ5UAyTuaxJtXdMRTHhbO080NrNG+VyNjY+ECKAUfTjmTs5q4jTBIGcfbXRUkFO1OtE4oYxqJUJZLHSGNbH7WGFGanDMqIOUmHmUwyV4qAKTtIhVc/z7embX6TPmu1spwmGmIppWcXqdIJlXfP2XWzZJ7o+OTKayIut8var//JvtrpNwyuy4iMIoSDpOMarhV1EA1SU2y+vL8+M/HYzGt/YUnqATNaKs4HtjyIdsvKFLoJ4v8/7tD/lYcNo5z3mCmiZxeqrZBBFvu/Tuu7AUkmVWNryEnt2JIDl1MRELDAB9GJy6egEW2yLwpvRvEHnthTTgjnNH0/UjhSMp+H6EA8a+8DWHnwOqw5iycVyTQwxC8nX5GoOfcfrNH94uFb0Y3qjXRHnzFDV0T7Bd+nfn5Pp63IKt5gp/PSSZwYM1P+GpoJu5ps775ccMwTgkTUXHZpEPutJQ0k4ZhJ/O1p4hK8/PcqWi0RPCUH6qjY4pwveirkz9h5LGFQl+9r3+5o95sPVbD+1QJKkvsu5/KUqlwjtqBo3L8s2Vcvf7iInSR6NnSyrydd7bh+HvXnhWhwajHrCTJdlbwPMUgoCKBKESPa9gDNvrBUwPjCfPond+t85Y2rSbpIIJ06lTozRsU9oZ5CBUoboMZfKT+mh3vgVYpc6qX3UCHTRpiCJXhC4sYZmfAmlw8XLGX6E2YR/8mWs8Zf1yhjSEo/JR4GBLCf9YI+qUry+dANDIe1vj1aFYrwILYHWeE7cNJiMu34kcUTfszciHfsaqWssURPymUYmXuIRwvb75nvvUAFxmpJcYQSE++URdLWJ479O12Jz4rpnCD2yjim0DS6x9h3puk6km06DIO9270RjGb/AzCHxA6P7QZAX7p36lxS4Y2v3djklMKuE0y+poLnqrUtm9GHTtEyr9ISL9oeEHXW40Zaop+GLt0Sb8+pYftQXrX3Zezf8IJTHugsySMOPFMGnjRoKXDxBFN4Mek4MH8aB1lEsaBdBmvpNH4xewgiCrBttMAfUWUCPBS58ThhzSsy8ah/iQbfg0MwwrzGedba3C/ODhKNCmuXQO7/27dxn8S7LscyxxKHYtsIOLzsCp7Ip//V2/lZUrO7p9YqoutLeLdaS+/dEJ+Bd2sIrqttE3ZQMAchOWaG5fXhH6exaxFFZuM8vZmZv4Bd+Vq7JX1zYMwW1JwGHy6/DMp0HobpgSG6Ph9WDyo5qmUEwgy2E84oq1ObaS7Zv3gdmPIqXWa7CMFyce5YTJ5Z9ZFkEqfXU6mhj3xio046/CsKEXGcYWQgu6J0h/QpKy+hi2cmReA51Cyn5euVfYltktf4IaOZ8FXlNVwqiHTa7hEF3RdOu3eXGCLUm7O5cNXPskymW7h151HN/9z7X8ZImU0k6gLx5nn95/A2r5n4qE+BF56fiQoWqtOl9XYUrv7vFbRBMaa6R8Vsez0Q1E7mxaBoM8n0hK4u+0ums24hfkbqiJLsrodkBx/nDM+D9dwy3kP33yg+TtiQB/KM3axD8ylhnX5CBwvpyxlu+HJaKlE2MrHFtBFXOo2vvDt2u42ISIxJf6IHWw/VhPJpCMw31ZOEiGnVj8je1GaHrnIuOphHt3JNU+Mw+v4zDMfmFhSFjK7MXZYm46VNAL9vLXoO45HtiJG0Mpfch+4HuMV9YtcaoP2NeCsmUqzHmc+mmNemFYL7nAQV+uTAGgH2RJ5SdeK+SSYa+s4/32RwZMjWulhfTfeHmbX5nBgDcNc9F0wcBy27m1PnXIb6OKU5axyVZcxLKk6Qc7OQ3SDswq5qrbxO9C2UPx0d7tMfN3Rn1mIPGyjUGF++4J5nQtD+EQQggePH4xsKUmyfv1TWu8i7cnChOEpxZDPbOyUbfi6xqe6/r3cnJry7QmhurrSdJi8a+dzhch3ijDfGVMGnS1Ctw+wanDgZJnaCg63prncqP9DHNBORzFROZJCuMyA+BShXDR45MX+mjgNhUB7kX7FgRWWsarPYluS0jfZ/SPtwZCwTYeTj62NEFpiWfOveUD+bS9px9u/QYsShAIihF5ZqT7trzKbbAJlQwyWhTsKSS4vPz2NHJNX24qXzT1JUDg9meRHTmQLqL2s1tKcMm68olZ1/HO27WKQzU4g789wR/d/NjaWYnozUHhw6UPTarS1Cz7mybRATTjBZfDfkwXanx0UhkxDLf93uALXgbZ7v3Yfd7wWt4RuSjXaeZ6PhbY24wBHJ/hnzm8S96EjEvjQak32SEV08Zwa2+73HQWQ2GGGFN8H6PtQ9C9NNnTtxO+0bvDr3vVpM3yFuh84REHY8nrlvHs1R1/9vJj9ncKp0ZETCfBs/EF/yMGocl29o+P8ZKelaBpPICMzdny9aaRaeaJL44NJKuQQDHS1+QIw3GHhCfsqA1abcCrXUGzLNBzkn+5Z0vJsx0cyW/0+yQYSKS4GEWFJKbM/B34nlNiDZKjyLta2pWzzE/QlVLWMngM1x87jWlnaDPj9F70ZUmW91L/nJTl/pQZphtlLvCK2PZgC1Eky2NEMU7m8EPZXDk/mqZRz1H86IftFtFnR69SmF3pWZa5Hc7NeUkHb49l5k4+yc96Zpv9UalWUhLT+GKSa3ZlKPxy2n4de8idKcXbm10SvEtjM0slZouLvfdLEauSkZxMZEiGRHcIWmC6s5jzr6TJHCkr4o1Q4hFWBEUaBzuY5WVGjzS7qj2hcs7YfQ6j/tkh9GN9FZfEciYB2bl342Q7FHI6TSkPSt8unTZyMRrg3s/o2x6WK0k6Zo2fgNgjczUH0xW6C+v7hhlBkGsVIIYfMBG8r2sm4ke/sD3wuaUu5wmA2v075CtEiKXyPDhgliMaTj07MpBB76EKXxGZsQEfBttF
EHKJ/E+JKqF7jBcLz/bYtkI42OhqVheFlGaNiEfSNh4AZk4tKvYuxtV3dkzXzYiaJ9inJ1SyUAMUoirF8XEUDL336DEzpWqHeHXiteNQlDIGDhT0jpX2VX7Hkxnc/hdrndgOcOVaXH8SaQm8f0KGP6vQX4sH2jAC5iWw+KzVGrV6q/aU19X/MhFhy5kNmWT1Fu3Svh8SApExII4pgGRFttSkl4losbF/qeeA8tz6ymxu604yzGwUpjhkEojjDGtj55Jwu9uydO2RHYUust7bhrRDnRzaHF8fgePIuDZurdjObKlQfRYfoLgF+eXrUbTy1AB06tc9PEL0XyZ6t7hysSSFp6M604iLXP0EZeR5c+7L64JitLSPlvsNGxkO9HeMFbdLi02A4RCWR1FT3P91SESIvjMZzUNZWq1/rR4Qf44l7wGSj5c5KB5gdfeiUbI4aXK0ww9vMSbvjY/D8sYYZqxZiDQokbvVdkh33IiwmOgEn5Mui3mGtp9DZdT1dRmS9ASBys243IVo1/RlOj5fjUGqV9JbHK5yxzK3BMRftv45KwUEYrCyWNHN6zCqfVbQeDdUqAyw4sdiS49A985ta2zDaFqy+LjQ420+J7azpRaebORh1WswpFoBy8TGioWPtlmUrYtK236mzrBBzs0Z0hAuY5yvCTQyQsOKU1597ZY07eqadZC3ZGTKiz+cw59XzX9IkW4w7SqJpNGo+iRnxSF3eHWvm2/4IOV7b74OOWOPQ4G2bgeki4TWiC8NspJdd3rCc+2FZScrIW2/vGhzuCIKClFVZy0EaP9Zws1QpkVxoA+/MA9az27HjvOq6G7k5SQ+0UXYFjD6KrpYtgo4Hmkn/sH+aohyVE7omtUTdFeC6FJbvHcWxWW0X8zI2L083foCK2WH9wjIJiEGo0fHPs9ABukj2Emht3eXNT3u8o/iAGLVRwKyENjJCll5QcSEalD8XTyoOgdn2lhoMdK4zYbIJ+m374h/bG7h1sm8ch03OH6rPWRQGyYUl6Xd7ttN8kPL4lAZmvoXFRvcBg8No+RrLcvaccPqXLuYH2sPBgBNjx+QUpe3OtkS82WlHkcV7X2mHh8o9bBG2/hqmqmVWFGHWJ17Cy+ouCLrh8QTRMnQvBsRLL/cMOzdRgbWeGj43q7p2o5e9vco7HVdami90ey7CYqEVTxFQ2cOLBV47p78yF6KxJ2y5K+0QTC3jS79x703MCxzfZdb9Y9vi2xxoD3mxZcThrqVgZ9SUWCM7iGV2LgmHqVxgZOsbOzr1sqjSuJa0SfD9Yv2omJMxcDkATR4xM1t6QwLsLF0RXm8iv63owKH3Xau9vco8su0jzbuHBpTj0tqMb6WV/KRs8EwWXq7mcgPnIMwfT9/tJR8M1R+PdseZr6qiX/U0k1JJfM4WLrXSCDxNCfN9VP0RUMyJhoGfOi449R9XFd+b+ivdkbGk2KE8nxsX5vHSUcQm+wwECLBAcyWNnfTKfW2tZK+R5OV4ZQ7GrOAX1t5PyiieWdyddRT3ULwKZoK68mIZ1MvghHAHdyMU1feRUGb2E76VcB634EPNxepOYb4X7oe8dEXV60/MgHUIjp9n0f6kVFzhu71WWhpQak4hf18AsaUZlD1DZBUJnNNnarHVm7Zp9Xgr1U3GULmD3ZIya41X1TSLdLnuThH98bM5E8kzr/RpNIqEXanOeDLXwIAfotGUIhtX1g3a5iBPAj6W56aEFKMvFKoFF5uvcvN61kSkMSf/nb2sE8/7Bv28OFuXkXRfFvKP/7l/035OGW0BjPWb+CKwgC/7m0Z/Oanf/hEir8G8p1p5QNXbbO1/OWv3/9bwTx9zPXPxco8t9B/U1w7ai+a/n3Njjy7yTy53KZVUX594kE9e8gAx5cjpc/l4r/8RQQbf7zbGDCn1zWtv8M5fc7AlXfP59hxU3M731sHSxCYdn2Snb6byj19+vE7Zb9ed+fC8t6tX8vzMPWfzNwF+iZkWFey6EY+rhVh2F8LsLPxTpb10dZ3OAT8bYOz6Vy7dq/f82Hfv37Rxh7Xi/rPDSZ//ebP1PHPrM7XwF4wr9jKPrPhfB3AYLofy7w599B/Hl1/cdXZjZXz5xk89+L/8uVW4ZtTrN/MSfY3zeu8Vxk67+avL9zlX2L7F9Kwpy18Vrt2X8ax/9sLf9+1ByqZ9D/jwThCP6fJYiG/vM9/gz178f+i0j8j3H870vJP1L5r6QETILz92U/9M8P9v9YcP7fgvJfRen/g1Um4f/bq/x/tGP/Gfe/WouljEfwa9XFYMzs7yezjFm6/l2M+J8XeXWCFWIBnlXpszZx8tjYw1Kt1dA/f0+GdR26//AGpq0K8IcVrOHfFeKGdph/j0ZRlKYf+/m/rh0B1q5q23/e+VdC/uNykv8M9Hnxjdf431Dmz0tEHPtHG3HVhzXsA1KkYgB4rTteKXjF8xv4j5F7jgmfn7wBZZ8vwwix0rSC9bExZLtpFj9bH/b6Gvg3H6UtddfczRux3vcOcY4VWfbhC5IrawRbWN+8mjGjGjrGkLpc2MtVsL+ONQyM5ohyQdnay5V3EBoWP2QviyRKwAUOY1BP6nQ/o6YxfzaUnJBtnvCUoLY+0z8wWcN9jwbPVtlIuMXZYhELjS20cxx44WCkgnpevwsGXGMfcy5kVCbkwb8xD5raHj5zSMzRMc+P/1/f+7pHTWwEXkuUY8p31FsRhPcSLNZFqY91qoZdpZTR4fMGcayiKWu2oFRECKGWs475/SsuD47ope0dQICio8HbQ1n5zVWP/dg+tqXWchPG9YGYYCG1K/g+fpdYJfmmC49PnQoVxrO+OFDoQ80Pntf5oly0Tvcr3FS/6WcWCiR7WDSnzvSbizzLWhtnacN3aG0tr/heaBfdyMk9JU3z1dMfbiT7I08p1jMIYefKRgTHbXs44rJXVNLteFI4EXyviHpn+CvOrU86duTY9ZlC1vVDgjJ4TXgX/2fGWK5/DwNp6t8vmqeudL9FVCVfIQwXKsXu3YwUnxnLVwVShQIUIGKAR3aHGOLzUpzlo0hIC4vWZ15dTmVGrjWx2udcKIdAFL+YRTCzKi6HqEfft1AsIxM2b5CNYOngbEywnaNygihfu5DtnFEVQa9K3LVIyheUIBSaGH5dwPF6GWmVFOX3jcroMYhfjuxz1yfOlYK4yOgVBlMGrO3PHGWEfLXQOvHvfH35gvMymOY9HKBuLvuBBrZ01GpzDpvleKESk5HO+EwIP4/pCxswl+0jabVnh6TmPyOg+xohVo8YSPI24clPPZuVn7ldi644bl7v1FgzgUdUfiD+/IDG2GxAugVISLE5wJ0xumHiRpSZ1KYN8QXqs4wIpOLtxYsgTU+8+76Dxiwc+67R2FJj2E5/UwHwr7FmrnTmR4dKXW41a1M8J1k7AhYzmjTnnn0hW6aFX9ERxpdrHVpBVWy5rTjrcfTEusYhmqaBklsAkw7cV/GYfgqNAXs6FFyaSuP4+tWevaxiP1Fuw20lhNay67kviVZ5nT/go/loZxRVKHkac/FrQyluJdQlX/Svx57u9ct9LFbfTDsn
rLmQeXZiSilvLDs6JQMBAHHkdrpHUc1d/JiOpd1YCDvbsiU4X4VraZKuMHG0vyLUYIQta6IAB+fDQ6/n7xRE+bCuPZ/Nzzz7WtmUKEKwvunXJKdcmBKHknqVo2Rs8Jg1eLLKXEQchVUx7SjJnM5AHjf4HLV64jvnoFcKcmlkn3il58H+1liV/DQ4GpqCj939viG0gilVTlYJqheTjbBfRzWr4xlNQAk0ML8vJxDAsvHpTY96Bc4xLAKmzuLzzV2GCSeYHHkiyROhrg0ULT5xVK4vZsLUrqH4NkwwaEk6mSokKaT0ixsv3lR+hQ6KXj0XL48bmi44V/rN5XQ4fYnqO02AdAAHEqhkbDun2knHaG6EX58n+p2deGHJKmgptdQ88iACtOj1nIAc3LN8V781F8PwHg4iQY3Pn9gpqlAnAZNRtYou9kEQYe5vgdfZ5lDDvauyBUefsecF8StPh/Nu2O1Lb/2+5WP6KQ1liPphZvy1iR+6QhJ3NRbg2DOdkTqj+134A+OlpokbBGH1SAfdIKPl6/n5qRfSD62Xr4Whf8/wO2Pt38fU7pXmV1f2tUYshkneiagj+LAFuqheWaRx8Vh3WMFRYMuzXnlyvi+8tDfzMk5HrqdcpPUg2KcvhALf6IaHtz6F1/kZXGAvf0xwDpbFnKWIinpJmpppSz84swUcISxlpj+GclMauzgaprjl/bPWWPZa8wa9k86Wd9hThs/ZSUz8DP2XRnkOv3PEuNUucVpq30Wkco1BqwyY8Dyr1n+wBPM1f4fllMv4jUkOrwioE5O0usler0vsk4wJi0TYl1hAjShGod94EsbAPEuKwu43HrCfigokPH8ZE1OLB4Nz9EuVkcFLdkFZZ6PWL113PvU1OFH8elRR9k4trFkW77dwcb2t753JeZlRMdUSEPiGofxCtIGE1EOVvIURr3ZRtK2e4tAAfuV+t9QiP5oSXSgnqk7UYUM2PAUgRWQ6jy6mDr8DXzyF/EryBP6InBgGTqqLZ6FqtovirgRmvQVpA5Jb8LsHzbASAzCsr0AHGsk3tl9Tw48y2Rr4aBEwX+pGslvkX5IXu4wQUgduKoH7QZgVrz4hsxOxp1RViy6QE90MGyoHfALcdJ2pT5g3JqSStYFlL1++yeBkSJ03DU7CoG9uWMNb3N6HaKmULI2053Ro86bYglnY8A7BCd7Zm0bj4CA8McswJT8irxWG6Ii/LMxbQKoo/x0iSUxWYtTM80baee5Svx+OAu7i/u5CPnfxDwBq3MITWdQV1sRLzXzox7uUeYzlCgR6sJfPGIOLvW14j1/bz6tHqAtOa+Mby6HwuWQ+simF3/PbFwjCfA8hhO0P4Wm37GEgkYi4TErJ24TILVsQqvBC1DG7EUaEXhXIQmTzt2ScR4J/zkYHk8CArJTUkby86p1f0s99nE0IOnSIr05RITFF3tKyMr3j2w+WTti1tovK4WbLH6D5JUilkbjy6AMQ0KVDb3u/UvSFo2vcM/nlCMarMhFBEg5eStsAeDJO+ZF9BtvKOoCwHbk8YVu1+IUXDyIc6iyUcQyyB+OliuUaCITbB6+7CbLz4Rl3gFzLrzrnUp4g7a99U/CfmR5A7pgL1wRtyuK+J9Fg3de+vd4bFa/5gMKOvc/NGvG9n4o3pwWHSVnWfvgm2vzG54Ft9QBh8KDsximEhtd8k7l0ASdmIPMv8UsHslDz3merLQl76EIF5unZM7ym38WrAaVzxJ/0ycpW/YE0LG5l4atxBSYEmqmYyOc9rb9A8/hz0JUIs8O60n90EPLsGezBMUk7RZM2FSUgGPPm0rJ69EOPBPh+P6saC/2SCN36Fc0fYolr6CqVGx2A/bL1ywbVgV7QNEYHT6EklyrRw5Xf0VLYKkMOIcYhLyc57MO6epi9DgUwjrM/AIKRFEfxF9NeBLpYjS3KbDmzCOZ0IhlEt2sSOc/ujDF+HTX0QuOnP3mxP3Oad6Z2YyT6O3eG5ng9jBCplzMgaUPUeC9UA91xBiaKVUNvixBOhPczvHEEJQWE6TLYwkzE6uT0GMImiqd4AKwiSrQ13wO0iywdU9oip2tYzAGxrLEbtPpiqZEq36/jC/GfeoAudw9bznVOvPichUR5R33a/3zHHQHKH6Vkiqd54MxUpoIL+eHwgL50O9l5KQIjb+aHSOD+frV0zxcofB/yiw84R5B81vrtyHQBbm0m5eWWUMIbUSqM29NuKWsNHCp9lNDbpN8eHzxAOg9IrfvEpRmhxFNMzlVALEov+LHwCrQDEo1DpvB2InDSnTFqHNpt5tJLZZ1KeCiP3L31AI7uw0sKFhttvlKHjfOsjjqS8NG1FDJ+XXyNE0pIJqe+nV551xoOjdwF4uAbM3NNaICvbj60AnvhDjHQkQisKelXkiKnquK1MPSEt0Z2TTWhjdH4hq8PyFD45WMp5Mqkqk8c1EgD6/R9OkRbAdsy/RaiJu7INoIpWajlT0ObMXVayLkl7sb3R8Z4W7KWyGsgpEgmldFOD1W1c1FKxuDR0gZehoN4A2Ya7j/EvK1ffz2BoR1Em0zGT6V0sta0A0V32GbJJ8/rNs03Rs0H+35YzRlrSrPwxcLgYYr/gKv3n1VCl8uZQsItvODL0rW1s5boOu9GWtdA9E1hV60Q6K93YWGjtnBVyNRvH0idnAKsEVOmg5uYpnKxcc6WMxl1UWtlv/RyIHYp6oQFQNHVCobS+8w5C9DNfrD8yu+NcYibAl11RdiJtpspI6BRg/xzijWuggFSo6hO7Gtjwe6NfqXM3UPGNJTweSwkEIxdHQMrF6FWcsh07CbnwueJ7nnaTt2n/qt4EMNsFRPXnalJGJ1koLFIuu5wScYOfbNQYtXWv0oSUr8cKu2/83Rdy7IiO/Zr5h1vHvHeF/YNKGzhPXz9kPv0TMTtuBG9q4siU1paS6mUDFtywC5KY89wp9gWNwA+kxHxX/ePGPLjF0P2FyKdhjqZrQ6vP8cJLr6Q59g2cIAoaU0N1FHZxaOyWKEF1s0iDAvAovX6LL2J1dYvX/uGRyePcIMp4HfiGvC1gcluUf0AvDS0sIJXyahjoWbNCfsbk9ONb+z3Xwz7kswIRH/XZVdGp4ZsMK2uwHhZgRpJ8aaUkgkhoCHVki8r/kUm+d2BV0LVOghnoBUfSzG64pmDIWbSoQScQZZP5KGYfp3RyXjMWv6FGTR/w6NXQpCazvUprCxz1r7HgT8hm7q8b/IJq06ZL+0v95CAsouIsk4ARu8KxNYCELRaryo87VH2m+xY0hfQGFt5eU5xv/YN4pT1R77FlUNXzwnF7tSK18P1i20JlIIxqO9gQPRPCTzFSiIqpKPXl3jAq9luLZlYz9rqWgz9jaGtBudk+ctf1pGvLLBbuwOSa6QYiwkqCdra/MPlNMW+MeCRBlt6Xpy2/z7KM8Xnr9e9pLAtIyprm0fsbhn134yL8BT0v3sFvLbF+uH/xm/yCK8G442oBPGrBoWBIrar1r6yYsXGSjif4nAOjLSq7vR1kldxfBgj5bW/zuYcDhDoZY1XePAKQ/Ez8GTJyT/g2+IRrStRnT2
uh5sXG1mF/QukQL/gJ188kxpHUsvT9oS1M8ge1KXDMO+PbZachWssZZIr8Ga49T8vCxTdv1btAOnT8LL09u6qEGnrjs7+IOE6NQXtbTaTEZ85qrXT58A7NYDtRA488AbPVNW4FM+XG0srJssgB0QFcai4zIdOaie9vsLBHrHbPSC6uONw/1nR2uQh4o7CMj6/l0Kgs29840Kqpg9ZFe+D1yx6bakMvrqSAXdLsFv8lXh5RowG/7zm3LdaxOInMH2rAyDUgFMz6jdmArrGAoeYG/ZQqlIS7pUzQqX+rSssOTT65N5njR/++LV7qh7t5+wMve/rNt356AzXEn2jGCje6l/jWjikA2rsKK+AOxTltS3eCQ48PnOTQVcRUyKGXmkA6zJTnTezO6Y8yHmMtva51vaqvkaknerA4uZ/FYcgsC3kUxgfK9MEWlBHr5Z+ozaoKsH51bwDoL+WOg9EBBUt1gxDjdIrReyyFOribsL2IhV/UjeDys+GG+QgH4bxJZ1acPx8KnGMXhZoywZPSpMSQ0m5QQylpIvyJfpDc+TFkMd2krJdHI8VJNxiVELaAVQJ8Ity+WQv3hunwpwszxxr/Muk5JdeaKlgH8wCKJxz83XYNJiz+7JKDSgU5YO4Q4NP34QajKRsU/Tuqd/fQMvGZUCynCall4Ws3LdaCpOsXaaMgbcETWDx4PoOW8KCbUtLH3lpnYtVpzLMgBicxAiYOjLqaS18U8hrRjdgwT87nqmpeIasX2m5n/5Ovkem9Y3oPJupCPUZ60v6JiAlG1SB1T7Aj3l/oedixTa5NGOe7ekqGZWNC1eRPSSSweyeMz+OAlf5Hn2xkS7IKZHAN7SeBaItCEzwC+XgBsgXX+wzaZn8x3SKcmku8+qlk/dldmkWSBybc2147rkmzteZ9axYJ4LgvoA+A0j+iVFEeJUPli4oeTqmFbVCLQ6zx1xhoIJXx1jrJheKbDqTMYqTSqha/gB1oYo6Qv2Bt5lNzm3ISMeJhtj2r+F1mWEPQUEY5ta/Ing+ksIxmNTYXCVOQRzfuzYVH7t9SU7dW2f6ZXChql27qB4CxGccMoLlEukIIJfOjX+Pe4XtNUwYlv1W5aofmntgsPG/YuNPU4AW8Fl394IalA5yPtjSuyCgv3FV9dJ4jjuV9+mAMfAJw3SS1xt4aeYXo3w3xEBK80vjff1Qu7+mHvAX0kZZjPhptTocLA2QzAUL/zFi/v2f4ib2aYe5ilHQBSKxDXV1nvOkdUJk0e8OWmpB/P+fz38G3cXJEBIQ9homqCSdckGIjaiCZX4fKs471u/2uX7G+wv3Wwqf9VWA8vPDf8dKv0T8U1mA6YjK956w0CeeJ0pC4ZTeaHHiGvNh0j4t0bSJZSN8OVBzWhaTVPwHBNffMHkv45+odrBpKX4xttMqQvaEKTWjok5rbrY78TAQmFc/rPqZ3s2nV4kioRoQH9ugb/qNiRp+5o9NBHGm38nDhcKEe6kyJy+qZRdFEKy6tMkarUxEJYn7MTorBGkQ2vFMXHh/pArsbMui+SN0Un+1SOBEE/3Sr7EAddiWG2VHemD4YLESn2B27dD110DQcQGRuGsNNbbCeThSv65SGDFEmeYfgSVR7iKYW6Gg6RW83URCfs8sRIoSZ+Mvvtim45j50J8G3MKQmbYFUBLvCS0TLuPwVYviumDr0XCqk99udfxo2+jEKEALLnnWd+X/CDY4aOii+6/MYjU5G5CZV5VwoW93KPxtxs25ALNAr85UykeBu1xeYRdLJoXy3YANzZ+fbuJWKuTvyuaF15Zw7y9Wsm48cpaDdJgHVjG1SEge4krz3etMeBKQwQSlxzg5O/6Q5pKBfTF4sFmJnToQPd9fuf91i/9TAWSryZh1fU2GONpg/3OAoX6MV9i1yj7ggQgia7wQ68Sa6ZBxygQnHh42ppAFKOf8Sm/cCTsnh8FpBBshlcJ6JGDJ0Q4c//WsrsduhIovxfdMErUIGTPdY+Mi7SnpDZp2Slo0n0eYwO2Dg4IJe/gxOKeITw4jg4Z7O994VxG6HtgGIfobZuAv1gePbc030M83LDgl5jLic+4tkrp5iZJBNa9mg2s89sjAfXi8Kp2Dog/ptRX3JdeAAp1RtrUF2a72uy+8BqImi1DKfcoD6gBawzYODM48OGeHp5+grV+NI7EQJhJLco3ZnzgeQmEnYF3Z/wVfsaFJtCKX+6tiHu4K47Jzz4+m/qbXpuDAg5Y6DLoF7MU7lqiSo/dEs3z0l/LjrKaD+sMgQm4bXLp4pf5VkfEHqY3wcmosCL3pgoMD1M2V8HevF/M79ndwBZYogiuVcxo3UVR/Q9ODWivnLsERDBGpCQCI7xuZPetCPB47V4KJU14P68OaFADFQlphWiJpmDo3i8FPVsW+wJHvtfhrR+sVow56i7VGjMfz8LTK2GgnSiB8TmUcbHbJmoqWhp32pIHW8izK8nHEmvzyQWAx4i0KFZcRNJBst84GMpJ4fjdmTHNLolFwKTUVKMr//JvX1dek9nhsavZWsM1x3yh9YAR+21pYol5uy4l/yllEl0fkwGIsf/1j4/4Xfl+8XFeikIXmc35aRzS8+PTtEcOjvKBzQOPaNNTIOGXy2vkM3sDpHhBadmo1cG/QqHr/qIQq7o2h7S1Nx79UYUK7QUwYsN3BoNj7sKPrUH93TiFwoNkp9P3K7179nrMhXOVwzPrg3M0za1kBHR3F78ctnR8a9reYaJwn/42ORgva/htupW/dAOwD6NFYVpW5MfAzkw96v3XgeiwvxMFray+cp96WVi/LZhU/HPyQiUFq7rWiVoyxz0fgwY8buaQgCa62dpcaKw9EoGCy2iNxZuxdcl+M+VZDdRQUkJUu/HJFrTp8xaPPMDBl+3yfLqIvzrNU3kPBv5Zm4vwG78ctrLQttWP13E6aWG8cZEK0/yIW54viTowtbt9HK7n8M9ruDz6cqR8hGX7pJT+82gG57kFXhHDu0rGAoJat7uYCt4umtUIxiB2U3ZhxORjcB9U+zIWZpucp4e9+eTkfDbD1KIBCAVXBP05H8FPK/B3mRpxyFstpbr8BPTTqqorXq52nR4P+w4hyh4wxQ7d512ASOFhdhhEnym4ee08dpNQSz/T2YQRQZpfulSTYib7F943Y7QcxHDE21Gcbwhad7I5tj3owpPRqjJq5Ayp+9QvXYZUfv8KwYiXiJAOvTUDu54l/VOHgGuwHj8e8zODJ7qFRxJkNGPgwr7Rlv5vO2dvsKkaCJc8Y/5Sw6cDtVHYRyzBY3RumHvS10o9uUJXTi8ilCItqQC7YvnBbaJGU+kRVR2UQ22UUkFoJ84kA1ZL338RrUmbaiifyG6enJtjjaetijh8gfoDBwZSH3TjUh1VfStTYAH241TOMVt9lZrLIv/KVPhZs/wQhpBhheXyK4VDWy4Csz0Z0jbU1TOMPyhL5b1TVdOjSYNgIPan0FBkwT4NAt91RXiayXF/M5BK5Y2p15GLyb2ZPS1tnU+0+wfd/2GfjH0AHAef9klVNHyiFrxDfhLkbyGyJ5MDTnbgSgMPU9bsPck
oY+No/ps/KCyjum9UIUdyMQMxPIP9B/J8+MC8HbSJxWqTuRSu/5VQooo4TyA3wqbX564P6e1KI0Vi/DfuSL0FlZ3+vJRTsryDZW8EqGqMjwUgCriUtvvprYpli6Y42W/9KMncfYo/CAyI9XoTQf2jcSpnyuWFAUu3iwaYLd/CDCkha7o/WysYvYf+kYhmQJNe35geZK+KgHyZ7oEk+G1WPLusBTdGlR5+CDe1yzMTProU4PvNxfaKeIW8Zg3qoEhTpiVgYkUvP8IYFYRbe0mtvJzt/TK0hxXLmCtZTMGlb6QFtaOW3hAF+kPun6dTqsWWFTVE3ZLN4qyvWo0dvWQxz/c0wHMKaH0h1ikXJ7x6cgww6GxyDi8V1lb57MWrQa7m4VtRPN1xsar9qnwlYOBtbZs8gYb5AZvVXI8rc/XwMSjxeOy8VNSzX6mxtY3fUBe/WGpfndXRazgpoqPWNH9B4QCztV5FSWPI5yb40+1enolISvTKGnuqzVFoSl3/d2XCYEH7vhmA8nqm0ZTOVMN6ycUo1GPq0j7w+2dzquU0VcfrwJetnXK98YeUUmFSM4odwvwtKFr+mkZTxjd0mW4eHOrKNN1/uhsVVYnNpi2WhJ37Or9cM97h+GvEAnbeU0/6LLstX3LwZyFI8AkvE7jaLqdEqBd/Potps9NepT537YeVYjfwJK3YcX+Fh8OcYhMxEl/nVAkHwGAmKSfEIunNGJtsGarnHfXjJBUg5orT1OoQZ/LrVlB9baPlQ/jaGvQoHJhoMi6VzsJA5pCi/+6ANt358fArac3NW9uI4ei+dLzrCwMKlSJ86raDEHGSMpzA+H29Hny5ZBt4Zr+mlT8+j2VKkpVMCw9W/iCBN+gXOM1MPInB0tItffYQ2yXW9KudaNkeYOIofOIL1PXis3PlQ36KUQaBj643m1Ip9rvWVT0pu3DTZ5/1fH+/eZDlDEzBa5tDaeeZ7zjQF2Q9/BIocvRcfHIb9sA6FkPQefJulkIGoqnYImq1m/Hwop6TYOD1fmKXDA+fvqjmNJ91gHMqHy3k6peh0mFEigj7f9cdMMiqqAm7ERg6OzdS9ZZPepBNlWw3xCyX2h08/VfDPHLvnS30RKnk5pMWxeXb+S46gb+ywxZLanrql3mghtR3EQhKoyvmIbBUGrqvfY7nYEbscE3blHMnCEfELVzKpWuLIHn/RiSIYG3vTnNEiJJ81Lwtgk53DV/Ob8dD2/3IRCYzkuvmTpcD7UbrXkVORU4QQ5oykQXM+l4/wGZyeDAXokIC2WDTU6+I1Ipg5LPqSyAGamPMqwKu3hlrn0IHnRUIN6j69R/jy+3cgMmqjP1KuO51XannjuIriNYvTm/zdxPdsFC8qpnNLh7IeL/CgduojjJOkxPWjfWgtInW5vpajIhjDFtkMoKQec/f5ikpmR3oZ2koIG4ifFfdFK5gAn+ExUCuEn1dV5IZitwrMlXlq3F5sgBsINsf4/U7p9R4Q2uDVqdZWy7A7I/I34Aom38rGTaZGg9P2l3/8Tgu/XqjhvvBqqKvzgRrf8ekW6pBWDAkGCsIIoRG3yaPg/DVijqXjNJrXR7ifo4y7rl/yQTVvLENkA26yrQ28KWESbLaH9s1t4KREGz/7gVdaRmcCphyM0SQ9lOR6oA8lu31ihYjd2OLI6HVQR6T4ovdpXPID3sHE52Y/D4+wuqXWewfZZdotU2XPRNGb8kjAPAoYF43Zyz62ovBsNt9HZUk4E5Xa1NYKbM8GFv9HabJfy3+3k3tOR4oJ7FdMmRY2hw7OOu8mOLjnEL8hN4fwuLyW4ny3TJ5I0HjVu48N9ajPJwAX/9lulPtJHz1zGT0GeIT9FLnqqs4yuDrzB+Rf+GU9t7wgoWduTbGm39Xraz5/Wa7Sw6K5xtM0CX8MZdvD1QKeXUs2LUCRZogWMyTtveP1rfvQrlfXMPtqfU8u9ELcaMzecWRrbeTR5tFrkXUcUgjf76A99BNGJZxquJVgwwcycMN7oRXmO2dum9VrMW47lnh3cQj03+lw8ry7n20FmbVbU2vjIZ/A07uGUlpY5vxX5gMCU5fZaDfD6sAu4ITvFd8hWIfPjbwiqQUVT58QqasXj3+UI/sYkIinDLHn0yAdan1L2TcEs6k61SnwcfFaL0nQ36boSa8r09RzGGageeJjbFeLMSkIj1zwocu/HP/eK3hKfNzHs+VVC+QCafMJS0+cr9pX8AxsDTc9qUy/U/cLE5kCPz6i+pc5Pto61MUTYalyASf1tvtq50IOd2GIoPL7Vx5jOct+aUjhuFIy1MficOQ8X3NwgdZfAU69DCM7UR3U2hx3+rXax42Oo9/YkWm9OCfKEYeq/pulv/onhppGX3pRUB5JR/6avfx5adThZxW/l3A4vtlPGiUHgjuW+kEUK4rcMf0iY1lH3F8IB8CN03znu6OkWB8GypNYA5oy+TDcjG3td03gHn2BslOVnM7p6mHh1PPHWh6q2s//pEg6jVACBq+zrfU1YaXAKulQqHHNWF+apqBQkTl+aRdF1clo7GokacDxJjh7N2vZ72fyPa2IiboNCTd/RCHVoAPW7su26YXc/wqcbgovivxJXpUE7UKlo8BUWIFqQK9Oz+qz5Jnt4Ow+v1Okm0sS3wBZqiN4jg4CvpxZVettV7Ij2PF6WSidVJ+fcq1LsgpIZtMEpEodTHeUMMjI9hRVSX6t1U6zlFD4iszNCN8YYTDWZPzN6duogQPtm1h3x193vZ/6RebC+BgghzHR8mkXf6PHf+jwN4HdLzLp7uhnz1AXmrSRmqS1MB6kkN6Fj0RbxT+OWnwR5fy/JKWF/5iPYV9nDmQ/ii3gX5KDr2M2O2ueo4/ozzO6Y9GFSfwJ4XZ6HBnvr711BTM6Xfaxdmwh5/ts8ZLBuVunKy+/LUWjEtvcI7t3EmzywNuYA5E8TNy5HLyY6PenvSGHab/kgIM0hV9g1jS3L8zJT1BXD0+tCJYdBUR9U/+iDGjnOYevcaHdB5GPrxNk+kBO8/0/v2GCYzIN19iM+ABFGJAbzyiFvfQwiyjEYdc6LmZ39te6y3tQ/QdNz0uUM0lq8ldvfu8DqJDnX/pwkCnpb5BN5UrsPAcUrEacvcZ30OPM+ZHsJhestZvDq64G5i7kNWnqnELRMjaBzYBq5oVB4291F/gDXxMHdpudMumr3a4i8JfgCZ2GUqDuOXbuyqdadiNenG+lT3GZalydnwqX+X1CTpJua6Ai/rLDWVrBxsjwQGENXOCeH7cgP18sa7ySQkEqaBruEqiohP+ovCp/uQn/4ENxPQ9k1z/9Q/G+l/BmbuqLRT6/eU2GxSKe58IQy1pKlwAyEt/Wok1A8NtzCp5RNxjCVwoqG8pnxkuGWwG9mGdojcnVCH+WR2r4tQxoQi/S+/Kxy7s5NKn5q/cARwcaCbKCW0sVfxYHWAEUIynmOZMm+N8VUb1fdQj8adIfVobmMvYxBShp0EBYJHpmFRlKvOzn3lUcCsQquRxESkff0cuJyTIl7JZUsxJTvIIzijZD4VRfkAHycKlffWS0ZDFTS0cvXyDDy
VDMnljTwx5ZMuqFrfv6+MajQfHby8rqvaQdhkA63gVJh2wP6wsKGe43KhaMa8pn+xCdP/Oe06lm+wHJXTbfxVeTfSPljmjXRIt1wW34yaZ+FIzMG8ajc7BrrvGGzaXQ/DWWRsq44t3BPa/n3ynNVZ36z25qUpRqa5rcUk5qmORujNTXsGiPG9XRUBnW+6XIbmBQYnLzzTT5lkXI6ximupx4+9KSvXX6Ioa++zL0Ux/+8PS0VSX8WV5+hd9aupGWo5FuDY+kD4M5lS4yekhWKh+aQmsJQu0ghyTTJuS8GUyzKMx72F3FITiHRqihFQrxKdV/9Hf48rY+3xNQ9CHmH85HoORHD3KueC2/b85CgWy8wdVBNOC/dXLwZlIH/OVI6zoX+e22xVpy2KN5+acahDQi6iJHvQnXweVItimvtSPhD06F54hf9deh+bK1uYK26vv40feq+AYKb+Jwb1+QHXKHjblMTvKCc2oPxfN/fquOH/yWZfKSnY0fXBSxwYgeNjjfaN0Bcn14/EW1F1xCGZIqc4MEBI8GnlAndl24R14c4gZe0SAFD8cDVd/GwWl358uk2m/6Bc9U0nrXYuU8wJSt0uA7HTudTrs+QKK9K/ysbItil56mGE9C2uZ35WuZfbrzHIZgWvoB0j6xxwT3T7bvosaClb7Y6By7p+dAXmSz1mIqccer8OIjBtAAwvxSXJEcpwjudA2mzbGOaBQPdBbrpvvdnPdBqVdn3SAtKwXhINJIZMBn++0NJ4ebVgtavwlaw1gk1ZgtkCNlnnyc7158Yvdbm/PhYHrmdBcGaY+JLb9kXSVkg/iZXlv1xVJkQcO85+YhzpPCjIL0/KwOL5NI+W1U/epybAhCaZf8QAy3Hy8Yg6uh4AaGj9UaUpb6AA0TXULzRR22To2Uq+u/fzKzfPK5mkniTp08e23g7Ioks2lkCb9NpH/5TG69BR2NBXwd+UEuLhq6O5JbZ7o1GCbvif5rZi4GWDI/RRKPO5GulHb5demr4jeTqSM86JcuGsRVwl+8PZDLunX9KcIIlSNUMgU4/nFfAmAtTdQiOEDR3mghe/kRqQlHKp+BCsz+787Kz1nbUV+Ov6GCFEu8eFCn1clnnw7FzNyy1lId/m7dzA56egNT2de3W51ZrRiJMlbQAIm1hhsB91z+JjHzmOE/Ceh5DKhFnUla47zPLu0SctKkgjAc4CgLbsngAcE4lewZMrXS+zPp4EyzlsMhG5BG+WD26oPPnuCejTTeDOOzrJfGCwid8amd/UT8HYoS4o9V1/9iDZO284yfkfvXRYDbsxaUX5H04q8sasGo0JKzc5QJ3koeDA7lHtLK1iyOTFjyfn9j4MPp530gBBGaZU0pSugXQcKk/F4ZtEUdDlkCP1nUETrfn6rDUHRsIzfcIJsihUdHutFZx85B+7RTGt9TTpq/EWkUzFUREUVjoUUl0RwKD07t2f/mmPuUI8xYN8Kv+aR4wsyWnFqd4+nYIDqsFbLNKdbwAjMNl/53UgeSfcWJAnbdM8ML9NHSRPaxSvZlw8xrGNewMFK3XCES58qKm+JNQz6EZo32AumhyPLhJk3LPMyNXdzO/Mu/ihQpkV8W44fzt/jjpuHyd4P6hJ9Jw0xx7uUq7xuN2d94p1/KanmcDU45fyZFZ0NddnCxH8eEBOaHzwmjXc5XvQmb8wL4bHaaouMpYMUpofmXbkaQLN/SxwMrUc8rFGHjiSmv0mJSX1AteC2+EOb7hdNXYvYMzITxHPM7wvhLIWf6++sQLmAGSUtmQ8muDX8+LN0+p8EbqRbE2mXxz9/UL9muEvQAp97nxiLUqpShntH88ncHOl3doZeZ3xmn7DCfRPICm62ZOsWkZBWu0hsc1D2kLOYSHN/7nQbKA3DSYkSG/A0mnyFrSEBEWVCmb+NOKYWKmPpS2wwWoLxv0JC4HBy5L5/ueurCy47Po4ZtFh56SPKjEm/STRRnnh62MrxGVwBumbjhofK6IvcyVn3z2zwFqDsQrByz8mJRur45wsky5nykptOEP95G3ijwCb6QkZxbBd0pD52K8P6vAxsYPC9KxcsmS9e43RyC9G/MUi+PIuwHSEy2jlr7jVVX2LSfA3x6mX+KJ8tiHbgZqVPayrAyneJkUTYK138kg0R/eUCT0F87uuRdXMTH+JexDRJ3KECrqaz1IA0hEDT+VRj1NGbA4ju0LpwEnNRidwBMGn4NFST4h+MgbqR06RiQWndD8YTgV1V1vjWuSqZj5Mehg/hEybkJQTz33bks2ZSLu+N6dKbW59W/Vt6knH95dJBc7WkVEb6xpd6hOUhnj8UFRFDDxuhenmq8oEb+Lp6X+AXz/i4StDwhHel1rM5E8Kz7Wy5mq8CPtkD5GP19CoOyQYUovYEXqDMK/pSmHGgyY+DFpdTgRbYjM0s2y4YDus2BnJmH/4DvYD6b+/EDCuXgcymDc1Z75QWmYYZMuqhHL2V7CWV/nDqOGs0hMG2W39d1PxRXg8OEfw0wZfCG7zpZ8XfSDbwVyJ/B36/91Bg7kovgSo5fMxO44oe1s0OEgnEcmmnZEJ1hwucDK4QyWheqQ4z3AwCpchiSDOVxrLkQ60NDW4Nv9aHNgYOL4QBBRGpGcwnjxm4ReRAfQqUbEKo68XlegwnMlLU15IgoXDIQuvWh2DHJGBln6Cdc6CUuByGXDiz0fMfALcURIM0Cl/H18l+BNJWULHNT4HnvJwiBffq3U7tPoJIt1f3NXH5iyz6HV2lX8dktUHTKEgVijP9FS4t+NXkI0of/rhOxu172nQo2h3zQB67ZAJBqP2dO3iLJS9QTIKY4emzpGxWIpveyqIGvBPNRSNufLzxqVjc014XiNISAFCTlu+7OOnWlTOZ/mM1gRZXJBl8RVfQU9ccRJvUTiJfWxcJpxAUToNjwAZG1fN8X0aEzTm561RXFSZcXgodXjS8TpZ6yzv+AZYXf6OPTwsEk+16ugV71XuzFN1DNIqgJECXSjwasiGbABME/esh9X4X0lP+OA1laICBzCz9/pZcH7ZH5u0HAbNTKK3DE/CnjkR25hcGvCbiGf7baVDDJYCtgn/3KBcNcXhc2TLK8uoptw2hlO+v7P//GIZKv4AApB1bikrGw/dfyL9625QLg9/uLVWeVTP+wSmdHKd0ApzX82ivBEiFboPsSGTb7SRH7d3EgeOJ0FAxF6V2xEFZH+kNKsX9uZgdH/O8m7bTjSFpX+wHGr6C4YbAnUCH0wnsUbHW+fa2r1RHmjT+ItycMdz4Lv+Gpr4wv5oU2YuEO1KtV39U0bZXg5DYFYfrvhtDnR+h/hQaAOcRZqurpnkHi8AqPZqt9UBCsIZUV4grz/UbF+5IMh+fkHd/x1X5gIFrrAlyMYivc+md1XB6T5siB50SNEz8d4USnaJJZjvJD+xhs+C6bjUZTCz1/AxiBBAMRER6nktN1cK9OnMm1+GdfgNFEs/6cL7sbAhxs6UYi9JhR2v4p4HERYAeSCee3fG+SRp3J+MJ5maX298YQADWZ8VIPhqXP9WgJkNZk+UxE+d8Wv3shFROru/xd
vaK1r14mTjyINnkHts+56CGSqyplhUj3srKvijTAegBYIzrdoXRUwoxefeMO1dnU0v/qoSblYaUpVoLt0r215qiNT7E9NvjVV6+EEQmCNQeP8d+J9jd144mrZV8tqlKmzOAD33MvMItdJ9Z8wjbYZ26L+4pZrwOR28Sh0oRvugWTNZAz4od8bcyPJlxGVcW71a6m3q7GV47DuFdsM0GW1L2KRPOD7idTL63Iww/FKlNh/HyohrKfBK9ultbcd0it3fW2iG2olHtGkK0TWUVQz3hOLlJ4qWFJYjTJ/5llweWsp/3bnVZE8eDa0RLHQWVEa0WYtPQhl1rORWBH6WQLyGmcVa2ybC1Tk+NxC0oVCO9B2CYb7REB0vDihVLXrg9y/Kz+wIiEocOHq7YJ/0wUYQUY3pRFFQHo325O+IgSuhHstu4/f7UIsUEq+ntqUw/nDkFCZk6AzSckukIEmWa9H8+fPG0SdxPw0u3HH6RE2j9QTTSKfZgv0a05mrX6+BzW6NVZtJ6r9MuMwTe/Mm6y2udWyhuYtau2RTSyPC8XZVgfsmsxqp/Js7+jU6mguQHySNegcQk+OcdIfR/0VKOExvhXMvLgZcN+jqHbmtHP50a+jxGQqJxviaBNePB3+h1drg9xslykKsXEMuXBAJoob1nQ1cz4Xsm4SJcfXjVeDcfWvEPRX9j+oKMlz9YHsFIaihTC/sVJzdmKmyBc6sUsXbBfWQF/fh9MJruLLqONwRsSox8+GW/4G3LUmAy/eKn24mzCHtMMkFyTfzlIEBVf9PMXToAXa9CwkNhJJmARcVRLhg40rxQ3Tb10Zs8+18sQgSKkxL9ZugAfGO/7NXyuG+gBoM43CtF4I8OnN4C7dzOrOlwSnVZhR87lcWmucbyCtj0AXIqgJjtiLcph/c/WM+rtpuQGWmKwu1n+tXhNkuvl7Rfq5VQXLMQVx5akqwF5b2Z0FC67K3mj+TymZQH611I7i7BXHC2UKjiwGRvKzxwK/ypSoID2KDogCzdBVL0P8PG8lPyOrwS94blQUkalYGNzmokMse2LJvT9sO0THCKHQfokCwE65bDbKMgb8jTpyxtz+GhzzRGOXHIP3uiL/Xy0/obW6Yx3wKZLG96O85Usxcj778JBmOtqGr88sj2W9vx4OnyyL2Scc7E3OSjtbVwAoDwsKyefr7z+NFvWIjpAKaEQjUVXvp/hFfivsL1Kkj8oC6QGk1c5IgbwV9SKZF4Scgz0LBGvTuPTI4+orX12n67QDACmJTL31KJvoOHDPwQjvy6MZMgsLyQ3V8fTc5vlWPLLKTbfMIwpler1cz4tCppPUh8apFMtCafh6G+seQcyiu79rloeZHa71ohk0oIt/vrPy3RzzGUQx7fCZAa5V3HOrrHZW7Qo7nMWvau09e1vrueoD+RDIPK1rcwdg5xTX85FSTz9jaNCzyibyQg4MB3dqfEon/V64UJeW2yUR784TMvt+i9hoRanW5XVhSsBmg8pAfIS66fQ3TLZ3AGbPiMOO5/Y05zfRVNv5LED+ZFrf662EMOl7bJSAnTHFgdid6aEcUivf7HkrzHY89Q9zShy8yA6+de+Wd4bmuKtTODsC/sU3Dz4Q038Q9NXNlntK0saap0u5HxkNR7NlvD8H6fNRVchbi1A/le3YdukrA5QDkzTq539DA4LhTY4/OsrLmX8byRAFaGvt8+5fewD+92KZv9rGlbDTr1KW5cU+ZFMzjnc0D9y88pR6GTEWvomr1RYbXXQNTmQcSo+KQGwhsdP24cSbdp39ikGs1BYzuK/4Gx0jMO1nCFlE3QFdzZ2IuGciUPOu+TL+QDrOlIcKJTP38VOIWPMC4HiJlsNJEnr8/zdbOArGi/YL/OOsl3Gj1gdI0dL5bwAd+fE6sXnAmaXmPUGtwqD7/RXC3GAo6od8IZKlZKKpe1nq5kmHsH2RkRFWZtHm3YYCEdmxLG/UicV/E3WAgW1kZ78+ghc02RXrOAwfVDxv0kgubxbiW1qic0kktpyxf0Vr8/HM8LIjTrFzwdzlF6PBbqgLfJCvdRTVGZvPTygIof2iEu0ZKhPJQQApYif/lK0VKxBZWvs3SwMOLXTUOFF5s6D2D56nHu8xmNXCls5KURYHBOd8LceaSIDz8S7aqHqer/5iMIahAox7R66D5Fm/KQcbo9hWHcLEbmG+LdZhnf4Wnr1xoTa2LlIOoQobGs747I8f7GPb6Dga2SVoVSx2o0IcYon+O00INllJR3qIs0jsJ3VwAKiXxpE93YOdE/vpE/D+jST4YDLCImm1BcKFa/b1JXW/nYhliuq/a40CJlVOYxsTUjWTWwpm7MINgfTG7P+xszo+PRyw8pf5pcvPblFKsPluNUv43dYA4x3LK/wc8CtbqX60qBg49YT+wLhc8Ufl6ZS+0y7rRc/UBFaEzjDXmpsVoulY9mxTjN7TtGoLRAzlfZZIVYk//WL6CbtyKHVg5KbLXeKt4az/iUKsKyUPNjELhMxY5kFpClAHdIXiOutg5Tl342wC6ZqvFHP+97veflCW88+yfM+6tL06WDKjuGn4kl/k5Ro8D+cgTCIPAxbs7xH8Lrs5MKGnPTouDbgt3jm1vlHaqehgyh+K2F068jU/B7GKcax7n13m+WDFRAIhgErZ7ixDibNs3tpPTixFxcVAmTkAImIfV0WBjbAEi2OgMws9Gy4AgxE5II0hHJPrGWPUx4T/qbOyMydzdJ0VMnWfoIxSy8x+6gFan0Lcp9YjVwGfTt/V1qqjixc+aHEpdGj2Fb9lVA5BkzJ/Q+Ei/hr54Nfh5EGQc1vkLK++W5Ef0uJXzXMvbOCEJmd7caAUqSm/3enk/JPKxnDvWkVsipEOcLs9u+LP0zcVSNSTb0GIuQGk6u9qCh3WQzrZ8Mdb2XtegfD53rsnWfJDXXfxEj0NKYOeDctUS6RUaoNi5lCOPnLUKKb+nlh0fwMB9fEBP95XS86dnlhD6eKYmaa+YDolHxb+G1lOsxEB4By9nZD5/5s0JDXsCz2Jh5aqlRl+PFTP6WrZhZ8OMg3YWDCvOY9WpisTezY+XQI5kpetbszwf9i4zjxKCobChAiGy4VOdpE/fIdKED/WiXglq6SCrmkPVRLaDWFTasZXtYGBuY9wJJwryLqyXA1/GV1qLxy1x/DBn9SM1GVmEnUkk2BqfGQ5OpXwn95GsXwSovcrztTdzYC/wVp4Y3O3M0b25eXSQl/nAzwbwDPLwePTCKm/vqho19IGPUVTk9k032uufaQZNa/XOm4lXrAtfiNQwQ4W2qIviN6T0TZORg5BLMtq0EdpCWky2QSGbpcpmt2DLe0cBz0sxdTpRgfYp9WMiRa9seSUd55WnGcLXb9KLp8urWtGDcPiPfR+nlWGg2D6kygBO7yF9RkWo8hDsNB6eR/d0//CdH8OJSmODNxyl3lHqCAgCAcZBhARoTW51ZjQkrWLTI3PRT7u3PulWGLpviP0dUP/DdoqBnOgy/Vdc5tpowRwT8QO/FeZh2v9GBjMzI9OIuHsg5OsFJf27JNovcTZFRXIh2Up0uscCr
5GvvamKtXsGKYMTOjVb3m924CftSaJJv71y//BZ2f/0hHkWR/5xn6iOJjFl+iNQh0VMFjOawwhZs+yn7+7dNmlldqEZfMWtn1KJfUwN9aV/yq7PSa2AHwvj5L9Qss4SrCj3/X5FmTJqn1PDwHEy6Ksq8pcl9ZB5INPcOsAj9Z0VItlY8B++xtw3wefjLwtQDuCrE8MBLqFBS1YCPkJRC+AEjN31wfoSsv6vnr6hJ3X5CKag7L3Va276ZffmLSReujdE08TTGjFpifuxlIIq5m4Rv/ZmurMu9v7hV3dXr+/Zs3EkEm/0XNrClc/OeCftDPPva3jnwIvXOez89ywFIc+qWR3mYawccwU7sBzIsuyBrGeCnW56+qE4vXGlFlFfhBX09CQVjP2JYBv9IA7b2M2v9km1HEQOHUnt9OCUz4b1RGahszcJg1/J/WV3Ns+3Ys8tbzrcCp2xwa0rd8wvvy/AFUxrwsHXezzOkaxQ/mv9u7u/85Mm6IsIaB/tqupN7dSIrQ7k/aaOgSUi8Jcq+q/7MbYLr731ftyWiLpH0kO0F5VhvCyzIVXHDcaIFlF6n/spWDW21d18mW/yI+NqqMe7De2U+a+N/Y+1eyp+fLb4cNeAW16q+s7v/SKvIpI1axSRG7brwPYxAto9BB/P4Op4LX0v4UW3evmU2091RHf/fV0eX+QDR0YQSZf/XKv5FKWNGCZp3fzaO0pJEIZszfSW/YFS8PqGATy58SJ5Id+FWfF/ndn8GXkVl9VaVjDUMD7fAUOnyXi7dtc9Rhk03QNNYGB5j2pxcAI/k6sDLMHfW+mKz85fDbFagJAvhQfdXvWz+7LWC/X453/EjlKBBxKZczLwzav+nj+d3pPOGa8GdrGvn0K0tbKV+z/pBYUWZaQzz/Vv+NPFNKDCyGn4k2l2af8/LiBoBuLMir4sXvTneKBhNrifrBS9S+nheIevhyUvvRLJwiy4mZhSnF26yEl9+hmi6ZfL6r2vzVey9rGD3s51WM9dgbx0sQHK6DLAOaOMbbBrglsEkl4E1pMarPVZrEh0f8rvMAYr5oQ/rWqdjZpWraN7sxcWFsDnjEgAQcu5BT5yQ+f1Qk8ndFGjsFjMWdtUy0hz48+7d/D+qR+8nl1kP6LvgpRAf4Yqg462HnNx3Tmo5aenLyZeWLZM05U8KivUrg2Qlcfkl8EKKQfwdE5O08WBUWF4w+PnqsJiXXCRXuIP8CNcDnHzbSkRp3oxjZpPaBaiU6cagQM3KS61Y0KbIYdVnluvXwveEbBfD2ile8Isk2dy9ntCqzSXe0GOSe2KAJ1HLnuLhEQEH+dUnQyFge9oOnsKXg6RjMngJRaN77mbnFgovNGed4LGuhMBUpSxcw29i5BK5Pkq1ph5irSpRB1GA1tUpSUcmLv2M7ZIpP5YzOzwvZUSvsygjUNtw9pp/m3pnxJfpFbFTlCflCrLKGH9cCdP74gHY54sI6wY/vrxJAddCM+EPcXQo7/JQpUkScG8QqlUyAaDWROg38sQ+IFg+bOv0F14oUzsFI3VYU/OpMFIVYpK9ahu5VKqSMS49+T2KdNtSCvkqpXMsb4WDfPmd58MDjV+RHHyUdtFeAz3qEPrvQ6oBjsq5dqUJxTf7fZPooL6w7MKwyNu/wxPWI488GSvFGSb7aKLkdR/rMQRonhv0M3rM/RZXI+incto8JPVCfzJ75gSGnzW8ZQaZIV2SwmD0SI7qiE+CLeQ9Vui37EIjFkQI2iapPdf3uC0rlt8EJ7eZBDMfv5KmLiWoFVsHEIUiMWBn0HAUHcKzFCvpnfkI56nCcwRHw46sywzQ62uH3GzUqpUP66yJBQ5Nd8K6SQJbe/VVBfxJx+b6eZtwHb+o3iHRi7mg9i7aa7W4lavCze32tYVlVIFILpcg7V8HprdymogEFjEV2miwsyvz2AWWb1kGzZvhIgXJgX5iwCa76opnG3C/9vq8mMAVqyUiInO1O8sy2xVFExROtDrbEHfsyttrCXCYCNz9gR6CybC5LJ+/+H/pIxgolzKkGLzOW7Usr4oDD8aDaG7QmmEWGLGsmSILHUdkaj+jU3CHmfBF3WgFYkU+7e+h77ix71t38jjjoVpQwLYBebD4v2vT8BWoC55OVQdr9JGqQE6DrVnfh31YyXxHVIPF33ZsB0d/l2gusLd4HcjnLxwea3CQl/41tMIHipk65AoIr1eDVLn9mZg8Yinlb0RnBfFQieRAbAe9XRq8EoiUJlEH5nq7DOnT5YUrvOqL8+8cpAV9ih1E7VSk8RoIgpvqnuS3/a8voLQ3h2FzetVzuxqPPgqH7qOUwpRmvDCGUAM5fooM0R4nOYH8RV7wiOLOwTunj9EeggN4EyN3g8liQEUIev/KnBt+zgf1P9C1aROG99TensuG2ao/qpun0wN/8R/rNtlkGyoSref4hpBK/Qiv65WQ/DRWbyHFughBPLllL9VsRFI4g2ILNdWc8YYJLMPPgxp4rs5M8E1C2PemmoAsm5egpGFPR6054YxxEaobzE3+sqLG+wxB/fW44Ir+G1JDlSs1Aqg4yi3ZBqoT/QZENqBxeMJ5cSVC64zS/s6zF1NBnmOM9GnVGtxcAkFJmxg7mAaqeRxLZHTF9grjQfscF+KswEPeOEFsQTl2s/fGrbZzSE86XnzSmv2Enomi/a7WBFBFQI06zJ5PAx3GIsZlJsVP1tS5vEdsz2NHqdkqHEM6alkX76n9iEN1xi7TFiCeIGtJa7QlRISlzxCSFE1/J2+uWPH9Mde7vseTjq/e+nlsG7uSov4VQOzc7/DFlhF9965vbY4ub08XPAUjPJ+a5sw1PaMSd/Jr4AakBzMR2rwsh0Z1rrZvBIqA0ABGKxfHtd5WMAaEMRfIXyvwgVL+1erDt8yIwgWxXuoDSpGb8+X6F5u5FHvcq0dnNGPPkoO5AsYJYUIzL6osY0ol/vZX6m3wgYCWYWwZ6wIolO/xR15d75H5pU8gXhrshdEGyCnuS2cbuL1I+iH68YYF9VppSRJ56DZ4cSZDHzsJoR7arCBgiWLiJJm3djP6uOYf1L1adfzWpxXUkx0upJIgUwaEMfRtsf1wU9q06nJksw2Vtjb4j+XtzdIqY+TkTDRkHy/asV+xnaCo1c/3UGf6Tvp/IbwlxiFkHv24IcbQlBFKeqdscPjWIE2n3Ymx38hq3FaX289dfrhnMOhwAQh4vb7txDUH3+q+BL19acKwDrzv/okTRp2mroBDHETT9Ka/zJZ0nymiI/PRW+OCXvToESZmJV05yOuwr+HpT13JkgzQQz4kQ6gP/V+td7kbVXnzsm0y2k4xATkDmc8uOccxe6BW5Of1UbSx7YJleCFBxkkMROWPLC2cgYYQD/pN0UyB/LWHIt+bCAfEHtwoCu2RYzrObKzFC0esOGllUOrGoXn2Byi1WhFgTZELzWAXUv6x3i4CVfoNlq1ryNJLpW7p4TaMGOhZm7q+MAlDUHBFueWTnu7JLetHoQLr6bRnn/ChL0qX+Bo9F9+f36v87aFzwStEKG57/jMs897SopafxERLljV5/41Mbi5
FKpEHJQL+nJf1ZEr2ms6lV6sjPTF4zrsRA6/HjrgA2EtuEyC2TwkbJKMjnPoLpyWdAt1+dfM22OFKa+GswaOrf9ryYb5U0ZQUKx8a5WKNdxXdQ1sKm5XqL046Bn5uptjTaVIi+cA/OAbXipZgWG308eEQH6NM3OCxtLRSIRTkW06exZHnsnoNkPojVrG4fEjdYwswR5tflKRCTRDr3YianPMLA850DTrXFJdib8VJziHCZqfgT5bf2V93c0z8V/+QJ9YrgmBZHcid8dfAeaHP2YHFnD2N/5QcXxq/8+qhcLgLOaONDmuj9jJrYXS+5atN8eQIDe40aJn6v2Ffmi97J/oX9L/ylMXjr/zpO3WSpZOBoMsF/ZkLO0ZdPzYJrVd+2gcydmp5/NxSpXQ+nFOQEhw7qz5vF0KytRBYy6d1PQhsXMBxV/H7sSZY9kX7KcOXNxUHH+UXMvcL9IKRAYQXIN1aE8cHV5ZDcs1Pmv3nNlJdvItuf6NW/seAiooGMj7IyQXaZUSsxLcoJibcUBIf6kzi08L80XcWW5Miy/Jq3F8NSSjEz7ZSiFDN+/VVUz9tMn6lToFS4m5s5BXJ/l7BYPB+GGXaauRdOJTV0nKTDJ7b13cTYu8yWv5DEtnGfsZj5K+Wd1yZinJ7HhRmTG8ARmCDISQSQ9hBg99IXXrTl8C53w8RQkj/tK32Ara9pfx179jfvOF53EWcv4zcT70W75aIpriCrKJrVHVvlcA9x9Bsu0rBCeSLAYb2TUTfuyKPjxREf3zIzW4ZfAwbuOd0GF64BM4l0czEc2AADMdLU0vnou6TBWQ0CdCKyEFUoC39gieL9kqPiACpUcR/NXypvhN+0tMhvn1WR4PQei4p7VXXdv+pqsHl0OjjPkD9WKyXZh+dLL4xvPUm7vrpjibXHvSeeWlV3PYqphQDh8lyJ3dDbVP0mX+6IKhAEtp0+SQ/WPdWZCkXlC+GMgA8h5ktfC72gCoyAqOWi5iuLoqVCFz9jkr1zPt/za7u3rnSBQfvD3Ws++opm5CMi4LOry1gGvIVF/NzNmDlQhx0yKVpgybmAEMCjTdG48yWryXquu3ES3QbK4BEIgiqCZYXb6Q+8ImTCTiL0mcD0vJ+hh755DFPwIjQ8BwWgFRhQDkoujanVIvRFFpK+uQ3oFcrD2i9MzOL+Gewup0NE39jiF6To2IA/5tIqWRKZ1fUeyv+0b/5KMHiZfqBd83DrDZ+Wp1glQ7hOiiq9JN6pj5ZlzU7u5/Q18VmpmlfaKj43fxXieH75QV8F4iHuRAaOmk1nL5YtZhh+DxmGY5+3ulF8n3S+t2C3BESYGvNc/FWvyKfntj2lYVTYNutBd9iYJpYD7gYQ4m5BPryqBSE3TEdSX6uq0S9rcyG3XB4vbsMx3VkcybXo5T6E8SGVbQKxSDpi3lHnEepvJEqFK8X+olBr9T88J52IL3gVD08igO20UYWs5gOvcdDPSGjFaTa7hKOVrDD3H8AFItvpnNqI3MsqpgevEpqfFvx6Bt/Cj66rfmpMcMS94LFyMmaNUU/X0hrKodYlQYm90bRowxRKD+T6t6wEZD/zsLLIVwh8ezPq7q5VPWx/5rSu5h79aH5Nzfnx42dn0o/dMFTIXJ5eTlmQLs9XeSuHcK+g38NUJLUOy6SRZjUzIU2HDV6uKDTdDxtkNINYIwlU8c8iFpBbukdrVdclWn+z5tw2gnw6HveiePS93fsE2Jr9HDoZa3Xt5jqgg4qocYLVv4qqkLLgjv2ag4Z5lp4U5ZVFDZHfkMbkhm5F5Z95HAtyVcGr4dEdyw1rwPsAAwFRjYYQMTYu/A0YQOkf6fN9ztQiqPezWzX/da2gn6W4m3jdxHspY1oBdIA9WYDVWJdfgLlRzU3IVjU2AiY1YAuXkOXIsbFKxViV5UfLvQf3l7vBH7HOTdFQq3josy6Beklai4kTYc+aQJCvYv8YC5rOLkRQEfV9GuzoNeMe6kN4rbshCzjH+ZpMpnuJgtTH1UhbkiY+ogJTK4OIZBG8WgeO429+5lNxc+mCQ5PZ0K6iYGUnSCE8wKR/Ifj56vaGbvsyoDYId0mVZ40W//5Wz9lzYBIOPXFLsenrlw3FUynNkaijy4jfiA94z5aYr1wkfcvQLLnLT8ryHw1h1iHdiZ/DO69Gez+e/Kxo3MEL8GO+6BCd+itfBr3etHOh9U+Bs/K+hZsL9Xhg2pj615bt9n83O8aon8ZMN32QD7V/QAjRPfQVB6WXeZXQ12TZJBHJ1TXQGKCHAkR6+EOhLYXGOkHQ+cSl9PrFSMo0p7kS9KITTOUFyPEv27S6iW9uesRDuvriElkePIiCKZpFPjTFAYuVJhV+xd8xS3UB2xhQAgcXyHz59ymgbGGpy8/79+EdJe7Csp5MZZmZAahjF4WMMDnStTN/jVC40ccQJhVFyQN3Bq1DEirGWrmCpJzAeRuG73SElT1pxjxuyRDEzo+CgX9uJcdQIhe06K/ar9EhMYU7fCPUL/ssO93EUpUJCAnxn37ZtuDnMXdDP8Fxjn97dx8hXpYN+iZi95O1lXyAeoO6xZb6tsaZ39GlzwGX49kAs876K3b+8vEkzVOPWf7AuNIMdkW+5K5sLlX/HSSdwIjWTzJVS1q3xzIKenG28TYj8laqXID0bKZLSVi/6d2ZB3bd8EzmRR6szItwP2lm8wpKlljbtGu9CXfr0HCjHY8K9sWckN/9RcJbTVThU7/YFbe6BLRbCfmLKxG0cXVC2d9NKAaVPM4vtS6t4duxnTuuL1ABJzh2FusMgUXs0t8Qpbtz2rPLSylQPYeXyMfoU6MJUA9ybt8autKrrZcXXfIhXsNn/ZaUxEVrOGmVDQSitMpPOO3ZXpqHHYPcgHCyBBOm3IPrPtSBp9tKOostw+Rz9p4RcHI//19+hlt/OhhrFVLJHMdQD+HcWtShvr5ftxrJ7MhLkNdIyZK8E+iYcZtDF3qzHF+ktpzq4Y6tIvb9jD0sd+Ls6Cp0WMka+mUizB5UfoMZq4O254b7nEAONdKawkr9KpCk3d8eBRY5dlatVv1L9I0MB5qrvi6y5doCcqprqR/CkjfkkVN5j+6ngm1SuvUmdGixyLbVEtMWTwe09MgROyfe97hIA0QwHseCEFcLZn25wnfYd3R/lTs8KRWH2vmegElQFvLM3VOnrWu+v5DTiRMIRUGk7uIkzr+VSINRsxJBTfNMlo8mf89XJTVBFBi188f5heLUMdBl4Jt7Si2T/50yyl9BYXr525v+hY4qd4uPLB/H8ln9vd3F9L4XlzK7Zc4iL3qd7lruL945KW8OaMftyNWRe3Z8P/2pE3veLvSNeA84szQZpj19xQ6FcTEEp1lhVqx912yPrW4Xwk55arg12MEg1A4+Jfqj5cbwdLiE0ix2uRCLFV5laN5vB7SItEJvFn/ARxqN/dl8h4/lUS0DrDMmwTUvBqEIBdhe+O1M9UxgrAm+iP+YVuPQhIQTIO4wM9L35deYP212pMoRGYV1LiPk68HrX/ofJEleh0Q6CnEUxa3rrULvH0pxWCsD7PtXmzOcc
swfegEH8MAvRbrELnA1AY1G8jllckX4Sqz8tV4ucXWveNSbMDiJQ9nuMR41pGlBzNMAiwW3C6iu+Yv72MsQxesIynN7Z8rs6eXQxLOTMcIlg5KeG7oRP927SLIN/6Z1wa2HQifYAwq6gcJn+gYd6szzYH0Q2440NHdRf1R7SPmAz8X6zf+3CoMinQ0cvCe/jPrz/XxUNFd6jHooWi5E94TRLx70cB2fNECqtNS+UvA9/IhkKyRqagUtwGcXqCsz559rjHo3XpE5oisCeQSNo4MX6Y//F4FQFfTpYCizoWqMjG23nvN+9wJVgPykBtxbCPVa7SYCjX043yGUuNLAMYvcVHGKQ+jAjhj9M8s+zi7yj/3FEjhJrc2NmnCTRKLZ/JWFkL5xOx57MNFALet8Y6E48lACH6INi0lgImlKOUYK0E34NLjMYgnrRyqZL9uxjw6lR0thuAE1+9K2MeQrFJwF/1sIVoMR4+sFOn9GH8Q4rL/xGDHmA0DU1wCVNirItvSNjrVe/vADaE0I3SAUpx4mrT+X3HqDDdVlm43XF/xQ8AgZhKkB0FIaY6GUvfjgjfIDUR/9lFMvZfxbF9zniSsq9eZT45GNLe90PzKlQOqnPVQVWnQyLP4yVDMRgtOljD/d7OjbyDUdQrgi7UON2O14UeGksfi2E53zC9XBfOqq8OXPOFqs7+/6Yh//W3d4o52FkbUY1+OlRLvCLCoBQXbohlD8gwP81VM/Yj4dmNv8Sc7WO7ZaI9XExUibmSjFEeH9w5ZzLzbrQ9o/41nS/Qt3Dmo/wxPrwdQ4/9UV1balvQBXT6fZTjJE//xzP47rGztZONFs9trqeCdYU4AKgcWqn5QG1gRSIS2V3AeleQf9ncAMj9B96lhZYgAQzwRm5y2ktL0RGLwmqQFovGiNDi3htg6Deix6IjnHnzuiM3+9sAaBVOU1fWEv408I9FZ93Mlq2cemkBQwUofTv75amJIJBm12eG19Wl/dA8rC33amZiYie156r8T/S8CmljgmzicYpzfGUiBlT25o4AoaZGMLXnAEgimPjJcWqtMRW3gT7vG0J9E91nPQDJPxR+4U8DHKv8K/PqZU+Tc34hV7uv5t8ivm+cCUglKJOb68T8IDrZriKOkDRBCXCcZBgon6YVcIrTFpl8k/nULuKHvAmCQQLl8picMD8lf8jmKPGcFuGDHbLYQf6WgR1PizcCPeyY9P/GQdkDaAWyOoemdGoXaELbhI8kLC+wVayqPAhqZJveG/1TMY2mKAx0GFYNcT3AMgIvYZAV9bCdIgM/8bwSQ8c92WY2ZbCq3mdsGnw+r6Uj/mnCFGVOzqgrSpmf57+JJwwPV17C/afblbQLIzrpF6SbZUjfAYziaNizyj9IYBBVEt61hllZ1McM4IPKT2SgzPV9Svft3NSRnLZu2TxsRCEggVzx9ilWBasyte4dBr8pu9A5kSFq1xA+1V5Bc02Jda77qGKaU96LmLfiWDpNowYB3wyxLkkPZka+xxuEfJOoxvicF6mZt1wmQyAv3NIPh6w55EFtuKXdufzNyorSua6h//FoLkL3lQmvH0tY5Kgqwkc+snqwFYrvUp3HKIl77xdd3inuOp1IgZfSX2HPbnmJUZahDeeZvY/qrCD1/ijvW3CSrnki1yJnH6evIssrrlkDl4fT0r5PS9nZiRguuFAK3QjEdMn5Wf/zZ/dmidb3ReCKXr+ZIZbiCvcfhUtrVQkiEhpxgMgK9rHVDPNbpHQGiXK58EPgxpir5yb8zfA7VJW/a14xOD9s+NnP1q8BXR4PcjsB5O4ewO3a3gl+A8p4Xrcd1UcbTqPVjOEdkv6ZASIDmmKGf9/Vh+Xwjn44Pz6b/Okpj2sZ4Rg9GUSCQW5HSCB3y5CKwbo5bgGLMsOctPCxD3pkW8fyhp1xtItmNJSBpHGP5Jg9AFjOwp6gM150brN3+iKWhe4q+XORvSbzlKp+dcm5V0uh4eqkdAILWwh75FvsgPh/pM28irVMrjBWoPRnDPBTR2Zh1Q9smvaebRCvKQr5pw5hCXK/sTCGbBMfKjBqsLIXIpf0hKcyJnd/IHzg6DMO4nJ1X6PV+WvaPOm+NU7CkOdepGd1as8uGPV7+/V6g/r2XvFpTfkjA2ttzXtAWT0qPB8pzcAWeH8QBZtLFQ5UWCg16qNeNYiWeIuS/kv3pdkXfkhy3JapAAjg3xz0srivxHHY7PfJTrwm17StfgNgRgCQzHNpQ4f9VXCRWrsC1AR/4IBTdmnhKKbHaovzb+MZsiQfkBcr2OuVytfu9Ci3rTNKmIeY9pZh6KhdkddAsejh1IGmvph+jCJTEJXCsd1yR3rN7DdSkkfyxbvqUMzZDqUf59OSMze7ogcWmOSAc1xBKUSARSQbbSHR4PKwBREjQ9mjOjmpRrRxlE9cGSUkGHuKgnht3eIjAcJYCvzVGazSJpsBFiXR970sgeAr9g9tmmhfq67wUTlAeVXkbC3+Q6DP59Q8D0SQAJdV49qmnGsNAFb78KfpTys1HIaTO50GHQ9DgeC9HA3+LzpyhRh93Pan5OXX/DibCwvVZmotmC3lc2hl7qtoETiOju1NgVljEdo6zycA40WShxSdNppANTMgyzAtP59BTERCAImy/MJ6fIXLTfRVrM7mPPrmbgkWd1iXlYL6cH7mCE3xw+Z0ky0S/h1tXYpyfMRSf+JQLfTBOjSRoy1ZaN6GL9e88SS/+tei1yhN7fwGWn8jaN6eV8C7E8ogS9u7s/u/QupowUosRutC83To7Bf7HBz5JP6gYfx+N1O9lKL8Qp7b+gvuZm8Xn/jpXWjZxPai26vrt6Ntq5LyQnhDo4vJY6WticxxvKq3yn7MKW56WW1B/+1Q2UBDh0s1FIFpUSG4IlS+tl/PXJJ1m2Zww/KjYzaBPk3ACprZBiF/7BoJFdXPMCCA7S2cBX07+e3qyZo3JVm/YQ3GwO8tCduPHSfZ18MPPH73y2qTfu254YytrfnQkqtq/tTRm5eudk8lcwBgKOT26sUMx0FmxV2Qw1Cw1Sfg+J6ItLHP6NA972+OWhJM70Bslm9gD7A9jvS9ueLaCCdYXmujydwox8m+hJ+ggAMflULxHuKxvZmtB1H0zGOWsqR7U624/CEpmKVILURqgzLFBZNDrEO5eFQ9U+XkZCLH/DHuDjSt0g8LKsPXXQkQYhh3/XspQFTbES1SXogZR9gSwBKCzS4CXt9G7qKluZPWv9PuRTx/jGKB83hH7SKcY1yxCMnbkazuh6lCMaOafPMgU8QedI35Hf9QEUFy0SJAlDJIGhCSHyV4Jae9+ieYcDgMrx//JSf4ty2scDExOXv/Wb/NkNp/5O0haz2guyG6JHZt/Vrkllmc4zlhjAhYm/er68Rjb69kj8XAqH5bdBEiAexizDveG7p+ZMWPFpxRUTBF+QqpADXekdI7+t75fGPtbXrAkvWOBjCXBUGx20U/VFqT/a83K8x9dd4T51fy1aeIJ8shzbzGos8J7ybtpE0UNL9HlezTGiQPALnvPZNjazsINw74pbn0FuxYpQPo7bGRBt
HfVFASXASE6M/b6vzBenA6QNBfP2adtQ54hg0qDb1k3g9jx5hR3IwTM6NbdMNLC8sc9TlPgYvhNiqOhxPGwG1LwU83QMRI6nVN358XDlvZoZfnXPmn6B1zEf2K3EmizjRpZm9pOCmkLPm1fcEfbGyaf4ghjV1rgv+UOrLt4MuisG6rGJr/aNm6bsX7DdhHPD3/HAvtCGoOXLzuHYNLuAp7jBp0u4Gd5wH9sWEul4fUbMSMev4myc/Ua1myJANYbsB7LyZzQBpb4+UO4KVYHW5a9qnQBP3NmW0Tzm6LH5SCd3+d0lWyoaXq50XvCiKuoOIiYqmKgebBDGerGSGecBdi5dLlRUnneSi90oqkDX8TGjDRys6/6QPKE8i0fm3aUpaCiRLrd2Tkj82u6/7B6Sr35rg/Imc3BpqdBRnNMBU87w8cc7FVsGb9KN0jkeZE0GUmKjhs+Lt9pziiMQqd0RiroTnxOT3zZXLWDA83Dd80p3M3TiwieQ6Ho80if9PZ0PxNDuVo/bCW38n8wF6OL+muGBIQC4h0nxERfUw160Elqj4CwkYjA1o0Lqd/eLb4/gJn3NrIVdCU6zbAEpFfy1Q9nfsBiZisIqcDxY/lfZeDzC5kJLVCN8yLfCke63jYce1GMt4G9nmcmf8ZaF7/NV4cA8avKDq3Gf47AiWOjS8eXQRZ57l3xx/aegNaUmAvGHZhp+UgcDxm+pPnmCMpqlHMzxmXf8WoOkjbEdDHMqf1i50WB5pC4fHDZwo/iG0iPDLtCDEHVvYihSykuWL36KgZBvDFrgRyT7sFG3TWiVlp9I328fG/FAum3ubyYn6FSZsgPHsfw1fG++tzlUtPLbnHgvz+evK6CvNY81/qMGAbNs0bzRaT2n7fDm3TcrgfEsjECBbLqfjXZcgXCtKEY5lwUDxKB4QABzkUTYLC7hIvOuBR8m+oppuDCNoq4YyJxR6ulGMpsPDaABgOInSlXdEQmz60binaBuxV3QWf+UAZyemrPZWssBeKHOatpx0c3NXr36hETrFFV79ykkv1/uUpVvbEOb0qdhnTN/+ey4xsPevdgOM1As5aXyRYVCJoZY7VH1pq4oJgrte02bCLdt28Ke2edhwsED1estPScuX1Ifs+pPntgjnPE+jl0Pxj6C10CT3ONeYR4Zk9bkLj9X8uij1EhXX3OvFQ6WYYsG/+7fn80qQe+ysUxrAXjoEmTwVeXI1ImLoCW0USL8qVm8ZjZu3Cs/NCQZyWd8m8bBu0p1h7aNKxiv3w5+EZg3WSr2LfI2vmj1SSP1OBu3Lp6fOg1daS7NH8DZUSrRqO0/bwtT5VzCWfrQFNblz59SP1uwS+f0jmXgHMQu+ndNjhhmUA4Zjm/WHravSi8L+oOlvXenh7KXDYXDwY80jJmRFrNXEvgSjjme/zzYJitaX5MupH1pgfImpnFViI6o5ZwDxhkW9UN35PvQkqMo9vv889eRKl9DGFy1qDb6YIL+UHOyit7zlVp458OWYGTmlLh1UyJTZCBuyVI6zHyJ29jEIVO3IUKHeP5XAGK658b/iLj0Y6bmIDpkOI5jUDGZ6VKsiNNAztl3H8P9ugZSqC7YKlC2qztfDZbsjTOPuX0mGxAWzRvxw1C3zYW5RSa1NmHwO6/BQ+tP4iWp2IlJudsL+j4R6+RC7+rn1TeO49oUqoGDi1fTMN5/DsR7O+JNQn4FbESYjXutqer/6N1DNfgNQ6fFJf9JftSZBfOQA2wNcASn/V3yzdHteWP4HGMdSY2JMeL42CgDSGxeTNWwuMPIHbIPkTCpTSDtRcR6iJWJ+EQAg0meZCu9cXqKwwhSO0fI6o3ItdYeF9e/Xb9/mkkMfRgklsMLT/8K2RsShEKPNDVJ1b0Sfd5rtTipxnYdMPH3XXNYS9HL+KkefohqdFOMl/1Y9Utp/LkZrD+HyVjKb78sU24dqH2Nw9wGdzUesz/eFEn4bzehpc7FhGFRW6v8bUYnlgHRQii9m2zhvR+WGvpiSYcNcSv/46Ng09EwJzfzRvXRkNexb2aPLzEUll75np6nC6MdJOLZFcN36nfAxaqyf5tyOf7TQLoQjdFyjqmzo82caHtcL8PDMZsJ98SIYrHiVfqTxHMw6tewBYqOIOr7Sq3n8Fdd7lG2pjz6aeGtAXXz0Ibw+eCkakwX7/3ffvo5cNKGzIRespV25Nv5zgydeL/X2FwK6gwe6UDo9/8d+7F1mFw01V07kkBH/f7Uf9uXPsSugMGQMuCkDbZeyw/XscIVXUIhWn1yVi9Is4SPhe46BBypeArm5k/UU384QWMrHe96Z+IbJUTOVyI5pXWLS/gzLgnt90eb4fBmjs5ISKOz894QaYcmu8tifDUtplyHidZeQoimHnr4xUKX3gfwS2qbo1JjyN1qafyj0qs1X2S7YoqCnAJZ+dxjOSwIJvF5YiTdRUWop4Jbk9EyuqGXYkf7WsCT9RkU35FD10QLbh0ME/kPUpfgv0MwAawpmWEIEYE+X5w/dR8pJqwFMnQ/DVXyq/bmPP+vsvnEON85dIJK4FbaXyLkZuGgufjyI+s7z1iDqaPv54219SLmhJDN9aaBoI5Y0x5s9CM08VQEHxM10q2MI73Lsub2Oiw2GOSzej+BspfoFDaTiNANC2rhBMUWtZ7po28kZ0i8Ncv9s1dZbM9AAtQoXYlV9idrw/p2vci300heAbc9R6Tv/Z1rKR7DciLKsiVVmlmg2Tf6jobaasU4ERmniy8AL789jvgf+whDphPN/ziLp1BSuT7+8BUo77K9BD0QNZd/+GEmGf5cn7QYNcVOnKH3VQI8JDm0lJxxdDp8japOMzCJNXl792vyND/WsZ/3Lgj6jFvzT0RgSDq1GUsRcTAcgsoVdd3DcDKq33RfmgdLAdE8YS+sbyb7xsgy9IS3JO9D2M+3vJXxE7bP3bmF/o5sSqZCPuhA6ATNqhiNCCpDODhTX5hTut/mPiVmx3oJmcolYg3Az2mOvx6eHOnNWP1eAA7kuAbeFbsI9C7Du0qsj4D9UQYYYLsO6DeyMqZDx73qdQqZcLFT3Nh+Idx8ogdNiWLyQm4V9OR0cjQVsN7AndIjuKah4tiTRcdfIG7NhHqPl8D4Hm0CDTeBRGsl2L93xT8tfsKhNASiTdJTbt7Mg80B4xjfQh/tp0ZS/UThGfIy3m5IvR/bu46NMELanygUtkhTBTONbPMpHP0D/mwgi106Pc0ZDOdYY4xbLfOyKnnfBudkbxlJZsQ/GG+YgicCSYM4jgRv4JmG4riJhmyMVyk/hX5EUA33B9i8R/Kk8O/H83r3wf0lzUm38ryMEyRkubYTn+uOYbA4AGmt8y/jT2mM+VYxSm5nd5+lFBH0LzV83oMQ6iPtTPg1ci1Lnc0hPng2YfOqSipc80REsbmRo7NgtThw3P62txpHhqAPvPzc9EDYwzGeAVLdvq0eoBKQPulgUEZM6bDFnAS23LyaqoWQ8OnBrtXFZid5T1k7k7hWogV8i14MrUpA9ytIKMFUQQexdsVOeWazg2c41SwGvFKb+mJ8e5QDqfBgF3L0cFM
7jVd3cRQ5Qn2a3cXRWNMS6k5CUFXvdo89fbLk5UQOXdco5uIAazb6ZLfzoiOE5QMyCOcwEbRNh5Sa1STFeHREDKL8z6kBpzaoNvVgrYVwX9MWUJ1R1RNz0UMu5zsAaP8UpDwYg8yoLjyUiM5lY+ws1x/840I6tAUxoOIX4QSJ9f4faZdPV7f0uMnEyd/u5IXAEcZF0R3N6KRaDhil43Ip2X+bsK4M1m41DSiTSEfk2HZ4irzObfzSksI+lg7pAjbO5ZiH7NvRqIj5TmOw0U8BZ+7oVaYuLxNpH2Q3DQCxz92elrw9iLYzZXODniJ1/CkSiinK8iB2SzDURr/7k3MZjs6WX9+4+R3e3+rMxd/LpRG8j4yh7X1DBSRrEuHPvaWy9ZxbWUU0pzp4bujL/tBE8F7pvZJwzF3USsAT1fUEnvzNDbqhQb18HQ2TVSIopWyrtN5sBACghDwAt1qdxv54Es31qdHBSR19aXQA0thKrggI7fwUgqymb31xwM4JzCuvvlHDI2enAtX5pvzkdi61mzeg+Il7BWVYvs/o0JuyVx0woeZUcL/el/d23kmNSdQ8zPW/LTLvIPN0gIdp1CD9ThJEhhF/cOl7Qd0bQMg6KWJLvc58p9pKkWTUcbmcHDJNl/69fX+aiQ9BLKeNOQxE4XQS6ixrTJKMgIGeuL+nVdwj6j9/jjld//MAUafdjpemAxYnbUcXEg1k7iFFxroG+ODkudrp6FT05d0KseLcIiOBM5Ck4LsKAO3l1+h67LnJoTSEjgVEJmfPgyyED3uM68scKi/uanJ9iimeoa1mc+5QHXzPVJLdrgLlu9t9H/2ruu5laVLf1rdtXMw3GRwyNJCIECAhR4IwkQUURJv366kby3Fa7tc7a9a+6dUVllddOJtb4VgaYQRUM/i1yYavoKcylztOuB1w3PXlD26KrBcW94IeGSmnkEq5Zws+Z4r67KUwqNKLxGPMoZxlkRtD9yz/pSHSkciGyUzKkCmPFHaTxgqUjiSgHR9ZIfSYhI5xNRyDrJag/rvHG5kNIVXXe4THS0TdOQpUPCMJBSSXxhHTKHY3k9jpOZhJQgVInjIBDhQ6LxiBeAb36cHscc7cK4Ny7Gbut4DlMgxlIQVDVcZ+eWx8+qGVAbY8GeTawS/KUwVjheOC+7vPPjvlY3MPqOh41hW1et5+6alCTJsoGlO5Yao43nW4eFEa2439AdosaSDGIt9XR0Rt14pLtA7nl/Q22mo7k1liQYicpZLoYyLvE+iLZ7k0VJKeZh6JntLFeG9zyl2/GuiFIqcmdOuFBEQdBoZ1KayNQzmMMq6atjyUyOaEVOi6FnCiRowfZHGEGE2cE160QWTrzqzdmcVcqNp8G8AI+yuMOCEF5IJBlYT3rY4RdBya2NFVMH5pfrrawcmgysZyST1UKzF93ZaGIZuBmsNvhGzkQvOV7hDnPkgNHAppMsjCCd4fH9uc0m4UQvjOO0F3ZM4aTWORj1idwlwoUyippTzdqXURRGoWbneVBPrUbGdq/zPHyWdNS3I0oH4XLe66oJ/Qk0MSJ4Xw1v7KTdXtwnjQ+0vLYRPazXVK+S1wJwUODjYpx2KgJ63aswuFRAaDhGtFkF3J3h1Tm8h2zM2gXrXypKdJzzrItkvjP1xv1aMOG995p3oc0kopVea3K7ndqLSVtlXT43OxFLHC4NGYM5u3174g3yIBqYPF+OOjY3eEMQOe44bCc3grbRPafAePPI+bzx+z4bzYJM6bnpihE6EO9unY0QR/Vi6TuDg6de6Drf+iXMhtRO3DfwbdW8mI78qm1H2J4DdIrNTKmKeZllupXbquoyS8Ec/FQiFWA8Hi8MWT9W4wm5b9etbo1UdcUh5rKh9KkpBt4oXXFdfJxCU0AFTb5ZJCIHWCMImSJQmtt0Kx6mu6tNxlIkr+hhrkonQd0v9L47mkJS2sSpMJiWZht8TRhRDDnDm8X46AasqAF9M6WlQ0uRp4wnYgF67mgltzymM3a8JEwBxBBu1hLJVg9hsuTIYWWA41P47lz+sM8yuY54AwkKrqhmUM9vLd5JM97oMs4+pgGFnj1mySiDGt5GTgl3R+MZVAPwGDaSO51XPnuOZF0qTuDMWwi0lhK7tk0QJbCqCguZPRXtG0WGGJHGSTgrm+BMjKs9LZfrMmv3MsYtRzNpKdri4uCQ2XmsEvoqWsOLXMYkK+FNcd3CyangOGgMXhof9tu6WthlkOAON+z6t1k1vLfVxIlkK/I2osQGwlsreMdOytIdz5AI2h0qI6ug2EuKF3JAQ/D1eu/SRY0egM9zpGsEX1ljxfPXks3Zirp1WQ4Rkb2RWk0YZ+MDVVKbI2dy6wkhAS3PSVbAWzR7ZOfuIjkec+CDmv5Bm804BZlKk0zsQjujxq6+Qqp45jhHm26GzZ+bwl0iSThwZbksR2ZLjeOxzFZorcLrQsSR7pg9WEfKCcTuPKLb1VmsIi+ZZsAzmruRrS380HM7ZDzIuiArLVMOr+B0qkU7j7C9u9AyYTPfZhRu4StdsOQ9h0PRTOHzXTtjvjTtarLRN91yNVdLPBPD5UnXAauhZNgxHSAbioWKKqJlj6c3jbACgRY3C9eIfOI5a8eIedbNzHWtSMm5TSyY+M/Kxabop26WG7oxjMXF41MaYLMWIK/3CEYi5vLacDbJWi5mobDIfQi8mXrShE2qextbRSp1g6/xWYe4saVfsJdUFt2UdFpqdWedVKHnbEKpVtPScyXesRJFiMeKyvC7DoFmOqOYYUMT27C2WyKwklmbFjssxhio5PbiqkpifT2sTwlteIVSWMCnVEemgRQdKdsHiZwfMnPu9abpC4w7NqcjwoplO0zTNjH3HEKsiLniq5PDmAtIA7ooeuXAvVkBsxYivGdkvGCZrUJLShj2nL42dsM+r3qz2081jDZmC7SmRkWVHJJwPi8IYDHmlqZbLlkI2Z7aejW8Zrnj9zu3OFjZVpXGsuj5K/7Y2BZMz68Kwelm3HTGz8NIGvSKClwwMEe1P0sJvpntNRk9L+boWZjb1lmZeGTfeNoWVWTLmi/krUbN5FnI7/ZnvcQ7vAltIeaWORaNJxgnpecZ6kEx5BdqZ6v+fE6Z1aESBkmWLM1qsWa45m9NMaN2V9MZah9Ve1uWzarf43Ukn9goF7djP+Etgp1OiN6JEAUT8O0cz3p4PyJaGL2v8jjXkYTvYkvJ2stmKNayukRNRfEjbYmJh41EtgZZbXelFl/kbuoM1wnBqWLYbL12dgc5ZcwpSdCW5xzyuBpNJqgozNLzWFMzP8DJdjK8V4gZy7s+SoC3cLaGt6FW3aLPg8nZ4un5Uhxur9xNSUqKzrucxHBTifuqtxNntVTY+DDbrfWQr1Ofp905dPtHaEPhpLAii0HB8mPJWcm4X5Ojsk7g5uggNlImh3abSHVqTZVwox+ZcK2exkpJ97ZpkqSttrq0DUWdyARihZAn7lxLx1lUR7niB6pVl44p9RYur6ORks21gEgSpRhllmcZIUYrSyoKI7zdElU90W25RyeMiogy2c6RxooAt0L4jQVVSnpd
0pecBL08IJeCwg36h5fAF9qza9vpoBrBF0peGA9fYHe4oS9E2dAXsuHaly+4S1/gB/JDX6CSjGtfiP3l0PdM0IzAceKP1z262CylrHTGHRQOfqR0ZCZGq2eC8AMHvAX8QkrgwObND3grBXwSHFZ1QdUExzdVuPQDF7KjHBRZ0MAXEiLXo3/hLPmCXLudrnUEgVwq+thvouswJPFCkpfqKIjD6HVGgr5UOvWlIvw5x7BbyjAzzE8ehSBNXxcy/MaQ2L/04UftaHfuytQgbBwdL62IP/xFXGfrnBQYzaHdpaJugLa8VAS5z1VV0YOSlzp1HXuALFGTgYlEFPysm6pIgvX1NLD3aFYXbeUF76wHvqgANmycKgya9xbOXhoGfhi8y4M35CWRR9K+1lVB6jRxF9ws9xm5rzMsihic2y8WExjygrO/PhRxy24SQW7HvJDiOswvLj6OjDPI7VA4fTfUhVgPQw2A+EmJf44Riv0YI1XR5n4AR0EA/4uqiYqwyJ1UK4ryCpN90DQnIz7DHk7bFP8YRB9jA32KjU/D7xFEX4COr4IDRiMvGH03GvVnEUEwn9AagPjGtZgXOfjHfwNIdkXeXJujBCgDrFSnDZzhhXwtbq8TDgXxeFM6XUv/XGO9KqIPUYnjn9RYvws24C68INgvVXQHFoz4w2B5VB/TFTCt94Cp+zhLnQEoNyx3qlcOY5BTUVHFZ8B157WFF8WprzmnooXUb6ogeC28aWuC6iurq6COz46bvpY92CDOg8o8lcF1hKdYu9QIRVpUw4px+NntIArjNH1TjyAe+ID6sHL8GCDqzbHd8Pkiv4JkAKfxO+vAPvErsCeWDyWRD6G1DLzGyUNAq1+TsreqDEWeTEg/mQ+7m85Jm6DKnSbgoVqovwN6r2f4np7qo7gJjNIZhLyvnPIWf3es9Z2A2XnPwEB5TOA+Z+z7YvFpdhPMC03e0P6JC4lhL08cyHvaf5n/SKJfTmEyYHziGYUZzMUp6jspfGelUfQJfT+B7K+jLvb37GyQukUv/arghwpwAJIg9qDKfEv5W3v8N6n6oWF8Fb4PDSP53F37DcP4e0TH/5MgzfxvgzTxfwLSX+7r/RbR6U9E+X/WOUcI7MY9fyFA6PW+iz6UFkEVA5oE1W/77fRno0nms5mG3/Xb0fs4n2Jux/hmR51+ZsupFDrSftyBnyH8uXR2cNFj01y8HgWzvWnwpM9/lVj53/+q9bs4/EDRfoy5r1DFf98xx+55SRD4g94l0Z/+3Kf98t9SA8wndO8XqAEQsD3mC4fqUZy+NnuTVLxmB0DN9TjyOW1yjJvNq24Avy96hLyWfikRWLjRIZufS/ilff6s7sHpz+oe7A/pHgIlX8i3OYPbQOMvFv2zSYPXE38PqiWcfFgOyYM/ZAiFhdd/P0jQRhjqyaF8WwezIo9t0aGSeN6WJsgnA6N3dddFPDS+HRn84fyrC6M5bpAuijpu4iIH5+MWTVNkt2L12pZL4xC2aaA48k5dgggdFHfxEQot71wPewCTEKj8QKSgkrrgQqtBRCNnyHRkxxBo1OjFi2uvQNmXvqgSIKeXVTwmNUiExh8051V43won/UVKFEXus6Y08qhF0WdZffTb/Ffm0ZUSUpjjeYAnOM3mlocPzLlnahb7/uDk3ueobqEOsQMVb30l+DfR/0EJkMzP5MIbBlBP6I9/G/mfZZ4f/A2p8XzIjKACp/tpLyVz4hz0SqGlux51q/v2H3gv96Lllln+Mox4g4SibdI4B0KU5xf5/ZzJ+wquEizQW3c5QwphX1j2UbQQ4olo3VuCr+Pts+tMD1xaBJClyJXFM7i70j/mFlhUXNbBG5XopUXrf4Xf+SVe5L0TST6Tv6fJVpTGvolL7Cdyqm8o5Tt1dOvN17dUvTMzGEOT0tNs+8/M+aPdRwZb+0LdG/6LOcfuzfnT2qdOAvsvBqaH3oPMfOgQYMxd20F/3yuKpHWDKgfeZf0Se4P9LStl+CEGEOrfgjAUuQ85ycf00DP7SjDfha5P5JP/H13/Hui6v2+BwL8NXPAGqqJo3sYg8LynwDrAFv8D
\ No newline at end of file
diff --git a/Documentation/etcd-internals/diagrams/etcd_internal_parts.png b/Documentation/etcd-internals/diagrams/etcd_internal_parts.png
new file mode 100644
index 00000000000..10634dddc31
Binary files /dev/null and b/Documentation/etcd-internals/diagrams/etcd_internal_parts.png differ
diff --git a/Documentation/etcd-internals/diagrams/write_workflow_follower.drawio b/Documentation/etcd-internals/diagrams/write_workflow_follower.drawio
new file mode 100644
index 00000000000..3fe9f07a2db
--- /dev/null
+++ b/Documentation/etcd-internals/diagrams/write_workflow_follower.drawio
@@ -0,0 +1 @@
+7LzXruw4ljb4NA3MXHRBUsheynsT8tJNQ957Rcg8/Yj7ZFZlVeXf3fNPNQYYzDF7SwySItf6lqfi317scIprMtf6lBf9vyFQfv7bi/s3BHnhEPr8Ai3XrxYEeyG/Wqq1yX+1wX9rcJq7+K0R+q310+TF9ncd92nq92b++8ZsGsci2/+uLVnX6fj7buXU//1T56Qq/qnByZL+n1uDJt/rX60kBv2tXSqaqv79yTD02ydD8nvn3xq2Osmn4w9NL/7fXuw6Tfuvq+Fkix5Q73e6/Bon/C8+/evC1mLc/zsDQuc/qD3UbwTheigT/kPz1v7ff5vlm/Sf3zb822L363cKTJ+9b8aC/SuBoX97MeU07uzUT+tPn9fzVwBPZao1yZvib5+N01iA7k3f/6E7R+I4/Hrat32duuIfOufJVhf5bw/6FuvePOzQkrTorWlr9mYan8/Sad+n4Q8d6L6pwAf7ND+tyW932bOW4pmbqfehf+7h39b+G9Bg5Pf73/YLHpls86+Nls0J1sHMUwNm4b/PZNtvkzzcnMGA4awA8v+SHBv6l2bc9mTMCjDrP7Pnd1o/Cy7OPzT9xi6xmIZiX6+ny2+f/juB/4ad6/cGEv8L9qvp+Bsaid8xV/8BiX9tTH6TgOqv8/8NJM/Fbzj5v4EZ5L/GzO/Uya4HOvkPA4662QtnTjLQfjw0+3umpNPn6Zhr6V8bkqyrVtBq/gLg74T/gczvsvgnKHrwVZYkBEH/IibgKPkXBPt7PsC/q6c/cAGl/oQLL+J/iguv/19y/4WSm6f/MY3/8S+WX+ofpPevVuMPqEGIP0EN8j+GGvSfUBOsj1w+TXRdJPnzW5uq5+f/EdDa//lPgHr2vf89R/6JW//I1KHJczCcWYutuZP0r7z6Yc3P9jDm3zAOzPXZH5TcxZ8wm/gXyTIKI38hXn/HFZjC/yref+AL/KfS/D/FFuy/FuY/Ev0fYf6nwvhXfwP67/Dpl/D9g7S/GJIT/kxmqmxG/vJtiuM/+mbb/xWceRH4X353DX/nDPIntg5G4b8Q2D+zBv698V/OG/y/b+7yZE+2fVqL/9re/ZPNer0oqiz/ycDh/8CV39Xy/4BwYCT5F+jvheNP6I/8GfHJv7LqX05+4r9P/mb4ceaZn9/070Lyp4bhf9M4/YmnkWIE9OduyT8IE/Tz509499uyf9Dzby/61y0izOOjiNnGZ0z7gFSxmujnj+F4Ne9Vz5UOfnAyS0fPb3YPVLwAHdiQkYNQf64I/vlhnrToqweaghGN1/Nv30aRzysvkdrHzpdEuvn8kZ038+6campkh+0dp+kdm39DvcDwTtM13jShqg/Fpx3YbvdWvUnmeWd0nk6z3ChCU79t1vNwNiDWpvbP0fZmt5OnIXq/Fa3aLkJ+9jN+ym3cr0ceEIbEqPsex4f5TCmcp6q7A1bExH2HP001+ebtv//b5QrmdsGbjV80+2HCg7uOiZXtyi7oG5WkQzmMtinpOGDXpitVgqYP9rqDjx1vpGB7viAjNXLQmpygWsXnaivPDSSlo7DomQn7m9fFKy0PcpVK8siV+ccW2GVjX2JVm9r9rOriN4PYIe5LrxujRXTpzJ8r/rbaO5Eew8lA5sfMEpQ5Du/ZrQnfKL26kaIqnFUc9F2f1fBGRkLmOc7HpE9Bl7v/dFSUHr+ZW0S3cwXAZgoE5ccpJb/jWGl+wMUCH5JiJh0uz1HGTFAVb8mvRywERkyGsLFdt93ktvMJ6eZu+ZmTgQtA8JdvI44SbrJ1jhEuTfqquS+LaVsLEHl1mvdqVJzLTUrL7fVGEGOcr3P6zTPKj/bu/aYOGJZcfGGp+CGAcKhmQhHLvCgilB9SLx2hFL8W0v2Ya1WCOeeisXfi7tS2yr2YNC1TW8cPPhVOrq+G8GhSRvomyV5mrP5MmJ019jHL6WleHGO1a2ai4f0xJ0wVkvHzq6YPbOC667tr1sHq5tGYRcBKS4c7OzOhSUk6Bbw32M0/KH4mjGqq0yZequLpw2Q7UcyHqpvDnHDvkrR6BGHzol/mAM13g7ykSBTVTumNchOvUFUVK+S4dVC5+uMZiI3IQaxpZ289C3n+Zc/8ALpRGWxzwDpX6NlpzFSeylvIFNxQmZvRxza/i5t+Za4aCGWzwABqeJPUqzJzH7O1gNbh8LpjHAUcSstpeh5Mul8WW+mwTWHMSU8d4lHki4yfTf5C/SoJzbrM2eG2NerJOhu/y0PKvmHg+9uL9AUIkG/6BtmLlF48ccI/y2T7JiQyjQlPhlwODaZxPf0+j7y/3+T51Zhmucfwsu/3TXEJsXl7UTldF5E50xXt2xK2PHe/hXkR1P2BrI9NeoBPNGxY6/LQQ3hrAv00TOms7qd5L0WP3BIuPT8fkYu/B/pKbkZWKF5iD5sfPBOHMpXivhUwA8Lz37hfud0+mmSxjWei/TEgjJqwVeNZaaoIZkqsVPvF3/TH/FYkGOFNjsLl6tuW0oTOJmJ+Z05oeEdIEfRne+Gq0Mbqm5tnC9fCdVidPOXYGwcjgVDcRtU91wjuv8Q7kGSHALIS0sp+bFAYwKlHl+Ib9IAXtsYgwHgDhbjzRd0SjAusK9fvQ6iBPUMYRkrVkkBa4/uCldyzfBgFO3i1HeQ4FcPUB+xiKkrsUyWqLK6AeYHYYdSFGJL1iSyk5aucufVEMoieci3REWMOdt1BB3ibp+iu5+p8Lsfy6U+HKKAcy6XvFUiI3nklKqGg6/1DncogFYsCLCZ09ZPu1FCSYESVxk2LuQBu6xw4OUm0VK1mDymYI8VgiQgEkBd7K+QTHAhtIcUV+e1IgBIFbxPN/+KZa5Rjmz4ti5fLOXG1XRo6OjxKYwy94N0ToK1g70E10IcisS3WZUt8gSCveHGVDssAsipjDvSkMkSF2REIcjRIzb200uY220HMqypSb5zAhg521Ru4C90ESxduC+DPs1sQEwkdyV17+r7d7T31qLh+KcjSo0306Ui41Mdyjpr0ymZvLmvxJqo9QWC2ZyKoJ5dXO++0TjKjVkNniJDYAfMM9pbCuMZ3nUxMoMOzr9VwGbSKcXCUW+kUL2LQTUCr/fB4thlZN5uLNyD6eN5ZHE0ldYchumM3t7xidbULdPA1jtqlT4THD/cE+ov6dUtf/lTm3nf/jOMdrC/Uwe0DjsvvZX36QaXe7+UzABW2a/BIKyogg+kwMnPzGmOVyTuyPsPydMAgQxhN0yW+AUl8AKxwmQ7DJv56Yy8B0Z7JGmklQ2vlrewC0qYJo16LfhJx596XJfOuYuW4RhsMMJjeZf+FxCYyF32mz1D/oU2gm5TB9xWdZwbdKZuoK4NQfV/Dxm9FzDsIL+HqKwDrDbtE1qj1kTrGighcyue37yr5+bg8QnIbPo+iET4Skq+x37F0jHH9TjnSM/67H2OAT5VH5UtUb4b7wvwrCMdxGSgENoHFiYFIhfynVyLXWb0SQEo36fvbjSgTza7xLIFpG7wtZVeknFeMcCThFbG1zeVA3m9bA2aEJFhAp/jri2DFOFl0obpCwoQcl5MGkJBGAklrq/A4Q0UnxdLKvS
ri3JHHuqQUV4k3xxpaElJAm1tem3z9ph5IKc3NMe+wRgjiZlN29EIG7+LFwFR+bJclAWs+FfBZoehLU74ckCkr4yjRA6qAk6rxxK8Su03MBl4ylYqvu0i3e3s+FWvaI5eUPvuZLIBGnx7VXK2Xu6WlHPB0OTHUXW892phU6pbvmAIm1ScwBjdbek0JCxK+0hVZft0vpV5bjyvOyDkpB0Zdox9q++KWS70MoV/Dcb5lT2GoNt6dlujKV/wiSvuZTs/bH7lYgQPEmm6cKAYLSK7BArq4CGkTOJWmmCFKpUp/ok/snTc/EK9ZQ4Gsg10RlNLKMpIEfXbBNVAAGxWvA6Dla0FYsXrik/zRt0yQjNNZvIEUeJHD6pwtkFKfhxqYBSeQI5bOFD2LlrnVTYcfa8Z8AIDcxa53tAtOU14npRdbswsDsslsrU/n5H3X7+2tax8P5okA8t7XB2jvL1Zu+Ym5u7d5IgQE3PXp4L4eT7aAKyfSWv7RulIsOkY3fPuGoa1e8B38i18Zap6s8wmyU6VnMeAKbUh1eguH3P/6obW864TcKmQnopCwStxAIpjEnCUwteXk4QRizxyDnR3d5QVtcuHSV1hgXm+1WCxYQ8zlmno1ym+pvlucozMnfeIAgRxMk0Yca+lZZ2cl6U7ONeGRQjEvbaRTaX88l261YUzS7QlWVaf/ZMpMfVt0X4JudJgJkJ4a+wCa5wXmJye/rxHYcTSPHVbBLyP9pBY7Th/Ug72RCOmuBii1ZHxCkigvyDkzcU5TvUbZs4YZj1CwxlCZ+4ciLfNFhkZDd3axbfO2gsc14/wg3vmPAHfU5jTb+uXblNAxO5Ou3V0HyDvyw7xKJgNWOM+G4SUFzRCq7gc8VgcAecRsgYzvbCJXPsX4N9gj3/HnD4+CIVWyeBf2eDC8lKKk5OtkLYjDep2d1dZjFfuBO4s8h2Zk6OLPI6ej6+ZlUxA98mxfCfbEIYK3Dtya5i1Cvd3POlDY8oHN0LsT/N48v7RX8I8iFEc1aVWRyW5MctT5CuWr7s1F6vrUb034BbNjaGjB+05c3b0eURMc1fZF7xQzfSPVZbF3NTifp7Ek8Jp8D8Z3a7kTwNrYPNaqJRlLTU/gWVUS6YEN8gW7AmCij6mjcmMX4DD5nE5kgUXO6hm2qOUZLVCU/os+kq48bSVGw0xijHcgMXqv6VPUrfhO7ycqsLEaxvSphH52HNTWVAnum1z1Ue+m6/xcBbopQWsjhh3bKPv2jWpNhc5KZxzCoVSAJ9ouzey7rcPjQ6MLUAlv3fhFRecJ+wTmcOAl61kUs/RSp+QOMiJOtvvGQ91mQhV5rfkqlBpZ4aatvsDq2k54Z3zV1dDmSHrmSYziczdjkzoT0TK+LIvnS9i7Mml8HZ7rx1pne+eZ7WQu8TD630mWzkPws8ePttZvQgcHMILz6gAAfXrG/HHXBKCLmdcB6AuJjxEr47YaEL0oeErQpAGuPrYz6CgHHMWQ74asVM4qM78YnS1QOfBoZZoJvcMB2Y3ZyD+RSEwyHyF5dS971o0xDY9SDJo+UVHxYJ9rsekDFaXDjBjNkgmTKXji5lCMleI96YvX9CoXP1aEyRk903W1eoZqE/04jJyM72ovz92AoFdOo1xw2pm32q8aa0Gsxu5Nh85OgxRdZSp45SFXX3ELY94iF52qawufLjNwU7QBDGmgrldRDeTPREt6pDzbfr88dOjwLxQdrmheDvr1uoqiGuGruFVBk2AEq4ayNUynyaAMDvukN9jUIErxoksnLzQjOpuP4cHYjLvpke8ab2jWRsKgY3zpSrPYx3fbopHLaDhZEfWtw7W1I1pCUBPUetUUypUrSd458JjgFPJ4DLgIOAQny6Xa+jq99fZDHgdJN3MqxpnyqVOwbfHAK2arq48+KHXxddROr9+NmaJCIcSB5ybl7t1vtR3I41GzgjPkIRAhFl0/7HtbJtq6I7ldHol4kH0NIGisfj6v9t5QBUV9xTUlQtKjlUZGDvyQgG/GIdJZ8ZWEylD/q3IVT4M5QfBOk6dLLgHuzHSclOqrDg5HaGAFXQs+fcarW1wMWLVkvsQqYMCoCy7CB2o0ycpm1jlOVOwjq5eglNE3Jjh2Q20lve7aI+cP/Q6SifvQUeaHrKxpmdEhRmjT2mTxtP2PGRLefjhuoQD9aSWCcFVJdBNkgmja8XzTVjE2kmWQjPoX5QuJf8oXkn+asoX+LGUI/U8lDMl/ShgyU79zzP/3qhkYDv0F+29VM/6syPQ/Vsyg/uuMbZFXxe+Fu2nd66maxqTn/9bKZJ/1+1NKBMRbfxWHf6P13wZoE8jK/nRpi32/fiMwIP2fZdt/z84if8KPhw3rFYIn/AVD0d8bot8e+XPDnX93d/31LqfBQROAmz7Ztib71Sg0/e+P/xlgFWvzEBjgivtPa9Pb9Fmz4j8h8Os3Bu/JWhX7f9IR/o0VgNz/KaKe8DDZm+/fn3n5M2D8DH22m1x/6PAb+P82swUa/ljcgeG/Q+m/4789428w+zXn30D318X97+Pw9xr9fwbE/2Fk/QDpbPZfwMJx7Ld7gKt/h/4C/X7/N2iBm+sPN/+Imj/gFIfIP+IU/gv0NyT/L7H6LwXi72z9r4H4+h8B4j8jDYNf/4A06h/U3K9N/Tbuj8el/nmqfyjfU/94oubXtv9pqn8Zfv8bh7P+Xyx9/T8rWP7vFb28DZS4/F9FL86mjBp+LtRV7Lk3zChvSK88SfnGQ7/F78ftkQe/P3IPpH87p7IoAoIM1gHp051iY1lBWQ/izZBrY5ZvWE+kZeaUVeG0/Yy9pmZim8un/XDO6mRLuGZaWLr0+IAO/Fwz6ndhZygjleVPfiY3bhBPD2Ab5hhSII8BMu6ZGWQsrYwqXzVOFHS0923nY6C/Pjx3Pvc2jCe6muiK7ugA0aQkqRjew+SMrMKRyl9jyeB0XOZ5p+zJb1M+T/vKm1XPK89UsvxuP0G3+frbmpDNRy5GZyDaZ/0GJjFdIO8jj5z5vdh0zfwkoKwjc8nzINirhVIdpJYy4cOy+ngoRqSJPEmz7/zkCS3Sszbz2g+rfE/TPUBmdqvujJ+VIIMZUe80jqFLOpNvshjOFxmANBv81QNJ0IVmL27JGAc7IM2LMjr9TMgXVZy/1h/0omuXNErFCu/UBsE0SiwRH5Duhoa51EujpQ9tHLIipuJnSM8owgvhrhuirgbDnsjim/kPO01S+Y6mgo7eeYyj3eE3dUAtxmDCgprCp1B5YjqxuetDvVqdanKR00Sf0IMkZPfdSKOJyp70rDPxpvZbgVxAirCox0TfFQPJSSpkyquYVmucNeQV9phrSQUl22+QDZfi4P2Fgas96HVcz12hJaZwvtojKHUNJBtlA/ZH/vOWWBfBrphoCngWHenRcKPMEMNhQDVZo9024gzGfAiaBgFJbZ2TTGMpzc4VF6Bc5nLTKwG1p5QJ8JUJPvzXavicmTjclT9t1o/DhTdBXMcBLR8Q8SCRdFOEMCmWHUVtr52pYo/Lk0C6QVaN+21WsF3PfaRrRgst0ETUtHKvUfThxdBEh
jBiRZ7mvScg7WUah+hIPGg7oplvRQvXuxlhWqYrwQvoPpJ1vqNVXmbMjcZ2Wmoq+vOT5PBoTT184U2jONfhtDDSQXhw4ZstcDr80jInMzXJKHpl5RU7yZXEO6Kq0E8sJc3vLXkzniONLG1dNKnWi1qJbFWgnd4kDAIxzkKzC+/gdO0B7QqxY/SQ2S2q9FMxBt0Yb5Aaru6bVpUNLPQVVGfJ5UoFMuvPhCQ9oVKHCyDXKmQFnWecUNIAfwzIHDBaxpYlC/jBaAUjCx4jT0X8Hq7KFmxiqOlesheu8XjNY/qIXN59UIk9sy804YX5m07qG2YjWPAoGtrpMaELxOA684aSceKKdscZBXp0bly+fUq4EXnsgHAeGH0udd22HCI20cifon7p4RDJ3eHPVX8LuamJWUKeiHqSfU2t7CsQz4/gZs7nDAhJ2t5I1FDNhxA4hO8ybil4nYwwtFJOTThwraJetJTp+SGF1fkVy8zWUC48hLG2Ss4pWuonz0NqIZr9VIOAa9aCCHlgjQcbEU/KikTLyVERbBzwxqbsR8vUTVNzMJvhfGcqTWYbp6g0gHb8QrrSuUYOXysJP2a2w8YEb09909i6A1+eqLi4HfO9qHClk82qvMuG56azXHQM7lyJS2XVBxPryxw76RPH5FBeoL4uiI9y75YrdjvHjOW+mxAH5NLk7yw3jtmremIHzuBrgjfk3TrthoMZnZG4+WwMDsDGW5uBXZAp6Gs5Pde5YsjGttLl3ce78vuNxJ7QFCvI5W/1hUlG70JLAfdII6tN8qhRVbxVoZt6zJn7QV56vDdjmLeXx0Sge7ThspOt8eWF2pz1/jW/3AF7pCP4KiDB4rlYWqoh5Xy7XwpxQ9WE7kxmIRhj8sTaBjUAbpqWprmUUAdJ0tNLhZBMdcfNek71+nmHVQ2qjX7XVJvstd41fe5xDfwTDia1QpdU8ck+1hcO9mvfsfzICOCly/30WeqyDGuRdApM41PoR6mP5WEOC5YPirFr2WOUK5sNmrgMRDVr5kkt4yHmudA9hM6zj1TusCzJ6/H9b8dMwnh2g++cRP6S7vYCj0Pl7sWYZPlM7PkHVl5vKLdOGP6MHn7fnlK0Tm4bEIDbjuAS8YaoQoOMzwLj8msBucXpplwLBiWvsERO2hhpxOGCSdfqseGCTryiWGodMjreaWBvD4C85OUmYhsgepFFUptuYWSnsVbI36VvvRSSnxF1NxhOyimrCszzEqdtvOshAgdiflKmljf3FpS3eRvdhiy3TcQsthIDKIEg6Q3qZTIidO/w8ZXcZmzCGNr16yQ0n1wDf7PDEJ85vww+6g7MA+x/nr3hJLI//sVwJKnubzlytqn+c8jBnL3CNijz++x/C034IngCWPhSeW0RKCWXOMCGT5pKhSnc6JkT9IX6Fxv6LBpnu33FXFdk9nlM7SiYDIyIIxfkYoT5N1D0GSUgaFp7n+g4OGZQWtHUfRwb1k4LlT2z0m/8edzjz2exsm9+3690I2PuxVr8RI7jvBRZTMDjbhZCT6xlgD+MOalvWBD5y/JF8xrSUGdtKh1/K7D7r1P5NiCNp6SEBWh1k7WEPVsVVqCjW1DGAAqFzJhPJZGVEtO8XukvxkIVHqxY5cIJxsNo9gTNwTJQI7sjK6zLjPdkJbB9B6RyZZCj5XwIlPaaVpf1w/Nr4tGBKPnlEpImbaIWBf5t2s1lhfYrFlWvPKd+abNcqiE/tDtyI/sPnBrrnRXjWJArCuSr+Vz+fkZLelxFEWC+r66BuptnwDTyoC3Rih903/mr3kfxcMYE7aZtipRA7WjtsY+aWOg4UFfySzSzyD8fd0IQTTL8NuepZjw6OefYf76PQcEh93taGs9BA+PkvgKRnX1F0AyI50Q9Z5BBiAKFRs4LlfvxQmzC4xSQh8cuZQHEA6aHeiXohfPmtMuTxO0CcYK3hqb4x6nn9Rm9nRaQKkbjYdwGB9T/hOEF182OpPGgGNv962RLAeNauO3FazWMfA/aAh1XkC6/6HgCle5LpOp7H40rQyO4EJEN4R8FlJbzvlHjcGyv797ZW5Lt/kUhEci3Oxx8APwXsYIdP7XD9Y2l14viwY5aYgs+KZZGHEIQ/U+i8ivMlS8wPolkRFF+9ZH8OQ1SYiCPWrQJXhp3ZxKb9kU/IEpAzF9L/6jxe75ZNEM+4Lnzq/umwS99+jjOhP9BCO+Bja/h5Qc0QiXORSvRwpWOM+EbuO0963iUkuB9mwRLHO+gDrIfnWQeGMZpUxRfe6myJHATpCIqvyC/yuNQf9U+LECCtIFXnJj2vr6c2oJ88mxReaD6me9LyI71mQXTbhiqA7wmyO7hrzQAhyHCBlBFahtwqiH/FOjPhgQpYGZTlNHX1b698aBHu7GByJzW7eSU9gKVbXDeZxAhYhkvbJrccjQ8y0ywaQgeG8vASxsQLfDqY2pNJWNJyjak9nLZsH10spc25jM6jhtJWS8I1ARJrbMzSSC979mjaeITwLx+6hNwIMf92S/w9TsX35AwSGp9DGUq1cXHhB8rJhSbRp1Yk1Jx9xndR3NqxZ666Y/HTqVenFt+aXyR/LPln3vfX94HByfJMpSCLxx/ES+kfQ3fh+eVQXy+QEswv+jw0kCwUX6ADsAtq/tZqDXX52bO9rmV0UmMFgZY6n1tjwpGgUCB7jQOIuKUI81/vDzKuTsk0Wtq5zSL0n7NzqlgUuwmv2LNR7iNwKIGEUo7IgoUXjCqf0+HHJkVHP+BreEmlJBZqF9lJObpgCPKJY81/PhIEKhUIl/YxwHoXibev4j48fIAT7OZwH8eJFFAOfbt3ZT4jL4sHsPEB7OgsKeRfsmlYI6ytChqrPA4nJODUa+4yxhB06oPV9Gv/s29+Xhgtymn5VbDhnQJIgkYsKrU+W9tWVz+utHJoLVAHuqBotuI9cyEvVlO7ro0uIWGb/nhmBMfvbNfFN4IC2/5qgSeOncQAC9QTvza5WOIDvd5DAqawWkjjEwaFRDELUmt2j0e6POtnx8/l6aZIPZrJe5gg2Z/4nWZpkUpedP8T+3hp8tPOziP6tQVLYErHsTzvFM9ge/f+nmfij7+cK94vy59g3O8DkzkRi+ljwPsTkQKQrjoTEC/PJ59oYYCHtbNwfimDlbHItXEDoam4W+P5k7UHOs9E+E+F/mqEOEtHXW84KAmCuxvNHg4uE8DH4ocspGlCn/6HLm4UXJfGx7LcOnzbJmLPjqLHnKLqjJLV5akdHE7OzYf/XXObLAHy1GmXLIPsyG/+St/5C67tYG64ot8YukO02760m75emxxFzfw/RgcKAqr/Rnf/j73H+bn49Bos6F/1tN/04a5YjHCo0D55uGbkh+X9Pf+v///fS1xWM9xCzWJZEMZN301BHvWIXQpovTaD62ovhD7Qbv5Z19kY1XTH+chf42f30lY96nQ80kA9+lozCkCVJbuvjGL/TWD2TNzIfpb4NnzQ8VPhjwr/cNOfnYj+mgSRLgn9mgcnJEt9vsPNwPqY7X6rbvH9++fWP/DE9nmD/vkzt+e7N9xqHApAoPZIKsHtPLBqCl9
vUkN+fVUPzT6rHv2/DPj+4+zcccXcM3s7D5DjCsJGehnTb+oQWqvv3IZjGqPb8HND7aYPQ7t2qoWNR7o+MHng+lfh6yfeC9nMrq3sfCbXLcSi7KbX+WH6BwaNfHjY95stH5l9NcxGaYOwvooVbrP2EvTFpQRNtaJGuliXd6KDf0lQKMY/ipmv4BjyJTFNyBSoKYt7OuBk3/hTsyKC2LY1VuKdcGH0SrBORnJR51ZFJLtAfFw8i+XSm1gq9jp/Wz12uV345zDVzOd5RL6AS/95lPN+nCju2uUpU9jciSNIljr4Byha1vLcpTSsM5q62Icsjhbxq1bqxdP4A6Z82I1C0R+E9pgC01ZTFNF7WPBYj7teM2Gg0fgPEwplLdYVbM47LPK+KnaG48IxA0dSsNSdGkP/ErC07gr2V2NEguRhw8jxKlZtmh3XhmFVmSngjpuRPEh3yxCavq4y103oiSlp15QNs8RQt0bntDqbZOK6zvSFg/bkbsmOC7FJFlHda4At0HK1pFjtyfLXGdd8tmGvlXLSGe35/T4Gipfmtco+3D0MJpb8AXWmwKh4WC+xXC+pI9EtpmKhaK4m5Z6vuPp3EKeqBPijcVn4l2h0OL0t5mvRxZPzxaf/e/x/j3i+6q/WsgfE3yzvEggQ/IAf2UmBwv9VwagITIRNCJs/+VaUXkhdZTYdfXjSiWLbbXqV1EM1u40ozQoVZDlRDzrPmv1c1Zg29r5rxd7xArOrbp0AtvVxX/RTt2H6ZOwQm5Yud3NjzK5eu49SCvfHT/u/HwNbPMmbztLY4hNDNUR/LOJj0XDjdqP5Rh+An/LN9MZNhzty6hv1jRP7vGV9U91nf71GKqX5CeyTjk7JMfh+/puGRAf5kPPX7EnqvhNxXf0ymx5RrhjaRe9AMkGoZXsG8LcHEPIzxxqgz5PqPogD+u3RoDv6bFQoxmcJIbYg/z+jE9wKdqQ7tzscvq4kWqfIYkOV5UUbYER266/MjHrgo5A2uIkbY+1IKzs2qsB1o6tMr1/U7zXyuegm0HmBhYETyCsuX2KQ14SAvUq5M29JryiLMO2jz7xN3d6untb3issfRabCdVjfAH2Hq+wF9JaDNtFm0WPQTZ2PfMB/rxbLh00Ty5al13e+fuJ+nAfDgqhFgcqe9nAEX4HqicZI0kw1RFV1yyqa7cQlJo4tF+rWHC62mh6pEuigEPJa9th2Kbfctzu+Nt/18TV8x7EBzbY38nEvhGjC2Q691fEQDomOdFlZZZ6sGU9rrARTYfQi5qgqzLjS6XqXMn8u5mnxXz8BsiLfQc9vtu5iLGHCyWzlvrHv+a2CSVottPgu0wo7BUZcPadBtfRCT4V3IqvlWPbE1LK1g5PzeNTbbgCLwt+WwMJieX+vp/QvRjdVI2gimJI54VVRhUu0qS8BvtbbxXGbeuVbnXpYpo3os04KyLuSe90k5yQ7GbaDqmmUijTF3rI55qPv0XumCRGz4zW++NtSbIRUWuSrTB6MIgnWjkMGCfWBW4OpX4XbEYBZ5579YUHa87gXibG1oCIZFosrvDKdO3W8Mbnctc3v2W1rCe0QN6V0nXHPnpTrw9mnSaJRg1ddxVIw1iVussTeHTR0Y86fzTsyA3aZyP3n/Mtsf1aGVezDFd+tF7Xl4GaJNi7/MYkKalYy8bqSUlLVju502Nv6mx1qM2NUAk7aNln0y/PL6Hh/aPlVV7WRzw5I1Kqu5Wxb/jbdE8g3MO+iVQ7ZPS5kUuLqIZ6l+De3c9srleJxf7kVsYTjbDsiCVRELCOEF9Uqyn14vDzEzjfhah+c1GL0UuR3nHfvEL9dYVvX/3QGe9w/sTVdh0SsIdkY6Kci2v634oW3y/ZxiZKZnddYTo9rQphuHhiXXOxvZkJF676qMZcxYPT1ld3piVTZGaqPChC0lX0GvtMBRl4wwbZE3y++Rgko6AMKgZff2K7V3d69Ivm0sYyySHaDUo6455xqTfOqqCwAHGyUMXcweuKbtZmY969681z6AZi7LxPYSI0GO9v9zB0ziEnQRtaTlgJwT3Xz+RdATzbgu9qx6eDJ3BMdPYZNqpUQUhQKDsPtTQM9L2LRtqbO/icN1uLQ4SVudBlujd6KGTM5F5zEAymsglGCuP7CykZTTynQ7CD6e2/vj7+OEhBy4xSZHVDE6WfHV8vJIh0mw40dOsDTP/JfSDL4GGOv3RuFLhqGGhxRSICU6Bsh+OmmrEwNCXTiiLQ8d2Jrjbs2JubgCK61FdjaE31rm/gyAQuw5RebM76KKNZoKwKmm592JPZxsZJ+ujJG+8XKtIGc3k0U6gtuXcZbpLoH+oOUEX7Lnn/OYUvlTkW7LWjRN5peHpnIc7O2KWfdYW1Tz+TU2LQ7YsDSS431irBi+8gj5IMQwf7uV7KdU/hjrFqsXLJPoJZ5+I+PzHQzmi+ld4TnUcABBJZy35i2o70uBDCzYbatKSVkEj0KGQBh3ZIx85cscPm8ISocbqxN+7AmcyId4oHn+DBS40owbB8G6/R91chObEL13VtUzrIHBGqLMJtLfoX7IBwdW6Z7ijfdWX2LF/plfze6BYfhouRP0xpBoVCezWIh5q0ZX2Fi75vGWe25RLRYGmzc1tqSl/wXWJXkC+vODFTjlehrOiVQdWaivnYxvG3Rd/+d0Qfc2HmgliKvnRIlfuyenZRK/kj+oXUPFpNX2a2mDTpHSqzJhnZgh5jUZ4zINRMDSMOeZQMQEI7mHEuG+2+31OLwPLht0OVDWWT0yI52kPtvkj9tXNVM8I3ZMsPlsXUsWtFBsfFF0bUoWCrRF80eGBS9ESZ803/2K5Iua1uZtVLfxDH2Z797dOAVlkWSk4qgB3wQsliX2qDx0mEjS4OWRZs7TpJOsAvQhQNhkb+9q+vx9DeC9sLp7HCvkA243zp74TwKKJ/sSB75u7BK9ebQp0UvWKXbgkwr07UB1DpqgUCDwnbQVnyvlfNkOmWzT3WQ3WBPr0vIpwkAsn1GKSShXXTXKdGfMRVMV0hJ697nNPUHHXp03aBasprifHH8Nk/dNVQFgY4Nd/c8RJkYaVlSbDp72NRTvfsQISL+jiTYIsVs/WVEhtw+m7DDp+Icrc57cKYrB/FXHut9po0bpjx2+Gmq8NY7PgW4csRURxkHCxuuXaWK74QSe3OnvVe7mKJmTGR6sOu6oeSYOKfdQhiuJu47fKeQIpycT4748PhQOYhaJT1WFx4p/NjjWRXZaCed4UidFbsIQuh9Req/GQjgBL8cezV7ot47/fdsvVG5+e05nU3YWJbZcE712gmYwPyXVPqaEsUqZEl8VKhVd/0Veig5O3KU/X2Pzt6W0vCMdmF1K44mfuMTWgCWJ+z2xNu9cH4Uc3bXu3F9H3Y49Qyujwlkg1WJ5qfHN6OCfcdO7AYReG1CEpBT8h74EE2smWbQ2Xsnjcwk6W+uZ4gAZrHINdzhH1zFpIS7aE9D6/Qtw60QtSamAX/HfRCR60qrIa4+rrUkzmU4kSmeZ9126JNrpgYyLfbWlYMICIuQuItvGnCmBam5Ippv8qU7rT1q3y2NE1
fgU8Rtyocx+ft+G6EYtfYokIKIfO20sFrYyW33AHisSDN7r9Kn9dJfNlAAixy3Ty737qvU0JmYOtne4IUdUsY5H5QM0qv+B1Ya3fUTKcqWVYMTLRbghhlg6OkajsMe90dntA94aJ0RtjNUH7HGZOdxo0ZaM7CuSBNh2HLHHINF44K2e73QMl7h5QU/0mY72TsbvQJkNpeZH/yjnZZtlWPusdy7N0NKUQRkdu+Fqg3UpqOoJUer0+MWtihgb38UvJuzNGmCR4RZ7s9RoPeR2owqlhwOF93nRxxM09S8rVeAvspvy0T5ZLrQmXu7afDZV5piU25mRPmkNrPy70g+Rr2rEPhsihdyiayMmrXtM9dPK14gR8sRZvRoASX2GFXZsnqch3AgxQCrMPvgEhoC7x+su7Jp4K1NBLUK+xus0ozuxfqsakPPPB943M0nujvk0rBC4Q3w6Rb3OOYteJQmK/LYpVjTOWtj/1E+kL6kvY+3Iq2jOaTpnwVm/YjsN5GhPB3faRDHUpPQB14E4epdakvJKjByWHSAcH/zEREpKmJDNLZoKSxwNRVxC2MM7oo0mL8sry7oEL/qD1NmFyBbFFq0Cd/WzMUi7dPSpRzh++UY4y6wfijnECLg6yZuQN1mS5vMoNDhvY/61U3Jyscp1Fow2ma6Q01jYp0kHB+bhORvrjihYSzfXHnpQ7lhLVWLESXpZAwixx+rMX+hxxc3zpfwvUEx2mte0GA3YSoqIPMlkJuW6lt3XIrhirn5g/cXqcq5t8PBEdvNL7I3NRu68u/4Q+aC8Nv1hg4DWkOcoDXebcLSDJYqDQPnIoD3zrL/AQl5Ch1NkN7RHyxHR7SDCmrBLkOIAEW4PIMV2Boy9RUe6FPtKZ1oWSHQAZyHIWmtWzz/fjcT8TSLpNcCxyUEeITvmo/0c6vf3iTW7k4yUOcdWQMhhIW0rk/5SP8LWFx5KvwRC7kyPMG608BMHPIysaTVrAv7gvTs9VJ0ui14qGNx4eLLIv71Kj7OOwq0QuNahT2FJ7J+5cy7cEheg+j6/FdZ5+gfkQqRt2W2rvHEV8R5BvRl7R8psWrcDFciLBRKdJ03QMuf3lZ/nfA+y0jC1J5haBWxrwzEFoqQuWbG8pnKavCiuad+x3IyQdRYT/Tk/M84p+jCaWEpVkmGRvl+87S/MStWxILsHU5hn5t8+m0RWRXX0sUCam0XnrA4O5ch0uaTp/wJWndZyMyLDpBLfH9vUfLZ+YC2Ih3y2OqawgLIFHaY2FovePB4pUijXqh8EVLCHztVYcYGm8EMhiNZsDvexYftfblj8iXPvmhY74eORUEyiJjUJaZ/qkTfnsh/K5LOXidorDK8XQB6w5yHHYRYosnKGhWC5rNPUYb53F37sk4IKlrmX6eaAGDMZeFUBv5XCCvbi8L6sf3jUKuYXrHfFNUBVuLsxe5YRmevWC28WWCbHQi8QVMdFtr4Zc1SFFxXmthI/wq7s586T6pPh93Ui7SwympH4p3Pp5FCxJCBvWWcNfB0ugopXu7zSOiN6YKDccCygag5vJuQIZNxgTBj0F1czJvFDbxcY8+IMVWzlR/COy5MiWwOSI0pQU+EgHE3ODV+Fb1GgxXM8wdvGKdKWS6egCP4ucVx4OEV3vC3m4UpoqYth7WJ0MNoFeWgoXe4zjWFdQviOdYEqcEJ8yP68JlU+/YThTm8yKBdF4/w7A6h/o+7+0Na8e1R6rxRsUtvSnNv3t018BrmWTHMdQrDKGfd9sVXXoNq8YhR/mRTswd8Xj/tKPvOYnH3gjeOlP42TsWbNJWyMQhzC9MMLkSlgM/muSmQNVUCO6DHjZlbQj4VMHOZhBX5M6n6lLBZTCSYxMT/QD4aqLPOiv/ZZPE9XfVwSE8geMozIgnHuwwGL/E6f2qDu7asZDUjPSF1MaEwdeLKzrjMSbDmI7V2vl0gD/OCA+Qp/h3UIqj4b/rDuV74qR8lSQVPgElVH06MxqOnepVWlEN+azq5AbQKI5sHH5Rmq5FkjBG1NgrCLZNH85efRyJgFeihYCOeAPFUCILCRQ3g7AOuaMcJOz0C4ED5oN4HS6IBQPt87gAR6yD7+ETzuNSwwkaE9gbgPkg3IoJxG65ZOStT1+26wn9NTH2yC7tDcFqMSNehJFYrlwnAJDgRLI9Y2+kiZ8wIX8fqN5e69jdZa6PDoeEqNznPHYaX8dhPFKUI3tOxUSgZPtGU/qcAGt7K0G4lGylxLJ4LoAQ71RjDhSFvc4nGQFuBSWaj6G25f3OU9mCj2+SpnaCvMLr3ixvENHCgGHodQvw2qacSBh9LRQ9jn86Qoi+EGp5Cl7i3LOH4An0QUkq5ap7/bJUWuZS/eguLwdpY4sfXS0phBNPVp4fuIIZSsgeEg9ksiGi5OPGVmiYStaSETs11Zx4Rk4LqED/53Vk2ggwmxgCzgxFcUI/cG99FVe+fCvpsFoYr7mmwim+zlciet2BElOKoZERPl7LQhFyRRcwBNneNdBktPLxLb6ZWMiQeONv8ct614JqZqQhMHxMZbnD66rmudpR2LnOa33tPHYX30+zbAuFhfOuTGMSXAtUnf4eAH+YLq3RLoqP2Q+OxzrH44eKZvL5eGlUfvW9FvFu2h+FSEdRoXNNKxdH8TBykzgboqK3BHQECnGX9KXj1RKJjaySd4xgt4sa/dpUyKxUdr4hXWwXjDsXdE1YGtxvIcyXvbZqUBmpLs7EIyZPTKDWWN4eQyJnYRFyrWnjokmWAoEb5qFX0XF1MPE8wxJx2Pcj5YN/DWm903UQvzEChfuuJo/ClaLoPY6vCb81UM+lRq428kJgcfBFHsp+XjsMWKt66g6Vb13KiOnd7qmncKUnY8gQfBdxHs5Zw+GztI12dmpBMYNly1jUKX1cjBXw5QzUfBOw+ka65ed9VBPrJaAnWjIAh+81yMSWyYEbOZW6e+IVhNoROnm860ReLoJAH6849yQULX2H2Jk8r/CypMq0as56KDdpp47sO6Y5FQiUVTix4kqOmTDXgrRJ5Ic9EN1yI0mtNShqk0bgUjCwzuIhy1Q7f7RUDU4wCNycE/gOvAKiVlKjw49REYDlrN1xxAqaCF/jvH4sCTFJpdWb94Pwjn7ZtWZMUfW6sDQ9A3BsgdqsYvVayzWhUjgL6DAti1k/vAm9XpIwfkw8CFBImqX+QMqWQhScQmBn35VHNVl+91q3HqS67DFNBawwfr4BAg05ijraVNOKTwf1t0Ljo6rmaNNAoNAcyjL7WJC5v3AT9sVBBO9mfwrt2IcwtNECxT2vziR0XnDqAHURWnHJVRzhjd+/KZazxhx+PBwBBinMa11qwlWVevrAWD+PnfUyair5ZtRR4YpNDm+4MhuADCx9qO9ZI4dhwRPkhZRTf6PZeoTgRiWmlwtsuG+dCyvY2+K74EV0mSYclAhGFtWPx1jamBR9hBvFv2U/gik/olTDnl+DM3SMXL6UNS1AKYQAUWPZ1z/HrsKOz/oqaxoDWMUpurmvZbYZuVUsp3qDajVeShYv4qKT/uebFtTSrIoPeGVdKI0ydd+CIb/htw
yOpx1rdVHGSpdloSmZ6RsGNl438WD7a7GL9imZiS4GA5etj81MFhafb50jllILDHnKqXlx7sdsKPoIO/mHOSDRg7hEjFpCGjH+ovoPJ+Zl0QUorq3rUct77egQhKGJ7n1M2xyuorXfGEhHTBo0Qe6ERTKeOqaaKvNaNh04h+FLOAtip96XpBo1YWOcK5DZnoSymCvvW1PSd69rYK4KqZ22rOQkEk+FF0bBTyTpsmVBlm/SiinXisB7ikJ6LIWHPQHaXGDotVOgLDNBr757gbMNlcqmVp9ID0lb7qjVgDl9PbzsthJKeHGRhEu8eFQVlmxlcAiq1Eic1CWN2KCVYbiReqfW+3qMQOhwUyqRw9W1BUjotG4mL/frCoQ6DycCjtnp15EEof8534k3aKwxNASj7EYcIzEFV8nlxWYzaHr3Dmn43I1G7T6j3c1I9Xxh4/0KB/s7JMbr4VVZrmSI6LiPJu3FHoZRtKaFo7vRqNnscjfC5xuNAzHVDh1G15/DNmrX3b1LHHOvP1K82rYppmUZVYsQX5Bnn0ET4vr9QsUsCn2YHWiWIhSQeQj6VXv88o+E/HzpleYi6/SE9PUZnz/vi0RS8xG4DK0MUZtkqzrL8NrMcLynt+dnqfmQw93vwtj3fejQApzEoA7CeiHeDpD5BOpxq5DG6xKAc8BBixYzfpwb5Ke4kQqp37rANRXg2HfbeAv9UduFRBF49SmE2y2zQ2J17uUkeVIFA6MtuCRhvuLDn5gRxUMXOfSLeMKOviaJGDDr5xto4prpcPv9ccrBsHgJ2hJSzatMEi8+xklzI7DKuUSiHz4fVH8oplsNZHzFt2s0aCqYq57hJtWrWl7qN/FEyJQy+Gsl/jpmMjxaaD1c7Au+FkqI2/EUjDZhUIn8MtJbDHTKevNN5O0pge63op2w6lddi0U0U/ADu3Bs3OMAKe2JTt1uVKMGw+RrVaexAn6E+vMFM70Zo4++ryZ10qq3VT5hhEvdk5we1BUvSKW+R/4acokbxfgetCIvIVD8FqgV44NBThj9+FiODJl9cSFSgyOPbsgoJ47KnU1tZna8GYt7gttu8LpGeUE6aVluu1GaEcrHKHxU/7CrIItF+N12hc99V1cHGkxAwXtBj6GWcz1cetPwFnDqdFm6MFJOMX+iIN6qmauk9PgAgbmtyjWGn1wvvKaXlCQGgqE+9C1DHo2BOxZ9XNCtHXU/7aKUfYcfpiKFlNF17du55W73OYVjd3/NUJ78CllBbQDNNENs/RD/v2i6qqVZkWb7NP89LpcNNNq4c4e7O09/qG/PmYiJkdhtRebKtdIqPAjE5LQ4i545NFI7IJOrb3+cGkU1K9bHRhlIGB4ctyW7Ge5rhXxATZ8Lo7/0GMDps3NOjWM52sqnF9CwM0luFw2+4dX0HZgqyYoCJd/YJYgaTaX4NjrWB3UuR8x1mH7I7TpROAiq7tCNrcBg6TiFfH6mD2+QAOBBg1yIxyHczjkY2vjmiTFaToR/KAYkzDXFKJDpyWFXGz8j3Ah+yg1flWC5L4j+P8ZZCiGtOOLIIybm/p3APB+c94YIAK9cOQLf0faUV7nC/nEPGnK6F6dwjDal/wR+2gYBDl8joZNEx9eUWj+ZureR+vkx2Pexe5/XQZ9FlovmOWS+3PrpDyiXf42Mf0rwPXN15nm0aHtrMMb2dxLM7c4w/JLwvkpdPxHd0CwzA+YFtDmCGGUWOqUed+n51TbLm6LY84CrjxcivcdTIbxcM198J1Yy/mAM4AckSAXDqedFq7zpwc+e5FquBmmM1vuhwTKjQ0nbZUk+QYzdVS7fzqXXg7RFW4yJb2NOgTSKxHxMowTK1VU79CEUo/+mXv71Zr+ifWg77FwhMH3D4efYg+PyGPvzuSo3FXr2jb48inUEn7QEC820NX8AC5Sl0auzKpMrSnxVw7id6GsMivhfk+pDKQW5UHU7G5KNGllkff4aOM4876Aljs0ztIMLyAUqFENQVDCWn0wj9ZKSpq6jAIDFHhCYn28UtUUR01GstgvNpvhzWfgoTZF+Mju4bXiCv34pSl+y+ZDY0sCDczQnAVPOsKAvyYsEkYF93n/KlK+IT7h6n88rDVTQ6Jy8Cvv4rXnOfahJykhHoc4nYd1sTm9O3ob5Rtadft10cYpXLWwnThpIk5yU2viqsiHQhWQzWorkeMYnEGoDBSwcq59Uecak2u88RbSonLYpsyLVQgu7CYwJRQMx77xWCQap2K3JqXp1tWxuYZk8LfglpI/BYB0VKkhNtaHnFMsOa0ETOh27PPbSsvPwhyUJ4UpPtI+oSK/ZXChoN3KGVNkFjNjm5lDlx3R3/pCNGvuube4WbM3U+674i4ORjybGSwYdh/OqpEXy3XJqHBk459c8Xj4xlYLYEChfwl/RFYRtXA2RLSjZdHFYJ0lGyQFYCn0aIuN9yCbxV9Ed9VmpLkoXkErnEL/qY2o2UaAU4CwXdPoyvVIt7P2mueflq4Dh8Prn6rvvRVK/5AwweO4qonysZM8/EMRMWvZZT7XW8ad3JighTejD4IBDZhp6gM/Ud92xtxL93HWkEOQHNGIzV0U4jlaGOod7/NeKPxZ4WSkCuJAw+mvCEqeCLLb+8B13wwsLJ35dBi3M90xjaT+zGSLwA0XTF5AlI6N/jZjMjzypGIRzw7vIG4E0pIVMkNVyjB+5aRj1hEiLo0JuIMfzpL+/hi51CwtV05rPIVpDp/EfIZiFpPuof2L0PA49iVMV7/7a2is0Ratv4l9OL+DDR6EeNc8L7fceJQi64AGNy7p7gkBPnK1Hz0Z30Bfbm1/fSJQd/m2CYkUMQkWjzte9WF5dB0r0L6l4yofDHKRrWSSOEY/NMv+LXZOkXDo7bPJVn24/VD0gQDx9nuY5I4n8Re7uQ8lldRBYiqKEXL169kDixAs5pbqnp40aBXrlAuiUBhvBLH3/ZsYU+48m3fAXRoIy2I+lnKIjEQ1myvmLM9v8dxnkjlVr2MIVVFQusJJcSmx+zg3fFLcf9Xj/Dl25JgVLYyc5n794S0LtwPLCPgpqZammyzFKo7gcfNFB0pCB2LuQZgmmgC5kxEjSCTxDxHHKPppNz3MY/4RBSLT7fCCq/7H5oHWFL6YzMMnJr5svQZ71YZZt24mRSdJ3S6C2091jT+cJ7ZmUwRoUb9DWsORlYLDbvOIT2JzYU7exheB3IDUHvSTQ7+CLe3rX3QqkDMYIaaRBF1CQbC7IrOsWiPjwJdVyotAUzofmhiXg+f6LQgnBGWxiszoUH9e669NhHEeRYf+CPoPCGfCBW+/cuN4OlfpipDdehEHvZE4jzojBmDu3Mi74lLYY3y/QkMIb9vzHYSasEO1SAy3JjqcD+FUhhMNrtvxafXb3r2GjQaLE0YwatZpu+rpj3qtVbMFIn3WxhtmjioQQdBAJ/ra0cV83qlRJXKzJQDCxevowr4wSEU/FIz4HEcITsU3ugBKFzCJGm5ZYX/m8q0UbxnF/Vv9g80nTFzxikuewrlE4yTMw22/2iHL6g9Xd2HdKZvNqN/IeqsP5klT1JufEsabvA
SycQryqqYhdFw3SILYaMzlu/qzY8+mQD8jzRefLWArPwu4HJ5HLZB8/r7qXuauTHRiFfPpi7nkx1PTsN3+0hCQxOi8CBLOgtZsFyjShjrO6LtLFRz2GPcgl1Sgd5w7EZsu8UcyjVxrXSdetAUMY7Ku3IMAZYsPqb94udH1lENhGxgDkTBjC7dDkZxS5ADCrKL5fkKvB/vIUl9gCDnXsdr5uxZyuP9HHqe8hgNyTd9omaLZhapDbDpJYLf5FXl7sKtVTX2GEthv9DevEFv16BtwSl7NA3tbuRzDDCMyIkFluca5LHD8/ccvHlsRDm6zHDQMf8dBNz33OOFEcWi308LclhG8+J2FlxXfHcdeQs5xAzmFKWD2AJq4kYt2o2Y6cuakGFQRFI5jYPrZgBlMa63UogIKAoA6eVfmela2yDSNBMibip9dRP2BPKSXLfQeSqnbS3Nd0uBsTRu/Divpztrfnuk2tpTWxy9uZy6hkod8zUrMqbhPi8EA+6Bgrczks5cCsZsAO0vChV/ylRq+x5328SLxJhHhncyNAVf80XJpFHBecAleH6SvE542RPof94AUzScMKOOdL2es6fNWt7uKuWJI9w+nI5Y/JPT87DPE9q96qzPwtGprl1PBXG/yU0w8qrb17VZWUQsvqeP52dMTrFRbYDwj1tlXVGqhyRLbeIOWM+ZJW0KkjOg5NRD8oESAalE9grCAf2nDcyIWJOP3k5PuxXaND6Kf4y7/gsw0mjkl6S6V0kT7RvnCz/R0kjrvKp8VDKOd7cOphhvFemR+XghzcLOohr/gb58LF6+45pxkJKoXBS9u+zXc8WUmdPDjUm/TaPwK/Xbuy4IHglmArLEPMSdJETVy4YWkSwGRQI+3hAAIta3r/sTWympcgbkWCpIMLtZHsKpu69JV0Hhwx9gtLw44Yp0E0te5jQ2PDbTdV+77n2iI03hIBB2q6vwdjsCw3cF3C/82j8yqJbhwJ5fvqpi5hZsJyUodCo8OelGUhUXAE72wBeDSqhZ6t70FSRMhRdWgzTL7JiHgboEjeRZibaEOuTH19P4UCXFVM5osRvk0zFROLqbgTIuwWSaHzef9jaA1UoIUUHOU35CXfmibbjlsUVnYmB2vX+I7i01s822H31sMOHpzXQJoL7YNq6+fwMoGXcj3BJtdcYV8Q01Pld+59EDCJDhtiwcNYuFRsTlBtufDsUSt86phJe/v7zp5/QEGIlq7UFcNljqCKO853vtBusfvXryu98Fvkiiy8T/6Nx7pwQ8Pd17vhp1pO7P3Tc6wfLvssiOtsZVnY04dmqH9OufZT0zsfxLNKvH5J5cJa02iIin8lqqxZ4sHJNKbxAnUNRTKAUglKEzYEWXAm7g+LrpLe/yDShD9gQQTzfNh9ZFLhr95FTPDRuIT0YfSCQRku+hfcg7MgVxrFxxPR84TRhu3T65SDHbpkke41MCUoK6oJjENkEfQLMImToMg+102hRbcXvVIHDuJQ3Bl5T7eSZSuY86X2mn6xG1ud2kfGXGchOJx1K6vtBUhdxIU8OV9dz00cI7OY99Nfeh25ye0D43Y/yTOhRSpOpP/ZqbI3Dm2/7dWo5dexJpJtTm1SxDQD8EBjTgJ3hx2CYlpP05mbw+Yze9ORuCLuGSH8t6RtwbGEx3kqEH7wLH7zT2A3mn8tkh5RJcNddlz9nejxDXr2yAj4iN/HYGQjI5gg2wACPnRanUm49oqV4T2OsZckUsiNgvzk4QwFk5dqJ2wKxvsma39e3SuW81gYwMUQBigpiyL4whFjTb7IOo8CqUMug/OFrKZ1O9F94cGDGlEJH1b7T1oDWpa8Lulqcy5KZWJvGOhdWgHRLFQ9f1aN/DRS9QanyH2Peyb0kaXHqG10OE1BpjbwcK/Gz8BKsUepAyw5BrS+qai5setbd1BkqpQs1t8DonR9byHKuB/3Ndo46NH6ezoMtewAUH8zT3vJqiChnpajIDTrcreSBdfLNhl7vUoYpfc3HX9LDKMJ5bxamV6RXDot0+NvfzvOAlP4jVCBGH2Cb/Tc0TBUVUfUx+PSTv69HsGtOHKpJV+ksaOkzJrUk3nr4q+Na6J5/A2Ay5jj/BYC0pQPznEZC0yy23gcu6GHI7hYq18KG3IehixMkHXGkNIoZoLZWqaXnIiApHgPT4o6vx8jrkvmXExBOeEwNVkqe9HPE4cP4IjHETyF8V1eMv60etLU5BggpmF8RFHnggsjyYf6LCtqiw+v2g/JvY5H4c8eSzNSxpOO5zdGqj4YNzJZTsY0nAQAarR8gW0KKuLKLuvGsnpJv1finhqVeCXowkMm1JeHVOlneXtaRz/TUjUGpLPMKqOHzCrB2m01SPmdrRzlHGKt9wutBGrczZoOBfJ7NdUEfsGFytgrvruSdE1FiaXI9qpfB1hOVH4vLmEl8xYFRDLzBHGWAgRQwgUvpM5c5BkRfR9BM6aPDiwO7mzOPrkBRvt+ugyWS3UM7r95a+4Nx4PEKRQixyn8bav//loLHYblV3hnXME/mqxyAnUwEfy5B+QvIUZAvWYytI+hxqBq7AX+vvzY6vf7eqc31j6uefl0Ytc1GOp/m1D0CPV8OsU+xA4SQkSAXeOhHk74G/Nf5J/oiSUR96HYbziqeKkPA8P7BAfv/MI0rvBRAIDTMP7SHTxMcCw9gvip+0OLP50fa/5RIG5Qw5OOwqfeLEBL3Fg3E2t3l6H7G/F+E2tu1UMNkVwdo5df9j6fz0zvN/FBrTR0XQxtFyQpPxHLlTr3hihDjnarPc81+JObX2a4P7o9TDhcNY6ECl8O1uXYgoQoIAwI9oVAzA1Z3Y3iJ6OmGFL+kveFtFJhiRdTm9pKKNNu6HxBipb2elF6aqoPDIlc9hhL1RJfvxpuD7ci9k5tTFNLgVFFA9Qlixo3L5a0+L9QCzvZvAHPbopK+Yc1jJyD56/+ehlHddXV+2mW/WeLWpEt42VWoX7NSFp7qSgqRY6UtOtk97ZhbYIKKn8ZZbx0ic5zCMGIhRfBeXtxOrwPrgbY5x4qAbKE/gvBlO6nz4/umeOl4Nc27JbgnbAy7DfI939TI8HD6fjUBGlVnOy13x9pn8rFESb7LQwIukq8HFHBnVw78v2kG1D00wryZ3do3kc/1wKtzdLv6aHB3K29Up04tftDRClCOEsSUUEoj0JUBWfZc5WQfVVdd+E8HVckccrjh37Hj8wxMfmXlu5+pIdUzHOXjMDSeFGsLENRJhhE0bqBWfEnWF7vSsgg6LP0WsTtrGKyGAawnJtR4ai4F7wbUuQk5sN7ygc8gl3mP6iurZR7wWWtmhYcpy43WWHT7Ss8h/5rv15vFKn013v3mO1m5HrJmXScbe6CddcrPln6GcNnYBcIXvWdqhE4GC27VCB3pzjLQB1XiUqQCXbnJx5+OpVcTTUNx4U3L6TP3NaWE5bCY8/g7xuTLfdtmJPeLZBN5C99EsrojKP4XhvFhEPIba4hrunfTLyMBs68fVVA9tpJI8QHukbsv1/tFTHXcefP+410o7IIeeTO858Lz96PzOEeicaIcRyTG6lo
GuzoBVMUj4u/qo30vN/lk5zQ0SQx46U4Wg+U690kWrLJXme2qhN1rj6RGhaSMZwUBPjvzGFDdexFcQi1eBjzov9uJC3vZ/AafdL7q6mxq5e2Vn3x17woitGDMkyJ+HuU68+div8a5zJy5uFOAEqpK/OQkO5W5ILIRaNse6AAd8YI8eyJImcxC/9JKgb7viCIUM+POwXWTfFbbcohZqq0pXqh1OxfWp8vyn46ZpZ+Ph/uwd8kvLc8t9V+Yu2rI0pXfVz3uJWzl1GwZ7W7rcl2lWMCi6KAvkRQmEqMnK7eZ4AbsTPvhs5dYPkJT8ncc1c/jJ/8o+a9g92Uo9aaSYkiOYN+iaDPzpYTz5J0eSGk69/OoRu80pGb3TUFDjink0Xyboqd/Zo3FHMEtLnSpEpXGQacRnykcIpEs9yw0BeTGCkS0/p3YqoS0d9Z0KehIg1Y+j1JSnplF2i5N+S8ECFNdOGeSbO7gJMuAH+xFGV4XlSu1qY2LgtD1IoXgtUWc8kNh1svYOaHDkMnR752Y9B8tEfCCNm/2lDaNaSC327MP6SiDKUbiu1MwmHSHXu4ZqV8Zdv+hhAenQm8c9UTFJb5lFv2/aCZGurtcCTYjxyatDWmcLMBm1t/8biz3KdA4GrRA6Iz1OCrJqdfLnYz51eMYvUQIp9hEE/kg6hyiYH9C3GoBuicJi6vQ1iFH+CWD/51YGwkbvZgSdOWv4qztrdzRheIoU1X7VukOtPewaQMPeJVh1mzDp+DVapKMWuxzHmQVOivsdagyotAk97H+xTyEQsELZK1mKQ5MgDcgd0bUzdszDOuOfM3GmGJgsO/TYrI2uZynM80hlKezpUcODa5SZ1ZIzMLknftDFwBw/wGA/nGnDkRidwbEM3aTYEODe7E2PuelIWyO5aRS/E4R9KzFK6QnceyDxt9jgYBMQUvxTrAWbF77n3fsX7X8ztNJ2QiYCq+RdNQW3bkrs3U5uqwlPcvqH01+dTur4pX4FqbH0bub6HaaSXDIY8ltM51RcdUID541j4coSis/mp9nMgprHQmgeNrS8++yMPpLhQzXz/xcgSp2eJl5919YXOO3G5DzX6e0ViH4lDYZZw3xY1vD+v0krUiTIZBg4osYX0RF6tJ4am+k47UJNBD8K7Heu2xS5BUUxFdoWuS/xpM6/vUZc5tyokLXNSk5DODLnB6nKoU9tc5b3vPIB0RX0nwt8asomKfozP/WucigsHw0Hi/Ft+epC12qhu7ANETpKuswiYcLt6+zg+/s9tKyVdmVscfQ5Jbd/FAckqmFZBAm/92MkCYchGqPI1qHSN9HbTaTzP83g2oSZAWm/uV0nmrx6DA1X6Bq2+gNo2vCO8i8xa41Yg91HnCAu10U3XsxVYUbznTarK/88rLRWySgVXfDFQJtKqWIap/o/RernwX9FymVB6WgDbxYsURI4n9+qKPUa4QY69ZPD58YRMRvwHuPPGPInHW5OjK56bUH8qYtqZu40oVxFiC5qDTPO9w2/aDJDrLxIIrmi1VEEQmVNAu9WepXOm4rH8gR6pQb+CMgFd/faGSQfot7A+7coAhZ9pe0PxK5M45cgk6UCSXMYcZDMFdBTvaIltk4Mc5Ywbu+sQP/8rQXRnmtOta4pGgk8WkHwcg8iEVS3YP9YGu2K3ifA6GEgbsUtbtQpXBz3n1e+GGQtYJR/RRuLAStE3zDBGCfTBKP++lxpCd2M6Vr1Tap86kE/Vtvw7xNbRkYA7cSXKVLcc+fmjLf3lDVHFQty0SaOjTeZhgPYDyCEJqzf/JuPk7QHWLfvUJGvxWMqbBq2B+aIauwKf3AecPrhkpWIjEbIOIh00oXpWY5EVGIxAEN6c1Am4fnefpLBe3zDzlG96p8yQxrl/L6klFJMa/9Kd9OD8D2rOOzb1UzlI5zfhstR/PQGSd7CAQ4pmRmEdeXbJKCeletnj1VmPY3UaFwRPk+wahgdKe+POzq1dPuiOX6ebW/aSn8Vxr8eW/+pFsbhcR9NtpRcotYCeCEEjvw9BqtAKB5Tiy3h7nDdEp+8J9HiRpyNSCOxvXQ9yWs/tOaVpGQ96BdOn5+eyqglQwbOdw24JxCf53T3OietItWMh+qHaxrivqWxUmjPaSpkdO8yV8wO/pRX6beCcCJ5EYX+wUHr70HSIncqotWdVXUKnVlOGgi4rPlB1zkSqEA2l3xMykdh7eQKKr995viVp2KjZoTgq95rBpUl+afBmfTcn004GjB8sGfUBibdEi29WKa5Ga5TalL+RdWu+aI/3lPVzRVdms+pWo28/ixPjB8CKTWICfk4Fu7x5TKWdNJgnj67DyV5UCdjtJycFALEOyf96S8zgN3ajv4jSJe6/0/8wfEHhVt9syLSiO41Def4+AzYdr82IAia/GXcD6MdRTw/bmEjzAtZpLXJHj51bhiR4LXJ/ntxYMElmJC1qM3SXwI/AJR7kvt6Yp/cP97F1QOkQF438uHhAUCsFXsAPbXOxzaq7JCYSye8EE6Xex+FsW82oDJmdw7xvTxQomJGJER8ZF+poaNLGaRAgeJLD5YmiFkl6EtqYPxmMvnFYpaw4MEhk9sTuuU2um8iNN0N+prfXlCZtLRSRNn/WeFyEAUou8ti9d+lcgtHV3T24pGM2DVCNzqr37l/9Xfuf9GMZawYkiS7ERk9O3SLI0LICY6XU9npcgoeM8Jb6RjXADdAp5RCrF8NqYfDgENvcbZYcVx+GNy/fPaw/IcuvtAQqut7NVP5o9DblLI/sID51PijMMe2z7ldtP+/c4ozlJSKH1WGrL4KjfPcz+pHieLwwX68KAtdVwP0Vu35nJnfnZQ5Kw368vvQL4HpNXLtCufWFOiLcP5wrkKqE4y/uROon9ZS+HGmEjay5Mmn92HSvQw7B4vOOJTzDtRK0WFrxVLY593afJQGyLtvpvFQ6NVJsmboCdAidmniN5oL+uFX1DflQvrTDB6a8Of6agXUpQLXxyU2EhHw4A9FGi2Br/ZAAYkaHrQRpEsXaZ3uY31MeNnj83kjie4GjDdo6jTM52iGjMiMaoa1uLCnwqrIl6TIC6lZR4roidv781tZ8bfZppGgms/kBvYKwcakLLIOHl5/Odbm1WI9tHDsjI4hz7G6ki5OYljUmzly56D7pW/3g970NZllic4lkPfEcbPkanbntb6j7aZVJ28v333e+Ds8+TuGDN+uDrMl/ely3xdpT7oWQ4HMm2U5lcPqK4pKCpOUBah/y7mG1LuIRy8uPjGzU9YOJvSxDRp9icVEOm36QPWnxEFNSMqj3Nw58yq5a39B2S58lrKtdeBNDhyCnoVWMg0DLIFO5dCFlHVmRzpTZ46cD87UB17fOpA+d07APkgPXy0Q8Z80bSnfkfHUMVXkUBLx/u9NIr8IrZXn6jzDQF1wRJQwcuFxJLKwRZ0zuGbbEVo439/VUesU8F9/xeFyweN1g9xRuxrjl4bdMyhXh3vjGj2bf9I7bTiNa4MVxV9/GJraN3BGUL9DVKUIvafy4FGLhFGNrVfLwy4A57qto1Ycwnt6kPeLSvqut+Z38AKR2
OGOR7ov6xKKdIiz59ee+8JCyOdlJ40+Et3H48iMFfF5LgLChJXlnOSwVQqUmcPksJFScB8ZsqyMnOttzGFVPWEeMOaA2T3Y0CzV+jF5+if/4W32g2GWwaSA5fX3N5bA5AVIdEHixouvCaVca0IaEKGcDsYoTwuShSiiRfGBxC2b42naIKBUfIA7wwm8isYLRIJr4UzUrVJS6EPeDVMfo64AIsdZwTx4yyVemh8QUJswm05PDGS5CArzn37g22PnqEluNuejzOaFKKHuwDFuhH8ROvIBpZxB3I5m93B/LqQ9QAheLtRQ36tzTz54xp8Mt+KM2qri2XtPqJEEASeB94bfCyh17ZDisZI03863KV1i8plPo8QMvmgjkhp4hulcQb1LPP8WvkxbB0wd9GZ5kM2rlfArnK/eR7wxxnNAGmZ/dSONDU1CbPSUUrUFYBPZbor7Q7zZ/Du9fq82y6iAFEoXD+R056lHEPCVT2N8Lk3s2d/gYJq2GJEd8lX9dCWeOYCCfO30DP305QJfRf7a/ZGlUVhuBVCI1JIAodocIea9bUInRHfIQE4wfHQk4SwQJoO2aDgwaA7wKZGM16XutgX1LEYMAmjud+ANzwwBaTYlSJMXeJlSdv4jEP2loOz/NTb57FbhMP0RRwJHfotqHhvz16IsIFYo1s4h4Hp0OIhd3Sl+ku2DwKWnkUAkHA473xJo9uY5Ub+3hPhiC5Py3uRje+l6YK6u7PO6X1iL/SAmF9InjpyR8eHp5EeHbsmNhk/DtYkBbdDyAuoGpM78kiShacp/c30uY8EzxwiKP8El1xfi2T+sanWlwUdIG2WHHoQ0rpvj8beN2nhdjh/+KRwmuAtBWleQ3z2XO3n9ViTeZWrVC/FzvaeYr9WzPmUPSVgLTa1Row3JHd8jSJ7y9yELgsevHw+WmLLFIhvWpfVyqMmS3Cvdv9qUtdHeKPeUzh+G6EpjagifqNumsWgSwDuICDycAUsVstSV8XiappC2Ogy9/AC06f0YtOqaT5FnksUntYxrwdNNijyYh7Pby2ubwYCsOBoGnRQVfd75h+GoK6EXqY+Ii1SPG6kkVcx0ER5t1tSVL7xhJPDVUAiyKXQ+Y82FWIdcJxF0Ir4JigeFrw8TgIC2UtzKfMui2NNxArx236/K5Dt5paNNIHpaJiOHw9m07a95HmZnDb1HNA6z5/tQZtw+Kp1s3KkA3NnBWZ3zHZ/tvpSjufWIyjAVAq8dsVK2pZ51K8gIu6WuKKLkuZ0tlCJ0Lh5O2LPYEDzQyZGBv83Y3oW0jWanKEGjKQ7dfP0VOykIer2IksgTHQRBf302sp6oEH1DD+tP7hHVHRFvgXcL0EdyX5sPRPnP2MrHO8LwIrjoAnHliCniRKgHsf+Y07CuNvkBMJ/lYx8vAbZahdSMyfw7ba3BjbiwOTzUDGF/yawKBz1NBod4/o9Ork1KJAKeChj/y7JcFXrT2V8QXBZfhRAw1O+hv8qJ8HctQvu7fgVj92Q4sutsW9xOVeKO7MWO1VBj50c9LW+QMVTwuTVBQMQ4vlIregCIy3ZCM8yK4w27RMOoeJX8s8vkuuHh2eVbbguswx17WSrWDUeWQYnErNczkZ+iA0JrT9zBKKQWpogfaaf6WI//3XDswoifa1tl1CyMgBRuwo1sOxX8xYlS+p4fgkpthANXf7+H+yLkaPql4DVHC6QaRBmB6SQFCn6OXYGiZal2PB//v/xleuca6fB/0tHgRSDL+63kgJrhbGYLf46dTJ58MHlmddsKm0VBSOUynhlieKQnRctPlyTqUcwFvZH7woBhHRBXdRWvC5n5+0JkqVtzXt6HIfeJNrM8pA1J9jao3j79Zip0dBFMZJSXz2H01P6DMhU+6p6cdHwDLwWzTUHcon4m/+WU2MEWe6gZc22gkAhmnqjEbxKctLs4you5wp7QmHYBlgwJIXKZZSggKdKOzU3EkVF9VsaGinoZL4OUkFcwx500Zfi1e7wn/gbIDX3XIeOd/dxhHzWeCxEx4Pg3M5+cjNtGc1WQ7DOJERPAt1RZdCgfN9tT41aBl46bIM+S/DDI37Qc/lK7eDO3Gsr3zM+Zx/J41MEVuiVmcHHeij+UHl5TwNWvCqGu1D6rLsifJsj2faIkBsUglGNsmdrpR4SsmgF6fQYRjLNcBzoAT7a+MGwfcGFGWhvkuV7Vg3THNj5kUT+n7OVh5MqfrJYL8TN2ObS2dDdDC6bIdqUfc9PQlJ/FvAbiLJzSXnNf6nHFTkZdQ4BtUDgADO0NX9sZ1l1fbJk2ekapKPQnTXSGEza5xqQbGiG+NqYqn136pwI4UwXpcHGzYOYUnl0yUD/idMS71iloXNyc+/7a77Cc2nj/oxDKTQRkHFO365PshRLYqIj/d8QPvLFF+tmv9NiZW4IJgVPnl8EYDarQGS2r9QHX78y+IEA48CCtzTzZB/BCJXvGgflb2ynO03iCYjuAOkFdUIVW4e1lGrpOhXr6lQOQSoNsGeW9JHKON4GgiUa/jvb0GHT/j++TFStaLOnlSnLGG6kAmn4E5s7IN+tcf7g7LKrd9nCZwp47YEU6KhrHxx5DZqrJpZD9NOKUd4umVDdcYJ5rlD/Z1Z+iswdmsGX4Bzg22b5hwHkMlPpQOUt1mit0shU6XEhtO7axHKZlZnZC8c5G5mgSqlDQyILDsJUZAJdekZ0G2oim+QEEkzpXPyoo6DbdalKBjf/dVp+EbqNloatjWVv5uXGTO4H2ewcTEJlF8Lr0McrY2JfvgOgghIW8M1pGebwiOcpD+JzOUCpRCU7tE0ld76kapUMKRpj6jlfddHE8WYanhhKCD0ejFOZ7zK3rc4/UWn1X42EXRmwPQRq8aOHl2B4fmzUJQYkD1p/7QQC4cmby5kZ340Wqfu5aUaHvDl3qVeGu7YXFZUuwUgp5By9YEFp3ZYiBlgx6KpuaDKIcp1fpJwXj/jaqhMik4A5+mkloKkr3YSky+jVhHo81Qz8F+dOtgYp8c0GNlyt7s+cDZKvylBEiCKIs6LEyuecMZGxnnEbCmtLiFFn/s8JWNA/dhfNOBCKIoiXjvF1bUqQclIH6JUGXx14wOGk62ZU/TlbQswq3+T+4BIvX+HV+WvAPHeswGIyH/KIIZMA8oirmxrFMgz4rakx7w11/5sMKlLY5l8Vk1LSiU9yZCov8+cGnCgXegD497DcHsKOUcxRphoZ58EtC7ps2aQS8dy5scDYzW3ZeS+Iq+JWBHkRySFADoVExplacFHYFx5NO15Xhoamk75kQhQ88YTVv4SlH3/dccuZQxT0RGX6oOE4eOY0IR6qEXC6yfNUxnKvFQxaMC6EDfTeBimh57dux3xvM7ncpFHDfWPsrx8xdQlCv6hwkU3hdiyNwa9nt6f5sE5QW+KUdMhuns76mpQ9Ei+QfnY6zjNNpGPe1bTMH/FgnQC++shbA9ivnim87fIINp5gW43zvNEYVleX+0jFGcItysmuhIoXNVXInJeswYH44irfJBdViyFiHNg9YCxu+sN3cr73S2uCLrynutZLO/wsnP5fNgSY1
iRopSOt5Clc0DK7MemshOR5lX9OvB4xZAa784cBq7yv5AzLDhGURh0OUIHBs1AKoj5dkOXViQ2NJM1AiMoMhQH8uXlKX/J2E8Q8g+mjv0guQaPDObPn7oUtP5SE9cRxxLTkp+p7FUYB0wIJCCOW5bETP77IpHRMH+xLStiCZlMn1LjcIUOXDzCf2ygA5kIqm4FhiGSjz6nL84SYmNIVO43KPbVw4I+the3t1reUKVRxYHL4PcZZQQbiWjbO1pbnWTD3/fhFrvGSAavsstH7QqKLF4YST4sX6ZPlXPlYpOZX3Tb+GcZeirA3AlO4QunR4YyrtMe3kWdPmejn/lBHe+Tij91BAT3Pj0ngL5xCoUe5f8E7QC9AC9JPJQ5OorhSY9XPPv7gfV6SnXyKkxfs5ijoUL6BrOtgEuJeYb1CPv+qNGR6a2/Jnug9646Yqce2cCGTk+zLHDm9wVHZZd+koXXUgxioVxo/zKkeP5trhIHrCTmHi22v8c1iOTjpL/fs0Ahv5XB6GvnL4U+h+zhMdpAN3f9htSHE33PGpd2XXKSOYQmUTOLcodqHkXgaNziJHkJvCHI3RD8sNkTGkJXIaqEF5iyoaWyKG7nk8v1DQrNInkBcqsC7RbhfpO6r5CER4gY4/7AgMEQk/Kvu+FRn47mFJphqK7hGmf4c4Ym0tVl1egOnXylWZarjzC0IVcplOukE0jy//5qI0uZRcTf5kNey3ZiJ76jvdL3Klv5nGfsvNw6/wRzS151k02vZHTUMzPmqqvTPso5Eb+rPsOI+2EvrMQSHPIJ1tgDm8bDV74qSJk7gPHaVG6rUBju8XmnNaLL1QZa3dEUivKmS+G+zUYSaEizo2g8UgdGryd2uRXdSpjMEQ6buKxZfTkKjRZGBJFD/SPqQNzMA0281YY8JRejEy/P6lVrT0SdiP2yNtj4mh2eLMuTNAjfZPRphsOW6hMklhtnB97u6EmRTRseZU4ye9/JvtrZmrfO3xBecCFGSCNqYTaX0wP7HMu1rUR6rvD0G8jxwOE50GTPrwNqSYUq7Y4ki/poOsC9oNwF9wcu72YePfAbPfjurWGoeYL+eesoRcnvXER4Iy27b2OQT0HfmAb+eUD9MNLmRe9uGh8phX1aLt/T59GXT7eV+Xgki5zx8o2JRXxE93PozHcPUnajr3GEFuOqgp6qVqAPMOOKahp5YxNTe+vqKP1AW3GTlLsu6Pp9Vl629IMh6nHu6EQ8KBpSpxA7UmjiCPnclOUqfLv8RG9P+suw7JwTGqKR/ydJ099OHUTAL1fh9+oIkb8mi6CEQDFiTkQBDdzwR1Lz/pI/vieo7R//JGAZgeabQZpWH+ZWdsuMJzo4gKeXbQEC4STebVP2Cc2HMjFocNXUpyCpujlkUrl+ilMFtatg39lS94jIA3my5f1R4D421mlZ/tU7+DKTvSAc/NW2av6vsgHyA5/sR5uq1clHECH6Mwj/FJns06IppQlW4f1/q4rozrPNMelzd3kKD8Xx7m72e81Hx/MgpXNiaqSaJkX5CT6bUfobCw6CoNnWo/+ms/TZ3fPzUU9Ntr68W3c53CVPOhpZnpzEUJ5Li6QFhJbImBPGyTrLHH97S6HvSxOmOvi7b4kLmqdlFn2/Xr4uK/IJo8jH4PeDu/ZFxf+sifnAsFdd6lgKzRj1j3y8ol3gcV/j8F/91TCX2fVIXEcOUCWxEosiWyBYfpZX6P5YLCOH7OUDAnzh9PSBB6mFoD6L+g8KAZrqbU4nQqo5eYXqzzkHvfr++VCwnZbg3nC+EY+hogMjH/GmkEEHDzqel3cAAsrMKdKyPkZhUfpKbwBcqLyf16Z4ZQp3r23bJnMzmk99cVlVZpcw33D5NTDyGSEK0Rrn3H8vMbNlj2hJHNReOEgAEoIWuFRX/9YBeqNNWyD3E1c7MErd4FvyL8O9Gl9awxupXGoUcLMGD00NB4owKq1TrghLOSo3jp2vIsgdEv/yNZQJNWqC9gHdhHJnpciEoQegRD8SJ5cdBgNPo+MUsx1SsfCic8th3T4p/FjWMGzxa1TAqy3DwjdzSvnY9CBugZB5kWT8G6DaJms10W2eALe3e3ojLvVgvsrlGUaTeQyih1RIPtIOJ4QqNn+fPIvMdpikO7RzynlVZWn9gCE1SDrFJwTDVzbKAg4vCUphwpZHWarFpfWJM/5M905obzeB/LpP56N+SK6ZqxHXf5UlDBF9Yox4ovAM5UoP7CPvUw3GQ92V+kcweBvW0hYz1eY70LBRud1Kxo1SyLYWW6tcNfR3+CoefPaNWKizOlHHaiPxe4aWbtHzp027ULg4EZDtST0J9RqGQZbzr30Ux9/2DwpaqJfs5XSOmQzdbM1vErtWqQzOD+qM2C2Ap9BovBrvq+jkbFWQM0eGQgRR1g8FrAW9jUa6TlGsTozFvkY7Zc9RaG2UIipUOod9ezzdbmTQwMxNK5doik/yK33U6RrpoWZwCZJBCg2fEM59/9/4O5Xe2gP41WygQJnRpNOQ70cLHp5mbyzv5qFqcKuDXk8Fg2ugKMBQ6XYX6zNlVg3KlVCW/wOW2Uczk3ZoKEJzFRiCrHSNG+RBn2W9K9bAph/J0nVwpzBTYU+mLz3avByN1AtAE0mzhzDz2kdACDLqfKrGvmTSHRV1rfaOUffZGmZLA+Oyti/OGiu5ebchVEd15i81FloycvHWY/NgWfPByRPd0uQm24/tnXjUk+/v6Xll+OSfDDOkX3Ooydzk6F+5BksKeVYPwTxbz9SGXcuoEmmAbGleknuTnHwi2MGAzjCetASMpp1VJz3uomopuwly/CuIYMmJDWJdCK/uAfhPn2SmD0GlYuVF8UWZNs+fWmrDkQHibw1EeSq0Q2Ms0GDYgx6JbUMQdGXxdQcZXqG2iEolJm7haVqJ1eeMHqOIGotXTeX8nPpsc6GvfrWY07ZnVI0WMhDjknxMcW/32o+0UA34icu//jQGLb535/nguAWK0AbcGy/RQpkc15nb7fuZqP7u9UBJCzzr5KK7+dHDl2X/KJwefpf2Oe8T/pNO7M1FuPqS3L87e+gsJdxor3Zu+46fSuNIKY3+LjCpgq9TiETxOe4FYghIgQauEflX1RPQD63xvwnPqyEWSTUSXC8q3Wto6B61y9jquuS6UGmvU6ok/QXIz/mIrPIlHtePSb7MY/4ePrpKNiZJS4jPYNSzUzhJXMfLlipup6CHqHfxb0+wtgMKkz8Z+Glf+pO46pYbwX6g5KM/euosxtNJEYcqoKE+FwMiO3gdqFprcl3xn8yQMCp+lv0LyKk+PQV2vhQOPGye4q6AzerXXMvnd8zEITl84vDar0LzqOl/QYcOqImuPXjH0MMS8V4/1nrHVHxIWi807haxIbE6Xl+aJtw+A1tC7ESlToUYUlE+2Gtbs/91uDiYWolz8r8BNiI/V3xRv8qnU9pPdHjibXecFUzGNy5HPQFDXTFIU05SeQYC1N/gq0vHK50iI2pq4op76xFAk1U5pOxx5IEvbzj5mwpRSo5DKElHo3P6BcNgcIRszsUvhTxjEGvGch/Ft6NGN
wYp00AG7kGsN4kd08FanvULBkE532hTKt8gK18wUm1eLbih+Lfvk1fca44nMWZbqB9LhfxGDpSxlda/DNLn8zsxI3WSQrtCnSyGNMsvxoEhnS1cufoluJ3M4NMcc1Ky6odKYRw8UmlK/f5WSOyMmP6DJoErJcUZy+eBqTRhn24TSVwR4P6yVeLTHGLA7WD0hBnLSv+rcinji9KxXiJBJn45jUPBSNLyXdV1ui6wd7M1kmBBe94InR/0HNn4pz6lTMNU2TYqdK+S9iRPR6E7g7go7NEKYmoutHXzFmKS9zNXwu4LXXvpDsmQZHCp3erkzSalqWyu89ShOIyw3xfn8SxrQb+kO5oAZXSVO26rnnGNrn3EsS1+8+AykdXZIEw+a8KgbKZiVqzRYMfpQngeH7McF5aXGHn0hbVQ0HOZgcSwo9gnRsOasnBiySF9fQR8ryN9cbijmJVGFK8vWx2zw95O0iaYgMCM3pIL5/4VsYN3cEh4iO0suLYxouLDOLbwENmciWjyeN8+/ObmOuJ5TsR5awta7tFvLf1QiydaDqRzQEMnLw0TfvXDaxK/8cJwG9Q8PsQrkHy9uO+FRVBl3ijHFZVthuA9A2vo+FxSg3ST6Ypoz0OXWHAZNVPeAXfWRlAEPOQY6d8+KEunRfpbNqXFRqBfYqQ1apG+x+9WnM1AXpbLL60N1u3ydkfgTnDokTHs2eKBvvvkdnGUFubSjmUqeLkhSPtV2YiYUeVyZ/zSAsIMkFpL5FphVKjYVfCTEO3p+Zs8fpeNf+jXbQ7V9YWleT2WgMT3mFhSr/Jto/hfBx3xomhXp0PbhzQvS02XsznqVIM2FUMbxrn5Bn2APDZJjcg19yunQi5RFD4TeHQ0YU/jJUwYfWEZVAXi7rlOoKWZb/EgaQ+izDKnN60O/ruW9lcNHqHmxVkg6OgcHCjayeVjixw91b6gya5O8KyXS6Dfqfv8reAbSJC4kCp1IhQcmplUrlX8UsUEPSr2tC5aNiLPPMXf6fwYakX8Id9feaor242pimjqst3Pu3w1kpavgjre36JGObZXXvOmFiGipAJapdzclC9EQPVRhcnUDMjZTOv+eR70hGPG1tLwblT3NctYjusqet8ZrwmQwIACmB8m15ZXzvvDOpR4PDZn1WXE5lLWXqen/Y6ekN+ZOAhF7ggCBcZhxUvWe1/sZ1UycbEOFn2R7eki/oFby69DQDNEpVYJY2Nf7ATh+Sz4+u+igWaaiBtZ1KDfyeJvHUucoD1dHA+GSrKITqSxc8wtp1nRNmGarlvVK6MohugbvcN5olHk30gbM1apuplpcFNTuZy7n11Gc1ZW+DIFwJd+2hbA33gE+0kmPQz/bkF+IvXllF8cIBYgbjdNMZwRc4PIMcjpZUgyBEGkllLddNygVqzxTf0P0lxj3LIdi6jWYBTPsmDEoCwgRRGhRp5BIA5jWHBQHNagtyTAnkSGPZx5QSsKLWJmbriLhUIeMYUa7C2d2B8pPEbgyl4a9utDzxY59KNUUnkvNTatFDYMd5VgBd+bgfLyk/Jq9Ng5qyAc5ppTJInsAv1lcmlCs0wGdPOmqd18ZED5Cfy1/3+BorUMjXyafFbTkTPegF1lvpBpuUru8cByvHwsmyQjof4RgykSd44mKPAWvrkGfso2NInOuP/q/3n3mjhHTql0xl1Qy3tFh789fa+TJppgJXs5jRPTmVDeCpns5MClvOv8o52AmA5XZ6uHdV30xMA0VcMHVGVSRqp6EpsC4GyfoyiaGV8kEDQCLwXa9Y02Z4gAmRlqxhADCrzkf0W+v0R2oOtaA7MdlERHdfiViflJODC0qiEgVSKEOHdSr1ZikMGOchmKN1Bi5vLvfLIBKYhM85KnO6B6pnQXTG8Gi3gECEVtizIP9anxZHxsI0NwdhLOFsYyh26KIk7oD4spBooGa2tP5XyZry9E5Phjc+0r+YCyg91yVxsEcDJhQclhivaQNDMoPINgdaR8GE1Q2du/kBJhMhQFlCSbycfQiybxoDzXBBPxcOvohr80ufh/5P3XsuxIkiWIfk09dgk4eQSHOzhzkJcRAA7OOfn6gXlEdGVl5lT1zHTVHZEbcuLsA2y4ucFMbelSNTVVKBysMcF0heUbLcDBiNB4htYI2m/xXCuLm3uz6povPX7riF9Jv5Niuq8GPmago5sCt7B+n2aAGi3/CzanMMPQIpBh57dfJt9lnlG6ROHfp1fpw22LLnu9VZvUfE1VdP3lGmmOX7fdffW8wFITLh9J2B0FdCbHcrcYTNOcBj4LdAw6Lc1L1OPukcFEV87hM7617lwKthTrO0uCv8FH6VBoPdL0l4tMZfkDbA/TVOfVWuQWYPPR/Hoq8qZJRj49hca0NgBbNu2cp3S/fNCe/4Y6Xm2GKd+a2Oz89sF43nmPMhymA5SmQPEkTvGrz6pKhiyjZcvfodybqybd5sqICJWYmI5l2x/A0kG4i3c1NoSiEdxT71PqfM8T4sbLGmLqGwleRTDF2VuLKPUgepimEVuSyyCbLAjpOfKs36DedhAqQKWReX/z8sUPKuya2kuXvCMH5Ve/aVjByccu1RDvffTmUioTrKPH0wTDfyfKwZxUL+gc42EwhMdAqTknQe9My8QdtFwQBAoXijYJWOaWXnDnHaCsVByDiADkoShzsn/1hGOqjB8YIA3IuKbonRr2Wcta8xq7S2Fku9ROoJ/FOAXfyz8Ak/fBNWUhjEdYQl2/c6zmfhPwcaYkUTCDnK7o5E2vE4uhSkdIGTCEADgWHXHVkwjelFe75d8RGG0pYk2ko/Lpst81fZt8slAUrGKXCWUeizL9C9+RWNh7rKPpYjttG78q6ubqu3o9ivh3Hoz3s+IQGox6zV35GNv7JrqnljvUN11hA0VRO98e7CivBsNvEIpPfkP8q2Gu43PvyiYPHERdHTpFVo+B8+Whk9LJ9FTV2hcPnvWV0IpcHjAo2Eu/QkJeRw6yDTvXAB2Chk/GJg74jEwETLfb9SKt3bmO7r01X5TYy8Ib+t2vyoCjtaCBj7tKgHro4leNezQoR6FEvaRHR4Ula27WoxJnsoFs/vIoIIAVOfldN0E88SvUOrKvFlBQeTDdKPA6v+n698ts5Uf7FkQtvhc36ueEOrFq+n4em63A3vCJVjLQ4qJjicfH+PmynLWI3ZytGvykIcusTOAqgnUELLa+jybKNYrQLnjFVT8ujwp8K5fyIw4D/Yplw6bp31kGPoNJBOuKQQTGQR2awAfNClme8b+62u2e8x74B504vPwd8AROcb6AlY8RCHoUnxGagXUFcsKdUThpGFUWCKqhET7cZ+hcYL8FaBwgZQp5huijPL03zhhN/ualrlTh7artDjgHWWEA61gQztwhfC/HZ1CFj93l6yL4834H5LGkOedH3yzZvZBXFyA8Wj4ztjK4oWNhkJAsEMLVf6x1fYhSNObU0sglqlpShWmKEfe59r1EJtmiiNKsQzVRGkVHNtiq4Gj/Gt371L4sA3wBm/r9HoylmeXb3MH4kGhBQTl87prSJ/w7UBbI9P5M8Ufs2WPQ
A9UlLUZPEM9Sftf1LAnRoRgxSlgxRjCMlIGG1/Wgc/irb8R9wjyM45o6eFVgQpNP0zp9oxSntTV/7r9zpUVbZ3Mg032iynkR0YaCccf+5vpnZUczwm65dEbn9iuDG+CiITgJG18o3c7S+b7fHGO32NbPQvCwI4uMkyg7KKMVixDCO2B+aNJXFH6RO2dovWvXDSb8vI756r3r9nuty5G4CBMQ0r2xuQ5YAUaCNzXuq0wyf6jvWubQuvjqyLpiwb3aEqdgV0CijpE2PabCBWRt3JVauZpWb1LqO8qtYw130S3Y9qV8a+b8fVP7a8QQEn0wHHl0dLQMv8jy2so56DB0Kfv0DT3AH5xy79UIXge03zTPS5/auuUsDvozJFnuJ7Y8FJB9EfULA1s7TfDTYVgl5+fv9XgRJ5pjCKD4Iit8bkve5nJ+sMlgBjx++fujhTbD/36zsclK7dBJ0WA8R2l2UySvUg6/jC4XouwG+STBkknowqhVYfxZhqg9WmpGjF8O9ThNcRuWyTn3nGPSbvmxOFYEhmsr3WOkeemwVRTr+jvqHATtlxgl4AHJf3mHuG9pxAcJIEvRBWjTiO5yZO7zfm/eqhxY1R2fym/Zqr4gZXgB55SRG3n3+lMW4Zxrgbv9/GR66Hf4ztICjMqOobW3wZv5H+WRH4qxJq8SQ8dPUGV9ECNoEeSxnpy4kim81uvdfVBRHLyIigSs7b7LqSNZvnMmRhBhfFKM/pzMZ8goTMGPHTGAvX+9lkXtF03L+F1NdGrg7ILV/RvWlH3kxnAE3A1pkNhgX1kMsihWavMnj9wEdFj1ZShwy6ee9/yjnzrHH79AB4Oms46ysUPf3qLBoohFaAAjBgui/L24XiCAkyeVx0RSBZY+hoxcG2pXwhnL8x7ILFN39AqiiLAtSwh2UVuOcjFTLRP3VwD9foEJCIoSy/yvpAB6ZvZVaja6jsxvdFKgSoRqFf5UMITx+oWzMZpbw6G7fHDHHLvnC98/zHCofpuampURn7J9t5XwDcxK57MQS98GOFDGinu7Ah5vVAH/0Fdj82JmmkchnqmyhlZWrEih4R5Kl8sWxV9HGw69iDDfsms5iXkX6mMOkQXAQAp4mEITJWY4JDe/TiHBd7i3NnCnDgEv+5xv8S/aHSou+RlXIHTyWhYd8FV3LwaM84s0fak3OX9a0Pwq9MIS5tJHlEGPBWHm5Ze06FQeEwICoDAQAN4ILlG2ZFo9PfvVwxwMxbNSFS24GiFonH5kQk+kR5/jNr5dnxtmyjdFkWavquiBs65ezquQdsE4k4+1MmIT/j20+yu4Io9asLSQoSN1Q5WNaBAHKvZdwHxoApKYBCY31kVmUpY3BC9dK9TevTCQllcPpl6O/aPA65+mRIxUBX61cRmdt7+aH2+yQqZI271dgAHZdWFMffsyQJBcjPTtRlXdFne/S6jaPUbRFA/Kr3ez/uVpBy4nom7EKJ/L5pt/yKR3fFEJMdtxb6lhhqwsX6tycgg5XOXUfgyOO9j+yrcLbEmdfUZ/XwGBB3J4wWrrKZil3YyTVt6ZrSVp0/LHYowiUcY31KUcrul2gJ/I2JcKAF66TMXklvdNjpwXtUMSr7+urrRhQqY99Jz38iHYCTNXnn9Pi29wOw3mmnKcZG/L3LuCznRqo3iAd5cDMR3FPKYWEOKkH1V8UYe7pR0Jv7DXMwIhhlETM9eTEDrd1C/XWTO/sARqHJKG+rM+lhmcGg/LD2S3skWjsE6ktR6N05tPxMO2TPaQ6zNXaBYNjtHrEX32TbZVAmQ1F3AusXZ330eWZ2LOrw+ZyxrNcT0DHyVlKoRX3o/SJJx8B7olN/NfIB/mRnQ19iTKEi3uRSdvk9kUsuicSE3a21wWzRwCvD2yZs26Y9aeYFmYBrgj0Z/6Ym4aP6OEmMSNCVWFQPjYI2U/qyt4aIKnyJblqu9Dbg9fqEyVQM77O+YTaqp8kbUrBVCpbmRt42g+Tb/sXpkTS+wvEEweWLTWMI9Bf3DwfQKPBkaDuE7hR+zJALJ3CfsoRbnLXxKFbDeR8ZHGbSd036m1DYpQf4EzdUQfE8clxaUrzo7BdKlOcd241/1058so8q+iY239w7S/YJ/Sxl4+yYzKHhkrNis9Lmu8s+KsFjTSf7WW+8Yq0Vw5rKVArVB7yZz1pX9h40wyJusFvhlvUkEfoHIsL0xsPj3zNRKclCEK4hRcJovxcmy0ichO7KGrjIUY9V9cF0mj1/gjrsv+OM6bmB4U1724twOCWV3ulIvzIz8LCaVwSnWpenqhaaO9FxgeFPcsmjQ1yd7EWvlTehYN/HKoi7eZ8P1tm/YPXUgLKEUTScZLDg6UjkOgd1r9DgPGRo02qqliXgQlriv74cMYk0fxlg5+mEbAVcgRAiLKpiWGBXjGJ9K0m/ACytG09AoXFzg6SPb7W9LSzbXUxKP0QepH4NgS3eW9B0Xa3XtCfVgcKCMgARuMHAB/zcTMPYEUR3jOmjJj+vaXfRrZKE/Da8mezLcAgh++y/fhCTz4zoH9lOTrhIQ4i4k/WouSowgoXN1veUbxuhzfGLQbzIHZtDYO43SYw3crehf75rJ54lKQEelaMy+JypB+8j88TEuL9Qv81pH7ldRSbEwSgyMcg3WplMzKHkWRUR+vCkqQziATD7IUO2iI004PHR8f3BN0oMEDDNO/LXXlCRVE3sNk5Nm9VsJXvEoWYbOOx9Ixg7eSKtFnILi42T7RkJctkbBwMhSiMQASVrb9B/UgUZdbQ3QBVDAbjGJfCgUEYE17E5l0KBAV8LBGyypw/Jjbn3y6t7dNVlk3jeWpiJ12k2MoZTHmzAlvhwv8mlXAWTvLzpODgXaXCLGTvrA74MV9b1qxXW3IgJDXedMnJ8OuUs/6RP0irJfeXPWdQDIRPs/j21dmppH0qwDa8BCirttVGxxmsdke2rAipej8m4fEoX1gnIhJ8lcMwMwDKDVfYk6G1ai2mnfWHukHTqaQM7Y/qvKBydRBL1/aFDGOL7wQzQEuhUEYzxN5YXd76vI6vy9TEiD3LvFJUFkQ26DydEvrN/pBVzG7uvSxXvHtKHRnNtZtowHsZpTi9OiSZ2ulbM7bHo/2XTGyy4vKh/SrjxOY8Lic9mD5FmA9vk8GUnNkRdQm4CscGom17ll2mwF86O1xHAGTQTLfnOkuQbB977KcoQojM1eWpiiOTUEsTcVk4ESe0UlyrAtNoXWMzOkUnJTYPXA13lixYxUoT/GgiutMtCCYyztWhMSMlUwrxCBcfOJT0GHAyJTxhjGNtbpkUJlYAbAT/iheZ9KDSxjfIfiuJgxDus2zFUFKiPIzIW/a/XxGwj6at4+KqHqP6Zz3wZbNHoqUd9bgODRJL8FfmWOH+UAh6oHI3nHBRlqIkykXVOOnd1sPFzVk4rH3erUvTIeO6+z0UK26JVCplcbWAN0N2dyXX15SUYfx3fyswToP3Rh0VZ0wflzkE6A9oJah6BP01mckXCxXr2U1LXEcPEBzk+akFAbXuVJQoKFYQH3M8pC/X9a8RGCsi62yVtwD5WjDVKjc/yH
9uQI8mcT5rNQNcUzYYimj/zOGCYMIdUrNhEKj896ljietV3iM+s9YDAXseNRTk2SUxumubSCoXPI+NAoZKEk20PNJmTHosN5AgMjxdh5SX41N3oHDtZRukqh2Te62q9QvVl/ap+OhZvySfPCp2Mjp3lv/xTZEI2TM+f1C5R89bva7APj6LAqnzMwoy+jJBeAYdfRH/HAlDsJSVJ3kvYZSy5SaH1IkFlh+6fGYITxmTeHro8oSEiFNrnobF8JAmy4xLtvCxspqt5u6gVjgNU/GtkWz7tJrY8TJbUVe3B5LPkCymf6SHi5rvkAGHmZvtfTJ2mNb/UtGfZrQQeE9lsa6DwEMmgN3feCbnjNZSU04FYCiFoc2d7w3Ma0/F/w27ptnn+9lDGXnQbrlYUKChFk2jb9IfsHW7Leqe44piL36RA4h2lTU5571weScjmOSRWCi6+MK7nh6HirdLM7i+OpfyAmDugbHG/JkXY/rd2IfQXzLzW7VsNeNZnVy2Jqs1qy7edTfxr3zcbPUYG5y786PCQw48aIZ4I1v1LPj7xfjqJpRXxh1YfHnRl1xKh8YVrx7IZy2j4RBgz/5wdrv41RosOUfpErI8NHXvPp0su4ScOJStYzxyE0xNDt0+rbwL3d6Qfilv/EzEUS+/JilA68yIiXeTKjPoy449+uMAnHqVP1OHhEYxnQkTEXljTjDlX79jvD10TF4Zs+dSOnbXXY4s+EOzpb+MdX2VqrfxqgfHDSfOg02AFiV7pzBiYH/UtfUQ8e4AOByOeSa/UIzXZ6jQ9CZPVjuInE6Ywtm98GAI9pWfRY96NWxSiV97lkEBhepNf1qJQSVcwzcFVeSfpwBvjb4pT+EDd5hSWo+rQNxxErZPP357UNkHzF4UFNqXs+yyI5pIO9g0vL9yNqKuuZz3W5peE1KYDB24+pvHNGE5K1T9BVqKhYJfb9ULqmTrhtKqkzlH0yPywmtnQ5Oh9MQUdKIqa2sez616+2Ge3fGPgTtn5e9auujBP7Y5V6UJq+vLV+vX9S9sWxe+jB0N4VXK6ROtqdonYcGQdc3N25yxdWHKB9EKFkPL80XTA++08atzg01qVudJBrKYhTzdP/WIRiedZAaVxQObzoRR2SLQxTjgrPGz9qJQEhQ31cce76F6rHu7TBf18/nIx3vaWqlzfymEh98+M6LWUc5xMdkzdj3WxO0UES6V0lkEu0Nqj6AjTHEjEBeaVbHKq+I14jnIlE56+oeE/ZXUiSGZt9O8tfJ8Sg3eSBGY3NewD8QJ1QES3V10cu5eUKtd953XC1fAHETojchh7I4vE27hQviNtZinXNG/M5CzNzcgvR87X+kLx4HGrS863kZ+UAHeYBZaadLEUXGYXn7vgf3cQhK0InhUoS1WOLcpKln8AqR+Rf6iV+dtT0K49cxgMn7PWMuSFolJnN20sQE5FW7pVORiXemv87+YxRxw4VA1+rt6yF5l9Rr718e06tjhUPzPsuAcVu4CwJn9lpa2y04AvBCde3CJKWLYEUk2lNlaPZDQKJcB9crB6FPi13TvQ0wbEEP+nk9CS5t1a+OuUwxJIY1zn3aG/QklIAz7d2CZYXow7G/rq8jwbrTzzkciGBff/hW8Rbe4/WpOI7h5mka2n26pu3Y0XtV04YQv0PJDNCZxFQ3PQgWHzGRsdkj9VOPm6FzzYTpPnrp/maA5YpccLtqhJDcV54/AvZOSeBdkWOl+rQjsDo9o7ICeWWn/N0wWfY5UBEwZ/j7KlOJTRooDvY6cir5DQ50xCAngygtslKHWUmUJ6lOLTIqzJflp4Nr0MppjqOfoEqSggFqp3OEzumWquEB4oovO2t6VdTQbWXZ1DDlc8UpL9GGIPB88MJhto1hWzUUVLAc7qYTWl/XEn3mSO3js5CoJO7rm4w4autzfPl6YllL0WMPsKeJYOI9oCRr97N7UH0u9hFXZmMqicaj8qthCqPq1XMRLTBtnDjtd46Ib+0kBuHfHZNYLkbK4zfUdu2h1ij7kPwuTOu8Rj53xXrZNv/KUi2q1CMaBRfGPOiHLoYpcPwI+qoQWzw/TBMVfFEIR1fmvbeAyK285TZyIQS2wsnCUpfnSmJiovgbL5iKV2gCdZqIcR1HDLhZAG5Emn7BrVWpMFQOlwB92CaeGqZl/SieU2gsQQFXln6wd+Jst/DwmprCLpaW+LjcJm2dPh5RdNIfm2Qn4HisT96V7WWziRv6SrpuaD59c34Wqd9wT3YmR/aPmNul+lK0A7LM09mRZ3VaEu7K3x56SF9cGUoD5BuyrhYjDVtv92+G5WttWdrnxKk0seCJjSqKvmnUXbY9Sv29KIxdfuEGz+HzFqrx6+ap0kDg7mqhWjfRRNOBSmtTrVRL92t2HNwSKpmOX7lMRxCgRx/v7ZXZHU2xY3c12YZdiUpH8weVrUG9ttIR5tDhilLsa7hRXF4HVV1aU1PEZS7ijxvKDa/jOR5K7KauR6TQQjbEOefQcjDokgYHr2mLONxTsCVmPmcSqd3oBe1wJxu6Hy8xIzlflpEh6ebXMpTXY3vjs6l16gx4yd6Av7kJtTTAsk4FuBLxo11v7VJzZWhFK0vPIW3HGAcg1YxtHqJ7ObKuM8s4/Bo85OFWlzgmrvA2+Wn64JZU18UQx0UdfN0Khdqa4NbuMz8ftWZk0i6f1sXlpJ1YDO04gjBBC3hJHeue3fUxHTL5xNqPNSBbfX1XqMawTa5Ob+e9mUDVzByli2oQZ9lRh2uqEK+kNde/6b6UEYMU+OGdVc2yZiTjD6/67aVQON7Y0/KqFTdxHJQHW5Gq8PPj7FD3EBmHhBgdu++EXGpUVlHTi1wLZ2o7BoYsxhWZTWTMYnRRfBCe3CMKej822tA6hbuw/V6PdBHp9RSF+K4azfGwzedz77tqREsKnsVeB2rtf802midsP9/WDh1zM33HNFuYbog1xxzKXUzDJXY1l3KXj18WDal+xgf1mpdjY7pqddiAOvWZ7N/A3N2A/uU1CTCNjGaaJjOEoVwlTJY/SiWxrb/D7KJP8FpfgSTPrGn2SO6NYQi9kmx3cGciAIG/MII21Br+aKVV1PZw9uo1LkEdQTMLUD4H+gX7EkOYXTvFkffBlPXCsi3B85oB0NewWbQp+TkJpEsS7Afnlq8CDGTKVGYNaKt5/oxg8dk+sUx1LkG3PS5p1GbO5535R6GK+2yS4sIxvRlFr8xdlVn9FH7Nge0i44UswOmUhQao98VehOVJTUMrjfn1cuBKFPh3qvChaQeP7QRCvb0bl8mMyfdxedbjepbAb8J8jV0Nah8YJG1yEdCvVlL/9T6Bcj0CL6LZC9HYFC06nmrziNTAJNqDiAnt2TYImnhvraZOFbotC6caT2L4zj9LTu6Viag5crCQh1KtwI+29GKD4EOiDQ+0DRn9UIfjSrBtlmTD5GCn+4LUAuzMNEo6STfb/3pCP9C6owgWhn4vhhDnvVz8XaxjR8sD3DowPG3ygR9yirzljvtzTzShS5wgUE15+yR81e4IbuLrzJdGb/I1ROFwOGFCbl7DXZJysM/IBbQqcc
G7NhOtq3wNSgoxyjBclzTNAOczq1tk3GTuZU3cwXNNJNtLLXzMT4x/FZ0wjPrLcmOJWYL+jrGxAz6XTwG4xmiL/ks5+68VbgN190YnrYwE1wV4w9TIxBfMtqUU7aJeKJQAZ91HUibhIyEvM4kspGa4UJw/nixtSkcMQ1JORsg7WcshedmZwMnz/lXOmBh6+Lwnt+GUVfFa4sqVfnQVS7j0WVrhm6iOCILFKGW/3r2CEFow2+h+BcuLMjwQCca7x2EZNAmnO+e26y7lJByBMGZL7DbTRxrtk+T3fLUrGZNf56XKQ7AvyQNtteUywKXnsAxfZSsNkz1JEpkSt19fGy4qdEiPIj/cDOC0On3OWSVnLBSIjfdwAi54e5xU853YHsway6sJkc9nYcJpLkpX7aL3hCyppFEDAiWwoSiybr0Hea6kh6my6sLHHzErHuM1Ipr1qKILWTEOZXSdzagybaDd/pUlFFu/MZdO2Upduiyn/HDKC2nFCwA5zRasOth2yBqNAozYPuFJuewEUZWkN/zKu9KSJY8ot7izdeibrZgdnyliYGcYJm1L7y4DOWMC2SF+GfCaD48NSO8oSRr0DsFooohTFYFQLC2rVihUEL4r3BkkE48rvkA7ayVJsLrzuSGtZVVIp8/YZufmX4CKCSoFBfGRfsj7vJHFoO6kwPT8bYPAL5ap3GOMvNY2q63RhgqHGW8XQO3zSQAmiCR/7Oiko05dl4x72cwvrLj/9iaDxQCJ1sJfqGLMmO0LUYGwPkPXIt29vlG71MniXssK/RVdRS3M1k8r5OSd+yjluGKPvb0ZKs0NZXIb2+0vBeKrTPu2LuDDmEFKHLGw8wjd3NqQyLVrs5TLs+OOpH3ugK0tkqQtGEgHBXyAgIDtF7CWTbYoH96ZRkczprz64k245f2F9csLL/3TbIEsrTXzOWqk30SXRMskM36VZqE4ZK3zoBgmc1mTau+YimLC2dp5oLWbN8rlbGx8IEQAo+jHM3dyVhGnCQI5+2qjpYKYqNeJxA1jUCsTyGKlMayP28MKM1KHZUQdpMLMpxgqxUEVnKRDqp7n29M3v0ifNdvZThMMMRXTsorV6QTLuubtu9iyT3R9cmQ0kRdb5e1X/+XfbHWbhldkxUcQQkHScYxXC7uIBqgot19cX54Z+e1mNL6xpfQAmawVZwPbH0U6ZOULXQTxfp/3aX/Kw4bRznvMFVAyi9VXySCKfN+ndd2BpZIqsbTlJfasSADLqYmJWGAC6MXk0tEJttgWhTejeYPObSmmBXOaPzxRO1IwnobrQzxo7ANbe/A5rDqIJRfLNTHELCRfk6s59B2v0/zh4VrRj+mNdkWcM0NVR/sE36V/f06mr8sp3GKm8NNLnhnQUf8bmgq6mW/uvF9yzBCAR9ZcdGgS+cwnDSXhmEn87WjhEb7+qFG2XCR6SgjSV7XBOV3wVsydsfdYwqAq2de+39fsMR+uZvupBZIk9V3O5S9VuURoR9W4eVm2qVr+dhc5SfJo7GRZTb7e03wc9uaFa3FoMOoJM12WvQ0wSinYQJEgRLLvBZx5Y62A8YH59Ens1v/OGVOTdpNEOHEqdWKMjn1CO4MMlDJEj7lUfkoP9cavELvUSe2jRqCLNgVJ9IJAwxqa8SWUDhcvZ/gRZhP+yZexxl/WK2NISzwmHwUGspz0gz2qSvH60g0MhbS/PVoVivEitARa4zlx02Ay7vqRxBF9z96IdOxrpK6xRE3IZxqZeIlHCNvvm++9QwXEaUpyhREQ7hdH0NUmjv8qXYvNieueIfTIOqbQNrjE2nek6zqRbjoNgrzbvRONZfwCM4fED4zuB0FeuHfqX1Lgjq3d2+WUwKwSTr+gguaqty6Z0YdN0zKt0hMu2h8SdtThRluinoYv3hJtzqtj+VFftPZl793wg1Ae6y7IIA0/UgSfNmoocPEEu/Bm0HNi+DAOtI5iQbsI0tRv+mD0EkYQZN1ogzmgzgJ6LHDhc8KYU2LmVfsQD7oFh2aGeY3xrLO9XZgfJBwV0iyH3vm1b+c+i3dZjmWOJQ7FthV2eNkROJVN+a+382dGxeqeXq+Iqivt3WItuX9PdALepS28orpN1E3JEIDslBWa24d3lM6uRRyVhfv8Ymb2Bn7hZ+aa/MWFPVNQexJwuPw6LNN5EKoLhuT2eFg9qOyolhlsZrCFcF5Rhdpce8n2zfvAjEfxMstVGIaLc89y4sSyjyyLILSeWh1t7BsDddrxl0GYkOsMIwvBBbUzpF9CaRldLDs5Es+hbiElX6/8S2yLrNYfAc2cryKv6UpBtMNmlzDormjatbvcGKHWhN2dq2aOfTLF0r0jj3ru797nOl7SZCpJBxA3z/Mvj79h1dxPZQK86PxUXKhQlTa9r6tw5Xe3uA2CKc01Mn7L45moZiI3Fk2DQb4vZGXRVzqddRvxS1JXlGR3JTQ74Dh/eAa8/47hFrL/Xvlh0pYkgH/0Zg2CXxrr7AsiUFhfznjLl8NSkbKJkTWujaDKeXTt3aHbdVxMYkTiCz3Qerg+jEdTaKahnixcRKNuTP6mNiN0nXPR0TSinXuSCp/Z55dxOCa/sDBkbGX2oiwRN30K6GV72WuwL/meGEkbQ+l9yH6ge8wXVq0x6s+Yl0Iy5WqM+Vy6aU16IZjveUCBXy6MAWBf5AllJ96rZJKh7+zjfTZHhkyNq+XFdF+4eZvfmQEAd81z0fRBwLKbOXX+dYivY4qT1nFJ1pyE8iQpBzv5DdIOzKrm6ttE70LZw/HRHu1xc3dGPeagsXKNwcU77kkmNO0PYRACCF48vrEw5ebJe3WNq7wLNyeKkwRnFoO9c7LR9yKr2t7rendy8ssLtObGautJ0qKx7x0O1yHeaEN8JUyaNPUK3L7BqYNBUico6LreWqfyI31MMwHxXMVEJskKI/JDoFLF8JEj01f6KCA21UHsBTtWRNaaBqt9SW7LSN+ntA93xgIBVh6OPnZ0gWnJp8495YO5tD1n3y49RiwKkAhKUbnmpLv2fIotsAkVTDLaFCyppPj8PHZ0ck0fbirfNHXlwGC2JxGdORDuonZzW8qwybpyydnX8Y6bdQoDtbgD/z3B3938WJrZyWjNwaEDZY/N6hLUrDvbJhHBNKPFV0M+TFdqfDQSGbHM9/0eYAvextnufdj9XvAanhH5aNdpJjr+1pgbdIHcny6fefzbPYmYl0YD0m8ywqunjOBW3/c46KwGQ4ywJni/x9oHIfrpMydup32jd4fed6vJG+St0HlCoo7HE9et41mquv/t5MdsbpXOjAiYT4Nn4At+Ro3Dkm1tnx9jJT2rQFJ5gZmbs2VrzaJTTRJfHBpJ1yCA46UvyJEGYw+IT1nQmrRbgdY6A+bZIOYk//LOFxNmurmS32l2yDARSfAwCwrJzRn4O/G8JkQbpUeR9jU1q+eYH6GqJaxk8BkuPvea0k7Q58fovehKk63uJX+5qUt9KDPMNspd4ZWxbMAWosmWRojinc3ghzI4cn+1zKOeo3nRD9qtok6PXqWwu1IzLfK7nZpykg7fnstMnP2TnnTNt3qj0iykp
adwxaTWbMrR+MU0/Kp3EbrTC7c2OqX4FsZmlkpNF5f7bhYjVyWjuJhIkYwI7pA0QXXnMWffSRI40lfFmiHEIqwICjQO97HKSg0eaXdU+8LlnTB6ncd9ssPoRnqrrwhkzAOz8u9GSPYo5HQa0p4ZPl26bGTitcG9n1E2PaxWknRNG78BsEZm6g8mK/SXV3eMMoNNrFSCGHzARvK9rJuJHv7A98LmlLucJgNr9O+QrRIil8jw4YJYjGk49KzKQQe+hCl8embEBHwbbRRByifxPiSqhe4wXC8/22LZCONjoalYXhZRmjYhH0jYeAGZOLSr2LsbVd3ZM182ImifYpydUsnAHqQQVy+KiaFk7r9BiZ0rVTvCrxSvG4UgkTFwpqR1rrKr9j2Yzubwu1zvwHKGK9Pi+JNITeL7FTD8X4P8WD7QgBcwLYfFZ6nUqtVftae+rviRiw5dyGzKJqm3bpXw+ZAUiIgF+5gGRFpsS0l6lYgaF/ufeg4sz62nxO624izHwEphhkMqjTDGtD56Jgm/uyVP2xLZUegu77lpRDvQzaHF8fkdPIqAZ+vejuXIlgbRY/kJgl+cX7YaTS9DBUyvctHHL0TzZap7hysTS1p4Mq47ibTM0UdcRpY/7764JihKS/tssdOwke1Ee8NYdbu02AwQCmV1FD3N9VeHSIjgM5/VNJSp1frT4gX541zyGij5cJGD5gVeeycaIYeXKk8z9PASb/ra/DwsY4RpxpqBjRY1eq/KDvmWExEeA5XwY9JtMdfQ7mu4nKqmNluCljhYsRmXqxj9iqZEz/erMUj9SmKTy13mUOaeiPDbxidnpYhQFE4eO7phFU6t3woC75YClRle7Eh06Rn4zqltnW0IVVsWHx9qpMX31Ham1MqbjTysYhWORDt4mdBQsfDJNpOybVlp09/UCT7YoTlDAsp1lOMlgU5ecEhpyru3xZq+VU2zFuqOnFRh8Z9z6Puq6Rcpwh2mVTWZNBpFj/ykKOwOt/Zt+wUfrGz3xccpd+xxMMjG9ZB0mdAC4bVRTqrrXk94tq2g5GQttPWPD3UGR0RJKaripI0Y7T9LqBHKrDAG9OEH7lnr2fXYcV4N3Z2khNxvdwGGNYyumi6GjQKeR/ppP8hXDUmO2hFdo2qK9loITXKLp21VWEb/zYyI0c/foSO0Wn5wj4BgEmo0fnDs9wBskD6GmRh2e3NR3+8q/yAGLFZxKCAPjZGklJUfSESkDsXTyYOid3ymhYEeK43bbIB8mn77hvTH7h5umcQj03GH67PWRwJ7w5TyurzbbbtJfnhJBCJbQ+eieoPD4LF5jGS9fUk7fkiVcwfrY+XBCLDh8QtS8uJeJ1tqtqTM47iqtcfE4xu1Dt54C1dVM60KM+oQq2Nn8RUFX3T9gGiaOBGCZyGS/YcbnqXD2MgKHx3X2z1Vy9nb5h6Nra5LFb0/kmU3UYmgiq9o4MSBrRrX3ZsP0VuRsFuW9I0mEPam2b33oKcBxzbbd71Z9/i2xBoD3m9acDlpqFsZ1CUVCc7gGl6JgWPqVRobOMXOzr5uqTSuJK4RfT5Yv2gnJs5cDEAS7B6fqLklhXERLo6uMJdf0fdmVPio097d5h5ddpHm2caFS3PqaUE11s/6UjZ6JgguU3c/A/sjxxBM3+8vHAXfHIV/z5anqa9a8j+VVENyyRwutt4FMkgM/XlT/RRdwYCMiZYxLzr+GFUf15X/S9qbvaHRpDiRHB/r99ZRwiH0BgsMtEhwIIOV/c10aq1trZTv4XRlCMWu5hzQ10bOL5pY3pl8HfVUtwBsirbyahLSyeSLcARwJxfT9JVXYfAWtpN+GbDuR8DD7UVqvhHuh753TNTlRcuPfACFmG7f96FeVOS8sVtdFlpqQCh+UQ+/TSMqc4jaJggqs9nGbrUja9fs80qwl4q7bAGzJ3vEBLe6bwrpdsmTPPzje2MmkmdS599oEgm1SJvzfLCFDyFAvyVDKKS2D6zbVYwAfiTdTQ8tSEkmXgk0Kk/3/sWmlUxpSOIvfls7mOc/9m17uDA376Io/gXlf3/Yf0EebgmN8Zz1K7iDIPAft/ZsXrPzb26hwr+gXHdK2dBl63w9j/z52/+Bk/i/4n9+7PrzHkFR/wrC2sG9o/qu5R/3afpfaeKPu2VWFeWf34n9+WS8/HFd/M8vAZvNf3w1sOBPLmvbv3ry+zcCVd8/PhM4/we9BtqNIHwLpeL/oXpz+z9g5M+3idst++O5P24s69X+eSP7Fpnz5+Uwr+VQDH3cCv92l52Hrf9m4Hug5+rfnlGHYXxuws/NOlvXR5vcoJF4W4fnVrl27Z+/zYd+/fOXMPlcL+s8NJn/57g8Y8tmZ7UG4Av+FUP+ug5/1zT91zV//tmD38X1NxdmNlfPgGXzX/f6Z/D+aA6l8L9ugPb+B/SvEEL9deffWvxdXX979fdt/l9KyjJsc5r9R5OA/vHgGs9Ftv5HD/7ZIpiR/1Dy5qyN12rP/l0//pnw/PlRc6ieTv+bxJIQ9HfiSv+dBP7R1z8/93dC+D878v9CLtH/XC5H8OW/7uDs8+eZuOdD3F8//gV/nuF+9/Hf9b+/B+K0//FZ+HcT++fPkhj+TxqG/+7en534h4f/fcs4wBYAIVX6LJM4eczaYanWauif90mGdR26f79C/nqWaasCPLOClcXGy5ilQGLy6gTrj43//HX6CCGQTPY3SNks7NkfYwXaWsp4BGPYncUcj+W/ptWSDjD9r8cwN8sa/9ELNq/alhvaYf6NNgpBOPSY/n+tzb9+0w999k8W8P8O2CQg6F9B4bO/lUMYgqF/AE0Yh/4RMmEE+q/CTOwfZJNrq+zPhfC3Evq86frvp/Ef5ufv57Wrvt8/MDVbqjtOfk1Bf03k/5R2ID4ARpc/x/y/aApwHP8nU4D94xQQ/2QG0P+yCSD+c3D436qSsH+ukv5NiWAo+rdK5EEBiP5v1iH0/6IOwf+bVAiO4P9ebui/E4f/ag1C/jcIyb+xEgIh/oaV/PW7/3VG8h+KF05if89RIPT/3+ULIv6Oovw3yxf1/wnm/I9y8/fA9TdyRGN/B1Mkhv0/EaN0m/dfp+H/SKb+U1FB/7tkBUX/js7i8N818of8/4Ow/JOmYPJfSQSH/voP/TspxP8XYY6Z5/j6m8f+VO//N96B+HMB/Jtc/9Hm/1YpJ6H/j0r530g18u8MOPg/E+g/IRv+S0f/T0MShv7vG5L/ayvhfx+6/jX8/+VLBiYeVvNvYg5Bf6fNMZT41/9mk5D+z0XxL8um6mIwPuzvJ/OXkQT9M4vpPzXC/rnl9e+toMc+QlGazvN/kFXi7yyp/wt76c8O8994jf8FZf64RMSxLx6Lsfqwhn1AilQMwE+lO14peMXzL/A/I/ccEz4/eQPKPl+GEWKlaQXrY2PIdtMsfrY+7PU1iOtAzU3qrrmbN2K97x3iHCuy7MMXJFfWCLawvnk1Y0Y1dIwhdbmwl6tgfx1rGBjNEeWCsrWXK+8gJFb8kL0skigBFziMQT2p0/2Mmsb82VByQrZ5wlOC2vpM/8BkDfc9GjxIsZFw
i7PFIhYaW2jnOPDCwUgF9Vy/CwbcYweTDRmVCXnwJ+Y5i+MPnzkk5uiY58f/T5993aMmNgKvJcox5TvqrY+EegkW66LUxzpVw65SyujweYP4vaIpa7agVEQIoZazjvn9K6oFUpOk7R1AYGsCDd4eyspvrmIPrQ3j5y9uwrg+EBMspHYF38fvEqsk33Th8alTocJ41hcHCr3X8+B5nS/KRet0v8JN9Zt+ZqFAsoKIOHWm31zkWdbaOEsbvkNra3nF90K76EZO7ilpmq+e/nAj2R95SrGeQQg7VzYiSDPUwxGXvaKSbseTwonge0XUO8NfcW590rEjx67PFLKu1fvO4DXhXfyvEWO5/j0MpKl/v2ieutL9FlGVfIUwXKgUu3czUnxmLF8VSBUKkHiVAZEoO8QQn5fiLB9FQlpYtD7z6nIqM3KtidU+50I5BKKXi1kEI6vicoh69H0LxTIyYfMGUdiWDnICBNs5KieIbmwXsp0zqiLoVYm7Fkn5ghKEQhPDrwt8272MtEqK8vtGZfQYxC9H9rnrE+dKQVxk9AqDKQO7jJ85ygj5aqF14t/5+vIF52UwzXs4QL0Q9gMNbOmo1eYcNsvxQiUmI53xmRB+ELaEDZjL9pG02rNDUvOvHtB9jRCrRwwkeZvw5KeezcrP2K5FVxw3r3dqrJkgEkR+1Mn52cDJqoB0CxCIb3NgzwCjGyZuRJlJbdoQXyAv5YhAKt5evAiOJ4l333fQmIVj3zUaW2oM2+lvKgBxBayZK5350aFSl1vN2hTPSdaOgMWMJs25Z1/IlmnhV3SE8eVah1ZQFVtuK856HD2xrnGIpmmg5BbApAP3VTymn0JjwJoOBZem0ji+fjU3LqvYT5TbcFsJobXseu5LolVe5w/4aD7aGUUVSp7GXPzaUIpbCXXJF/0rwu9ev9yaTHwz7Zyw5kLmWYkppbyx7OiUDAQ+iSO30z2Kau7ix3Qs7cZC2NmWLcH5KlxLk3SFiaP9FaEGI2xZEwXAE8eGXs/fKYhuxLr2fBY/86xrZVOiCMH6pl+TnHJhShxK6lWOkrHBY9bgySpzEXEUVsW0oyRzOgN53OBz1OqJ75yDXik4QyD7xCs9D/Y3x6rkp8HR0BR87O73DaEVTKlyskpQvZhshP0qSVsdz2gCSqCB+X05gQCmjU9vetQrcH57ETB1Fp83dxkmnGBy5IkkT4S6NlC0+MRRub6YCVO7huLbMMGgJelkqpCkkNIvbrx4U/kleCt69Vy8PG5ouuBc6TeW0+H0JarvNAHCoB1IoJKx7ZxqJx2juRF+fb7R7+zEC0tWQUuppeaRB5Fvi17PCTh7eJbv6jfnYhjew0EkqPH5I2YUVaiTgMmoWkUX+yCIMPe3wOtsc6jh3lXZgqNP3/OC+KXlxnk37Palt35vWUu+0lCGqB9mxl+b+KErJHFXYwEBDaYzUmd0vwt/YLzUNHGDIKwe6aAbRPJ/PT8/9UL6ofXytTD0z9xlzlj79zG1e6X51ZV9rRGLYZJ3IuoIPmyBLqpXFmlcyKuAFRwFljzrlSfn+8JLezMv43TkespFWg+CffpCKIgJ2fDw1qfwOj+DC/YJPybI/8NizlJERb0kTc20pR+c2QJSp5Qy0x9DuSmNXRwNU9zy/llrLHuteYPeSWfLO+wpw+fsJCZ+uv47PnYOv/xJuNUucVpq30Wkco1BqwxsXfKsWv+BJZiv+Tssp1zGb0xyeEVAnZik1U32el1in2RMWCTCvsQCakQxCv36kzAG5llSFHa//oD1VFTgoOeXMTG1eDA4R79UGRm8ZBeUdTZq/dJ151NfgxPFr0cVZe/Uwppl8X4TF9fb+t6ZnJcZFVMtAYFvGMovRBtISD1UyVsY8WoXRdvqKQ4NEE/T75Za5EdTogvlRNWJOmzIhqcApIhM59HF1OGX6IKnkF8q0sAfkRPDQIYu8SxUzXZR3JXAqLcgXFpyC373oBlWYgCG9RXoQCP5xvYr5v5RJlsDHy0C5kvdSHaL/EvyYpcRQurATSVwPwiz4tUnZHYi9pSqatEFcqKbYUPlgE+Am64z9QnzxoRUsjYw7eXLNxmcDKnzpkEGAPTNDWt4i9v7EC2VkqWR9pwObd4UWzALG94hyFw0e9NoHByEJ2YZpuRH5LXCEB3xd/rsFpAqyn+H5xOTlRg187yRdp5W6vfDUUAr7q8V8mnFPwCocQtPZFFXWBMvNfOhH+9S5jGWKxDowV4+Ywwu9rbhPX5tP68eoS44rY1vLIfC55b5yKYUfs9vXyAI8z2EELY/hKfdsoeBAxTEZVJK3iZEbtmCUIUXoo7ZjTAi9KrA6Ss2f0vGeST452x0MAgMiMZPHcnLq975HXa4j7MJQWVC8dUpKiSmyFtaVqZ3fPvB0gm71nZROdxs+UP8Khs4QiBx5dEHIJCVDr3t/UrRF46ucc/klyMYr8pEBEk4eCltA7CDe8qP7DPYVtYBhO3I5QnbqsUvvHgQ4VBnoYxjcGoqXqpYroFAuH3wupsgOx+ecQfItfyqEizlCY47tW8K/mOkB3BmxoVrgjZlcd+TaLDua99e742K13xAYcfe52aN+N5PxZvTgsOkLGs/fBNtfv3zwLJ6gDB4UHbjFELDa77JXLqAEzOQ+Zf4pQNZqHnvs9WWhD10oQLj9KwZXtPv4tWAlKHiT/pkZav+gDQsbmXhq3EFJgSaqZjI5z2tvwDb8ReYUCLMDutK/9FBqGfPYA+OSdopmrSpKAHBmDeXltWjH3okwPf7mdVY6JdE6NavaP4QS1xDV6nc6ADsl61fNsiK+oKmMTp4CiW5VIkervyOlsJWGXIIMQ55OclhH9bVw+x1KIBxnP0BEIykOIq/mPYi0MVqbFFmy5lFMKcTySC6XZPIeXZnjPHrqKEXGj/9yYv9mdO8M7UbI9HfuTM0x+thhEi9nAHB6qLGe6Ea6I4zMFGsGnpbhHAivJ/ujSNIpSZMl8EWZiJWJ6fHEDZRPMUDYBVRoq35HqBdZOmY0hY5XcNiDohljd2gxDFLjVT5fh1fiP/UA3S5e9hyrnPixecsJMo76tP+6x13BCh/lJIpnuZBEIcyFVzID4cH9KXbyc5LERh5Mz9EAvf3q6V7vkDh+5BffMA5guSz1m9FpgsI52FSXm4JJbwRpcK4Pe2WstZAMp1HCb1N+u3xwQOk84DUuk9cmhFKPMXkXAXEovSCHwuvQBlU0ThkCm8nAifdGaPGod1mLr1U1qmEh/LI3VsP4Og+vKRgsdHmK3XYOM/qqCMJH11LIePXxdc4oYRkcurb6ZV3reHQyF0g/ndjZq4JDfDq5v/J03UtS4rE2K/Zd7x5xHtf2DegsIX38PVL3p7diJmYiO6aosiUjs5RSqmXVmAK7hEjnYhATUl/V/GVVFMpK0PPeGcV99wSxpRMKnwHoDL7rw9FIzcm10PipCYaqFP18oiuAdoy/1aiIR7IPoElWan13yDPKfc6yHsk7sGP18Z4V3LWxP9BSJXNOmNcPqob16rVjMWjtQuyDCehAmYaH3+I+Th/c8UFhvYQY7aZMJfy2dnyHlw
2yv7Wcvb9fjdCazJC4PfjZi/Yr7arUKwsHqb4APzp82+X0PX25pj4VH70ZenWOVhH/HjqT9q2SAxt4dCdGMQvtXKwyVi5JmZaNQRWJ+cAa8Sc6eFfSlOl+POujrMZfdVb7bjNeiQOKemFFUDR3QmWNoTMtQjQwwZYeZfPznjEQyVgpWAv2R+mTkBEjcrgEltcBz+QmkR9ZpWdBd6b/I1w+pwyZqBEyGMxgWDs5llYvQqtVkK25/5KLn6f+Lku12uHPFSqFzHsTrNx05t/GWOSDDRVWd+fH5Jx49CutFR3za+WxdRf74hh2JIDdlEae4Y7xba4AfCZjIj/un/EkB+/GLK/EOk01MlsdXj9OU5w8YU8x7aBA0RJa2qgjsouHpXFCi2wbhZhWAAWrddn6U2stn752jc8OnmEG0wBvxPXgK8NTHaL6gfgpaGFFbxKRh0LNWtO2N940G58Y7//YtiXZEYg+rsuuzI6NWSDaXUFxssK9IaJN6WUTAgBDamWfFnxLzLJ7w68EqrWQTgDV5CzFKMrnjkYYiYdSsAZZPlEHorp1xmdjMes5V+YQfM3PHolBKnpXJ/CyjJn7Xsc+CtkU5f3TT5h1Snzpf3lHhJQbh5R1gnA6F2B2FoAglbrVYWnPcp+kx1L+gIaYysvzynu175BnLL+yLe4cujqOaHYnVrxerh+sS2BUjAG9R0MiP4pgadYSUSFdPT6Eg94NdutJRPrWVtdi6G/MbTV4Jwsf/nLOvKVBXZrd0ByjRRjMUElQVubf7icptg3BjzSYEvPi9P230d5pvj8zfiSFLZlRGVt84jdLaP+m+0XnoL+10/Na1usH/5v/CaP8Gow3ohKEL9q0BAlYrtq7SsrVmyshPMpDufASKvqTl8neRXHhzFSXvub6MThAIFe1niFB68wFD8DT5ac/AO+LR7RuhLV2eN6uHmxkVXYv0AK9At+8sUzqXEktTxtT1g7g+xBXToM8/7YZslZuMZSJrkCb4Zb//OyQNH9G1EFkD4NL0tv764Kkbbu6OwPEq5TU9DeZjMZ8ZmjWjt9DrxTA9hO5MADb/BMVY1L8Xy5sbRisgxyQFQQh4rLfOikdtLrKxzsEbvdA6KLOw73nxWtTR4i7igs4/N7KQQ6+8Y3LqRq+pBV8T54zaLXlsrgqysZcLcEu8VfiZdnxGjwz2vOfatFLH4C07c6AEINqBakfmMmoGsscIi5YQ+lKiXhXjkjVOrfusKSQ6NP7n3W+OGPX7un6tF+zs7Q+75u052PznAt0TeKgaaV/jWuhUM6oMaO8gq4Q1Fe2+Kd4MDjMzcZdBUxJWLolQawLjPVeTO7Y8qDnMdoa59rba/qa0TaqQ4sbv7XaQUC20I+hfGxMk2gBXX0auk3aoOqEpxfzTsA+mup80BEUNFizTDUKL1SxC5LoS7uJmwvUvEndTPoeGu4QQ7yYRhf0qkFx8+nEsfoZYG2bPCkNCkxlJQbxFBKuihfoj80R14MeWwnKdvF8VhBwi1GJaQdQJUAvyiXT/bivXEqzMnyzLHGv0xKfumFlgr2wSyAwjk3X4dNCwCHXmYFFIryQdyhwadvQg1GUrYpevfUz/27zd5lQP6cJqWXhazct1oKk6xdpoyBtwRNYPHg2gK2hAXblpY+8tI6F6tOZZgBMTiJETB1ZNTTWvimkNeMbsCCf3Y8U1PxDFm/0nI//Z18j0zrG9F5NlMR6jPWl/RNQEo2qAKrfYAf8/5Cz8WKbXJpxjzb01UyKhsXriJ7SCSD2T1nfhwFrvI9+mIjXZBTIoFvaD0LRFsQmOAXykHn+xdf7DNpmfzHdIpyaS7z6qWT92V2aRZIHJtzbXjuuSbO15n1rFgnguC+gD4DSP6JUUR4lQ+WLih5OqYVtUItDrPHXGGgglfHWOsmF4psOpMxipNKqFr+AHWhijpC/YG3mU3ObchIx4mG2PZv0E+ZYQ9BQRjm1r8ieD6SwjGY1NhcJU5BHN+7NhUfu31JTt1bZ/plcKGqXbuoHgLEZxwyguUS6Qggl86Nf497he01TBiW/Vblqh+ae0A9G/srNv40BWgBn3V3L6hByxTngy29CwL6G9NbL43nuFN5nw7bW1bCMJ3k9QZemvnFKN8NMZDS/NJ4Xz/U7q+pB/yFtFEWI35arQ4HSwMkc8HCf4yYf/9R3MQ+7TBXMQq6QCS2oa7Oc560Togs+t1BSy2I///z+c+guzgZQgLCXsMEHXRTLgixEVWwzO9DxXnH+t0+1894f+F+S+GzvgpQfn7471jpl4h/KgswHVH53hMW+sTzREkonNIbLU5cYz5M2qclmjaxbIQvB2pOy2KSiv+A4PobJu9l/BPVDjYtxS/GdlpFyJ4wpWZU1GnNzXYnHgYC8+qHVT/Tu/n0KlEkVAPiYxv0Tb8xUcPP/LGJIM70O3m4UJhwL1Xm5EW17KIIglWXNlmjlYmoJHE/RmeFIA1CO56JC++PVIGdbVk0f4RO6q8WCZxool/6NRag/9Ryo+xIDwwfLFbiE8yuHbr+Ggg6LiASd62hxlY4D0fq11UKI4Yo0/wjsCTKXQRzKxQ0vYK3m0jI75mFSFHibPzFF9t0HDMf+tOAWxgy07YASuI9oWXCZRy+alFcF2w9Gk518tutjh9tG50YBWjBJc/6rvwfwQYHDV10/5WXryZnAzLzqhIu9O0Ohb/NuDkXYBbo1ZlK+Shwl8sr7GLJpFC+G7Ch+fPTTdxKhfxd2bzw2hLu/cVK1o1HznKQDvPAKqYWCclDXGm+e50JTwIymKDlEidnxx/SXDKwLwYPNiuxUwei5/sr978pWX8qgGw1GbOur8kQRxvsfw4w1I/xCrtW2Qc8EEFkjRdinVgzHTJOmeDEw8PGFLIA5Zxf6Y07YefkMDiNYCOkUliPBCw52oHjv57V9diNUPGl+J5JohYhY6Z7bFykPSW9QdNOSYvm8wgTuH1wUDBhDz8G5xTxyWFk0HBv5xvvKkLXA9sgRH9D3PzF+uCxrfkG+vmGBafEXEZ8zr1FUjcvUTKo5tVscI3HHhm4D49XpXNQ9CG9tuK+5BpQoDPKtrYg29V+94XXQNRkEUq5T3lAHUBr2MaBwZkH5+zw9BO09atxJBbCRGJJrjH7E8dDKOwErCv7v+ArNjSJVuRyf1XMw11hXHbu+dEU6P09UnDgQUsdBt0C9uIdS1TJ0XuiWT76S/lxVtNB31UQIbcNms1fqX9VZPxBaiO8nBoLQm+64OAA/UIl/N3rxfyO/R1cgSWK4CqZOY2bKKq/oelBrZVzl+AIhojUBADE943MnnUhHo+dK8HEKa+H9WFNCoBiIa0wLZE0TJ2bxeAnq2Jf4Mj3WvyN4fCKUQd3KrdGjMfz8LTK2GgnSiB8TmUcbHbJmoqWhp32pIGRWizK8nHEmvzyQWAx4i0KFZcRXJzfbp0NZCTx/G7MmOaWRKPgUmoqUJT/+TenuK9J7fHY1OytYJvjvlH6wAj8trWwRL3clhP/lLOILo/IgcVY/uZmxP0v/L54ua5EIQvN5/y0jmh48enbI4ZHeUHngMa1aaiRccrktf
MZvIHTPSC07NRq4N6gUfX+UQlV3BtD21uajn+pwoR2g5gwYLsDpfrsYUfXof7unELgQLNT6PuV3736PWdDuMrhmPXBuZtn1rICbrIXvx+3dH5o2N9ionGeDAiVhha0/TfUV9+6AdgH0KOxrCpzY+BnJh/0fuvA9VheiIPX1l44T70trV6WzSp+OPghE4PU3GtFrRhjn4/Agx83cklBElxt7S41Vh6IQMFktUfizNi75L4Y862G6ihonCld+OWKWnX4ikefYWDK9vk+XURfnGepvIeCf1c5i/MbvB+3sNK21I7VcztpYr1xkAnR/otYnC+KOzG2uH0freTyz2i7P/hwpn6EZPill/zwagfkugddEcK5S8cCglq2upsL3KowrRWKQeyg7MaMy8HgPqj2YS7MND1PCX/3y8v5aICtRwEUCqgK/nE6gp9S5u8wN+KUs1hOc/sN6KFRV1W8Xu08PRr0H0aUO2SMGbrNuwaTwMHqMow4UXbz2HvqIKWWeKa3DyOAMrt0ryTBTvQtvm/Ebj+I4YixoT7bELboZHdse9SDIaVXY9TMHVDxq1+4Dqv8+BWGFSsRJxl4bQJyP0/8owoH12A/eDzmZQZPdg+NIs5swMCHeaUt+910zt5mVzESLHnG+KeETQdu5WEXsQyD1b1h6kFfK/3oBlU5vYhcirCoBuSC7Qu3hRZJqU9UdVQGsV1GAamVMJ8I0CV2g1CakTLTVjyR3zg9NcEeT1sXc/wA8QMMDqY87MahPqz6UqLGBujDrZ5htPouM5NF/pWv9LFg+ycIIcUIy+NTDIeyXgZkfTaia6ytYRp/UJbIf6OqpkOXBsNG6Emlp8iAeRoEuu2O8jKR5fpiJpfIHVOrIxeTf7NKW9o6m2r3Cb7/wz4b/wA6CDjvl6xq+kApfIX4JszdQGZLJAee7sSVABymrt99kFPCwNf+MX1WXkBT06xGiOJmBGJ+AvkP4v/0gXk5aBOJ0yJ1L1r5LadCEXWcQG6AT63N3/yH35NCjMb6bdiXfAk62vp7LaFgfwXJ3gpW0RgdCUaxcS1p8dXf5f0plu5os/WvJHP3IfYoPCDS40UI/YfGrZQpnxsGJNUuHmy6cAc/qICk5f5orWz8EvZPKpYBSXJ9a36QuSIO+mGyB5rks1H16LIeMAxKevQp2NAux0z87FqI4zMf1yfqGfKWMaiHKkEfk4iFEbn0DG9YEGbhLb32drLzx9QaUixnrmA9BZO2lR7QhlZ+SxjgB7l/mk6tHltW2BR1QzaLt7piPXr0lsUw198MwyGs+YFUp1iU/O7BOcigs8ExuFhcV+m7F6MGvZaLa0X9dMPFpvar9pmAhbOxZfYMEuYLZFZ/vXHM3c/HoMTjtfNSUcNyrc7WNnZHXfBurXF5Xken5ayAhlrf+AEXroml/SpSCks+J9mXZv/qVFRKolfG0FN9lkpL4vKvOxsOE8Lv3RCMxzOVtmymEsZbNk6pBkOf9pHXJ5tbPbepIk4fvmT9jOuVL6ycApOKUfwQ7ndByeLXNJIyvrHbZOvwUEe28ebL3bC4SmwubbEs9MTP+fWa4R7XTyMe4MZh5bT/osvyFTdvBrIUj8ASsbvNYmq0SsH3s6g2G/3dUK7O/bByrEb+hBU7jq/wMPhzDEJmosv8aoEgeIwExaR4BFMJIpNtA7Xc4z685AKkHFHaeh3CDH7dasqPLbR8KH8bw16FAxMNhsXSOVjIHFKU333Qhls/Pj4F7bk5K3txHL2XzhcdQQ2/KEX61GkFJeYgYzyF8fl4O/p0yTLwznhNL316Hs2WIi2dEhiu/kUEadIvcJ6ZehCBo6Nd/OojtEmu61U517I5wsRR/MARrO/BY+XOh/oWpQwCHVtvNKdW7HOtr3xScuOmyT7v/+YX9SbLGZqA0TKH1s4z33OmKch++CNQ5Oi9+OAw7Id1KISk9+DbLIUMRFW1Q9BsNePnQzklxcbp+cIsHR44f1ds0XjSDcahfLicp1OKTocZJSLo811/zCSjoirgRmzk4NhM3Vs26U06UbbVEL9QYn/49FMF/8yxe77UF6GSl0NaHJtn57/kCPrGDlssqe2pW+qNFlLbQSwkgaqcj8hWYeC6+j2Wix2xyzFhV86RLBwRv3Alk6oljuzxF50ogrGxN80ZLULyWfOyADbZOXw1vxkPbf8vF5HASK6bP1kKvB+lex05FTlFCGHOSBo053P5CJ/B6clQgA4JaItFQ70uXiOCmcOiL4kcoIk5rwK8emuodQ4deF4k1KD60HuEL79/ByKjNvoj5brTeaWWN46rKF6zOL3J3018z0bxomI6t3Qo6/ECD2qnPsI4SUpcP9qH1iJSl+trOSqCMWyRzQBK6jF3n6+oZHakl6GthLCB+FlxX7SCCfAZHgO1Qvh5VUVuKHarwFyZp8btxQa4gWBzjN/vlF7vAaENXp1qbbUMuzMifwOuYPKtbNxkajQ4bX/5x++08OuFGu4Lr4a6Oh+o8R2fbqEOacWQYKAgjBAacZs8Cs5fI+ZYOk6jeX2E+znKuOv6JR9U88YyRDbgJtvawJsSJsFme2jf3AZOSrTxsx94pWV0JmDKwRhN0kNJrgf6ULLbJ1aI2I0tjoxeB3VEii96n8YlP+AdTHxu9vPwCKtbar13kF2m3TJV9kwUvSmPBMyjgHHRmL3sYysKz2bzfVSWhDNRqU1trcD2bGDxf5Qm+7X8dzu553SkmMB+xZRpYXPo4KzzboKDew7xG3JzCI/LaynOd8vkiQQDJ7z72FCP+nwCcOEZ241yP+mjZy6jxwCPsJ8iV13VWQZXZ/6A/Au/rOeWFyT0zK0p1vS7en3N5y/LVXpYNNd4mibhj6Fse7hawLNryaYFKNIM0WKGpL13vL51H9r16hpmX63vyYVeiBuN2TuObK2NPNo8ei2yjkMK4fsdtId+wqiEUw23Emz4QAZueC+0wnznzG2zei3GbccS7y4OgXtHO5w87+5nW0Fm7dbU2njIJ/D0rqGUFpY5/5X5gMDUZTbazbA6sAs44XvFdwjW4XMjr0hqQcXTJ0Tq6sXjH+XIPgYk4ilD7Pk0SIda31L2DcFsqk51CnxcvNZLEvS3KXrS68o09RyGGWie+Bjb1WJMCsIjF3zo8i/Hv/cKnhIf9/FsedUCuUDafMLSE+er9hU8A1vDTU8q0+/U/cJEpsCPj6j+ZY6Ptg518URYqlzASb3tvtq5kMNdGCKo/P6Vx1jOsl8aUjiulAz1sTgcOc/XHFzgyuMAp16GkZ2oDmptjjv9Wu3jRsfRb+zItF6cE+WIQ1X/zdJf/RNDTaMvvSgoj6Qjf81e/rw06vCzit9LOBzf7CeNkgPBHUv9IIoVRe6YfpGxrCPuL4QD4MZpvvPdUVKsDwPlSawBTZl8GG7Gtva7JnCPvkDZqUpO53T1sHDq+WMtD1Xt539SJJ1GKIlBC31rfU1YKbBKOhRqXDPWl6YpKFRkjl/aRVF1Mhq7GkkacLwJzt7NWvb7mXxPK2KibkPCzR9RSDXogLX7sm16Ife/Aqebwosif5JXJUG7UOkoMBVWoBowo
8Cz+ix5Zjs4u8/vFOnmksQ3QJbqCJ6jg4AvZ1bVetuV7Ah2vF4WSifV56dc65KsApLZNAGpUgfTHSUMMrI9RVWSX2u10ywlFL4iczPCN0YYjDUZIAWdbdTAgWtrWXfHX3e9n/pF5sL4GCCHMdHyaRfgvAH9oQMoiHH8IpPujn72DHWhSRupSVoL40EK6V34SLRV/OOoxRdRzv9LUlr4j/kY9nXmQPaj2AL+kBx8HbPZWfMcfUR/ntEdiy5M4k8It9PjyHh/7a0rmNHpso+1Yws532eLlwzO3TpdefltKRqV2OYe2b2TYJMH3sYciORh4s7l4MVEvz/tDTlM+yUHHKQp/AKzprl9YU5+grp6eGpFsOwoIOqb+hdlQDvPOXyNC+0+iHx8nSDTB3Ka73/8hgmOyTRcYzPiAxRhQG48oxT20sMsohCHXeu4mN3ZX+su70H1HzQ9L1HOJKnJX735vQ+gQp5/6cNBpqS/AZ6VK7HzHFCwGnH2Gt9BjzPnR7KbXLDWbg6vuhqYu5DXpKlzCkXL2AQ2A6qZFwaNv9Vd4A98TRzYbXbKpK92u4rAX4IndBpKgbrn2Lkrn2rZjXhxvpU+xWWqcXV+Klzm9wk5SbqtgYr4yw5naQUbI8MDhTVwgXt+3IL8fLGs8UoKtKiI03CXQEUl/EflVfnLTfgHH4rreSC7/ukfive9hDdzU18s8vnNazIsFvE8F4ZY1lK6BJCR+LYWbQKC355T8Iy6wRC+UlDZUD4zXjLcCujFPENrTK5G+LM8UsOvZUCjtkjvy8cu7+bQpOav3gMcHWgkyApuLVX8WRxgBVCMpJjnTJrgf1dE9X7VIfCnSX9YGZrL2McUoKTB4BSR6JlVZCjxsp97V3EoEKvkchApHX1HLycmy5SwW1LNSkzxCs4o2gyFU31BBsjDpX71kdGSxUwtHb18gQwnQzF7Yk0Pe2TJqBe27uvjG48GxW8vK6v3knYYAul4FyQdsj2sLyhkuN+oWDCuKZ/tQ3T+zHtOp5rtByR32XwXX032jZQ7ol0TLdYFt+Enm/pRMDJvGI/Owa65xhs2l0Lz11gaKeOKdwf3vJ5/pzRXdeo/u6lJUaqtaXJLOalhkrsxUl/Doj1uVEdDZVjvlyK7gUGJyc030+RbFiGvY5jqcuLtS0v21umLGPruy9BPffjD09NWlfBnefkVfmvpRlqORro1PJI+DOZUusjoIVmpfGgKrSUItYMckkybkPNmMM2iMO9hdxWH4BwaoYZWKMSnVP/R3+HL2/p8T0DRh5h/OB+Bkh89yLnitfy+OQsFsvEGVwfRgP/WycGbSR3wlyOt61zkt9sWa8lhj+bln2oQ0oioixz1JlwHl8KwTXmtHQl/cCo8R/yqvw7Nl63NFbRV38ePvlfFN1B4E4d7+4LskDtszGVykhecU3sonv/zW3X84Lcsk5fsbPzgoogNRpOywflG6w6Q68PjL6q94BLKkFSZGyQgeDTwhDqx68I98uIQN/CKBil4OB6o+jYOTrs7XybVftMveKaS1rsWK+cBpmyVBt/p2Ol02vUBEu1d4WdlWxS79DTFeBLSNr8rX8vs053nMATT0g+Q9ok9Jrh/sn0XNRas9MVG59g9PQfyIpu1FlOJO16FFx8xgAYQ5pfiiuQ4RXCnazBtjnVEo3igs1g33e/mvA9KvTrrBmlZKQgHkUYiAz7bb284Ody0WtD6TdAaxiKpxmyBHCnz5ON89+ITu9/anA8H0zOnuzBIe0xs+SXrKiEbxM/02qovliILGuY9Nw9xnhRmFKTnZ3V4mUTKb6PqV5djQxBKu+QHYrj9eMEYXA0FHRg+VmtIWeoDNEx0Cc0Xddg6NVKurv/+yczyyedqJok7dfLstYGzK5LMppEl/DaR/uUzufUWdDQW8HXkB7m4aOjuSG6d6dZgmLwn+m+IkxhgyfwUSTzuRLpS2uXXpa+K30ymjvCgX7poEFcJf/H2QC7r1vWnCCNUjlDJFOD4x30JgLU0UYvgAEV7o4Xs5UekJhypfAYqMPu/npWfs7ajvhx/w9QplnjxoE6rk88+HYqZuWWtpTr8dd3MDnp6A1PZ17dbnVmtGIkyVnDxK2sNNwL6XC5QF8Jjhv8kYNYLoBZ1JmmN8z67tEvISZMKwnCAoyzoksEDgnEq2TNkaqX3Z9LBmWYth0M2II3ywezVB589QZ+NNN4M47Osl8YLCJ3xqZ39RPwdihLij1XX/2INk7bzjJ+R+3d7GrdnLSi/IunFX1nUglGhJWfnKBO8lTwYHMo9pJWtWRyZsOT9QJUHG04/7wMhiNAsa0pRQr8IEibl98qgLepwyBL4yaKO0Pn+VB2GomMbueEG2RQpPDrSjc46dg7ap53S+J5y0vyNhqZgroqIKBoLLSqJ5lB4cGoPknkg1viUI8xYN8Kv+aR4wsyWnFqd4+nYIDqsFbLNKdbwAjMNl/53UgeSfcWJAnbdM8ML9NHSRPaxSvZlw8xrGNewMFK3XCES58qKm+JNQz6EZo32AumhyPLhJk3LPMyNXdzO/Mu/ihQpkV8W44fzt/jjpuHyd4P6hJ9Jw0xx7uUq7xuN2d9Y21/KanmcDU45fyZFZ0NddnCxH8eEBOaHzwmjXc5XvQmb8wL4bHaaouMpYMUpofmXbkaQLN/SxwMrUc8rFGHjiSmv0mJSX1AteC2+EOb7hdNXYvYMzITxHPM7wvhLIWf6+5uMJGAGSUtmQ8muDX8+LN0+p8EbqRbE2mXxz9+0Y9muEvQAp97nxiLUqpShntH88nf3U7q6Qy8zvzNO2WE+ieQFNlszdYpJySpcpTc4qHtIWcwlOL73Ow2UB+CkxYgM+RtMPkPWkICIsqBM38adUgoVMfWlthksQHnfoCFxOThyXz7d9dSFlx2fRw3bLDz0kORHJd6kmyjOPD1sZXiNrgDcMnHDQ+V1Re5lrPrmt3kKUHcgWDlm5cWidH1zhJNlzPlITacJf7yNvEGDtMgXMpJzq6A75aFTEd7/3TydAsUoFS+bLF3jdnMI0r8xS708irAfIDHZOmrtN1ZdYdN+DvDpZf4pniyLdeBmpE5pK8PKdIqTRdkoXP+RDBL95QFNQn/XcCfv4iI+xr+MbZC4QwFaTWWtB2kIgaDxr8KopzEDFt+hdeEk4KQWuwNg0vBrqCDBPxwHcSOlS8eA1LobiicEv6qq861xVTIdIz8OHcQnSs5NCOK5785lyaZc3B3XozO1Pq/+jTAi5fzLo4Pkak+riPCNLfUOzUE6eywuIIIaNkb38lTjBTXyd/G8xC+Y99dI0PKEdKTXsToTwbPub7mYrQI/2gLlY/T3KQzKBhWi9AZeoM4o+FOacqDJjIEXl1KDF9mOzCzZLBsO6DYHcmYe/gO+g/ls7scPKJSDz6UMzlntlReYhhky6aIevZTtJZT9ceo4ajSHwLRZfl/X/VBcDQ4T/l38L4M3fNfJir+TbuCtQP4M/n7tp8bYkVwEV3L8mplAix/Wzg4RCsZxaKZlQ3SGCZ8PrBDKaF2oDjHeDwCkymFIMpTHseZCrA8NbQ2+1Yc2Bw4u
hgMEEakZzSWMG7tF5EF8CJVuQKjqxOd5DSYwU9bWkCOicMlA6NaHYsckY2ScoZ9woZe4HIRcOrDQ8x0DtxRHgDQLXMbXy38F0lRSssxNgee9nyAE9unfTu0+gUq2FOjyFaUntuxzeJV2FZ/dAkWnLFEgxvhftLToV5OHIH34r52I3fWy71SwOeSDPnDNBoBU+zlz8hZJXqKeADHF0WNL36hANL2XRQ18JZiPQtr+fOFRs7qhuS4UpyEEpCAp33V31qkrZTL/w2wGK6pMNviKqKKnqD+OMKmfQLy0LhZOIy6YAMWGD4is5fu+iA6dcXLTq64oTrq8EDy8anyZKPWUdf4HLCv8Rh+fFg4m2fdyDfSq92IvvoFqFkFNgCiRfjRgRTQDJgj+1UPu+yqkp/x3HMjSAgGZW/j5K708aI/M3w0CZqNWXoEj5k8Zj+zILQx+TcA1/LPVpoJJBlsB++xXLhhi+bqwYZLl1VVsG0Yr21nf//k3Bp58BQdIObASl4yF7b+Wf/G2LRcAv99frDqrZPqHVTo7SukGOK3h114JlgjZAt2XyLDZT4rYv4sDwROno2AYZO+KhbA60h9Siv1zMzs44n83aacdR9K62g8wfgXFDYM9gQqhF96jYKvz7WtdrY4wb/xBvD1huPNZ+A1PfWV8MS+0EQt3oF6t+q6maasEJ7cpCNN/HUKfH6H/FRoA5hBnqaqnewaJwys8mq32QUGwhlRWiCvM9xsV70syHJ6Td3zHV/sBF0qJdQEao9gKt/5ZHZfHpDly4DlR48RPRzjRKZpklqP80D4GG77LZqPR1ELP3+B5IMFARITHqeR0HfTViTO5Fv/sCzCaaNaf82V3Q4CDLd1IhB4zSts/BTwuAuxAMuH8lu9N0qgzGV84L7PU/t4YAqAmM17qwbD0uR4tAdKaLJ+JKP/b4ncvpGJidZe/q1e09tXLxIkH0SbvwPY5Fz1EclWlrBDpXlb2VZEGWA8Aa0SnO5SOSpjRq2/coTqbWvpfPdSkPKw0xUqwXbq31hy18Sm2xwa/+uqVMCJBsObgMf470f6mbjxxteyrRVXKlBl84HvuBWax68SaT9gG+8xtcV8x63Ugcps4VJrwTbdgsgZyRvyQr4350YTLqKp4t9rV1NvV+MpxGPeKbSbIkrpXkWh+0P1k6qUVefihWGUqjJ8P1VD2k+DVzdKa+w6ptbveFrENlXLPCLJ1IqsI6hnPyUUKLzUsSYwm+T+zLLic9bR/u9OKKB5cO1riOKiMaK0Ik5Y+5FLLuQjsKJ1sATmNs6pVlq1lanI8bkGpAuE9CNtkoz0iQBpevFDq2vVBjp/VHxiRMHT4cNU24Z+JIqwAw5uyqCIA/dvNCR9RQjeC3db9568WITZIRX9Pberh3CFIyMwJsPmERFeIINOs9+P5k6dN4m4CXrr9+IOUSPsHqolGsQ/zJbo1R7NWH5/DGr06i9ZzlX6ZMfjmV8ZNVvvcSnkDs3bVtohGluflogzrQ3YtRvUzefZ3dCoVNDdAHukaNC7BJ+cYqe+DnmqU0Bj/SkYevGzYzzF0WzP6+dzI9zECEpXzLRG0CQ/+Tr+jy/UhTpaLVKWYWKY8GEAT5S0LupoZ3ysZF+nyw6vGq+HYmnco+gvbH3S05Nn6AFZKQ5FC2L84qTlbcROES72YpQv2Kyvgr98Hk8nuostoY/CGxOiHT8Yb/oYcNSbDL16qvTibsMc0AyTX5F8OEkTFF/38hRPgxRo0LCR2kglYRBzVkqEDl/aLm6ZeOrNnn+tliEARUiIp/3N0kfG+X8PnuoEeAOp8oxCNNzJ8egO4ezezqsMl0WkVduRcHpfmGscraNsDwKUIarIj1qIc1v9sPaPebkpu4EoMdjfLv9EWSXK9vP1CvZzqgoW44tiSdDUg782MjsJldyVvNJ/HtCxA/0YJZRH2iqOFUgUHNmND+ZlD4V9FChTQHkUHZOEmiKr3AT6el5Lf8ZWgNzwXSsqoFGxsTjORIbZ90YS+H7Z9gkPkMEifZCFQEF+3UZA35GnSlzfm8NHmmiMcueQevNEX+/lo/Q2t0xnvgE2XNrwd5ytZipH334WDMNfVNH55ZHss7fnxdPhkX8g452JvclDa27gAQHlYVk4+X3n9abasRXSAUkIhGouufD/DK/BfYXuVJH9QFkgNJq9yRAzgr6gVybwk5BgJ0uBXp/HpkUfU1j67T1doBgDTEpl7atE30PDhH4KRXxdGMmSWF5Kbq+Ppuc1yLPnlFJtvGMaUSvX6OZ8WBZfuUx8apFMtCadh0HHIdh3IKLr3u2p5kNntWiOSSQu2+Os/L9PNMZdBHN8KkxnkXsU5u8Zmb9GiuM9Z9K7S1jfgDdioD+RDIPK1rcwdg5xTX85FSTz9jaNCzyibyQg4MB3dqfEon/V64UJeW2yUR784TMvt+i9hoRanW5XVhSsBmg8pAfIS66fQ3TLZ3AGbPiMOO5/Y05zfRVNv5LED+ZFrf662EMOl7bJSAkwFEgdid6aEcUivf7Hk70Lk56l7mlHk5kF08m9sjbw3NMVbmcDZF/YpuHnwh5r4h6avbLLaV5Y01DpdyPnIajyaLeH5P06bi65C3FqA/K9uw7ZJWR2gHJimVzv7GRwWCm1w+NdXXMr430iAKkJfb59z+9gH9rsVzf53WXINO/UqbV1S5EcyOedwQ//IzStHoZMRa+mbvFJhtdVB1+RAxqn4pATAGh4/bR9KtGnf2acYzIBkOYv/grPRMQ7XcoaUTdAV3NnYiYRzJg4575Iv5wOs60hxoFA+f42dQsaYFwLFTbYaSJLW5/m72cBXNF6wX+YdZbuMH7E6Ro6WynkBeufE6sXnAmaXmPUGtwqD7/RXC3GAo6od8IZKlZKKpe1nq5kmHsH2RkRFWZtHm3YYCEdmxLG/UicV/E0UBgW1kZ78+gi0abIrVnCYPqj43wTEXN6txDa1xGYSSW254v6K1+fjGWHkRp3i54M5Sq/HAl3QFnmhXuopKrO3Hh5QkUN7xCVaMtSnEgKAUsRPfylaKtagsjX2bhYGnNppqPAic+dBbB89zj1e47Erha2cFCIsjolO+FuPNJGBZ+JdtVB1vd98RGENQoWYdg/dh0gzflIOt8cwrLuFiFxD/Nsswzt8Lb16Y0Jt7FwkHUIUtrWdcVmev9jHN1DwNbLKUKpY7UaEOMUT/HYakOyykg51keYR2M5qYAHRLw2iezsHpkZ10qdhfZrJcMBlhERT6guFitdt6kprf7sQyxXVflcahMyqHEa2JiTrJraUzVkEm4PpjVl/4zV1fHq5YeUv88uXntwileFy3OqX8TusAcY7llf4OeBWt1J9aVCwceuJfYHwueKPS1Opfabd1osfqAitCZxhLzU2q8XSsexYp5k9p2jUFoiZSvusECuS//pFdJN25NDqQcnNljvFW8NZ/xIFWFZKHmxil4mYscwC0hSgDukLxPXWQcryryPsgqkab9Tzvvd7Xr7Q1rNP8ryPujR9OpiyY/ipeNLfJCUa/A9nIAwiD8PWLO8RvC47ubAhJz06rg34LZ65df6R2mn
oIIrfShjdOjI1v4dxinGse9/dZvlgBQSCYcDKGW6sdyDHtJfWgxN7cVEhQEYOkIjY12VhYAMs0eIIyMxCz4YrwEBELkhDKPfEWvY45THhb9qmzNzZLE1HlWztJxiz9BKzj1qg1rcg94nVyGXQt/N3paXqyMKVH0pcGj2KbdVfCZVjwJTc/0C4iL92Pvh1GGkQ1PwGKeub70b0t5T4VcPcOysIkdnZbgwoRWr6fz2dlH9ayRjuTauQVSHKEWa3f1/8YeKuGpFq6jUQITeYXO1FRbnLYlg/G+54K2vXOxg+12PvPEtuqPsmRqKnMXXAu2mJcomMUm1YzBTCyV+GEt3UzwuL5mc4uCYm+M/retGxywt7OFUUM9PMB0Sn5NvCbyvTYSY6AJSztxs692eDhryGZbE38dBSpSrDj5/6KV01s+DDQb4JAxPmNe/RwmRtYsfOp0MwV/Kq3Z0J/hcbx4lHUdlQgBDZcKnI0Sbql+9AAfp3VQJu6SqpkEvaQ7WEVlPYtJrhZW1gYN4DLAn3KqKeDFfDX1aHyit3/TFs8FdqJqoSM4lasikwNR6SXP1K+C9PoxheaZH7dWfqzkbg/yAtvNGZu3lj+/IyKeGPkwH+DeD55eCRScTU3xwo9AsJo77C6Ylsus811x6SzPqXKx23Ug+4Fr9xiABnSw3Rd0TviSg7ByOHYLZlNaiDtIR0mUwiQ5fLdM2O4ZYWjoN+9mKqFOND7NNKhkTL/lgyyjtPK46zxa4fRZdPt7YV4+YB8T5aP89Ko2FQnQmUwF3+gppM6zHEYTgonfyv9/SfEM2PQ2mKMxOn3FXuAQoICMJBhgFkRGh9bjUmpGTdInPTQ7G/nnOvDFs0xX+Mrn7gvwGrzXAefKmuc24zZYwI/oHYifcy63ilBxubkenBWTyUdXCClfralm0SvZ8go7oS6aA8XWKFU8nX2NfGXL2CFcOMmRmt6jW/dxPwo9Yk2dy/fvkv6Pz8RzqKJPs7z9BHFB+z+BKtQaCjCh7LYYUp3PRR9vNvnzazvFKLuGTWyq5HuaQG/ta64ldlp9fEDoD39VmqX2AJVxF+/GuTZ02apNbz8BxMuCjKvqbIfWUdSDb0DLMK/GRFS7VUPgbss7cN83n4ycDXArgrxPLASKhTUNSCjZCXQPgCIDV/80yFrryo5+9Wl7j7glRUc1jutrJ9N/3yE5MuWh+la+Jpihm1wPzczUAScTUL3/g3W1uVeX/zfrmr0/Pv35zFCDL5L2pmTeHiPxfMwXn2sb915EPonfN8fpYDluLQL430NtMIPoaZ2g1gXnRB1jDGS7E+f1WdWLzWiCqrwA/6ehIKwnrGtgz4lQZo72XU/ifbjCIGCqf2/HZKYMJ/ozJS25iBw6zh/7S+mmPbt2ORt55vBU7d5tCQvuUT3pfnD6Ay5mXpuJtlTtcofjD/de/u/ufIuCHCGgb6u3Yl9e5GUoR2f9JGQ5eQekmQe1X9n90A093/vmpPRlsk7SPZCcqz2hBelqngguNGCyy7SP2XrRzcauu6Trb8F/GxUWXcg/XOftJEsHzCgb6SPT1ffjtswCuoVX9ldf+XVpFPGbGKTYrYdeN9GINoGYUO4vd3OBW8lvan2Lp7zWyivac6+utXR5f7A9HQhRFk/tUr/0YqYUULmnV+N4/SkkYimDF/J71hV7w8oIJNLH9KnEh24Fd9XuR3fwZfRmb1VZWONQwNtMNT6PBdLt62zVGHTTZB01gbHGDan14AjOTrwMowd9T7YrLyl8NvV6AmCOBD9VW/b/3stoD9fjne8SOVo0DEpVzOvDBo/6aP53en84Rrwp+taeTTryxtpXzN+kNiRZlpDfH8W/038kwpMbAYfibaXJp9zsuLGwC6sSCvihe/O90pGkysJeoHL1H7el4g6uHLSe1Hs3CKLCdmFqYUb7MSXn6Harpk8vmuavNX772sYfSwn1cx1mNvHC9BcLgOsgxo4hhvG+CWwCaVgDelxag+V2kSHx7xu84DiPmiDelbp2Jnl6pp3+zGxIWxOeARAxJw7EJOnZP4/FGRyF+LNHYKGIs7a5loD3149m//HtQj95PLrYf0XfBTiA7wxVBx1sPObzqmNR219OTky8oXyZpzpoRFe5XAsxO4/JL4IEQh/w6IyNt5sCosLhh9fPRYTUquEyrcQf4FaoDPP2ykIzXuRjGySe0D1Up04lAhZuQk161oUmQx6rLKdevhe8M3CuDtFa94RZJt7l7OaFVmk+5oMcg9sUETqOXOcXGJgIL8uyVBI2N52A+ewpaCp2MwcxdEoXnvZ+YWCy42Z5zjsayFwlSkLF3AbGPnErg+SbamHWKuKlEGUYPV1CpJRSUv/o7tkCk+lTM6Py9kR62wKyNQ23D3mH6ae2fGl+gXsVGVJ+QLscoaflwL0PnjA67LERfWCX58f5UAqoNmxB/i7lLY4adMkSLi3CBWqWQCRKuJ1Gngj31AtHjY1OkvuFakcA5G6rai4FdnoijEIn3VMnSvUiFlXHr0exLrtKEW9FVK5VreCAf79jnLgwcevyI/+ijpoL0CfNYj9NmFVgcck3XtShWKa/LBnodRXlh3YFhlbN7hiesRx58NlOKNkny1UXI7jvSZgzRODPsZvGd/iiqR9VO4bR8TeqA+mT3zA0NOm98ygkyRrshgMXskRnRFJ8AX8x6qdFv2IRCLIwVsElWf6vrdF5TKb4MT2s2DGI7fyVMXE9UKrIJJq5AYsTK4cxQcwLEWK+if+QnlqMNxBkfAj6/KDNPoaIffb9SolA7pr4sEDU12wbtKAll691cF95OIy/f1NOM+eFO/QaQTc0frWbTVbHcrUYOf3etrDcuqApFaKEXeuQpOb+U2FQ0oYCyy02RhUea3DyjbtA6aNcNHCpQD+8KETXDVF8005n7p9301gSlQS0ZC5Gx3kme2LY4iKp5odbAl7tiXsdUW5jIRuPkBOwKVZXNZOnn3/9BHMlYoYU41eJmxbF9aEQccjgfV3qA1wSwyZFkzQRI8jsrWeESn5g4x54u40wrAinza3UPfc2fZs+7md8RBt6KEaQH0YvN50abnL1ATOJ+sDNLuJ1GDnABdt7oL/7aS+YqoBom/694MiP4u115gbfE+kMtZPj7Q5CYp+W9cnQkUN3XKFRBcqQavdvkzM3vAUMzbis4I5qMSyYPYCHi/MnolEC1JoAzK93Qd1qHLD1N61xHl37+cEvAldhi1U5XCYyQIYqp/mtvyv7aM3tIQjs3lXcvlbjz6LBi6j1oOU5rxyhBCCeD8JTpIc5ToDPYXccUrgjML65Q+Tn8ECuhNgNwNLo8FGSHk8St/avA9G9j/RN+iRRTeW39zKhtuq/aobppOD/zNf6TfbJtloEy4mucfQirxK7SiX07201CxiRznJgjx5JK1VL8VQeEIgi3YXHfGEya4BDMPbuy5MjvJMwFl25NuCrpgUo6egjEVve6EN8ZBpGY4P/HHihrrOwzxd88NR+TXkBqyXKkZSNVBZtEuSJXwPyiyAZXDC8aTKwlKd5zmd5a1mBr6DHO8R6PO6PYCAFLKzNjBPE
DV80giuyOmTxAX2u+4AH8VBuLeEWILwqmLtT9+tY1TesL58pPG9DfsRBTtd602kCICasRp9mQS+DgOMTYzKXaqvtblLWJ7Bjta3U7pEMJZ07JoX/1PDKI7bpG2GPEEUUNaqz0hKiRljpikcOIreXvdkuePqc79PZZ8fPXe13PLwJ0c9bcQaudmhz+mjPCrb31ze2xxc7r4OQDp+cQ8d7bhCY24k18TPyA1gJnY7nUhJLpzrXUzWASUBiBCsTi+/a6SMSCUoUj+QpkfhOq3Vg+2fV4EJpDtShdQmtSMP9+v0Ny9yONeJTq7GWOeHNQdKFYQC4pxWX0RQzrxr7dSfxPfBKwE85rBHbBiyQ5/1PXlHrlf2hTyheFuCF2QrMKeZLax+4uUD6Ifb1hgn5WmFJGnXoMnRxLksbMw2pHtKgKGCBZuoklbN6O/Nuew/sWq868mtbiO5HgplQSRIjiUoW+D7Y+Lwr5VhzOTZbisrdF3JH9vjk4RMz9noiHjYNme9Yr9DE2lZq6fOsN/0vcT+S0hDjHr4NcNIY62hEDKM3Wbw6cGcSLtXoztTl7jtqLUfv7ul2sGsw4HgJDHy9tuXEPQvf67wJcvLTjWgdedf1Gi6NO0VVCI4wia/pTX+ZLOE2U0RH56K3zwy14dgqTMxCsnOR32FXy9qWs5skEaiOdECPWB/6v1Lnejai8+9k0m20lGICcg87llxzhmL/SK3Jx+qjaWPbBMLwSoOMmhiJyx5YUzkDDCAf9JuimQv5Yw5Ftz4YD4g66CwC4ZlvPs5kqMUPS6g0YWlU4sqldfoHKLFSHWBJnQPFYB9S/r3SJgpd9g2aqWPI1k+pYuXtOogY6FmfsrowAUNUeEWx7Z+a7skl40OpCuflvGOT/KknSpv4HL0f35vfr/DhoXvFK0wobnP+Myzz0taulpfIREeaMXkFleYzFSiTQoGej3tKT/S9NVbEmOLMuveXulWEspxcy0E6eY8euvonreZvpMnQKlwt3czClaQ6DWZNaVSh7Zmc5+tC3Q0Hq038v/aLGpQ8SWCkEtpSTkfV1Od8TTp5pcJV6zLY6Ewv8WDOpq3pwXnVdxXVagcWycizXcZWwHbS1MUq43P+0oeNxUNoXRJAPkhXtQB1SKl2IaTOg6nxEZILevsY+wNZDPF+VYTG5tiOLYPQdBu7BRr3Yf4Dd4hanFza/LkyAm8VTmRHRGOriGZfsXONUWleBsxkvOINymp+JPlN/KX3dzT7Uy5mYx+YrgiOJHYsc9eXAeaLN2f7FnB2Xa0sW4MRdfHxXLhcNoZXwIHbmfUeG76yVXTZItj6+hr1F/8PYV+9J8UTvRv7Cff3IK/Wz938apmyilFJQmY6zVY2IOczbRi28je6YJZO5U9+x7oPDPdjBSgk9QdJBbZ+YD/WfEIpcK73niyricQK3krjmJosNTTxmsrL5YyDi/iLlXmOcHJGisAPnGCtdcTF4OwT47aQaN8RzpZBvP9Cdy9W8suPBwIKKjrHSQXablik+KcoKjLQHB4efGFsXBd7oExeJ6nw/NTDP7wqmoBLYdd9jEtJ4T63uXWVIKiUwb9RmDGr9S2jl1wsfpeZwPbbADOAIDBDkRB9IeAuxeTD+Luhzu5WyoEIjSt32lD7D1NemvY8/+5h3H6y6i7GX8Ruy+aLdcFMkWRBWGs7KjqxTsAYakwSIOK5TH/CeodyLsxh1+NKw4oiMtM6OludWnPz2rWeCiaWAmoWYsuv3RwUCMOLVUPnoOobNmAwOdCC94FUj8H1giWL/kiDCAClXUh3NK5g3/m5YW/u2zIuCs1qNhca+KpnlXXQ0Wh0wH6+rS12zFOPtyXOkG0a3FSddXdyQy1rj3+FMryq6FEbngIFyeK77rWpsoaZyyR1iBILDt1Em4H81V7KmQFa7gzxD4EGy89LXQCrJAcYhcLnK+sjBcKmTxMjreO/ubnqnl3Jrc+TrlDXevesgrmuGvAIPPrixj6XMmGnJzN6PGQB5WQCdIgcbnAkIAhzRF48yXpMTrue76iXcbKIOHIAgqMJoVTqc9nxUmYmYSoO8Epue9DDm0zaXpghOg4TlIAK3AgHJQcmkMtRagFF4I6mbBNRs86aJt+sFnYf8OVpdTAaxtTPHzE2RswB9zKIUo8czsehfhfmqavxLss0w/0K55OPWGTctTrKLOXydJlm4c7eRXzbJmJ/ZzSg1slqvmlbayx86pjB/PLz+oq4Bd2JkI31ay6eyFskV13eshXbet81Y2kuvjznMX9BaBCFMijo1S5Qo9am7bUxxGmWmzHnSHjUls2uBuAD7qFvjLKaofsMN0xPW1Kir1sjYHcsrlcaM2GJOdweBcDV/ug+tfQt4mEIvEI+JsZR6h/obDhL8S9C8KtWb/w3LCDrmCU7DgxP2PlTQKn9Wc7zY28h1xtTiNZhcxpJJk+v4DOF9gOo1VGoF9WcX0YFVMcdOCXc/gmdjRddVPiXAWvxcskk/aqFHy6VpKRVjEvEQotjaKEqwPiVADsf4tKwHZzzyoTOIVAmlvhN3dtYqL7s+c1NXcI1/Vq8k5P37cbE/aseu6AhnL00sJA9Ll+Spt5RDsFfR76Iog12GZVMKoZjqgqKDByhWBpvth/IyiYXMkgCr+mfgCckv3aK7KuoTrb1bt2wKXu3GYG0aj5+7u10fX7GdT8VgrazfXPuVXeI3hjJbKikxIvDP2aw4a5hlqkuVXFjV4fkMqneuaGZZ/5nEs8FX5r4ZHdjTXzQHrfRQERCUcAljf2OA3oAClf4TH9TldC6Dez2zV/Ne1gnyX4m6idRPupYwoGdAB5mQAVqNdfgHmRjY3LpnV2PCo2IAtXHyWw8fGyBVtVqYXLvfu3yl7gz9inpusImbxUGddAvUStyYdxfyeNT4vXcX+1RckmR0IJ0MyfRr06FX9HuqDf627IYpPjnE1EU/3EvqJhymhusRNdIQFqlQ6HkoCeLX2J4rS/Myn4maTBYMmo6EcWUbLjheDz/AhvAvGzle3N1Tblz65QZhDKByjt1j6W117z4FJ2NTELsWmrSkTCKdcGiNeh5cevREf8J4tNl65SHimrppSl5+k6T0qTK9DsuM/m7NfjfZ+POlZkaj7LMCPuaKDNfKvfOn3WtPOhdo/BcZI+xZsDtRjvmGhyl9bttP/3WgfIV4S0d30hb/k/gUhRHORVxyUbuZWfF8TZROHBFvXQGOAHgoQ6T9fEmlJJNJwnMonNqHWFCVIw5jmiteKjjfkFyDHv2zT6sSesWkhB2nKi0tEeXAgCiZIFnrQFPkMWhpkkAq/Yxbr4mOhQAkcrC9x5d+ngLKFIS8v79+Ht+WoC8p6MuRlpgegjh0E0oP4SNbO+DV84YRfnZ8UBCEOzB7UDo7JCG2lChJzHOOsz+dOxo+8x82YRy0RgNj5lVHwzy3nKILnvBr+VftVKsCnYP/cMPnLvstONZFYZTxMQNy3X7bN/7n03VCPf5zj397dh4+WZYPSWOh+kroSD1BvULdYYt/WGP07uuQ5PuV4NsCss/6K7L98PEFx5GOUPzCuNINdkS+5K
5tL0X4HQcUfWO0niaxFtdsjCQG9ONt4GyFxy1XOQ1o2U6XIr2lyd8aBXvdnJvIi91f6RbifODN5BcVLpG7qtd64s3VIsFG2S/r7Ykzw707h4FZihf/WL3ZFrSYC7VZC3uKIOKVfHV/2dxMIfiWN80utS3NIO6ZzxvUFKuAEx86gnc4zsFV6Gyx3d065VnnJBaLlnyX0UOpUKRzUg+zbM4eudGvz5UWXdAjX8F3TkhTZcA0mtbKAQBRX6QmmPdtL47AikBvgTwang4R9MM2DOvB0W0llkakbXM7cM7jHkf95//Iz7PrTwFgrn4jGOAZa8MnNRRnqK02daiSyIy9BXiMhSuKOoWPGLBZZqM20PYHccrL/dEwVMu9n7D9SJ8y2pkCHGa+BV8b87EJl6s9o7bc9O9znBHKoodoUZuJVviju3vbIH4FlZsVslb9E30izoLkqdeAtVxeQU11L7eCXvCGOnMx7ZD9ldBOTrTegQ40Epq2WiDI5yqfERwqZOXbT4yJ0EME4DPUDTCno9eUK6bDvyP4q988kVyxi5XsMJkEZyDV2V5m2rkl/AavhJxCKvEDexYmffyuRBr1mRJyc5pkoH1VKz1clNX7o67X9x/n54tRQ0GXgGXtCLpOXThnpraAwvfztTU+ho8qd4itJx7F8V29vdyG578UhjW6Zs9ANX6e7ljvFOjvhjAHp2B2+OmLPjvTbnxq+5+1C3bD7gDNL4mHak1fskCgbQZ8kK4yKse6a6dHV6YKPXZ4qZg6WP/C1jU2x9qi5PjwdJiIUg14OxKCFW+mq+9sBLSLMwJ2FH/CRRmV+FtdhY3lUy/DRaANnmxeDEJgEbC9IO0M54w/a+CnsPYbZ2BQuYuC2PZ6e4b4vU33+ttmRyEeoF+a5jJCn+a9/aX+QJLodHGoIxJIku663Ar1/KME+aumj6V9tTrfLMX+oBRzA83kp0iV0vqPySDgSzykRK8xVQuWt9XIJq3NFo9YE/okf8naP0ajCTQtingpYLLhdQHGMX9RHbgbLboeTrtPbU2ZNL4fGn52IYDYe5OTckA3/ae5FEG3wN60LbnvnO94aENANFDxT6neIPc+D+YUtK1SR3EG8Uekh+Qs+F+M1/98qDIp0FnDwnkhp5ed5+Sirjvjo9VC0bIDsMa1dHOjhOr6JD1dJqaainx5eSDAVHDa1jBTgs/PklRnzz9FHrRuv0BiRFYZcnMKQwQ21x/uLQIgC+nRQhN4QJYLHtlvPeb97nixAflIF7s0HWq10E45E3iffIQS/Et82itxQMJKFKd8Kae07Sx7GLNKP+UUiOEm1zfUad+JYpJj8lYWQtrE7FrkfvIFaxk4jvjjyQAQfog2KiadDcUpYWvSRjf82mMSgMeOFCpEv27GPNqmFS6E7Pjl74rbRxCsU7AX7WwhWgxHj6wU6b0YeWD/Mv/EYIeJ8QNRXHxE30s+25I2OtVb+sANoTQjZIAQjHzqpv5fUuoMF1WWbjVcKfsh/+AxCFR9oKZU2EdJaPPBGuQGvj37KyZcy/q0L7vPYEeR688jxyMaWs7sfkZAg9dMeigItGhEUfxmqGQ/A6ZL6n262tW1kmw7GHYHyoEbodqyoMEJfPMsOz/mFan8+NYVPuTMKFzP9XSn69dK6wxr1LPSsRdkeK0XK4WdB9nGiQzaY5B4M4K+WeCH97cDc5k+0t962lBquJjaC28xASBYP7h+6nHuxmV/C+unPkuzpp7MR6xmeSPOnxv6vrqi0LeX6mHLazXYSAfLnn/txXGlkZ8FEMdlrq+Mdo00BKgQmo3wTClgTSIW0ZHwfpOoeVDqBGR6++9aRvEQAIJ4JzM6bcGm5IzB4VVR80HjR6h1Sfto68Oux6PH4HH/OiMzc9cIaBFKV15R+3Iw7IdBb9XUms2Uei4QTwEhtVks9pTBEAwza7J+19ShtdQ4oC37bmRiZAO956b4S/y8Bm5jCGNtff5zeGEuClD2xIb7Dq5CFLljB4jAqPxJWmohGhUzhTpjLUa5I9WjPQvOHiL5SJ4OPUf4V/rUxIcu/uRG32JP1b5NfMc8HKhekgs/R5X5jDmjVBEMIDyCCsEwfDCSYyB96BdAaEVYZ/9MpxI4wxwcVedzhKjm2OUD+it9R7BHNWw0tZLsJcyMVLrwSfRd2xDrp8fCfpAHSBnBrBFXvTC+UDrd4B45fSHi/QIl56FvQNCn352/1DIq0KOBxUMFb9fTpARDh+wyDr604oROZl4Yf4jOz3ZajRlvyrep0/rdD6/pSvsacwXpY7MoCt4mR/Hv4ErfB9XXML9w9qVtAsjOq4XqJt0QJseiTTSobunrpDgMColrWMfIq2RlvnyF4SPWVGK4nK6l23c1J6stm7pNKR3zs8xXHHUIVo2qzy25hU2v8m90DnmIGqTEd6RX45zdoSq53XX9IuT2ouQt/JQ0n6jCgHfDLEuSQ9nhrrHG4R9E89LREP1qZG3VMZxIM/c0geFrDnHgWWbJVW9/M2MitK5rqH//m/fgveVAa0ZSaRyVCZpw59ZPVACzX+uRvKcBKT08dp7jnaCpVfEZeiT0H/TlmZYbouHveBrq/qvDLlZht/m2Cytl4C+1JmFJXmgVGM20iB6+vZ/icurcT1RNwvRCgFar+CMmzcvPf5s8OqfONygu+dFxPNIIN5DUOj8y2FoozOGBlnQbwda0D4jp69/Aw5bDlE38OXZzCVOr1OT0Qi7AkTz2+EWj/3IjZqwZPFnRuP3zzYWXW6pDd9H8xxrFqsB7XTRZHq9yDaR+h9ZIOMQaSYwpzxtuP5ZdCGBcdrEf9dZZElIf2tOCPhkjAES8l02fAlgtHuzFscZY2ypI1vaQAcW9ahPuHEFa9gWQ7GgeEfgTBnzQIHMDInqI+EGNu1H7zJoqE5iVK3cze4H7LESo559qoxNNxsUA5fByu+T3wTOJF/k+gzZQFv0qlPF6gdj8w5jqAxs6MDco++TXNHFJBLpwqMWsMUbkyPx6nFwwlvoq/OhAsldKXIFU7tHc7fz7ZoeP6/eSEQr3nyzB32LlzlAg9ySJ23Wj2ilbe5+vW7+/l6+9r2bsJ5bfIj40l9TVlfgjxUT/SHN8+awXRAJmUvpDlRYCDXqo1YxmRo/G5L6S/el2Rd8SXKYlqEAGODdHPTSqS+Ecdju98lOvCbntC1eA2BGAJNMs0pDCnyquEipXfFqAjf7iM6TNH8kU22+RfG/+YTSEv/wC5Xsdcqlavd6BFuSmKkIW8R1UjD4TC6A6qBQ/HDASFttSDd8ESGzimlrZjEDta78G6FKI3li3Xkrqqi/Uo/VJWz4yeKghMnEPCRnShBCUSnpDhrXSGx0ULQJR4VQvnTK8m+doRGlY8sKSU1yA27PFht7YQDEfx4GtzmGSzQOhMCJvX15pUoofAL5g9pmmhvu573gDlQbmX4OA3OTaNpW8ImL4xIKH2q0dVVR8WquCsV8GPYn42MjFtBhvYNJIcx2PCKvhbXP4UJWIz+1nNz6lpbzjhF6ZXy0wwWtD7ykTQS902cAIh1Z0qs34kVENJszzsA4kXUliSZBop
3xB13ajAdD41+RHu8/zm8fPJyhIb7neRFLPzWLOj6ljoml1sHObL6YE76EGaf85ZFA0kxZ26Gvvk/LDhiaW47xlJrDdxQyTqsuFdpKX3LDLU36rXIoep/Q1cViJt05hcdloI5RHGyN3d/dkldzFlBB/GVqOm7DjZOpeig5fF38Txv7bLaVa8lW6Akep/QX3NjeL7/h0zqRspn5RacDxndS2kc15IjnFlsDk1sdWgOY83lFf5TlqFJc1LLSo/LNV0hAA4dDNhQBSVHOm8KYnrpf/1ycdZtmc0N8oWPagTZN8Aqc2AZBbuQaGRWRzjAggO0tnAV5O/nt6smcNyVZr24J1s9vPAmdjx0jyNeFDjx+1ctik35lmuEEjq350JCrqv7U3quXLnRPxXMAYCjotvtJCNZOYtRd50JQt0QnoPCe+LSxj+jQPe1phyUBxlWgNnM3OA/QFM+tK2Z/NJf12huS5PuzBCz8J7gjp8QEy+1UuE+8qCtyZwnAeVMNacylGpzvYrM3imwBUvtiFiDwtUFo0GcfZlYlC1j5ce48vfsAf4uGI38JwkqU/td4SOS8HftSxlQZGMSHYxcsBlX8CLDwqLFHhJO7UbmsJURs+Yvy/x1BG20fLXCaCfeApRzdA4bWWOitGaFuawSszJs0w+h1M53HdEuj6A4iJFDMdBAMcfaILx/JWg5t63SN5hAKBy7L+81N+inPZxwcTE5W39Jn133a7TSdwiRn1BdoO10Oi72jHILNM42hT8T2Fgr54vr5EJ0x6Onktm0fzWCRzEw4ih2Td89+Sc8Ss2rZhsgOALUhWSr8m9ree3maYU+jVTo8Zdf/kci48h6mgjnaItcv1Vn5fjPZ7m8PepeWvRfibII8qxzczGBO8p76ZNEFykRJ7n1RwjAgQ/79rfbWMyEz1w567Y9RmkVqhw+Ws7nQ5ROiMgDWEJB/ZovnEfCiQoiCVPnfdt+OWVrwXUJIXA++YKVary7D/Gv9Lup9XfzXuoewJ98lfF0hrfuc9T0SZ9gsaGEqPpOl/TU35EGyEjg64t6WM9rLRyzDTNsrpGwrc67ja+3GZmuiVcr0BAROtvu4ZpXgyvMYR+oXEjomtTNugGH9Xr6trsj0//pRnzmgqxpBxMoi31cz/dBx/IDqC/mq7UXWzUmYCo8kqyvyFRUHSryr8oAtvShjXaN9FiVXU8hGmLM/TD22jUyKQj2L673dYqXIp+FP0Z8ewDym9xbyzfZ5/qZFNdKDFlh50RRtKZEYakWgvOTevM7WIcJMhqC1u8Ut54tHCY3WithhzEiHekkvlNYXvuXyQGlRZOUyeH+HAuBkuQVOauS0dWy8NG73e1mP9k33TWjKEfG+dkumcsuiqGi3pEwIz7+IEgtVkWakyYboq2nZGiCLw/Svtmue+F8cSt1ZILMRb2xT0OJmjqqaqPN+U02xJNp/8juueL8egUxI1jYVhLCSDcFtsNIV+L9UC1nyJxiNRNs+jlfzeP8NcykHG13cwyX+idmef77nr/S9qImRj4rtYcb9S/nwjtc6i+NAi/Pr7iEUOJZbHZR2PPCFDQnhD2ubMeQ1uyHapI/z1xB8rC9Y01A+y3lLo3e1XpWNmncTN/jVx/RY9e3Xtv9CdhnxIbpKjsTzXVEw90EZl6XSt2HqAyf0AVCzrBx+h+9dJyuBgeWG8oguhun/lLxsLRVFG6jNGFs/1poEejR0Nc9ZYgYmDvcNQKd/XNZ1Lnfph7rEBrmP36w/uPcbGr8lg+zQxrIJ/FFYTAx/3wC77JkYb6Wau67dSjEuo/UsacBlGa6F0DrIGstoHytWckwfSHaeuu5AUGQo1Hd5A32UDQK8HRPshvX1G4e31lxV//duAhP1KoKN0uNvnMUHNHvnUS7cUbsbyQHGWdQwHp4XI+3gUO4qeY1j8U0LSzsMUmha5OXvruQbYJxBTPVrSpZy8WN/F+EhS8nQ2EAC2hDhDl61eppYh5DqyNlzmQftUfivWqb+IPyo+cnnAN2ksglHGz+q7A5vErhunBqi6Jlr2aAyjAKw36VCSKlL8qB/eUM7DUacygB0oFzA/ZDoGCAQl77Zymv/WfiPsyyqd3qRL5lFaiyXiis7CJHCGqf1bGqFXX+8E9wFD4o6fW9LSrlWdatLiGbjojZUUSPP/Yrx09XJQhXc8suuX5QdXtDgWNCOF/2fzDThzd5JxFwDRUUZv1yuy/0cmdFzGcEdXSbDDt5TN9Ah4K2T9vTKSoIPTUrtnijdWdHJvDPTq5viZddKM3LA0h2VUNScBJQIAtupQ465rj/lt+NTdDbBLCPSFnpWmQHjF3xHVTuqb7WZ9N1S3DsNSWs2K6dUC/xBtbQZS36d4JCo9Dx4urd5EsYT98QyBk0qpq6BvVa/ZYdp/cHRoeU11xxm7xNg1w3RNfmyBJc2i/m0/EdHMOw16Du7gXFUMGWS6HxgoJnBqCcLBMpbXW1UuIsfmmyURJ+uHnUNToj9idY91fDn09RZ88tJcj/iS5CHPZ5k1L9DAKMlSTtVE0I/T+uV8Thf7+rWd11Z3W1zw6/UrLk22WfHWZD9VcyvZU6Jcr/HeNtKD3qnTKQ8j/LQmFWXdy1Yo0j6mQet/YGiKdz0kmhP1Df0dWHij0GNuE/lNRfCQvwoog5dmzVPp6nbaSvENLLdv8CK3Z4QNZPgu+wr1gZ46Gqv1gfxDRG57oqifoH5xZsaMCBiT2srLXIGDEY+ALDRdSrO6JVS3O+FYKP7m2qWNmOyXu2foARUJXPzPhRxh2t6n+6PqSZC+FhvVWH6VI6s7iHWdoBzzwN08u0SrT0NT2q2Xt3D3YM77QXkJeOjqMuw3jrqFKW2hQVOJDgDlsv6CyA3STJ+zwcPn3t3XcJV9+NCrkITavCOii34jipDn4r+KpHXlidSuML7W8k1wdLKPRdUn5FU2afZcRPUPjjCf/7waap51yFOQI/N0YX3YGjmLzSOR7M7H3YW8M5H7sDYHMnxHsLrc6BmZ+8AvRlpeflgTR5acqIrchthVM4/fxoipTg75j5ikhNU4JVPxqCSeWRIKlTDrrZiKe5+Q2V81PnSumtTXShMJKoh19GTVtpLgWRC1pYhvC2HxuLAZe735lEvxs5r5pg806k7sLpX2F0dLJTOMRh88cXnyXTeDuBL7vpTK0oX3Ircv00Iyk48RyIXtZ2MbxxqZCIhJxcOGiekUjYGo+YNSKXwcjyqF844ic75uKr1GTEtZmzcnclNCNuNA3ZZuaHK+HL+KudT6Rt2rVLQ7DHq1//XAa92lKrSwuton9sOoIQCUb10jRksdyCCph0b+S06AzrEyK5PVp6wulLSiH8N/oyY6ZWMvPgKDO57jg9w+IfCwMX85dxDD3hLY2R1fwctLme507hSpX+jmM1V5ZyStI3lCLxSBns6SxxNAzIcBUhP9e/XqqO6zXNCC3TcDKLxVDrrBqgVh4Y+mVD8ExS+c3gU+MorDjXn1G2sYZhDHWxR+sZY2tDJoYruoot47Z0S/AW+FlsgdsyeJg9kCidL3Q0m3KD27Ge6L3dhp7/OtE8a7
IsUin0fOjhxP/ux7Ip8da7ZPQ/cEInFffKglE668bTOPMpiBKavx8hOBe4ea4Sq4HqWuEokJFZegv6p0TAZDNyhTxaKdPOXeMBGLe9xq+ycO9ZDYBaXFxhzL7K3+QrT1/HUy+oleDGKuVPwbG0ZNebR1n2LrGhrSosCmKlxjp9YND5dVaN87LO4FMbBD/nu6P0eAYGkM8EGvGrYyHSuYizvgztL+vt6ftT/e+rGC9AwC3JvWSEEKLoUo5jQ8CcCWIZlMKb5GHw5FpcJWSfAF8K5WwedpkJoRPJYoIgbppdqV98HIgpWLsgrkbrjI2PQ/F9yobRq5UJXoVYHFJI1PAJXvSb/xFdBZ1QcT45Au8f5SKrlDkWahIqgJZBdIH3gjHvc1REyRyfVHJJPwc/magsbxd+F2MtK/+vt9XEXWNPbHRjw/cMWHHVYd+EPT7xcJPeQSynMHZsz1ESXCzCzNIlF7ngSqyKlKFj30M4cgsfqVeEPPLvQBp4oXrOdQVLYMJgg4W9atwu77ayuVVHEAqZIWJXS3Gw5BsdtMslZ5Tizpm+8nvpgACbeEvqr+yzLghycPJ4EGPuFBvsDaJtEc8zW0FR+6YLqQr91U3beu3P/peHa3JXp+M3embPS3vQMurGF+S6y+wLtHn9zp/mKZXwgHOhw44lo7bhsZAqCfNssJNjPnaxK9VZzDuwNV2hQpVtH4Z/Q2Iiws1Mv3qzxcgErUZTMuLFTdJnHPxeqZDvJAKAUc3dCS1XwXsUf6h29tNLo+0BsQai9IhfId+XV3deCmmYxQhomOOk9HU8+0PMIeld9RWFgZ6PB1t1zzzMpCkMhc23GF3AQFFiXOpd9YnbTbPheZStMzGNxS11BLq/JgQIhHrC8kwp32DC7Ygz+ilKpu4pf59YoYFgyuOv9rxX2+LtlSs/3RdFdFfOjIXlTgzU/nCG1xCVCG/0rW08vg5UL19UHe8+dGpt1yhqu3qWy19CsUjHRaXHSzxgJ5nirN5BmGjGFOHRmhz1dn8W8mRe0cfV5GwBG6onKqmanf4rfbaEou4n3yQIAg/owDvdvrRyPxGS4kWMj36KwlNku74ysV1H9WdbRAhIXMs1Nd+OXgF24YZRgRhwDghUThHCdfyYDbuMY3K4TlKq7CQ2vl1fWg8AQm1dHA/vawa5c0RUvWqdWi346V0EqFsNTgkSoTR3p8FcVFEMO3kvg/FgcxNpO+4GO1fL43kNvqtJIwN/GZ0FRPfHLak2nUS2/g5XO/FrNy7yaEHqxIz9jar1wcBfoH0ERClPVBUlfw+Vspt6sXTfISZqqmK6uBHrTMj3iKkIbMGW0knr0D0Mw/TjXzhzGqMQY7ikwxTXqf4lHsNQbe8QqcrWbajnrRSV+VfMtt6cjCGuqKFw5nj6XFx1/2VH26rN7lzQkyRUBXV6mTo9bWSVrwHFsHPfS/uctogAQ0vr9xddDd0re1K494d2Nh8DpMjw5XMop3uZ3v4IGqyXwckuvjkBstwH611cn9bhdH7SdshO9C1rx/9wVnBVpPjlXeeT2BfyRb8NxB5mNjfs7MajOmYbicGaU/ld6qwSiBxnemnCBwYP3gwvmH7t/1MwqyItW8KklGD274WjTb1+rJah/RrZHaxPr5krix/0DTYmX3Xb/zezDCZtngO0zaeo1q+DV7YiYgOQulQ+aZSlOemYzpINCbf6nPBBc8wH/5mnI/IwJ0NrjNlHp62DDv3iU+WHJLKcMx77tUdrdgDDF+q/jbqrOZvNofJor9xR0eQwPxt9DY7uhjD+fsodocFSttaoMSuKg1mXkozOFOC/e4Wf3khl1KpPsSPFGcCkxZx1ZSNhpYwRDYG3EBYjuN8J5ZDVN9tf3HO2sKXvvBe4Ppf4o/2lzGiQLSTfvDjXJxX4PgjEbUNUskt4wxhc0rc1/u6lVzaIeRjv8qCdBCG3fpLHQ1IAsBlBKDjuS0a4yy5ymsvCuTO74TwtHejWKyVloysBd/7d3FpBy5HZZDGE8X1y1sg9DDq9wDH1ezd7fKt8Opd5YsbdIEJFsq3yZ6gsW7oJA5tsub6s93vyY995CeqoujumLxtj8bsLGx5hbEPeAz4rVF9WdqpTuzv8A9ut52yZIevxtK97maYWw+Ign9tpmqjqWtTiEUpbZ//hl9q5TsbdNn9/AGisEBCWXd4Eb0BV+MYaW5OZGdKpTMpvPx3UREARwh3HDaYyxMN1QqzlRgTSr8dHN7kZ9DxopwrPktHhy8BwKRKry04P1x355tVEKRo4lonK5tyVwthbVwGE1zXf6nYuF1QP0XoZ6v8j23StHnwTc19K9LHhGx/ZNgae2Byn1Pob4YTAnwDd9gy/jPp5N+SxJLvn955T9z726H0UQggEQyLZEZrx2j9AuEN/AQS7Tf23RSvG4w+lOn253nxz/0tOpqqowI97YlymYJu3o5mPVrLvlrOk7IODLUuX97mi9YPDkXL6OrjEB0YWGIER7Fiuc1u0tG+pOG6lQQMqGWkupDQYU1lbucYoS1rphyzjphFKB8fCwRUprJEOCUPP9aK4oWGvwQ08fes/MaB/2nmoPEu91eb316+IGh+SpYDTO+FADITe1palgi30k2j8HNCI/zSs/vFLrN8nY/ZS6Bs5to1KUEKH7qFmiY+jsmJVkb9HHtIW247GWa1NUqW7rgOdXxVPtS12jsCk493gmTZFqNqrCML9bO/tKpAkoZi2o8Pl/Xiz8sS6DNQk+ECtY0g7VSdqcY8BXxYfwMFMf2t4cu7tYJ/ZoNo95cx0ezbyDfEbIlNOKKenKWSVSTj6FvpmJB3ggQ+1Y6gPsAHxEde49+1Klwe7LVCrtP0cWF4dWiNVzm5onDTBk2FdDYaDXjzphe9qhJNxuxZ7fQUia1LgsQYI1svK0YYJP4DN7ZygEFCZjxYXKHSj92Zl1tbNK4a3PFobVv9DpjYhb4uK7HREK1FNTOQxHhtXoRqeCtN7OKcvzfUdUA8O5o7ORQUCTIMRhypGNw+5xwQkNGvYOir2YqV7q8EVW/YWY3qWdUkTYCl1owtacyImkL46qILrc30+4amv0EDceEFX65e/Cc8M7bb7Lry3bFoWnB3l+HoBXzgkHiYxNPjD7Ab65/5MKNZftG/R/MF+xjkqlf1odmmXUu01kRNALmGi9rOkIEGAT7QIMXWRJ2MVeYDHZ+XB8xSjv2NCe8/qYONm6FPzTCu3tn753myG00sqUQWA/TK5mYfcRwz0rGgknOA3X73ssas3X0QnkOvP0SwqYH+g5KGESfoaJzr+ZkC1kIBZQitbSOsbR/QZb8WaJ+++tJ05Zf0zBNAHoPXx/1BH89GD/wvOzbMTsackrKW+zOkz8eyMTWMGvUVwpzl8UUAwrdCDS8fRFO/wqDWid0L1DUaGdS9AcaHXMugOKozkUEWnk+ftNVKfdDV5xsErl216MrZX6F4naCPnRlQ64yaRdQNsK7guFUQ6RiymvqY1ba9lrD41XABJoBr9vBfwlG97WelOXsfjIZN19APp8HpaKyqMVvjQqPqTxx27SYPtmwSH6qc1qhiGC
uMEjMypsDNZUCCE9Plea5vG3HgTffDajt7oVQWdQq9bHM9UxFf/Z5RXNkXtQtRXvsj0lHSRrHBTDPkwwi6ii5P9DdjXGF+8jqGiP3tlCsTxjDol0bi2IzHkpc5apa43pce91SmQufYmL/9CAT00lxUrMoP6Grmj+8pPknybN4IfBTzeDrcZgeukkRXpy+Q/ddq+vczDPaH5bm/4sxMAwuADzjNw+3zV/kld+escGhlZeXU6EJGw6x5zQxwBHogsoCWmEyuSzfOdnayTuErcfTWIJ/v364iAPXafQhykm4XE1XgyUga/ezVLXtRLtQIkQEFwhvbkdq1/NSLok4+1l83SSmyVUTJUloJfEOakyWmY89+vdl3ga/K6lo/WQc+YmtPdjk8jnCpImss8xqU+8ocQ3UounkP/iVjk9PAa/AyjWZTFnIvlhhzI9CNZ772aCVXcOd5q+kqsQZJd1/BebgZhfsMK9vIZ+/JgBOIxOcgwaWW9kPjFKnGwpOHA9XRU99Rn1p6o8HdRRuEWUIBkQLzL6DxkCIUHGR+Z5midtpMsTZz2huUGD9ufXBTwP/lrX1r964ltAeN/nlOb8IvHfHLLmz+ZhcRQf1J8PeV/AfTYzd0sCENkaNdbV2wbXC+B1J4jl8Lo0N2+0t0WAlnT9BnAKaxX4zAn89lSANw7au/8k6ev4ssF1KETVX96h5jenkigyroh4UrHL8hR4JeB0H8X/aTc4oNBQKJ63AUOyNiwGwzE2JCYN0bwTetoLka1BOMae2HUBr5ZgjxV6hH6dRiqSJNBgUD3mKblqRi74NN2vdYEQo6S61UUdmlqGhZdbp98Vem3qUq3++hmwngNTK34gil88Wr9e9Ut0pWGrgjo2sDQjf2YQYA4uUFsGKFMDD3w2hHlOy6GliF8bu17kQ1Ftb20TmwxLy3pMqRyolv4vN3I/nHFjiPHV5mphgRSRRRo3XC8fGoiZzj/bUPPUbRjy1/5G1AhKx+v4X152K/j6ymi1bKOajnVoIGuRxuwV6m/OXPZUcGibZYOmFT1eyoJ/exNdGu348Su5PkS7Alkk9elWp47dyP1OqvxOCITfJzteu5clVIXvm9Ca2cIvcPa+Bfs2RYbcC3F6+H/sbmv1Wlov4hNmbTvpaSzmQWkJ9fXWQGqFcUQDZ3xJFzh7tCwmY62mkMMLo3hjKOUv/sIqSPzz0y1HmMl68J7rxLmfITICp6LIrzTp4eH66dfhVp2O1TjE87EMuTFg4LaZQIM/1sRBxhALGXMoHP/a1rMQdvm4XA6VZaIDpSvIT0Ome//KkNK5CvBjeSxwMbIJgimg3h5PoByGHd/XOJOKyNzvqgvyJQSo8kfyu8rcWIeljwQs35ao/lx/GSdoAebfMQJ1wTmkI9ggJuhmO5Oz5ouYEwoWFdnzg2kAimqCSEDlwh6OLP4XfJXn4GEXRJokFN5YoUMd3h2t54jAW5kGr8sp6ZUgUrT3+tOOlQ5xRACv/9aOwgCRYm1eg9N39tbXE1jur0YQmQnyfViqTYrSSik5Q0wADxJBrHPYLLqwsBUFlmeADK8KE+L60LsMxoC7HJkl8BFYnQV2CbCK8LgquhqHqvfn4hsdtxK+h5WRKbc9fLesRbs1bdsa7VYC+mqLGwaJUfZ0H5Cf3mio9nJDt44xWCAOJt6oL49durEsns2m36U1sNEx5Gs4bt6/WTE6CEvqKBm7ucaJUdUUOAvJ6VL5eLLlTjP6PSwQo24j/4p/1dRo55WXQY8Q89MZW+9sZTlXAXPCa/D0jVpXQ6CU4NfbuAROCdwq9gpJnKbCM44z/A3qZidKsU+rd5md9EJRf+ptDCxBwVqQKlyIRfVOBUMEAwowhPKMwk+ojlDX1oWMNyVGjrANXW7ZfvNjNrUdDwBA2hI8KPMa2YWMRLDPmd63LWZH4Dd9ECzt+TFfonHnXK6/qEsvto4zF/GBW2hxZQltbKAKP6ZuXCryFrzlV0Qa8vQAw8A2wJs0JubOgDC2QkQa25BPE05xe3U9TxuNNBfFcV+M8J1X21EtawreAMXxpQK9ky8PYHVuRlSGTcg2V8p1x3KNAiwppme41JqOYT5tl91ZWSyNNHdvyWDzFMCwPvDArpzpJ8jgXJ5nELGPPJy0FjgFtFmC7PtiaocMA2JX/4u+iArrv9GyJJCtqJXrV7OR2QLoGXC6o+DeouvPhWtKYjK15jLuH3rC35u7CmqWZ2oa61CTIE1xuwyTDm4OaGH6ymAFPAyIlS1wsxOS0W8hiM8GhfrlWqOLQexs9bui8gO8u8mh2/LrlEHm0w2FFfoJETJAW/4s5jDO2x89+y9Zpc/u/f1u4IjSc6611RCF9OPh+QHvuuOXXCPCu/EveaQ4z7uu+6dZ9IbaAvj98L61CeOOFO2EgG8wrYCk/7l+SV9c9bqSx3xX7m45TKSeaBwjda/9rSaKHL25mr4oeRodP+dYi8v0wY5AgcY+Zq5QGPJHZ4U7whjfqC/McfNXYYRmG8DWdfYF+F0wSPnfcczVRX09voHcy4b6Wzu+7MfI2g25+BywZh2xdoqXY7Ye3ygpoFjL53vYNE75yeHoRIFLoNJ4/6mClPfai/prlLZV636P+K+RBjij3uZp9ROQ4sFdYjHNSc7f9ad3C6gnyiqhBM+8C3uMoyUakVjeo/h+bYeneln2Mp0P5tjESS30Bzi+XxOiKP4o4P0tep9DvIbYtDz9H94DmuZ9ry5vd3OH9lmZmTJ0y9MQMc4fCRl5/fQbNyBt6j1pDL4eN8aSZ1Q9uZmAO85NB3CYqVoT+148UPDMnYD1wki4xsdYiWHyfWVO7zPcnr8nv+pN5rkatt6mrx/Srnp7hZPnHbrG6+g+m0IGbEzrHvq2f8GJTQaaH2TuPO8XV5kpTcp7lgDgsHdJzZfZXz6uoipV/HvDHBa9YWDNC9EFMCz+W+ApOZnKAzweBPajcRNi7fIIP6EEdmuDZgrgb1K1Rq/QNsW49bIgkTsBefV3SAE0NcFwHS4H9cTisyRNEtZn7fdu89Sgr/9j7fHDilW/rkRMMtgTkpUEnRkWb7QVHYJGweUHJ3gqzY3el1bzhE86+RzW6kcgsejrhRbA2T7Gt8JcEuJf7lumClZjD+9Z0Dm2BhchplAd2oPAxJMlgeT9eHyPRQtYx0kaw1hcOWkMlEf0hAYole2q+hBfjVYQJoPBoPzwilD5R1TuAY32rPyqt3e81ll4n4dJPiOv3NMda8fb5U7gwP4Sxs1AGr1s75SZ1WeAXk7/yAfE/ceXM7P8FdbQSuTR3DZ8Tv8NOScB2lgH07qabaEquFRn+tCxsI1HifHDTBp+Zr9g4lCgEPx/u3gsWc/jD+Tq+zYFqiNY/K65OZqN1w/6oMuN/gGXr/87dHUT/TOmmnu/Z/yn3PFxUE8LKMqH6ft+6el/AxgrbnTsH4uySDLPr3VfrUhXZcjtfJenWxJqDn7s/ujwbpKlrqaDUSaJlhQ1YQsa/o2hhENmXSudFTo2CjGSPCAzh6XB0p0bRt9llL5FicD+93ygeS62KkXJA9HyNdgHHZf/1Aw
c0I6WRl/NUgXdYSFzS2BTPhDRrzGtJY6/bR+788ZDc/o4fFEYIgvXRzAvO3Zdb3XCvYog9lgRSGRE+BzVZtOffyI0BrE4Q133idxdTRxnTab1zrJsA/ZduxYYIOfWd/rSKHRrpULfzIzW6MwK89d4+Z+EaneCzMDjSJYldOFPZ2Qr6Q5/AVGHl9yTYLUIgchW0VUbjuuSAWMRX0mrFj3oTOnHkoKHnGa//MYm2+Ykb4wsMDEVhvGQReV1zbgKwfyHitJ5wvhMnxmotS4Ity+rAJVoa4cu95d0x+Ca2oScN/yxTiF6z7E1JEaCbYy/u7ebY+wmzFhz8LqL7+5MvuwjY5wpkJq4uE4yT614rua057bsIG9QXCjAK97JWhh2e+Jqe863KxTY8fwTTJynPW2pX5gzzyV/3EDyyAvUUMxRyteUTnJDva/AeEbpCEdsCJwk81JfhoAqV4XXSghQ40WqS8uJX6pwiiJoG2++w74oaGvykzc1BGuqe7CbXnSVwEa+8i70w1T4lYDONFC0+7wnGD0smipnC5R3HwH/SewTlgauBGpEnh7J10P7eKZhnNQeWX8V46KhZpVo5LMDgo4Dghj/0pynsEHLXbU+aH/RJbdGnp226tJNwr94A3QXW3km26HcQ/vU3xV17GCt5YANyQCX9G/VlKOBrEYOmTbmw18D46xu9f1hrih6Aq3mfqAaHraXEWJ8TJDzJRAn9Pfhfi29H0ODL+Bl73SrpeMgK/rNQFtvHMm/6qvSP31UfxiXBnJyVE8Wilhk1q8PXn4t9wTjTw5RLx4Hw/X2eAOK0n7kQLZLrfgPU7FNRkXi6ilxkF/odbCHApA29lbK8sOdthCeOxEjXYFtyMtCUnbfjFZdpIGk4x5PiYENPEUp5Fbg0Kdv2Bjmeu7gmxIRv16l/cV+4K/PzX9NtUox3ec4Zi/5xxuu+qPpPx4afHQ2T5RapQO1H2j6miw7WIKPLpbfk1VnPcb5po7FkuTdTSrTWI4dL+cEzQd1DsWBOyJWtMx/4RCa0Yb2LZSEuR2tGTl8VTc9kzmRj9Zf42m6raDipxXGgYBWmLAhloPkfTGwjfxflgfYTPwgB6AmztI92uqETxNDKcnfgs+uiQXtgPltFWW7VMZucfL4BSLHf6XiUGVGyjK8M1N1yg+KNAYkMuFOU2mbpuggsnWM9oII9kYIv5iTMiMorcdlTVa2ReBsVo1SdJ1hAI3Y8nwx7UVdh4qHjfRLBumecboBIn6/VsJkGgwotAtd9u4Wvzsz4U3R5htPZ3+RKfcltq54TyiMhG3xlG94LW2V9KSqTpr2FRfUBbX9ISbejvApSzlhRGA2/Ar53cpra9VDjAWuNhy7LrBWu1jdnfE/kLXjm/kZDll5y3Xf9Z6QVaKv+X96OqzcIJxy+GO04eYtSEJ92/ckRGTiwKmw08KJSmpq5McyLLPv2iXKMbaedH9jkSIl8b3hX4E1ECbcN2dLmY+8ZLBdy3wnTeN5ZhEyaOENMM6sOtmlbrIUVPIk7/IsfhKm6k9juib3S7v5Z4ozTta4s/rfgvk4zyPUWo/g3jIJ1edpxxUkZdh6dR8kgncKMHc9eZxpkOxVwoM1Slkeoh38m76t1hPFMuRkcl6odAPECKdLT4Rm5qEYhKuXeJlhHmKaCVgRcTHs8AD6PAVjeGHBnXSpxbJ5Bgaeet/QpvkOVBkxjzetdAjmF8FmrGapOgid9o/j5c7Yjap8BF+fZ0bAQMRbfQ2dBxa6s92tJQ9XHzsxq/20NrFeneX6bWlaD+G5r/JwF4MU4Yt61eeBJRu2kTz/kfS9e17CgSQ38JTH4k5wwmvJGjycl8/dC+U7WztXX3jjHdaumcI7UkllEu+IEvfuZJDV7ir8/0E8YHlR3nJIL6OyJ+nBGGxc0tAQqH2eY6zBDsYYvpvtF6DAczkF5/V25d4FwoYFsa609NPfFHTtZSEeSsiXuVqbn4aLrDn5ZKmeA96vTsMzCuMDphW/ZNUbXRTX7xI21ta+TE76mPN1jLBn655oAev3n02U8B96PWjrKloST3jX/cTSQ3sxj7FDbD/QrkLsq5UaBd1VOq4PVmCrFodgfefKz8Pkak3CeFQ1J4co14TS+fdAR/8wul4Lvn7MxfHVZtYKafKqsrrhYvKtZrx0vFK4Zy+xvjaPMrhoUW8UZuXqXb/ru43z55Uf0nx+43Crv4Z1jEJCyBE2/jqhJ0okXZKA5N/fBlN5NczeZpRSDMl0Hfb6P4QsxXqvSPfiv8A9qON4VxSVarrmG/XMuFwSAR6S0Evk3dhDIaNRQR/dZZcM6yUJYx3fYbgc7nVhf+SSGx6QUf8D0R0fZBDll44gZ0Z7kDqV3UKDe0PCd4B9VrCAx7HjsQa9Fh2cLpL3FrK+tkkwlL7vPXzKMrPmNHu3I3joQ0HPPD/3H5Bm4clpleiJd07wo5SAVHG7Wx8VpcR1V6DZUZJMFm7YSzPQX+w10QhONh+9NjGnEbwx2xARJMYvJGRSsgRU36MO1LzQ9Dxlcqb07JIF91aGIaV34z7HSsUF+NAg4g5GTuF64py3rkvyvuYn+02UjjWQleW3L9rzBkgFV5ZiS1qVS1btImfLZE2+Pr9LV3v4JrOaPB8x1ZAHUPwCwKXBoE5RmMs8lw78pq3F+fOxin40DgqTRhoBGGEeMY6tYL6286Z+xb4nCE19WgiZCr7QK9eQXZOMdDL/m14Cyo92fcK3B0fzJWR2kjEY6lSn0YU5Jp3QLrHUMHybhUshSVL/1Ibj5e1f0ziA4cuA5mRIyKcgNKN2KWmmMt+HlbDbJBUzRC4HtDZmZHEmQyqldXojIU35QQpMpz1LYJE3SGto1bmU3m0xIa2yoKfvKOjMUa53tR8R31vgaeS7CZnv9Nfrqwk6CvzfM66oGD56sMraUKp0jzY9yz40jYr7MLfB469DN40+wMVgwJ9ipdG3dIq46WLUs8Z4inWYehjQM3MuBV20P7c0c09iCtKgvS0Zm293opYt7nTpQv77mAgLc80NbQSjoaEMEZ3mPsU6bF1DX95THrzIQtDpwuWgN/6+5Ps8Ox4R300D/kpp7VbLLXr9H6ksB3+lmQL9EyxhYI0zpvZzTWT/nUgB0Xgu3hhs/+R3cEd5tMZYd2m2/nuOTtppR0CSGCXqwoI7YpHAhk4qcp6C2vlLyGjVB0zEaVbBqkmoqhTFD4UxrIdGwc/vJB/jOp7mBuvi6wLiaWKPE+JBq3PLP59XiP+5SjUiZtzQc0kZXQAehJR9Nk7ZxAGvBRSzxMoQvLLBVM0zR6L2PfPnGPimcbpl4grdVTFEMA6G+MzcvgKxwDIvjriGc3FL+HOnPGN6cheU3iTeujNPDWYH2n2+Iw6N1A1Hjar+urcdZ7vnwn/SIS38FM8VGQzo0q5mN4ncrOOSX7GB54qHe/kQhx5rT76JV+RjpefFd8lPssjBtqCV9ThyIUZnnrZ+yXrVkgxnYEGJNGmFSKgwI8u0QDG4RwUD1/qShvO2v8TlUt1eF40klaqCw7AHkeR5kp6i43vWu6e/pdyX+lqex2m3PU
yZvfPGiggfxg5vvuiu1SylEoJVXLu9dWfabu7LpMtLer+EUsAxktxHbqPnzIudKFWxXRneqspLITDMR6eIucpFU4wIFeh5wPbpPJiFzO94t7di0qnxUz5hTfZ4ikLlgO9ieIRapvVBv2cYaP2k8NFMAhO4fmRXJejuGuQgyz4RjXA17WL8qvX1FFQRBTuYz7w0KTKuhqMpkGEHW5bxwFyCJlYr/UYg+viwWqLONlYCxfw5gK4UcuJL8xh15GBK6P5qXRLRCIlERpi9luNMLH+1hmu+8bGn2d8sf0MnBnXKDkDDzkA0F0XQIp02eSSoZZWgW/M/VbFCbD2lQp4/WToclDk6uqp60naCiE/FDOINn91m5N5reOGRMzkIVyeh4+wwZ9kyeGU0mAbSJvs033PJTev6qmfAsRVYmkjYfVElry6H1TFN/aax9Edy+Z8P1KxYo5q4bnyD0ie0WuZN27Z+66OSP4UzDpFPXppgHFc7s6ocS978/hffXZ8XA1p5bExDIPgqFRQoy7aRsnz0fy7f1r95+fv5h4f6s5BRNUGL3RhtpAbuTFjjHv/GDZMjq4rrud1CmsQoo6sbMof/3qSOAQiyC+lgVFeu9woENhAapZ8Qo7iGtLebo5kYd/IKo+fD5DM6XyqGCn0CCODlSvScwcE+psZsMbh/0KvyqW3ZGXdzf8yjDvswW99xnaGWuibtIxgdG+Scb5bsSNp1PTkMSxf4JHbB5bLn1apUOjXb8UD5fun278tYhRvBCfQaVvZjqkzRc/b1vimID+6jFTUpLvgiz0oCsvMPnyk8kWHXRfSWKDa3+8i4mtcDAbCf6ngZnEC+YQHAi3NDL0Tl+MsDYskgKzHehV9av9YB8vDuouCEvCk6VAk0HluO4j3z40v2R2f4sT7psbEjCVrquSb7aCGFrIWqzpm72zB0n0Et+zdIt7lWvan2KiFoqM450DG205ivjS5dIY2kBDm11dVzDzhdGS2R7kKk1fLc6cGUeY1+X0ZW8j4J5C+Rssga/tycYP2hFj5ZyXjyY+uFgMudd+/Nbat2NPMN7qGJyt6HwIfnG2WQ71Ts7kRX6dmXQnarXbAvhl4eD7RYziVkVSSKpBMgB8D/oOvk04mQHxEd9JaahaMzdFzA2zYtca75Ey1MmTWVQkxTwow7m8PGYEbtD2RUwB8so2+8RipD37SuEYVjMIbtj+7P0Nd1JJenepSc2RrJgaOTNvAH0wHkGY0HdqRNzI8x86vn3eTj7IrvZ27nea2FNii470Knzylb9NXDfiK2H08Eq7+AHRqb0H71j/dRxVGBfbL8v4neKu5l8nK8DszNbjSNO9wNvjhLbx+6vTQ/oWDN23f43lRWMinO+ngbE1i6zjZR8P7cteO96oKiKjYEzJGWjM6/F4Y5QVH2xiGv0rGcvkKX4NB/JE1IWLqbfHYOpPJ7/w3X+r6WBZNtWQvcyDxFVf54MqvkcObdkZhsjfzEdwuY86gH4sxDtR126tsmDB5gboXRxqa9WvTrNzu2aCJ1B0fLaFCbA4g+8vm7OjShS+HqyMuhlYN30yjruGOD4UKBhy+yG/oymRrgE8ByWcNQPMScZLUtNbTDM59M3XRZ4JupXgX6+hK5w82UHsdmPE+qC97O+SZD5L96HROsOE7IJC0ZfJ71vwl99q398Lpu61HdGc4j8D+82S/fEPaplWOp73urOdBZ/15GJ4Pf5eilxn1TQMSjyBW/LXg0aZz67t63rKBXwJ+t313K6rxWAyvHjFKAfxndh2TGAuC/NSHC0/Ve7Pl8WoMzZjJ9ejsa+vdMVK/jqvF9B4RlB/UryXbemjL/vqpvc0ol99g2TWentDuC3xmEMYNmAkjVbJy574syKSalWsWIw/mxyJX+X+xm7Oj95qaPgnaPehM/2xp5r9bIHJ15VeCqeogpFYjL4c0ypsI+E++CyPSMLueLi4S3pnHH6XLM1PB/S9VQG7s+aRrookgf0ydaG5zui13RZvmwVngxvk6PuV/R2dtgFS2zd1UdSOXzT9oOznj6xegY7PC9sLYYBejTB/htfjwsYr/GDC4AMhgU3exCFh+hOrmxKhQXLKNlqReeVVnnk+H3/bh7UFsdRq8fWKpMre/99qt6viivLAx2rQQnTrpZ6lap0/w8TiZIJUSuIBsYBeY339uzghA8eaqvTHp3pGzAo5IoqUaEt3K8Q2/OKmvXpLCUG/pqghaceS/wrTUNCN5j0tjFCZFUs3Nj/7jvEHJJYpV94gqoeMbvV2qsd1Lk0vO0QZ6zmDrivdvO/DzFE9JoerdL/jNdRHM2sf1VWpEGyubBe8NPGl+JU4eF1FFjj+Xpsjxm08OUX6ZduG4CISGfkxHbhpZXjVjy3pldSTtN7CXuw98HAdlIyhNfnU0ze2qJEs6uUHwNInnCwrVl+ZlMWQR5UHd6Illb7XNy06Kw5Tm1SEdE1nG4ptGoEQEJjNzZhyQlcs90qfrTm57uVFO608oa4/x/o3JoTA80oqbn8oPpaLzDJ/AK6pJLAfKI0i4kRZpS9C7R9cWxUa4muwgnyYhLfvdrq+r7096DdJjy/9FpkmcfgttWAUhVDcQYNPb8c0XbnCanHvGIbos9bTF/2+Z0lNYyS+dOb2HfNzf1bOyho+tl0g6KuCwUP5Ce58Yr5MXWkIaXNOGy30suUtlWXv/Ro/TroZb2sTPuPcvPzrdU6Oheh8DNVGesv6xNZ31Cp8JgTyG/MLuF1gFZ2/SXSv4HbyxaChCWmydhntzMjGujDhN8KVsbsrFNFKUe++/fXuuk8iGh9AF+a8iPzzoYOQzwzhi4AdO8AcMdTZSek8wvJDztXvS6HbsxjAscostYWMUKgZElu9P2Mfz/pGx8qAe/mtUMo3PinMe0MKgOLTVNTMtROKOU3CE5NBRZzH6t+rhU4MOON5iN2qnzfcUeOpCzgOJrBJJF5H0eBSInnsxux/6MLuHvsg3KIyVTWk/fBVRajr3knDPjCv8qPyDdoMCsUkRco3rz4nBHrefQw9tL/KnIwbMAJ0e0V/37pIPt4QxxzXw477SVbY6Bf/O53BhbGHJhrfrI7yITPG1CSYJja3j1Kf50e+cHtJVMkkvnVtfTb4+c9znRcj9pj1I26K+3Ev1Ur0mSIka9+4Dxel/MoD/2b3GJEXKms5+w35rOO+cLGCxombNgF90Ijxyi0m2skLTz8zi2b+4o8bMxyvTcmanfb9ZADwo7j1hzEJPED52eFfsfbBHa//WyVEYrS4HkKnTuOSXLWVYmzQAJlJ4fQ7XTcJ25INc7FMLnf7AhrPd5GvBx+dtouZhrxsD46QuDQrZtW8dwPEAIJ5FfZwlKqEGJWYv/bRzMphVr1CeHXMu5l/Da9wnHWHM4peKZEHcM6GDKkhjnZQJJheLTiPywZsr9VVWEA86uu5Z1APv1oyolmomkSMkwZX1cTvWzOdSqfHD0tD4PQw+S/Mq/JtslkllikncqoVomiZp6ukES3OBy8iayK10dhFbfARwsdJeFaxpGmedf34QK4KQOMNbRMR2UZKZm3Sz+6vD1X7Za9
+LbuUqeBYRdbOJ4/Ymq+Ji8hYmWyWqDl5sjVdIQkiXvfDt8J1wfE5fZk0xwghA9P21OeYWQGm5rZPmLIGeApzqAYcrK/IidIVRFsBej0hWqcN7D0ZDh4FisyWXwbSurfcyqqvWEO/drrAdPXrvc7Ydyh91GV1eLio9o6C4w23mxh1VPJpkaT/NHOJyH4i1/ppF6y8YxQUY0JlQ9vnpAGtuJJ0DKTlLyA0ivvEjfE39obayaDDJLy8tJhhidH50rdEUn8mpJAVe8ZngnxF8wkbfstBUyFSHBDPCoTmqtBcdQXdJvE7hBLnxh0X2g0MxhkK5MyCpNdD61x9g6MIOUw0QKTl1uraeJBGDSkPxfIXifTOfSvTFqa3rTDnmMWalISVQDYkBpLvNU/3eea/59ClzabZrX5GHDi5OmtVu0nhpy2LnKV4oVC1L/iybd4YFE789ea+2+7wWgfuvufZgnKDGfrL5IN/esW1MBdsT2leFPbrC/WqDDecsCr7Pk6MsoO4MbpR0SdjfNyC26cBj2U9zMrLl5HFOA/bOreeP9IwlstN0XGDrG8mQUWzuT78s1FwrkfEOy1iN8itigPFcIRZldPfCNgAZuF29YxxV1hp/0KwUb0las/FMQCH+JDOhqXpiAmRdEE+EzlN0PS/hZ4QrRfOuatGTRzIKVvKhaL6Kgefu4Ih/vxOg87yA5DpLyQPwVs7t43SoXKurzZjaZbGQ/RBlH/q2zqVZW1+v/406hlGN44CHpJQGGXDKJFCSCfrxpwMHypU5SLSaRg/JRBhP+VvtPGIvso6K7d6fYIBXQkvV8tD4eEL64czIFxIMmBSEcVcovLJJgzk7n7DDqphZVPCJl8ZrKQfKG16JHhgMnOyt1Ij3Hi3pbXSRiReI6guSaoplfJND3yyb3VvUr99zR8/qunYhfJ+GTVvxwWV9ROlBEnngykNH/Xum/a2CSAU5QG0kmQpi1PM1OcQ93dN9r9uHAkJf+p3N1HC7QoiLGuJDlBT9uaNtrCgifemZQQVeV4LNQ75Kgs129Rp+uqL5oZZR+pBS30ecoXkD1rI2Yhq1hZGbuhFCM3JaFzCYZaGktZDPyof7OOsso1MW8d+sZJ1jPCXjtviAFXo7kqfsmPrVpNDz+4T1OHTLOiCkGpmyUD3ItmMvWLkqB4WpVxkLw3kj90Y4KYIzLjdqFtHSb5ndZwdtrKDhY3b23vDMR8FsRP0gqD9BjsaTvW7XHmxloVAxYHcqFi2FO2OFi6CI2M3imDLOfKWbXiHyzuc3pcid9lbeuKA+kY3u97nAkBYnAa/b+ZXfQsImIjDdIxtpSpXhBgsfGHej1QzDj4UGgRhrXi6p0gLIXfSdhXt8V1hix0RQulS2B6Qe4WaaM9twkU4ep+1K7LX9ztpJsBdQnEV7b5fhmYpkY6W7Qeh4cyDReVbmy+GV/KKY6vGaQJXplPECOvUzmkOr+fSfnECXfncan6G6iuOL6irh8DnAy+yZH+wMU8+fpDUXU17ieDY67rKg7YY9fKJWgkM24lN2jJ0IYqE1Ih9zk1rKDxIfz1f6omZlir6kMyMoJpS3P5DluKVmqdxTt3QCOZ+pyAOMyrdnHfbX6gCHCVHHijNOvch2HdGXXt/5Ue5W1thhFqLJsQkBUbD3lzKRTUKKmSbUNQHrU3hSFyS2raIZOAWz9uPZ2lKzWEjoJaHlfjnzJkQO3HEnqUTiyrJnRwTfDUceb9V1cdARAy3tsuS7ILyNcNK4sKbAHAWRvsdjrizUdE+K9HeHCJKk6hT4rmmhMc9CkoAxndWrFKmDfwZaoQIIo6zsxfInytVS3Yjz9DfltiwoN7bWspjYsnwiYWgEP1Sg9Zb6mCI5OnIMZnCsLJbwhHkmJa7LcdDv24UgbXMU4FBURM3YxPNce1+3RUuEl6Hvm384LuBxQrcuemHm3zP9mt3zcImnut5nqPbcih6ZkjYYrwneWGsyZf6YMt14ZALHOBXdD5H83o7dmXdm6Hkt8ug0yrByC5loXCv0ilxRBoTWT53edLsrwTGfKajzLAYwjdtWq8xXMUVKvdLButetiQf2Aly7+1oJrrzmBYIL0UDVrrNj6Ri7EKNlbB+W9kSPOgeKLD3pwPINdOIU++ymDfIXD+zEnoLVku8uryoRo5eotyzrDllEplLscw4pPvD/JlZmUaXQ33IW11Afg2Cmf+zLZPjOFdii4PdjefyI6k6A056Ut6ShlEsaN8n2638DehqFBZn6XGgJb14qRiWoDr3kPm2uYdfJag9OHe40S6bZWRb3jcgyLyl7SFb9ndQ3UF1vPrUmAbyay2S4jIiTocTr09qQ/eutjvq5JJvjXg+tEYs6Rbry4FYRZ5IV8wgv+Xs/fZ9DGMbKF4n2D+JpogmfLjvXGzCInvVqRktt/7qZ+eNvmQ1hxoAZFaRdXrTg6QZCrw5jJbZpT5zeB8gAgTkZq3ZEN8Ci78o0vpwrN+3hsI2cea/HupAE1FPq7zb2cYS/RA6nsQkoZAmQ8SBENM7BGWiQNPvFCbbMcQkyChM8zwRLQLnKLK8+0vo/mIC3s8YUTiZAE9IPE7hInozpzKCFk+sz9KMig5/l+AGxMOqiMa+hIIjPmubsvDRnsW2yt/AncrUZ6LQoE80lGGVJ3/6qxBQ5Pl92dVHYw0muuVJ7lDgeK0hE63ykKPJEdr/UwUjIN8MvgLGFaIktIg9H3luSmpGacmi9BsAqvu9se0jZs5oVrze4kUIyCHWFOaTmT1na5xrs8ZDJc5XIJnM8+8RfgfCbgUBkjfEfqlCHY1y9/2qaEbUjlZsB5CvDf2QoB+MADMf2ue9zCgGKL+4BsvKSb0lizBTzy/cC6NRfH/C4+33RVRb+YPiZRh6n+bJrPnHZVzFP3DXURDqB9PmcXyAlD+G4sjb9DXoY3QZtws+XuAn/84xpID9uk04cZqecHGeN2Y8XAxVg2ES+cPubYiq1+VXPxvF3LaLcSz5d98iEOsCbgV9TjVR0hBeW6rukYcPLy04wk5Ceq9XtW7H21EhZYJoSHRqZY6ktA9Q3dtXJO/4T7ZTLtiU3+Xix+6zi1QXBNWVfIEnuE7Jnj5Z8D+LkuOKIJVoNksxvl5+E/8+2X2hvM5I1mtoSZ18ADMIe/5e82qEgQLO31AJsLj1VG4aX9oC345eRX5o+KegWVYDggopnV/GNRg+Suxfyw9Xfd4f5IeyCoAm8olnZ0ZeSdjcbFm6ym/WvXncENtQwfrr5BEJApN+upI1s+P9sB5zXY9lupeiz1183NFe1bWD/QJRTGZMBEnlFL6ozRYEh6sRHHNmBNZHZ3jsrb4/0FSjJLXa2MAhoe07Wi+IqNwxzskzV1ilE1JbVtXNzr49h+kacOFLwPzm7wF5qOPnXmcXjo015oSuk+xGTUy0mFLSqBqgIm7f1/ypN4hrvqnGtmrYv/PKjAlv0AuLfD3Wo8G7K9BqBNpdalZmUVwSAjSqJU5B69DKHX2umPi4WT2chFr82aGZmCI6yDVRyvvtY5Cv7tfw2xnrnoyQbN2Z12crwI9SHOhdHnbNXn5vGn
RUVmqNpaFzyV96m8mnNlxLK6RlZtVLipyl5KZR9r5DDWVUKMVIikHbKiuLSnPNDyny5QJ0cFuSGfvxd3pZ3MgLmx4cNF8LbYTX/J8R4M2vtP8X1mVWb5VoLgCLgIvzN9g9DLP81BBEYsNBsEceJl+trszGw4KnEy//PmQua70L1+vNkWlUzgL3ALakrfUkh5YZtiAPjhgTXmSGba414H4juE9yWS0fXcro4SNgVZt+XKwHv7I0mvEAQphaDcC+tK4QdH5QxH8jbO+iq4uTy9cXzMT5Gu5MBvepYKCXlZDJn3aUmvR3zYu+6hDSp6bM1t+4HSV5HihhCLjIlAKnqMuiHdaJUhY4Tg7XLNWW/yo1GPuy73Xl7P1xja/jeK/jj/bdPdFv68x2LZyxJk1bqV5GfNUHIQQdL/cLAoZVSRjneFivT8XXqcfj/POn5Q6Bu2/QG0+qJEy57e3NRZ6cbKwU95EzLOoatnF3VSPbnpdTd6c/oDnL0J/BBBnNmMRwUitSnH0llRHNyOdHshqL+HLE6+HbwdsEcGuEBRwLYKVsIYXCaYMxpVEFT0agxONtkwN18a0KgjyZVI/bWY1u9d9ShSin5DyeHlXh42BgQfE6myYFOPCo7p6/RHHaZWFyJSoxUR7sL2HRnfxhiOXmi951OOZ2BPVP6XHfMPMx6lG9HqbRJAiiqZrTvDQX0WvGNzW+zuPnlE/3wzbFXIr1qAVGBOx8IPPr9mLb0YVd9yPGel7NhFTOAL47/yxsCy966pULTkkXtMp7jukrERLJxJd9nPTmr7mBh0pbe4hT/0Jh7s3X/ec4wUklbvpG30rZ1Wxmwsfew2WbFlDpvNrVHbrb+aLXWxAj707tB+P5hXUeaVwjVIR72E1cd6oBvVGV524HD0KW0f5oOPr5kuVfUlnoNCm2qWeXSyI1k66914X94NPwHgq8PmJ5HKvaNGjJaBo2Qb0B+drZ1WOrbD+/pi/6FeDPgbUo0sXYUFkFlLZ1pnM2MMdQGG9QSINUp7cmoVN8BeY9oHhdlvMW63EOj2dAf1Hg5K2j46YMMfQ2rhhWO/Lqb9AXc7v7+QAR+FIZ2fusVHie/KZ5L8BXf01kG8H1P9fFoFB8mK937di1ZjzYyhzv/yJ0N1LhBwQ6sCeZ5isOTqGrhX78w4zcBt0v+IEZSghJlRu3E2Z2v5kGwwWOeLuLY2qks/S1dTnOiIDv4mOSRWtZhU9gOVqwM4YZmmcDKjuF17B3poo+XKlRNirYMOsLs/EvXTqXCXZYekYyYnhuiwSdj1m9xddY17I6lXeE9TXJW6YQPPQHYh8riqPXq1OpbcjC9oHf5mXuYhmvJKF5ajjsVIoAMlL4Hx8s4YuS02ndzS95RowxazCk4ZeCaX8Vl7FTQN0ou7pSgwOOS3hCYs1WRuOCQWNkMqwdVR8dwshn209zk+tFCc4b9tlKYEAm2hzG4wMfr+KkT33Tuo9PDQyj8ApQS/WR5kJiLnPrbK349QGqFbH9V5/aRCAs1KPbL3r7BkFQ5n3fHmu4UaN8MPTUDApHwQqB/aJevxjlLJmtGpzznFnvfrCXRGGkRkHTaUUxMdxfLo2WIuofivbqFFDaAEEEfKE+eDBbL/pDDQ/gMEnpV5Z1hW/5Rhgb+JUJZ0CBm1rMlksZw9uKtMFbvIr6SSCTSm7CYLGqHW004pzF/VGzZ/GnhGLR13JI7yjZUbl2ks+xNF8rnqmrLUFNz/aJ77SmTGOFwIxbAUp2/2r0O+j30sVKma8QA6X9NAZ1KL6gDnbpU7mLivRJeg8ATzmPjnsfXPh8G55Gu7Ymxu2HyRsti5bNT51fVAfZoEq5UyHlY3GuIITAqVz4mlVQpa09gKRwn08NDE2ucjPzw1IUytySq6WUXizdBtw5g+xfY7EU367XOr1Sb/52jzM6cwxeiCbNaj8ibcBoOWbARA66/s4iP3xNw+Kaq168t9zQ9kaWSFpuSLwmNONkO+dpTaq0C5COpIVRa8Rqe47UIf2S3mT2Zw6/OZsomT9e26EwtqjPTyImWbpcvw7UQEuIEmnCK6I08NO9HdDDduYtRF6JdDs0YYg5f+yS1k7J8/77ZvT0oNAsfO9xV2srYl2yKK6dT2ifuPLaEvFNF/VNu2RpT2lCpJYonN+X0W10ya7rD+IvTwhvrx24dILIyfLCKZL8Df/xG2cU4/ANdXyo3HRdNl+5zTDuYetL/tphp5aCyKYmkP3JuAcc5Z8H+LY+akdwe1ZaZ5bWwApiXLc0IeFZLIpEAa0tMMVzRZRgeihXKt+qN6Xf34BlXMtyWvO7+SHfyeL8WhLpxYMyPjyCyyyXR4nx2YiHX+Vjd20tJCkQUn5p/8EI8JoS2S04M/3xK/dSj6MV34RthjBKPlZezK/qmE0bK4LHzTl9DB+CpER1p//OKUcRSzVvuAD9ETBMEX26EIc28+yTZlkVXXKRah0yEnSuUmUtjc2vqE9Zcj5uX/lcv8GpB3AXZYxA5tfhC7S7jGcJXgtBaq80CHAuJPXaRkFxAcMDoiFUTJpzERiuIxjxGoV/VkAYKotoTk6XF59QjbpV14gQ1tHY5+YXv4Meo4BpXlDxOj8qlUHBs+t4fzd8snp0VI+Zcd5hfyJhmJIWgRzypSrX/s54vw5u9aH0+sMiz7Gt4WMaoFpRqis+I0y79XeN5IHPuiQsNooWIZpIMt4Eb0oyBdS7x4MKG+Eeety+CpcZlZ1wOXqu+zpUkQsFZnk/QZ9cBLdhxdi8Rfm7sEtIt0BH1piRKpM+TBJFY2gV8nR6HuABxJy/nj1DACyecZT4q2PxGG5kPsbgrqYl4XOfyiSJTqRcC5rmvumG9kGeaGbU9x7eCuXdwn2ZHoNjLJArSLC+R6BkkjX+egp9HBtAglff5RQiGQ+RHrXnsxlw2VTQwo6uaeE3KxkPCO7xtA83cyEEBwwre7xb8XeGx5TMQaqzdHY2XKgd40jc6Ayz89+vzjk8oygF1LXgAkyYZAq8N7dYRE/4A2mYwmelTa0k0PJXtzTR5CevM8QDgLdMmOknNK/4t86BOmw+dJZoyMKcBXi0O0B8LsoKACZEDnPfEQlBoswmqwypbVaDWy5C2oQmEOJHBJwXlxVGDcIC470eJ0NdqVQkVPhdRohZbkEFMccyDco9YLhma9KL1gyC7wlkLbYgxoQNdGTJezvw237ekkh9cXr8uHphzWITCTAoP+Jbh43zF1aTd9oNWjGWw3QLc5LupGYqrbXED88PsDfwqe2eoPxbKlh+5bWvuJwbzlmR5TT5LzFISKHn71e3gzeAw+sVVSB9BY4jB73Ch3dZi2d7Mg+ft5guXRjDBKLVNXeGb4XUdJ9maHcCvtYgZoKokcD5vr+TAGEUcoIfs0Kf+HdIdzrXfgXwE87o1wZfiPXV3Ad3guIx4WAKxG9zJB2vbz7MvlngovPGSNIUfvpdigy3FLaWCWMsmk1iVNUts9/DgYzJ692ZkxrGI3aK+hn3iteD2Lp8NNLaOD+KTf5BqiM+jgUkBpaYrMP9PMhzaby8MsC4XNP/XyTNfCQOSVZVv
JVhdubLdvF8KIoSYLNJLQZUd6d3H0xdtKXMKz3NqhTdSEHmEnrbqtPhh4wK9q+FYZcMWZoAS8bm3EL4ZrbG8IxY2RaYOrVP0V5saQOphxV/MfakkC2Ds5OtJ7TkI3pMpdsUlOI9KKazTurVUE5BTfX2pREZe0M48SaaCvA20JuX8kYfS9xD1UXHXrPFp7zyxWF0920eJLb/BomBA+SjgXpO94nR3+PbBMQRovMXhXVT7ljzcSXuku7fvqOqrCGKMfqccG/Lte4WzILt2NRHJkmYbaBNJA7N4SmYHwAQxq56AubrKAAQMY9SMXS6JFEuQYeC1Z2OA8Lt7uH3UhQhUJKQ9SVj1jQMNnUm8RBwSm2+/nYA/GpUsrgsjDkUxV5vcVdLfEVDl1r57oTeI84mpEIZhzEg5lHXn7UyyICMHFfjswdV9mbgLWwYDzsFyKPFbp77+/jNJtjZMu8Eu824ldo8eRHsBlAi7naBqmePeZ4oZdyGkHyA3uNCMLANiaB+NxYWuAD2uCzuwSkHAm0SOwReEZIjrhB+Lo7Auaw9Qwi3c4PaMyrvSgh1uIWhPcKFT5gF00KFAp3RfisbjMew/svJjvTzecaIO1PRHfoA8dbvfulX+TWu7blGJswT6i9vkL8QzDljxL3en8lVRYVNjCTqFZo+tny5cxraQhO0ymKKYo7APZ4SbllPH8QaaEkwF/BnKkySMRXl9m5Tfssh/IQeOrMO7VHaQdaTBxrNJhSJajkB2qyyBBMqDNP6e9m2ArGPcE3FQ9UDF5MEGOg97PJ0rchLjT37vTP9+ZmIu2Y/iAiajgsv7vOn3wfQJ9cY4Y6CBzsY6BIMu4hOIINOcFsgibj7RK4k9kBVtuvPmWSyem0duSs+rrEVwfpCszIuKnwHBEJpm24kBgk3mBox911AAJ4z82onpnjL16LLbArYfP85RdZsDmKIWxhFefM41wfNJrasGNGo48eL/F/NJlrwD5yLoimlNCKi5XFwSEqq+xD78oNbPuQtJ4x3Yzlr/gWo4/2r7g4RSQ/LA/ywUFmO8wN7RpdfZpyLHsJc+0vrc8TvyjN8W/a46INHlJNxQNdvKMppM39YiJmFcA6uM4CMvXd6C1QGMfSv/cW9UFL0YnxhnuWvDInOwG9FvM/z9OCx38C3j6jMRF2bM7X3vdS3PnFYOB5e/vsK6rGRrb4IMZVd9BEExSkJ45PQAl9byLFTe0TCrlnI3jYL9hYlzwP9ArddjgoJf1BI0SFcjWSY/w2dheOys5VQEjoU0V9en4ftRU0NtPM4zdROls+C4XNxecWLK156qtEID0L5ejsTXG2MHH39D9J9OF+JWWVNdnI4n69G/Np0GuyEY09M3I/dJMBouf0MxSwpPgbobSpyzXfFNFSLY16q3dozQOmTPkMEtuaKPLSIIaegIOazZYdZwqhU1w84w8EqVrrGRDw9SsR1/Ealkfjnrlga7GIa4J0pcHwEuJH5tlDdtxXTIC1v9flFWPLve98/gjRY5lC/eJ80wVxmwRby0LX8rxzIGzC44Vv2YSq0w4IAF8Xa5QuGGVDr/Rtpb8pBiCAv0B2DJqEaukT4pQFnNSDvwowOAdfx1/AxL7LlFxH3v+ut4kVUlO+ljugWptwQfcl5KURotD3Yr+UMov7fquwG/2pjgiLZ/+1IllcOu3EW6QD8MHaVF7nKFuPavLVmveKbW6hayEnDXe19vjvLCAcJl8fjsT9u2/ewC/H35L8L25tVETreoxmBGRLjGFqKF0ONmWBEOIyR8Zu6Rf99DQwBuml9g0XNx5B6IBO9N7uMZg1jSJEZfUqjSh/q2NLA0zoxmWdIPKlOmm0qGXgEQp8ipu8PY86syW2xZ8muyJu3hz/fMdzdpiXJcJtcsty4jnEkr28loftkpA6N8na7PCyqpJ2A2ngZCTK2kl0ilorO77/wJDjmcFhS/hZUfdACQfvUCRzKjMeu9pb38mDnJcZYkPYyOPfvfYB1RXNyeYyE8HcSil8dVbhX/zny+/EsPm2aGIBR14pmZrAglTtiTHHhyFbSQ1WrD+r3y3OQhmpTs1RwGkMcT0nyiLRyflf7WjMlt2zFUDIZrAPFmw8H27oh7q/rxX5xmOp/I2U2V2fvMj+UbYYxQTyD8UYpSbonFsoKp40zIGadL7HNXG0jU35ch22s9rkthqUhGCDFUWuAQKWVc7/+Tm9EKvDQOpD3ZnCHNcXXWF+Ssya4o4Awot/wA5ZZ46s6JaLbqQOBBAvTZScZx6z/9Wg7ctI6C/tmi7f7NT4eFse2/aKOIqeL3aRUGlFOyN2z22YLBVpkRdakUJ923ZTYigHJTywDPi7LXihklP7nppPTVXKcRqNPF2Ozgy3GoSpP+E1dUNezHxRRN0kjHr+ipXCg7HIq0jltxeXYBR/owtNPquFPo50YY7B1Z9RN9lN6/qiRlzssMC7MRZY+HotqlYwt3Ej+grOpyzb3Ipd6VfZ2bQD4E7rsM3Bt8WIQoLvxXTuxbugor8Cf8SYGy3Fw4GlZAuYst3DBG8RRpcUoNtnP/2uf7FBqjo6QUoPLHIaEpAaHsyrK8thel7TjUkiIHL3ht1CtI1upakASnMmVgiz1vMLO7q8X9qxujBGVB0WXd+WgZHN5YqnHTTbovkQgZL8soDiCQRMwN1bVt0W2Af1nFtnkEBij3/r1qVf9ZbghbDyxf4toMjIz8JanwtWnbyfXHhoyqPMCzMfyAom5MVS7TvOFJ7ZPQMF0SRCDSKjPvX56H/Vj4Tj3MktmLgz7YfqEauxfgi7y5yvWOpp/we0zCRC00ra+ohMzK+GOC8AGHHYTaPy7uPqbACrkYJguc41rXvF0JS+462uIVREcFJG/NoNAXsV/guydfb6iorcv5Dn04ji2w72kbbQV57gHZ7Umkllw7MC5ak1bDE43b7N4MEfNN/ayJMWLWx48vEXfPEUlR9Iv9ctqZ2WeMCLlUOAdPUAmZDmRuxRnF8R/R3vxhZBw873S9VMWrW5V8DifWDsWk/cy9iqRjHOIWaehgoDE8HiZ4FwIraPLJtdiElF+EbpROnrdKFEY3MGpt9p5bjAGBmIzPl2xDg5otu1rE7trYGHa81zakPR64FKXC+/zwrkEEWEjmJVacraPjtSCz4F1sUARGLVsJwZDkss64wrfK2kSSdhhD60RPuefK/v1G3H5jxw1HfXhP7/cpRuhVbJYp8QIDymwmDlLmgxhs1bQVr8qSTu/rgtBM8tQbV9wphmGdMdI34JpliAInmbRYjgtbmcqx+4CZ1+U30B7WYEnB/HXVmW4G366bmS0pyhNC2kkLYN6ob85o7/RSWX7hSlSRNJDU4sHXwAR4HMs1KvHulMcrKVawElDAFxrNJiYu8/HhPmjyrpqVGFCBHprSbw5F3W3saTaJcXXUdbVJuDowb2VZIeKfWaKOsGYN5Pektd/m6I8TkbR5toqQQp+vLuplxuuKfPhMELf+MXW52CSkCIqZRh9Gqr+AGwdWIfhIcMJNswMIyL5DWQZZmj7unneqKDTkbCB05UUXXndDykhWYe95QfX
cvvjT4AmAMbxAV/16aWX6/Vn3fuRTcjaiFg2Te+cuGa1Ign0OaZpRwdIfgumXurw1WNhP87sroBTQUwIFJK/6X7FRNkIzKcPLb93Ybk/sFdOWWWvvRa3ka6MhsGWfYTxS4VH36xVvM7YvXbJ8f/qa8HMT7xuZ2t9kCD7B0qjmyKRkdkpTr/Qc7aS+w1zuo8JKuwQBnzs51qnXw4oP67B2BzLN+NEliZ+p19nEIb9ms0tS55POQv2SzCwikEoThbuJIyjTrnyWUdEXEG6lBwilyIk/b9e1lBhxv/wkDGOi/cLgYPSywoqmGXB/UfMOQBPcq3uju44zkfbB918uMrbG0jjS889yrO0e4TG+o14iiRxhn3LYaiPybq8r6K8UHKwl8HTjVNsPJcqj6VXfckxgUryhqYM92T92kcHFSmv50BPT4+6pwlW2Viay83vzbz2AuLbfUA0ACDzL03SS+249WNj6fzANN0HADDMQ07vDsGW+/VwAR969vYn0MFfYfkdcAGPPmkTMR8sjkgR1QzEif+ymO0BEtEgbB/i/Ia/Q3HVWOXnwE+djFjPhQ9yJO33l3FCT/BZE5X2tzfHMPt+HDlRVt3fuhYBkEUBqBCKEzc8D4Z8NIbteW4s1+S+Pg0nIYikN8vDgKA6ouMeuuzY7xg3SRnE7T2JSGrE+9i4Pvqx8RMStQBHC1JLgGQQxX8784Ulv/5jJMZfHNvWj9UbW0o0Hvy1Krir+ihMjjvqYjQTz+QGbmBszZWAWOLjybAIAKKAYcxxQnZe2JQWpEB4eAJnbNATeAwN0mDVLw9IjkQXB2/32HQEk76UZVpDsDCQ7unjHKXLjhe2MQpGMj0GD9Loltr68sGx6PJwm9FhFIDKxatlUIhAk3fePy9dk/i6fPk1zm6FD7FXWLWNbkwaBACSt6yHW+TpAISWJ+KDDZIU7h3ypu+fB/xEkV8PyOEzrG7SDGdZkSz+06mE31T5U6gxIl1CUqCCg6zaijclpc0oWmE7yLjmeiFd7csix3kEH7MpzdTUuR+yuuiwfMKayXV4GLsUYhJl2rbb8JwtbeZBUz3BiXVPwGqcJFDdQoMNrNEJDIgiyIFUgd8uaeC2hP2ySAO1rNHWBOKYYioxdBtWTOlb9Ztmx+PqXBD+4meKWiSfKOaGGYGuNKqyxWHy1z2DyUnIwcG/aK18C0NcavYMOPf1zVi6RKtwQA4qaQ4NfWnxPDxutGKz6zHIfcjU+6ceT8KEL0swEkIP47fXuoYkHmwOROrTcMC3fOBsTCxQdPK8M7k2AEUUWPLjdLeynFx+Z6KZdb4ExHfgNWOfpQMgT8NY+vE3O9vR5NeH3OBTZO+NwEOM9q4v4TFfw7pbBBwKArB4REq+Q1DCPB4W/QHpDRswJoPbJ0aSwPZ5K/2iPSG3SmsXJPWAI8hySiZXK2DBJtzeLdlp3sh9N05toiPHf54KaiCYddAhHuJf8ytAr3/pn3NKnE1khhKzZy12hkVtQnV5sa/xtcK0DW4cCBdM0Is8FVuaACEpbCmgE1GN80CY1lOzoiolNZcJI07z3aIIpJvSshWYGROb8/M/9htKacq3hSt6RcF79RBj24kpl3/B/FxXtvlY2v8Cxyfc0HP2RHj5rQlzbm6YzoaYU/NfH4KZzHPlmgprvARiAdT6mLu33HuXlqxMkeHxAEtYUpqjEZVpO7xVmuz71+epGSKV/sFz7tcJEBU4sczia8U3+b1+pCESU7EuH/7wB1cYqjOP8fBwZ1883vBBzk84ipL+nyLQPgjPNwaTLMnlava0ojXaThHrcDTV1sTjn/HgCWdnRGuErWveENpBBeu+En3WDt07r3JbarWMLyf0U7V/ugi3PJQs5xk8vCuaN81N+cvLdi0XtwdrMnxK/RqwFl2uV1XCyEOJEwm6OqHodcvt79bweg0Zo5eCeXKisPOj8Zzf7jkBH+Sy9pPP3uSygXWjr1/ZovtDfjc4UDETRw+aLgz22vpBZjbdjxJTStQ0pRrF57grrtc0+ws0zB8aBotEIS2iyML+wVx+Qb6lgeIaCBfT98v9JlbxZc8/RxsYeEkKKXGQENSqNxp/ckeaRHb5TQ//zQ5oktK88GFb3j4Ox8LHgd69VX9OsuTj1qx1mnJ8IXvD4aDPldg6TTJHkYtOdyY5FeLLdawaJbor0mF9gY8TwoXoJq5yz82pkhxW5rzpY5qJMW68TBS6tONF4H0CLfSrByAq01KSUebMeEVBdSVuwFlLN5KUyRG+8M2GlO5O5sp/dU1nMGzS+7R5l10Qa9jLL18q8ERjmPdByjqmE0wmIGrSDofKLg09xfOFogjNLWzSpn0intpBjzh8CPyPxc2+g2TtZe4pr8PGVlgCbKLVy3SSTKdzakyh8gHZ+LhVJX3bScCnJD1u/1snONi3j6j8kN24f/3tj5it+t9JF+rsAOvP2AmIuzH+kTXm/k1Q9S76bWMxFZwyCOG8e4n4xqfuctHVlIqPf7ghEPeE1BxRmUgpUpk98gvfjZb93DnlL/c6RdkkpFsUkRXA9EUrGxpBblExpe146xEbHgeRFxb30/feaYicVngIYUoqyO9HN3JI9E8JzUTeSfYa+XutHSmj0IGUURqvYEp9kQvbagbytDLUuIkbRD9+OoXZFXgJX2LJHoND0Iggtt7ngimMoKkg7ajfM2bMlmWHCBHc5yRtze5karstwiSs9uPWIxI/Lu9XI0QBaDj3S9lh+9gMBSd5+hmw8iwn/3UM344PeO9NllYTs4rDibnp9wqNoHP7Vy1EtKkIOeSgYRGWr4kPZClXRjYI/v79vGbEn3XgQwa0fhUg5+JJTINl0ggDGNcwEfrg/SO0O0dYA1d+9YAHfPTQHOvcl7aj9Qn0H09XjeA4kEVPs7kYQjEzK7PIAlvMp1+Ve3aDSXrcbqnqw3sfzxYNTw45Iquwt6a788txZ24fFFCn6JsgA2q0MRTLgzq+AeGdYBIxvze4SrYFt30/dAb2XnswjPIDnLq+XazrseiAL1Vm+WqyODxAb8O3ytaXhJRGL3VsgVW2NO+Q+g2b8ati1QF5NDhfIHGAdBHmCchjdQHHgf2quWm6x+SHvnrf4//1jZ+rk36o8gpUsQzxlDsM9i3QVWmeTnMvWOiGBA4A1ymgkGHw78XSmX0cTJAhRchwlBY0ItmIpikXPoQJcdj0+14yXlQZK1keFI4Qb0VKHfhj27VNQiLuRST3AWR/lN4TpX96vZwR6BfJ+ZO2//y6Imn6uXgRJRwRZ94P3rXD632wbte95UeFL3e0J4q/CXd6KYpQZyABlZUjvpKQmZDCFv+29kzA/FyQ/OoDdigfBEdWcED7O73PLafoIBNB54w1eb0+EERj1MbwMCh/DD8KBssE2F2DZGReToJZwpsCa0W70wdud6Oz31To87HlcQa5n+PHro6KX4v1zX3vtfGYBoStNdOBl0c8WwfR7aZymJyFbIaHKlDAkLXfnbwpX3VjXPxFPdAvkDFrlTpMwH7zxAEpY3nUsiWiQKl8pW88GNHlV7VmEd6XIs1od9G05FzUGnGM+LF
BIASv7SWEeMlwXLtxjR70pIEwh9Eahe5gb0O1nP1qGFDzIUgUoSa0ykU5aZ9VUidvzdIsYNIqp5TSZRurtcMksJTFKnmpP+qLh8/SwNEzdsHcaxZ/7bvzEdl09TEYvbprBU6pH6/Xcey9tfI5VJe9kdFlyzW9hzzYlv110GHzTKkfe6/x7XPnJ2e9k9Gq0S0QmJJEX8vnLihagSf1xLMUJkIGWCTPKqrvSKvgcgmli4uvjOdK8XoFVJ5ZR/Id1kIARqJSbghrYxSR7tOEeN//ZeAeYczlnkoSXq4aUkynrPJQMxERSDcrae8giCZmVJhF2+FpcTgGsKJBdDi/fgXHY+S48BGQHTVotNA0cXI/bb201vPj0co6S/PMNtRML9MeMBxIJHnS8sXs+yFf5f6LiIOUf1Ub4eQIaaQWJES+wSg0MThJ7/w3YUBDHsyDNp7jPRrSLq+g1a+pAkk7s7Q9NUdbOfr7pCCKDRl4S6RH6dcyWpxiVNl2WE8HV4DonNHaUej2pWwwJowTJ6Eo3geHNKrhKC4noX23B++Xij/PZOH029EfSut83V2gXGgylwAThfQw/pXLaBYsJX+WRqSurEvash0l0CcBLA3mj/QGZkI8zrGjP6En/X1yC9FD4t8wFKo18cjrjQaFVgxFzJ5ptr1tqPH5P6vEAq63/QXeWeb7GbVYtpogLm7mk9158uHoMddSbtCMUwL2m04i1ZWvQ4V4iGiG7GJCxk57uP1N/DGR1QvPx18CDHRSCra7H/fxPC+La9/KNiw7VqAibjGKNnzjEOO81zeO280Zyulsq4oAgbnq85qYtq6bIQm2d1ko5jzdFXyhV+qj96KG20E9x2g9sOFCV3+mRXpmmLaZlxjVH6DT/XDXTZ/oOw/j3+zlMnxIlJfmn1Cf1eWCwmvG64bBUkZoOKhmFc97NwLY8+X8ZpW9fccTvsOrVnxKcTqnbI0Eo4lMN7ZLD9eIhMeRIH4TGihkq8UXOSEpjOZW78Nthq44SuLQnSOVRQgwLTuyYzHYWYRhHQ9Cp6VNYftyPqnQlpwqnpnn1bDa+N1GLa8Ku6ptMJNKy2x7SYoQR9RhUHiMf7PpZ8l9NiF3CYvjl62qolaVcvxbw+MQ2Fdfcdu9OelavH4V7fOBPwWpkuCVnCglJKo0wdaQ7+Ma1OTGlrhl1lZnPeQV+/y//pw3YjBy8esmFl2hJTOVyEW6gBGtcU3/wEDgalApqoWL8reBIiP7A6hHUuiTmAsgJMMep/ke1it+ew5OiQab/Po6qdUN47ha9ViuWpDC4RMwNOxRqapEO4SQnZR8tQPMMnWy3obG2+Th8VVoCq9yN140KvNzOvoaTXYBywqDkkkP+AseEcvkO+cfMs8tulER3vBR7p6+lsAyd0nqWxyuVdMnk3yoFYULtLXt5w/ihAKU9Fh1fpDKIbtv4xteoU3nwKXxw+H7sdnl3zgjUUw634LpCiHS/cRJrhscVwaESilj/r1t5cfbNlfLqgfchroJR3J8ERhEf2C7m2rBZ0pGxDyvf/U2ZDdBakvKdBHyPXghMk1j48ujJ2Neaz2/VrW0ob4J/9fYzKzq945OatQvVJpPAhOS7yhVPBnrLbSta91iPS9d4KMc/I0alnN6CQ8VZJ6/TY2/Xvw0D3BtpJ14GllylSlmy1jjh69ZipA1EMvt3nYYGEpFFyEz/lbybqRyWDyW1V+87iq7QTvW1fjaRvKqdGBOEY6OU/Q02zd/huFh2XB9fID9iw8PHsGFMsoW38zbX32tROh2OAAqF0ssJCijEpDms+zfOZTl80N6CiEPk1pA/MnHV/iWJAjUAqB9PRun5mNL85aXYT9XHJSgiDIFgqS0a5XsJ4YXq13NDMZNlhmTByNkPA8jYJiUeF9m7otIbhh7KavZClxRb81L+QfZ2Nfy6DYE7FQQhAmtaZT18QKPU0AotHc7/yR9b8FfUpuGMN+MH+DhZPUIVOcBwwc7ijqe2feL+zpHerZ0a5l4LZQ7O0Svrc/WU3PD4KyoRGjaWYljV4j2GSNv+GpGADvqIsaIc8TW05bneaketmK983h3QZhFjrGcEJlouuvSlBiFcV+wKOM0/LoPuYSDkZZTeMQyuRMcM4tHVLkV5H4p/qficAmVRT4rRdsmDR0AGhFOBaZ4PCgJIB/FesePyaEP7pSuT9WW6WtxvndD0FnX+5+XK2sU0unRwQBEjZX8kvKUDUTkCnCPYTk75GiDssLPUKfGVYCWk0DJdQpre+PCPxwW16yh5EmLD5JIqx72FmBTs0ikRFrfxz0CAPJkAfEJnHyHDtJE2Lc7OG/ZpxzIEUAAterUtSN63W9soSLMXYoUTgHVbbVM/KWkKfT1mpfzgulvEyMyhVi/Ec/WHtJFA2+gH1xmQrvAf9AOaWS7dXggnIKOYqHCyCtMgnb2qlSuEMQ5pgw59jt8Q7kh0JaXa1pjdr65RIi0SyKmEkkfQEZtuY9ewQKPikXiPT7WZ5QqB40IZokooCJt69JX+j61DIKXmncjZqHo+pUP75b7UOlvERDqUPbsd5tFZPvFn9KHRN/fIGxL4fZjCzGMgHNAooetY235vmxNAq+W1W97YudOBZH/PuPZIhzpVwphQbPy/RVh+SMG8K/RUszcVMs/yjKbslChI5hUCzpJWQbU4f+6b7Ba85eoxac1kx8PXB02vvb0Ay6/J8eNrz1aUQIx21MKa4ScHsIscNF6Mdqk1ZPEOmrYptnARsP0zoRQUDateHObZusfKXjY9pW47mIdyNeLXLWSatqtMolQ/CvI4uaod0lT2T7HZf9XMg3Q+xsn/eINJe5uBp6DbttG20woPMb7+U8Dl60faAruEaLeNyju+dWw4ZA8oR7htw08AmrlfCANgenMVw5TxbY5N2m+Iqx21jMXhBxs8jFzxP8SHmgXB+TFG8ypYj137Qoizeg2ZIM0EOoAm203DLymUFXvNOJCGoNNofMi7NPLK3aElJNoHl9nxgONQtQ8fi2Rf/muPA02xASVw7YPX3sY9ik0Y/HimIpklNNqtk1aOVoGyVf7RpNgmJCv8bIP+KGjG/DBBNN8+1Qhd2OGAfoJSSBgL0Nm9Dcdv20QDG+zrs0Lq7vMwhPhV541nkBYDvCzPk/hb7jadEZEJWTZNpbOvwa6z3VH12teueoXvk8lC688ltTMK9tj4QBImF7nIh6rMGjNCy3dosZlp0BVVIPAOhJWqCvjVtA0Ta33lylYyDewhE93E+UwHbXZYEyZaKGy6lWPeWiQ4b6nr652lthXfO5MDQ5BMWXnhgxppfYEF9sEzbDQ0oOgaJjfCBw7lSZXLLwyjbdbluvXBcMxxNEKDFXtzHF1YgDojBI4A0rDp9cB9SCKE+2gP4VFce6yNNb9SBmAyG7QNRrUrDo01yiVPZdkeAJNKccDoYaafS/fNhg71tk0wpEpS44c3Bc0Z+QizWcMS9dxiJSm/psyYcxo0mYOv0QE7+6IzEAjdVvGHgvrdBaWC2bmb1H3WztOtPxHzpJPgc5MLkCkP3CLV6yAT6nsKLeIBPD3CAuMxtXZyp
7djFP5A1LJsp85husJl8/90DEhOWGlD85mJ/0YKI3bBVt8jAMqNr/NMoDgnA66og7/KLx1vuIY4xCSxGYDaUJShkFCtHgbny/HYascZoHP0NgUCMUsdxrgtZ9Faa6IiCtiyMLIzLMs1Y0mN4bk3xymyHvsi5Lj4JAddN/9jxhO+7olyfMSOI3uyg1ccolWx/XI+0PPmaleQ/Yyf6MC0AttBygtVGKdU8JIHiT5w7bo3fLmAwSP6fPFsrrHKfITDqGPX46360UGOElGyVz2bS/wqoQQv2lpMbfquwxalrVv+5t+2uN2HGZIEzXs83hYOyLnutz71MJMvObVPN8UtSe/qdfBDByB9WsxoB9ynLHb46UuKKnO3j8RncKLLwiMSDYVki452me5Gb9ubWhwJOse6TBZkfdDIIBbbF+gEiP6gMV5ouOVUZdnp+BGH55duQAPGZXSkRP8YbFDWhec9C7vBO06HCYECSjdElawzowFjbwxTtm6CrODB7Gc/Bb28lNL+KYu1Jf/joNkSYrf+SJOjGXWXNqDEL2Yj4N8yT8Y1uUolqy2TXkneKYeuwoIPUC6+34zy5YrODfTBHsTZPSgul5N5T7Drrj3bxvSxfUlHZZ41CAcXmAvcbnt2mSBsVX8uniAb0tdY0JZVO7hCZ8zMgAo/Py9ryGLPbnzbf4g0e3Bkl1ghpwyt+96fxg9iVjfeujVav/pq/BgnCkvE3vqdXruHEbkKgH8FXtSQQSFJU0YUj7U4hufROJezZ5RaQsOF9hWop/TT/1OwfvJTpWWvtvTpIMMyW2ofhh4XjlY8T/260Is0MHqOPltDxmhILOCZUXcjx/1RFoxdcnh9U6++KTMxJX7biBGo3tU8NrGhT0cXZXxi9Bfj5N3RiUk3tKRWUSA2b9u5PM1asz7sdjDGyTLWIUBAcyrp2bS3NEmfcxwMPYnSY0UJveD9Mmh4V0/RkBBaS3qPXz2BFcfCc4krYcXEg/xAthUrInpxhcRVAGHLByIPuBWbILP1QtnhREHKIjP8iKHcY2/MPCuKaINYdliHruOEzF+5KpADN4PzuOQiHtQEUNyOsF1jdVos7bXPz5vl8FcseXOFb46lbLg8HzHe3jl+yLPGZ5XKEhnLx7wybZ1eZG9oiG/4u2BbEu2xLsduOqy7YYcU/aFP8cClzhhKJIxggDCt7Pxqp83/tTmnGyogk92sVYM2d+4dSULb8ewPSby2l4JFvnthOY0/Jth3gkTn8qRysbDYeK+L3uY1dfVwMPjO/kmXUoZi3vWJ3OKqwASYJu86sXHvCD9QrlCcmDBhy7cb/qwfg9WN1xbko78Wtywh/rkklKRA90m2rGoiYVrmgNo4E5TDFm0CmJXByAovHgKogtghb5qJN+5p2dZjMLyXkqVwP69X30QwS1Jn3t+MK1ezhnqsl2MKv28HLRpQnwkXeeRqI2wcrGoEK5q4LblDJpjlurXXKyP8R1x6YBevkqUzkmUUMOyUUiHIvkbh0ALU9q3A/qakqsGTqLojrkJFgWtvryxtRg9be5axNOgZw+Er0RgHNe8gaNfLYTyAs/JQ6B/6K+VNoJogZyG+aa0l1lZ8mwLtvICwayv09q/Icr05zPM5t0Xc3XA5kJYOIxjPBrB98rcRdiz3RLcz51PiM6c5vKaNk2xqrnvN4eXqkrVjTdWaoQ5uLzLS0iGCefxG2GIQbN6xV4GMaIe3+3kSdSP1gYJWOvOekUtLuF7Pr9FYxDXllquGYvQ57bp1pearCdIDpWhUIw7eydKtfW7fpBNEK6nuO/aXdQ84/0rYSHHOTPKcRmBfP/W437Wed0WOdiowd9ulyH7Eo5NUGWKmBE56M55OOzOscjeWlASREGu5faul6nhE6H+Rc93c4AYzENGNQCndQ4pWxUZvEEX63HKP4IjNoQWksLHrVrT1VEvHz+2SxByjUfGi6D8ab6TM9SQUix0snMdsNxS3Dr6a0DNMusS9pr1sNNPj7IUQ/pAcU+aJjgcNo6L3OlyTi+dXl8JK31uo8WIAQHOnARIqejaYFCDt8bN8AKqEMdQr85tvls8cLZNILeFuCZJhfKaNOJ0kwxUOLqz/RKxIj63qYmjX0M7d3QQQMK/xisj8pcy+SjuVlTmnNlZ0yEo/uEpZ0n+hRUfLvC6SJne9p0F+ahZSI+tfhO1pjIhnnOKcnCvCRCc5TreDNgoIT4gVR1UiA4PZLKdKItulZfvKNGhpjDIjAOFOiIMBy3JHFbOvJ+PXwZqnvada56Q1V8ZmpAtV1kjv0PT9EAZEndFeP3XVsrSvLqbzyNSJ8sIBnkVOtBf4QZB0yqk1FiHR20t8XUHygAyIa9CMYUiDIXBgJWHiJO0wtHU/iO4N75pvwA0FrVJcNoHZFf2jjD1rVmQoIGplc8xeOOVTymoTNj9Mwkl5tdWCzSqwfqLDCvssT+B/sldCgkmRgruRL8/X7dF0lsFo97ZsyA7swTlkwZJJd1QZr9Sp2YavLdf91DlMjrGbED0Bh3WTgA5XlhLq6MCZURk7yS594l5a8NlfHa8n0h0puTCkCvpUhbmpQLh3RHErACsxxZXyF8egTHy2A/D1LyZ30KtBLJt+hI3wwGs0EZBcwRr3TyQCfYqTRmrAgEZ6LKyYrjT723iVx6hgmIStYPvj/BtCbDHzNtmh83J14tSFAPJ2KGZQI7rSxN+h7XzHRVY4eriUhjdAXsH/s0ttXflek2Pl9ZbM3rpXLTHN7DKIH/WwN/dvvYb4c6vLk5weHaFLp0QzImOo145UTVLSucAi0cjrAnX/VtbDmw6AlovN+0aAB0spuux9owTX5ktY9CW3QW8fd+4cPUbbR0bKlbJCUcn/304SkZx1ouVDPeLkUSb6u7zOxFfSF43ImyYNUlKPzRBvHCl2JP4MzJdvm7R8YX2UGYjrHTCFQGy44YVqwvrG+xRZWW58SBkEbFXpFYVxN3Eiji6OpPZYnelwBxMBE+Igj6P0K45CUUpyuKVzePqa8r+DuqiQfLG+htSK0L81Iz+r3D1ImQhMVDsyhQCIzT8tzjDwIvukQv1zZzfl2s1AF/OymVj6DcIaVfmmRJxGc6Ab4GD7OXYhVI+jim+zvyq4dBUIeltAGRW/bbbZh8CZypBMR7/XJeMG0Xw1+79Rdc09aCSSPdKcd8MEB8SZ+syoLdAvH7ja49I9TIBiz6N1seblXylLFxjibwifVwNTDpe2FwJfqlgAYk81GwSERDH5sfDxfizhzyThzG1pt4vCWqZJe1h/0Vsn4B4oS19u1hlbhTJTlS3Xj+n1wSiFfatEcHc6/niyjkXVM6NMA5NLAX0VX/B1B5AWGEvvqYS5PNcZJ++stvdwFbB9/6LboIQOF/tci9uzfhL7t0Z7jeD2RVJZMyJKP9WvdRrh41nmKPyHqcFgG3zaRg7CM1Ejrs3b2NHTDaJsQrYHLRApC89dOUDNhm0PafEkDYjhSL7bblOqWH5m/oInH7ODXbeBjARja/lKJvOodAyhPBJ7JEJVBzdRnwU7VNNOmeRRF/+qg1p+mFGTR1WovR65WxzV
vZhQGj8QDs0wRIQaCNhYstdZlByOhgU1fKkcp3CIkCL5zCV85Kjtg7vEWyhan7E/GX5CfTFZwS16lkAaGI/LtPRp6vLk/MBznfXTe8YT4cqb4qAFU+3v415BRZSQeOyPzx7HYmGHxW30bHd8vgMwHf282/TfW3qKYX3p02jcGYee9s8kM0VGKNltKtvrOe4+nPQ28HwhYO+A8gYe02S9nGW41Z9vJVxvnI4seDpKlko9Agb0f1PVpeiIBLVFH/o0vMHvlJp+2YohaiszV5ePUg7KGJJptOU5D33SkwKWJHUiT5JZrq9tggWUgwIL8nj7s8FNo4oS0lOfAN1kEbVxyADhFX7xe6lA3egjXstqCmhLPMClk0O5wvp9xrJ1vrmqNL+IPJ2cPrnVudCYnw3HEv/X3rvRBU8sNYmfPtjGcPTXstx0Uu3oET9qNnG66vOrsQ6V/s6OUD22xfBGC/adM6zN4yoSOnpF/VUbFhkm5T6dr/5PgDMJuZERUJ+pp9uVqvwl+GmMlmKGmKbDHs7iIqrleI1YmjMFiUkWxdFxcQgNivYQ8puHzo/xmAnFNXh3yunWfhzrFUq4NO2H+3YCYDff05FbVQfR9BreyCpMBmKPbsLRqkfa3kjet0W0sClObdH/Ms3P2sRlPLpSTVSFg/7EVRzds3QDIjc+oSmtcXww3ACefMB8W4fsW1c48s3r7s5ZCA4nRyKH6kMu3UNJ3M3xdETGiVxJGb/TImyUp/s+Q4vrEnyYXAVgWQWKAiqx3VSh9pi297RRejSFdsALlCTMdU8kQsunFxzhKKKTdB4KvJ9+kYLaFZkFbs0D22MhGEPhOtVqpSLiF+axXpPL2lvADY6h5wcHyEglUdUe8zF6gtttnOzJbqE8Jw9P2aARbhNM43KcuH7UDB7y+ESxDiSu5c8twbmHFM/ogO7PBlBaKDITTGFH0LYU4B3P/AxwsuErsgRoElcKHfODdYiMqDet0LOar/d1bC+UB58HHFbOu9YfF07R8WYPTQN8Oze4xfgE1AWD374mrSbKnv/fAeP9AhyqEAB0p5/XbfTt2sMV41CJzgTy3cINHLzm039EGTO5SQrnzcAJndnCkKE6tFXjjBHHorO1thBG7Mla9puVO5wWur8PMDFmsuQuJqTs79MYomSGEj13ml5nCHsICu1PlBjXRulleaq2iwHOW/xiyV3knx5PAZrz/ORxhjDTzzpO+AFF3CYGnAZvy1XaB/7HwITqZx5iUxI8HDse8XR55Ayua5ERg2eEOiDwCihsbZJAdFWPrt2ziu3jYHEic640XgvEEnFe0ybz+UAffMNlF8yncCpKJu0YyCoiYnrwqGthCr5IACm9WWIh8yP6LCwV6/fr9wRjaLNNWOZTkczdSzzX7qnnGgnAXcyqR7GTwHNordF1dc04bBbHhy0cJzvRYzMyGhnfFtW1TUrMa55eayj6hlR0MpMftfXrALKtvY6PrMC3Q2PHdsmKM9aSkJAZzi7/uwrC+4HRVgB5XuzS5MKYktpQmD2DFlte8itZaCJimjrepQVXAdO4T3MchmOhKq67/d1R1RlmeiFghr2lxNC74DRbKHk3N8iCF3jac1Uy4vrvEFbsNzI9yDmv4+QjPcvR0yeCli9mT7GJ9bpzB2OoSJQpBMpeJS2WU4D5q7jqRWHTfHpbAVVbm/KyQD2tRF67aTKkvC1ioKgKJJLzybFYL5sw5HnnrZB2i8kkqSV3tBhfyfBVGPF5OPXMHLWgQfr1xFNaf0+rmcsziuP9Y7HvlwSy9f7kkRWV7/tMNrJwQRWZMM0PdcG/jWSyOEcLC+rIpcwc0WU6cdTy/EEAOKjgxSCFyan1hoh7iEvz5fZ+N43EyYnuuovivQZrSHWt/H4Mu8cxPa0y1XpE2p765NxdnN8k+fYXfg4lpamqG9uQZdHJekUZGcExrkQbejVmtw83+L9ROJi7O61SurftZ5afOJvj4jndq1wRdQWJNfXUe8Ce+8h+MkUb19/NFc9sg9tpXVLEeMoIRYc+WqHrMvWGIaB9LW+RHtvahQtpWHIezLwW5pHfc/B+Wxk2hF78TDSjb/Yj4RwLiX5wo2u1KxFv6GQ/q+uW8BDcwYlJyIXpkd6G0v4GGs8omgKy4GH14np9XHNIHAe7D1i9YA8/raenPSDo9TPBd6gPl5kyKQRJt387gDwpp78fMYEbsdgy1wI6dAZBoIdpnA85dabX0qTRe0eZHzHPCwjcTe0vCdd+43AMny+scvFdldeY/wZTt1FusdWqaWDZ77FgwU3u2nU4OqHHMbmcDg53LihTpdqQ7/5Zv+QmA+Ta85uadmo9y2zOVkh6CGtRReW1gyuyeiRShg0U2jg6Z10VZjv304uGDPYaCZwSpWweD5b9MVjByKG7FGyKLlVWTpczvXXnVULsj5jr/Tz+S4zTvsOA72KTVEP3vnwp0VoYF1cKLpFkk9W5nT1dEe5I8u/7PvUNLwTPxiTb0pZnV62B6rvoiJS5XaONRpznV/XGQWNydbXhCsL1UG/cQn+RWXYcHnpCvGyivSC/qymywicGodxjArfNniU8btcTcY/FEhUv4CCRL8empeV4juOwTfN0jC1roXEtwjmNxPpUrdWk70+ftQmsYqPM6GaPYQP33AB4CIktaMCyiMK2r6ydLnlsytLN0c6n80uEqPxBykOX79CCzdhMbteMXv+Yndd1HeOyB10i1jSPXKSLBiS8VAUrlI4kNDnsORp6fXLb/eE/m2HMPS8TEKXxmdpDig/p8LcbHs//fiGdxgqlLUQKXy4T8t+kOBsOU6dTSV0dvMOcczmiYcmJwNOe7R3qNCXAQb1VBdQ4ftr1BwnX0B07sy4PiROlGNu3cgSCvBWXeOyuOs9aQOmd0AGaX5cia+SRt0jd+PSnjtgETeB0Xgi1bmO0Dxo7/q+55IM/DPNijd1QbbTWmnk/N1LBagG+5BSyBDpZoc/Yk/9Nt3ldktk2wm3XqADpGSePDoPeUppPrys8H7MKL0fxvhBXqjXM0eRiQ7XkijZUuZCti3rn44QigEZRd4ng45QTWoRHJ/wkADs5sKXbAanrqzsFC3ZizoTY1o+myTafEAj13Aw1Vxs/P6cfGcfl/S6sZptcoDH9R2djGZYw2p0FGGzsRECqKneNWVrLZeoZG0n3fVrvXWM6o8+0JIkVyaDJE/o/nqGl/R7PmkVVsI7KpeGVqMGFA72hRJD2O5X1i/eJ7X6g/o1PO5ERv06SX6rO8zXQHoxcD9dz79OjyliOCwLxglDJ+yjTG5l/1h1drqm18L28XkxL0l13wKGegIqd7l2tbXBtJUQlemBRWAGOUvaj0sAUTaW/ZathczX1DhjpHKZVH2o+KA6TsNzs7edBlwORKv/BgToQNXP6jGIj4/kOjB1UfBfV+DKn09EFVkGoLtEWzjORg83H1Fr15GaTb6qUTLOG5GafkX4LQHfWp9dCApviOJM1cdPlCc0TouE94yL8lwoR4PFIkcbfsQ2oanmnIxPq4DYGtXvX96YCsRgdM+qUW7wD0i6CRMj0/htB1Bd3Qny
TUGMYXFI83bxSCg4FWSc0+UxP9imzX7uKwhRwGQ7SCIDFnE8NPj9Yavg0RuRx4ZpScw5WFzxNmz3DUu78bVcNohILT9BBTTLPE71/SWLOvI4NeteWFYRkGpkjIq9k6rLaSPdBXGR91m9UAc67KrfTY3FNbDIXu4Yja7G2SYgkthdhhMMMWbFIXbLwug77QRrlMFAQ3a0r55o/OmX91S4IAiUBXL9T8xu5+r8IqXzUvWIS0iv2bs7+2SOJmXAy4awxIjDd7v0nZhWwZE0qhn0ZnP2+srf0nhN9ZcY7XF+vZ2ZJkXIkH/ouyYEJyx8dCH5Oyr1a7uHr/aAYmnqp5Bh+IedExOV0AgPHV/hZfrYZbxKa4AzspBoZybRo2+oyqz2IfOgekp89qLmd5bnVglyS/ZKAvMj9794qggie/R2Zp+fcCXU3CXNxyPQDzwkHPI648tgqFhGuAdNtIs+jwiQgrbYBsIfXl/I3qdKQLurbXohuA3ps3hDVe5Xe0zMGnGw55gy31XJjW6eLKqIbIOIOqG8z6IUXB9J8i/BkJJTJJRMJ3ufSYagMLUK3RlCZ99uiRp934eVskipe4Df/NDjiMxj8A9iMk2Q5MVlxKw43X9WrjlO0j9yXS/SJUbiJA7KhKjpS2HgLLbe0rmoovUhsEZdQ8msSA82xbfyzT/LASqaSlRx+EfrGEH2hbP5RTwrejuEKI4TJEH099tx2YOCITtZtCDTmomZ9rBVQEoNSVCvG0dr+rAEVN3z2wybdqtpD1eJaP9NvS3YkSLxloli9vNNH1uq0Wu+3T/6e7gLimTE87qIaCtBoCtj1RM19yJg7sbe6lLZPDqxW3yA0Vcsg4xpGb3zz10NOgn8gE2AtlRxOtEi7GFaipPooS+9Orm/nXW5WPIdJHjTJr/Jvp8JvfBDUD4Vza7NOoZ/mm2EL0g62sJM1e3VXVJr96zk8gjFLkYprwVZodG6fuqJ0RL0U8D2R912/eR3RaEDAGj7003PYdkr55JR58psB7z3j92brYBVAOLJuEmjNMCJdX4vHGUMW7Cc1irGQcUyjCMhmx4wv5ZYzSt/K0AcHswbb7mTXq7ANIdrh/pHCJMaipS37C47Yp6XmyqJ9D42ZZoaTZHY2DNtbmPrC8CGnly2bYoY6fNu40MqdizDxYeptAoHycTGrde5dw/1oJaL72/gADDmW6O1qrsIIu99qsFs90mjYy2QxVcAeqJcwtZRNy8U9PXe0qJ0Hx7vye0rfqNrFlqkOd4Zw3gr9tpxmMVm7pyOaaxHVfOS7rvmGW+PZFUiEP5bKg+Z4sNbmrJ47RUWVTZ5lmQ7LmuE0cqRltZLlSnD7elTY/h0tQfgqZIxIQp7NghTVOeuknbyAXTbisqNvO874ra26PLfHTJAuUhLpyHs/2oXCI9Vha7IYvyYHKyUdoei2pep34QMsv3BrVc3szCs8EBhKmUiEJRE+Hrj6EpWIvM30J9TfUr/FRkS7SK2Q+VKg7bxP6xh7o2UIqLJci332/AO/A69nGH0It5vDnwdGWQ7FPOe5XWFTz+MeOxWpD6jX3NrrrtTSnCvqQns9RUkR8kEjVIgzFHDyNBnXoNVAWpiFv9+0MF+iCkSg4i1yktTMwCiRUkohU9SBpXn8jY/kwvBIudCrCI2eACnEYjPbtCOYfdQ981bLreC3rEPnKDJUtYd7f2m2LFYrdN7pRzrba975wDK8tjl3Nt/DXLe+HIaf6chTvMpGZEBnX64h+RHOYidUdI1QDY4K9j4dae2ectIkhH+HKR9J3uQpkdhCunLOAs2JK+6Pb9LCgLtYln9m3B3BOq04qdBx07J/SZoMipsJthxFzny41AXRbBumjrLNnBeq4L6C3riHpa32DGXu474G9XW94GItg9gSOPCvzPKLBHpirJ6+1Q7VIIKTbY9qd3FZZfv/Bu7NxSt/X9NpYKJYjtX6klty6s50eovMO9Ghtwa7rLScnVbqjJguV4iqK3wJaIaHMirBaxe4SuLrS8gd5ihIBTckNzLf8Ho/ArF3NwzyWqzGdH9/Wtg+ayBq3kXwvfwuO2UzBwvWugw7aO24lP3KfYMWMX+Dd9qJBBgFn2UhbOv39N86IZVhXt2VIHkHqnf9YMt/GAxKBh/JxJMkvTxAlhvSb6vKSeWLZ9rkBMR17NDouudDr91IagYDtJDDobmam3eyPrhShU3iDg1Tfe9vaz3CC8dzxAuc76TJkEH8eQRH4+ODB0b/XiEHciH+psLIN/CuUSoPWc1iICUJ10YaJJsak3gMyQHYYXen6q474s1MWKj92SdFzZkgm/hcNSxap7aTD1+IY/C0KSJieBOFhND0fdL+s4yiC5Nr8inQGg9/bIzKBYS80Vx1JuH+2nnPbUARvDhCeC4Xu4nD6yCZn4LNksEc/yQJqwZqA39QWazTJCCBhT419jqXznZFO1jDOr3PIABGivvKJl1PL5w+wvAU1HEpetrv/2gMY4CTAEVQTGLWPYPBfTfrZGca2mhJF3/FANmz0j5dpg28lD8jQ/O12eXtKbK2JrzLVrytxFGek0kVR8P0wTUkle3FnKnm38JA8vKSO++S4jkJm4WxldVheTc08iWtgDYXFjufPBa+tOfFFS4f+Gaqi0vgmm/95et4ELPIZj2c1CKc27V2AewBjeCxMz11TeHZgdwDH+sO4b7Vm+GGQwGZJtfTaaxf+jgw4aQzFo3py7iKDQ8lRHGT1duNK+4yl9bUkGljjDOt0ySO7Ryn9daB2GzV4us4kY8OHvDYgfj2quzIAEPQ5SOILw8D04g64+pPvmcjoQZaRh2JgmECYDY0ggJfNPD75NpyWukKEuF1i5IoXk33rzMdL++tMK54YhM8IgMwaWIGkWsz5Q2qs/ExU90r1NvkcBt9LbENHhHtqzd0xpowhX26HE3ZQ9haIN9J6wPwuTx7wu8/MbJ7blZ6siWgNwL9JaXlQ2AjdT9GtTfcIwABb9yH9jogeCMa00rJGUd5M6WzVsyPnDRuAWhhg2Qz+fHiPeuvz40sxPaOr/NxRU2mqlnU3Pb6KF/GvmbA+HsB2v5JVXpQ1SRrP68V8apiG0zwdRa3wcYAfiUbbUzmp8tHi282QxG8Fjd2Hf6E7aD8jbiVHRNr9NZHxgubhFDmczy37IYgLxln7bbGyKM/Sjto3WbikkfuN3HqelHdhJ5exWD924/6JGNfdo3mLZhVPsxYSNZdlTKsLsgkEdm829oh19lGFjUpmtgOx8HJzUDDSlFDcmY3Xfm6fkxVQEFkfwQ0lz8b6QEVO3vaLazDEbh9EEH5ElaJig5BJ2BYiCGbrM+N/4W8e0uWJExXEGnOgB9Y6gl5PaXp+7lVCvIuOsXYBeO1Y6oGFdDbkP2yFd4JH0MODOoZpa4xapAcml6AiwWKOS6b3TqVWRWeioLkONGa+58oByF5lz+BSwInLOTIU5Ztg6HZQbukEJNZ0ZfEe64N9mfMbAZhHN1ixd3LcGrHJAMAd9Z5W3CmxOKLhsRNAZ3+cWv7JXIzxMZ8DcIcqt/X4FWrYuSbpkyCePa64GvtCp
xlV8RVFmQlrOjnFm4w8ghS/jey/Kbz9eyTt8c50wM/lYR5K9fjJS17OEs/aRIJJ/JVwztCuGI+VHR/aTTBRV6oSWbXazVs/jgsBSknHkD4GpS9tMyf/9vxrTXJ7Is1aOpzHTVEbjBimKpuxbRuijJ9QIXhnzkUfzL3FA1F42rakgmTF1GBfPfD1mptG3Kokwm2zVBTbkHx7H9KmA3whoGmg0afP484OqOAaDjkzkGBPT7WExGYBL0p+9dKjH8xI5fYYcB1Shb8esFet3wCdhXJl6GQg7JIXyZxI1s195b5ed4oF+0iSfxUk79hykxgQb5JHYJK8SaenmWTgdABijAoVLoeLwj3LNOyrpFxQckUgeuP1nR6OWuf5NYQdixRQ/G5jxfVekgiRkDA9gaavr2ANBn7ugoQ2z6+k7hwK2KgBobce6AakNsF+N7myi06ggnxU038uW5PbYWe6mtgPhVzP4Gw72h4OgubdK2Lon7Ev7ZX5apIn3cHiTpdspbXZvE6Cai2TIEg2kjoXWNGHgsowYsJEylMT+PVUIrFCh4DmdKNLxUh7l4OZEOQfAf45oyZb30wt2MZ/OcS5pSm51As+XQmC0fVV8krobCKgahg2yb580UwHKwTQBnfC2cEoeVcyXQr+9UbKDGgd1hORhIlBWy29R+wqP7XtcSeXSdD4N4ZezXH6TPCygpU5A95xkx6wQgXgDyitn3kizKmrR5g2JZPkvut5hhSz5drv9Wi1HDTLzLnX8z8oOb0ODtvFU/Wn+TVHCgeRmT6iJKEDBEi7Sc/Eb6+HRwkB71jt4EE7mOQ5C8l+eXF6Xm4X6xr+xYUEv9tmZBn7AK7bem8QVKGNZQXj1Jkj/KD2LAhDLAoYECmje+bb609qsbgaMaifPNEowXqkVs84XXmV3NIMmRk3Z7LjBvflqueIgQzCznVwD1QUCGVxqjrXIfdG9phzdjQ36GkmnXogtl73eHA2Lh4KeIkIAt4BB/wvquAMDBpKJ3D9lxGHGF2xIoikBebe4LiM2hAByJAvQyP3OFWtJLFRO0nUADDH9fiNjNMtKoidJYXmGHQxiuzPF68aOaldvrPTkIUiGaLufkR/oNC7mTxo2nqiM5yf0UwkBfCJjbJK7gFbY8W67ko2B6uNwcYjpCU5+S1YDRKbjcTBuxvp8HT7SZyeF0K1oKwIjCteLge1uIp7KdtFRQzMpcs8jQEFc2Q+ZYPGunG77gYbBntSE1Af+II2K3x4I0+Zo6kdxoQ74E/fnbg5kzn20vDZCQtQgAXq/HJ422aErexL7e/rBS8ofaVGoYX6hdOQ3RzK9jmNXSCLiKCArrpskZCQizGsdpnz8GUgCCYvJJNOlsrfnJLukxCUncdF57MF1EMcke5EuSNf6zk8ZDBexXicmhB6eyrneLPhHcQne3JpsBz9nPwe3Q6d2Rjpq6LiaZ7KwQjiIeaJdS6yaL4Qan3hc0J6U1O4kvySF8Qakt2T6EdMD70EOW5mLfVPtOyCpafmljaVDH4ay4B9hoXclLkTzyfcUk7wcKMzGDfD/83grD+Uu+2ZZrv2ERvqTkXlcYky+I0lgF09Lz/EIH9zzRgqSTTQKHi+s2Sa0AA//WzH4chdGko+TeSV5uCdnOO4intfuM4TownyzFO2F5no/NAfu0RIVM9JGCuWHioLHDJ3Gk3bSpjfYMU8LIKnTHE+wd5QSVPgLgLrKsKy74c0GrgOC520vuimdF9f4uWGPDG8ymQUiXvC8hmMetacEXiycKoRlCD85cZNfpGm7K73JbaeT68kMrocQ+Ufs+EMNr/JYmidMKe+TY9z7rbt4l+OayUwFjplMU5ucRBZ4ElKMEnYVwntbHLU2/mqo4EsSuhl4G2Ruq6dUFkUGwLPiVoU7VaWcpuEYRnLUQDvHW7Ys4yPRMjtYeG/ZMYv5D1GgmgPDmsEvdcXc2uhHHy9BksG16EkE6+5c9uhTO9EPry91fx60mc7j4h6NKU/obHmJPI5hpJlP9TdVd+lBgkPTzNfF6eF1pB0BsBfKbpNEFUBENHpOO9lpunUWPiuex9nsnX2iy2eL7MaYrN2iMf1fLCYpXAVZ7U0CUTNBkQWQ7rB7dcS5kD3gbz57kRn40tJJS+dhkGCOHGMWU5nxwfSttZVEnrwCKWiNy5aguxMmM5HtdbHoc9ceP0UEsKj6lpTgTgno8NuYlK9pcI4I22pEHar2u7z6aJMFRu4IFvLlkZbTY3mcy17JCvNH/zKj3brsPeK9sAvzGouh6Q2P06FP8+w7RSHmIJYsTPKB2l75i/vZxtu1g/Vja4/FFqjFABlj0sjBEka2lF1VhP07qNxH1QFFFAiiP+/6rX+MXxW5zaLps4yBFsDiBB7c+YiaN4+m5Xjj0cGUQlLhYUJf0MZruNQ0PmgNTUyIZK0OZORwTyI71d8fm17b7r/2Q8twVIALiLRv8yXQ6SRLrofRNjN+8dmGkX4hfIAegst+6R5qSyaFVPJfVPdJq19/3sWQfdGruvK174anvFwrviDrasMfLn09EoA94NdQq/xLEKH0bV2pID0oL9jkiyDLJuXZXZRL/G2jPOmFPUztj6x/qeCd02dZRHlH50sdEGV0bZLJWcQ9lS2UU/6UJEYH7q6t2IQpV41Mr7aMfgtBMjmCXOLuMX4V5/Cis7oHig7yC5iA2WhkF6MR5pMcEjFobCsMX1NcDa7edHEjpFy2aY+jX7gOcM5brxxAnvB6Vyl0gE3ryscPORrkdVBgoLrd2v5Aq7RcfnZI3cGoEm8qG00VJcrwFJvC9fAQm5iHPgb1pqnIbCAowryIm1fj2pLkpVHRC6907CbODk+gg6ruh5heHHf7wrutNfFhV93CU6dxOcrXoSAdBAxAN/bItNFkufDY6A28Vclu2ROfN0bKqxQJlbICRZvtyx37Lp7PGaV9xW3euElu5nVwwuZQ7O8IKUlXi3Lf5CzO90nwv4a8PTmfngjfhPmtVSS9SdDlLOJGgVfMGraN3uvyFgtjQdMh9n8vC/nUs7BkiOkvcqpPLkZjxepX2zQv2AS37TBlFj07J4HXX476A0d+ZYr6XL3vcjIPGsD2dXVXpbz8jCVgt7wWoXUEG2brozhjPXugR3ag4y4swD5mFtVFLsHNcqO2xNZ3f+QlhRNNazvSuNpr9QOjCowpDd3t4eXxyCBvttKz2EX4qDiaH70LXgtViOOPl1SOUL2Gu4AzL2HVOSePfOOt5cy+xzgA6RrX7jL3EYxa6RXHBIzYn9osIRC4O8g3ic6yVFYhs4+CO+pt9ZR/7ut/h5AA7+duKWzFBy5gIg1FpVpr4tLmeZvRNY6WDFLus07eQjb+W7b1CYF4wmw37xkwP30gSlVDe3DUjf+PMxF9NIfDCbkcWZ+hxvIEbaWKIcf8lHqnSy4FQdz7IaRSLwz2rDQqVc8/fUYrPyAuucz0wxwXPl4hzYoXyK874YtrK38evp1cZGh066EH7DvF4iR/VFhfxFZwp9xXqJnKZIWnVGe+8brwq9Ms4Sd2IcMEpKebBGXUKMU7xJJDUq8vTpuh/BVc+nZ
fSWszDLISUEX1t/qYQnj408FbYc7/4ln/5NH845XLcaZ+TLOV2RgoabsV1r/aEwEqhJEvCZAUITQAUt3jGhcQ4qap0KYelVpMJmgYq32K0Te2P95lvsBnTxXJ28H0GHyQv+DzgVnRg34E29D7Kb+TtYhi02WO9I0647fd5CaCtQHQmaKSI0jgiPSfKUvzKS6FBuq08h+qqeEbsnCcgccamxg21s6kOMWFp/S/OUQ5Xs7+Sk7AFfy1P01/KqowJE4LbG9DS8ROdLcPckO3TqQlTCNCgl1GL+jvj3eTAc8Ph3Uwwz8/4+DPj/b1gZeZadpWLyi21kk7Y4R29rkJ7+9M0vKRILSBcwLP/0nRdy5IqOfCXoPGPeO+hG3jDe+/5+qXO3I2JmBhzopsqVFJmSiW5T49BhLCpXaMHr6nsWkPQ0uQ9mq05CEsZoFonSdYpbUXOeTf+gqTA0KrUqtzgK0pH6VjhtkD0BGFf//HYuPkKGmz5Wb5sC2LR89/o4SY1huCsvfmI5nWSTPJhMERo8JtYq7x7w7RjNTnSUsW7XsgI5CeC1A+oP3pm2ahccCJwtM1QlESMrzpUVFGeauMzMHxRRpWYN+p9ozHe8V6sNQaKip/eGOfNpiGSZBXx3+TpA2E6x4dZ/XO1SZNcZYeiEfB91AjvJkdWX6bdvke+ke0aHh+Rhjz/BSQr6ow60YbpUUS5wEJYS4wH1Jyuwz/hcVr/1ewrz2wlaGumyXDEj0d91Do3PAaF8NDcSayUuzIN41hc7tGQpnGsfTK3gD5nSCGuymO1WZMzWxfQx+v6N7yUmK/dchCav+v8/IOjwwvWA97GPdf1MHz4SxI3GF+R/J8mbHoowazP7vychJJ5+pe7pYwQD2XGD1rL9v153WW2VQmxLKWz91VdDwTHKk/E8QGXwt9nohCV5WlLB3W4wlms9fpGIEcQ6b92uu+XirI8aZqLcdcVZjJyoC9Zaa3nN0kRAhmNDDNdF8hre80h1CmrU2fdWDkqaLwe/ehKKokkcMXren0wZOon9AfJGf+qvER6eRAusZ/ZFrpJ8xctbkvy6mW4JQpn9GImLMpOuv0Edt/Qn6owAjHe3dehOQ/P39QoVAQOqdqqWRuWkpP0jMNQ/+R35vMfaKJ+N8Hu8fbsH5j/YCadgfCSNIBHU5Rt3SY7PJ8dGjwEccr6XgMO9aBHk84eJeHhT9BH84OWzTb3wKYLymc953gD8qokXKMFJcjfIFbdPt0OP6RPvg4ylCxdz02mFyZ7moo6OFUl+jpckH8dkDp5uQhKiBtWFTkafSxp2+9y2mzoGQseQzExkXWAlVqt7e9z/GnYbILcLU5imid/FWrXhbvkNOcrRsBBPol/fqgQm8vQOHBl9BlbWBPhky/1kX563Sw8+d8NHYFvXb2T7f6uNrEwAl/X13xwllNElvKAFKH4yiKL1nPIcNh8g7rQZ0e4Gm+28/WZc8+4QlbQSVV6tIwfmrgnxkcfZZsmzz9BpcJJuIfQHQdrZrWgdOWSbs3ogXpim3nAjl+KtogTtywR00H+/rjtl1d4+UtvCuOo4kc6kxtCnMckGVQc/FphML0txTDeoWs69ePK9bWEVaDEsLfzXXyF/DTjpQVYGs6x+gftpluTGjIb2LpzZY5wUeNJ4V0T2r+yriJP7VbR6payzBQ2DpBvW07Ksg6/BoMovyyK/2Z1GO5rIQePqQXKsttJ9Xie4O+/caMos5o0vIWfMXZXps7MryJLcUUrJWxH6Ge3DSC9BxXWPgqmVQ8G0nrSPK+ZsY5iVqB3MlK5+fqQGwg5pwNwIsv4UTqct9Egp/JSyk+1yp+fxzagRru4Qvsv2aYZQj0HMdIEf1NtMK32ePMMVzKxkdvXEiOTC+AsCthNmk+1UXYGXEqiuIGirlje8wFA4PvnuKKqePrw2CoSf+MFon3GRqKIoZxcux51xEK9mHQ6ylnGiKlYaEP4DkveFZWPYnhZqPWePqzGbmu6JaSdNuSmzEqsWpYdtj9M/0EWXspOlzRKXa9p3ZN5YbrnFotJRZ4Xa3cc6SHOFyk2fDGT27f1vbA/1LWohu3UxNziqh73ijfC8CjOF8Eo6c8bn8HhU615wfnRymEQOKirsBpF2p/461f8XP9mvmbSX7XXVB6sxll+sV/KNx/D/8bWov91NIrFlykEhT5MXwajmBnc1GRq1ZbceFX/6jIf/m8mEiCzVICqTyhlG0HMEB1Fo8vrorn3yZn/TRmYNFv8G0QSv8wk90jrd2hpjizNtEB8CFJWOrhTD0jtX2umpUPOaqrF1CPUexuAP7GlWJAs6zUSsczNrtNQi9ACDdY7h/xhIKcolc43czaZBSdZzg8Kp+bPRC4Y9Ndyy3lDGPTXD6e2DjKjD1F/JNoQRZ/x+ZmWrYrQmRBowPu5V9twS/2L+Ua4acboBZZB6xaZR/hUyDngyg/kx4MzaD7XrYPx/kEHNV067PjWV68cwQ94uHMq6K9SeZRhXyBVmKYvLkbqC4b/yOKUUYXXCyjl58rwUcqWhckUMaTqJDNGSbbq01fa3itNmnVoXfNso8ULFJR5rSlsB/1dvQpF2YjWRz0zS4AwHc/K2WtNfaIJcoBFIH53aj0DzSQ9AP8VvSyoZ4lTVmwtbUwrWwJEC1SwJFHhDW9MYfLuMYm4NvLKDziB2Puzio0bAN5CjOAVWTeaZvQL3kgLAY5sbPBqj+3wcWb4WfHWW4gtuwj3cLyv+FgmNMxFJVJFBw3OVwiikrgXAv1mP71nZOz5cFB0vB4zHoMEw5DEBDcbPrQf0l5pNsBsvoUcLj1IsfnVIsFbLs4vFFIO56Nn144Ude6SUkIHUqhOsxPHly91gWkb5ClTC3zwJ8d99jIa1aE3B9NloiWu1WCd8D7dbP2n779K9crfEcOeaTKNKY7IlXLNsprsVH3EfTnRK+iRKjvWM0rHEJGSp4pKKLIv382xy9X2zh3lx+jhyAk6QQLxuhPcb5kHH+gYKHSefVsJ7eXk/Dsf/qrPW4MDky/XXGGFmCNja5LHLaqesLiA884PBb6rjwybniSLIGeLSc66F1QA8c7j/F3mwYzksZjT2L7N/TcTjl7vnLfM5G8wVMum5hAAW80naVXOcuUelcW3s/i7T04/ynfJhUWtwt5bv+Ol1LGNhpGK40c5JNyEl1ODsZemxaU8jVD2RUaJi8zMpZpP0RWpnlggQfQC0XVO58h64zIfNUzmGUtuIz65s7ZOX4O+9Q7dsu952NJyL8owXKeXTWIXO8VxOr67XVb5F2IYENMdb/fZSbgoVAr8mbsmcuZkKD7PiTgGVyHZyLQ3uMXjSv5B6ddt+UZDf7mFluUdyrwE8CKmPTcnfJ1ZJvhadwcl63YsXaLdiuf1oXHL6edJuX4uVdLXZ/aMv862IfK1ozCI0+Y6xYbWq5gA58vMluFf6uM/aguxeiJ9amGoqkMM2jZqbgQoMmzjNviuIS0E95q0XBnUhMfV4eKHL+tSHcWDAK+V+Zy/9VzZjf4SFPVS97/xJEC4a6UH/bbHNTQKzHA6VZiNbm+2cpJCGTVT5
DsQAbGfBDUgqqgtJEBsLLDLZTIeVqf/7q8mctjfQLM7eoWiXv5LHz5PcxWBdfhfK2hm4BJbuKQXWjS5PtVfWoOHUqvLW4MhCKa0u7YVu+21c/8JsMFLcG4uZpDYmJyVKcmm3XndGW10wPcIRV2+aFtqz1yia9F1HPM2ho/+l2MDUSFZIqn5WvYk+3KpLs9UG+qNZbXdes5k8os6ml6vyKbtsqVsYABOcRJGoVWeN5+/LiObN1pteVMdpyWv5f8ldotwFxGy9pebnjTZ663ijaJ0On55OiTOff/YwrIQ9WIY7u96gQLtM9W5aalFcgX+UxuGdud+kYfpNvvIkkYwOZ4IKKvtFdUKXgACtjLUf5jO6aDwEBMEmjkvVdlzPXY7iU6+l1scFqJ+uhzIF7Cf6Vt9veyy3zZU5rKUBNCDRcblq4UDhkas2VppTpl6t9vBy1X/+o+S9B6Lal9mlFwHkf3du5PSupWgsShfbSHpTUuzv4KfCAK+n5ZVOg4hXFnwd21/L/wFYxLl3OaS9gjEYoyfb2YDcdhx2adErhfUcYFq64VF0ZI4mB2PaeIKmG90/bX6Bqkm1OX/bqU5PlMkQy1eAEElVkp5jF+3VbsMGrmJv8BvqMpN62kZHRwASNIdQhBSNgvyGFzTAimqKmZkI3AQDOIe1Atr6NetV04Jgg5fS8fY6mxIJSqv0N3XeqSBcmLnHxGpY6jXXflrq1sw9eu5B3e44umLz8Zz/YX6vXNzmY+JoGx+BBjmu/6Qv5v3xfz8QV1EFaeoZoQSEmo2PRpfX7N1zt4J8G0diEpKjFLhueA2pC27artfqPUPtzDkxxEtNURuynv97IeAEX0sGSfk1IQ0QuKvWh05EDLteWO74CQAg0S8y2+x5Gv2qg6yokub+EkBFeaN4VV4M+zDFaftvjDpu/696O0ZQZuB953Qll07+19LYxUa2aFYijUeq8eAmtFo9d5IUV/46xr+nX7ipo7fqSmRH9zG4qw2d17q1/Vsj9KDuJ7UThehUGXfv1HLdTP+zEn6+h0GC9XalmN41GgfdN1nIPZaEewrjgdumpX2BAxghqHrVUCr+X5GETscFcPrSItK8gs8Uo3a3A9SbtphfbUgJtAl8kV/Y5wb9RgfLZo0mCXASQOyjIAvMmjCCI9GdnrVgmTZG0E05qeQg6guy5hV/mFxfTWv0q3xv4Yy85odhRcf/9CY7KhuFT7vyd6Z1O5+6SMOc3MuwwU878g7SpBa/hv8ADabADg/93udfo502Hpt6yldl8DlKMzr0YB5WmfkKTjoOetInr5mFbQ6/qWHxKxk5I0IjIdH7mk2T2dUyDzboviE2aVHRgUz0c+njK4Zga+Wk+Ahjtl//b04jwKGgAYY8HMJ4edZueAL5Kq1Rk5cppljqHIbsyyk1dBdT0sorHOTQwieSy5r+7/4CB6+CgRi+TK8d5lgJjqTVKcVIDxuLHD9ftiLZe7YW2Min9M7Sj2+oaUPrQerBRy7gibHYEOnU+JmC335s/YE9FB2JK05/q8+Pv/tjTJSywudzohlZXGGviIzzpXPPdTyLcOfRi7sY7aeLk2+pZ/bFNSxV/7dcCv316lipLXO92gojNaV6k+6Z3E5PpL+E72gRfNEVe3j6ld8aJiWxZ1aqEEyu5lMSeueuA7NxrLx5Rfc8dVuPff/AbxsL1OFkmr+CzlWuYY2HmBH7LJhPuyqr82CgUsClgYW6fxTQPV+pqM+bRTUgM1OuHCQrFGCALqU7Tmbr2sQG6GEqFVgR6cSoRURwOvQX+2DZVSpR0AJNyu3LZ0O5+5P11KM73H3yV/D36xu60f2hk9RaBal/s+uacE9K9AEG+aVE7Wq32tGWZVI462kIyYJ0fwNLroJv0IXcWsMOZ3rBHeDOSWF4c2sbsu4FkbUCPXa1SFapJCNTta1dtqBrh6JJQ5aaTg+PKHIvggZ6y2Eb77AZKFGGQA1afjegw5uuQiScyJFyjjOiLigGlHqAxCeUii82Ne6ucw5k3EBY/RoxoEjy6hVIzVhrJJsG/dO4+pZGmpK/n3WO9Q5KltaVtkKBM2WW1QU2zQfuabVcrJNBvX+WjYwl5UVNvRUyV5/TPONI1he0zuk2P5xMH7gzKprvz6Ng50oXY2J5Yj4eUMloPK/BZ4/rV2whNwMJUhxCpAoCRTE9C1W88tfbw0+isTOJ78SwDu78pvpbgpDijgCXC2P5zzqvfxLPTlxMulCZSg2MC2chFP7v57g4+dAIENXA6McJI1omSvQf6fqjp7hSmMWEN8n2Mj3bf1s8qY3VhI1ZCAls3aTSohOCHIax7dD+qVT9bqsLC9FZBMXaf9jd5Xj10yV9WZ1QVELgbs/+TFVW2xC7I6YgVJhgKGssG30ya0ld/2CHFH/XZSEl35VSQKtKwoKO4Q1NjhSrFBqImi1VN8MAVKdjhFgSaQB/CdusoW9Y8DoEPq0nJYRR5mr8kWJS0Kaym4fCiml7UOeJzQDx8ecHzIwu8PTLXcqfiCWdPBOqWKkp7/TBNf9LobRCo6V6cA3VvuCRcmlgKAjt+zFQATDH9/ZZ8Wtp3t/ZrxdIZYiNg8158igHxkiumFGDbTvbNz34M1024sFyurzx76BjRZGh/1lOTfKJgqpUlQOtAQSLCtAGdCfEAhodBthtUvHB7Qp+5N+wxqYgmUXgwMSoWCDDuQ9zDDV+2huSV+Iv3qWBa+5ViXqYBQDkrxtROdUzFNrIvw7VsC3gqBJa1xXKtcqR59tqYX2K7hb4vHmu1peEVRH1ZqaroorWxU/nV42jfziztzJ1KE7SIMPjhjltY8PxV0LO/2KiVHV6Uea1blkM9geENpyTEEguBPzyDItigFSn2r84cgHLYQaTxoCjMB8v9XXSvj7JQOPIa+zsw5iePw/5eKvn7tcEyaUixv2AWKMWJwNQqDWlQ3PT2wL8G8uFjYRhx49bGSdZ31sfgSAxD6swv7i0H8KLQVt2GFYY2iytCDxtvoVyNJXKMf3QUgRMN0gQnfdxjriD538tmIL3zJdMpPWlwf0s0tABFeTZL9uwis6DWdeNGM19r5rb4r5y6zievrqKPKRT74JxT0spKe5rQ8ui7IXZaRVds11eyDCL+txEtWlpuaeRnRJWnb2z7FzqUnVN0gQM9fqQMDp0sPuNPUj/HUH2gBio40btFsH50X+iHKyYrLa41C6pJ/MFOG4jRamCIEsCBYuKbnzTBooRVubd8vKlRQEvj2C+EHOjUgvq34fvFxA5p054lGf+nBgZFJ7f9B77d+WU787J5IUZ/ziKR41Sus1Okf+DvsyPWQo3h8Qiymc1ETrkhCUzanuwisvksa59seA9jIqq8/D/J1MBvtgA4q2OsLqlCD3rhfN/jeTR8gXGGa+sMNe9MdayBNysQ/Gk162X7H0vi99Vf/gJtLIOsncz28WRxzInTPWe1RbiXTRejPy1y9cjzqjIV4nKyfeynBM1J6pZOf2Rj1uAvHWt7TGMk+joYGt1ma2m3PiuCVZUxhY6K/Wixa2r5s6cmmJX0R8kUF7yVeR7QdNP+tjlnc6
2+YVlqMbP7UZiQIYkMplefGDuIcJnbJiqdeXIq+TlxpSZfqLYp/QUzzdLEb/ICoyebzIx0ACRJWQb5qwVR79zYU9OyqFVFgXI5EbxbKzafevHWgj8UKOCzFYM5DjuYdDtS2zemfwSOfbAV2J2QNzoHVRPkV2F2rWQj4aO0fmNzcR2MtqyAxJEB6Ffp3mRilGEfxtmPxlaUEaAhRDMZw/UPrfgLSqJgyDsKIfA5gOECeKWm9o2mBoKyyKTiqrfItcl+bsK1NovWCSJ2pQALByYMYLDuOSOLWOUQm2L3xxC0QfS2xSrevwl07ZbILYU+bHAjDYma7pO3Yq6HBJ/SmsNe4bouCwhWMVeXGyR8kmBNX9dsS5jkKMe+49NeP/ykl0XQKDPJmzsEb4ut49Z5nk9e70SSfSql61c/+qXrS9LWVG8ICVQa1n5GQ9xmEUTHJl3bdVwBCxMooNmVo11fMgExyugdtHJZx+1BKKVNMYoC8lEcX0Lh6Spcs6AzAMxPgbrwquHNpYH5O5e0znWkYGpVrIvGF/U7+7bNKuPAQ1vYI9JO3Qz8yMH/t39/RDUpIiwasUN+u/rrMApJQDuz+DzYhMjTNZ1zr9lRn5340W+mXr2yo9zpLEid45X6ki3wD30z+h5a6cGeh9mOPp7VeaFzs5e3/40rG/JXjfU5LmGnpqRi84wuQCw0kQ/GSBmroi6BpW6C9W32UuccxST/J+2hqRM46yPqLMHuNRCo1x55MNcvpjCRCLHkzDmcBE9FM+S6YGxkOWqk5mGyy0gVbGYtLqWtd25mdZvy1FoCJ0G3BGr01JJBpuGMH5jb5jaU/yrTqfpP/pd246QgSRUUWpJW+a569nEYZE0h16mShLjodMvfb3GE1C3OcUxoUr7+uoqjedbYiUSkNwPXzt6N9aksKQUxCYMVjmn2d8bB7TnVz4brCSvS7+R++/gqOfrHIZGiP6TAjMXrtmMGRrDH5PfNRWwvBY8WgjrzccxMnTKA9i8+jIolsJcGwkl/p0YAjbAcTCctrYkG/Vr0MkV9P78XC5zOd1PS7k8HfayssVQgUoB4L3LRrLv+pOJj0NbSGCsPDiAomHCPO/QVa/nilKG81C++YiJwcY+s5/fqp4ueqHH40RFUHyZeTr0dxXV7IWVfhyhjAn/YUw+LVJe2l6cCDQD5BUVs4padG+nNKj0C6aLCyLQv3WmdOc97y871jmgWjnGTe4SP+P5JfUiqXpKLc0Rm+w7/2uadu8uOs6B08KIZy1PSfEFzN4wqdnn59bt82HnpuPhA4+vMOGD5/UMJbzOU7CXytkpuHL/WCzgnU5jCbnN4zQIQ6qVZDAnwArHIID1p+/SqdM+yv+87MqynF9EUVKsG+YhYiiB7ecpPR7fn6cbNGkMfHnG+X0Mp4u6DiNZsMxEQovGP2FSr8WuczJtksTds8lSctqLnyjwlOM4Bzsm4PpoVYtz/DwoC+IgN0yNM22wChb+Dcyp/oSoNrOvN2/MS5znHtGHpVOqTNvDPKB6nCFB2khAJ+AlBpThma5xYYD+yEnoFASr6keZgE98EdG0omIgSyl1D+cZkUHTAafHvv1fqP3qoT5C4Gmf5MMMFGHlfHDuJGUmA/FEKjUjac+qIjSe1WDOGI4BkMS/mXvGivNjFCQWWYMMqYee9IWP6H5/TJ+52NOhWgOBYJIEtC+FGpjHCr72tKBJyFFPbW4ri7+KudeUbDmDXndXW5Ov+jyMjovmnUjtyhvSsOczWhLoM3IG+wNXvCh/7JQsCxxMNYKELS95LhmxpBU7IR5XQzNj7sj2YMv+CNavP5M4Z1HqDwagQVOkYD8IJe+w5aXLcsrg6zn3oT5aYQfNCx0s/ne7ig0O4nicZ1sqPoVC8NZPyhH17HTIPjHAZTIhilgE89kBwpP6B+Db5VPle/ClJ/ol2zu4rVw+j3t1AYjo8XnvG0amL7NtaZLK0lqDkwzROHC9dyODlDo5DExA4FX1uS0VqqMEjXxLtj6eQ8bODesX7+b7huUaREmhaL+fTj9RUqw5KCEJ7SU1BlRXhAc0NWFE4d/LkcUiRc80YNwrP4izd1nex1kZnDhRbOEr3O0obBLVf7dcy1/n9hAb37UW0cpt2Cp1mEos6GhY1K8D3XEPJn64rToZTjK8NVrgh3hOezERkZFcr3KNow14GLxglpxemCn9baGXKvo8PXwbxRPJ5xMKowr+36xbu0lcCegH6qXe5KbtJYhWkHJ2++WlScX0+cmrDV48Roy555ZSPdQ/E3bVoWbUqDlQz5p1GulxYXGZQMyWuAFv8oBipwh0uD9GusWaYLpoEBBncfKNRXQcE3A6I4mYjXNrp+VPCRf+nD5niCRGeO8/X6dik62NZI+R9GDdGYZkDWgEsRgDGzUvC/11DkbhCSyuET8b7rW9qxAYKwckZbDWKy8Pvl+gSbWcWb04Z3+/fT3jNY5BdSqkxS2LQOKHk+QqFXdJOgLENkF/p6+kZ26v6+gL+6xak2Za3VfsP2aPUu0+XCcq2JQPkdaACYWwjz937PvP6C6m3nnod8cwVtv/OuIxmHy5A/sh/h9s0AbS1r/+3QFiQtMj+YZAHNoocEGUq4shVZFiYFLVT4pRw0vPaoS0uDpY7FZgFdqHKu9t2fCJp7kLDC2lGnUr5kB/Dbx0y0Gde4FNFivWK3NB/nMwlSDenvADYdGt851cmnyUgYjkcPn73F49+Yz2p+TCkfq693EJBHwOQVxATnm+dpRyHW8wP73qR13IB+XncQJWRU+bCqepkUMXW6j8qPQvv+t0L6OZpmXedeagtC8MD4xmzbMohXUOab67AXx//a5VBluZucYYUs4IFcxvErvRaAvZjIOv/phQ+2/8Rj8KGtH2l8fWAp/Qp4XRBSuvHdHY/kvAfXJ4XU5dPCZuqIwSEG04zpjvyJ/zhevMyqD2wVwQAJwjFDttv8+V75KAibtkW5YjqrkhqXZUGXnv6wK1s/4pLGiDV5yaNtxQehRPYP/DJqN+AzbavBhSKcshhEjzHnU4Jz838pkEsFJioa2pJ0jMuqS14VxOgUEgliZwLmwHkYhLLj6CW84FKtSFJmWczPPZnzJpgiJJ65bZJwcCr2toD/5x7YM7I0fj1WMveE0ldgIswnsPN83cYhtb1RrjavA11Ma4iyUA9mGbv811wxjT2ja7COq3/D5zJgsVc8nTx408XXvKuu1HvPqZS1FJMvzHZE3SpI1LXsAp557LtBBpW3IESF5XsZ84f9wO6lgVK59BsZdw16WL7QPPYkomBGDcYJqCN1N19mSz3AkUdvmqqnjaddnwmDI0IVzKronN7wxVXuEf2ORGYmjuTQj2JouY8kabXX0myBtr1qq7th0YnXKIUFKtoyZLIFsMPXbD37GnzZj3IVY2qaUYRPQ7I3mr52nROj2WZ2fGLazoPvd2/A3vN5s7LChVVj+gMnlwkdfFiAklMX/v1k2O3qJiaomh2b1XiDoFweZWw1I5rHJVYdtRhmP8QAfYCgEV0Z3JAPKXvxAUozvfb9zzTcOl6o4w/zTxyy4tjEjSje
smzSKrLFzmX30WpdhxVCkTPg3JXjV3wjCHycmQwJ/mfghXRtF123Heyexxp1teChUAMeSIJ0/ANFC+s5+TQfljw9/jZ3380z0hTAwLD3Pg9Ijaqb6FbGIr6lD1mbTmTeJI2H24/t6gEeAUaQmpwpvcsTZlw+gOgX/vEFb3YRhTkOnAoziEz6G8leN0ev8yOjQHGGP4SNe79M2DekH/7m+V45NLcbg4gru2GzvSQE6dBq0Vnpkb7AUS9ZBLz6ocvqBHiptElLYa+CSA+97YABTR7FkGzIVClR9hT4jEsCvUSaUC6g75NiHvwDWEjpxjaV2Uy7tRnseL3YC3/mIhIxdLGGU7L34xys0mpK9L8DRLEYKLo1oyg/OgLV5orIb7U/JWGdkFHx+L4XcaNi7vwgGIs7rfb+ppjTG3xQa9xJDDmBOm/btCxHRjNRCbvpzjQmvBA+IPOzyi7nGsk5HBK4WGFXWcTXwzuB4iskCkrSSzjykeMQG1ZWZoiNNYTOzfkdHN303O/kZL5JCPVvZ4qSBIrv9cOOJ7JQPfwQFtYdT4jPRGoMmNHX0x9UAapTVqjI9DiI8uQlzDdxb51DEesEJZ4d0DjIxGK5OX8RkaSW0gGfWYU4Osuz8/SYXNlN/525010o9jmW5GwgNYg8ReHit7YjXZAdE78TyDQvSLPTBEUv1PFJX1EUx0AUgssRTeq7PoEkyMUk2GF4r7/JkoDLnq3VmGUQ8nRm02t9OD0ysM3U71yXB8YkCnsfy6grFzzCnPCl79oSMJVBaxpit9cY93dqOFZJ+2+9gghi/oR/No5rjfL7s5XY+pJCrFIrcKFVOdn1uScuXpgBhda7Cz6aiSa4f2O8lUgd1k/CeDX9mZjEszx2oej8FPR6FvCoMx58Zo7/IGOWyFLasL8GDbtdMMXrvR5AD2G+qJcDpa+S5WYBeS+IYRcW/STHMfTFg6N6zpXPxFppxOusv2i8YMfOYCa9Ljgcgf73L5sH4gX5OYSjPXZXHpsF4h2cu5xT5dHhQwYkAHasNWUMiBOo3nPAnn+pi95OTJEYSLSL8yqrzUXx2EQwAEeZdALtNIkERCwIW+Bzg9/+KMWylUYZI/EwgMeKKvU1HU5CHumVNbLCHHlq8IdaGEJqu4U80kZQbV9hu8Uor/5TLZX/SC5loHe1e5E8vmeZ/kDlyscPSMVe+WFFVRempedDa52u356S/a+W0v1GbHE+Xu3649nnTQtvBmvZJa+mb/mpIXXq7NXtWf0Nghj+QB0lf9JPkL/uQSTVyxfcF2fr5d+N4S3mRQbIZ9G4h/J6Bx3VYXX42GwGptIJOoxC7P1yN0v4dfu+qTvlFlLSQv5+O/lxB10FUXzqt2Gcov6DpOdmz/mAlCG47WsRK/gtZ0FU4h0lNy5KrnIZZuCra7Le5/UePsUwpINIP3e1vYmGIG38K2hQJMgldRu1iKK8bffspnaJzufxgizQpG0DHhbYVrbKefV6U2VQftuxzaXsHZHBLJ4IpVSK9dQWcO0iHqu/jut0f7M0/zHL1C5F2kn3IsIkBHrORGJMSj81q9su2Anfh6a8zVZLs1JUcJjt2yhFKfKTsK1Utr0hYZlfNz7zJxaAkDdSoIunLzv40wqJRql7iXzhB15PARE9TrlQyPQv5UaMLDYSq5BEMY17Y57uI23rMZSgHM2shYqkOf84RVVPquPU2VwJiU9uzmKZyJxq2Mj6Q3SfQTY+q6/JONJl8hoU5Wn0WVfGufsJ0tuWOXeyID5r1xn7/Cv+SK56o8BxZjpF2/+6hgjMm+dzdaEg1mox1p3DUdUm7cz4sXn9DyZedHK7flb/MqUJDv1yCbgrKM4y/yXP8XQdYTTSTKv/rpi8HTYhiAgKmQJiOE6C+5H/hKdItvH1kF4ADnat1BVlw0kBGlYkWSTO+yk/hVd/tHJ4LUt/oGBc/ND7j6iwFioFgVF+6ghrdOxOTpIlql+WyF+IVG+BMSEpNnwBS0JeH5IQu/8qs3Ie8XClsIf7k4FOVzDd/UAejtdAxr1rzngoPBXJpPAE1GXKR45ueiIkg085+cI+g7XOpvF9cgWjz1VrFq22KmsY+bNlCKFB/mfEVYCoD4Vbb+TTjCQr/he3H6ZFNIDw2XgkyoE2Z32bjP3LjKEkpyfPUxngbn4P5c/TOTksak4M4cYDk8H2M9ItXbfkCoiynCle8iv2St7+643ND1rZauZ3GKwv7jLq6B1ADvOeyrKrhft4Dt1ImtxaXw8iJWaIoh54f6ALgQik4Z60Ucu/R65m7UDeNx4Vmpfin5O0n8HD8PgisuzmXtevS5xEce2aHyRG1QGuP5gAhl6sIrjtj0ec2ptVCONDtKFtCyKsaPsLM5aWUzeR7bm+jNWi2Iqrc5CsxOosbh0pp6zIvIPHb00jVrXR3IQ6+9pRy6bjJnyz5HmgtP/mwhD+uE+2cm/mawsH3PqhEHkt0I+jnfuDdktB1mHhvnzpvO2TX7NcqoDbCH0noZVjfEVwwbqLtKbTqo4c4xBycgdcT9LM1RYzjNeN7AVDSbSEr7UXkOH5Uk+vNF9xgLroRaI+TUoFtROc+9Kh4H8ApQiBQWhW53sTtlA0ZlbR80QXayzGIpb+qt3+nYb9hWlR+0cGwZ0VG5NLtf12DPpMfud0YfK+IOGxyulYzMuYU0eZTUj4b/TtHjTxABiFrGTKOOsqryaMUYHEwXaXP60Lrc3AcdxLZLZHtPkWu/yR/K9HzV9lG/7qWEPkOaibpScGAYpudq2ugpSEJXCFBpnCeTDO+5UL1C5v5oCsHUxpYd3CpUjJ5c9xt6Qi+dwQX+jml2pmB6lfJGDEWBaFms2gLWQT81z49gn1RKytDblcJH0df26fxrjEl7Cwuc3fQnzwbeRHH3xZ5A8zZxwYt/kkK3EG1dMs/pLV5IEHZb3ZJw4zNiQ9EEHZo7S+mUsbg4dZvNeJVSDMwsQ8bprwR9Pj0dUT4H3abB28uEwbX5Xr4diB4xBMzad9tTX4yCGg/wj4xaiSxv+mgDxyuqSSP07qy18HZqMY+ei0XliTX7SIy806n2XUaWG6TRIORkuh90qshOSfn4wTNkxJO3d/S2oDiu6jljsgbwswUAZ8+TqovKOX2V8ih21++HJEzVouC1/PuMmNGBjdSBF7/aSMiQD8lfi5wbg3j5+ph/OnR4D6YK/91OZuSjbpqhchOEJyrisRfyUrw3+/sTsMnOxjLzqW/YViR7xFijSkCSs+kyf0szaVvor3bVL8Rvgs0cGwAOXCM+IJ3gr0uOX9u6HB0Gf5yoMGlrB5cGw9DCu/F38i6MZdeXL7ZGCIYv3Li7FjgQRUrp5IvbZ6GQL3WGJQj9CX5Z1hCjgDFHbXYdj4Z1t8Fd0PFQwPiOhs5KMLUj6a7RxP89UMaiBcRCBXf1m3a1H8dE+SCJvbyOfX0/Y/XowJEc99kjpPgCHMjZubMI2rnS1Ll2MalHtss2fJl2rBJw1n/Suhx4pDSRZQvfjZozmdUt7kgSz4JsKbDUS4vxb2/yZXdhnf3TGEbZO66MSdGg3gCIjHkpOrs3a
7XvQoyQ7AzwAEheQwCj68xwv465aMC8hmaqIbvUnMNPTSIqMjryOCyMrhoIhj4z3aZdF1XaivJg0buz3y9MBv+Gmq1gCS5d6e5FPbNsBhT/9fDXwUaYeaXtYJg99+YVWadhyYZo9JWjSG2Ft/Rzy9qpharJicyOh7uPdFVIFqjqtXzqxPy+KX1kZR4SIIlBpIgZjZz13ESdZ9zVH/eu7txP962/NKsWgfdP5/Plv8p8K7ns+xK0gcXQg274s0zRejzSz3kwIbpmyc43mGkO5B7Ow8RG8bZUN/5WTS5hT+1FQ78XbWDPpAAskcf1dVQ9XEN7r4Lut3n+q9MzuHBHSMGWICk2WvOk82sQDObgnrMZ6GyI72UTnoSXN7t2Why1HbgYdRFN441FW4TGIL/Bm4WWZE0cyw3P+pKVW/hl6RRo0NzFvv8m31iZm4+KWXUejHcQZs/t8GH84ZRHlZyNPdkumEKoz3fnCyg2JRN4CYe143a1AnTc+J8c38mqw/O4tHSEk1AuVhenlqci+Uvj+3sr/C/wz92MqAnI5EPiTYtWEaRPjcclb5IKMQVF6GvDHumDs2jFqeBj4h+3ejoXutflToJ8s+Kzqj8wp5pZNyNikA5zlSp2Ix/Sm9W37rZIVocIwsfBycdkRJMBYFn/atKYACwFVMdgNyCPDg9DZtolfG/0hR9ogWm2D7m5rP6w7sTKTVAiNhSeyF53mGuvxQyc7fVXxZ357xLiyeaUYWJPX02aQcwoaPkMcUoqPOv1HDXHeVu+rXNJgqfG3/XlvlOrABZUIe2kQrZs+Cg1qEjKYyq6r5vktVJm+/+N22TEPF4ed/vNWjKe1wKGqBDqFPC6RiQ4/dhCIyPI5pVkfNvxhizCyuNeVvKvc/b96nRVyHeKPYpJtocnA3mf59qIT3xzEVdHjz0jtrPZZ9Obbpoh2zANxWUBBcBIfrztjp/sB6cDhD0149ZZ60DnmGDSkMvmffD2DFmFDfjxMzoFt3w0sIyR33OU+BiuI2K4+GEMXDbUvDTDRAxknpd0/fnhcNWdujlOVf+KXrHXES/IneSqDNNmtlbGk4KKUt+bV/wBxub5h9iSGPXmuA/pY5sO/iyKKzbKobmP1q2rlvxfgP2EU/PP8dCO4KaAxeve8fgEq7CHqMG3W5g53lAf2yYy+UhNRsxo56/SfIz9VqWLMkAlhvw3ovJHJDG1ni5A3gpVoebln0qNEF/c2bbhLPb4gel4N1fp3SVbGip+nnRu4KIK6i4iFiqYqB5MMPZakaIJ9yFWLl0eVHSeR5Kr7QiacPfhAZMtLLzb/qA8gQyrV+btpSlYKLE+h0Z+WOz67p/cLrKvTnuj8jZjYFmZ0FGM0wFz/sDx1xsFaxZP0r3SKQ5EXSZiQoOG/5u3ymOaIxCZzTGamhOfE7PfJmcNcPDTcM3zencjROLSJ7D4WizyN90NjR/k0M5Wj+s5XcyP7CX40u6K4YExAIi3WdERB9TzXpQiaqPgLDRyIAWjcvpH54tvr/AGbc2chU05boNsETk1zJVT+d+QCImq8jpQPFjed/lILMLGUmt0A3zIl+Kxzoedly7kYy3gX0eZ+Z/BprXf40Xx4DxK4rObYb/jkCJY+OLRxdB1nnu3fGHtt6AlhTYC4Zd2Gk5CByPmf7kOeZIimoU83PG5V8xqg7SdgT0scxp/WKnxYGmUHj88JnCD2KbCI9MO0LMgZW9SCELaa7YPTpqhgF8sSuB3NNuwQaddWJWGn2jfXz8L8WCqbe5vJhfYdImCM/ex/CV8f76XOXSU0vusSC/v568rsI81vyXOgzYhk3zRrPFpLbft0PbtBzuhwQyMYLFcir+dRnyhYI04VgmHBSP0gEhwEEORZOgsLvEiw54lPwbqunmIIK2SjhjYrGHK+VYCiy8NgCGgwhdaVc0xKYPrVuKtgF7VXfBZz5QRnL6ak8lK+yFIod562kHB3f1+jcqkVNs0dWvnORSvX95ipU9cU6vil3G9M2/5zIjW896N+B4jYCz1hcJFpUIWplj9YeWmrggmOs1bTbsol03b0q7px0HC0SPl+y0tFx5fci+D2l+u2DO8wR6OTT/GHoLXUKPc415RHhmj5vQeP2fi2IPUWHdvU48VLoZBuybf3s+vzSpx/4KhTHshWOgyVOBF1cjEqauwFaRxIty5abxmFm78Oy8UBCn5V0yL9sG7SnWHpp0rGI//Hl4xmCd5KvY98ia+SOV5M9U4K5cevo8aLW1JHs0f0OlRKuG47Q9fK1PFXPJZ2tAkxtXfv1I/S6B7x+SuVcAs9D7KR12uGEZQDimeX/Yuhq9KPwvqs7WtR7eXgocNhcPxjxScmbEWk3cSyDKeOb7fLOgWG1pvoz6kTXmh4jaWQUWorpjFjBPWOQb1Y3fU2+Ciszj2+9zT55EKX1M4bLW4Jspwkv5wQ5K63tO1akjX46ZgVPa0mGVTImNkAF75QjrMXJnL6NQxY4cBcrdYzmcwYor3xv+4qORjpvYgOkQonnNQIZnpQpyIw1D+2Uc/882aJmKYLtg6YLabC18thvyNM7+pXRYbABb9C9HzQIf9hal1NqU2ceALj+FD62/iFYnIuVmJ+zvaLiHL5GLv2vfFJ57T6gSKgZObd9Mwzk8+9GsLwn1CbgVcRLita6256t/I/XMFyC1Dp/UF/1lexLkVw6ADfA1gNJf9TdLt8e15U+gcQw1Jvakx0ujICCNYTF589YCI0/gNkj+hAKltAM11xFqItYnIRACTaa50O71BSorTOEILZ8jKvdiV1h431799n0+KeRxtGASGwztP3xrZCwKEcr8EFXnVvRJt/nulCLnWdj0w0fddQ1hL8evYuQ5uuFpEU7yX/Ujle3ncqTmMD5fJaPpvnyxTbj2ITZ3D/DZXNT6TH840afhvJ4GFzuWUUWF7q8xtVgeWAeFyGK2rfNGdH7YqykJJty1xK+/jk1Dz4TA3B/NW1dGw56FPZr8fERS2Xtmujqcboy0U0sk143fKR+D1upJ/u3IZzvNQihC9wWK+qYOT7bxYa0wP89MBuwnH5LhikfJVyrP0YxD6x4AFqq4wyutqvdfQZ13+Yba2LOppwb0xVcPwtuDp4IRabDff/f9++hlAwobctF6ypVb0y8nePL1Yn9fIbAraLA7pcPjX/z37kVW4XBT1XQuCcHfd/tRf+4cuxI6Q8aAiwLQdhk7bP8eR0gVtUjF6XWJGP0iDhK+1zhokPIloKsbWX/RzTyhhUys972pX4gsFVO53IjmFRbt76AMuOc3XZ7vhwEaOzmh4s5PT7gBptwar+3JsJR2GTJeZxk5imLY+SsjVUof+B+BbapujQlPo7Xpp3KPymyV/ZItCmoKcMlnp/GMJLDg24WlSBM1lZYiXkluz8SKaoYdyV8ta8JPVGRTPkUPHZBtOHTwD2R9it8C/QyAhnCmJUQgxkR5/vB9lLykGvDUyRB89ZfKr9vYs/7+C+dQ4/wlEolrQVupvIuRm8bC56OIzyxvPaKOpo8/3vaXlAtaEsO3FpoGQnljjPmz0MxTBVBQ/EyXCrbwDveuy
9uY6HCY49LNKP5Gil/gUBpOIwC0rSsEU9RalrumjbwR3eIw1+92TZ0lMz1Ai1AhduWXmB3vz+ka92IfTSH4xhy1ntN/trVsJPuNCMuqSFVWqWbD5B8qepsp61RghCaeLLzA/jz2e+A/LKFOGM/3PKJuXcHK5Pt7gJTj/gr0UPRA1t2/oUTYZ3nyftAgF1W68kcd1Ijw0GZS0vHF0CmyNun4DMLk1eWv3e/IUP9axr8c+CNq8S8NvRHB4GoUZezFRAAyS+hVF/fNgErrfVE+KB1sx4SxhL6x/Bsv2+AL0pKcE30P4/5e8lfEDlv/NuYXujmxKtmIO6EDIJN2KCK0IOnMYGFNfuFOq/+YuBXbHWgmp6gVCDeDPeZ6fHq4M2f1YzU4gPsSYFv4FuyjEPsOrSoy/kM1RJjhAqz74N6IChnPnvcpVOrlQkVP86F4x7EyCB225QuJSfiX09HRSNBWA3tCt8iOoppHSyINV528ATv2EWo+30OgOTTINB6FkWzX4j3flPw1u8oEkBJJd4lNOzsyD7RHTCN9iL82XdkLtVPE50iLOflidP8uLvo0QUuqfOASWSHMFI71s0zkM/SPuTBC7fQodzSkc50hTrHs947IaSe8m51RPKUl21C8YT6iCBwJ5gwiuJF/AqbbCiKmGXKx3CT+FXkRwDdc3yLxn8qTA//fzSvfhzQX9ebfCnKwjNHSZliOP675xgCggea3jD+NPeZz5RiFqfldnn5U0IfQ/FUzeoyDqA/18+CVCHUut/TE+aDZhw6paOkzDdHSRqbGjs3C1GHD8/paHCmeGsD+c/MzUQPjTAZ4Rcu2erR6QMqAu2UBAZnzJkMW8FLbcrIqataDA6dGO5eV2B1l/WTuTqEayBVyLbgyNemDHK0gYwURxN4FG9W55RqOzVyjFPBaccqv6clxLpDOp0HA3ctRwQxu9d1dxBDlSXYrd1dFY4wLKXlJgdc92vz1tosTFVB5t5yjG4jB7Jvp0p+OCI4T1AyI41zABhF2XlKrFNPVIRGQ8guzPqTGnNrgm5UC9lVBf0xZQnVH1E0PhYz7HKzBY7zSUDAij7LgeDISo7mVj3Bz3L8zzcgq0JSGQ4gfBNLnV7h9Jl393t8SIydTp787ElcAB1lXBLe3YhFouKLXjUjnZf6uAniz2TiUdCINoV/T4RniKrP5d3MKy0g6mDvkCJt7FqJfc68G4iOl+U4DBbyFn3uhlph4vE2k/RAc9AJHf3b62jD24pjNFU6O+MmXcCSKKOf/4+69miQFuizBX9OP04YWj2gIdKB5GQMCLQMNv37xqPpazO72zK7N9NhuWWZlBolw3K+fe65wv4vEA7fZCrS1d1+rFE729Fj9W8AqznohC3sVQTdqA5Wcn9sxNZxSMLyLxr52l1NRCS3jX6W50UN3JH47wVNBeOYHiSJpM1FrwI6HVDIbO/S2KifGubPM5xxpCcOq19NN5sBCL6CGAAp1mdKvx47Ofq1ObwxVltaTQQ4thKlggo7+YKQVbXPbIw4GAKeo7oKcR8dGD8/ZZ4TmuGWu/mpW7wLnJaxV1aw4vzJhlyytWiFg3Ghhgf5ba6MklP7epfy4ZC/rYm93wpTklilyQR5OihZ2cW1wydgxwygwSGpJP6dzn7mHNtWs6VijsESUpluw+J4uxIWsl9BHMA1Z6nQJxCJqXJuMgorRsT7poOtS7h7d2xvP/PrDKbAY4abzhqWY3bC3QwBn7SRN4bmE+vrWYbnT1aPwqKkLe9VlJFgCcyLHwGwBEtTBmyNs0HmaU3MAFxKYlZCZ7J4CItA9obOPrrDoX/30FJ81U12i+sinPOSbb33QszY481pvbTzyvGPfPFN2mu0jKeGKxfGwbvD2nNLA/oqi2a8g4ZswMoxWJ7BZc92o/nR1QImCGLE4UFTiY+RHTG/7rYoK81g2Sp/MOfD4wySa00QlMBMH2fbEigLEk8OL5/pd8LZvMKwpUxK2YtsJ0/OJFq4rPiU4MAMJFUct79snDM3add0aAjQ9pkpd5zkPFonWIss93PzUT5khU2D31qOcbkmWUCPkvDlOVcugvzcWvVU3J0LHom8XmbnPm5MVhuXu9z7sn/pY1BBY3/VvY9gtVRczDXBBELz40XTnpFGabEYJDSxavgnJHVJrQXpsLfU6E3GXRTt95j37CYlQF01PFgRgiUr9wJcSKrCfx9o+XBrGhZoFpmdfeKkEcp66SC7GqiOq1EhKS+E5TiOT1+RCeuZQX7895nOiXic84/r4u7J7ZpBFHyewIMr+m7pLK3EXq2YmPdDKFGYa8AuwMI0m9GPCc60gPdqT/O3wC8F4FCOjngD/8hJJynftn/aIEj5bWmztt7PW0kMzaO3HjZKXPTGswnxN6IuQj07HaWBBJr/l+2ZMt+XLHp1TP7iCGpPOu3PxaKW95f70jKIOxBp8JBgGVqi7ZxnAKV90osZmWbCWVDw2kbAfc3k4bNUFfAJunQrk1bBOIRQN37Tr50F5LeQz5NDUbJYC7iEoYLkYo11jTgaHCoxL5TENZUgz5ofu/ErnsBkUukv6tP+tKNVpsnQK9Z9Ez+Qj4FyQe69lf/rmVZHKoa1DvOmx9drmfh9Md+eRNmG6knKoOz22i3XwL+8gkvkWd3pwWIfjGeb8bScnAt2Y3t2jvFnovsPPcfSikffKweg+xe2PvRslIVdXi/X+JD+Cp/7pVzP6TMAbsiT1sYJq1SzfiZ9520SkYZ5+qt1emUdz6nvbG2JVTak35/54KtZxwB6vLUeyz1l+4c0WbLYnqqrPQO57JWzd5fNM7Hxmr08dqAIiX4fQannmGRqO6xWO0NJ191ng7p7DniZwVrHLQRUuTm0s+9hPl2unGLtGh9pIekUDzKlqMDKsO8pnmtO89uCNTgrfjcCvnsVqDjB3eJY2FrGpuH5jLvfYEGm/YW1kl8BZcjLIlKOoDmrnst+m76WlYh0oH5lxNgDORx6bdD3r7D0Tn11OwHdGvSnlB8NRlUxgdzSWgrVHPH4byV23/6HvSrKF8XrefAOCthH8vm0tpOTePCMl1RBVsyoSkBFBbktjWvMbk+eGlKZg6rdGQpi3aAhvPuatb4L3t6xitl8FIMjlvPoJJMXtVjIQ+flDDFaQv020zFY85S2aML9d/0J/ZbNI419CrEhRRfArEG9tZJO4naZUNqAK6B2ix+d8bAQlK5kHIdglaFJyXODvw3lOcoFQ35OV7BMIMRMrapTSDMRDjdN5a1n38peYiPBkXCZ4YcKD8ozg5axH0idtplZ7nsPDQd3PVzMMRoF04dXzexn3hJzaPjTXRpKcMbn+Nn9ex/QNteVvVN7vSXQ3Qq5liZ7hRQVxIewkd6p52tExHFbcIrn5Nz9XWav3DzMy0yrWrE+ZpTsk/+Y6JykbNf1KcCaztZkV0qSW1nOhGfUE6qG+zXlSw6BganZgfVfhmG83nl+hHe5v31QntOfL92Xbz1CDmRHXZA6FBA2AqiKljCXDlfMfQ4sxygCSLpbxCoof+t1wg0UR2ntrPeD47ycrHA897QfHdn73Ymr56nLE
2B7JOzKMEjBTCpwkbANpNErOGj5A8Az10riws7MwVqFZDdEANXYorT37j+y1s0euE9lN2rJ7l8odTIwps69PWSqwidcqXC0rKsUWOwTUdE9Qvw1NYseLIiz3WmPrxgKpEQqAXMP7c1vbwa99ShmDCCVngVWqoutA445L8VfAzW/vmtnhuh+OSmVXFzGvluKy67bWbRgI8zFT+aivr8zkuAMoij0nYG/WZ7AsHuSMyBZNRQopKGV5MHbgFL99Xu21aHQNIR3DghdCHOf225amOWKPxjA9zfZSfOT6hoiyBcQsC7Yp0vHr9ZEqyBKffXz2XGMPuOf9kUt2g9EN1iwr4Ycr6kPBnmfMzS20aGg0mgTflgnfnBl7t/LK8GPNtAhWJM8zLSnSCEMySrZobntCd3QtY65m3gNSyS+EEbrbgDMwDVlL3WP1Y5qEO39n7jeTBU/zNmT9xfw9HXGW1NcNOD7VOJqm1T8adKmki64GPpI/LethtP7CjqSCFIRDIxPtD5CPCI/O8VFZlNlx7JMib8FrJLfkF0l9w66ifCrtjfDfUMA3B5+jYtLqP/NOT35xwudVEcQIgqT4Sh3l6jhGelnyHepZfL1gnjO6W9bU/pOj+Pb61RWiZKk4qvZhC7f3q4Y679Yx5K/bY0nzzf/SKwsdJ4TqLgYcQV2lPuYjbhP/rdD11ygCu2SX7sOSqQlovwivBIpzPj7+AJaVhcSX0M+Ci9PSgs3RH9tIeX23qBWWztOVMrRPqgzUS1Ym8ohdF8djdbOFqORtrOcwH8Iv5l6E06iWalA+ueotU+IKh4dKQSUqvanlWNsqo9h7meeUCKm8iaqs0C3C5uVlx9IBvygV4iV8M6HVq57RKsF3zalCe9iC/WYEwPKeeckpzA9/WOH5Bvrs77n6DxqfbzDzyvr3/egd5nctkLLftWAY/l7Ljsyfax8eyP6ufSDJ+XstkP3379obIymOYfh/+sceXXTfEV5nMF+FAf+ETnRbZ7N7jvsn9BnbZ7yg6SGww/pPIJUCrAQHh/Z8XvPz3xxChX9Cuf6U8rHPV1CQEPr71/+CgmALuOb6ewCHoT8HjvqzVn/vgWP/jP89r8rrsvrH4zDyz8Fk+XOg/JcH/LZK+T0WOCdPLu+6f7Ti9zsC1Z8/14TOf6XXUL8RhO+gTPyvmjd3/wX524g96R6N+Tvvz4FlfaDyz4H8U+bO34/DODw/2Hnchk8Obgw9n8Z5rcZyHJJOG8fpOQg/B5t8XS+nvsFVybaOz6Fq7bu/f13WeWzz4O+rPx3HFuOw/j0dxp7PT1/PVwie8M8QSv7jQPQc+C/PEeJfjvDn31b8+XT9209WPtdPP+Xz34P/twO5jNuc5f9RP/3tljWZy3z9D07E/g4f6LT/UDDmvEvWes//XTv+r8b376XWWD+N/heBgkn8nxGY/pd/1L+XLgJB//0d/zT8703+G6H5l1b9v5cjAv/vy9H/AqHJz3r9IyP430/R33PB7/8qGuDDv0jGvxHnf22E8K9H2Wyb91874f8cmYHp/xyZQfD/WGYw8p/x/yGpYeYZVL38l9MmcMLyHz743z8Ko6D/Rgr/3PJ/rkyS/8+w7X9AGP4XiPB/BiZR/x+Tr/9ZEkAi/3tQ6V9VF4n/W80F/XeU1v+PsAYloX/GMOhf//03CEAS8H+qKCD/A6KwVMkEfq37BHQO+/vJLFOerX8HKPnHh6I+gcywgAHW2SMtSZp31rjUaz0Oz9/TcV3H/t+cwHR1Cf6wAqn6KzPc2I3z79EoitJ0UfyfpIkAxKjuun+c+Zd+/VuuRP6joc+HT7Im/4Qyfz4i4jSU/4Rwtc+a7wNSpXIE1NZwvErwyuc38M3IA8dEz0/ehHL/8zDfRG07wfbfGLLdNIufXQB7QwO8W6i1Sf019/NGrPe9Q9xjVdvvIxAkV9YJtrQ/RT1jZj32jCn1hbBXq/D+OPY4MrojyiX11hVX3tnfFpLkIIskSsAlDmPQQBr0MKOWOfsbSn6Rbf7iGUFtQ274MNnAw4CGD1ZvJNzhbLmIpc6W+jmNvHAwUkk9n18lA46xo8VGjMZEPPhKeM7m+CNgDok5eub58b/1XOWedLEVeD1Vj2+xo976SKiXYokhSkNiUA3sqpWMjv4LGLplWzVsSWmIEEEdZx/zywBuIxArzLo7BHW9RDR8eSgrv7iaPfQuSp7/uC/GDaGYYhG1q/g+fZZEI/m2jw6/yYQa49lAHCn0Xs+D5w2+rBa9N4Iat7RP5s9CieQlEXPaTL+42LPttXWWLnpF9tbxauBF77KfOHmgpO98DbTPTeRwFBnFeiYh7NxjzYIQzgDHXK7EFd1NJ4UT4eeKqVeOK0lh+9nUk1M/5CrZNNp95/Ca8i7+jx5jueE1jqRlfD5okbnS/RJRjVQiGC41it37GSn9GStWFdKEclIihwGLm3eIIXxFdRZflZAOFm1/Xl1OYyaus7Am4FyogECCQDn/gqIaLkeoR9+3UC4TE7UvntdN2wAekXA7J/WXJt4tZDfnVE3Qq5r0HZLxJSUIpS5GHxek/g4y0qkZyu8bldNTmCiOHHCXnxRqSVxkrEThN/+tA5zjnJCvDlq//KtYlUBwFJNpX+MBChWxPjSylaPVm3O8WY4XajGd6JzPhchH2Ao2YS7fJ9Luzh7JrH+0gB4ahFg9YiTJ24K/Qea9Wfnp27Xsy+PmjV5LdOuXx/3oktPfgFcrJN3fRldvDgT5MLplklaUmexNm6ICsl8mBNLw7uJFCdT6uYehh6Y8moa+1dlKZ9jeeFHh/SuwVKi95RtQZcidbm+q56RrT8BiTpPWPLAKsuV69BEdYVJc+9BLqmarbcVZj6O/rGseomWZKLmFMOnAQ51MmV/qDJjTkeDSVJYk1wz227jscj9RbsPfagStVT9wHxKti6Z4wEcP0N4s60jydObi15ZS3VpoKr4clBi/B+NyGzINrKx3ooaLmGcmZpT6wvKjV8F2XaI4cTs9oKjuLkFCJ9JuLsQ73/IlPJXStXXJUJkk3pUYNRlhy9s4xPmfT3rg7wy4TbG+O5/JzzzzWt3UOEawoR3WtKBcmBLHilKqSTI3eMpbPF1lLiaO0q6ZbpJkzmAgjxsDjlo98VVwkJKB1QVyQCjZebC/MdakIAuPlqbgY3c/LwitYUqT01WCmsViYwzsi8XaPc/oAkqgofVRnFAAw8ZnNz0ZNYj/LQKmzeLz5i7DRF+YnHgiLVKhaUwULf0krlaF+WJa31J8F6UYtKS9TJWSFFHGxU0Xb/1Sfcly0M7FK5KWpkvOlX59+T2coUKNnSZ+PjdIoNKp6516Jx2zvRF+fZ4Y9O/UiypWRSupo+bpV11vMZo5BZHms3rVvzEXo+geDyJFTf+PIwZVqZOAybheRRfzEUSYh1vgDbY9tGjv63zB0aftRUkABUfjPKhvsQz27y0bKVBbyhSNw8r5axN9ukZSdzUXkK5jORN1xverDEbGyywLNwnCHpAe+u0T9fGC4jRK6YfWy8fG0ObXHtaZmuA+vt1e60F95R97whKY5J2YOkKfLdFF86o
yS0p5FbCSo8CUZ73q5IJAUPQXo5inIzffQqSNMNy/H+hXl27Do9v4Rtfpj+6vuJvVA3c45ixlXDZL2jZMVwXhmYOFCGIlM8MxVpvavsujZcpb3v21wXJlLVr0Tvu3vMOeOvpnLzHJ03QBZOKeowLcr7jdLUlW6Z9FpAqdQescRAR5Vmv+YAkW6MEOyxmX8xuTHl4ZUicm6U2bK8olDmnORGUq7EsioGacoNCvPSljYp4txVH/aw+YT2UNwlkfxsK08sHgAv1QVWzy0ruk7LPVGsUwHL+5RidOlEcV5a/Mxtpl8X4DlzTb+tqZgpcZDdNsAYFvGCouRB9JSDs0yVsY8eoWVd+abxKZYD3asNtaWRxthS6UE9cn6rARG50CkCIymycX08D2mCLDU8gvzBEGE3JimAJe4Cw1/e2iuPurRtiB7Brg6t09aIbVBIBhc4UG0EiBuYHYD+mr37cOLi1D5kPdSH6LvCJ5icsIEXXglhq6PsKseO1HzE4knlrXHbpATnwzbKQe8Alw03W+Q8q8MCGT7A0Me6UEFoOTEXXeNNgoAn1x4xrd4vY6RFujZGmiPadH2xfFlszCRjfIIRRn7zuZBwfhqVVFGemLvF6aoiPqIIB4C0gdFzQIX6cW2FEk97yJdp67NK+Ho4C7uL+7kM9dggOAGrfwRB73pf3lpXY+jONVyTzGciUCPdjL54zJJd42vqbPOyjqR6hLTu+SGyug6DlkPbIpRZ/zM5QIwnwOIYLfPuHpt+xhIOORuCxKLbqUKOy3INTRhWhTfiOMCCn18tsj9iWZ55Hi/tkaoBMYkCKWOZJX1IMzg8SP+zjbaPhtS9urGiRmyEtaVmZwgveDpV/sWrtF43Cr4w/xo24gSCdx1TGEIC+KjrztpWSogqNrMjDF5QimUluIIAkHL2VdCEJQp/zIPoNtVRNC2I5cnrCteqLg5YMIhzYLVZKARRvJUidyAwTCHULlbsP8fHjGHSLXb8EQv1RgyY7YvSj4T0+PwK3vwg1BW7K472k82ve1b8pro5K1GFHYee9zu8b8EGTizenhYVG2vR+Bhba/9nlgWj1AGD4ou3EqoeMN3+YuXcKpFcq8In7oUBYa3vO3xpawhy7UoJ+eOcPrxl0qrQyiCD/pk9Wt/gNpWNLJwkfnSkwIdUu1EP/1XUEOwTkBvfuuEGaHDXXwDXV/CACDPTgm6ado0ZaqhgRj3VxW1Y9+GJAQ3+9nVBNhWFKhXz+i9UMscY1ctXbjA7BftlHeoMabAn2n+OAplOQyNX648iteyrfGkGOEcYjipMf7sK8BZq9DBYzjHA6AYCTFUfzFdBeBLnb7FmW2mlkEc3qRDOPbtYiCZ3fGnD6OFnmR+dOfvDicBc07325jJPoz96bueAOMEJlXMCDhUNR5L9JCw3FGJk400+jKCE6F19O8aXKJqhO+l8mWVirWJ2ckEPaleIoHwCqiRNfwYA0pG9sGpnZlQTewWABi2WD3/Ct2MFHVSzk+EO83I3S5e9RxrnPipX+WEuUdzfn+xzvuCFD+KCVTPM2DXE2wsXDEj4cH9KXby46iCoy8WT6RwsOtdPTAlyh8H7LCh5wjSAFr/2ZktoCVpEzGyx2hRjei1hi3Z/1SNWDrDvFRQi+Lfnl8+ADpPCKNERCXbkYSTzEFVwOxqLzwx8LrFxgt85ApvPsSOOnOGDWN3TZz2aWxTi08lEfuX0YIx/fhpSWLTW++1saN8+yeOtLo0bUUMn1cfE1SSki/TnM7g/pqdByauOu34pmZuTYywatbD63AFNwhRjoWgTUl/Tb/Lai6VBaG/uKdmV/fhtCneHrBlw9WhjlAN6vkymRaQBzU9CuH/DodoquBbZl9SlEXd2T77Wm0UMuJgxjolDkd5NwSd+P7I2P8W7KX2GshpEy/GqOfHqrp56JWjMmj1Rt4GQ7iBZhptP8Q87YBWooCQzuI/rWYIJOyr71mf7LM2qX4el6/6YE56QGY9+NqzVhbWWUgliYPU7wPjt5/RgldLucbEW7phR+WbuydtUXXebXSuoZiYAm7Zke/FVqljU36wtUR07wCIHVyBrBGzJgebhOaKsTWOTvOYrRFa9T9MqqR2KW4FxYARVcnmOoQMOcsQDfrY8VV3BvjEDcVg56CnXi7mSoGGjUs/FNscA00kJpE7csqGwtmb+yAt3cPGdNRIuCxiEAwdnVMrFqERi0gy3m3BRc9T3TP8+00QxYo5YMYVqdauOF825QxSAaayrTvD5dk3lFglWqivY2PmkYUmI69rluSDUZRGnuGO8QmvwDwGYyIt90fYsiPHwzZHoi0a+pg1io4fxPHP/lc/kaWjgNESSpqoPbSyu8Xi+Wqb14swrAALBqnT5OLWCzt9NRPsHfyCNeYAtqJg6xwcWDSS3y5AC91NSjhRdKrSKhYY8LQX5Ld+Oh+78GwD8mMwOjvuvRM6USXdabRFBgvSlDPWLwopWACCNiQr4IvSv5BJvkZgceEqjSgzn41fShGUxxj0MVU2hWf08niDh0U084jPBiHWYqfmkGzRz06BQS9km91CAvLHJXngIq0IrK+5udN3KDslO+p/nwPMVhQGVLmAcDo6YHInAGClstZBoc1yl6d7nPyABpjKQ/Pya9HvoGeMn/kW1w4dHHsQOwONX9muHayDYFSMAb1HQyI/iGBp5hxSAV0+MwlfvkVC1sKJtLSpjxnXXt0aKPCGVm02cM6soUFcmt1wOQaKcZk/FKCVrBCKqMp9tEBtzRY0v3gtPU7lWdy97dfj6SwDSMqS5OF7GbqFbDy9eAQtN+KWl5dI2332vET38Jjg/F6WAD9VYH1bCK2vcxtYcWSjZTge4jDMTDS8npPHzt+LA6X0RNeLYDa53CAQA9rPIOdVxiK/4KZLNmZC+4WjWhViq+vw/Vw/WAjq7A/RQrsF/zg83t6RaHU8LQ1Yc0XeA+qwmaYp7H1nLFwhSVMfPrOF24892GB4htcuAGkT4LT1JqrKwOkqTo6/UHCeagK2ltsKiMes5dLp31951ABthMZmIEXeObrFRXi8XBjacFkGfiAKD8KlDfj0nFlJ+dH2Nk9enc30C7vcbh+UrTUWYC8R2Ee7/ahEOjX0z9RLpWTS5b58+AlDR9ZKvyPpqS/zcewS2wLvDhCRoVbpz62tRKx6PYNz+wACIEtt1iqHVMBXSKBQ4wVu6mXUhDvM2OE8vXrV1iyafTOHHeJbn5vmy157Y17dLrW91WTbHx4BEuBPloMZOb2j3DNHNIBa2wvTp/bFeWRLd72dzw6MoNBFxFTQoZeaADrMlMeF7PZhjzIWYQ21rFU1vJ6hEg9XgOLG9YfsgMU20zeue6aqSrQwmt0Kqkd1eH1Ijiv/G4A6M+5ynwRQUWTNYJApbRSEbs0gbqom7AtT8RW6r4gxaLmBtnPhmF8SKfq761HxbbeywJtWr/NLeICQ0m5RnSloPPiIfpDvWf5kEVWnLBdFI0lJFxiWEDqDqwSMC+K2U0fvNcPhTlYntmXqE2luE1OtFAwFzMBCmfc99wtWgA49DArYKEoLvIeanz6xNSgx0
WToFdPtb8dXOs3A/znNCk9LGThPuWcG2T1ZooIzBa/9k2eAE6gAhYsS5r70EmqTCy7F8MMiM5JjIC9RuZ1mDNf5/KS0jXocHfD01ciHgHrlWrmJe3B98i0PBqdZ9MXQrljdUqfGLhk/dI3mxvMY96b6W++YKtcGBHP9nQZj8rKBYvI7hLJYFbPGa6twGW2hR9spHNyiiVwh8YxgbYFigl+oBysWf3gs3XEDZO1TKcop/pmHnvp4D2ZnesZEsf6WGqeu8+J8zRmOUrWDiG4zyF3AM4/MQwJp/RA1/kFT0e08ipRk8OsMVMYKOdfY6R20xsKLTqVMYqTCqicf4A6U3kVot7AW8wqZxakJ+NEQ2zDA4EoUuwmKAjD3lWb+7crKRyDSbXFleLkR9G1qVPuWs1DcqrePJIPgwtl9bby8iaAfsYh3Z9PkQalEkWN+2VgPtJXnMOEYWm7KGd109wNg4Fv85U/DAGawbnvzfErkDTMeWBIr5yAwCe/mmvHfk/Fddhsb5oxw3SS0+t4YWQno3xWREcK40PjfXVTm7ckDpgvpIWyGNGq1WvYWRog2Rt0vKtH/POlvGPrsILshVHQCTSxBXVVlvGkeUBk3m82Wqi/wkZ/zs9ane6ieAgICHsE8zkeTJkgRHpYwjK/DSXn7Mtndc9Wf1q4XVJwL48FKN8t3u4L/RBxtzQB0xGVzzVhgUfcdxgHwiE92uLAVcZlkh7sGF5Hsg4KXdWHaTJxybtAubbD5DyMf6KawaKl6MHYTi0J2RGmxAjzKqm4r9WJu47A/MtlX+70DD69SBQJVYD4WDp90Y9OVPEjuy3Cj1Ltim8uECbcSZRv/KBaelIEwb7mJl7ChQmpOH67emcGwA1C246BC08jX0DO1jT8ukIn9WeD+HY40Q/9GnOQC26+w3RPdgwfTFbiY8yqbLr66Aio/wYMyEZ/RWbwHfbEq8oERnRRpvlbYEmUOwnmUihoegzebiIhr2dmIkGJo/ZmT2yScUw96GcDrkHATOsMKIlzB6YBF1HwWIviMmPLXnMvO7ve5d7Slt6JoY/mXHwvT8//CDYINHThlQOUWAzO+mUlRjwXeFaHwp96XO0TMAv07AyluBW4y+QFfmPxpFDe22cDo/WSVVwLhWzP9Dvz6hxs/clK5oWH9ryTNnPDL+yVxyQPcYUBCmcJdww8mGDjDJz82t6QZJKOfTB4sFiJnTqgPZ9WbkBZjD8rgGxUGTPPj8EQe+NvvwkwVLf+GHaNsg24D1byidFMLBNrJEPKKRMcO3hQG0Lqo5zdFs64EVZGDoNdCxZCKrl5S0CSQ5A9zD4zq+uxC6GiU/Ecg0RNQsaM975yoXoX9ApNGyXNqscjjP/u/Z2CCWtoGZxTxDuDkUHFnY2vnTMP3g4YBiF8gcxubzZdPLJUT0fdT5BzSsSlhHtsDZK8swIl/fK7GDWu8tgtg+nD42Vh7xS9S4+svB9yDSjQEaZrk5PNYj3jwqtAa7IIpVyHPKA2oDVsbcMg5sHZGzy1grp8VI7EApiITemtf72J4yEUtn32LXut/xFrmkRLcr4+L8zB38I4b9zd0r8c1T35bcosdRh0CdiDdyxRxnvviEZxaw/lx1lVA4up/BC5LPT4mfpnSUYuUunBaVeYHzjTCfs7WK9SwJ+tmo3P2F/+6ZuiCNaEfJOoDsPqExgO1JgZdwq2oItI9at68ryR0bNviMcj+4wxccqqYblZgwKgmEsLTEskDVPHajL4wb6wD5jI15I3PwMnHzWQftvoER59h7tRxlo9UALhMyrlYKOLl0Q0VeywJjUC/YWyfBSyBj+7CCyGvEmh4jyO3Lts1s4CZiRxtxemT9+GREP/VCrKV5Q/+Z90X5Hq7bCJ0Zv++o36Wul93feaxsTi1/luOPFnOYvofIsc6IwZuOz0qG+Dz4OXy0LkslC7h9vYou5Eh2eNGB5mOZ0BGtckgUpGCZNVtjs4A6c5wNCyErOGe51GX1dLxVR+rQxtrUky/lyFMf32I0KHrQ7+rTu3wnN/tVdGIbCvWgn0+cjPWLX3URNvZbeNaufeq2NUskKB2fRx34XdokF/ibHKOb/1MCqa0xa4+aKt3W9LCmCPRvJL+dY6fqTyTm+X9ltEywuR/8jaA+eJsyblw7JZxQsGL2Ai4Jp7pKgRI8x1BR40buTinCS4ytze1Fg6QAP5k9nssf3Fni73xIhvVFRDwWrM4g0/XFEtd09x6CPwDdk6nqeL6IPzLJX1kE/8GZPvo7zvd24mTaHui/PupIl1xkEmROunsThPFDdibHDr2hvpzd+j9W7h3Z76EZLhh17yw2M7IOc1aIoQfLtkzCGoYcur/u04Pi0likHsoGz6F5f94X2jqsucmGE4jhK018PL+XCAzVsBFApYFfxtdwQ/JcwvmBtyypHPh7G2A7qr1Fnmz6y27x71e5cR5Q4ZI4Zusq7GJBBYnYcRJ4ruO/bOa5ASUzySy4MRQJnfdK/E/kb0Db6txGbdiG6Lkf661yFo0Mnq2GavBl1KzlqvmMunosd+4Tqs9KLHMCxZiThI32li4Pu5o5bKbVyFPf92mIcZ3Ok11Ir4ZX0G3o0zadjPqnHW+n0reozF9xi1SlB3XzA/ZrEI/OV9wdSNPlLqajpV2r2InIowv3ToDYYvWGdaJKU+fr1GZRCbeRSQSgmyiQAZ8xdQpSkpM03JE9mF01Ptb9G0dhHHDxA//AqUO9iFQ31Q9oVEjTWwD9fqC6PlZ/4yaeid2ULvM7a5fgApelDsbj7synLqkOmuRFeba83U3qDMofdoVVWDThWG9cCRCkeRAfPUCXTdbOVhIvP5wQwuljumeo1cRIKYwdTQ5lGXm0fw/Q/7LNwFdBBw3g9ZVvSOUvgC8XWQvX2ZLZAMzHQ7KgUwYarqGQc5IXR86W/DY+UZLMz7vkJEeacEYri+/IP4n31gnDZah+I0S92DVl7DvaCQ2g9gboCzlhqsLhPbO4EYlfWaoC94sOmv2F9LAfnbY5BsjWDmtd6RXEhKXEOafFmA2Z1gyYbWa/+YZO9tiBwK94lkfxBCa9GokVLFvWBAUq38xqYTt/Gd8kla7vfGTMcPYbVSPg9InGlr3ULGgtioy6Q3NMlH/dLC07x/Oz7c2uSvaJdhBn50DcTxqYdrYJF01jA6dVMFCfgsFoTk3DO8bkKYiTf00lvxxu9To0uRnL4F886ZpCk1n9bB/mIwwA9yc+vuVd6WrLAJ+g7YNFqrknXo0Zln3VjaLwwHsOr5UpVgYdxeg72TfmeBMLiYn2fhvU/m5fdqJi4l1Wr6G5uaz6tPBSz46mtqfYHDfIaMEvrFtq7+uw9KNJ4bL+UVLFevr7mO3V7l/LtSuSyrwsO0F0BDzU90L0DnF9ZjkVJY7B5kXxj9Y6eiUhw+Zgw9VUehNCQut91Rc5gQfK6aYBxQBmBeDSWI1nScEhWG3OaWlzv9NqAgcR4lN1+wXsr1ygdWDoFJxDC6ifdnRsm8rWtJGR/dbbBVsL9Gtna+53vFojK2uKTB0sAR3ePj1MM1Lm4t7pkaMcph/bTL/BFX5
7d+EwebTovsZrHYK1wk/+POL4sNTcDNXt9+WDhWJVthwfb9I9wMfu+DkBro/H1sAd+/9RjFpGiMyV4MDbbxX8UW9cEp58DliNLmMyEMv+0WQ74toeED+VPr1iLsmKgzLJZ8/ZnMIEVpr53W39Xt4ZPfHKu9sCfH0Vthf9ARBhIuhdrUqTklZsBjPAXRcTsbenfxPPD2eE4Pfbpv1ZJCNZliGC7/aARp0k4Qz0wciMDR0crbag8skuv6l5yp6TfExFF04RDWNv82M9ulPnkhA0XHVivNvUr2PpfHfFIy/aLJPuuBE4btDZbTVQGjZQ6t7Pt7fVNVQbbdG4FFjl6zB4JhLdahEJJcg2exFDIQZdkMfr1WjJcNxRTnK6dlMzN3uG8ngHPTeNwN+q64XMbTCUUnwxclQsj9LC0zyaj4EnA90jMQNnttDRv3Bh0r66KLHyi2XD5xS/+POHb3h/ogVPxwSJNjs/T44xxBH91hiQW13lVDPdpCajqIhSSQleOKbBn477d2jcVshey8T9iZcSQLh0QbLGRcNsSe3t6sEbk/1taq2qNJSB5rnCbAJiuDz7r94oHl/XwRMYxkmtHKku+0lOZ05JRnFCEEGSOp0Df7FrfgDnZPBgK0S7+FQyrqdNESEsw3yPuCyACaGN9FgBdnCdTOpn3HCYUKpB46t/Dht89ApNRKu1Km2Z1TqFltvxXFqWe7N/irjq6vnj+omHwbOpC1aIaHV/e6hXGSlKi6VZdWQ1KTq3PeS4LRLZFNAUpqEXcdj1HJbEgvQ2sBYQPRmlGfN4LxK2k1+q8S4b/LS+SGfDNz7C3z1Lg+2ADXEGyM0XNP6Zk9v1LAi10ujZpiV0pkj8IVDL6R9YtM9BqnrQ9/e50afJxAxT3hsaHOzgPW+IZPl1AFtKJLMLAg9AAacYvcc85bQmafO06leW2E+2+YcufZxi6qOmMRICuYJutSw6sSxP5qOWhfXzpOSrTeWje80DL6JWDKxhhV0gJJrgZ6V9LLIxaI2PQ1CvVeA3lEiic6bv0mXfAOBv6tt2N3CLObK623kU2m30WibKkoOlMWCphDAeGiMWvexkYU7tXi+7AoCHuiEotaG4HtWd/kf5QmbRv+sx7cfdhSRGBtPqVqUO8aiHVetb9z9y5+Au4bwOP8SIr9WVN5IsFuMc61r6hDua6/pT//vNxP2ugY8+gwYEZYd5693i97Ht4a8wPyD/ywnkuekcAx1jpfks/i9BWfPSxX6WHRWKJpmoQfQ1m3YDHBzK4kixagUNVFkxni5trw6tI8aNPKc/h6r+qa3tADcaP+dfY9XSo9C1eHXvK045Bc+HwG9abvICzgRMXNGBtcSMd154FWmO/sb1MvToNx6z5H2xuHwHqkDiePq2st00/NzZwaCw/4GJ6ePpSS3DS+vzQfoJi61EK7L/wa2BlE+B7jOwD94F7IYyQ1IOPJDZCqfPC4pWzZw4CJeMgQe9w10qHmp5A9XTDqsnvZOT7OTuPEMdquihb3mjJNPYdhOprFHsZ2lRiRgnDLOR+8+YfjX1sJT7GHe3g6P9YCOUPq94ClO8oW9SM4OrYEqxaXhte9thMTmRzfXfH18xzvTRVo4gGKFs0gUm+9H9s5l4NNGEKo+PzSY0x73k4Vye23FA/VPtsc+f2eX/8Eexv4OPUwjPRANZBrs1/Jx2zud7jv/cqOTONEGVGMOFT2nzRpq1YMVJU+tTynHJIOvSV9+PNcv4bWzNuHcNie0U8qJfvCG2ziSOQLilwR/SBjUYXcT4UD4MZpvvPeo6SYLgNlcaQCmzJ2Ge6Lrc1nieEefYCyeykZndHlzcKJ442VPJSVl/1MkWQaoTgCS5Yb82PASo6V0q5Q45KynjRNfv5CvtFDuyiqikd9e4WSCibeBKfPYM3bdU+eo+YRUTUB8c5uUUhUaIfV67Qseia3X4LTReF5nt0x2KZ0E0oNBaLCClTdFt7tmH0a31/LPzq3PUBVEUl8FGTxGsFzfhXn5NQsG2c94w3B9meWBdJB9dkhV5okvwDJrGuffFE70+0FDDyyPUWVkleplV3PBRQ8RuaqB4+O0Blz0oELOl2pgZvARgHvDX+m63VXDzLnuqsDH8ZEy4eVg3gD2qIDSIixvTyVro6+txR9Q5M6UpO05PqN5NLT8aFovXDXfuUfRDn+4aQ08ZZxdes8MmD2o9gMDpKDp2EWC4pPayPaOnq3z5owia0QrIfDkdH2yFuXM6Pdpa65YTP5vY4GLxicuzS6dLLLVFQqtowttHo7xibnt8/BQMQ3E3VvDp4N9NOqj8phmg854MBN4eWYOX2bB+bk26/Km6cWBEv3HKI+iXdSOrTxnM1XuNBsg8hH5wE8fcCn+fzwasbfJ0N/66se7SAJA3pHX5TCHnqYhhRis0sV5d/311uqLutB9h803Q9RTiWpzh5783PtwAq5/7gPB5mSfqWVy7fEfr8+Bb9Czlqiy+9x5nAlq84Ec+m+wVmVA3Pl8hLXVUahaBEZQGZANvPMoNGnvHL8hs+JA6PNTqn0Ua+3IvCn4AidilIg7zmyr9KjGnYlHpxvJDc/jVdUHm6Jy/w2IQdJNxWwIn7e4TQpYX1keGBhDZz/Ptx3TrofLK2dgkKBK2garuJXmIR3X/xL/nAT7uJDft43ZFWt5lK858S8kRnabJJ3+13iYTaJ+z4xxDTn4k0AMxJfl7wBO4SKW0bBX/TtD8FjCioryqf6Q4YbAT2Ze2j06a0S3lceqaFtGBpYkfQ2u1Zx1bsq1b98DxA6UEngFVwbKv9JHGAFUIQkmGNPquB9FuTltOUu8IdBu6wMfYvIwxRgScPACUX0zCIylHha97W9cMgXy/i0ESkZPVsrJiZNlaCbE9WMDfH0jzBcdYV7eYIMkIdLvBKUKmQxQ01GJ5sh3U5RzJpYw8FuWdKrma36av9Eo07x68PKqq2gbYZAOv4NnA7pFlQnFDBcOyomjKuKu7pE5315x+5eRuMC5y6bbeJjk31C5Qrpt4Hmy4xb8J1O/SjoqTOMe2dj57fCazaTAqOtTZWUccW5/Ou7HL8ozVkeWmvVFSlKlTlN70KOK5jkLozUliBv9gvV0EAZluuhyG9fp8T44utp8kyTkJcxSDQ5dra5IXvz8EQMfcZl6Kc+aPHksF5K0JpOdgafSrqQhqORbgn2uA/8byKdZHiTrFTcNIVWEoRafgZJhkXIWT0YRp4b17C9FZvgbBqhhkbIxRtUlgcMZfjwlva9JmDRB5i3265AybfmZ1z+SH5fH7kCWXiNvwZRh3/9ZOP19BrwhyMtyzfPrneTLwWH3aqTueUgJCFR5RnqTLhmgBh8XZxLR8IuTgXHiJ/Vx6b5orG4nDara2/pa1E8HYVXcbjWD/AOvYeVOQ1OcvxjanbF8VqveY0ufskyecr2yg9vFLEKYIj5x6OtO0Cud4c/qeaECyhFEuVbIz7Bo74jVLFV5e89y3dxBa+ok4KD4/5LW8fBbjb7wyRqO7X+PRW01jVY8R1gynrR4J62lUyHVe3A0d7lXlo0eb5Jd52PByGt36fnK5m9
u+MYBn+a+wFS3chh/KuVrSuvMH+hTzY8xu7uOeAXWc0lnwrcdko8d0UfGoCan/MzlKMEwe2uxtRvpCEqxQM7i30n21Uf1069zs68gFtW8oNBpJFQh4/m0+t2BoNNhxqv9htdn6WX/jWBj5S5s/F79eIdvT+V8d1tTEvt7sQg9TawuY2XRUJWiP/SS/N6sBSZ0SDrue8QZXFuhH5yuIvNyyRSfOqXdnYZNviBtEmeLwZrywv68FZRsALDwyoVKQptgIaJLqDvSe2WRo3UW9P+FMkRizv7ll+SuBI7Sx8ZOLo8Ti0amYNPHWofPpUbZ0ZHfQa3I13k5MKhu0K5sadLhWHymujfPvGij8XfO4+jcSOShVJPryq8l/hJZWoPdvqhizpxFvAHb3bkNC9Nu/MgROUQlQwBjlruA3aTYmmiEkEARX20hexke/iKOVJxB8o3+t+aldZemlGbdxXYGxRLPHhQJeXBp26HYkZmmkvxGn6rbr42ejgDU1rnp1vs76tkJEpfvkBDmMOFgHUuJ8gL4THdu2Nk4AEEylUqqbX9PLuwCshO4hLCwJpQkQWrZHCfYOxSdnSZWujtnjQQ06zkYEgHpFZczFo8cO4B1tlI48UwHss6STQD1Rkd6tFPxC8oSogt+1r+6homab5f/AjfNWgNt6W/mnUkPXsLi5owKjTk196LGG8kBwZBuZs00yWNQgOWnBZkebDB1DouhCBCPS8JRQn9LEiYlF0LgzaozSGz78Xza4SOp6kaDIX7OnLDBbwpUrB35Ds8qsjeaY+2C/1zyHG9ARZFwVwZEmE45mpYEPWu8CBqD5x5QNd4lC18sW6EH/FJ8Jj5mnJidrajYYNos2bA1odYwTPM1FzyN1IHnH35gQJ23TPDA/ThXIfWvkjWacHMIxjnMDNSN58BEmXKghviRUMehKa1+gDprsjy/o7rhrmZCzu5jfnjfxUpUiI/LMYPRzt746ri8meF+pj/krqR4NzDVZ43GlMI9F2bsGoWpYNdfN1J0dhAk21c7McxJoH44d+YUU/787oIi3N8+Kg3mqKjyWfFKab5h26GkCxfkuuAnqi+CxRi44Epj6XFJJ7wMuEl/0CY5+V2X4rpPTATxnNMuwfRh0KOpAWogQqYTtKSUVPy24Jdl6Wb+9B5PVH9SD1NHtSnExHZKmN0B1HvY2URalGKQEtpfgamk5Qs76GXmfaIEnb4HkT8AJulGhrFJGQZLNKjHF5bQJnMKdie0x46ygNwUiNEhrwVJu8hrUlARFmQpm/hNqhqJCae1NSDCSjvozQkLgMh99ntzrvKnXR371fQpMGuBSQ/KtEqXUR+ZMluKcMjdDnglvE72F+8psi9jJWf7DIOAep2BCvGtDhZlK4ujrDTlDluqe5U4cfbyAsFc4LPZSTjFkGzi12jQrwHnMBPgMUo5Q+bLN769c4gSPtELPXwKMK6gYnJVmFjPbrqDOrG3cHZ87dVHFkWK/+dkhqlLgwr0wlO5kWtcL0r6STaZj5NQiAa1sRP5yIexj+MbZC4XQG22os1b6QmBILGPwrzOvQvYPEdWuV2DCK12OUDkYYfQQUO/mHfiQsp3nQESO17RfGY4JfXy/5U+EsybD3bdw3oJ0rODAjiuc/GpfGqnNwVVaM9NR7/An+GSTn78OggvdW7UUT4wuZqg75+8nVYXECEV1Dr3cNT9QfUyPbkeYmfMee3kKDhCWlPzn2xJ4Jn3+18MmsJGm2C9DH6c+c6ZYEMUXoFL1ClFOwWhuyrMqPj+alU4EXWPTUKNk2HHbqMgfwyN/8rNMO469v1fArl4GMu/OP76pUHmIYvZNB5NToJ20so23KvcVRpDoFpo/g8U9eluAoEE36VhmcZvOHTT2ADMU3HG4Fsdf565KfC2JGchbdkexUzgSV+WPO1iUDQ9101TAuiU0xwXVghlNE8UQ1inBYA5IvDkHgo9n3JhEgbatocPLMPLA4ELoYdKBGpHo05iGqrQeRBvIkXXf+qtoj3/QiMbySspSJ7SOGSjtCNB0W2QUbI+IVa4URPcd4JubBhoec7Bm4ojgBuFriIzof/CqShJGSRGQLPO60g+NbhXXb1vv0X2VBgla8o3ZFpHcNjaZfR0c1QeMgSBXSM90ELk35s8gC4D/8sJ2I3rei716/K2Y3ecMX6gFR7GXPwJkmeohYDY4qjx4a+UIGoeycNa/iMMQ+F1O3+wKNqdkN9nihOQwhwQVLe+72xdlUqk/EXsxksL1NZ50uiDO+8cm1herm+eKpdJBx6lDM+ig0u0KzF876IBh1RfNGLpih2Mj8QPDzW+DxRr0PW+BZIVvAJXY8WdibetmLxtbJ3Iie6gNUsgpwAUSK9cMDy8Fd/F3xrAfd5LKS7+BMOZGmBgIw1cH+plzvtkNkzQEBsXqWT44jRKuOe7pmJwY8IvHXvaNQpZ+LBUsA4e+XbAOoEQXSDLM6uZJsgXNjO/PzIDWuQj8EBXA6sxMVjbnmP5J+8Zcn5r6TH4r/sRTK83SzsDaU0HURr+KVX/DlEVl/zJDKot4Mits9sQ/DEaehv7+a3mAuLLf2QUuzvi9lAiP8ZpI22bUntKs/H+AUkNwzWBDKEHngP/bXK1o95NhrCPPoHcbaY4Y575lc88ZTxwbzAQkzchvpX2XcVTZu//eISoKZ/K4TcltB+iQaAOURp8tKSLYXE4TE86rXyQEKwipRmgCvM5xPmz0syHJ6RV3RFZ+OCXXzEKod+1Z5w84/UcVlEGiMHnhPWdnR3hB0eokGmGcoPza2zwdNtFhpODXSD2AVg5xnQiPA4FZymgXV14pdc/tQMB19u+NXu42F3g4+DIV1JhB5TSt3cHB5nAbYhmbDb+XORNGr/HzRdx5bjug78JeWwVLRk5Rx2ylbO8euf2HPfas7p6bYskihUASAwaTmclWli5jeGAKhJtZd6MCx9rkdDgLAmy6ciyrdb9O7Fp5hY1ebv6hWtffUyceJBlMk5sH3ORAf52F+5rJDPvazsqyI1sB4A1ohOtSgV/WBa/wUN2juTWvr2N/xIaVhpiv3AZmnfSn38NLfYHhN866uXgxBMsmU5eIz+Mtp5YkcT95O8b1GVEqX7LnzPvcAs5i825hM2wT5zW9RXzHodiNTEFpXEfN0tmKSAmBE/ZGutu4pwaVUV7Uaz6mqzarkUBVEvm3qMLIl9FbHi+V0rUS+tyAKXYuWp0FoP+kFp+4FXO01+XD4kxm47W8jWVMI9I4jWiawsfM9oji9SeKlhSWI0yf8dy4LLWEf5tzuNiOL+taMljoPKiMYIsc/SB1xiWBeBHaWVLiCmcVa/L8v+JGqyHG5BqQLhHQjbJK05QkAaXryQfz/bAzF+Vn1g5IOhg8tV24S7E0UYPobXZVGFAPq3mxNc8YNuBLute+utBiHWSEXnpzL1cGYRJKRnBNh84kNXiCDRrNPy/MnTOnHXPv+5vchFSqT5A9VYodiHyYluzdC0UcfnMEbnl4bruX7aVBs8PZdwnVXcWy5vcKztb1OEI8vzUlEGv0OyDebrpdLs7ehUymimgTjSNShcjE/
WMVL5g57fMKYx/pWMfy3pgn6OoNuYUde9kfzRfBKVsi0WlAn/a8YfhpftQZwkFcmXYiKJcmAATZSzLOiqp3wvp1yoSg//1V4Nx/54i6Jz2HTR0ZBmwwWslIZCmTDbKP5xpmzHCJc4EUsXbC7J4L/fB5PxbqPLaGLwhkSoy8fjDecBR43x0EZLtRdnHfSYov3Nl27BJCyxyFH3z50AK1agYSGxk4zBIuKoEg/dR5sicVO+l8rsqXu9DBEoQkokpX+GLjJOnmse1w30AFAnDwM02sjg6TVg7t3Mfi0uDk+jMEPrcrgkUzheRpseAC5FUJMZsgZlsZ679cz3thNyAy0x2F0vwUuZcXy9vP1CnYzq/IW4osj4qF+fvDc9PAqb3eWsVjweU1IfBbnBJA2xVxwt1FewYD3S5FYfCu8qEqCA9jA8IAPXgVe9D/DrWfnxOr4S1Jrngo88ygUb6dNMpIhpXjSh7odp/s2qCPzkiRcCBf51GwVpQ546eXljBh9NpljCkX3sg9f6Yj8fpb+hdTqjHbDp0oS343wlSzHy3rtwEGbbisIvj2SOpTk/jgqf7AsZ51zsdQZKe2sbACgPS/LJZyuvPvWWNsjfaD2hELVFlXN3eAX+K2yvkuQPCnR6F+NXOSIasFfUCCX+I2QYCcLgV6fwyZGF1NY8u0dXKOhOzxoic08N+joaPvhDMDK3YSRFZmkhubk6np7bDMuQXk6xeZqmTcnnt7qgWTlobE25NAinGh+chsGNQ7brQETRvt9Vy/zUbNYf8tFpwRTb3n2ZbobZDGJ5RhDPIPYqzuk11nuDFsV9zqJzlaa6AWvARnUgHwKRrm1l7gjEnPpyLkri6W8cFXpG3nRGwMHRUa0fHmaz+lu4gFcWE+XRHIdpqVn/BSy+xWlXZXXhso9mQ0KAuMTqFqpdxps9YJM74rDlRo5itRdNvZ7H9KVH+nlztQUY/tkuIyFMwE0GYremmLFIp3+xJAFh+ef59TQjS/WDqCToBCNKe01TvJEKnHlhbsHNgzf8zVcHdmSVRvPKkppapws5H+kbjXpDOF7LKXPRVYj9EyAvV03Y1CmjA5QDU9RqZ93BYqHABMm/vuISxstDAaoIdb09zu4jD5zfrah3wGKwH2z91s/WxUV2xJN1Djf0j9y8chQ6GfH3yeNXKqzmd1AVyZdwKjopAbCGx0uahxJN2rP2KVoAW+EMPge50TEK1nKG5E1QZdza2ImEMyYKOOeSLssFp+tIcKBQ3L+LnULK6BcCRXW6akic/M6zvVnfkxVeMF/mHaa7hB/RdwwtJZGyAtydE6sXnwuYXSLWGewq8PPprxbiAKmqHfCG6vuJK5Y2n+3H1NEItjckKsrYHFo3A184Ui2KvJU6KV8EBxkU1IZq3PYhuKbJrljBYerwxQGR6TJpN2JTV2KTiT/fhivuXLxc19GC0A472csGffy8Fgt0QVNkxff6nqI8O+vxN7FiaI6oREuGcivBByhFtOpL0RLxBypbI+dmYcCprZoKLjKzHsT00OPcozUau1LYykkmguKY6Ji/1VARGXgm3lULvrbTzkcY/ICrEJPuofsAqUc34XBzDIJftxChrYl/m6U5h6ckV69NqImdy0eFEJltTGtclufP9/E15OdaWmlyFX27ESFO8QTfnQYku6w+x3f5zCM4O6uG+US/1Ijq7Jx9JJ/u49asRzMpDriMECvy70Kh4jWbX6U07S5EUkU1+UoDl1mVw8j+iI9xE1vCZiyCzf70+qy/yc4qPr3csPKW+eVLT2aQ8nBZdtWm/A6D6ZPsWF6Be8CNaiTqUv/NWFhPLAfC54pcm6YS80y6rRddqAiMCeSwlx82f4ulY9nxl6TmnKBhUyB68tlnmViRrO0X0Y6bkUOrByU3U+pkZw1mNScKsKyUNJjELhERY+gFpMhAHdIX8OuNhZTl342wC6Z+eP0973u/5yWHtp594ud91KWo08GUHcNPxZO00ydW4H84A2EQeWimYjiP4HTpyQU1OanhcW3AbvHU/mXup5mGDqL4rYTRrSMTPT+0U4wi1cl3k+X9FRAIhgErp9mR2oEY014aD07sxUUFfwPaAImIPFUSBtbHYiUCk0XFQk2Hy8eARy5ITSj32Fj2KOExQfm7I83c6fyZjireGtcf0+QSU/dboEZekPvEKuQyqNvZXkn5tSThyg45KrUexbbqr4TK0mBK6lvgLqLczAbvF4QKBNXt8En7Ot+I/v7EXlUz984KQqh3ph0BSpHo3t+dTso7jXgM9rqRyaoQpRAzm78Pdpmoq0akmnoFeMgNJldz+aLcZTCslw53tJU/2zkYPlMj5zxLbvj1dYSET62rgHfTH8omUuprwmIqE1b2MpTwplonKOpWs3BFjPHW6XrRMssLe7ivKKa6ng2ISkm3gd9GqsJMeAAoZ287sG53g4bsB0tir+OB8f1UKX60X7e0v6kBHxaSxwxM6Ne8hwuTNrEZWW6HYPbHqXZ7Jvg20o4TD8OypgAhMuFSlsJNVC/Pgnz0r1UCbqhfUiaXpId+H7SagrpRNCdtfA1zHnCScKcifpNmK/jL6lBp5a4/hg3+65uK3w8ziUq8yTA1Hh+pakv4L04ja05pkPt1p9+dDcFfkAZeq8xdv759eZmUMPxr5suCoyq8HDzUiYgCCjtAc0gY1RVOTmRTPa6+9oBk1r9Y6biVqs81+I1DBMgt1UTfEb0jouzsjxyCmYZRoxbSEJ9LZ2IJumymq3cMN5RgHNSzFxO5GB9in1YyIBq2Zckw6xylOM4Gu1qKLp9ubSrGznzifbR6npVCw6A6EyiBu2z9H5n8xgCHYb+0sr+7p/+EaHYccl2cqThltnwPkE9AEA4iDCAiQqtzozABJakGmekOiv3dOXfKoEETvGXUrwuDvVjq4Tz48rvOmcmUESJ4B2LGzsuso5UeTGxGpgdn8UBSQQYr8ZQt3T70foKI6kokg/x0sRFMJf/DchOz1QqWNT1iZrT6rdm964AfNTrJZt7VZq3feVlLWvJH8naeoY8wOmbxJVqDQIcVPJbDClO47qGs+2+fNr28EoO4JNZIr0e+PjWc/1TZq8pO/RE7AN7XZql+gT/4F+HHv2vyrE6T1HoejoUJF0WZ1xTar6wDwYaeYVaBn4xwqZbKw8D57E1Nfx5+0vC1AOYKsTw4JNQpyN+CDZGXQHgCIDUM+LHQlRf1/HV1ibochKLqw7C3le27qc1O7HPR6vi5Jp6mmFHxdfeuB5KIqlnIo3Y2tip1wHFbuKtTs9z8u3EK6XyO6mld2Hhrg+kNzz72t4q4hNpZj9saFliKQ70U0tl0zXc1PTFrwLzogvzBGP+J1Dn/qsTiNFpYGQV+0NcTUxDWM6ahwa80QHsnpfY/2aYVEVA4P8drphgmvNcrIz8T03CY1bxW6as5Mj0zEnnjySuQdZsD7ZOXT3Bfjjf8jeCcH9xOU6urZc+f/27v7p57pNwQYjUD/bVdSZy7/shCsz9JraBLQL0kyL6q/u/cgKO7/33UHo+mSJpHvBOUYzQBvCxTwfnHjRZYepFqm64c3C
jruk6m9OfxsfHL2AfrnP2kiGD5hAN9JXtyvvx22IBVUKv6yur+L6winRJiFNsnZNeN92AMoiUUOoj2LznlvyftT7F195qaRHNPv/Dvvjq63C5EQxdGkFmuVt6NVMKKFjRrtTeP0h+FRDBtzie1Zle8PKCCjQ1viq1QsuBXfV5kvj+DJyHz91WVljEMNbTDU2DxXSbepslRh0nWfl0bG+xjyp9eAIwkt2B5mDvqfTFJ/ovhNytQEwSwod/1e9/62U0Ba9sM7/iRylAg4hIuY14YNNvJdbzutJ5gjfmz0bVsasvSlMv3WLskVpSpUhPPv9V/Pc+UEAOL4WeszKXeZ7y02D6gGwvyqngx3+lOVmBiLVHPf4la7ji+qAYvJzUfxcApspyYWZgSvElLeGmPr26TsZuv3/qv3ntZg/Bh3Vcx/sZeO16CYHEdZGjQxDHONsANgU1fAt7kBqP67EuT+PCI+ToPwOeLJqRu3Rc7u+Sb9PWuTVwQ6QMeMiAAxy7k1Fmxxx8VifxdkcZOAWNxay1j5aEPx2z3/KAeqZ9sbj0++YKfQniAD4aK8zfs/KZiSt1RS09OniTnSFqfMyUsyqsEnp3ApZfE+wEKebdPhM7Og1VhcUHro6PHfuTHtgKZO8g/Rw3wucVGOvxG3SiGJqm40E8OTxwqxJScpF8j6hRZjKr05br18JwhD314e8UrXpFkk9mXNRqVXic7WgxST2zQBGq5M1xcQqAg/7okKGQkDfvBU9hS8HQEpqIDLzTv/czcYsFF+oxzPJY2UJCIlKEKmKntXAz/TpL90RYxV5UoAa/BKt8qTkQ5K/7SdsgUnfIZnu4L2WEj7PII1DbcPbqXZM6Z8iWaIyb65QnpQozyBz+2Aej84YJ2OeLCWn7L91cJoNqvR/wh7i6BLX5K5U9InBvEypUEhsmLE6nSf3MwfaLBg/qXtP61IoV1MJ9uKwp+tSaKQgzS+xqa6lRfSB6XHs1PYp021IByuZSv5fVwsGeeszQ44PEr0tJHSfvN5eOzGqLPLjQq4JisbVZfobgmD+x5EGaFcfuaUUb6HZy4GnL8WUMJXstxrowfu+NIjzlI7cSwVuMd0y2qWFJP4TY9TOiB+mT21PM1KanbZQSRIlWWwGL2SISoskqAD+YdVO621CUQgyMFbBK/HtX1uyfIldf4J7TrBzEc7clTFxP+ZPgLYccDiSErgZ6jIAHHGqyguvMTSGGH4wyOgC9flSmm0OEOv5+oUAkd0LmN+DVNdv67SgJZOnf+Bf1JxCV/LU27D15Xb+DpxMxSehZtFNPeSlTjZ/vKjWFZv0CkFnKRdbaM01u5TUUNChiL9NRZWJT4zQVlm8ZBs3rwfHz5wHKYMAmuytFUYe6Xft9X7esCtaQkRM5m93H0psFR5IvHys/fYnvsy8hoCn2ZCFx3wY5AZVlfhkre/T/0+WgrFDPn13+ZsWReShH5HI771V6jP4JZJMgwZoIkeByVjPEIT8UeIs4TcasRwCnyaHsPPMeeJce46/aI/G5FCd0A6MVm86JMz5+jJnA+Xhmk2U/iB2IC9K9RbbjdSiYXUQUS2+veNIjOl2svsKZ4H8hlLB8daHyTlCQDbNWB4qZOqQKCK1Hg1SxbPTUHDMWcreg0fz4qkTyIjYD3K6VXAlHiGEqhbE/WYR267NA/7zqi/PufUww+xAzCZqoSeAwFQUxUt74NLzcl9P4MwVhfzrVc9sajz4Kh+6hkMKVorwwhZB/OXqKD1EeJzmB/EVu8Qjg1sE7uo6QlUEBvfOSucWksyBAhj7Zsv35+1rDnhnnRIDLvrO2cSJrdfHtU1XWrB/bmPZ92NvXSlyf8m2Uu8SnxKzDCNiP7aajYWIoyHbh4ckkbqt8Kv7AEwRRMrjujCRNsgpkHO3JsiZ2kmYDS7Uk2GV2wT4aegjYVvWoFN8ZBpKJZrdiyosJ6FkP89bnhiOwaEk2Sqm8KQnWQXjQLUsV8C4UmoHJ4wThS9YGSHaf5nWUN5ge5wxzt4agyqrkAgPykemRhDqDqWfghuyOiT+AXmnxcgL0KA3HvCLH5wdRFyh+/2sYpOeFsaT9j0g47EYb7/fvWkAwmiYqcYk46gY/jEGEzk2Dn11O6rEFMR2NHo9spFUI4Y1oWJVf/xCC64wZpiiFPED9IaZQnQIW4zBCdFE58JW+nW7Ls0b9zf48lH129kzt26duT9W0X4tvZ6eGNCSO0v1vd7B5b7IwuWgsgPR/r587WPKEQd9zW0QNCA5iO7U4XQKI9/5RuBouA0gBEKBbHt/YqGQ1CGYrkL5RpIVS9ld9gmudFYALZrHQBJfGP8eb7FZq7Ezrcq0RnO2X0k4O6A8UKYkExLv1dxJBM/Gut1AekhQSs1MC6gjxjyQ5/1PXlHplXmhSSw3A3BDYIVmFPPJvYnSPlg6jH6xbYZ6UpWeSp98CTIwni2GkQ7sh2FT5D+As30aSp6uHfNefg10Zf619NanEd8fFSqg9E/k3Co2+N7Y+LwvKqw5nJ0GzWVOg7lPKboxNEz86ZqMnIX7ZnvSIvRZNPPf+eX4q3n9wNvYYQh4i18OuGEEtZAiDlmV+TwacCcSJtX4xpT05tN+Kncf/6y9WD/gsGgJDHy9tuXEHQ/ffXwJcvDThSgdWdf16i6JOkkVGI4wiadsvrfEnniTIKIj29ETz4Za4WQVJ67JSTlAz731hPXVUyZIMU4M+JAOp9r/2pXWaH1V645k3G20mGICYg8ZlhRjhmLvSK3Jx6fk0sfWCJXghQcZJBITljywtnIGCEA/4Td5Mv5YYwZFt94YD4g1sFvlkyLOeY9RVrgeh0B40sXzo2qP77ApVdrAixxsiEZtEXUP/ytxsELPcbLBnVkiWhRN+fi1cUaqAjYeb+yigARc0Q4ZZGdr4rs6QXhfY/V78t45wdZUnaFChjKMLbbV/9f/u1DV4pXGHN8Z5xmeeeFpXk1Fwhll/vBWSWUxvMp0RqlPTVe1qS1vjQazLrSvUd+ZnJfoz9YaD1aLnLh7XY1CFySz9BLacU5HGuoDvS6dNNrpLvsS2OhCb+Ggzqat6cF5NXcV1WoHBsnIs13L/4Dspa2KRcb3HaMfB106/5GU0qQF+4B3lApXgppsGGrgOP6AC5fY3Dn62BfLEox2Jya0OSxu45SMZFjHq1+4C4wRKmljC/Jk8BnyTSmRMxGeUQGp7tHDCqLQIzcMXx+mYQYTNT8SfKb+Wvurmn2y/uZjH1iuCIFkdyJ7zv4DzQZu3+Ys8OxraliwtjLr02KpWLgDPK+JA6ej+jInbXS66aJFseX8PeQw0T7Sv25fmid7J/YT+HcxqDt/6v49RNlnIKUpMx3uoxOYc5n+gF13w90wQyd6p7/t1Q5Gc7OCUjJ0g6fFtnFgP9Z8SSkH7e/SSUcTmBWsldc5IkR6SfMlh5fbHQcX4Rc69wzw8oUFgB4o0Vobn4dzk+9tnJMyiMFygn20S2P9Grf33BRYQDGR1lpYPoMvOtxKQoJyTaEuAcfm5s0QJyp0tQLK4Hwww7zfwLp5IS2Hbc4RPbek6s711mySkks
W3UZyxm/Ep5F9SJGKfncWDG4AewBQZwchKYdMJCgN1LKbyoy+FezoZ9Aknm2lf6gLO+Jv117Nnffcfxuosoexm/Ebsv2i0XTfEFWYXhrOzYKgd7gKNpsEjDCuWxCAf1TobduCOPhhdHdKRlZrSMsPoM3POatR4okMdiqBmLbsNg/CkrTS2dj55D6rzZIEAnIgtRBbL4B5Yo3i85+hlAhirqwzml8kb8TUuL/PZZ+RC81mNhca+KpnlXXQ2WgE4H7+oyZ7ZSnHGCULpBdGtx0vXVHUmsNe498dSKsmthRC0EcJfnSuy61iZKGqf8EVbACWw7fZIurLmKPRVfRSjEMwQ2hBgvfS20giowAqKWi5qvLAyXCl28jIn3zubSM7WcW/t2vk57w92rHvqKZoT7IODdlWUsfcHEQmHuZswYqMMKmAQtsPhcgAsQ0KZonPmSlXg9110/iW4DafAQOEEFwbLC6bQHXhEyZqcPxE3g9ryXoYe2uQxTCB9oeA4KQCs4QDlIuTSGWn+gFFlI+ubBnBORcrE2hYn5s3OD1eV0gGgbW/z8BB0b8DCHVsiSyMyud1Hhp6b5K8HgZfob/Hc49YZPy1Oski5eJ0WVbhztFKdmWbOT+zmlBj5/q+aVtl+Pn9MvcTy//KCvAnERZyJ9W8mms/+ULabrXg/pum2dt7JRQh93nrtgtwREmBIJfJQqV+jRc9ue0jB+2TbrQXXYmMSmDWYDiFG3IJygqH7AD9MR19eqqPTL2hzIKZfHjdpgTHYWR3I1fLkPoXPkd5uAL5KOSLCVeYT6GwkT8UqwPy/Umv0Pz0k7FApBwYOT8GEraRQxqwXfbWyUGwm1OI1ml3C0kr/M/Qdw/oftNF5pPvzLKqYHr2JamBb8egbPxI+uq35KRPDEveDR92SMGqOerqVVlEfNS4Jia6PpjwVTKD2Q61+zEhD9zIPKJF8hkPZG2N1dq7jY/sxJXc09yqleTc358RNme9KOXdcVyFieXk5YEC7PV3krh2CvoN/DVCS1DsukkkY1MwFNBw1erig03Q/rZzSDmCMJVPHPJBYQW7pHc1XWJVx/s2rfFoJwnYC7YTR67u5yPrZmP5uOx1pZu7n2ab8iapxgtfSrfElZdMZ+zUHBPEtP3+8rixoivyGVyXXNDMu/43EsyFX5r4ZHdyzXzQHvfQw4RCUcAkTf+OA3YAClf6Qn9DlTf0C+n92q+a9qBeWW4m6idfvcSxnRX0AH2JMFWI11+QWYG9XchGxWYyNiUgO6cIlZjhwb+60YszK9cLl3/075GzzEPLeviprFQ591CdRL3JpMFIt71viifBU7py9oMjsQQYVU+jTY0av6PdSH+J7uhizgHBdqMp7uJfQTD1dCdYmb6AgLTKl0IpQ/YGltOIrS/Myn4uaTBYcmo6Gd7xcrO1EK4AEmvQvBz1e3N3Tblz61QbhDKgKrt3j6W117z8GRsOmJX4pNW1M2+Jzf0hiJOrz06PX4gPdssfHKRdIzddWUu/ykTO9REWYdkp342YL9arT39eRnRaMOXoAdC0WHaNRf+tLvtaadC7V/CpyV9y3YHKjHfcPClL+ybKcH/qOMUC+JmG7iEI7a/0akai76ioPSzdxK7GuybOKQ5OsaaAxQQ/E3vpqj0JZCI40g6HziE3pNMZIyjGmuRK3oROP7AuT4F21andgzNi0UIE15cYksDwF4wQTNQg+aIp/FSoMK0s/vmKW6gC0MKIGD92Wh/HsLKFtY6vLy/v3y9jfqgrKejO8yMwNQxw4K6UF8JGtn/BqxcEJOFycFRckDtwe1Q2Iqwlq5gqScwAULhu9khL973Ix51JIB8J3cF8yxE+9vjqFELqrhX7ZfpQNiCnb4Rqhfxi073URSlYkICQlcv2yb/3OZu6Ef/zjHv767jxgtywal8af7yepKPkC9Qd1iSX1b48zv6JLngMvxbMCxzvorsv/i8SQtUI9R/sB1pRn0inzJXdlcivY7SDqGEbWfZKqW1G6PZBTU4mzjbYTk/a1yEdKymS4lcU2TuzMO7LrhmcyL3F+ZF+F+0szmFRQvkbqp13oTztahwUbbLuXvizEhvztFgluJFZGrX+yKWk0C2q2EvMWRCFq/OrHs7yb4+JU8zi+1Ls0h7djOGdcXqIARHDuLdbrIIlbpbci3u3PatcrrW6BaDi+hh9GnShMgH2Tfnjl0pVubLy+65ONzDdyalpTEh2swqZUFBKK0yk8w7dleGocVgdiAeLIEEyT8g2se1IFvt5V0Fpm6IeTsPSNg537ev/gMv/40cK1VTCRjHAMtgHNzUYb6SlOnGsnsyEsQ10jIkrxj6Jhxi0cXejNt70NtOdXDHVuF7PuOPSx3n9nWFOgw4zXwylicXahM/Rmr/bbnh/ucQAw1VJvCTLzKl6Td254v/OHZWTFb5S/QNzL/Jps7yJarC4iprqV2iEvekEdO5T26n19sk5KtN6BDjT5sWy0RbQq0T0uPHLJz7KbHRerAgwk45ge4UjDryxXSYd/R/VXu8PSteNTK9xjcBGUh19hdZdq6Jv0FvEacQCiKH+ouTuL8a4k06DUrEdQ0z2T5qHJ6viqp8UNfr+0/zi8Wp4aBKgPP2BNqmbx0yihvBYnp5a9vegodVe4UnCwfx8Kt3t7un+S+F4cyumXOQjd8je5a7hTv7EQwBrTjd+TqyD07Uq4/NWLP24W+EfcBe5bEw7Qnr9ihMD6C4CQrjIq17prtsdXpAtguTxU3B8sfxNrGp1h71Fwfng6XUJrFLgdiscKtdNX97YAWkWbgzp8fsJFGZX+W0OFjeVTLAGuMQfDNi0EoQgG2F6SdoZwxjDV+iniPYTY2TUg4AfwOMyN9X6b6zLXZkXyPUC/McxkhT/Nf+9L+IElyOyTUUIinKH5dbwV6H5TgsFr6WPqXm9PtcswfegEb8MAvRbo+ne+oIhqO5HPK5IoI1afy1nq5PqtzRaPWBP5JHN/tHqNRRZoW+DwVsFgwXUBxjF/UR26GfN2OoFynt6fMml4OTTw7GSF8PHyTc0M34qe5F0m2wd9t3RvkNDrRGlBQDRQ8U+p3qD3Pg8khlhWqaO6g3qj00JcD78V6zf9LhUGSzgIG3pMpo/w8Lx+/qiM9ej0ULR+ge8xolwBquA4u8ZEqKdVU8tPDC0m2QsKm/qIFeHeRujJj/jn6qHXjFRojuiKQS9A4Orih9nh/HghVQJ0OhjIbqkTI2HbrOe93L1IFiE+qwLzFQKuVbiLQyIPzHUKJK/Fto8gNBad4hPatkNG4WfZwdpF/7C+SwE6qba7XhBPHEs3mryyEtI3f8ciFiQZqWTuNxOLIAwm8RBsUk8iE0pTwjOSjm8g1uMxiMeuFCpkv27GPNqWFS6E7PjV70rYx5CsU7AX/awhWgyvG1wt03ow+iH6Yf9djPpHgA6K++qi0UX62Ja93rLXyhx9Aa0LoBqE49TBJzV1y6w4WVJdtNl4p+CP/ETMIU3ygpVTGRClr8cCKCgNRH/2UUy9l/GsX3Oex8/nWm0eNRza2gt39yIQCoZ/2UBRo0cig+ItQzUQA
dpfS/3SzrW0j33QI4XxoD2o+3Y4XFU7qi2fZ4Tm/UO3Pp6aIqXBG4WKmvyvFOC+tO7xRz0LPWozv8VKiHXH+fH2C7NANoYQHB/irJV7IcB24t/mT7K23LaVGqomPkDYzUIongvuHLedebCZHWj/9WZI9hTsbtZ7hiTR/auz/8opK29Kujyun3WwnGaB/9rkfx5VGdhZMNJu9Z3W8Y6wpQIbAZBUuocFpAqGQlorvg1Ldg04ncIdH7Lg6+i4RAIhnAnfnTaS03BEceFVSfFB40eodWsJtHfj1WPREfI4/Z0Rn4XphDQKhymtKYTcTTgjUVnHOZLbsY1FIAhipzWuppxSGZICLNju8th6trc4BZcFvOxMj+yB7XrqvxP8LwCbmZ4xtzh+n18dSIGRPbqjviCpkYQte8ASCfR8ZL01Uo0O2cCfcFWhXonus56EZJiNO7r7gNcq/xL82JlT5d2/ELfZk/evkV8zzgX0LSiHm6HK5WABaNcFR0gOI8FkmGAcBJuqHXQG0RqRVxv90Crmj7AFjkkg4QvWNbQGQv+J3FHvEiFbDfLLdRISRDhdRibiFH/FOfjziJ2uAtAHcGkHWO9MLpSMs0UHiFxLeH9BSHvoWNE3KDf+1nsHQFgM8DipEq57gHgARsc8I+NlKkDqZeWkIk/DMd1uOGW0ptqrT+VyH1fWlcMacIXpY7MqCtImR/PvyJWGD8XXsL9w9uVtAsDOqkXqJt0QJ8QjOJpUPXb10hwEFXi3r2O8q25lon2Aitai+EsP1vkqqXXdzUvqymfukMpEY+2IlCMenijG12b9uYdNr/JvdA5liFq1xHe0V5Oc3WEqtd13D1Lc96LkLfyWDJOowYB2wyxLEkPZ4a6xxuEfJPPS0xGCtzI06ZjIZgf7uIHhaw55EFllfq7a4zNiorSua6h//Fv34L3hQGtGUmkclQWacOfWT1QAs1/oUbznAS09PHae452gqVWJGX4k9B/05ZmWG6oR73ga2v6qQE0rcNv86QeV8vIX29JlSV54/rGbaZA6Wr2fFnL63E9MTMF4I0ApVfz7JswrzX+fPDq3zjc4LsXRcTzKCDcQ1Do/KthaKMyTgvzoD4OtaB9R19O4REdrhyyeGD12awlTu9Tk9UIu0ZE89uAiUf27k7FWD9/3own745sN/eatDd9P/xbjAq8F6XDdVHK1yD6Z9hNZLOqQYSI4pzFlvP5ZfCuFCdPAe/VdZEtEe1jMffzQkEolEOZngAV8uAuvGsCV4xihL3vSSAvi9afncP5S06g0E27E4IPUjCP6kQeAARvYU9YEac6P2mzfRFDQvUepm9ob0W47SyTnXRiWdjosHyuETSC3ugWeSL/LDgTbTFvIqlfJ4gdqFEdx1AI2dWRukffJrmgW0glwkVWLeGKJyZX8iwSw4RnKKvzoQIpcyR1KqHdq7nT9wduiEfj85qdDv/rLsHXbuHCWfnuJRu240e8UqD+bc+v1csebek72bUH5L4thYcl/TJkxKjwrLc3z7vBVEA2TS+kKVFwk2eqnWjGclgSHmvpD/8nVF3pEcW5LVIAEcG6Kfm1QU+Y86HNx8lOvCb3tC12AaAjgJDM821GdOlVcJFau4LUBH/ogvrs8CJRbZbFN/ZfxjNoXi9wfI9TrmcrV6vQMtyk3T5PeT95hq5MGnMLqDbsGXYweSxlr6IbpgiQ0CV0vbMcgdq/dgXQrJG8tWaCld1aV6lH8pr2dGTxckLs0haaP6pwQpEpH8IlvpDI+LFYAoiaoWzpleTd9rRxlE8UCTUlGD+LAnht3aQnA5SgQ/m8Mkmz+kzoaIeXHWpJI9BD5g9timhfq670UDpAe/vYwEv8mxGTx9XcDExYCE2q8eVVV9WOhCsF4FP0r52XzJaTP4wGbQ5DgeE1HBs4T8KUrUZvezmp9T0153Ii5sr5bZx2hB7SsbQS9128AOhHR3quwKy5iGUWZ52AcaL9RnSZJppH1D0nWjArfz6cmPCF8UN0+cT/4r8+F+F0kxO481O6qOh67ZxcZhvpwemIMepDl8zpJkoCnh1NXYJyfMhyeeEr5nJLHexA2ZqMtGdJGW3rPE0n+tXoscoffXcVmJvE1jctlp8SmPMEbv7u7PLrmLKSPFMLYaNeXHydaFFBu8LOYSx+dsV9CseCvdAKfU/5z6mhsF9z7HTOpGziel/jies7oW2jkvJMeEMtiCmthq0JzH68qrfKeswpLnpZaUH55qOkoCHLrZMCCL6hvpoilL66X/1cnHWbZnjDB+LWZQJ8i+AVKbAcUuwoNBI7s4xgUQHISzga0mfzW9WTOH5ao07SE62ezngTPx46V5Gvlgxk/YhWxTbtyz3E8gq38zExRsX9ub0nPlzsn4L2EMBJwQ31jxNZJZtJTvpitZoJPyu0lEX1yf4d91wNsaUwGKo0xrkGxmD9A/gE1f2vZsPuWvKzTX5WkXRuhZRE/Shw+ICVe9RLivLGRrAsd5MBnnzakclepsuS9LZApSiVIbovawQGXRaJBgXyYOVft46TGx/F32AK8rdYMoyLL61H5H6oQc/I1lKQuaYiWqi9EDKfsCWXyQWKTBIu30bmgKWxk9a/448qkjfGO+nBNAP+n8RDXLEIyVOSrOaFqYIyo5J88y+QJB50jfken6AIqLFjESBwESw9CEEPkrQc29b9G8wwFA5fh/cam/Rjnt44IbE5e39ZvM7bpdp5O0Raz6guyGaKHRd7VjUFmmCYz58eHCwF89X14jG6Y9Ej3Xl8fyWycJ4A8jluFf991Tcyau+LTiXwM4XxCqkH3t29t6fptpSmOcmRo14foLfCw+jqqjjXaKtnxrTn1ejvd4miPep+atRQtPkEeWY5uZjQnWKe+m7fNx0RJ9nldzjCgQ/KJrc9vGZiZ2EM5d8eszyO2nIr6c7XQ6ROvsB21I63Pgj+Yb96FAHwW1vlPncY24vPK1gJqk+Ii+uUKVqjz7j/WvtPtpNbd5D31PoE7+qnhGEzv3eSrGZE5Q2FDiDFPna3p+H8lGqchgakuGrYeXV4GdpvmrrtGHq467jS+3mdluCdcr+KCS9dddwzQvVtRYUr+wuJGwtSkbbEOO6jV1bfbHp+cY1rymQippB5cZS4Xvp4OJgeoA+qvpSt/FRp8J8CqvJPu7JAqSblX550UQW97wRuMSLVZVx0PZtjhDP7yNRo1MJkLsu9ttrSLk6Ecz8EhkMEi/xb2xcM8+1cmmulBifh1+RllZZ0cEkmstODetM7eLddAgqy188crvJmKFw+5GazXUIEWiI5fsbwrbc+fQGGRaBE2dHBIWXByRIbnMXZeJrFZEjN7vain/fX3TWTOWeWxC+DI9azFVMVz0IwFm3McPBKnNstBjwnZTtO2sHEVg/WiNy3LfC+NJWKsl/8R42Bf3OJigqKeqYG/KGb4lm07/R3TPF+OxKYgbx8Lxlv4Ad1tsN4RyFu+BbD9NERClm2bRf/9NHhGvZaDiarvZZb6wOzPPd+16n6Ns1EwMYldrQTTq30+C9jlUXxpEXLCveORQ4lls9tHYsx8oaE8Ih++sx7G
Waocq0n9P3IG0cH3jzYD4La3uzV5VOl72adzMnJHrr+jRq3vvjf4k7VPmgxT7+lNN9+QDXWSmXteKnwfIzB9QxYNK8DG6X720HC5OBNbriiCm22fx+uLhaKoYU8bYItj+NDCj0WMhoXpLELGIdzhqRbj65rOpcz/sPVagNMx+7eH9x7j4VXksn2GHNfiexRWEwMb9kAO/5MhD/axV3XbqUX3qP1LGngZZmthdA6yBrLaB8rVn5Y/pD9PWXckLDKQaj+7w3b4Gil0JgfVBfvuKItzrKyv+6rcDD/1Rn4rW7WL7nhlm7ihXJ9FevB7LC6nxqwsYID1CLsb7R4DEKWZ0mAaadv5ssUljq5OXvntQbQKxxbMVberZiyVMop8EhWhnA/mBllAHiML5VWopUp6D0yZ+BRB+1R+a9you8QflR01PuAbt9SGVcbP6rsDnkZPC9OBVl8LKXs0BFBCVBsEVhaHlr8rBfHYWkTuNHfRAqcDxQ7fjQyOAhL3nnGG4+k/EcawC9y5donBpJdqXSHQeMdEjxHR4ZY1adb0f0gMMRWA9taanXa0806LFNXTTGWkrkpH5x3N29AhRhnY9u+iW5wdVtzs0NKKkz/E5zE8C0+SCRSIMVNGb9crsv6uTuyjhBCuppdng2stn+gR8KXSHX59I00HoqV2zxRuvOzk+h3t0Cn1NudjGbHgaQl9XNeQPQQECbDGlLFjXHPdcyWluhtoURHifnJenQX6k3JHWTema7mfBm6pbhmGprWDFTOuAeonXtwIvbzO9ExSegI2XUO8SVSJ++LpAyGRU1dA3utfssezg3B0aEVddacZv6TYNMO5JrE0QpDm03y0mUro5h2GvwV3ci4qjw/dbDo0VkgQ9BOFgmUprrauXkGPDpclEy/rh51DU6I/UnWPdXw5zPUWfPIyXo/4kuyh72ebNyMwwfr5QTdVG0YzQ+7hfE4X+ztWzuupO62sek3Ly8mSb9b26zIdqIeV7OvTLFfkbI/3Re1U+v0Mo/jUJRXh3ctWKMo+pkHvf2Boync/pS352mOFG/jvQ2DG2CfOnosTou3xWFC3PnqfT1+q0lRIdRm755kdqzY4c6AIvxIr0HztzNEztBxtGJW94oqueoH9wZsWOChiQ1H+VvQYOIx4D/9MIIc3rnlTV0kxs5ef3rW36mPlOiXu+PkCS0NXP7PMjDbvbVH90fVm2l0LDe6uPUjR1Z+mOM6wDFvibJ5dslWloavvVsnbuHvwZX1gvoy8dHcbdRgjXUOUtNGg68SHAHLZfUNkBtn0n/PCI7++v67hLvfxoVKhDal4R0EW/ESMoc/BfxVM734nXrTC+1PJOcnWwjEbXZeVXNGnGLSN2hsYZT/7fBJqnnXIMxAj83Rhfdga2YvMolLvZ2IP5GwexH3tDIfNnBLsrrI6BmzBxodry8tOSJLv8VCX0NqS2QhjiPl5UZWtQd8w+JaTGKYlJnJYIUkkmeMqms24m0nlObnPV4tS5UlpbI0MqvCzZEceqaSPH9UfSkia2IZzP58ZikfXuVzYhzmbumzbYrDO5u1DeVwQrncw0HmmA5/ASu2wCsxPEvpfL0Ib2Ibcu08MyiokTy4XsZeEbxxubCo0o1CE+F90rGonQ84FgVvwaGFkO5etHvvm+qcQaNSlpbdaczE0J3agLcSnf1NR4PWIRd60DR96qVbc0DHu0/tXDaQLclFpZXHwT+2HVkYBKNq6RYqWI5xBUIpJ/JafBZHiZFMlr0xYHpS1Ih4hc9GTHTK4lPKCYAx8X8j5AEuPPwAnuIoW592lrc3Q/Xk7ZYq8L56fKlX4OY7VXVuoKktfV4jGI2SxpLLPMTH4QOiJ+r3491R3RawaQ2ybgvy8VQ6+waoFYeH3plQ/BMcsnlyAnTtP4ca8+K2/jDNwY7xIP3vLGVgZNjFR1lFvH7OgX4K3IMtkDvmRxMHsgULpeWOk2JUyY8Z7ovZ3GnvgaUbwr31hi0uj5McNJ/I0H8pmxVvskdH8IiuQVVyWBZP1Vg2mC2RRkSY8w/AnuFWmOqxR6ELpGaTpUVJbhMO+cSIBsVqZIRzvB5dyxMvB53DVwySO8ZDYBYXFphzKb+8Lo1p6/DqFe0atBrNV+YQMXmEmvtk4wbF3jQ0ZS+BQjSpzy+sGh82qtG+flnUAmNqh/TzdsNASOxZAIxJpxK+OhUrlEsP4M7e/y9owNd+9iBesdALg16ZeEkFoMVcppwCjAlSCaTTm8JREJR7YhVFr2P+BX6YTP0yYzIWIqMfQTqJtmVxpMlAMlF2MXzN1wlbHpeRixV9kwCqUqM+sHkZY0Mj+EbE/6TbyIzmMu8BhwviA7rFRMhaHPQkdyFXxVIH2QjXTc2xy1j0ytLyqZpJ8jXAYKy9tF3KVI4/R3fV9F1DX2xEc/MXDHhB9XHfpB0O8Xf37K86HKGew930O0jDT7ZwaB0us8MOWrSnTh47DxOTJLXOkXxPxyL0CYeBF6AXMly2CDoEMk/Srcrq+2cnkVB5AKWWHiV4uLCPQ1u2mWS8+pJR23/eR30wCBtvAX1dz3y7ohJSLJ4EGPtNCvszbJtEc9zW0/zrdju5Cp3FfdtK3f/ph7dbQme20ydicue1rRgZZXMb4k118QXWZO7jp/uKZXnwPsDxMIPBO3DYMDV0+ZZUWYOMvZ5K9VZ3DdQajtCvtU0cqx+usQFxdqvsyrP1+ASNRmMC0vVtwkcc7F69kO9UI6BBzd0NHUfhWwR/uHbm83tTzyGpBrLMnHhxv6dXV146WYjlGEqI47TsbQD9cf4B6W3tFbWRjY8XSMXYvsy0CSylz4cEfcBTgUJc7l3lmftNk8F5pLyTIb31DUUkvoEzYhVCbXF5IRQeOCC7Egz+jlKpuEpf7BMcuDiyuOv9rxX22LtlS8/3RdFTEcE5mLSp6ZqXDIhpQQXXxf6VpaefwcmN4+mDve4ujUW67Q1Xb1rZY+heJRDk98HTzxgJ5ni7N5hs9Gs6YOjdDmqrP515Ij944+rqLPErihcqqaqt0hV+21JRVxP/kgQBDC4wfZ7RTWqPzGSpn5ZHr0lxKaZN3xlUvoYNWdbeAhIXMs1Pf8CsgKug2zrATcgHFC0uccZULLg9m4xzQqh+corcJCa+fX9aHxBBTUMsH99F/VKG+BlKtXrUO7HS+lk3zKVkNCskRZ7f1b4BclFNdOgXtoAURuIn0npGjnvDT6ttFvpRB8EDejq9j4FvAl1a6T3Eb4cL0Xs3LvpoYetErM+NusXhsE+AXCR0CU9kBRVd/3a6XCpl4iI0a4qZqqpA5+1Doz6i2fNGTXYCuZ5BWIfubhupEvglmNMYhRwMkw5XVKTLnXkEwrKky6UmU76kkrd1XOUdnWU4Mx1BXzOZw5nh6XcN1fCQtbvX07J8QVGVMxrU6GXl8reSV6cCLEue+l/Zs2aMAgyyt3F90NXWu70rh3Bz42n8MUqHClsmhn+tkeYFRN9uuAJJeY3GAZ7qO1TuGvqzB2P2k7ZAe29vWjPwT/sdXkeOWd55M4J9sf/3VEHi719+ysBms6pttJQdrT+Z0qvB
LIQmf6KYoExg8ZDC5s/7qfybgV8fZNQ1/MEDbOYrCmXl9W61B+jc4u3sfXVyjLHzQNdmbf9eu/NzNMpi2ew7SN56j+3ob42cmICUL5UMWmUpTnZmImSDQ23+pzIT6eYT7izTqwxCKdDcaZso/IWIad+yScJYessgL77nt1Ryv+gIMvV38ddVbzN5vDZDFc3DER9GH/OnqbHVOM4cw9it39j6Wr2pJT26Jfc9+BQh9xd+cNd3e+/kLnjJHOSDqVLmrvJXMuRXy5bc0vxa7IDWJccjPYU4xUd4u+uJBNiEQbokeMUp5K8qhsikaFCwjAGx1qACRDUa4TiiGs77a/WHttoUtbONd3PBr7g/1F9JMB0k5A9DgX+yU43oiFbfMrpZayh6A5RZZ2aaeUCisAPKQqTUD73LBT08TRfEEAqAg/0/HcJomwplRmtRv6Uud1fHBau54v5kqKetp+r/1bXNp9y1GpX+MKwkpz5ud6KIU+vutq9u52uJZ/+a5MozqZI7wJc228x3Ck6RqOApukOt5s9XtcMY/0hGUY3h2Vte3RGJ2JLC8x9j4c8/3UsL5M9VQmpjq8g90tuyiYgVYZstecFHHq4SejtEWVbTh1bQIwMKHu81/zSy3Ts04WXeUNAIH4Isw4w2vRm281jp5kxoR3hljYk8xJf4uKPuMIoLbN+HNxwoFSIpYcIXzhtYPNGdz8VbzI54rO4tGhi//ZpFKrTSg7HGfnmpXnxXBiWzstmmJXcn5tHArhHcd7odi4XUA/hTC4lR5oGSRpHFxTs3SJewif7o8EmWP/iRx48v1NsbyPbt8OW8p7Jg3/G5JYcP3T2++Nu38zlEAZ+yiCbuLUaO4IqV2fe/v+xy/cb4TeZLcb9D6QyLZy3ahyqkWDE2WUgac9YTaV4c3d4bSHa8lTinmS14Ei1oXmLC5vPf+Q1ZQsQRvrvoYlirdlM5La9MZtlcZ1xynFT4BaSqxzER7WRGJ3luLboqaKMe2wWQCy8TE/h0qVpgAl+OFFap6/puEvAI39PSu3sd9fmtlv3MupaoPupQsA5qdg2A/pvSYAT4WeFJclRM1kUwn0nOAQvbT0fm2XUbzKR+3Fx2zm2jEIXgwesgWaJjqOyQ5XSgGPPSBNp510o9waOU12VAM6riwe4lqt/Qfhj3t+wbItgpVI+y1EZdGkIgOiCiNqxQXLenHnZfLk6SvxcH25DT/pFI0qxyz58LD2Ogps+hvDl3VrCVVG81NvmjLglG6kG6C22MJsQYvPQk5LnLK1rbANwD2/AD7Rjl9+gPMxUFqj6lplNvP3WsbXaQIdCFptUuUUVioJ1LC+okIyHfXmO3nDDV9WCcdj+qxWcgrY1sV+rI+hpRUlxQ8iB0KNJR9fIyE1HgwqEwlodcbl1CaJKjp7PGrbltUBYTvf10UpNOpPbWHV8EUhWpvXQjWcmcRWfs70DXTdR55t1ZlsAgh5CfpaHIno2z5nH8BHo1/C0JezGcndXwqq3pCzHJWzrHES+4ZaU5aoUiNs8MHLiy64NhL6dU1/jQbCwvGeVL72H3ONyGrT68p22yRJ3tkdiiWX7wMH2EPFrhaBn9yY/8SHGo2Chv8ezeOtY5DKXtGGZpt2NVZbAzY+k6s7sGUP6VcgwPkqIFuqoOGRQoHAAb44YBYz5K9NeK/EDtJvijxVXb96e++f50lvODbF4rfoX61sZvQhy1IjGfEKPvvI7XUvakzb3fvcc+D2h/BNaiD/TElDCRNwNPb1VAaPtIBP6HxrWT/Gsg7gsl4JtE5PeWG6XMU99fiAS6H1cYPw41rwgf5Fx4bZTqlTlNdif4bkAU0LUYKwUV4izJoul/uf+5aJ4cWDcOKVCNDakXN9eY1G+vLen40P2JaCUVijQh3PXY88SbMVe7+rz9cJXLtikqW9v0TxOr86dmqAzTNsFkHTv3EFx618no7Cy6mPGHXbaxGJXg7nI/y3Zg+tYpboLS8tjNkFERIyHF077AYlw7Esx3SNcpWowSjo2k0aLMnAQKKY1rCkKDMIYyPUJ9/JpA8Ex4bDcWzfNsLAGQ7IqDtzwUQadjK5bHM9EyFXVs8orMxrtXNBWvsj1GDcgpHBSNIfSPGaAi9P+NdjXCJe/CqGgPzNlCtiStfJF0aiyIxGopvaSho7Lk2OeyIRgX1s1N98BAx4YS4slAX4VTVzB30KTxw/mzt+Ooq4HBlssw2VcawpE/3R/ms1vPsZBgtkOPYvOTOTnwRAB5RkwQb+ZX7x3T5LFFgZST5VMpfgIG1eMfswAjlgqU+KVCrVhROlOzOZJ0+LLLk1P5D+m1X0mXr1PngpTraLCsvvyXASBvfyltww4+sfln4MhNO3I7Fq6akXWZk8pL9unJAlMw/jpTBj6AZUO40N25q9erPuHF3l1TErSft0xFKf9LI59McmsqQy1CtQzktzdMUmyOa9+BeMTXYDrf6LNJpNXvA9XyLECb9qPOOVRzO+/DvLWlVTsNWPu/vyz8NJCdSjGMn6gXuP+yyPxR4L8A6xtCCJErgS8U8WDERHTn1HgLX4eoO7CzcAMfkcwHnqn0PjAJnPWcCgZ4kgdtJIkDa12/tLMYJOfbCTz/3FrT1zd68lsAaVrFy7N6AXjnhFFzR/vYs/XqlEiH4p/0H1yA0cTEAC+GiVW+dvG5TtvhicI20iZMBsf4EOM2atCQCHTzT2i+K587l0cfhU++qvrJNmepGkXAyRqaxf3qNPL06kYBkGGahE0RuwReBVkJ9XpZWUEUzAY7+oDkah00Pq622mAoT3zXvDuKblVUcFeowyzP3gCz3bdD6i+XoUTzUSS9yg4K/BW2iTApetfbBw6x5LTIZnsRVLIr1kBS7KTrMu7kqVu1Ck+710I/5wjcSu6I/QuPzl+neimQUjDuyRkrUOwBvzUMNnxIvrsxUrgHx9P5R6hPGuKb6Z69WtdiesMpC6j/aBxMa9xWX2K+3oxsC/jeSgxbMuM7zITNZDHMvDRu34A3SJCZ+j/ZUPLYJh0JJAaRt+fFq/L2G8Od/vI63JvBUzFujZFSO/WA67IC9Sprlz2X+DSJoMGTOJYnTEk3nIGqtXVRFCd+Jc8U2J5OKXpepuO/cjsXorNthCE1eOej1XpvDxS783vpWS310hDVQ1S4rUOnS70Xpor2/+G1UqaCC2UZtKm3Iy46mPg1Wdp/qXr8g/2txhR8Yezgrwm2Grpz5A8N7o8jiK/bMLgDY+90gR5zFenso78y6mcsUDRPiYBOueHDk+bDtVJa5b7ZOPTztgy5PkNgOohABR/ayHLKZ/ZC+hfI/9G9diDO42877drSSPdbhw8cl1zl5RKQ3D4y8H1+PH/SZAUHk46/zJ9sNHhzXnTyWioNY7E4Sr3JcLF8erFdrWfIRdxH9Nzflyj6ViOVE9vhpt4xAmVOWbXDn8HGqGY7k7zm/ZATOAYV2fKNJ/IUQQcQAcqIyR+Z/C76K1VDrmd3GsAk3pCAQ23cHa3miE+BmfqNyynqlc+itH0mYUd7B98l8I//1ozCDyJiLW8D03f2VtUTmOygQy2Befx5USJ
5itwMITF9UPAaJxOI57CBVXF3yGyjSC44MMIAG+sM5HUr3NhSaNqxzIY74vv2kinMbzjgrDyr162fWLnI5dv5qXJbZYZ73MR7hVc9Vs81p15qLyGgnyVq5YE8hOoJpLLpp/6cHpLxH8TLxFXAC30r0i4tSu3oY3teUwoUE4q8i+XpUUf0yIFnTU2KVYLa2QGPzfq1nZcjnwQjTeM8odJCMjWkGV+reMHHHT8NCjCj4Rhbz2xlXkYOddKrsPQNHEZDoxVgk8KweETzv5KqfEmUgt3T+jP4O9TfnolAnwb/Iytwlyxv91oQWxMcpi+aUiY25RPqWCPgum58EJBKlIHpG0wQ8JqUgG823tw+q6VdluUbMa+g2HkQA8/rgxImUDCTmRwum5LmZV4rZvF+2H+Xu8hP/Io0a4XR8TVh9uHOINo8z0wPKlpdXCR4i+WdmA1iXVvvLO77XlIwPPAJn8LOMbE3ifBFIir9Rs/HNVu4raKew41O4AritztLIDZV/NmNEt0z+DFwbUcroMnAVCsrQMsYS6kITuhOMMOZyHSNNsrzDx5XxCHLOvmlxgWfJIttdyAYKoge+efi7eaZzNES9aHGp+wnxykt/o31YRqsvSrfFL9EObojf8LTog626ng1+cfOVEL9u97O6jLr6b8Yo2DcrOv/Ytbw1bkt3GWAL6rE2JXhjDUFIrV9ba+CIE1+uw8SBioeaGHqQmPqSA4BOhrNfPYNWIz6KvhUel2VYuo8B8KC9ryT4HrDR1a2akHXwJXVKnkKO+vkLOLyhICzuHUKTLzH/D1mt8+d+/qd0hHE1k2jsCH7yYfD4ALfIcY+r4eZarAnWbQ4j6uu+6dZ9wdSAvl9tz85CfKGZPSI8H4/KZEk36F+QVdeWuRJo5Qj9zUUJkOPUAweutq7bQW+Byd+oquWGkyKR/FSLrLwP6YgS2PrO1/HyPJHRok78ujaC/+McfNLYpSqbcDWVew77yp/E9dtazJFVeTW/Btz+jnpnMzrpT8zV+1f4UVDQ/pn0NLdFuJ6Rerl8zH6LvHffA4Tsjp+eHxTLZBpNLgEbCESDxVzR3KdSrFv1fMh+gDKFHnRQc5eNAEn49gkHJmP6vdAclS8DDyvKHqCB0C6skYaVSkrBW2STL1LsjVrYpAzvd6LEovY7mForjVUQORm3vC18nYnXg2xYFrq15/nNcz7RlTfV3OX9pmZmVJkS5Ef27wgGUlsrrgFk+ffdRasBh0XG+VIO4ge2MjQFaMoBe/HylSLC23eiBAAmpvkWyv5EpD8H0oticin2+J2ldqueP6r0SuVqGpuQ0LZ9gfjNc7LRp3dCDYbefz4jsY99XV68oGNNIvnZP/c7QdXniBN+nOacOE/3gOLV7CuvW5YWLVUe9PsFt1vZroHtNTPFpLkvzVGqwvEb5gzcp3YRZqHR/EdQHO1LdsT7kqhNVrhDrn8G2tKjF4iD+5uJzsvbZiSGqc//XoH9YTs3Tn6yZ1Pyedu8+cgJVe59tNpSQLXmygu4UnzjJQEGQoWp5fp5bOGQcQHx3vCRb3el2rzuEM1pPZydU2AUNRlTPt4aK9zW6Yn8XY+9ynG+kpj/+1Z1/MsFA+DRKPLwRWRDguL88rqYNoeHCShFqAl6rMossAZUK3hB/gSVyaWld9dGrQ/iv8Gg8XD0QQSDtbN/W6XJPi6t3etVhlgkDu0l27P5mKXPeQJrI7OHB7IUJu0+q1XN+ErvlXwJZneAX74k6d27nx7/LDUPVqaO4FKsOLykwx5ZzyLPicqpNoVxIuGodSP8BjQtmXxF8YrxibxMC73NQtNMlJGQkSHk7uc68YQrmPMqvTqaCekP9yzKgfoNm4P3tb46idiZ13E537VXyfc8X4fvQsoywdp+35pwXD+p+27Mnr/8tycDz/j1Kj7jgjs3QOl6vLlJ5+Ny92anIL1xFih2phDwpUUzA8AJCC46FAHhTxJ0TPjX8TTSjBGj4rh5VRkIwLIt51uJ3LDbIeZ0MAlKdj4TzRc/HUOMhVPJePZBRI/x1kjxW9Rcua7ELGNucmtAGjjj115jrBmr9Xxyym5/RRaLw9/v14s3y1N+UWc91TH8LQcL8QhgiOfkWU7bF3EsPD6yNH9Rc43YmVYcb1anVuNaNj4JF2zFBDA99Z9FmngEjWSgmemRGN4bfjz13l5q4RiM4JEgPOA4jR4pl5rYDLpfm4CUYWX1JFvNZIXzkt1WAobpn/UhAlK/WjBmzJrDn1IW/lGe09s8s1MZLZngaGh4AQ3pTx9C6ZNvmi/p9Ea/1hLIFM1hOdWDi+6aUPEyMFAEq33vWHZNXACtskNDfMIXoNdb9CcgCMGPM5f5tnq2PIF3R4U8CStqbPMlZmCb7sUbMaAJm27FGm+F9zUnPTsigvIYwJb5a9lLXgjNb41PaNSnfpscLIRJnpDltrdKoABevykoAIf6bW0QR1NEaR3hOkq3Of4bQ8ePA8lmBrxRDhI7Gl/NXRQeS775Ci4QTtkIDcz9sYmC7z77DbmD46zIzBnkke7KbYGuehIU39y50z0R15ZBBEE4w0aTLbccv7DRscod9ZButgPcOzgFRfCfEDQJl7rirnDKcJTj7Mr+U+8JRIU/SYlz8wYY/jBNwyB+jvMcPo3Z7QlVIFVuCQ4p0u7Uif6/s850E0d1yummWH1Vam6AvvYxktDE/4/ab0GfUnqWAwkHwlz7uxlb9zqOjvP5FrQF68IrsglP/AbqeFGZh+tnZgcey7+1xdf08K5weW0Jfx+tccdeLuu8VpbJAFpq601+2d2RpbRSeELV3XPzJLinXkEEMnvZc3OvOsQa6HCwabBqk7QFg1R67Y9WXyH77pN8mgCZ1MwG+jND3QHbBvqUMnJkyvbxkTIfElMuIxGCZUDOSphS3AY1KpB43rKxL0TH9DANJOOZ3q4C/aw9wPHN5Tz8LsGC3rqK+dNZPz6um36Ya7tCe1WWrssfpvsv6jMeHmx73J0mvpQrUE2b+kCo8XIsA/8Dekl5hNcb9JrHGmqXCgE3NXP0IKiyQpfy+AyLbnH5bvEZk5B0h3wrRJhSNuOSJFT5ZkT81mz6TgZA09TfZVFH3LxPHBrqe45bA477qsSS5fe47Px+kD9GZH76aAEsFxdsR5DCaRoq1Yo+BHw3QcutBUtJsy5ZKrQx0fSBBMrvvFWyAhTa8UlR1ggWIQBkQGnwhCKdJlXXjHShGekr94kg6shhglGKhnmeWrShuI3HSl4xWPBxndB7TvGjSrUFZ+Y0D8vck/HVLXU//MnGSVs9G7PsKtPBES3cLVxvg+hBkewTh2t/FC3yKbantE8hCLB09exidC1hnbykIgSRp3SR6nzRp3BQs4G8BylmLMqV+J+DVdmYR217I7Idao2FL0+s11kobMdUTegta2tWISdILztuuB1dyAZbSq7J+VNSZP6HoteG2nQUIMaFx9y8dkeITA0NGAw0yoSqJI5GswDBPv8jX6ITqCUoeiwP4K8O7DIEhwZMWZIWXgzivv5S/fStU59KRBBkQdgSIqhMgu6pqrQUEOQkoWYW2zZbs
SOx3SN7wdtOmcMMk6amLN61olYp68d4iUFfDOIinmx5nFBdh16FJGD/i+anRgzjrTKJUByMOkOqK3Ij1kO34XfbOMJ4JG8GjHPaDLxxfiHQ0uUZqauEjlVLvYC3Fz5NPygMnxByafjiM+Ka6UfhIOWZs3xr285d23lqaf50s9xWJUa92DfgYRGeupIw68apAhzP9sLUtqGCOCtLtasj4IRTNhGddQ82tdklThZXHyc5ypLeHVEvcuWmq1mS//mua/0cBOCGKKactX/MkwFbTxq7NF2HGub7L9/Mk+xD/N2f6deODTI9zHALdE2J/nBEE+c0uPhQO0vV16MF3hw2iulrjUAxIAWp1r8y6gBmXg6YwVn1FvP5HjNdC4sS0jjqZqpjoqNvDnZZSmsA9bNW0HyibG62gKbo6L5vwwW/0SBrTGBn+PtXx+c6yBiFbH+Djbx99+hcBd8PGDNOlJgTbQ3t74/FNz8cuAfVgv3yxDTNm5EhbdqTShzwq5/N6t8DNRYr7FSLpOQkUEIKTqflrglzc4tzNzaWcbV/dmW8VlM1PTPsyrUqm4i8iUivLSfgrAjLzjlC4/iuGBRb++T2sTDbdvdh3F0NE12fI48GgjfbDwsdB8RnxJipLTsUamA6jQFcPV7RTwVZMlpQ4TIc08vG0/AaoWyjVXn0k9gVth0cgTJxWsq2ZkG3Y4LdIRPA43zWJB5NGrQJCrNtaA8xoGkhTqt3+VqCzmdEG/0Ihke74/fecP950vxwy9/oN4EkzC5DbsJYeYHk1eP+q134g6Dj0gK15i6QLo0L81pTGSccTEj/n3zCPNu/HlrTFdhwxYTjml/+j4vOZcVCkOi5akr3NRT/hLGVUxtppUBWWyTWQ5i8JNisnmO7JZz/s5fdjWNDsO0TBHm14Qtr/+RMfezBv+DivCD3VQHJ2aCK6Ell9ChoOVYGOKExxp8hpGYG6ajnoA7+TeiBUkZb1yP5a3PnuaNKRRNPi+9iC7d7ckH6sytFDoUmEsrHjJmbTJdxeW6eunX1ztmGNGsu2eP5F9z6YRXxNg195BmVtItjZohx1V//443QcP3AqdPCLEQYhZWny1nHr33bOyDX44Qiuq4ZjLpObBfBY6bcxlgNfIrSg9FfvT9mXb6nupK2W1IQ8GAml/DKmOFXaBVRbivTjcSlFISwg9YgfNlrlvR94C/RtC9FCSoaZASZrPk30seLcrCkHUSMJ8oehe42neotjeDzKV1vAIhA9BOcn0qtq24RwKkWa2iPNOtU3mEI3koSerCUikcK4Tpjfo9pVn+XiTKpj/zY/XciJkdfmOC3xwsETKgJjKYMpVNwIdcwo5PbrbH2XBQ719D2Snr8T+/l7may1PSRlS4qGwZ8zwJK0RZHagWrpZ1WbQ/lnjkjkRVpl6iejNW3eekl81mVWmC3enAOftTzgRlMKMhx+nDV4Y+QSukFVFXmziHGm3Bb5Vhuuvru1T1/vYKQ5Bzl0L7mpZjmdzPXWGlfg2FY9cxziDW1svsC0ypopiXRTNtXfjXP+9nLD9/7DJwTbTSTSQ3l0zzoucXsIKVkCACMXI0yxbQoG7Dex0+R3hlMITk2HMDymo4zX9a+c8qGIYbAvtN90bAwKuV/+My4ff65v+5MuKhII/jkEEjUcvf6b8R51CUMkVNLoL2jCS679oCcZTpOxMxyugUclsCABLzS1lCBJkvCzjF3z+j0imk2QgL60VkcQFPZBf22sIY0tUeQLgkNHNNsBfx/yzGh3RgLiGkeb0oWJ76z+6iXbYlHwUwPEeJrQdSuM4c2XayX3T2BbkMp76dfaYUn1mtPK9JwRoougvgM7j/cLf9actL1aqmeoovm9oqPYpUFUE0sATS38IxDDWfuxW7Z6ASjT4kBEGEFcyg/i49kF7JufC/+q5y8ZZk1rjbxEVhIVjCYVJ7nSMP0vz2NJM0E8xaa2dftMfy35UJKIdrtZRxV77OYAA/mFH/Rs322+WQoxDIS4bFj72sp+as+2TXlzu/I/j6X9RuNnWlUXvORcaoOtDMlWtlZc2jEKoB20+Z24kVufAb0OMRvsOhV/YjE/EPPeWli8J6bNCbrPAE5coOjvrxMLZVcrN6S3hl7uphrwwYCeA/3CGSdDUFvChlmztOsFL+sNs+vNy/DnxGQmZf5hoUnmVDmedO0L6jJ3FPq/RUj5bqn4DlwX46uyjJaBMlwFocofOzIBfkcMfGnh1z6aFVq7AJ+nxAqTT3et5nqnN/Rm3zc4vK3ij+mlX884R4jp9yY9AJBV8YUyXSouRZAm5e81U7eFQTysdZlQTjdpijjUmSw7ynp+A4V+fyhnEMxua7Y6dRtLj7D5Cwtl5Dz0wwbc8evDidhHNp416bp935Tcb1mR7pyHZSxuomE1uAY/OlfneU+B9oG394IKPCjhS+osa5bB9xDvJLEUVeeZmethNP9fBJNMYJes6694bpcnGHv2/VVeqEuPl6tZlcDHhn5gFAljfNRO2zg57i/bvL9x/9n55xOfu5yTb4MKpdbKUGm/5wfRY8Raf7BsGS1UVe1WaCVawnkV22mYvf7qSMAACQG2EjlJ8HbQV4Eg/6pZ0RI5sGtLWLI+fy//+Mnq0PdDPSXiKCEnV/8s9Yt6TXxq6UBrUhtaW/TN/VWx7Ja4eO3wV4b5nM03e58irbHCqjoZYxDu6nicn5rfWDLRNYEfu9d5RPqxZULfSC0c7uolOajw/MWNbwMb+evnUrBwp7qFm2z+Z20LFOHgv3rMBBfEJ8dz1W+L69t82aeiQfrtLQi0f+2vddGRFfRnLUb/xcB0DAKZH/oFbsnf0FldPoLKsAgSSLffrKq/2g/6teJf3QVmCGi85HA8yAzT9uLjAjMk0rvHT6irbz+fKlVVFly94fjA+K35mnj0k75IohPYjiYb1Clt3ezziVgIPIp25rtow5J4SBULbWh8Ba53eV2/nS+UEs/mIJZJAjUodaYMpl+X1RWd+fv6FIq/xRLo2px09KIdPpLOeekV/sXFfMBA+/F31q4ZOZzmyaN/NrzVY+xibbMYqK2YiosInanwxHK5m9z3Yu5gu4UPo0b+JYBQfcmA7znIx7/rYNJ9rOe9uNBkpZ7rPGKGWTIrhXVwEWjFSc9LnKBelGFdThZRHDMo+8InH/JKN/NEol9zdqXEULSiYcyw/ZN3D2yFAneeQhHqI14RObRmVvvig9H4uQl1J8afHTruS8e33rOyQbQVz3q8JDan2OQtAcpdHMo8HVW16IopNbiSNnpBdGLuvhepfxNHJcpG9svQ/rS4rVjopDmQnulqHEmy41hznOAm8m6VHBKP01TX/Bssz2sTZt19DSJrGhoHZB4v7UuhHa1l+SfC35qS01co6LV4Y5jmPTJRtXoL2jI5kluBvjhhVW4j8uNQiPwXJ7/Q3fXkZDAMk6jxTmS/xFVXZYPMeyMDN/QMAvjfzsevuY84vvgxF+1YVdmVTH8HNtdfvIuBTaX8q9Ns7baewOkrOj6bXP+wOIXukMmYYclztwNKo6r7xkOelGWvAYoOOfwtue3xe9QF3NY
+y0FwZ0V94iSiBa6oDaLoDOyxVZ6lnGrE6O3UZIniJz3w7a6NSOc3l3kvcerSZBdojTVMv52TCPLS2X3z/+W3Gu++QOJZmxHOCLYf6DuN99c+yEVSqmjWqdZ25mza4YvmdKi35JlKy0ngF2gMNvjfDBppPtumq6op49DF73bbsdu24v1Jc6IVIayfa0WmpX/islCQZCnZKTP/bFkEW2M9tmI1avsKJStSsNd5QV+MZ/zqT3Jv2ZYuvGmonbxphG91A0Ta8Jwh2JZozAAEGRCchMsYMif2LLG4XCUj4qN+E0P+lp47sjN2dFZNQXu/2YdWd8eOqPez+US+KtWCO3n5W4lFqcsxrdw2YvaLz7IQx8yWBfOnIHfKYnfBUNxkgL2t9Omd1o9klQThuy9d5errDKHtMVhTzxnz6yCHPSj9pzpN/YXa7sSGYTOCSPJF2e+XKF++is4L3XGBD181N/cD9Jqw8Qp6hBvcL5BAxx52CIj6+uq6+JFfcsrUGp6CsjJLHZeN7uZlbX4kNEp0QaFQmvt/Xe1mmV9h5rtI9Y0Q3Tqho4lKZc8gNhgRw6UCe0HsR6+RrvprnBA/w5rIZO8SHcWnuRhieYI1hb3lfBPcqG6uzlIAwN9Q1AA3I8GFgiTgVK32poXiSr2kydpkZ9fS/gGJZcok7/PqAaUanZmoUZUJE2QGMGW8OmjbwsO6Lkgd5StyqEx2O1oBXTjT5lFepQyA+kq3PqTwkOSW/OC0JZ6jqLfWR4SaaHzyJGSaGmf/BDx0I9K3k1Jzyj+2pJZCh5NqAzqR88LDdZBSilTEU008ZJFDkVeL/oOlrztZVqS6UiGNAIcoDuaECyLxVo/krRUFiU3IA7Ii0w1GNgX7YcC3m5vSxZgsaQZK3qs5mRZywp2UXlfXnWP1tyYEQ7NSyB93yHvD/s0ie3xcU4pB15dqiUexokwgTO5eXFvmys9VQOnXUzFrPs103dDeHKSHkyOkPjxVxxa7JQYIwwCMWrDfd2ZEkqXNrQbjRSBAnpWaQKT3zIKcRL/oUqnHtfT+6VfGSGs2Mu0voC9zGgtk59fzibgicSUBoMwZqTUAZIpbIoqOB429lWyaZ2xcP8415F7QOVnGT2UjoNKSR1QnunrCRmJTzhc9xM3BZgFleL7j8Fm/7uSLggMdUETl0pqZErV1oYI7RKWxfUr4pxS82t7d5bVtH/Na/9GFOctD93zpIOBSQwBhoGX6iMUHKj1JrYMZbsDY6nNJZHPmw6dWqSE3gBZwFYUjq/NP2MezeuCx1MBO9CRCuqOTQBwPkD4oPk15RV07JunTxL0++auIc2j1vhrgRD5jPA+RXXbzhlpyNLU+w4AYMvEYdOQ1KsSCQ2/U/g9dmO0rH5idl7osB6QbQGUI2/YT1/QL80o3LLxvzCCXT0Io3VnZn8A3867X1MC8pTket08I4A0K/z11HvfOEEUM04GW3ccrqHWLe0+nfyH0ofDanVZhNqTamOgYVUf61kvVefbihZpLLAs6dleV0W/g+8dznRctcqi15zfJ7u1LNmJ1JjDB2DemZ8KEXdnPvpkdgmW5TBvW/gAubdkQypfAODHTxsEvGtGgzKDCHb/QpJ9pOHUXd9yo4YA2Ka130nXj4YMf+aO+jIljP5SfHu4VKT1qOd2/U/oJlBJVQ2BVSVTgq7ISlPkNQKYSMLmn68FBUzBBJhLx5WmgL8ZzL+L14qPTtBFdE5ftxRECk6T5LOvPrn0+AKOg3ByOQhZ+Wsln0D7qaTHMspNzUEt59fw38ApFaXs4wxBKsMwHMzqgcOVnKQeBf9urOes12R/ba1QZ5H4OcTv26VfDXy0ZVi9Ehf+0k/xa1fjbU3SrVMmxp0ng0x4q+3PzsvjodFryRcLwjGwEMFxkySooWIOyPoSldSjXCr3INToC6Dhx7ykWJMnSthsdv6v8oPEGNzH/20ZCpE3cTZ/bBcr9Mle3Em1Cl1CkxCurz0K6YivswlJaxOslrE8Wb3Sbi/2QVd3Ak5jWP/rTFXF9DH+4r5uO/KqZ4SNyZrqYLiofT6EOWQP9FQqtMFk/b8sB0OuiVVJDvEmz0NCXRLq4KUBpPbERZVcyhm5tVY5qK8hbZ+QeChe2aRUcLqJ5Qv/wwGbjw5aI++YXd309Fz/RjcVKPc2cFneEACKEK01g60/yoxVXnIy+sPxzCLVkv35j/Ft7Q+y43yICWlxKRNHYaN3kI+DEPxGS8JI+ozP+3bz+ug23YYAp5wnmC57lP5IpA31VJXib+HsIBMaOWiYwa/BbZ8jhM/0lvV5aZ6sbGIa/Q4f9n7A8SlVpL9KoAOmlWO4i4M65b0XSgOS25foc0Uid4KDki5pAAeKzZsk+z+x9Dm1Sb4rZqGfIfJqr0ka56wR6miLPGJITcGUDgZdpstogMfzfbO6naQ+nscD2Ps/mKzeYgX+Z/O9XJ9kGYn/XU+gXgfzNhYJKzQ4mpEzv14gRph/VWjtK6qSNr1mwu8RnkbQDaXG5KZGPsqCpMuP9EoaxWB6CjOrf6lExzOv11bPvRYGZGmJekke2nxkl8xXDYXpZTP9WwPogDTaro427RAv7DYBa6QnEnvGj/ynxIZw1TZIhFfyS5ddP+DQB038j9LhwvVDGXhViYr6csiFdMKyuot8/JQiw5z0NKs0OX5j++mXB96mtx4TJQDpXqElpkibRAH4R5b/o2zoVRaXftzuNaoqQtSV9bxITCGGCMJYAv1ZUtTkeeiKQxTxUSRA9hc/D9sXfauMRhooqLbZqfZ0BWXKQrWQB9/KFtWc0AOXi9BOpkKAuXurTCflyd3/LDsphpRPMxKEUlJIeSOru578wmTrpR6p+zPg0hbGSWshf41ddEpdTImSb6rt416jOJN9dxR5/VNMyc8mDtIo1o5xIu4mQ/Lh1vy0Nvfx0dfOY2BcoynxgxfFC5KeIqs4h6p4K7/6mccQ42FdeOxHcY3M8KCqx+qGm1GO1JjeAiXWmZfwq8pwGqC0cKnI53eRputVFsYO0xVW/IfqXXP2yFy1kdEjUawP+HgDCuPqkFCZmEEOBceOlH6X73eMs07VIGsd+0YJxjOBNRk1+fFXo9kqeomWqRp0B7+1jxOGS9DcFIVH0ggKeRTApc0XwUT4MQrrwThjwP3ajfZ0iIGW3o2ocBe7N8jhbdGn6Cx01j+OBERv6keV3HKf8LXbUrPKvufKiDeMH5MfvgfmiIUh7NFD+UxmzljhTzH6eaII7WDzB5F2S2Kae8PoB2YM3s9rn/IOwKPm9Xs+u6uF+30YcqqVMI5GZPEBA7gZZN5T1yO8J2PeDSnJURxIWTGyF7cqb415Bgx5/mNQmoDn8nhWowz0zMfvHkPusXKG5el5cTx93CfiVN7tuGeql+LWkaL4IDaVeLCo+ynxRrJSVDF3WVu3bIpn8tKBKzIxk0GouTIjhyNJlVr0fypsfIaCtBt9lfSc0RHcwEUc8/iCpvermEoKR07alA2wR7GQTsWIIsmObsKXwguUxrmD7nOnGkDuACr0P9fpMQ+ZdQKTGr5qS3/6DLDmU6Kd2Tu1Qc/
r+JJ8fpmSyPp+mu2DpM5QMfsAkbT0HZz4pce3dlR3Fbmy5FigNHGOT4Gs1/TAJE1bwVyFbB7w6KE0ChvwSV6aBxQOzOM5+vEdTKBYdftHyoOT/GXMqQE70Z87CiYSlYE+W/j0a+vM8WXaRzyMGW9OmcXoB2ZoiBXahtf9xFkr5U46oNWHePEve3CwsTOKwlaK5IrjXPHKS/63vLGmpSGqwH6of5ocMY6bQlz+XygZvR5Yi7wbbEL/am0rIImxJ0YkGgAC+iUHpDHnQePy0xAhPQFDaDe7wM0TJ7IZhgb9pFL6xzFOOAGEd1WMdzlFl3/YK5jGrAncTvfhuoJEctR7y5Sb32dxmWy907NiO41iqKQa8oweYyUd7nOXaGt9EjyzXhQL2ZwBv3uqPGvIsszSeTZOyx6bgaRXA3y6kAfeswikwWBJhaTa3WVzvUAwiLtUSepAPgUfqBjQGK78CxX6J37kXDc76Zvx79mbUY9V6RetzL3n9nXSTHXFJmbkcSUHlGeniv+j+i8A+ffsh11TBTrVNI1bDM/VMC8DjjAaD2iwvR4ZcwswxjDmhYpFJkFQ7hKen/olZkYSXRfT4Iy9ffg0Aqf92W8bHca7YFvm7Hc1FL8gq9Wl6XDyCghD0N75PNBvx9sly5BZr6dAvlgSxQj4sfnnuAXU3mYNexVd7cO5grVwmTYmmuG9fQMYTtpdsmfcg24NsOdWpUDXgVkooREWInRbDX31iAs8uNztsZYJrjGg2NFokqAbtij5fhg5PltQgemLqea6LIHQNROsEuidW5+GEDs+T8XWQp1CV6OHyqFA3Wx4MiXIG1B+QWXna6nQHEGbAd+YgXGab6OfgOT4P4OObsaZD9HA0ChG40TO02zWaRNdR6kIvdSCxsCNl1m5NbQn/EDoaRzgm4TqFRT4XkTsApDxHkl4C4s0YIAKg5bp+nj8l/PQoNJznxlR30T/eT2lhMOkfnhBYlEB5+KFOafxGPNEuTVIyPPxrght+DlKGJHJjEvpzaVMXuV55D9so/hbulLo6Y7kC9OFQBGUW/4u/cj6Bnzdklr1Ca1T4iJPYwp/hNYaUN4pDDCeLa/7bKhh+4ZvBlb51hTAOLHzHho6d4IpWGCIv/C0AVd1O2/YR0Wc4zSGPvzDud/AVgbh4as7pGmXKrLBAgbLll0xmWW8EPZ/bDd//ZTW2XzJXhaPY3rcMp1hlKfl2fOFrTT0E4A9GfDsfmvdz6WH0ofz8Ggwjw9UGz4NUPm+w40Ytv/8Cj4/b5WFlZC+KF0HAO/WTWrPepmzJPVDbkn7EH0ybx/EFUu4Y8CNrktegjuGlPfb34zl2cp8M+eWgWzUxw0/T6y7O80G0l4vBsj9MPHuYnQkQ1br81c+GEbPtfBQJ7tM1P4C2P24F9KccS0kArg1Rdb+XDy/Np8JWjDsQVK7b4VkyIE0ACfBWJc2hkHQ+rDr7+statk93wv4u5a+5+JX79MLl5QerUraAE1gleEee9PePecEwuZ8IJJ0mCFstfxv/+vS5YFalBAMaGlzFX8D8uT13r1g5RL4Czr+lEt/hVlOxKWxhcmwzOiXek+BfBM0w6s+p4MJ5U7ZGsWFs/o38sOX383/5obT8QBP++rMzxa84qB+6KGzpb9e9fjwAXRP++jfJI+Q4KunbgtbTw3tZj76uxzI9S95lNjrucCerykHfX1BMpPTfLxET8CI2k+MspvqhiDX/QHW0hlfeqqcHpgrGidVEBuYXmK6ldBwPiy1lnSx1BWUy/SrDKNvZ2rdXma4B5W4MZDd39/FDHvtnnW0w0taI4dpWMGs51uF8SnCtrL8o4nZDc19tAFPfiUI3ctB5WalHmDOouYFDr/Qo4G5zpBx+4y4VIzUIJg4+NKrEVk6qwMocXSbp6LgZHRgHStTvwIxNIelnCi9k3dZrONT+Dfy2xqrDw1+67hTUb/n3rQT94l0Ocs1O9mwKcJRGYoyFpjLxv/Q2lU1NsBZGQIrUqhYEPgvxQ8L08wQKTMlAguAEBTdlWuSlYus9zrPF8sXBTUGkzNfeqUX+/CBkenHQfC2kFlzzf4wArf9K+//cukirjRTO+cciwPz8W+weBGl2Kr+fQAcDZ44siEONKs3ay4KnEy3+/ZC5qNQ2WC+PwZOwmDnmBWxxU6lxBiwzaAAOGFI6uIgUXV+rz/yt4D7xZTVceCnCl498p1p342K8+JUm4ZT9IISuVB/YF9YVAM4e/rnej+5seLVRfLldTo+tW7Nn3H9OCflmWXGp2DejUCd/bV7kVQWAOtVFuv6t25Hi9w0F5Pc1MiWfUVRF3gyqWCpyFMWHaxYqw4UKBURu2ltXxtxf0wgdh7eOf7Tv6bBuW2e6bcCU1knSSNQiZMvODwDggOz7cxhGKSCM5SCdOuW3VY3H+c+eFjvw9b4BHhqXcZAwm+fMeRafdCTlz5FRNGxrpva0ZS2ajpMRT6u+oDlN4T+B8VOS0rHhJNZffnalUIQkJZ69YNQGdjMY9PJt39M/uDWCHIr4oFQ0gESgpEbpwih/7/wDYoc1deari2/kz8njcfmanVVrV9cTyp90CtZr6WEZPA4K5CSnNUmcA32HaJ/5xvLTLHKdKWCBCjN/h7hFtbKXIRabyzvXYenb4Vd/kR7bA6leq0b5eplGHf9+iqxYNaTYP7WiXF1hqyx6tXx6XrbJZ0Kkhs0nRJ+cD3h2PU5kWiq3q25IGe9H0wGZ0T7bnfUL3YCLmjjFghLCBaziniHqigVYPLFFF8Wd/jfcwIGFrTn4qYNgkPHYquuP89NU7CEf2JOKtqJTHTz2DiyaJAcKC2pWe2gf64Yvj+ND50nMF+O5uXEeSVT9iBB1kAe7nkT54o2yOLf790a/ZTR7BYX7Gy/+JZW5VhEik3hvucASPW6bZ13oHp0Gb8jR6ojEcSwrXSMFra7pGHaG322mV4esovm+TF3Uy0dfhTUI3EboQFo5mDRVqrW2b48hNz5fIc2vPJ01Dqz85ihvgNGqKOYtUqMMHE+fvOHPyBtHy0zpT1ObqKRo5cjKf4u+qMfezxeIgJdMiU6/EsF5spviQB9f/RsiW3O2218XBQPRoUNeZZmVor3YSh+f/4LQ7UgE/efovjtJFVeyUAJeDbh3Dz20a3i/wBdmSAEglHbUTIje/u00GK5PxZudHxMtmYXbVMUoxXy2jY5J5I1l5XrfsBR/pzQ90M/6q+zkoGFvdRl+uVItbYS/IcYN0tFfunQuYuQw1BSn+ODcFgE4X7HyeGisKlGeiidEugpnDZ3zX/oD0K8URSEEtTKxDWnQvPBbv/SdL6IVxxRHDoadSH4fGcnd3v2OECLEZFp3/cbPkNJmBQQU9JIQ5V/FZWTlQDuKtipVn4KjAhrjSL0V4bggwBjqFG2GZa8CCP5e+6lvYrVI/vmALl1y1JeJ1ofx6MEDyk/yVDel7V1ioCiJlb5oqTqSTIDNRWacjRFB/Re1wrb/ok9NzGEG7JDNDT+uhmGE/jyPQ2t2WEs9Ap+KRqDwd0LffRHQn4+yltSUNcZ6ddZ5XuwlEAiuEMB0GmGED
c/NJOGSh91L0aBW+kobAAADL9j93piuFvWlhsdnMHHhryzrCjzx+VHmZ1cmlPoK3OR8NmxCGzwjVAZncUriLwQyyfjGDQYtm+FG/qwzf3o5fQ9/igkahpZD8MJ4h8XKivtjqW8jmomrKb6anq2PnqQidG0Fvh23HBDv7lWrj9/thY0UIlv+NJh0k+irQ3E5eTALl8hsmCdP3HkBeMI4ZNS5X8OnpzkKaZsKHzU9ldVKGi6bm1h/Xv3LBpXSk3AJG/FzCfwwlMi4Wy/9MmnM4UsKd9lUg8BkSw81vyxFIvQtvhpC6vjCrr+eM8D8GyyWoNsFrROUOPPdvsbozBBwweokrdwQNz9Gy1ADwjPA9U8X2eHWNYOpr2pxPLEmzQ0vfkmx/aI1Jikr3RlHqROpWb7QkbBQcvUzmo7BVUC9BA9P/4nD355NGM9eq20RCJ1XZx/zcZos198E6i+WEMbChJZYoaGn/VjfDNuZNX7iiiXboXBDxLhjGzdmgp/PvycjpxeFpoG3R22lrD/jEnl+bV1M6aPSaYqfq9uwq5sFTTpSHfwqgUDZfRntWhXMqup/7vK68ObaP5OOYRleXCiB43/Lf9zaGvko8ICWDaSHrIr6FpsUYV62vmTQDlqV4IcmMX3Zn5R5wVHWv8C3cWEzBJuzVFq9MAaa46OqITEBTSOex3JgbT5RPNef5E8v5UrER3am5P5bsIwqaUYqbju/5DterL+RRGr+ooye/aEizWRhrPUb9vKrbGyvrQEECfgVN+m+GAFcEyx9OGsme7e0L/k4Gt7DTD0AYfyV8nyGymPWTST3XzNndRF4cIIUVq36p6cMgS3lvKEc8I+AIRLvkjk/NKljniRNy/CS8URj4SGnMqUsKkmk37w6pfH5mn2pv/4Wpx6fuSiiH6DfFpvD7aW9RwAtGK5Aie+jTICrlQl/xQUU+xENrqSSjAm/5TqcFq1h8E8KME2mf4qVkcXFxkQtb+U1/jDjqM1zc/M/RY/gj2leQA6dvUykgP/eOto9NRuvDhlWY6qdT9CdvyBIcAP7HeIlS9fupaxb+Y/8Unr1ZZHn2FTgMQ1AJUnlFZ0hojyqV/0y36VtHORrSQl/Co9TzgRuUjz5/+fpKhYdhbLg18welyXuLgF2aIAQ3L9+uHk9s+lFOi8JV+pUHaXDjni98QHuoAf2NbjK6fyAq8H3PGTXJD4S2Tl8jD41i17DSYl1S8o1c3PEtMCPrLMDXaVdlKaqzjIa5BvM1MM9sDl/PXv6FzjxrKsml4EnQ7RSxZCAWk1bJqYuUygKGymlFnXdC5mGCUCcaGK1cItulfZv8T4tnyVwDrgrKLC++0vNZXv49RT6ug6gBEj3KWhUNh8hPejPZ7Og2FTUow9TM+JvVjLxIvkHaR9t5kEoARRW/qBb+XeHh4wqQKizcjcumukN5ynC/JjWJwiRj7v7ZlmJmGfDJZgwyZZEZ62JhB3wF9JxVcgrh14o4MtfvMrC0p97nSUfArzm4sQ8pnkhrroA3mHrkbNkQ5XWJMKD8wHC56TtF+CE6G5tGyqjaJw71DtHa4fT4ZaP0TZlSJT8CQEX4fPSrIFZYH3kARn6zOQypaNrHiB2vkUN2BzbMmlvh+Gaqyk/XnIIvkcQtVhfCS6uoCNL0TmvoO2mNY01hDeSB+rFJU8s9IVDxZ7cBmweP7Oahtmn18uh6sdbnNJso3RLbe05eXT+Cw8BprZbigmhXHLCIuiXNB8rwdux7TbFLzBIypEfbOdnA08ARycSv0H4ClxHHkKiR3fZs+/4igAft5TNnyiBSVSva/6IQpXSjYBhGW8EWGuSE0nW6Mu9wmsUIZxGD/AyJ3ZpcEfMx+DbSwSv8Ga3NMRMLkhz7/wBksfEnS3RoC3QbDivop8CqyQkN8QpyhJ//rsM7W85am0LxjksH6X4Xbfsdvc7OqRI+LFGLUoG/JCMI+lUvwO2df7qlL3yQZxYwsNUB2IYSkh62VK69PfzRb7HENWZA8XlWcG/JGn2K/NoumjSrfaTO52ORxR9WVaAm41a2WOGN4bda/zEa8Yi2WG9K8mLVXSqoNDR3A+xK5jo/FoYftI+z1JwkvGpsFGhmewhOmJOcUS2zpxDcmZHXkHoYSEQ1hlVqmUJbnSMlJED1EjobB1flXT3quUuo3Y2tFvSY71eDKrgIUSQIdm8gW4DvXlpfwjw1Ns1Q3KdJZ8D2q8QHmc+V/Mwse03SAxcoAB7acd4Hzhz7VfzIvcImy4MNizlw1kPlHhztl3dh37nDVkO8feAO0epDa9kZ3zDxy62KNJqX/pIEdAUHaL1BQRh+Lwfg4nsJSAi1l6ppsFUFManWF9yhvvhgeN284l7LssIeJLQBVFwe+x7hz7SpH/xam0hfzsA3hpXHKGIQwHFid/Z/NmSl2QacqvcH7HzyaOJ6EghYBw48+jz77SyaI8OPF8Tkw+9ndUkWtg0H3UKmEeL3wL/9/GrQ3KTbd0pfltJK7dFipDcClgi4X1empE/x/PAaPM2xfQL/D0eBIOzIZP0r2JhhktwHufZ23l1R6FV5vqXX0bUQKhkUEgDAJelY0nxdm+Qe0YXnwrCXH5mGZ/04APmwLRQscQmrFurBhdwvLt4xZV/mGcOhDuWn93oIcH+1Zde6q9xbcc3CmkdUHf6vXJBMO8OMY+E39HTJJVLzTTuVIbZ12K+CwZaIwu0ymLLcopBHU8Ft5xv9FINfEkw/xKOTBxlcyyrNWwzYS0g4oAeObP07V45r7yjdiyeLCiWtGoEslnjSDZSWbYNtqptRXIb4JpO+ncHICZ94aD3sCcwtarMNf7s98Z2x3ck75r7ohJoOi4i/PfPf/+CvoXOinf8eriDic2vfpOwEUTQSX59yRLhPZYrTXyQle0FUy5bnFHbe+FJDzS2ElhfaFKHWYPvF4nS+mqYqUnBDa7F7H2XEKDn7LQ4qSXdyjkbCpcBNd99D4mzmp3skxbGMMHaj+Vhs6mjqGY8GMSOUP+y2SQb/pFzSbLkjEElrNp3Hs0obeuTQHl4y5e6lZT1b7zgrD8DtYe/7O4IlY2o2sGLpcbxfPByJmz+Rcb5+BHMdTC3AU/+Sp7h23aG2eh9shrNHTp/Q1EOh/3jQuwkRtPrPF6QuXVuZ4PMIJb5tb+4Z1qOETYQp0m5FEhye2Etk22axoeP/Qa+fSV1Iuvamuit6+SuDcjdJojoDMLzVQ+NYndlhGvcbAzAKI5plByk/gr0mRo+WofK+DmJeehwYG8x6tixC8B2NagU/MUg1YAILVZg4Td0Fk6qj6NGsvjBUAPxuyJqT3psoE0gGLZ282ISzYBPqjOZPek0Mp1BBWDKl9sd4ffKKvEVfNHPlw/UhFOXdKP64/lp5K9Np8mNBP7YxG3fLBKMltuOSMrT8muC3qYS31wLrmN6kghy7dW+CVKfjAki8aVQlb5FTSUDCTHfNd+tCsbkun7IGQFW8W3obCwwg0ye+29UGkV87zfHgF3MXsTHEnkhBtrICm3MCBzVMinbXwJhFufiCrftK8q9bfU1IgSUBeYyi45YRJ4dXMpL
WcGB66+qizKx7WcUQBTnVAgMsyDX+zfS3lJeEYoioDsGQ0E1dEowogOw6tGwtOJdJAwC6b/WSbXCLBHBtdwaUcZlFc51zLQw7UUYohSVGGPx+nC/ljfJ+l+rshv80yYkTXH/2pHMSAF7SR4bgPywzrsoC40rh6UJ9WY5k5uf6VosKNNbnG26P7YZ9TKhDPtz/vh126JPRIRjEJaOP2kStIeDFYMZEsMQ2aqfQI2V4mTUD7H5m7rF/P0MHAV+0/oGi1oMEf1QJmZrNgXLG9aUYyv+VuY7e6RjywCkdROqyNFk1NwsXzXq5ZMoc0i4sT2KObdHr8WfJTtjf1of/Xwn8Oe2bFmB2/RUlMZzzT1FrreMbaOZuQwmOO38qKiKcV/0KijoK+feikcmcvkJugseRdfqd1suQlEzev0l6t86hSOF9bnFWYtO6Z2iwlkb0hGT9/6eB5yueEpPn5VR4U4j6TIwlUe6717cD7IEjGXhgEadC5Zbrxl9ewPOlieBrhXTv2vtYf1BdfRy/161PBPdxpSGQ5Z9Mnu7v9K+1sqoNV9wjEp7e8eI5svDjmFKG3Ii3EXAdPcbKbN6BndXxa6uE4yL0vEaboyW5XvkoLx02yQHzqwDkdrc01cqE4alX4f3NrVlPzckC1xx9PJCocou+F9/pxCVSyKydzRcTX63x+Qc6lN2l5RwVWBGjBt+yDJnXppboYaTuRAIsLCf/KCShAsun3FiN6vzqGvWZL2R4UFYAl+3k97Lgik3i9YYVD0gb8tvhytVaFZURZcjY9wMS+beLAh+4jnAuDxHMMisgu/NpIenFgSDxd9Pgk8uPpu7pj7mN/NAXs+202TdpI20/5KWop52qrHMpqyV5n0TA+AXHn+uGuEw25E1e8dwB8PivpUfDDp1ev0ME+JU5tmDWHSr5lzpxcoF7qahODxCzfWibu3SAPInfvJvz7clwqLA7yZ82pHzIldFXsFENAlYjp0H35anYM5yC5eCSe7vrBykJv/hv/7Nd7XmmRitdLgqYEhMa3A532VV7StyyhshR6TEMytxi+9l4N6a9qJI3uIrUZE7QeUm79cLe9JW1oyrnWaq++1iVHP6UmUkTd4bgUyiVDfPIDmCxVIwN1Yz1llxgPxnZ8XiURhnQuP81ouBmF4Em4/tX2OGiq0cPOWh8vUROOm5RaYC8ryA8rH9l8zeOKafh4UQqROQ0Gs8ZYhFZSzgkZ+/j/6pcIJHrIqdStN5lD6pmdtFMmXx/MTawIoLVJ/JQKBVjn1JbsIupDfMgBvw+E1iya9w9TcBVCzAMF32HJbiLTBvZSa8QEftN8lDMfVrMwjcq8TPIXvn30tSjRZBn0svDUPb33PWxmt5DNvreC+pbJU81/OeVjM2SzBNaJUP56iFxpnntET4+eHDa3wVGSa7snFqF6cfb+uAUbmAXv7eAWZCVSO1yUl+QsI1OHMgRqRXbG/DOBTJ/iwqkRQj5yRSGs5Dp5HpMEW4fZgaMEisQFQpwUfQMnhces4WGRcnaZiVa9SNGkev+3UYrX4cK4yDgdhswLw5lwAy2wn0kdt0sDDtccxtRPkdgNT5JLqidE9RQrkY5uSWmpz9Q+mv745/EpEmcXpeDxyGZI9zhwW+F8oi0+iDP7JG/B5/UPbrN+IJXyVuPvRX+P5il16MvdPZPmRWfESBzU552uQol7eivgTvinKK8zxRLLdNzQlEd5xgyHDNLBQtqwJG8LDKFicYaT0yJfFmOL8wYQXtZUWB6qVfW5X+boTxvNHBGeMsK+WBsk0awX5zRn+jk6r2gmlKQrNd18qHXwAnwHefaaTDP4fU2/N7BjcNBXSt0WFy+ny/Fizs7/zzHjSYlIC/tSJD3sO8dajods6IZVAMrXnxTO/darpB5TaxZZ3ibMhmt+x3V1NW+8Gq+lTbFQjBD/dn7JSGb6qi380oMH+29bmYFKRKahXF34auv4Bbv+zd9NH+ABtmRTGZ/gay9BO0Xl5RNBrodCSu4Hal5ac670eUUJzL3crDa/ntwRPgEwDj+ABWfTsZ8fzuqLsgdkhFH1DbYZiNl5a8VmWROYYs+zAvtLhFy6gM+OzwqBsmblPBrSBHFIqo33S/cqQdFBayR5bfmzjfX9ivxvztLJ2etLGhDqbJVV2MC/ObiK+8Vf2PufntXBD/vK8lOz32up3s5WGC3B8pjW+aQgd2o3njxI7JTu8Q5o0AFzXYJU14346lzi4eeH48k3V4TmiGkaos4s4utxf77ZysNU+fTzlK7iJZWMMhjKBKbxSHwaA95ahjMnlDhpzuEp+hFPMvX9bUYDb4CpA5DLP/M4G92ikqJlpVyf9jzAUgT0qtba7huu5X33rDerRK6PeUeTFThwkc4+2RuVyxQFMUwXKhEkXGkC5zeJbViVG9M/e+YR5S43t0tc+dFsiuBbwkITTmhK8Y5za4mET7HQ96evr0PY6wxiXyVK1BZxW1/yKvzxdYA0Ay/8IkndwOazc0tiH0bPP5AgKG++jh3xHY8qDuT4ChR+d8Xwb4E07YgBbwmYOxUOvh4qgc001PHsQvitnuIBANzPYuTSF89eVZ4++gADh1sFI9lQGIkbTXL+KEHeCzRjrrbn9KYC58gJys3p+/dS1fwC0KSIVYHoTp+zAUYAnsTFNjexZ/BQycRsCS3pwAA4HqSq63G4rrhAlhUQqw21saU/RAdIl5fo19FUY0bgGPFuWWBMEgWrg+FoKnv/5jFC6cPNfWz6k314xsfPiy3/Dn3cVRut/xJ8Fy6UhvAANDay0kxJFfX4ElQBBFHGf3A3KK0qH1VwYcD4/hTExmBF/DgDDY+xcHpAbyk7xCb18NFJcv2rbs/jWzkOEbwxRn80aUjjmIZjo+Bx6E0W2tDZSd57D50TaDy6qAlUtny2IQiaVh0T0PXVPEMl/CkuS3KkQ4Er3bxjBHHQIEyZ+X3SuLrAeOlsfigw2SVT6MBCsIjh1+rMivB2T/7RcvbfqjelMc8fNTib+p8odY42Q2R5RIv3bq3b4FS1bbnGZU7gOZ51TPlKdfHLof++trNZWVWQb/Y1YnE1WPWbP4DxElHo1aZJW17do/d0ufBNBUT3QTwxfxmqBIzLCx1wrW6AAHiCapntIAblcMgC1xO23KxGx7cHSR3MeETk3DgVVLvt7dqjvJsLgnRCDCRNOzHJDl1LAD8CsNmmLzuHJ5x2t0U6p3iQur1as0pbnmjhfvIVfOMRX2jnp0p9Nm1zFET6b+gdE3l5/Pgdz6XLt/3uNRHIl5fg2k2MHE7beeKUs7VwAn9WG64Fc+dDYhZyg+BMEdPQeQIhos+X54a1WNnrCx8cS5FwkJH/CYScAxL+CehvHsG6xOvmHprw+5KWTo1pkvHzXbuz7F5/ia9t2i4FKQQMWjcnr1rwoWiKjsdshouBdrsYRz4BQFzr5gZxfWkUqrtk5J0Q85gmy3YgvtDU6wBbd3S310f+CvldeaeC+IH1JBDQRzLtYnffJrfgXk9S/8c4ypu0psX+HOpCduP2tNpM0IhwzIAjMOqDgQT5hkZmUs1ywFjqSopYG
fiG7ch8K0vpaX70rWCoU0k6zYbJpEP2NWtSI74VJzfP/ZflOtLOW2CdV40/D2foSx4ya0JyCwMNVvx3pO2r8Ex8fcMFP+WHgl1MWpsFbc4CLcrYUrgGA29z2lpqOaqICzAGoD3NtaPtzkOa8ytH8QYI4qWnd18m05rmBXFhf++jw1fawxP3rO/zoBYiIvVXlyLsSqhMtX7mMpk+rq0Q9/dIWlP9Y+7D7hbrMvmAGI+Yl7WTH/QgT6FxWExmTTOT093RkXrMbaMeZcnqHbmnzwmXg95uyIGZ10DN3vI+f1ho1Ajb/LB9s+/ttr6cU2L17sxvf2/cSE7WNUNU3gyz9lEzL8WCB+vumFtD5ckxUy+teAtfwUxvudskpfEWSKLW4k+Z/5Dja7R5A+Z41KtA5eEjdhMJ/7+3luwBc97e0Q8pCaV7BuzPlLW/R+zO8GFyphk/hh06XJnWvXK+xqBHFqyamWZXSjBjx/JvWS5X+Ghv1jw2CRaLRFVUXcvrgnzOhVmRihA3MxXhf/m1glVJ3wXG1wwCtKzMidgqBWu7HkW7jyKHHzb3r4b3ZAk1bWSfTrHAYEnIhfFwo7u/4eVCUkrVUbDO0GYh7CUW9Mb6l1m3SKYw8b71x232ig1IlmVtimyrt9AYwTo5n8jPzbO1b3nRawOhVNlzBsgvPDaWHQqe8ISXQpNDNIB0hUrmcUq065icSv95l6L96ePwNFWzwZiFfeZ8znYM/il9d0vPpVDg9H8LgZtfutuoRKhUcGx/0vWtUJk+IKCdGjvrt0furYIR0IhqEMP3Npm3WpdOg7MxDwLgo/FTcFLpq3p7VlggGba2mLsIW9EctNc4Mp6CGDqodkE8P6rpjbSV9CRjHD+q91gotfXUwXu+IlHfK3P1K+GH83XazzHaw/66TA7ibEV9HZ+zdB1T+Z0MET+nUowIQL3ikRq5B588m8x0x68OGGgN0TM2vAFDKjKXXyqQu+Gz3/wTkdzPcyxvkoZmscU2/A6ctWMXWSWuNyzNrhNmIu2neyKG3+598Lswg97GgXo4xS0d9LN7rLzM8TmkuCm241+vdYG1rFkQupgzycrzELJD5q3xNwT6t9TViESXbD96OymwrPESJV3N67JIOKUut/T5jGSYZ+ZR/69x0T7iiKS0YoEfCyvuR3OrafNcZlvA6S1ifTIKlupBHjF2g49wvZ4dvQ9CUv+8bx4pRJSf/5MQIn2eGtszhGS613Eo3szYQLNIDO7ZdWSljzJpWIh/pZnC+L6KlKeZt5Lwbb9UUmNJgMgCE9ViMliLn4MtvguTzAgMY1bIw9fH+PnI8rLi9PQTqgA75GZA11Ecjr3gYkdrZoeHLI8bIKe2s+d3457sztgwLyFH0TRECNNoIieVDHNxC8E0wi5vcGW8m2YLfvR87AXroHwyg/xOnTt4t1PYgO9FJllmmTReEBahu+VbamElIavfRhC6yypXmH1G/YjF8Vqw7Io8H6ghMHRBdhnkA8VhcwHNgvm5ume0x+5Kv3Pf6f39hdH+nHKq9AFcsQT7jDYN8CXZXm6TT3goVuSOCAcJ0CChkG/14sndnHwQQRUoQMR2lBXyT7omnKhQ9hQhw2+b6XjBdVxoqXh4UjxFuREgfubLu2SUjEvRfJdUDsj9J7ovSu18sZgX6enL/T9p9fVSRNPxsvooQj4sz74bt2eL0P1v183vJzhS93tCeKvwl3ShVFqDMQgMrKEV9JyIxJYYt+U3smAD8XJKd9wA7lw+DICg5of6f3ueUUHUQi6JyxJq/XB4JojNoYHgXlj2GnYLBMgNk1SEbm5SSYJbwpsFa0O33g9md09psKfT6yPM4g93Ps7Oqo+LVY39z3XhuPaYDbWjMdeHmOZ+sgut1UDpOzkM3wUAUSGLL2u5M35atuhIs/rwf6BWfMWqUPJmC/fuJAlLE8atkSUaBUvtI3Hozo8staswjvS5Hma3fRpORc1BpxjPipQXAI0i0VQrxkOK7duEYPetJAmMNojUJ3sLehWs5+NQzI+RAkilBjWuVeOWmfVVzHb83SLABplVNKybKN1frBJDCUxSp5qT/qi4fP0sDRM3JB32sWT/fd6UQ2WX0MRq/PtQKj1I9Xehx7b618DtVlb2R02XJN7yEPt2V/FXTYPFNqZ+81vnV3fnLWOx6tGt0CgSlJNF26u6BoBZ7UE88SmAgZgEieVVTfkVbB5hLKJyq+Mp4rRZoGVJ5ZR/wd1kIAIFEpN4S1EYpI92lCvO//InDPYczlnopjXq4aUkymrPJQMxYRSDcraf9AEE3MqDCLtsPT4nAMYESD6HB+nQbHA3Jc+ByQHTVotNA0cXK7tl5a63l5tLKPpXlmG2qml2kPGQ4kkjxp+WL2/ZCvcv95xEHIv6qNcHKE5KUWJES+QSs0MThJ7/zXYUBDHs6DNp7jPTekXdKg1a+pAkE7s7Q9NUdb+fX3TkEUGzLwlpf+Sr6W0eIUo8q2w3o62AJE54zWfoVuX8oGY8I4cRKK4nU4pFENR3E5Ce27PXi/UPx5xgun347+SFrn6+4C5UKTuQSYKCSH8S9dRrNgKf5DGpG6sk/clu0ogToJgDSYP9Ib6AnxGMcP3YWe9PfOLUQPiX/DUKjWxHNebzQotGIoIvZMsu1tQ43P/6ESC7Te9ud4Z5lvN2qRbDVBVNxMl9153HH0mGsJN2jGKQH8puOX6srXoUI8RDRDdjEhYyc93P46/pjI6oXnYy8BBzopBdvdzn0sT2px7VvZhmXHClTELUbRhm8UYpyXfqOo3ZyhnM62qgjgmKu6dGLaum6GONjeZaGY83RX8IVeiY/eixpuB/Uso/XQhgtd/ZkW6Zlh2mZeIlR/iM7nx7tu+kTfeRj9ei+X4SOivCTvQn1WlwsKrxmvGwZLGKHhoJpVPO/dCGDOl/PrVfb2HU/4Dmmt+JTifJyyNWKMJjLd2C49XF8kPI4E8evQQCFbLabkhCQwmlu9D7cZuuIoiUN3jlQWIcC07MiOxWBnEYZ1NAgfLWkK25fzSYW2+FTxzDyvhtXG7zZqeVXYVW2DnlRaZttLXIQ4og6DwmP8m026JffZmNwlLIpSW1VFrSrl6DeGxyGwr77itntz0rV4/Sra50N/ClIlwSM5r4SQqNIEU0O+j2lQ4xtbopZZW531kDTy+X/1OW/EYOTiV00sukJLZiqRi3QBI1rjmv6BAcfVoFJUCxflbwJFRvYHuB5xoU9iLgCXDHuc5ntYr+jtOTglGmz8q+ukVjeMomrVI7lqQQiHj0HTsOdKVSX6QQjZSci0HWCWqeP1NjTeJg+Pr0JTSMvdSGlU5udk9DWa/AQsKwxKJj3kL3iOWCbfOf+IeW7RjYrwhk65e/paAsvcJalvcbhWTZ+M86FWFC7Q1rafO8QJBSjuserskMohP9/GN7xCm86BS6JHw/djs8u/dkaiGH98C6YrhEj2Eye5z+C4MhBUShnx720rO2/bXC2rHnIb6ib8kqOLwCC6g+3PVAs+UzIi5nl92tuQ3QSJLSnTRcj34IXINI2NL4+ejHmt9fxZ1dKG+ib8X2
Ezs6rf+3VSo36h0nwSmBB/R6niyUhvoW1d6xbreekCb+Xg76thOaeX8FBB5vnb1Hia8tM8wLWRfMTTyOKrTDBbxho/TGfphayBWG73tsMAKBVdhMzoW8m7kchh8SCrv3ifq/wM2rGuxtc24rTSAZwiHB0l6Gm2b/4Mw8Oy4froAP5FhwePYEMZZYtu5u2vvlYidDscgJWLJRYSlFEJSNMt+3cOZfnsSE8h5GFSC4g/+egK35IEgVwAtK9n49R8bGne8jLs54qDFBRRpoCTlHatku0ieLHa1cxg3GSZMX44QsbzMAKaSYn3Zea+iOSGsZeymq3AFPXWvJR/lI1Nl+duQwCngiCMaU2jrM4LPE4BrtDe/fgn6XsLnkptEsJ8M3bAwsnqEajOQ4YPdhR1PLPvlPs6R3K2dGuZeC2UOzu80q3P1lNzw+CsqFho2lmJIld47TNG3vDVjIB21EWEEeeIractz/NSPWrFeufR7gI3ixxhOSEyr+muS1NiFMZNYVHGaTi9D7mEg5GWE3jEMvkjOGYWjahyK8idKn5XcbiEyiKflaJtk4YOCI0IJwJTPBaUBJSPYr3jp+TQh3dKV1e1ZZIuzvduCDr79H6XurJGIR/9dTCAUWMlvyQ8ZYMjcgW4x7CcHXK0QVlhN9SJcRWg5CRQcp3C2t648I7Dopo1lDxu8UESadXD3gJsahaJlEjr+7hHAEIeL8A/gZPv0EGaF/b9HJy37FMOzhFgALXq1LUjep9f20JFmD8JUjgFVLfVMvGXkiTQ12tSJ4XpbxMhMoVYvxbP1h7SRQNvoB5cZkK7wH/UDmlku3V4cDgFHcVChZFXmATl7FWpXCHwc0wZcux3+IZyQ6AtL9e0xvz45vJCpF0SMZWI+wAyast97hUs8KhYxN5jY31GqXJQiGCWiAIy0rZPkibvU8sgeKl598UsFF2n+fBuuY5KfoOAUIeyZ/+zWUS2X/wpdST6/gZhWwq3H1mIYQScAwI9bB1pyze1NQk8Wla/7YmdPyrw/PcZzxbhSKcJhAXNyvfXC8ufYwD/Ci3FzE20vFOW2ZSFCh1Bp1pQScoyIA//V32D1Zq/vFp8WjP5scDVYeNrTz/k8nty3JjurxUlELM9pbBGyOkRzAL3Wi9Gm7R6klhHDdskG9jXML0zIRSUTSve3KbZeicFj9q+YtddrAP5ei9XraSadqtMIhT/CrKoOepd0lS2z3HZ/6VMA/b+xkm/eEOxu5uB56DbttE2EwoPeD//aeCy9SNNwT1C1PsGyT2/HDYckifUI/y2gUcgrZwO0hCYznzlMFVsm3OT5ivCamc9c4HLwSYfmCP+F/BAP1FAXrzBnCrWc9euINKMbkM2SAOhDrDZfoaB1xSq6p1GXEhjsCl0XoR9Sr1iR0g5fs1jemY8uFGImkfp8vIv35WnwYaYoHLY9tFrj8I+hWYsUo6pSEY5rWbbpJWjZRB8tW80DoYJ+RqpfcCPHN2ADSaY5tsnCrkbMwzYT0iCA5YaMqO/6ehtA2d4m33avLA+l1l4IpzmWeMJhOUAO+vzFP6Gq01nRFRClm1j6fxroPtcf+h6zStX/cL3qWThlUeSmnlleywcIAlTei7isQqD1qRo6RY1LjsFqqIaBMaRsEJdGbeCJklivb9MwUK+gcV8spsoh+mozQZjwrwWKqvSesxDgwz3PUk/tbNEvuJzZ2JwCIopOzdkSCu1J9jYJmiGhZYeBkXD/Ebg2Kk0uWLhlWm83bJcvy5ojiGOVmCo6sccVycChM4ogTGgNHxKD6gHXpzXDupTWBTnLktj3U7KAEV2g0+jQc2qQ3ONUtmzSYYn0JRyPBRqqNn38m2D8cM6m0Y4MmXJLwf3Bc0ZuZfmM4al6zhESlP/TZgwYjRpM4dfIIJ3d0RmoJG6LWOPhHU6C8sFPfO31+c3dpxo+U7O4q5AZyYXINIfuMUrVqCnVHaUW0QC/HuEBUbj6mxlz8+MU/lDUsmynzmG6wmXz/3QMSE5ZqUOZ7OTfgBK43bBFh9wQMXmN1kGCJzTQVfU4Z8Lb51pFGEcQpLYbCBNSMowCIgWb6P7chy2ymEW+AyNTYFQzPJHA7q2W5TmehFRRQxZ+DLzLEt0o8mNIf7Xh+nlPfii5DhYZAfdd78Tw2lftzh+HgKn0V25gUku0eq4nvP+yHNmqteQvcxfqwD0QtsBSgqVWOeEMOKHSf64LXq3vPkQwWPqvlhW9zhFduEQ+vjleLteZECTZJTMZd/2Ao9KCNGblhZzq77LoGVZ+7a/Sdcet+MwQxKrYZ9Hw/ohcu6Te10tzEQ6r+b5pqg9/nW9DmZgCKxfiQH9iOOM3R4rdUFxdfb+iegUXnyBY0SyqZB0ydE+y834VWtDgyNZ90iH8Yq8HwEBzGKbgkyMVwcG54mOV74+eXYK7qvj2ZUL8JBRKR05wReLH6R1wUrv8k7QrsNhQhCD1C1hBePMWFDIG+GUraswO3gQy8lvYS+7WsI3daG+/HccJEtS/I8v4sRYZs2lPQzRi/goyJe8w7BPjmLxatuUd4Lf1GNXAaEHCHffb2bZcgXnZppgb4J8PayuVxO5z7Ar6v3bhnRxTaXDEo8auMMLLBWX265NFoCt4tfFQ3xb6hpjyqJyD4/5nJEBQeHn730NWeTJH9/mDxLdHi75CcyQU+b2Xe+PoicR61sPvVrtv/sqPBxnysvYnnqdnj8OI3KVAL7FnlTgQWFJE4aUjlp8o4slLm32jEpasLgAW4l+Trr6nYDnk50qKX23p0kHGeLbUP0w8LxysKJ/6teFWHAHq+Pktz1khILMCpYVcT96rifSiolLDuk7/uKTMhNX7ruB+Brdo4LXNirs4fhUGb8I/fUYeWdUQuItHZlFBJj9q0Y+01Fj3g9iD28QLGMVBjgwr56aSXNHm+SB4WDsT5IaKUzuB6nLoeFdPyCgoLT26j189gRXHwnOJK1HFxKP8ALcVKyJ6cYXEWQBhywciD7QVmyMz1WKs8KIAxbEZ3mRw7jGXxh41gTRhrBsMY9dx4kYO7kqEIP3g/M4JOIeVMSQnI/gusZqtFnb653P22UwV2y5c4WvTqUsODz/4T288n2R5wzPKxTkYy8esMm2dXkve0VDfsXbA9mWbIl2O3DVZdsNOaLsC3+WBS5xwlAkYwQOhO/Hxqt+3vhTm3OyoQo+3sVaMWR/49aVLLwdw/aIyGt7JVjkNxOa0/BvhnknTHSVI5WNh8PEfV/2MKvp1cDDYzv5JllKGYt61idziqsAE2CbvOrFB16QfqFcIT6woKML95s8qt+D1Q3XlvhDfi1u2EN9ckmpyMHdJtqxqImFa5oD3MCdphiyaBXErg4gUHjxFEQX0Ap91Uj+456eZTEKy3sJVQL8e6d98IJbkj73/GBavZwz1GU/Ear083LQpgnxL+k6j1hthJWLRIVwVQO3LWfQHLNUv+ZidcZ3xKUDSn2VKJ2TKKGGZV8hHYrkrx0CLUxJ3w5oOsVXDYxE8TnmJlgUtPryxtZi9LS5axFNg549FL4SATiueQO/frkQSgp+Jw+B+qG/UtoXR
AvkNMw3paVmZcmzLdhKCpxZX6e1f02U6a4bZvPui7k6YHMhLBzGMR59wffK3EXYs58luJ89nxCdOc0lnTZNsaq57zeHl6pK1Y03VmqEObi8y0tIhgnn8WthiEGzekVeBjGiHt3t5EnUT9YGMRjrznpFLS7hez6/RWMQ15ZYrhmJUHfbdOtLTdYTJIfKUChGH3snSrX1P/0gm8BdT3Hf9XNR84z3acxCjnNmlOMyAvn+jcft1nndFjnYqMHfbpch+xKOTJBlipgvctCd83DYnWORvbWgOHgFuZbbu14mhk+E+hc9380BfDCPGNUAndY5pGxVZPAGXazHKe8ER2wILSSFzq1a09VRLx872yUIucZfRkpQ/jTf8RlqSCkWOvlxHTDcUtw+9NeAmmXWJSyd9fCjnx5lKYbUQVFPmiZYHDaKitz55JxeOr2+Elby7EaLEQMCjDkJmFLxaYNBDd4aN8MLyEIcQ706t/lu8cDZNoHcFuKaJBXKa9KIkk0yUOH4nO2XiBTx2U1NHP0a2rnjAwEm/Cu8Ml7+Usad4m5FZc6ZnTUfBMU7nnKW+J9b8dEC6UXK9LbvLIhHzUJybPWbqDWVCfGcU5SDSycgcJbreDNgooT4kFR1UCE6PJDJdl7Z61Z5+X7FOtQUBplxIFFHhOGgJZnDypn38/bLQM3TvnPNE7L6K0MTsuUqa+R3aJoeSEPirhde/5WVsjSv7ubzE6mTZQSDvAod3F/hBk7TKqTUSIdHbS3xdQeXAURC0kIxhSIMhcGAlUeIk7TC0dT+E7g3vmk/BzT2auPgtA/IruwdYepbsyBBA10rn2XwxiufEpCZsPtnHErMr6wW3KgG6y8yrLAHfwK9y10KCSZGCu5Yv7uv2yLJrYJW7+xZkB+zBOmTBknFn6HMfqlOzTR4b7/uocpldIzZwNEbdFg7AeVIsZZWRwXKiJe9k+Tex+atDZfR7Xg/kehMyYUhV9KlLEyqgsO7I4hZAVqPLa6Qpx6BMfLYD8PUvJnfQK0Ysm36EjfDAarQRkFxBGvdPDgT7FWaMlYFAjLQZWVF8Ee/t4lfeYQKiknUDr4/wrclwB4zb5sdNidfL0pRDCRjh2YMOa4vTfgd1s53VGCFq4tLYXQHzB3417fU3pUrnR4rrbfmK9W51x7dAJVB/KyBv7t97TfCnV9dnODw/BS6dEIwJzqOeuVE1SwJnQMu/hphTbju39hygOkIKL3ctGsAcrCYrgftGSe6MlvGoC27C3j7vnHh6jfaOjZUrOITfp3899EoGcVZKSsZ7hcjiTbR3edvXnwheZ8RYcOsiRP6kQnihSvFHkfdyHzydXsdX2gPZfaFlU64IuDsuGHF6sL6BnNUWVluPAhZRCx9qVUFcTexIo6uzmS22J9SYA7mBU+Igj4/oV1zEnolKItXNo+r6ZT9LdRFg+CN9dekVoT4qRn9X+LqRchCbKDYlSkERmj4b3CGgRef51yob+b8pq7VAH45K5eNod8gpF2ZZ0rEZTgDvgUOspdjF0r5OKboOvOrhkNThaS3AZhZ9Ztum3UEzlSCYjz2uS4Z9/WCv3bvL7qmqQcVv3SvFPfNAP4hcbYuA3oLRPprX3u8VC8TsFfXaH20WfFXysI1ksjrpY+rgUlHis2V4JcKFpDII80mEQF+bH48XIw/e8gzeRhTa+qdSlDLLEkP+ymxdQGRoi19u1hlbhTJTtRnvX5GrwlEK+xb4wVz6fPBlXMuqJwbYRSaWALkq57C1B5AWGEvvqYS5PO7yD5Js9vdwFTB9/7zbgIXOF/tci9uzfgL7t0Z7jeD+SnilzHHovwb9VKvH2w8wxyV9ygpAG2bT8PYgWvm5bh78zZ2xGTjCKsA5qAFIn3p4VM+ZJNB23OKDWkzEuhlvy3XKTUsf1OdwOnn3GDnbQCIaHwtR9lkDoWWIYQutkcmUHF0G/FRtE81/jiLJPryV21I0w8zavpgJUqvV842Z2UfBoRGD7VDYywGjjYSJrbcZQYlp4NBUS1PKtcpLAK0eBZTOS/51dbhPYIpVM1PmKeWH0NffEZQq54FwCb24zIdfbo+eXw+xPn+fKZ3hCdDlTdFwIqn29/GvAKEVNCo7A/PXkei4UfFbXRstzw+A/Sd7f5Nuq9NPaHw/rRpFM7MY2+bh7K5AmO0jHb1jfUsV38OejsYvnDQdwAZY69J0j7OctSqj7UyzjSHYwuerpKFQo+wEd3vsroUBZGopqijS88f+Eql7ZuhFKKyNntJexB2UMSSTKYpznsujU0KoEjivLo4M91eWwQLKQaEl+Rx9+cCG0eUpSQnusF1kEbVxyADuFX7xe6lA3egjUsX1JRQlkkBssnhfCH9XiPZWt8cVdodIm8Hp3e3OhcS47vhWPr/wnsnquCBtTbh2x/LCJ72Wo6KXroF5dWPmm2kX3V2Jda52vTkgNhvU4IxUtp0zrM3jFeR0NPP66nYsMg2CfX9/Pr7ADIbmxP1EvIz6T6zWoW/CDeVydKrIbbJsLeDqLhaKdIRQyO2KCHZuigqIgaxWcEcUnbr6PwYg51QVId/r5xm4c+yVomAT9t+tONHAPq+OxW1UX0cQa/toaTCZCj27C4YpXbW8kb0ui2kgUtybn/xqW92axGU8ulJNVIWj/oRVHN2zdAMiNzqQtPaIvhROIG8+UB4t8+xbVzjyzfp3RwyODgfORQ7qQw/6xpO5m6Koyc0SuxIzN5NsbJSXfZ8hhfWJPkouIpAMgskBNXjOqlDbbFt7+gidOmKbQATqMmYap7IBRdOrjlCUUUmKDwV+T55owU0K7KKXZqHNkbMsAfC9SpVysWLX5rFek+ptDeAG51DTo7PISCV56j2mIvVF9ps52ZLdAnhOXt2ZoC9cJtmGpXlwvehYPaWwyXwccR3L3luDeAcUzvRgV2efEFooMhNMYUdIewJ4LsdfIzwMqErcgRoHBXKnXODtYgMyPetkLPab3c1rC+UB50jbsvHOxZf185RMWYPTQI8u/coBXoCyqLBD9NJu6my98938JweQQ4VKEDa86/qdvp+GsNVX6ETnLHlOwT6cvObTfwQRM7lOCufJwCQuzMFIUL16CtHmCOPRGdr7KCN2ZI1bTcqdzgtdX5+wMWayxC7mpOzv0hiiZIYCPXeSXmcIewgK7U+VGNdG6WV5qraLAc5b/GLxXccf3k8AmPP85HGGMOPPek74AUXcJgacBm/LVdoH/sfAxOpnElFJiR4OPK94uhzSJlcVyJfDR4T6MPAKKGxtkkB3lY+u3bOK7eNgcSJzrjReC8QSUV7RJvP5oD75hsov2Q6gVOvbNKOgaAmJqoLh7ZiquSDAEBraoiHzI/osLBXr99p7ohG0eaasUyno5k6lvmp7ikn+pGAOZlUD+OngGbR26Lqa5pw2C0PDlo4zvdejMzI6Mf4tqyqa1ZsXPPyoKPqGa+glZn8rq9ZBZJt7XV8ZgX6Mzw4tk1QnrWUhIDKcHb94SsL9gdFWAHle/OTxBXEltKEwOwZstr2iFvLQGMV0db1KCu4DpzCe5TlMhwxVX2+3/R+UZVlohcKcthT
J4TeAaPZQsm5v0EQusbTmqmWF/fxBm3BciPfg4j/PodkvH8xYvJUwOjN5AGfSKczdziGikCRj0jBo7TNchIwdx1NrThsik9nK8hye1NOBrivjdDrR6osCV+rVxAURXzp2aQYzJdtOPLckzZI+oVE4qTSGzrs7ziYaqyYfPwaRs468GD9OqIprd/H9IzFeeWR/uGxLxdH8vW+JJHV1W87jHZ8MIH1smGanmsD/xrxy+EcLC+rIpcwc0WU6adTy/EEBKLTQQjBC+NTa40Q95DU82U2uvfNhMmJrvqLIn1Ga4j1bTy2zDsHsT3tclX6mNre+mScnzm6yXP8XPg4lpamqG9uQZfnStIJiM4IjHMh2tCrNbl5vsX7scRF2N1rldS/az2x+NjfniOe27XCFa+2ILm+fvUuwHsPwU+mePv6c3PVI+toK6lbihhHCbHgl69+kHXZGsMwkL7Wl9femxpFS0kY8p4M7JbmUd9zcLqNTD7EXjyKdOMvtpMQzqUkX7jRlZq1168ppP/L6xbw0JxByonIhcmR3MYSPmCNvyiawnJg4XViSjvXDALn4d4jVg/IY2/ryUk6HKV+JvAG+fEiQ8aNMOnmdweEN/Hk5z0mMDsGW+ZCSIfOMBDsMIXjKbfenCpN9mr3IOM/zKMyYndDy3vStV8LLMPnG7tcbHflNcaf4cRdpHtslVo6eOZbPFxws5tGDa5+yGFsDoeTw40b+uhSbeg33+wdifkwuebslpSNet8ym5MVgh7SWnzC0prBNhk9UgmDZgoNPL3jTxXm+/cjF4wZbDQTOKVKWDyfLfrisQMRQfYoWZTcqiwdLuf6q86qBVmfsTTpuu8y47TvMFBabIp68E7HnxahgXFxoegWcT5ZmfOpp/uVO7L8i75PTcM70cMx+aaU1Sm1PZB99ypeqtzOkUZjrvOrOqOgMd76mnBloTroNy7BP68MGy6prhCpVSQX9IeaLiNwahRGESp82+C5jN/lajL+kUCi+gUS5PWroUmtBN9xDL5ploapdS0kvkUwv5lIl7q1muz1sVOb2Co6Z0I1ewgfveECwkVI6ocKKI8oaPvKkuWWz09Zujny8dnsIjEaf5ji8PUrtHBjFrPrFbPnL3bXRX3niPyBbhGLP885iRcMyXjoFa5SOJBQd1jytPT65bd7TP+mQxh6XsahS+OzNAeUn1Nhbra9n3S+4R2GCmUtRAod17VshwRny3HqbCqhs5t3iGM2TzwyOR5w2qO9Q4W+DADUU11Ahu+vUHOcfAHRuTPj+pA4UY65dSOLKaBbdY3Lok/vSRuA3gEZpPkxJb5KGnWP3I1Le+6AvbgJtMYTqY/rCM3D9q7vey7JwD+TrHhTF2Q7rZW8nL99qYDUYB9RChki3exwJ/bUb9JdbrdEtp1w6wU6YErmyaPzkCeU5sPLCu/HjNL7YYwdkqJezxxFJjpcS6JkS5kL2basfzpCKAbk6+V1GXSEalyLYPmERwRgNxemshmcurKy02vJUuqMjWnpNkm0+YBGruFgqrnY+P1Z+Y99XFJ6YzXb5ICP6zs6Gc2whtXoKMJmYyMEWFO9a8rWWi5RydpOuuvXeusY1R99oMVxrkwGSZ7Q/fUML+73fNIqrIR3VC4NrUYNKBzsCyWGsN2vrF+8LrH6g/oVPO5ERv0qSX6jO8x0IL0ImJ9Pz6enxxQRHJYF44ShE/avTG5l/1h1drqmdGH76LyYVFLdt4ChnoDKn1y72tpg2kp4lcmBvUAPcpa0H5MAvGws+y1bC5mvqXHGl8plUtVR0UF9OA3Pzd52GrA5EK3+axCgg6t+Vg8gPjaS+4Cui4KfXoErd92LKrIMUHeJtnCcfT3afEStXUdqNv6qRsk4b0Rq+hXhtxh8an1+QpB4QxRnoj52ojyhcVokvGdclOdC+TVYLHK0YSe2MU0152R0rQJ8a1S/f3ljKhCD0T2rRrnBPyDpJkyMTKK3HUB1dcfINwE+hsUhzdvFX0LBqSDinCwP/GCbNvu5ryBEAZPtIIkMGMTxyOB3x1bBc29EHhumJTbnYHHF27DdNyztxtdy2eBFavkJMqBZ5jGq7y9Z1C+PU7NPimUVAalGxqjYO64+OW0kuyAu8j6rF+pAh131u6mxuAYG2csfRqOrcbYJiCR2l+EEQ4xYcYjcsjD6j3aCMcqgoSE72ldPNP70i3sqXBAEygK5fhex27k6P0/pvFQ94hJSOnv3xz6Zo0kY8LD/pem6tmU1duAvkcMjOecwwBs5DTDk8PWX3sf3wV72sRdDN+pSlaSWQlhixGnYb/0g5k1wJI1qJ73dnaO581r63XMzED/7t6S1s9CkCBnyH/tuCMEJCx9dSf75lPq9P9OgvaRYmsc5ZBj+VefETMU0wkPnIKSmj91GWloTnJGFRDsLiZ5jS1VmdUyZBzVz7LM3tdRZnlslyC3ZGwngRx7/4qkiiOzR+5V9/4wrppY+br8egX7hKeaQ9Ipug6EiGeFeNtGt+vJDgBV0xT4R/pQOkH3MlYD2d9eOQvAY0nf1pqo87u6cme3DwZ5jynxfxQ+6e7KoIrINIuqEUl9FKbg+EucDwZCSU8SUTMfHmEmGoDCNCj0ZQmdDv35a/TimjbJIqX+J3/LK4w+ZR+AviMk0QZJXlxGz4nL/Q7n2vEj/zHW9SNYIieIoKGOioW+FgbPIqqVrVUXrS2CtuoWSWZEebIq1MuTf9QQVTSWqOPx76hhB9oWr/Yt4VvR+Cp8oipEY0evacdmTgiE7XrUg09qZmY+wU0BKDYlRr//9rPnLElD1LLUZtt3e0B6uEp/jr+ttwf4oEu+YT8R+h+TFUo3e8v35k7+nu6JIRrzLRURbCQJd+VUj0XApAXMPVqtrZfPozO7RCVpfsQzyS8pPnX+fatJJ4AdsAlxLFecLLcIRpqUo/rzyZVRn929mXS6WfA8J3rzLNTmOC6EXfgjKpz6La7OO4V9m98FXJPnZwkI13d3fUmePrOTyCMWuRilvBVmhn237NjOjxei3gO2vuh/6xR+KQgeA0I6Xm1zTelTOLaPOndkOWPefujc7AasAxZNxk0ZpwBOb/Fk5ypj2YL2sTYyCimUYR0J2PWD+rsRqXvk3AsThQb/xjrvo9Q5Mc7oPaHyNMG6gj1LL7nog5nW7iRJL9bkr89xqisRGnmlzO9vcgDaM5Lrv84eRvnUXnVJxYBkuvkqlUzhIJnZuu6+jf6UHtd78+AAHgDFDgzaq7iKIfIyJBrP9N/mcW4GsvgLYE+USto66eaGgab0nRem+Ot6TuzSq0S0LLdL8PRnDeBuWHjjMYgt3zef8a36q5sX9sOUZb//IqkQg/G+oPGSKr25pyyI9KuxT2eRVkt1v3T4YrZxJaaWqTBnuSF8awyebPQFPFf9iorAXgzBFdekr6SBfQrdvqNzKx3EgbmeLLj8ckAHKRTo6CWH/r3aB8FhV6Issws/ZwUrpcCiqS039IWSQ7Q8evXqYlWGFlwpTCfMBQUmEb3aOrmTlY/419OdUn9L/igyJbhW7qXKlSdv5P65hHq2UIKLJch33N+Ed+B1
6vcJPStQ1Bx5HBtkBRbxneX3h068i/vUb0lyfv8utue7OCcGlcxvYWxrEZ8kErVIgzNnAyDRmXotVAWpiFl+/7OA4xQSJQMRa5aW5nYDQoiSUwmcpg8prrc3v7EKwyLkQq4gtHsDJB8Rnd+jAsGdqxraWy72gD+wLx2i8lk1Pe39d7Fis0emjUs7tsbejd4BkeXE5946/C3LeL3Va/6AhTvMpGZGBnH61h+R/chA7o6R7gmywV7Dxdzu1yztGkozwz0HaT3wESXIWppCkxlWwIXk33TWsCQi0i2X1X4e7M1DnDb8MOnJK7q+DJqPCZoydT5EjfxrqpgjWTRJn3SfO61RQf0HP3KvyVjvictcR/1q1jWMgot1LGJKo8J+MMktEuj9Zs3+rAypBhSbbXdTh4rLL9/6DPTuKNv5/l0oFE8UOrtTjxpY3c6bVv8C8+zHkznDXjZarx1KVCcv1EkFthS8R1eBAXi1g9QrfWGxLgd1hhoJQcEtyqZ/C6JKGYm4emWR12YLo/jEYWL5o4NPUhTCcHrdfkpnjRQedpn02VnTpPsVeAavYf823WgkEmEUfZeFs8EeaD92wqnDP/lQguUfqT/NyCz9YDQrG61iCSZI+U8D11nhI55xY93xpQE5E3K4e+dx1Mv2NC0HFcJJecTC1d2fzRjZOd6K4wYdTk+Q4utuqf/Da8wzhMlcdtzE6iReP+PjnzNBfq5+vsQP7UP/6AsiPcK0f1F6yBkRAyosuDDSOd7Uh8AWSg7BCn29VPM/Nmhix00e8LSsbMsFQOBx1bpqntvOI38h7YGjSxETwTVYTQ9E6lYZFBtGlOf34FAitJwO7gGIhMV8VR314eJwP3lMLAIKvTgDblbrfPLAKmvkbsFkimOOHNGEt4NjQX2QxyxgpaCCB/y62+ndOtkX3gkFTLxNooLHxjpJZ5+sL938BeOrz4ZItPR4/aI2zAF1ARVDMIpbjKwH9ujPiaystlKSbv4MBs9dHGXpM+/FQNEQn5+uLS1pzZeztVYuWPLTCj95iSdV/p2kCacmrewe588OnwsSyMjK6dQmR3Mwtwi+tqpBcRhrZkw4QmxvLnS/eSP/OTwIq3Ae4oRrL+8C0P/rrXnCh5xBM9z0pxbn26jcGsAa3gsQszT22p2YHcAR/rSeCx05vpwU0BmTbv5pM4/jSwZcNIZm1Hk5dxZ/Q8lRGGH9n5UHziqv8rSMVVOoJ46plkjygjfumWxOE7VGtsoob0eQcLYudjGtvzooEPAxROoLw8jI5gay/UH3xOf0RFqRl2IUkECYAZksjJPBNr76P5zVvkKIsFVq7IYXm3Wj3MtMdfGmDc8MRmeA1GYJLEPXzYX2mtFF9IW5+pkedqkUCt9HHEpOg/tiy9sxboAl3OKLn05YjhKEtNszYGITx699XeP1rJ3fkZqkjewxyL1AtrxsbAIzU/QbU33CMAAV/5T6wMQLD+W0NrZCUdZIHW7a1ZHzhonULQg1bYJ/vHyNe3Qw+tLAz2jl/k4sr7Gcmnk0tXauH/mXkNQfC2S/X8kuq0qdPRbL6u66MUxHbZoK5s4aXGAH6lO2N8zO/e/Sz8HY3GMFjdeM46G/YTUptRInoml6vsz4ALm4VQ5nM8r9hMYB5yz5tdw9EGMdZ2mfnthWTvHR7jBLT/9jxxzuqCKy7+6Jn9huTscW0HaO6rwkb8XqgUoY9BYG8NpsPoR0OyjSxqE03ADtfBye1Ew0pRQPJmD325uX5EVWBAyL5IaS5+L+WElB11J/FzjIYhZOXHZAXaZmg5BDcDBQDMXTb7f3itYjvT8GKjOEKOtUD6htBHSF3f3nqUU60goz6cQW4cG72h4pwNeR25Pj4Co8kL4Azk2pmsVtsCiSXpifAYoFCrluj86giizJSWYCcD9pw10vlKDTn8gGoILDPToY4Zdk5HJYZuEMKDZ0ZY0W4v6PN/oGBzSCcq1u8eGgxXuVAZAj4wSq1Ce9OKLrsh6AxuM9vfmPvWH7fyICHIMitsb4DrdpWJdkzZRZ+26gHvtKpxF0OIqiyIC3nQDmzcKcfh6xhfZTlkC/3us1DjnMmBg/VB/K3ASNlLXs1yzgrEsln8h1Bh0I4Yn5W9DjrdEGFXmjJZh9pzSK+PCwBKWfeALyalP2kzOv/95j2xliWpeZnKgtd9QRusKJY6q5FdC5KcqPAhSH/8Sg+NXdUzUXjrlqSCROXUUH/91NWKm2fs08mk90Wo6Y8gu3Y/ypgd8KaJpoNWnz5vuTqiQCh4+MlAgJ0eBGTEZgY/TvvfSIx/Mz+BuGAgdQoO3HwAr1p+RjMKxNvQyGn+BQGJnY/tmsfnfLneKC/aBNP4qWc+K9SYgIN8knsFjaINfXyKp0ekAxQgEMl0Pl6R3hknYR1i4oPSKQJXH+2Pj8vd/2HxArCjix6MnbnfVSlgyRmBACwM9Sk9gDRZ57PWYbYPPhO4cCdioAaG3HpwdGG2D7Cjy5WaNURLoqbH2TguSOyVnttrID4q5j9awxXQ8HZ39qs7X0cjSX8h78sU3303/4ySbdXanVrY6OfiXbPEAymjZjWNWLisYyasJAwldb8vqiEVig44DmcKZ8pVR3m5uVYOgXBf8E1YcpmHYWn/V3tuy9JQu12DC2WQ2O2fFZjEbsaCqsYhE6ybV4PUwDkYNsAzvhGuCQOK5dKoNNhLnZQ48AesBxMJMoK2WNqf8aj+17fEfnnvl4FkWbs4E/SNwWHlCnIkfOMiHUCEC8AecVsuCWLsmZt2aFIlq+S+xvMsMffPtf/RotR00LU5cHXjPzyJjSonVr1P9tfJxUcnLyMSXQRJQgYokVajv9a+vh0cJIeVX9qgvm4jkOQvJfnt/dJzNMdsEF2LKij/qZmQd+wCu1a0/gCJQxrKu+RJMk/yQ9iwIQywaGBApn3q22+tI67/wFH9SOumiUYL1SLyOYLrzf7hkHiMyft7lph3vx2XPEKIZhZr0EA9UHAhjcao63ymHRv7aaasSE/Q8mk79CVso+nx4GwcPBLREigFnCIv2D9UADhYBLRe6bsPI2owm0JFEUgaZf7AmJzKCBHogCl5nepUEtKVTFGuxlcgOGfGxH7RUZaNVZayyvscArDjTnTlP+pWbmn9ewgSIVoupyTX+mvWcgTt240Vz3JSe63ECb6RkDfJnEDS9jzbL3jr4Lp4fpwiOkIbXNJVgtap+ByO+/EVr8vHmsLk8PJXnQUoBGFa0XB8FiIp7K9tFZQxMpcu8rQFFU2Q+ZYtGiXG6bwNNmL2pKagH/FH2J354q0+ZY4H7nVpnwNxutvDmbOfPejNEBC1iIAeb1fn/SzRVPyZjat/Wmj5C+1q9T0S1G7clqiXdJzWtTSCLiKCArrockFCQiz+v3mY/kaSAEEisnHn1lnG82PD0mPSEji5us+gvkmiln2IF+SrN9/OGm8UsBOS0wOPTiRdb1f9ZngVrp/NNkMeM5+N+6ALu/56Kip62Kcyc4G4SjigetSatNmEdziVH1DS1xaixP7khzCN5TYku1DSA+8Dz1lSS6ObXUchKyi5UAba4s6DmdFI+
BG20beiuSR9R2RvB8ozMxM8vPqeysMl4Gs2Y7rhrAIUyl+tg3G5BuiNFbBtOS6Bujk3jdakWS2SeBwcd0mqQ1w4L8xs19HYTTpLLk6zss9JrvlAPG07lgwXAfwyVK8E5bX9WIOmKclKmSs/yiYm2YO+vX4LP5oN2kbo7vChDCyCj3wGKs/OUElrwG4qyzrigt+LugUEDx3R8nd8Kyo6mHFWhveYTYJQrrkfQnBPG5LCr5YPVEIzRB6eeYqu07fcnP+lPtGI/fAT52EEsdMHcdETOlvKE0SpxX2zLHhuZp+OST44bJLAW2mExTmlx8KPAkoRwl6C+E8bYw6mk7bqjhjxK6mUQbZG6od1RWRQbAs+CtDnavLzhLwGUWw10I4RXt/rOIk0wv5s47IsBcS81+hRjMBhLenXeqOe7CfB3G8DI0n26ZnEaSz/7JHt8KZfmgN3DM4bjWb082/GlWak7/mIfb8Az3NZGp8qKZPXgkMkn6+Jt6vrivtAJitQA5x8rkBK6LBa9Kfo5E7Z9U/xftax3OQKRrvtli/YLpxk8b4T7VeoHgVcLWaAqZkgksWRHbA6tmf10qOQLfx7EXu5FdDKymRz12GMXKKUExpr5fXd9JeFk2cBtCnMz6u/GkKcTY/8rOtNv376a8fo4NIVHxKS3AmBPV4bMRL1md3jQ+00448Udt9D8fPJAmOOhQs4M01Kz+r7X1ncysrxPv53wX16q7/gnVlM9A3FkU3Oxqh55jgQx2iH+UVlixO8EDa3fqG+fvX2feT9SPpiH4pqUaAGWCf1MIQRbbWUVSF47yov46oJ4oqEmB53PBf/Rq/KnaXQ/NtGycpgsEJPPjqP8ykcTy5thuHXq0MghI3C+qSvkbbp/P0sjnQNeUjY2UoM6djAtux/n1jc7DtcbBfUZ67AkRAvGWDn0zmiySxEUpq4jfkjQsj40r8BXIAK/sb90hTMjl1iueyukda3fb3PJYcg17Nndp6Vp4aBih8PtTZhSNe/vlEBPqCpaFW+S9BjNKPcSeG9LK04Fg+BFnGOdcdqkzi/xras0440tTB2PqXOuuYLrvmk3+ofB0jovzcO2SyVvFMZUdlFD/QhIjA491Xh/AJVePbKN17PgShnR3BLnF2/Q0K8/pRWD0CxQd5Bc1BbLQyCnAT57UeEyhqbSoMX1DTl9buBzmR0l+0aImgv+s+wDljuX5OUczrn1J5CmRGLz5y2MUo95MKA8Xltv4vpEr7xVen5B3sGsEmsuH0nzg+a4EJfC//AYh5xXNg75qqPAaCAs6riHH1qz1paQsVndHm8C7C7OH4cxLN01JLymGnP9VNs4uvqupfjTJf+0VuFv3RQdAAREMHtoNmy4WvVmfgvUIey5bovD07VrVYcBhbANLsWB7Y3/DprHW6NOqa3lUiK7fjGybX8mB/sIJUlbiMXZ5iplea9Rr+3YPT2aXgTXjMOlXSiwRdrxKOJWjTvEnr6YMu/0JBbGg65HEsZWH/3Vg4MkR01qhTZ5cjMSNNS/vhBfuE1mOhjGJE53jy+vt1XwD0D6ZYnnVgz4dx0Ai256uvKr32M5KA1fJZwbEryCDbVt35RYsXekT/U5w1JcxTZmHtp8XY9Vup/cWa3u/9mDA+81Yu9KG2mv1S6MKjCkN3R3h9fXIIG928bvYZfisOJqdhpRvB6jCc8fLqNcpUWCo4wzJ2WxLS+K+d9bK7t9hkgB2j2nNFXuwxK92huOARuxP5xQdELk6yBvE51soKRLZx8I3Gh02zr30/dTg7ACf/puJWTNAxJsJgVJKVJj7vrqcZY9taySRFLuuMHWTj6brXGwT6BbPZdOzM/OqNOFYJpebuBfnXzkz8qykEXtjtyeIKPY43cCOJDTEaB+K1Kr2cCPXgg5xGsSg8ssagUDn3/AOl+Iy84SbXA/O34vn64ZxIofyKMwZM2/jn/LvTq0ytDp30pA1T9LvFr2qLq5gGV8INQtN+XGaKO3XBe6//3RU6ME7ctCJccEqCeXBGXUKEUzwJLPXu86Qtxr+CK5/OS2krlmkRQsr4DDb/UAhPnxpYFfZ+X3zPBz7JX025nk8y5iRLub2RgAu34nZUR0xgpVCSJWGyAoTGgIpbPONCYhRXVbKW09qo8QzNE5XvEdol9tf7Lg+YjOliOTv5PoNPkhd8X3IrOrDvQDv6nOXw8Q4xDLrsRe8PJzx2fd0CuFYgOjP0o4jSOD96TpSlOMhroUG6rbyb6qp4RhycJyBRxibGA3WLqU4RYWnjX5yjnO72SOOLsAV/Ky/TX8uqjAgTgrsHyNLf93N1DPNAtk8nJkwh4ASlRiPqdca78YnnhsO7mWBe39/rz4x6uGFl4Tp2k4vKLbWSjtmp/qR3odX+PE+p9FELCBfwzHsGHCLFTft2RvSayq53JCP//Ed3dBflaBNU62TZ+st7iXffjb8gOTL1JrcbLwol+ahdO94WiPlBeBg8Ppd2oajDdlCUy7agNjP/jR7ucnOMztafj2Ref7JFPSyOih1xk2tTfl837dpdifZ09a4XMiPlSSANAfVHz6yYjQdOBIH1BYZRqBlqY0NX9al1AQvDF202mXVjfphM6U4MUquzUFJ9jM48by6P0axoyP8mTx8o+3UDmDOQq8+67Kq/GJYA7KMneLd4qgnZfguPcqP6NT4QiYH84CUkK+ZOBtnH+VElpchBeE9OB9Sdnis88XHa/9Xsq89sZ1hv5dl4pI9PI1pbmj6LQURs7RReK986j9NUWu7JlH/T1AZUaYP4nCnHhKZMzWb/3Nm+QHy8bT/jK4mF1qtHsfu7zi88BDa+ZD0SHML3PB8nxr8kcYcLDSX8xYQtHyPZ9dndj5vRisB8Sq9WUPKhrfTBWsW5kRcui63JyGWp3X1o2nYkeU59El6I+BwOnx+NapzA2AaowxXPam3X1wO5osT8tdN9f1RSlJ+uezh/XXGhoAf2ipXefj4/OUEhs1Ng9vuNlLW/5hj6qqvbFt+pcTXQeD35MI1ck1nkSdf1YjBkGSf0R8nZ4Gr8TH51ECFzyOyI358eLHra19Q1KHBPVu7kp2xc1V/5DjLYe11/rsEoxPr30MbWPD5/U6MwCQBSszWzPi41LxsFj2PBKews8h9poj83ye3p9uwILCC4xRTAvWQd0NE07di3xY0PskOjj6Ju3d5rxGM+9OjyOWAUPP4F9LHyYBSrL32w6aKKrOecbiC8KovXZEMZ+jeI1XBO70scMlKuowJly3fgf5YfZ3ueSwY4VTX2Ai7Iv45om71aBCOlDW+qEksQW972u/5tDvRMlYBjuJQpBuBKvd4P9zl9dHy2QO6WoHDdV0KV3g3xrnndDaUEAOSTBSdCx/hcx+ZBqFPAOuKaiUi5tEeODIZV+cq/Gzqi0HvGV3GGu9mkyowCw1jL0V1OCV3qA1LFKlQkDmvnmOXx+QZ1oc+O8i3RbeeLmfPAemJRMVlT+4xCHLq0ZyZiTIrDUOdfQKUhKHiAsJ0Aa+b0qPaUmumt5IEGcpsFoI5fibZIP35ZEvYLBfvj9aGgCkrIbCrrahIin9kNoe5jU
SwmjUGrsrjR11Kc7tD1O43jKo21hjUQieFuN1wClUK66dIjPI/nVPujdr9blzuqGLn26yk86WHmk8O7LvZ/ZV1VmTu9qrc9bVs5bB4g37actG0fQQsGUYYcRnxmbRzva6FGn21F2nb6n+YLAincf+NGMXa1GHiLkSn1VrYtrFBV5LRh1Bp2EgzZHROE3qMG7x8V15sHB2k9eZ7XwlwnqaiwO5vo0nox5AaBnNMFPJFjgyQfz9vs0FN9JSXSrAry8bkO1GhXV+z8Jdt0U2znKEW76G+qDa63vmCd8UplDnoHemYWSgXAooK9rEOajXYKACmZ6kWqtuLlIESAge/IcSVN9QzxsTUU8foLVEemTqbJsf55TjsZqI35KeV+aXeZErbhoA0Vvnj2rqh+VNMvYn3wjXE1d0c3bDH/6mNpKZzMaXX9xfeHHRB0EeTi9CizNoyWMXxFEH/33OMppSrzYu+uKz/k+TLFTqhmagv7wI+HQ1urZtxOXSptvhkIv3o9jIARQhVNsvG8/hkcPs2eF0KY7BIGjoO+KrtT5f1Jw6AR5vYzCy2bf5q9pctoNc86xD+50CFmEKb2Yvx1NEqlVylElTH+Qhan2Rnc1GRbzZG9dNX+6jIf4W8mEhCzdIRpTywXG0nOEJMkkycYkrUP2Vn+TRn46Y70N4gkfZVJ6VP259DzEl263wIJMUhZGeBOPRC1f62Zli96Nr9Wyn1Su7cR4Ikjp6Js26+RSHVpfb86ZpN6pMPG16U+OMgpyrUbFu6mcOAkK+VBE/SM/KgFh/5abrmvC4P++uG09kEVzCEZj8yYkhSwgTAzit2QBhuDGPB+7s023vLwcr4J7ropeYll1HtV4ZMBHfMuuPIDBenojnrAf9fRfP/BADVdBuwGdmg0rhhEAvx1G+ivUnlS4ECkNJhhLj5F2wuG/8Tir6ArfxAxOijVEVHrnoOpHDXl5qQKVs22BhkafR/ULi++WNsKXKenCxTVZaur3Bf6u3oVS4qZrI92FrYI4QZR1LPfW8aPIakRlkDw+6u1M4iZ5AfQv5JfRO0s8+qKr7WD63VPAm+BibYsqYLpTzlM3QMuk9dGXeUBZxB3I6vUeRHQLeQEPpF9Y3nBvOSNslEAZFNHNHvqxI87w89K9P5CbsVFeofrh9JjW9A4V41EV19odEMxSmryXkgsLD7GwCr4g/BQcryImU5RhuNoZoGbDQgTxIxfWx0wm7BS4mUAKbagWWR4K6X5pULq4SJGce1o1ZYeJWdMJMfab3bT9Arkb2Q5JnUq9AIfwsnzyF4nkzYO1mh5bLKkrRatP2LIN8f4GPun0fz6c6Swb1lsZ0kTeuV8t6wW92sQaV9O7IoGtCmO9UzyKUbl7GmSGkqcK/BK/PL0/etNymMOcOJGX1EG/voremFdRgh0jDQ2z4Gjxs5y8sFdjn/V573Jg8mXa6lyYspTqf1Tpi1pnri6AHiXhwrfDaLAli8rEsjZ4rK77hUdQYL7uH+XeXAze2z2NLewu/9mwjHrXQq2lf0Nhuq53BojYKvlT17Vs175R+OI7az+7pMzjxoupbhoTTz4azhdaps6WJxoBHHUY8b/iPrX4dyl62mt/CaoCNFJ5hOr8OgOqb5VbmQ2SBC9RHSd8zmxX78sJB1b+OZSOmhA7ZxjMNdobIPL9Nx7Hra83qs6jtffqybxi/ulaT69u103ZQixLPDprr8H3E+8aEyOgpm/ftTMK1B6nj/yGD2V4hLL2eCeSBvlA+Wh1wudjn1KG6vrO1YEGfBFXH9uXgzdWSGF1vBGtfjueL4ku53O68MQtjvMP/X6eHTNXMjsm3+dbWM0dJI4SvPuOqWOMZqUBOfLKpbxX+rjP2kLcUYmI604Ns0hRX2fdDcKIjJc53XErqM9BA+6vFwF1MXH9SUkRKjbWpukgwSflUXOz3qu3MaEJE2/0v1vPAkI3PXyg4X9cY2dCrO8QVdWZzibo56UWCfdLwlciIQ4JMNMiK5aG41QB4+cevmZD2cwf/dXMyUebhCzOwaVpl/9yxyBwPANiX+Jv1bQ7MhnjnjJL7XoSuPXhowOj7Xe1rcOQxBM63frqE4/6Of+EWFTkOHSWqwoc3ClqHOKy7/ndReM+QXYI1Zt/bJtuT9LmWklz3Wt2xwR4y/HBrxCtiRyF9rOTwmUWlueX2tqN160Tu+7P0tYtMnyB1WxHI+rFRMHdIqXcRpryrJD/rqMbP5k9/VNf3k9ey3/L7FbxbuEUm2w3MxPV/zBrl4vyuRTKDAxee474ojLQraLaXqf6yUKTMA256bnNsVXxEfrWMabh0UZf7c1JLY8gcnxZETb/aBqdvQSELCVsfHBDd4AhYe4KDLseWnqXhqp95WZLLy86rBRDfmWIHwBB4WxtderLodtwxS+yClAPTh0WkI9HnEs4azezkvaMr67E71a9a//KMXsqaQNdUErbZQ44f49af27kgyelKsjZoNl604oBpkoEvtp27XrkuJVRH/X9vcqWHA2U89trhmfRG3W/ARWMZKHk9ZDTpZGRR8XqLZeOAyryYPdiZQhr4gNk+uv1TdINWGe8HcrzQ3YKhtb6QIMKrNz2meDtm/6ZdSpTfpEQUc3Xt7+lsklAIGkvDEGLmWzIZ8ldD2Sk6ZhJy4BB8Ek71G78I55Yb1xa+B0hFY+pt7gYjrTBJX5hvYjj7Sbuv+ESJtCg+EpoaNt0W9Yzz2645XIX342nesnNu6dn+tyykR1CxKgMN/1x8LdvR/mE4zaImkETXcTlFFQtxnJ9GLN9nX3rwjf9oFqlMyqDVGKXkc5iqf1+4XZ/3gLSyGuZGsxetP+i7MICaPGVLNuzGsZZcbkX7U6eqBUPgjmdsFZBAaJ+FfQ41loDZoBsqJLnwVZBVXWjRNNfLPcw1en4700KVz/PvT2TKDNwPtNGNtp3f2vpbEGTdxYLdWaTs1jQt1k9sZg5lgg/nUND38fadOm8NfV6AfuU2nWurusjet6tkcdgF/PWvebYFDj3J9JLw0rReYsf3GHxWOtdZQUnnQmAF33WYi7VhQPpekgLKvRn4gFyjD2/AbEakJkkvDD1XCiTfSkpkKASC3m8B9IvRmXC7SK/IEukS/7m9LSbKf06LGsw20RzjqQZQR6kcUyVnx06ms0PUiWvR5EZz8qNUraskxFExw2PzTzKt+68Oloq2y5SXz58QdLqS/9XUXkPdk7mzvfT/5I49ydy3gB5J0EV41yO3idH+BmP0DOz/1efx9XPhyjdYycaWsAOSr7IhowT/tMfJUAPWdd2TfWooFWN7iMmJzVgrpRkfWJxDut7vmaDTrPjiQ9cXEZidnAbPIJaPPbTQCrlSx6yGMOXryX5knEUdAAA34uMUaelY9CEK5aW/QkFIY9xqZ0cNtGex3bjbyG4ra0eJQU+Oyyt/8HH8HLN5FILiEr+JcFZqKzWXPaESoQ5gK378NeLnOn/pqS5ZzfSe4LHSMjjBGtNgB2FcuO0YFOtyasHgqFs/VF7FB3NG954a8+vvzsnTrRy0udzoTjFGmGQomd5ibgH3oJ6/ijUwv3WL1v
yL/ANs7tF7WpX//dcKv3F1Rxyl7nezJVVv/W2ke+Z2k5ENn4SH7UY2Wmac5xDSsxdmzPEW4rtiCZ3f0sWf8+aRtbne0Qyye606vfBv7/A3i5QaErNdeDl3KsSgttAuCO+OXAQvxtQocDA5dEPI9syv0XATWGmUmGvFMxE7a+4kWAZI0aRdClbs/ZhZ5JbqQaY3aFH1+NjO2EBKjDhDqCF3RtJCASbjVeX7tfgr+Rb0+zgc/fp3CNf7O67Q81mAFNY0WSBx+nZUTvbEATbFhQT8xuPq8ZFU0mT7eaT7gsJnMYXUwXh+I34dcUcr+eG90d7tY0TnSzti3TWplJJ7brt42xKocc7Gdf61c/sNWn8MzFGp0gxieWuJch44ONCl0ITBbq1BFIk04YfOjgl4ukeDdR5YLnzYSPmgmjEcDw1EoVpKE1rGUu2YKPWHPACh4cWVZrOrmLU43i+nRwO88o8lhXy/BZ79jg6WLpOXWrUKxYbklVHct6lJbR6p9jsZj/17KBveyicqCnyfYWsazXj+Bly+yQ6gTHwQaRO2ue82IaD7tJvpo/jifT53WVQMp/FnhGeqfiSKUba5DiFCFJFmmIHXq8FZa/3hpCkkjfgAplwHd29TMz318c0+QREVp9POfR7vVf6slNs58hNqbqANMiKDh3/usJPiEHCpmGFpn1KOtkz16R8Tk1b/JNT56KiAyfaKPer/VxqJvZOFnS0ZGSrdbLGjE5Icjt3MCJmVdOteuycoKcUF1a5cOH2zVeWAtNMbrVA0UtJOF9lMfSHKmL8TthR1qDAYey474zfl4re2sIckRDuKiZIH+amgKxriSqnBjWuejI8UptyajXc2MzRUhzv6wIyxID6D95Uz3sHyPOxBDS83pBHnWpKRctLRllqbtzqJSc9w91ntAMgI89ESqyvodv2N6v+gBf8oV3WpMSI/+cFrjud7GsXvGcwkSBuToXLMkeDQI6Ss9dLESywhHOASdtAzMEM+vvKrlUqXVoJU9Fw8SSyQ2zWqSHs3nfoz8z/SBVGGfMiHMDG63ML/6X5dxoh6zkRtV40BJItO0IY0F/QhBAY/oEbz0mPaBN3Z88jFtgCrZTjS5IhIINOtD3MMP0EGClLYeQcA0cBz5zq8n0waomJPvbhM25VOb2jwzuVAW/Cpwmo/PfWr1WJUG2pRX7UPS2zBesd7WCKmqupnct01RXsapB/nvVNPpJv9ZO5S7zhXT44MlJWYf0UL21cvJQysymzRF51uaaK2BnRBnbtUSR5E/cp+q8qkZIe5rpQ6AIVoktkXUkGIH5/mqg13AYUpHPUtf5tQ9yfIK/yMVfP3elJS2olDYcAcEYqTo7lMTsqxifj9RX4M88PO4SHjsG2Cy+vo04wgQIiXPYlRMS0H8RWhra8MO0p9jiGFEWHC0UqTpQaTcIgEsRccMkY2/dpjYRDoMKe6mHb4Wp2Z8+1Af0cWogBFeL4kIvE1SDgQs/mfEWf7+1/0uFy2rS9hcaGIoop9DF0h5X8tPdNkIokuInBWXX3+66feDhl/U4yebScmvPE6ambKf4B+x8btHtDRLE7LW6EABdZtzdrn3Ev+5AG2BsjHmDduvgvCiIpGQrrmgDAeVLjhSWBKd9srBVDMKCYOGyWrrPTwelaGv3blm9UqIo9EeUPui5kfllt++L1wvIvLNHOhm/IR5ZhdLf/9F/7d9R8uB7/ihKmolLoAXMrO3X6FwlHPfl91CxdCPAF9MEpUv2JaMYV9Lfi2j8RJ7mNpgixi/ooj0P63OyBRyADaj65oibU4a8u110599MHrFcYJgNYZe7GMReqBPycAQXKL/Yr1R+v5exan90E+0Ug2Lv5zNLEwHCnTM++HTfSEzV+zP61y/cSL5mR74gq2T+yvJs0p+57JTORj9eBgl2WNtTXebJ2MF277Dbzbtp2lOcJY4c9FfrxYhb6OWuUttSiEovM+gv5aqK/WCYZ32s+s5nx7rievLSp7USSQQDUvmirD4Q/7CxWzcc/WIp+oK83FEaO1w098S+6htWNQUH2VDZ4ycBDhIgmoyGecY1ZfI3F/b80jmkwYaUSPwk1V+H8f7agXayIJaEmII1g3A8//CYvhX24I4+5YZfEFdi98gaGUNSTonbxZazUUTn5sQKSwuF/aKFrJgC7lEc1t/cqdUkgX8bf8Gy9CANAYqhWD4YaeNvQFrTkqZJ2smHBUoHBCeq1ugYxmQZO66qr1w35ZZ4HsM7V6EyRsVmT9JhgGCVwIwXAiZk6de7ZiM6gRgSNvA+ttTl+vdLvHLK4TLU+RVBKgKDnZmWuVO3gQ6PMp7KXtOhIyseX3hOVRa3eNTih2JG0E8E/6VR854HXyuEv3ISw5DBIE/2rOwJvq53zzk2e9GdOZlMXrWrde9PM0iOv+XsBF6wMen1TNxiwHmchim+boe+iVgyVSepo3K7pQcBZILjNfKGpIZzRKuhRLPMEQppmax+7+IhRb7sMwLDQMy/8argyqGDDylVesfvXOvEpDUbnTf8b+r3t/jpVxmDml7RGbN+HGZ2Jo493H3jkNWsyogmJ6z2r+ssICn1yO3P6LAS2xJs8e3d4SrM8u9GC/Oq9W2VH3fJ0sz4uqHcUK+D+xhIbHsrb0XGEJdEfgeN7qduyd2IULtOWIPv/cvyUsdO3RxEV/x5wHAylDg5EE1dUWyNG+yTau8ylzTl6Cd7n7Ym1ExgXICqs8/6tMrg/PkUo5J/OBL4ogfXCTayUONUzpptgfFQtWZQxQaLfaTXqZT1hv7tvxayrGFPk5gE3SZcMGtXk5lOmGZ0hkk41c5PuTUXyYaPcZeWKyYQlTS0VguWdX4GDmUpNN+hV4ly1HQo9Gt/j9ll5H3+4rTylH2dNO1mig2Vc3mMrkdoXSNsZTmOeRWFWZNj/yHj4wi44ZZiuMFq8UL8h9k/Fc88ReOxDE4OhRhZg37NYMjWFH2e9GjtjBXw6tEnweh4iFd+kzJK3WOgi2FnANgoPg+YyBS3AwQL69/GxUKvhS6ZXd0QpOPlscgLPR7kCnfeK8sVQxUoB4L3LZnqv+pONj9NfSGjuPLTCk3HBA/CqGhfZEryTrexobuonwsMfReQjyZdnoYIkzlhEki+TEI7WfvqyfaiiSFvinM2XChLXJu815YPRyLzgJDKyrs1IzmXW/s09k1+Nl4ksXEb7GnNe1nfd6oIIGjnmze4SP9P5Nf0iuf5pPQMzmxw4H+u37b56ff7dYmsEuNZ30tSejmDLyID93y8tu8QZu4QGRsDeIfNAD7pcarnc/qJf62Q2U6o94MrKs7jcYaaXzfCxASoVkGj4AdU4RgdsPH8VToV+l/xX1A0SUkYiyTRonPDHERWA7jlJOfhiXx4xWYo8yecr5cz6vR3QcdpdhuBS1B8wdgnVoe1KhVecTyGdAY+y3pO9+AbE59qAudg31zciPVmecZHAH1BRPxWoN/siKy6xX8jc5qQBNV21u39jXGZ09I3y6R2a4N9fVAAog5XfFA2CvgJSKmxdWzVW2q6cBDzIgZl6ZobcRExo3AUFJNJOMhSysP
D63ZywFSEDPhnCDpj0GQ8WEgs/5tkgEsGrE4I6yVyZj00S2LydzqNUUPVwW861JXiKRqz+C9719l5YcaiwrFTVLDtNFCOhMRWGLLBN8DdBtVdGjiRLGICOdanNFb3tWciX0ar9tcThrYEq1L6VcVZN+R/73pzh8VQlsl92ayXeFV90zrubmZfg9iMssH+6EcI85eFghWZh/FehKDtFcctO8WU6mTsCzGMMO2u7IyBGExY9eKZKriP2PgMCou8KoPwg1IHLldfjqKsLLqeexeXpxkjWFwZVhfe3iR2O4URaZttmBZKlemuCMYzbep2KIG4QBI5MA1s4vk5kSqQBmIKvYo05S7+yhMLqe6uXgtn3tNObzA62UIpOJaJG9vc6oa8UpTuwgxLVh7czv3kggidMmVWJArqmp32StdJpmX+Bdsf/+Ei94aN63MzQ4exPcrmUDK8L2e8TAmWXYz0xZ6Wv2ZSViQP4uriScAfjyerzI+e5EF5zniZ5h5wgwEyM4T4slkyMHjGVLmlqf/uudYfJDWxW5iM3lXrLVqadRzrYuyYlJLuQ5twX6FDgpH8gsBYoXlN8Ev6LvfjErOh+EHjOtYeCal6Sa30e2C397eOWpvkCIz4bxTPVzzZXJxW7v1hw95rACegH6pf+rKX9bYp2VEtOO+W1SefMucmri348Do6l75VyfdY/U3b1sSbVqEFoZ48GfTa5mPzcoAYrYhKWJUIQ88Y7YhhTQ2bssB0UBBBnafGs1TQcE3EmS9DplpeXB87eyihDuD6PUESO6VlH4Zuw2TbmsjIUQ0gnVlHVAukBDmaI5d070c9Dd4BLomqLon4m661PSsIMDauxChxKjX+kIUhiIl9eStBBHd4n/6e0bakQbTqpMRtK0BETyApzG5uCvQFSJyKeE/fxP2+fz/BXPxjt7o6t9q+4Ps1+7bkCPE0N9WoIkdeASUWwwLz37vvHxB1t8qvj4UlSvT+9NcRjceVXzByCPkJi0ifasb4e7qKphVuJPMMiDm0MGADaU+RY7uhpcijm4BSkk6QH02NGfD2qdQtAJU61+7v7fnhP4HibTC2lO200CoAf/sJv1uK2tKPGLBeqVk7BEVm8deCenugDcfOsM/15zHUpY5mpsTP3+sI3i0UTDBnDYG217uJWSYScw78AnrM87VjkOf6kfPvqV/+QBGP+0k/dFWFuGsEhpFwbLnNJkhi5/63Quc6umVe5l3vKlL34/TEHca0ql7U5pQeipfE/9vnWmP5mZtTlKvhiFql+Kr9l4G+nMk8guaDj23w+mPwv3JOov/1gaWJJxYEUcLgxn93NFX+ElBICa/LYYBnGqrKohXZT+uMf6ryOV++zmos4VQAgEQAjFDr9f+eq1w1CVPOxHQcTzdKxzFcrHHzX1YFH2bip3OSAz5y7DhpRRpJO4P/GHUbiYzbagpxzOQcjpMTzPv06J7C38oUCiUomoG2rJ8TKvlmL4TxBg0CBKn6A+fCfliVtOHmI77uUGpqSWJ73it8hw1khyZlgbxuiXVLKPa3ikFKxLFN/PUfj11Ng+l2jdSJswXsvNw3aUwdf9JanW/Az9M66i60Czmm4fw114xTX+z6ApG0MH6QGVfk5kHK7MGywPCvul3bqWxe1VIlijLfCXVjFNUyig946rmXIhM1+oYeCVqWdSpUwYdwsgbGlDZgYcIznWUJoX0cKFTFzRSME9Ri6O6+X0cOWJ4iW8dadW06nfbMWBwdv/GcS97Jj69P1R/x31hkVuYZPi9IrmXqVLYnR5uCLsr7q5WbO7XcVPuVkChnW8H+bJHqcC0cxqAQToc170qqHUsu8B+I2ZvdXztPmTScszmRFHaK6Pu5t/FveL3VOXHHaLCCgMnlImIsCwgk1NX/f1mxvsySkk1Ljd3qv0QwqA6qtDuQzOOyq437gjYf8wEYYKokXyd3ogDJXn1AUkwYguDrWa8frjVphoVnSDlwbWNG1e+4bvIkcebOF84x6N8Cr8YqZ+O/KcGr8XoQ4ThxBRKFyyIO+dpopu2/gn+Sa/p1TB+DKgAsGfoNRhC0kMM5aJmo/gjxp3PKYZ7JoRJHlmPmeVQHVCu0oCEX6TV1yN4cpvB/0kRaw/R+HoAIMIa21K8huhJ19wUBUqcSntdpa5s4znnsNkBRIPFjqn/VGIMhTKwBzQn+mAHqDwHjMJBxCMgVXiX+63GWkFZwx2Z7TwqIQ+dRb+dH8TpLqeZc7BKipmQe6KHzLqPEvQWQHPnhgQNOnaSyYyp0LNLtFQesRAJcoy2oFDFvLHFEuADXEr/Smsr9pl76jQ0CUe0ksQsJBZm7VMMYNfjpR1AZLKeGQISTWUpUQp6wXBjdEe/LTOM2JvhlU1tQSYR8Xgm5MbB/hygOPM6LvmGuq535N4XGu6SYB5zTYQLnQiWsoPSY//1BYyao0QM8D7d8Ur6z7dOVANQCoyq+fAvQGRxPKVtAklY22IeSjtSkv3WhGmhXOexs3Mnx/YWbk33Ml0lhvqNuadZBidMj/HSiOx3AiKhiznjKQiHZU9TFloF9+BZQjbpZNXYggIenNnFuAbx9XZpcLzjjnZgpQSYGJ7RfiFoco8Y2QGYD5pWoKM7P5+fBVh7s/I3tem2kqaJ8R1KHuEMCCK/3X/I12RE1vlL9ugV5FofoSOV2nugr+SYpiAtAVE3kzNyeUZcVUpZtMLw2/uUrIMpcrvZZFBD5fK2o1/92emRTg237ua1JXshU8D623zYYccYl7cvFs2dUKoPSMtbq7dfvGfZ2rJD82T4HG6XEDX0YAdNdFwm5y/sGkEqtcizxk9y4xYXcsl4uXQXc6tzEyKZhWWkc+OcVUgd9U/BejH9mZrOcwB+Ydj8VMx2VsqosL5wFa7zMGOOLHLbtkBRAt2u2mvz3EdQI9pvuSXD6OmXuFhCvpQicptPPTzWtfTFh6N6L5esRPTQTTDFcTFCxUuGzP6KteQGQ/PWuuwcXRuY5xbE+d02Zug4XXIG93FMS8vHBRDcBcqw1FR1NUGjYCDL4BfQ39ZCSonCK7FHxUzffACNmD8UBEWHfBXDbTyJpckHBAp8D/P2/YgxH7dQxkZAfSIx40uAwyS8qY8O2f1y0xz5WvS7WgVCGaWEk+VG0lzb4bgtqr3zUy+M+8kuZGAP7vsyfWQo9QNA58fDDNnBPuThJ0yT5aQXQ2id0+vNnvGvl9b9Rm7zA1LtxeM55M2L/hXUdyVs5zD8tpC2D01sDZ7wusCAeyIfkEEOy8lUfCqUlnvR+oP+x917dbqJpovCv6XWuuhYZdEnOIASIcDOLnIPI4td/vLJdXWW7u+tMh5lv5mx7bwkECJ6cX0vfPx3HS8KLDJq+wOwW0u0YeJj72eZfZi2gpZbTSRjgb4SrMNp9B893WSX8JEpawL/PlkYOv22hW1c8GrFLMX7Ckn20XvqJF0C5rVgeKZkXsGCqcAZTmpbGRzH2L+EoabNbXs0X9xhPlRyi3MBePisWBoTxiaCNoSBT0GFUNo7xutE1SPHIW5vLNjZP4qIG7rjQNOK9qF4uL8psovdLihza2o
Iw+F0n/TFRQr2xBYLbqMetem/H2/Zg5+Xhd1s/UGml2JMK6gjYYxYa4VLssGnFPtlG4A4i8VpTpahWnal+tKJHMUCxixZdqarFEQrTy1azPasz0S8oAzPKUHqyL3ccYNEoVCd2D4Kkq1FgwrMu5ls8nhOFqOGB+UJZ8CiOM5fZ59qo3TjMYSgb89IC9K4++P0V3qqbOiydxRXAsamsl5gkcisaljKckNXF0JseVNvmH+Fo8ikeZFiJTKriHN2I62zDbavYkgiWdsb69nL3kEueLIkMnbaBtj99qIDHJJd71xpaDiZzfydw2LZxs3IuLB6fRcmnleoP78guz6nEAreY/Hb0iz2InvG5fdoBZhNLpdJ92snlg8ZkPoIApkCaj4ePuZL7hMdQvxPNKdvAONC5SlfQiaAMdFCZcJI046l4Cq+6dvvgOT9xjZaxiU3jU65KExAxEIzySZdQrTt7bFI0Wa6yXHRCNOM9nApxoekjsBT06aQ4oc2eMit3AS+XCpuLnuwjZcE8sxN74LQWPMyj0pyzJAKBmmpHwEyGmuToTY/kSFJJa52EQ9LWPpWOF5VA2zy1RnEq63Ybhy5o2FzIMXd6ETOwqQyUm60HUg87KPwXFo/TQ4tEeXw4YrTH6iJ7m7V7yvVDiQtJfo1NRDTR3pveQ2+tpKBx2Y/iBwg5PE8jeRJlU1wGUZrdcls88vWQl0/d8b6gc1PO3EoT5R1HBl1dfagG0nOaZtWwkYvh5pvJzfnxYOTYLDCMw3YEOoBxoeTcYy4Vau2w43y1gW4apw29lPxLJG/dgYTj115g7eVx3FddQk7hYb3YfnyImq81W70BlcuVJNfukehyC9NoAezrVphOAeSUNR/i5nS5lPXoOnZnYRUYtiKq3OgqEfYSFw6TksZmLoPEbXYjUZfCXoXIf1pjwiXDIiNp/NywSj6zfgo8rhWtjHvx1Y0A33tiErVN4RvFkPcJr3cJm/uRd9axdZZNts1uLv3bQroDBV0e1nMADcZ1uJy5ViJ6QEDMxhlENUKepSliFM0p3wnAJV0mqtQui5wgtnK0ndcB17iNLSTWEZSU4wvZ2ic9KA4CfIoABCjvJTW/yfejqKmwoOWDzrFOjoAu9crO8nbDutS0qHjhxrB7SYXU1K6fqUHI6IZ2O/jPIyQ3ixqP2QyNV4Jqr11SkIX29kGjNpBBSBuGisL25lTUVgiw2Ju20mVVrnUZYMeVQte7yLZInume5C4FtnulZXSXaAnQZ6+mkh7nDCi2WbmqArE0NIZL1E8VzpFpxr3bUHWZzbzfFr0p9azd27dCMnlzWC1p85/vEM71fUy0PQXVr5Ix4CwGVM1yp+/oJBBec3Yo/sTuaRFwq0q6BHbRPk20tSnhe36Y6wPz5JeR5VH0bNBLwexdZNDiJ6TAbbeGbviTui8OSFB2i1XQMGNx4gmRpBXc18umUgb/5OZnORBlQDMwufYLrlwadEO6KiRdhF1evfMqYobQ5ap/tkB5RCMzas9ljj0ZKDSPtHb8NlD4Z3XQEw7mRJKHcZ7ZY+MsTGNPvZLzuyRXzSQyr5VO0mM38MyiyBqnJNFBkqOmuEfGRzGWxQWc2N7UWMDFt7G7PaCXCjMTFFx9GFVXUIrlU8ihW0++GNA9UvOc17P2MCNGBh0pAq972oAKkKdE5wH41jA8Ww8ipMP898YcmddmbELV6qzlIjtCcKYqEn/EM8k/ny977JF0Y+5WJn0WwwpdhxQrXBEw+kWZnHfXbPpNNu8m0d8o3/oaYBvgHDyM6IBXkj0OOTvf0PbQZfjJgQGXsrpxTdT3CbzmnyXrhky67PLFwlHB8IqRsyKBB1WsnEpdbvPY++oxR6AcoSuoD2EJGQoi7tidbV47w7qrYC+YuGkguM6GDwxlqlPT7a32P/OQevKyCISSb6omqavPxAQ5p8m1OHc9uT64JCqwaN5vKiMowMLcgJsZc4rafjmpcmQRUocvd/nuyrRhUcZj/pTQE+QmJZMoH/zLoDmXUe36gO7yToJn2h7K4SSE81m5sl2I9v264Qtkrrrxio0adQRUYqhR1dl3Mx/vWZAZkn0BOyCgtl7giTlCWa9VEBU4n4GJacQq1UffQb2IibyO9jYrg0YTwSA8y2aSeZ5vS0FtNPpGXsdlZsNPQy0nkCR33kkmBV3dT8bYfWb4qyBGmLpFpaD4+7PMKjO/+joewsJSjT66T+5D35+YmdxZNd7R4eEQzhkeOarVqlqeXhVTm5dUW1wQAQUe0ZcEMbWYdxXFYYvsg+o5F3Sjbnhb8uVmVTqY/nkiS/aJwNuOy7IzRW9cANXsTNTnGGKnlzjohvfjM4sJosUpu6fW5tWHbBClffXO9rzO7sRZ3YOef5dNr/cUMNlDRLU1TD1tg3u/c7pZX9WnTO7Bgx4jBlCApFlzxlP1S4FebALqMc/plm7JobTSGRPyar2MOsOsB9wPumhHkabCTQxD8GfBzTzN4/oVybV3OxLVmfgprtVw0x6TtX/WPjFTOxuVImycCG6hxX01PsI5/SD3MzWYazy+4RtOO6453kHEpqh9O3a4dtDGVhjPneDrNzLeO3/PTy0psBiUi2XFrkWZWHhZZKWfwv+WQKy4x3ZGok4KqxvwGHlyvuGwcEVSIY8oD1ylX1O1r0813w1iQPXjjQ32MX+q1CmQf1Z0RuUndk9C412rKJQRTJmI9fCJ9KbVWzdbVIsidOIjf6dDSoJvvu/cv1QlMMCwFRMdGLk5tXF6EtThLBOf0hR9pAUmXxBzcVn95O2RkmoQiFgSa6J4/sEcnxQy827KTxZ35ZxDi0aaUYWR3V02bnqwQkfB44qR3/ZPqeGqP5R33c1NOt6IV+2u2vR6x3cfnbAHbaEluqb+dpv7lrrht7LqujqeH0n9XD+rbZIiEU0Xfo9eUy52yWlgHUKtEoxbj24ewpA4H4U0q6L7Z40xZhVmGneWhLvut+sSoysDolasXYy1l7/XuPs8y4lyxD0Tdbl3sHfYIIe1PyrTxlp0AbIpv0lw7pOi+1rmx8esB9wBlP6MmFXaPMA9LFBh6AVzXYwdIkaxU05MjXbSDSfJ7+agv7IEiBhuuUVRv8M46LYU3GQBjhh1u0TT03OCfila7HAeR4bk3cOcRLekVoqsUk16sW+p329oUfBzcxl/sLFo7iYGNH7MMeEpVWhZ/pPFYN1ScSzzaPl+vBXH63FE3B13H3Jt8ysONF53D4OLuRI/jQpMu4Ef5wnmY8NcJveJWYvp7fysJP+6XZQlSzIwyw147cT45VPGUjvZA/ileBUsWoqUWIx5r9SyyMdqiQh2g1d3HpNZsqCp7F6T3uZkVEL5QUZSGYGYB9PvjWYERMwd6D2TDieMW8fB6JlWJK3/rNCAi/d0/6w+oJy+TOvHok1FIZgYOT8HRkYsdp5XhKDLzHlF3RY+VqOn2ZcgYymugvv1AJuLjYLXM6K0p0SZI0kXqagQsOGu1jshUI1R6JTGWQ3LSGR3zMuTu7/g/k3Db5rTuTdBTiK199tDe4n8m077+rNyKEfr233ydsYDsBwup7tkK
OBYQJR9Dqjo4qpZ9SpZdiEIbNQycIuGaXc3xxKvf4DH7ws1C5pyvA3wiKjXMGVHZ65PoSaryEl/44fi/S56mZ2oUGqEtn9N8qE47MPBt2M14uFt4Mj5ePGegWXVZ/Di4DNueaMzi+GfA4jEsdHBY5Mg6zx3Qfyk75dCi3P8MoZt+NFwEGCPF41kGf6QFNXIX+ceFZ9iVB2k7UgIuZvj/MT3OweGQhHRyacK34tNLJwy/RAiDjzZZSmkAc3lq0OHdd+DL7YlkHta77BBp62YFkZXa4hLfFIsuPo2p8vmV5ik9oO9c3FiZpzPnKtMOivJ3ibU+8zktRXmvL8+qUOfrdkkqzVLjCvrwg5t0XKwbhLIxAh3llOJp81QlylIk4+7Cfv5qbQgEPBAN0WToKA9xIP2eYz6LKppZ0CDNkrwwsV8Debb467AwkUDYHEQoS2skobY5KT1u6ItgF7VVXAZBEopTp+tsWCFNVfkIGscbePgtpo/SyVyiiXa+pFRXKJ3l59yT88oo2fFKiL6zV98mVKNc78AsF1EwN3nyxLMSxGMMscrhJbqKCeZ4yJtNmjDVTffN+09rgR4QGy7nJ2GlkunC9jrJs1n67+yLIYuH5o/Db2BDqEjuNrcQiK1hkWone4jothNVFh7rWIHk94MA+Dmvh2Xn+rEYb1cYQxr4hhodFQgxdWQgm+Hb6lo7ISZ8qaJiJnbYG+dQBDH6XpkXrYM2lHua2DSkYp7xHnyjME+4qdivQfWzE6poDxTgdti6uh9o9XmLlmD6fWlEs4aQdBW/7wjZcTFyFKDITe2fMmR6noEvjsp5j0DMwt7n8WD7d+wDEw4pr4Ovh+1nufuE1Nf92PenLUQOPyVnzhzSvGeknM5cpcDUUQvvsuWOxSpDc0XYTewxuskw+alAgpR7SH1mTPIs+XWDs9dr/2SyqK322WOPIpScprCcZ/9Z6oIl8sPICjNF5+qY0tdPmYKuLShgzIeYwulfPbIUNZh5NaaBqGMHnLoK+8Oz+AUVmz5vRCXfTTQUR0ZMB1ANK8ZaH/Ot5xaKMPQvJTjv9AGLd9C2MpZOr8tlhacyxtyNM7yEjrIF2BbdJePmvou7ExKoTUJsw4+XSC5C81eSKsjmXCvR9C9w/7dP8lM9I51UXju4lAlUAzitjxTjeCI1KNZVxKqHfhW5E6Kxzxbjqt+ltQzLwOpefBxddBPtqNAfmUDtgEx+1DilZ+1dDtCmz4BmoehRuQad0Rh5CSkMSwuL86c49QOxAbF75CvFJavZjp6G8n5jEmUxOLxlWvv+TJUZvhGoLS8D5jciW1+J7rm6JbniSSQw9GCSS4wtHrEUst4GKA3EyHL1i7pnW6y9VGInHPHR48YdNs2hLUYnoqRZdhCJHkwyp/qx1u67tOWmP1wPpWUprvism2CuQvwV3sCmc2Fjct02yNEas7paNDYMQ0qJrSfwdRiseEtFKCT2TSPS6Pz/VqOsT8S9l18uvNQ1/SLFJg3ojnzzGj4ObFbne2nSClrx4xHS9C1kbRqgWa64e3y1muNHmfPljqX3cyFPLAvQ1Ff1P5MFz6oFMZzzLjHPXmTDFvcCr5UeY5mHrTuAMNCFVd4plX1/Smocw7XUGvrZeqJAT2J2YGIZuNv/oDWuPe1378LL29AYQMunHe5tCv68gnObD5Y7yn4Vgn1Vqu0RORFH9yLrMIRpqrpXByA7dU6Vc9+RbaEvSCjJ0QBxHYZK2g+tyMkipon4niJRJy+LA4Kfs+RX6PF5YDOdnj/aDdzhyYqvl94U58QVSimctghzSss1r39wudObzwc1w18LHpkpEo8PD3mevhmV0RljcZdaaY+5XWWkcMwgh+fMlKlcIH8Edi6bOeIdDRaGz2VO1VmKa3L2bpBdQ6afFaaSCkS959tUIg0Wd2SQiRKye6YSFHNoKX4o2FN+AzzdMzG8KR9qgn6FvZA1if3JsgzgDVEMA0pgmBMmGUn34Xx5VQDP3U0BFf1EvkSG2vavT/qHKofn0QieUxYIxXvfODGIXf5MOTTuzNvYUvT28dv+yTl/IbCiaWBxp5ULh1jencsdVQBFBSf46ECEL6DtW2zJiJbAua4ZDHyz5LiB2BK41ELwNrWFZLJKy3NbNNCL41+53DbbVdNfUlmsoERoUJky5djtl3H6Rp32T6aQvK1OWgdp3vWfVoo9hmS93tJqbJ6qxdc9jDRWUxZv/lGYBLxxAus57DPjfDw+LbDRLZm4e2tK3gRP70ThHJsL8c2Rfdl3f4sSoQj05l1vQbZmNIW3m27DSgPLeZN2p44Nob3RdqQXhidqvCa9R0a6mdk/OUDI6IWeUngDCgOl4Mo45dNBExmCTuq/P1mQKX1OikIRvvLNuIsqS8sf+nLxn+CtCT3CJ+b8X4e8lPEN0t/1uYTenNiWbAht0MbsEyaPg+xnKJTg4U1+TJ3Gt1jokZsVhAz2UUtR7kXgDHXEePJ7Rmrb7PBAbsvBrRFLP46CJH7oFVFJjxMQ4UXnIPn3rhLo0LGuWZdAhV6Md3Cs0Zu/ONxTyGsX6YnJMbBJ6ejY6GgzQZ+Bnaebnn5Gu4SZdjq6PT4tg5QjTw3geYwP9V4DEbTVYvWbFGyi+xKE5iUaLJKbNJaoblhHWoayUl+xnSll6mdoC5H3Zmdzwf707jo0iQtqfJGSFSJMmMwVOc0UmffnebECNWjw7itph7HHhA3ln2+Q2pcSefNvjAioSXLUJz+tYUhYAlm90O4lj0B1y0FFZMUPVhuFD9FXiSQDcczj92zdGTf/dJ55bqQZmPOy5tBDpYxGtoMisHj6mcEDDQw/JZxx6HDXa4YwiAxn9PZDQp2kpo7a0aHc9ANuXkOPJOBzmV3PX4gWIrQwS2culRDtaSWb0PLpkHyYIP9eN45Stw1YPu/as/EDJwzGSAV79a9w8oTpAy4tyygIHNep+gEkNoU4728vXR/I26DlclKZA+yvjPvVrnVkC1kmn+katz5GVZCxgw0iLUKFqZz09FvizmHCfBrxTE7xjMjOF/azxoFvZeDghvc7NqriKPKGa/3zJ4VjTEOtOAlBZ7XcHHnt5XvmIDJ6/2xtT3Zm109HvrZkv62g5oBcXjlsEEGrRNX6o1pq4D0KfkysxBKY3atd81SAXBVMI8pCqhqyaruoICxz401eJxX6huMyoMsPBwZjbDsng1wvb29PUmp0teUmkNJDwLp8yNYkFFXn+9ngVOjqdPPFY1KYAfdjxBu3sqdxIIZO96otB+md+RAmr2MTUlGyhC6OenPPipTi7+Ak9+NuIW5TQ7xV8dC9EXuZU8iUpKtNIiAN/D5nm5TRJ7OItJuABg9JzDPSi4axi87ZrGFnSM9+RC2WBHlbBY5EDZbgLZ2z/ci+qM1Xl7/6jGyvbyRmXnnXjtoPRUd6WmbGk7JGN4GQ1c58yGrhJZwSmGut77do2czwmNOuGaKBIG4mui9x/bLqKRXpu8sVYqMY2Po9BhuIoaVygUms2cgBaghIIXaRO6WfUOnZ6WODwyV58aVQA0thKmAQYdnb8TlzWLXixwMIJyCqvUy
Dh1q3T+mJ83X+ymx1Uu7dw4IXsJaWU6y/Vkm7C2Ji5bzGDvcMU//9NrIEaU/NjHb35KbtKG72X5MsvMYOKAOJ0ZzK3+vcEFbIU3LMChqidPDPo/MRety0nSslhkiiOPVm5+uzoe5pBdQypuGJLa6CHIRFa6NRk6F6FAdN69tY/YcnNMdjuz9xabAQoQdjxMWQ2bFHjYBgrWjOPrH7OvLQ4elVlf33KXG1u9UhxZhEfBEhgFuARTUwqvNr9BxmGO9gxAS4ErIjDZXBhnojtCZS1fcb5/102N80kx1Dqo9GzOfq1/Vfpu03p6Wam3CgeNs6+TootWsJxITjpDvl9UNnp6Va/i5oGjyWZDwQRgJdlNHMKy5qtXn+G6BEgU5YqGnqOiJkakQn9ZDFWT68mzkLpoyEPGHSTS7ESVPjyxkWSMj8BBH9grHdhvvri+vX2K6ICzZsiK64yLNXxZ8jHDgBhIqjt7dVxfRN8aqqsbgofFyVaoqyzjQJFoJDHvZ5od+SDQZA7+3GqR4jZKIGiD7wbKqWnjduTLoqToZ4dv32+kgE5s+WEmmGfZ8bP2WVvus+sD7rj6DYddYnc3Yw3med8NL0x2jRmmSGUQ34NFytU9ukFrx4uVrqe8jEjZJsOKL75nUJ3xdMF2J54EnKnY9V4goz6SXt707NxjnKwa4nl3uxiKoeWoDKR/KlihjIyruMseyGhkpowPpiU29ns0+HSOlHPCE68PnzPbioPttP4AHUXSv2JkbkX0zamLe+ps8+okG4gIMfEOj2+XCsw0vXtqT/Ez4hWA8CJFBj0B8eQ5E+bV01/0IIj7dtfC+nfZSiZeZcdM+tlGkWCPNyPTLhF4Ieel0/AY8yOjTvm+Gt6ZQrME+9J3NqSFq3TMT9kbcGvYLZGS1JxYvFWEYeKHOliRATj0FO6gthgG9pMK+CoR1ucv9bqkOsCfgxi5BXQ1j53xec3WzpJeU13wuQXZNTSbRYy8DBbSL0dp7yEhvV4FzKV+uoQRpxnSZO5+lc5gE8p05vu7/IcvlYTK3GOrSSE+k3WMdUHuvJV9go5SkvGtLH656eFfWqdt609k4pInotqBs6oz39c3Y+IuzEdF8CNuttxmb5Wj6+IyTE4BujM/2Ut4MdJ5+uu+dYGSdvNP6k2K3y98NIp+tyvn+SKOPgad+gasZpCOIhsxRtS9gtWqGa4V0WlcBqekLTpXTydNgjl1nuX2oqjH1YJ2PnYq1LPDHq7stWsckKXi9eqvlCqr6pCHnsRCW7nBZIrRPeqsOHagCIlt6/95w9IUalu1kltDiZXsyINw9+d2NwBnZKnqVf7Nqfbf27XDYZgyx92BTK3lbUA+zywpghnEG6YizG6dd8kYn+ddK4O+OwSoWWO7wJK4MYlFh9cAc9vIh4m7FmsAqQLDkoJExQ1EdrJ3LvOquE+eSsaFsoIfJAHI+cJmo7Rh76+jwaDMCPhPqQckfMRyU0QimozEUrF3k8Rkk9z6f6e0sRYsf3teTr4DQVoLb1rWB5MydJqSgaqKsF1kENMJLTWGMS3Zi0lST4uiN3VqLCP0QDP7Bhdz9FeHdKamY9Sw9kOSylW4ERXHbPeqJ7PhIDIaXXnUwT/dwzBo0oj9T//znwiSBxil8KItBSXALIG9tYKKwGcdYMqAS6B2iw6dsqHk5KehLQjCzV8fkMMOvy+Y5yBlCn64kJ6nHh3Qoq0F8oyEOqu3WXYqqk17ESPgH7dCegvGXlKd5N2Nc8nbczPjeHEd/2aBO+tIMg5YhnVc6bivCjpBi6wlNlRFFR0gun+HPyxA/oKb4YOXxGAVnJaRKEm8TPKsgL4Qd5EbV1320NIvlp0Cuz5ObyqTRu8syMuMy1O5pkcQbJH14nRXllRo/S3BG0301S6SO71rH+mbQEaiLPi3WFWsaBazZgv6u3DYfTjgpvuVvj6epjmjHFY+3ZV2oBpwRVmQG+cQNCKqSFBOG9Bf2eTlatFF4kPhmaDenuL7bDMebZb4518YFgf9uvPvDrsddb1v251p0Jb3bDDHWi/L2BKN4zBQ9O/IbTxyMgr33KSA8Q31rrN9aiR+q0KT6qIcaGxRXrvWF9prJJZeRbEdt3ty3yu50iMnTUx+TmGcit5HZSpJVisk3CKjpjqA+A01C2w0CLHMbY22HHKkQCgi5mntOTWV5n/uTixBkKNk76FIVHBsaNlwMXzxuvjrHTHbHSVkqlhxdwNxKDIu2XRunpiHsiZlyqiovic5wG5go1hSB2awXsu4cqBmR7jcqkEleLoqdtjw7/8x5tZa81jWEtI07PBPCMDWvpjDNAbs0hulqlhvjA9vVRJDMIGeZM3UeDy+3C1ReErkkfTLHErogPP8c2GgzaN1gzKLkP3JFvUyw6zum+uQb1DdqTYTPuwmfrBm6p6wk+L4kWgDLouuadzHQCEM0CiavT2tEN3QpQraiHz1SSgpC8+1pwAlgQ+aubqGamibhTK+J/XAy72ruiiyfnL+rI/YcP3UDDg81DMZxee41Opfi+1b2XCClDeNiN13B9qiEZIRFAxPtdlCPCA/2nqoMSm84lsbIg3dr0Sm4WVQfsCPLaak9EO7l8/hq41OQj1r1he/06JMnvB4VQQzPi/KX2FKOjmOkm0SvvpoERYE51mhPSVO7NEPxVfmsK0RJYr6XzWUtnO5nNdRpu+99ppwuQ5oP7lNemes4wZdn3uMI6sjVPu1hEz0f8q16GblnFczcpgwZm8DsF+CFQHH2iQ8fActIfPQU0XTGhXFuwHD0yzeSldcaNPzcurpc+NZBFZ76luSR3EPHwfFQXS0+KDgL61jsCeFv+pz5wyjnspfTTHXnMXL43UVFrxTkztQyrGnkQejcxLULhJQfRFmU6Bpg06xYobjDCqVCnIivJrS45YWtAvxWrMo3u8VbD5oHVt7Fl6xMf+QPw1+/QJ99PVb/iMbrF3BeUX1+L71Df84FVPY5F6Dh67nMQH8597IDmc+5l0iyv54LaP/xOffESIqlae5P32Z03bqWcFuDfsk0+OFbwWns1epY9k/ohdsLX9B4GbD98idQSgE6wcGuLZuW7PjNLpT/E8p2h5gNXbaABQmhr58i8O2X229/yC9XeH/5+M/kDfsF/7Jrr9Kl/LKbhH7BsC97y6wqyq/fTnw9OZq/bBe/ft1ncMrnJkCo8mCztv12T5/3CFSlX87x7f+4Lb5+IgjXQonwH5o7tX9Gvt7CFrWX/vzyWL9c23KfXK4eWDsa2qcK5G2hKXut2bz8CSHa666YtLrMGaIAb8tquaQYlC1Jer0UjzsLtvp0HKr+1+Pj6dvh3/aACou/XOTLfczLJa+/3McF58/Fl+56Ig6+3kZtVfTX++TCSjZdOwA2qiRq6a8fdFWagtOZKZurM4o/l4Ku7c+dfGCHM3/COXCtdRmuY7Kvl86HfrG/bmL/HAL4M4lAv0c5RUA/IBwmoR/RjUH/KnQTP6D7J/jEAQXcs2mu5g9ilzL7azj74dxlivo5SpZq6MGZwx9H/w+Xul6jbrze9PEMXr5QmUdr/3tJCvvGr99ICkV
+QlLQT0iK+JeRFPkTkvoO7vNlvYO3abRE8zJMAJ77JTUye4wS8ME+RePvETMv09Bk7NAO0+caKIrebnn+6yfe1+dFAKSrtv12ZD/02XfAJ/9JAp36jp0JHP4F+VGEIz+BPop8k/X/fPhTfxz+VRcVADqfV3oes2T5Ss3Rt428OrL0N3ygRXHW3i+6/nA0ysXDsgzdTxhlGcafoS3PY5yEfkAbjH6Ht+tQ6PPzE+R9ve0P+fwJpb9sIsII1BNbPRnzsUOqWAxAjxu2W/Jucb3TwR9OZungemUXTyUycADrM7Ln69c7kr/+mActPtUdi8EZldvy1vOBISua5kj5xA9Uopx0XGXbYqzGLoZKttnWtqvWfvAW1AoMb1dN5Q4Dpj6h8Hh4D6exVHeQed7u7eugUa4UoSqtB+u6BOuRU1U+j/7hjs5lR3WBZSlaMb9JYM31az73yzsBRVIUfjvPvgc5slw4DlV3OjwLyfP0P7tKyuIfv//XpAruNJ7FhijNroy/c+99YOVH8cgu40eSdmU3arAWpMdOVZOrJE3v7Pv01kc4U8LDfQoyUiI7rckRphV8qtbyWEFS3AsvPTHh5+w24UTLnVzEktxzebo+BPY1s6hYlKYGXI43PxvkAnEbPc2MFtC5Pa7vcKs1K5I+SSlzNZMIY/YdFFdejgFGT06gqAp3z3b6LI+is5CelHmOe+LSmtH5Aqr6FKUlTuYUsfmYAGEzGYLx/RBTW98X2tPjQoH3KTGRdofnbsZI3gr+LqOgB5IRo86vHo5Tz3LdPEnp5E4ZJH7hDAAcfT4QW/Fn+X70ASEN+qQ56J2p6/tnZqNdWZNRcA43KDW3lDNJ9mE6jfGWJrdnsDSWddthWHKIF3sLQc3QrprRjXyNL0WE0l1qpd2XQvRFOas5FTm45phVj4U8G7UuUjekzLupTf1KDJmd6pMhAGdU2qJoyRMWRMeTo7ws6fwzCcg2pkd5GbvwAjKfhU+BIu2S3vGOa97bot13Vjf3ysw8Vno1hL0wAxbllJ3BS4Wf/EXFIHBe3hpt4KUiHFYmWchs3FXd7MaIs3Lq3iIIm2bta/SwdDGotxSIotoorZHP4ttXVeXuc9zUqVy5ugbyQGQv1LSj/TooEywYAkg3yL159Fj77buPOGQKV+XvyOCdUJ6awfowt5cTbzJXdKQy38EJt86ibmhhpk/8oXm0DvvvMyQwgKE4H4briylnY/GJ9usYxu340CEeQzakX2d5g9pJEqrpNV7Ocl1irqyzoZXvUrL53vM5o9RT+FRmDZuXoJSE8uQBf26TbSufTDTGPxjqtWswTeifAPK5fRa4r0wzX0L4tSzneeMicnaXrLCbJqBSpslq6y7Maepsmfkmb+cK3dcH5QI80bBxn0DSU7A0ARRIDfGoLod5vrIWOSVCuv5eLBduO4ZGJyMrN15i9wffuSYBJeqN2z5FY5/VpU40fdSXJHk9QIJtAUklNWKLyr3HsSKYMTnd6o2w6NXcCgqc4Q62wqWq9ZDiiE4GcrQS2zfc3b+R9DqjhCrUoWpx43gnNH/qJjuNOfYEoQeQB4JPowABIoR4ouLpSbJNAl7xaWXZZ8j34Nilc1BjJSDwiy1xCCDewCDuQG+nBBMC68iltYP+DRBGZ6RYzUmkNjYUVlL3/oQx8ARo3UC2XTBMucMOrmLkMhSiyhIgwQ64e8Jvb8SQ7mtwR2q+SJlTjySDbG/OXbTFkIMdp9MBvY1DcJbjZ3pZn1/H0z4GIMdysQUKLwq9cXNMwsCh5wc6hUEpd5CiPEldXePl1uUUOKOIw6rGwaIk8TR6dkqR9a1UExCm3GMclkhPQKB8sBQKRHvrTAoLamuozzhRoo6050YkjpH3NUjavtxUTsl33cS+rcO91IcQCi+uAM0Ze3aqgV0QCR9imdfkBhh5IrJ3brOfdaWUPgVyUumCzGxIBNkrpORQLX9w88NGzHeRxW7/mQG3s5NewY3vRHj84mYPXq+nBZ6E0FDce4mt05mtocXEabtBdz2YxScdCG/10py9JqHJ6I55KZ5ksUQIzLZMALXUC63HhdYpptdK6PARCt9hnsEtyQ9LYtGpCMTvoGS7V1wCTWLo7fmc2xlKdroJYLXsLs9WPeskYwbSH2d/nEkYDPnt9H1swU/uhYbq9Miw7qlxt0VaAyIEIT56w55lTb+fQ56627L2/elNKGYTjx0O8+19X9tOvVnWawVpj2DR4J5WVAAG02Zk5uQ15p5HoEOlA/EnHDKE3jQdcvMoEky1EQiZ9v0q3Ny+BY0z1UiVSC0ZWi3PeeNRD5o0yilrB5Gwz+X1Stx3NnFcpXUgXkfQi/xEkdBExqxN9BFqV9oEsknpnk9F55lOt/MqaHLPV613N/NzFvI2wkuEioJWj8BvIlm7TRfXMfeAJKR0tJ6Okh4gFBGdxpPHsIDoSempsVuf20Y/bUOKtMzTavtPF7nKY/JbVE+G22Ae9fy+f3U35JOY3EPAUj6/tkrg2JObA5LSTfrcmh5jgtExQGahrog6lx3xZqMhwlGkm4X3ecw76rQe2mc5EZIFcAq3J0gwBgSVNb46QcKA7G879iAhDgSK1ibhMoayRgqliUML8liQS7vEN64QT441tOjTynG7u3W0Pauyo6Q4Nfu0wSvBC6tZWbA30rlvXvTMz6wh+jMxLRsy+CgwDNWUjfusjJhwN9EFooCTiv4g3jl+mvgDmMm3WETPLJ5PkIkSS9qlXjF9tCOVAYk+XKK5mN7OHOeyx9P5wNzOcm6xyrzFTm6Fn3XsnyTOEGZNTzF5h4RNegf3Z9m+cr28gzyqnFKyZ5Qltt7mjbg7N9QQ2snvx1N2FeZWh4tdk02Ohij5GW+lp/WHLyZgALGmE0aKAULRtQYL2MtBqAdJ3OIYN0QpV+k1WEP3OPmOREcNA7wOnoq8KbUsI5HXJm8Y1MErMyjIALBEXwgrFpeDkl7ylvGifjgyC3CBG9iszj0ESmpTXwNXIUhkD6Ujxo6sZk511mGQbloBATmvR7lgjXeY8jQorVibje9RVfLQ2niMrLO0ZkvXVhfmSQ9yrTfIszAbns/pgTuLO7siBBjcedLe+b4s2Qwu7ECr+UvqSqFoG023tRVD31vhaRMb8U4w82Dt1UsOlR5Fj8u0Ltbp2e/S5/b07y+rjKi5QBYy8Ml7ThhIAFO4/fJM7XXwcASxR4rD9oIt8gurUuGtT7DAoJaave6whpiv99CqQXpK5VkTHJ3YMajrpTrTpBH7/mpZe2El6YyOKeKRTDHfWk/H0nJZLs30gHFJfwywqtrtmijjbaux5eU1vc0MAPS3vvWgcXzB/GCn5xuk+CwsDW1WId5GvMZ3th9WzIXdnvTp5hPVvcvEgERBmlFjYhKcprqVsiQV0+++cO99ZWwviNQMaJHTsIV9PR7mefcu04x7euHCrwLc3Ga7mqeNr2NSxx+J9F6cqYPcPd3Nd84kQAunSdehkld1vuqs4Gt1QCAXm70gYxtN5J0OIbF5S/C0n+PKg46DtIhe7hu/LBheijFKeupUKYjd9D6ae132Rfj0nF
HkOSyhfIe4vnLYm2Z8zQqiB+7jqXhLZJOepQOzprJEqH20ow4EtrzjI2Q1wrM1j412M/4ShGKvRrUqMsmJS7Y6vn35XbbmS2ra+FmbMAqzvW9onnVGju68QRmErT6eonuIiT5T6uv1WFTvuL6N/ZSVP12YWO6vMwKoDc19KmqKuasxKNkTColywQPyGTsBwsQuVXdLjUWA/Wg97OAObnJUD7/G7q4BWnGFJ0rvUZMfDyXE/ERiDMuTGL3V9CFoJmKhlwMT2FD1Q/pQ/Gey77e5KiLiaXLFqp5V0zxTkBK8R1hphLD9MPK2tjCtKrBRaYxd2BXQHaRrizSyVl36+0pjoCOItXTjCxTty+0TmN2GX0nLYvhdz/Wb3EBGwMmPtnIxpxowRZ5KvvClSla4YS7f4O7qRrASvmhKaLYlPXElRnlyJ/OgdCagZeL1erlPCbcKkyam7np/aetkaVyzHsxX2PXPbZClYxeeyWVH36ctor0dKMFxsgEBre23tUyBLGbQHcAXEi8llod10SF6lvE3QZM6uFgfdqdjYAg07PNNl+TKUSTmhtPJC8o7HitMM6IX2KOaPun5yxMJKWYVIrRBH6Nu9LG/56JXtZGKiTt7vRer1lMx2k/I3swZPxq8y2/2xVDJrEF/uVULRgeXHZMyeqLranGdqg30ZTByMrGorTw2HYK9UxrjvOORuNMDLfEa+GrsUjXYaFdI1hSmQhQu8m4L7sWYp8gFh+o8hLVJDMIUH4AMaSCuJ1H15HWgJT1Qrse2UBfrGmKDgt0RzbeNbW5T3G6VsClOkdEgeTSxqi/fu+EwGYwh4Cfldo9bJ0rhS5cOXqh6bDQvxYOzCXfSPd9UbldNlYRDe4/qSvV67Ns8Bz2X0HA0Iaqlw+V9QbSIvA1Q7RaDLxeOJLlHx+OCncn93hEg3cXA0eutPvRpsPR6pfadoqsxFsNEWcsYPLa4EwUzl8Wqd0qZbbba6KVVmTEmZELouU6UL+5pqXVH7ZeYFewu9QELsdi0stb8Guj7Gcj16+KIi7LfHXAai8/nxdIaqqCoaFjeREi6pFLPyN7TJ+GTscl4VJ5KdEuw56ZyBU+DawLnnaYOh3p5hD3SYZSraOnttlDBCjZlfHydr85h1uHFK3lKrAJO6HXBQXhPDQZZmc0yJciCvXj1LSh5sIUkx87YQ4nfZ+lS40pbXjRwKx0kT5+VNS0xGsTwH7Q23Hn68X2EhH9cGL9jgPrjQgTuqhLppv5J/9ju03yoOBvIMghG/YsChtjPA4a/Bgd/GzJE/oUhw9sPIUPs//w+5v9jXH8AV/1fH4FH0e8C8CT+YwCe+nemdFDoj6R0UIBe0DcXTemvKZ1vqb3/dIbm15TgJ9EDtVmUZtP/Wtr4M4z8vZTvT5I1+L+VWOA/QiwEIBZ26PNq6uZP5ndsL6h/yen90Tzg/5MXXzJ2t9/TAE7+l9MA+kdogAI0oEdT810C+D8hJX6fEo7mP3zi9fpaB/AuGbquWpYs/cuu/y0EhNy+syL+61O+KPb3U45Zn9LT5VECQLfRPFfJz/K7v83iZke1+ADSv2A3/Ot2AI7+Bf62yR1fMfHZeP9m47JcquvhAD4/+/4q5OdhnZLsbz3c1xKJJZqKbPlbB35NvGZpkf1NTP4GTz9j9G/7pqy9ROyW/e52f4a7r99w/1Jd8xftg+LfUQqGfEcBXx7+64l/IYIfr4V+b7qi5HeX+gKeHy71oadfn/0fILEf65J+ILHfUFMazSVIW39w/6ksmH9fWfBdqhmhSJxHfp6vBj8/MDsD/QKBrh4W+oVAwQvgf4T9fAB/diPf7b39dO/nEt8fefsrFyY/Z1+f/+Qi8Hf7QBnA7479yKdvif/uKC44lL80a5xN/cUn8y9VAvL5zDjJnzfcl+qtf1xe4cR1g+jvqYfAkZ/Xun078Hdi61/n+Hzj7d9QFXjsP8/ZtAHD9X+a7sAJ6hfoO1zgPzFCbz8RS9/Ljn8eEv5AwRCQqfbXza8VPdOw9umvPD5MSzkUQx+12gAKTz4grLNleX+FIYDu39M434P8gvT0/qKC8G+bwTclAzb+on8+W98U0K/SB/7HNA/1BzUP8QcVzz+qUXAE/QVC/qo784OJ+q/WCT9WOulPlv2BeOa96troQzS/Q380fcM2ArBWDlN1XhQQfTsiKas21aL3sALgL1OWfdv4zbHOtfsr2r9n8QQcUPXZ5Lw/Qnf5HPoTuvtJbRuKfpTODxVRyfVz7b+kd1pdBPUzRfVPERQU+oOgwH5Wrfqz4rZf3Ze/QVqPLFmivmiz33zpd4YtDP1EMv2sOvZ7yRS1lyzuoyVjgIiY/yWk92PE7Aey+zsFjd+hNo0yKk9+RgxEQmXxzxH7t9niD6Mbo34hfx/Gwn6E/F+JSv6rtAL2szDWPwZhPKNS7GcQppAYJYh/JYS/q/yG4Z/A99+pc7GfxX3+hs7N2njY+b/sYD47rg++2Tu/h/zvdfP/JVT/vl68/UG9+I2G/nmK8R8DOvI/iaSp/24k/bMw1v88kkb+e5H0/51b/kNV+e/p9qtlfzmp6QcT0B/xsL7UmX/HGChDcQL8E4e3SEbkl63K9v9oq3n5J5hKKPxdZOTnvi1C/oL+pI8LRX/VvP983Pyst+c73FzWbHtZqOzQ93+p/Qdu0G9jIygigK/9wej81lrxe9BzFEF8qvl/itvfBWb+kz0FP5DEbwXkb1y4X126r8/7076Gjx+eTfyWfXHHf0Y00T5jv6Txfwz9f1T95Tf0SYb8cwztH2K50E+k6E9t3u/jb/88svn/gzsOoeRvHfI/X3uIX/f8Fa/8s/V9XPgvQWbydzHmvxlg/quI/7tyHvujLj0O/7Pl/M99epjEf0Hgv7j01O/pkUR/f8F/sUdP/gGR9e+hvd/QBfwbuoD+Jl38LPHwG6IlcPR3UaRfbiT1n6HZfwP5fYP7v5r8fhVjv8aQvpdrfzRF8cOV8H9zhgL7791391faJYk//aF2yf9cxx34paX+S8cdZ0LZM6VpPlKbb111543Bj9aD3b4GlbXofRW799RNK7Gc5waxthVaj93jRUfSCaaw0ryaMLMaOtoUu5zfyoV/pLY1DLRuC1JBPXTZkTYwc114kr0kkCgBFziMQT1p3PoJvZvTc0XJF7JOLzwhqLXPjCdM1nDfo/4lWlYSbnGmmIVCZwr9GAeO32mxoK5tpaDBPma4MwGt0QEH/kcca7Hc7tG7SO8dfb38lx4rn6MuNDynx+r+yjfUXS4KdWMsMgSxjwyqhh21lNDhqYA5GkVT1kxBaQgfQC1r7ZNigKYTUAWatKcPgc4Y1FdclJEUtmJ2vQ2i6w/7wtjeF2IsoDYV38Z0jjSSa7pgf9YJX2Ec4wkDhZ7LsXOcwRXlrHeGV+F3LU2eE18gWUGErDbdFDZ0LWtp7LkNlMBaW0713OBRdCMr9ZT4mt797cmOZL/nCcW4JsFvbNl8JsT1cMhmclje2vGgcMJP3yGlZLgc5dYzGTty7PpMJetaO88MXmLOwb9BjGF7ZRjIu5GmaJ444qkIqEbKAQwXG
sVs3YQUzwnLFxXS+GKUA5sGfYMbRBNPWbXnpyoiLSxYz2lxWI0e2faO1R7rQDn06cKbPjMXNVwKUPd2nnwxj3TQKBynm5YBBq746zGqn1Uo2plsp4yqiNuiRl2LJFxB8XyhC0HqgArLXkJaNbnc0ZXKbqMfybbkse9nlKsF8SZDOfBf2WeZsSnMCOndQsuLU/JF9nhbNulGGXYG9GM9oYEpba1a7f3BsBxfCfF4y7iMD54IU8ImzGbbSFrt0SHJ/dsd3PoaIRaXGEjyvMMvL3EfjHTBdim6Yj85o9Mi/f5ZJuJSQ8dzBX2KPukUn7FwLOgdw24NHTWCRCePmyl8eixHBNLw9s0Jn1LLs+87aMyCse8anSl1mukMhfJPMK/vnqvd/WlApSG1urWqrh0vHQEL2Y28Tz0jI2umB6lg86PsWLteUBVTrgvOuOztxTjmLtzvJkquPkzacF9FY/IsdBrwdMA7NyqJovcElvN9W8V2oOyKP9QAWsquZ1MSrfI6v4SP7qGdWVSB6Or0m1saSnUqvi65opdD/OyNt1OTsXdPOjuo2YC+ODGhVAXL9k7NHqBfb2S3W4+iujN70S0SN3MmHtmazf4hF46li4ZKR+Emh6hJ82vWhD5I3TKB23Mn6DQTsK49LuanL75WVzUMEaxv+iXOKQemhKGk5HIUzRUeswaPF4kNib2wKrodRYk1aMhlB4+lFldQchaSE9BfK3mEnBw788GxJnqJvzc3Ct43J1UgtIIpTYoXEarnOxNiEShptjqO1nmUQP17Kts+D9DGJedtNCowXnDmMW0Srid3aDp4weTIEXEe83VtomjxjMJykekXpnUNxbVBjEFz3ElUIYoBZbzZ8c3dPysJkEWvHbObR83tVrCO+IHla7f7EjW226fG2YZ4Kh7bzq420jabE+GW6xu97hG7QcmoaCm21DRyYA7cbNRTDAZZHqVSfXAuBME57ESMms8vde2oSh0ETIbVIjjYE0H4qT95zmCaXQu2rspmHL3uPS8IoOBuOOcE3Tb31ucpa9FTG8oUjP2ece9VeN4qJHYWcwbdUnd7pI7wVApvoN3kfsdNgrB6pIM+y9CnrpcfRiF+pPWcWhhaf+6HscfaO/dXu1W6V72z1BqxCCY5O6R2/8kU6Ky5ZZFEhbTwWMGCBkWGccuD9Txe1hVaNg9bql+5cDN8f3ulEAqmXK54cBqv4H08Bwd0hD3vHehKwuy5CIt6jpuabkvPPzKwzolQSnS/D+WqNo9ib+jilLbnUmOZvOQNesbdQ9pgVx2eRyfS0XXrPOjIOAb5011ntXOUlHo6C1Su02iVgYGDHKPVX2QJ5uneBksJm3ErHe9u4VMHJup1k8nyW+jjjA6KmN/miEfNMEKhz/3EtIm5lhgG3ed+AD8VoKaLSek7phWXDM7RlCpDkxMfBWUdjVbLhmE/6/dgh5F8qaJMSSysmWf3g7ioXhdlo3NOojVMs3gEPmEofyP6QELaronuTAvvdlb1tX5FgQla4PrN0op8b0p0puywOlCbCZjg4AEVkck0OpgGOi8FmqOQzxQ13xuRA8Nk8ABHoekPB8Ud8TPjDPSBgklSmwtNsBoBYVi/fQNoJM9cQR8J+VRfDx2cWvh0Sp1IdgqcLLqRQ/MBteN31XeeCL3g1TOgNyJy1apq0Rmyw5NmAnWHDyA3HfvVx7SC8YlorQDtpezdaZwMqOO8gXVoUYUdluAUVmUXLI2SxPHm2h3aKBRT0DMTnGBEuTC5r9HcWQiP72WQkE+B0wtTsAUddOOdPFKF+Q1Mx4zvYMHizHXHm31dpVYuGwVcxflchbyu4u1AqLEzR2T/H0/XsSUtjqyf5u7xZon3PrE7ILGJ9/D0g+qfuYs+fbqqOhOkiM+EpFDSV87MS7/lNE+1lnmM5SoEerGXLxiLS/19VKevG5bNG9QVZ3Tpg5VQ/P7IfmNTir/Xd6gQhPmeQgy7AeEbj+xj7/ewxG1TWtllROm4gtDEN6JPxYMwIqQ0K+gxWaqSdZ0ZHlw/EwwC83c43ZP8shm8BfSVfc7rFw9g9JVe0yExR1Rp3ZjBC90XS2fs3rpV53C740/xq+3gPLPE1ecQgbbLdOzvqpKjCo5u6cCUtydYSmMjgiScvJR3Eehwd8lv7DPYXrcRhB3I7Qv7ZqQKXr2IcOqLUKcpOMCUrk0qtyAgPkOkPL+ouF6d8UTI/XcfEb/W4EYgsVMp+N9Ij+A8xgduCdqWxePIktF57mNX1J1Kt3JEYc89lt+W8EOYiw9nRKdNOc5xhjb6+3s+H6TVC4TRi7I7pxEG3vK/4kNXcGZHMq+IXzqShZb3g711JOyVCw0YpzdneMN8KuUngyZlf9Ena3vzD9KwtJOFr8FVmBAZtmYjgTpv4JTiNQHedWuEOWBTGwJTO14BwGAvjknGJdq0rWkRwdgPl9fNyw8DEuHH885qKgxrJvTbV7T/EEvc4o/WfJITqF+2VVxwWluB5ik5eQoluVxLXq2sJmvl6gw5xhiHKF52uqdzDzB7nxpQHNdwAgQjKY7ib6a7CXR1fq4os/XCIpjXi2SUPB+bKHn2YKzp6+mxH1t//MmLw1XSvDd3OyPR36W3DM8fYITI/ZIB/cxFg/djPTI9b2SSVLfMrorhTFDfx5umD1F3wnxbbGVnYnNxZgphM8VTPABWESW6lgdX1LGJY2JaV5V0C4slEJYt9iwgpKmJqlXl/EJ80I7Q/Tnijvt4F14FVyVR/tle7v/e8UAA+aOUTPE0/wUdA+aKi/nx9AFffnrZUzSBkXc7IDJ4eJSOHvgKhZ9TVviI8wQpZJ2/jMxXcHqZyXm5I7T4QbQG4468X+sW3AwsviSk2rTq89ELpMuItGZI3IYVSzzFlFwDwqL2oz8V3qhgtqxTpvBuJnDys2DUNHb7wuW3znqN8EoeuVfNCE6e088qFptcvtHHnfOdnjqz+OVaCpm+H3xLM0rIZq99vEFTWwOHJu7+u1CRWbhfbIFXt19ZgSm4R4x0IgI3JW3gKoCSaiplZegZ76zinlvCmJJJhe8AnPr0ADdr5Mbkekic1EQDd6peHtE1wFvm30o0xAPZ/65MX6n1wsFx3yn3Osh7JO7BjzfGeFdy1sT/QUiVzTpjXD6qG9eq1YzFo7ULqgwnoQJlGh9/iPk4AC1FgaE9xJhtJsylfHa2/F8T699azr7f70ZoTUYI8n7c7AX71XYVipXFwxQfgJ8+/2YJXW9vjolP5Udflm6dg3XEj6f+pG2LxNAWDt2J/y6AqhxsMlauiZlWDUHUyTnAGjFneviX0lQp/ryr42xGX/VWO26zHolDSnphBVB0d4KlDSFzLQL0sAFW3uWzMx7xUAkYKdhL9oepE8CoURlcYouD1jAiNYn6zCo7C7I38cDbf04ZM1Ai5LGYQDB28yysXoVWKyHbc38lF7/f+Lku12uHPFSqFzHsTrNx05t/GWOSDDRVWd+fH5Jx49CutFR3za+WxRRIx94wbMkBsyiNPcOdYlvcAPhMRsR/3T9hyI9fDNlfiHQa6mS2Orz+Eie4+EKeY9vAAaKk
NTVQR2UXj8pihRZYN4swLACL1uuz9CZWW7987RsenTzCDaaA58TBpRPiwGS3qH4AXhpaWMGrZNSxULPmhKF/PbzHl/v9F8O+JDMC09912ZXRqSEbTKsrMF5WItDdN6WUTAgBD6mWfFnxLzLJ7wy8FqrWAZ2BU4wsxeiKZw6GmEmHEnAGWT6Rh2L6dUYn4zFr+UczaP7So1dCkJrO9SmsLHPWvseBXyGburxv8gmrTpkv7a/2kIBj9RFlnQCM3hGIrQUgaLVeVXjao+w32bGkL6AxtvLqnOJ+4xvwlPUnvsWVQ1fPCcXu1Io3w/WLbQmUgjGo72Ag9E8JfIuVRFRIR28u8UBXs91aMrGetdW1GPrLoa0G52T5y1/Vka8siFu7A5ZrpBiLCSoJ2sAFTDlNsS8HPNJgS8+L0/bfn/JM8fm7DlxS2JYRlbXNI3a3jBq4fCM8Bf3vwj5e22L98H/jN3mE14PxRlQC/qrBdVkitqvWvrJixcZKOJ/icA6MtKru9HWS13F8GCPltRLQPocDBHpV4xUevMJQ/AwyWXLyz1/nihGtK1GdPa6HmxcbWYX9I1LgX/CTL55JjSOp5Wl7wtoZVA/q0mGY92GbJWfhGkuZ5Aq8GW79z6sCRRf8jztA+jS8LL29uypE2rqjsz9IuE5NQXubzWTEZ45q7fQ58E4NYDuRgwy8wXeqalyK56uNpRWTZVADooI4VFzmQye1k15f4WCP2O0ewC7uONx/UbQ2eYi4o7CMz++VEOjsG9+4kKrpQ1bF+8VrFr2xVAZfXclAuiXYLf5KvDwjRoN/XnPuWy1i8ROYvtUBEGpAkx7qN2YCusYCh5gb9lCqUhLulTNCpf6NKyw5NPrk3meNH/74tXuqHu3n7Ay97+s23fnoDNcSfVns/bC7f4Nr4ZAOuLGjvALuUJQ3tngnOPD4zE0GXUVMiRh6pQGsy0x13szumPIg5zHa2uda26v6BpF2qgOLm/Y/sQOIbSGfwvhYmSbQgjp6tfQbtUFVCc6v5r+OC9dS54GIoKLFmmGoUXqliF2WQl3cTdhepOJP6mbQRKThBjnIh2F8RacWHD+fShyjlwXasv/uzk1KDCXlBjGUki7KV+gPzZEXQx7bScp2cTxWkHCLUQlpB3AlIC/K5ZO9eG+cCnOyPHOs8S+Tkl96oaWCfTALoHDOzddh0wLAoVdZAYeifBB3aPDpm1CDkZRtit499XOBK25cBtTPaVJ6VcjKfaulMMnaZcoYZEvQBBZPgCJQCQu2LS195KV1LladyjADYnASI2DqyKintfBNIa8Z3YAB/+x4pqbiGbJ+peV++jv5HpnWl9F5NlMR6jPWl/RNQEk2qAKrfUAe8/5Cz8WKbXJpxjzb01UyKhsXriJ7SCSD2T1nfhwFrvI9+mIjXZBTIoFPaD0LsC0gJviFcnAl3hdf7DNpmfzHdIpyaS7z+qWT92V2aUDzk+ZcG557ronzdWY9K9aJILgvoM8Ain9iFBFe5YOhC0qejmlFrVCLw+wxVxio4NUx1rrJhSKbzmSM4qQSqpY/QF2ooo5Qf+BtZpNzGzLScaIhtuVBQJQZ9hAUhGFu/SuC5yMpHINJjc1V4hTE8b1rU/Gx21fk1L11pl8GF6ratYvqIQA/45ARLJdIRwC5dO6vwfsbfeU1TBiW/Vblqh+ae2Aw8b9i409TgBbwt+7uBTW4k4DzwZTeBQGB/wrqpfEcdyrv02F7y0oYppO83sBLM78Y5bshBlKaXxrv64fa/TX1QL6QNspixE+r1eFgaYBkLhj4jxHzBuh3kNinHeYqRkEXYGIb6uo850nrhMii3x201IL4//8+/xl0FydDSEDYG5jvz8MpF4TYiCpY5veh4rxj/W6f62e8T7jfUvisrwOUnx/+O1b6FeKfygJKR1S+94SFPvE8URIKp/SyxYlrzIdJ+7RE0yaWjfDVQM1pWUxS8R9Arr9h8l7FP1HtYNNS/GJsp1WE7AlTakZFndbcbHfiYSAwr35Y9TO9k0+vEkVCNRA+tkHf9MuJGn7mj00EcabfycOFwoR7qTInL6plF0UQrLq0yRqtTEQlifsxOisEZRDa8UxceB8SnI1ntyyaP0In9VeLBE400a/8Gv96LVlulB3pgeGDxUp8gtm1Q9dfA0HH5e9i3NZQYyuchyP16yqFEUOUaf4RWBLlLoK5FQqaXsPbTSTk98xCpChxNv7ii206jpkP/XnALQyZaVuAJPGe0DLhMg5ftyiuC7YeDac6+e1Wx4+2jU6MArTgkmd9R/5PYIOFhi66C4ASq8nZf03PY54LfbtD4W8zbs4FlAV6daZSPgrc5fIKu1gyKZTvBmxo/vx0E7dSIX9XNi+8toR7f7GSdeORsxykwzywiqlFQvIQV5rvXGfCk4AKJujCgZOz4w9pLhnYF4MHm5XYqQPs+T7lDsjir/eLR7aajFnX12SIow32vwQY6sd4jV2r7AMegIvCxHgh1ok10yHjlAlOPDxsTCELUM75ld64E3ZODoPTCDZCKoX1SCCSI3A5AftmVtdjN0LFl+J7JolahIyZ7rFxkfaU9AZNOyUtms8jTOD2wUHBhD38GJxTxCeHkUHDvZ1vvKsIXQ9MgxCpoL2Tv1gfPLY130A/37DglJjLiM+5t0jq5iVKBtW8mg2u8dgjg/Th8ap0Doo+pDdW3FdcAwl0RtnWFmS72u+88BpgTRahlPuUB9QBsoZtHHj762iyw9NP0NavxpFYCBOJJbnG7E8cD6GwE7Cu7P+Cr9jQJFqRy/1VMQ93hXHZuedH/7XAP0BXF5aWOgy6BezFO5aokqP3RLN89Ffy46ymg7ZuQYTcNnr+Wf2rIuMPUhvh5dRYEHrTBQcH6FZVwt+9Xszv2N/BFVgiaPcnzmncRFH9DU0Paq2cuwRHMESkJgAgvm9k9qwL8XjsXAkmTnk9rA9rUgAUC2mFaYmkYercLAY/WRX7gkS+16L9MzjFqIPu/q0R4/E8PK0yNtqJEgifUxkHm12ypqKlYac9aTEYL5Tl44g1+eWDwGLEWxQqLuPIuVW7dTawkcTzuzFjmlsSjYJLqalAUf50hkj3Nak9HpuavRVsc9w3Sh8Ygd+2Fpaol9ty4p9zFtHlETkwGAso2Rlx/wu/L16uK1HIQvM5P60jGl58+vaI4VFe0DmQcW0aamScMnntfAZv4HQPGC07tRq4N2hUvX9UQhX3xtD2lqbjX6kwod0gJgzY7uBXErCHHV2H+rtzCoEDzU6h71d+5+r3nA3hKodj1gfnbp5ZywoFsun7cUvnh4b9LSYa5/1dt6OhBW2DD1/1rfu78Rb40VhWlbkx8DOTD3q/9b87+nghDt5Ye+E89ba0elU2q/jh4IdMDEpzbxS1Yox9PgIPHm7kkoIkuNraXWqsPMBAwWS1R+LM2DvkvhjzrYbqqAukiwu/WlGrDl/x6DMMTNk+328X0RfnWSrvoYD4NyfzS96PW1hpW2rH6rmdNLHeOMiEaP8xFueL4k6MLW7fRyu5/DPa7g8+nKkfIRl+5SU/vN4Bue5BV4Rw7tKxgKCWre7
shCou2ZRI++oSqj2ofMheop9tiLmt9ZnpslyC1ZKwnMj9z/4qkiiOzR25l9fsIVU3MXNx+XQD/wEHNIekaXzlCRjHAPmmgXbR4RIAVtsQ2EN6RfyNqnSkC7q216wb916bO4Q1XuV3tMzBpysGsbMt9V8Y1uriyqiGyBiDqhvM+iFBwPifMvwZCSXcSUTMd7n0m6oDC1Ct0ZQmffbgkbbd+HlTJJqXuA3/zQ45DMI/AfxGQvQZIXhxGz4nT+WbnmOEnvyDWtSJYIieLIL2Oipi+FgbPIfEvnoormh8AadQ0koyJd2BDfyjf/LAeoaCpRxeYfrWME2RPO5hfxrOjtEMIoipEY0d5v22EPCoaseHn52auZmGkPWgWk1JAYdbtxNKcPS0DVPb+NoGm3mnZxlQj339Tbgh0pEm+ZMGI/3+SxpS96zbf7R38PZ0GRjHheFxEtxfc1Zax6ouZSAuZu7K0ulcWjE7tFBxh9xTLImJThO//c1aCRwA9YBGhLFacTLYIepqUoDh/60quT89tZl4sl30GCO23ym+z7mdAKLwDlU+HsWKyte6fRhviCJKMlzFTdXt0ltVbPSg6PUOyil/JakBUaruunnphXjH4K2Pqo266d/K4otA8AbX86yTkse2VfMmpfmWWD9/6xe6MVsApAPBk3aJQGOLHO74Wj9GHzl9NcxcivWIaxJWTTfObXEvtyy98KEJsH88Zb7qSXyzeM4dqh/hHCuIZC5S07y44Y5+UkSiy9j02ZpualSGzkGha3sfUFYENPLts2hYz0ebfRIRU7luHiw1RahYNkYuPW69y7h3pQy8X3N3AAGPOt0VrVHASR9z55wWz3ScJjLZDFUwB6ohzC0lAnLxQ0fW9JUToPj3flNo3e6JoFJmmMd8Yw7oqlOw6z2Myd0zGN9ai+3Lj7rnnGWyNZlQiE/5bKQ4b48JamLNK9wsLKIs+SbMdlDTFaOZLSTFWZ0p2ePl8Mn6zWADxVPMZEYc06YYjq3FXSTj6AbltRuZH3fUec1hId/rtDOigXaekkgL1f7QLhsqrQFVmEH5ONldJuU1SbGtpNyCDb799adTMLwwoPFKYSJgRBSYSvN46uZCU0fgP9OdWjtF+RIdEuYjtUjjS8Nv6HNYy9kRJENFiu5X4b3oHfoZczCFPi/ebA15F+tkMR75puV3j0w4jHbkXqM/w1t+aaMyUEl06Nb62pHx8l4zdKgTBHDSNDn7kNVvmogZn8+0EH+yEmSAQi1iovTc0AiBYloRQ+SRlUnsvb+EwOBIucA7GK2OA+nIQgPrtBO4bdQ903b7ncCnrHPnCMxktZd7T7m2LHYrVG75VyrLe17p0NKMtjl3N3/zXIuWNqN95OQ9zLo2REBnT64R6SF+YgdkZJ1wBZ4Kxg/ded2uYtI0l68HOQ1h3vfpIchSEkqX4WbEBedXt+lwQE2sWy+jfh7vDVacVPnY7skvtN0GRU2Iix4y5y5MehLopgnSSxl23g3FYF9Rf0xD0sb7EiLnds8Teqre99EW0fwJBEhXdnlFEi0hVm9fapdqgEFZpse1K7g8sO33k3dm8oWnv/mkoFA8V2rtTi2pJXY6LVX2DeCXW51Z1lpeXqNlVlwHKtRFBL4UtE1TmQV/NZrcJXFltTIHeYriAU3JBc6qUwOqeBmBt7JpltNiOat391LJ9f4GrehfA9XG47JSPHixY6DOuozejUPIo9fVaxfsO3GgkEmEUPZeHs6/U0HzhBVeGuFVYguUdqd/1gC89fdArG37EEkyR9pADrLfE3nXJi2fK5BjkRcT07JLzeyfBbF4KKwSA95GBortbi9awfrkRx/JBTk2Tf28t8j/DS8QzhMOc7bmJ0EE8e8fDwyNCx0Y5H2IF8qL+5APItnEuIWnNWgwhIedKFjsbxptYEPkOyH1To/amK+75YAyM2eo/XeWEDxv8WNkcd68tVm6nHL+RRGJo0MBHcyWJgKPpOpe8sg+jSlIYeBULryZedQbGQmC+Krd483E8776oFMIIPTwDHlTqf3DcLmvkt2CwRzPYCmjBnoDb0B5mNMkYKGlDgX2Ord+VkU7SPMajf8wAGaKy8rWTm8fjC7S8AT4Uhl6zpfnt+ox8FmAIqgmIWsewfCui9Wz0+19JESbr+KQbMnqHy7bDXyEPRNzo4T5sd0pwqfWvOt2jK30YY6TWWVG08DANQS17dWsiZbj4VBpaVkd55lxDJTdwsjGlVBeTc08iWtADYXFhuf/Ba+tOfBFS4f+Gaqk03hGmv95at4ALXJpj2c1CKfW7V2PvwC24EiZnrq2+Ol+XDEfwx7wjuW60ZZjAYkG1+NZn6/qH9DxtAMmvenLqIo9DwVEboP1250bziKm9tSQWVOkI/3zJJ7tDKfdK19oNmrxZZxfVosPeGxQ7GsVZ7QXwehigNQXh5Hmxf1h5TffI5HQoz0jDsTBII4wOxpRES+KaH38fTktdIUZYK/bogheadaHMzw/l60grnui0y/iMyBJcgahiyHlNaqDYTFz/RvUa9RQK30NsUE/8dWvLrnlb/JVxBjx53U/YQhjbYd8J6P4gf/77Ay2+c3J4bpYZsMci9QG95WVkf2EjNq0H9DccIkP8r94H1HgjOuNa0QlLmQe5s2bwl/QMXjVMQatAA+Xz+jLjv+utBMzuhrf3bXFxho5G4FjW3jRZ4p56/ORDOfrCWV1KVNoQVyWrPe2WcilgW40+t+X2AEYBP2Vbbo/HZotHEm01nBJfV9H2nP0E7KG89SkTHcDuN9YDh4hYxkMks/y2LAchb9mirvSFC34/SOlqnqZjkgdt9lBheaMWhu1cReO/2gx7Z2Cd9g702jGo/BqzHy45KGXYXBPLIbP4NrOCrDAOLWnQNbOfj4KRmoCGlqCEZs/rOOF0voiqgIJIXQC8H/xspAVX7O5ytLINROHnQAXmSpgFKDkFnoOiLgdOsz42/RXy7C1ZkdEfQqA5A3whqCbn95al7OXkVZNT1C7ALx2qFVISrAbche+gpPJI8BpwZVCOLnWJVILk0XAEWCxRynDc69SoyKz2V+chxozV3PlCOQnMu/wIWBM7ZzhC7LFubwzIdt0mhpjO9rwhn3JvszxhYDMI5msmL+yvGqxyQDAHfWeVtwJsdiA4bEjQGd/nFr+wVy88T6fDX93Ozf1/+q1oXJdkyZRLGtdd8T2lV4iq/IqiyIE17RzmjcIaRQ5bgvZflN5+vZZ2+Oc4ZGPytQshbvxgpv7KHs/STIpF8Jl8RtCuELeZHRfeTRhdU4AambHTRq57FB4clIOXM6wBXk7KXlPn7fzOm3T6WZakeDWWmq47AdVYUS80xidZBSa4XuCDgQ5fiU2ND1VzUr6ohmSBxGBXMfz9kpXptUxZmMtmuMWrIPTiO7VcBuxHmMNCs3+Dz5wFXdwQAHR/PESCg38diMgIToz997xKJ4Sd2/Ao7DKhG2Ypf19fqho/BvjLx0hVyiA/hy8ROaDnW3io/xwP9ok08iZdy4j1MifFfkEdil7BCrKGVZ2l3AGSAAhwqgY7HO8I9ayesU1S8TyK173iT
GY5u7ng3iRWEFZn0oG/281WVBpKYETCAra4mbxcAfeYOjzLApq9nFzbcqgiosRHnDqg2xHYRvrexQqu2cFLcdCNfntsjc7GW2vSJX8XsbzDcG/KP7npNr62Lo76Ef/aXZapQG7cHSTqd8lbXJta7iWi2DMFgWo9p7UUMPJZRAxYQhtIYn8cqoRUKFDyHMyUcUtVmLl6OpUMQvMe4JkxZL71wN+PZPOeSJNRmxdBs2jRmyUfVF7HzQmEVg9BBtozzZgpgOdjGhzO+Fk6Jw8q5Euj0OxUbqHFgd1j2BxJlhew2Xj/h0Ty3a4k8vM6HQaQZ+/UG6ZMCJWUKsudcPWJtH8QLQF4x+16SSZnTa96gSJbPkvstZtjiT5drv9Vi1DAT73Ln34z84CbUf9tv1QvX3yQVHGhexiSaiBIEDNEiLce/kT4e7R+kS73DN8GEjm0TJO/m+eWGiXE4X+wr2ybUUr+tWdAnqALr/XrxBUro5lBePUmSP8oPYsCEMsCBjgKaN74tvjT3qxuBoxqJ880SjBuoRWTxhdsZXc0g8ZGTVnsuMG98Wq54iBDMLOdXAPVBQIZXGqPNch80d2mHN2NBXoaSSdeiC2Xtd4cDYmHjp4iQgC3gEH/C2q4AwMEkonsP2XHoUYVbEiiKQNI29wTE4lAAjkQBSo3PXKGmlKpijLYTaIDh7wsRu1lGGjVWGtMtrGAIgpU50pQf1azc0vdkI0iFvDQ5Jz/Sb1jIHTdONFUdyUnOpxAG+kLA3CZxBa+w5dlyxR8F04Ll5hDDFpr6lMwGjE7B5WbaiPX9PHj8mpkcTraipQCMKBwz8r+3ibgq20lLBUWszDWLDA1RZTFkjkXz63SCFB4Ga1Yb8iXgH3FErPZYkCZfEzuUm9eQL35//vZg5sxn20sdJGRNAoDX6/FJoyUakjux6dsbVkr+UJtKDWOKWpXdEM2cHsOslrrPVYRfmDdNzohPGNU4Tvv80ZECEBSDj8NJY+uXF++SFpGQxE3ntfvTRRST7EKeJJnjPzupP1TASktMDlw4kTWtW7SJ4Ba6u1+y4fOc9RzcDp3uHWqooWlinMn2CuEo4oJ2KbVusghucOp9QXNcmrMde5IcwBeUWJLlQUgHvA89ZEku9k2174SsouWX1pcGtW3OjHqAjdaVvBTJJd9XRPKerzATM8j3w+/NIJi/5JttufYbFEEqxfe6wph8QdSLVbBXcp5f6OCeJ1qQZLJI4HBxzSKpFWDg35rZj60wL+kouXecl1tMtvMO4mntPmO4BswnS/F2UJ7nY3PAPi1RIWNtpGBumDho7PBJHGknaWq9PYOE0LMK3fEYe4c5QSWPADiLLGuKA37ObxUQPHd6yVnxrKje3wVrLHiD2cQP6JL3JARzuTUp+GJxRSEwAujBmYvs2F3DTfldbiuNXF9+aCWU2Cdq3wdiSMdvaZA4rbBHjn3vs+7mXYJvLjsVMGY6QWF+HlHgSUA5it+ZCOe++qil6bSpiiNGrGroZZC9oZpeXRAZBMv8XxnqVJ1WloBrFMFZC8EQbd2+iINMz+Ro7pFuzSTmPUSNZnwIbw6r1GxnZ8Mbsd0MjQfLoicRpLN/2aNL4QwvML/c/bWdajKGi384qjQlv+Eh1jSCmWYy1d9U3SUPBQZJP+8lXg+vKy0fiK1AfuMkvAAqosFj0uFey629aGHxPNZ+72SKxpslvh9junLDi/HuajlB8SrAam8KiJIBmiyIbIfVozvOhewBb+PZk9zIzwutpEQ+NhnGyCFCMaU5H1zfSltZ1HHqQ2Grh44c1oU4GaF8r4tFj6P2+DHaj0TFo14JzgSgHo+NeMkMN0cPoY225YFar+u7jwZJcNSuYD5vLFkZLpb7mYy1rBB39D4z6r7b7gPeK5sAvzEput7QCD36BP++AzRUHmLJ4gQPqN2lrZi3fextO1gvkvZoTEk1AsgAC1MTQxTZXHpRFfbjpH4TUQ8UVSSA8rjvv/o1flGsNoemy9IPUgSLE3hw6yNm0DienOuFQw9XBkGJiwV1SR+96dJpeNAcmJoSylgZyMxhG0B2zL87Nr6W1X+th5TnjgAREG9a4CeT6SRJrIeSNzF+89qBkX4hfoEcgMp+6x5pSiaHVnEdVnNJs11/38eSvd+puf0274Wnvl8ouEPqaIMeL38+EYE+4NVQs/xLEKP0rV+JLj0ozd/nkCDLOOfaXZVJ/G+gPWsHPU3tjKV9qOMd02Vbh3lI5UsfEWV4bZDBmsU9lC2VUfyXJkQE7q+u2oUwUPVPrbSPfghCM9mCVeLsMn4V5vGjsLr7igfyCi8bsdBKL0AnziM9BmDUr6HQPUFNH1i77eRASr9o0RxBv3Yf4JyxXDuGKOa1sFTuApnQk49sdtbL7aACX3G4tfuFVGmv+GiUvIFTI9hE1u0ujP9L03UtS6rkwF+Cxj/ivYdu4A3vvefrlzpzNyZiYswJoJyUmVJJ4VnytO+56QRMzEuefWtXFfnRPwjAvLIQFlPpikudKciMVId74UYLh78Tr56aXGIWPb2xrKpdeFlV+3KU+dovYjOpnwZEA6CG9kwDzaYDX7VGw3vxeUxLpNL6bBjFZMBhrIGRZob8QP+aTye13cRBU7WOHJipFd4wseYHM8HypyiEZWjSGDXc3CjX7989OI1ZMs6Ah6RRRC2LkPXK4VCENtUd1ZY6qPxPCmK+hk0cx5Jn1t+NhSP5CPYaNMrssASqx3FuPRxvndB6LKSeDcgcjm57v+4LGP2DzpZn7ZnzoW0kgK35aotCK72EwGElf1Zw7DLCT7ZVs6dgcb8u3k6yvca4cUoMrE5qiF7TSu6vrWm91gtx/Tdv+UIdSq1aL4TOXDLTNWeA19cnf2G9mdfNOr9dwcLE2K9UxZsNitFuWrybMuaXAk7QhNmWiND/K2e97M4tVAlAx4j6XIEbuvRKNQjGu/huB172A8rFSZRAn2PMJPtIFgbWaHiYOOms+ym/sw3s5F9X3IL2G9r40CgZJbmBzbvjqvpQ12Y0ioHD2EMDWVi87uUGgXrBTDIeOz2/fCMMFVwu2Xv5/CtnJvzlFAIv7LREdn1dltMxPQp1IRh6/N1VWj7iysH5KYWgwfdIKp1EpNT1DoTkEuKGq1TzjWnF0vXH2oFMegWr96i6cc/5d6dXHmsNOqlR7cdguoVOsYRViP0rYnu+qn8OPYaNsmCt2053gfS0HVa1AGesHKEunJAXH2AkR4CderdpVGfDX8KVR6W5uGXLuPBfUv/1FveQH446VTAq9F1fbE97LkpfTrmeTzSkBEM6rR6BC7fCdhRHiKM5nxM5bjA8hIQAipsc7UBCEBZFtObjWinhDM0jme4B0kRW53bLAzpjOmjKjJ5HY6Po+t0LbgUb9mxoR54z73/uIXz9Jnmt94/lH6u8bh5cKxDsGZpIPNfPn5bieS700pqpkGbJ76Q6CpbgB+vynyBhIv2BmsVQxgA31eFP58jHuz7i8MIt3tvyy/DWvMgD3IDg5gG0dOp+V0PTD2R5VGTA5AecoFivBK1MOCc8sVS3OSfhjaubXn+ml/0Nywv
bMJuUFU6u5lTIjOUvvjO19OZ5jMWfkkEYjyXu02MQIWxq1+jBu1V2rSFoafIezdYchKUMkK2TJOuUtiLnvBN/QVJgaFVqVW7wFaWjdKxwWyB6grCv/3hs3HwFDbb8LF+2BbHo+a/1cJMaQ3DW3nxE8zpJJvkwGCI0+E2sVd69btqxmhxpqeIdL2QE8hNB6gfkHz2zbFQuOBE42mYoSiLGVx0qqihPtfEZGL4oo0rMG/W+0RjveC/WGgNFxU9vjPNm0xBJsor4r/P0gTCd48Os/rnapEmuskPRCNg+aoR3kyOrL9Nu3yPfyHYNj49IQ57/ApIVdUadaMP0KKJcYCGsJcYDak7X4Z/wOK3/cvaVZ7YStDXTZDjix6M+ap0bHoNCeGjuJFbKXZmGcSwu92hI0zjWPplbQJ8zpBBX5bHarMmZrQvo43X9G15KzNduOQjN33V+/sHR4QXrAW/jnut6GD78BYkbjK9I/k8TNj2UYNZnd35OQsk8/cvdUkaIhzLjB61l+/685jLbqoRYltLZ+6quB4JjlSfi+IBL4e8zUYjK8rSlgzxc4SzWen09kCOI9F853feloixPmuZi3HWFmYwc6EtWWuv5TVKEQEYjw0zXBfLaXnMIdcrq1Fk3Vo4KCq9HP7qSSiIJXPG6XhsMmfoJ/UFyxr8qL5FeHoRL7Ge2hW7S/EWL25K8ehluicIZvZgJi7KTbj+B3df1pyqMQIx393VozsPz1zUKFYFBqrZq1oal5CQ94zDUP/md+fwHmqjfTbB7vD37B+Y/mElnwL0kDeDRFGVbt8kOz2eHBg9BnLK+14BDPejRpLNHSXj4E/TR/KBls809MOmC8lnPOd6AvCoJ12hBCfLXiFW3T7fDD+mTr4MMJUvXc5PphcmepqIOTlWJvgYXxF8HpE5eLoIS4oZVRY5GH0va9rucNht6xoLHUExMZB1gpVZr+/scfxo2myB2i5OY5slfhdp14S45zfmKETCQT+KfHyrE5jI0DlwZfcYW1kT45Et9pJ9eNwtP/ndDR+BbV+9ku7+rTSyMwNf1NR+c5RSRpTwgRSi+ssii9RwyHDbfIC/02RGuxpvtfG3m3DOukBV0UpUeLeOHJu6J8dFH2abJ809QqXAS7iF0x8GYWS0oXbmkWzN6oJ7YZh6w45eiLeLELUvEdJC/P2775RVe/tKbwjiq+JHO5IYQ5zFJBhUHv1YYTG9LMYx36JpO/bhyfS1hFSgx7O18F18hP814aQGWhnOs/kG76dakhswGtu5cmSNc1HhSeNeE9i+tq8hTu1W0uqUsM4WNA8TblpOyrMOvQSPKL4viv1kdhvtayMFjaoGy7HZSPZ4n+Puv3SjKrCYNb+FnjN2VqTPzq8hSXNFKCdsR+tltA0jvQYW1j4Jp1YOBsJ40z2tmrKOYFeidjFRuvjbkBkLO6QCcyDJ+lA7nbTTIqbyU8lOt8ufnsQ3I0S6u0P4LtmmGUM9BjDTBX1cbTKs93jzDlUxs5Pa1xMjkAhiLAnaT5lNtlJ0Bk5IobqCoK5b3fAAQ+P45rqgqnj48torEX3+BaJ+xkShiKCfXrkcdsVAvJp2OcpYxYioW2hC+w5J3ROWjGF4War2nD6ux25puCWmnDbkpsxKrlmWH7Q/Tf5CFl7LTJY1S12ta92RemO65xWJSkefF2h1HeojzRYoNX8zk9m19L+wPdS2qYTs1Mbe4qse94vUwPIrzRTBK+vP6Z3D4VGtecH60chg4DuoqrEaR9if++hU/17+Zr5n0V+01lQercZZf7Jfyzcfwv7G16H8VjWLxZQpBoQ/Tl8EoZgY3NZlatSU3XtW/vMyH/+uJBMgsFaDqE0rZRhAzREfR6PK6aO59cuZ/XQYmzRb/GpHELzPJPdL6HVqaI0szLRAfgpCVDu7UA1L7V5pp6ZCzmmox9Qj13gZgT2wpFiTLejeJWOZm12moRWiBBuudQ/4wEFOUSuebOZvMgpMs5weFU/NnIhcM+iu55bwuDPqrh1NbB5nRh6g/Em2Ios/4/EzLVkXoTAg04P3cq224pf7FfCPcNGP0AsugdYvMI3wq5Bxw5Qfy48EZNJ/r1sF4/6CDnC4ddnzrq1eO4Ac83DkV9JepPMqwL5AqTNMXFyP1BcN/ZHHKqMLrBZTyc2X4KGXLwmSKGFJ1khmjJFv16Stt75UmzTq0rnm20eIFCsq81hS2g/6uXoWibETro56ZJUCYjmfl7LWmPtEEOcAiEL87tZ6BZpIegP+KXhbUs8QpK7aWNqaVLQG8BSpYkqjwhjemMHn3mERcG3nlB5xA7P1ZxcYNAG8hRrBE1o2mGf2CN9JCgCEbG7zaYzt8nBl+Vrz1FmLLLsI9HO8rPpYJDXNRiVTRQYPzFYKoJO6FQL/ZT+8ZGXs+HBQdr8WMxyDBMCQxwc2GD+2HtFeaDdg230IOlx6E2PxqkeAtF+cXCimH89Gza0eKOndJKaEDKVSn2Ynjy5e6wLQN8pSpBT74k+M+exmN6tCbg+ky0RLXarBOeJ9utv7T91+leuXviGHPNJnGFEfkSrlmWU12qj7ivpzoFfRIlR3rGaVjiEjJU0UlFNmX7+bY5Wp7547yY/Rw5ASdIAF/3Qnut8yDD3QMFDrPvq2E9nJy/p0Pf9nnrcGBzpdrrrBCzJGxNcnjFlVPWFzAeOeHAt/VR4ZNT5JFELPFJGfdCyqAeOdx/i7zYEbyWMxpbN/m/usJR693zltm8tcYqmVTcwjAXs0naVXOcuUelcW3s/i7T04/ynfJhUWtwt5bv+Ol1LGNhpGK40c5JNyEl1ODsZemxaU8jVD2RUaJi8zMpZpP0RWpnlggQPQC0XVO58h6/TIfNUzmGUtuIz65s7ZOX4O+9Q7dsu952NJyL8owXKeXTWIXO8VxOr6zXVb5F2IY4NMdb/fZSbgoVAr8mbsmcuZkKD7PiTgGVyHZyLQ3uMXjSv5B6ddt+UZDf7mFluUdyrwE8CKmPTcnfJ1ZJvhadwcl63YsXaLdiuf1oXHL6edJuX4uVdLXZ/aMv8q2IfK1ozCI0+Y6xYbWq5gA58vMluFf6OM/aguxeiJ9amGoqkMM2jZqbgQoMmzjNviuIS0E95q0XBnUhMfV4eKHL+tSHcWDAMvKfM7feq7sRn8Jinqp+197EiDctdKDftvjGhoFZjidKsxGtzdbOUmhjJop8h2IgNhPghoQVdQWEiA2FtjlMhkPq9N/91cTOexvoNkdvUJRL/+lD5+nuYrAOvyvFDQzcIktXNILLZpcn+ovrcFDqdXlrcEQBFPaXduK3fbauf8E2OAlODcXM0hsTM7KlGTT7rzujDY6YHuEoi5ftC21Zy7Rteg6jnkbw0f/i7EBr5AskdR8LXuSfblUl2eqDfXGstpuPWcy+UUdTa9XZNN22VI2MACnOAmj0CrPm89flZHNG622vKmO05J35/8FdotwFxGy9pebnjTZ663i9aJ0On55OiTOff/YwrIQ9WIY7u96gQLtM9W5aalFcgX+UxuGdud+kYfpNvvIkkbQOZ4IKKvtFdUKXgACpjLUf5jO6SDxEB
MEmjkvVdlzPXY7iU6+l1scFqJ+uhzIF7Cf6Vt9veyy3zZU5rKUBNCDRcblq4UDhkas2VppTpl6t9vBy1X/6o+S9B6Lal9mlFwHkf3du5PSupWgsShfbSHpTUuzv4KfCAK+n5ZVOg4hXFnwd21/L/wFYxLl3OaS9gjEYoyfb2YDcdhx2adErhfUcYFs64VF0ZI4mB2PaeIKmG90/ZX6BqEm1OX/bqU5PlMkQy1eAEElVkp5jF+3VbsMGrmJv8BvqMpN62kZHRwASNIdQuBSNgvyGFzTAimqKmZkI3AQDOIe1Atr6NesV04JnA5fS8fY6mxIJSqv0N3XeqSBcmLnHxGpY6jXXflrq1sw9eu5B3e44umLz8Zz/YX6vXNzmY+JoGx+BBjmO/6Qv5t3YX7+oC6iilNUM0IJCTWbHo2vrdk6Z+8E+LYORCUlRqnwXHAb0pZdtd0v1PqHWxjy44iWGiI35b129kPAiD6WjBNyakIaIfGXrY4cCJn2vLFdcBKARiLe5bdY8jV7VQdR0aVN/KSACvPG8Cq8GfbhitN2X5j0Xf8WentGUGbgXRPasmtn/ytprEIjOxRLscZj9RhQMxqt3hsp6gt/VcO/00/c1PE7NSXyg9tYnNXmzkv9up7tUXrg15Pa6SIUquz7N2q5bsafOUlfu8NgoVrbcgyPGu2DqvsMxF4rgn3F8cBNs9KegAHMMHS9Cmg1388oYoejYngdaVFJfoFFqlGb+0HKTTusrxbEBKpEvuhvjHOjHuOjRZMGswQ4aUCUEfBFBk0Y4dHITq9aECx7PYjG/BRyENVlGbPKPyyur+ZVujX+11BmXrOj8OLjHxqTHdWtwuc92TuT2t0vfcRhbs5luIDlHXlHCVLLf50fwGYTAOfnfq/Tz5EOW69tPaXrEpgchXktGtie1hl5Cg5qzjqSp69ZBa2Of+khMSsZeSMC4+GRe5rN0xkVMs+2KD5hdumRUcFM9PMpo2tGYKvlJHiIY/Zfey/Oo4AhoAAG/FxC+HlWLvgCuWqtkROXaeYYqtzGLAtpNXTX0xIK69zkEILnksva/i8+go+vAoFYvgzvXSboic4k1WkFCI8bC1y/D3uxzB17a0zkc3pHqcc3tPSh9WC1gGFX0OQYbOh0StxsoS9/1p6AHsqOpDXH/+XH57+9UUZqeaHTGbGsLM7QV2TGufK5h1q+ZfjTyIV9zNbTpcm39HObgjr2yr8bbuX+GlWMtNb5Hg2F0bpS/Un3LC7HR9J/ohe0aJ6oqn1c/YoPDdOyuFMLNQhmN5Mpad0T16HZWDa+/II7vtqt5/7fgJftZapQUs1/Iccq19DGA+yIXTbMh131tVnQcEnA0sAinX8KqN7PdNSnjYIasNkJFw6CNUoQQJeyPWfzdQ1iI5QQtQrs6FQitCICWB36q32wjCr1CCjhZuW2pdPh3P3pWorxPe4++Wv469Vt/cje8CkKzaLU/9k1LbhnBYpgw7xyolb1e7dRViXSeCvpiElCNH+Di27Cr9BF3BpDTuc6wd1gTklheDOr2zKuhRE1Qr12dYgWKWSjk3WtnXagq0diiYNWGo4PTyiyL0LGegvhmy/YslCjDICaNHzvQQe3XATJOZEiZRxnRFxQjSj1AQhPKRRe7GvdXOacybiAMXo048CRZdSqkZowVkm2jXuncfUsDTUl/z7rHeoclS0tq2wFgmbLLSqKbZqPXNNqOdkmg3p/JRuYy8oKG3qqZK8/pvn6ESyv6R1SbP84GD9wZtW1X5vGwU6UrsbEckT8vK4SUPnfAs+f1i5YQm6GEoQ4BUiUBApi+har+eWvtgYfRWLnk18J4J1d+c10N4UhRRwBrpbHcx71Xv6Fnpw4mXShMhQbbC2chFP7v5rg4+dAIENXA6McJI1omSvQf6fqjp7hSmMWEN8n2Mh3tX42edMbK4kaMpCSWbtJJUQnBDmN49sh/dKpel1WlpcisomLtP+xu8rxa6bKerO6IKmFwN2f/JiqLTYhdkfMQKkwwFBW2Db65NaSu35BjKj/LkrCS7+qJIHWFQWFHcIaGxwpVig1EbRaqm+GAKlOxwiwJNIA/hM32cLeMWB0CH1aTsuIo8xV+aLEJSFNZbcPhZTS9iHPE5qB4WPODxmY3eHpljsVP+BLOninVDHS099pgut+F8NoBcfKdOAbq33BouRSQNCRW/ZiIILhj+/ss+LW070/M96uEEsRm4eac2TQjwwR3TCjBtp3Nu578Ga67cUCZfX5Y99gjxZGh/1FOTfKJgqpUlQOlAQSLCtAGVCfEAhodBthtUvHB7Qp+5N+wxpsBcsuBgcEQsEEHch7mGGq99Hckr4Qf/UsC5a5ViXqYBQDkrxtROdUzFNrIvw7VsBbgdOkNa4rlWuVo8+21EL7Fdwt8XjzHS2vCKqjak1NV8WVrYqfTi+bRn5xZ+5k6tAdpMEHR4zy2seH4q6FnX7FxKjq9CPN6lyyGWwPCG05piAQ3Il5ZJkWxQCpTzX+cOSDFkKNJw0BWmC+b/W1Ev5+ycBjyOvsrIMYHv9Pufir5y7XhAnl4oZ9gBgjFmeDEKh1ZcPzE9sC/JuLhU3EoUcPG1nnWR+bHwEgsQ+rsL849J9CS0EbdhjWGJosLUi8rX4FsvQVyvF94FIETDeI0F23sY74Qye/rdjCt0yXzKT15QH97BIQwdUk2a+b8IpOw5kXzViNvWvtTTF/mVVcT18dRT7yyTehuIeF9DS39cFlUfaijLTKrrluD3j4ZT1OorrU1NzTiC5Jy87+GXYuNan6BgFi5lodCBhdetidpn6Ev+pAG0BstHGDcuvgvMgfUU5WTFZ7HEqX9JOZIhy30cIUIZAFwcAlJXeeSQOpaGvzTlm5koLAt0cQP8i5Eell1e+HlwuIvDNHPOpTHw6MTGrvD3rv/rfl1O/OiSTFGb94ikeN0no3nSN/h32ZHjIU7w/wxRROaqJ1SQjK5lR34ZUXSeNc+2NAexmV1edh/k4mg30wAUVbHWF1SpB714tm/+vJI+QLDDNf2GEv+mMt5Am52AfjSS/br1h610tf1T+4iTSyTjL385vFEQdy54z1HtVWIl203oz81QvXo85oiNfIyom3MhwTtWcq2bm9UY+bQLz1La2xzNNoaGCrtZnt5pw4bknWFAYW+sv1ooXt66aOXFriFxFfZNBe8lVk+0HTz/qY5Z3OtnmF5ejGT21GogAapHJZXvwg7mFCp6xY6rWlyGvkpYZUmf6i2Cf0FE83i9E/iIpMHi/yMRAAUSXkmyZslUd/fWHPjkohFdbFSORGsexs2v0rB9pIvJDjQgzGDOR47uFQbcus3hk80vl2QFdi9sAcaF2UT5HdhZq1kI/GzpH5zU0E9rIaMkMSuEehX6e5UYpRBH8bJn9ZWhCGAMlQDOcPlP7XIK2qCcMgrOjHAKYDxImi1huaNhjaCouik8oq3yLXpTn7yhRaL5jkiRoUAKwcbOMFh3FJnFrHqATbF764BbyPJTap1nX4S6dsNkHsKfNjAWzYma7pO3Yq6HBJ/SmsNe4bouCwhWMVeXGyR8kmBNX9dsS5j
kKMe+49NeP/0kl0XQKNPJmzsEb4ut45Z5nkte70SSfSql61c/+qXrS9LWVG8IGVQa1n5GQ9xmEUTHJl3bdVwBCxMooNmVo11fMgEhyugdtHJZx+1BKKVNMYoC8lEcX0Dh6Spcs6A9AMxPhrrwquHNpYH5O5e0znWkYGpVrIvGF/Xb+7bNKuPAQ5vYI9JO3Qz8yMH/t39/RDUpIiwasUN+u/qrMApJQDuz+DzYhMjTNZ1zr9lRn5340W+mXr2yo9zpLEid45X6kiXwf30z+h5a6cGeh9mOPp7VeaFzs5e3/40rG/JVjvKUlzDT01oxccYXLBxkkQ/GSBmroi6BpW6C9W32EuccxST/I+bY3IGUdZH1Fmj/Eohca488kGOf2xBPBFD6bhTGAi+imfJVODzUOWqk5mGyy0gVbGYtLqWtd25mdZvy1FoCJ0G3BGr01JJBpuGMH5jb5jaU/yrTqfpP/pd246QgSRUUWpJW+a569nEYZE0h16mShLjodMvfvvMZqEuM8pjAtX3tdRVW862xAplYbgevja0b+1JIUhpyAwY7DMP8v42DymO7nw3WAle038j95/BUc/WeUyNEb0mRCYvXbNoMnWGPye+KithOGx4tFGXm84iJOnUR7E5tGRRbcSYNhILvXpwBC2A4iF5bSxId+qX4dIrqb34+Fymc9relzI4e+0lZcrhAqQDgTvWzSWf9mdTHoa2kIEYeHFBRIPEeZ/g6x+LVOUNpqF9s1FTg7Y6Dv/+ani5aoffjRGVATBl5GvR3NfXclaVOHLGcKc9BfC4Ncm7aXpwYFAP0BSWTmnpEX7ckqPQrtosrAsCvVbZ05z3vPyvmOZB6KdZ9zgIv0/kl9SK5amo9zSGL3Bvve7pm3z4q7rHDwphHDW9pwQX8zgCZ+efX5u3TYfem4+Ejr48A4bPnxSw1jO5zgJf6WQmYYv94PNCtblMJqcXzdChzjIVkECfwKscAgOWH/+Mp0y7S/5z8+qKMf1RRQpwb5hFiKKHtxyktLv+flxskWTxsSfr5fTy3i6oOM0mg3HRCi8YPQXKv1a5DIn2y5N2D2XJC2rufCNCk8xgnOwbw6mh1q1PMPDg7ogAnbL0DTbAqNs4V/LnOpLgGw783b/2rjMce4ZeVQ6pc68PsgHqsMVHqSFAHwCQmpMGZrlFhsO7IecgEJJvKZ6mAX0wB8ZSSciBqKUUv9wmhUdMBl8euzX+43eqxLmLwSa/nUywEQdVsYP40ZSYj4UQ6BSN576oCJK71UN4ojhGAxJ+Be9a6w0M0JBZpkxyJh67Elb/ITm98v4nY85FaI5FHAiSUD7UqiNcajsa0sHnoQU9dTiurr4q5x7RcGaN+R1d7k5/aLLy+i8aNaN3KK8KQ1zNqMtgTYjb7A3eMGH/otCwbLEwVgrQND2kuOaGUNSsRPmNTE0P+6OZA++4I9o8dozhXceofJoBBY4RQLyg1z6DltetiyvDLKeexPmpxF+0LDQzeZ7u6PQ7CSKx3WyoepXLAxn/aAcXcdOg+AfB1AiG6bAnngmO1B4Qv8YfKt8qnwXpvxEv2RzF+8Op9/TTm0wMlp8ztumgenbXGu6tJKk5sA0QxQuXM/t6ACFTh4TMxB4ZU1Oa6XKKFET74Ktn/ewgXPD+vW76b5BmRZhUijq34/TX6QESw5KeEJLSZ0R5QXBAV1dOHH453JEkXjBEz0Ix+ov0tx9ttdBZAYXXjRL+DpHGwq7VOXfPdfy94kN9OZHvXWUcguWah2GMhsaOibF+1BHzJOpL06LXoajDF+9W7AjPIed2MioSK5X2YaxBlwsXlArTg/stN7WkGsVHb4e/rXi6YSTSYVxZd8X69ZeAnMC6qF6uSe5SWsZohWUvP1OWXlyMX1uwlqDhdeQOffMQrqH4q/btirclAItH/JJo14rLS40LhuQ0QIv+FUOUOQMkQbv11i3SBN0BwUK6jxWrqmAgmsCRnc0Eatpdv2s5CH50ofL9wSJzBjn7ffrVHSyrZH0OYoehDPLgKwBlSAGY2Cj5l3UU+ds4JLI4hLxv+5a27MCgbFyRFoOY7Hy+uT7BZpYx5nRh3f69+nvGa1zCqhVJylsWwYUPZ4gUau6SVAXILIL/D19Izt1f6+gL+6xak2Za3VfsP2aPUu0+XCcq2JQPkdaACYWwjz937fvP6C6m3nnod8cwVtv/KuIxmHy5A/sh/h9s0AbS1r/e7qCxAWmR/MMgDm00GACKVeWQquixMClKp+Uo4aXHlUJafD1sdgswCo1jtXe2zNhE09yFmhbyjTq18wAfpv46RaDOvcCGoxXrNbmg3xmYapBvj3ghkOjW+c6uTR5KYORyOHz9zm8e/MZ7c9JhSP19U5ikgj4nAK/gBzzfO0o5DpeYP97ascdyMdlJ3FCVoUPm4qnaRFDl9uo/Ci0738jtK+jWeZl3rWmIDQvjE/Mpg2zaAV1jqk+e0H8v3kuVYab2TlG2BIOyFUMr9J7EeiLmYzDr37YUPuvPwY/ytqR9lcHlsKfkOcFEYUr753RWP4LQH1yeF0OHTxTVxQGKYh2XGfsV+TP+eJ1RmVwuwAGSACGEard9t9z5askYNIe6YblqEpuWJoNVXb+i6pg/YxPGivaYJFD244LQo/qGfxn0GzEZ9hWgw9DOmUxjBhhzqMG5+T/RiaTCE5SNLQl7RyRUZe8JozTKSAQxMoEzoX1MAphwdVPeN2hWJWiyLScm3k240s2RUg8cd0i4+RQ6G0F/ck/tmVgr/94rGLsDaepxEaYTbDP830Th9j2RrXWuAq8ntIQZ6EcyDZ0+6+4Zhh7QtNmH1H9hs9nxmSpej558qCJr3tXWa/1mFcvaykiWZ7viLxRkqxp2QM49dxzgQ4qbUOOCMnzMuYL/4fbSQWjcu0zMO4a9rJ8oX3oSUTBjBi0E1RD6G66zpZ8hiOJ2jZXTR1Puz4TBkOGLpxT0T254fWp2iP8a4vMSBzNpRnB1nQZS9Zoq6PfBGl71VJ1x6YTq1MOCVKyZcxkCWSDqd9+8DP+tBnjLsTSNqUMm4BmbzR/5TwlQrfP6vzEsJ0F3e/ehr/m9WZjhw2twvIHdC4XPvqyACGhLP7/Ztns6CUmqpocmtV7gaBfHGRuNSCYxyZXHbYZZTzGA2yAoRBcGd2RDCh78QNBMb73/c41Xz9cquIM808fs+Daxowo3bBu0iiyxs5l9tFrXYYVQ5Ey4V+X4FV/PQh/nJgMCfxl4od0bRRdtx3vncQad7bhoVABDEuCdP4ARAvpO/s1HZQ/Pvw1dt7PM9EXwsCw9DwPSo+omepXxCK+Wx2yNpvOvEkcCbMf3+UBFgFGkZqcKrzJEWdfPoDqFPzzOm11E4Y5DZ0KMIpP+BjKXzZGr/Mjo0NzhD2Gj3i9T9s0pB/85/peOTa1GIOLK7hjs70nBejQadBa6ZG9zlIsWQe9+KDK6Qd6qLRJSGGvgUkOvO+BAUwdxZJtyFQoUPUV+oxIALtGmVAuoO6QYx/+AlhL6MQ1ltpNubQb7Xm82Al85yMSMnaxhFGy9+Ifr9BoSva+AEezGCm4NKIp
PzgD1uaJym60PyVjnZFR8Pm9FHKjYe/+IhjwOK/1/aaa0hh/XWjcSww5gDlt2rcvREQzUgu56c80JrwSPMDzsMsv5hrLOh0RmFqwqbKOq4F1BsdTTBYQpJV05iHFIzaorswUHWkKm5n1Ozq66bvZyc94kRTq2coWJw0U2e2HG09kp3z4IyioPZwSn4nWGDShqaM/rgZQo6xWlelx4OHJTZhrYN46hyLWC044O6RzEInBcHX6IiZLK6EFLLMOc3KQZefvN7mwmfo7d6O7VupxLMvdQGgQe4jAwmttR7xbdkD0TixftyDNQh8csVTPI3VFXRQDXQAiSzyl5/oMmiQTk2SD4bXyLk8GKnO+WmeWQcTTmUGr/c30wMQ6U7dzXRIcnyjgeyyvrlD8DHPKk7JnT8hYAqlljNlar9/Tre1YIem3/Q4miPEb+tE8qjnO58tebudDCrlKociNUuVk1+eWtHxpCuBW5yr8bCqa5PqB/V4idVA3Ce/Z8LfNLIbluQNV76egx6OQV4Xh+DNj9BcZo1yWwpb1JXhQ7ZopRu99BDmA+aZaApy+Rp6bBei1JI5RVPybFMPcFwOG7j1bOhdvoRmns/6i/YIRM4+Z8LrkeADy17tsHowf6OcUhvLcVXlsGox3eOZyTpFPhwcVnAjQsdqQNSRCoH7DCX/yqS52PzlJYiTRIsKvrDofxWcXwQAQYd4BsNskEhSxIGCAzwF+/y8Zw1YaZYjEzwQCI67Y23Q0BXmoW9bEBnvoocXrYm0Ioeka/kQTSblxhe0Wr7TyT7lc9ie9kInW0e5F/vSSaf4HmSMXOywdc+WLFVVVlJ6aB6V9vnZ7Tvo7Vk77a7XJ8XS564drnzcttB2saZ+0lr7pr4bUpbdbs2f11wVm+AN5kPRFP0n+sg+ZVCNXfBfI1s+/G8dbyosMks2gdgvh9ww8rsPq8rPZCEilFXQahdj94WqU9u/we1d1yi+ipIX8/XT05wq6DqL60mnFPkP5BU3PyZ71ByuBc9vRIlbyX8iCqsI5TGpallzlNMzCVdFmv83tP3qMZUoBkX7obn8dC0Pc+FPQpkiQSegyahdDed3o20/pFJ3L5QdbpEnZADoutK1olfXs86LMpvqwZZ9L2zsgg1s6EUypEumtK+DcQTpUfR/X7f5gb/5hlqtfiLST7EOGTQzwmI3EmJR4bFazX7YVuAtPf52pkmSnruQw2bFTjlDiI2VfqWp5RcIyu2p+5k0uBiVpoEYVSV929qcRFo1S9RL/wgm6ngQmeppypZLpWciPGl1oIFQlj2AY88I+30Xc1mMuQzmYWQsRS3X4c46omlLHrbe5EhCb2p7FNJU70bCV8YHsPoFuelRdl3eiyeQzLMzR6rOoinf1E6azLXfsYkd80Kw39vtX+Jdc8USF58hyjLT7dw8VnDHJ5+5GQ6rRZKw7haOuS9qd82Hx+mtKvuzkcP2u/GVOFRr65RJ0U1CeYfxNnuPvOsBqoplU+V83fTloQhQTEDAFwnScAPUl/wtPkW7h7SO7ABzoXK0ryIKTBjKqTLRImvFVfgqv+m7n8FyQ+kbHuPih8RlXZylQDASj+tIV1OjemZgkTVS7LJe9EK/YAGdCUmr6BJCCvjwkJ3T5V2blPuTlSmEL8ScHn6pkvvmDOhithY551Zr3VHgokEvjCajJkIsc3/RETASZdvaDewRtn0vl/eIKeJuv1ipebVPUNPZhyxZCgfrLjK8AUxkIt9rOpxlPkPgvbD9Oj2wC4bHxSpABbcr8Nhv/kRtHSUpJnqc2xtv4HMyfo3d2WtKYHMSJAySH72OkX7xqyxcQZTlVuOJV7Je8/eUdnxuyttXK7TReWdhn1NU9gBpgPZdlVQ338x64lTK5tbgcRk7MEkU59PxAFwAXSsE5a6WQe49ez9yFumk8LjQrxT8lbz+BheP3QWDdzbmsXZc+j+DYMztMjqgFWns0B3C5XEVw3RmLPrcxrRbCgW5H2RJCXtXwEWYuL6VsJt9zexutQbEVUeUmX4nRWdw4VEpbl3kBid+eRqpupbsLcfC1p5RLx03+ZMn3QGv5yYcl/HGdaOfczNcUDt77oBJ5LNGNoJ/7gXdLQtdh4r196rztkF2zX6uA2gh/JKGXYX1HcMG4iban0KqPHuIQc3AGXk/Qz9YUMY7XjO8FQEm3hay0F5Hj+FFNrjdfcIO56EagPU5KBbYRnfvQo+J9AKcIgUBpVeR6E7dTNmRU0vJFF2gvx8CX/qre/p2G/bppUflFB8OeFRmRS7f/VQ36TH7kdmPwvSLisMnpWs3ImFNEm09J+Wz07xw18gARhKxlyDjqKK8mj1KAxcF0lT6vC63PwXHcSWS3RLb7FLn+k/ytRM9fZRv9a1pC5DuomaQnBQOSbXauroGWhiRwhQSZwnkyzfiWC9UvbOaDrhxMaWDdwaVKyeTNcbelI/jeEVzo55RqZwayXyVjxFgUuJrNoi1kEfBf+/QI9kWtrAy5XSV8HH33Po13jSlhZ3GZu4P+5NnIizj+tsjrYM4+NmjxT1LgDqqlW/4hrc0DAcp+s0saZmxOfCCCsENrfzGVMgYPt36rEa9CmoGJfdgw5fWgx6evI8L/sNs8eHOZMLgu18O3A84jnphJ+25r8pOBQ/sR9olRI4n9dQd94HBNJXmc1pW9Ds5GNfbRa7mwJLluF5GZdzrNrtPAcpskGoyURO+TXg3JOTkfJ2ielHDq/pbWBhTfRS13RF4XZqYIePo4qb6glNtfIoduf/lyRM5YLQpez7vLjBkZ3EgReP2njYgA/ZT4ucC5NYyfq4fxp0eD+2Cu/NflbEo26qoVIjtBcK4qEn8lK8F/v7M7DZ/sYCw7l/6aYUW+R4g1pggoPZMm97M0l76J9m5T/Ub4LtDAsQHkwDHiC94J9rrk/Lmhw9Fl+MuBApeyenBtPAwpvBd/LevGXHpx+WZjiGD8yomzY4EHWaycSr60eRoC9VpjkI7Ql+TfxhJyBCjuqMW288mw/i64GyoeGhDX2chBEaZ+NN09muCvHtJAvIhAqPi2btOm/quYIBc0sZfPqafvf7wWFSCa+yZznARHmBsxM2ceUTtfkirHNi712GbJli/Thk0azvqXQo8Th5Quonzxs0FzPqO6zQVZ8kmAMR2Ocnkp7v11ruw2vLtnCtsgc9eNOTEaxBMQiSEnVWfvdr3uVZAZgp0BDgjJYxB4fI0R9tcpHxWQz9BENXyXmmvooUFERV5HBpeVwUUTwcB/tsuk67pSW0keNHJ/5uuF2fDXUKsFBMm9O82lsG+GxZj6vxr+KtAIM7+sFQS7/9qsMus8NMkYlbZqDLG1+I5+flEztVg1OZHR8XDvia4C0RpVrZ5fnZDHL62PpMRDEgwxkAQxs5m7jpOo+5yj+vPe2Y378bbll2bVOqj++Xy2/E+Bdz2fZVeSPrgQatgVb54pQp9f6iEHNkzfPMHxDiPdgdzbeYjYMM6G+s7Poskt/KmtcODvqh30gQSQPfqoroaqj2tw913Q7T7Xf2lyDg/uGDFgB0iaveY82cwKNLMpyMd8Fio70kv
ppCfB5d2ejSZHbQceRl1041hT4TaBIfiv4WaRFUkzx3Lzo65U9RZ+SRo1OjRnsc+/3idm5uaTUkatF8MdtPlzG3w4bxjlYSVHc0+mG6Yw2vPNyQKKTdkEbuJx3ahNnTA9J84392ey+uAsHi0t0QSki+XlqcW5WP7y2M7+Ev87/GMnA3oyEvmQaNOCYRTpc8NR6YuEQlxxEfrKsGfq0DxqcRr4iOjXjY7utf5lqZMg/qzojMov7JlGxt2oCJTjTJWKzfin9Gb1rZsdosUxsvBxcNIRKcFUEHjWv6wEBgBbMdUByC3Ig9PTsIlWGf9LTdEnWmCK7WNuPqs/vDuRUgOEiC21F5LnHeb6CyEzd1v9RXF3zru0eKIZVZjY02eTdgAdOkoeU4yCOv9SDXfdUe6mX9tsovC58Xdtme/ECpAFdWgbqZA9Cw5qHTqSwqiq7vsmWZ20+e5/3TYJEY+Xd32vQVPe41LQAB1CnRJOx4Acvw9DYHwc0ayKnH89xphdWGnM21Lu/d6+T42+CvFGsU8x0ebgbDD/+1QL6YlnLury4KF31H4u+3Rq00U7ZAO2qaAkuAgI0Z+31fmD9eB0AKe/fsw6ax3wDRtUGnrJvA9jx5hR3IwTM6NbdMNLC8sc9TlPgYnhNiqOhxPGwG1LwU83QMRI6jVN358XDlvZoZfnXPmn6B1zEf2K3EmizjRpZm9pOCmkLPm1fcEfbGyaf4ghjV1rgv+UOrLt4MuisG6rGJr/aNm6bsX7DdhHPD3/HAvtCGoOXLzuHYNLuAp7jBpUu4Gd5wH1sWEul4fUbMSMev46yc/Uu7NkSQaw3ID3XkzmgDS2xssdwEuxOty07FOhCfqbM9smnN0WPygF7/46patkQ0vVz4veFURcQcVFxFIVA82DGc5WM0I84S7EyqXLi5LO81B6pRVJG/46NGCilZ1/3QeUJ5Bp/dq0pSwFEyXW78jIH5td1/2D01XuzXF/RM5uDDQ7CzKaYSr43h845mKrYM36UbpHIs2JoMtMVHDY8Hf7TnFEYxQ6ozFWQ3Pic3rmy+SsGR5uGr5pTudunFhE8hwOR5tF/qazofnrHMrR+mEtv5P5gbkcX9JdMSQgFhDpPiMi+phq1oNKVH0EhI1GBrRoXE7/8Gzx/QXOuLWRq6Ap122AISK/lql6OvcDEjFZRU4Hih/L+y4HmV3ISGqFbpgX+VI81vGw49qNZLwN7PM4M/8z0Lz+K7w4BoxfUXRuM/x3BEocG188ugiyznPvjD+09Tq0pMBeMOzCTstB4HjM9CfPMUdSVKOYnzMu/5JRdRC2I6CPZU7rFzstDhSFwuOHzxR+ENtEeGTaEWIOjOxFCllIc8Xu0VEzDODFrgRiT7sFG3TWiVlp9I328fG/EAum3ubyYn6FSZsgPHsfw1fG+6tzlUtPLbnHgvz+avK6CvNY81/oMGAbNs0bzRaT2n5Xh7ZpOdwPCURiBIvlVPzrMuQLBWnCsUw4KB6lA0KAgxyKJkFhd4kXHfAo+ddU082BB22VcMbEYg9XyrEUWHj3AGgOInSlXdEQmz60binaBvarugs+84EyktNXeypZYS8UOcxbTzs4uKvXv1aJnGKLrn7lJJfq/ctTrOyJc3pV7DKmb/49lxnZetY7Ace7CThrfZFgUYmglDlWf2ipiQuCud6tzYZdtOvmTWn3tONggOjxkp2WliuvD9n3I81vF8x5nkAvh+YfQ2+hS+hxrjGPCM/scRMar/8zUewhKqy714mHSjfDgHnzb8/nlyb12F+hMIa9cAw0eSqw4mpEwtQV2CqSeFGu3DQeM2sXnp0XCuK0vEPmZdugPcXaQ5OOVeyHPw/PGKyTfBX7Hlkzf6SS/JkK3JVLT58HrbaWZI/mb6iUaNVwnLaHr/WpYi75bA0ocuPKrx2p3yHw/UMy9wpgFno/pcMONywDCMc07w9bV6MXhf9F1dm61sPbS4HD5uLBmEdKzoxYq4l7CUQZz3yfbxYUqy3Nl1E/ssb8EFE7q2CHqO6YBcwTFvlGdeP31JugIvP49vvckydRSh9TuKw1+GaK8FJ+MIPS+p5TderIl2Nm4JS2dFglU2IjZMBeOcJ6jNzZyyhUsSNHgXL3WA5nsOLK94a/+Gik4yY2YDqEaF4zkOFZqYLcSMPQfhnH/9sbtExFsF2wdEFtthY+2w15Gmf/UjosNoAt+pejZoEPe4tSam3K7GNAl5/Ch9ZfRKsTkXKzE/Z3NNzDl8jF37VvCs+9J1QJFQOntm+m4Rye/WjWl4T6BNyKOAnxWlfb89W/lnrmC5Bah0/qi/6yPQniKwfABvgaQOmv+uul2+Pa8ifQOIYaE3vS46VREJDGsJi8eWuBkScwGyR/QoFS2oGa6wg1EeuTEAiBJtNcaPf6ApUVpnCEls8RlXuxKyy8b69++z6fFPI4WjCJDYb2H741MhaFCGV+iKpzK/qk23x3SpHzLGz64aPuuoawl+NXMfIc3fC0CCf5L/uRyvZzOVJzGJ+vktF0X77YJlz7EJu7B9hsLmp9pj+c6NNwXk+Dix3LqKJC91eYWiwPrINCZDHb1nk9Oj/s1ZQEE+5a4tdfx6ahZ0Jg7o/mrSujYc/CHk1+PiKp7D0zXR1ON0baqSWS68bvlI9Ba/Uk/3bks51mIRSh+wJFfVOHJ9v4sFaYn2cmA/aTD8lwxaPkK5XnaMahdQ8AC1Xc4ZVW1fsvoc67fENt7NnUUwP64qsH4e3BU8GINNjvv/v+ffSyAYUNuWg95cqt6ZcTPPl6sb+vENgVNNid0uHxL/5be5FVONxUNZ1LQvD33X7UnzvHroTOkDHgogC0XcYO27/PEVJFLVJxek0iRr+Ig4TvNQ4apHwJ6OpG1p93M09oIRPrXTf1C5GlYiqXG9G8wqL9HZQB9/ymy/P9MEBjJydU3PnpCTfAlFvjtT0ZltIuQ8brLCNHUQw7f2mkSukD+yOwTdWtMeFptDb9VO5Rma2yX7JFQU0BLvnsNJ6RBBZ8u7AUaaKm0lLEK8ntmVhRzbAj+atlTfiJimzKp+ihA7INhw7+gahP8VugnwHQEM60hAjEmCjPH76PkpdUA546GYKv/lL5NRt71t9/7hxqnL9AInEtaCuVdzFy01j4fBTxmeWtR9TR9PHH2/6CckFLYvjWQtNAKK+PMX8WmnmqABKKn+lSwRTe4d51eRsTHQ5zXLoZxV9L8QscSsNpBIC2dYVgilrLcte0kdejWxzm+t2uqbNkpgcoESrErvwSs+P9OV3jXuyjKQTfmKPWc/rPtpaNZL8RYVkVqcoq1WyY/ENFbzNlnQqM0MSThRfYn8d+D/yHJf8r77uaHTeSdH+NYncf1AFvHuE9QRCEIV424L0hPPHrL4rnSGq1emb67kprZtlx2CSIKqAyv7SVqKI3mEiXNKBfhornkesdIJVjexm2qoavGPZ7UyIcGY+07XTIxtQm9+iV7lEBmk1aXl0cG4LrLK9IJw73Mvfq5RVctPeS8WcMjEh66MWPe4/icNFLCn76RMBllrG9zF4vFlRaL6OKYIw/rwPOkcbMCae9rH
0XTEvyt8BdLy93V1wJXy3DrUwXevFSkXMBv0Er8EzqLguwjGKSCwfryunu1IbHhrVULyBnskl6hvJPQGO+JYaD31LOWKcLD/y+CGCLmP2lF0PnxmiqQniYjopPOAPjXvnTokKXY0nbGMqNfKSDo0Jo4Xa7JhDWzaMLSdHjPadjYIGoTxf8eNhZsmbFs7/K1MXWhnuHr0sPVYi7igyP+YkuYDCaLHq4pLOanrArTOBSovEic3FjBeaKtah5iQ/yvUxXcrraMerw1JXdhKy33w8uOgzJyJqyEjJVoOzw6MtjHKijaw9zZMXy1mL8WlG3fXsQNMe5r4AaFvL+4p4YETOydVHv3XMNAiAS7OYHcKV4Im5YKirFCbpz/CC9i7xIoBt2N4uco7grvvPx5JXjQLqN3Z/eBOZg2UvNmI+89/jKDYGDBha/ZZ2hb3GHz/vgEZvueLS9ih2k7kz6pcV5iEZo7w5P5MPg06sR3RAsQZgHHYxtoqN6XCl033DJI75xj213rzwlbTrw/Z+VZ2IXnDdZoBWv1rXFigNMGfAvRUTBzHmVoCNgap0P14J+Gv5K0L2VKmpo94qxsa9GpSvIFlPd3xMtav0UK6DLBCyItYgWZvDj3q2zOQUxiGulId2HIyV4X96OCgXPXvYqfuEnx14kHFWPaLmm9qTq7GVHc0FW4WkJZmd6WdmGiZiyXG9r05Gd2VbDbhwN6a8bqBmQ+mcGX8hHc49KjWab8kH6lHK6WQils5veOWahArqqmMfmOVQ2ZFm10IO1j5W7CLigVjSMKr0i3u4KGmLpNe3han15W5xQha+rFY+SHgSmz/fHjAyG5r7cHKcG02DcBQ0L4Add9wCuX+qVxB4Ttr9QedtNb8+ANnteVjUeqIvYTnF3dGGRWMJJnOx6iRqYX5UAf7YcxJxwLzoSkeN0YUAGvIaP10iPIXncZ4lxHkDQMwLzrPjEMH76MbMtbjzpKbu4RqqkpJPEg7TZDKy1c7xmyR+s4Yz6F49V7PmFTOwr85pe76hwTw7b1HFKwfDm0bflfdoVjdBjXs3Nhe6aLXTrAR4ywjET5PGQFhO9dth2OpXMwnatpcnhZV9ZJtl7WsKwQj3JZHYspAIzBLRQEyvtvK3o6JbacMNQZaodGdTQQpgGBLR3u0tU0Ba3nHC4AOX0KBsv5dG+Mvx9dBmh2g6ZK5/6tb2D5CWsF8Wo2O9twl6yNOuZgHH9FfOM97M2SkgZt1VKt5fsxE3grLYfkdw0PO6gDidCMyt7LXDOWAHDKDAoaomS3T721EGrYtQNrFJY4hFFize5jiEEmWzkUCKYF1lqDAnMRZS4PlwyKkD7cqe9pom4o78fTr+nrw+fAgsQbtgPWArYBbvZBEjWDtLg75NvzDcDlhtD2zKHGhq/1e6MBEtAJlIMSAtAUAMvtrBA+24O1QZSSEAqITNcHQXMQLeEwZ624kq/90+P8FE3telRbumQ+nz1LDd61Dt7nMulDnqet62DZ/JGt1wkIu5itp1eNxg9p1SwO6No/N6Q8EZcYozWBrBYc1lp7vBqgBEFc8RiR1Ghi5GJGB3WTRMV5oxslDYcU5Dxh0k0pYlCYAYOsqyBFQWIJzuV59pVcJan180RkxOWYlkh0/Kh7s8zPoQ4CAMJDUevzrMNGZq1yrK+CNBwhiplmaY8eEi0FFnu9M13Y5cZMgJxb9nL0RLGIdVD9o3jNC332mNh0UO7p4RvX+njjoxccuNkhWG547Z2a1Juk+aD6Lt8Lwy7RNpkRh4uCIITnJZuH3RKl81HSIOIlq98coW0UpDOWEt77aG4yqIVnXLPJj7hG6LpyIIAIlGp7fhcQgU2OaPt7U7DuFCyIPRsMyeSQM1T85CzvmiIIrqE+VXhOU4nQ3W4Q0ZsU0+33sZ9oNQdHnGjf7dsTgm60tsOIoi8fUb3qZa4F6vFJt3RyuDHOsgLsDCNhvQZwnO1IJ3Wk3yv8AvB+CNAeiME+eXpISnPuT3vR5Tw8aoH1/Ww51I63Qxaf/tGoWoNDKswTxN6IuRp03EaRJDh+/F9M6DrXLV6ezc2LqP6sHGOVNxqaa25D8ooWkfMXiLBMIhC72scAz3livajslgWPEsqbotIWGe43G2Wdgf+BFzbBairYe1MyCq+qufk1PK6z8fIpmvxKHnc6aCAx8UY/dWnpLdpILhUztBQhvTLeLo7761z2Bjy71N03v9NUYrdZOkIapPQiOXN4+6g9l6PP2ijFqSy6XMXLEZwVZexXTvzvvJIHTJNTtnUEW3Li7XxJ28jknkTV7qzWZvjGWZ/LycnAtsYHc1pvFnoOPxk21rxkrbKxhguxa1nvPsIfa4spustCd8OnvZBV/ORDCAbMoXlNoPdqlm+EZNxWUSkYk46lfdWGXtzaFvL6QJNi6gbd3/7qVjDgXi8vNqStY+yileLt1iOqGkuA91vM2EZdz6NxcZl1nI3gCkg0rnzrzXPnKzhuFbhCD2aV5cF6e7Rb2kCZxUr7zThxWnV1drW/c7VQ4C9eptaSHpGPcwuSsAZ9t7Le5TSvH7qG4MUnguBv1oWKzngucOjtLCIRQXlDbtzZwwRtQtWP6wcJEt2BhlSFDXA3rnss2pbaSpYG0p7ph8vQM8/HDZsWtZeWybYm5SAj5i6UcpbDT+KcACro7EUrJ/weC8k9zrchD4KyRL61znyBQBtIfh1WWpISZ1xRHKqIopqViSAEUGu88swpwcmjxUpDd7QLpWEMDfxItz4gL8+Q7w9ZA2z3MIDk1y22g6gKG69hh2R7m+NwQrys3pM4zUY0hoNmfeqf747s/FD51UhUKRHQfAzgLfes2FQD0MkX6AC2B2ixce0rwQlzplTQ7CTV0VkP8HP0+fZyQlCXUdW4sQTAiZQtEdEMxAPVXbjzHnZyk9iIPyduTOeigmnlmcEJ2Udkt5pM7rW+96dPug9eeqXC6NAhqC2/JoHLSFHlguN5SUM94Cc34s/z310g+r8zZXbbRDvCyGXskSP8KSBeSFsJ1eqOu+jYTgsO0RycQ9+LOLaaE/PyIyKQL8meRytkPyWdU5SFmp4b8EZjtfFLJAquuot55uPlkAd1LU4R6oYFIhmA57vymzzdg9G1bf89eaa2oC2fH57WdbJaiAZQUmmkE/QQFEVpBSzpD9z7hloMZfcg6QXyzgZxXfterl7kyLUx1I7IPHfDle/34yo7WzLfvfFlPKrSZHLciJvizFKwEzJs0O/9qT+knPXLgHAu2gvnfMbK/YDDRo1H/XQywpFpWN9YK8eHXIeyGbQp9V5adzGBJgyusYQRwIbOrXClbKiUWy2QsBMtwT1XtAksJ3HA0ud+rI0fYaUCAWUXMW7Y11a3vv+lDwAM5TcFTylKt5tqF9xKXgKuPls72a83e8JR0Xy3RAxp5SCvGmW+l4xEOZippJo6lNmUtwGLoo1hmBt1pNZVx7UjMhXmnoopKDk+cZYnp2913m15qwydIS0L1d4IsR+rJ91bpo9dloM09EtJ8J7rq2IRzyBOcuMrbKofzrtQxNkiY8Tl93nwAHpebfnwvXCGBfWz
AvhrVe00wU7rzFWh1Cj/qXSJfi4mvDBmYFzKGqMb3OsP2BFchzzKj104iJdcjarDmtAV3TOA65kbh1SyCrCCM1xgWMghuxVWwMtMU3iPj5H7i3JgqM7CzK/5/wdA7GnyDUucLBrwWMYZner0KmQXnTR8Q85qVkHow0V28ICUhAOfZhou4F6RLi3t0RjUWbFsSRCboJTSfecnyTtBt8VJSn0G8I/fQFfbHx8ZINefsidEb7nCc+hIsjF88LsKTXU3cAx0onDZ1eOoqrCPHdpDlnX2iRF8UV97ytEyVK2FfXpLRzOezfUcb1uXaoeDkuaN/5dXpkZOCEUR9bhCHpXym3cgjp0bwpdPi+ZZ+Xs1CQsGZnA7RfhmUBxzsX7t4JlZSF0JTSZcHGYarA4+hkbKepzedTC1DiGkvvWTuWe9pKVgdyC+x3HA22xhEfOW1jLYS6Ev5hjEvZLMRWdkqSaMw3hXdgcVPIKUWlNPcXqWunF1okdO0dI5UYUeYEuD2ycVCuQNlilNIiX8MWEZqc4uZWDv5LThHqzBOvGCMDLO+WSU5i3/mGF8w/Ys89zjbdqPP+A5OXl+++0O8y7LUDZuy1gw2dbtmc+2p5+IPtue6ok+7MtwP7t3fbASIpjGP6nX9bootuGcJoL81QY8BIa8V7bi9Vy3E/oyduTX9BwOrDd/BMopQBPgoNDazrO6f7VIVT4CeXaXUr7Np3BhoTQ568ITH+hv36RHz28Pn7+GUGhjwNbmczFx0ES+oJhH0eLtMyLz2sTn03D6eN7/uvF3sumvG8BJCp3Lm2aX+7o/RmByuSjjW//Oz37xoEgfAPF4r/rztj8jNGfowqb03q+z/s4MM2n2vw4MJ2WF3xMwjmc5n48P7NbUc6pPYQx+GEbw+E8VszteW0ePj9O89jXKdc3/fjuA0VRms6yX3/xPkd8Uo/Nyqb55cyu70DvWd/NdnmAvmHyT2IGBf2e+jAEf0HwPzAAgf5IfRT5gv9F9MehH6d/2YY5oM77f2Ya0hjcH3QeCX/5kpV7el6OBXQp47DRwyhtrv1UzmXfnb9H/Tz37VcnME2Zgx/mfvge27IswknoD2yD0W/4dp4KvV/fYd7nbb/h8xPKfHw9g12wLTlXuqx52yBNynsggxfbKQQnPz8Z4I1XOOZx/s/Nnkak4ATOZxXPN85PpHC+mTsjudqGRaBF6TSC5d4wZEGTDClcfEdl6p4Mi2JbrFXbeV8qNtfYdtnYN8GCGpEV7LIunb7HNBcK9pt3u9eW5vSKINidfZ40KKUqloV14xyH4DxyLAt3727OcD91YPuwLFXPpxcJNHG3ZFM3v2JQ4EDh9HF0HchvZ+K+a8a9xdOAPMOD96GCsoTb7//ViYrfa8/iApThFtbf+NfWc8otv6Wn4pLlTd0uFdjHzePGss40kmE27nV4yy2YKPHmuKKCFMjG6EqI6bmQaJUylJAcdeLTiE3YnZw6GM+4U8kjWen4LFluIvecOFTKC1MH7sJLmC7kDPErM06s/mAye1hewVrpVii/E8rmYsYhxm4bKIw6jTrGjPeHqqn8Nd2Yo9jz1kI6UhF43sXlJWWyGVTkqGpDHOwhYdM+AmCzKYIJXR9Ra9fluuvxgSj4lHQGZneBpy8DSefCVUHB80usFLZ+ebvfq0mpapeUD/5QwKQNnAKCo+4NsVV/Uq579yDk3hj1O3plq+r6Xm/NLq3xkvN3vlcrfi4mkuyCZByiNYlp9zHXlkVvMCzfiSdHB2C+f9PMkCafw1OVoGSTG3nz5QB9UvfFHPMM9Dmk5W0mj1qr8sQJKPNq6mO3EH1qJ8Z4EYEjKa9hOGcxBzJb8V6cVjB7r+JhX8ZbcRoqeAazFrlPgQLLgtnwlq9f66xfN84wt9JMPU5+1oQ9sz0WZpSdwnOJH8KJYpD0Kuha7wU5D/qFjWcyHTbNMNsh5K2MujYIwiVp8xw8LJkv1Et+SJJWq80lm6SXr2nq1ef5sdX4YnEuyA1RvEDX9+ZzkTuw2D+A7iPzpsHj7Jfv3KKAzR1NuCK9d0BZYj6Wm7k+79Gq8HlLqtMVNKBbi6LR3Exc/KZ7jAH7ryMgMMChKOv788LUfeXwkfGrCMbtaDcgAUNWpFsmZYWaURbL8Tmcjm5VYI5icIGVbXK8+p7rTijliu+qin71YpSSUYHc4fdtck3pk7HO+jtLPTcdZgjjnfw51vfm1KVpZnMAP+f5OGg+JCdnTnO7rh9UwtZpZV3FKUnua2q+SPpYoOtyoxzAJwa+XEcwYSFaugiKG/po0ObdPJ5pgxwyIZ/vp8gF64ah4cEqKi3I3HYTWsckoFijz4gL2IH3zjAHmtyqU5M8byA5PoOEsBZyeelco0gVzYgc6WolLGYx15wCLZzeVvlEs25yFDJxTw5WbPsXZ/NpklkmlNDEKtAsfhiuhO6P7WgnEc8dIGwAOVz4uOQguEMIF5UOT1ZsEsiKz6jzNkG+B0cOk4H6CBGBn1yBQ4DxFwzid5Q+ZJgQubtSWBuovQYpMFaOtIxEqsuKwmriXF0YAyNAqxqy7Zxliw2+4xpGzn0uaRwBJseAdI84/UIu8nV5XJFKyBP2MEL5Qjb0/SrZUsDD93trALwN/eMohvfKQ112ns/4GKAcx0cWmDTNjdrJMBkDpx5v6uQXSr2C6YWDNLQlmuk2o0CLPArKCgcbCkTj4NkJRVZ0ocUgxbBFOCyTnohAWW+pFPZRFBPk1FpT76UAiSrU3ZWI75esq8CEy9NJlIR8VXXk2wbcyV0AofDsiNCUckerXbCTIsFNKrKKXIEgj0T6ymzuvSeM2iVAT6rtIzVrEkG2Eil4VM9u/HSzEfOVp5HTvddv2rjRKOHav4d49OQnD17O0ZIgUVpT/GuOrOM+WX2DSeNKQ1fjMUku8xBf2mk5O11G48EZskI6yHwOEZhr2AfUUE+0GmbGoNhOL6DdRyh8gwUWt2Q/KIjZoEIQe0Pxei35GBqlwNuyKbNTlGwNE9Bq3hyBKzvuHg8pSF0e3X7EwaPP6MP3sRk/+CcaaOMtxVpX5+lZXh5EAMJzZsXcomJebp8lzjovXXd4I4rZxG2Dg2x9XZem1WjLei4gZfmYdbhjVA2QwbRZhT0Enb1mIagub0HsiEMXsTPNO7l6FAlWpBAJhfH9MlidrgFF7+VAFUglX/RKmbLao24MeSnGtOklwj7m5zN2XunI86XeglibYGbFRZHARIa0iY0BahbGBLpJbV1XNQS2NeysfNSZ52vWq52EKQ0EGxFkQkNBmfbDr0NFp8dT6tjrgyTkZLDcu5rsIIwIj4srYNiD6EjZ1bm1y+xLN659gjSsazXd+wlQTcCUl6QdLL/CAur5XfdsaeQ9qbAFQKR8YWnUx90enQxAyjCZY607jH0M9wvIClYlUWXKXaJtNEB4inTS4DoNWUsd1k1/bwVAcoBOweqCyYEHQaW1r42Q2CPby448SIweIsXoo3g6Q2ktB/LIozm5z8hpXSKaz6XjDFX18F2GTV+dKlzdsmgpOUrMLqnxUvSCclJn7IW0zkuQPPO9TgjzXu0o
7VN4zzEM1dWVf+9qFvO05ABVwMt5txOvDD9M/AbcZDqS0CONpgNkkaWCcahnxOzNQKVAo/enas7H132KMsUTmKxn6aOYGqw06eieWcF7D2qXxFnCrJgxIq+QuMqvx9UtmmdmFFcwB6IklOJdigJb6GklrncavYjN6HfDoTgqS1fBbFdknaEBSr6XpjGS6i0XI3CAOPMehOoFpJEqHRax5x2hbiRBRxF+keRMY5bHEjj7IbQkOugYkHUwKpJWK0VBQq+JXzCoYVUnMJkKaIk+EU7KzwAlOfUt64Vdv6cWkALnYXMGfxMpuUl8HfRCkMgWyHuE7WnFHtpkwCBVvAAA3Z+3YsZqbzeVsVcbqTJr36PK+KY30RBaR2FNlqEvDiyQHuRYL5AjZVc8m5Idv8/O5EgQEPC7y3jH6/RkUzi3H3olnFpXDiT7UrdrU7LMtRFdm1iJV4yZO2cvXrxrzCB5fKq3kcFMfpu4q+tfn1YRUlOOzOTDJ68ZcUEeMIXbT8/Un7sAhxC3Jzhsz9isPLEyEV/GCIssamnp8wrriPl89Y32SA65OCqCZ2I7AjV5VGuaDGJfnw1nz5wsH+E+hgKSquZL75hInk/PpR5vMC4btx7WNLtZYnWg1wqbn17d2WwPSE93jQcNwxMWejs5XiA9b2FJYHMq8bpES3Tlun7BHNjpSJ+p3xmZq0L0SPhIUmqITYLXNadU57hku80Xr52vDs1JkYoFj7fo2Mw9bzfzuHqna8a7XjALiwjX9GSX07gKVUQa+C2WX/N9bCFnSzbzlbExsMJJ3Lao7JWtr90XcFkDAOQUsyd0WQcTeSV9QKze/HBtd1gEUC2c5OHTeeGnByPIEUbJrkEVotSOr72+VkWXB653HySBx2LKvxPnJfutrofnpCLGw7m5qjeHNulZBnBrSkuCmlszGEBhKxs+QFYtuo25r4yTCqcilDotrDSJjQ9ctrXh5SuvojGfct1EbmXCKMx1/kX3rCO8G/cXmMK0tZsrObsUGxOlPZ+3WfP282rcuyTUdWBivj6PELA2MLcxryj2qkWg3EbMZcoBAxRS7r0TOXaaOjq5zCLsh8tuP67gJgdt9yvs6lzAY3SiizJbWGf7TQ0wP5bZi+XJrNHoRv+oR2Jm5h0TuUDzA2ZXfTfeNnoq85BwTT5ftKOsazcB6fxriBWXALZvl6ypLEwvc2xQ68smbiqo7Df0WR44qyr8bWEwUM3PWcblg4r2GfaJ7GbDz7jhMPxqZAat1NDlwSu3pnSwe9ljqjIWQu7LpaLy/VS8wN1VtWjFQl4X0GTLRuzIrOryB3ujDPbBKMTz+XRcGbdykyHG9vx8Wut4rh2z6s1n0Hbu2ivyvolufPrR13ENGW8DRnAYbQCgpfllH0Kgi1l0A/SFpNOIZUGVt4iRpgIt6nIL58vNbg0MLOAK+0Ldxpm657G54kz8hLJWwHLTDJkZ9qi6izvhjEQCil3EEK3R22BcusjfMskrm1DDpI07P0tl42kY48dkZ2asH/beGTf7UqCmVm88nbIBy34WLZuwRmwYWn421XvmdBh5hZi1RhnqFsFeCYPx3n6LnfGGFngFYjVuLmtssEskrXNTJXIHeTU5/2TNQ+Ifu3a/iUsdXwhTugEYMkBdj5LmKUvPyMZDPYdtoQ7W1sQKPba7ZL5sbHXqnKZLcVXvecqAxO/Iab5ybfvdZDGWgF3KaW90K8nB05B3QSw7bDBPw4NzMX8wnVCXTluOpYxDW4caavm8bes0PTo+ZuBwRDTLgIvrjOghSfdQ5eS9r+R3WXb2VsBFO1W6rSVAqpqFw+dLuxljbxnVQm0bxZRDJAWxuhQRGLa0ETk7FflitGqRrrZWG4VVmhEmpmLgOfcwm53D0qqW2k41K9pt4gMR4rBx4azp2TPX46FUz1MiTmS/WhA05u/f87m5aKKqoUFBS5B8aqWOVTzXJ+GDtcloUF01pGPMXTU+FxjQJwjeGWq/U0+PsAcmCDMNLbzNFktYxcZUiM722hSkLZ4/Y1fmVNCgM8Q7Injao1fUySwSgsy5U1Zfopo91oDkuQm7qdHrKBxqWBjLC3t+YR6x63OKrseXGrn4N0bvrwJz+zZDItxOjl8xgP4ol0C4qoaGabxTt7bjmjcN5x6KApJRf03CkP5+vvDX3ODXGUPkL8wYwv84Y/hVJvYPqcHf5/g+E65TESb99ks28TMnGJ/ES8e/mSz8Jv+HshQvwh99vdOV7Z6P4VB8yeMB+bKW6fbvTTnNfwJnUPgPqdxfyf01Z8gv6HdS6Sj6hfzLeIN8hzdEc16ZTcrToSNy8JH+cp5y6ecyA4NK5ziZ0vEc/i+nnpf+6uzvdDD34OaHoQHt475ty3k+efujzU/Kj2U6/a3zv8HSyZT598n9f4iPtkwS0Jwd06k8wujdFUDW0Jfd/CY6zv6EA0ENl7mfPpPF3+SOsT9Hjn/GKOz3cAEZ7W/BAmPfyfsT0F+FE/RHcAJDACjMyec3t6B/wLfvw8RwOe7NUDBv86Mtw+5EExSFcZ12fxNX/2w4IXDyyy+M+RUq+Heggv6XQgX7IajAACr57frm9RLH6fQrTqLxW07/Q/6f/Bj6bkp/vIt/NjDgEPQFw38PBor4QTBgfxUYiO/pjW9IP/ZLlwBL/yZeP85Fn/dd2Og9sNpvglXpPL8+KQZo+b25269naNMkT+3PK/zWofDbUTZeTgOWfHbwN+k/9csYp39nfPDnbPQcjnk6/50Tfxk3uLO/y88xbcK5XNPf3cf3+PLZ9ArA9hUOUOQbXwNHvmHux6A+2/3GX2Ycw9dXp32i+G9fCaG/uRJJwN8A5qPP3+Dz6zj/E4ii/r56+QpZxHPp509x+/lD/M5YH4KxYf/tx2/1BDj5P94L9i//8jaBUw3s35ACHwm6MeDJHqgDC6x9pY4+rvTDiu6/d2D/2r/9PaiY5+HfAOD7N1ZPSx+Hc/q/dVTzGHZTGL9rDU7cgbf51ItgoB6jf21M/rP3+YOU+SezSiTyN3TR1waJ+q/0TsgfcmRJIMRc32Xl2E4foADvX6PlRx1T0Pw3WHwd+/yGlP+jLgsMwb+HB0Fjv4a6/wghf5nLQv4DA/PJDZxlplcXA2L9KBbs83wAB7ZvZv48AcrK5v/DY/1Df2+llZRT/SYX2J0cOn2HFKQsfrCHd4XT6ZScTAOMgSGo/c944f+a9OnU/QtQyFHTgxntD8n5XvNtLIHdOCE0F//2z4ft78VmKPSD7jgM/2Xg/oHqybObcph+oGbya7oRP/0hv5bgKZVgP32nSI9CIpQg/hxKw9/6u/T3ImD6O1Sm/ioi/9LxV0TW0zB5+0+X0wn8n4N2+pfvn/cC/xkMwb+JQunvJKG/C3v0L2PID2Sg/yzUZ1mGxPH3UJ8QEYH/Saj/NvRC/9szhNQfM8li3zT99j8Q9sRfAXsI+oIg0G+vb5X/jybmvg3X/zwO4f9sUvAzCkNf4G+cyO8mvf5rRYH4vy0KPyM0/YWiv2EL9l+I/xqyU/pieqvQbAO3H/hLl3/+nhH4gwuLgMgPLNYWjsn0mef
4D/vDn4mfaYl+nl7TnLb/m2O9P/D/Oyj5O3Na2Cmp0FePFhG/hwfxmTv8K1LV59exB7H2b3lIMM9rnIIIzvh/
\ No newline at end of file
diff --git a/Documentation/etcd-internals/diagrams/write_workflow_follower.png b/Documentation/etcd-internals/diagrams/write_workflow_follower.png
new file mode 100644
index 00000000000..d0f41d45af6
Binary files /dev/null and b/Documentation/etcd-internals/diagrams/write_workflow_follower.png differ
diff --git a/Documentation/etcd-internals/diagrams/write_workflow_leader.drawio b/Documentation/etcd-internals/diagrams/write_workflow_leader.drawio
new file mode 100644
index 00000000000..94eba26c98e
--- /dev/null
+++ b/Documentation/etcd-internals/diagrams/write_workflow_leader.drawio
@@ -0,0 +1 @@
+7PzXsqzMli4IPk2adV/kNiCQl4hAq0DDzTG01gTq6Qufa/1b/lUnq85O666yWmJO8HB33Mf4hnbiP15sfwpLPFXamOXdfyBQdv7Hi/sPBHnhEPr8Ai3XrxYEeyG/Wsqlzn61wX9rsOs7/90I/W791lm+/kPHbRy7rZ7+sTEdhyFPt39oi5dlPP6xWzF2//jUKS7zf2mw07j711a/zrbqVyuJQX9rF/O6rP54Mgz9/qSP/+j8u2Gt4mw8/q7p9f6PF7uM4/brqj/ZvAPU+4Muv8bx/zuf/nVhSz5s/5UBEKIPrj76BnNm1UHDgjBg/4n9nmaPu+/vHf9e7Xb9QYLxu3X1kLN/pTD0Hy+mGIeNHbtx+enzev7y4LFMucRZnf/ts2EcctC97rq/686ROA6/nvZ1W8Y2/6fOWbxWefb7QXu+bPXDDzVO8s4c13qrx+H5LBm3bez/rgPd1SX4YBunpzX+fZc+a8mfuZlq67vnHv699t9Ig5E/7n/vFzwyXqdfGy3qE6yDmcYazPLen8nW35M87JzAgP4sAfT/Eh8r+pcs+R/j8D/qYd3iIc3B3P/KpT8o/iw7P/+u6TfXhHzs8225ni6/P6V+A+i3CP3n6w+IHX8DJEL8bqv+Dox/bYx/C0H517n/hpPn4jdU/hw2AYuyyPCZyqE8s7quE2Qb/vP/Rc2/EzX/Zrz8J4H/E2IIEv8L9i+YIaA/wcxfG//tmEH+55j5gzrp9UAn+2HAUdVbbk9xCtqPh2b/yJRk/D4dMzX5a0OctuUCWo1fAPyD8D+Q+UOF/wmKHnwVBQlB0L+JCThK/gXB/pEPMPyvkotSf8KF13+b5L7+X8n9v5u+/6uz8f87fY/9C2r85ZHLp4mu8jh7fqtj+fz8//i0+v/9F0A9+97+kSP/wq1/ZmpfZxkYziz5Wt9x8lde/bDmZ3sY8x8YB+b6bg9K7vxPmE38m2QZhZG/EK9/4ApM4X8V77/jC/yn0vzfxRb8fy7Mf0/0f4b5nwrjX91U6L/Cp1/C90/S/mJIjv8zmSnTCfnLXufH/+jqdft3cOZF4H/5I6L4gzPIn9g6GIX/QmD/yhr4j8Z/O2+o/7q5y+ItXrdxyf/n9u5fbNbrRVFF8S8GDv8nrvyhlv8bhAMjyb9A/ygcf0J/5M+IT/6VVf928pP/dfLX/U8MyPz8pv8Qkj81DP8XjdOfeBoJRkB/7pb8kzBBP3/+hHe/l/2Dnv940b9uEX4aHkXM1h5jWAekCOVIP390263ebvlcaeAHJ7F0+PxmN1/Bc9CBDRjJD7Tning/P4yTFjzlQBMwona798ezUOT7ygqk8rDzJZJONn0l+8N8Wrsca8lmO9uuO9t6f6COZ9523dbuOKKKB0Wn5VtO+1HcUXq/7cF+Ok1SLfN19bFY18VZn1jqyjsHy52cVhr78POR1XK9COnZz/At1mG7HnlAGBKj7nsYHuYzBX+eiub0WB4R9x38NFXk52394982kzGn9T9s9KLZLxMc3HWMrGSVVk7fqCge8qE3dUFHPrvUbaEQNH2w1+1/rWglecv1eAmpkINWpRhVy3emNNJUQ2Iy8LOWGrC3um200FIvlYkoDVyRfS2enVf2JZSVod7Pqq73qhMbxO30sjJqSBf29L2ivVE/sfgYTgYyvkYao8xxuM9uDfhG6cUJZUXmzPyg7+os+w8yENKb4zxM/OZ0sXlPR1nu8Ju5BXQ9FwBsJkfQ9zAm5D4Mper5XMS/A1JIxcN5c5Q+EVT5NqUX8QxlhLgPastxmlVqWo8Qb+6WnjkZOAcEf3kWYsvBKpnnEOLiqC2q8zKZpjEBkRe7/ix6yTncKDfcVq0EMUTZMiV7llJeuLWfD3XAsOjgM0tFDwH4QzFiipinWRag7BA78QjE6DWTztdYygLMOeW1tRF3qzRl5kakYRrqMnzxMbczbdH5x8ox4h7HW5Gy2jNhelbY1yjGp3m29cWqmJGGt8ecMGVARs+vij6wnmuvfVPNg9WMozZynxXnFrc3ZkTjgrRzeKux+/2g+JkwrKhWHd9iGY1fJt2IfDoUzeinmPsUpNkhCJvl3Tz5aLbp5CWGgqC0cqcXq3AFiiKbAcctvcJVX1dHLETyI1U9O/NZyPMvfeYH0A0Lf5181r4C10oipnSVt4mM/g0VmRF+LWOfnWSXuLIn5NUEA6j+Q1Kv0sg8zFJ9WoOD645wFHAoKcbxeTDp7Cy20EGTwJidnBr0RpEdGb6rtEPdIvL1Mk/p4TQV6koaG32KQ0z3wPe89UV6PATIN+5++iLF15s44Z9lsl0dEKnKBCdDzocK07iW7M8j732Pn1+1YRRbBM/bdt8UFxOru+Wl3bYhmTFt3nxMfs0yZ8+Ni6DuL2R+LdIFfKJh3Vzmhx78R+Xpp2FMJmU7jXvOO+QWcfH5+YhctB/oK74ZSabeIntY7941cChVKG4vgRngn//6/cqs5tEks6U/E22PAWGUmC1r10wSmTcSYqGaHf/QX2MvSTDCHW2Zy5SPJSYxnY7E9EntQHePgCLo7/rCFb6JlA83TSauBku/2FnCsTcORgKhuPWyfa4R3HsJty9KNgFkJaDl7VihwIcTly6ED+gBz2yFQYDxOgpx54u6RRjnWUeqPgdfAXuGMIyYKAWBNPr+guXMNT0YBTt4NS1k2yXDVAfsYApKbGMpKCwug3mB2GHUheii+Q1NpHmXGXNrsagTHeWYgi1EHOw4vQbwNo3hXU3l+VwOxdOfDlBAOZZLPguQEK11C1REQdf7hzqlTsomBVhMaMo32ai+IMGIMonqBnMA3JbJtzOSaKhKSR9SMEeCwSLh8yCd+pFJ9OnT5GJUkntLApTIeBOr3o6njl4MTfK0zG4mZcTVtElga/AgDhH0gjeXh9acvXtFRx+KRJZQFQ2xA0Fe8PwqbJYBZJWHDOhJuQ9zoyUQ5KiRinuphcWtlo0YV5kn7jCCDR3sotVwGzgxlszc6sPfZ7cgJuJbkru25HM762fsUGHZKcjUwlXw6JC/lMdyDqr4Sid3KirhJsotRmC2Y0KoI+dXM220RjKDWkFngJDYAb8Z7CMGUYVvGhkbQIenu1lzKbQIkX8Ua2HnL6LXDECr7XDfbD2wTjrlH0D04bzTKBwL6g4CdMNubn5FymLlaO+pHLWJ3xCPHu7x9I56VUNf3lhk7r59h+H2lxdq49YBR8V+md+uV6jPZ/72QIVtKjzQsgLIYNiMxNxvlTGL+BOa335+OmCQzg+G4RC7TxJfACtcooOgjnZ36EQg2hNZIY2oq420Fq1PWjShV0vejQJu39s8p+6VLxxXq70OBtOb5L2QyECmvEu1Ceq+tAF0k9x7nqy9mV6zizpsCz9QPle/vtc8etvIW8SVlw/WG7SxpFLLI3WMGRK4mE0fz5Gz83F5+PjWvTeKhvhAiJ7K7kNh68OyjxnSMd6nGyKAT+WNSpeg3Ay3w++XHwzD3FMIbACLEwGRCt7fTg4de3ELACnNoO+9HVAmnBz9WQLT1HhTSI5A2a8I4UjCzSNznYqevD+WCswISbCATtHuCWDFOJ
m3gbJA/Igcl534EJ+EPEmrC/84Q3krRuLCvUri3JDHuiQUVwo3x+pqHFBAm5tuE+9eXfWkmGTGkLVYzftRvcobeiG9e70F35B/bJcpAms+5vBZouhLlXcOyJSZcpTgAlXAieVw4leB3QZmAS+ZSoTXnSfrvT6fChXtknNCn91E5kCjj49qLpfLWZNC8t90MTLUXa0dWhtU4hSfiAIm1SMwBjcaekkIE+J38QpNr+rmQqvMxxVnpIyUfL2q0C+17rjpUC+d75ZgmG7JlRmqiTa7IdriFb2Iwnqm07LmRy4W4ACxhhPFss4Ckqswj84OQloETiUJpgtiodDf8Bu55/3uidekokDWwa4ISm4kCYn9Lr3gCiiAlYqWHtDyNSOsUD7xSfboW8aPh/HMP0AK3NBmNc7iSbHLAhXMghPIEYlngp55w9zKqsGPNWO+AEDObFUb2vqnIS2j3AmN0QY+WaeW2iVT/Lmrz/rR1K8Lvwkfcj/XF2jvHSvW7MSczV1dAQIC7ni0f1+PJ5vDpR2qzfvRumIk2Hrb713N0GbHeza+41eKGidrf/30VOhJ8Llc7RONXoM+83YvMOdPFZNriWxEGBBmgetICJOYPfuGOp9vOIbYM8Nge0M3aUbrjL+0BeaZ10fJZxNWEWO+xk4Js1us7gbn6NROnjiAJ3vDoBHbnDvW3lhRvONzid9ILhuXOtCJuD2eS7tYMCZq1ggrit19U3mi9gbdZr8dbGYEpKeGzoemaYbfo53d1wDsOJpFNivjl558E5Mdxi/qwu5ABHRbAZSaEj4icZjl5JQaOKcqbi1vac0MR8CbQyBP3UORhtmRvlbRjZ0ty7hN/3HNOM+PtveXh1tqtet12d9NQmiYlYrX5iw95B7ZYVwFkwIrnKV9/xL9ug8U5wseqwGAPGI2Q/o+GciVjRG++1vo2d70faNgSBnP7oU9HsxbTFBS9DSy4oV+uc7WbKqhjDzfmYQ3h6Zk4ODPI8ejbad5lREtdC1P9rfYJvyPBtya+iNAndVNGlDY0oFN0Kflvc44d9rN348iFAYlbhSBSW9MtJXpCqSr6oxZbLvEawz4BbNDoKv+544dzbkeUeNtxfIE9xRSbSWVebY2xT+fp7Ek8Jo8F8Y3c75jwNrIOJayIRlTSU7gWZUi6YINvnN2AcBEH1NHZfrGw0H8Pe3QBIuclDNoUNPVG6AovRd9xG1xWnKEBqnI6B9fZLRO1cawXfCN3k6UZyMliOhTDrz0OKi1LmPcM7jyq9x123qZAnRTjFZ6BNuWXnTNB1XrEp3kVj/4Qy4BT9RNnNhPUwXHl0ZnoBI+mv6LivYT9vHMYcNz2rEoZmqFRkktpIecZHW1izr1iMrSUr3LQKwlmRvX6gKra1r+k77LtoJWW9RSV2Rkj7sZi9SYkJbweZ5dT8Q+pUHjS/9cP9Y63VrXaEZjjvrB20dJPA/eSx8/2lz2mPYPYASnxQYA+naM8eOu8UAXM68D0BcSHiNWRE3ZI1qevyleFXu4/Fp2r6EccBSDd9unhXyWqbFjdDpDRf9GS8OI6Q32yXZIh/cTiUQk8+XjV/uyJk0fkuAoBL/uYgUVDva5FurOV1A6SInBKJggHv0nbg6ESM4/oza7dadw0WNFmIzRUk1TymeoOtKPw8hJ+KZ00tT2CHplNMr5p5W6i/WqsAbEauxWt+hk10jeloaMly5ydSU3M8YtcOGpOBb/bVMdNwQLwJAG6noRFF/6jrSohfKz7c/LRfsW36HwcATjstHdbUuKqvlddsqcJsEIVgkksx9Pg0EZHPZIt7eoXhCjWRPPN18P6GQ8hgdjU+6mh3dbu3291CIGHcNLk+vZOvZ1DQcupeF4QZSPBlfmhqgxQY1Q45ZjIJWOKLpn/8Z4O5eGo8cFwCE4ni/F0pbxozVf8jhIup4SIUrlb5WAbQsHXjJrVX61Xq7y3VZarfrURoLyOR/5rhMXm3t/lKYnj0fN8nafBUCEWHT5sp91HmnzDqVmfiTiQfbVg6Cx/Pm83Dpd4WXlFVWUAImPVhoYyfcCAr4Zm0gm2ZNjKkW9XeHKNw3mBME7TZ4OOfu4PdFRXCivyj9svoZldMnfyTNeWaO8x8o59URWBgMGjXeQt6+EoySvRpXhRMk+snrxchHuEcGxK2rJyXVXLjl96Y8fj9yXDlMvYCVVTfUW0QOLVkfzTVv/nCF5Ww/HTRSgPykFEK7KsWaATBBN265nWArGhpIEklH/pnwh8S/5QvJPU7bQn6UMof+uhOEfpbm/yxgyY7dxzP/zyhkYDv0F+y+VM/6syvTfVs1A/gvljDwr8z9Kd+OyVWM5DnH3/lsrk36X/aeYCKi3/CoP/yb23waoI8jL/nRp8m27flMY0P7P8u1/5GeR/zMMWcfvkv5e9p+evXn93u8WL2W+/R8R5jfFweb/Dxn8hGvxVu//eHTpf4VPf7pu/L9QQ/5vJjz63OdnvQXgAX/Bcez3ffjc/yf0F+iPe+78vYKfm+vvbsx8qR9yAGn91TY8pPk9HUT+0QDmg/8C/TD6p+FvE/7cXX9/989T/peQ8ee6CP0vIgPG/t3I+D3UBHrp71UG/I/64j9x6p/UwK9N/R7398fQ/nWqf6pvU/985OTXtv9lqh+w/nVP/wuaHv3/69rQ/1pF7/9aVchdQQ3I+1UV4ixKr+DnQlmEjvvAjPyBtNIV5T3quzX6PH6B1HvdkbkgP9rapUkREKSzNsgvbhQbSTLKutDbCLgmYt816wq0xJySwp+Wl7LXWI9sfXm0F0xpFa8xV48zSxfu26d9L1P16pNbKcqIRfGTwMj0GwScPdiGMQQUCPRBSjo1/JSl5UF5l7Ud+i3t7s109PTuwVPrcR9df8KPkS7plvYRVYzjknm7mJSSZTBQ2WsoGJyOiixr5S3+PeXztF1azWpa3kwpSZ/m67erp33MEVk95GI0BqI91qufwFzjyfvIQnv6zBZdMT8ZGvNIHfI8CPZqoEQDuZeU/7KsNhyyHqrCm6TZT3a+CTXU0iZ1my8r76fhHCB1uZZ3+p5kP4UZQWtVjqELOpVuMu/PF+mDPBS8a77Ia3y95beoD73lk8ZF6a12xuSLys9f6/c7wbEKGqUi+W1XOsHUciQSX5APhvqp0Aq9oQ916NM8oqJnSMfI/AvhrhuirhrDHtd7T72HnQYp74Mho4N7HsNgtfhNHVCDMRg/owb/zZU3MZ7Y1HaBVi52OTrIaaCPb04SkvOpxcFAJVd81hm7Y7OXIFhOEBZ1mXBfMJC9owKmuPJxMYdJRV5BhzmmmFOS9QHpYjHyPzsMfNFeq6JqanM1Nvjz1Rx+oakgGyfpsDe8vx+RdRDsiog6hyfBFh8NN0gM0R86VJEV2q4DzmDMl6Bp4LFX5jlKNJbQ7FRyPsqlDje+YlCcSRgfXxj/+97N+p0xI4c70rdJu6G/8NqPqsinpQMiHiSSToIQBsWyg6BulT2W7HG5IojHJUW/P0YJW9XUhZqqN9AMjURFy/cSht+3EBhIH4Ss8Kbf7hOxdRKNQ3QoHLQV0sxe0vz1qQeYluiSd
326CyXt3dLKW2KMlcY2WqxL+vuTBXBpVTk8/kOjONfiND/QfnBwwYfNcTrYaYmTmIpkZK00s5IdpVJ824Ii00+wIU6fNf4wri0OLG1eNKlUs1IKbJmjrVbHDAIx9kyz89vG6coF2hVih/Ahs5OXybdkdLrWPyB3Wt43rcgrWOjLL8+Cy+QSpJ6fCUl6RMUW50Eykk9zOks5vqAB/hgQWjNqyhYFC/jBqDkj8S4jjXn06a/S4i2ir+hOtGaudt+qy3QhOX86vxQ6Zptpwg2yDx1XN8yGMO9SNLTRQ0zniM61xg3Fw8jlzYYzMvTo3Kj4eBR/I9LQAuE8MPqcq6ppOESow+F9CtqlBX0otYc3ld3NZ4YqpDF5IspJdhW1sC9fOL+8k9rf0ydEcf0gYU3VX4LnkHebcnP+1sgQQ0v5VPkDV0vqRYuplh1iUJ67UKSWinLBwQ+VWXB23lA/iRBSDdD0p1wCosAGhJA9qz/YCN+kJIu0FB8lwUb+W1/l7WiYqq4rDmZT/N0acp1a+inINaDdeyYd8VxC+13J8XtILZuNiLc1dnVtaTZ8uYLs4Fb07gSZK+x0UqRN0l0nmaS8ZXD7ih0qLb+YUF3G0IrfKCL74gIFaF54lHs7X5HT2kYkde2I2CDZJO2TVNtGp2ix5du9p/Jun7XLuOk2prd67GST3tsAGx91AnZBoqDdtDuudYSAjSy5zdqve2X3B4lcvs4XkOxeqwsT9c6B5hzukFpS6vhRo4pwK3w7dpg9db00d3hnRPDbmh8TgW7hikt2ukSXG6hT2nnX9HJ67JEOf5dBBsJ1sKRQAsre218KcUWVmG4NZiYYfXSFygJJcm4c57q+5EADWcTTTfiATDTbSTtOcbtpgxUVqvRuUxWL7NTOMTzucQ28E/ZHpUTnRPbILtJmDvYqzza9UPfhuc285FnqPPdLHrcyTONj4IWJh2VBBvOmB6qVS9FhlCMZNRo7DETVS+qKDeMixjnTHYROk4eUTj/P8esJDG/biINocvx9ikNvTjZrhoe+dLZ8iNNsIrbsC8uvD5SZJwx/Bxe/b1fOGzuzdAjAbUNwkfhAVK5C+neGcek1g+TbeFOOCYOaUFAgJ60PNGJz/qip1VBzfitcYSQ2Nhken8S31gdAbvxyYqHxES1PQ7FJ1iC0kkjNpX3uGjeBpGdE1fa6nXDyogDzPEdJE21agMC+kJ2UoWb1vfrFbdx6uyLzbRERiy1ED2oESHKDgpKE8O0neHwlpx7qIII27ToJ1SMX31utIMAnziv8r7IB8wB732dvOIlsj3/RH3GieWuGnE2i/ZwCMCY3t3TK2J/9r4EBX8SbABa+kF9rCGqtBQ6w4ZGGXGIyN7jGCO1Q92IDj0WjdLOuiGvz1DqPsRl4g4ERYeD8TAgx7waKPqV4BE0q9xseB8f0ciMYmodj/dKqgbylZrJH38c9/n5nM92z+34lKxlxL9Z8j+QwTHOeRgQ8bEbOd8RS+PjDmJPag5zIXqYnGFefBBprUcnwuwLtvU55r0GeS04IE9DqJisRe7bKL0BHNyDPDxQKmTLfUiRLOaLfWqm9GBOV32DFCheMMB6Ek8urNpaCItIdmkFVpG9Xkn3Ls0GuUwJJTM6DQO2rbjRJO1yvIh4diJI7F5M0aRGVwL8/hlVfZmC9IkFxi3Ps5ibNxAryAqslV7L7wom+3Gk+DDm5oEC+6u/lbWc4J8eV5z7mecriK5tx+kwt9eocLvhBd623aF0Y9WdE0E7SJEgB1I7aHNugCrmGA3UlvQQjDb3zcSd4wSCDvT5PJX2jo30O3Xd/DAoOOftpqm8O6hk782SIbK0rhCZAPDvsOJ30AxQoNHKaqcyLZmLlH6eAPFx2LnIgHjDdVwtBz5w7JW0Wx07rCyO81jT1fpz6tzaht90AUkVo1A9rb4MCGd+/4KrekCTqZX29fx39yGFcDdYtfy26nm1+k6PDAvLJFx2NoBR8CVR1b4N+pWgI5wKyIu9HASXFtK3U0B/ra99aa43TzbsoJAQJaZuDD4D/PJKx46e4tnyw5HpRb7Cjhlj9b4IlIYcQRPeTydv5qfR4xiORlMiLXRvIn+MSBQYSjXkT44V+twaxqjv6BVECYvxa+leJPtPNoinyBc+dXu2e+L/06eM4E94XIdwHNp6KF1/QCBU4Fy5EA5cazgQf4LZ3rO1Scox3TezPUbSBQsF2tKJxYBinjmF0bYXCksBNEPOw2EEC8o1D3VV5MA/x4gpeHWOa+9o5pQEJ18mkMl/xUs8TkQ3rUhOmnSBQeniJkc3FX4kPTgsENaCK2NSg7J99c/RnQ7zoM5MhSOjraj7ucNCDVVtAZE7ztjNKfYHSLzgQ0wsQMQ8XNo5OMeiuacTY2PuPjWXgufGJBnj1EbUkoj7HRRNQWzGv2DbY6UsdsgkdhpWkzBcEimak2lqpyJPufnZoEnsEMK/f6gQcyHBv8nJ82ad8DwidpJbHUCZilX8N+LFifL6q1InVCRW138F5NKeab4mT/HjsVOJGmekV+o5k3zX73tv2cr84OGqVohR84fiLeCHNq98fnpc68d2BlmB+0eGlgmCj+AIdgJtm+7NQc6rO1Ziscy3CkxhMDLDU3S2X8geeQIHu1A8i5OQjyX68PMq+WyTWKmrjVJNSf83OKWBS7CZ3oXqHuIXAggoRcjMgMhRcMKrtp00OzALOx8BmfxNywMzUrzoL83TAEfmShgp+fCQIlPKQHfZwALqXgXcvInq8PMDTdCLwnweJFFCOXXPXBT6hL/ONYcKDWVD5Ukmv4BIwR1GYFDWUeBRM8cEoV9SmDK+q5Zcr6Vf34T7vqGfXMaOlRsX6ZPZDERiwstDee2WaXPa60VGnVV/qq56im5B1jZi9WU5q28S/+frdvPtjij30Tn9ReCVMvHmXBfDUuYMAeIEy4tcuH0N0OM9jUNAMjuNgZFwrgCBOQarl5r6BPl+76fFzaZrxI6+SoxbWafYnXpdoWhDjD/3+Sc7/dPlpBwc27aqkRXD1BvH82y6fwPdv/dxvSR9/dy+7vy49nbPdFkzkhC+5i3zsjgUKQrjwjEG/LJo8voL8N6wZvb4nNlZFAlVHNoYmwe9HcydqDNWWCnCXCe8yF+A1GTQ856A69K097F0c3Ce+B4U2WUtiiT99jkxYKamrdJdluOR5tsSFX41FD6lBFYmlS1OU26iZbOsd/nXOtLd605bHTLQOoyb37JU9cpfeak9d0UU+sXSLqTd9qbd0Pba4jWr4fgwOFAbl9oxv/pj77+Z/R4HepH33rKfbk5q5IiHEQ1/es+BDSY9L+kf/P/7/sZYoqKaogepYtKCUG3cVwZ518G2CyJ36Qyuqy4WuV+/3sy+yNsvx7+chf42fPnFQdQnfvWMf7pJBnxIEqCzN+WAm+2sGo2OmXPBW37Wmh4rfFHlW+nc7+dmN4KGxH+Ku0KGRf4aW0G0/3PSpr9lot+Yc+z8+sfqnJ7L13+2TO38/2bujQOYSBAazQWYHaOWBUWPy+pAq8uupXqB3afvs+WfGz9/Pxh074JrRWl2K6Fcc
MNDPmn5Rg1Rff+UyGNUce85ND7aYLQqsyixnJerp6MHng+lfp5CfeC9jUrqzsGCPr1uOBMnJruJLtDaNGvjxNW42XHYJ/XWOhKn8oDoKhe5S9lLVGWX4lbXDWrxY521GuvbioUEIflV7X8AxZIp894kEqGkT211wNC7YiEl2QAy7uHO+zHg/mAU4SCJ6qD0JfLw+IO7P98uhEgvYKnb8PFu9NulT22e/q4Y9X3zX44VXf8tJ6290c/Si8GhMCsVBAGvt7SNwLHOej0Lsl0lpHIxDZntNuWVttPwJ3CFjms16hsg9pnU2V+XZMBTUOmYseiftW7Vg/xE4F5Nz+SOU5ST026QwXqJ0+iMCUU0HYj/nbdIBv5JwVe6KN0elhFx4w4ce4NQkmbQzLYxMy5JdQi03oHifrSYh1l3UZo4TUqLcUS8onaYQoe4Vj2nltkjZ8Wxxjfr1yBwDnCdi4rSlWoeHGz9hq9C2mpNlrrMq3umKfhRTTyan47To6ktPnJYw/XJ0PxirvwPrTYHQsDc+QjBd4lckm1TBAkHYDFM5P9F4rsGbqGLig0Vn7F4B3+D0Xk/XI4unawnP/rdo24/ovqpdDd7HCN/sWyCQPn6AvzCjjQXeKwXQEJgQGhC227lGkF9IFcZWVf64UvFsmY2yy7LOWq2qFzql8JIUC2fVpY12TjJsmdt7dyOXWMDBToeOYau83jvaKls/fmOWz3Qzs9rpUSZXx316cXm3x487P109W3/I20qTCGJjXbF576yjY1ZxvfIiKYKfwN/0jGSCdVvdGeXDGsbJPb6y9i2v07seQ/USvVjSKHuDpCj4XPuaAvFhvvS0Cx1RRh8qusNXakkTwh1zM2s5SDbwjWjdEOZkGEJ+p0DttWlElQd5WLfWPHyPj4UaDP8kMcTqpc93eIJLwYI0+2bn08P1RP32cXg4iiirM4xYVrVLxKTxGgKpsx03HdaAsLJtrhpYO7ZMte5Dvd1GOnvN8FPHNyF4BGHN7VEc8hIRqFMgd+pU/hWmKbZ+tfF9c6erObfpvoLCY7GJUFzG42H38Qo7PqmEoJnVSXAZZGWXM+vh76fhkl51pbxx2PmTfZ6oD/dgP+croafSlwUc4Y+vuKI+kARTHmF5TYKytDNBKbFNe5WC+aejDoZLOiQKOBS/1g2GLfojRc2Gf7xPRVzd24XevgX2dzKRp0foDBn2vQsYSMfEJzovzFz1lqRFJTagSR+4Ye23ZarvVKJMpfT+1NM4G4/fALmRZ6PHvp6zELk4XzBLoX29a2rqQIQmK/H3eURhN0+Bs2/XuIaO8CnjZnQtHNuckFw0VnCq7jtR+8t3U//3GkhIKLbP/YTu+eAkSgiVFEPaL6zUy2AWR/nVW3u1lhi3LleyVoWDqe6A1sMkC7grfpJVtAOynWgroOpSpgyP7yCPq7/eGjpDHOsdM5ifr7vG8UqEjUE2/ODCIJ5opMBn7EjjuSkQu423GBkcCu6UF+4vGYO7qRCZPSKQST47/CvV1FvFa4/LHM/Yi3JeTmiG3Cuhq5Z99KZWHcwyjiKN6prmyJCKsQp1Fyfw6MKjG7T3UbMD16vfldx+DoBE1mthHNXUHenRem1X+EocY59ij0hSVLCGjZSTEue0sjO7wz7U2WhQk+mBHLTQvE2GV5w7oeLdo+WVt6QNeHyGpFi1C2Pd8F63TyDcwZ6BlBukd5meibOgBFob4+7dTWymlbHJ/uRWhhMNsfSIRIHnsZYQXlSjytVsv6cncL5zQdkzQY3QSxY/UVe/Au11BR9P+dLp2+a8kausKiBgF0mHWD5nx/D2khY+L8nCRkpiN01mWi0pc76/3sSyZEJzMyPOX9VRDpmC+6elLc5Ei4bATFRxUISoKeg1dKkCMvC6BbIn+HS/I5CMglIo7z3tie1e7enSL5pLatMg+3DTKfGMOsahPjirgMICxEl8GXHHW5M1ozJq4+4cd5oCxxci+3PyI6HCeHc7h65xNjnyat9w/ELwzrl8R/fy4cniPUc9vi08gnOUk8ewYanwfIxC6Xkoha6jn03Qk87YwOdvozE5hF+YC53He6X7XMIM7jX5fm/IK68nML69kIJRhXM8eMsfP95r9/DHQfIbZhBDs+3rMPlu+HIhfqhZtK+ia+dj2k/uA5l7F7O9uXVC31ECX41KEuGZHGVbHDeUlIWhMR4XFIGOfSPaSrcid6p9imgTT4mgJdHaroZDA7gMY3KxGeuhjGqCsipourV+iycLG0bxq8UfvJupUO2N+dFMgTpn7qU7cax9qdtHZXWfs+578juV2ibsNoNI3klwumcuTPbQJt9lgdVvN5FjrNPNiwNJLidSS96Nbj8L4xRDe+u5notlS+CWMSuhdMguhFn74r4/MdDGqJ6Z3COdhQAEIllJXmxYtvi4EPzNBuo4JyUfi/TApz6HtkjLTly+wUb/hKhRsrI3bsOpxAh3gvtf/8FLhch+P++1W2vbKxftyIGrqrIoDWSOCEUS4KYSvAu2Qbg6NUx7FJ+qNDr2XWql9FnpBu/7i5G+TGH4uUy7FYiH6qRhPZkL94+EM+t8Cag/N+m5zhWlzfgmsgvIl5eckMrHK5cX9EqhckmEbGiiaG/Qj7cP6GMujIwXCsETD7F0XmbHzkopfQUvF+tHq2nzxOajKn4CeVJFPZ3RY8iLcwKEmqh+wCGXkgBIaBvTz3mlnc9nbBBYOrymL9O+qDNaIAerr5wXqb02rqwH+IYs6cGykNhWJUvgPPXMCBrkr6XgCfobmBQtlqds1b6WI1BOoxlp+dIexHGWa+1d4tMKy0LxSfmwDd64mK1LqfEoDrHBwSHThM1NI0kb+EWIrMLQ8L69a3cZ2n1hW27XZtDlyKqfL+0TEy5FdC8WZM+czX9lWp0ro6yV7NzOPuZWsfIAKllUn39D/HpQprRtZd2nmmlxj/VQHKBP74sIRpFAMi0CqWR+WVXHrhAPcRRMk8nRbR/nNDEGTfw2ra8Y0lJg76P/bl+6rCkTA5yabu548RK/0JLIW/T+WJTTOVsQ4aIezsTYbEZsdSXECpy+W7eCJ6LcLE69MCbtBiFTX4u1xLUTpO/1cJLFZkx2+AjwZQsoDjIOJjdfG8vlO0RSm72lnZs5WGykTKh4sKN4gcgb+Hfp/QhuR2693CeQohz8nZ7RYXMg8+DX8nLMDrzR2bGEkqMwUPd2+DywF+whC6F2Fyr/ZCOAEvxx7JV2R9zP527YaqWzc1yyqh0xoSlT/5OpNJOyPvmpKGWwRIpUyYJ4KdCirdrCt1D8caSx/HjfDb3NOeaY9EIqRxiNbcJGNAasz9j1Cbc6f/gqxm0t1mx4HuxyShFerhxKOqsR9U8Ob8P4+45sWAjD4Jp5OadH5NO/QTayYetDYazurWMGS+2ZFiM+mkUg13MEXX3mohxugTX1r8AzD7RElIqYeO/jd3xLLQqsBLjyupSTOeT8RMZpmzTLpA0uHxnIs5pKknUgIg5C4g28qvyQ5IboCEm3SJRmN9WreLY0jjv/ThCnzG3be1vRXfP5prJ5ieR86q6FjVf6Qq6ZDcR
0ozmBX5kruyuRTtdxBU/IHVfpLIWlS7PZIQ0IuzjbcRErdc5TykZTNVivyaJndnHOh1f2YiL3J/pV+E+4kzk1dQvETqpl7rjTtbhwQbZbukvy/GBP/uFA5uJVb4b/1iV9RqItBuJeQtjohT+tXxZX83geBX0ji/1Lo0h7RjOmdcX6ACTnDsDNrpPANbpbfBcnfnlGuVl1wgWv5ZQg+lTpXCQT3Ivj1z6Eq3Nl9edEmHcA3fNS1JkQ3XYFIrCwhEcZWeYNqzvTQOKwK5Af5kcDpI2AfTPKgDT7eVVBaZusHlzD2Dix35n/cvP8OuPw0ca+UT0RjHQAs+ubkoQ32lqVONRHbkJchrJERJ3DF0zJjFIgu1mbYnkFtO9p+OqULmfcf+I3XCbGsKdJjxGnhlzM8uVKb+jNZ+27PDfU4ghxqqTWEmXuWL4u5tj/wRWGZWzFb5S/SNNAuaq1IH3nJ1ATnVtdQOfskb4sjJvEf2U0Y3Mdl6AzrUSGDaaokok6N8SnykkJljNz0uQgcRjMNQP8CUgl5frpAO+47sr3L/THLFIla+x+AkKAO5xu4q09Y16S9gNfwEQpEXyLs48fNvJNKg14yIk9M8E+WjSun5qqTGD329tv84P1+cGgq6DDxjT8hl8tIpI70VFKaXv7npKXRUuVN8Jek4lu/q7e0uJPe9OKTRLXMWuuHrdNdyp1hnJ5wxIB27w1dH7NmRfvtTw/e8Xagbdh+wZ0k8THvyih0SZSPok2SFUTHWXTM9ujpd8LHLU8XMwfIHvraxKdYeNdeHp8NEhGLQy4EYtHArXXV/O6BFhBm4s/ADPtKozM/iOmwsj2oZPhpt4GzzYhACk4DtBWlnKGf8QRs/hb3HMBubwkUM3DnG0zPc92Wqz982OxL5CPXCPJcR8jT/9S/tD5JEt4NDDYFYkmTX9Vag94MS7KOWPpr+1eZ0uxzzh1rABjyflyJdQuc7Ko+EI/GcErHCXCVU3lovl7A6VzRqTeCf+CFv9xiNKty0IOapgMWC2wUUx/hFfeRmsOx2OOk6vT1l1vRyaPzZiQhm40FOzg3Z8J/mXgTRBn+ndcH173zHWwMCuoGCZ0r9DrHneTC/sGWFKpI7iDcqPSR/wXsxXvP/rcKgSGcBB++JlFZ+npePsuqIj14PRcsGyB7T2sWBHq7jm/hwlZRqKvrp4YUEU8FhU8tIAd6dJ6/MmH+OPmrdeIXGiKww5OIUhgxuqD3eXwRCFNCngyL0higRPLbdes773fNkAfKTKnBvPtBqpZtwJPI++Q4h+JX4tlHkhoKRLEz5Vkhr31nyMGaRfswvEsFOqm2u17gTxyLF5K8shLSN3bHI/eAN1DJ2GvHFkQcieIk2KCaeDsUpYWnRRzb+22ASg8aMFypEvmzHPtqkFi6F7vjk7InbRhOvULAX7G8gWA2OGF8v0Hkz8sD6Yf4djxEizgdEffURcSP9bEve6Fhr5Q87gNaEkA1CMPKhk/p7Sa07WFBdttl4peCX/IfPIFTxgZZSaRMhrcUDK8oNeH30U06+lPFvXHCfx44g15tHjkc2tpzd/YiEBKmf9lAUaNGIoPjLUM14AHaX1P90s61tI9t0MO4IlAc1QrdjRYUR+uJZdnjOL1T786kpfMqdUbiY6e9K0a+X1h3WqGehZy3K9lgpUg4/C7KPEx2ywST3YAB/tcQL6W8Hzm3+RHvrbUup4WpiI7jNDIRk8eD+ocu5F5v5Jayf/izJnn46G7Ge4Yk0f2rs/+qKSttSro8pp91sJxEgf/65H8eVRnYWTBSTvbY63jHaFKBCYDLKN6GANYFUSEvG90Gq7kGlEzjDw3ffOpKXCADEM4Gz8yZcWu4IDF4VFR80XrR6h5Sftg78eix6PD7HnzMiM3e9sAaBVOU1pR83404I9FZ9nclsmcci4QQwUpvVUk8pDNEAB232z9p6lLY6B5QFv+1MjEyA97x0X4n/l4BNTGGM7a8/Tm+MJUHKntgQ3+FVyEIXrGBxGJUfCStNRKNCpnAnzOUoV6R6tGeh+UNEX6mTwWuUf4V/bUzI8u/ciFvsyfo3ya+Y5wOVC1LB5+hyvzEHtGqCIYQHEEFYpg8GEkzkD70CaI0Iq4z/6RRiR5jjg4o87nCVHNscIH/F7yj2iOathhay3YS5kQoXXom+CztinfR4+E/SAGkDuDWCqnemF0qHW7wDxy8kvN+gxDz0LWialPvzN3oGRVoU8Dio4K16+vQAiPB9hsH3VpzQicxLww/xmdluy1GjLflWdTr/26F1fSlfY85gPSx2ZYHbxEj+PXyJ2+D6OuYX7p7ULSDZGdVwvcRbooRY9MkmlQ1dvXSHAQFRLesYeZXsjLfPEDyk+koM15OVVLvu5iT1ZTP3SaUjPvb5iuMOoYpRtdllt7CpNf7N7gFPMYPUmI70CvzzGzQl17uuP6TcHtTchb+ShhN1GNAO+GUJckh7vDXWONyjaB56WqIfrcyNOqYzCYb+ziB4WsOceBZZslVb38zYyK0rmuof/+b9+C95UBrRlJpHJUJmnDn1k9UALNf65G8pwEpPTx2nuOdoKlV8Rl6JPQf9OWZlhui4e94Gur+q8MuVmG3+TYLK2XgL7UmYUleaBUYzbSIHy9czfE7d24nqCbheCNAKVX+E5Fm5+W/yZ4fU+UblBV86ricawQbyGodHZlsLxRkcsLJOA/i61gFxHb17eJhy2PKJP4cuTmEq9fqcHohFWJKnHt8ItH9uxOxVgycLOrcfvvmwMmt1yG76vxjjWDVYj+smi6NV7sG0j9B6SYcYA8kxhTnj7cfySyGMiw7Wo/46SyLKQ3ta8EdDJOCIl5LpM2DLhaPdGLY4SxtlyZpeUoC4Ny3C/UMIq95Ash2NA0I/guBPGgQOYGRPUR+IMTdqv3kTRULzEqVuZm9wv+UIlZxzbVTi6bhYoBw+Dtf8Hngm8SL/J9BmyoJfpVIeL1C7HxhzHUBjZ8YGZZ/8mmYOqSAXTpWYNYaoXJkfj9MLhhJfxV8dCJZK6UuQqh3au50/n+zQcf1+ckKh3v1lmDvs3DlKhJ5kEbtuNHtFK+/zdev37/L197Xs3YTyW+THxpL6mjI/hPioH2mOb5+1gmiATEpfyPIC11zzS7VmLCNyND73hfRXryvyjvgyJVENIsCxIfq5SUUS/6jD8Z2Pcl3YbU+oGtyGACyBZpmGFOZUeZVQsfLbAnTkD5cxfeZIvshmm/xr4x+zKeTlHyDX65hL1er1DrQoN0URspD3qGrkgVAY3UG14OGYgaDQlnrwLlhiA8fU0nYMYkfrPViXQvTGsuVaUld1sR6lX8rqmdFTBYGJc0jYiC6UoETCEzK8lc7wuGgBiBKvauGc6dUkXztCw4oHhpTyGsSGPT7s1haCw1E8+N4cJtksEDoTwub1tSaV6CHwB2aPaVqor/ueN0B5UO4lOPhNjk1j6RsCpm8MSKj96lFV1YeFKjjrVfCjmJ+NTEybwQY2jSTH8ZiwCj6Ly5+iRGxmP6v5OTXtDSf8wvRqmQlGC3pfmQh6qdsGdiCkulNl1o+Eaihplod9
IPFCCkuSTCPlG6KuGxU4nU9NfoT7PL95/HyyssSG+10kxew81uyoOha6Zhcbh/lyeuAOepDmn3MWRQNJcaeuxj45P2x4Yinue0YS603cEIm6bHgXaek9iwz1N+q1yGFqfwOXlUjbNCaXnRZCeYQxcnd3f3bJXUwZwYex1agpO062zqXo4GXxN3H8r+1ymhVvpRtgpPpfUF9zo/i+n2MmdSPlk1ILjuesroV0zgvJMa4MNqcmtho05/GG8irfSauwpHmpReWHpZqOEACHbiYMiKKSI503JXG99L8++TjL9ozmRtmiB3WC7BsgtRmQzMI9KDQyi2NcAMFBOhv4avLX05s1c1iuStMevJPNfh44EztemqcRD2r8uJ3LNuXGPMsVAkn9uzNBQfe1vUk9V+6ciP8KxkDAcfGNFrKRzLylyJuuZIFOSO8m4X1xCcO/44C3NaYcFEeZ1sDZzBxgfgCTvrTt2XzSX1dorsvTLozQs/CeoA4fEJNv9RLhvrLgrQkc50EljDWnclSqs/3KDJ4pcMWLbYjYwwKVRaNBnH2ZGFTt46XH+PJ32AO8rtgNPCdJ6lP7HaHjUvB3LUtZUCQjkl2MHHDZF/Dig8IiBRZpp3ZDU5jK6Bnz9yWeOsI2Wv46AfQTTyGqGRqnrcxRMVrTwhxWiTl5lsnncCqH+45I1wdQXKSI4TgI4PgDTTCevxLU3PsWyTsMAFSO/ZeX+huU0z4uODFxeVu/Sd9dt+t0EreIUV+Q3WAtNPqudgwyyzSONgX/UxjYq+fLa2TCtIej55JZNL91AgfxMGJo9g3fPTln/IpNKyYbIPiCVIXka3Jv6/ltpimFfs3UqHHXXz7H4mOIOtpIp2iLXH/V5+V4j6c5/H1q3lq0nwnyiHJsM7MxwTrl3bQJgouUyPO8mmNEgODnXfu7bUxmogfu3BW7PoPUChUuf22n0yFKZwSkISzhwB7NN+5DgQQFseSp874Nv7zytYCapBB431yhSlWe/cf4V9r9tPq7eQ91T6BP/qpYWuM793kq2qRP0NhQYjRd52t6yo9oI2Rk0LUlfayHlVaOmaZZVtdI+FbH3caX28xMt4TrFQiIaP1N1zDNi+E1htAvNG5EdG3KBt3go3pdXZv98em/NGNeUyGWlINJtKV+7qf74APZAfRX05W6i406ExBVXkn2d0gUFN2q8i+KwLa0YY32TbRYVR0PYdriDP3wNho1MukItu9ut7UKl6IfRX9GPPuA8lvcG8v32ac62VQXSkzZYWeEkXRmhCGp1oJz0zpzuxgHCbLawhavlDceLRxmN1qrIQcx4h2pZH5T2J77F4lBpYXT1MkhPpyLwRIklbnr0pHV8rDR+10t5j/ZN501Y+jHxjmZ7hmLrorhoh4RMOM+fiBIbZaFGhOmm6JtZ6QoAutHad8s970wnri1WnIhxsK+uMfBBE09VfXxppxmW6Lp9H9E93wxHp2CuHEsDGspAYTbYrsh5GuxHqj2UyQOkbppFr387+YR/loGMq62m1nmC70z83zXrve/pI2YiYHvas3xRv37idA+h+pLg/Dr4yseMZRYFpt9NPaMAAXtCWGfO+sxtCXboYr03xN3oCxc31gzwH5LqXuzV5WOlX0aN/PXyPVX9OjVvfdGfxL2KbFBisr+VFM98UAXkanXtWLnASrzB1SxoBN8jO5XLy2Hi+GB9YYiiO72mb9kLBxNFaXLGF04258GejR6NMRVbwkiBvYOR61wV998JnXuh7nHCrSG2a8/vF+Mi12Vx/JpZlgD+SyuIAQ+7odf8EOONNTPWtVtpx6VUP+RMuY0iNJE7xpgDWS1DZSvPSMJpj9MW3clLzAQajy6g7zJBoJeCY72QX77isLd6ysr/vq3Aw/5kUJF6XaxyWeGmjvyrZNoL96I5YXkKOscCkgPl/PxLnAQP8W0/qGApp2FLTYpdHXy0ncPsk0gpni2ok09e7G4ifeToODtbCAEaAl1gChfv0otRcxzYG28zIH0q/5QrFd9E39QfuT0hGvQXgKhjJvVdwU2j18xTA9WdUm07NUcQAFeadCnIlGk/FX5B7BgWOo0ZtADpQLmh2yHQMGAhL12TtPf+k/EfRnl07tUiXxKK9FkPNFZ2ESOENU/K2PUquv94B5gKPzRU2t62tXKMy1aXEM3nZGyIgmef+zXjh4uypCuZxbd8vyg6naHgkaE8L9s/mEnjm5yziJgGqqozXpl9t/RyZ0XMZwR1dJsMO3lM30CHgrZP29MpKgg9NSu2eKN1Z0cm8M9Orm+Jl10ozcsDSHZVQ1JwElAgC26lDjrmuP+W341N0NsEsI9IWelaZAeMXfEdVO6pvtZn03VLcOw1JazYrp1QL/EG1tBlLfp3gkKj0PHi6t3kSxhP3xDIGTSqmroG9Vr9lh2n9wdGh5TXXHGbvE2DXDdE1+bIElzaL+bT8R0cw7DXoO7uBcVQwZZLofGCgmcGoJwsEyltdbVS4ix+abJREn64edQ1OiP2J1j3V8OfT1Fnzy0lyP+JLkIc9nmTUv0MAoyVJO1UTQj9H7cr4lCf//Ws7rqTutrHp1+peXJNku+usyHai5leyr0yxX+u0Za0HtVOuUh5P+GhMKsO7lqRZrHVEi9b2wNkc7nJBPC/qG/IysPFHqMbUL/qSg+khdhRZDy7Fkqfb1OW0neoaWWbX6E1uzwgSyfBV/hXrAzR0PVfrA/iOgNT3TVE/QPzqzYUQEDEntZ2WsQMOIx8IWGCylW98SqFmd8K4WfXNvUMbOdEvdsfYAioaufmfAjDLvbVH90fUmyl0LDequPUiR1Z/GOM7QDHvibJ5dolWloavvVsnbuHuwZX2gvIS8dHcbdhnHXUKUtNCgq8SHAHLZfUNkBuskTdni4/PubOu6SLz8aFfIQm1cEdNFvRHHSHPxX8dSOPLG6FcaXWt5Jrg6W0ei6pPyKJs2+y4ieoXHGk/93A83TTjkKcgT+bowvOwNbsXkk8r2Z2PuwNwZyP/aGQObPCHaXWx0DMz/4hWjLy09LgujyUxWR2xDbCqbx+3hRlalB3zHzlJAapwQqfrWEE0siwVImnXUzEc9zcpur5qfOFdPaGmlCYSXRjr6MmjZSXAuiljSxDWFsPjcWA693vzIJfjZz37TBZp3J3YXSvsJo6WSm8YjDZw4vvssmcHcC3/dSGdrQPuTWZXpoRtJxYrmQvSxs43hjUyERiTi4cFG9ohEwNR8wasWvgxHlUL5xRM73TcXXqEkJa7PmZG5K6EZc6JuyTU2O18MXcdc6n8hbteoWh2GP1r9+OI37NKVWFhfbxH5YdQSgko1rpGjJYzkElbDoX8lp0BlWJkXy+rT1hdIWlEP4b/Rkx0ys5WdAUOdzXPD7ASIfC8OXcxcxzD2hrc3RFbyctPle506hypV+DmO1V1byCpI31GIxyNksaSwx9EwIMBXhv1e/nuoO6zUNyG0TsPJLxZArrFogFt5YeuVDcMzS+U3gE6Mo7LhXn5G2cQZhjHXxB2tZYyuDJoarOsqtY3b0C/BWeJnsAVuyOJg9kChdL7R0m/KDm/Ge6L2
dxh7/OlG8K3Is0mn0/OjhxP+uB/LpsVb7JHR/MALn1bdKAtH66wbTOLMpiJIaPx8huFe4Oa6S60HqGqGoUFEZ+ot650QAZLMyRTza6VPOHSOBmPe9hm/ycC+ZTUBaXNyhzP7KH2Rrz18Hk6/o1SDGauWPgXH0pFdbxxm2rrEhLSpsiuIlRnr94FB5tdaN8/JOIBMbxL+n+2M0OIbGEA/EmnEr46GSuYgz/gzt7/L2tP3p3sUK1jsAcGtSLwkhtBiqlNP4IABXgmg2pfAWeTgcmQZXKckXwI9SCZunTWZC+FSiiBCom2ZX2gcvB1Iqxi6Yu+EqY9PzUHyvsmHkSlWiVwEWlzQyBVyyJ/3GX0RnURdEjE++wPtHqegKRZ6FiqQqkFUgfeCNcNzbHDVBItcXlUzCz+FvBhrL24XfxUj76u/6voqoa+yJjX584I4JO6469IOg3y8WfsojkOUM9p7tIUqCm12YQaL0Og9UkVWRKnzsYwhHZvEr9YKYX+4FSBMvXM+hrmgZTBB0sKhfhdv11VYur+IAUiErTOxqMR6GZLObZqn0nFrUMdtPfjcFEGgLf1H9lWXGDUkeTgYPesSFeoO1SaQ94mluKzhyx3QhXbmvumlbv/3R9+poTfb6ZOxO3+xpeQdaXsX4klx/gXWJPr/X+cM0vRIOsD90wLF03DY0BkI9aZYVbmLM1yZ+rTqD4w5cbVeoUEXrl9HfgLi4UCPTr/58ASJRm8G0vFhxk8Q5F69nOsQLqRBwdENHUvtVwB7lH7q93eTySGtArLEoHcJ36NfV1Y2XYjpGESI65jgZTT3f/gDnsPSO2srCQI+no+2aZ14GklTmwoY77C4goChxLvXO+qTN5rnQXIqW2fiGopZaQp0fE0IkYn0hGea0b3DBFuQZvVRlE7fUv0/MsODgiuOvdvzX26ItFes/XVdF9JeOzEUlzsxUvvAGlxBVyK90La08fg5Ubx/UHW9+dOotV6hqu/pWS59C8UiHxWUHSzyg55nibJ5B2CjG1KER2lx1Nv9GcuTe0cdVJCyBGyqnqqnaHX6rvbbEIu4nHyQIws8owLudfjQyv9FSooVMj/5KQpOkO75ycd1HdWcbREjIHAv1tV8OXsG0YYYRQRgwTkgUzlHCtTyYjXtMo3J4jtIqLKR2fl0fGk9AQi0d3E8vq0Z5c4RUvWod2u14KZ1EKFsNDokSYbT3d0FcFBFMO7nvQ3EgcxPpOy5G+9dLI7mNfisJYwO/GV3FxDeHLal2ncQ2fg7XezEr925y6MGoxIy9zer1QYBfIH0ERGkPFFUlv4+Vcpt68TQfYaZqqqI6+FHrzIi3CGnIrMFW0skrEP3Mw3QjXzizGmOQo/gkw5TXKT7lXkPQLa/Q6UqW7agnrdRV+ZfMtp4cjKGuaOFw5nh6XNx1f+WH2+pN7pwQUyRURbU6GXp9raQV74FF8HPfi7ucNkhAw8srdxfdDV1ru9K4dwc2Np/D5MhwJbNop/vZHj6ImuzXAYkuPrnBMtxHa53c31Rh9H7SdsgOdO3rR39wVrDV5HjlnecT2FeyBf8NRB4m9vfsrAZjOqbbiUHaU/mdKqwSSFxn+ikCB8YPHoxv2P5NP5MwK2Ltm4Jk1OC2r0WjTb2+rNYh/RqZXayPL5kryx80DXZm3/UbvzczTKYtnsO0jeeolm+DF3YiooNQOlS+qRTluemYDhKNybf6XHDBM8yHvxnnIzJwZ4PrTJmHpy3Dzn3ikyWHpDIc8+57dUcr9gDDl6q/iTqr+ZvNYbLob9zRESQwfxO9zY4uxnD+PordYYHSthYosatKg5mX0gzOlGC/u8VfXsilVKoP8SPFmcCkRVw1ZaOhJQyRjQE3EJbjON+J5RDVd9tfnLO28KUvvBe4/pf4o/1ljCgQ7aQf/DgX5xU4/khEbYNUcss4Q9icEvf1vm4ll3YI+divsiAdhGG3/lJHA5IAcBkB6Hhui8Y4S67y2osCufM7ITzt3SgWa6UlI2vBz/5dXNqBy1EZpPFEcf3yFgg9jPo9wHY1e3e7fCu8elf54gZdYIKF8m2yJ2isGzqJQ5usuf5s93vyYx/5iaooujsmb9ujMTsLW15h7AMeA/5qVF+WdqoT+zv8g9ttpyzZ4auxdK+7GebWA6LgX5up2mjq2hRiUUrb57/DL7XynQ267H7+AFFYIKGsO7yI3oCrcYw0NyeyM6XSmRRe/ruoCIAjhDsOG8zliYZqhdlKjAml3w4Ob/Iz6HhRzhWfpaPDlwBgUqXXFpwfrrvzzSoIUjRxrZOVTbmrhbA2LoMJruu/VGzcLqifIvSzVf7HNmnaPPim5r4V6WNCtj8ybI09MLnPKfQ3wwkBvoE7bBn/mXTyb0hiyfdP77w77v3NUPooBJAIhkUyo7VjtH6B8AZ+A4n2G/tuitcNRh/KdPvzvPjn/hYdTdVRgZ72RLlMQTdvR7MerWVfLedJWQeGWpcvb/NF6weHomV09XGIDhxYYgRHsWK5zW7S0b6k4bqVBAyoZaS6kNBhTWVu5xihLWumHLOOmEUoHx8LBFSmskQ4JQ8/1orihYa/BDTx96z8xoH/NHPQeJf7q81vL18QND8lywGm90IAmYk9LS1LhFvpplH4OaERfunZ/WKXWb7Ox+wlUDZz7ZqUIIUP3UJNEx/H5EQro36OPaQtt50Ms9oaJUt3XIc6viof6lrtHYHJxztBsmyLUTXWkYX62V9aVSBJQzHtx4fLevHnZQn0GajJcIHaRpB2qs5UY54CPqy/gYKY/sbw5d1awT+zQbT7y5ho9m3kG2K2xCYcUU/OUskqknH0rXRMyDtBAp9qR1Af4APiI6/x71oVLg/2WiHXafq4MLw6tMarnFxRuGmDpkI6G40GrLzpRa+qRJMxe1Y7PUVi65IgMcbI1suKEQaJ/8CNrRzgICEzHiyuUOnH7szLrS0aVw3ueLS2rX4HTOxCX5eV2GiI1qKaGUhivDYvQjW8lSZ2cc7fG+o6IJ4dzZ0cCooEGQZHHKkY3D7nHBCQ0a9g6KvZipXurwRVb9hZjepZ1SRNgKHWjC1pzIiaQvjqogutzfT7hqa/gwbiwgu+XL34T3hmbLfZdeW7Y9G04O4uw9ELeOGQeJjE0+MPsBvrn/kwo1l+0b9H8wX7GOSqV/Wh2aZdS7TWRE0AuYaL2s6QgQYBPtAgxdZEnYxV5gMdn5cHzFKO/R0T3n9SBxs3Q5+aYVy9s/fP82Q3mlhSiSwG6JXNzT7iOGakY0El5wC7/e5ljVm7+yA8h15/iGBSA/0HJQ0jTtDRONfzMwWshQLKEFrbRljbPqDLfi3QPn31penKL+mZJ4A8Bq+P+4M+no0e+F92bJidjDklZS33Z0ifj2Vjahg16iuEOcvjiwCEb4UaXj6Ipn6FQa0TuxeoazQyqHsDjA+5lkFxVGcigyw8nz5pq5X6oKvPNwhcu2rRlbO/QvE6QR87M6DWGTWLqBtgXMFxqyDSMWQ19TGrbXstYfGr4QJMANfs4b+Eo3rbz0pz9j4YDZuuoR9Og9PRWFVjtsaFRtWfOOzaTR5s2S
Q+VDmtUcUwVhglZmRMgZvLgAQnpsvzXN824sCb7ofVdvZCqSzqFHrZ5nqmIr76PaO4si9qF6K89keko6SNYoOZZsiHEXQVXZ7o74xxhfnJ6xgi9jdTrkwYw6BfGoljMx5LXuaoWeJ6X3rcU5kKnWNj/uYjENBLc1GxKj+gq5k/vqf4JMmzeSPwUczj6XCbHbhKEl2dvkD2X6vp388w2B+W5/6KMzMNLAA+4DQPt89f5ZfcnbPCoZWVlVOjCxkNs+Y1M8AR6IHIAlpiMrku3Tjb2ck6ha/E0VuDfL5/s4oA1Gv3IchJul1MVIEnI2n0s1e37EW5UCNEBhQIb2xHatfyUy+KOvlYf90kpchWESVLaSXwDWlOlpiOPfv1Zt8Fviqra/1kHfiIrT3Z5fA4wqWKrLHMa1DuK3MM1aHo5t34l4xNTgOvwcs0mk1ZyL1YYsyNQDee+dqjlVzBneetpqvEGiTdfQXn4WYU7jOsbCOfvScDTiASn4MEl1raD41TpBoLTx4OVEdPfUd9aumNBncXbRBmCQVECsy/gMZDilBwkPmdZYraaTPF2sxpb1Bi/Lj1wU0B/5e39q3du5bQHjT65zm9Cb90xC+7sPk7u4gI6k+Cv6/kP5geu6GDDWmIHO1q64Jtg/M9kMJz/FoYHbLbX6LDSjh7gj4DMI39YgT+fC5DGoBrX/2Vd/L8XWS5kCJsqupX9xjTyxMZVEE/LFzh+A05EvQ6COL/sp+cU2woEEhch6PYGREDzjYzISYE1r0RfNMKmqtBPcGY1n4IpZFvhhB/hXqUTi2WKtJkUHDAW2zTklTsfbBJ+x4rQkFnqZUqKrsUFS2rTrcv/srUu1Tl+910MwG8RuZWHKF0vni1/p3qVslKA3dkdG1A6MY+zABAvLwAVqwQBs79MNoRJbuuBlZh/G6tO1GNhbV9dA4sMe8tqXKkcuKb+PzdSP6xBc5jh5eZKUZEEkXUaJ1wfDxqIud4f+1Dj1H0Y8sfeRsQIavfH2H9udjvI6vpopVyDuq5laBBLodbsJcpf/lz2ZFBoi2WTthUNTvqyX1sTbTr96PE7iT5EkyJ5JNXpRpeO/cjtforMThik/xc7XquXBWSV35vQiunyP3DGvjXLBlWG/Dtxeuhv7H5b1SpqH+Ijdm0r6WkM5kF5OdXF5kB6hUFkM0dceTc4a6QsJmOdhoDjO6NoYyj1D+7COnjc48MdR7j5WuCO+9SpvwEiIoei+K8k6fHh2unX0UadvsU49MOxPKkhcNCGiXCTD8bEUcYQOylTOBzf+NazMHbZiFwupUWiI4ULyG9ztkvf2rDCuSrwY3k8cAECKaIZkM4uX4Aclh3/1wiDmujsz7orwiU0iPJ3wpvazGiHha8UHO+2mP5cbykHaBH2zzECdeEplCPoICb4Vjujg9abiBMaFjXJ44NJIIpKgmhA1cIuvhz+F2yl59BBF2SaFBTuSJFTHe4tjceY0EupBq/rGemVMHK018rTjrUOQWQwn9fjR0kwcKkGr3n5q+tLa7GUZ0+LAHy86RakRS7lUR0kpIGGCCeROO4R3B5dSEAKssMD0AZPtTnpXUBlhltITZZ8iugIhH6CkwT4XVBcDUUVe/Vzy8kdjtuBT0vS2Jz7npZj3hr1qo71rUa7MUUNRYWrfLjLCg/od9c8fGMZAdvvEIQQLxNXRC/fntVIpldu01/aqthwsNo1rB9vX5yApTQVzRwc5cTrbIjagiQ17Py5XLRhWr8Z1Q6WMFG/Af/tL/LyDEviw4j/qEnptLX3niqEu6Cx+T3Aam6lE4nwamhbxeQCLxT+BWMNFOZbQRn/AfY21SMbpVC/yYv85uo5MLfKbQwMUdFqkApMuEXFTgVDBDMKMITCjOJPmJ5Qx8a1rAcFdo6QLV1++W7zcxaFDQ8QUPoiPBjTCsmFvESQ37nupw1md/AXbSA8/dkhf6JR53yuj6h7D7aeMwfRoXtoQWUpbUywKi+Wbnwa8iacxVd0OsLEAPPAFvCrJAbG/rAAhlJUGsuQTzN+cXtFHU87nQQ31UF/nNCdV+thDVsKzjDlwbUSrYMvP2BFXkZEhn3YBnfKdcdCrSIsKbZXmMSqvmEeXZfdaUk8vSRHb/lQwzTwsA7g0K6sySfY0GyedwCxnzyctAY4FYRpsuzrQkqHLBNyR/+Ljqg627/hkiSgnaiV+1eTgekS+DlgqpPg7oLL74VrenIiteYS/g9a0v+LqxpqpldqGttggzB9QZsMow5uLnhB6spwBQwcqLU9UJMTouFPAZHeLQv1ypVHFoP4+ct3ReQnWVezY5fl1wijzYY7Kgv0MgJkoJfcecxhvbY+W/Yek0u//dvaneExhOd9a4ohC8nnw9Ij33XnDphnpVfiXvNIcZ93Xfduk+kNtCXx++FdShPnHAnbCSDeQVshaf9S/LK+uetVJa7Yj/zcUrlJPNA4Rutf21ptNDl7cxV8cPI0Gn/OkTeXyYMcgSOMXO18oBHEju8Kd6QRn1B/uOPGjsMozDehrMvsK/CaYLHznuOZqqr6W30Dmbct9LZXXdmvkbQ7c/AZYOw7Qu0VLudsHZ5Qc0CRt+73kGid05PD0IkCt2Gk0d9zJSnPtRf09ylMq9b9H/FfIgxxR53s8+oHAeWCusRDmrO9n+tOzhdQT5RVQimfeBbXGWZqNSKRvWfQ3NsvbvSz7EUaP82RiLJb6C5xfJ4HZFHcccH6etU+h3ktsWh5+h+8BzXM2158/vbnL+yzMzJE6bemAG2cPjIy8/voFk5A+9Ra8jl8HG+NJO6oe1MzAFecui7BMXK0J/a8eIHhmTsBy6SRUa2OkTLjxNrKvf5nuR1+T1/Uu+1yNU2dbX4fpXzU9wsn7htVjffwXRaEDNi59j31TN+DErotFB7p3Hn+Lo8SUru01wwh4UDOs7svsp5dXWR0q9j3pjgNWsLDtC9EFMCz+W+ApOZnKAzweBPajcRNi7fIIP6EEdmuDZgrgb1K1Rq/QNsW49bIgkTMBefV3SAE0NcFwHS4H9cTisyRNEtZn5Xu/ceJYV/e59vDpzSLX1youGWwJwUqKToSLP9oChsEjYPKLk7QVbs7vS6Nxyi+dfIZjdSuQUPR9wotoZJ9jW+kmCXEv9yXTBSMxj/+s6BTbAwOY2ygG5UHoYkGSyPp+tDZHqoWka6SNaawmFLyGSiPyQgsUQv7dfQAvzqMAE0Ho2HZ4TSB8o6J3CMb7Vn5dW7veayy0R8uklxnf7mGGvePl8qd4aHcBY26oBVa+f8pE4rvALyd35AvifuvLmdn+CuNgLXpo7hM+J3+GlJuI5SwL6dVFNtidVCo7/WhQ0EarxPDprgU/M1e4cShYCH4/1bwWJOfxh/p9dZMC3Rmkfl9clM1G64f1UG3G/wDL3//M1R1M+0Ttrprv2fct/zRQUBvCwjqt/nrbvnJXyMoO25UzD+Lskgi/5dSp+60I7L8TpZry7WBPTc/dn90SBdRUsdrUYCLTNsyAoi9hVdG4PIpkw6N3pqFEw0Y0R4AFuPqyMlmrbNPmuJHIvz4f1O+UByXYyUC7LnY
6QLMC77rx8ouBkhnayMvxqky1rigsa2YCa8QWNeQxpr3T56/5eH7OZn9LA4QhCkl25OYP6mzPqeawVb9KEskMKQ6Cmw2aot515+BGhtgrDmG6+zmDramE77jWvdBPinbDs2TNCh7+yvVeTQSJeqhR+52Y0R+LPn7jET3+gUj4XZgSZR7MqJwt5OyBfyHL4CI68v2WYBCpGjsK0iCtc9F8QipoJeM3bMm9CZMw8FJc947Z9ZrM1XzAhfeHggAustg8DrimsbkPUDGa/1hPOFMDlec1EKfFNOHzbByhBX7j3vjskvoRU1afhvmEL8gnV/QooIzQR7eX83z9ZHmK348GcB1deffNld2CZHODNhdZFwnET/WtF9zWnPTdigvkCYUaCXvTL08MzX5JR3XS626fEjmCZZec5auzJ/kEf+qp/4gQUwt4ihmKM1j+icZEeb/4DQDZLQDjhR+KmmBB9NoBSviw600IFGi5QXt1L/FEHUJNB2n31H3NDwd8rMHJSR7uluQu15EhfB2rvIO1PNUyIWw3jRwtOucNygdLKoKVzuURz8B717cA6YGrgRaVI4eyfdz62iWUZzUPllvJeOikWaleMSDA4KOE7IY3+K8h4BR+32lPlhv8QWXVr6tlsrCffKPWAlqO5Wsk23g/intyn+ystYwRsLgBsy4c+oP0sJR4MYLH3Sja0G1qNj/P5lrSF+CKrifaYeELqeFmdxQpz8IBMl8PfkdyG+HU2PI+Nv4HWvpOslI/DLSl1gG8+86a/aO3JffRSfCHd2UkIUj1Zq2KQGX38u/g3nRANfLhEPzvfzdQaI03riTrRApvsNWL9DQU3m5SJ6mVHgf7iFAJcy8FbG9sqSsx2WMB4rUYNtwc1IW3LShl9cpo2k4RRDjo8JMU0s5Vnk1qBg1x/oeObqnhAbslGv/sV95a7Az39Nv0012uE9Zyj2zxmn+67qMxkffno8RJZfpAq1E2X/mCo6XIuIIp/ell9jNcf9ponGnuXSRC3dWoMYLu0PxwR9B8WONSFbssZ07B+R0IrxJpaNtBSpHT15WTw1lz2TidFf5m+yqartoBLHhYZRkLYokIHmczS9gfBdnA/WR/gsDKAnwNY+0u2KShRPI8PZic+ijw7phf1gGW21Vctkdv7xAijFcqfvVWJAxTa6MlxzwwWKPwokNuRCUW6TqesmuHCC9YwG8kgGtpifOCMio8htR1W9RuZlUIxWfZJkDYHQ/Xgy7EFdhY2HinclgnXLPN8AlThZr2czCQIVXgSq/XYLX5uf9aHo9gijtb/Ll/iU21I7J5RHRDb6zjC6F7TO/lJSIk1/DYvqA9r6kpZoQ38XoJy1pDAaWAG/dnKb2vZS4QBrjYcty64XrNU2Zn9P5C945fxGQpZfct52/WelF2ip/F/ej6o2CyccvxjuOHmIUROedP/KERk5sShsNvCgUJqaujLNiSz79ItyjW6knR/Z50iIfG14V+BPRAm0DdvR5WLuGy8VcN8K03nfWIZNmDhCTDOoD7dqWq2HFD2JOP2LHIeruJHa74i+0e3+WuKN0rSvLf604r9MMsp3F6H6N4yDdHrZccZJGXUdnkbJI53AjR7MXWcaZzoUc6HMUJVGqod8J++qd4fxTLkYHZWoHwLxACnS0eIbualFICrl3iVaRpingFYGXkx4PAM8jAJT3RhyZFwrcW6dQIKlnbf2K7xBlgdNYszrXQM5hvFZqBmrTYImfqP5+3C1I2qfAhfl29OxETAU3UJnQ8etrfZoS0PVx83PavxuD61VpHt/mfp/LF3XsqNIDP0lMPmRnDOY8EaOJifz9UP7TtXO1tbdO8Z0q6VzjtSSoQbN79L8HwUQpDhhvK563JOEOm2X+K5YRrngB774mSc1eIm/PtNPGB9UdpyTCOrviPhxRhgWN7cEKBxmm+swQ7CHLab7RusxHMxAev1duXWBc6GAbWmsPzX1xB85WUtFkLMm7lWm5uKj6Q5/Wiplgveo07PPwLjC6IRt2TdF1UY3+cWPtLWtkRO/pz7eYC0b+OWaA3r85tFnPwXcj1o7ypaGktw3/nE3kdzMYuxT2Az3K5C7KOdGgXZVT6mC15spxKLZHXjzsfL7GJFynxQOSeHJNeI1vXzSEfzNL5SC756zM391WLWBmX6qrK64WryoWK8dLxWvGMrtb4yjza8YFlrEG7l5lW777+J+++RF9Z8cu98o7OKfYRGTsAROvI2rStCJFmWjODT1w5fdTHI1m6cVgTBfBn2/jeILMV+p0j/6rfAPaDveFMYlWa26hv1yLRcGg0SktxD4NnUTymjUUET0W2fBOctCWcZ0228EOp9bXfgnhcSmF3zA90RE2wc5ZOGJG9Cd5Q6kdlGj3NDynOAdVK8hMOx57ECsRYdlC6e/xK2trJNNJiy5z18zj674jB3tyt04EtJwzA//x+UbuHFYZnohXtK9K+QgFRxt1MbGa3EdVek1VGaQBJu1E872FPgPd0EQjoftT49pxG0Md8QGSDCJyRsVrYAUNenDtC81PwwZX6m8OSWDfNWhiWlc+c2w07FCfTUKOICQk7lfuKYs65H/rriL/dFmI41nJXhtyfW/wpABVuWZkdSmUtW6SZvw2RJtj6/T1979Cq7ljAbPd2QB1D0AsyhwaRCUZzDOJsO9K6txf33uYJyOA4Gn0oSBRhhGjGOoWy+sv+mcsW+JwxFeV4MmQq62C/TmFWTjHA+95NeCs6Den3GvwNH9yVgdpY1EOJYq9WFMSaZ1C6x3DB0k41LJUlS+9CO5+XhV988gOnDgOpgRMSrKDSjdiFlqjrXg5201yAZN0QiB7w2ZmR1JkMmoXl2JylB8U0KQKs9R2yZM0BnaNm5lNplPS2hsqyj4yTsyFmuc70XFd9T7GnguwWZ6/jf56cJOgr42z+uoBw6erzK0liqcIs2Pcc+OI2G/zi7weejQz+BNszNYMSTYq3Rt3CGtOlq2LPGcIZ5mHYY2DtzIgFdtD+3PHdHYg7SqLEhHZ9re66WIeZ87Ub685wIC3vJAW0Mr6WhABGd4j7FPmRZT1/SXx6wzE7Y4cLpoDfytuz/NDseGd9BD/5CbelazyV6/RutLAt/pZ0G+RMsYWyBM67yd0Vg/5VMDdlwItocbPvsf3RHcbTKVHdptvp3jkrebUtIlhAh6saKM2KZwIJCJn6agt7xS8ho2QtExG1WyaZBqKoYyQeFPaSDTsXH4ywf5z6S6g7n5usC6mFiixPuQaNzyzObX4z3uU45KmbQ1H9BEVkIHoCcdTZO1cwJpwEct8TCFLiyzVDBN0+i9jH37xD0qnm2YeoG0Vk9RDAGgvzE2L4OvcAyI4K8jnt1Q/B7qzBnfnIbkNYk3rY/SwFuD9Z1ui8OgdwNR42m/rq/GWe/58p30i0h8BzPFR0E6N6qYj+F1KjvnlOxjeOCh3v1GIsSZ0+6jV/oZ6XjxXfFR7rMwbqglfE0dilCY5a2fsV+2ZoEY2xFgTBphUikOCvDsEg1sEMJB9fylorztrPE7VbVUh+NJJ2mhsuwA5HkcZaaou9z0runu6Xcl
/5WmstttzlEnb37zoIEG8oOZ77srtkspR6GUVC3vXlv1mbqz6zLR3q7iF7EMZLQQ26n78CHnShduVUR3qrOSyk4wEOvhLXKSVuEAB3odcj64TSYjcjnfL+7Ztah8VsyYU3yfIZK6YDnYnyAWqb5RbdjHGT5qPzVQAIfsHJoXyXk5hrsKMcyGY1wPeFm/KL9+RRUFQUzlMu4PC02qoKvJZBpA1OW+cRQgi5SJ/VKLPbwuFqiyjJeBsXwNYyqEH7mQ/MYcehkRuD6al0a3QCBSEqUtZrvRCB/vY5ntvm9o9HXKH9PLwJ1xgZIz8JAPBNF1CaRMn0kqGWZpFfzO1G9RmAxrU6WM10+GJg9Nrqqetp6goRDyQzmDZPdbuzWZ3zpmTMxAFsrpefgMG/RNnhhOJQG2ibzNNt3zUHr/qpryLURUJZI2HlZLaMmj901RfGuvfRDdvWTC9ysVK+asGp4j94jsFbmSde+euevmjOBPwaRT1KebBhTP7eqEEve+P4f31WfHw9WcWhITyzwIhkYJMe6mbZw8H8m396/df37+YuL9reYUTFBh9EYbagO5kRc7xrzzg2XL6OC67nZSp7AKKerEzqL89asjgUMsgvhaFhTpvcOBDoUFqGbFK+wgri3l6eZEHv6BqPrw+QzNlMqjgp1Cgzg6UL0mMXNMqLOZDW8c9iv8qlh2R17e3fArw7zPFvTeZ2hnrIm6SccERvsmGee7ETeeTk1DEsf+CR6xeWy59GmVDo12/VI8XLp/uvHXIkbxQnwGlb6Z6ZA2X/y8bYljAvqrx0xJSb4LstCDrrzA5MtPJlt00H0liQ2u/fEuJrbCwWwk+J8GZhIvmENwINzSyNA7fTHC2rBICsx2oFfVr/aDfbw4qLsgLAlPlgJNBpXjuo98+9D8ktn9LU64b25IwFS6rkq+2QpiaCFrsaZv9s4eJNFLfM/SLe5Vrml/iolaKDKOdw5stOUo4kuXS2NoAw1tdnVdwcwXRktme5CrNH21OHNmHGFel9OXvY2Aewrlb7AEvrYnGz9oR4yVc14+mvjgYjHkXvvxW2vfjj3BeKtjcLai8yH4xdlmOdQ7OZMX+XVm0p2o1W4L4JeFg+8XMYpbFUkhqQbJAPA96Dv4NuFkBsRHfCeloWrN3BQxN8yKXWu8R8pQJ09mUZEU86AM5/LymBG4QdsXMQXIK9vsE4uR9uwrhWNYzSC4Yfuz9zfcSSXp3aUmNUeyYmrkzLwB9MF4BGFC36kRcSPPf+j49nk7+SC72tu532liT4ktOtKr8MlX/jZx3YivhNHDK+3iB0Sn9h68Y/3XcVRhXGy/LON3iruaf52sALMzW48jTfcCb48T2sbvr04P6VswdN/+NZYXjYlwvp8GxtYsso6XfTy0L3vteKOqiIyCMSVnoDGvx+ONUVZ8sIlp9K9kLJOn+DUcyBNRFy6m3h6DqT+d/MJ3/62mg2XZVEP2Mg8SV32dD6r4Hjm0ZWcYIn8zH8HlPuoA+rEQ70Rdu7XKggWbG6B3caitVb86zc7tmgmeQNHx2RYmwOIMvr9szo4qUfh6sDLqZmDd9Mk47hri+FCgYMjth/yOpkS6BvAclHDWDDAnGS9JTW8xzeTQN18XeSboVoJ/vYaucPJkB7HbjRHrg/ayv0uS+Szdh0brDBOyCwpFXya/b8Fffqt9fy+Yutd2RHOK/wzsN0v2xz+oZVrpeN7rznYWfNaTi+H1+Hspcp1V0zAo8QRuyV8PGmU+u7av6ykX8CXod9dzu64Wg8nw4hWjHMR3YtsxgbkszEtxtPxUuT9fFqPO2IydXI/Gvr7SFSv567xeQOMZQf1J8V62pY++7Kub3tOIfvUNklnr7Q3htsRjDmHYgJE0WiUve+LPikiqVbFiMf5sciR+lfsbuzk/equh4Z+g3YfO9MeeavazBSZfV3opnKIKRmIx+nJMq7CNhPvgszwiCbvj4eIu6Z1x+F2yND8d0PdWBezOmke6KpIE9svUheY6o9d2W7xtFpwNbpCj71f2d3TaBkht39RFUTt+0fSDsp8/snoFOj4vbC+EAXo1wvwZXo8LG6/wgwmDD4QENnkTh4TpT6xuSoQGySnbaEXmlVd55vl8/G0f1hbEUqvF1yuSKnv/f6vdroorygMfq0EL0a2Xepaqdf4ME4uTCVIpiQfEAnqN9fXv4oQMHGuq0h+f6hkxK+SIKFKiLd2tENvwi5v26i0lBP2aooakHUv+K0xDQTea97QwQmVWLN3Y/Ow7xh+QWKZceYOoHjK61dupHte5NL3sEGWs5wy6rnTzvg8zR/WYHK7S/Y7XUB/NrH1UV6VCsLmyXfDSxJfiV+LgdRVZ4Ph7bY4Yt/HkFOmXbRuCi0hk5Md04KaV4VU/tqRXUk/Segt7sffAw3VQMobW5FNP39iiRrKolx8AS59wsqxYfWVSFkMeVR7ciZZU+l7ftOisOExtUhHSNZ1tKLZpBEJAYDY3Y8oJXbHcK3225uS6lxfttPKEuv4c69+YEALPK6m4/aH4WC4yy/wBuKaSwH6gNIqIE2WVvgi1f3BtVWiIr8EK8mES3r7b6fq+9vag3yQ9vvRbZJrE4bfUglEUQnEHDT69HdN05Qqrxb1jGKLPWk9f9PueJTWNkfjSmdt3zM/9WTkra/jYdoGgrwoGD+UnuPOJ+TJ1pSGkzTlttNDLlrdUlr33a/w46Wa8rU34jHPz8q/XOTkWovMxVBvpLesTW99Rq/CZEMhvzC/gdoFVdP4m0b2C28kXg4YmpMnaZbQzIxvrwoTfCFfG7q5QRCtFvfv217vrPolofABdmPMi8s+HDkI+M4QvAnbsAHPEUGcnpfMIyw85V78vhW7PYgDHKrPUFjJCoWZIbPX+jH086xsdKwPu5bdCKd/4pDDvDSkAik9TUTPXTijmNAlPTAYVcR6rf68WOjHgjOchdqt+3nBHjacu4DiYwCaReB1Fg0uJ5LEbs/+hC7t77INwi8pU1ZD2w1cVoa57Jw37wLzKj8o3aDMoFJMUKd+8+pwQ6Hn3MfTQ/ipzMm7ACNDtFf196yL5eEMcc1wPO+4nWWGjX/zvdAYXxh6aaHyzOsqHzBhTk2Ca2Nw+Sn2eH/nC7SVRJZP41rX12eDnP891XozYY9aPuCnux71UK9FnipCsfeM+XJTyKw/8m91jRF6orOXsN+SzjvvCxQoaJ27aBPRBI8Yrt5hoJy88/cwsmvmLP27McLw2JWt22veTAcCP4tYfxiTwAOVnh3/F2gd3vP5vlRCJ0eJ6CJ06jUty1VaKsUEDZCaF0+903SRsSzbMxTK53O0LaDzfRb4efHTaLmYa8rI9OELi0qyYVfPeDRADCOZV2MNRqhJiVGL+2kczK4dZ9Qrh1THvZv41vMJx1h3OKHqlRB7AORsypIY42kGRYHq14DwuG7C9VldhAfGor+eeQT38asmIZqFqEjFOGlxVE79vzXQqnR4/LA2B08PkvzCvyrfJZpVYppzIqVaIomWerpJGtDgfvIisidRGYxe1wUcIHyfhWcWSpnnW9eMDuSoAjTe0TURkGymZtUk
/u78+VO2Xvfq17FKmgmMVWTufPGJrviYuImNlslmi5uTJ1nSFJIh43Q/fCtcFx+f0ZdIcI4QMTNtTn2NmBZia2z5hyhrgKcyhGnCwviInSlcQbQXo9YRonTaw92Q4eBQoMlt+GUjr3nIrq75iDf3a6QLT1a/3OmPfofRRl9Xh4aLaOwqON9xuYtRRyadFkv7TzCUi+4lc66ddsPKOUVCMCZUNbZ+TBrTiStIxkJa/gNAo7hM3xt/YG2ongw6T8PLSYoYlRudL3xJJ/ZmQQlbsGZ8J8hXNJ2z4LQdNhUhxQDwrEJqrQnPVFXSbxO8QSpwbd1xoNzAYZyiQMwuSXg+tc/UNjiLkMNEAkZZbq2vjQRo1pDwUy18k0jv3rUxbmN62wpxjFmtSElYC2ZAYSL7XPN3nmf+eQ5c2m2a3+hlx4OTqrFXtJoWftixyluKFQtW+4Mu2eWNQOPHXm/tuu8NrHbj7nmcLyg1m6C+TD/7pFdfCXLA9pXlR2K8v1Ksy3HDCquz7ODHKDuLG6EZFn4zxcQtunwY8lvUwKy9fRhbjPGzr3Hr+SMNYLjdFxw2yvpkEFc3m+vDPRsG5HhHvtIjdILcqDhTDEWZVTn8jYAOYhdvVM8ZdYaX9C8FG9ZaoPRfHABziQzoblqYjJkTSBflM5DRB0/8WekK0Xjjnrho1cSCnbCkXiuqrHHzuCob48zsNOssPQKa/kDwEb+3cNkqHyrm+2oylWRoP0QdR/qlv61SWtfn9+tOoZxjdOAp4SEJhlA2jRAohnawbczJ8qFCVi0inYfyUQIT9lL/RxiP6Kuus3Or1CQZ0JbxcLQ+Fhy+sH86AcCHJgElFFHOJyiebMJC7+w07qIaVTQmbfGWwkn6gtOmR4IHJzMneSo1w492W1kobkXiNoLokqaZUyjc98Mm+1b1J/fY1f/yopmMXyvtl1LwdF1TWT5QSJJ0PpjR81Ltv2tsmgFCUB9BKkqUsTjFTn0Pc3zXZ/7pxJCT8qd/dRAm3K4iwrCU6QE3ZmzfawoIm3puWEVTkeS3UOOSrLNRsU6fpqy+aG2YdqQct9XnIFZI/aCFnI6pZWxi5oRchNCejcQmHWRpKWg/9qHywj7PKNjJtHfvFStYxwl86bosDVKG7K33Kjq1bTQ49u09Qh0+zoAtCqpklA92LZDP2ipGjeliUcpG9NJA/dmOAmyIw43ajbh0l+Z7VcXbYyg4WNm5v7w3HfBTETtALgvYb7Gg41e9y5cVaFgIVB3KjYtlStDtauAiOjN0ogi3nyFu24R0u73B6X4rcZW/piQPqG93sep8LAGFxGvy+mV/1LSBgIg7TMbaVqlwRYrDwhXk/Us04+FBoEIS14umeIi2E3EnbVbTHd4UtdkQIpUthe0DuFWqiPbcJF+HofdauyF7f76SZAHcJxVW0+34ZmqVEOlq2H4SGMw8WlW9tvhheySuOrRqnCVyZThEjrFM7pzm8nkv7xQl05XOr+Rmqrzi+oK4eAp8PvMiS/cHGPPn4QVJ3Ne0lgmOv6yoP2mLUyydqJTBsJzZpy9CFKBJSI/Y5N62h8CD99XypJ2ZaquhDMjOCakpx+w9ZildqnsY5dUMjmPudgjjMqHRz3m1/oQpwlBx5oDTr3Idg3xl17f2VH+VubYURai2aEJMUGA17cykX1SiokG1CUR+0NoUjcUlq2yKSgVs8bz+epSk1h42AWh5W4p8zZ0LsxBF7lk4sqiR3ckzw1XDk/VZVHwMRMdzaLkuyC8rXDCuJC28CwFkY7Xc44s5GRfusRHtziChNok6J55oSHvcoKAEY31mxSpk28GeoESKIOM7OXiB/rlQt2Y08Q39bYsOCem9rKY+JJcMnFoJC9EsNWm+pgyGSpyPHZArDym4JR5BjWu62HA/9ulEE1jJPBQZFTdyMTTTHtft1V7hIeB36tvGD7wYWK3Dnph9u8j3br901C5t4rud5jm7LoeiZIWGL8Z7khbEmX+qDLdeFQy5wgF/R+RzN6+3YlXVvhpLfLoNOqwQju5SFwr1Kp8QRaUxk+dzlSbO/EhjzmY4yw2II37RpvcZwFVeo3C8ZrHvZknxgJ8i9t6OZ6M5jWiC8FA1Y6TY/koqxCzVWwvptZUvwoHugwN6fDiDXTCNOvcti3iBz/cxK6C1YLfHq8qIaOXqJcs+y5pRJZC7FMuOQ7g/zZ2ZlGl0O9SFvdQH5NQhm/s+2TI7jXIktDnY3nsuPpOoMOOlJeUsaRrGgfZ9st/I3oKtRWJylx4GW9OKlYliC6txD5tvmHn6VoPbg3OFGu2yWkW1534Ag85a2h2zZ30F1B9Xx6lNjGsivtUiKy4g4HU68PqkN3bva7qiTS7414vnQGrGkW6wvB2IVeSJdMYP8lrP32/cxjG2geJ1g/ySaIprw4b5zsQmL7FWnZrTc+qufnTf6ktUcagCQWUXW6U0PkmYo8OYwWmaX+szhfYAIEJCbtWZDfAss/qJI68Oxft8aCtvEmf96qANNRD2t8m5nG0v0Q+h4EpOEQpoMEQdCTO8QlIkCTb9TmGzHEJMgozDN80S0CJyjyPLuL6H7iwl4P2NE4WQCPCHxOIWL6M2cyghaPLE+SzMqOvxdghsQD6siGvsSCo74rG3Kwkd7FtsqfwN3KlOfiUKDPtFQhlWe/OmvQkCR5/dlVx+NNZjolie5Q4HjtYZMtMpDjiZHaP9PFYyAfDP4ChhXiJLQIvZ85LkpqRmlJYvSbwCo7vfGto+YOaNZ8XqLFyEgh1hTmE9m9pytca7NGg+VOF+BZDLPv0f4HQi7FQRI3hD7pQp1NMrd96uiGVE7WrEdQL429EOCfjACzHxon/cyoxig/OIaLCsn9ZYswkw9v3AvjEbx/QmPt98XUW3lD4qXYeh9miez5h+XcRX/wF1HQagfTJvH8QFS/hiKI2/T16CP0WXcLvh4gZ/8O8eQAvbrNuHEaXrCxXnemPFwMVQNhknkD7u3Iapel1/9bBRz2y7GseTffYtArAu4FfQ51URJQ3htqbpHHj68tOAIOwnpvV7Vuh1vR4WUCaIh0amVOZLSPkB1b1+RvOM/2U65YFN+l4sfu88uUl0QVFfyBZ7gOiV7+mTB/yxKjiuCVKLZLMX4evlN/Ptk94XyOiNZr6EldfIBzCDs+XvNqxEGCjh/QyXA4tZTuWl8aQt8O3oV+aHhn4JmWQ0IKqR0fhnXYPgosX8tP1z1eX+QH8oqAJrIJ56dGXklYXOzZekqv1n35nFDbEMF66+TRyQITPrpStbMjvfDesx1PZbpXoo+d/FxR3tV1w72C0QxmTERJJVT+KI2WxAcrkZwzJkRWB+d4bG3+v5AU42S1GpjA4eEtu9ovSCicsc4J89cYZVOSG1ZVTc7+/YcpmvAhS8B85u/B+Shjp97nV04NtaYE7pOshs1MdFiSkmjaoCKuH1f86feIK75phrbqmH/ziszJrxBLyzy9ViPBu+uQKsRaHepWZlFcUkI0KiWOAWtQyt39Lli4uNm9XASavFnh2Ziiugg10Qp77ePQb66X8NvZ6x7MkKydWden60AP0pxoH
d52DV7+b1p0FFZqTWWhs4lf+ltJp/acC2tkJaZVS8pcpaSm0bZ+w41lFGhFCMpBm2rrCwqzTU/pMiXC9DBbUlm7Mff6WVxIy9senDQfC20EV7zf0aAN7/S/l9Yl1m9VaK5ACwCLs7fYPcwzPJTQxCJDQfBHnmYfLW6MhsPC55OvPz7kLms9S5crzdHplE5C9wD2JK21pMcWmbYgjw4Ykx4kRm2udaA+43gPslltXx0KaOHj4BVbfpxsR78ytJoxgMIYWo1APvSukLQ+UER/42wvYuuLk4uX18wE+druDMZ3KeCgV5WQiZ/2lFq0t81L/qqQ0ifmjJbf+N2lOR5oIQh4CJTCpyiLot2WCdKWeA4OVyzVFv+q9Rg7Mu+15Wz98c1vo7jvY4/2nf3RL+tM9u1cMaaNG2lehnxVR+EEHS83C8IGFYlYZzjYb0+FV+nHo/zz5+WOwTuvkFvPKmSMOW2tzcXeXKysVLcR86wqGvYxt1VjWx7Xk7dnf6A5ixDfwYTZDRjEsNJrUhx9pVURjQjnx/JaiziyxGvh28HbxPArREWcCyAlbKFFAqnDcaURhU8GYESj7dNDtTFtyoI8mRSPW5nNbrVf0sVopyS83h6VIWPg4EFxetsmhTgwKO6e/4SxWmXhcmVqMREebC/hEV38ochlpsvetfhmNsR1D+lx33DzMeoR/V6mEaTIIimak7z0lxErxnf1Pg6j59TPt0P2xRzKdajFhgRsPOBzK/bi21HF3bdjxjreTUTUjkD+O78s7AtvOipVy44JV3QKu85pq9ESCQTX/Zx0pu/5gYeKm3tIU79C4W5N1/3n+MEJ5W46Rt9K2VXs5kJH3sPl21aQKXzald36G7ni15vQYy8O7UfjOcX1nmkcY1QEe5hN3HdqQb0RlWeux08CFlG+6Ph6OdLln9JZaHTpNimnl0uidRMuvZeF/aDT8N7KPD6iOVxrGrToCWjadgE9Qbka2dXj62y/fyavuhXgD8H1qJIF2NDZRVQ2taZztnAHENhvEEhDVKd3pqETvEVmPeA4nVZzlusxzk8ngH9RYGTt46OmzLE0Nu4YljtyKu/QV/M7e7nA0TgS2Vk77NS4Xnym+a9AF/9NZFtBNf/XBeDQvFhvt61Y9ea8WArc7z/i9DdSIUfEOjAnmSarzg4ha4W+vEPM3IbdL/gB2YoISRVbtxOmNn9ZhoMFzji7S6OqZHO0tfW5TgjAr6Lj0kWrWUVPoHlaMHOGGZong2o7BRew96ZKvpwpUbZqGDDrC/Mxr906Vwm2GHpGcmI4bktEnQ+ZvUWX2Ndy+pU3hHW1yRvmULw0B+Ifawojl6vTqW2IQvbB36bl7mLZbyShOap4bBTKQLISOF/fLCEL0pOp3U3v+QZMcaswZCGXwqm/VVcxk4BdaPs6koNDjgu4QmJNVsZjQsGjZHJsHZUfXQII59tP81NrhclOG/YZyuBAZlocxiPD3y8ipM+9U3rPj41MIzCK0At1UeaC4m5zK2zteLXB6hWxPZffWoTgbBQj26/6O0bBEGZ9317rOFGjfLB0FMzKBwFKwT2i3r9YpSzZLZqcM5zZr37wV4ShZEaBU2nFcXEcH+5NFqKqH8o2qtTQGkDBBHwhfrgwWy96A81PIDDJKVfWdYVvuUbYWzgVyacAQVuajFbLmUMbyvSBm/xKuongUwquQmDxap2tNGIcxb3R82exZ8SikVfyyG9o2RH5dpJPsfSfK14pq62BDU92ye+05oyjRUCM24FKNn9q9HvoN9LFytlvkIMlPbTGNSh+II62KVP5S4q0ifpPQA85Tw67n1w4fNteBrt2poYtx8mb7QsWjY/dX5RHWSDKuVOhZSPxbmCEAKncuFrVkGVtvYAksJ9PjUwNLnKzcwPS1Eoc0uullJ6sXQbcOcMsn+NxVJ8u17r9Eq9+ds9zujMMXghmjSr/Yi0AaPlmAETOej6O4v88DUNi2uuevHeckPbG1kiabkh8ZrQjJPtnKc1qdIuQDqSFkatEavtOVKH9Et6k9mfOfzmbKJk/nhth8LYoj4/iZhk6XL9OlADLSFKpAmviNLAT/d2QA/bmbcQeSXS7dCEIeb8sUtaOyXP+++b0dODQrPwvcddra2IdcmiuHY+oX3iymtLxDdd1DftkqU9pQmRWqJwfl9Gt9Elu64/iL88Iby9duDSCSInywunSPI3/MdvnFGMwzfU8aFy03XZfOU2w7iHrS/5a4edWgoim5pA9ifjHnCUfx7g2/qoHcHtWWmdWVoDK4hx3dKEhGexKBIFtLbAFM8VUYLpoVypfKvelH5/A5ZxLctpze/mh3wni/NrSaQXD8r48Agus1weJcZnIx5+lY/dtbWQpEBI+aX9ByPAa0pkt+DM9Mev3Es9jlZ8E7YZwij5WHkxv6pjNm2sCB435/QxfAiSEtWd/junHEUs1bzhAvRHwDBF9OlCHNrMs0+aZVV0yUWqdchI0LlKlbU0Nr+iPmXJ+bh95XP9BqcewF2UMQKZX4cv0O4yniV4LQSpvdIgwLmQ1GsbBcUFDA+IhlAxac5FYLiOYMRrFP5ZAWGoLKI5OV1efEI16lZdI0JYR2Ofm1/8DnqMAqZ5QcXr/KhUBgXPruP93fDJ6tFRPWbGeYf9iYRhSloEcsiXqlz7O+P9OrjVh9LrD4s8x7aGj2mAakWprviMMO3W3zWSBz7rkrDYKFqEaCLJeBO8KckUUO8eDypshHvocfsqXGZUdsLl6Lnu61BFLhSY5f0EfXIR3IYVY/MW5e/CLiHdAh1ZY0aqTPowSRSNoVXI0+l5gAcQc/569gwBsHjGUeKvjsVjuJH5GIO7mpaEz30qkyQ6kXItaJr7phvaB3mimVHfe3grlHcL92V6DI6xQK4gwfoegZJJ1vjrKfRxbAAJXn2XU4hkPER61J7PZsBlU0ELO7qmhd+sZDwguMfTPtzMhRAcMKzs8W7F3xkeUzIHqc7S2dlwoXaMI3GjM8zOf7865/CMohRQ14ILMGGSKfDe3GIRPeEPpGEKn5U2tZJAy1/d0kSTn7zOEA8A3jJhpp/QvOLfOgfqsPnQWaIhC3MW4NHuAPG5KCsAmBA5zH1HJASJMpusMqS2WQ1uuQhpE5pAiB8RcF5cVhg1CAuM93qcDHWlUpFQ4XcZIWa5BRXEHMs0KPeA4ZqtSS9aMwi+J5C12IIYEzbQkSXv7cBv+3lLIvXF6fHj6oU1i00kwKD8iG8dNs5fWE3eaTdoxVgO0y3MSbqTmqm01hI/PD/A3sCntnuC8m+pYPmV177icm44Z0WW0+S/xCAhhZ6/X90O3gAOr1dUgfQVOI4c9Aof3mUtnu3JPHzeYrp0YQwTiFbX3Bm+FVLTfZqh3Qn4WoOYCaJGAuf7/k4ChFHICX7MCn3i3yHd6Vz7FcBPOKNfG3wh1ldzH9wJiseEgykQv82RdLy++TD7ZoGLzhsjSVP46XcpMtxS2FomjLFoNolRVbfMfg8HMiavd2dOahiP2CnqZ9wrXg9i6/LRSGvj/Cg2+Qepjvg4FpAYWGKyDvfzIM+l8fLKAONyTf9/k
TTzkTgkWVXxVobZmS/bxfOhKEqAzSa1GFDdnd59MHXRljKv9DSrUnQjBZlL6G2rTocfMirYvxaGXTJkaQIsGZtzC+Gb2RrDM2JlW2Dq1D5Fe7GlDaQeVvzF2JNCtgzOTrae0JKP6DGVblNQivegmM46qVdDOQU11duXRmTsDeHEm2gqwNtAb17KG30scQ9VFx17zRaf8soXh9Hdt3mQ2P4bJAYOkI8G6jndJ0Z/j28TEEeIzl8U1k25Y83HlbhLun/7jqqyhijG6HPCvS3XulswC7ZjUx+ZJGG2gTaRODSHp2B+AEAYu+oJmK+jAEDEPErF0OmSRLkEHQpWdzoOCLe7h99LUYRASULWl4xZ0zDY1JnEQ8Aptfn62wHwq1HJ4rIw5lAUe73FXS3xFQ1dauW7E3qPOJuQCmUcxoCYR11/1sogAzJyXI3PHlTZm4G3sGE87BQgjxa7ee7v4zebYGfLvBPsNuNWavPkRbAbQIm42wWqnj3meaKUcRtC8gF6jwvBwDYkgvrdWFjgAtjjsrgHpxwItEnsEHhFSI64Qvi5OALnsvYMIdzODWrPqLwrIdThFob2CBc+YRZMCxUKdEb7rWwwHsP6Lyc70s/nGSPuTEV36APEW7/7pV/l17i25xqZME+ov7xB/kIw54wR93p/JlcVFTYxkqhXaPrY8uXOaWgLTdAqiymKOQL3eEq4ZT19EGugJcFcwJ+pMEnGVJTbu035LYfwE3rozDq0R2kHWU8eaDSbUCSq5QRos8oSTKgwTOvvZdsKxD7CNRUPVQ9cTBJgoPewy9O1Ii819uz3zvTnZyLumv0gImg6Lry4z59+H0CfXGOEOwoe7GCgSzDsIjqBDDrBbYEk4u4TuZLYA1XZrj9nksnqtXXkrvi4xlYE6wvNyrio8B0QCKVtupEYJNxgasTcdwEBeM7Mq52Y4i1fiy6zKWDz/ecUWbM5iCFuYRTlzeNcHzSb2LJiRKOOHy/yfzWbaME/cC6KppTSiIiWx8EhKanuQ+zLD275kLecMN6N5az5F6CO96+6O0QkPSwP8MNCZTnOD+wZXX6ZcS56CHPtL63PEb8rz/Bt2eOiDx5RTsYBXb+hKKfN/GEhZhbCObjOADL23uktUBnE0L/2F/dCSdGL8YV5lr8yJDoDvxXxPs/Tg8d+A98+ojITdW3O1N73Ut/6xGHheHj57yuox0a2+iLEVHbRRxAUpySMT0ILfG0hx07tEQm7ZiF72yzYW5Q8D/QL3HY5KiT8QSFFh3A1kmH+N3QWjsvOVkJJ6FBEf3l9HrYXNTXQzuM0UztZPguGz8XlFS+ueOmpRiM8COXr7UxwtTFy9PU/SPfhfCVmlTXZyeF8vhrxa9NpsBOOPTFxP3aTAKPl9jMUs6T4GKC3qcg13xXTUC2Oeal2a88ApU/6DBHYmivy0CKGnIKCmM+WHWYJo1JdP+AMB6tY6RoT8fQoEdfxG5VG4p+7Ymmwi2mAd6bA8RHgRubbQnXfVkyDtLzV5xdhyb/vff8I0mCZQ/3ifdIEc5kFW8hD1/K/ciBvwOCGb9mHqdAOCwJcFGuXLxhmQK33b6S9KQchgrxAdwyahGroEuGXBpzVgLwLMzoEXMdfw8e8yJZfRNz/rreKF1FRvpc6oluYckP0JeelEKHR9mC/ljOI+n+rshv8q40JimT/tyNZXjnsxlmkA/DD2FVe5CpbjGvz1pr1im9uoWohJw13tff57iwjHCRcHo/H/rht38MuxN+T/y5sb1ZF6HiPZgRmSIxjaCleDDVmghHhMEbGb+oW/fc1MATopvUNFjUfQ+qBTPTe7DKaNYwhRWb0KY0qfahjSwNP68RkniHxpDpptqlk4BEIfYqYvj+MObMmt8WeJbsib94e/nzHcHebliTDbXLJcuM6xpG8vpWE7pOROjTK2+3ysKiSdgJq42UkyNhKdolYKjq//8KT4JjDYUn5W1D1QQsE7VMncCgzHrvaW97Lg52XGGNB2svg3L/3AdYVzcnlMRLC30kofnVU4V7958jvx7P4tGliAEZdK5qZwYJU7ogxxYUjW0kPVa0+qN8vz0Eaqk3NUsFpDHE8Jckj0sr5Xe1rzZTcshVDyWSwDhRvPhxs64a4v64X+8Vhqv+NlNlcnb3L/FC2GcYE8QzGG6Uk6Z5YKCucNs6AmHW+xDZztY1M+XEdtrHa57YYloZggBRHrQEClVbO/fo7vRGpwEPrQN6bwR3WFF9jfUnOmuCOAsKIfsMPWGaNr+qUiG6nDgQSLEyXnWQcs/7Xo+3ISess7Jst3u7X+HhYHNv2izqKnC52k1JpRDkhd89umy0UaJEVWZNCfdp1U2IrBiQ/sQz4uCx7oZBR+p+bTk5XyXEajT5djM0OthiHqjzhN3VBXc9+UETdJI14/IqWwoGyy6lI57QVl2MXfKALTz+phj+NdmKMwdadUTfZT+n5o0Ze7rDAuDAXWfp4LKpVMrZwI/kLzqYu29yLXOpV2du1AeBP6LLPwLXFi0GA7sZ37cS6oaO8An/Gmxgsx8GBp2UJmLPcwgVvEEeVFqPYZD//r32yQ6k5OkJKDS5zGBKSGhzOqijLY3td0o5LISFy9IbfQrWObKWqAUlwJlcKstTzCju7v17Ys7oxRlQeFF3elYOSzeWJpR432aD7EoGQ/bKA4ggGTcDcWFXfFtkG9J9ZZJNDYIx+69enXvWX4Yaw8cT+LaLJyMzAW54KV5++nVx7aMigzgswH8sLJObGUO06zRee2D4BBdMlQQwioT73+ul91I+F49zLLJm5MOyH6ROqsX8Jusifr1jraP4Ft88kQNBK2/qKTsyshDsuABtw2E2g8e/i6m8CqJCDYbrMNa55xdOVvOCuryFWRXBQRP7aDAJ5Ff8Jsnf2+YqK3r6Q59CL49gO95K20Vac4x6c1ZpIZsGxA+eqNW0xON28zeLBHDXf2MuSFC9uefDwFn3zFJUcSb/UL6udlXnCiJRDgXf0AJmQ5UTuUpxdEP8d7cUXQsLN90rXT1m0ulXB43xi7VhM3svYq0QyziFmnYYKAhLD42WCcyG0ji6bXItJRPlF6Ebp6HWjRGFwB6feaue5wRgYiM34dMU6OKDZtq9N7K6BhWnPc2lD0uuBS10uvM8L5xJEhI1gVmrJ2T46Ugs+B9bFAkVg1LKdGAxJLuuMK3yvpEkkYYc9tEb4nH+u7NdvxOU/ctR01If//HKXboRWyWKdEiM8pMBi5ixpMoTNWkFb/aok7fy6LgTNLEO1fcGZZhjSHSN9C6ZZgiB4mkWL4bS4nakcuwucfVF+A+1lBZ4cxF9bleFu+Om6kdGeojQtpJG0DOqF/uaM/kYnle0XpkgRSQ9NLR58AUSAz7FQrx7rTnGwlmoBJw0BcK3RYGLuPh8T5o8q66pRhQkR6K0l8eZc1N3GkmqXFF9HWVebgKMH91aSHSr2mSnqBGPeTHpLXv9tivI4GUWba6sEKfjx7qZebrimzIfDCH3jF1ufg0lCiqiUYfRpqPoDsHVgHYaHDCfYMDOMiOQ3kGWYoe3r5nmjgk5HwgZOV1J05XU/
pIRkHfaWH1zL7Y8/AZoAGMcHfNWnl16u159170c2IWsjYtk0vXPimtWKJNDnmKYdHSD5LZh6qcNXj4X9OLO7Ak4FMSFQSP6m+xUTZSMwnz60/N6F5f7AXjlllb32WtxGujIaBlv2EcYvFR59s1bxOmP32iXH/6uvBTM/8bqdrfVBguwfKI1uikRGZqc4/ULP2UruN8zpPiaosEMY8LGfa51+OaD8uAZjcyzfjBNZmvidfp1BGPZrNrcseT7lLNgvwcAqBqE4WbiTMI465cpnHRFxBelScohcipD0/3pZQ4UZ/8NDxjgu3i8EDkovK6hglgX3HzHnADzJtbo7uuM4H20fdPPhKm9vII0vPfcoz9LuERrrN+IpksQZ9i2HoT4m6/K+ivJCycFeBk83TrHxXKo8ll71JccEKskbmjLck/VrHx1UpLyeAz09PeqeJlhlY2kuN78389oLiG/3AdEAgMy/NEkvtePWj42l8wPTdB8AwDAPOb07BFvu18MFfOjZ259AB3+F5XfABTz6pE3EfLA4IkVUMxAn/stitgdIRIOwfYjzG/4OxVVjlZ8DP3UyYj0XPsiRtN9fxgk9wWdNVNrf3hzD7Ptx5ERZdX/rWgRAFgWgQihO3PA8GPLRGLbnubFck/v6NJyEIJLeLA8DguqIjnvosmO/Y9wkZRC39yQiqRHvY+P66MfGT0jUAhwtSC0BkkEU/+3MF5b8+o+RGH9xbFs/Vm9sKdF48Neq4K7qozA57qiL0Uw8kxu4gbE1VwJiiY8nwyIAiAKGMccJ2XlhU1qQAuHhCZyxQU/gMTRIg1W/PCA5El0cvN1j0xFM+lKWaQ3BwkC6p49zlC47XtjGKBjJ9Bg8SKNbauvLB8eiy8NtRodRACoXr5ZBIQJN3nn/vHRN4uvy5dc4uxU+xF5h1Ta6MWkQAEjesh5ukacDEFqeiA82SFK4d8ibvn8e8BNFfj0gh8+wukkznGVFsvhPpxJ+U+VPocaIdAlJgQoOsmor3pSUNqNohe0g45rrhXS1L4sc5xF8zKY0U1PnfsjqosPyCWsm1+Fh7FKISZRp227Dc7a0mQdN9QQn1j0Bq3GSQHULDTawRicwIIogB1IFfrukgdsS9ssiDdSyRlsTiGOKqcTQbVgxpW/Vb5odj6tzQfiLnylqkXyimBtmBLrSqMoWh8lf9wwmJyEHB/+itfItDHGp2TPg3Nc3Y+kSrcIBOaikOTT0pcXz8LjRis2uxyD3IVPvn3o8CRO+LMFICD2M317rGpJ4sDkQqU/DAd/ygbMxsUDRyfPO5NoAFFFgyY/T3cpycvmdiWbW+RIQ34HXjH2WDoA8DWPpx9/sbEeTXx9yg0+RvTcCDzHau76Ex3wN624RcCgIwOIRKfkOQQnzeFj0B6Q3bMCYDG6fGEkC2+et9Iv2hNwqrV2Q1AOOIMspmVytgAWbcHu3ZKd5I/fdOLWJjhz/eSqogWDWQYd4iH/NrwC9/qV/zilxNpEZSsyetdgZFrUJ1eXFvsbXCtM2uHEgXDBBL/JUbGkChKSwpYBORDXOA2FaT82KqpTUXCaMOM13iyKQbkrLVmBmTGzOz//YbyilKd8WrugVBe/VQ4xtJ6Zc/gXzc13Z5mNp/wscn3BDz9kT4eW3Jsy5uWE6G2JOzX99CGYyz5VrKqzxEogFUOtj7t5y711asjJFhscDLGFJaY5GVKbt8FZpsu9fn6dmiFT6B8+5XydAVODEMouvFd/k9/qRhkhMxbp8+MMfXGGozjzGw8OdffF4wwc5P+EoSvp/ikD7IDzfGEyyJJer2dOK1mg7RazD0VRbE49/xoMnnJ0RrRG2rnlDaAcVrPtK9Fk7dO+8ym2p1TK+nNBP1f7pItzyULKcZ/DwrmjeNDflLy/btVzcHqzJ8Cn1a8BadLleVQkjDyVOJOjqhKLXLbe/W8PrNWSMXgrmyYnCzo/Gc3675wR8kMvaTz57k8sG1o2+fmWL7g/53eBAxUwcPWi6MNhr6weZ2XQ/SkwpUdOUahSf4664XtPsL9Awf2gYLBKFtIgiC/sHc/kF+ZYGimsgXEzfL/ebWMWXPf8cbWDgJSmkxEFCUKveaPzJHWkS2eU3Pfw3O6BJSvPCh215+zgcCx8HevdW/TnJko9bs9ZpyvGF7A2Hgz5XYus0yRxFLjrdmeRUiC/XsWqU6K5Ih/UFPk4IF6KbuMo9N6dKcliZ86aPaSbGuPEyUejSjheB9wm00K8egKhMS0lGmTPjFQXVlbgBZy3dSFImR/jCNxtSujuZK//VNZ3BsEnv0+ZddkGsYS+/fKnAE41h3gcp65hOMJmAqEk7HCq7NPQUzxeKIjS3sEmb9ol4agc94vAh8D8WN/sOkrWXuae8DhtbYQmwiVYv00kync6pMYXKB2Tj41aV9G0nAZ+S9Lj9b53gYN8+ovJDduP+9bc/YrbqfyddqLMDrD9jJyDuxvhH1pj7N0HVu+i3jcVUcMoghPPuJeIbn7rLRVdTKj7+4YZA3BNSc0RlIqVIZfbIL3w3WvZz55S/3OsUZZOQblFEVgDTF61saAS5RcWUtuOtR2x4HEReWNxP33unIXJa4SGEKakgvx/dyCHRPyU0E3kn2Wvk77V2pIxCB1JGabyCKfVFLmyrGcjTylDjJm4Q/fjpFGZX4CV8iSV7DA5BI4LYep8LpjCCpoK0o37PmDFblh0iRHCfk7Q1u5Op7bYIk7Daj1uPSPy4vF+NEAWg4dwvZYftYzMUnOTpZ8DKs5z81zF8Oz7gvTdZWk3MKg4n5qbfKzSCzu1ftRDRpiLkkIOGRVi+Jj6QpVwZ2SD4+/fzmhF/1oEPGdD6VYCciycxDZZJIwxgXMNE6IP3j9DunH88XTWC40AWPc3mYgjFzKzMIgtsMZ9+Ve7ZDSbpcbulqg/vfRSXyFOQD+ABXyO2hroI5HVvAxI7WzQ8OeSIrMLemu7OL8eduX1QQJ2ib4IMqNHGUCwP6vgGhHeCScT83uAq2Rbc9v3QGdh77cEwyg9w6vp2sa7HogO+VJnlq8ni8AC9Dd8qW18SUhq91LEFVtnSvEPqN2zGr4pVB+TR4HyBxAHSRZgnII/VBRwH9qvmpukekx/66n2P/9c3fq5O+qHKK1DFMsRT7jDYt0BXpXk6zb1goRsSOABcp4BChsG/F0tn9nEwQYYUIcNRWtCIZCOaplz4ECbEYdPve8l4UWWsZHlQOEK8FSl14I9t1zYJibgXkdwHkP1Rek+U/un1ckagXyTnT9r+8+uKpOnn4kWUcESceT941w6v98G6XfeWHxW+3NGeKP4m3OmlKEKdgQRUVo74SkJmQgpb/NvaMwHzc0Hyqw/YoXwQHFnBAe3v9D63nKKDTASdM9bk9fpAEI1RG8PDoPwx/CgYLBNgdw2SkXk5CWYJbwqsFe1OH7jdjc5+U6HPx5bHGeR+jh+7Oip+LdY3973XxmMaELbWTAdeHvFsHUS3m8phchayGR6qQAFD1n538qZ81Y1x8Rf1QL9AxqxV6jAB+80TB6SM5VHLlogCpfKVvvFgRJdf1ZpFeF+KNKPdRdO
Sc1FrxDHixwaBELy2lxDiJcNx7cY1etCTBsIcRmsUuoO9DdVy9qthQM2HIFGEmtAqF+WkfVZJnbw1S7OASaucUkqXbazWDpPAUhar5KX+qC8ePksDR8/YBXOvWfy1785HZNPVx2D06q4VOKV+vF7HsffWyudQXfZGRpct1/Qe8mBb9tdBh80zpX7svca3z52fnPVORqtGt0BgShJ9LZ+7oGgFntQTz1KYCBlgkTyrqL4jrYLLJZQuLr4ynivF6xVQeWYdyXdYCwEYiUq5IayNUUS6TxPiff+XgXuEMZd7Kkl4uWpIMZ2yykPNREQg3aykvYMgmphRYRZth6fF4RjAigbR4fz6FRyPkePCR0B21KDRQtPEyf209dJaz49HK+sszTPbUDO9THvAcCCR5EnLF7Pvh3yV+y8iDlL+VW2EkyOkkVqQEPkGo9DE4CS989+EAQ15MA/aeI73aEi7vIJWv6YKJO3M0vbUHG3l6O+Tgig2ZOAtkR6lX8tocYpRZdthPR1cAaJzRmtHoduXssGYME6chKJ4HxzSqIajuJyE9t0evF8q/jyThdNvR38orfN1d4FyoclcAkwU0sP4Vy6jWbCU/FkakbqyLmnLdpRAnwSwNJg/0huYCfE4x47+hJ7098ktRA+Jf8NQqNbEI683GhRaMRQxe6bZ9rahxuf/rBILuN72F3hnme9n1GLZaoK4uJlPdufJh6PHXEu5QTNOCdhvOolUV74OFeIhohmyiwkZO+3h9jfxx0RWLzwffwkw0Ekp2O5+3MfzvCyufSvbsOxYgYq4xSja8I1DjPNe3zhuN2cop7OtKgIE5qrPa2Laum6GJNjeZaGY83RX8IVeqY/eixpuB/Uco/XAhgtd/ZkW6Zlh2mZeYlR/gE73w103faLvPIx/s5fL8CFRXpp/Qn1WlwsKrxmvGwZLGaHhoJpVPO/dCGDPl/ObVfb2HU/4Dq9a8SnF6ZyyNRKMJjLd2C49XCMSHkeC+E1ooJCtFl/khKQwmlu9D7cZuuIoiUN3jlQWIcC07MiOxWBnEYZ1PAidljaF7cv5pEJbcqp4Zp5Xw2rjdxu1vCrsqrbBTCots+0lKUIcUYdB4TH+zaafJffZhNwlLI5ftqqKWlXK8W8Nj0NgX33FbffmpGvx+lW0zwf+FKRKgldyopSQqNIEW0O+j2tQkxtb4pZZW531kFfs8//6c96IwcjFr5tYdIWWzFQiF+kCRrTGNf0DA4GrQaWoFi7K3waKjOwPoB5JoU9iLoCQDHuc5ntYr/jtOTglGmzy6+ukVjeM42rVY7lqQQqHT8DQsEelqhLtEEJ2UvLVDjDL1Ml6Gxpvk4fHV6EpvMrdeNGozM/p6Gs02QUsKwxKJj3gL3hELJPvnH/IPLfoRkV4w0e5e/paAsvcJalvcbhWTZ9M8qFWFC7Q1rafP4gTClDSY9X5QSqH7L6Nb3iFNp0Dl8YPh+/HZpd/44xEMel8C6YrhEj3Eye5bnBcGRAqpYz597aVH2/bXC2rHnAb6iYcyfFFYBD9ge1uqgWfKRkR87z+1duQ3QSpLSnTRcj34IXINI2NL4+ejHmt9fxa1dKG+ib8X2Mzs6rfOzqpUb9QaT4JTEi+o1TxZKy30LaudYv1vHSBj3LwN2pYzuklPFSQef42Nf568dM8wLWRduJpZMlVppgtY40fvmYpQtZALLd722FgKBVdhMz4W8m7kcph8VhWf/G6q+wG7VhX42sbyavSgTlFODpO0dNs3/wZhodlw/XxAfYvPjx4BBfKKFt8M29/9bUSodvhAKhcLLGQoIxKQJrPsn/nUJbPD+kphDxMagHxJx9f4VuSIFALgPb1bJyajy3NW16G/VxxUIIiyhQIktKuVbKfGF6sdjUzGDdZZkwejJDxPIyAYVLifZm5LyK5YeylrGYrcEW9NS/lH2RjX8uj2xCwU0EQJrSmUdbHCzxOAaHQ3u38k/S9BX9JbRrCfDN+gIeT1SNQnQcMH+wo6nhm3y/u6xzp2dKtZeK1UO7sEL22PltPzQ2Ds6ISoWlnJY5dIdpnjLzhqxkB7KiLGCPOEVtPW57npXrYivXO490FYRY5xnJCZKLprktTYhTGfcGijNPw6z7kEg5GWk7hEcvkTnDMLB5R5VaQ+6X4n4rDJVQW+awUbZs0dABoRDgVmOLxoCSAfBTrHT8mhz64U7o+VVumr8X53g1BZ13vf16urFFIp0cHAxA1VvJLylM2EJErwD2G5eyQow3KCj9DnRpXAVpOAiXXKaztjQv/cFhcs4aSJy0+SCKtethbgE3NIpESaX0f9wgAyJMFxCdw8h06SBNh3+7gvGWfciBHAAHUqlPXjuh1v7GFijB3KVI4BVS31TLxl5Km0NdrXs4Lpr9NjMgUYv1GPFt7SBcNvIF+cJkJ7QL/QTukke3W4YFwCjqKhQojrzAJ2tmrUrlCEOeYMuTY7/AN5YZAW16uaY3Z+eYSIdIuiZhKJH0AGbXlPnoFCzwqFon3+FifUaocNCKYJaKAirStS1/p+9QyCF5q3o2YhaLrVz68W+5Dpb9FQKhD2bPfbRaR7Rd/Sh8SfX+DsC2F248txDACzgGJHraOteX7sjUJvFpWv+2JnTsVRP77jGeLcKRfKYQFzcr3V4TljxjAv0ZLMXNTLf8oy2zKQoWOYFIt6CRlGVCH/+u+wWrNX6IWn9ZMfjxwddj42tMPuPyeHDe+9mhFCcRsTymsEXJ6CLPARevFaJNWTxLrqGGbZgMbDdM7E0JB2bTizW2arX+k4GHbV+K6i3UgXy9y1UqqabfKJELxryCLm6PeJU1l+xyX/V/JNEDvb5z0izeUuLsZeA66bRttM6HwGO/nPw1ctn6gKbhHiHrfoLjnV8OGQ/KEeoTfNvAIqJXzgTQEpjNfOUwV2+bcpPmKsNpZz1wQcrDJx8wR/0t4oF0ckBdvMKeK9dy1K4g0o9uQDdJAqANstt0w8JpCVb3TiAtpDDaFzouwTy+v2BFSTqJ5fJ0ZDzQKUfP4tUT+5bvyNNgQE1QO2z587WHYp9CMxYtjKpJRTqvZNmnlaBkkX+0bTYJhQr7Gyz7gh45uwAcTTPPtU4XcjRkG6CckgYC9DJnR33T8tkEwvM26Ni+s7jILT4RfedZ4AmE5wM/6PIW/4WrTGRGVkGXbWDr/Gug+1x1dr3nlql/4PpUsvPJYUjOvbI+FAyBhep2LeKzCoDUvtHSLGpedAlVRDQLrSFihroxbQdM0td5fpmAh38ASPt1NlMN01GaDMWWihcqqVz3moUGG+56+utpZYl/xuTM1OATFlJ0bMqSV2hNcbBM0w0JLD4KiYX4jcOxUmlyx8Mo03m5Zrl8XDMcQRyswVLUzx9WJAaAzSuAMKA2fXgfUgyhOtIP+FBbFucvSWPcjZQAiu0HXaFCz6tBco1T2XJLhCTSlHA+EGmr2vXzbYOxYZ9MIR6YsOXJwX9CckYs0nzEsXcchUpr6b8qEMaNJmzn8EhG8uyMyA43UbRl7LKzTWVgumJm/Rd1v7TjR8h85Sz4FOjO5AJH+wC1esQI+pbKj3CISwN
8jLDAaV2cre3YzTuUPSCXLfuYYridcPvdDx4TkhJU+OJud9GOgNG4XbPExDqjY/DbLAIJzOuiKOvyj8Nb5imOMQ0gSmw2kCUkZBgnR4m18vhyHrXKYBT5DY1MgFLPcaYDXfhaluSIiroghCyMzz7JUN5rcGJJ/c5gi77EvSo6DQ3bQffc/Yjjt65Ykz0vgNLorN3DJJVod1yPvDz1npnoN2cv8jQpAL7QdoLRQiXVOCSN5kOQP26J3y5sPEDymzxfL6h6nyE84hD5+Od6uFxngJBklc9m3vcCrEkL8pqXF3KrvMmhZ1r7tb/ppj9txmCFN1LDP42HtiJzrcu9TCzPxmlfzfFPUnvymXgczcATWr8WAfshxxm6Pl7qgpDp7/0R0Ci++IDAi2VRIuuRon+Vm/Lq1ocGRrHukw2RF3g+BAG6xfYFKjOgDFueJjldGXZ6dght9eHblAjxkVEpHTvCHxQ5pXXDSu7wTtOtwmBAkoHRLWME6MxY08sY4ZesqzA4exHLyW9jLTy3hm7pQX/47DpIlKX7nizgxlllzaQ9C9GI+DvIl/2BYl6NYsto25Z3gmXrsKiD0AOnu+80sW67g3EwT7E2Q0YPqejWV+wy74t6/bUgX15d0WOJRg3B4gb3E5bZrkwXGVvHr4gG+LXWNCWVRuYcnfM7IAKDw8/e+hiz25M63+YNEtwdLdoEZcsrcvuv9YfQkYn3roVer/aevwoNxprxM7KnX6blzGJGrBPBX7EkFERSWNGFI+VCLb3wSiXs1e0alLThcYFuJfk4/9TsF7yc7VVr6bk+TDjIkt6H6YeB55WDF/9ivC7FAB6vj5Lc9ZISCzAqWFXE/ftQTacXUJYfXO/nikzITV+67gRiN7lHBaxsX9nB0VcYvQn89Tt4ZlZB4S0dmEQFm/7qRz9eoMe/HYg9vkCxjFQYEMK+emklzR5v0McPB2J8kNVKY3A/SJ4eGd/0YAQWltaj38NkTXH0kOJO0Hl5IPMQLYFOxJqYbX0RQBRyycCD6gFuxCT5XL5wVRhygID7LixzGNf7CwLumiDaEZYt57DpOxPiRqwIxeD84j0Mi7kFFDMnpBNc1VqPN2l7/+LxdBnPFljtX+OpUyoLD8x3v4ZXvizxneF6hIJ29eMAn29blRfaKhvyKtweyLdkS73bgqsu2G3JM2Rf+HAtc4oShSMYIAgjfzsarft74U5tzsqEKPtnFWjFkf+PWlSy8HcP2mMhreyVY5LcTmtPwb4Z5J0x8KkcqGw+Hifu+7GFWX1cDD4/v5Jt0KWUs7lmfzCmuAkiAbfKqFx/zgvQL5QrJgQUfunC/6cP6PVjdcG1JOvJrccMe6pNLSkUOdJtox6ImFq5pDqCBO00xZNEqiF0dgKDw4imILoAV+qqRfOeenmUxCst7KVUC+/d+9UEEtyR97vnBtHo5Z6jLdjGq9PNy0KYJ8ZF0nUeiNsLKxaJCuKqB25YzaI5Zql9zsT7Gd8SlA3r5KlE6J1FCDctGIR2K5G8cAi1Mad8O6GtKrho4iaI75iZYFLT68sbWYvS0uWsRT4OePRC+EoFxXPMGjn61EMoLPCcPgf6hv1baCKIFchrmm9JeZmXJsy3YygsEs75Oa/+GKNOfzzCbd1/M1QGbC2HhMI7xaATfK3MXYc92S3A/dz4hOnOay2vaNMWq5r7fHF6qKlU33lipEebg8i4vIRkmnMdvhCEGzeoVexnEiHp8t5MnUT9aGyRgrTvrFbW4hO/5/BaNQVxbarlmLEKf26ZbX2qyniA5VIZCMe7snSjV1u/6QTZBuJ7ivmt3UfOM96+EhRznzCjHZQTy/VuP+1nndVvkYKMGf7tdhuxLODZBlSliRuSgO+fhsDvHIntrQUkQBbmW27tepoZPhPoXPd/NAWIwDxnVAJzWOaRsVWTwBl2sxyn/CI7YEFpICh+3ak1XR718/NguQcg1HhkvgvKn+U7OUENKsdDJznXAcktx6+ivATXLrEvYa9bDTj89ylIM6QPFPWma4HDYOC5yp8s5vXR6fSWs9LmNFiMGBDhzEiClomuDQQ3eGjfDC6hCHEO9Orf5bvHA2TaB3BbimiQVymvSiNNNMlDh6M72S8SK+NymJo5+De3c0UEACf8ar4zIX8rko7hbUZlzZmdNh6D4h6ecJfkXVny4wOsiZXrbdxbko2YhPbb6TdSayoR4zinKwb0mQHCW63gzYKOE+IBUdVAhOjyQyXaiLLpVXr6jRIeawiAzDhTqiDActCRzWDnzfj5+Gah52neueUJWf2VoQrZcZY38Dk3TA2VI3BXh9V9bKUvz6m4+j0idLCMY5FXoQH+FGwRNq5BSYx0etbXE1x0oA8iEvArFFIowFAYDVh4iTtIKR1P7j+De+Kb9AtBY1CbBaR+QXdk7wtS3ZkGCBqZWPsfgjVc+paAyYffPJJSYX1st0KgG6y8yrLDH/gT6J3cpJJgYKbgT/f583RZJbxWMemfPguzMEpRPGiSVdEOZ/Uqdmmnw3n7dQ5XL6BizAdEbdFg7AeR4YS2tjgqUEZG9k+TeJ+atDZfx2fF+ItGZkgtDrqRLWZiXCoR3RxCzArAeW1whf3kExshjPwxT82Z+C7USyLbpS9wMB7BCGwXNEax180Am2Ks0ZawKBGSgy8qK4U6/t4lfeYQKiknUDr4/wrclwB4zb5sdNidfL0pRDCRjh2YCOa4vTfgd1s53VGCFq4tLYXQH7B34N7fU3pXrNT1eWm/N6KVz0R7fwCqD/FkDf3f72m+EO7+6OMHh2RW6dEIwJzqOeuVE1SwpnQMsHo2wJlz3b205sOkIaL3ctGsAdLCYrsfaM058ZbaMQVt2F/D2fePC1W+0dWyoWCUnHJ389+EoGcVZL1Yy3C9GEm2qu8/vRHwhed2IsGHWJCn90ATxwpViT+LPyHT5ukXHF9pDmY2w0glXBMiOG1asLqxvsEeVleXGg5BFxF6RWlUQdxMr4ujqTGaL3ZUCczARPCEK+jxCu+YkFKUoi1c2j6uvKfs7qIsGyRvrb0itCPFTM/q/wtWLkIXEQLErUwiM0PDf4gwDL7pHLtQ3c35frtUAfDkrl42h3yCkXZlnSsRlOAO+BQ6yl2MXSvk4pvg686uGQ1OFpLcBkFn1226bfQicqQTFePxzXTJuFMFfu/cXXdPUg0oi3SvFfTNAfEicrcuA3gLx+o2vPSLVywQs+jRaH29W8pWycI0l8or0cTUw6XhhcyX4pYIFJPJQs0lEQBybHw8X488e8kwextSaer8kqGWWtIf9F7F9AuKFtvTtYpW5USQ7Ud16/ZxeE4hW2LdGBHOv54sr51xQOTfCODSxFNBX/QVTewBhhb34mkqQz3ORffrKbncDWwXf+y+6CULgfLXLvbg14y+5d2e43wxmVySRMSei/Fv1Uq8dNp5hjsp7nBYAts2nYewgNBM57t68jR0x2STGKmBz0AKRvvTQlQ/YZND2nBJD2owUiuy35TqlhuVv6iNw+jk32HkbwEQ0vpajbDqHQssQwiexRyZQcXQb8VG0TzXpnEUSffmrNqTphxk1d
ViJ0uuVs81Z2YcBofED7dAES0CgjYSJLXeZQcnpYFBUy5PKdQqLAC2ew1TOS47aOrxHsIWq+RHzl+Un0BefEdSqZwGgif24TEefri5Pzgc43103vWM8Haq8KQJWPN3+NuYVWEgFjcv+8Ox1JBp+VNxGx3bL4zMA39nPv033tamnFN6fNo3CmXnsbfNANldgjJbRrr6xnuPqz0FvB8MXDvoOIGPsNUnax1mOW/XxVsb5yuHEgqerZKHQI2xE9z9ZXYqCSFRT/KFLzx/4SqXtm6EUorI2e3n1IO2giCWZTlOS99wrMSlgRVIn+iSZ6fbaIlhIMSC8JI+7PxfYOKIsJTnxDdRBGlUfgwwQVu0Xu5cO3IE27rWgpoSyzAtYNjmcL6TfayRb65ujSvuDyNvB6Z9bnQuJ8d1wLP1/6b0TVfDAWpvw7Y9lDE97LcdFL92CEvWjZhuvrzq7Eutc7evkANlvXwRjvGjTOc/eMKIipadf1FOxYZFtUurb/eb7ADCbmBMVCfmZfrpZrcJfhpvKZClqiG0y7O0gKq5WiteIoTFblJBsXRQVE4PYrGAPKbt96PwYg51QVId/r5xm4c+xVqmAT9t+tGMnAH7/ORW1UX0cQa/tgaTCZCj27C4YpX6s5Y3odVtIA5fm3B7xL9/8rEVQyqcn1UhZPOxHUM3ZNUMzIHLrE5rWFsMPwwnkzQfEu33EtnGNL9+87uaQgeB0cih+pDLs1jWczN0UR09olMSRmP0zJcpKfbLnO7ywJsmHwVUEklmgIKge10kdaotte0cXoUtXbAO4QE3GVPNELrhwcs0Riio2QeOpyPfpGy2gWZFV7NI8tDEShj0QrlepUi4ifmkW6z29pL0B2OgccnJ8hIBUHlHtMRerL7TZzs2W6BLCc/b8mAEW4TbNNCrLhe9Dwewth0sQ40juXvLcGphzTP2IDuzyZAShgSI3xRR+CGFPAd79wMcILxO6IkeAJnGh3Dk3WIvIgHrfCjmr/XZXw/pCefBxxG3pvGPxde0cFWP20DTAs3uPX4BPQFk8+OFr0m6q7P3zHTzSI8ihAgVIe/513U7frjFcNQqd4Ews3yHQyM1vNvVDkDmXk6x83gCY3J0pCBGqR185whx5KDpbYwdtzJasabtRucNpqfPzABdrLkPiak7O/jKJJUpiINV7p+VxhrCDrNT6QI11bZRWmqtqsxzkvMUvltxJ8uXxGKw9z0caYww/8aTvgBdcwGFqwGX8tlyhfex/CEykcuYlMiHBw7HvFUefQ8rkuhIZNXhCoA8Co4TG2iYFRFv57No5r9w2BhInOuNG471AJBXvMW0+lwP0zTdQfsl0AqeibNKOgaAmJq4Lh7YSquSDAJjWlyEeMj+iw8JevX6/ckc0ijbXjGU6Hc3Uscx/6Z5yop0E3Mmkehg/BTSL3hZVX9OEw255cNDCcb4XMTIjo53xbVlV16zEuOblsY6qZ0RBKzP5XV+zCijb2uv4zAp0Nzx2bJugPGspCQGd4ez6s68suB8UYQWU780uTSqILaUJgdkzZLXtIbeWgSYqoq3rUVZwHTiF9zDLZTgSquq+39cdUZVlohcKathfTgi9A0azhZJzf4sgdI2nNVMtL67zBm3BciPfg5j/PkIy3r8cMXkqYPVm+hifWKczdziGikCRTqTgUdpmOQ2Yu46nVhw2xaezFVS5vSknA9jXRui1kypLwtcqCoKiSC49mxSD+bINR5572gZpv5BIklZ6Q4f9nQRTjRWTj1/DyFkHHqxfRzSl9fu4nrE4rzzWOx77ckksX+9LElld/bbDaCcHE1iRDdP0XBv410gih3OwvKyKXMLMFVGmH08txxMAiI8OUghemJxaa4S4h7w8X2bje99MmJzoqr8o0me0hljfxuPLvHMQ29MuV6VPqO2tT8bZzfFNnmN34eNYWpqivrkFXR6VpFOQnREY50K0oVdrcvN8i/cTiYuxu9cqqX/Xemrxib89Ip7btcIVUVuQXF9HvQvsvYfgJ1O8ff3RXPXIPrSV1i1FjKOEWHDkqx2yLltjGAbS1/oS7b2pUbSUhiHvycBvaR71PQfns5FpR+zFw0g3/mI/EsK5lOQLN7pSsxb9hkL6v7puAQ/NGZSciFyYHultLOFjrPGIoiksBx5eJ6bXxzWDwHmw94jVA/L423py0g+OUj8XeIP6eJEhk0aYdPO7A8CbevLzGRO4HYMtcyGkQ2cYCHaYwvGUW29+KU0WtXuQ8R3zsIzE3dDynnTtNwLL8PnGLhfbXXmN8Wc4dRfpHlullg6e+RYPFtzsplGDqx9yGJvD4eRw44Y6XaoN/eab/UNiPkyuObulZaPet8zmZIWgh7QWXVhaM7gmo0cqYdBMoYGnd9JVYb5/O7lgzGCjmcApVcLi+WzRF48diBiyR8mi5FZl6XA51193Vi3I+oy90s/nu8w47TsM9Co2RT1458OfFqGBdXGh6BZJPlmZ09XTHeWOLP+y71PT8E78YEy+KWV1etkeqL6LikiV2znWaMx1fl1nFDQmW18TrixUB/3GJfgXlWHD5aUrxMsq0gv6s5ouI3BqHMYxKnzb4FHG73I1Gf9QIFH9AgoS/XpoXlaK7zgG3zRLw9S6FhLfIpjfTKRL3VpN9vr4UZvEKj7OhGr2ED58wwWAi5DUjgoojyho+8rS5ZbPrizdHOl8NrtIjMYfpDh8/Qot3ITF7HrF7PmL3XVR3zkid9AtYkn3yEmyYEjGQ1G4SuFAQp/Dkqel1y+/3RP6tx3C0PMyCV0an6U5oPycCnOz7f304xveYahQ1kKk8OE+LftBgrPlOHU2ldDZzTvEMZsnHpqcDDjt0d6hQl8GGNRTXUCF769Rc5x8AdG5M+P6kDhRjrl1I0sowFt1jcvirvekDZjeARmk+XElvkoadY/cjUt77oBF3ARG44lU5zpC86C96/ueSzLwzzQr3tQF2U5rpZHzdy8VoBrsQ0ohQ6SbHf6IPfXbdJfbLZFtJ9x6gQ6Qknny6DzkKaX58LLC+zGj9H4Y4wd5oV7PHEUmOlxLomRLmQvZtqx/OkIoBmQUeZ8MOkI1qUVwfMJDArCbC1+yGZy6srJTtGQv6kyMaflskmjzAY1cw8FUc7Hx+3PynX1c0uvGarbJAR7Xd3QymmENq9FRhM3GRgigpnrXlK21XKKStZ1016/11jGqP/pAS5JcmQySPKH76xle0u/5pFVYCe+oXBpajRpQONgXSgxhu19Zv3if1OoP6tfwuBMZ9esk+a3uMF8D6cXA/XQ9/zo9pojhsCwYJwydsI8yuZX9Y9XZ6ZpeC9vH58W8JNV9CxjqCajc5drV1gbTVkJUpgcWgRnkLGk/LgFE2Vj2W7YWMl9T44yRymVS9aHig+o4Dc/N3nYacDkQrf4bEKADVT+rxyA+PpLrwNRFwX9dgSt/PhFVZBmA7hJt4TgbPdx8RK1dR2o2+apGyThvRGr6FeG3BHxrfXYhKLwhijNVHz9RntA4LRLeMy7Kc6EcDRaLHG34EduEpppzMj6tAmJrVL9/eWMqEIPRPatGucE/IOkmTIxM
47cdQHV1J8g3BTGGxSHN28UjoeBUkHFOl8f8YJs2+7mvIEQBk+0giQxYxPHQ4PeHrYJHb0QeG6YlMedgccXbsN03LO3G13LZICK1/AQV0CzzONX3lyzqyOPUrHthWUVAqpExKvZOqi6njXQXxEXeZ/VCHeiwq343NRbXwCJ7uWM0uhpnm4BIYncZTjDEmBWH2C0Lo++0E6xRBgMN2dG+eqLxp1/eU+GCIFAWyPU/Mbudq/OLlM5L1SMuIb1m7+7skzmalAEvG8ISIw7f7dJ3YloFR9KoZtCbzdnrK39L4zXVX2K0x/n1dmaaFCFD/qHvmhCcsPDRheTvqNSv7R6+2gOKpamfQobhH3ZOTFRCIzx0fIWX6WOX8SqtAc7IQqKdmUSPvqEqs9qHzIPqKfHZi5rfWZ5bJcgt2SsJzI/c/+KpIojs0duZfX7ClVBzlzQfj0A/8JBwyOuML4OhYhnhHjTRLvo8IkAK2mIbCH94fSF7nyoB7a626YXgNqTP4g1VuV/tMTFrxMGeY8p8VyU3unmyqCKyDSLqhPI+i1JwfSTJvwRDSk6RUDKd7H0mGYLC1Cp0ZwidfbslavR9H1bKIqXuAX7zQ48jMo/BP4jJNEGSF5cRs+J0/1m55jhJ/8h1vUiXGImTOCgToqYvhYGz2HpL56KK1ofAGnUNJbMiPdgU38o3/ywHqGgqUcXhH61jBNkXzuYX8azo7RCiOE6QBNHfb8dlDwqG7GTRgkxrJmbaw1YBKTUkQb1uHK3pwxJQdc9vM2zaraY9XCWi/Tf1tmBHisRbJorZzzd9bKlGr/l2/+jv4S4okhHP6yKirQSBroxVT9Tci4C5G3urS2Xz6MRu8QFGX7EMMqZl9M4/dzXoJPADNgHaUsXpRIuwh2kpTqKHvvTq5P521uViyXeQ4E2b/Cb7fib0wg9B+VQ0uzbrGP5pthG+IOloCzNVt1d3Sa3ds5LLIxS7GKW8FmSFRuv6qSdGS9BPAdsfddv1k98VhQ4AoO1PNz2HZa+cS0adK7Md8N4/dm+2AlYBiCfjJo3SACfW+b1wlDFswXJaqxgHFcswjoRsesD8WmI1r/ytAHF4MG+85U56uQLTHK4d6h8hTGooUt6yu+yIeV5uqiTS+9iUaWo0RWJjz7S5ja0vABt6ctm2KWKkz7uND6nYsQwXH6bSKhwkExu3XufePdSDWi6+v4EDwJhvjdaq7iKIvPepBrPdJ42OtUAWXwHoiXIJW0fdvFDQ13tLi9J9eLwnt6/4ja5ZaJHmeGcM463Ya8dhFpu5czqmsR5VzUu675pnvD2SVYlA+G+pPGSKD29pyuK1V1hU2eRZku24rBFGK0daWi9Vpgy3p0+N4dPVHoCnSsaEKOzZIExRnbtK2skH0G0rKjfyvu+I29qiy393yADlIi2dhrD/q10gPFYVuiKL8WNysFLaHYpqX6Z+EzLI9ge3Xt3MwrDCA4WplIlAUBLh642jK1mJzN9Af071Kf1XZEi0i9gOlSsN2sb/sIa5N1KKiCbLtdxvwzvwO/RyhtGLeL858HVkkO1QzHuW1xU+/TDisVuR+ox+za257k4pwb2mJrDXV5AcJRM0SoEwRw0jQ595DVYFqIlZ/PtBB/shpkgMItYqL03NAIgWJaEUPkkZVJ7L2/xMLgSLnAuxitjgAZxGID67QTuG3UPdN2+53Ap6xz5wgiZLWXe095tix2K1Tu+Vcqy3ve6dAyjLY5dzb/81yHnjy2n8nYY4zadkRAZ0+uEekh/lIHZGSdcA2eCsYOPXndrmLSNJRvhzkPad7EGaHoUppC/jLNiQvOr2/C4pCLSLZfVvwt0RqNOKnwYdOyX3m6DJqLCZYMdd5MiPQ10Uwbpp6izbwHmtCuov6Il7WN5ix1zuOuJvVFvfByLaPoAhjQv/ziizRKQryurtU+1QCSo02fakdheXXb7zb+zeULT2/zWVCiaK7VypJ7Utr+ZEq7/AvBsZcmu4y0rL1W2pyoDleomgtsKXiGpwIK8WsHqFryy2voDcYYaCUHBDci//BaPzKxRzc88kq81mRPf3r4Hlswau5l0I38PjtlMyc7xoocO0j9qKT92n2DNgFfs3fKuRQIBZ9FEWzr5+T/OhG1YV7tlRBZJ7pH7XD7bwg8WgYPydSDBJ0scLYL0l+b6mnFi2fK5BTkRczw6Jrnc6/NaFoGI4SA85GJqrtXkj64crVdwg4tQ03ff2st4jvHQ8Q7jM+U6aBB3Ek0d8PDoydGz04xF2IB/qby6AfAvnEqH2nNUgAlKedGGgSbKpNYHPkByEFXp/quK+L9bEiI3ek3Ve2JAJvoXDUceqeWoz9fiFPApDkyYmgjtZTAxF3y/pO8sgujS9Ip8CofX0y86gWEjMF8VRbx7up5331AIYwYcngON6uZ88sAqa+S3YLBHM8UOasGagNvQHmc0yQQoaUOBfY6t/5WRTtI8xqN/zAAZorLyjZNbx+MLtLwBPRRGXrq/99oPGOAowBVQExSxi2T8U0H+3RnKupYWSdP1TDJg9I+XbYdrIQ/E3Pjhfn13Smipja863aMnfRhjpNZFUfTxME1BLXt1ayJ1u/iUMLCsjvfsuIZKbuFkYX1UVknNPI1vaAmBzYbnzwWvpT39SUOH+hWuqtrwIpv3eX7aCCz2HYNrPQSnOuVVjH8Aa3AgSM9dX3xyaHcAx/LHuGO5bvRlmMBiQbX41mcb+oYMPG0Iya92cuoij0PBURhg/XbnRvOIqf21JBZU6wjjfMknu0Mp9XmsdhM1eLbKKG/Hg7A2LHYxrr86CBDwMUTqC8PI8OIGsP6b65HM6EmakYdiZJBAmAGJLIyTwTQ+/T6Ylr5GiLBVauyCF5t148zLT/frSCueGIzLBIzIElyJqFLE+U9qoPhMXP9G9Tr1FArfR2xLT4B3ZsnZPa6AJV9ijx92UPYShDfadsD4Ik8e/L/DyGye352apI1sCci/QW15WNgA2UvdrUH/DMQIU/Mp9YKMHgjOuNa2QlHWQO1s2b8n4wEXjFoQaNkA+nx8j3rv++tDMTmjr/DYXV9hopp5NzW2jh/5p5G8OhLMfrOWXVKUPUUWy+vNeGacits0EU2t9H2AE4FO21c5ofrZ4tPBmMxjBY3Vj3+lP2A7K24hT0TW9Tmd9YLi4RQxlMst/y2IA8pZ92m5viDD2o7SP1m0qJn3gdh+nph/ZSeTtVQzeu/2gRzb2ad9g2oZR7ceEjWTZUSnD7oJAHpnNv6EdfpVhYFGbroHtfByc1Aw0pBQ1JGN235mn58dUBRRE8kNIc/G/kRJQtb+j2c4yGIXTBx2QJ2mZoOQQdAaKgRi6zfrc+FvEt7tgRcZwBZ3qAPSNoZaQ21+eupdTrSDjrl+AXThWO6JiXA25DdkjX+GR9DHgzKCaWeIWqwLJpekJsFigkOu+0alXkVnpqSxAjhutufOBchSac/kXsCBwzk6GOGXZOhyWGbhDCjWdGX1FuOPeZH/GwGYQztUtXty1BK9yQDIEfGeVtwlvTii6bETQGNzlF7+yVyI/T2TA3yDIrf59BVq1Lkq6Zco
kjGuvB77SqsRVfkVQZUFazo5yZuEOI4cs4Xsvy28+X8s6fXOcMzH4W0WQv34xUtayh7P0kyKRfCZfMbQrhCPmR0X3k04XVOiFlmx2sVbP4oPDUpBy5g2Aq0nZT8v8/b8Z016fyLJUj6Yy01VH4AYriqXuWkTroiTXC1wY8pFH8S9zQ9VcNK6qIZkwdRkVzH8/ZKXStimLMpls1wQ15R4cx/argN0IaxhoNmjw+fOAqzsGgI5P5hgQ0O9jMRmBSdCfvnepxPATO36FHQZUo2zFrxfodcMnYF+ZeBkKOSSH8GUSN7Jde2+Vn+OBftEmnsRLOfUfpsQEGuST2CWsEGvq5Vk6HQAZoACHSqHj8Y5wzzop6xYVH5BIHbj+ZEWjl7v+TWIFYccWPRib83xVpYMkZgwMYGuo6dsDQJ+5o6MMsenrO4UDtyoCamzEuQOqDbFdjO9totCqI5wUN93Il+f22FrspbYC4lcx+xsM94aCo7u0Sdu6JO5L+Gd/WaaK9HF7kKTbKW91bRKjm4hmyxAMpo2E1jVi4LGMGrCQMJXG/DxWCa1QoOA5nCnR8FId5uLlRDoEwX+Ma8qU9dILdzOezXMuaUptdgLNlkNjtnxUfZG4GgqrGIQOsm2eN1MAy8E2AZzxtXBKHFbOlUC/vlOxgRoHdoflYCBRVshuU/sJj+57XUvk0XU+DOKVsV9/kD4voKRMQfacZ8SsE4B4AcgrZt9Lsihr0uYNimX5LLnfYoYt+XS5/lstRg0z8S53/s3ID25Cg7fzVv1o/U1SwYHmZUyqiyhBwBAt0nLyG+nj08FBetQ7ehNM5DoOQfJenl9elJqH+8W+smNBLfXbmgV9wiq035rGFyhhWEN59SRJ/ig/iAETygCHBgpo3vi2+dLar24EjmokzjdLMF6oFrHNF15ndjWDJEdO2u25wLz5abniIUIws5xfAdQHARleaYy2yn3QvaUd3owN+RlKpl2LLpS93x0OiIWDnyJCAraAQ/wJ67sCAAeTit49ZMdhxBVuS6AoAnm1uS8gNocCcCQK0Mv8zBVqSS9VTNB2Ag0w/H0hYjfLSKMmSmN5hR0OYbgyx+vFj2pWbq/35CBIhWi6nJMf6Tcs5E4aN56qjuQk91MIA30hYG6TuIJX2PJsuZKPgunhcnOI6QhNfUpWA0an4HIzbcT6fh480WYmh9OtaCkAIwrXioPvbSGeynbSUkExK3PNIkNDXNkMmWPxrJ1u+IKHwZ7VhtQE/COOiN0eC9Lka+pEcqMN+RL0528PZs58tr00QELWIgB4vR6fNNqiKXkT+3r7w0rJH2pTqWF8oXblNEQzv45hVksj4CoiKKybJmckIMxqHKd9/hhIAQiKySfRpLO15ie7pMckJHHTee3BdBHFJHuQL0nW+M9OGg8VsF8lJocenMq63i36RHAL3d2abAY8Zz8Ht0Ond0c6auq6mGSys0I4inigXUqtmyyGG5x6X9CclNbsJL4kh/AFpbZk+xDSAe9DD1mai31T7Tshq2j5pY2lQR2Hs+IeYKN1JS9F8sj3FZO8HyjMxAzy/fB7KwznL/lmW679hkX4kpJ7XWFMviBKYxVMS8/zCx3c80QLkk42CRwurtsktQIM/Fsz+3EURpOOknsnebklZDvvIJ7W7jOG68B8shTvhOV5PjYH7NMSFTLRRwrmhomDxg6fxJF206Y22jNMCSOr0B1PsHeUE1T6CIC7yLKuuODPBa0CguduL7krnhXV+7tgjQ1vMJsGIV3yvoRgHremBV8sniiEZgg9OHORXadruCm/y22lkevLD62EEvtE7ftADK/xW5okTivskWPf+6y7eZfgm8tOBYyZTlGYn0cUeBJQjhJ0FsJ5Wh+3NP1qquJIELsaehlkb6imVxdEBsGy4FeGOlWnnaXgGkVw1kI4xFu3L+Ig0zM5Wnts2DOJ+Q9Ro5kAwpvDLnXH3dnoRhwvQ5PBtulJBOnsX/boUjjTD60vd38dt5rM4eIfjipN6W94iD2NYKaZTPU3VXfpQ4FB0s/XxOvhdaUdALEVyG+SRhdARTR4TDraa7l1Fj0qnsfa7518oclmi+/HmK7coDH+XS0nKF4FWO1NAVEyQZMFke2wenTHuZA94G08e5Ib+dHQSkrlY5NhjBxiFFOa88H1rbSVRZ28AihqjciVo7oQJzOS73Wx6XHUHz9GB7Go+JSW4kwI6vHYmJesaHONCNpoRx6o9bq++2iSBEftChbw5pKV0WJ7n8lcywrxRv8zo9677T7gvbIJ8BuLousNjdGjT/HvO0Qj5SGWLE7wgNpd+or528fZtoP1Y2mPxxepxgAZYNHLwhBFtpZeVIX9OKnfRNQDRRUJoDzu+69+jV8Uu82h6bKNgxTB4gQe3PqImTSOp+d64dDDlUFQ4mJBXdLHaLrXNDxoDkxNiWSsDGXmcEwgO9bfHZtf2+6/9kPKc1eACIi3bPAn0+kkSayH0jcxfvPahZF+IX6BHIDKfuseaUomh1bxXFb3SKtdf9/Hkn3Qqbnztu6Fp75fKLwj6mjDHi9/PhGBPuDVUKv8SxCj9G1cqSE9KC3Y54ggyyTn2l2VSfxvoD3rhD1N7Yytf6jjndBlW0d5ROVLHxNldG2QyVrFPZQtlVH8lyZEBO6vrtqFKFSNT620j34IQjM5gl3i7DJ+Febxo7C6B4oP8gqag9hoZRSgE+eRHhMwam0oDF9QXw+s3XZyIKVftGiOoV+7D3DOWK4fQ5zwelQqd4FM6MnHDjsb5XZQYaC43Nr9Qqq0X3x0St7AqRFsKhtOFyXJ8RaYwPfyEZiYhzwH9qapym0gKMC8iphU49uT5qZQ0Qmtd+8kzA5OooOo74aaXxx2+MO7rjfxYVXdw1GmczvJ1aIjHQQNQDT0y7bQZLnw2egMvFXIbdkSnTdHy6oWC5SxAUaa7csd+y2fzhqnfcVt3blKbOV2csHkUu7sCCtIVYlz3+YvzPRK872Evz44nZ0L3oT7rFUlvUjR5SzhRIJWzRu0jt7p8hcKYkPTIfd9Lgv717GwZ4joLHGrTi5HYsbrVdo3L9gHtOwzZRQ9OiWD112P+wJGf2eK+V6+7HEzDhrD9nR2VaW//YwkYLW8F6B2BRlk66I7Yzx7oUd0o+IsL8I8ZBbWRi3BznGhtsfWdH7nJ4QRTWs507vaaPYDoQuPKgzd7eHl8ckhbLTTstpH+Kk4mBy+C10LVovhjJdXj1C+hLmCMyxj1zkljX/jrOfNvcQ6A+gY1e4z9hKPWegWxQWP2JzYLyIQuTjIN4jPsVZWILKNgzvqb/aVfezrfoeTA+zkbytuxQQtYyIMRqVZaeLT5nqa0TeNlQ5S7LJO30I2/lq29wqBecFsNuwbMz18I0lUQnlz14z8jTMTfzWFwAu7HVmcocfxBm6kiSHG/Zd4pEovB0Ld+SCnUSwO96w2KFTOPX9HKT4jL7jO9cAcFzxfIs6JFcqvOOOLaSt/H7+eXmVodOigB+07xOMlflRbXMRXcKbcV6ibyGWGpFVnvPO68arQL+MkdSPCBaekmAdn1CnEOMWTQFKvLk
+bov8VXPl0XkprMQ+zEFJG9LX5m0J4+tDAW2HP/eJb/uXT/OGUy3GnfU6ylNsZKWi4Fde92hMCK4WSLAmTFSA0AVDc4hkXEuOkqtKlHJZaTSZoGqh8i9E2tT/eZ77BZkwXy9nB9xl8kLzg84Bb0YF9B9rQ+yi/kbeLYdBmj/WOOOG23+clgLYC0ZmgkSJK44j0nChL8SsvhQbptvIcqqviGbFznoDEGZsaN9TOpjrEhKX1vzhHOVzN/kpOwhb8tTxNfymrMiZMCG5vQEvHT3S2DHNDtk+nJkwhQINeRi3q74x3kwPPDYd3M8E8P+Pjz4z394KVmWvZVS4qt9RKOvkvTde1LKmSA38JGv+I9x66gTe8956vX+rM3ZiIiTEnuqlCJWWmVBIzlr/4ztTSm+cxFn9KBmE8lrhPj0GEsKldowevqexaQ9DS5D2arTkISxmgWidJ1iltRc55N/6CpMDQqtSq3OArSkfpWOG2QPQEYV//8di4+QoabPlZvmwLYtHz3+jhJjWG4Ky9+YjmdZJM8mEwRGjwm1irvHvDtGM1OdJSxbteyAjkJ4LUD6g/embZqFxwInC0zVCURIyvOlRUUZ5q4zMwfFFGlZg36n2jMd7xXqw1BoqKn94Y582mIZJkFfHf5OkDYTrHh1n9c7VJk1xlh6IR8H3UCO8mR1Zfpt2+R76R7RoeH5GGPP8FJCvqjDrRhulRRLnAQlhLjAfUnK7DP+FxWv/V7CvPbCVoa6bJcMSPR33UOjc8BoXw0NxJrJS7Mg3jWFzu0ZCmcax9MreAPmdIIa7KY7VZkzNbF9DH6/o3vJSYr91yEJq/6/z8g6PDC9YD3sY91/UwfPhLEjcYX5H8nyZseijBrM/u/JyEknn6l7uljBAPZcYPWsv2/XndZbZVCbEspbP3VV0PBMcqT8TxAZfC32eiEJXlaUsHdbjCWaz1+kYgRxDpv3a675eKsjxpmotx1xVmMnKgL1lprec3SRECGY0MM10XyGt7zSHUKatTZ91YOSpovB796EoqiSRwxet6fTBk6if0B8kZ/6q8RHp5EC6xn9kWuknzFy1uS/LqZbglCmf0YiYsyk66/QR239CfqjACMd7d16E5D8/f1ChUBA6p2qpZG5aSk/SMw1D/5Hfm8x9oon43we7x9uwfmP9gJp2B8JI0gEdTlG3dJjs8nx0aPARxyvpeAw71oEeTzh4l4eFP0Efzg5bNNvfApgvKZz3neAPyqiRcowUlyN8gVt0+3Q4/pE++DjKULF3PTaYXJnuaijo4VSX6OlyQfx2QOnm5CEqIG1YVORp9LGnb73LabOgZCx5DMTGRdYCVWq3t73P8adhsgtwtTmKaJ38VateFu+Q05ytGwEE+iX9+qBCby9A4cGX0GVtYE+GTL/WRfnrdLDz53w0dgW9dvZPt/q42sTACX9fXfHCWU0SW8oAUofjKIovWc8hw2HyDutBnR7gab7bz9Zlzz7hCVtBJVXq0jB+auCfGRx9lmybPP0Glwkm4h9AdB2tmtaB05ZJuzeiBemKbecCOX4q2iBO3LBHTQf7+uO2XV3j5S28K46jiRzqTG0KcxyQZVBz8WmEwvS3FMN6hazr148r1tYRVoMSwt/NdfIX8NOOlBVgazrH6B+2mW5MaMhvYunNljnBR40nhXRPav7KuIk/tVtHqlrLMFDYOkG9bTsqyDr8Ggyi/LIr/ZnUY7mshB4+pBcqy20n1eJ7g779xoyizmjS8hZ8xdlemzsyvIktxRSslbEfoZ7cNIL0HFdY+CqZVDwbSetI8r5mxjmJWoHcyUrn5+pAbCDmnA3Aiy/hROpy30SCn8lLKT7XKn5/HNqBGu7hC+y/ZphlCPQcx0gR/U20wrfZ48wxXMrGR29cSI5ML4CwK2E2aT7VRdgZcSqK4gaKuWN7zAUDg++e4oqp4+vDYKhJ/4wWifcZGooihnFy7HnXEQr2YdDrKWcaIqVhoQ/gOS94VlY9ieFmo9Z4+rMZua7olpJ025KbMSqxalh22P0z/QRZeyk6XNEpdr2ndk3lhuucWi0lFnhdrdxzpIc4XKTZ8MZPbt/W9sD/UtaiG7dTE3OKqHveKN8LwKM4XwSjpzxufweFTrXnB+dHKYRA4qKuwGkXan/jrV/xc/2a+ZtJftddUHqzGWX6xX8o3H8P/xtai/3U0isWXKQSFPkxfBqOYGdzUZGrVltx4Vf/qMh/+byYSILNUgKpPKGUbQcwQHUWjy+uiuffJmf9NGZg0W/wbRBK/zCT3SOt3aGmOLM20QHwIUlY6uFMPSO1fa6alQ85qqsXUI9R7G4A/saVYkCzrNRKxzM2u01CL0AIN1juH/GEgpyiVzjdzNpkFJ1nODwqn5s9ELhj013LLeUMY9NcPp7YOMqMPUX8k2hBFn/H5mZatitCZEGjA+7lX23BL/Yv5RrhpxugFlkHrFplH+FTIOeDKD+THgzNoPtetg/H+QQc1XTrs+NZXrxzBD3i4cyror1J5lGFfIFWYpi8uRuoLhv/I4pRRhdcLKOXnyvBRypaFyRQxpOokM0ZJturTV9reK02adWhd82yjxQsUlHmtKWwH/V29CkXZiNZHPTNLgDAdz8rZa019oglygEUgfndqPQPNJD0A/xW9LKhniVNWbC1tTCtbAkQLVLAkUeENb0xh8u4xibg28soPOIHY+7OKjRsA3kKM4BVZN5pm9AveSAsBjmxs8GqP7fBxZvhZ8dZbiC27CPdwvK/4WCY0zEUlUkUHDc5XCKKSuBcC/WY/vWdk7PlwUHS8HjMegwTDkMQENxs+tB/SXmk2wGy+hRwuPUix+dUiwVsuzi8UUg7no2fXjhR17pJSQgdSqE6zE8eXL3WBaRvkKVMLfPAnx332MhrVoTcH02WiJa7VYJ3wPt1s/afvv0r1yt8Rw55pMo0pjsiVcs2ymuxUfcR9OdEr6JEqO9YzSscQkZKnikoosi/fzbHL1fbOHeXH6OHICTpBAvG6E9xvmQcf6BgodJ59Wwnt5eT8Ox/+qs9bgwOTL9dcYYWYI2Nrksctqp6wuIDzzg8FvquPDJueJIsgZ4tJzroXVADxzuP8XebBjOSxmNPYvs39NxOOXu+ct8zkbzBUy6bmEABbzSdpVc5y5R6Vxbez+LtPTj/Kd8mFRa3C3lu/46XUsY2GkYrjRzkk3ISXU4Oxl6bFpTyNUPZFRomLzMylmk/RFameWCBB9ALRdU7nyHrjMh81TOYZS24jPrmztk5fg771Dt2y73nY0nIvyjBcp5dNYhc7xXE6vrtdVvkXYhgQ0x1v99lJuChUCvyZuyZy5mQoPs+JOAZXIdnItDe4xeNK/kHp1235RkN/uYWW5R3KvATwIqY9Nyd8nVkm+Fp3ByXrdixdot2K5/Whccvp50m5fi5V0tdn9oy/zrYh8rWjMIjT5jrFhtarmADny8yW4V/q4z9qC7F6In1qYaiqQwzaNmpuBCgybOM2+K4hLQT3mrRcGdSEx9Xh4ocv61IdxYMAr5X5nL/1XNmN/hIU9VL3v/EkQLhrpQf9tsc1NArMcDpVmI1ub
7ZykkIZNVPkOxABsZ8ENSCqqC0kQGwssMtlMh5Wp//uryZy2N9Aszt6haJe/ksfPk9zFYF1+F8raGbgElu4pBdaNLk+1V9ag4dSq8tbgyEIprS7thW77bVz/wmwwUtwbi5mkNiYnJUpyabded0ZbXTA9whFXb5oW2rPXKJr0XUc8zaGj/6XYwNRIVkiqfla9iT7cqkuz1Qb6o1ltd16zmTyizqaXq/Ipu2ypWxgAE5xEkahVZ43n78uI5s3Wm15Ux2nJa/l/yV2i3AXEbL2l5ueNNnrreKNonQ6fnk6JM59/9jCshD1Yhju73qBAu0z1blpqUVyBf5TG4Z2536Rh+k2+8iSRjA5nggoq+0V1QpeAAK2MtR/mM7poPAQEwSaOS9V2XM9djuJTr6XWxwWon66HMgXsJ/pW3297LLfNlTmspQE0INFxuWrhQOGRqzZWmlOmXq328HLVf/6j5L0HotqX2aUXAeR/d27k9K6laCxKF9tIelNS7O/gp8IAr6fllU6DiFcWfB3bX8v/AVjEuXc5pL2CMRijJ9vZgNx2HHZp0SuF9RxgWrrhUXRkjiYHY9p4gqYb3T9tfoGqSbU5f9upTk+UyRDLV4AQSVWSnmMX7dVuwwauYm/wG+oyk3raRkdHABI0h1CEFI2C/IYXNMCKaoqZmQjcBAM4h7UC2vo161XTgmCDl9Lx9jqbEglKq/Q3dd6pIFyYucfEaljqNdd+WurWzD167kHd7ji6YvPxnP9hfq9c3OZj4mgbH4EGOa7/pC/m/fF/PxBXUQVp6hmhBISajY9Gl9fs3XO3gnwbR2ISkqMUuG54DakLbtqu1+o9Q+3MOTHES01RG7Ke/3sh4ARfSwZJ+TUhDRC4q9aHTkQMu15Y7vgJACDRLzLb7Hka/aqDrKiS5v4SQEV5o3hVXgz7MMVp+2+MOm7/r3o7RlBm4H3ndCWXTv7X0tjFRrZoViKNR6rx4Ca0Wj13khRX/jrGv6dfuKmjt+pKZEf3MbirDZ3XurX9WyP0oO4ntROF6FQZd+/Uct1M/7MSfr6HQYL1dqWY3jUaB903Wcg9loR7CuOB26alfYEDGCGoetVQKv5fkYROxwVw+tIi0ryCzxSjdrcD1Ju2mF9tSAm0CXyRX9jnBv1GB8tmjSYJcBJA7KMgC8yaMIIj0Z2etWCZNkbQTTmp5CDqC7LmFX+YXF9Na/SrfG/hjLzmh2FFx//0JjsqG4VPu/J3pnU7n7pIw5zcy7DBTzvyDtKkFr+G/wANpsAOD/3e51+jnTYem3rKV2XwOUozOvRgHlaZ+QpOOg560ievmYVtDr+pYfErGTkjQiMh0fuaTZPZ1TIPNui+ITZpUdGBTPRz6eMrhmBr5aT4CGO2X/9vTiPAoaABhjwcwnh51m54AvkqrVGTlymmWOochuzLKTV0F1PSyisc5NDCJ5LLmv7v/gIHr4KBGL5Mrx3mWAmOpNUpxUgPG4scP1+2Itl7thbYyKf0ztKPb6hpQ+tB6sFHLuCJsdgQ6dT4mYLffmz9gT0UHYkrTn+rz4+/+2NMlLLC53OiGVlcYa+IjPOlc891PItw59GLuxjtp4uTb6ln9sU1LFX/t1wK/fXqWKktc73aCiM1pXqT7pncTk+kv4TvaBF80RV7ePqV3xomJbFnVqoQTK7mUxJ6564Ds3GsvHlF9zx1W499/8BvGwvU4WSav4LOVa5hjYeYEfssmE+7KqvzYKBSwKWBhbp/FNA9X6moz5tFNSAzU64cJCsUYIAupTtOZuvaxAboYSoVWBHpxKhFRHA69Bf7YNlVKlHQAk3K7ctnQ7n7k/XUozvcffJX8PfrG7rR/aGT1FoFqX+z65pwT0r0AQb5pUTtarfa0ZZlUjjraQjJgnR/A0uugm/Qhdxaww5nesEd4M5JYXhzaxuy7gWRtQI9drVIVqkkI1O1rV22oGuHoklDlppOD48oci+CBnrLYRvvsBkoUYZADVp+N6DDm65CJJzIkXKOM6IuKAaUeoDEJ5SKLzY17q5zDmTcQFj9GjGgSPLqFUjNWGskmwb907j6lkaakr+fdY71DkqW1pW2QoEzZZbVBTbNB+5ptVysk0G9f5aNjCXlRU29FTJXn9M840jWF7TO6TY/nEwfuDMqmu/Po2DnShdjYnliPh5QyWg8r8Fnj+tXbCE3AwlSHEKkCgJFMT0LVbzy19vDT6KxM4nvxLAO7vym+luCkOKOAJcLY/nPOq9/Es9OXEy6UJlKDYwLZyEU/u/nuDj50AgQ1cDoxwkjWiZK9B/p+qOnuFKYxYQ3yfYyPdt/WzypjdWEjVkICWzdpNKiE4IchrHt0P6pVP1uqwsL0VkExdp/2N3lePXTJX1ZnVBUQuBuz/5MVVbbELsjpiBUmGAoaywbfTJrSV3/YIcUf9dlISXflVJAq0rCgo7hDU2OFKsUGoiaLVU3wwBUp2OEWBJpAH8J26yhb1jwOgQ+rSclhFHmavyRYlLQprKbh8KKaXtQ54nNAPHx5wfMjC7w9Mtdyp+IJZ08E6pYqSnv9ME1/0uhtEKjpXpwDdW+4JFyaWAoCO37MVABMMf39lnxa2ne39mvF0hliI2DzXnyKAfGSK6YUYNtO9s3PfgzXTbiwXK6vPHvoGNFkaH/WU5N8omCqlSVA60BBIsK0AZ0J8QCGh0G2G1S8cHtCn7k37DGpiCZReDAxKhYIMO5D3MMNX7aG5JX4i/epYFr7lWJepgFAOSvG1E51TMU2si/DtWwLeCoElrXFcq1ypHn22phfYruFvi8ea7Wl4RVEfVmpquiitbFT+dXjaN/OLO3MnUoTtIgw+OGOW1jw/FXQs7/YqJUdXpR5rVuWQz2B4Q2nJMQSC4E/PIMi2KAVKfavzhyActhBpPGgKMwHy/1ddK+PslA48hr7OzDmJ4/D/l4q+fu1wTJpSLG/YBYoxYnA1CoNaVDc9PbAvwby4WNhGHHj1sZJ1nfWx+BIDEPqzC/uLQfwotBW3YYVhjaLK0IPG2+hXI0lcox/dBSBEw3SBCd93GOuIPnfy2YgvfMl0yk9aXB/SzS0AEV5Nkv27CKzoNZ140YzX2vmtvivnLrOJ6+uoo8pFPvgnFPSykp7mtDy6LshdlpFV2zXV7IMIv63ES1aWm5p5GdEladvbPsXOpSdU3SBAz1+pAwOnSw+409SP8dQfaAGKjjRu0WwfnRf6IcrJistrjULqkn8wU4biNFqYIgSwIFi4pufNMGihFW5t3y8qVFAS+PYL4Qc6NSC+rfh+8XEDmnTniUZ/6cGBkUnt/0Hvt35ZTvzsnkhRn/OIpHjVK6zU6R/4O+zI9ZCjeHxCLKZzUROuSEJTNqe7CKy+Sxrn2x4D2Miqrz8P8nUwG+2ADirY6wuqUIPeuF83+N5NHyBcYZr6ww170x1rIE3KxD8aTXrZfsfS+L31V/+Am0sg6ydzPbxZHHMidM9Z7VFuJdNF6M/LXL1yPOqMhXicrJ97KcEzUnqlk5/ZGPW4C8da3tMYyT6Ohga3WZrabc+K4JVlTGFjor9aLFravmzpyaYlfRHyRQXvJV5Ht
B00/62OWdzrb5hWWoxs/tRmJAhiQymV58YO4hwmdsmKp15cir5OXGlJl+otin9BTPN0sRv8gKjJ5vMjHQAJElZBvmrBVHv3NhT07KoVUWBcjkRvFsrNp968daCPxQo4LMVgzkOO5h0O1LbN6Z/BI59sBXYnZA3OgdVE+RXYXatZCPho7R+Y3NxHYy2rIDEkQHoV+neZGKUYR/G2Y/GVpQRoCFEMxnD9Q+t+AtKomDIOwoh8DmA4QJ4pab2jaYGgrLIpOKqt8i1yX5uwrU2i9YJInalAAsHJgxgsO45I4tY5RCbYvfHELRB9LbFKt6/CXTtlsgthT5scCMNiZruk7dirocEn9Kaw17hui4LCFYxV5cbJHySYE1f12xLmOQox77j014//KSXRdAoM8mbOwRvi63j1nmeT17vRJJ9KqXrVz/6petL0tZUbwgJVBrWfkZD3GYRRMcmXdt1XAELEyig2ZWjXV8yATHK6B20clnH7UEopU0xigLyURxfQuHpKlyzoDMAzE+BuvCq4c2lgfk7l7TOdaRgalWsi8YX9Tv7ts0q48BDW9gj0k7dDPzIwf+3f39ENSkiLBqxQ367+uswCklAO7P4PNiEyNM1nXOv2VGfnfjRb6ZevbKj3OksSJ3jlfqSLfAPfTP6HlrpwZ6H2Y4+ntV5oXOzl7f/jSsb8leN9TkuYaempGLzjC5ALDSRD8ZIGauiLoGlboL1bfZS5xzFJP8n7aGpEzjrI+oswe41EKjXHnkw1y+mMJEIseTMOZwET0Uz5LpgbGQ5aqTmYbLLSBVsZi0upa13bmZ1m/LUWgInQbcEavTUkkGm4YwfmNvmNpT/KtOp+k/+l3bjpCBJFRRaklb5rnr2cRhkTSHXqZKEuOh0y99vcYTULc5xTGhSvv66iqN51tiJRKQ3A9fO3o31qSwpBTEJgxWOafZ3xsHtOdXPhusJK9Lv5H77+Co5+schkaI/pMCMxeu2YwZGsMfk981FbC8FjxaCOvNxzEydMoD2Lz6MiiWwlwbCSX+nRgCNsBxMJy2tiQb9WvQyRX0/vxcLnM53U9LuTwd9rKyxVCBSgHgvctGsu/6k4mPQ1tIYKw8OICiYcI879BVr+eKUobzUL75iInBxj6zn9+qni56ocfjREVQfJl5OvR3FdXshZV+HKGMCf9hTD4tUl7aXpwINAPkFRWzilp0b6c0qPQLposLItC/daZ05z3vLzvWOaBaOcZN7hI/4/kl9SKpekotzRGb7Dv/a5p27y46zoHTwohnLU9J8QXM3jCp2efn1u3zYeem4+EDj68w4YPn9QwlvM5TsJfK2Sm4cv9YLOCdTmMJuc3jNAhDqpVkMCfACscggPWn79Kp0z7K/7zsyrKcX0RRUqwb5iFiKIHt5yk9Ht+fpxs0aQx8ecb5fQyni7oOI1mwzERCi8Y/YVKvxa5zMm2SxN2zyVJy2oufKPCU4zgHOybg+mhVi3P8PCgL4iA3TI0zbbAKFv4NzKn+hKg2s683b8xLnOce0YelU6pM28M8oHqcIUHaSEAn4CUGlOGZrnFhgP7ISegUBKvqR5mAT3wR0bSiYiBLKXUP5xmRQdMBp8e+/V+o/eqhPkLgaZ/kwwwUYeV8cO4kZSYD8UQqNSNpz6oiNJ7VYM4YjgGQxL+Ze8aK82MUJBZZgwyph570hY/ofn9Mn7nY06FaA4FgkgS0L4UamMcKvva0oEnIUU9tbiuLv4q515RsOYNed1dbk6/6PIyOi+adSO3KG9Kw5zNaEugzcgb7A1e8KH/slCwLHEw1goQtL3kuGbGkFTshHldDM2PuyPZgy/4I1q8/kzhnUeoPBqBBU6RgPwgl77DlpctyyuDrOfehPlphB80LHSz+d7uKDQ7ieJxnWyo+hULw1k/KEfXsdMg+McBlMiGKWATz2QHCk/oH4NvlU+V78KUn+iXbO7itXD6Pe3UBiOjxee8bRqYvs21pksrSWoOTDNE4cL13I4OUOjkMTEDgVfW5LRWqowSNfEu2Pp5Dxs4N6xfv5vuG5RpESaFov59OP1FSrDkoIQntJTUGVFeEBzQ1YUTh38uRxSJFzzRg3Cs/iLN3Wd7HWRmcOFFs4Svc7ShsEtV/t1zLX+f2EBvftRbRym3YKnWYSizoaFjUrwPdcQ8mfritOhlOMrw1WuCHeE57MRGRkVyvco2jDXgYvGCWnF6YKf1toZcq+jw9fBvFE8nnEwqjCv7frFu7SVwJ6Afqpd7kpu0liFaQcnb75aVJxfT5yasNXjxGjLnnllI91D8TdtWhZtSoOVDPmnUa6XFhcZlAzJa4AW/ygGKnCHS4P0a6xZpgumgQEGdx8o1FdBwTcDojiZiNc2un5U8JF/6cPmeIJEZ47z9fp2KTrY1kj5H0YN0ZhmQNaASxGAMbNS8L/XUORuEJLK4RPxvutb2rEBgrByRlsNYrLw++X6BJtZxZvThnf799PeM1jkF1KqTFLYtA4oeT5CoVd0k6AsQ2QX+nr6Rnbq/r6Av7rFqTZlrdV+w/Zo9S7T5cJyrYlA+R1oAJhbCPP3fs+8/oLqbeeeh3xzBW2/864jGYfLkD+yH+H2zQBtLWv/7dAWJC0yP5hkAc2ihwQZSriyFVkWJgUtVPilHDS89qhLS4OljsVmAV2ocq723Z8ImnuQsMLaUadSvmQH8NvHTLQZ17gU0WK9Yrc0H+czCVIN6e8ANh0a3znVyafJSBiORw+fvcXj35jPan5MKR+rr3cQkEfA5BXEBOeb52lHIdbzA/vepHXcgH5edxAlZFT5sKp6mRQxdbqPyo9C+/63Qvo5mmZd515qC0LwwPjGbNsyiFdQ5pvrsBfH/9rlUGW5m5xhhSzggVzG8Su9FoC9mMg6/+mFD7b/xGPwoa0faXx9YCn9CnhdEFK68d0dj+S8B9cnhdTl08Jm6ojBIQbTjOmO/In/OF68zKoPbBXBAAnCMUO22/z5XvkoCJu2RbliOquSGpdlQZee/rArWz/iksaINXnJo23FB6FE9g/8Mmo34DNtq8GFIpyyGESPMedTgnPzfymQSwUmKhraknSMy6pLXhXE6BQSCWJnAubAeRiEsuPoJbzgUq1IUmZZzM89mfMmmCIknrltknBwKva2gP/nHtgzsjR+PVYy94TSV2AizCew83zdxiG1vVGuNq8DXUxriLJQD2YZu/zXXDGNPaNrsI6rf8PnMmCxVzydPHjTxde8q67Ue8+plLUUky/MdkTdKkjUtewCnnnsu0EGlbcgRIXlexnzh/3A7qWBUrn0Gxl3DXpYvtA89iSiYEYNxgmoI3U3X2ZLPcCRR2+aqqeNp12fCYMjQhXMquic3vDFVe4R/Y5EZiaO5NCPYmi5jyRptdfSbIG2vWqru2HRidcohQUq2jJksgWww9dsPfsafNmPchVjappRhE9DsjeavnadE6PZZnZ8YtrOg+93b8De83mzssKFVWP6AyeXCR18WICSUxf+/WTY7eomJqiaHZvVeIOgXB5lbDUjmsclVh21GGY/xAB9gKARXRnckA8pe/EBSjO99v3PNNw6XqjjD/NP
HLLi2MSNKN6ybNIqssXOZffRal2HFUKRM+DcleNXfCMIfJyZDAn+Z+CFdG0XXbcd7J7HGnW14KFQAx5IgnT8A0UL6zn5NB+WPD3+NnffzTPSFMDAsPc+D0iNqpvoVsYivqUPWZtOZN4kjYfbj+3qAR4BRpCanCm9yxNmXD6A6Bf+8QVvdhGFOQ6cCjOITPobyV43R6/zI6NAcYY/hI17v0zYN6Qf/ub5Xjk0txuDiCu7YbO9JATp0GrRWemRvsBRL1kEvPqhy+oEeKm0SUthr4JID73tgAFNHsWQbMhUKVH2FPiMSwK9RJpQLqDvk2Ie/ANYSOnGNpXZTLu1Gex4vdgLf+YiEjF0sYZTsvfjHKzSakr0vwNEsRgoujWjKD86AtXmishvtT8lYZ2QUfH4vhdxo2Lu/CAYizut9v6mmNMbfFBr3EkMOYE6b9u0LEdGM1EJu+nONCa8ED4g87PKLucayTkcErhYYVdZxNfDO4HiKyQKStJLOPKR4xAbVlZmiI01hM7N+R0c3fTc7+RkvkkI9W9nipIEiu/1w44nslA9/BAW1h1PiM9EagyY0dfTH1QBqlNWqMj0OIjy5CXMN3FvnUMR6wQlnh3QOMjEYrk5fxGRpJbSAZ9ZhTg6y7Pz9Jhc2U3/nbnTXSj2OZbkbCA1iDxF4eK3tiNdkB0TvxPINC9Is9MERS/U8UlfURTHQBSCyxFN6rs+gSTIxSTYYXivv8mSgMuerdWYZRDydGbTa304PTKwzdTvXJcHxiQKex/LqCsXPMKc8KXv2hIwlUFrGmK31xj3d2o4Vkn7b72CCGL+hH82jmuN8vuzldj6kkKsUitwoVU52fW5Jy5emAGF1rsLPpqJJrh/Y7yVSB3WT8J4Nf2ZmMSzPHah6PwU9HoW8KgzHnxmjv8gY5bIUtqwvwYNu10wxeu9HkAPYb6olwOlr5LlZgF5L4hhFxb9JMcx9MWDo3rOlc/EWmnE66y/aLxgx85gJr0uOByB/vcvmwfiBfk5hKM9dlcemwXiHZy7nFPl0eFDBiQAdqw1ZQyIE6jec8Cef6mL3k5MkRhItIvzKqvNRfHYRDAAR5l0Au00iQRELAhb4HOD3/4oxbKVRhkj8TCAx4oq9TUdTkIe6ZU1ssIceWrwh1oYQmq7hTzSRlBtX2G7xSiv/lMtlf9ILmWgd7V7kTy+Z5n+QOXKxw9IxV75YUVVF6al50Nrna7fnpL9r5bS/UZscT5e7frj2edNC28Ga9klr6Zv+akhders1e1Z/Q2CGP5AHSV/0k+Qv+5BJNXLF9wXZ+vl343hLeZFBshn0biH8noHHdVhdfjYbAam0gk6jELs/XI3S/h1+76pO+UWUtJC/n47+XEHXQVRfOq3YZyi/oOk52bP+YCUIbjtaxEr+C1nQVTiHSU3Lkquchlm4Ktrst7n9R4+xTCkg0g/d7W9iYYgbfwraFAkyCV1G7WIorxt9+ymdonO5/GCLNCkbQMeFthWtsp59XpTZVB+27HNpewdkcEsngilVIr11BZw7SIeq7+O63R/szT/McvULkXaSfciwiQEes5EYkxKPzWr2y7YCd+HprzNVkuzUlRwmO3bKEUp8pOwrVS2vSFhmV83PvMnFoCQN1Kgi6cvO/jTColGqXuJfOEHXk8BET1OuVDI9C/lRowsNhKrkEQxjXtjnu4jbesxlKAczayFiqQ5/zhFVU+q49TZXAmJT27OYpnInGrYyPpDdJ9BNj6rr8k40mXyGhTlafRZV8a5+wnS25Y5d7IgPmvXGfv8K/5IrnqjwHFmOkXb/7qGCMyb53N1oSDWajHWncNR1SbtzPixef0PJl50crt+Vv8ypQkO/XIJuCsozjL/Jc/xdB1hNNJMq/+umLwdNiGICAqZAmI4ToL7kf+Ep0i28fWQXgAOdq3UFWXDSQEaViRZJM77KT+FV3+0cngtS3+gYFz80PuPqLAWKgWBUX7qCGt07E5OkiWqX5bIX4hUb4ExISk2fAFLQl4fkhC7/yqzch7xcKWwh/uTgU5XMN39QB6O10DGvWvOeCg8Fcmk8ATUZcpHjm56IiSDTzn5wj6Dtc6m8X1yBaPPVWsWrbYqaxj5s2UIoUH+Z8RVgKgPhVtv5NOMJCv+F7cfpkU0gPDZeCTKgTZnfZuM/cuMoSSnJ89TGeBufg/lz9M5OSxqTgzhxgOTwfYz0i1dt+QKiLKcKV7yK/ZK3v7rjc0PWtlq5ncYrC/uMuroHUAO857KsquF+3gO3Uia3FpfDyIlZoiiHnh/oAuBCKThnrRRy79HrmbtQN43HhWal+Kfk7SfwcPw+CKy7OZe169LnERx7ZofJEbVAa4/mACGXqwiuO2PR5zam1UI40O0oW0LIqxo+wszlpZTN5Htub6M1aLYiqtzkKzE6ixuHSmnrMi8g8dvTSNWtdHchDr72lHLpuMmfLPkeaC0/+bCEP64T7Zyb+ZrCwfc+qEQeS3Qj6Od+4N2S0HWYeG+fOm87ZNfs1yqgNsIfSehlWN8RXDBuou0ptOqjhzjEHJyB1xP0szVFjOM143sBUNJtISvtReQ4flST680X3GAuuhFoj5NSgW1E5z70qHgfwClCIFBaFbnexO2UDRmVtHzRBdrLMYilv6q3f6dhv2FaVH7RwbBnRUbk0u1/XYM+kx+53Rh8r4g4bHK6VjMy5hTR5lNSPhv9O0eNPEAGIWsZMo46yqvJoxRgcTBdpc/rQutzcBx3Etktke0+Ra7/JH8r0fNX2Ub/upYQ+Q5qJulJwYBim52ra6ClIQlcIUGmcJ5MM77lQvULm/mgKwdTGlh3cKlSMnlz3G3pCL53BBf6OaXamYHqV8kYMRYFoWazaAtZBPzXPj2CfVErK0NuVwkfR1/bp/GuMSXsLC5zd9CfPBt5EcffFnkDzNnHBi3+SQrcQbV0yz+ktXkgQdlvdknDjM2JD0QQdmjtL6ZSxuDh1m814lVIMzCxDxumvBH0+PR1RPgfdpsHby4TBtflevh2IHjEEzNp321NfjIIaD/CPjFqJLG/6aAPHK6pJI/TurLXwdmoxj56LReWJNftIjLzTqfZdRpYbpNEg5GS6H3SqyE5J+fjBM2TEk7d39LagOK7qOWOyBvCzBQBnz5Oqi8o5fZXyKHbX74ckTNWi4LX8+4yY0YGN1IEXv9pIyJAPyV+LnBuDePn6mH86dHgPpgr/3U5m5KNumqFyE4QnKuKxF/JSvDf7+xOwyc7GMvOpb9hWJHvEWKNKQJKz6TJ/SzNpW+ivdtUvxG+CzRwbAA5cIz4gneCvS45f27ocHQZ/nKgwaWsHlwbD0MK78XfyLoxl15cvtkYIhi/cuLsWOBBFSunki9tnoZAvdYYlCP0JflnWEKOAMUdtdh2PhnW3wV3Q8VDA+I6GzkowtSPprtHE/z1QxqIFxEIFd/WbdrUfx0T5IIm9vI59fT9j9ejAkRz32SOk+AIcyNm5swjaudLUuXYxqUe2yzZ8mXasEnDWf9K6HHikNJFlC9+NmjOZ1S3uSBLPgmwpsNRLi/Fvb/Jld2Gd/dMYRtk7roxJ0aDeA
IiMeSk6uzdrte9CjJDsDPAASF5DAKPrzHC/jrlowLyGZqohu9Scw09NIioyOvI4LIyuGgiGPjPdpl0XVdqK8mDRu7PfL0wG/4aarWAJLl3p7kU9s2wGFP/18NfBRph5pe1gmD335hVZp2HJhmj0laNIbYW39HPL2qmFqsmJzI6Hu490VUgWqOq1fOrE/L4pfWRlHhIgiUGkiBmNnPXcRJ1n3NUf967u3E/3rb80qxaB90/n8+W/ynwruez7ErSBxdCDbvizTNF6PNLPeTAhumbJzjeYaQ7kHs7DxEbxtlQ3/lZNLmFP7UVDvxdtYM+kACyRx/V1VD1cQ3uvgu63ef6r0zO4cEdIwZYgKTZa86TzaxAM5uCesxnobIjvZROehJc3u3ZaHLUduBh1EU3jjUVbhMYgv8GbhZZkTRzLDc/6kpVb+GXpFGjQ3MW+/ybfWJmbj4pZdR6MdxBmz+3wYfzhlEeVnI092S6YQqjPd+cLKDYlE3gJh7XjdrUCdNz4nxzfyarD87i0dISTUC5WF6eWpyL5S+P7eyv8L/DP3YyoCcjkQ+JNi1YRpE+NxyVvkgoxBUXoa8Me6YOzaMWp4GPiH7d6Ohe61+VOgnyz4rOqPzCnmlk3I2KQDnOVKnYjH9Kb1bfutkhWhwjCx8HJx2REkwFgWf9q0pgALAVUx2A3II8OD0Nm2iV8b/SFH2iBabYPubms/rDuxMpNUCI2FJ7IXneYa6/FDJzt9VfFnfnvEuLJ5pRhYk9fTZpBzCho+QxxSio86/UcNcd5W76tc0mCp8bf9eW+U6sAFlQh7aRCtmz4KDWoSMpjKrqvm+S1Umb7/43bZMQ8Xh53+81aMp7XAoaoEOoU8LpGJDj92EIjI8jmlWR82/GGLMLK415W8q9z9v3qdFXId4o9ikm2hycDeZ/n2ohPfHMRV0ePPSO2s9ln05tumiHbMA3FZQEFwEh+vO2On+wHpwOEPTXj1lnrQOeYYNKQy+Z98PYMWYUN+PEzOgW3fDSwjJHfc5T4GK4jYrj4YQxcNtS8NMNEDGSel3T9+eFw1Z26OU5V/4pesdcRL8id5KoM02a2VsaTgopS35tX/AHG5vmH2JIY9ea4D+ljmw7+LIorNsqhuY/WrauW/F+A/YRT88/x0I7gpoDF697x+ASrsIeowbdbmDneUB/bJjL5SE1GzGjnr9J8jP1WpYsyQCWG/Dei8kckMbWeLkDeClWh5uWfSo0QX9zZtuEs9viB6Xg3V+ndJVsaKn6edG7gogrqLiIWKpioHkww9lqRogn3IVYuXR5UdJ5HkqvtCJpw9+EBky0svNv+oDyBDKtX5u2lKVgosT6HRn5Y7Prun9wusq9Oe6PyNmNgWZnQUYzTAXP+wPHXGwVrFk/SvdIpDkRdJmJCg4b/m7fKY5ojEJnNMZqaE58Ts98mZw1w8NNwzfN6dyNE4tInsPhaLPI33Q2NH+TQzlaP6zldzI/sJfjS7orhgTEAiLdZ0REH1PNelCJqo+AsNHIgBaNy+kfni2+v8AZtzZyFTTlug2wROTXMlVP535AIiaryOlA8WN53+UgswsZSa3QDfMiX4rHOh52XLuRjLeBfR5n5n8Gmtd/jRfHgPEris5thv+OQIlj44tHF0HWee7d8Ye23oCWFNgLhl3YaTkIHI+Z/uQ55kiKahTzc8blXzGqDtJ2BPSxzGn9YqfFgaZQePzwmcIPYpsIj0w7QsyBlb1IIQtprtg9OmqGAXyxK4Hc027BBp11YlYafaN9fPwvxYKpt7m8mF9h0iYIz97H8JXx/vpc5dJTS+6xIL+/nryuwjzW/Jc6DNiGTfNGs8Wktt+3Q9u0HO6HBDIxgsVyKv51GfKFgjThWCYcFI/SASHAQQ5Fk6Cwu8SLDniU/Buq6eYggrZKOGNisYcr5VgKLLw2AIaDCF1pVzTEpg+tW4q2AXtVd8FnPlBGcvpqTyUr7IUih3nraQcHd/X6NyqRU2zR1a+c5FK9f3mKlT1xTq+KXcb0zb/nMiNbz3o34HiNgLPWFwkWlQhamWP1h5aauCCY6zVtNuyiXTdvSrunHQcLRI+X7LS0XHl9yL4PaX67YM7zBHo5NP8YegtdQo9zjXlEeGaPm9B4/Z+LYg9RYd29TjxUuhkG7Jt/ez6/NKnH/gqFMeyFY6DJU4EXVyMSpq7AVpHEi3LlpvGYWbvw7LxQEKflXTIv2wbtKdYemnSsYj/8eXjGYJ3kq9j3yJr5I5Xkz1Tgrlx6+jxotbUkezR/Q6VEq4bjtD18rU8Vc8lna0CTG1d+/Uj9LoHvH5K5VwCz0PspHXa4YRlAOKZ5f9i6Gr0o/C+qzta1Ht5eChw2Fw/GPFJyZsRaTdxLIMp45vt8s6BYbWm+jPqRNeaHiNpZBRaiumMWME9Y5BvVjd9Tb4KKzOPb73NPnkQpfUzhstbgmynCS/nBDkrre07VqSNfjpmBU9rSYZVMiY2QAXvlCOsxcmcvo1DFjhwFyt1jOZzBiivfG/7io5GOm9iA6RCiec1AhmelCnIjDUP7ZRz/zzZomYpgu2DpgtpsLXy2G/I0zv6ldFhsAFv0L0fNAh/2FqXU2pTZx4AuP4UPrb+IVici5WYn7O9ouIcvkYu/a98UnntPqBIqBk5t30zDOTz70awvCfUJuBVxEuK1rrbnq38j9cwXILUOn9QX/WV7EuRXDoAN8DWA0l/1N0u3x7XlT6BxDDUm9qTHS6MgII1hMXnz1gIjT+A2SP6EAqW0AzXXEWoi1ichEAJNprnQ7vUFKitM4QgtnyMq92JXWHjfXv32fT4p5HG0YBIbDO0/fGtkLAoRyvwQVedW9Em3+e6UIudZ2PTDR911DWEvx69i5Dm64WkRTvJf9SOV7edypOYwPl8lo+m+fLFNuPYhNncP8Nlc1PpMfzjRp+G8ngYXO5ZRRYXurzG1WB5YB4XIYrat80Z0ftirKQkm3LXEr7+OTUPPhMDcH81bV0bDnoU9mvx8RFLZe2a6OpxujLRTSyTXjd8pH4PW6kn+7chnO81CKEL3BYr6pg5PtvFhrTA/z0wG7CcfkuGKR8lXKs/RjEPrHgAWqrjDK62q919BnXf5htrYs6mnBvTFVw/C24OnghFpsN9/9/376GUDChty0XrKlVvTLyd48vVif18hsCtosDulw+Nf/PfuRVbhcFPVdC4Jwd93+1F/7hy7EjpDxoCLAtB2GTts/x5HSBW1SMXpdYkY/SIOEr7XOGiQ8iWgqxtZf9HNPKGFTKz3valfiCwVU7nciOYVFu3voAy45zddnu+HARo7OaHizk9PuAGm3Bqv7cmwlHYZMl5nGTmKYtj5KyNVSh/4H4Ftqm6NCU+jtemnco/KbJX9ki0KagpwyWen8YwksODbhaVIEzWVliJeSW7PxIpqhh3JXy1rwk9UZFM+RQ8dkG04dPAPZH2K3wL9DICGcKYlRCDGRHn+8H2UvKQa8NTJEHz1l8qv29iz/v4L51Dj/CUSiWtBW6m8i5GbxsLno4jPLG89oo6mjz/e9peUC1oSw7cWmgZCeWOM+bPQzFMFUFD8T
JcKtvAO967L25jocJjj0s0o/kaKX+BQGk4jALStKwRT1FqWu6aNvBHd4jDX73ZNnSUzPUCLUCF25ZeYHe/P6Rr3Yh9NIfjGHLWe03+2tWwk+40Iy6pIVVapZsPkHyp6mynrVGCEJp4svMD+PPZ74D8soU4Yz/c8om5dwcrk+3uAlOP+CvRQ9EDW3b+hRNhnefJ+0CAXVbryRx3UiPDQZlLS8cXQKbI26fgMwuTV5a/d78hQ/1rGvxz4I2rxLw29EcHgahRl7MVEADJL6FUX982ASut9UT4oHWzHhLGEvrH8Gy/b4AvSkpwTfQ/j/l7yV8QOW/825he6ObEq2Yg7oQMgk3YoIrQg6cxgYU1+4U6r/5i4FdsdaCanqBUIN4M95np8ergzZ/VjNTiA+xJgW/gW7KMQ+w6tKjL+QzVEmOECrPvg3ogKGc+e9ylU6uVCRU/zoXjHsTIIHbblC4lJ+JfT0dFI0FYDe0K3yI6imkdLIg1XnbwBO/YRaj7fQ6A5NMg0HoWRbNfiPd+U/DW7ygSQEkl3iU07OzIPtEdMI32IvzZd2Qu1U8TnSIs5+WJ0/y4u+jRBS6p84BJZIcwUjvWzTOQz9I+5MELt9Ch3NKRznSFOsez3jshpJ7ybnVE8pSXbULxhPqIIHAnmDCK4kX8CptsKIqYZcrHcJP4VeRHAN1zfIvGfypMD/9/NK9+HNBf15t8KcrCM0dJmWI4/rvnGAKCB5reMP4095nPlGIWp+V2eflTQh9D8VTN6jIOoD/Xz4JUIdS639MT5oNmHDqlo6TMN0dJGpsaOzcLUYcPz+locKZ4awP5z8zNRA+NMBnhFy7Z6tHpAyoC7ZQEBmfMmQxbwUttysipq1oMDp0Y7l5XYHWX9ZO5OoRrIFXItuDI16YMcrSBjBRHE3gUb1bnlGo7NXKMU8Fpxyq/pyXEukM6nQcDdy1HBDG713V3EEOVJdit3V0VjjAspeUmB1z3a/PW2ixMVUHm3nKMbiMHsm+nSn44IjhPUDIjjXMAGEXZeUqsU09UhEZDyC7M+pMac2uCblQL2VUF/TFlCdUfUTQ+FjPscrMFjvNJQMCKPsuB4MhKjuZWPcHPcvzPNyCrQlIZDiB8E0udXuH0mXf3e3xIjJ1OnvzsSVwAHWVcEt7diEWi4oteNSOdl/q4CeLPZOJR0Ig2hX9PhGeIqs/l3cwrLSDqYO+QIm3sWol9zrwbiI6X5TgMFvIWfe6GWmHi8TaT9EBz0Av8fc+/VJSkTZAn+mn7sPmjxiIZAB5o3INAy0PDrB4+qr8XuTJ/dedjZOpmVGSTCcTc3u9fM3A0L7OyRYfzBMasjHjwZKKe4py9JyReJB26zFVhr775WKZzs6WH9W8AqznohC3sVQTdqA5Wcn9sxNZxSMLyLxr52l1NRCS3jX6W50UN3JH47wVNBeOYHiSJpM1FrwI4HVDIbO/S2KifGubPM5xxpCcOq19NN5sBCL2CGgBbqMqVfjx2d/Vqd3hiqLK0ngxxaCFPBBB39wUgr2ua2RxwMoJyiugtyHh0bPTxnnxGa45a5+qtZvQucl7BWVbPi/MqEXbK0aoWAcaOFBfpvrY2SUPp7l/Ljkr2si73dCVOSW6bIBXk4KVrYxbXBJWPHDKPAIKkl/ZzOfeYe2lSzpmONwhJRmm7B4nu6EBeyXkIfwTRkqdMlEIuocW0yCipGx/qkg65LuXt0b2888+sPpsBihJvOG5ZidsPeDgGctZM0hecS6utbh+VOV4/Co6Yu7FWXkWAJzIkcA7MFSFAHb46wQedpTs0BXEhgVkJmsnsKiED3hM4+tsKif/XTU3zWTHWJ6iOf8pBvvvVBz9rgzGu9tfHI845980zZabaPpIQrFseDusHbc0oD+yuKZr+ChG/CyDBancBmzXWj+tPVASMKYsTiQFGJj5EfMb3ttyoqzMNslD6Zc+Dxh0k0p4lKYCYOsu2JFQWIJ4cXz/W74G3fYFhTpiRsxbYTpucTLVxXfEpwQAMJFUct79snDM3add0aAjQ9VKWu85wHi0RrkeUebH7qp8yQKeC99SinW5Il1Ag5b45T1TLo741Fb9XNidCx6NtFZu7z5mSFYbn7vQ/7pz4WNQTsu/5tDLul6mKmAS4Ighc/lu6cNEqTzSihAaPlm5DcIbUWpIdrqdeZiLss2ukz79lPSIS6aHqyIAAmKvUDX0qowH4etn24NIwLNQuoZ194qQRynrpILsaqI6rUSEpL4TlOI5PX5EJ65lBfvz3mc6JeJzzj+vi7sntmkEUfJ2AQZf9N3aWVuItVM5MeaGUKMw34BViYRhP6ofBcK0iP9SR/O/xCMB7FyKgnwL+8RJLyXfunPaKEz5YWW/vtrLX0wAxa+2Gj5GVPDKswXxP6IuRj03EaMMjkt3zfjOm2fNmjc+oHV1Bj0nl3Lh6ttLfcn55R1IFYg48Ew4CFunuWAT3li07U2CwL1pKKxyYS9kOXh8NWXYAn4NapQF4N6xRC0fBNu34eLa+FfIYcmprNUsA9AAUsF2O0a8zJ4FABuVQeaihDmjE/cOdXOofNoNBd0qf9b0WpTpOlU6j/JHomHwHngtx7LfvTN6+KVA5tHeJNj63XNvf7YLo7j7QJ05WUQ93psV2sg395B5HMt7jTg8M6HM8w5287ORHYxvTuHuPNQvcdfo6jF428Vw5G9yluf/hulIRcXS3W+5P8AJ76p1/N6DMBb8iS1McKqlWzfCd+5m0TkYZ5+ql2e2UezanvbW+IVTWl3pz7w6lYxwE+XluOZJ+z/MKbLdhsT1RVn4Hc90rYusvnmdj5zF6fOjAFRL4OodXyzDM0HNcrHKGl6+6zwN09hz1N4Kxil4MqXJzaWPaxny7XTjF2jQ61kfSKBphT1WBkWHeUzzSnee3RNzopfDcCv3oWqzmA3OFZ2ljEpuL6jbncwyHSfsPayC6Bs+RkkClHUR3UzmW/Td9LS8U6UD4y42wAPR95bNL1rLP3THx2OQHfGfWmlJ8ajqpkArujsRSsPeLx20juuv0PfVeSLYzX8+YbELSN4PdtayEl9+YZKamGqJpVkYCMCHJbGtOa35g8N6Q0BVO/NRLCvEVDePMxb30TvL9lFbP9KgBBLufVTyApbreSgcjPn8ZgBfnbRMtsxVPeognz2/Uv9Fc2izT+JcSKFFUEvwLx1kY2idtpSmUDqoDdIXp8zsdGULKSeTQEuwRNSo4L/H0wz0kuEOp7spJ9AiFmYkWNUpqBeKhxOm8t617+EhMRnozLBC9MeLQ8I3g565H0SZup1Z7n8GBQ9/PVDINRIF149fxexj0hp7YPzbWRJGdMrr/Nn9cxfUNt+RuV93sS3Y2Qa1miZ3hRQVwIO8mdap52dAyHFbdIbv7Nz1XW6v2DjMy0ijXrU2bpDsm/uc5JykZNvxKcyWxtZoU0qaX1XGhGPYF6qG9zntQwKJiaHVjfVTjm243nV2iH+9s31Qnt+fJ92fYz1GBmxDWZQyFBA0VVkVLGkuHK+Q/RYowygKSLZbyC4od+N9xgUYT23loPOP77yQrHQ0/7wbGd372Y
Wr66HDG2R/KODKMEzJQCJwnbQBqNkrOGDxA8Q700LuzsLIxVaFZDNECNHUprz/4je+3sketEdpO27N6lcgcTY8rs61OWCmzitQpXy4pKscUOATPdE9RvQ5PY8aIIy73W2LqxQGqEAkqu4f25re3g1z6ljEGEkrPAKlXRdaBxx6X4K+Dmt3fN7HDdD0elsquLmFdLcdl1W+s2DIT5mKl81NdXZnLcARDFnhOwN+szWBYPckZki6YihRSUsjwYO3CK3z6v9lo0uoaQjmHBCyGOc/ttS9McscdimJ5meyk+cn1DRNkCYpYF2xTp+PX6SBVkic8+PnuusQfc8/7IJbvB6AZrlpXw0yvqA8GeZ8zNLbRoaDSaBN+WCd+cGXu38srwY820CFYkzzMtKdIIQzJKtmhue0J3dC1jrmbeA1LJL4QRutuAMzANWUvdY/VjmoQ7f2fuN5MFT/M2ZP3F/D0dcZbU1w04PtU4mqbVPxp0qaSLrgY+kj8t62G0/sKOpIIUhEMjE+0PkI8Ij87xUVmU2XHskyJvwWskt+QXSX3DrqJ8Ku2N8N9QwDcHn6Ni0uo/805PfnHC51URxAiCpPhKHeXqOEZ6WfId6ll8vWCeM7pb1tT+k6P49vrVFaJkqTiq9kELt/erhjrv1jHkr9tjSfPN/9IrCx0nhOouBhxBXaU+5iNuE/+t0PXXKAK7ZJfuw5KpCWC/CK8EinM+Pv4ULCsLiS+hnwUXp6UFm6M/3Eh5fbeoFZbO05UytE+qDNRLVibyiF0Xx2N1s4Wo5G2s5zAfwi/mXoTTqJZqUD656i1T4gqHh0pBJSq9qeVY2yqj2HuZ55QIqbyJqqzQLcLm5WXH0gG/KBXiJXwzodWrntEqwXfNqUJ72IL9ZgSA8p55ySnMT/+wwvMN7Nnfc/Wfany+wcwr69/3Y3eY37VAyn7XgmH4ey07Mn+ufXAg+7v2UUnO32uB7L9/194YSXEMw//LP3t00X1HeJ3BfBUG/BM60W2dze457l/QZ2yf8YKmB8AO67+AVAqwEhwc2vN5zc//dAgV/gXl+lPKxz5fQUFC6O9f/xUFwRZwzfX3AA5Dfw4c9Wet/t4Dx/4N/3telddl9c/jMPLPwWT5c6D89wf8tkr5PRY4J08u77p/WvH7HYHqz59rNKXVXNvQyNTQsfuOPObT/+s/jdiT7rGYv/P+HFjWR1X+OZB/ytz5+3EYh+cHO4/b8MnBjaHn0ziv1ViOQ9Jp4zg9B+HnYJOv6+XUN7gq2dbxOVStfff3r8s6j20e/H31p+PYYhzWv6fD2PP56ev5CsET/g1CyX8ORM+Bf32OEP9+hD//tuLPp+s/f7LyuX76KZ//OXjW659bkvjfj9HfBoHf/+NW4MM/d/pfjv4ybnP2t49CDuOQwZ7KoTw/dV2nyDr86z8nrslc5ut/MwrEn/NAR/+3wjTnXbLWe/5fmvE/k4m/l1pj/bT534UQJvF/Q2D63/9R/1UiCQT9r3f80+6/N/m/CNq/t+p/X/aQ/5ey9x9iJvzH0f8vRDHb5v33BPi/yBDyf0qG/ucnQv+/ECIKh/4fCREzz6Bw5r+fNoETlv/1Y5/n/tcHYX8V5X8I5Z9b/u+K6H/b+/+diC4PGQG/1n0Cep79/WSWKc/Wv6Of/POhqE8gRyywG3X2yGiS5p01LvVaj8Pz93Rc17H/TycwXV2CP6xAlv9KKjd24/x7NIqiNF0U/zcZJoAM1133z5l/lfZ/Fmvyn4Y+Hz7JmvwLyvz5iIjTUP4LwtU+a74PSJXKERhEw/EqwSuf38A3Iw8cEz0/eRPK/c9jLxO17QTbf2PIdtMsfnYB7A0N4MSotUn9NffzRqz3vUPcg8Xt9xEIkivrBFvan6KeMbMee8aU+kLYq1V4fxx7HBndEeWSeuuKK+/sb+M5cpBFEiXgEocxaCANephRy5z9DSW/yDZ/8YygtiE3fJhs4GFAw0eDbCTc4Wy5iKXOlvo5jbxwMFJJPZ9fJQOOsaPFRozGRDz4SnjO5vgjYA6JOXrm+fF/9FzlnnSxFXg9VY9vsaPeiiC8l2KJIUpDYlAN7KqVjI7+C8Djsq0atqQ0RIigjrOP+WUAsgkiDFl3h6AakIiGLw9l5RdXs4feRcnzH/fFuCEUUyyidhXfp8+SaCTf9tHhN5lQYzwbiCOF3ut58LzBl9Wi90ZQ45b2yfxZKJG8JGJOm+kXFz88aW2dpYtekb11vBp40bvsJ04eKOk7XwPtcxM5HEVGsZ5JCDv3YGDg+B3gmMuVuKK76aRwIvxcMfXKcSUpbD+benLqh1wlm0a77xxeU97F/+kxlhte40haxueDFpkr3S8R1UglguFSo9i9f6i/P2PFqkKaUE5K5DBgSeQOMYSvqM7iqxLSwaLtz6vLaczEdRbWBJwLFRAIK5bzL5Si4XKEevR9C+UyMVH74nndtA3Ao8LtnNRfcmm3kN2cUzVBr2rSd0jGl5QglLoYfVyQMDjISKdmKL9vVE5PYaI4csBdflKoJXGRsRKF319hcH+Oc+Ihq9D65V/FqgSCo5hM+xoPUN6E9aGRrRyt3pzjzXK8UIvpROd8LkQ+wlawCXP5PpF2d/ZIZv3TAnpoEGL1iJEkbwv+Bpn3ZuWnb9eyL4+bN3ot0a1f9udjqE5/A1w4JN3f9jhvDoQGMLplklaUmexNm6ICYuYTAml4d/GiBCqE3MPQQ1MeTUPf6mylM2xvvKjw/pVlKdTe8g2oMuROtzfVc9K1J2Axp0lrHlgF2XI9+oiOMCmufeglVbPVtuKsx9Ff1jUP0bJMlNxCmHTgoU6mzC91BszpSHBpKkuSawar9C+73E+U2/C3GkFr1Q/ch0Troike5aMHaG+WdSR5OnPxa0upbi00FV8OSozfg3G5DZkGVtY7UcNFzDMTM0p9YfnRq2CTH1GcuJ0eUFR3lyChE2k3F+Kdb/kSnkrp2rpkqEwS70qMmoyw5W0c4vzPkzXwdwacLVjfnc/kZ555rW5qHCPY0A5rWlAuTIljRSnVJJkbPOUtnq4yFxNHaddMN0kyZzCQx40BR62e+Co4SMlATrIcEEp2HuxvjDUpyMKjpSn42N3PC0JrmNLkdJWgZrHYGAO76bB2zzO6gBJoaH0UJxTAsPHZTU9GDaIGi4Bps/i8ucsw0RcmJ55Ii1RoGhNFSz+Jq1VhvpjWtxTfRSkGLWkvU6UkRZRxcdPFW78EQbIctHPxiqSl6ZJzpV9ffg9nqFBjp4kfU4cEKp263ql30jHbG+HX54lB/069qGJVtHpY9Dz9anItRjOnID51Vq/6N+ZiFN3jQaSo6f+hb6hKnQRMxvUqupiPIMI83AJvsO2hRXtf5wuOPm0vSgIYOBrnwa74y2D/3rKRArWlTNE4rJy/NtGnayR1V3MBQX7Lmagzvl9lMDJeZlm4SRD2gPTQb3eZjxcUp1FKP229fGwMbX7tYZ2pCe7j2+21HtRX/rEnLIFJ3ompI/TZEl00ryqzpJRXASs5Ckx51qtOLggERX8xink6cvMtRNoIw/37gX7VrDY8uo1vdJ3+6P5
KQlk9cKJhzlLGZbOkbcN0VRCeOUhfFiuZGY6x2tT2XR4tU97y7q8Nlitr0aJ32r/lHfbU0T97iUmepgsgf+8cFeC0we1uSbJK/ywiVegMWucgjsCzWvNHl2CBHuywnHE5vzHp4ZUhdWKS3rS5olzikOZMVKbCviQCasYJCv3akzIm5tlSHPW/9oD5VNbACf5hLEwrHx1coB+qik1eepeUfbZaoxiG4zfX6MSJ8pii/JXZWLss3m/gkmZbXztT8DKjYZotIPANQ8WF6CMJaYcmeQsjXt2i6lvzTSITrGIZdlsri6Ot0IVy4vpEHTZio1MAUkRm8+RiGthUT2R4Cvk5R8NgQk4MU8ALnKWmv10Ud381zDoQkwcOot2DZlhNgDJsrtAAFikwN+AxJn31+9bBpWXIfKgbyW+RVyQvcRkhog7cUkPXR5gVr/2I2YnEU+u6QxfIiW+GjdQDPoHedJ3vkDIvTMgkewPDXimBxeBkRJ03DZaXoy9uXKNb3F6HaGuULE205/Ro+6LYklnY6AaZR+LsfSfz4CA8taooI32R10tTdEQdhB1uAanj4lcuPrXAPgS5502089yleT0YBdzF/d2FfO4SHECpcQtP5HFf2l9eaufDOF6VzGMsVyLQo3v5nDG5xNvG1/R5B0X9CHXJ6V1yYwUUPYesRzal6HN+hhJBmM8hRPDbJzz9lj0M5EkRl0WpRZcShf0WhDq6EG3Kb4QRIaVefjtLviTzPFLcP1sDdAIDEksyR/KKenBmEC6+j7ONht9mlr2qQWKGvKRlZQYneD+69Itda7doHG51/CF+1A249iWuOoYQZFPQkbe9lAxVcHRNBqa4HMFUagsRJOHgpawLgeP6lB/ZZ7CtakII25HLE7ZVTxS8fDTCoc1ClSQg1TtZ6kRugEC4Q6jcbZifD864Q+T6LTPglwok+ovdi4L/9PQInIEu3BC0JYv7nsajfV/7prw2KlmLEYWd9z63a8wPQSbenB4eFmXb+xFYaPtrnwem1aMIw0fLbpxK6HjDt7lLl3BqhTKviB86lIWG9/ytsSXsgQs16KdnzvC6cZdKKwPf40/6ZHWr/6g0LOlk4aNzJSaEuqVaiP/6riDyeE7A7r4rhNlhQx18Q90fAMBgjx6T9FO0aEtVQ4Kxbi6r6sc+DEiI7/czqokwLKnQrx/R+mkscY1ctXbjA6BftlHeoDKUAn2n+OAplOQyNX6w8iteyrfGkGP08DXFSY/3YV8DzF6HChDHORxAg5EUR/EX010EutjtW5TZamYRzOlFMoxv1yIKnt0Zc/o4WuRF5s9+8uJwFjTvfLuNkejP3Ju64w0wQmRewYA0JVHnvUgLDccZmTjRTKMrIzgVXk/zpsklqk74XiZbWqlYn5yRQNiX4ikeKFYRJbqGByvP2Ng2MLUrC7qBxQIAywa7598W6RNVvZTjA/F+M0KXu0cd5zonXvpnKVHe0Zzvf95xR4DxRymZ4mkeZHiB7UgjfjxA3XrR7WVHUQVG3iyfSOHhVjp64EsUvg9Z4UPOEaSAtX8zMlvA+jMm4+WOUKMbUWuM27N+qRqw4F98jNDLol8eHz6KdB6RxgiISzcjiaeYgquBWFRe+EPh9QuMlnnIFN59CZx0Z4yaxm6buezSWKcWHsgj9y8jhOP78NKSxaY3X2vjxnl2Tx1p9NhaCpk+Lr4mKSWkX6e5nUF9NToOTdz1WyfJzFwbmeDVrQdWYAruECMdi4BNSb8tQwuqLpWFob94Z+bXtyH0KZ5e8OWD9SQOsM0quTKZFhAHNf2KqL5Oh+hqwC2zTynq4o5sv51QFmo5cRA5mTKng5xb4m58f2SMf0v2EnsthJTpV2P000M1/VzUijF5tHoDL8NBvAAyjfafxrxtoC1FgaEdRP9aTJBJ2ddesz+5Ke1SfD2v3/TAnPQAzPtxtWasrawyEEuThyneB0fvP6OELpfzjQi39MIPSzf2ztqi67xaaV1DMbCEXbOj37qO0sYmfeHqiGleAZA6OQO6RsyYHm4TmirE1jk7zmK0RWvU/TKqkdiluBcWoIquTjDVIWDOWYBu1seKq7g3xiFu6ldPHnbi7WaqGFjUsPBPscE10EBqErUvq2wsmL2xA97ePWRMR4mAxyICwdjVMbFqERq1gCzn3RZc9DzRPc+30wxZoJSPxrA61cIN59umjEEy0FSmfX+4JPOOAqtUE+1tfNQ0osB07HXdkmwwitLYM9whNvkFFJ/BiHjb/QGG/PjBkO1RkXZNHcxaBedv4vgnn8vfyNJxoFGSihqovbTy+8ViueqbF4swLFAWjdOnyUUslnZ66ifYO3mEa0wB7cRBLqk4MOklvlygL3U1KOFF0qtIqFhjwtBfas742H7v0WEfkhkB6e+69EzpRJd1ptEUGC/KX234i1IKJoAAh3wVfFHyj2aSnxF4KFSlAXP2qwRCMZriGIMuptKu+JxOFnfooJh2HuHBOMxS/MwMmj3m0Skg6JV8q0NYWOaoPAfUsRSR9TU/b+IGZad8T/Xne4jBMqyQMg+gjJ4eiMwZaNByOcvgsEbZq9N9Th6FxljKg3Py65FvYKfMH/gWFw5dHDsQu0PNnxmunWxDoBSMQX0HA6B/SOApZhxSAR0+c4lffiWGloKJtLQpz1nXHhvaqHBGFm32oI5sYYHcWh2gXCPFmIxfStAK1lVkNMU+NuCWBku6Hz1t/U7lmdz97fIhKWzDiMrSZCG7mXoFWL4eHIL2W4fHq2uk7V47fuJbeDgYr4cFsF8VWAUjYtvL3BZWLNlICb6HOBwDIy2v9/Sx44dxuIye8GoBzD6HAw30oMYz2HmFofgvmMmSnbngbtGIVqX4+jpcD9ePbmQV9mdIAX/BDz6/p1cUSg1PWxPWfIH3oCpshnkaW88ZC1dYwsSn73zhxnMfFCi+wYUb0PRJcJpac3VlgDRVR6c/lXAeqoL2FpvKiMfs5dJpX985VKDbiQzMwAs88/WKCvF4sLG0YLIMfECUHwXKm3HpuLKT8yPs7B69uxtYl/c4XD8pWuosQN6jMI93+0AI9OvpnyiXyskly/x58JKGjywV/kdT0t+WRdgltgVeHCGjwq1TH9taiVh0+4ZndkAJgY16WKodUwFdIoFDjBW7qZdSEO8zY4Ty9etXWLJp9M4cd4lufm+bLXntjXt0utb3VZNsfHgES4E+Vgzk8/WPcM0c0gE2thenz+2K8sgWb/s7Hh2ZwaCLiCkhQy80UOsyUx4Xs9mGPMhZhDbWsVTW8nqESD1eA4sb1h+wAwzbTN657pqpKtDCa3QqqR3V4fUiOK/8bkDRn3OV+SKCiiZrBIFKaaUidmkCdVE3YVueiK3UfUFgtuYG2c+GYXxAp+rvrUfFtt7LAm1avyXxcYGhpFwjulLQefEA/aHes3zIIitO2C6KxhISLjEsIHUHrATMi2J200ff64fCHCzP7EvUplLcJidaKJiLmUALZ9z33C1aAHroQVaAoSgu8h5qfPrE1KDHRZOgV0+1v30f6zcDvOk0KT0oZOE+5ZwbZPVmigjMFr/2TZ4ATqACFixLmv
vQSapMLLsXwwyIzkmMgL1G5nWYM1/n8pLSNehwd8PTVyIeAeuVauYl7cH3yLQ8Fp1n0xdCuWN1Sp8YuGT90jebG8xj3pvpb75gq1wYEc/2dBmPysoFi8juEslgVs8Zrq3AZbaFH2ykc3KKJXCHxjGBtQWGCX5UOVjp9sFn64gbJmuZTlFO9c08fOngPZmd6xkSx/pYap67z4nzNGY5StYOIbjPIXcAzj8xDAmn9EDX+QVPR7TyKlGTw6wxUxgo519jpHbTGwotOpUxipMKqJx/CnWm8ipEvYG3mFXOLEhPxomG2IYHAlGk2E1QEIa9qzb3b1dSOAaTaosrxcmPomtTp9y1mgfkVL15JB8GF8rqbeXlTQD7jEO6P58iDQqsiRr3y9t6pK84hwnD0nZRzuqmuRsGA9/mK38YAjSDc9+b41cg1ZDzwJBeOQGBT3411479norrsNneNGOG6SSn1/HCyE5G+ayIjhTGh8b76qY2b0kcMF9IC2UxolWr17CzNNBkb9Dxrh7xz5fyjq3DCrIXRkEnsMQW1FVZxpPmAZF5v9loof7Kofw5P2t1uoviISAg7BHM53gwZYIQ6WEJy/w2lJyzL5/VPVv9aeF2ScG9PAxQvlu83Rf6AeJuaQKkIyqfa8ICj7jvMA6EQ3qsxYGrjMskPdhnuI5kHZTHqQ/TZOKSd4FxbYfJeRD/RDWDRUvRo2M7tSRkR5gSI8yrpOK+VifuOgLzL5d9udMz+PQiUSRUAeBj6fRFPzZRxY/stgg/SrUrvrlAmHAnUb7xo9XSkyII9jU38RIuTEjF8dvVOzMAbhDadgxceBr5AnK2puHXFTqpPxvEt8OJfuDXmIMMUvMdpnuyY/hgshIfY1Zl09VHR0DVKEAgG/0VmcF32BOvKhMY0UWZ5m+BJVHuJJhLoaDpIbzdREJez8xEghJH7c2e2CTjmHrQjwOuQcBM6wwgiXMHpgEXUfCwRXGZsWWvuZedXe9yb2lL78TQR3Muvpen538AGwQauvDKgZZYDM765TJFPBd4VofCn3pc7RMgC/TsDKW4FbjL5AV+Y/GkUN7bZwOj9ZJVXAuFbM/0O/PqHGz9yUrmhYf2vJM2c8Mv7JXHJA9xhQHK7Qh3DDyYYLk9Tn5tb0gyScc+GDxYrMROHbCeTys3YCzGHwsgG1XGzPNjMMTe+NtvAgzVrT/ErlG2AffB+h8xmollYo1kSDllgmMHD2pDSH2Us9vCGTfCyshhsGvBQkglN28JSHIIcg7ZZ2Z1PXYhVHQqnmOQqEnImPHeVy5U74JeoWmjpFn1eITx372/UzBhDS2Dc4p4ZzAyqLiz8bVz5sHbAcMghC+QD+rNpotHlurpqPsJck6JuJRwj61BkndWoKRffhejxlUeu2UwfXi8LOydonfpkZX3A64BBDrCdG1yslmsZ1x4FVhNFqGU65AH1Aawhq1tGMQ8OHuDp1ZQl4/KkVgAE7EpvfWvN3E8hMK2z75lr/U/Yk2TaEnO1+eFOfhbGOeNu1v6l9m2J7+tXKUOgy4Be/QdS5Tx3juiUdzaA/lxVtXAEgw/RC4LPX5U/yzJyEUqPTjtCvMDZzphfwdZ7gX82arZ+Iz95Z++KYogk/ybRHUYVp/AcKDGzLhTsAVdRKpfrYTnjYyefUM8HtlnjIlTVg3LzRoUUIq5tMC0RNIwdawmgx/sC/uAiXwtefMjOPmogaS9Ro/w6DvcjTLW6oESCJ9RKQcbXbwkoqlihzWpEegvlOWjkDX42UVgMeRNChXnceTeZbN2FqCRxN1emD59GxIN/VOpKF9R/mSN0X1FqrfDJkZv+us36mul93XfaxoTi1/nu+HEH3MW0fkWOdAZM3DZ6VHfBp9HXy4LkctC7R5uY4u6Ex2eNWJ4mOV0BmBckwQqGSVMVtnu4Ayc5gCiZSVmDfc6jb6uloqp/FoZ2lqTZPy5CmP67UeEDlsd/FutaoXn/mqvjEJgX7US6PORn7Fq76Mm3spuG9XOvVfHqGSFArPp474Lu0WD/hJjlXN+WfQqmtMWuPmird1vITvgo5H8Ur61jh+pvNPbpf2W3vFC5D+y9qjzxFmT8kHZrOIFgxcwEXDNPVLUiBHmugIPGjdycU4SXGVub2osHWCB/Mls9tj+Yk+Xe2LENyqqoWANV/GGH6yolrunOPQR+IZsHc/TRfTR8yyV9ZBP/BmT72O873duJk2h7ovz7qSJdcZBJkTrZ7E4TxQ3Ymxw69ob6c3fo/Vu4d2e+hGS4Qde8sPDHZDzGjRFCL5dMuYQ1LDlVf/2KZ6WEsUgdlA2/YvL/vC+UdVlTswwHEcJ2uvB5Xw4wOatAAgFWAV/2x3BTwnzC+aGnHLk82Gs7YDuKnWW+TOr7btH/d5lRLlDxoihm6yrMQkEVudhxImi+4698xqkxBSP5PJgBEDmN90rsb8RfYNvK7FZN6LbYqS/7nUIGnSyOrbZq0GXkrPWK+byqejhL1yHlV70EMOSlYiD9J0mBr6fO2qp3MZV2PNvh3mQwZ1eQ62IX9Zn4N04k4b9rBpnrd+3osdYfI9RqwR19wXzYxaLwF/eF0zd6COlrqZTpd2LyKkI80uH3mD4gnWmRVLq49drVAaxmUcBqZQgmwiQZ3sBU5qSMtOUPJFdOD3V/hZNaxdx/ADxw6+ssYNdONQHZV9I1FgDfrhWXxgtP/OXSUPvzBZ6n7HN9QNI0YNid/NhV5ZTh0x3JbraXGum9gZlDr3HqqoadKowrAeOVDiKDJCnTqDrZisPEpnPD2Zwsdwx1WvkIhLEDKaGNo+63DyC73+6z8JdAAcB5v2QZUXvKIUvEF8H2duX2QLJwEy3o1IAE6aqnnGQE0LHl/42PFaewXKe7ytElHdKIIbryz8V/+MHxmmjdShOs9Q92spruBcUUvsB6AY4a6nBmhSxvROIUVmvCfqCB1uFiv21FJC/PYRkawQzr/WO5EJS4hrS5MsCzO4ESza0XvuHkr23IXIo3CeS/dEQWotGjZQq7gUDkGrlNzaduI3vlE/Scr83Zjp+CKuV8nlA4kxb6xYyFsRGXSa9oUk+6pcWnub9Wyd+a5O/ol2GGfjRNRDHpx6ugaWVWcPo1E0VJMCzWBCSc8/wuglhJt7QS2/FG79PjS5FcvoWzDtnkqbUfFoHuxLBQH+Qm1t3r/K2ZIVN0HfAptFalaxDj84868bSfmE4gFXPl6oEC+P2Guyd9DsLhMHF/DwL730yL79XM3EpqVbT39jUfF59KmDBV19T6wsc5jNklNAvtnX1331QovHceCmvYLl6fc117PYq59+VymVZFR6mvQAYan6iewE2v7AeRkphsXuQfWH0D09FpTh8aAw9VUehNCQut91Rc5gQfK6aYBywefi8GkoQrek4JSoMuc0tL3f6bUAZ0zxKbr5gvZTrlQ+sHAKTiGF0E+/PjJJ5W9eSMj6222CrYH+NbO18z/eKRWVscUmDpYEjusfHqYdrXNxa3DM1YpTD+lmX+SOuzm/VF/6rgs5uFou9wkXyP+78stjQBNjs9e2HhWNVshUWbN8/ws3g9z4IqYHO3
4cL+P6txygmRWNM9mJosI3/KraoD045By5HlDafCWH4bbcY8m0JDR/In1q3FmHHRJ1hseTrz2QGKUp77bT+rm4Pn/zmWO2FPTmO3gr7g44wkHAp1KZOzSkxAx7jKYiO29nQu4vngbfHc3rg032rlhSqyRTDcPnHIkiTdoJ4ZuJABI6OVt5We2CRXNe/5ExNvyEmjqILh7C2+beZ2S71yQsZGDq2WmnuVbL3uTz0Scn0iyb7rAdOGLY3WE5XBYyWObSy7+/1TVUF2XZvBIwcvWYPBMNarEMhJLkGz2IpZCDKshn8eq0YLxuKKc5XTstmZu5w304A5qbxuBv0XXG5jKcTik6GL0qEkPtZWmaSUfEl4HqkZyBs9toaNu4NOlbWRRc/UGy5fOKW/h9x7O4P9UGo+MGQJsdm6fHHOYI+tsMSC2q9q4Z6rIXUdBALSSArxxXZMvDfb+0ai9kK2XmfsDPjSBYOiTZYyLhsiD29vVkjcn+srVW1R5OQPNY4TaCbrAw+6/aLB5b380XEMJJpRitLvtNSmtORU55RhBBkjKRC3+xb3II72D0ZCNAu/ZYbqKjTRUtIMN8g7wsiA9rE+C4CvDhLoHY27TtOKFQgwdC5hQ+/fQYipVbalTLN7pxCzWr7rShOPdu9wV91dH31/NGKybehA1mLZnh4da9bGCdJiapbdWk1JDW5Oue9JBjdEtkUaEkt4q7jIZXMhvQytBYQNhCtGfV5Ixi/Qjij/yoR/ru8RG7INzPH3jJPjeujG+Aago0xeu4pPbPnV0B0sculUVPsSonsMbiCwTeyfpGJXuO09eFvr1ODjxOouCc8HOrsPMDGN3y6hCqgFV2CAYPQA2jELXLPOW8JmX3uOJXmtRHuv2HKnWcbu6jqjEWArGCarEsNr0oQ+6vloH196Tgp0Xpr3fBCy+iXgCkbY1RJCyS5GuhdSS+PWCBi09co1HsN5BEpnui49Zt0wTsY+Lfejt0hzG6utN5GNpl+F4mypaLoTFkoYA4FhIvGrHkbG1G4V4vvw6Ig7IlKLGptBLZnfZP/QZq0bfjPenD3YUsRgbX5lKpBvWsg1nnV/s7du/gJuG8Aj/MjKfZnTeWJBHtMONe+og7luv6W/vzzcj9po2PMo8OAGWHdefZ6v+x5eGvMT5F/4Af1XPKMBI6x1vmSfBanr/jsQblKD4vGEk3TJPwQyroFiwlmdiVZtACFqi6azBA314ZXl+ZBm1aew9d7Vdf0hh4VN+pfZ9/TpdKzcHXoJU87DsmFz2dQb/oOwgJOVNyMscGFdFx3HtUK8539berFaTBu3edoe+MQWMXQ4eRxda1l+qm5mVNj4QEfw9PTh1KSm8b3l+YDDFOXWmj3hV8DO4MI30O+A9AP7oU8JKkBGU9ugFTlo49bypY9DFDEQ4bY466RDjU/hezpglGX3cvO8XF2GieO0XZVtLjXlGnqOQzT0Sz2MLarxIgUhFvO+eDNPxj/2kp4ij3cw9P5YQvkDKnfA5buKFvUj+Do2BKsWlwaXvfaTkxkcnx3xdfPc7w3VaCJByh1MoNIvfV+uHMuB5swhFDx+aXHmPa8nSqS228pHqp9tjny+z2//glWRPs49SCM9EA1kGuzX8nHbO53uO/9yo5M40QZUYw4VPafNGmrVgxUlT61PKcckg69JX3w81y/htbM2wdw2J7RTyol+8IbbP1G5AuKXBH9aMaiCrmfCQeKG6f5znuPkmK6DJTFkQo4Zewy3Bdbm88Swz36KMrupWR0Rpc3CyeON1byUFZe9qMiyTRCcQQWOjbmx4CVHCulXaHGJWU9aZr8/IV8owd2UVQVj/r2CiUVTLwJTp/BmrfrnjxHzSOiagLind2ikKjQDqvXaVn0TG6/BKeLwvM8u2OwueEmlBoKRIUVqLotvNsx+zS+v5Z/dG57gFoEkvgYyOI1guf86lTJqVk2znrGG4LtzywLpIPqs0OuNEl+AZBZ1z75onam2wsYeGR7iiolr1Iru54LKHhI5qoHj43QGXPSgQs6XamBm8Dy4veGP9P1uqtHM+e6qwMfxkTLh5WDeAPaogNIiLG9PJWujr63FH1DkzpSk7Tk+o3k0tPxoWi9cNd+5R9EOf5xUpp4y7i6dR4ZoP0oNoOD5OBpmMWCkrXaiLaO3u2zJkxiKwTr4XBktD3y1uXMaHepa27YTH6vo8ELBucujS6d7DIVlYotYwut3o6xyfmtjh6I+Gai7s3Bs4F+WvUxOUzzIQccuCm8HDOnb/OoOfn2q/LmqQXB0j2HqE/inZQObTxn8xUuNNsg8tF5AE8f8Gk+P7ya8ffJ0N/6qkc7SMKA3tEXpbAHHqYhhdjsUkX59/31lqrLepD9B033A5RTSaqzh29+rh2wkPuP+3CQKelXkLV8S+z361PwK+SsJbr8HmcOV7LqTDCX7hucVTkwVy4vcV1lFIoWkQFkBmQzzwwafcorx2/4nDgw2uyUSh/1eisCfwqO0KkoBfKeI/sqPaphV+LR843k5qfxisrDLXGZ3ybkIOmmAizi5x1OkxLWR4YHDGvg/PfhvnPS/WBp7RQUClxB03AVv3IGvPviX/KHm3AXH/LzviGrajWX4j0n5o3M0GaTvNvvEg+zSdz3iSGmORdvAtBIfF3yBuwrKG4ZBX/Rtz8EDxVUVpRP9QcMNwJ6MvfQ6NNbJbyvPFJD2zA0YJH0NrtWcdW7KtW/fA8QOlBJ4BVcGyr/SRxABVCEJJhjT6rgfRbk5bTlLvCHQbusDH2LyMMUwKRh4IQiemYRGUo8rfvaXjjki2V82oiUjJ6tFROTpkrQzYlqxoZ4+kcYrrrCvTxBBpqHS7wSFDhjMUNNRiebId1OUcyaWMPBblnSq5mt+mr/RKNO8euDyqqtoG2GQDr+DZwO6RZUJxQwXDsqJoyriru6ROd9ecfuXkbjAucum23iw8k+oXKF9NtA82XGLfhOp34U9NQZxr2zsfNb4TWbSYHR1qZKyrjiXP71XY5flOYsD6216ooUpcqcpnchxxVMchdGakuQN/uFamigDMv1QOS3r1NifPH1NHmmScjLGCSaHDvb3JC9eXgihj7jMvRTH7R4clgvJWhNJzuDTyVdSMPRSLcEe9wH/jeRTjK8SVYqbppCKwlCLT+DJMMi5KweDCPPjWvY3opNcDaNUEMj5OIN6lEDhDJ8eEv7XhNg9AHm7bYrUPKt+RmXP5Lf10euQBZe469B1OFfP9l4Pb0G/MFIy/LNs+vd5EvBYbfqZG45CElIVHmGOhOuGSAGXxfn0pGwi1PBMeJn9bFpvmgsLqfN6tpb+loUT0fhVRyu9QO8Q+9hZU6Dkxz/mJpdcbzWa16ji1+yTJ6yvfLDG0WsAhAx/3isdQfA9e7wJ9WccAGlSKJ8a8QneNR3hCq2qvy9Z/kuruAVdVJwcNx/aes42M1mf5hEbafWv6eC1roGK74DTFkvGtzTtpLpsKodONq73EuLJs836a7z8SCk9fv0fCWzd3ccw+BPcz9Aqhs5jH+1snXlFeYv9MmGx9jdPQf8Iqu55FOB206J567oQwMw83N+
hnKUILjd1Zj6jTREpXjAs9h3sl31ce3U6+zMC7hlJT8YRBoJdfhoPr1uZzDYqqTxar/R9Vl66V8T+EiZOxu/Vy/e0ftTGd/dxrTU7k4MUm8Dm9t4WSRkhfgvvTSvR5ciMxpkPfcdoizOjdBPDnexeZlEik/90s4uwwY/kDbJ88VgbXlBH94qClZgeFilIkWhDdAw0QX0Pand0qiRemvan9IaYnFn3/JLEldiZ+kjA0eXx6lFI3PwqUPtw6dy48zoqM/gdqSLnFw4dFcoN/Z0qTBMXhP9211a9LH4e+dxNG5EslDq6VWF9xI/qUztwU4/cFEnzgL+4M2OnOalaXcehKgcopIhwFHLfcAeNCxNVCIIoKiPtZCdbA9fMUcq7kD5Rv9bs9LaSzNq864CvkGxxKMPqqQ8+NTtUMzITHMpXsNv1c3XRg9nYErr/HSL/X2VjETpyxdYCHO4ELDO5QR5ITyme3eMDDxQgXKVSmptP88urAKyk7iEfqu+RBasksF9grFL2dFlaqG3e9JATLOSgyEdkFpxMWvxwLkHWGcjjRfDeCzrJNEMTGd0qEc/Eb+gKCG27Gv5a2uYpPl+8SN816A13Jb+Kl2R9OwtLGrCqNCQX3svYryRHBgE5W7STJc0Cg1YclqQ5cEGU+u4EIII9bwkFCX0syBhUnYtDNqgNofMvhfPrxE6nqZqMBTu68gNF/CmSMHeke/wqCJ7pz3aLvTPIcf1BlAUBXNlSIThmKthQdS7woOoPXDmAVvjUbbwxboRfsQnwWPma8qJ2dmOhg2izZoBWx9iBc8wU3PJ30gdcPblBwrQdc8Mj6IP5zq09kWyTgtmHsE4h5mRuvkMkChTFtwQLxryIDSt1UeR7oos7++4bpibubCT25g//leRIiXyw2L8cLSzN64qLn9WqI/5L6kbCc49WOV5ozGFQN+1CatmUTrYxdedFI0NNNnGxX4cYxKIH/6NGfW0P6+LsDjHh496oyk6mnxWnGKaf+BmCMnyJbkO6Inqu0AhNh6Y8jAtJvGElwkv+QfCPC+3+1JM74GZMJ5j2j2IPhRyJC3QGqiA6SQtGTUlvy3YdVm6uQ+d1xPVj9TT5EFVKxGRrTJGdxD1PlYWoRalCLSU5mdAnaRkeQ+9zLRHlLDD9yDiR7FZqqFRTEKWwSI9xuG1BZTJnILtOe2hozxQTmqEyJC3wuQ9pDUJgCgL0vQt3Aa1UMTEk5p6MAHkfYyGxGUg5D673XlXuZPu7v0KmjTYtYDkRyVapYvIjyzZLWV4hC4H2DJ+B/uL1xS5l7Hyk13GIUDdjmDFmBYni9LVxRF2mjLHLdWdKvxwG3mhYE7wuYxk3CJodrFrVIj3ABP4CWCMUv6gyeKtX+8MgrRPxFIPjiKsG1BMtgob67FVZ1A37g7Onr+t4siyWPnvlNQodWFYmU5wMi9qhetdSSfRNvNpEgLRsCZ+OhfxMP5BbIPE7Qrgai/WvJGaEAga/yjM69C/AMV3aJXbMYjUYpcPRBp+BBU4+Id9Jy6keNMRALXvFcVjgl9eL/tT4S/JsPVs3zVgnyg5MyCI5z4bl8arcnJXVI321Hj8C/wZJuXsw6OD9FbvRhHhC5urDfr6yddhcQERXkGtdw9O1R+lRrYnz0v8jDm/hQQNT0h7cu6LPRE8+27nk1lL0GgTpI/RnzvXKQtkiNIreIEqpWC3MGRflRkdz0+lAi+y7qlRsGk67NBlDOSXuflfeQrGXd+u51MoBx9z4R/fV688imn4QgadV6OTsL2Esi33GkeV5hCYNorPM3VdiqtAMOFXn3SWwRs+/QS2HdJ0vBHIVuevR34qjB3JWXhLtlcxE1jihzVfmwgEfd9Vw7QgOsUE14UVQhnNE9UgxmmBgnxxGBIPxb4vmRBpQ02bg2f2gcWBwMWwAyMi1aMxB1FtNYg8iDfxon/Vvzvxvh+B8Y2EtVRkDylc0hG68aDINsgIGb9QK5zoKc47IRc2LPR8x8ANxRHAzQIX0fngX4E0lIQsMkPgeacVBN86vMuu3rf/IhsKrPIVpTsyrWN4mHYZHd0MhYcsUcDGeB+0MOmHkwfAffhnORG7aUXfvX61kW70hivWB6Day5iDN0nyFLUYkCmOHhv6QgWi7p00rOEzxjwUUrf7A4+q2Q31eaI4DSHABUl57/fG2lWpTMZfnc1geZnKOl8SZXjnlWsL08v1xVPtIuHQo5zxUWxwgWUtnvdFNOiI4oteNEWxk/lRwcPDxueJeh2yxrdAsoJP6Hq0sDPxthWLr5W9EznRBVizCHICRIn0wgHLw1/VTvCtBdznYUh38SccyNICARlr4P5SL3faIbNngIDYvEonxxGjVcY93TMTgx8ReOve0ahTzsSDpYBx9sq3AcwJgugGWZxdyTZBuLCd+fmBG9YgH8IBXA6sxMVjbnmP5J+8Zcn5rxDA4r/sRTK83SzsDaU0HURr+KVX/DlEVl/zJDKot4Mits9sQ/DEaehvx9e3mAuLLf00pdjfF7OBEP8zSBtt25LaVZ6P8QtIbhisCWQIPeo99NcqWz/m2WgI89gfxNlihjvumV/xxFPGR+cFFmLiNtS/yr6raNr87TKVADP9WyHktoT2SzQAyCFKk5eWbCkkDg/xqNfKAwnBKlKaAa4wn0+YPy/JcHhGXtEVnY0Ltt0Qqxz61YjBzT9Sx2URaYwceE5Y29HdEXZ4iAaZZig/NLfOBk+3WWg4NdANYhcAnWfAIsLjVHCaBtbViV9y+VNpGHy54Ve7jwfdDT4OhnQlEXpMKXVzc3icBdiGZMJu589F0qg96R84K9LE+h80XceW47oO/CXlsFS0ZOUcdspWzvHrn9hz32rO6em2LJIoVAEgkN8YAqAm1V7qwbD0uR4NAcKaLJ+KKN9u0bsXn2JiVZu/q1e09tXLxIkHUSbnwPY5Ex3kY3/lskI+97Kyr4rUwHoAWCM61aJU9INp/Re0de5Maunb3/AjpWGlKfYDm6V9K/Xx09xie0zwra9eDkIw/5Ll4DH6y2jniR1N3E/yvkVVSpTuu/A99wKzmL/YmE/YBPvMbVFfMet1IFITW1QS83W3YJICYkb8kK217irCpVVVtBvNqqvNquVSFES9bOoxsiT2VcSK53etRL20IgtcipWnQms96Ael7Qde7TT5cfmQGLvtbCFbUwn3jCBaJ7Ky8D2jOb5I4aWGJYnRJP93LAsuYx3l3+40Ior7146WOA4qIxojxD5LH3CJYV0EdpRWuoCYxln9viz7k6jJcrgFpQqEdyBsk7TmCAFpePFC/v1sD8T4WfWBkQ+GDi5XbRPuThRh+Bhel0UVAujfbk5wxQ+6Eey27q23GoRYIxWdn8rUw5lFkJCeEWDziQ9dIYJEs07L8ydP68Rd+/zn9iIXKZHmD1RjhWIfJie6NUPTRh2fwxidXxqu5/ppU23w9FzCdVZxb7m8wbG2v00RjizPS0UZ/A7JNpivl0qzt6NTKaOZBuJI16BwMT5Zx0jlD3p+w5jG+Fcy/jWyCvo5gm5jRl33RvJH80lUyrZYUCb8r4V3GF62B3GSVCR
fiokkyoEBNFHOsqCrnvK9nHKhKj38V3s1HPvjLYrOYdNFR0OaDRewUhoKZcJso/jHmbIdI1ziRCxdsLkkg/9+H0zGu40uo4nBGxKhLh+PN5wHHDXGQxst1V6cddBjivY3lbYF83PEIkfdP3cCrFiBhoXETjIGi4ijSjx0H22KxE35Xiqzp+71MkSgCCmRlP4Zusg4ea55XDfQA0CdPAzQaCODp9eAuXcz+7W4ODyNwgyty+GSTOF4GW16ALgUQU1myBqUxXru1jPf207IDbTEYHe9BC9lxvH18vYLdTKq8xfiiiLjo3598t708ChsdpezWvF4TEl9FOQGkzTEXnG0UF/BgvVIk1t9KLyrSIAC2sPwgAxcB171PsCvZ+XH6/hKUGueCz7yKBdspE8zkSKmedGEuh+m+dfhPvCTJ14IFPjXbRSkDXnq5OWNGXw0mWIJR/axD17ri/18lP6G1umMdsCmSxPejvOVLMXIe+/CQZhtKwq/PJI5lub8OCp8si9knHOx1xko7a1tAKA8LMknn628+tRb2iB/A7mEQtQWVc7d4RX4r7C9SpI/KNAfWoxf5YhowF5RI5T4j5BhJAiDX53CJ0cWUlvz7B5doaCnNWuIzD016Oto+OAPwcjchpEUmaWF5ObqeHpuMyxDejnF5mmaNiWf3+qCFsegHS7l0iCcanxwGgY3DtmuAxFF+35XLfNTs1l/yEenBVNse/dluhlmM4jlGUE8g9irOKfXWO8NWhT3OYvOVZrqBqwBG9WBfAhEuraVuSMQc+rLuSiJp79xVOgZedMZAQdHR7V+eJjN6m/hAl5ZTJRHcxympWb9F7D4FqddldWFyz6aDQkB4hKrW6h2GW/2gE3uiMOWGzmK1V409Xoe05ce6efN1RZg+Ge7jIQwATcZiN2aYsYinf7FkgSE5Z/n19OMLNUPopKgibIo7TVN8UYqcOaFuQU3D97wN5UZ2JFVGs0rS2pqnS7kfKRvNOoN4Xgtp8xFVyH2T4C8XDVhU6eMDlAOTFGrnXUHi4UCEyT/+opLGC8PBagi1PX2OLuPPHB+t6LeAYvBfrD1Wz9bFxfZEU/WOdzQP3LzylHoZMTfJ49fqbCa30FVJF/CqeikBMAaHi9pHko0ac/ap2gBbIUz+BzkRscoWMsZkjdBlXFrYycSzpgo4JxLuiwXnK4jwYFCcf8udgopo18IFNXpqiFx8jvP9mZ9T1Z4wXyZd5juEn5E3zG0lETKCnB3TqxefC5gdolYZ7CrwM+nv1qIA6SqdsAbqu8nrljafLYfU0cj2N6QqChjc2jdDHzhSLUo8lbqpHwRHGRQUBuqcduH4Jomu2IFh6nDFwdEpsuk3YhNXYlNJv58G664c/FyXUcLQjvsZC8b9PHzWizQBU2RFd/re4ry7KzHX5/7oTmiEi0Zyq0EH6AU0aovRUvEH6hsjZybhQGntmoquMjMehDTQ49zj9Zo7EphKyeZCIpjomP+VkNFZOCZeFct+NpOOx9h8AOuQky6h+4DpB7dhMPNMQh+3UKEtib+bZbmHJ6SXL02oSZ2Lh8VQmS2Ma1xWZ4/38fXkJ9raaXJVfTtRoQ4xRN8dxqQ7LL6HN/lM4/g7Kwa5hP9UiOqs3P2kXy6j1uzHs2kOOAyQqzIvwuFitdsfpXStLsQSRXV5CsNXGZVDiP7Iz7GTWwJm7EINvvT67P+5sGq+PRyw8pb5pcvPZlBysNl2VWb8jsMZtaxY3kF7gE3qpGoS/3XmX09sRwInytybZpKzDPptl50oSIwJpDDXn7Y/C2WjmXHX5Kac4KGTYHoyWefZWJFsrZfRDtuRg6tHpTcTKmTnTWY1ZwowLJS0mASu0REjKEXkCIDdUhfwK83FlKWfzfCLpj64fX3vO/9npcc2nr2iZ/3UZeiTgdTdgw/FU/STp9Ygf/hDIRB5KGZiuE8gtOlJxfU5KSGx7UBu8VT+5e5n2YaOojitxJGt45M9PzQTjGKVCffTZb3V0AgGAasnGZHagdiTHtpPDixFxcV/I11AiQi8lRJGFgfi5UIzCMUCzUdLh8DHrkgNaHcY2PZo4THBOXvjjRzp/NnOqp4a1x/TJNLTN1vgRp5Qe4Tq5DLoG5neyXl15KEKzvkqNR6FNuqvxIqS4MpqW+Bu4hyMxu8XxAqEFS3wyft63wj+vsTe1XN3DsrCKHemXYEKEWie393OinvNOIx2OtGJqtClELMbP4+2GWirhqRauoV4CE3mFzN5Ytyl8GwXjrc0Vb+bOdg+EyNnPMsueHX1xESPrWuAt5NfyibSKmvCYupTFjZy1DCm2qdoKhbzcIVMcZbp+tFyywv7OG+opjqejYgKiXdBn4bqQoz4QGgnL3twLrdDRqyHyyJvY4HxvdTpfjRft3S/qYGfFhIHjMwoV/zHi5M2sRmZLkdgtkfp9rtmeDbSDtOPAzLmgKEyIRLWQo3Ub08C/LRv1YJuKF+SZlckh76fdBqCupG0Zy08TXMecBJwp2K+E2areAvq0Ollbv+GDb4r28qfj/MJCrxJsPUeHykqi3hvziNrDmlQe7XnX53NgR/QRp4rTJ3/fr25WVSwvCvBSgLjqrwcvBQJyIKKOwAzSFhVFc4OZFN9bj62gOSWf9ipeNWqj7X4DcOESC3VBN9R/SOiLKzP3IIZhpGjVpIQ3wunYkl6LKZrt4x3FCCcVDPXkzkYnyIfVrJgGjYliXDrHOU4jgb7Gopuny6takYO/OJ99HqeVYKDYPqTKAE7rL1f2TyGwMchv3Syv7unv4TotlxyHVxpuKU2fI9QD4BQTiIMICICK3OjcIElKQaZKY7KPZ359wpgwZN8JZRvy4M9mKph/Pgy+86ZyZTRojgHYgZOy+zjlZ6MLEZmR6cxQNJBRmsxFO2dPvQ+wkiqiuRDPLTxUYwlfwPy03MVitY1vSImdHqt2b3rgN+1Ogkm3lXm7V+52Utackfydt5hj7C6JjFl2gNAh1W8FgOK0zhuoey7r992vTySgziklgjvR75+tRw/lNlryo79UfsAHhfm6X6Bf7gX4Qf/67JszpNUut5OBYmXBRlXlNov7IOBBt6hlkFfjLCpVoqDwPnszc1/Xn4ScPXApgrxPLgkFCnIH8LNkReAuEJgNQw4MdCV17U89fVJepyEIqqD8PeVrbvpjY7sc9Fq+PnmniaYkbF1927HkgiqmYhj9rZ2KrUAcdt4a5OzXLz78YppPM5qqd1YeOtDXq+P/vY3yriEmpnPW5rWGApDvVSSGfTNd/V9MSsAfOiC/IHY/wnUuf8qxKL02hhZRT4QV9PTEFYz5iGBr/SAO2dlNr/ZJtWREDh/ByvmWKY8F6vjPxMTMNhVvNapa/myPTMSOSNJ69A1m0OtE9ePsF9Od7wN7hvfnA7Ta2ulj1//ru9u3vukXJDiNUM9Nd2JXHu+iMLzf4ktYIuAfWSIPuq+r9zA47u/vdRezyaImke8U5QjtEE8LJMBecfN1pg6UWqbbpycKOs6zqZ0p/Hx8YvYx+sc/aTIoLlEw70lezJ+fLbYQNWQa3qK6v7v7CKdEqIUWyfkF
033oMxiJZQ6CDav+SU/560P8XW3WtqEs09/cK/++rocrsQDV0YQWa5Wnk3UgkrWtCs1d48Sn8UEsG0OZ/Uml3x8oAKNja8KbZCyYJf9XmR+f4MnoTM31dVWsYw1NAOT4HFd5l4myZHHSZZ+3VtbLCPKX96ATCS3ILlYe6o98Uk+S+G36xATRDAhn7X733rZzcFrG0zvONHKkOBiEu4jHlh0Gwn1/G603qCNebPRteyqS1LUy7fY+2SWFGmSk08/1b/9TxTQgwshp+xMpd6n/HSYvuAbizIq+LFfKc7WYGJtUQ9/yVqueP4ohq8nNR8FAOnyHJiZmFK8CYt4aU9vrpNxm6+fuu/eu9lDcKHdV/F+Bt77XgJgsV1kKFBE8c42wA3BDZ9CXiTG4zqsy9N4sMj5us8AJ8vmpC6dV/s7JJv0te7NnFBpA94yOB/49nJqbNijz8qEvm7Io2dAsbi1lrGykMfjtnu+UE9Uj/Z3Hp88gU/hfAAHwwV52/Y+U3FlLqjlp6cPEnOkbQ+Z0pYlFcJPDuBSy+J9wMU8m6fCJ2dB6vC4oLWR0eP/ciPbQUyd5B/jhrgc4uNdPiNulEMTVJxoZ8cnjhUiCk5Sb9G1CmyGFXpy3Xr4TlDHvrw9opXvCLJJrMvazQqvU52tBikntigCdRyZ7i4hEBB/nVJUMhIGvaDp7Cl4OkIzFIGXmje+5m5xYKL9BnneCxtoCARKUMVMFPbuRj+nST7oy1iripRAl6DVb5VnIhyVvyl7ZApOuUzPN0XssNG2OURqG24e3QvyZwz5Us0R0z0yxPShRjlD35sA9D5wwXtcsSFtfyW768SQLVfj/hD3F0CW/yUyp+QODeIlSsJjKAWJ1Kl/6bn+USDB/Uvaf1rRQrrYD7dVhT8ak0UhRik9zU01am+kDwuPZqfxDptqAHlcilfy+vhYM88Z2lwwONXpKWPkvaby8dnNUSfXWhUwDFZ26y+QnFNHtjzIMwK4/Y1o4z0OzhxNeT4s4YSvJbjXBk/dseRHnOQ2olhrcY7pltUsaSewm16mNAD9cnsqedrUlK3ywgiRaosgcXskQhRZZUAH8w7qNxtqUsgBkcK2CR+Parrd0+QK6/xT2jXD2I42pOnLib8yfAXwo4HEkNWAj1HQQKONVhBdecnkMIOxxkcAV++KlNMocMdfj9RoRI6oHMb8Wua7Px3lQSydO78C/qTiEv+Wpp2H7yu3sDTiZml9CzaKKa9lajGz/aVG8OyfoFILeQi62wZp7dym4oaFDAW6amzsCjxmwvKNo2DZvXg+fjygeUwYRJclaOpwtwv/b6v2tcFaklJiJzN7uPoTYOjyBePlZ+/xfbYl5HRFPoyEbgOZtKLUFnWl6GSd/8PfT7aCsXM+fVfZiyZl1JEPofjfrXX6I9gFgkyjJkgCR5HJWM8wlOxh4jzRNxqBHCKPNreA8+xZ8kx7ro9Ir9bUUI3AHqx2bwo0/PnqAmcj1cGafaT+IGYAP1rVBtut5LJRVSBxPa6Nw2i8+XaC6wp3gdyGctHBxrfJCXJAFt1oLipU6qA4EoUeDXLVk/NAUMxZys6zZ+PSiQPYiPg/UrplUCUOIZSKNuTdViHLjv0z7uOKP/+5xSDDzGDsJmqBB5DQRAT1a1vw8tNCb0/QzDWl3Mtl73x6LNg6D4qGUwp2itDCNmHs5foIPVRojPYX8QWrxBODayT+yhpCRTQGx+5a1waCzJEyKMt26+fnzXsuWFeNIjMO2s7J5JmN98eVXXd6oG9ec+nnU299OUJ/2aZS3xK/AqMsM3IfhoqNpaiTAcunlzShuq3wi8sQTAFk+vOaMIEm2DmwY4cW2InaSagdHuSTUYX7JOhp6BNRa9awY1xEKloViu2rKiwnsUQf31uOCK7hkSTpOqbglAdpBfNglQx30KhCagcXjCOVH2gZMdpfmdZg/lB7jBHeziqjGouACA/qR5ZmAOoehZ+yO6I6BP4hSYfF2CvwkDcO0JsfjB1kfLHr7ZxSk44W9rPmLTDToThfv++NSSD+YMip5iTTuDjOETYzCTY+fWULmsQ09HY0eh2SoUQzpiWRcnVPzGI7rhBmmLIE8QPUhrlCVAhLjNEJ4UTX8nb6ZYse/Tv3N9jyUdX7+SOXfr2ZH3bhfh2dnp4Y8II7e9WN7vHFjuji9YCSM/H+rmzNU8oxB23dfSA0ACmY7vTBZBozz+lm8EioDQAEYrF8a29SkaDUIYi+QtlWghVb+U3mOZ5EZhANitdQEn8Y7z5foXm7oQO9yrR2U4Z/eSg7kCxglhQjEt/FzEkE/9aK/UBaSEBKzWwriDPWLLDH3V9uUfmlSaF5DDcDYENglXYE88mdudI+SDq8boF9llpShZ56j3w5EiCOHYahDuyXYXPEP7CTTRpqnr4d805+LXR1/pXk1pcR3y8lOoDkX/zs+hbY/vjorC86nBmMjSbNRX6DqX85ugE0bNzJmoy8pftWa/IS9HkU8+/55fi7Sd3Q68hxCFiLfy6IcRSlgBIeebXZPCpQJxI2xdj2pNT2434ady//nL1oP+CASDk8fK2G1cQdP/9NfDlSwOOVGB155+XKPokaWQU4jiCpt3yOl/SeaKMgkhPbwQPfpmrRZCUHjvlJCXD/jcMUFeVDNkgBfhzIoB632t/apfZYbUXrnmT8XaSIYgJSHxmmBGOmQu9Ijennl8TSx9YohcCVJxkUEjO2PLCGQgY4YD/xN3kS7khDNlWXzgg/uBWgW+WDMs5Zn3FWiA63UEjy5eODar/vkBlFytCrDEyoVn0BdS//O0GAcv9BktGtWRJKNH35+IVhRroSJi5vzIKQFEzRLilkZ3vyizpRaH9z9VvyzhnR1mSNgXKGIrwdttX/99+bYNXCldYc7xnXOa5p0UlOTVXiOXXewGZ5dQG8ymRGiV99Z6WpDU+9JrMulJ9R35msh9jfxhoPVru8mEtNnWI3NJPUMspBXmcK+iOdPp0k6vke2yLI6GJvwaDupo358XkVVyXFSgcG+diDfcvvoOyFjYp11ucdgx83fRrfkaTCtAX7kEeUCleimmwoevAIzpAbl/j8GdrIF8syrGY3NqQpLF7DpJxEaNe7T4gbrCEqSXMr8lTwCeJdOZETEY5hIZnOweMaovA5ExxvL4ZRNjMVPyJ8lv5q27u6faLu1lMvSI4osWR3AnvOzgPtFm7v9izg7Ft6eLCmEuvjUrlIuCMMj6kjt7PqIjd9ZKrJsmWx9ew91DDRPuKfXm+6J3sX9jP4ZzG4K3/6zh1k6WcgtRkjLd6TM5hzid6wTVfzzSBzJ3qnn83FPnZDk7JyAmSDt/WmcVA/xmxJKSfdz8JZVxOoFZy15wkyRHppwxWXl8sdJxfxNwr3PMDChRWgHhjRWgu/l2Oj3128gwK4wXKyTaR7U/06l9fcBHhQEZHWekgusx8KzEpygmJtgQ4h58bW7SA3OkSFIvrwTDDTjP/wqmkBLYdd/jEtp4T63uXWXIKSWwb9RmLGb9S3gV1IsbpeRyYMfgBbIEBnJxEAGkPAXYvpfCiLod7ORv2CSSZa1/pA
876mvTXsWd/9x3H6y6i7GX8Ruy+aLdcNMUXZBWGs7JjqxzsAY6mwSINK5THIhzUOxl24448Gl4c0ZGWmdEywuozcM9r1nqgQB6LoWYsug2DoYmsNLV0PnoOqfNmgwCdiCxEFcjiH1iieL/k6GcAGaqoD+eUyhvxNy0t8ttn5UPwWo+Fxb0qmuZddTVYAjodvKvLnNlKccYJQukG0a3FSddXdySx1rj3xFMryq6FEbUQwF2eK7HrWpsoaZzyR1gBJ7Dt9Em6sOYq9lR8FaEQzxDYEGK89LXQCqrACIhaLmq+sjBcKnTxMibeO5tLz9Rybu3b+TrtDXeveugrmhHug4B3V5ax9AUTC4W5mzFjoA4rYBK0wOJzAS5AQJuiceZLVuL1XHf9JLoNpMFD4AQVBMsKp9MeeEXImJ0+EDeB2/Nehh7a5jJMIXyg4TkoAK3gAOUg5dIYav2BUmQh6ZsHow1EysXaFCbmz84NVpfTAaJtbPHzE3RswMMcWiFLIjO73kWFn5rmrwSDl+lvXNjh1Bs+LU+xSrp4nRRVunG0U5yaZc1O7ueUGvj8rZpX2n49fk6/xPH88oO+CsRFnIn0bSWbzv5Ttpiuez2k67Z13spGCX3cee6C3RIQYUok8FGqXKFHz217SsP4ZdusB9VhYxKbNpgNIEbdgnCCovoBP0xHXF+rotIva3Mgp1weN2qDMdlZHMnV8OU+hM6R320Cvkg6IsFW5hHqbyRMxCvB/rxQa/Y/PCftUCgEBQ9OwoetpFHErBZ8t7FRbiTU4jSaXcLRSv4y9x/A+R+203il+fAvq5gevIppYVrw6xk8Ez+6rvopEcET94JH35Mxaox6upZWUR41LwmKrY2mPxZMofRArn/NSkD0Mw8qk3yFQNobYXd3reJi+zMndTX3KKd6NTXnx0+Y7Uk7dl1XIGN5ejlhQbg8X+WtHIK9gn4PU5HUOiyTShrVzAQ0HTR4uaLQdD+sn9EMYo4kUMU/k1hAbOkezVVZl3D9zap9WwjCdQLuhtHoubvL+dia/Ww6Hmtl7ebap/2KqHGC1dKv8iVl0Rn7NQcF8yw9fb+vLGqI/IZUJtc1Myz/jsexIFflvxoe3bFcNwe89zHgEJVwCBB944PfgAGU/pGe0OdM/QH5fnar5r+qFZRbiruJ1u1zL2VEfwEdYE8WYDXW5RdgblRzE7JZjY2ISQ3owiVmOXJs7LdizMr0wuXe/Tvlb/AQ89y+KmoWD33WJVAvcWsyUSzuWeOL8lXsnL6gyexABBVS6dNgR6/q91Af4nu6G7KAc1yoyXi6l9BPPFwJ1SVuoiMsMKXSiVD+gKW14ShK8zOfiptPFhyajIZ2vl+s7EQpgAeY9C4EP1/d3tBtX/rUBuEOqQis3uLpb3XtPQdHwqYnfik2bU3Z4HN+S2Mk6vDSo9fjA96zxcYrF0nP1FVT7vKTMr1HRZh1SHbiZwv2q9He15OfFY06eAF2LBQdolF/6Uu/15p2LtT+KXBW3rdgc6Ae9w0LU/7Ksp0e+I8yQr0kYrqJQzhq/xusqLnoKw5KN3Mrsa/JsolDkq9roDFADcXf0FuOQlsKjTSCoPOJT+g1xUjKMKa5ErWiE43vC5DjX7RpdWLP2LRQgDTlxSWyPATgBRM0Cz1oinwWKw0qSD+/Y5bqArYwoAQO3peF8u8toGxhqcvL+/fL29+oC8p6Mr7LzAxAHTsopAfxkayd8WvEwgk5XZwUFCUP3B7UDompCGvlCpJyAhcsGL6TEf7ucTPmUUsGwHdyXzD9Sry/OYYSuaiGf9l+lQ6IKdjhG6F+GbfsdBNJVSYiJCRw/bJt/s9l7oZ+/OMc//ruPmK0LBuUxp/uJ6sr+QD1BnWLJfVtjTO/o0ueAy7HswHHOuuvyP6Lx5O0QD1G+QPXlWbQK/Ild2VzKdrvIOkYRtR+kqlaUrs9klFQi7ONtxGS97fKRUjLZrqUxDVN7s44sOuGZzIvcn9lXoT7STObV1C8ROqmXutNOFuHBhttu5S/L8aE/O4UCW4lVkSufrErajUJaLcS8hZHImj96sSyv5vg41fyOL/UujSHtGM7Z1xfoAJGcOws1ukii1iltyHf7s5p1yqvb4FqObyEHkafKk2AfJB9e+bQlW5tvrzoko/PNXBrWlISH67BpFYWEIjSKj/BtGd7aRxWBGID4skSTJDwD655UAe+3VbSWWTqhpCz94yAnft5/+Iz/PrTwLVWMZGMcQy0AM7NRRnqK02daiSzIy9BXCMhS/KOoWPGLR5d6M20vQ+15VQPd2wVsu879rDcfWZbU6DDjNfAK2NxdqEy9Wes9tueH+5zAjHUUG0KM/EqX5J2b3u+8IdnZ8Vslb9A38j8m4fsIFuuLiCmupbaIS55Qx45lffofn6xTUq23oAONfqwbbVEtCnQPi09csjOsZseF6kDDybgmB/gSsGsL1dIh31H91e5w9O34lEr32NwE5SFXGN3lWnrmvQX8BpxAqEofqi7OInzryXSoNesRFDTPJPlo8rp+aqkxg99vbb/OL9YnBoGqgw8Y0+oZfLSKaO8FSSml7++6Sl0VLlTcLJ8HAu3enu7f5L7XhzK6JY5C93wNbpruVO8sxPBGNCO35GrI/fsSLn+1Ig9bxf6RtwH7FkSD9OevGKHwvgIgpOsMCrWumu2x1anC2C7PFXcHCx/EGsbn2LtUXN9eDpcQmkWuxyIxQq30lX3twNaRJqBO39+wEYalf1ZQoeP5VEtA6wxBsE3LwahCAXYXpB2hnLGMNb4KeI9htnYNCHhBPA7zIz0fZnqM9dmR/I9Qr0wz2WEPM1/7Uv7gyTJ7ZBQQyGeovh1vRXofVCCw2rpY+lfbk63yzF/6L8R9Q/8UqTr0/mOKqLhSD6nTK6IUH0qb62X67M6VzRqTeCfxPHd7jEaVaRpgc9TAYsF0wUUx/hFfeRmyNftCMp1envKrOnl0MSzkxHCx8M3OTd0I36ae5FkG/zd1r1BTqMTrQEF1UDBM6V+h9rzPJgcYlmhiuYO6o1KD3058F6s1/y/VBgk6Sxg4D2ZMsrP8/LxqzrSo9dD0fIBuseMdgmghuvgEh+pklJNJT89vJBkKyRs6i9agHcXqSsz5p+jj1o3XqExoisCuQSNo4Mbao/354FQBdTpYCizoUqEjG23nvN+9yJVgPikCsxbDLRa6SYCjTw43yGUuBLfNorcUHCKR2jfChmNm2UPZxf5x/4iCeyk2uZ6TThxLNFs/spCSNv4HY9cmGiglrXTSCyOPJDAS7RBMYlMKE0Jz0g+uolcg8ssFrNeqJD5sh37aFNauBS641OzJ20bQ75CwV7wv4ZgNbhifL1A583og+iH+Xc95hMJPiDqq49KG+VnW/J6x1orf/gBtCaEbhCKUw+T1Nwlt+5gQXXZZuOVgj/yHzGDMMUHWkplTJSyFg+sqDAQ9dFPOfVSxr92wX0eO59vvXnUeGRjK9jdj0woEPppD0WBFo0Mir8I1UwEYHcp/U8329o28k2HEM6H9qDm0+14UeGkvniWHZ7zC9X+fGqKmApnFC5m+rtSjPPS
usMb9Sz0rMX4Hi8l2hHnz9cnyA7dEEp4cIC/WuKFDNeBe5s/yd5621JqpJr4CGkzA6V4Irh/2HLuxWZypPXTnyXZU7izUesZnkjzp8b+L6+otC3t+rhy2s12kgH6Z5/7cVxpZGfBRLPZe1bHO8aaAmQITFbhEhqcJhAKaan4PijVPeh0And4xI6ro+8SAYB4JnB33kRKyx3BgVclxQeFF63eoSXc1oFfj0VPxOf4c0Z0Fq4X1iAQqrymFHYz4YRAbRXnTGbLPhaFJICR2ryWekphSAa4aLPDa+vR2uocUBb8tjMxsg+y56X7Svy/AGxifsbY5vxxen0sBUL25Ib6jqhCFrbgBU8g2PeR8dJENTpkC3fCXYF2JbrHeh6aYTLi5O4LXqP8S/xrY0KVf/dG3GJP1r9OfsU8H9i3oBRiji6XiwWgVRMcJT2ACJ9lgnEQYKJ+2BVAa0RaZfxPp5A7yh4wJomEI1Tf2BYA+St+R7FHjGg1zCfbTUQY6XARlYhb+BHv5McjfrIGSBvArRFkvTO9UDrCEh0kfiHh/QEt5aFvQdOk3PBf6xkMbTHA46BCtOoJ7gEQEfuMgJ+tBKmTmZeGMAnPfLflmNGWYqs6nc91WF1fCmfMGaKHxa4sSJsYyb8vXxI2GF/H/sLdk7sFBDujGqmXeEuUEI/gbFL50NVLdxhQ4NWyjv2usp2J9gnm2IrqKzFc76uk2nU3J6Uvm7lPKhOJsS9WgnB8qhhTm/3rFja9xr/ZPZApZtEa19FeQX5+g6XUetc1TH3bg5678FcySKIOA9YBuyxBDGmPt8Yah3uUzENPSwzWytyoYyaTEejvDoKnNexJZJH1tWqLy4yN2rqiqf7xb9GP/4IHpRFNqXlUEmTGmVM/WQ3Acq1P8ZYDvPT01HGKe46mUiVm9JXYc9CfY1ZmqE64521g+6sKOaHEbfOvE1TOx1toT58pdeX5w2qmTeZg+XpWzOl7OzE9AeOFAK1Q9eeTPKsw/3X+7NA63+i8EEvH9SQj2EBc4/CobGuhOEMC/qszAL6udUBdR+8eEaEdvnxi+NClKUzlXp/TA7VIS/bUg4tA+edGzl41eN+PLuyHbz78l7c6dDf9X4wLvBqsx3VTxdEq92DaR2i9pEOKgeSYwpz19mP5pRAuRAfv0X+VJRHtYT3z8UdDIpFIlJMJHvDlIrBuDFuCZ4yy5E0vKYDfm5bP/UNJq95AsB2LA1I/guBPGgQOYGRPUR+oMTdqv3kTTUHzEqVuZm9Iv+UonZxzbVTS6bh4oBw+gdTiHngm+SI/HGgzbSGvUimPF6hdGMFdB9DYmbVB2ie/pllAK8hFUiXmjSEqV/YnEsyCYySn+KsDIXIpcySl2qG92/kDZ4dO6PeTkwr97i/L3mHnzlHy6SketetGs1es8mDOrd/PFWvuPdm7CeW3JI6NJfc1bcKk9KiwPMe3z1tBNEAmrS9UeZFgo5dqzXhWEhhi7gv5L19X5B3JsSVZDRLAsSH6uUkFppADJD24+SjXhd/2hK7BNARwEhiebajPnCqvEipWcVuAjvwRX1yfBUosstmm/sr4x2wKxe8PkOt1zOVq9XoHWpSbpsnvJ+8x1ciDT2F0B92CL8cOJI219EN0wRIbBK6WtmOQO1bvwboUkjeWrdBSuqpL9Sj/Ul7PjJ4uSFyaQ9JG9U8JUiQi+UW20hkeFysAURJVLZwzvZq+144yiOKBJqWiBvFhTwy7tYXgcpQIfjaHSTZ/SJ0NEfPirEklewh8wOyxTQv1dd+LBkgPfnsZCX6TYzN4+rqAiYsBCbVfPaqq+rDQhWC9Cn6U8rP5ktNm8IHNoMlxPCaigmcJ+VOUqM3uZzU/p6a97kRc2F4ts4/RgtpXNoJe6raBHQjp7lTZFZYxDaPM8rAPNF6oz5Ik00j7hqTrRgVu59OTHxG+KG6eOJ/8V+bD/S6SYnYea3ZUHQ9ds4uNw3w5PTAHPUhz+JwlyUBTwqmrsU9OmA9PPCV8z0hivYkbMlGXjegiLb1niaX/Wr0WOULvr+OyEnmbxuSy0+JTHmGM3t3dn11yF1NGimFsNWrKj5OtCyk2eFnMJY7P2a6gWfFWugFOqf859TU3Cu59jpnUjZxPSv1xPGd1LbRzXkiOCWWwBTWx1aA5j9eVV/lOWYUlz0stKT881XSUBDh0s2FAFtU30kVTltZL/6uTj7Nszxhh/FrMoE6QfQOkNgOKXYQHg0Z2cYwLIDgIZwNbTf5qerNmDstVadpDdLLZzwNn4sdL8zTywYyfsAvZpty4Z7mfQFb/ZiYo2L62N6Xnyp2T8V/CGAg4Ib6x4msks2gp301XskAn5XeTiL64PsO/64C3NaYCFEeZ1iDZzB6gfwCbvrTt2XzKX1dorsvTLozQs4iepA8fEBOueolwX1nI1gSO82AyzptTOSrV2XJflsgUpBKlNkTtYYHKotEgwb5MHKr28dJjYvm77AFeV+oGUZBl9an9jtQJOfgby1IWNMVKVBejB1L2BbL4ILFIg0Xa6d3QFLYyetb8ceRTR/jGfDkngH7S+YlqliEYK3NUnNG0MEdUck6eZfIFgs6RviPT9QEUFy1iJA4CJIahCSHyV4Kae9+ieYcDgMrx/+JSf41y2scFNyYub+s3mdt1u04naYtY9QXZDdFCo+9qx6CyTBMY8+PDhYG/er68RjZMeyR6ri+P5bdOEsAfRizDv+67p+ZMXPFpxb8GcL4gVCH72re39fw205TGODM1asL1F/hYfBxVRxvtFG351pz6vBzv8TRHvE/NW4sWniCPLMc2MxsTrFPeTdvn46Il+jyv5hhRIPhF1+a2jc1M7CCcu+LXZ5DbT0V8OdvpdIjW2Q/akNbnwB/NN+5DgT4Kan2nzuMacXnlawE1SfERfXOFKlV59h/rX2n302pu8x76nkCd/FXxjCZ27vNUjMmcoLChxBmmztf0/D6SjVKRwdSWDFsPL68CO03zV12jD1cddxtfbjOz3RKuV/BBJeuvu4ZpXqyosaR+YXEjYWtTNtiGHNVr6trsj0/PMax5TYVU0g4uM5YK308HEwPVAfRX05W+i40+E+BVXkn2d0kUJN2q8s+LILa84Y3GJVqsqo6Hsm1xhn54G40amUyE2He321pFyNGPZuCRyGCQfot7Y+GefaqTTXWhxPw6/Iyyss6OCCTXWnBuWmduF+ugQVZb+OKV303ECofdjdZqqEGKREcu2d8UtufOoTHItAiaOjkkLLg4IkNymbsuE1mtiBi939VS/vv6prNmLPPYhPBletZiqmK46EcCzLiPHwhSm2Whx4TtpmjbWTmKwPrRGpflvhfGk7BWS/6J8bAv7nEwQVFPVcHelDN8Szad/o/oni/GY1MQN46F4y39Ae622G4I5SzeA9l+miIgSjfNov/+mzwiXstAxdV2s8t8YXdmnu/a9T5H2aiZGMSu1oJo1L+fBO1zqL40iLhgX/HIocSz2OyjsWc/UNCeEA7fWY9jLdUOVaT/nrgDaeH6xpsB8Vta3Zu9qnS87NO4mTkj11/Ro1f33hv9SdqnzAcp9vW
Ts6c30KNVlFTvobPxTvd9HL7ESFUiO0/WuSD8VhHw6+HasRD28DR87Hi/cMLKs+jRcHn3HLG51ZDP8cgCeOjQ4emwRZ57kL4id9vwRanOOXMmzDj4aDAHm8aCTL8IekqEb+Oveo+CSj6iBsR0LI3RznJ77fOdAUiohOPlX4Xmxi4ZTphxBxYGaXppAGNJevDh3WfQ8ebEsg9rTeYYNOWzEtjK7WEJf4hFhw9W1Ol86vMEntB3vn4sTMOJ8+V5l0VpK9Taj36clrK8x5f31Chz5bs0lWa5YYV9a1OrRFy8G6SSASI9xZTiWeNkNdqiBNPu4m7Oen0gJHwAPdFE2CgvYQD9rnMeqzqaadAQnaKMELF/M1mG+PuwILFw6AzUGEtrBKGmKTk9bvirYAfFVXwWUQKKU4fbbGghXWXJGDrHG0jYPbav5slcgplmjrR0Zxid5ddso9PaOMnhWriOg3f9FlSjXO/QLAdiEBd58vTTAvRdDKHK8QWqqjnGSOC7XZoA1X3XzftPe4EmCC2HYZOw0tl04XsNdLms/Wf2VZDF02NH8aegMdQkdwtbmFRGoNi1A73YdFsZuosPZaxQ4mvRkGwM19Oy4/1YnDernCGNbEMdDoqICLqyEF3w7fUtHYCTPlTRMRM7fB3jqBII7TNWVetgzaUe5rYNKRinvEefKMwT7ip2K9B9bMTqmgPFOB22Lq6H2j1eYuWYPp9aUSzhpB0Fb/vCNlxMXIUoMmN7Z88ZHqmgLfnRTznoGahb3P4sH2b1gGKhxTXxffj1rPc/eJqa/7MW/OWggc/spPnDmleE/JuRy5y4AoohffZcsditSG5ouwG1jjdZJh81IBhqj2kPrMGeTZcmuH567Xfkll0dvtMkceRSk5TeG4z/4zVYTL5AcQlOaLTtWxpS4bMwVU2tBBGY+xhVI+e2Qo6zBya02DUEYPOfSVd4dncAortvxeiEs/GuiojgyYDiCa1wy0P+dbTi2UYWheyvFfcIOWbyFs5Syd3xZLC87lDTkaZ3kJHeQL0C26y0ZNfRd2JqXQmoRZB58ukNyFZi+k1ZFMuNcj6N5h/+6fZCZ6x7ooPHdRqBIoBnFbnqlGcETq0awrCdUObCtyJ8Vjni3HVT9b6pmXgtQ8+Lg66CfbUSC+sgHdgJh9KPHKz166HaFNHwfNw1Ajco07ojByEtIYFpcXZ85xagdsg+J3yFcKy1czHb2N5HzGJEpi8fjKtfd8KSozfCNQWt4HTO7ENr8TXXN0y/NEEsjhaMEkFxhaPWKpZTwM0JuJkGVrl/RON9n6KETOueOjRwy6bRvCWgxPxcgybCGSPBjlT/bjLV33aUvMfjifSkrTXXHpNsHcBfirPQHP5sLGZbrtESI153Q0KOyYBhUT2k9jarHY8BYK0Mlsmscl0fl+LcfYHwn7Lj7deahr+kUKzBvRnHlmNPyc2K3O9lOklLVjxqMl6NpIWrVAM93wdnnrtUaPs2dLnctu5kIe2JeiqC9qf6YLH1QK4zlm3OOevEmGLW4FX6o8RzMPWneAYqGKKzzTqvr+JNQ5h2uotfUy9cSAnsTsQESz8Td/QGvc+1rv34WXNaCwARfOu1zaFX3ZBGc2H6z3FHyrhHqrVVoi8qLP2ouswhGmqulcHIDj1TpVz35FtoS9IKMnRAH4dhkraD6vIySKmifieLFEnL40Dgp+z5Ffo8VlgM52eP9IN3OHJiq+X+umPiGqUEzlsEOaV1ise/uFz53eeDiuG/hY9MhIlXh4esz18M2uiMoajbvSTH3K6ywjh2EEPz5ppErhAv4jsHXZzhHpaLQ2eip3qsxSWpexdYPqHBT5rDSRUiTuP9ugEGmyuiWFSJSS3TGRoppBS/FHw5rwGebpmI3hSftUE/Qt7IGoT+5NkGcAbYhgGlIEzpgwy06+C+PLqAZ26mgIruol8sU21rR7f8Q5VD8+gUTymLBGKt75wI1D7vJhyKd3Z97Clqa3j932Ccr5DYUTSwONPalcMsb07ljqqAJIKD7HQwUgfAdr22ZNRLYEzHHJYuSfLcUPQJTGoxaAtq0rJJNXWprZpoVeEv3O4bbbrpr6ksxkAy1ChciWL8Nsu67TNe7SfTSF5Gtz0DpO96z7tFDsMyTv95JSZfVWL7jsYaKzmLJ+843AJOKJF1jPYZ8b4eHxbYeJbM3C21tX8CJ+eidw5dhejm2K7su6/dmUCEemM+t6DbIxpS2823YbUB5azJu0PXFsDO+LtCG9MDpV4TXrOzTUT8v4ywZGRC3yksAZUBwuB1HGL50IqMwSdlT5+82ATOt1UhCM9pdtxFlSX1j+kpeN/wRhSe4RPjfj/Tzkp4hvlv6szSf05sSyYENuhzagmTR9HmI5RacGC2vype40usdEjdiswGeyi1qOci8AY64jxpPbM1bfZoMDel8McItY/HUQIvdBq4pMeJiGCi84B/PeuEuiQsa5Zl0CFXox3cKzRm7843FPIaxfpickxsEnpqNjoaDNBn4Gdp5uefka7hJl2Oro9Pi2DlCNPDeB5jA/1XgMRtNVi9ZsUbIL7UoTqJRoskps0lqhuWEdahrJSX7adKWXqp2gLkfdmZ3PB/tTuOjSJC2p8kZIVIkyYzBU5zRSZ9+d5sQI1aPDuK2mHsceEDeWfb5DalxJ582+MCKhJctQnP61hSEgCWb3Q7iWPQHXLQUVkxQ9WG4UP0leJOANxzOP3bN0ZN/9UnnlupBmY87Lm0EMljEa2gyKwePqZwQUNND8lnHHocNdrhjCIDGf09kNCnaSmjtrRodz0A25eQ48k4HOZXc9fiBYitDBLZy6VEO1pJZvQ8umQfJgg/143jlK3DWg+79qz8QMnDMZwBXv1r3DyhOEDLi3LKAgcl6n6AQWtSnGe3l76f5G3AYrk5XIHmR9Z96tcqshW8g0/0jVuPMzrISMGUgQaxUsTOemo98Wcw4TYNeKY3aMZ0ZwvrSfNQpqLwcFN7jZtVcRR5UzXu+ZPSsaYxxowUsKPK/h4s5vK98xAZPX+2Nre7I3u3o89LMl/W0HOQPi8MphgwxaJ67UG9NWAelT8qVmIZTG7FrvmqUC4KpgHlMUUNWSVd1BAWOfG2vwOK/UNxiVB1l4ODIaYdk9G+B6e3t7klKlryk1h5IeBMLnR7Ago64+388Cp0ZTp58rGpVAD7ofIdy8lTuJBTN2vFFpP0zvyAE3exmbkoyUIXRz0p99VKYWfwEnvxtxC3ObHOKvjoXoC93LnkSkJFtp4AFv4PM93aaIPJ1FpN0AEHpOYJ6VXDiMX3rMYgs7R3ryIWyxIsrZLHLAbbYAae2e70X0R2u8rP7VY2R7eSMz8869dtB6KjrS0zY1nJIxvA2GrnLmQ1YJLeGUwlxvfbtHz2aEx5xwzRQJAnE10XuP7ZdSSa9M31mqFBnHxtDpMdxEDCuVC0xmz0AKEEOAC7WJ3C37hk7PSh0fGCrPjSuBHFoIUwGBDs/eiMubxa4XOhiAOQVV62UcOtS6f0xPmq/3U2Krl3bvHOC8hLWynGT7s03YWxIXLecxdrhjnv6ptZEjSn9s
Yra/JTdpQ3ez/Zhk5zFwQB5OjOZW/l7hgrZCmpZhkNQSp4d9HpmL1uWk6VgtM0QQx6s3P12dD3NJL6CUNw1JbHURxCIqXBuNnArRoTpuXtvG7Dk4pzsc2fuLToGFCDseJyyGzIo9bAI4a0dx9I/Z15eHDkutru65S42t36kOLcIioIkMA9QCMKiFV5tfoeMwx3oHLiRAlZAZba4MItAdoTOXrLjfPvunx/ikmeocVHs2Zj5Xv6r9Nmm9PS3V2oQDx9nWydFFq1lPJCYcId8vrRvMnpVr+LmgaPLZkPBBGAl2U0fQrLmq1ef4boEQBTFioaeo6ImRqRCf1kMVZPqybOQumjLg8YdJNLsRJU+PLGRZIyPwEEf2Csd2G++uL69fYrogLNmyIrrjIs1fFnyMcGAGEiqO3t1XF9E3xqqqxuCh8TJVqirLOFAkWgkMe+nmh35INBkDu7capHiNkogaIPvBsqpaeN25MuipOhnh2/fb6SATmz5YSaYZ9nxs/ZZW+6z6wPquPo1h11idzdjDeZ53w0vSHaNGaZIZRDdg0XK1T26QWvHiZWup7yMSNkmw4ovumdQnfF0wXYnngSUqdj1XiCjPpJe1vTs3GOcrBpieXe7GIsh5agMpH8qWKGMjKu4yx7IaGSmjA+mJTb2ezT4dI6Uc8ITrw+fO9qKg+20/gAVRdK/YmRuRfTNqYt76mzz6iQb8Agx8Q6PbZcKzDS9e0pP8dPiFYDwIkUGPgH95DkT5tXTX+wgiPt218L6d9lKJl5px0z66UaRYI83I9MuEXgh5yXT8BizI6FO+b4a3plCswT70nc2pIWrdMxP2Rtwa9gtkZLUnFi8VYRhYoc6WJIBPPQU7qC2GAbWkwr4KhHWZy/1uqQ7QJ+DGLkFeDWPnfF5zdbOkF5fXfC5Bdk1NJtFjLwUFlIvR2nvISG9XgXEpX6ahBGnGdKk7n61zmATynTm+3v8hy+VhMrcY6tJIT6TdYx2Qe68lX2CjlKS8a0sfrnp4V9ap23rT2Tikiei2oGzqjPf1zdj4i7MR0XwI2623GZvlaPr4tJMTgGyMz/YS3gx0nn66751gZJ280/qTYrfL3g0in63K+f5Io4+Cp36BqxmkI/CGzFG1L2C3aoZrhXRaVwGp6QtOldPJ02COXWe5faiqMfVgnY+eirUssMeruy1axyQpeL16q+UKqvqkIeexEJbucFkitE96qw4diAIiW3r/3nD0tTQs28ksocXL9mSAu3vyuxuBM7JV9Cr/ZtX6bu3b4bDNGGLvwaZW8ragHmaXFVgZxhmkI85unHbxG53kXyuBvzsGq1igucOTuDKIRYXVA3PYy4aIuxVrAqsAzpKDRsYMRXWwdy7zqrtOnEvGhrKBHiYD8PnAZaK2Y+yto8OjzQj4TKgHJX/YcFBGI+iOxlCwdqHHp5Hc+3ymt7MULX54XzNfAaKtBLetawPJmTtNSEHVRFkvsghwhJeawhiX7MSkqSbF0Ru7tRYR+iEY/IMLufsrwrtTUjHrWXogyGUr3QiS4rZ71BPZ8eEYDC+96mCe7uGYNWhEf7r++c+FSQKNU/hQFoOS4BaA3trARGEzjrFkQCWQO0SHT9lQ83JS0BeHYGavjslhhl+XznOQM4Q+XUlOUo8P6VBWg/hGQxxU2627FFUnvYiR8A/aoT0F4y8uT/Nuxrjk7biZ8b05jv7SQZ30pRkGLUM6r3TcVoQdIcXWE5oqI4qOkFw+zZ+XIX5ATfFZlcdjFJyVkCpJvE3wrIK4EHaQG1Vf79HSLJafArk+T24qk0bvLs3IjMtQu6dFEm+Q9KF1VpRXavxswRlN99UskTq+ax3rm0FHoC76tFhXrGkUkGYL6rty23w44aT4lr89nqY6oh1XPN6WdS01oIywIjPIJ26AUZWkmDCkv7DPy9CijcKDxDdDuznF9d1mON4s8825Ni5w/Hfj3R92Pe5627I/Y9GV9G4zxFgvzNsTjOIxU/TsyG88cTAK9t6nAPEM9a2xfmslfqhCk+qjHmpsUFy51hfcayaXXEayHbV5c98qu9MhJk9PfUxinoncRmYrSVYpJt8gIKY7gvo0NAltNwiwzG2MtR1ypEIowORq7jk1leV93k8uQhChZO+gSlVwbGjYcDF88bj56hwz2R0nZalYcnQBcysxLNp2bZyahrAnZsqpqrwkOsNtoKJYUwR6s16LdedAzoh0v1GBTPJyUey05dn5p8+rteS1riGkbdzhmRCGqXk1hWkO2CUxTFez3Bgf2K4mgmQGMcucqfN4eLldoPKSyCXpkzmW0AXu+efARptB6wZjFiX/4SvqpYJdz5jqk29Q36g1ET7vJnyyZuiespLg+5JoASyLrmvexUAjDNEomLw+rRHd0KUI2Yp+9EgpKQjNt6cBJ4AMmbu6hWpqmoQzvSb2Q8m8q7krsnxi/q6O2HP81A04PNQwGMfludfoXIrvW9lzgZQ2jIvddAXboxKSERYNTLTbQT4iPNh7qjIoveFYGiMP3q1Fp+BmUX3AjiynpfZAuJfP46uNT0E+atUXutOjT5zwmiqCGJ4X5S+xpRwdx0g3iV59NQmKAnOs0Z6SpnZphuKr8tlXiJLEfC+bS1s43c9uqNN23/tMOV2GNB/cJ70y13GCL8+8xxHUkat92sMmej7kW/Uycs8qmLlNGTI2gdovwAuB4uwTHz4MlpH46Cmi6YwL49yA5uiXbSQrrzVo+Ll1dbnwrYMqPPUtySO5h46D46G6WnxQcBbWsdgTwt/0OfOHUc5lL6eZ6s5j5PC7i4peKcidqWVY08iD0LmJaxcIKT+IsijRNcCmWbFCcYcVSoU4EV9NaHHLa7UK8FuxKt/sFm89aB5oeRddsjL94T8Mf/0Cefb1Wv3DGq9fQHlF9fm95A79uRdg2edesAxf72UG+su9lx7IfO69WJL99V6A+4/PvSdGUixNc3/71qPr1rWE2xr0S6bBD98KTmOvVseyf0Ovtb3WCxovBbZf/gZSKUAlODi1ZdOSHb87hfJ/Q9nuELOhyxawISH07dvb7ReSvP32Q34Z4f3l67+TJPTlxF6lS/nlJAn9gmFfzpZZVZRfn018vTWavxwXvz7s0zbl8wrAUXmwWdt+e6PPZwSq0i/3+CzGIr01Fn1xpFVVxcjS/x3Bv84qai/p+bnuy4l5udjmlxNZWmT218NhWsqhGPqo5X87y0zD2qcZeA50Hf12jTYM43USvk7W2bK87eoEg0TrMlynyqVrv347L9PQZN5XOFwwZfKhX75eDpPX8bUM09sHT/gF/3YYfH3g54A7/nD0/nqUrNP2eTPwmOyoFv93n4PfxruOfhsBHHwb4B8iwjysU5L9M9h+Xd4lmors670QYvSuMXgmc6TlTsOi2ON/x9EvFwJA/1PEmrI2Wqot+8N7/Awnvt56H6rrpX9FyL+jKPkLifyGkGBfmt9jJI4Sfxzzyxy/DvMbqv0wMoxDfxwJwb4b6QsQfhiJniawl+Kvl43ggvkfTwFB8T8+CPv6yr8RwZchfyOJX+H3p6hEkxvNsQyNjA0dO8/ApdP
u78gPRAL/ch3LfTJ0FdheHdqnCqQ2XCv0WrP5emGivVacSatL4ycK8LGswLygbEnS60/xuLPgqE/HLxP8cn08fbv825nrfX83yA+0ebGi5Y+kFLVV0V+fkwtfs+k6ARhWlUQt/fWLrkrTL1SbzdUZxZ+hAKZ/hf01Ls78DefAWBehzl+J8DuaxP49PPLvJPId5lDEj1wR/sYpf88TMehf54k/Xe2fscQflhP9fwAG3LNprubPwi5l9o/W7Ieblynq5yhZqqEHdw5/fvl/GOr6G3UXk2X6eAZ/vmDZAHaAhjxa+5+LWBj6HWKhyE8QC/4JYhF/FWKR/7GsnS8rF3xMoyWal2EC4Nwv1pHZY/QRNvsUjT+TnezQDtNnDBRFb7c8/7lUrdr225X90Gc/EbT/Btgj1HegJ3D4l2+axu+gj/wE+CjyC/4XgZ/68+CvugiIYubzl57HLFm+4nL07SCvDqBX/EoFWhRn7f3C6g9Vo1w8LMvQ/YRMFqAT/bhqeR7jJPTDqsHod8t2XQp9fn6ydl9f+4M9f0PpL4eIMAIRxVZPxnzskCoWA1B3Ddstebe4PungH05m6eD6yy6eSmTgAtZnZM/Xr08kf/1jHrT4VHcsBndUbstbzweGrGiaI+UTP1CJctJxlW2LsRq7GCrZZlvbrlr7wVtQKzC8XTWVOwyY+oTC4+E9nMZS3UHmebu3r4tGuVKEqrQerOsSrEdOVfk8+oc7Ope50QWWpWjF/CaB0dOv+dwv7wTkElL47Tz7HoSSc+E4VN3p8Cwkz9P/nCopi3/88b8mVXCn8Sw2RGl2Zfyde+8DKz+KR3bZCJK0K7tRgy1TPXaqmlwlaXpn36e3PsKZEh7uU5CREtlpTY4wreBTtZbHCpLiXnjpiQk/Z7cJJ1ru5CKW5J7L0/UhsK+ZRcWiNDVgmb/52SAXiNvoaWa0gM7tcX2HW61ZkfSJ3ZqrmUQYs+8gB/mynzF6cgJFVbh7ttNneRSdhfSkzHPcE5fWjM4XkPyqKC1xMqeIzccEEJvJEIzvh5ja+r7Qnh4XCrxPiYm0Ozx3M0byVvB3GQWlwowYdX71cJx6luvmSUond8ogPwLOAMDR5wOxFX+W70cfENKgT5qD3pm6vn9am9qVNRkF53CDUnNLOZNkH6bTGG9pcnsGS2NZtx2GJYd4sbcQpNbtqhndyNf4UkQo3aVW2n0pRF+Us5pTkYMxx6x6LOTZqHWRuiFl3k1t6ldiyOxUnwwB+GykLYqWPGFBECk5ysvgzD8Ns2xjepSXTQgvIEGg8ClQy1DSO95xzXtbtPvO6uZemZnHSq+GsBdmwKKcsjN4qfCTv7AYxJfKW6MNvFSEw8okC5mNu6qb3RhxVk7dWwRh06x9jR6WLgb1lgJRVBulNfJZfPuqqtx9jps6lStX10AeiOyFmna0X/vJgn11AOoGuTePHmu/ffcRh0zhqvwdGbwTylMzWB/m9nLiTeaKjlTmO7jh1lnUDS3M9Ik/NI/WYf99hgQGVijOh+F6MOVsLD7Rfh3DuB0fOsRjyIb06yxvUDtJQjW9xmR36hJzZZ0NrXyXks33ns8ZpZ7CJ4Fx2LwEpSSUJw/485psW/lkojH+wVCvXYNpQv/EWc5tA+7ryjTzJYRfy3KeNy4iZ3fJCrtpAiplmqy27sKcps6WmW/ydq7QfX1QLlgnGjbuE8gNECxNAHmEQzyqy2Ger6xFTomQrn8vkgu3HUOjk5GVGy+x+4PvXJOAEvXGbZ/cys8mbCeaPuqLk7weIA69gNirGrFF5d7jWBHMmJxu9UZY9GpuBQXucAdb4VLVekhxRCcDOVqJ7Rvu7t9Iep1RQhXqULW4cbwTmj91k53GHHsCDx0Il8KnUQA/KkI8UfH0JNkmAa34tLLsM+R7cOzSOUhFFBD4xZY4BBbewCDuQG+nBBMC68iltYMyJxBtYqRYzUmkNjYUVlL3/oQxMAO0biDbLhim3GEHVzFyGQpRZQmQhwKoe8Jvb8SQ7mtwR2q+SJlTjySDbG/OXbTFkIMdp9MBvo1DcJbjp8lfn1/X0z4GIMdysQXykwq9cXNMwsCl5wc6hUEpdxDJP0ldXePl1uUUuKOIw6rGwd498TR6dkqR9a1UE+DN32MclkhPQKB8sBQKBEXqTAoLamuoT9ddoo6050YkjpH3NchteLmpnJLvuol9W4d7qQ8hFF5cAZoz9uxUA7sgEj7EMq/JDRDyRGTv3GY/268pfQr4pNIFmdmQCLJXSMmhWv7g5oeNmO8ii93+0ypxZye9ghvfifD4xc0evF6zBdaE0FDce4mt05mtocXEabtBdz2YxScdCG/1kpy9JqHJ6I55KZ5ksUQIzLZMALXUC63HhdYpptdK6PARCt9hnsEtyQ9LYtGpCLi5oWS7V1wCTWLo7fmc2xlKdroJYLXsLs9WPeskYwaihGd/nEkYDPnt9H1swU/uhYbq9Miw7qlxt0VaAyIEnnB6w55lTb+fQ56627L2/elNKGYTjx0O8+19X9tOvVnWawXRwWDR4J5WVAAG02Zk5uQ15p5HoJCrA25aHDKE3jQdcvMoEjR/EgiZ9v0q3Ny+BfVl1UiVSC0ZWi3PeeNRD5o0yilrB5Gwz+X1Stx3NnFcpXXArU3Qi/xEkdBExqxN9BFqV9oEvEnpnk9F55lOt/MqaHLPV613N/NzFvI2wkuEioKKqMBvIlm7TRfVMfeAJKR0tJ6Okh7AYxedxpPHsIDoSempsVuf20Y/bUOKtMzTavtPswWVx+S3qJ4Mt8E86vl9/+puyCd+v4eApHx+bZXAsSc3Byilm/S5NT3GBKNjgABcXRF1LjvizUZDhKNINwvv85h31Gk9tM+uOyQL4BRuTxCHDwgqa3x1goQB2d927EFCHAgUrU3CpQxljRRKE4cW5LEgl3SJb1whnhxraNGn4ul2d+toe1ZlR0lxavZpg1eCF1azsmBvpHPfvOiZn5Zc9KexYDZk8FFgGKopG/fZQDThbqILWAEnFf1BvHP8NPEH0JJvsYieWTyfIGArlrRLvWL6aEcqAxx9uFhzMb2dOc5lj6fzgbmd5dxilXmLndwKb0CkPkmcIcyanmLyDgmb9A7uz7J95Xp5B+kGckrJnlGW2HqbN+Lu3FBDaCe/H0/ZVZhbHS52TTY5GqLkpwucntYfupiAAsSaThgpBojY1BosYC8HoR4kcYtj3BClXKXXYA3d4+Q7Eh01DNA6mBV5U2pZRiKvTd4wKBdRZpC3BGCJvhBWLC77JL34LeNF/XBkFqACN7BZnXsIlNSmvgZGIUhkD6Ujxo6sZk511mEQlV0BAjmvR7lgjXeY8jQorVibje9RVfLQ2niMrLO0ZkvXVhfmSQ9yrTcIRzIbns/pgTuLO7siBAjcedLe+b402Qwu7ECr+YvrSqFoG023tRVD31vhaRMb8U4w82Dt1UsOlR5Fj8u0Ltbp2e/S5/b07y+rjKi5QBYy8Ml7ThhIAFO4/fJM7XXwcASxR4rD9oIt8gurUuGtT7DAoJaave6whpiv99CqQXpK5VkTHJ3YMUh/pzrTpB
H7/mpZe2El6YyOKeKRTDHfWk/H0nJpLs30gHFJfwywqtrtmijjbaux5eU1vc0MAPS3vvWgcXzB/GCn5xtEwi0sDW1WId5GvMZ3th9WzIXdnvTp5hP8uMvEgERBmlFjYhKcprqVsiQV0+++cO99ZWwviNQMqCTVsIV9PR7mefcu1Yx7euHCrwLc3Ga7mqeNr2NSxx+J9F6cqYPcPd3Nd84kQAqnSdehkld1vuqs4LE6QJCLzF6QsY0m8k6HkNi8JXjaz3HlQWFOWkQv941fGgwvxRglPXWqFMRueh/NvS77Inx6zijyHJZQvkNcjxz2phlfs4Logft4Kt4S2aRn6UCtqSwRah/tqAOGLe/4CFmN8GzNY6PdjL8YodirUa2KTHLikq2Ob19+l635kpo2ftYmjMJs7xuaZ52RoztvkC1kq4+n6B5ios+U+no9FtU7rqexn+qLpwsTy/11RmBpQ3Ofippi7moMMluFQqJcMEE+YyeAmNgl6m6psQiwH62HHdzBS47q4dfY3TVAxbrwROk9avLjoYSYn0iMYXkSo7eaPgTNRCz0cmACG6p+SB+K/0z2/TZXRUQ8Ta5Y1bNqmmcKIuf3CCuNELYfRt7WFqZVBTYqjbELuwKK6HRtkUbWqkt/X2kMFM6xlm58gaJ9mX0Cs9vwK2lZDL/ruX6TG8gIOPnRVi7mVAOmyFPJF75UyQo3zOUbvF3dCFbCF00JzbakJ67EKE/uZB6UzgS0TLxeL/cp4VZh0sTUXZ8vaZ0sjWvWg/kKu/65DbJ07MIzufTo+7RFtLcDIThONkCgtf225S/gxQy6A/hC4iXE8rAuOkTPMv4maFIHF+vD7nQM9EqHfb7pklw5isTccDp5QXnHY4VpRvQCe1TTJz1/WSIhxaxChDboY9SNPvb3XPSqNlIxcWevz2LVeipG+wnZmznjR4N32c2+GCqZNegvt2pBh+2yY1JGT3RdLa5btYG+FEZOJha1lcemQ7B3SmOcdzwSd3qgJV4DW41dqgYb7QrJmsJUiMJF3m3BvRjzFLngUJ2HsDaJQZjiA6AhDdj1JKqevA60pAfKNW0LdbGuITYo2B3RfNvY5jbF7VYJm+IUGQ1irBOr+vK9Gw6TwRgCflJu97h1ohS+dOngharHRvMSPDibcCfd803ldtVUSTi096iuVK/Hvs1z0HMJDUcTolo6XN4XRIvI2wDVbjH4cuFIknt0PC7YmdzvHQGiwgwcvd7qQ58GS69Xat8puhpjMUyUtYzBtMWdKJi5LFa9U8pss9VGL63KjDEhE0LPdaJ8cU9LrTtqv9isYHepD0iIxaaVtebXQN/PQK5fF0VcmP3ugNFYfL4vltZQBUVFw/ImQtLFlXpG9p4+CZ+MTcaj8lSiW4I9N5UreBqMCYx3mjoc6uUR9kiHUa6ipbfbQgUr2JTx8XW/OodZhxev5CmxCrih1wUH4T01GGRlNsuUIAv2otW3oOTBFpIcO2MPJX6fpUuNK2150cCtdJA8fVbWtMRoEMN/0Npw5+nH9x4S/nGt+B0D2B8XIjBXlUg39U+U1Haf5kPF2UCWgTPqL/IXYj/3F/7qG/y9xxD56zyGMPyDyxD7o9v/R9f+AAb9OOH/J7vf0e+97yT+o/ed+u+M6sA/BvF+snwIWF5QXRpN6fyvxmYeNKhBgeY1/vv8npes+x+LDn9HQWLDd6FbAkN/IX+kchj9i9Dip7H3Hyn8JwuJA6xghz6vpm7+RHnH9oL1l/jdn435/f+M4bPq8HeMAf9ZvBf/70SBnzGG/9NSYLB/lMoCAUH5u2yWf5rL8msSDfwLBKF/+30azS8EjPztn6fSfI4u0VddYAeI9ufTY/5p1svv02P+aR7NX54e870agsHfody/K4kFpr570Ncp/jSJ5U8k2xDoH9/zH6Tt/BfSYX66csSf4ZkE4Jl6NDXfJUf8F+ToH9MlovlP33j9fa0D+JQMXVcty0U+v576n8JwkdufSYSA/qJEiJ8iEIr+CZbbpxcRDDsAdBvNc5X8R/zyN5aI3X7PE+Ff4H+e4gcO/n18DaV+5Gs/vZD8k2ztd8v0M7n47dy/yv1g9Hu+8o0t/afTAS8YfI9z5J9jpf8uFoX+jEd9h2G/Q6Y0mstfhfcn32b+Y77NdxkYCEXiPPLzNA7w8wOtM9AlcS9dl70ELQr+APJH2M8X8Oc08t3Z20/Pfob4/srbPxiY/Nx9ff+TQeDvziHUd9d+2NO3fJjuKC44lL80a5xN/UUm8y9VAtJcmHGSPx+4L4mN/zq7uvTBX76pBr/lD/1q4/8xV/rbhX/gWv8Wf8A/Je3fYRWY9t/n7NLMpv/7RAdOUL9A360FfvtRetx+wpa+5x3/tkXAbn9CePxOX/+a5/bfoKD/V3PUf+U+8L8keMgf5c5P9enbn5Q7/6pAwRH0Fwj5h+UPPxh0f7FI+DH7Un+y7A+oM+9V10YflPnD4kfTt7VGwJqVw1Sd1/pH365IyqpNteg9rAD2y5Rl3w5+d61znf666N8TeAIuqPpsct4flrt8Lv0J1v0k3xNFPyLnhzTB5Pq5zl+8O60udPqZmPq3sAkK/YFNYD8z63+W8PmrNfNPMOuRJUvUF232u4d+p9XC0E/40s/Sxr/nS1F7ceI+WjIGMIj5r8C8P5F4+h/k+H63smmUUXnyM1wgEiqLf76u/5Qo/vRiY9SvPruvkMd+hPs/cNT/VRLhTwiE/yR88YxKsZ/Bl0JilCD+Qvh+Vw7xa4L6f5A2/ZfB9tvz/6y0zdp42H/vGPucuL74pun8EfB/lMr/OaD+hwLxzxpi/3aB+K9B/Gcu6f+vojP1fxo6I//3o/M3BPrvx+efBt7+c5b4D/UVf8TZr8r8ZZemn2WA/oxR9aXi4juiQBmKE+Cf2LhFMiK/bFW2/79tNS//Bv0I/T7k8XNzFiF/QX9S+ov+FiT7t1MD/p+khv8tphSE/lr/C4ypv19niF/P/GdCFb/5B8k/uAf/qW/wH67/fym+8fML/6wj8F80yGAS/wUUpX/7+S4IQX4XRPiLzTH8T/iA/zeWY/2DIjrib3+qiO6/VogFfmmp/1KIxZlQ9kxpmo/U5lux1Xlj8KP1YLevQcIlel/F7j1100os57lBrG2F1mP3eNGRdIIprDSvJsysho42xS7nt3LhH6ltDQOt24JUUA9ddqQNdKwXnmQvCSRKwAUOY1BPGrd+Qu/m9FxR8oWs0wtPCGrtM+MJkzXc96h/sYaVhFucKWah0JlCP8aB43daLKjrWClocI4Z7kxAa3TAgf8jjrVYbvfoXaT3jr7+/G+9Vj5HXWh4To/V/ZVvqLsgCOfGWGQIYh8ZVA07aimhw1MBXUiKpqyZgtIQPoBa1tonxQC1CCA5MGlPHwIFE6ivuCgjKWzF7HobRNc/7Atje1+IsYDaVHwb0znSSK7pgv1ZJ3yFcYwnDBR6LsfOcQZXlLPeGV6F37U0eU58gWQFEbLadFPY0LWspbHnNlACa2051XODR9GNrNRT4mt697cnO5L9nicU45oEv7Fl8+mv18Mhm8lheWvHg8IJP
32HlJLhcpRbz2TsyLHrM5Wsa+08M3iJOQf/BjGG7ZVhIO9GmqJ54oinIqAaKQcwXGgUs3UTUjwnLF9USOOLUQ5sGpSTbRBNPGXVnp+qiLSwYD2nxWE1emTbO1Z7rAPl0Kc4a/p0rNRwKUDd23nyxTzSQaNwnG5aBmhX46/HqH728Ghnsp0yqiJuixp1LZJwBcXzhS4EqQMS73oJadXkUsxWKruNfiTbkse+n1GuFsSbDOXAf2WfTdqmMCOkdwstL07JF9njbdmkG2XYGVCm84QGprS1arX3B8NyfCXE4y3jMj54IkwJmzCbbSNptUeHJPdvb3Dra4RYXGIgyfMOv7zEfTDSBdul6Ir95IxOi/T7Z5ONS5IczxWUr/mkU3ya6rGgpAi7NXTUCBKdPG6m8Cm9GxFIw9s3J3wy8M6+76AxC8a+a3Sm1GmmMxTKP0G3w3uudvenAZWG1OrWqrp2vHQELGQ38j71jIysmR6kgs2PsmPtekFVTLkuOOOytxfjmLtwv5soufowacN9FY3Js9BpQNMB79yoJIreE9gM+W0V24GyK/5QA2gpu55NSbTK6/xiPrqHdmZRBaKr029uaSjVqfi65IpeDvGzN95OTcbePensoGYD+qLEhFIVLNs7NXuAMq6R3W49iurO7EW3SNzMmXhkazb7h1w4li4aKh2FmxyiJs2vWRP6IHTBBG7PnaAAScC69riIn77oWl3VMESwvumXOKccmBKGkpLLUTRXeMwaPF4kNiT2wqrodhQl1qAhlx08llpcQclZSE5A2aXkEXJy7MxnjTXRS/y9uVHwvjmpAqEVTGlSvIhQPd+ZEItApqvVcbTOowTq31PZ9nmwbFxy3kajAs0ZZx7TJuGauUPTwQsmR46I85ivaxNFi2cUlotMvzCtayiuDWIMmuNOogpRDCjjzY5v7v7Zh4Eseu2Y3TxqbreCdcQPLF+73Zeosd0+qa82xFPx2HZ2tZG22ZwIt1xP9LpH7AYlo6Kl2FLTyIEuerNRTzFoA3qUSvVZcyEIzmEnYtR8fkl3RlXqIGAyrBbBwZ4Iwk/9yXMG0+xasHVVNuPo9e55QQABd8M5J+i2ubc+s6xFT20oUzD2e8a9V+F5q5DYWcwZFNHc7ZE6wlMpvIF2k/sdNwnC6pEOOsGuganr5YdRiB9uPacWhtaf92HssfbO/dVule5V7yy1RiyCSc4Oqd1/MgU6a25ZJFEhLTxWsKBujWHc8mA9j5d1hZbNw5bqVy7cDN/fXimEgh6hKx6cxit4H8/BAYVCz3sHilUwey7Cop7jpqbb0vOPDOwSI5QS3e9DuarNo9gbujil7bnUWCYveYOecfeQNthVh+fRiXR0vToPEvWPQf4UXVntHCWlns4Cles0WmWgXSPHaPUXXoJ5urfBUsJm3ErHu1v41IGJet1ksvwW+jijgyLmtzniUTOMUOjzPjFtYq4lhkH3eR9ATwVIaWBS+o5pxcWDczSlytDkxEdBWUej1bJh2M/6PdhhJF+iKFMSC2vm2f0sXFSvi7LROSfRGqZZPAKfMJS/EX0gIW3XRHemhXc7q/pav6LABJVR/WZpRb43JTpTdlgdqM0ETHDwAIvIZBodTAMFeQLNUcinB53vjciBYTKYwFFo+sNBcUf8dIgD5YGgD9fmQhOsRoAZ1m/fABLJM1dQXkA+1ddDB7cWPp1SJ5KdAieLbuTQfEDt+F31nSdCL3j1DOiNiFy1qlp0huzwpJlA3eED8E3HfvUxrWB8IlorWPZS9u40TgbUcd7ALr6owg5LcAqrsguWRknieHPtDm0UiinomQlO0OBdmNzXaO4shMf/i6fr2HYcxpVf8/bKYamcsxV3kqxo5Sx9/Yi3Z96iT5+23QokUKgCScCu45wMRN6oLNETDXBI6xGQJilpUFs0s0G758L3J9p7r9KqL0cBV/n8XYV8rxKeANS4lSeKpK+cmZd+y2meai3zGMtVCPRiL18wFpf6+6hOXzcsm9eoK87o0gcrofj9yH5tU4q/13eoEIT5nkIMuwHhG4/sY+99WOK2Ka3sMqJ0XEFo4hvRp+JBGBFSmhVU6CxVybrODA+unwkGgfk7s+xJftkM3gKq8j7n9YsHMPpKr+mQmCOqtG7M4IXui6Uzdm/dqnO43fGn+NV2cMxV4upziEDRajr2d1XJUQVHt3RgytsTLKWxEUESTl7KuwjUB7zk1/YZbK/bCMIO5PaFfTNSBa9eRDj1RajTFJxrSdcmlVtgEJ8hUp5fVFwvz3gi5P7r5sSvNeinJHYqBf8b6RFs0//ALUHbsngcWTI6z33sirpT6VaOKOy5x/LbEn4Ic/HhjOi0Kcc5ztBGf3/P5wO3eoEwelF25zTCwFv+V3zoCs7sSOYV8UtHstDyfrC3joS9dKEB4/T6DG+YT6X8ZFDi7c/6ZG1v/kEalnay8DW4ChMiw9ZsJFDnDRxeuyYQd90aYQ7Y1IbA1I6XADDYi2OScYk2bWtaRDD2w+V188aHAYnw43lnNRWGNRP67Svaf4glbvFHaz7JCdgv2youOMSrQPOUnDyFklyuJS9XVpO1cnWGHGOMQxQvO93TuQeYvU8NMI5rOAGCkRRH8TfT3QS6Oj9XlNl6YRHM60UySp6PTZQ8ezDW9PX02I+tv/jJi8NV0rw3dzsj0d+ltwzPH2CEyP2SAdXgRYP3Yz0yPW9kklS3zK6K4UxQ38ebpg9Rd8J8W2xlZ2JzcWYKYTPFUzwAVhElupYHDf7YxDExratKuoXFEhDLFnsWYNLURNWqcn4hPmhH6P4cccd9vAuvgquSKP9sL/d/73ggIPijlEzxNP8FB8nniov58fRBvPz0sqdoAiPvdkBk8PAoHT3wFQo/p6zwEecJUsg6fx6Zr+BQK5Pzckdo8YNoDcYdeb/WLeirLL5BSLVp1eejF0iXEWnNkLgNK5Z4iim5BphF7Ud/LLxRwWxZp0zh3Uzg5GfBqGns9oXLb531GuGlPHKvmhGcPKefVSw2uXyjjzvnOz11ZvEbaylk+n7wLc0oIZu99vEGTW0NHJq4+68dJbNwv9gCr26/tAJTcI8Y6UQEakraQCOFkmoqZWXoGe+s4p5bwpiSSYXvABwG9EBs1siNyfWQOKmJBupUvTyia4C2zL+VaIgHsv81nF+p9cLBKdAp9zrIeyTuwY/XxnhXctbE/0FIlc06Y1w+qhvXqtWMxaO1C7IMJ6ECZhoff4j5OAAtRYGhPcSYbSbMpXx2tvxfCfDfWs6+3+9GaE1GCPx+3OwF+9V2FYqVxcMUH4BPn3+zhK63N8fEp/KjL0u3zsE64sdTf9K2RWJoC4fuxH/tsyoHm4yVa2KmVUNgdXIOsEbMmR7+pTRVij/v6jib0Ve91Y7brEfikJJeWAEU3Z1gaUPIXIsAPWyAlXf57IxHPFQCRgr2kv1h6gRE1KgMLrHFQcUQkZpEfWaVnQXem3jg7T+njBkoEfJYTCAYu3kWVq9Cq5WQ7bm/kovfO36uy/XaIQ+V6kUMu9Ns3PTmX8aYJANNVdb354dk3Di0Ky3VXfOrZTEF3LE3
DFtywCxKY89wp9gWNwA+kxHxX/ePGPLjF0P2FyKdhjqZrQ6vP8cJLr6Q59g2cIAoaU0N1FHZxaOyWKEF1s0iDAvAovX6LL2J1dYvX/uGRyePcIMp4Dlx0LJDHJjsFtUPwEtDCyt4lYw6FmrWnDD0rwL6+MZ+/8WwL8mMQPR3XXZldGrIBtPqCoyXlQh4900pJRNCQEOqJV9W/ItM8jsDr4SqdRDOwOE2lmJ0xTMHQ8ykQwk4gyyfyEMx/Tqjk/GYtfwLM2j+hkevhCA1netTWFnmrH2PA18hm7q8b/IJq06ZL+0v95CA09YRZZ0AjN4RiK0FIGi1XlV42qPsN9mxpC+gMbby8pzifu0bxCnrj3yLK4eunhOK3akVr4frF9sSKAVjUN/BgOifEriLlURUSEevL/GAV7PdWjKxnrXVtRj6G0NbDc7J8pe/rCNfWWC3dgck10gxFhNUErSB9lU5TbFvDHikwZaeF6ftv5/yTPH5a6YuKWzLiMra5hG7W0YNVL4RnoL+1+6Q17ZYP/zf+E0e4dVgvBGVIH7VoNmYiO2qta+sWLGxEs6nOJwDI62qO32d5FUcH8ZIea0EYZ/DAQK9rPEKD15hKH4Gniw5+eevoMGI1pWozh7Xw82LjazC/gVSoF/wky+eSY0jqeVpe8LaGWQP6tJhmPdhmyVn4RpLmeQKvBlu/c/LAkUX/McdIH0aXpbe3l0VIm3d0dkfJFynpqC9zWYy4jNHtXb6HHinBrCdyIEH3uCeqhqX4vlyY2nFZBnkgKggDhWX+dBJ7aTXVzjYI3a7B0QXdxzuPytamzxE3FFYxuf3Ugh09o1vXEjV9CGr4r3xmkWvLZXBV1cy4G4Jdou/Ei/PiNHgn9ec+1aLWPwEpm91AIQaULuF+o2ZgK6xwCHmhj2UqpSEe+WMUKl/4wpLDo0+ufdZ44c/fu2eqkf7OTtD7/u6TXc+OsO1RN8o9l7s7l/jWjikA2rsKK+AOxTltS3eCQ48PnOTQVcRUyKGXmkA6zJTnTezO6Y8yHmMtva51vaqvkaknerA4qb9j+yAwLaQT2F8rEwTaEEdvVr6jdqgqgTnV/PfQfxrqfNARFDRYs0w1Ci9UsQuS6Eu7iZsL1LxJ3UzqC3RcIMc5MMwvqRTC46fTyWO0csCbdl/nYeTEkNJuUEMpaSL8iX6Q3PkxZDHdpKyXRyPFSTcYlRC2gFUCfCLcvlkL94bp8KcLM8ca/zLpOSXXmipYB/MAiicc/N12LQAcOhlVkChKB/EHRp8+ibUYCRlm6J3T/1coIoblwH5c5qUXhayct9qKUyydpkyBt4SNIHFEyAJVMKCbUtLH3lpnYtVpzLMgBicxAiYOjLqaS18U8hrRjdgwD87nqmpeIasX2m5n/5Ovkem9Y3oPJupCPUZ60v6JiAlG1SB1T7Aj3l/oedixTa5NGOe7ekqGZWNC1eRPSSSweyeMz+OAlf5Hn2xkS7IKZHAFVrPAtEWBCb4hXLQUPCLL/aZtEz+YzpFuTSXefXSyfsyuzSgJkZzrg3PPdfE+TqznhXrRBDcF9BnAMk/MYoIr/LB0AUlT8e0olaoxWH2mCsMVPDqGGvd5EKRTWcyRnFSCVXLH6AuVFFHqD/wNrPJuQ0Z6TjRENvywCDKDHsICsIwt/4VwfORFI7BpMbmKnEK4vjetan42O1LcureOtMvgwtV7dpF9RAgPuOQESyXSEcAuXTurzz+a33lNUwYlv1W5aofmntgMPG/YuNPU4AW8Ft394IadHTgfDCld0FA4F9BvTSe407lfTpsb1kJw3SS1xt4aeYXo3w3xEBK80vjff1Qu7+mHvAX0kZZjPhptTocLA2QzAUD/zFi3gDH4BP7tMNcxSjoApHYhro6z3nSOiGy6HcHLbUg/v/f5z+D7uJkCAkIew3z/TycckGIjaiCZX4fKs471u/2uX7G+4T7LYXP+ipA+fnhv2OlXyL+qSzAdETle09Y6BPPEyWhcEpvtDhxjfkwaZ+WaNrEshG+HKg5LYtJKv4DgutvmLyX8U9UO9i0FL8Y22kVIXvClJpRUac1N9udeBgIzKsfVv1M7+TTq0SRUA2Ij23QN/3GRA0/88cmgjjT7+ThQmHCvVSZkxfVsosiCFZd2mSNViaiksT9GJ0VgjQI7XgmLrwPCY5Ms1sWzR+hk/qrRQInmuiXfo1/JXgsN8qO9MDwwWIlPsHs2qHrr4Gg4/LXVrg11NgK5+FI/bpKYcQQZZp/BJZEuYtgboWCplfwdhMJ+T2zEClKnI2/+GKbjmPmQ38acAtDZtoWQEm8J7RMuIzDVy2K64KtR8OpTn671fGjbaMTowAtuORZ35H/I9hgoaGL7gKgxGpy9l/J+JjnQt/uUPjbjJtzAWaBXp2plI8Cd7m8wi6WTArluwEbmj8/3cStVMjflc0Lry3h3l+sZN145CwH6TAPrGJqkZA8xJXmO9eZ8CQggwmKM+Dk7PhDmksG9sXgwWYldupA9HyfcgfB4q8kiEe2moxZ19dkiKMN9j8HGOrHeIVdq+wDHoA2a2K8EOvEmumQccoEJx4eNqaQBSjn/Epv3Ak7J4fBaQQbIZXCeiRgyRFo7cC+ntX12I1Q8aX4nkmiFiFjpntsXKQ9Jb1B005Ji+bzCBO4fXBQMGEPPwbnFPHJYWTQcG/nG+8qQtcD0yBEKqj64y/WB49tzTfQzzcsOCXmMuJz7i2SunmJkkE1r2aDazz2yMB9eLwqnYOiD+m1Ffcl14ACnVG2tQXZrvY7L7wGoiaLUMp9ygPqAFrDNg68/RW62OHpJ2jrV+NILISJxJJcY/YnjodQ2AlYV/Z/wVdsaBKtyOX+qpiHu8K47Nzzo/8aCByg2AdLSx0G3QL24h1LVMnRe6JZPvpL+XFW00G1ryBCbhs9/6T+VZHxB6mN8HJqLAi96YKDAxQxKuHvXi/md+zv4AosEVSBE+c0bqKo/oamB7VWzl2CIxgiUhMAEN83MnvWhXg8dq4EE6e8HtaHNSkAioW0wrRE0jB1bhaDn6yKfYEj32vR/gmcYtRBb4TWiPF4Hp5WGRvtRAmEz6mMg80uWVPR0rDTnrQYjBfK8nHEmvzyQWAx4i0KFZdx5Nyq3TobyEji+d2YMc0tiUbBpdRUoCh/PEOk+5rUHo9Nzd4KtjnuG6UPjMBvWwtL1MttOfFPOYvo8ogcGIwFpOyMuP+F3xcv15UoZKH5nJ/WEQ0vPn17xPAoL+gc0Lg2DTUyTpm8dj6DN3C6B4SWnVoN3Bs0qt4/KqGKe2Noe0vT8S9VmNBuEBMGbHdgJxJ72NF1qL87pxA40OwU+n7ld65+z9kQrnI4Zn1w7uaZtaxQwJu+H7d0fmjY32Kicd5fsyINLWgbXHzVt+6vXzDQo7GsKnNj4GcmH/R+638dDnkhDl5be+E89ba0elk2q/jh4IdMDFJzrxW1Yox9PgIPHm7kkoIkuNraXWqsPBCBgslqj8SZsXfIfTHmWw3VURdQFxd+uaJWHb7i0WcYmLJ9vncX0RfnWSrvoYD4NyfzG7wft7DSttSO1XM7aWK9cZA
J0f6LWJwvijsxtrh9H63k8s9ouz/4cKZ+hGT4pZf88GoH5LoHXRHCuUvHAoJatrqbC7SEmdYKxSB2UHZjxuVgcB9U+zAXZpqep4S/++XlfDTA1qMACgVUBf84HcFPKfO3mBtxylksp7n9BvTQqKsqXq92nh4N+g8jyh0yxgzd5l2DSWBhdRlGnCi7eew9dZBSSzzT24cRQJlduleSYCf6Ft83YrcfxHDE2FCfbQhbdLI7tj3qwZDSqzFq5g6o+NUvXIdVfvwKw4qViJMMvDYBuZ8n/lGFg2uwHzwe8zKDJ7uHRhFnNmDgw7zSlv1uOmdvs6sYCZY8Y/xTwqabgX8sYhkGq3vD1IO+VvrRDapyehG5FGFRDcgF0xduCy2SUp+o6qgMYruMAlIrYT4RoJ3JDUJpRspMW/FEfuP01AR7PG1dzPEDxA8wWJjysBuH+rDqS4kaG6APt3qG0eq7zEwW+Ve+0seC7Z8ghBQjLI9PMRzKehmQ9dmIrrG2hmn8QVki/42qmg5dGgwboSeVniID5mkQ6LY7ystEluuLmVwid0ytjlxMgjWDqaWts6l2n+D7P+yz8c9fqVjgNGRV0wdK4SvEN2HuBjJbIjnwdCeuBOAwdf3Og5wSBr72j+mz8gKK/MxqhChuRiDmJ5D/IP5PH5iXgzaROC1S96KV33IqFFEHqAfEgl+tDWj9Jf6eFGI01m/DvuRLUDmyv9cSCvZXkOytYBWN0ZFcREpcS1p8VQLvTrF0R5utfyWZuw+xR+EBkR4vQug/NG6lTPncMCCpdvFg04U7+EEFJC33R2tl45ewf1KxDEiS61vzg8wVcdAPkz3QJJ+NqkeX9fy14330KdjQLsdM/OxaiOMzH9dBB8u8ZQzqoUqw3UzEwohceoY3LAiz8JZeezvZ+WNqDSmWM1ewnoJJ20oPaEMrvyUM8IPcP02nVo8tK2yKuiGbxVtdsR49estimOtvhuEQ1vxAqlMsSn734Bxk0NlgGVwsrqv03YtRg17LxbWifrrhYlP7VftMwMLZ2DJ7BgnzBTIr6G9t6+7nY1Di8dp5qahhuVZnaxu7oy54t9a4PK+j03JWQEOtb/ysIOaX9qtIKSz5nGRfmv2rU1EpiV4ZQ0/1WSoticu/7mw4TAi/d0MwHs9U2rKZShhv2TilGgx92kden2xu9dymijh9+JL1M65XvrByCkwqRvFDuN8FJYtf00jK+MZuk63DQx3Zxpsvd8PiKrG5tMWy0BM/59drhntcP4145FrMKKf9F12Wr7h5f8318AgMEbvbLKZGqxR8P4tqs5EFuJk698PKsRr5E1bsOL7Cw+DPMQiZiS7zqwWC4DESFJPiMSF7MTLZNlDLPe7DSy5AyhGlrdchzODXrab82ELLh/K3MexVODDRYFgsnYOFzCFF+d0Hbbj14+NT0J6bs7IXx9F76XzREQYWLkX61GkFJeYgYzyF8fl4O/p0yTLwznhNL316Hs2WIi2dEhiu/kUEadIvsJ6ZehCBo6Nd/OojtEmu61U517I5wsRR/MARrO/BY+XOh/oWpQwCHVtvNKdW7HOtr3xScuOmyT7vQRKG7U2WMzQBo2UOrZ1nvudMU5D98EegyNF78cFi2A/rUAhJ78G3WQoZiKpqh6DZasbPh3JKio3T84VZOjxwwLFzkcaTbjAO5cPlPJ1SdDrMKBFBn+/6YyYZFVUBN2IjB8tm6t6ySW/SibKthviFEvvDp58q+GeO3fOlvgiVvBzS4tg8O/8lR9A3dthiSW1P3VJvtJDaDmIhCezK+YhsFQauq99judgRuxwTduUcycIR8QtXMqla4sgef9GJIhgbe9Oc0SIknzUvC2CTncNX85vx0Pb/chEJjOS6+ZOlwPtRuteRU5FThBDmjKRBcz6Xj/AZnJ4MBeiQ/ro6aajXxWtEMHNY9CWRAzQx51WAV28Ntc6hA8+LhBpsPPQe4cvv34HIqI3+SLnudF6p5Y3jKorXLE5v8ncT37NRvKiYzi0dynq8wIPaqY8wTpIS14/2obWI1OX6Wo6KYAxbZDOAknrM3ecrKpkd6WVoKyFsIH5W3BetYAJ8hsdArRB+XlWRG4rdKjBX5qlxe7EBbiDYHOP3mtLrPSC0watTra2WYXdG5G/AFUy+lY2bTI0Gp+0v//idFn69UMN94dVQV+cDNb7j0y3UIa0YEgwUhBFCI26TR8H5a8QcS8dpNK+PcD9HGXddv+SDat5YhsgG3GRbG3hTwiTYbA/tm9vASYk2fvYDr7SMzgRMORijSXooyfVAH0p2+8QKEbuxxZHR62AfkeKL3qdxyQ94BxOfm/08PMLqllrvHWSXabdMlT0TRW/KIwHzQFlflsbsZR9bUXg2m++jsiSciUptamsFtmcDi/+jNNmv5b/byT2nI8UE9iumTAubQwdrnXcTHNxziN+Qm0N4XF5Lcb5bJk8kqNnt3ceGetTnE+zZX35e7id99Mxl9BjgEfZT5KqrOsvg6swfkH/hl/Xc8oKEnrk1xZp+V6+v+fxluUoPi+YaT9Mk/DGUbQ9XC3h2Ldm0AEWaIVrMkLT3jte37kO7Xl3D7Kv1PbnQC3GjMXvHka21kUebR69F1nFIIXy/g/bQTxiVcKrhVoINH8jADe+FVpjvnLltVq/FuO1Y4t3FIR7obpw87+5nW0Fm7dbU2njIJ/D0jqGUFpY5/23zAYGpy2y0m2F1YBewwveK7xCMw+dGXpHUgh1PnxCpqxePf5Qj+xiQiKcMsefTIB1qfUvZNwSzqTrVKfBx8VovSdDfpuhJryvT1HMYZqB54mNsV4sxKQiPXPChy78c/94reEp83Mez5VUL5AJp8wlLT5yv2lfwDGwNNz2pTL9T9wsTmQI/PqL6lzk+2jrUxRNhqXIBK/W2+2rnQg53YYig8vu3PcZylv3SkMJxpWSoj8XhyHm+5uAClWADnHoZRnaiOthrc9zp12ofNzqOfmNHpvXinChHHKr6b5b+6p8Yahp96UVBeSQd+Wv28uelUYefVfxewuH4Zj9plBwI7ljqB1GsKHLH9IuMZR1xfyEcADdO853vjpJifRgoT2INaMrkw3AztrXfNYF79AXKTlVyOqerh4VTzx9reahqP/+TIuk0QkkMCri21teElQKrpEOhxjVjfWmagkJF5vilXRRVJ6Oxq5GkAceb4OydrGW/n8n3tCIm6jYk3PwRhVSDDli7L9uml3/VlO2bwosif5JXJUG7UOkoMBVWoJpf6T+e1WfJM9vB2X1+p0g3lyS+AbJUR3AfHQR8ObOq1tuuZEew4/WyUDqpPj/lWpdkFZDMpglIlTqY7ihhkJHtKaqS/FqrnWYpofAVmZsRvjHCYKzJACnobKMGbgJFgd0df931fuoXmQvjY4AcxkTLp12A9Qb0hw5gQ4zjF5l0d/SzZ6gLTdpITdJaGA9SSO/AR6Kt4h9HLb6Icv4vSWnhP+Zj2NeZA9mPYgv4kBx8HbPZWfMcfUR/ntEdiy5M4k8It9PjyHh/7a0rmNHpso+1Yws532eLlwzO3TpdefltKRqV2OYe2b2TYJMH3s
YciORh4s7l4MVEvz/tDTlM+yUHHKQp/AKzprl9YU5+grp6eGpFsOwoIOqb+hdlQDvPOXyNC+0+iHx8nSDTB3Ka719+wwTHZBqusRnxATZhQG48oxT20sMsohCHXeu4mN3ZX+su78HuP2h6XqKcSVKTv3rzex9AhTz/0oeDTElAQwmVK7HzHFCwGnH2Gt9BjzPnR7KbXLDWbg6vuhqYu5DXpKlzCkXL2AQ2A3YzLwwaf6u7wB/4mjgw2+yUSV/tdhWBvwRP6DSUAvueY+eufKplN+LF+Vb6FJepxtX5qXCZ3yfkJOm2BiriLzucpRVsjAwPFNbABe75cQvy88WyxispUIhNnIa7BCoq4T8qr8pfbsI/+FBczwPZ9U//ULzvJbyZm/pikc9vXpNhsYjnuTDEspbSJYCMxLe1aBMQ/PacgmfUDYbwlYLKhvKZ8ZLhVkAv5hlaY3I1wp/lkRp+LQNOjIv0vnzs8m4OTWr+9nuApQONBFnBraWKP4sDrACKkRTznEkT/O+KqN6vOgT+NOkPK0NzGfuYApQ0DJJQRM+sIkOJl/3cu4pDgVgll4NI6eg7ejkxWaaE3ZJqVmKKV3BG0WYonOoLMkAeLvWrj4yWLGZq6ejlC2Q4GYrZE2t62CNLRr2wdV8f33g0KH57WVm9l7TDEEjHuyDpkO1hfUEhw/1GxYJxTflsH6LzZ95zOtVsPyC5y+a7+Gqyb6TcEe2aaLEuuA0/2dSPgpF5w3h0DnbNNd6wuRSav8bSSBlXvDu45/X8W6W5qlP/2U1NilJtTZNbykkNk9yNkfoaFu1xozoaKsN6vxTZDQxKTG6+mSbfsgh5HcNUlxNvX1qyt05fxNB3XoZ+6sMfnp62qoQ/y8uv8FtLN9JyNNKt4ZH0YTCn0kVGD8lK5UNTaC1BqB3kkGTahJw3g2kWhXkPu6s4BOfQCDW0QiE+pfqP/g5f3tbnewKKPsT8w/kIlPzoQc4Vr+X3zVkokI03uDqIBvw3Tg7eTOqAvxxpXeciv922WEsOezQv/1SDkEZEXeSoN+G6Cdbgm/JaOxL+4FR4jvhVfx2aL1ubK2irvo8ffa+Kb6DwJg739gXZIXfYmMvkJC84p/ZQPP/nt+r4wW9ZJi/Z2fjBRRG7BEIsON9o3QFyfXj8RbUXXEIZkipzgwQEjwaeUCd2XbhHXhziBl7RIAUPxwNV38bBaXfny6Tab/oFz1TSetdi5TzAlK3S4JqOnU6nXR8g0d4Vfla2RbFLT1OMJyFt8zvytcw+3XkOQzAt/QBpn9hjgvsn23dRY8FKX2x0jt3TcyAvsllrMZW441V48REDaABhfimuSI5TBHe6BtPmWEc0igc6i3XT/W7O+6DUq7NukJaVgnAQaSQy4LP99oaTw6AjfOs3QWsYi6Qas/VXNPjJx/nuxSd2v7U5Hw6mZ053YZD2mNjyS9ZVQjaIn+m1VV8sRRY0zHtuHuI8KcwoSM/P6vAyiZTfRtWvLseGIJR2yQ/EcPvxgjG4GgpOYPhYrSFlqQ/QMNElNF/UYevUSLm6/vsnM8snn6uZJO7UybPXBs6uSDKbRpbw20T6l8/k1lvQ0firJ09+kIuLhu6O5NaZbg2GyXsCtXHYSAywZH6KJB53Il0p7fLr0lfFbyZTR3jQL100iKuEv3h7IJd16/pThBEqR6hkCnD8474EwFqaqEWwgKK90UL28iNSE45UPgMVmP3fmZWfs7ajvhwa0BsUS7x4UKfVyWefDsXM3LLWUh3+Tt3MDnp6A1PZ17dbnVmtGIky1hlECGu4EXDO5QL7QnjM8J8EGXgAgXKdSVrjvPcu7RJy0qSCMBzgKAtOyeABwTiV7BkytdL7M+lgTbOWwyEbkEb5YPbqg9+e4JyNNN4M47Osl8bLX1OEUzv7ifhbFCXEH6uu/401TNrOM35GbgOehtuzFmy/IunFX1nUglGhJWfnKBO8lTz4ry8GaWVrFkcmLHk/sMuDDaef94EQRGiWNaUooV8ECZPye2XQFnU4ZAn8ZFFH6HwfVYeh6NhGbrhBNkUKj450o7OOnYP2aac0vqecNDtgURTMVRERRWOhRSXRHAoPVu1BMu+vVQzlCDPWjfBrPimeMLMlp1bneDo2iA5rhWxzijW8wEzDpf9dqQPJvuJEAbvumeEF+mhpIvtYJfuyYeY1jGtYGKlbrhCJc2XFTfGmIR9Cs0Z7gfRQZPlwk6ZlHubGLm5n/uVfRYqUyC+L8cP5W/xx03D5u0F9ws+kYaY493KV943GDAJj90tZLY+zwSnnz6TobKjLDi7245iQwPzwOWG0y/mqN2FzXgCfzU5TdDwFrDglNP/SzQiS5Vv6eGAk6nmFImw8MeVVWkzqC6oFr8UXwny/cPpKzJ6BmTCeY35HGH8p5Ex/ADVQATNIWjIbSnZt+PNh6fY5Dd5ItSDWLot/ANNEZLtK0AOsep8bi1CrUoZ6RvMLkE5SurpDLzO/M07ZYT6J5AU2WzN1iknJKlylNzioe0hZzCU4vvc7DZQH4KTFiAz5G0w+Q9aQgIiyYJu+jTulFCpi6kttM1iA8r5BQ+JysOS+fLrrqQsvOz6PGrZZeOghyY9KvEk3UZx5etjK8BpdAbhl4oaHyuuK3MtY9c1v8xSg7kCwcszKi0Xp+uYIJ8uY85GaThP+eBt5g96lIl/ISM6tgu6Uh05FeA84QQB6Y4hS8bLJ0jVuN4cg/Ruz1MujCPsBEpOto9Z+Y9UVNu3nAL9e5p/iybJYB25G6pS2MqxMpzhZlI3C9R/JINFfHtAkBFbD2uQdXMTH+JexDRJ3KECrqaz1IA0hEDT+VRj1NGbA4ju0LpwErNRidwBMGn4NFST4h+MgbqR06RiQWndD8YTgV1V1vjWuSqZj5Mehg/hEybkJQTz33bks2ZSLu+N6dKbW51XwNUzK+ZdHB8nVnlYR4Rtb6h2ag3T2WFxABDVsjO7lqcYLauTv4nmJXzDv7yBByxPSkV7H6kwEz7q/5WK2Cjy0BbaP0d+nMCgb7BClN/ACdUbBn9KUA01mDLy4lBq8yHZkZslm2XBAtzmQM/PwH3AN5rO5Hz+gUA4+lzI4Z7VXXmAaZsiki3r0UraXUPbHqeOo0RwC02b5fV33Q3E1WEwA6cxykcEbvuNkxd9JN/BWIH8Gf7/2U2PsSC6CKzl+zUzgiB/Wzg4RCsZxaKZlQ3SGCZ8PrBDKaF2oDjHeDwCkymFIMpTHseZCrA8NbQ2+1Yc2BxYuhgMEEakZzSWMG7tF5EF8CJVuQKjqxOd5DSYwU9bWkCOicMlA6NaHYsckY2ScoZ9woZe4HIRcOrDQ8x0DtxRHgDQLXMbXy38F0lRSssxNgee9nyAE9unfTu0+gUq2FDjlK0pPbNnn8CrtKj67BYpOWaJAjPG/aGnRryYPQfrw33EidtfLvlPB5JAP+sA1GwBS7efMyVskeYl6AsQUR48tfaMC0fReFjXwlWA+Cmn784VHzeqG5rpQnIYQkIKkfNfdWaeulMn8L2YzWFFlssFXRBU9Rf1xhEn9BOKldbFwGnHBBCg2fEBkLd/3R
XTojJObXnVFcdLlheDhVePLRKmnrPM/YFnhN/r4tHAwyb6Xa6BXvRd78Q1Uswj2BIgS6UcDVkQzYILgjx5y31chPeW/5UCWFgjI3MLP39bLg/bI/J2gv85YlVfgiPlTxiM7cguDXxNwDf9stalgksFWwDz7lWuCcIIghkmWV1exbRitbGd9/8gNa5Kv4AApB1bikrGw/dfyL9625b9+P+8Tq84qmf5hlc6OUroBVmv4tVeCJUK2QPclMmz2kyL27+JA8MTpKAyClysWwupIf0gp9s/N7GCJ/52knXYcSetqP8D4FWxuGOwJ7BB64T0KtjrfvtbV6gjzxh/E2xOGO5+F3/DUV8YX80IbsXAH6tWq72qatkqwcpuCMP13QujzI/S/jQaAOcRZqurpnkHi8AqPZqt9sCFYQyorxBXm+42K9yUZDs/JO77jq/2AYgtiXYCDUWyF/+vZwXJ5TJojB+4TNU78dIQTnaJJZjnKD+1jsOE7bDYaTS30gLULwM5zEBHhcSo5XQfn6sSZXIv/+1/z+E8068/5srshwMGUbiRCjxml7Z8CHhcBdiCZcH7L9yZp1JmML5yXWWp/bwwBUJMZL/VgWPpcj5YAaU2Wz0SU/23xOxdSMbG6y9/VK1r76mXixINok3dg+5yLHiK5qlJWiHQvK/uqSAOMB4A1otMdSkclzOjVN+5QnU0t/a8ealIeVppiJdgu3Vtrjtr4FNtjg6e+eiWMSBCsOXiM/1a0v6kbT1wt+2pRlTJlBh/4nnuBWew6seYTtsE8c1vcV8x6HYjcJg6VJnzTLZisgZwRP+RrY3404TKqKt6tdjX1djW+chzGvWKbCbKk7lUkmh90P5l6aUUefihWmQrj50M1lP0keHWztOa+Q2rtrrdFbEOl3DOCbJ3IKoJ6xnNykcJLDUsSo0n+zywLLmc97d/stCKKB9eOljgOdka0VoRJSx9yqeVcBHaUTraAnMZZ1SrL1jI1OR63oFSB8B6EbbLRHhEgDS9eKHXt+iDHz+oPjEgYOny4apvwz0QRVoDhTVlUEYD+7eaEjyihG8Fu6/7zV4sQG6Siv6c29XDuECRk5gSYfEKiK0SQadb78fzJ0yZxNwEv3X78QUqk/QPVRKPYh/kS3ZqjWauPz2GNXp1F67lKv8wYfPMr4yarfW6lvIFZu2pbRCPL83JRhvUhuxaj+pk8+zs6lQqaGyCPdA0al+CTc4zU90FPNUpojH8lIw9eNuznGLqtGf18buT7GAGJyvmWCNqEB3+r39Hl+hAny0WqUkwsUx4MoInylgVdzYzvlYyLdPnhVePVcGzNOxT9he0POlrybH0AK6WhSCHsX5zUnK24CcKlXszSBfuVFfD1e2My2V10GW0M3pAY/fDJeMPfkKPGZPjFS7UXZxP2mAa6K7HyLwcJouKLfv7CCfBiDRoWEjvJBAwijmrJ0EnGFIubpl46s2ef62WIQBFSIin/c3SR8b5fw+e6gR4A6nyjEI03Mnx6A7h7N7OqwyXRaRV25Fwel+Yaxyto2wPApQhqsiPWohzW/2w9o95uSm6gJAa7myV4KTtJrpe3X6iXU12wEFccW5KuBuS9mdFRuOyu5I3m85iWBShYG0yzCHvF0UKpggObsaH8zKHwryIFCmiPogOycBNE1fsAP89Lye/4StAbngslZVQKNjanmcgQ275oQt8P2z7BInIYpE+yEKCGKbuNgrwhT5O+vDGHjzbXHOHIJffgjb7Yz0frb2idzngHbLq04e04X8lSjLz/DhyEua6m8csj22Npz4+nwyf7QsY5F3uTg629jQsAlIdl5eTzldefZstaRAcoJRSisejK9zO8Av8VtldJ8gdlgdRg8ipHxAD+ilqRzEtCjpEgDX51Gp8eeURt7bP7dIVmADAtkbmnFn0DDR/+IRj5dWEkQ2Z5Ibm5Op6e2yzHkl9OsfmGYUypVK+f82lR0N2O+tAgnWpJOA2DE4ds14GMonu/o5YHmd2uNSKZtGCLv/7zMt0ccxnE8a0wmUHuVZyza2z2Fi2K+5xF7yptfQPegI36QD4EIl/bytwxyDn15VyUxNPfOCr0jLKZjIAD09GdGo/yWa8XLuS1xUZ59IvDtNyu/xIWanG6VVlduBKg+ZASIC+xfgrdLZPNHbDpM+Kw84k9zfldNPVGHjuQH7n252oLMVzaLislbMBNBmJ3poRxSK9/sSQFafnnqXuaUeTmQXQS1CUW5b2hKd7KBM6+sE/BzYM/1MQ/NH1lk9W+sqSh1ulCzkdW49FsCc//cdpcdBXi1gLkf3Ubtk3KAlUaWUzTq539DA4LhTZY/OsrLmX8byRAFaGvt8+5fQz6wIpb0eyAxWA17NSrtHVJkR/J5JzDDf0jN68chU5GrKVv8kqF1VYHXZMDGafikxIAa3j8tH0o0aZ9Z5/iBbAVzuK/YG10jMO1nCFlE3QFdzZ2IuGciUPOu+TL+QDrOlIcKJTP38FOIWPMC4HiJlsNJEnr8/zdbOArGi/YL/OOsl3Gj1gdI0dL5bwAZ+fE6sXnAmaXmPUGtwqD7/S3F+IAS1U74A2VKiUVS9vPVjNNPILpjYiKsjaPNu0wEI7MiGN/pU4qEIEhgw21kZ78+ggc02RXrOAwfVBxQGS6XN6txDa1xGYSSW254v6K1+fjGWHkRp3i54M5Sq/HAl3QFnmhXuopKrO3Hh5QkUN7xCVaMtSnEgKAUsRPfylaKtZgZ2vs3SwMOLXTUOFF5s6D2D56nHu8xmNXCls5KURYHBOd8LceaSIDz8Q7aqHqer/5iMIahAox7R66D5Fm/KQcbo9hWHcLEbmG+DdZhnf4Wnr1xoTa2LlIOoQobGs747I8f7GPb6Dga2SVoVSx2o0IcYoneHYakOyykg51keYR2M5qYAHRLw2iezvnHqnUSZ+G9WkmwwGXERJNqS8UKl63qSut/e1CLFdU+11pEDKrchjZmpCsm9hSNmcRbA6mN2YBn5F1fHq5YeUv88uXntwileFy3OqX8TusAcY7llf4OeBWt1J9aVAwceuJfYHwueKPS1Opfabd1osfqAitCaxhLzU2q8XSsexYp5k9p2jUFoiZSvusECuS//pFdJN25NDqQcnNljvFW8NZ/xIFGFZKHmxil4mYscwC0hSgDukLxPXWQcry70TYBVM13qjnfe/3vHyhrWef5HlvdWn6dDBlx/BT8aS/SUo0+B/OQBhEHoatWd4jeF12cmFDTnp0XBvwWzxz6/wjtdPQQRS/lTC6dWRqfg/jFONY9767zfLBCggEw4CRM9xY70COaS+tByf24qJCgIwcIBGxr8vCwAZYosWgCapY6NlwBRiIyAVpCOWeWMsepzwmaH9npJk7m6XpqJKt/QRjll5i9lEL1PoW5D6xGrkM+nb+rrRUHVm48kOJS6NHsa3620LlGDAl9z8QLuKvnQ9+HUYaBDW/Qcr65rsR/S0lftUw984KQmR2thsDSpGa/t+ZTso/rWQM96ZVyKoQ5Qiz278Lf5i4q0akmnoNRMgNJld7UVHushjWz4Y73sra9Q6Gz/XYO8+SG+q+
iZHoaUwd8G5aolwio1QbFjOFcPKXoUQ39fPCovkZDq6JCf7zul507PLCHk4Vxcw08wHRKfm28NvKdJiJjvSvmbwbOvdng4a8hmWxN/HQUqUqw4+f+ildNbPgw0G+CQMT5jXv0cJkbWLHzqdDMFfyqt2dCf4XG8eJR1HZUIAQ2XCpyNEm6pfvQAH6VyoBt3SVVMgl7aFaQqspbFrN8LI2MDDvAZaEexVRTwboDZd6qLxy1x/DBl+pmahKzCRqyabA1HhIcvUr4b88jWJ4pUXu152pOxuB/0FaeKMzd/PG9uVlUsIfJwP8G8Dzy8Ejk4gpoLBD9AsJo77C6Ylsus811x6SzPqXKx23Ug+4Fr9x6K9fdkP0HdF7IsrOwcghmG1ZDeogLSFdJpPI0OUyXbNjuKWF46CfvZgqxfgQ+7SSIdGyP5aM8s7TiuNssetH0eXTrW3FuHlAvLfWz7PSaBjszgRK4C5/QU2m9RjiMByUTv539vSfEM2PQ2mKMxOn3FXuAQoICMJBhgFkRGh9bjUmpGTdInPTQ7G/M+deGbZoiv8YXf3AYC6WZjgPvlTXObeZMkYE/0DsxHuZdbzSg43NyPTgLB7KOljBSn1tyzaJ3k+QUV2JdFCeLrHCqeRr7Gtjrl7BimHGzIxW9Zrfuwn4UWuSbO5fv/wXdH7+Ix1Fkv2dZ+gjio9ZfInWINBRBY/lsMIUbvoo+/k3T5tZXqlFXDJrZdejXFIDf2td8auy02tiB8D7+izVL7CEqwg//h2TZ02apNbz8BxMuCjKvqbIfWUdSDb0DLMK/GRFS7VUPgbss7cN83n4ycDXArgrxPLASKhTUNSCjZCXQPgCIDUM+Fjoyot6/qq6xN0XpKKaw3K3le276ZefmHTR+ihdE09TzKgF5uduBpKIq1n4xr/Z2qrMA+a2cFen51/778QpZPJf1MyawsV/bgvWKvaxv3XkQ+id83x+lgOG4tAvjfQ20wg+hpnaDWBedEHWMMZLsT5/VZ1YvNaIKqvAD/p6EgrCesa2DPiVBmjvZdT+J9uMIgYKp/b8dkpgwn+jMlLbmIHDrOH/tL6aY9u3Y5G3nm8FVt3m0JC+5RPel+cPYGfMy9JxN8ucrlH8YP47vbv7nyPjhghrGOiv7Erq3Y2kCO3+pI2GLiH1kiD3qvo/uwGmu/9dak9GWyTtI9kJyrPaEF6WqeCC40YLLLtI/ZetHNxq67pOtvwX8bFRZdyD9c5+0kQwfMKBvpI9PV9+O2zAK6hVf2V1/5dWkU8ZsYpNith1430Yg2gZhQ7i97c4FbyW9qfYunvNbKK9pzr6O6+OLvcHoqELI8j8q1f+jVTCihY06/xuHqUljUQwY/5OesOueHlABZtY/pQ4kezAr/q8yO/+DL6MzOqrKh1rGBpoh6fQ4btcvG2bow6bbIKmsTY4wLQ/vQAYydeBlWHuqPfFZOUvh9+uQE0QwIfqq37f+tltAfv9crzjRypHgYhLuZx5YdD+TR/P707nCdeEP1vTyKdfWdpK+Zr1h8SKMtMa4vk3+m/kmVJiYDH8TLS5NPuclxc3AHRjQV4VL353ulM0mFhL1A9eovb1vEDUw5eT2o9m4RRZTswsTCneZiW8/A7VdMnk813V5m+/97KG0cN+XsVYj71xvATB4TrIMqCJY7xtgFsCm1QC3pQWo/pcpUl8eMTvOg8g5os2pG+dip1dqqZ9sxsTF8bmgEcM/tfzm5w6J/H5oyKRvyPS2ClgLO6sZaI99OHZv/17UI/cTy63HtJ3wU8hOsCFoeKsh53fdExrOmrpycmXlS+SNedMCYv2KoFnJ3D5JfFBiEL+HRCRt/NgVFhcMPr46LGalFwnVLiD/AvUAJ9/2EhHatyNYmST2geqlejEoULMyEmuW9GkyGLUZZXr1sP3hm8UwNsrXvGKJNvcvZzRqswm3dFikHtigyawlzvHxSUCCvKvSoJGxvKwHzyFLQVPxwaIgcAt935mbrHgYnPGOR7LWihMRcrSBcw2di6B65Nka9oh5qoSZRA1WE2tklRU8uJv2Q6Z4lM5o/PzQnbUCrsyArUNd4/pp7l3ZnyJfhEbVXlCvhCrrOHHtQCdPz6gXI64sE7w4/urBFAdNCP+EHeXwg4/ZYoUEecGsUolEyBaTaROA3/sA6LFw6ZOf8G1IoVzMFK3FQW/OhNFIRbpq5ahe5UKKePSo9+TWKcNtaCvUirX8kY42LfPWR48cPsV+dFHSQftFeCzHqHPLrQ64Jisa1eqUFyTD+Y8jPLCugPDKmPzDk9cjzj+bKAUb5Tkq42S23GkzxykcWLYz+A9+1NUiayfwm37mNAD9cnsmR8Yctr8lhFkinRFBoPZIzGiKzoBLsx7qNJt2YdALI4UsElUfarrd19QKr8NTmg3D2I4fidPXUxUK7AKYccDiRErg5qjYAGOtVhB/8xPKEcdjjM4Ah6+KjNMo6Mdfq+oUSkd0l8XCRqa7IJ3lASy9O6vCuqTiMv39TTjPnhTv0GkE3NH61m01Wx3K1GDn93raw3LqgKRWihF3rkKTm/lNhUN2MBYZKfJwqLMbx+wbdM6aNYMHylQDuwLEzbBVV8005j7pd/31QSmQC0ZCZGz3Ume2bY4iqh4otXBlrhjX8ZWW5jLRODmB8wIVJbNZenk3f9DH8lYoYQ51eBlxrJ9aUUccDgeVHuD1gSzyJBlzQRJ8DgqW+MRnZo7xJwv4k4rACvyaXcPfc+dZc+6m98RB92KEqYF0IvN50Wbnr9ATeB8sjJIu59EDXICdN3qLvzbSuYrohok/q57MyD6u1x7gbXFe0MuZ/n4QJObpGQFYKsJFDd1yhUQXKkGr3b5MzN7wFDM24rOCOajEsmD2Ah4vzJ6JRAtSaAMyvd0Hdahyw9TescR5d8vpwRcxA6jdqpSeIwEQUz1T3Nb/teW0VsawrG5vGu53I1HnwVD91HLYUozXhlCKAGcv0QHaY4SncH8Iq54RXBmYZ3Sx+mPQAG9CZC7weWxICOEPH7lTw2+ZwP7n+g/NF3HlqTIsvyatyfRLCHRWqsdOtFafv0lqudtps/UqcqEiHBzM1eRFw0ssc7azomo2Y3cI6quWz2wN+8R2tnUS1+aMDnLXFwosSswwjYj+mmomFiMMh24eGJJG7LfCr+wOM7kzG93RhPK2Tg9D3bk2CIziTMOpduTbBKyoEKGnJw2Fb1qBTf6hQhFs1q+ZXiF8Swa/5tz88Wza0g0UaxkcJMfD+lFs8BVzLZQaAIqhxW0I1YClOwYxe4MY9A/yB3maA9HlVbNBQCkkOqRhTqAqmehQHRHRJ3ALzT5uAB75Qb83mF884Opi5Q/frWNU3J+sqUVxqQddjwM9/sn15AErtjlv4o56Tg2jkOEznSCnrKndFkDm47GjEa3kyoEf41pWZRc/RODyI4ZhMmHLI7/IKVRngDh4jKDdYI7sZW4nW7JskeX5/4eSza6eid37NK3J0tuF1zu7PTwxoTm2t+tbnaPLnZGFa0FkJ6N9XNnahZX8Dtu6+gBoQFUR3enCyDenn9KN4NFQCgAIiSDYVt7lbQ
GITRJsBdCtxCi3spvMM3zwlGOaFaqgJL4R3vz/QrN3Qmd76tEZzul9fMLdQeCFviCoN/0d+FDMrGvtZICSAtxaKmBdQV5xpIZ/qjryz0yrzRJOP98uiGwQbAKfeLZRO8cLh9YPV63wDwrRUo8S74HnhgJEMdOg3CHt6vwadxfvhNFmKoe/rU5B782kq1/NanFdcTHS6kEiOBBUoa6NaY/LhLNqw6jJ0OzGVOh7lDM7y+VwHp2znhNRP6yPesVeSmSCPX8e34p1gq5G3oNzg8RY2HXDcGWsgRAytO/JvucCvTlKfuiTXtyarvhhcb9my9XD/ovGABCHi9vuzEFRvbf3wBftjQ+kQqs7vzzEkWfJI2EQN8vTlFueZ0v6TwRWoHFpzeCB7vM1cIJUo+dchKTYV/Bx+uqksEbpAB/jgdQ73vtT+0yO6z2wjVvIt5OIgQxAZHNDDPCUHOhVvj+qqdsounzEakFBxUnGRQSM7q8cAYCRhjgP3E3+WJucEO21RcGiD/oKvDNkma+jllfsRbwTndQ8CJTsUH28gtUdrHC+BrDE5JFMqD+5W838I/Ubx/RqJYsCUXqFi5WUciBirj5+1dGAShqBnO3ODLzXZkltSiUL1z9toxzdpQlYZOgjKEIb7d99f/t1zZ4pXD9aI73jMs89xSvJKfmcrH0ei8gs5zaoIUSrhHCV+9pSVpDoNZk1pVKHtmZzn60LdDQerTfy/9osalDxJYKQS2lJOR9XU53xNOnmlwl3mNbHAmF/w0Y1NW8OS86r+K6rEDh2DgXa7jL2A7KWpikXG9+2lHwuKlsCqNJBsgL9yAPqBQvxTSY0HU+IzJAbl9jH2FrIJ8vyrGY3NoQxbF7DoJ2YaNe7T7Ab7CEqcXNr8mTwCfxVOZEdEY6uIZl+xcY1RaVYG/GS84g3Kan4k+U38pfdXNPtTLmZjH5iuCI4kdixz15cB5os3Z/sWcHZdrSxbgxF18bFcuFw2hlfAgduZ9R4bvrJVdNki2Pr6Hvof7g7Sv2pfmidqJ/YT//5BT62fq/iVM3UUopSE3GWKvHxBzmbKIX30b2TBPI3Knu2XdD4Z/tYKQEnyDpILfOzAf6z4hFLhXe/cSVcTmBWsldcxJFh6eeMlhZfbGQcX4Rc68wzw9IUFgB4o0VrrmYvByCfXbSDArjOdLJNp7pT+TqX19w4eFAREdZ6SC6TMsVnxTlBEdbApzDz40tioPvdAmKxfU+H5qZZvaFU1EJbDvusIlpPSfW9y6zpBQSmTbqMwY1fqW0c+qEj9PzOB/aYAewBQZwciIOpD0E2L2YfhZ1OdzL2VAhEKVv+0ofcNbXpL+OPfvrdxyvu4iyl/Ebsfui3XJRJFsQVRjOyo6uUrAHGJIGizisUB7zn6DeibAbd/jRsOKIjrTMjJbmVp/+9KxmrQcC5DEfasai2x8dNMSIU0vlo+cQOms2MNCJ8IJXgcT/gSWC9UuOCAPIUEV9OKdk3vC/aWnh3z4rAs5qPRoW96pomnfV1WBxyHSwri59zVaMsy/HlW4Q3VqcdH11RyJjjXuPP7Wi7FoYkQsO3OW54ruutYmSxil7hBVwAttOnYT70VzFngpZ4Qr+DIENwcZLXwutIAsUh8jlIucrC8OlQhYvo+O9s7/pmVrOrcmdr1PecPeqh7yiGf4KMHh3ZRlLnzPRkJu7GTUG8rACOkEKND4X4AI4pCkaZ74kJV7PdddPvNtAGjwETlCB0axwOu35rDARM5MAfSfQPe9lyKFtLk0XnAANz0ECaAUHKAcpl8ZQawFK4YWgbhbctMyTLtqmH3wW9u9gdTkVwNrGFD8/QcYGfJlDKUSJZ2bXuwj3U9P8lWCfZfqBcs3DqTdsWp5iFXX+OkmydONoJ79qljU7sZ9TamCzXDWvtJU9dk5l/Hh++UFdBezCzkT4tpJNZy+ULarrXg/pum2dt7KRXB93nrugtwhEmBJxbJQqV+hRc9ue4jDKTJv1oDpsTGLTBncD8FG3wF9OUf2AHaYjrq9VUamXtTmQUy6PG7XBmOwMBudq+HIfXP8S8jYBXyQeEWcr8wj1Nxwm/JWgf16oNfsflhN2yBWcggUn7n+spFH4rOZ8t7GR74irxWk0u4ghlSTT9x/A+QLTaazSCOzLKqYHq2KKmxbsegbPxI6uq35KhLP4vWCRfNJGjZJP11IqwiLmJUKxtVGUYH1IhBqI9W9YCYh+5kFlEq8QSHsj7O6uVVx0f+akruYe+apeTc758eNme9KOXdcVyFieXkoYEC7PV2krh2CvoN9DVwS5DsukEkY10wFFBQ1Wrgg03Q/jZxQNmyMBVPHPxBcQW7pHc1XWJVx/s2rfFgx/Ow5zw2j03N39+uia/WwqHmtl7ebap/wKrzGc0VJZkQmJd8Z+zUHBPENNsvzKogbPb0ilc10zw/LveBwLfFX+q+GRHc11c8B6HwUOUQmHANY3NvgNKEDpH+FxfU7XAsj3M1s1/1WtIN+luJto3YR7KSNKBnSAORmA1WiXX4C5kc2NS2Y1NjwqNmAKF5/l8LExckWblemFy737d8re4EvMc5NVxCwe6qxLoF7i1qSjmN+zxuelq9i/+oIkswPhZEimT4MevarfQ33w7+luiOKTY1xNxNO9hH7iYUqoLnETHWGBKpWOh5IAltb+RFGan/lU3GyyYNBkNJQjy2jZ8WLwGT6Ed8HY+er2hmr70ic3CHMIhWP0Fkt/q2vvOTgSNjWxS7Fpa8oEwimXxojX4aVHr8cHvGeLjVcuEp6pq6bU5Sdpeo8K0+uQ7PjP5uxXo72vJz0rEnWfBdgxV3SwRv6lL/1ea9q5UPunwBhp34LNgXrMNyxU+SvLdnrgP8oI8ZKI7qYv/CX3L3Ahmou84qB0M7fi+5oomzgk2LoGGgPUUABP//mSSEsikYbjVD6xCbWmKEEaxjRXvFZ0vCG/ADn+RZtWJ/aMTQs5SFNeXCLKgwNeMEGy0IOmyGfQ0iCDVPgds1gXHwsFSuBgfYkr/94CyhaGvLy8fx/elqMuKOvJkJeZHoA6dhBID+IjWTvj1/CFE351flIQhDgwe1A7OCYjtJUqSMxxjLM+nzsZP/IeN2MetUQAfOdXRsE/t5yjCJ7zaviX7VepAJ+C/XPD5C/7LjvVRGKV8TABcd9+2Tb/59J3Qz3+cY5/c3cfPlqWDUpjoftJ6ko8QL1B3WKJfVtj9O/okuf4lOPZgGOd9Vdk/8XjCYojH6P8gXalGcyKfMld2VyK9jsIKv7Aaj9JZC2q3R5JCKjF2cbbCIlbrnIe0rKZKkV+TZO7Mw70uj8zkRe5v9Ivwv3EmckrKF4idVOv9cadrUOCjbJd0t8XY4J/dwoHtxIr/Ld+sStqNRFotxLyFkfEKf3q+LK/m0DwK2mcX2pdmkPaMZ0zri9QASM4dgbtdJ6BrdLbYLm7c8q1yksuEC3/LKGHUqdK4SAfZN+eOXSlW5svL7qkQ7iG75qWpMiGazCplQUEorhKTzDt2V4ahxWB2AB/MjgdJOyDaR7UgafbSiqLTN3gcuaeYbBzP+9ffIZdfxpoa+
UT0RjHQAs+ubkoQ32lqVONRHbkJYhrJERJ3DF0zJjFIgu1mbYnkFtO9p+OqULmfcf+I3XCbGsKdJjxGnhlzM8uVKb+jNZ+27PDfU4ghhqqTWEmXuWL4u5tj/wRWGZWzFb5C/SNNAuKq1IH3nJ1ATHVtdQOfskb4sjJvEf2U0Y3Mdl6AzrUSGDaaokok6N8SnykkJljNz0uQgcejMNQP8CUgl5frpAO+47sr3L/THLFIla+x6ATlIFcY3eVaeua9BewGn4CocgL5F2c+Pk3EmnQa0bEyWmeifJRpfR8VVLjh75e23+cny9ODQVVBp6xJ+QyeemUkd4KEtPL39z0FDqq3Cm+knQcy3f19nYXkvteHNLoljkL3fA1umu5U6yzE84YkI7d4asj9uxIv/2p4XveLtQNuw/YsyQepj15xQ6JshH0SbLCqBjrrpkeXZ0u+NjlqWLmYPkDX9vYFGuPmuvD02EiQjHo5UAMWriVrrq/HdAiwgzcWfgBG2lU5mdxHTaWR7UMH402cLZ5MQiBScD2grQzlDP+oI2fwt5jmI1N4SIGbmvl6Rnu+zLV52+bHYl8hHphnssIeZr/2pf2B0mi28GhhkAsSbLreivQ+0UJ9lFLH03/cnO6XY75Qy1gA57PS5EuofMdlUfCkXhOiVhhrhIqb62XS1idKxq1JvBP/JC3e4xGFW5a4PNUwGLB7QKKY/yiPnIzWHY7nHSd3p4ya3o5NP7sRASz8SAn54Zs+E9zL4Jog79u3RvkNDreGsCVakzwTKnfIfY8D+YXtqxQRXIH8Ualh+QveC/Ga/6/VBgk6Sxg4D2R0srP8/JRVh3x0euhaNkA2WNauzhQw3V8Ex+uklJNRT89vJBgKjhsahkpwLvz5JUZ88/RR60br9AYkRWGXJzCkMENtcf780CIAup0UITeECWCx7Zbz3m/e54sQHxSBebNB1qtdBOORN4n3yEEvxLfNorcUDCShSnfCmntO0sexizSj/lFIthJtc31GnfiWKSY/JWFkLaxOxa5H7yBWsZOI7448kAEL9EGxcTToTglLC36yMZ/G0xi0JjxQoXIl+3YR5vUwqXQHZ+cPXHbaOIVCvaC/Q0Eq0GL8fUCnTcjD6wf5l97jBBxPiDqq4+IG+lnW/J6x1orf9gBtCaEbBCCkQ+d1N9Lat3BguqyzcYrBX/kP3wGoYoPtJRKmwhpLR5YUW7A66OfcvKljH/jgvs8dgS53jxyPLKx5ezuRyQkCP20h6JAi0YExV+EasYDsLuk/qebbW0b2aaDcUegPKgRuh0rKozQF8+yw3N+odqfT03hU+6MwsVMf1eKfr207rBGPQs9a1G2x0qRcvhZkH2c6JANJrkHA/irJV5IfzvQt/kT7a23LaWGq4mN4DYzEJLFg/uHLudebOaXsH76syR7+ulsxHqGJ9L8qbH/yysqbUu5PqacdrOdRID82ed+HFca2VkwUUz2ntXxjtGmABkCk1G+CQVOEwiFtGR8H6TqHlQ6gR4evvvWkbxEACCeCfTOm3BpuSM48Kqo+KDwotU7pPy0deDXY9Hj8Tn+nBGZueuFNQiEKq8p/bgZd0KgturrTGbLPBYJJ4CR2qyWekphiAZotNk/a+tR2uocUBb8tjMxMgHe89J9Jf5fADYxhTG2v/44vT6WBCF7YkN8h1chC12wgsVhVH4krDQRjQqZwp0wl6NckerRnoXmDxF9pU4Gr1H+Jf61MSHLv74Rt9iT9W+SXzHPByoXpILP0eV+Yw5o1QRDCA8ggrBMHwwEmMgfegXQGhFWGf/TKcSOMMcHFXnc4So5tjlA/orfUewRzVsNLWS7CXMjFS68En0XdsQ66fHwn6QB0gZwawRZ70wvlA63eAeOX0h4f0CJeehb0DQp9+dv9AyKtCjgcVDBW/X06QEQ4fsMg5+tOKETmZeGH+Izs92Wo0Zb8q3qdP63Q+v6Ur7GnMF6WOzKAreJkfx7+BK3wfV1zC/cPalbQLAzquF6ibdECbHok00qG7p66Q4DArxa1jHyKtkZb58heEj1lRiuJyupdt3NSerLZu6TSkd87PMVxx1CFaNqs8tuYVNr/JvdA55iBqkxHekV+Oc3aEqud11/SLk9qLkLfyUNJ+owoB2wyxLEkPZ4a6xxuEfRPPS0RD9amRt1TGcSDP31IHhaw5x4FlmyVVvfzNjIrSua6h//5v34L3hQGtGUmkclQmacOfWT1QAs1/rkbynASk9PHae452gqVXxGXok9B/05ZmWG6Lh73ga6v6rwy5WYbf5NgsrZeAvtSZhSV5oFRjNtIgfL1zN8Tt3bieoJuF4I0ApVf4TkWbn5b/Jnh9T5RuUFXzquJxrBBuIah0dmWwvFGRywsk4D+LrWAXEdvXt4mHLY8ok/hy5OYSr1+pweiEVYkqce3wiUf27E7FWDJws6tx+++bAya3XIbvq/GONYNViP6yaLo1XuwbSP0HpJhxgDyTGFOePtx/JLIYyLDtaj/ipLIspDe1rwR0Mk4IiXkukzYMuFo90YtjhLG2XJml5SAL83LcL9Qwir3kCwHY0DQj+C4E8aBA5gZE9RH4gxN2q/eRNFQvMSpW5mb3C/5QiVnHNtVOLpuFigHD4O1/weeCbxIv8n0GbKgl+lUh4vULsfGHMdQGNnxgZpn/yaZg6pIBdOlZg1hqhcmR+P0wuGEl/FXx0IlkrpS5CqHdq7nT+f7NBx/X5yQqHe/WWYO+zcOUqEnmQRu240e0Ur7/N16/dz+fr7nuzdhPJb5MfGkvqaMj+E+KgfaY5vn7WCaIBMSl/I8gKXYfJLtWYsI3I0PveF9JevK/KO+DIlUQ0iwLEh+rlJRRL/qMPxnY9yXdhtT6ga3IYATgLNMg0pzKnyKqFi5bcF6MgfLmP6zJF8kc02+VfGP2ZTyMs/QK7XMZeq1esdaFFuiiJkIe9R1cgDoTC6g2rBwzEDQaEt9eBdsMQGjqml7RjEjtZ7sC6F6I1ly7WkrupiPUq/lNUzo6cKAhPnkLARXShBioQnZHgrneFx0QIQJV7VwjnTq0m+doSGFQ8MKeU1iA17fNitLQTNUTz42Rwm2SwQOhPC5vW1JpXoIfABs8c0LdTXfc8bID0o9xIc/CbHprH0dQHTNwYk1H71qKrqw0IVnPUq+FHMz0Ymps1gA5tGkuN4TFgF38XlT1EiNrOf1fycmva6E35herXMBKMFta9MBL3UbQM7EFLdqTLrR0I1lDTLwz6QeCGFJUmmkfINUdeNCnTnU5Mf4T7Pbx4/n6wsseF+F0kxO481O6qOha7ZxcZhvpwemIMepPnnnEXRQFLcqauxT84PG55YivuekcR6EzdEoi4b3kVaes8iQ/2Nei1ymNpfx2Ul0jaNyWWnhVAeYYzc3d2fXXIXU0bwYWw1asqOk61zKTp4WfxNHP9ru5xmxVvpBhip/ufU19wovu/3mEndSPmk1ILjOatrIZ3zQnKMK4PNqYmtBs15vK68ynfSKixpXmpR+WGppiMEwKGbCQOiqORI501JXC/9r04+zrI9o7lRtuhBnSD7BkhtBiSzcA8Kj
cziGBdAcBDOBraa/NX0Zs0clqvStAfvZLOfB87EjpfmacSDGj9u57JNuTHPcoVAUv/uTFDQfW1vUs+VOyfiv4QxEHBcfKOFbCQzbynypitZoBPSu0l4X1zC8K8d8LbGlIPiKNMaOJuZA8wPYNKXtj2bT/rrCs11edqFEXoW3hPU4QNi8q1eItxXFrw1geM8qISx5lSOSnW2X5nBMwWueLENEXtYoLJoNIizLxODqn289Bhf/po9wOuK3cBzkqQ+td8ROi4Ff9eylAVFMiLZxcgBl30BLz5ILFJgkXZqNzSFqYyeMX9f4qkjbKPlrxNAP/EUopqhcdrKHBWjNS3MYZWYk2eZfA6ncrjviHR9AMVFihiOgwCOP9AE4/krQc29b5G8A9fn8jn2X1zqb1BO+7igY+Lytn6Tvrtu1+kkbhGjviC7wVpo9F3tGGSWaRxtCv6nMLBXz5fXyIRpD0fPJbNofusEDvxhxNDs6757cs74FZtWTDaA8wWhCsnX5N7W89tMUwr9mqlR466/fI7FxxB1tJFO0Ra5/qrPy/EeT3P4+9S8tWg/E+QR5dhmZmOCdcq7aRMEFymR53k1x4gAwc+79nfbmMxED9y5K3Z9BqkVKlz+2k6nQ5TOCEhDWMKBPZpv3IcCCQpiyVPnfRt+eeVrATVJIfC+uUKVqjz7j/GvtPtp9XfzHuqeQJ38VbG0xnfu81S0SZ+gsKHEaLrO1/SUH9FGyMiga0v6WA8rrRwzTbOsrpHwrY67jS+3mZluCdcrEBDR+puuYZoXw2sMoV9o3Ijo2pQNusFH9Zq6Nvvj039pxrymQiwpB5NoS/3cT/fBB7ID6K+mK3UXG3UmwKu8kuyvSRQk3aryz4vAtrRhjfZNtFhVHQ9h2uIM/fA2GjUy6Qi27263tQqXoh9Ff0Y8+4D0W9wby/fZpzrZVBdKTNlhZ4SRdGaEIanWgnPTOnO7GAcJstrCFq+UNx4tHGY3WqshBzHiHalkflPYnvsXiUGmhdPUySE+nIvBEiSVuevSkdXysNH7XS3mP9k3nTVj6MfGOZnuGYuuiuGiHhEw4z5+IEhtloUaE6abom1npCgC60dp3yz3vTCeuLVaciHGwr64x8EERT1V9fGmnGZboun0f0T3fDEenYK4cSwMaykBuNtiuyHka7EeyPZTJA6RumkWvfzv5hH+WgYyrrabWeYLvTPzfNeu97+kjZiJge9qzfFG/fuJ0D6H6kuD8OvjKx4xlFgWm3009owABe0JYZ876zG0JduhivTfE3cgLVzfWDPAfkupe7NXlY6VfRo389fI9Vf06NW990Z/EvYpsUGKyv5UUz3xQBeRqde1YucBMvMHVLGgEnyM7lcvLYeL4YH1uiKI7vaZv2QsHE0VpcsYXTjbnwZ6NHo0xFVvCSIG9g5HrXBX33wmde6HuccKlIbZrz28/xgXuyqP5dPMsAbyWVxBCGzcD7/glxxpqJ+1qttOPSqh/iNlzGkQpYneNcAayGobKF97RhJMf5i27kpeYCDUeHQHeZMNBL0SHO2D/PYVhbvXV1b81W8HHvIjhYrS7WKTzww1d+RbJ9FevB7LC8lR1jkUkB4u5+Nd4CB+imn9QwFNOwtbbFLo6uSl7x5km0BM8WxFm3r2YnET7ydBwdvZQAjQEuoAUb5+lVqKmOfgtPEyB8Kv+kOxXvVN/EH5kdMTrkF7CYQyblbfFdg8fsUwPVjVJdGyV3MABXilQZ+KRJHyV+UfwIJhqdOYQQ+UChw/ZDsECgYk7D3nNP2t/0Tcl1E+vUuVyKe0Ek3GE52FTeQIUf2zMkatut4P7gGGwh89taanXa0806LFNXTTGSkrkuD5x37t6OGiDOl6ZtEtzw+qbncoaEQI/8vmH3bi6CbnLAKmoYrarFdm/7VO7ryI4YyolmaDaS+f6RPwUMj+eX0iRQWhp3bNFm+s7uTYHO7RyfU16aIbvWFpCMmuakgCTgICbNGlxFnXHPff8qu5GWKTEO4JOStNg/SIuSOum9I13c/6bKpuGYaltpwV060D6iVe3wq8vE33TlB4HDpeXL2LZAn74esCIZNWVUPfqF6zx7L75O7Q8JjqijN2i7dpgOue+NoEQZpD+918Iqabcxj2GtzFvagYMshyOTRWSODUEISDZSqtta5eQozNN00mStIPP4eiRn/E7hzr/nLo6yn65KG9HPEnyUWYyzZvWqKHUZChmqyNohmh9+t+TRT6+7ee1VV3Wl/z6PQrLU+2WfLVZT5UcynbU6FfrvDfNdKC3qvSKQ8h/zckFGbdyVUr0jymQup9Y2uIdD4nmRD2D/0dWXmg0GNsE/pPRfGRvAgrgpRnz1Lpa3XaSvIOLbVs8yO0ZocPZPks+Ar3gp05Gqr2g/1BRG94oqueoH9wZsWOChiQ2MvKXgOHEY+BLzRcSLG6J1a1OONbKfzk2qaOme2UuGfrAyQJXf3MhB9h2N2m+qPrS5K9FBrWW32UIqk7i3ecoR2wwN88uUSrTENT26+WtXP3YM/4QnsJeenoMO42jLuGKm2hQVGJDwHmsP2Cyg7QTZ6ww8Pl39/UcZd8+dGokIfYvCKgi34jipPm4L+Kp3bkidWtML7U8k5ydbCMRtcl5Vc0afZdRvQMjTOe/L8baJ52ylEQI/B3Y3zZGdiKzSOR783E3oe9MRD7sTcEMn9GsLvc6hiY+cEvRFtefloSRJefqojchthWMI3fx4uqTA3qjpmnhNQ4JVDxqyWcWBIJljLprJuJeJ6T21w1P3WumNbWSBMKK4l29GXUtJHiWhC1pIltCGPzubEYeL37lUnws5n7pg0260zuLpT2FUZLJzONRxw+c3jxXTaBuxP4vpfK0Ib2Ibcu00Mzko4Ty4XsZWEbxxubColIxMGFi+oVjYCp+YBRK34NjCiH8vUjcr5vKr5GTUpYmzUnc1NCN+JC35RtanK8Hr6Iu9b5RN6qVbc4DHu0/tXDadynKbWyuNgm9sOqIwCVbFwjRUseyyGohEX/Sk6DzrAyKZLXpq0vlLYgHcJ/oyc7ZmItPwOCOp/jgt8vEPlYGL6cu4hh7gltbY6u4OWkzfc6dwpVrvRzGKu9spJXkLyuFotBzGZJY4mhZ0KAqQj/vfr1VHdYr2lAbpuAlV8qhlxh1QKx8PrSKx+CY5bObwKfGEVhx736jLSNM3BjrIs/WMsaWxk0MVzVUW4ds6NfgLfCy2QP2JLFweyBQOl6oaXblB/cjPdE7+009vjXiOJdkWORTqPnRw8n/nc9kE+PtdonofuDETivvlUSiNZfNZjGmU1BlNT4+QjBvcLNcZVcD0LXCEWFisrQX9Q7JwIgm5Up4tFOn3LuGAn4vO81fJOHe8lsAsLi4g5l9lf+IFt7/jqYfEWvBjFWK38MjKMnvdo6zrB1jQ1pUWFTFC8x0usHh8qrtW6cl3cCmdgg/j3dH6PBMTSGeCDWjFsZD5XMRZzxZ2h/l7en7U/3Llaw3gGAW5N6SQihxVClnMYHAbgSRLMphbfIw+HINLhKSb4AfpVK2DxtMhPCpxJFhEDdNLvSPng5kFIxdsHc
DVcZm56H4nuVDSNXqhK9CrC4pJEp4JI96Tf+IjqLusBjfPIF3j9KRVco8ixUJFWBrALpA2+E497mqAkSub6oZBJ+Dn8zUFjeLvwuRtpXf9f3VURdY09s9OMDd0zYcdWhHwT9frHwUx6BLGew92wPURLc7MIMAqXXeaCKrIpU4WMfQzgyi1+pF8T8ci9AmHjheg51RctggqCDRf0q3K6vtnJ5FQeQCllhYleL8TAkm900S6Xn1KKO2X7yuymAQFv4i+qvLDNuSPJwMnjQIy7U66xNIu0RT3NbwZE7pgvpyn3VTdv67Y++V0drstcmY3f6Zk/LO9DyKsaX5PoLrEv0+b3OH6bplXCA/aEDjqXjtqEx4OpJs6xwE2O+NvFr1Rm0O3C1XaFCFa1fRn8d4uJCjUy/+vMFiERtBtPyYsVNEudcvJ7pEC+kQsDRDR1J7VcBe5R/6PZ2k8sjrQGxxqJ0CN+hX1dXN16K6RhFiOiY42Q09Xz7A/Rh6R21lYWBHk9H2zXPvAwkqcyFDXfYXYBDUeJc6p31SZvNc6G5FC2z8Q1FLbWEOj8mhEjE+kIyzGnf4IItyDN6qcombql/n5hhQeOK4692/Ffboi0V6z9dV0X0l47MRSXOzFS+8AaXEFXIr3QtrTx+DlRvH9Qdb3506i1XqGq7+lZLn0LxSIfFZQdLPKDnmeJsnkHYKMbUoRHaXHU2/0Zy5N7Rx1UkLIEbKqeqqdodfqu9tsQi7icfBAjCzyjAu51+NDK/0VKihUyP/lJCk6Q7vnJx3Ud1Zxt4SMgcC/U9vxy8gmnDDCMCN2CckCico4RreTAb95hG5fAcpVVYSO38uj40noCEWjq4n15WjfLmCKl61Tq02/FSOolQthocEiXCaO/fAr8oIph2ct+H4kDkJtJ3XIz2r5dGchv9VhLGBn4zuoqJbw5bUu06iW38HK73Ylbu3eTQg1GJGXub1WuDAL9A+AiI0h4oqkp+HyvlNvXiaT7CTNVURXXwo9aZEW8R0pBZg62kk1cg+pmH6Ua+cGY1xiBG8UmGKa9TfMq9hqBbXqHTlSzbUU9aqavyL5ltPTkYQ13RwuHM8fS4uOv+yg+31ZvcOSGmSKiKanUy9PpaSSvegxPBz30v7nLaIAENL6/cXXQ3dK3tSuPeHdjYfA6TI8OVzKKd7md7+CBqsl8HJLr45AbLcB+tdXJ/U4XR+0nbITvQta8f/cFZwVaT45V3nk9gX8kW/NcReZjY37OzGozpmG4nBmlP5XeqsEogcZ3ppwgcGD94ML5h+zf9TMKsiLVvCpJRg9u+Fo029fqyWof0a2R2sT6+ZK4sf9A02Jl916//3swwmbZ4DtM2nqNavg1e2ImIDkLpUPmmUpTnpmM6SDQm3+pzwQXPMB/+ZpyPyMCdDa4zZR6etgw794lPlhySynDMu+/VHa3YAw6+VP1N1FnN32wOk0V/446OIIH5m+htdnQxhvP3UewOC5S2tUCKXVUazLyUZnCmBPvdLf7yQi6lUn2IHynOBCYt4qopGw0tYYhsDLiBsBzH+U4sh6i+2/7inLWFL33hvcD1v8Qf7S9jRIFoJ/3gx7k4r8DxRyJqG6SSW8YZwuaUuK/3dSu5tEPIx36VBenADbv1lzoaEASAywhAx3NbNMZZcpXXXhTInd8J4WnvRrFYKy0ZWQt+9+/i0g5cjsogjSeK65e3gOth1O8BtqvZu9vlW+HVu8oXN+gCEyyUb5M9QWPd0Ekc2mTN9We735Mf+8hPVEXR3TF52x6N2VnY8gpjH/AY8KlRfVnaqU7s7/APbredsmSHr8bSve5mmFsPiIJ/baZqo6lrU4hFKW2f/5pfauU7G3TZ/fwBorBAQll3eBG9AVfjGGluTmRnSqUzKbz8d1ERAEcIdxw2mMsTDdUKs5UYE0q/HRze5GdQ8aKcKz5LR4cvAcCkSq8tOD9cd+ebVRCkaOJaJyubclcLYW1cBhNc13+p2LhdUD9F6Ger/I9t0rR58E3NfSvSx4Rsf2TYGntw5D6n0N8MJwT4Bu6wZfxn0sm/IYkl3z+98+649zdD6aMQQCIYFsmM1o7R+gXcG/gLJNpv7LspXjcYfSjT7c/z4p/7W3Q0VUcFetoT5TIF3bwdzXq0ln21nCdlHRhqXb68zRetHxyKltHVxyE60LDECI5ixXKb3aSjfUnDdSsJHKCWkepCQoc1lbmdY4S2rJlyzDpiFqF8fCzgUJnKEuGUPPxYK4oXGv4C0MTfs/IbB/6nmYPGu9xfbX57+YKg+SlZDjC9FwLITOxpaVki3Eo3jcLPCY3wS8/uF7vM8jU+Zi+Bsplr16QEKXzoFmqa+DgmJ1oZ9XPsIW257WSY1dYoWbrjOtTxVflQ12rvCEw+3gmCZVuMqrGOLNTP/tKqAkkaimk/PlzWiz8vS6DPQE2GC+Q2grRTdaYa8xTwYf11FMT0N4Yv79YK/pkNot1fxkSzbyPfELMlNuGIenKWSlaRjKNvpWNC3gkC+FQ7gvwAHxAfeY1/16pwebDXCrlO08eF4dWhNV7l5IrCTRsUFdLZaDRg5U0velUlmozZs9rpKRJblwSJMUa2XlaMMEj8B25s5QCNhMx4sLhCpR+7My+3tmhcNbjj0dq2+h0wsQt9XVZioyFai2pmIInx2rwI1fBWmtjFOX9vqOuAeHY0d3IoKBJkGLQ4UjG4fc45ICCjX8HQV7MVK91fCqresLMa1bOqSZoAQ60ZW9KYETWF8NVFF1qb6fd1TX+NBuLCC75cvfhPeGZst9l15btj0bTg7i7D0Qt44ZB4mMTT4w84N9a/48OMZvlF/x7NF+xjkKte1Ydmm3Yt0VoTNQHkGi5qO0MGCgT4QIMUWxN1MlaZD3R8Xh4wSzn21ya8/6QONm6GPjXDuHpn75/nyW40saQSWQxQK5ubfcRxzEjHgkrOAXb73csas3b3gXsOvf4QwaQG+g9KGkacoKNxrudnClgLBZQhtLaNsLZ9QJf9nkD79NWXpiu/pGeeAPIYvD7uD/p4Nnrgf9GxYXYy5pSUtdyfIX0+lo2pYdSorxDmLI8vAuC+FWp4+SCa+hUGtU7sXiCv0cgg7w0wPuRaBsVRnYkMsvB8+qStVuqDrj5fJ3DtqkVXzv4KxesEdezMgFpn1CyiboBxBcetAk/HkNXUx6y27bWExa+GCzABXLOH/xKO6m0/K83Z+2A0bLqGfjgNTkdjVY3ZGhcaVX/isGs3ebBlk/hQ5bRGFcNYYZSYkTEFbi4DEpyYLs9zfduIA2+6H1bb2QulsqhT6GWb65mK+Or3jOLKvqhdiPLaH5GOkjaKDWaaIR9G0FV0eaK/HuMK85PXMETsb6ZcmTCGQb80EsdmPJa8zFGzxPW+9LinMhU6x8b8zUcgoJfmomJVfkBVM398T/FJkmfzRmCjmMfT4TY7cJUkujp9gey/VtO/n2GwPyzP/SVnZhqcAPiA0zzcPn+ZX3J3zgqHVlZWTo0uZDTMmveYAY5AD0QW0BKTyXXpxtnOTtYpfCWO3hrk8/2bVQSgXrsPQU7S7WKiCjwZSaOfvbp
lL8qFGiEyoEB4YztSu5afelHUycf66yYpRbaKKFlKK4FvSHOyxHTs2a83+y7wVVld6yfrwEZs7ckuh8cRLlVkjWXeA+W+MsdQHYpu3o1/ydjkNPAavEyj2ZSF3IslxtwIVOOZ73m0kiu487zVdJVYg6S7r+A83IzCfYaVbeSz92TACUTic5DgUkv7oXGKVGPhycOB6uip76hPLb3e4O6iDcIsoYBIgfnn0HhIEQoOMr+zTFE7baZYmzntDVKMH7c+uCng/+LWvrV71xLag0b/PKc34ZeO+GUXNn+9i4ig/iT4+0r+g+mxGzrYkIbI0a62Ltg2ON8DKTzHr4XRIbv9BTqshLMn6DOAo7FfjMCfz2VIAzDtq7/yTp6/iywXUoRNVf3qHmN6eSKDKuiHhSscvyFHgl4DQfxf9pNzig0FAonrcBQ7I2JAbzMTYkJg3RvBN62guRrUE4xp7YdQGvlmCPFXqEfp1GKpIk0GBQ3eYpuWpGLvg03a91gRCjpLrVRR2aWoaFl1un3xV6bepSrf76abCeA1MrfiCKXzxav171S3SlYauCOjawNCN/ZhBgDi5QWwYoUw0PfDaEeU7LoaWIXxu7XuRDUW1vbRObDEvLekypHKiW/i83cj+ccWOI8dXmamGBFJFFGjdcLx8aiJnOP9PR96jKIfW/7I24AIWf3+CuvPxX4fWU0XrZRzUM+tBA1iOdyCvUz5y5/LjgwSbbF0wqaq2VFP7mNrol2/HyV2J8mXYEokn7wq1fDauR+p1V+JwRGb5Odq13PlqpC88nsTWjlF7h/WwL9mybDagG8vXg/99c1/o0pF/UNszKZ9LSWdySwgP7+6yAyQryiAbO6II+cOd4WEzXS00xhgdG8MZRyl/tlFSB+fe2So8xgvXxPceZcy5SdAVPRYFOedPD0+XDv9KtKw26cYn3YglictHBbSKBFm+tmIOMIAYi9lAp/7G9diDt42C4HTrbRAdKR4Cel1zn75UxtWIF8NbiSPByZAMEU0G8LJ9QOQw7r7ZxJxWBud9UF/RaCUHkn+VnhbixH1sOCFmvPVHsuP4yXtADXa5iFOuCY0hXoEBdwMx3J3fNByA2FCw7o+cWwgEUxRSQgduELQxZ/B75K9/Awi6JJEg5rKFSliusO1vfEYC3Ih1fhlPTOlClae/lpx0qHOKYAQ/vtq7CAJFibV6D03f2VtcTWO6vRhCRCfJ9WKpNitJKKTlDTAAPEkGsc9gsurCwFQWWZ4AMrwoT4vrQuwzGgLscmSXwEVidBXYJoIrwuCq6Goeq9+fiGx23ErqHlZEptz18t6xFuzVt2xrtVgL6aosbBolR9nQfkJ/eaKj2ckO3jjFYIA4m3qgvj126sSyezabfpTWw0THkazhu3r9ZMToIS+ooGbu5xolR1RQ4C8lpUvl4suVOM/o9LBCjbiP/in/V1GjnlZdBjxDz0xlb72xlOVcBc8Jr8PSNWldDoJTg19u4BEYJ3Cr2CkmcpsIzjjP8DepmJ0qxT6N3mZ30QlF/660MLEHBWpAqnIhF9UYFQwQDCjCE8ozCT6iOUNfWhYw3JUaOsA1dbtl+82M2tR0PAEDaEjwo8xrZhYxEsM+Z3rctZkfgN30QLO35MV+icedcrr+oSy+2jjMX8YFbaHFpCW1soAo/pm5cKvIWvOVXRBry9ADDwDbAmzQm5s6IMTyEiCWnMJ4mnOL26nqONxp4P4rirwnxOq+2olrGFbwRm+NKBWsmXg7Q+syMuQyLgHy/hOue5QoEWENc32Hiahmk+YZ/dVV0oiTx/Z8Vs+xDAtDLwzKKQ7S/I5FiSbxy1wmE9eDhoD3CrCdHm2NUGFA7Yp+cPfRQd03e3fEElSUE70qt3L6YB0CbxcUPVpUHfhxbeiNR1Z8RpzCb9nbcnfhTVNNbMLda1NECG4XodNhjEHNzf8YDUFmAJGTpS6XojJabGQx6CFR/tyrVLFofUwft7SfQHZWebV7Ph1ySXyaIPBjvoChZwgKPgVdx5jaI+d/4at1+Tyf/+mdkdoPNFZ74pC+HLy+YD02HfNqRPmWfmVuNccYtzXfdet+0RqA315/F5Yh/LECXfCRjKYV8BWeNq/JK+sf95KZbkr9jMfp1ROMg8Uvt7615ZGC13ezlwVP4wMnfavQeT9ZcIgRuAYM1crD3gkscOb4nVp1BfEP/6oscMwCuNtOPsC+yqcJnjsvOdoprqa3kbvYMZ9K53ddWfmawTV/gxcNgjbvkBLtdsJa5cX1Cxg9L3rHSR65/T0IESi0G04edTHTHnqQ/0VzV0q85pF/5fMhxhT7HE3+4zKcWCpsB7hoOZs/1e6g9MV5BNVhWDaB77FVZaJSq1oVP85NMfWuyv9HEuB9m9jJJL8OppbLI/XEHkUd3wQvk6l30FuWxx6ju4Hz3E905Y3v7/N+UvLzJw8YeqNGWALh4+8/PwOmpUz8B61hlwOH+dLM6kb2s7EHOAlh75LUKwM/akdL35gSMZ+4CJZZGSrQ7T8OLGmcp/vSV6X3/Mn9d4Tudqmrhbfr3J+ipvlE7fN6uY7mE4LfEbsHPu+esaPQQmdFmrvNO4cX5cnScl9mgvmsHBAx5ndVzmvri5S+nXM6xO8Zm1BA90LMSWwXO4rMJnJCToTDP6kdhNh4/INIqgPcWSGawPmalC/QqXWP8C29bglkjABc/F5RQc4McR1ESAN/sfltCJDFN1i5ne1e+9RUvi39/nmwCnd0icnGm4JjpMClRQdabYfFIVNwuYBJXcnyIrdnV73ukM0/xrZ7EYqt+DhiBvF1jDJvsZXEuxS4l+uC0ZqBuNf3Tk4EyxMTqMsoBuVhyFJBsvj6foQmR6qlpEukrWmcNgSMpnoDwkILNFL+zW0AL86TACFR+PhGaH0gbLOCRzjW+1ZefVur7nsMhGfblJcp785xpq3z5fKneEhnIWNOnCqtXN+UqcVXgH5Oz8g3hN33tzOT3BXG4FrU8fwGfE7/LQkXEcpYN9Oqqm2xGqh0V/rwgYCNd4nB0Xwqfkee4cShYCH4/1bwWJOfxh/p9dZMC3RmkfltclM1G64f1UG3G/wDL3/+ZujqJ9pnbTTXfs/5b7niwoCeFlGVL/PW3fPS/gYQdtzp2D8XZJBFv27lD51oR2X43WyXl2sCei5+7P7o0G4ipY6Wo0EWmbYkBVE7Cu6NgaRTZl0bvTUKJhoxojwALYeV0dKNG2bfdYSORbnw/ud8oHkuhgpF0TPx0gXYFz2XztQcDNCOlkZfzUIl7XEBY1twUx4g8a8hjTWun30/i8O2c3P6GFxhCBIL92cwPxNmfU91wq26ENZIIQh0VNgs1Vbzr38CNDaBGHNN15nMXW0MZ32G9e6CfBP2XZsmKBD39lfq8ihkS5VCz9ysxsj8LHn7jET3+gUj4XZgSZR7MqJwt5OyBfyHL4CI68v2WYBCpGjsK0iCtc9F8QipoJaM3bMm9CZMw8FKc947Z9ZrM1XzAhfeHggAustg8DrimsbEPUDEa/1hPOFMDlec1EK/FBOHzbByhBX7j3vjskvoRU1afhvmEL8gnV/Qo
oIzQR7eX83z9ZHmK348HcCqq8/+bK7sE2OcGbC6iLhOIn+taL7mtOem7BBfYEwo0Ate2Xo4ZmvySnvulxs0+NHME2y8py1dmX+II/8VT/xAwtgbhFDMUdrHtE5yY42/wGhGyShHXCi8FNNCT6aQCleEx1ooQOFFikvbqX+KYKoSaDtPvuOuKHhr8vMHJSR7uluQu15EhfB2rvIO1PNUyIWw3jRwtOucNygdLKoKVzuURz8B717cA6YGrgRaVI4eyfdz62iWUZzkPllvJeOikWaleMSDA4KOE7IY3+K8h4BR+32lPlhv8QWXVr6tlsrCffKPWAlqO5Wsk23g/h/LF3XsqNIDP0lMPmRnEwGE97IOUfz9UP7TtXO1tbdOwZ3q6VzjtRSpbcJ/tDLSMUbCzg3ZMLvUb+X4hUOkr/0cTe2GliPjvn0D2oN8EN8qx489QDQ9bQ0SxPiZAcZq/5nj6sL+djhdDsK/gRe94q7Xjb8T1G+l5eNp970y/aOPKuP0h3izk7KiOrRav0yqeGj35fwhHOieV0uEQ0OC7POAPFaT3xjzVfofgPW71BQk3qZhF5m6H9gfiHAUAbBSrleXTKuw2LG42RqsK1XM9KWErcBiyu0ETe8aijRMSGmiSUCh3w1yN/1GzruufxOiA3ZqFdXUV+6KzjnVdNvU412eM8bql054/T9lvUZj7cw3R6iKI+nCrQT5X5IFR2uRUIRuLeVx1jNcf/SRGPPSmGilm6tfvQqbJhn/L6DIseakC1eIzr6HKHYStEmFY285Ikd3lmR3zWf3pOJ0Szz62z61naQieMDw8hJWxJJX/vwNL2B8J2fN9aH+CwOoCbA1mD560pqGE0jw9vxh0NvHdJz+8ZS2mrLlkntDPZ8KMEyp+/fxIBKbXiluOYGCxTBKiQ15EJRbpO+1010XzHWMxrQkQxsMeEoJUIjz2zn/fYaRVBAMvr9IUnOEAn9E02GPbxXcROg/FkJf91S72OATJyi17MZ+/77tYhUy3aLUJvwelN0ewTh2n+LB/gU21I7J5SFRDp+nGF0L2idP0tBSTTNGhbV+7TFkpZkQ78BKGctq4wGVuBTO5lNbXuh8gC1RsOWptfjrN9txFV3+Fnw0qlGQlEecN52PbzSC7SUnyrrx7c2i+creny442QBRk143P2lI1Jy4tCX2bwGldLeiavQvMRxd7+o1+iG2gkrH56EyMeGd/UFh5RI2y87vFzMfeKlCuatMJ3HRsrLfBFHgGkGBfOrptV6QNGThNNV6Dh8yY/U/g3pL7p9WUv6ojT90ZbPtOJVKhvFs4tQXQ3jIJ9eepxRXIRdhydhfMsnOEY35q4zjTMdirlQarzVRq6HbCe/Ze8O45nwETqqYT/40gEk0tESGqWpJUAqld4lWkacJ59WB0GKBTwFOIwCXd0YcmRcK3a+OoH4SztvLSs+QVYARWLMc7oGcgyiM3+nnDaJmsSGM3vztSNpcI5LytfTsREgFN1CZ0PHra32aEtD37ebneXIbjetlaT7ZZlaV/36d2n+jwIIUhQzbls+7klC7aaNPUcswkzwfE/s50n1X+Kvz/QTxgeVHec4hLo7JH6cEYbFzSkACofZ+jqMAOxhg2me3rgMBzOQVn1Xbl3gTMhhSxqrvqKe+CPHa6EIclpHncpUXHTU7eFNS6lM8B62WtoPjCOMdtAUXZ2XTXiTX/xIGsscOfF7auMN1rKGX44xoMdvHn36U8C9sLHCdKkpyfngvbOJ5GbkY5fARrBfvtyGGTcKtKO6Sum/Pkwu5vVuw5uHFd/HiJT7pHBICk6uFq/p5ZG24G1eruR8+5yd+avBqgXMtC/TquQq8aIirbLdRLwiKLO+EY7Wv2JYaBFv5OZVuum+i/Pt4hfV9Rl2f1DYwfthEeOgAE68icpS0IgGZcMoMLTDk51Uct4WTysCYbx0+v7o+RdivlKp9dqt8A9oOz4UxsVppTq69XJMBwaDRKSP4HsWdRPKqFdQSHRba8IZy0JpyrTbbwQ6n5lt8CeFRIbr9+A9EdHyQA5ZeOIGdKeZDaltWCs3tDwneAfVawgMuy47EGveYunCaS9xa0rzZOMJi+/z18yjzfuxpR25HUdCGo754f+4fAM3DstMJ0RLsre57CeC/R7fY+02uIaq9BooM0iCze8TTvcE+A9nQRCOh62+w97ErQ93yPqIP4nxBxVNnxTfUs80LzU7dBlfqaw+JZ18VYGBvbnim2KnbQbaquewDyEnc7/wt7KsR/a74i52R5OONJ4W4GtLjvcVhhSwKtcIpSaRysaJm5hPl3B7fJ22ds5XcEx71Hm+JXOg7gGYRYFLg6A8g7E3Ge4cWY26q7/9cToOBJ4KAwYaYRAytq5unbD+pnNGnikOR3BdNRoLmdos0IdXkI2zXfSSXwvOgnp/xrl8W/MmfbWVJhThSCrVhzHF6btdYK1laD8el1KWwuKlHfHNR6u694Now75jY3rIqCg3oHQtpokxVoKXNeUg6zRFIwS+12RqtCRBxqN6tQUqQ9FNCX6iPEdtmzBBY2hLv5XZYPqGeLONouAnb8tY9OY8N8y/o9ZVwHMJFtPxv8lPF3YS9LW5bks9cPB8FYG5lMEUvr0Id60oFPbrbH2Phw7t9D80O4MVQ/y9TNbaGZKypWXTFM8Z4mnWZmj9wPUUeNXmeP+5Ixp7kFaZ+sloT9tnvRQx6zI7zJbPnEPAWx5oo78LOhwQwR4+Y+RRhslUFf3lMfNMhS3y7TZcfW9r777e4Uh3D3roHnJTzWo6WetXbzxJ4FvtzMmXaOpjA4RpjbdSGuumbKrBjgv+9nDDZ//DO4TbTabS430bH/u45O2mlGQJIIJezDAltikYCGTip8nvTLeQ3JoNUXRMR5Wsa6Sc8qGIUbgvdGQ6Ng5/eSD/GZe3P9dfB1gXE0mUeB8SjZuuUf96vEddwlEJkzTGA5rIUmgB9KTDaTJ3TiB1+KgkHqbQhWWWEqZpGr2XsWueuEdFswVTL5DW6iiKIQD018f6pfMljgER/HVEsxOI30OdOf2b0ZC8xtH27sLEd1d//STbYjPoXUPUeFqv6/vmzM98eXbyRSS+hZm8V5DWCUum191WZeeMkj0M913UvT9IiNhz0vZaqZ2hhuffFR/lLg2imlqC19SiCIWZ7tqP3bLVC8RYtgBj0giTSn5QgGcXqG+BEA6q5y8V5S17jT6J+k40OJo0khZK0/JBnsdWZoq6i01r6/aeflfyX0kiO+1mH1X84TcXGmggPxjZvjtisxRyGEhx2fDOtZX91J5tm4rWduW/iKUjo4lYdtUFDzlX2mArQ7pV7ZVUdoKBWBdvkJM0cxs40OuQs8GpUxmRi/l+cc+uhcWzYvqc4PsMkdQFy/7+BLFQ9fRyw3p76NVuqiEfDtg5MC6SczMMdxRimHVbvx7wsn5Rfv2KKgqCmMql3B8WmlRBU+PJ0IGoy32j0EcWKRW7pRI7eF1MUGUZLQNjem+MKRF+5ALyG3HopYfg+mhW6O0CgUhJFJaY7not9G5vGs2+b2j4tYsf00vBnXGBklPwkB6C6
KoAUqbHxKUMs7QKfmfqtjCIh7UuE8btJv0tD3Wmqu57PUFDIeSHcgbJ6rZmq1OvsY2ImIEslNHz0A8b9I2fGE7FPraJvMXW7fNQev+qb+Wbi6hKxE00rKbQkEfnGaL4eb/2QXT2ggk+r0QsmbOseY7cQ7JT5FLW3HvmrpvT/T8Fk05Qj65rUDy3qxNK3Pv+HN5Xlx4PV7MrSYxN4yAYGiXEqJ22cXI9JNs+v3b/2fmLife3nBMwQYXR6vdQ6ciNvNgx4u0fLFtGG9c0p5VahVVIUSN2FuWvXx0JHGAhxFeyoEifHfY1KMhBNSteYgdxbQlP1yfy8A9E1Ya+H+opkUcFO4UasTWgek1iahtQazEbXtvsV/hVsey2vHza4VeGeZ8N6L3P0PZYEVWdjDGMdnU8znctbjydGLokjt0TPCLj2DKpb5QWDXftUlxcun+68dckRvFCPAaVvqlhkxaf/7xtgWMC+qvHTEhJvnMy1/y2uMDkyz6VTdpvv5LE+tf+eBcDW2F/1mP8TwMziBfMITgQbmlk6OwuH+H3sEgKzLagV9Wv9oN9vDiouyBMCY+XHI0HlePaXr49aH7J7P4RJ9wzNsRnSk1TJc9oBDEwkTVfkw97pw+S6CS+Y+kGd0vHsPp8ohaKjKKdAxtt2or40uRCHxr/jda7uq5g5gvzjmdrkMskeTU4c6YcYVyX3RWdhYB7CsVvsAS+NicbPWhHjJRzXvq3+OBiMeBe+/Fba8+KXEH/qKN/NqLdE/xib7McaK2cyov8OlPpjtVytwTwy8LBd4sYRo2KJJBUgWQAeA/69r91MBk+0YufuNDVdz3XecQNs2JVb94lZaiVJyMvSYp5UIZ9uVnECNzw3hcxAcgr3awTi5Dm7EqFY9i3TnDD9mfvH7iVCtK9i7dUH/GKqaE98zrQB6MRhAltp0bECV3voeNb/7GzQXbeH/v+JLE1xZZoS6/cI1/Zx8A1PbpiRguupI0eEJ1Yu/+JtF/HUYVxsP0y9d8pbiv+dbICzM5sNY403Qm8NU5oE32+Gj0kH0HXPOvXWF7UJ8L+9jWMrWloHi/reGhf+trxWlURGQVjSk7/zbwejzeGad5jE1NrX0lfJlfxKtiXJ6LKHUy9XQZTfzr5he/eR00G07SomuxkHiSuuiobVPEzcmjDzjBE/mY+gst91AH0YyHaiapyKpUFCzbXQO/iUOtd/uo0W6etJ3gCRcdnkxsAizP4/rI4KyxF4evCyqgZvnnTJ2M7a4DjQ46CIbc9+R0NiXR04Dko4awYYE4yXpBvrcHeBod++CrPUkEzY/zr1nSJkyc7iO2uj1jnN5f1XeLUY+ku0Bt7mJBdUCj6Mvh98//yW83ne8HUvTYjmlF8P7DfNN4f/6AWSanhWafZ25nzaUcuutvhnyXPNFZNAr/AY7ghfz1olPlsm66qpkzAF7/bHddp20r0J92NVoyyEc+OLNsA5rIwL8V+Z6fK/fmyCLXHemzlatT39ZWsWMFf5/UCGs8I6k/yz7ItXfhlX+30mUb0q22QzJofdwi2JRozCMMGjKTRMn5ZE3+WRFyuihmJUb/JofhV7m/kZPzorvob7/1mH1rDGzuq3s8GmHxVaoVwiioYicVoyzGtwjYSzoPPspAkrJaH87ugd8bmd8l8e8mAfrbSZ3fWOJJVkSSwX4Ym1NcZvrbb5C0j5yxwgxz9vNK/o9PUQGr7Jg6KWtGLph+U/fyR1cvX8HlhOyHw0asW5n54PS5svIIeEwYPCAls/CEOCdOeWF0XCA2SU5beiMwrK7PU9fjo2zyszY+k5h1dr1Aqrf3/rXarzK8w8z2sAi1Et07qWKrS+DOITU4mSKUgHhAL6DXWVb+LEzJwrIlK9x7VMWKayyGRJ0RTOFsuNsEXN6zVXQoI+jVFDUgrkrxXkASCptefaWGE0ihZurb42bP1PyCxTJnyAVE9YDSzsxItqjJpelkBypjPGXQc6eY9D2aO8jE5XKW7Ha+gLpxZ6yivUoVgY2Vb//UWX4pXioPblmSO45+1PiLcwuNTpF+WpQsOIpGhF9G+k5S6W/7YklZKHUlrDexG7gMP10FJGfotn1rywRY1lEWt6AEsfcLJsmLVlUppBLlUcXAnWlDJZ/3Qor3iMLVJeUBXdLqh2PYmEAICs7kZQ47pkuVeybM1J9e+3HCnlSfUdedY/caEEHhWSvntDXlvOsgs8wfgmkoMe75SKyJOFGXyItTuwbVl/ka8N6wgPRPz1t1M1/e1Nwf9Ienxpd0iU8c2vyUmjKIQituo33dWRNOlI6wm94lgiD4rLXnRn3uW1CRCoktjbs82+rtfOTOt+chygKCvCjoPZSe484l5MnUlAfSeM1pvoJclb4ksu5/X2NvJpn/MTejHuX551+ucbBPR+Aiq9OSWtYmt7rBR+FTw5Q/m5XCzwCo6f+PwXsHt5ItBAwN6y+9Lb2ZG1teFCb4hroztXaLIuxC19ttdn7btY1HvAV2Yszz0zocOQh4zBC8Cti0fs8VAYyeldQnTCzhHuy+Fbs58AMcqNdUG0gOhYkhsdf+MfTyrGx1LHe7kj0Ip3+ikMPcDKQCKT1NeMddOKMY0CU9MBhVxLqt9rwY6MeCM5yFyym7ecFuNptbnOJjAJpF4HXmNS7Hkshuz/6ELq33sg3Dy0lDVgPaCVxmijnPHNfvAvNILiw9oMyjkkxQq36zsTwj0vOt1LbC+yhyPGzACdHuFf2+dx707RBHHdbDt9PEK693ifafTvzD2eIv6N63CbEj1MTEIpo6MrVeq8+zlC7eWWJUM4ltVZr/Bz3+e67zokcusvbgpTu9cqhlrM0VI5r5xPRcm/MoD/2Z1GJHlKmva+w15rO28cLGExombNgF90Ij+ykwm3MkLT/qZRVNv8caNGY7XpqT1TntePAD4kd/aw5gEHqD89PCu6N3jttv9rRIiMe+oGgK7SqKCXN8rxVigATKTwMl3um4StiQL5iKZXO7mBTSe7yJfDz46LQczdHnZHhwhcUmaz6px7zqIAQTzyq3hKFQJ0Usxe+2jkRbDrLq58GqZTz3/Gl7hOOsMZxi+EiLz4YwNGPKN2O+DIsH0asF+XDZge42mwgLiUl/XOf1q+NWSEfVCVSSinzS4qiZ+P2/DLjV67FkaAqeHyX5hXpVvg01LsUg4kVPNAEWLLFmlN9HgvP8i0jpU6ze7qDU+Qvg4Cc8qFjTNs44XHchVAmi8oU0sIttIyaxFeun99aByv6zVq2SHMhQcK8nK7rOQrfiKuIiUlcl6CeuTJxvDEWI/5DUv+Chc6x/96cmkMYYI6RuWqz7HzPQxNbM8wpDfgKcwh6rD/voK7TBZQbQVoNcTojVaxz6TbuOhr8hs8WWgd/uRG1n1FHPo1lYTmLZ6fdYZ+w6FhzqsBg8X1dyhf3zgZhPDlor7Bom7vp4LRPZiudJOK2flHaOgCBNKC9r6kwa04oqT0ZeWv4BQK84TN8bf2BtqJ/0Wk/DiekcMS4z2l74lkvozIYUs2TM6Y+QrGk/Y8BoOmnKR4oB4liM0VwbGqinoNonfIZA4
J2q5wKphMM5QIGcWJL0eWudoGxyGyGGgPiIt97uq9AdpVJDyUCxvkUj33LciaWB623JjjlisTkhY8WVdYiD5XrNkn2f+ew5tUm9vq9HOkAMnV2PNcjco/LRkkTMVNxDK5gVflsXrg8KJv97cd9MebmPD7fc8G1BuMEN/mXzwT6c4JuaA7SmMi8J+faFepe4EE1am38eJUZYf1Xo7Ktqkj49bcLrE57G0g1l5+TKyGGVBU2Xm80caxmK5KTqqkfXDxKho1FfPPxsFZ1pIfJI8cvzMLDlQDEcYZTH9jYD1YRZuVlcfd4WV9i8E6+VHovZMHH1wiA/prFmaDpkASRakn8hpgqb/LfSEcL1wzlnf1MSBnLKpXCiqrbLf3yUM8ed3GjSWH4BMfyFZAL61fVsoHSjn+mpSlmZpPEAfRPmnvq1TUVTG9+tNo5ZidG0r4CExhVEWjBIJhLSyps/x0FOBKuehRsP4KYEI2xe/0cYj+iqqtNiq9QkGdCm8nHcWCA9fWHtOh3AhToFJhRRziUqfThjI3f2GHZTDyiaERb5SWEl6KKk7xH9gMnOyt1Ih3Hg3hbnSeiheI6guicspkbJN8z2yazR3Ur9dxR8/qmlbufJ56RVvRTmVdhOl+HHrgSkNvXp3dXNbBBCKMh9aSbKQxSliqnOIursiu183jpiE++rTTpRwO4IIy+9YA6gp/fB6k5vQxLvTMoKKPLeBapt8Fbmabuo0fbXl7QRpS2p+Q/UPuUKyBy1kbEjVawMjN/QihPpk3lzMYeYbJc2HfpQe2MdZZWuZNo/9YiXzGOEvHTX5AarQnZU+ZdvSzDqDnt0nqMOjWdAFIXkbBQPdi2Qx1oqRo3qYlHKRnTSQP3ajg5siMOO0o2YeBfmZ1XG22dLyFzZqbvcDR3zoR7bfCcL7N9hRt8vf5cqLNU0Eyg/kRsWioWhnNHERHBmrVgRLzpCPbME7XNzB9LkUuU0/0hMH1A+6WdU+5wDC4jT4fSO7qltAwEQcpmUsM1G5PMBg4QvzXqgakd9TqO8HleJqriIthNxK25U3x3eFTXZECKVNYGtA7hWqwz2zCAfh6H1+X6G1fj5xPQHuEoiraHXdMtRLgbS0bD0IDWceLCrf7/lieCUrObas7dp3ZDpB9KBKrIzm8GourBcn0KXHrUY/lF9xfEFtNfge77uhKXuDhbny8YOkzmpYSwhHbtuWLrRFqJtN1Epg2E5s0paiC5HH5JvY58wwh9yFtNfzUk/MNFXRg2RmBNWU4vYfsuSvxDj1c2qHWjD2OwFxmFHp+ryb7kIV4Cg58kBp1r4PwbpT6tq7KzuK3dxyPXg3aExMkq/X7M0lXFihoEK2DkRteDcJHIpLXFkmEQ/c4rr78SxN8bbZEKjlQSn+OXMmwE4csWbpxMJScibbAK+GI5+PqnoYiIjB1rRpnF5QtqZYQVx47QPOwrx/hyNqLVS0zlK0NpsIkzhslWiuKOFxj4Lig/GdJasUSQ33Q4UQfshxVvoC+XOlbMh25Bn62xAb5ld7U0lZRCwpPrEQFKBfanh3pjroInnackQmMKzspnD4GfbOnIbjoV83Ct9c5inHoLCO6rEO56hyvs4K5zGvQd8mevDdwGI5bt/0w02+Z/O12nphY9dxXdfWLDkQXSMgLDHa4yzX1/hL9dhyXTjkAAf4Fe3+qF8f2yrNe9OV7HYYdFolGNmlNBDuVToljkgiIs3mNovr/RXDmMe0lBHkQ/ChDfM1Bqu4QsV+yWDdi4bkfStG7r0ZjVizH9MC4SWvwUo32RGXjJWrkRJUHzNd/AfdAwX27luAXNM3cWptGvE6mWlnWkAfwWyIV5vl5cjRS5i5pjknTCxzCZbqh3T3zJ+ZFUl42VRP3uoC8msQzPyfbRkfx7kSW+TvTjQXvaRqDDjpcXFLb4xiQfs+2Wrkr0+Xo7DYS4cDLenFS/mw+OW5B8y3yVz8KkDtwbnD9fuyWEa25H0DgsxH2h6yZX0H1RlU263ON1NDXvUOpagIidPmxKtPLOje1WZH7UzyzBHPhkaPJM1kPdkXy9AV6ZIZ5I+cfj6eh2FsDUXrBHsnUefhhA/3nYl1kKevKjHC5dZe3Wx/0JesZlANgMwqsnZnuJA0Q747B+EyO1Q/B/cBIoBPbuaaDtEtsPiLIs2eY72u0RW2jlLv9VAHmgg7WuWd1tKX8IfQ8TgiCYU0GCLyhYjeISgVBZr+JDDZjAEmQXpuGOeJvENwjkLTvb+E5i0G4P2MHgaTAfCExOMULqI3cyojaPHEeizNqOjwdwluQFysDGnsSyg44rGWIQv9+1lss/gN3CkNbSbyN9SHQxGUWfynvwo+RZ7fl1X2b1Znwlue5BYFjtccUtEsDjmcbKH5P1UwBPLN4ClgXCFKQovY8aHrJORbL0xZlH4DQDWv07d9xIwZTfPXR7wIATnEisI8MrXmdI2y9/zmoQLnS5BM5vnPCH98YTd9H8lqYr9UoQpHuf1+VTQlKvudbweQr3XtkKAfjAAzH5rnexlhBFB+fg2mmZFaQ+ZBqp5fuBNGPf/+hMfb6/KwMrMHxcsw9DmNk1mz3mEcxTtwx1YQ6gfT5nF8gJQ3BuLIW/Q1aGN46bcDPl7gJ+/OMCSHvaqJOXGannBxnjemP1wMVf1hEvnD6iyIqtblVz8bRty2i1EkeXfXIBDrAG4F9acaK0kArw1VdcjDh5cGHGE7Jt3Xq1y342OrkDJBNCTalTKHUtL5qObuK5K1fJ/ulAM25Xe5+LH79CLVBUE1JVvgCa4SsqNPFvzPvOC43E8kmk0TjK+W38S/Pr0vlNcYyXwNDamRD2AGYc/bK14NMVDA+RsqARa3mortzReWwDejW5I9Df8UNNOsQVAhpfPLODrDh7H1a/nhqM/3B/mhtASgiXzi2ZmSVxzUN1sUjvKbdW8cN8TWlL/+OnmEgsAkfVuwRnp8HtZjrOuxTPeSd5mDjzvaqdr7YL9AFJMZA0ESOYEvarMEweYqBMfsGYG10R4ee6vuHpoqlKRWCxs4JLA8+90JIiq3jH3yzBWUyYRUplm2s71vz2G6Blz4EjC/ebtPHurY3+vswJG+RpzQtpJVq7GB5lNC6mUNVMTt+5r7aoO4+pu82UYNuk9WGhHhDlpukq/Het7w7gi0GoJ2l28zNSkuDgAafcd2TmvQyh1dphj4uJkdHAfvqN+hmZhC2s/eopR1W6+Tr/bX8Nseq44MkXTdmVe/5eBHCQ70Lhe7Zje7tzd0lGZijoWucfFfepvJpiZYCzOgZWbVCoqcpfimUfa+gzfKqFCCkRSDNmVa5OXbMXpS5IsF6OCWJDPW4++0Ir+RFzY9OGi+FloPrvk/I8DrX2n/L6zLrNYo4ZwDFgHn52+wexCk2flGEIkNBsEaeZh8NZoy6w8Lnk68+PuQuai0NlivD0cmYTEL3APY4qbS4gxaZtiEXDhkDHiRGba+Vp/7jeA+yWU1PXQpwoePgFWtu3ExH/zK0mjKAwhhvCsA9qV1haCzRxHvg7Cdg64OTi5fTzBi+6s7M+nfp4KBXlZCKvfNKNXJ75o
XfVUBpE11ka6/cTtK/DxQwhBwkSkBTlGTRSuoYqXIcZwcrlmqTO9VvGHsy37WlbP2xzW+juOzjj/ad3dEt60z2zZwyho0bSZaEfJl5wcQdLycLwgYZilhnO1inTblX7saj/PPnxY7BO6+QR88LuMg4baPO+dZfLKRkt9HxrCoo1v63Za1bLluRt2t9oDmNEV/BuOnNGMQw0mtSH52pVSENCOfvWTWJvHliNfDt/2PAeDWCAs45sNK0UAKhdM6Y0ijCp6MQLHLWwYH6uIbFQR5Mi4ft7Pq7ep9pBJRTsl+PD2qwsfBwILithZNCrDvUu09f4n8tIrc4ApUYsLM31/CotnZwxCLzRPd67CN7fCrn9LjfGCm16tRvR6mUccI8lbfdv16O4hWMZ7x5qssek75dD9sU8ykSAsbYETAzgcyu243smxN2DUvZMznqxmQyunAd2f9wjbwoiVuseCUdEGrvGeYthIBEU980UVxZ/yaG7iotDWHOHUvFOY+fNX1xwlOKnHTN/pRirZiUwM+9g4umiSHCvvVrM7Q3vYXvT6CGLp3Yj0Yz8vN80iiCqFC3MVu4rqTN9AbVXlud/AgZBmt/o2j/Zcs/pLKQvuWIot6drkgEiNum3td2B6fhs+Q49URyeNYVoZOS3pdszHqDsjXSq8OW2Xr+TVt0S4ffw6sSZEOxgbKKqC0pTGtvYE5hsJ4g0IapDzdNQ7s/CswnwHFq6KYt0iLMng8ffqLAidvHi03pYiuNVHJsO8jK/8GfTG3s58PEIEvlZHdfqWC8+S3t/sCfPXXRLYWHK+/LgaFosN4fSrbqt76g62M8f4vQrcjFfQg0IE9Sd+eYuMUuppo7x1G6NTofsEPzFACSCqdqJkwo/3NNBgucMSbXRwTPZmlr6XJUUr4fBsdkyyayyr0vmm//Z3RjcA4a1DZKbyGvTVU9OFKtbJR/oaZX5iNfunSuYixw9RSkhGDc1sk6HzM6iO+xqqS1am4Q6yrSN40BP+hPxD7WFEUvl6tSm1DGjQP/DYuYxeLaCWJt6sGw04lCCAjudd7YAlflJxM6258yTNk9PkNQ2/8UrD3X8VlZOdQO8qOplTggOMSHpNYvRXhuGDQGBoMa4Vlr0EY+Wz7aWxytSj+ecMeWwoMyEQbw3j08PHKT/rUtnfbe9TAMAqvALVUG2kuIOYiM8/GjF49UK2I7b/61MQCYaIu3XzR29MJgjLu+3ZZ3QlrpcfQ861TOApWCOwX9frFKHtJLVXn7OfMuveDvSQKI98UNJ1mGBHD/eWScMnD7qFor1YBpQ0QRMAX6oEHs9WiPdTwAA6TlH5lWVfwkW+EsYBfmXAGFLip+Ww6lD58zPA9uItbUj8JZFLJTRhMVrXCjUbsM797NX0Wf4opFn0th/QJ4x2VKzvuj6X+mtFMXU0Banq2PrqTijL0FQIzbgUo3r2r1m6/2wsHK2S+RHSU9pII1KF4gjpYhUdlDirSJ+k+ADzhXDrqPHDh86O7b9qx3mLU9ExWv9Nw2bzE/kV1kA0qlTsREj4S5xJCCJzKhK9R+mXSWANICnfZVMPQ5Cg3Mz8sRaGMLb4aSunEwqnBnTPI+jUWS/Dteq3TK3Hnb/s4ozPD4IWok7TyQtICjJZjBkzkoOvvLPLD19BNrr6qxf3INW1tZIEkxYZEa0wzdrpz7rtOlGYB0pG0MGqFmE3HkRqkXdKHTP/M4TdnEyWzx2vbFMbm1dnHYpwmy/XrQA20hDCWJrwkCh0/ndsGPWxn3kTklUi24y0MEeeNbdxYCXnef29GTw8KTYPPHrXVe0XMSxbFtfWIdx+VblMgnuGgnmEVLO0qdYBUEoXz+zI6tSZZVdUj3vKE8ObagUsniIwsLpwiyd/wH6+2RzEKPlDLB8pNV0X9lZsU4x62vmSvHbYryQ8tagLZn5R7wFHWP8C38VArhJuzfLdGYQ6sIEZVQxMSnkaiSOTQ2gBTPFdE8aeHciXyrbpT8v0NWMbfaUa/vXZ+yHe82L+WRFr+oIyeR3CZ5bIw1vuNePhVNrbX1kCSAiHFl/YejACvCZHegj3TvVc6l3ocjfghLCOAUfKx8nx+lcdsWFjuP27O7iL4ECQlrFrtd045iljKecMF6I+AYYro0bk4NKlrnTTLquiSiVRjk6GgcaUqv5PI+IralMbn4/aV/voNTj2AuygiBDK+Np+j7aU/S/BaCPL9Snwf5wJSqywUFBcwPCAaQskkGReC4TqCHq1h8GcFhK6yyNvO6OLiY6pWt/IaEcI8auvcvPx30CMUMM0Lyl9nr1Ip5D+7jnd3zcerS4fVmOrnHXQnEgQJaRLIIV+qcu2flPcq/1YfSq89LPIcmwo+pgGqFKW8ojPE3rf2qZDM91iHhMVaeYfIWyQZd4I3JZ586tPhfomNcAc9bl+Fi5RKT7gYXcd5HarIBQKzfJ6gTy6CU7NiZNyi/F3YJaAboCO/mZEq4i6IY+XN0CrkavQ8wAOIOX89ewYfWDxjK9FXw6Ix2MhsjMBdTVPC5y6RSRKdSLkS3m/nQ9e0B/JEM6N+9uBWKPcW7stwGRxjgVxBgvU9fCWVzPHXU6i3LQAJXl2bUYikP0R6fD+fzYDLpsI7aOmKFn6zknGf4B5P+3AzB0JwwLDSx7vlf2d4TMgMpDoLe2eDhdoxjsT1Vjda7/Nq7cPV80JAHRPOwYRJJsc7Y4tE9IR76I0pfFpY1EoCLX91CgONf/I6QzwAeEuFmX5C84p/qwyow8ZDZ4mazI1ZgEerBcTnokwfYELkMPYdkRAkTC2yTJHKYt9ww4VIE9MEQvyIgP3i0lyvQFhg3NfjZKgrkfKYCr7LCDHLLagg5piGTjkHDFdsRbrhmkLwPYGsxeZHmLCBjixZZ/le081bHKovToseVy+saWQgPgZlR3RrsH7+wmr8SdrhnY/FMN3CHCc7+TaUxlyih+f72Af41GaPUf4j5Sy/8u+vuJwbzpmhadfZLzFISIHr7Ve7g28AB9crLEH6ChxHDnoFD+8yF9dyZR4+bzFZ2iCCCeRdVdwZfBTyrXk0QzsT8LU6MRNEhfj29/OdBAijkBP8mBW62LsDutW45iuAn3B6t9b4Qqyv+j64ExSPCQeTI16TIcl4fbNh9owcF+0PRpKG8NPvEmS4paAxDRhj0XQSw7JqmP0eDmSMX5/WmNQgGrFT1M6oU9wOxNalf5PmxnlhZPAPUh3xccwh0TfFeB3u50GuQ+PFlQLG5Rje/yJpppc4JF5V8VaG2Z4vy8GzIc8LgM0mNR9QzZk+nT+14ZYwr+Q0ykJ0QgWZC+hjqXaLHzIqWL8Whm08pEkMLBmbMxPh69kcgzNkZUtgqsQ6RWuxpA2kHlb8xViTQjYMzk6WFtOSh2gRlWyTX4j3oBj2OqlXTdk5NVXbl0Zk7APhxIeoS8DbQG9eyh09LHYOVRNta00Xj3KLF4fR7bd+kNj+GyQGDpCH+uo53SdGf49v7RNHgM5fFNYMuWWNx5U4S7J/u5Yq05rIx7A/4c6SK83JmQXbsakLDZIwGv89kTg0B6dg9AAgjG35BMzXkQ
MgYhyFomt0QaJcjA45q9ktB4Tb3cXvJc8DoCQh60vGzGkYLOqMo8HnlMp4/e0A+NWwYHFZGDMojNzO5K6G+Iq6JjXy3QqdS5x1QAUyDmNAzKOuP2tlkAEZOa7CZxcqrU3HG1jXH3YKkEeD3Tz39/GbRbCzadwxdhtRIzVZ/CLYDaBE3Gl9VUsf8zxRSr91Ie6B3uNAMLANiaB+NxYWOAf2uCzOwSkHAm0SO/huHpAjrhBeJo7AuawdQwi3fYPaMyprCwi1uYWhXcKBT5gF00KFHJ3RbitqjMew7svJtvTzefqI21PeHtoA8ebvfulX+TWu7bhaJowT6i53kL8QzNljyL0+/eSoosLGehx2Ck0fW7bcGQ1tgQFaZTF5PofgHk8BN6yrDWIFtCSY8/kzESZJn/Ji+zQJv2UQfkIPnVmH5igsP+3IAw1nAwpFtZgAbVZZggkUhmm8vWgagdhHuKKioeyAi4l9DPQedni6UuSlwp793pnu7CfirtgeEUHTceHF9X/6vQ/12ZsR7tB/sIOOLv6wi+gEMugEt/mSiDtP5IojF1RlO96cSgarVeaROeLjGhsRrC80K+OiwrdPINR70/RYJ+EaU0PmvnMIwHNmXq3YEG/5WjSZTQCb7/pTZI36IIaogVGUN45zfdBsbMmKHo4afrzI/9Vsogn/wLkoGlJCIyJaHAeHJKS6D5EnP7ilJ285Ztwby1jjL0Adn191d4BIWlAc4Ie5ynKc51szuvwy41z4EObKWxqPI35XnuHbtMZFG1yimPQDun5DUU6L+cNCzCwEs3+dPqTvnd2ZoDKIoX/tL+6FksIX4wnzLH9lSLQHfsujfZ6nB4/9Br71ojITVWXM1N51Utd4xGHieHB5n8uvxlo2uzzAVHbRRhAUpziITuLte++FHFu1QyTsmoX0Y7Fgb1HyPNAvcNvFqJBwj0KKBuFqKMP8b+gsHBWtpQSS0KKI9nK7LGguaqqhncdpprLTbBZ0j4uKK1oc8dKSN43wIJSvtz3B5cbI4dfrkbbnPCVilTXeyeF8Xo34tenU2QnHnpi4H7tBgNFy+xmIaZz3OuhtKnL1d8Xe6DuKeKlyKlcHpU/aDBHYminy0CC6nICCmH5LD6OAUamqHnCGg1UstTcT8vQoEdfxG5VG4v1dsjTYxcTHW0Pg+BBwI+NjoppnKYZOmu7q8YuwZN/PvveCNJjGUL14jzTAXGbBErLAMb2v7MsbMLjhW3RBIjTDggAXxVrFC4YZUOv9G2lvyH6AIC/QHYMmoQq6RPj1Bs5qQD65ER4CruGvoTcusuEXEfe+663ieZgXn6UK6QamnAB9yVkhhGi4Pdiv4XSi+t+q7Ab/aiKCItn/7UiWVwY7URpqAPwwVpnlmcrm41p/3vV6RTe3UJWQkbqzWvt8t6YeDBIuj8djf9y270Eb4J/J++SWO6sidHxGIwQzJMYxMBU3gmojxohgGEP9N3WL/nsNDAG6aXWDRc3GgHogE73Xu4ymNaNLoRH2hV4mD3VsaOBp7YjMUiSaVDtJN5X0XQKhTxHT9ocxp+bkNNizZFfoztvDn+8Ibm/DlGS4iS9Zrh1bP+LXt5TQfdITm0Z5q1keFlXQtk9tvIz4KVvKDhFJeet1X3gSbGM4TCn7CKo2vH3h3VcxHMiMy67WlnXyYGUFxpjQ+6Vzzt/3AdYVzvHlMhLC33EgfjVU4V5df2T341k82jAwAKOuFU0Nf0FKZ8SY/MKRraCHslIf1O8V5yAN5aamiWDXujiekuQSSWn/rvY1RkJu6YqhZDyYB4rXPQdbmi7ur+vFfnGY6n4jZTZHY+8iO5RthjFBPP3xRilJuicWSnO7iVIgZp0vsUmd90Ym/LgO21juc5MPS00wQIqjVh+BCjPjfv2dPoiU44F5IJ9N5w5ziq6xuiR7jXFbAWFEu+EHLLP6V7ULRLMSGwIJFqZNTzKKWO/r0lZoJ1UadPUWbfdrfDwsjm37RR15Rue7Qak0opyQs6e3xeYKtMiK/JYCbdo1Q2JLBiQ/sRT4uDR9oZBeeP1Nx6ejZDiNhn0bYbONLfqhKk/4TRxQ17MfFFHVcS0ev6KlYKCsYsqTOWnE5dgFD+jC00+q4U+9mRh9sDR71Ay2L1xvfJOXMywwLsx5mjwei2qUlM2dUP6Cs6nJFvcil2pV9matAfgT2rQfuCZ/MQjQ3fi2mVgnsJWX7814HYHlODjwtDQGc5YbOOd14iiTfBTr9Of/3316KBVHh0jxhosMhoS4AoezzIvi2F6XtONSQIgcveG3UK4jW6qqTxKcwRWCLHW8ws7Orxf2rG6MHhYHRRd3aaNkfblioUV1OmieRCBktyygOIJBYzA3VtW2RbYA/WcW2eAQGKM/2tVXq/bSnQDWn9i/hTQZGin4lqfCVadnxdce6DKo8wLMx3R9ibkx9H2dxguPLY+A/OmSIAaRUI97/fQ+6sfCce5lFMyc69bD9AlV378EnWfPK1Yamn3B7TMJELTCMr+iHTEr4YwLwAYcdhNo9Lu4+psAKmRgmC5zjWtW8nQpL7jjvRGzJDgoJH9tBoG8iv8E2Tvtv6KiNS/kOfTiODbDvSRNuOXnuPtnucaSkXPswDlqRZsMTtcfI38wR8XX1rLE+YtbHjy8hd8sQSVb0i71y77P0jhhRMog3z06gEzIYiJ3KUoviP+O1uIJAeFke6lppyya7argUTaxViTGn2XsVCIe5wAzT10FAYnh8SLGuQBaR4eNr8UgwuwiNL2wtapWwsC//VNr3ue5wRgYiM14dMnaOKDZlvee2P0NFqY5z6UJSLcDLnW58C7L7UsQETaEWakhZ+toybffH1gbCRSBUct2YjAkOaw9rvC9kgYRBy320BqhP/9c2a/fiMP3cli3VM/3v9ylE6JlvJinxAgPKTCZOY3rFGHTRnivXlmQVnZdF4Kmpq5anmBPMwxptp58BMMoQBA8jbzBcFrczkSOnAVOvyi/gfayAk8O4q+tynDX/HTdyGhNYZLk0kiaOvVCf3NGf6OTiuYLU6SIJMdbzR98AUSA/lioV4e1pziYS7mAk4YAuFa/YWJu+96A+aNM23JUYUIEemtBfDgHdbaxoJolwddR1tTa5+jBuZV4h/J9ZvIqxpgPk9yS233rvDhORnnPlVmAFPx4t1Mn11xdZMOhB57+i63PwSQhRVSKIOxrquoBtvbNQ3eR4QQbZgQhEf8GsgwztH2dLKtV0OlI2MDpivO2uO6HlJCszd7yg2u5/fEnQBMA4/iAr+o76eW43Vl1XmgR8ntETIumd05c00qRBPock6SlfSS7BUMrNPjqsKAbZ3ZXwKkgJgQKyN90v3yiLATmk4eW37uw3D3sFlNaWmv3jppQU0ZdZ4suxPilxMNv2ihuq+9us2T4f/U1Z+YnXjezuT5IkP0DpeFNkcjI7BSnXeg5m/H9gTnNwwQVtgkdPvZzrZIvB5QfR2csjuXrcSILA7+Trz0Iw37NxpbGz6ecOfslGFjFIBQnc2cSxlGjHPmsQiIqIU2KD5FLEJL+Xy+rqzDj9Tykj+Pi/kLgoHSyg
gpGkXP/EXMGwJNcqbut2bbdv/dBMx6u8nEHUv/Sc4fyLO0cgb5+Q54iSZxhP3IQaGO8Lp8rLy6UHKxlcDX9FGvXoYpj6VRPsg2gknygKcVdWbv20UZFyu040NPTpe5pglU2kuZi8zojq1yf+LY9iAYAZP6lSTqpGbdurE2NH5i67QEAw1zkdO8AbLlXDRfwoWdn9b4G/grL74ALuPRJG4jxYHFECql6IE78l8VsDpCIBmH7EOcP/B3yq8JKLwN+6mTEas49kCNpvr+ME3qCz5qopLvdOYLZz+PIiaJs/9Y194EsCkCFkJ+47row5KERbM1zbToG9/VoOA5AJL1ZHgYE1RZt59Bk2/pEuEHKIG7vcUhSI95F+tVrx8ZPSNgAHC1IDQGSQRT/bY0XFv/6j5EYf3FsUz1Wr28JUbvw1yzhtuzCID7usI3QVDzjG7iBsTFWAmKJ3pVhEQBEAcOY44SsLLeot58A4eEJnJFOT+AxNEiDlb88IDkSbeR/nGPTEEz6UqZhDv7CQJqrjXOYLDueW/oo6PH0GDxIo5tq48kHx6LLw21Gm1EAKhevhkEhAo0/Wfd86YrE1+XLr1F6K3yAvYKyqTV9ekMAILnLejh5lgxAaHkiPtggSeE+AW943nnATxT59YAc+mF14no4i5Jk8Z9OJfymyp9ChRHJEpAC5R9k2ZS8ISlNStEK20L6NVcL6by/LHKch98bdWEkhsb9kNVFB8UT1gyuxYPIoRCDKJKm2YbnbL1nHjTVE+xIcwWswkkC1UzU38AancCAKIIcSBX47YIGbkvYL5PUUdMcrbdAHFNExbpmwYohfctue1vRuNoXhL/4maIWySPyuWZGoCuNqmxymPx1Tn+yY3Kw8S9aKd9cF5eKPX3OeX1Tli7QMhiQg4rr442+3tE8PG60ZNPrMch9SNX7px5PwoQviz8SQgfjt9s4uiQebAZE6lO3wVs+cDYiFig8ed6eHAuAIgos+XE6W1FMDr8z4czaXwLiW/A1I4+lfSBPw1jSe5uV7mj860Ou8wmyd7rvInpzV5fwmK9u3g0CDgUBWDwixd/BL2AeD/LugLSa9RmDwa0TI0lg+7yZfNGOkBulsXKSesARZNoFk6klsGADbu6GbN/uyH03Tq3DI8N/ngqqIZi10SEaol/zK0Cvf+mfc4rtTWSGArPmd2QPi1oH6vJiX+NrhWkL3DgQLpigF3nKtyQGQlLQUEAnomr7gTCNq6Z5WUhqJhN6lGS7SRFIOyVFIzAzJtZn/z/260phyLeJK1pJwXv5EGPLjiiHf8H8XJWW8Vja/wLHJ9zQc/pEePnzFubM2DCNDTC74r8eBDOp68gVFVR4AcQCqPEwZ2+4zy4t6T+ermLRVSWLfk3PcRniEtxhhgZJcP/6pnJu9+BNzstNoGrLWlurDO0fCzBHFa05Gvk2bUewKpMLfnOemj5+MT94zv8mAWIiL1V5ci7EqgTLV+5jKZPq6uEPf3CFpTtzH3aPcLbZEwwf5PzEvayYfykC7YsKQmOw6ZyermaPC1Zj7RhzDs/QbU0+9pkIH3d2xIxG2rrm9ZEdvmHdV+Pv0mFb573dll4s4+LFz/jevl1MWB5GVdMEfrwrm4DhxwLx8k0rpPXBmqyQ0b8BrGVX6O93yip9RZAptjiR5HXz7W9WjyB9zuqVaB68JG7CYDz62z0a8EVPazuEPKDmFZwbc/7KFt0f8ruBQiVsEj9oujS4c/30CrvqfpyacvrKMrpRfZ4/k3rJ8j9Hw/6hYXBINNqiqiJuX9wVZvSqDIzQgLsYr4v/bawSqo/wqDYQ8IoSM3KnIKh93VjyLRx5lLj5tz38tzugSSvzJPp1DnwCTsSvAwUfq/4eVCUkrVnrDO34Yh7AUa9Pb6l1mnSKYxcb71x23qiv1MnLqLBNlXfrAjZOjGayG/m3e6zOOy1gdSqaT8KwCc4Pp4lBp7YjJPFJoZlBPgBE5VpGseqUG0gcvs/UDXlr7gaKNnnSF6+8z5juYM/iV9d0hP0qB4ctuNyMWv1WXUKlwiOD494XreqESXGFhOhR2x06PzXskA4Ew1CGn7m0zT6pdGg7MxDwLgo/Fjf5Dpq3p7llgg4ba2mJsIm9EdNJc50p6CGDqgdkE8P6rpjbTkMho5hh/Tc6wcGvT0wXu+ImH+TvfqR80f80XazzHZw/a6fA7ybEV9HY+7dB1TuZwMYTOjwU4MIF95SIVcjc+WTeYyY99uGGgN8TM3PAFDKjKXXyqAu+Gy3/mXPan+9ljPNRzNY4pt4A05etYmgktcblmLXDrcdctO9kUVr8L74XZBF6WNEuRhmlor8/3eguM79IaC4JTrrV6N9rbWgVRw6kDvJwhmPmS3zUvicQnlb7mjAJg/wM305lNxWeI0SquL13SAYVpdb7njCNkwwdZh39+40JtxXFISOU8HlZW/I7HdtujXEZr/2k9cjUT6obacQ4BAPnfik7fBuavuRlTz9CTpmU9F8cw7eTHd4+Jse8UvOdRCN7M8ECDWBy+/UqJax5k0rEQ/0szpdJ9FSlvI28F/3t+iIT6k86sCE9ViMlyLl4MtvguTzAAMY1bIw9eH+P7M4Rl9BVkA/gAV89Moe68OV1b30SO1s0ODnkCM3C2pruzi/bmbl9UECdomeADKjeRlAkD+r4BoR3gknE+N7gKtkW3Pb90BnYTXd/GOUHOHV9u5jXY9EBX6qMMm2yKDhAb8O3ytZUQkq9lzq2wCpLmndI/QbN+FWx6oBcGpwvkDhAugjjBOSxuoDjwH7V3DTdY/JDX93v8f/6xs/VST9UefmqWAZ4wh06+xboqjROu7kXLHACAgeA6xRQSNf592JqzD4OBsiQImQwSgsakmxI05QDH8KE2GzyfS8ZL6qMGS8PCkeItyIlNvyxrNoiIRF3Q5L7ALI/Su+J0j69Vs4I9Ivk/Enbf35dkTT9XLyIEraIM+8H71rB9T5Yp+ve8qPClzNaE8XfhDOliiLUGUhAZeWIryRkxKSwRb+tPRMwPxckp73PDuWD4MgK9mlvp/e55RQNZCLonDEnt9cGgmj0Wh8eBuWNwUfBYJkAu2uQjMzLSTBKeFPgV9Hu9IFb3WjvNxV4fGS6nE7u5/ixqqPi12J9c997bVymAWHrl2HDyyOerY1oVlPZTM5CFsNDFShgyNrvTt6UpzoRLv6iHugXyJi5Sh0mYL954oCUsTxqWhJRoFS+0jfuj+jyq1ozCfdLkUa4O2hScg5qjjhG/NggEIJ0S4UALxmOazeu0fye1BHm0Fu90GzsraumvV8NA2o+BIki1JhWuTAnrbOK6/j9Ml8mMGmVXUrJso3V2mESWMpilrzUH/XFw2ep4+gZOWDuNYun+25/RDZZPQxGr+5agVPqxys9jr03Vz6H6rLXM7psuaZ3kQfbsr8OOmyeKfVj7TW+fe785Mx3PJo1uvkCU5JounzugqIVeFJPPEtgImCARXLNovqOtAoul1C6qPjKeK4UaepTeWYe8XdYCwEYiUq5IayNUES6TwPiPe+XgXuEMZd7Ko55uWpIMZmyykWNWEQg
zaikvYMgmphRYRYtm6fF4RjAigbR5rw69Y/HyHHBIyA7qtNo8XqJk/Np66U1nz+PZtaZL9dog5fhZq8HDPsSSZ60fDH7fshXuf8i4iDlX9V6MNlCEqoFCZFvMApN9E/SPf9NGHghD+ZBG9d2Hw1pl9RvtWuqQNLOKC1XzdFWDv8+KYhiQ/ruEmph8jX1FqcYVbZs1tXAFSAap7dWGDh9KeuMAePESSiK+8GhF9VwFJeT0L5bg/tLxZ9nvHDabWsPpbW/zi5QDjQZi4+JQnLo/8plXiYsxX+WRqSurIvbsh0l0CcBLA3mjfQGZkI8zrGjP4Er/X1yC9BD4t8wFKg18cjrjfrFqxiKiD2TbHtbUOPxf1aJBVxv+wu8s8z3M74i2Wz8qLiZT3bn8Yejx/yVcMNLPyVgv+k4VB35OlSIh4hmyC4mYKykh9vfxB8DWd3gfPwlwEAnpWC783Eez5OaXPtWtmHZsQIVcZNRXsM3CjDOTb9R1G72UE5nW1UECMxVn3Ri2rpuhtjf3mWhGPN0V/CFXomH3osabAf1HKP5wIYLXb2ZFumZYdpmXiJUe4BO98NdN32i7zyIfrOXy+AhUW6SfwJtVpcLCq4ZrxsGSxih4aCaVVz33Qhgz5f9m1X29mxX+A5prXiUYnd22eoxRhOZpm+XFqwhCY8jQfwmNFDIVospOSEJjOZm78Fthq44SuLQnSOVSQgwLduybTLYWQRBHQ1C90qawvLkfFKhLT5VPDPOq2Ff43cbX3lVWFVtgZlUr8yylrgIcEQdBoXH+DebfJbcY2Nyl7AoSi1VFV9VKUe/NTw2gX21Fbecm5Ouxe1X0Tof+FOQKgleyQ4TQqJKA2wN+T6uQY1vbIlaZm011kXSyOP/9ee8EZ2Ri183segILZmpRC7SBYy8GsfwDgwErgaVolq4KH8bKDKyP4B6xIU2ibkAQjLscRrvYb2it2vjlKiz8a+vk1qdIIqqVYvkqgUpHD4GQ8MelapKtEMI2U7ItB1glqnj9dZfvEUeLl8FhpCWu57SqMzPyei9aLLzWVYYlEx6wJ//iFgm3zn/kHlu0fSKcIePcvf0tfimsUtS3+JwrRoeGedDrSic/1rbfv4gdiBAcY9V5wepbLL7Np7uFq/pHLgkejh8Pza7/BtnJIpx55kwXSFEsp84yXWD7ciAUCllxL+3rfy42+a8suoBt4FmwKEcXQQG0R/Y6qZa8JiSETHX7dPegqzGTyxJmS5Cvgc3QKZpbDx5dGXMbc3nn1Utratvwvs1NjOr+r3Dkxq1C5Xmk8CE+DtKFU9GWgtt61q3WM9LF/goB3/DhuXsXsIDBZnnb1PjacpP8wDXetKJp57FV5lglow1XpDOUoisvlhu97bDwFAqmggZ0beSdz2Rg+KxrN7idlfZDa9jXfWvpcdppQFzinB0lKCn0b75MwgO04Lr4wPsX3S48AgulFG26Gbe3uq9SoRuhwOgcrHEAoLSKwFpPsv+nQNZPj+kqxDyMKkFxJ98dAVvSYJALQDa17N+vjxsad7yMuznioMSFFGmQJCUdsyS/UTwYrarkcG4wTJj/GCEjOdhBAyTEu/LyD0RyXV9L2U1W4Er6s15Kf8gG5suj25DwE75fhDTrxdlflzf5RQQCu2dzjtJz13wVGqTAOab8QM8nKwevmo/YPhgR1HDM+tOua99JGdLt6aB10K5s0OYbn22ni8n8M+KioWmnZUocoRwnzHyhq9mBLCjLiKMOEdsPS15npfqYSvmO492B4RZ5AjLCZEJp7suDYlRGCeFRRmn4fQ+5BL2R1pO4BHL5E6wjSwaUeVWkDtVvE/F4RIqi3xWipZF6hoANCKcCEzxeFASQD6KdY8fk0Mf3Cldn6otk3Sxv3dD0FnXe5/UkV8U0mnhwQBEjZX8kvCUBUTk8nGXYTkr4GidMoPPUCf6VYCWE1/JNQpre/3CPxwW1ayu5HGLD5JIqy72FmDjZZJIibSeh7sEAOTxAuITOPkObKQJsW93cO6yTzmQI4AAatWua1t0u9/YQkWYuwQp7AKq22qZ+EtJEujrNqmdwvS3iRCZQszfiGdzD+iigTfQDy4zgVXgP2iHNLLV2jwQTkFDsUBh5BUmQTt7VSpXAOIcU4Yc+x28oVwXaNPNX6/G6DxjCRFpl0RMJeLeh/TadB69ggUeFYvYfXysxyhVDhoRjBJRQEXa1iVp8j5fGQQvNe+EzELRdZoP75b7UMlvERBqU9bsdZtJZPvFn9KHRN9fP2hL4fYiE9F1n7NBooeto9fyTa2XBF4tq9/WxM6dCiL/fcazRTDSaQJhfrPy/RVi+SMG8K/RUsyc5JV/lGU2ZKFCRzCpFnSSsgyow/9132D1y1vCFp/WTH48cHVY+NrTD7j8nhw3pnu4ogRitKcU1Ag5PYRZ4ML1Yl7Tq54k1laDNskGNhymdyYEgrK9ije3vSztI/kP275ix1nMA/m6oaNWUk07VSYRinf5WdQc9S69VLbPcdn7lUwD9P7GSa94Q7GzG75ro9u20RYTCI/xfv6njsvmDzT59whR7xsU9/xq2HBInlCX8NoGHgG1sj/QC4HpzFMOQ8W2OTdoviLMdtYyB4QcLPIxc8T/Eh5oF/nkxevMqWI9d+0KIs3oNmSDNBDqABttNwz8S6Gq3m7EhdQHi0LnRdin1C12hJTjcB7TM+OBRiFqHqVL6F2eI0+DBTF+ZbPtw9cehn0KzVikHFORjHKazbZJK0fLIPlq3WjsDxPy1VPrgB86ugEfTDDNt08UctdnGKCfgAQCluoyo73p6G2BYHibdW1emN1lFK4Ip3nWuAJh2sDPejyFv+Fq0xgRlZBl21g6/+roPtcdXa955ahf+D6VLLjySFIzt2yPhQMgYUrPRTxWYXg1KVo6RY3LdoGq6AsC60hYoa70W0GTJDHfX6ZgIU/HYj7ZDZTDNNRi/TFhwoXKqrQe80Ang31P0q62l8hTPO5MdA5BMWXnhgxppfYEF9v4zbDQ0oOgaJjfCBw7lSZXTLwy9LdTluvXAcMxxNH0dVXtjHG1IwDo9BI4A+qFT+kB9SCKE+6gP4VFce4yX6zzkTIAkR2/a15Qs2rQXKNU9lyS7go0pRwPhBpq9r18W3/sWHt7EbZMmXJo457wskcufHmMbmoaDpHS1H8TJoiYl7QZwy8RwTs7IjPQSN2mvkfCOp2F6YCZ+VvY/daOEy3/kbP4U6AzkwsQ6Q3c4hYr4FMqO8otIgH8PcIC8+LqbGXPbsap/AGpZNnPHMP1hMPnXmAbkByz0gdns5N+DNSL2wVLfIwDKja/zTKA4Jw2uqI2/yi8eaZRhHEISWKzjjQBKcMgIVq89c+X47BVDjLfY2hs8oVilrsX4LWfRWmukIgqYsiC0MizLNH0JteH+N8cptB97IuS4+CQbXTfvY8YTPu6xfHzEjiN7soNXHKJVsf1yPtDz5mpXgP2Mn6jAtALbQcoKVRinRNCjx8k+cO26N3yxgMEj+nzxbK6xynyEwyBh1+2u2tFBjhJRslc9m0v8KqEEL1paTG26rsMryxr39Y3+bT
HbdvMkMRq0OfRsHZEznW5+6mFmUjn1TjfFLXHv6nX/gwcgflrMaAfcpyx2+OlLiiuzt47EY3Ciy8IjEgWFZAOOVpnuem/bm1osCXzHukgXpH3QyCAW2xTUIkRfsDiPNF2y7DLs1Nwwg/PrpyPB4xKacgJfljskNYBJ73LO0E7NocJfgxKt4QVrDNjQSNvhFOWpsLs4EIsJ7+FvfzUEr6pC/Xlv+MgmZLidZ6IE2OZNdfrQYhuxEd+vuQfDOtyFItXy6LcEzxTj10FhB4g3X2/mWXLFZybaYK9CTJ8UF2vJnKfYVfUe7cFaeKaSocpHjUIhxdYKi63VRssMLaKVxcP8G2pa4wpk8pdPOZzRgYAhZ+/9zVkkSt3nsUfJLo9WLLzjYBT5vZd7w+jJxHzWw+9Wu0/fRUejDPlZWxNvUbPnc2IXCWAX7EmFURQWNKAIeVDLZ7+iSUubfaMSlpwuMC2Ev2cfOp3At5Ptquk9JyeJm1kiG9d9QLfdcvBjP6xXwdigQ5Wx8lve8AIBZkVLCviXvSoJ9KKiUMO6Tv+4pMyE1fuOb4Yjs5RwWsbFdZwdFXGL0J/PU7eHpWAeEtHZhI+Zv26kc90fDHvx2IPb5AsYxUGBDCvnppJY0eb5DHD/tifJDVSmNwP0ieHhnf9GAEFpV9h7+KzKzjaSHAGaT68kHiIF8CmYk1MN76IoAo4YGFf9AC3YmN8rlKcFUYcoCA+y4scxl/8hYF3TZDXEJQt5rLrOBHjR64KROc9/zwOibgHFdEluxMcR1/1Nmt77ePxVunPFVvuXOGpUykLNs93vItXnifynO66hYJ01uICn2yZlxtaKxrwK94eyLZkS7RbvqMu267LEWVd+HMscIkTuiLpIwggfDsLr/p548/XnJMNVfDxLtaKLnsbt65k4e4YtkdEXlsrwSK/ndDcC/9mmHvCxKeypbJxcZi478saZjW9Gnh4fCffJEspY1HPemROcRVAAmyTV734mBekXyhHiA/M/9CF800e1u/C6oa/lrgjvyY37IE2OaRU5EC3iXYsamLhmuYAGrjTFEMWrYJY1QEICi+egugAWKGtL5LvnNM1TUZheTehSmD/3mnvh3BL0ueeH0yrlXOGOmwXoUo/LwdtGBAfStd5xGojrFwkKoSj6rhl2sPLNkr1ayzmR/+OuHRAqacSpX0SJdSwbBjQgUj+xiHQwpT07YCmU3zVwEkU3TE3/qKg1ZfXtxajp81Zi2gatOyB8JUIjOOaN3D4q4VQUvCcPAT6h/5aaUOIFshpmG/qlRqVKc+WYCkpCGZ97db6DVGmP59hNu6+mKsDNhbCxGEc49EQvlfmLoKe7Rb/fu58QjTmNJZ02l6KWc19v9m8VFWqpr+x8kUYg8M7vIRkmHAevxGGGDSrV+RmECNq0d1OrkT9aK0fg7XurFvU4hK85/NbNDpxbYnpGJEIfW6Lbj2pyXqC5FAZCsSos3aiVFuv6wfZAOF6ivuu3UXNM96nMQvZ9plRtsMI5Pu3Hvezzuu2yP5GDd52OwzZl3BkgCpTxAjJQbPPw2Z3jkX21oRiP/TzV27tWpnoHhFoX/R8NweIwTxk9AXgtMYhZasigztoYj1O+UewxYZ4BaTwcarWcDTUzceP5RCEXOOhnhKUN813fAYvpBQLjewcGyy3FLeO/upQs8yahKWzFnTa6VKmoksfKOpJwwCHw0ZRkdtdzmml3WsrYSbPbbQYMSDAmZMAKRVd6w+q/35xM7yAKsQx0Kpzm+8W9+1tE8htIa5JUqG8JvUo2SQdFY7ubL9EpIjPbb7E0auhnTs6CCDhX+OVHnpLGX8UZysqY86srOkQFP/wlL3E/8KKDxdIL1Kmt31nQT5qFpJjq99E/VKZAM85RTm4dAIEZ7mONwM2SogPSFUHFaKDA5ksO8zCW+XlO4w1qCl0MuNAoY4Iw35LMoeZM+/n45eOGqd15y9XyOqvDE3Ilqusnt+BYbigDIm7Qrz+aytlaV7djecRqZNlBJ28Cg3or3CDoGkVUGqkweNrLfF1B8oAMiFpoRhCEQTCoMPKQ8RJWuFoav8R3BvfXr8ANBa2sX9aB2RV1o4w9f0yIeEFplY+x+COVz4loDJh9844kJhfWy3QqAbrLzKosMf++NondyjEnxjJv2Pt/nydFkluFYx6Z8+C7IwSlE/qJBV3Q5n9Sp2aaXDfXt1DlcNoGLMB0Rs0+HUCyJFiLa2OCpQRobWT5N7Hxv0aLv2z4/1EojMlF7pcSZeyMKkKhHdHEKMCsB5bHCFPXQJj5LEfhql5M7+FWjFkWfQlbroNWKGFguYI1rx5IBPsVRoyVvkCMtBlZUZwp93bxK88QvnFJL4Ovj+CtynALjNvmxU0J18vSlEMJGMFRgzZjidN+B3U9ndUYIWri0thNBvsHfg3t9TalSudHi+ttUaYaly4RzewyiB/1sDf3br2G+HOryZOcHB2hSadEMyJtq1eOVE1S0LnAIuHI/wSrvu3thzYdAS0Xm6vawB0sJiux9ozdnRlloxBW3YX8PZ948LVb7R5bKhYxSccnvz34SgZxZkpK+nOFyOJNtGc59+EfCG53YiwQdbECf3QBPHClWKPo8/IdPm6hccX2gOZDbHSDlYEyI4TVKwmrG+wR5WV5caFkEXE0lCtKoi7iRWxNXUms8XqSoE5mBCeEAV9HqFdcxIKE5TFK4vH1XTK/g7qokHyxvwbUitC/NSM3q9w9SJkIdZR7MoUAiNe+G9xho4X3SMX6ps5v6ljNgBfzsplYejXD2hH5pkScRhOh2+Bg6zl2IVSPo4pus78quHAUCHprQNkVv2222YfAmcqQdEf/1yXjBOG8NfqvUV7vdSDikPNLcV900F8SJzNS4feApH+xtceoepmAhZ+mlcfbWb8lbJgjSTyCrVx1THpSLG5ErxSwXwSeajZJCIgjs2Ph4PxZw+5Bg9jak29UwlqmSXpYS8lto9PpGhL3w5WGRtFshPVrdfP6TW+aAZ9q4cwlz5fXNnngsq5HkSBgSWAvmopTO0+hBXW4r1Ugnyei+yTNLudDWwVfO+/6CYIgfPVLvfi1oy/5N6d4V4zGF0Rh/oci/Jv1Uu9dth4Bjkq71FSANg2n7q+g9BMaDt789Z3xGDjCKuAzUELRPrSQ1c+YJNB23OKdWnTEyi03qZjly8sf1MfgdPOucHOWwcmovFeOcomcyC0DCF8YmtkfBVHtxEfRetU485eJNGTv2pDGl6QUVOHlSi9XjnbnJV16BAaPdAOjbEYBNpImNhyhxmUnPYHRTVdqVynoPDR4jlM5bzksK2DewRbqJofMU9NL4a++IygZj0LAE3sx2XY2nR1eXw+wPnuuukd4clQ5U3hs+Lp9Lc+r8BCKmhU9odrrSPR8KPiNBq2my6fAfjOfv5tuq8NLaHw/rRoFM6MY2+bB7I5AqO3zOvqG/M5rv4ctHbQPeGgbx/Sx/4lSfs4y1GrPt5KP9Mcjk14ukoWClzCQjTvk9WlKIhENUUfunS9ga9U2roZSiEqc7OWtAdpB0UsyWSa4rzn0tiggBVJ7PATZ4
bTvxbBRIoB4SV53L25wMYRZSnJjm6gDtKoehikg7Bqv1i9dOA2tHHpghoSyjIpsGxyMF9Iv9dIttY3R5XWB5G3g9M+tzoXEuM5wVh6/9J7J6rgvrk2wdsbywie9lqOil66BSXsx5elp191diTWvtr05ADZb1OC0VPasM+z1/WwSOjpF/VULFhkm4T6dr/5PgDMxsZEhUJ+Jp9uVqvgl+GmMlkKG2KbdGs7iIqrlSIdMTRiixKSzYuiImIQmxXsIWW3D50fo78Timrz75V7mfhzrFUi4NO2H+3YCYDff05FbVQPR9BreyCpMOmKNTsLRqkfc3kjWt0W0sAlObeHfOoZn7XwS/l0pRopi4f9CKoxO0Zg+ERufgLD3CL4YTi+vHmAeLeP2DaO/uWb9G4OGQhOJwfiRyqDbl2DydgNcXSFRoltidk/U6ys1Cd7vsMNapJ8GFxFIJkJCoLqcZ3UoTbZtrc1Ebo0xdKBC3zJmGqcyAUXdv6yhaKKDNB4KvJ98kYLaFZkFbteLtroMcMeCNerVCkXIb80i/meUmlvADY6h5wcHyEglUdUe8zB6gtttnOzJLqE8Jw9P4aPhbhFM43KcsH7UDBry+ESxDjiu5dcpwbmHFM/og07PBlCqK/ITTEFH0LYE4B3P/AxwsuErsjho3FUKHfODeYiMqDet0LOar+dVTe/UO5/bHFbOvdYPO11joo+u2ji49m9RyngE1AWDV6QTq+bKnvvfPuP9AhyoEA+0p5/XbfTt2t0Rw0D2z9j07MJNHTym028AGTO5TgrnzcAJndnCkKE6tFTjiBHHorO1thB67Mpv167XjnDaarz8wAXayxD7LzsnP1lEkuUxECq907K4wxgG1mp9YEa69oorTRX1WbayHmLXyy+4/jL4xFYe56PNMboXuxK3wEvOJ/DVJ/L+G25AuvY/xCYSOVMKjIBwcOR5xZHn0PK5DgSGTZ4TKAPAqOExtwmBURb+ezaObfcNgYSJzrjRv29QCQV7RFtPJcD9M3TUX7JNAKnwmx6HQNBTUxUFzZtxlTJ+z4wrakuHjI/osPCXr12p7kt6kWbv/RlOu2XoWGZl2qucqKdBNzJpLoYP/k0i94mVV/ThMNOeXDQwnGeGzIyI6Od/m1ZVXuZsX7Ny2MdVVcP/VZm8ru+ZhVQtrXX8JkV6G547Ng2QXnWUhICOsPZ9WdfWXA/KMIKKN8bXRJXEFtKEwKzZ8C+tofcmjoaq8hrXY+ygmvfLtyHWS7DEVNV9/2md0hVpoFeKKhhT+0AevvMyxJKzvktgtBePP0y1PLiOnd4LViu57sf8d9HSMb7lyMmTwWs3kwe4xNpdOYMx1ARKNKJFDxK2ywnPnPX0dSKw6Z4dLaCKrc3ZWcA+1oIvXZSZUr4WoW+XxTxpWWTojNftuHIc09aP+kXEomTSmvooL9jf6qxYvLwaxg588D99WuLhrR+H9czFueVR1rHY18ujuTrfUkiq6nfdhit+GB8M7Rgmp5rHf/qcWhzNpaXVZFLmLEiyvTjqeV4AgDx0UAKwQ3i89XqAe4iqevJbHTvmwGTE131F0V6zKsh1rf++DL3HMT2tMpV6WNqe2uTfnZzdJPn2F34OJbmS1Hf3IIuj0rSCcjOCIx9Ia+hV2tycz2T92KJi7C7f1VS/661xORjb3tEPLdqhSvCtiC5vg57B9h7F8FPpnh72qO56pF9aDOpW4oYRwkx4dBTO2RdtkbXdaSvtSXce+NF0VISBLwrA7/1cqnvOdifjUw6Yi8eRrrxF/uREM6hJE+40ZWaX+FvKKT3q+sW8MCYQcmJyAXJkdz6EjzGGg8pmsJy4OE1Yko/juH79oO9R6wekMff1pOdfHCU+rnAG9THiwwZN8KkGd8dAN7ElZ/PGMDt6GyZCwEd2MNAsMMUjKfcunOqNFnY7n7Gd8zDMmJnQ8t70l6/EVi6xzdWuVjOyr8Yb4YTZ5HusVVq6eCZb/Fgwc1qGtW/+iGHsTkYTg7Xb6jTpFrXbr7ZPyTmweSas1tSNup9y2xOVgh6SGvRBaU5g2vSe6QShpchNPD0jrsqyPdvJxeM4W8049ulSpg8ny3a4rIDEUHWKJmU3KosHSzn+uvOqgVZm7E0+Xy+y4zTns1AabEp6sHbH/40iRdYFxeIThHnk5nZXT3dYW7L8i/7PjUNb0cPxuSbUlan1HJB9V1YhKrcztGLxhz713VGQWO89TXhyEJ10G9cgn9RGTZYUk0hUrNILujPajqMwKlREEWo8G39Rxm/y9Vk/EOBRPULKEj466FJzQTfcQy+aZaGqXUtJL5FMK+ZSIe6XzXZa+NHbWKz+NgT+rKG4OEbDgBchKR2lE+5REFbV5Yst3x2ZenkSOex2UViNP4gxeHrVWjhxCxm1StmzV/srov6zhG5g24Ri7tHTuIFQzIeCoNVCgYS+hymPC29dnntHtO/7RC6lpdx4ND4LM0+5eVUkBtt7yUfT3cPXYWyFiKFD/dp2Q/iny3HqbOhBPZu3AGOWTzx0OR4wGmXdg8V+jLAoJ7qAip8f42a4+QJiMadGdcHxIlyzK3pWUwB3qq9uCzqelfagOkdkEGaH1fiqaRe98jdOLTrDFjITWA0nkh1ji00D9q7vu+5JH3vTLLiTV2QZbdmEtp/91IBqsE+pBTSRbrZ4Y/YU79Nd7nVEtl2wq3rawApGSePzkOeUC8PXlZ4P2aU3g99/CAp6vbMUWSizbUkSraUsZBty3qnLQSiT4ah+8mgI1DjWgTHJzwkALu5IJUN/9SUlZ3CJUupM9an5bNJosX7NHINB1PNxcbvz8l31nFJ6Y3VbJMDPK7t6KQ3wxpUo60Im4WNEEBN9f5SttZ0iEp+7aSzfs23hlH90fuvOM6VSSfJE7q/ru7G/Z5Prwor4R2VS/1VozoUDNaFEkPQ7lfWL+4nMfuD+jU87kRG/TpJfqs7jHQg3Qi4n67n09NliggOyoKxg8AO+jCTW9k7Vo2drild2D46LyaVVOctYKgroHKXv6621pm2EsIyObAQzCBnSetxCSDKxrLfsjWR+ZoaewxVLpOqDxUdVMe98NzoLbsBlwPR6r8BARpQ9bN6DOLjI7kOTF0UvPTyHfnzCakiywB0l2gTx9nw4eYjau4aUrPxV9VLxn4jUtOvCL/F4FvrswtA4Q1RnIn6+InyhMZpkfCecVCeC+RwMFnkaIOP2MY01ZyT/mkVEFuj+v3L61OB6IzmmjXKDd4BSTdhYGQSvS0fqqs7Rr4JiDEsNmncDh4KBaeCjHOyPOYH216zl3sKQhQw2Q6SyIBFHA8Nfn/Yyn/0RuSxYVpiY/YXR7x1y3nD0q5/TYf1Q/KVn6ACmmUep/r+kkUdupyadSmWVQSk6hmjYu+46nJaT3ZBXOR9Vi/Uhg6r6nfjxeIvsMhe7pgXXY2zRUAksTsMJ+hixIpD5JSF3nevE6xRBgMN2dG6eqLxpl/eU+F831cWyPE+Ebudq/2LlM5L1SMOIaWze3fWyRxNwoCXDWCJEYfvdmk7Ma2CLb2oZtCazd7rK39L4zXVX2K0xjl92zNNi
pAu/9B3TQh2UHjoQvJ3WGrXdg/f1wOKpamfAobhH3ZOTFRMIzx0fIXU8LBLT0tzgDOykGh7JtGjb6jKqPYhc6F6ij32ouZ3ludmCXJL1koC8yP3v3iqCCJ79HZmn59wxdTcxc3HJdAPPMQckp7RpTNUJCPcgybaRZtHBEhBW2wD4Q3pF7L2qRLQ7mqbXvBvXfos7lCV+9UeE7OGHOzahsx3VXyjmyuLKiJbIKJOKO+zKAXHQ+L8SzCkZBcxJdPx3meSLihMrUJ3htDZt1vCRtv3YaVMUuoe4Dc/9Dgk8wj8BzHZS5DkxWHErDidf1auOU7SO3JNK5IlQqI48suYqOlLYeAsMt/Suaii+SGwRl0DyahIFzbEt/LNP8sBKppKVLH5R+sYQfaEs/lFPCt6O4QwimIkRrT323bYg4IhK15efvZqJmbag1YBKTUkRt1uHM3pwxJQdc9vI2jaraZdXCXC/Tf1tmBHisRbJozYzzd5bOmLXvPt/tHfw1lQJCOe10VES/F9TRmrnqi5lIC5G3urS2Xx6MRu0QFGX7EMMiZl+M4/dzVoJPADFgHaUsXpRIugh2kpisOHvvTq5Px21uViyXeQ4E6b/Cb7fia0wgtA+VQ4OxZr695ptCG+IMloCTNVt1d3Sa3Vs5LDIxS76KW8FmSFhuv6qSfmFaOfArY+6rZrJ78rCu0DQNufTnIOy17Zl4zaV2bZ4L1/7N5oBawCEE/GDRqlAU6s83vhKH3Y/OU0VzHyK5ZhbAnZNJ/5tcS+3PK3AsTmwbzxljvp5fINY7h2qH+EMK6hUHnLzrIjxnk5iRJL72NTpql5KRIbuYbFbWx9AdjQk8u2TSEjfd5tdEjFjmW4+DCVVuEgmdi49Tr37qEe1HLx/Q0cAMZ8a7RWNQdB5L1PXjDbfZLwWAtk8RSAniiHsDTUyQsFTd9bUpTOw+NduU2jN7pmgUka450xjLti6Y7DLDZz53RMYz2qLzfuvmue8dZIViUC4b+l8pAhPrylKYt0r7CwssizJNtxWUOMVo6kNFNVpnSnp88XwyerNQBPFY8xUVizThiiOneVtJMPoNtWVG7kfd8Rp7VEh//ukA7KRVo6CWDvV7tAuKwqdEUW4cdkY6W02xTVpoZ2EzLI9vu3Vt3MwrDCA4WphAlBUBLh642jK1kJjd9Af071KO1XZEi0i9gOlSMNr43/YQ1jb6QEEQ2Wa7nfhnfgd+jlDMKUeL858HWkn+1QxLum2xUe/TDisVuR+gx/za255kwJwaVT41tr6sdHyfiNUiDMUcPI0Gdug1U+amAm/37QwX6ICRKBiLXKS1MzAKJFSSiFT1IGlefyNj6TA8Ei50CsIja4DychiM9u0I5h91D3zVsut4LesQ8co/FS1h3t/qbYsVit0XulHOttrXtnA8ry2OXc3X8Ncu6Y2o230xD38igZkQGdfriH5IU5iJ1R0jVAFjgrWP91p7Z5y0iSHvwcpHXHu58kR2EISaqfBRuQV92e3yUBgXaxrP5NuDt8dVrxU6cju+R+EzQZFTZi7LiLHPlxqIsiWCdJ7GUbOLdVQf0FPXEPy1usiMsdW/yNaut7X0TbBzAkUeHdGWWUiHSFWb19qh0qQYUm257U7uCyw3fejd0bitbev6ZSwUCxnSu1uLbk1Zho9ReYd0JdbnVnWWm5uk1VGbBcKxHUUvgSUXUO5NV8VqvwlcXWFMgdpisIBTckl3opjM5pIObGnklmm82I5u1fHcvnF7iadyF8D5fbTsnI8aKFDsM6ajM6NY9iT59VrN/wrUYCAWbRQ1k4+3o9zQdOUFW4a4UVSO6R2l0/2MLzF52C8XcswSRJHynAekv8TaecWLZ8rkFORFzPDgmvdzL81oWgYjBIDzkYmqu1eD3rhytRHD/k1CTZ9/Yy3yO8dDxDOMz5jpsYHcSTRzw8PDJ0bLTjEXYgH+pvLoB8C+cSotac1SACUp50oaNxvKk1gc+Q7AcVen+q4r4v1sCIjd7jdV7YgPG/hc1Rx/py1Wbq8Qt5FIYmDUwEd7IYGIq+U+k7yyC6NKWhR4HQevJlZ1AsJOaLYqs3D/fTzrtqAYzgwxPAcaXOJ/fNgmZ+CzZLBLO9gCbMGagN/UFmo4yRggYU+NfY6l052RTtYwzq9zyAARorbyuZeTy+cPsLwFNhyCVrut+e3+hHAaaAiqCYRSz7hwJ671aPz7U0UZKuf4oBs2eofDvsNfJQ9I0OztNmhzSnSt+a8y2a8rcRRnqNJVUbD8MA1JJXtxZypptPhYFlZaR33iVEchM3C2NaVQE59zSyJS0ANheW2x+8lv70JwEV7l+4pmrTDWHa671lK7jAtQmm/RyUYp9bNfY+/IIbQWLm+uqb42X5cAR/zDuC+1ZrhhkMBmSbX02mvn9o/8MGkMyaN6cu4ig0PJUR+k9XbjSvuMpbW1JBpY7Qz7dMkju0cp90rf2g2atFVnE9Guy9YbGDcazVXhCfhyFKQxBengfbl7XHVJ98TofCjDQMO5MEwvhAbGmEBL7p4ffxtOQ1UpSlQr8uSKF5J9rczHC+nrTCuW6LjP+IDMEliBqGrMeUFqrNxMVPdK9Rb5HALfQ2xcR/h5b8uqfVfwlX0KPH3ZQ9hKEN9p2w3g/ix78v8PIbJ7fnRqkhWwxyL9BbXlbWBzZS82pQf8MxAuT/yn1gvQeCM641rZCUeZA7WzZvSf/AReMUhBo0QD6fPyPuu/560MxOaGv/NhdX2GgkrkXNbaMF3qnnbw6Esx+s5ZVUpQ1hRbLa814ZpyKWxfhTa34fYATgU7bV9mh8tmg08WbTGcFlNX3f6U/QDspbjxLRMdxOYz1guLhFDGQyy3/LYgDylj3aam+I0PejtI7WaSomeeB2HyWGF1px6O5VBN67/aBHNvZJ32CvDaPajwHr8bKjUobdBYE8Mpt/Ayv4KsPAohZdA9v5ODipGWhIKWpIxqy+M07Xi6gKKIjkBdDLwf9GSkDV/g5nK8tgFE4edECepGmAkkPQGSj6YuA063PjbxHf7oIVGd0RNKoD0DeCWkJuf3nqXk5eBRl1/QLswrFaIRXhasBtyB56Co8kjwFnBtXIYqdYFUguDVeAxQKFHOeNTr2KzEpPZT5y3GjNnQ+Uo9Ccy7+ABYFztjPELsvW5rBMx21SqOlM7yvCGfcm+zMGFoNwjmby4v6K8SoHJEPAd1Z5G/BmB6LDhgSNwV1+8St7xfLzRDr89f3c7N+X/6rWRUm2TJmEce0131NalbjKrwiqLEjT3lHOKJxh5JAleO9l+c3na1mnb45zBgZ/qxDy1i9Gyq/s4Sz9pEgkn8lXBO0KYYv5UdH9pNEFFbiBKRtd9Kpn8cFhCUg58zrA1aTsJWX+/t+MabePZVmqR0OZ6aojcJ0VxVJzTKJ1UJLrBS4I+NCl+NTYUDUX9atqSCZIHEYF898PWale25SFmUy2a4wacg+OY/tVwG6EOQw06zf4/HnA1R0BQMfHcwQI6PexmIzAxOhP37tEYviJHb/CDgOqUbbi1/W1uuFjsK9MvHSFHOJD+DKxE1qOtbfKz/FA
v2gTT+KlnHgPU2L8F+SR2CWsEGto5VnaHQAZoACHSqDj8Y5wz9oJ6xQV75NI7TveZIajmzveTWIFYUUmPeib/XxVpYEkZgQMYKurydsFQJ+5w6MMsOnr2YUNtyoCamzEuQOqDbFdhO9trNCqLZwUN93Il+f2yFyspTZ94lcx+xsM94b8o7te02vr4qgv4Z/9ZZkq1MbtQZJOp7zVtYn1biKaLUMwmNZjWnsRA49l1IAFhKE0xuexSmiFAgXP4UwJh1S1mYuXY+kQBO8xrglT1ksv3M14Ns+5JAm1WTE0mzaNWfJR9UXsvFBYxSB0kC3jvJkCWA628eGMr4VT4rByrgQ6/U7FBmoc2B2W/YFEWSG7jddPeDTP7VoiD6/zYRBpxn69QfqkQEmZguw5V49Y2wfxApBXzL6XZFLm9Jo3KJLls+R+ixm2+NPl2m+1GDXMxLvc+TcjP7gJ9d/2W/XC9TdJBQealzGJJqIEAUO0SMvxb6SPR/sH6VLv8E0woWPbBMm7eX65YWIczhf7yrYJtdRvaxb0CarAer9efIESujmUV0+S5I/ygxgwoQxwoKOA5o1viy/N/epG4KhG4nyzBOMGahFZfOF2RlczSHzkpNWeC8wbn5YrHiIEM8v5FUB9EJDhlcZos9wHzV3a4c1YkJehZNK16EJZ+93hgFjY+CkiJGALOMSfsLYrAHAwiejeQ3YcelThlgSKIpC0zT0BsTgUgCNRgFLjM1eoKaWqGKPtBBpg+PtCxG6WkUaNlcZ0CysYgmBljjTlRzUrt/Q92QhSIS9NzsmP9BsWcseNE01VR3KS8ymEgb4QMLdJXMErbHm2XPFHwbRguTnEsIWmPiWzAaNTcLmZNmJ9Pw8ev2Ymh5OtaCkAIwrHjPzvbSKuynbSUkERK3PNIkNDVFkMmWPR/DqdIIWHwZrVhnwJ+EccEas9FqTJ18QO5eY15Ivfn789mDnz2fZSBwlZkwDg9Xp80miJhuRObPr2hpWSP9SmUsOYolZlN0Qzp8cwq6XucxXhF+ZNkzPiE0Y1jtM+f3SkAATF4ONw0tj65cW7pEUkJHHTee3+dBHFJLuQJ0nm+M9O6g8VsNISkwMXTmRN6xZtIriF7u6XbPg8Zz0Ht0One4caamiaGGeyvUI4irigXUqtmyyCG5x6X9Acl+Zsx54kB/AFJZZkeRDSAe9DD1mSi31T7Tshq2j5pfWlQW2bM6MeYKN1JS9Fcsn3FZG85yvMxAzy/fB7MwjmL/lmW679BkWQSvG9rjAmXxD1YhXslZznFzq454kWJJksEjhcXLNIagUY+Ldm9mMrzEs6Su4d5+UWk+28g3hau88YrgHzyVK8HZTn+dgcsE9LVMhYGymYGyYOGjt8EkfaSZpab88gIfSsQnc8xt5hTlDJIwDOIsua4oCf81sFBM+dXnJWPCuq93fBGgveYDbxA7rkPQnBXG5NCr5YXFEIjAB6cOYiO3bXcFN+l9tKI9eXH1oJJfaJ2veBGNLxWxokTivskWPf+6y7eZfgm8tOBYyZTlCYn0cUeBJQjuJ3JsK5rz5qaTptquKIEasaehlkb6imVxdEBsEy/1eGOlWnlSXgGkVw1kIwRFu3L+Ig0zM5mnukWzOJeQ9RoxkfwpvDKjXb2dnwRmw3Q+PBsuhJBOnsX/boUjjDC8wvd39tp5qM4eIfjipNyW94iDWNYKaZTPU3VXfJQ4FB0s97idfD60rLB2IrkN84CS+AimjwmHS413JrL1pYPI+13zuZovFmie/HmK7c8GK8u1pOULwKsNqbAqJkgCYLItth9eiOcyF7wNt49iQ38vNCKymRj02GMXKIUExpzgfXt9JWFnWc+lDY6qEjh3UhTkYo3+ti0eOoPX6M9iNR8ahXgjMBqMdjI14yw83RQ2ijbXmg1uv67qNBEhy1K5jPG0tWhovlfiZjLSvEHb3PjLrvtvuA98omwG9Miq43NEKPPsG/7wANlYdYsjjBA2p3aSvmbR972w7Wi6Q9GlNSjQAywMLUxBBFNpdeVIX9OKnfRNQDRRUJoDzu+69+jV8Uq82h6bL0gxTB4gQe3PqIGTSOJ+d64dDDlUFQ4mJBXdJHb7p0Gh40B6amhDJWBjJz2AaQHfPvjo2vZfVf6yHluSNABMSbFvjJZDpJEuuh5E2M37x2YKRfiF8gB6Cy37pHmpLJoVVch9Vc0mzX3/exZO93am6/zXvhqe8XCu6QOtqgx8ufT0SgD3g11Cz/EsQofetXoksPSvP3OSTIMs65dldlEv8baM/aQU9TO2NpH+p4x3TZ1mEeUvnSR0QZXhtksGZxD2VLZRT/pQkRgfurq3YhDFT9Uyvtox+C0Ey2YJU4u4xfhXn8KKzuvuKBvMLLRiy00gvQifNIjwEY9WsodE9Q0wfWbjs5kNIvWjRH0K/dBzhnLNeOIfovTde1LKmSA38JGv+I9x66gTe8956vX+rM3ZiIiTEngHJSZkolhZz2y+Un+8zIxQU2s+j5fpJfX3bYrf2TVCkv6zRS2sGs4Uwk6Xb7C8Oz5Gnfc9MJmJiXPPvWriryo38QgHllISym0hWXOlOQGakO98KNFg5/J149NbnELHp6Y1lVu/CyqvblKPO1X8RmUj8NiAZADe2ZBppNB75qjYb34vOYlkil9dkwismAw1gDI80M+YH+NZ9OaruJg6ZqHTkwUyu8YWLND2aC5U9RCMvQpDFquLlRrt+/e3Aas2ScAQ9Jo4haFiHrlcOhCG2qO6otdVD5nxTEfA2bOI4lz6y/GwtH8hHsNWiU2WEJVI/j3Ho43jqh9VhIPRuQORzd9n7dFzD6B50tz9oz50PbSABb89UWhVZ6CYHDSv6s4NhlhJ9sq2ZPweJ+XbydZHuNceOUGFid1BC9ppXcX1vTeq0X4vpv3vKFOpRatV4InblkpmvOAK+vT/7CejOvm3V+u4KFibFfqYo3GxSj3bR4N2XMLwWcoAmzLRGh/1fOetmdW6gSgI4R9bkCN3TplWoQjHfx3Q687AeUi5MogT7HmEn2kSwMrNHwMHHSWfdTfmcb2Mm/rrgF7Te08aFRMkpyA5t3x1X1oa7NaBQDh7GHBrKweN3LDQL1gplkPHZ6fvlGGCq4XLL38vlXzkz4yykEXthpiez6uiynY3oU6kIw9Pi7q7R8xJWD81MKQYPvkVQ6iUip6x0IySXEDVep5hvTiqXrj7UDmfQKVu9RdeOe8+9OrzzWGnRSo9qPwXQLnWIJqxD7V8T2fFX/HHoMG2XBWred7gLpaTusagHOWDlCXTghLz7ASI4AO/Vu06jOhr+EK49Kc3HLlnHhv6T+6y3uIT8cdapgVOi7vtie9lyUvpxyPZ9oSAmGdFo9Ahduhe0ojhBHcz4nctxgeAgJARQ3OdqBhCAsimjNx7VSwhmaRzLdA6SJrM7tlgd0xnTQlBk9j8ZG0fW7F9wKNuzZ0I48Z97/3EP4+k3yWu8fyz9Wed08uFYg2DM0kXiunz8txfNc6KU1UyHNkt9JdRQswQ/W5T9BwkT6AzWLoYwBbqrDn86Rj3d9xOGFW7y35ZfhrXm
RB7gBwc0DaOnU/a6Gph/I8qjIgMkPOEGxXglamXBOeGKpbnNOwhtXN73+TC/7G5YXtmE2KSucXM2pkBnLX3xnaunN8xiLPyWDMB5L3KfHIELY1K7Rg3er7FpD0NLkPZqtOQhLGSBbJ0nWKW1Fznkn/oKkwNCq1Krc4CtKR+lY4bZA9ARhX//x2Lj5Chps+Vm+bAti0fNf6+EmNYbgrL35iOZ1kkzyYTBEaPCbWKu8e920YzU50lLFO17ICOQngtQPyD96ZtmoXHAicLTNUJREjK86VFRRnmrjMzB8UUaVmDfqfaMx3vFerDUGioqf3hjnzaYhkmQV8V/n6QNhOseHWf1ztUmTXGWHohGwfdQI7yZHVl+m3b5HvpHtGh4fkYY8/wUkK+qMOtGG6VFEucBCWEuMB9ScrsM/4XFa/+XsK89sJWhrpslwxI9HfdQ6NzwGhfDQ3EmslLsyDeNYXO7RkKZxrH0yt4A+Z0ghrspjtVmTM1sX0Mfr+je8lJiv3XIQmr/r/PyDo8ML1gPexj3X9TB8+AsSNxhfkfyfJmx6KMGsz+78nISSefqXu6WMEA9lxg9ay/b9ec1ltlUJsSyls/dVXQ8ExypPxPEBl8LfZ6IQleVpSwd5uMJZrPX6eiBHEOm/crrvS0VZnjTNxbjrCjMZOdCXrLTW85ukCIGMRoaZrgvktb3mEOqU1amzbqwcFRRej350JZVEErjidb02GDL1E/qD5Ix/VV4ivTwIl9jPbAvdpPmLFrclefUy3BKFM3oxExZlJ91+Aruv609VGIEY7+7r0JyH569rFCoCg1Rt1awNS8lJesZhqH/yO/P5DzRRv5tg93h79g/MfzCTzoB7SRrAoynKtm6THZ7PDg0egjhlfa8Bh3rQo0lnj5Lw8Cfoo/lBy2abe2DSBeWznnO8AXlVEq7RghLkrxGrbp9uhx/SJ18HGUqWrucm0wuTPU1FHZyqEn0NLoi/DkidvFwEJcQNq4ocjT6WtO13OW029IwFj6GYmMg6wEqt1vb3Of40bDZB7BYnMc2Tvwq168JdcprzFSNgIJ/EPz9UiM1laBy4MvqMLayJ8MmX+kg/vW4Wnvzvho7At67eyXZ/V5tYGIGv62s+OMspIkt5QIpQfGWRRes5ZDhsvkFe6LMjXI032/nazLlnXCEr6KQqPVrGD03cE+Ojj7JNk+efoFLhJNxD6I6DMbNaULpySbdm9EA9sc08YMcvRVvEiVuWiOkgf3/c9ssrvPylN4VxVPEjnckNIc5jkgwqDn6tMJjelmIY79A1nfpx5fpawipQYtjb+S6+Qn6a8dICLA3nWP2DdtOtSQ2ZDWzduTJHuKjxpPCuCe1fWleRp3araHVLWWYKGweIty0nZVmHX4NGlF8WxX+zOgz3tZCDx9QCZdntpHo8T/D3X7tRlFlNGt7Czxi7K1Nn5leRpbiilRK2I/Sz2waQ3oMKax8F06oHA2E9aZ7XzFhHMSvQOxmp3HxtyA2EnNMBOJFl/CgdzttokFN5KeWnWuXPz2MbkKNdXKH9F2zTDKGegxhpgr+uNphWe7x5hiuZ2Mjta4mRyQUwFgXsJs2n2ig7AyYlUdxAUVcs7/kAIPD9c1xRVTx9eGwVib/+AtE+YyNRxFBOrl2POmKhXkw6HeUsY8RULLQhfIcl74jKRzG8LNR6Tx9WY7c13RLSThtyU2YlVi3LDtsfpv8gCy9lp0sapa7XtO7JvDDdc4vFpCLPi7U7jvQQ54sUG76Yye3b+l7YH+paVMN2amJucVWPe8XrYXgU54tglPTn9c/g8KnWvOD8aOUwcBzUVViNIu1P/PUrfq5/M18z6a/aayoPVuMsv9gv5ZuP4X9ja9H/KhrF4ssUgkIfpi+DUcwMbmoytWpLbryqf3mZD//XEwmQWSpA1SeUso0gZoiOotHlddHc++TM/7oMTJot/jUiiV9mknuk9Tu0NEeWZlogPgQhKx3cqQek9q8009IhZzXVYuoR6r0NwJ7YUixIlvVuErHMza7TUIvQAg3WO4f8YSCmKJXON3M2mQUnWc4PCqfmz0QuGPRXcst5XRj0Vw+ntg4yow9RfyTaEEWf8fmZlq2K0JkQaMD7uVfbcEv9i/lGuGnG6AWWQesWmUf4VMg54MoP5MeDM2g+162D8f5BBzldOuz41levHMEPeLhzKugvU3mUYV8gVZimLy5G6guG/8jilFGF1wso5efK8FHKloXJFDGk6iQzRkm26tNX2t4rTZp1aF3zbKPFCxSUea0pbAf9Xb0KRdmI1kc9M0uAMB3PytlrTX2iCXKARSB+d2o9A80kPQD/Fb0sqGeJU1ZsLW1MK1sCeAtUsCRR4Q1vTGHy7jGJuDbyyg84gdj7s4qNGwDeQoxgiawbTTP6BW+khQBDNjZ4tcd2+Dgz/Kx46y3Ell2EezjeV3wsExrmohKpooMG5ysEUUncC4F+s5/eMzL2fDgoOl6LGY9BgmFIYoKbDR/aD2mvNBuwbb6FHC49CLH51SLBWy7OLxRSDuejZ9eOFHXuklJCB1KoTrMTx5cvdYFpG+QpUwt88CfHffYyGtWhNwfTZaIlrtVgnfA+3Wz9p++/SvXK3xHDnmkyjSmOyJVyzbKa7FR9xH050SvokSo71jNKxxCRkqeKSiiyL9/NscvV9s4d5cfo4cgJOkEC/roT3G+ZBx/oGCh0nn1bCe3l5Pw7H/6yz1uDA50v11xhhZgjY2uSxy2qnrC4gPHODwW+q48Mm54kiyBmi0nOuhdUAPHO4/xd5sGM5LGY09i+zf3XE45e75y3zOSvMVTLpuYQgL2aT9KqnOXKPSqLb2fxd5+cfpTvkguLWoW9t37HS6ljGw0jFcePcki4CS+nBmMvTYtLeRqh7IuMEheZmUs1n6IrUj2xQIDoBaLrnM6R9fplPmqYzDOW3EZ8cmdtnb4GfesdumXf87Cl5V6UYbhOL5vELnaK43R8Z7us8i/EMMCnO97us5NwUagU+DN3TeTMyVB8nhNxDK5CspFpb3CLx5X8g9Kv2/KNhv5yCy3LO5R5CeBFTHtuTvg6s0zwte4OStbtWLpEuxXP60PjltPPk3L9XKqkr8/sGX+VbUPka0dhEKfNdYoNrVcxAc6XmS3Dv9DHf9QWYvVE+tTCUFWHGLRt1NwIUGTYxm3wXUNaCO41abkyqAmPq8PFD1/WpTqKBwGWlfmcv/Vc2Y3+EhT1Uve/9iRAuGulB/22xzU0CsxwOlWYjW5vtnKSQhk1U+Q7EAGxnwQ1IKqoLSRAbCywy2UyHlan/+6vJnLY30CzO3qFol7+Sx8+T3MVgXX4XyloZuASW7ikF1o0uT7VX1qDh1Kry1uDIQimtLu2FbvttXP/CbDBS3BuLmaQ2JiclSnJpt153RltdMD2CEVdvmhbas9comvRdRzzNoaP/hdjA14hWSKp+Vr2JPtyqS7PVBvqjWW13XrOZPKLOpper8im7bKlbGAATnESRqFVnjefvyojmzdabXlTHacl787/C+wW4S4iZO0vNz1pstdbxetF6XT88nRInPv+sYVlIerFMNzf9Q
IF2meqc9NSi+QK/Kc2DO3O/SIP0232kSWNoHM8EVBW2yuqFbwABExlqP8wndNB4iEmCDRzXqqy53rsdhKdfC+3OCxE/XQ5kC9gP9O3+nrZZb9tqMxlKQmgB4uMy1cLBwyNWLO10pwy9W63g5er/tUfJek9FtW+zCi5DiL7u3cnpXUrQWNRvtpC0puWZn8FPxEEfD8tq3QcQriy4O/a/l74C8YkyrnNJe0RiMUYP9/MBuKw47JPiVwvqOMC2dYLi6IlcTA7HtPEFTDf6Por9Q1CTajL/91Kc3ymSIZavACCSqyU8hi/bqt2GTRyE3+B31CVm9bTMjo4AJCkO4TApWwW5DG4pgVSVFXMyEbgIBjEPagX1tCvWa+cEjgdvpaOsdXZkEpUXqG7r/VIA+XEzj8iUsdQr7vy11a3YOrXcw/ucMXTF5+N5/oL9Xvn5jIfE0HZ/AgwzHf8IX8378L8/EFdRBWnqGaEEhJqNj0aX1uzdc7eCfBtHYhKSoxS4bngNqQtu2q7X6j1D7cw5McRLTVEbsp77eyHgBF9LBkn5NSENELiL1sdORAy7Xlju+AkAI1EvMtvseRr9qoOoqJLm/hJARXmjeFVeDPswxWn7b4w6bv+LfT2jKDMwLsmtGXXzv5X0liFRnYolmKNx+oxoGY0Wr03UtQX/qqGf6efuKnjd2pK5Ae3sTirzZ2X+nU926P0wK8ntdNFKFTZ92/Uct2MP3OSvnaHwUK1tuUYHjXaB1X3GYi9VgT7iuOBm2alPQEDmGHoehXQar6fUcQOR8XwOtKikvwCi1SjNveDlJt2WF8tiAlUiXzR3xjnRj3GR4smDWYJcNKAKCPgiwyaMMKjkZ1etSBY9noQjfkp5CCqyzJmlX9YXF/Nq3Rr/K+hzLxmR+HFxz80JjuqW4XPe7J3JrW7X/qIw9ycy3AByzvyjhKklv86P4DNJgDOz/1ep58jHbZe23pK1yUwOQrzWjSwPa0z8hQc1Jx1JE9fswpaHf/SQ2JWMvJGBMbDI/c0m6czKmSebVF8wuzSI6OCmejnU0bXjMBWy0nwEMfsv/ZenEcBQ0ABDPi5hPDzrFzwBXLVWiMnLtPMMVS5jVkW0mrorqclFNa5ySEEzyWXtf1ffAQfXwUCsXwZ3rtM0BOdSarTChAeNxa4fh/2Ypk79taYyOf0jlKPb2jpQ+vBagHDrqDJMdjQ6ZS42UJf/qw9AT2UHUlrjv/Lj89/e6OM1PJCpzNiWVmcoa/IjHPlcw+1fMvwp5EL+5itp0uTb+nnNgV17JV/N9zK/TWqGGmt8z0aCqN1pfqT7llcjo+k/0QvaNE8UVX7uPoVHxqmZXGnFmoQzG4mU9K6J65Ds7FsfPkFd3y1W8/9vwEv28tUoaSa/0KOVa6hjQfYEbtsmA+76muzoOGSgKWBRTr/FFC9n+moTxsFNWCzEy4cBGuUIIAuZXvO5usaxEYoIWoV2NGpRGhFBLA69Ff7YBlV6hFQws3KbUunw7n707UU43vcffLX8Ner2/qRveFTFJpFqf+za1pwzwoUwYZ55USt6vduo6xKpPFW0hGThGj+BhfdhF+hi7g1hpzOdYK7wZySwvBmVrdlXAsjaoR67eoQLVLIRifrWjvtQFePxBIHrTQcH55QZF+EjPUWwjdfsGWhRhkANWn43oMObrkIknMiRco4zoi4oBpR6gMQnlIovNjXurnMOZNxAWP0aMaBI8uoVSM1YaySbBv3TuPqWRpqSv591jvUOSpbWlbZCgTNlltUFNs0H7mm1XKyTQb1/ko2MJeVFTb0VMlef0zz9SNYXtM7pNj+cTB+4Myqa782jYOdKF2NieWI+HldJaDyvwWeP61dsITcDCUIcQqQKAkUxPQtVvPLX20NPorEzie/EsA7u/Kb6W4KQ4o4Alwtj+c86r38Cz05cTLpQmUoNthaOAmn9n81wcfPgUCGrgZGOUga0TJXoP9O1R09w5XGLCC+T7CR72r9bPKmN1YSNWQgJbN2k0qITghyGse3Q/qlU/W6rCwvRWQTF2n/Y3eV49dMlfVmdUFSC4G7P/kxVVtsQuyOmIFSYYChrLBt9MmtJXf9ghhR/12UhJd+VUkCrSsKCjuENTY4UqxQaiJotVTfDAFSnY4RYEmkAfwnbrKFvWPA6BD6tJyWEUeZq/JFiUtCmspuHwoppe1Dnic0A8PHnB8yMLvD0y13Kn7Al3TwTqlipKe/0wTX/S6G0QqOlenAN1b7gkXJpYCgI7fsxUAEwx/f2WfFrad7f2a8XSGWIjYPNefIoB8ZIrphRg2072zc9+DNdNuLBcrq88e+wR4tjA77i3JulE0UUqWoHCgJJFhWgDKgPiEQ0Og2wmqXjg9oU/Yn/YY12AqWXQwOCISCCTqQ9zDDVO+juSV9If7qWRYsc61K1MEoBiR524jOqZin1kT4d6yAtwKnSWtcVyrXKkefbamF9iu4W+Lx5jtaXhFUR9Wamq6KK1sVP51eNo384s7cydShO0iDD44Y5bWPD8VdCzv9iolR1elHmtW5ZDPYHhDackxBILgT88gyLYoBUp9q/OHIBy2EGk8aArTAfN/qayX8/ZKBx5DX2VkHMTz+n3LxV89drgkTysUN+wAxRizOBiFQ68qG5ye2Bfg3FwubiEOPHjayzrM+Nj8CQGIfVmF/ceg/hZaCNuwwrDE0WVqQeFv9CmTpK5Tj+8ClCJhuEKG7bmMd8YdOfluxhW+ZLplJ68sD+tklIIKrSbJfN+EVnYYzL5qxGnvX2pti/jKruJ6+Oop85JNvQnEPC+lpbuuDy6LsRRlplV1z3R7w8Mt6nER1qam5pxFdkpad/TPsXGpS9Q0CxMy1OhAwuvSwO039CH/VgTaA2GjjBuXWwXmRP6KcrJis9jiULuknM0U4bqOFKUIgC4KBS0ruPJMGUtHW5p2yciUFgW+PIH6QcyPSy6rfDy8XEHlnjnjUpz4cGJnU3h/03v1vy6nfnRNJijN+8RSPGqX1bjpH/g77Mj1kKN4f4IspnNRE65IQlM2p7sIrL5LGufbHgPYyKqvPw/ydTAb7YAKKtjrC6pQg964Xzf7Xk0fIFxhmvrDDXvTHWsgTcrEPxpNetl+x9K6Xvqp/cBNpZJ1k7uc3iyMO5M4Z6z2qrUS6aL0Z+asXrked0RCvkZUTb2U4JmrPVLJze6MeN4F461taY5mn0dDAVmsz2805cdySrCkMLPSX60UL29dNHbm0xC8ivsigveSryPaDpp/1Mcs7nW3zCsvRjZ/ajEQBNEjlsrz4QdzDhE5ZsdRrS5HXyEsNqTL9RbFP6CmebhajfxAVmTxe5GMgAKJKyDdN2CqP/vrCnh2VQiqsi5HIjWLZ2bT7Vw60kXghx4UYjBnI8dzDodqWWb0zeKTz7YCuxOyBOdC6KJ8iuws1ayEfjZ0j85ubCOxlNWSGJHCPQr9Oc6MUowj+Nkz+srQgDAGSoRjOHyj9r0FaVROGQVjRjwFMB4gTRa03NG0wtBUWRSeVVb5Frktz9pUptF4wyRM1KABYOdjGCw7jkji1jlEJti98cQt4H0tsU
q3r8JdO2WyC2FPmxwLYsDNd03fsVNDhkvpTWGvcN0TBYQvHKvLiZI+STQiq++2Icx2FGPfce2rG/6WT6LoEGnkyZ2GN8HW9c84yyWvd6ZNOpFW9auf+Vb1oe1vKjOADK4Naz8jJeozDKJjkyrpvq4AhYmUUGzK1aqrnQSQ4XAO3j0o4/aglFKmmMUBfSiKK6R08JEuXdQagGYjx114VXDm0sT4mc/eYzrWMDEq1kHnD/rp+d9mkXXkIcnoFe0jaoZ+ZGT/27+7ph6QkRYJXKW7Wf1VnAUgpB3Z/BpsRmRpnsq51+isz8r8bLfTL1rdVepwliRO9c75SRb4O7qd/QstdOTPQ+zDH09uvNC92cvb+8KVjf0uw3lOS5hp6akYvOMLkgo2TIPjJAjV1RdA1rNBfrL7DXOKYpZ7kfdoakTOOsj6izB7jUQqNceeTDXL6Ywngix5Mw5nARPRTPkumBpuHLFWdzDZYaAOtjMWk1bWu7czPsn5bikBF6DbgjF6bkkg03DCC8xt9x9Ke5Ft1Pkn/0+/cdIQIIqOKUkveNM9fzyIMiaQ79DJRlhwPmXr332M0CXGfUxgXrryvo6redLYhUioNwfXwtaN/a0kKQ05BYMZgmX+W8bF5THdy4bvBSvaa+B+9/wqOfrLKZWiM6DMhMHvtmkGTrTH4PfFRWwnDY8WjjbzecBAnT6M8iM2jI4tuJcCwkVzq04EhbAcQC8tpY0O+Vb8OkVxN78fD5TKf1/S4kMPfaSsvVwgVIB0I3rdoLP+yO5n0NLSFCMLCiwskHiLM/wZZ/VqmKG00C+2bi5wcsNF3/vNTxctVP/xojKgIgi8jX4/mvrqStajClzOEOekvhMGvTdpL04MDgX6ApLJyTkmL9uWUHoV20WRhWRTqt86c5rzn5X3HMg9EO8+4wUX6fyS/pFYsTUe5pTF6g33vd03b5sVd1zl4UgjhrO05Ib6YwRM+Pfv83LptPvTcfCR08OEdNnz4pIaxnM9xEv5KITMNX+4HmxWsy2E0Ob9uhA5xkK2CBP4EWOEQHLD+/GU6Zdpf8p+fVVGO64soUoJ9wyxEFD245SSl3/Pz42SLJo2JP18vp5fxdEHHaTQbjolQeMHoL1T6tchlTrZdmrB7LklaVnPhGxWeYgTnYN8cTA+1anmGhwd1QQTslqFptgVG2cK/ljnVlwDZdubt/rVxmePcM/KodEqdeX2QD1SHKzxICwH4BITUmDI0yy02HNgPOQGFknhN9TAL6IE/MpJORAxEKaX+4TQrOmAy+PTYr/cbvVclzF8INP3rZICJOqyMH8aNpMR8KIZApW489UFFlN6rGsQRwzEYkvAvetdYaWaEgswyY5Ax9diTtvgJze+X8TsfcypEcyjgRJKA9qVQG+NQ2deWDjwJKeqpxXV18Vc594qCNW/I6+5yc/pFl5fRedGsG7lFeVMa5mxGWwJtRt5gb/CCD/0XhYJliYOxVoCg7SXHNTOGpGInzGtiaH7cHckefMEf0eK1ZwrvPELl0QgscIoE5Ae59B22vGxZXhlkPfcmzE8j/KBhoZvN93ZHodlJFI/rZEPVr1gYzvpBObqOnQbBPw6gRDZMgT3xTHag8IT+MfhW+VT5Lkz5iX7J5i7eHU6/p53aYGS0+Jy3TQPTt7nWdGklSc2BaYYoXLie29EBCp08JmYg8MqanNZKlVGiJt4FWz/vYQPnhvXrd9N9gzItwqRQ1L8fp79ICZYclPCElpI6I8oLggO6unDi8M/liCLxgid6EI7VX6S5+2yvg8gMLrxolvB1jjYUdqnKv3uu5e8TG+jNj3rrKOUWLNU6DGU2NHRMivehjpgnU1+cFr0MRxm+erdgR3gOO7GRUZFcr7INYw24WLygVpwe2Gm9rSHXKjp8PfxrxdMJJ5MK48q+L9atvQTmBNRD9XJPcpPWMkQrKHn7nbLy5GL63IS1BguvIXPumYV0D8Vft21VuCkFWj7kk0a9VlpcaFw2IKMFXvCrHKDIGSIN3q+xbpEm6A4KFNR5rFxTAQXXBIzuaCJW0+z6WclD8qUPl+8JEpkxztvv16noZFsj6XMUPQhnlgFZAypBDMbARs27qKfO2cAlkcUl4n/dtbZnBQJj5Yi0HMZi5fXJ9ws0sY4zow/v9O/T3zNa5xRQq05S2LYMKHo8QaJWdZOgLkBkF/h7+kZ26v5eQV/cY9WaMtfqvmD7NXuWaPPhOFfFoHyOtABMLIR5+r9v339AdTfzzkO/OYK33vhXEY3D5Mkf2A/x+2aBNpa0/vd0BYkLTI/mGQBzaKHBBFKuLIVWRYmBS1U+KUcNLz2qEtLg62OxWYBVahyrvbdnwiae5CzQtpRp1K+ZAfw28dMtBnXuBTQYr1itzQf5zMJUg3x7wA2HRrfOdXJp8lIGI5HD5+9zePfmM9qfkwpH6uudxCQR8DkFfgE55vnaUch1vMD+99SOO5CPy07ihKwKHzYVT9Mihi63UflRaN//RmhfR7PMy7xrTUFoXhifmE0bZtEK6hxTffaC+H/zXKoMN7NzjLAlHJCrGF6l9yLQFzMZh1/9sKH2X38MfpS1I+2vDiyFPyHPCyIKV947o7H8F4D65PC6HDp4pq4oDFIQ7bjO2K/In/PF64zK4HYBDJAADCNUu+2/58pXScCkPdINy1GV3LA0G6rs/BdVwfoZnzRWtMEih7YdF4Qe1TP4z6DZiM+wrQYfhnTKYhgxwpxHDc7J/41MJhGcpGhoS9o5IqMueU0Yp1NAIIiVCZwL62EUwoKrn/C6Q7EqRZFpOTfzbMaXbIqQeOK6RcbJodDbCvqTf2zLwF7/8VjF2BtOU4mNMJtgn+f7Jg6x7Y1qrXEVeD2lIc5COZBt6PZfcc0w9oSmzT6i+g2fz4zJUvV88uRBE1/3rrJe6zGvXtZSRLI83xF5oyRZ07IHcOq55wIdVNqGHBGS52XMF/4Pt5MKRuXaZ2DcNexl+UL70JOIghkxaCeohtDddJ0t+QxHErVtrpo6nnZ9JgyGDF04p6J7csPrU7VH+NcWmZE4mkszgq3pMpas0VZHvwnS9qql6o5NJ1anHBKkZMuYyRLIBlO//eBn/Gkzxl2IpW1KGTYBzd5o/sp5SoRun9X5iWE7C7rfvQ1/zevNxg4bWoXlD+hcLnz0ZQFCQln8/82y2dFLTFQ1OTSr9wJBvzjI3GpAMI9NrjpsM8p4jAfYAEMhuDK6IxlQ9uIHgmJ87/uda75+uFTFGeafPmbBtY0ZUbph3aRRZI2dy+yj17oMK4YiZcK/LsGr/noQ/jgxGRL4y8QP6dooum473juJNe5sw0OhAhiWBOn8AYgW0nf2azoof3z4a+y8n2eiL4SBYel5HpQeUTPVr4hFfLc6ZG02nXmTOBJmP77LAywCjCI1OVV4kyPOvnwA1Sn453Xa6iYMcxo6FWAUn/AxlL9sjF7nR0aH5gh7DB/xep+2aUg/+M/1vXJsajEGF1dwx2Z7TwrQodOgtdIje52lWLIOevFBldMP9FBpk5DCXgOTHHjfAwOYOool25CpUKDqK/QZkQB2jTKhXEDd
Icc+/AWwltCJayy1m3JpN9rzeLET+M5HJGTsYgmjZO/FP16h0ZTsfQGOZjFScGlEU35wBqzNE5XdaH9Kxjojo+DzeynkRsPe/UUw4HFe6/tNNaUx/rrQuJcYcgBz2rRvX4iIZqQWctOfaUx4JXiA52GXX8w1lnU6IjC1YFNlHVcD6wyOp5gsIEgr6cxDikdsUF2ZKTrSFDYz63d0dNN3s5Of8SIp1LOVLU4aKLLbDzeeyE758EdQUHs4JT4TrTFoQlNHf1wNoEZZrSrT48DDk5sw18C8dQ5FrBeccHZI5yASg+Hq9EVMllZCC1hmHebkIMvO329yYTP1d+5Gd63U41iWu4HQIPYQgYXX2o54t+yA6J1Yvm5BmoU+OGKpnkfqirooBroARJZ4Ss/1GTRJJibJBsNr5V2eDFTmfLXOLIOIpzODVvub6YGJdaZu57okOD5RwPdYXl2h+BnmlCdlz56QsQRSyxiztV6/p1vbsULSb/sdTBDjN/SjeVRznM+XvdzOhxRylUKRG6XKya7PLWn50hTArc5V+NlUNMn1A/u9ROqgbhLes+Fvm1kMy3MHqt5PQY9HIa8Kw/FnxugvMka5LIUt60vwoNo1U4ze+whyAPNNtQQ4fY08NwvQa0kco6j4NymGuS8GDN17tnQu3kIzTmf9RfsFI2YeM+F1yfEA5K932TwYP9DPKQzluavy2DQY7/DM5Zwinw4PKjgRoGO1IWtIhED9hhP+5FNd7H5yksRIokWEX1l1PorPLoIBIMK8A2C3SSQoYkHAAJ8D/P5fMoatNMoQiZ8JBEZcsbfpaAryULesiQ320EOL18XaEELTNfyJJpJy4wrbLV5p5Z9yuexPeiETraPdi/zpJdP8DzJHLnZYOubKFyuqqig9NQ9K+3zt9pz0d6yc9tdqk+PpctcP1z5vWmg7WNM+aS19018NqUtvt2bP6q8LzPAH8iDpi36S/GUfMqlGrvgukK2ffzeOt5QXGSSbQe0Wwu8ZeFyH1eVnsxGQSivoNAqx+8PVKO3f4feu6pRfREkL+fvp6M8VdB1E9aXTin2G8guanpM96w9WAue2o0Ws5L+QBVWFc5jUtCy5ymmYhauizX6b23/0GMuUAiL90N3+OhaGuPGnoE2RIJPQZdQuhvK60bef0ik6l8sPtkiTsgF0XGhb0Srr2edFmU31Ycs+l7Z3QAa3dCKYUiXSW1fAuYN0qPo+rtv9wd78wyxXvxBpJ9mHDJsY4DEbiTEp8disZr9sK3AXnv46UyXJTl3JYbJjpxyhxEfKvlLV8oqEZXbV/MybXAxK0kCNKpK+7OxPIywapeol/oUTdD0JTPQ05Uol07OQHzW60ECoSh7BMOaFfb6LuK3HXIZyMLMWIpbq8OccUTWljltvcyUgNrU9i2kqd6JhK+MD2X0C3fSoui7vRJPJZ1iYo9VnURXv6idMZ1vu2MWO+KBZb+z3r/AvueKJCs+R5Rhp9+8eKjhjks/djYZUo8lYdwpHXZe0O+fD4vXXlHzZyeH6XfnLnCo09Msl6KagPMP4mzzH33WA1UQzqfK/bvpy0IQoJiBgCoTpOAHqS/4XniLdwttHdgE40LlaV5AFJw1kVJlokTTjq/wUXvXdzuG5IPWNjnHxQ+Mzrs5SoBgIRvWlK6jRvTMxSZqodlkueyFesQHOhKTU9AkgBX15SE7o8q/Myn3Iy5XCFuJPDj5VyXzzB3UwWgsd86o176nwUCCXxhNQkyEXOb7piZgIMu3sB/cI2j6XyvvFFfA2X61VvNqmqGnsw5YthAL1lxlfAaYyEG61nU8zniDxX9h+nB7ZBMJj45UgA9qU+W02/iM3jpKUkjxPbYy38TmYP0fv7LSkMTmIEwdIDt/HSL941ZYvIMpyqnDFq9gvefvLOz43ZG2rldtpvLKwz6irewA1wHouy6oa7uc9cCtlcmtxOYycmCWKcuj5gS4ALpSCc9ZKIfcevZ65C3XTeFxoVop/St5+AgvH74PAuptzWbsufR7BsWd2mBxRC7T2aA7gcrmK4LozFn1uY1othAPdjrIlhLyq4SPMXF5K2Uy+5/Y2WoNiK6LKTb4So7O4caiUti7zAhK/PY1U3Up3F+Lga08pl46b/MmS74HW8pMPS/jjOtHOuZmvKRy890El8liiG0E/9wPvloSuw8R7+9R52yG7Zr9WAbUR/khCL8P6juCCcRNtT6FVHz3EIebgDLyeoJ+tKWIcrxnfC4CSbgtZaS8ix/GjmlxvvuAGc9GNQHuclApsIzr3oUfF+wBOEQKB0qrI9SZup2zIqKTliy7QXo6BL/1Vvf07Dft106Lyiw6GPSsyIpdu/6sa9Jn8yO3G4HtFxGGT07WakTGniDafkvLZ6N85auQBIghZy5Bx1FFeTR6lAIuD6Sp9Xhdan4PjuJPIbols9yly/Sf5W4mev8o2+te0hMh3UDNJTwoGJNvsXF0DLQ1J4AoJMoXzZJrxLReqX9jMB105mNLAuoNLlZLJm+NuS0fwvSO40M8p1c4MZL9KxoixKHA1m0VbyCLgv/bpEeyLWlkZcrtK+Dj67n0a7xpTws7iMncH/cmzkRdx/G2R18GcfWzQ4p+kwB1US7f8Q1qbBwKU/WaXNMzYnPhABGGH1v5iKmUMHm79ViNehTQDE/uwYcrrQY9PX0eE/2G3efDmMmFwXa6HbwecRzwxk/bd1uQnA4f2I+wTo0YS++sO+sDhmkryOK0rex2cjWrso9dyYUly3S4iM+90ml2ngeU2STQYKYneJ70aknNyPk7QPCnh1P0trQ0ovota7oi8LsxMEfD0cVJ9QSm3v0QO3f7y5YicsVoUvJ53lxkzMriRIvD6TxsRAfop8XOBc2sYP1cP40+PBvfBXPmvy9mUbNRVK0R2guBcVST+SlaC/35ndxo+2cFYdi79NcOKfI8Qa0wRUHomTe5naS59E+3dpvqN8F2ggWMDyIFjxBe8E+x1yflzQ4ejy/CXAwUuZfXg2ngYUngv/lrWjbn04vLNxhDB+JUTZ8cCD7JYOZV8afM0BOq1xiAdoS/Jv40l5AhQ3FGLbeeTYf1dcDdUPDQgrrORgyJM/Wi6ezTBXz2kgXgRgVDxbd2mTf1XMUEuaGIvn1NP3/94LSpANPdN5jgJjjA3YmbOPKJ2viRVjm1c6rHNki1fpg2bNJz1L4UeJw4pXUT54meD5nxGdZsLsuSTAGM6HOXyUtz761zZbXh3zxS2QeauG3NiNIgnIBJDTqrO3u163asgMwQ7AxwQkscg8PgaI+yvUz4qIJ+hiWr4LjXX0EODiIq8jgwuK4OLJoKB/2yXSdd1pbaSPGjk/szXC7Phr6FWCwiSe3eaS2HfDIsx9X81/FWgEWZ+WSsIdv+1WWXWeWiSMSpt1Rhia/Ed/fyiZmqxanIio+Ph3hNdBaI1qlo9vzohj19aH0mJhyQYYiAJYmYzdx0nUfc5R/XnvbMb9+Ntyy/NqnVQ/fP5bPmfAu96PsuuJH1wIdSwK948U4Q+v9RDDmyYvnmC4x1GugO5t/MQsWGcDfWdn0WTW/hTW+HA31U
76AMJIHv0UV0NVR/X4O67oNt9rv/S5Bwe3DFiwA6QNHvNebKZFWhmU5CP+SxUdqSX0klPgsu7PRtNjtoOPIy66MaxpsJtAkPwX8PNIiuSZo7l5kddqeot/JI0anRozmKff71PzMzNJ6WMWi+GO2jz5zb4cN4wysNKjuaeTDdMYbTnm5MFFJuyCdzE47pRmzphek6cb+7PZPXBWTxaWqIJSBfLy1OLc7H85bGd/SX+d/jHTgb0ZCTyIdGmBcMo0ueGo9IXCYW44iL0lWHP1KF51OI08BHRrxsd3Wv9y1InQfxZ0RmVX9gzjYy7UREox5kqFZvxT+nN6ls3O0SLY2Th4+CkI1KCqSDwrH9ZCQwAtmKqA5BbkAenp2ETrTL+l5qiT7TAFNvH3HxWf3h3IqUGCBFbai8kzzvM9RdCZu62+ovi7px3afFEM6owsafPJu0AOnSUPKYYBXX+pRruuqPcTb+22UThc+Pv2jLfiRUgC+rQNlIhexYc1Dp0JIVRVd33TbI6afPd/7ptEiIeL+/6XoOmvMeloAE6hDolnI4BOX4fhsD4OKJZFTn/eowxu7DSmLel3Pu9fZ8afRXijWKfYqLNwdlg/vepFtITz1zU5cFD76j9XPbp1KaLdsgGbFNBSXAREKI/b6vzB+vB6QBOf/2YddY64Bs2qDT0knkfxo4xo7gZJ2ZGt+iGlxaWOepzngITw21UHA8njIHbloKfboCIkdRrmr4/Lxy2skMvz7nyT9E75iL6FbmTRJ1p0sze0nBSSFnya/uCP9jYNP8QQxq71gT/KXVk28GXRWHdVjE0/9Gydd2K9xuwj3h6/jkW2hHUHLh43TsGl3AV9hg1qHYDO88D6mPDXC4PqdmIGfX8dZKfqXdnyZIMYLkB772YzAFpbI2XO4CXYnW4admnQhP0N2e2TTi7LX5QCt79dUpXyYaWqp8XvSuIuIKKi4ilKgaaBzOcrWaEeMJdiJVLlxclneeh9Eorkjb8dWjARCs7/7oPKE8g0/q1aUtZCiZKrN+RkT82u677B6er3Jvj/oic3RhodhZkNMNU8L0/cMzFVsGa9aN0j0SaE0GXmajgsOHv9p3iiMYodEZjrIbmxOf0zJfJWTM83DR805zO3TixiOQ5HI42i/xNZ0Pz1zmUo/XDWn4n8wNzOb6ku2JIQCwg0n1GRPQx1awHlaj6CAgbjQxo0bic/uHZ4vsLnHFrI1dBU67bAENEfi1T9XTuByRisoqcDhQ/lvddDjK7kJHUCt0wL/KleKzjYce1G8l4G9jncWb+Z6B5/Vd4cQwYv6Lo3Gb47wiUODa+eHQRZJ3n3hl/aOt1aEmBvWDYhZ2Wg8DxmOlPnmOOpKhGMT9nXP4lo+ogbEdAH8uc1i92WhwoCoXHD58p/CC2ifDItCPEHBjZixSykOaK3aOjZhjAi10JxJ52CzborBOz0ugb7ePjfyEWTL3N5cX8CpM2QXj2PoavjPdX5yqXnlpyjwX5/dXkdRXmsea/0GHANmyaN5otJrX9rg5t03K4HxKIxAgWy6n412XIFwrShGOZcFA8SgeEAAc5FE2Cwu4SLzrgUfKvqaabAw/aKuGMicUerpRjKbDw7gHQHEToSruiITZ9aN1StA3sV3UXfOYDZSSnr/ZUssJeKHKYt552cHBXr3+tEjnFFl39ykku1fuXp1jZE+f0qthlTN/8ey4zsvWsdwKOdxNw1voiwaISQSlzrP7QUhMXBHO9W5sNu2jXzZvS7mnHwQDR4yU7LS1XXh+y70ea3y6Y8zyBXg7NP4beQpfQ41xjHhGe2eMmNF7/Z6LYQ1RYd68TD5VuhgHz5t+ezy9N6rG/QmEMe+EYaPJUYMXViISpK7BVJPGiXLlpPGbWLjw7LxTEaXmHzMu2QXuKtYcmHavYD38enjFYJ/kq9j2yZv5IJfkzFbgrl54+D1ptLckezd9QKdGq4ThtD1/rU8Vc8tkaUOTGlV87Ur9D4PuHZO4VwCz0fkqHHW5YBhCOad4ftq5GLwr/i6qzda2Ht5cCh83FgzGPlJwZsVYT9xKIMp75Pt8sKFZbmi+jfmSN+SGidlbBDlHdMQuYJyzyjerG76k3QUXm8e33uSdPopQ+pnBZa/DNFOGl/GAGpfU9p+rUkS/HzMApbemwSqbERsiAvXKE9Ri5s5dRqGJHjgLl7rEczmDFle8Nf/HRSMdNbMB0CNG8ZiDDs1IFuZGGof0yjv+3N2iZimC7YOmC2mwtfLYb8jTO/qV0WGwAW/QvR80CH/YWpdTalNnHgC4/hQ+tv4hWJyLlZifs72i4hy+Ri79r3xSee0+oEioGTm3fTMM5PPvRrC8J9Qm4FXES4rWutuerfy31zBcgtQ6f1Bf9ZXsSxFcOgA3wNYDSX/XXS7fHteVPoHEMNSb2pMdLoyAgjWExefPWAiNPYDZI/oQCpbQDNdcRaiLWJyEQAk2mudDu9QUqK0zhCC2fIyr3YldYeN9e/fZ9PinkcbRgEhsM7T98a2QsChHK/BBV51b0Sbf57pQi51nY9MNH3XUNYS/Hr2LkObrhaRFO8l/2I5Xt53Kk5jA+XyWj6b58sU249iE2dw+w2VzU+kx/ONGn4byeBhc7llFFhe6vMLVYHlgHhchitq3zenR+2KspCSbctcSvv45NQ8+EwNwfzVtXRsOehT2a/HxEUtl7Zro6nG6MtFNLJNeN3ykfg9bqSf7tyGc7zUIoQvcFivqmDk+28WGtMD/PTAbsJx+S4YpHyVcqz9GMQ+seABaquMMrrar3X0Kdd/mG2tizqacG9MVXD8Lbg6eCEWmw33/3/fvoZQMKG3LResqVW9MvJ3jy9WJ/XyGwK2iwO6XD41/8t/Yiq3C4qWo6l4Tg77v9qD93jl0JnSFjwEUBaLuMHbZ/nyOkilqk4vSaRIx+EQcJ32scNEj5EtDVjaw/72ae0EIm1rtu6hciS8VULjeieYVF+zsoA+75TZfn+2GAxk5OqLjz0xNugCm3xmt7MiylXYaM11lGjqIYdv7SSJXSB/ZHYJuqW2PC02ht+qncozJbZb9ki4KaAlzy2Wk8Iwks+HZhKdJETaWliFeS2zOxopphR/JXy5rwExXZlE/RQwdkGw4d/ANRn+K3QD8DoCGcaQkRiDFRnj98HyUvqQY8dTIEX/2l8ms29qy//9w51Dh/gUTiWtBWKu9i5Kax8Pko4jPLW4+oo+njj7f9BeWClsTwrYWmgVBeH2P+LDTzVAEkFD/TpYIpvMO96/I2Jjoc5rh0M4q/luIXOJSG0wgAbesKwRS1luWuaSOvR7c4zPW7XVNnyUwPUCJUiF35JWbH+3O6xr3YR1MIvjFHref0n/2/8r6rSXIcO/fXbEh62A5680jvk8lk0iRfbtB7k/TJXy8iq3unp7u0O9LO7N2VqqOqaQCQOA7nfDgEruNMcW5AXq8FpSkaXc244mHSfTYVg/YvD5OIRkHkvDvnroSHR/QGE+mSBvTLUPE8cr0DQDm2l2GraviKYb83JcKR8UjbTodsTG1yj17pHhWg2aTl1cWxIbjO8op04nAvc69eXs
FFey8Zf8bAiKSHXvy49ygOF72k4KdPBFxmGdvL7PViQab1MqoIxvjzOuAcacyccI6Xte+CaUn+Frjr5eXuiivhq2W4lelCL14qci7gN2gFnkndZQGWUUxy4WBdOd2d2vDYsJbqBWAmm6RnKP8ENOZbYjj4LeWMdbrwwO+LgGwRs7/0YujcGE1VCA/TUfEJZ6DfK3+OqNDlWNI2hnIjH+ngqBBauN2uCYR18+hCUvR4z+kYWCDq0wU/HnaWrFnx7K8ydbG14d7h69JDFeKuIsNjfqILGIwmix4u6aymp9gVJnAp0XiRubixAnPFWtS8xAf5XqYrOV3tGHV46spuQtbb7w8XHYZkZE1ZCZkqUHZ49OUxDtTRtYc5smJ5azF+rajbvj0ImuPcV0ANC3l/cU+MiBnZuqj37rkGAVAJdvMDuFI8ETcsFZXiBN05fpDeSV4ksA27m0XOUdwV3/n48spxIN3G7k9vAnOw7KVmzEfee3zlhsBBA4vfss7Qt7jD533wiE13PNpexQ5Sdyb90uI8RCO0d4cn8mHw6dWIbgiWIMyDDsY20VE9rhS6b7jkEd+4x7a7V56SNh34/s/KM7ELzpsssIpX69pixQGmDPiXIqJg5rxK0BEwtc6Ha0E/DX8l6N5KFTW0e8XY2Fej0hVki6nu74kWtX6KFdBlAiOItYgWZvDj3q2zOQUxiGulId2HIyV4X96OCgXfXvYqfuEnx14kHFWPaLmm9qTq7GVHc0FW4WkJZmd6WdmGiZiyXG9r05Gd2VbDbhwN6a8byBmQ+mcGX8hHc49KjWab8kH6lHK6WQils5veOWahArqqmMfmOVQ2ZFm10IO1j5W7CLigVjSMKr0i3u4KGmLpNe3han15W5xQha+rFY+SHgSmz/fHjAyG5r7cHKcG02DcBQ0L4Add9wCuX+qVxB4Ttr9QedtNb8+ANXteVjUeqIvYTnF3dGGRWMJJnOx6iRqYX5UAf7YcxJziXnQkIsfpwgAEvIaP10iPIXncZ4lxHkDRMwLzrPiUYfz0Y2Zb3HjSU3ZxjVRJSSeJB7DZDEZr53jNkj9Ywxn1Lx6r2PMLmdhX5jW93lHhnhy2qeOUguHNo2/L+7QrGqHHvJqbC901W+jWAzxkhGMmyOMhLSZ67bDtdCqZhe1aS5PDy76yTLL3tIRhhXqSyexYSAXDELBCTay087aio1tqww1Dlal2ZJBDC2EaUNDe7S5RQVvccorDBRinR9l4KY/2leHvo8sI1XbIXPnUr+0dgJewXhSjYr+3CXvJ0qxnAsb1V8wz3t/aKCFl3FYp3V6yEzeBs9p+RHLT8LiDPJwIzazstcA5YwUMo8AgqSVKdvvYUwetilE3sEphiUcULd7kOoYQZLKRQ4lgXmSpMSQwF1Hi+nDJqADty532mibijv5+OP2evj58CixAuGE/YClgF+xmEwCsHaTB3yffmG8GLDeGtmUONTR+q90ZCZaATqQY0BYgQQ282MIC7bs5VBuAkIBWQma4OgqYgW4Jgz3Hiiv93j89wkfd1KZHuaVD6vPVs9zoUe/scS6XOuh53rYOnskb3XKRiLiL2XZ63aD3nFLB7oyi8XtDwhtxiTFaG8BizWWlucOrAYMomCMWO4oKXYxMxOiwbpqoMGdko7ThmALEHybRlCYKgRk4yLIGVhQgnuxUnmtXwVmeXjdHTE5YimWFTMuHuj/P+BDiIAwkNBy9Os82ZGjWKsv6IkDDGaqUZZry4CPRUmS50zffjV1myAjEvWUvR0sYh1QP2TeO07Tca4+FRQ/tnhK+faWPOzJyyY2TFYbljtvarUm5TZoPou/yvTDsEmmTGXm4IAhOcI50+6BTumw+QhpEtHzlkyuklYJ0xlraaw/FVRat6NR7NvEJ3xBNRxYEEIlKbcfnEiqwyRltb3caxoWSBaFnmzmRBHKemoec9UVDFNElzK8Kz3E6GarDHTJim3q69TbuA6Xu8Igb/btmc2rQld52EEHk7TO6T7XEvVgtNumOVgY/1gEuwMI0GtJnCM/VgnSOnuR7hV8Ixh8B0hshwJenh6Q85/Z8H1HCx6seXNfDnkvpdDNo/e0bhao1MKzCPE3oiZDnmI7TIIIM35/vmwFd56rV27uxcRnVh41zpOJWS2vNfVBG0Tpi9hIJhkEUel/jGNgpV7QflcWy4FtScVtEwjrD5W6ztDvwJ+DaLkBeDWtnQlbxVT0np5XXfT5GNl2LR8njTgcFfC7G6K8+Jb1NA8GlcoaGMqRfxtPdeW+dw8aQf5+i8/1vilLsJktHUJuERixvHncHufd6/EEbtSCVTZ+7YDGCq7qM7dqZ95VH6pBpcsqmjmhbXqyNP3kbkcybuNKdzdoczzD7ezk5EYyN0dGcgzcLHYefbFsrXtJW2RjDpbj1jHcfoc+VxXS9JeHbwdM+6Go+kgGgIVNYbjPYrZrlGzEZl0VEKuakU3lvlbE3h7a1nC7QtIi6cfe3n4o1HIjHy6stWfsoq3i1eIvliJrmMtD9NhOWcefTWGxcZi13AwwFRDp3/rXmmZM1HNcqHKFH8+qyAO4e/ZYmcFax8k4TXpxWXa1t3e9cPQTYq7ephaRn1MPsogScYe+9vEcpzeunvTFI4bkQ+KtlsZIDnjs8SguLWFRQ3rA7d8YQUbtg9cPKAViyM8iQoqgB9s5ln1XbSlPB2lDaM/14AXb+4bBh07L22jLB3qQEfMTUjVLeZvhRhANYHY2lYP0Uj/dCcq/DTeijkCyhf509X4CgLQS/LksNKakzjkhOVURRzYoEZESQ6/wyzOmByWNFSoM3tEslIcxNvAg3PuCvzxBvD1nDLLfwwCSXrbYDSIpbr2FHpPvbYrCC/Kwe03gNhrRGQ+a96p/vzmz80HlVCBTpURD8DMRb79kwqIchki9QAcYdosXHtK8EJc6Z00Kwk1dFZD/Bz9Pn2ckJQl1HVuLEEwImULRHRDMQD1V248x52cpPYiD8nbkznooJp5VnBCdlHZLeaTO61vvenT7oPXnqlwujQIagtvyaBy0hR5YLjeUlDPeAnN+LP899dIPq/M2V220Q7wshl7JEj/CkgXkhbCdXqjrfo2E4LDtEcnEPfizi2mhPz8iMikC/JnkcrZD81nVOUhZqeG/BGY7XxSyQKrrqLeebj5ZAHdS1OEeqGBSoZgO+78ps83YPRtW3/PXmmtqAtnx+e1nWyWqgGUFJppBP0MBQFaQUs6Q/c+4ZaDGX3IOkF8s4GcV37Xq5e5Mi1MdSOwD4b4er329G1Ha2Zb/bYkr51aTIZTklb4sxSsBMybNDv/ak/pJz1y4BgnfRXjrnN1bsBxo0aj7qoZcVikrH+pC9enTIeSCbQZ9W56VxGxNgyugaQxwJbOjUClfKikax2QqBYbolqPeCJoHtPB5Y6tSXpekzpEQoYOQq3h3r0vLe76fkAZih5K7gK1XxbkP9ikvBU8DNZ3s34+1+Tzgqku+GiDmlFORNs9T3ioEwFzOVRFOfMpPiNnBRrDEEa7OezLryIGdEvtLUQyEFJc83xvLs7L3OqzVnlaEjpH25whMh9
mP9rHPT7LFzxDAd3XIivOfainjEE5izzNgqi/qn0z40QZb4OHHZfQ4cAM+7PReuF8a4sGZeCG+7op0u2PmMsTqEGvUvlS7Bx9WED84MnENRY3ybY/0BK5LjmFfpoRMX6ZKzWXVYA7qicx5wJXPrkEJWEUZojgscAzVkr9oaaIlpEvfxOXJvTRYc3VmQ+T3n7xiIPUWucYGDXQsewzC7W4VOhfSii45/yEnNOhhtqNgWFpCCcOjDRNsN5CPCvb0lGosyK44lEXITnEq65/wkaTf4rihJod8Q/ukL+GLj4yMb9PJD74zwPU94dhVBLp4XZk+poe4GjpFOHD67chRVFea5S3PIutYmKYov6ntfIUqWsq2oT2/hcN67oY7rdetS9XBY0rzx7/TKzMAJoTiyDkfQu1Ju4xbUoXtT6PJ5yTwrZ6cmYcnIBG6/CM8EinMu3r8NLCsLoSuhyYSLw1SDxdHP2EhRn8ujFqbGMZTct3Yq97SXrAzkFtzvOB5oiyU8ct7CWg5zIfzFHJOwX4qp6JQk1ZxpCO/C5qCSV4hKa+opVtdKL7ZO7Ng5Qio3osgLdHlg46RagbTBKqVBvIQvJjQ7xcmtHPyWnCbUmyVYN0YAXt6pl5zCvO0PK5y/YDz7WtZ4m8bzF2heXr5/z3GHedcFUvauC9jwtS7bMx91Tz+Qfdc9TZL9tS6Q/du77oGRFMcw/J++rdFFtw3hNBfmqTDgR2jEe20vVstxf0JP3p78gobTge3mP4FUCvAlOLi0puOc7t9dQoU/oVy7S2nfpjPYkBD6eheBoC84Tv/yQ3608Pq4/eezwMeFrUzm4uMiCX3BsI+rRVrmxddnE1+rhtPHef6Xh72XTXm/AgAqdy5tmm9v9D5GoDL5qAMhl8659J7J7kmxMbAkdfif8a8PW8PmHD3f5T4uTPNpNj8uTOfICw6TcA6nuR/PY3Yryjm1hzAGN7YxHM5rxdyez+bh83Cax75Oub7px3cbKIrSdJb95Y73tccn9disbJpvJbu+A61nfTfb5QHahsnfiRkU9GvqwxD8BcF/YsA3nnxPfRT5gv9R9Md/O/3LNswBdd7/M9OQxuD9oPNK+O0kK/f0fBwL6FLGYaOHUdpc+6mcy74770f9PPftdwWYpszBjbkfPmNblkU4Cf3ENhj9gW9nUej98wnzvr72W3z+hDIfp2ewC7Yl50qXNW8bpEl5D3TwYjuF4OTnkQH+8ArHPM7/udnTiBQU4HxW8XzjPCKF84+5M5KrbVgEapROI1juDUMWNMmQwsV3VKbuybAotsVatZ33pWJzjW2XjX0TLKgRWcEu69Lpe0xzoWC/ebd7bWlOrwiC3dlnoUEpVbEsrBvnOATnkWNZuHt3c4b7aQPbh2Wpej69SGCJuyWbuvkVgwQHCqePo+sAvp2J+64Z9xZPA/IMD96XCsoSbr/+Vycqfq89iwtQhltYf+NfW88pt/yWnoZLljd1u1RgHzePG8s600iG2bjX4S23YKLEm+OKClIgG6MrIabnQqJVylBCctSJTyM2YXdy6mA8404lj2Sl47NkuYncc+JQKS9MHbgLL2G6kDPEr8w4sfqDyexheQVrpVuh/AaUzcWMQ4zdNpAYdQ7qGDPeH6qm8td0Y45iz1sL6UhF4HkXl5eUyWaQkaOqDXGwh4RN+wgEm00RTOj6iFq7Ltddjw9EwaekMzC7Czx9GUg6F64KCr5fYqWw9cvb/V5NSlW7pHzwhwImbeAUEBx1b4it+pNy3bsHIffGqN/RK1tV1/d6a3ZpjZecv/O9WvFzMZFkFyTjEK1JTLuPubYseoNh+U48OToA8/2bZoY0+RyeqgQlm9zImy8H6JO6L+aYZ6DNIS1vM3nUWpUnTkCZV1Mfu4XoUzsxxosIHEl5DcM5izmAbMV7cY6C2XsVD/sy3opzoIJnMGuR+xRIsCyYDW/5+rXO+nXjDHMrzdTj5GdN2DPbY2FG2Sk8l/ghnFIMQK+CrvVekPOgX9h4JtNh0wyzHULeyqhrgyBckjbPwcOS+UK95IckabXaXLJJevmapl59nh9bjS8W54LcEMULdH1vvi5yBxb7B6L7yLxp8Dj75Tu3KGBzRxOuSO8dUJaYj+Vmrs97tCp83pLqdAUV6NaiaDQ3Exe/6R5jwP7rCAgMcCjK+v58MHVfOXxk/CqCcTvaDUjAkBXplklZoWaUxXJ8DqejWxWYoxhcYGWbHK++57oTSrniO6uiX70YpWRUIHf4/ZpcU/pkrLP+zlLPTYcZwniDP8f63py6NM1sDuDnPB8HzYfk5Mxpbtf1g0rYOq2sqzglyX1NzRdJHwt0XW6UA/jEwJfrCCYsREsXQXJDHw3avJvHM22QQybk8++pcsG6YWh4sIpKCzK33YTWMQko1ugz4gLjwHtnmANNbtVpSZ43AI7PABDWQi4vnWsUqaIZkSNdrYTFLOaaU6CG09sqn2jWTY5CJu7JwYpt/+JsPk0yy4QSmlgFmsUPw5XQ/bEd7STiuQOEDQDDhY9LDoI7hHBR6fBkxSaBrviMOm8T5Htw5DAZyI8QEfjJFTgEGH/BIH5H6UOGCZG7K4W1gdxrAIGxcqRlJFJdVhRWE+fqwhjoAVrVkG3nLFts8B3XMHLuc0njCDA5BrR7xOkXcpGvy+OKVEKesIcRyheyoe9XyZYCHr7fWwPI29A/jmJ4rzzUZWd5xscA5Tg+ssCkaW7UTobJGCh6vKmTXyj1CqYXDtLQlmim24wCNfIoKCscbCgQjYNnJxRZ0YUWA4hhi3BYJj0RgbLeUinsIykmyKm1pt5LARJVqLsrEd8vWVeBCZenkygJ+arqyLcNuJO7AELh2RGhKeWOVrtgJ0WCm1RkFbkCRR6J9JXZ3HtPGLVLgJ1U20dq1iSCbCVS8Kie3fjpZiPmK08jp3uv37Rxo1HCtX8P8ejJTx68nL0lAVBaU/xrjqzjPll9g0njSkNX4zFJLvMQX9o5cna6jMaDM2SFdJD5HCIw17APqKGeaDXMjEGxnV5Au49Q+AYLLG7JflAQs0GFIPaG4vVa8jE0SoG3ZVNmpyjZGiag1bw5Ald23D0eUgBdHt1+xMGjz+jD97EZP/gnGmjjLcVaV+fpWV4eRADCc2bF3KJiXm6fJc46L113eCOK2cRtg4NsfV2XptVoy3ouALJ8zDrcMaoGyGDarMIegs5esxBkl7cgdsShi9iZ5p1cPYoEK1KIhML4fhmsTteApPdyoAqkki96pUxZ7VE3hrwUY9r0EmEf8/MZO6905PlSb0GsTTCz4qJIYCJD2sTGADULYwLbpLauqxoC2xp2Vj7qzPM169VOwpQGgo0IMqGhIE374dehotPjqXXs9UEScjJY7l1NdhBGhMfFFTDsQXSk7Orc2mX2pRvXPkEa1rWa7v0FqCZgykvSDpZfYQH1/K57tjTynlTYAqBSvrA06uNuj04GRMowmWOtO4x9DPcLQAWrkqgy5S7RNhogPEU6aXCdhqylDuumv7cCIDlAp2B1weTAg6DS2tdGSOyR7WVHHiRGD5Fi9FE8naG0lgN55NGc
3GfkHF0ims+l4wxV9fCdhk1fnSpc3bJoKTlKzC6p8VL0gnJSZ+yFtM5LkDzzvU4I817tKO1TeM8xDNXVlX/vahbztOQAU8DLebcTrww/TPwG3GQ6ktAjjaYDoMhSwTjUM2L2ZqBSYNH70zTn4+s+RZniCUzWs/RRTA1WmnR0z6zgvQe1S+IsYVbMGJFXSFzl1+PqFs0zM4ormANREkrxLkWBLfS0Etc7jV7EZvS74VAclaWrYLYrss7QACXfS9MYSfXWixE4QJx5D0L1AmCkSodF7HlHqBtJ0FGEXyQ505jlsQTOfggtiQ46BnQd9Iqk1UpRkNBr4hcMcljVCUymAlqiT4ST8jNASU57y3ph1++pBbTAedicwd9ESm4SXwetECSyBfIeYXtasYc2GTCAihcgQPfnrZix2ttNZezVRqrM2veoMr7pTTSE1lFYk2XoiwMLpAc51gtgpOyKZ1Oy4/fZmRwJAgp+dxnveJ2ebArn9kOvhNPqyoFkX+p2bUqWuTaiaxMr8Yoxc+fsxYt3jRkkj0/1NjKYyW8Td3X969MqQmrKkZl8+OQ1Iy7IA6Zw++mZ+nMX4BDi9gSH7RmblSdWJuLLGGGRRS0tfV5hHTGfr77RHskhF0dF8ExsRyAnj2pNk0Hs67Ph7JmT5SPcx1BAUtV86R0TyfPpudTjDcZl49bDmmY3S6wO9Fph89OrO5vtAenprvGgYXjCQm8nxwvA8xaWBDanEq9LtERXrusXzIGdjvSZ+o3IXBWiR8JHklJDbBK8rjmlOscl222+eO18dWhOilQs+LxFx2buebuZx9U7XTPe9YJZWES4pie7nMZVqCLSwG+x/JrvYws5W7KZr4yNwSicxG2Lyl7Z+tp9AY81gICcavaELutgIq+kD4jVmx+u7Q6LALKFkzx8Oi/89GAEOcIo2TWoQpTa8bXX16ro8sD17oMk8FhM+XfifGS/1fXwnFTEeDg3V/Xm0CY9ywBuTWlJUHNrBgMYbGXDB8iqRbcx95VxUuE0hFKnhZUmsfGBy7Y2vHzlVTTmU66byK1MGIW5zr/onnWEd+P+AlOYtnZzJWeXYmOitOfzNmvefj6Ne6eEug5MzNfnEQLWBuY25hXFXrUIpNuIuUw5oINCyr13IsfOoY5OLrMI++Gy248reMlB2/0KuzoX8Bmd6KLMFtbZflMDzI9l9mJ5Mms0utE/6pGYmXnHRC7Q/IDZVd+Nt42eyjwkXJPPF+0o69pNAJx/DbHiEsD27ZI1lYXpZY4Nan3ZxE0Fmf2GPssDZ1WFvy0MBrL5Ocu4fFDRPsM+kd1s+Bk3HIZfjcyglRq6PHjl1pQOdi97TFXGQsh9uVRUvp+KF3i7qhatWMjrApps2YgdmVVd/mBvlME+GIV4Pp+OK+NWbjLE2J7H52gdz7VjVr35DNrOXXtF3jfRjU8/+jquIeNtYBAcRhsI0NJ824cQ2GIW3QB9IekcxLKgylvESFOBFnW5hfPlZrcGBhZwhX2hbuNM3fPYXHEmfkJZK2C5aYbMDHtU3cWdcEYiAcUuYojW6G0wLl3kb5nklU2oYdLGncdS2Xgaxvgx2ZkZ64e9d8bNvhSoqdUbT6dswLKfRcsmrBEbhpafVfWeOR1GXiFmrVGGukWwV8JgvLffYme8oQVegViNm8saG+wSSevcVIncQV5Nzj9Z85D4x67db+JSxxfClG5ADBlgrkdJ85SlZ2TjoZ7dtlAHa2tihR7bXTJfNrY6dU7Tpbiq9zxlAPA7cpqvXNt+N1mMJWCXctob3Upy8DTkXRDLDhvMc+DBuZg/mE6oS6ctx1LGoa1DDbV83rZ1mh4dHzNwOCKaZcDFdUb0kKR7qHLy3lfyuyw7eyvgop0q3dYSAKpm4fD50m7G2FtGtVDbRjHlEElBrC5FBLotbUTOTkW+GK1apKut1UZhlWaEiakYeM49zGbnsLSqpbbTzIp2m/hAhThsXDhrevbM9Xgo1fPUiFOyXy0IGvP3/XxuLpqoamhQ0BIkn1apYxXP9Un4YG0yGlRXDekYc1eNzwUGtAmCd4ba79TTI+yBCcJMQwtvs8USVrExFaKzvjYFaYvnz9iVORVU6Azxjgie9ugVdTKLhCBz7tTVl6hmjzUgeW7Cbmr0OgqHGhbG8sKeX5hH7PqcouvxpUYu/o3R+6vA3H5ESITbyfErBqQ/yiUQrqqhYRpv6NZ2XPOm4dxDUQAY9ccAhvTneOFfsMHvEUPkD0QMyb+NGH6HxP4EDf4a4/sKuE5FmPTbNzTxKyYYn8RLx/8SLPwB/0NZihfhj7becGW752M4FF/yeEC+rGW6/b+mnObfgTMo/BOU+xdyf88Z8gv6CZSOol/IP4w31Ce8IZrzyWxSng4dkYND8stZ5NLPZQY6lc5xMqXj2f1Pis49eM1haEDJuG/bcp5PLn4teL7jd2U/qX7SeCzT6a+X//HqD7J0MmX+Nbj/N+WjLZMEVGfHdCqPMHo3BSRr6MtufhMdZ/+EA0UNl7mfvoLFP2DH2O+jx3/GflRkgGj/KCww9gnuT0B/kJwQ8G+REwrICXMy/81C6Adm/iglhstxb86BCZqvZaLxbzH7p7bC7hQvKArjOu3+hqD9bxccAse/4OgPsoN/IjufzRn9cbLz2YzdT2ykgezkt+tbJJY4Tqfp7xCKkx1D303/Dbn63yYLGI1/wfBfywJF/EZZwH4HWeiwlw+betsLKBbFZNB1afDn3zB5eLZSDtNvmLL9nmrEz8N7gqdUgn02R0ghEUoQvxOdUfQHY01RnxCZ/oTI1B9F5J9ttZ6GCRiuz0E8Sf95ZJ3+dv71XeDfgyEI/aPU4z+7WjD62az5H8UQ4h8n9VmWIXH8mdQnRETgv5PUwyj9haR+TWb6k+SQP8xJ+ZTMyE9kFvum6bd/Qskn/gDJh3HoC/T9zw+eAA19wqDP9AD5oxj0WbDxL60H5/0vMPZrMqOf2f9/qB7Q/7f14Ay3v6DED0z5zPP5h8r+Nwjgv/KCv2MH8Vz6+Sth/vxBOOYsAGPD/svNH/1ZUPh/3gr6b//2jt2mGgRuQ/oWlRsDvkiFOrAw6Hdu88eTfrND/v+3Y//ev3EKqJjn4T/O/9+R55ieIWoczum/aq/mMeymMH7nyJ1yB/7MpxSDjnqM/n3Q8/e+52+kzD+LPfmdoifsB58e+wyAof6RNh3+2aj/PHZ2CTOOb1Q0bsJpKuO/PmQCfOblfyXz++QBTgDa+PWc37+/y7++ne3l7H9tFBw/vrv+SxVw8q3Gz0m0STgVAN59N/JfsmzqlzH+2j2fwziks4a8y/ekLMsImbs/f7PXczjm6fzXCn5tMU3y9K+KwHcsxj/h8LdrY9qEc7mmv3rdz9j+9QlXIMbfhSkk8WsZQ3+MuT+6/rXaL9LzSUs/oMsI/UNLH7T5qaW3HP6l43+H2/03RravNgJnmenVxWfBfkjH8KvtCrP5bZ8JoNr/c7yH67usHNvpwxCCv99byN+KJILqv5jC79HrX6zj/1E4if7BoSJh9GeLSP5BFjF9onwk40kslYldPnv0KvmfoBx
/Tep+ka6/KQb2WR5IAts3M38WgM7Q4u8BqN9ynpRT/abWSSwEOk1HCmaWfmML70T003Sd/AI8gSGo/XvA0X9P+nTq/g3YkKjpQeLhh9L8xurbWALP6RSoufiPf2WN+En8P1GSvwa2kz+B7ehvDbFh+L+vFufp2AMz9IvxBtOVxumegxL/CQ==
\ No newline at end of file
diff --git a/Documentation/etcd-internals/diagrams/write_workflow_leader.png b/Documentation/etcd-internals/diagrams/write_workflow_leader.png
new file mode 100644
index 00000000000..0c4a6371161
Binary files /dev/null and b/Documentation/etcd-internals/diagrams/write_workflow_leader.png differ
diff --git a/Documentation/postmortems/v3.5-data-inconsistency.md b/Documentation/postmortems/v3.5-data-inconsistency.md
new file mode 100644
index 00000000000..27e15500edf
--- /dev/null
+++ b/Documentation/postmortems/v3.5-data-inconsistency.md
@@ -0,0 +1,142 @@
+# v3.5 data inconsistency postmortem
+
+| | |
+|---------|------------|
+| Authors | serathius@ |
+| Date | 2022-04-20 |
+| Status | published |
+
+## Summary
+
+| | |
+|---------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Summary | A code refactor in v3.5.0 resulted in the consistent index not being saved atomically. An untimely crash could therefore leave committed transactions unreflected on some members. |
+| Impact  | No user reported problems in production, as triggering the issue required frequent crashes; however, the issue was critical enough to motivate a public statement. The main impact comes from losing user trust in etcd reliability. |
+
+## Background
+
+etcd v3 state is preserved on disk in two forms: the write-ahead log (WAL) and the database state (DB).
+etcd v3.5 also still maintains v2 state, however it is deprecated and not relevant to the issue in this postmortem.
+
+The WAL stores the history of changes to etcd state, while the database represents the state at a single point in time.
+To know which point of history the database represents, it stores the consistent index (CI),
+a special metadata field that points to the last WAL entry the database has seen.
+
+When etcd updates the database state, it replays entries from the WAL and updates the consistent index to point to the new entry.
+This operation is required to be [atomic](https://en.wikipedia.org/wiki/Atomic_commit).
+A partial failure would mean that the database and the WAL no longer match, so some entries would either be skipped (if only the CI is updated) or executed twice (if only the changes are applied).
+This is especially important for a distributed system like etcd, where there are multiple cluster members, each applying the WAL entries to their database.
+Correctness of the system depends on the assumption that every member of the cluster, while replaying WAL entries, will reach the same state.
+
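+As an illustration of the atomicity requirement, below is a minimal sketch (not etcd's actual code) that persists a key-value change and the consistent index in a single transaction of [bbolt](https://github.com/etcd-io/bbolt), the backend database etcd uses. The bucket and key names are assumptions made for the example.
+
+```go
+package main
+
+import (
+    "encoding/binary"
+    "log"
+
+    bolt "go.etcd.io/bbolt"
+)
+
+// applyEntry persists a key-value change together with the consistent index
+// in one transaction, so a crash can never leave one without the other.
+func applyEntry(db *bolt.DB, index uint64, key, value []byte) error {
+    return db.Update(func(tx *bolt.Tx) error {
+        kv, err := tx.CreateBucketIfNotExists([]byte("key"))
+        if err != nil {
+            return err
+        }
+        if err := kv.Put(key, value); err != nil {
+            return err
+        }
+        meta, err := tx.CreateBucketIfNotExists([]byte("meta"))
+        if err != nil {
+            return err
+        }
+        ci := make([]byte, 8)
+        binary.BigEndian.PutUint64(ci, index)
+        // Both writes commit together or not at all.
+        return meta.Put([]byte("consistent_index"), ci)
+    })
+}
+
+func main() {
+    db, err := bolt.Open("demo.db", 0o600, nil)
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer db.Close()
+    if err := applyEntry(db, 42, []byte("foo"), []byte("bar")); err != nil {
+        log.Fatal(err)
+    }
+}
+```
+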
+## Root cause
+
+To simplify managing the consistent index, etcd introduced backend hooks in https://github.com/etcd-io/etcd/pull/12855.
+The goal was to ensure that the consistent index is always updated, by automatically triggering the update during commit.
+The implementation was as follows: before applying the WAL entries, etcd updated the in-memory value of the consistent index.
+As part of the transaction commit process, a database hook would read the value of the consistent index and store it in the database.
+
+The problem is that the in-memory value of the consistent index is shared, and there might be other in-flight transactions apart from the serial WAL apply flow.
+Consider the following scenario:
+1. The etcd server starts an apply workflow and sets the new consistent index value.
+2. The periodic commit is triggered; it executes the backend hook and saves the consistent index set by the apply workflow.
+3. The etcd server finishes the apply workflow, saving the new changes and the same consistent index value again.
+
+Between the second and third step there is a very small window where the consistent index is increased without the corresponding WAL entry having been applied.
+
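+The race is easier to see in a simplified model. The sketch below is illustrative Go, not etcd's implementation; the `backend` type and its fields are invented for the example, and a real periodic commit runs on a timer rather than being called inline.
+
+```go
+package main
+
+import (
+    "fmt"
+    "sync"
+    "sync/atomic"
+)
+
+type backend struct {
+    mu              sync.Mutex
+    consistentIndex atomic.Uint64 // shared in-memory consistent index (CI)
+    savedIndex      uint64        // CI value persisted in the database
+    savedChanges    uint64        // last entry whose changes were persisted
+}
+
+// commit models the periodic commit: the hook persists whatever CI value is
+// currently in memory, even if the matching apply has not finished yet.
+func (b *backend) commit() {
+    b.mu.Lock()
+    defer b.mu.Unlock()
+    b.savedIndex = b.consistentIndex.Load()
+}
+
+// apply models the serial WAL apply workflow for entry i.
+func (b *backend) apply(i uint64) {
+    b.consistentIndex.Store(i) // step 1: set the shared in-memory CI first
+    b.mu.Lock()
+    b.savedChanges = i // step 3: persist the changes together with the CI
+    b.savedIndex = i
+    b.mu.Unlock()
+}
+
+func main() {
+    b := &backend{}
+    b.apply(1) // a clean apply: changes and CI are persisted together
+
+    // Simulate the crash window: the CI for entry 2 is already set in memory,
+    // the periodic commit persists it (step 2), and the process dies before
+    // the apply workflow writes the changes for entry 2.
+    b.consistentIndex.Store(2)
+    b.commit()
+    fmt.Printf("savedIndex=%d savedChanges=%d (mismatch = entry 2 skipped on restart)\n",
+        b.savedIndex, b.savedChanges)
+}
+```
+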
+## Trigger
+
+If etcd crashes after the consistent index is saved, but before the apply workflow finishes, the result is data inconsistency.
+When recovering the data, etcd skips executing the changes from the failed apply workflow, assuming they have already been executed.
+
+This matches the issue reports and the code used to reproduce the issue, where the trigger was etcd crashing under high request load.
+etcd v3.5.0 was released with a bug (https://github.com/etcd-io/etcd/pull/13505) that could cause etcd to crash; it was fixed in v3.5.1.
+Apart from that, all reports described etcd running under high memory pressure, causing it to go out of memory from time to time.
+The reproduction ran etcd under high stress and randomly killed one of the members using the SIGKILL signal (immediate, unrecoverable process death).
+
+## Detection
+
+For a single-member cluster the issue is totally undetectable.
+There is no mechanism or tool for verifying that the state of the database matches the WAL.
+
+In a cluster with multiple members, the member that crashed will be missing the changes from the failed apply workflow.
+This means that it will have a different database state and will return a different hash via the `HashKV` gRPC call.
+
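+In practice, member hashes can also be compared manually with `etcdctl`; for example (the endpoint URLs below are placeholders):
+
+```console
+$ etcdctl endpoint hashkv \
+    --endpoints=http://m1:2379,http://m2:2379,http://m3:2379
+# a member whose hash differs from the others is the corruption suspect
+```
+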
+There is an automatic mechanism to detect data inconsistency.
+It can be executed during etcd start via `--experimental-initial-corrupt-check` and periodically via `--experimental-corrupt-check-time`.
+Both checks, however, have a flaw: they depend on the `HashKV` gRPC method, which might fail, causing the check to pass.
+
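+For example, both checks can be enabled when starting a member, in addition to its usual flags; the 5 minute interval below is an arbitrary illustration, not a recommendation:
+
+```console
+$ etcd --experimental-initial-corrupt-check=true \
+       --experimental-corrupt-check-time=5m
+```
+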
+In a multi-member etcd cluster, each member can run with different performance and be at a different stage of applying the WAL.
+Comparing database hashes between multiple etcd members requires all hashes to be calculated at the same point of history.
+This is done by requesting the hash for the same `revision` (version of the key-value store).
+However, it will not work if the provided revision is not available on all the members.
+This can happen on very slow members, or in cases where corruption has led revision numbers to diverge.
+
+This means that for this issue, the corruption check is only reliable during etcd start, just after etcd crashes.
+
+## Impact
+
+We are not aware of any cases of users reporting data corruption in a production environment.
+
+However, the issue was critical enough to motivate a public statement.
+The main impact comes from losing user trust in etcd reliability.
+
+## Lessons learned
+
+### What went well
+
+* Multiple maintainers were able to work effectively on reproducing and fixing the issue. As they are in different timezones, there was always someone working on the issue.
+* When fixing the main data inconsistency we have found multiple other edge cases that could lead to data corruption (https://github.com/etcd-io/etcd/issues/13514, https://github.com/etcd-io/etcd/issues/13922, https://github.com/etcd-io/etcd/issues/13937).
+
+### What went wrong
+
+* No users enable data corruption detection, as it is still an experimental feature (introduced in v3.3). All reported cases were detected manually, making the issue almost impossible to reproduce.
+* etcd has functional tests designed to detect such problems, however they are unmaintained, flaky, and missing crucial scenarios.
+* The etcd v3.5 release was not qualified as comprehensively as previous ones. Older maintainers ran a manual qualification process that is no longer known or executed.
+* The etcd apply code is so complicated that fixing the data inconsistency took almost 2 weeks and multiple tries. The fix ended up so complicated that we needed to develop automatic validation for it (https://github.com/etcd-io/etcd/pull/13885).
+* etcd v3.5 was recommended for production without enough insight into production adoption. The production-ready recommendation was based on some internal feedback; getting diverse usage requires early adopters, but users hold off until someone else discovers the issues.
+
+### Where we got lucky
+
+* We reproduced the issue using the etcd functional tests only because of an unusual partition setup on a workstation. Functional tests store etcd data under `/tmp`, which is usually mounted as an in-memory filesystem. The problem was reproduced only because one of the maintainers had `/tmp` mounted on a standard disk.
+
+## Action items
+
+Action items should directly address items listed in lessons learned.
+We should double down on things that went well, fix things that went wrong, and stop depending on luck.
+
+Actions fall under three types, and we should have at least one item per type. Types:
+* Prevent - Prevent similar issues from occurring. In this case, what testing we should introduce to find data inconsistency issues before release, preventing the publication of a broken release.
+* Detect - Be more effective in detecting when similar issues occur. In this case, improve the mechanism for detecting data inconsistency so users are automatically informed.
+* Mitigate - Reduce time to recovery for users. In this case, how we ensure that users are able to quickly fix data inconsistency.
+
+Actions should not be restricted to fixing the immediate issues; they should also propose long-term strategic improvements.
+To reflect this, action items have an assigned priority:
+* P0 - Critical for reliability of the v3.5 release. Should be prioritized over all other work and backported to v3.5.
+* P1 - Important for long term success of the project. Blocks the v3.6 release.
+* P2 - Stretch goals that would be nice to have for v3.6, however they should not be blocking.
+
+| Action Item | Type | Priority | Bug | Status |
+|-------------------------------------------------------------------------------------|----------|----------|----------------------------------------------|--------|
+| etcd testing can reproduce historical data inconsistency issues | Prevent | P0 | https://github.com/etcd-io/etcd/issues/14045 | DONE |
+| etcd detects data corruption by default | Detect | P0 | https://github.com/etcd-io/etcd/issues/14039 | DONE |
+| etcd testing is high quality, easy to maintain and expand | Prevent | P1 | https://github.com/etcd-io/etcd/issues/13637 | |
+| etcd apply code should be easy to understand and validate correctness | Prevent | P1 | | |
+| Critical etcd features are not abandoned when contributors move on | Prevent | P1 | https://github.com/etcd-io/etcd/issues/13775 | DONE |
+| etcd is continuously qualified with failure injection | Prevent | P1 | https://github.com/etcd-io/etcd/pull/14911 | DONE |
+| etcd can reliably detect data corruption (hash is linearizable) | Detect | P1 | | |
+| etcd checks consistency of snapshots sent between leader and followers | Detect | P1 | https://github.com/etcd-io/etcd/issues/13973 | DONE |
+| etcd recovery from data inconsistency procedures are documented and tested | Mitigate | P1 | | |
+| etcd can immediately detect and recover from data corruption (implement Merkle root) | Mitigate | P2       | https://github.com/etcd-io/etcd/issues/13839 |        |
+
+## Timeline
+
+| Date | Event |
+|------------|-----------------------------------------------------------------------------------------------------------------------|
+| 2021-05-08 | Pull request that caused data corruption was merged - https://github.com/etcd-io/etcd/pull/12855 |
+| 2021-06-16 | Release v3.5.0 with data corruption was published - https://github.com/etcd-io/etcd/releases/tag/v3.5.0 |
+| 2021-12-01 | Report of data corruption - https://github.com/etcd-io/etcd/issues/13514 |
+| 2022-01-28 | Report of data corruption - https://github.com/etcd-io/etcd/issues/13654 |
+| 2022-03-08 | Report of data corruption - https://github.com/etcd-io/etcd/issues/13766 |
+| 2022-03-25 | Corruption confirmed by one of the maintainers - https://github.com/etcd-io/etcd/issues/13766#issuecomment-1078897588 |
+| 2022-03-29 | Statement about the corruption was sent to etcd-dev@googlegroups.com and dev@kubernetes.io |
+| 2022-04-24 | Release v3.5.3 with fix was published - https://github.com/etcd-io/etcd/releases/tag/v3.5.3 |
diff --git a/GOVERNANCE.md b/GOVERNANCE.md
index a0a6a290e56..6573d2f0194 100644
--- a/GOVERNANCE.md
+++ b/GOVERNANCE.md
@@ -5,64 +5,20 @@
The etcd community adheres to the following principles:
- Open: etcd is open source.
-- Welcoming and respectful: See [Code of Conduct](code-of-conduct.md).
+- Welcoming and respectful: See [Code of Conduct].
- Transparent and accessible: Changes to the etcd code repository and CNCF related
activities (e.g. level, involvement, etc) are done in public.
- Merit: Ideas and contributions are accepted according to their technical merit for
the betterment of the project. For specific guidance on practical contribution steps
-please see [CONTRIBUTING](./CONTRIBUTING.md) guide.
+please see the [contributor guide].
-## Maintainers
-
-[Maintainers](./MAINTAINERS) are first and foremost contributors that have shown they
-are committed to the long term success of a project. Maintainership is about building
-trust with the current maintainers of the project and being a person that they can
-depend on to make decisions in the best interest of the project in a consistent manner.
-The maintainers role can be a top-level or restricted to certain package/feature
-depending upon their commitment in fulfilling the expected responsibilities as explained
-below.
-
-### Top-level maintainer
-
-- Running the etcd release processes
-- Ownership of test and debug infrastructure
-- Triage GitHub issues to keep the issue count low (goal: under 100)
-- Regularly review GitHub pull requests across all pkgs
-- Providing cross pkg design review
-- Monitor email aliases
-- Participate when called upon in the [security disclosure and release process](security/README.md)
-- General project maintenance
-
-### Package/feature maintainer
-
-- Ownership of test and debug failures in a pkg/feature
-- Resolution of bugs triaged to a package/feature
-- Regularly review pull requests to the pkg subsystem
-
-Contributors who are interested in becoming a maintainer, if performing these
-responsibilities, should discuss their interest with the existing maintainers. New
-maintainers must be nominated by an existing maintainer and must be elected by a
-supermajority of maintainers. Likewise, maintainers can be removed by a supermajority
-of the maintainers and moved to emeritus status.
-
-Life priorities, interests, and passions can change. If a maintainer needs to step
-down, inform other maintainers about this intention, and if possible, help find someone
-to pick up the related work. At the very least, ensure the related work can be continued.
-Afterward, create a pull request to remove yourself from the [MAINTAINERS](./MAINTAINERS)
-file.
-
-## Reviewers
-
-[Reviewers](./MAINTAINERS) are contributors who have demonstrated greater skill in
-reviewing the code contribution from other contributors. Their LGTM counts towards
-merging a code change into the project. A reviewer is generally on the ladder towards
-maintainership. New reviewers must be nominated by an existing maintainer and must be
-elected by a supermajority of maintainers. Likewise, reviewers can be removed by a
-supermajority of the maintainers or can resign by notifying the maintainers.
+## Roles and responsibilities
+Etcd project roles along with their requirements and responsibilities are defined
+in [community membership].
## Decision making process
-Decisions are built on consensus between maintainers publicly. Proposals and ideas
+Decisions are built on consensus between [maintainers] publicly. Proposals and ideas
can either be submitted for agreement via a GitHub issue or PR, or by sending an email
to `etcd-maintainers@googlegroups.com`.
@@ -73,8 +29,14 @@ worked out between the persons involved. However, any technical dispute that has
reached an impasse with a subset of the community, any contributor may open a GitHub
issue or PR or send an email to `etcd-maintainers@googlegroups.com`. If the
maintainers themselves cannot decide an issue, the issue will be resolved by a
-supermajority of the maintainers.
+supermajority of the maintainers, with a fallback to lazy consensus after a three-business-week
+inactive voting period, as long as two maintainers are on board.
## Changes in Governance
Changes in project governance could be initiated by opening a GitHub PR.
+
+[community membership]: /Documentation/contributor-guide/community-membership.md
+[Code of Conduct]: /code-of-conduct.md
+[contributor guide]: /CONTRIBUTING.md
+[maintainers]: /OWNERS
diff --git a/MAINTAINERS b/MAINTAINERS
deleted file mode 100644
index ecf320ad39e..00000000000
--- a/MAINTAINERS
+++ /dev/null
@@ -1,24 +0,0 @@
-# The official list of maintainers and reviewers for the project maintenance.
-#
-# Refer to the GOVERNANCE.md for description of the roles.
-#
-# Names should be added to this file like so:
-# Individual's name (@GITHUB_HANDLE) pkg:*
-# Individual's name (@GITHUB_HANDLE) pkg:*
-#
-# Please keep the list sorted.
-
-# MAINTAINERS
-Brandon Philips (@philips) pkg:*
-Gyuho Lee (@gyuho) pkg:*
-Hitoshi Mitake (@mitake) pkg:*
-Jingyi Hu (@jingyih) pkg:*
-Joe Betz (@jpbetz) pkg:*
-Piotr Tabor (@ptabor) pkg:*
-Sahdev Zala (@spzala) pkg:*
-Sam Batschelet (@hexfusion) pkg:*
-Wenjia Zhang (@wenjiaswe) pkg:*
-Xiang Li (@xiang90) pkg:*
-
-Ben Darnell (@bdarnell) pkg:go.etcd.io/etcd/raft
-Tobias Grieger (@tbg) pkg:go.etcd.io/etcd/raft
diff --git a/Makefile b/Makefile
index bfd6741c601..5bc74672768 100644
--- a/Makefile
+++ b/Makefile
@@ -1,548 +1,212 @@
-# run from repository root
+.PHONY: all
+all: build
+include tests/robustness/makefile.mk
+.PHONY: build
+build:
+ GO_BUILD_FLAGS="${GO_BUILD_FLAGS} -v -mod=readonly" ./scripts/build.sh
+PLATFORMS=linux-amd64 linux-386 linux-arm linux-arm64 linux-ppc64le linux-s390x darwin-amd64 darwin-arm64 windows-amd64 windows-arm64
-# Example:
-# make build
-# make clean
-# make docker-clean
-# make docker-start
-# make docker-kill
-# make docker-remove
+.PHONY: build-all
+build-all:
+ @for platform in $(PLATFORMS); do \
+ $(MAKE) build-$${platform}; \
+ done
-UNAME := $(shell uname)
-XARGS = xargs
-ARCH ?= $(shell go env GOARCH)
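+# Example: make build-linux-amd64 (see PLATFORMS above for the supported GOOS-GOARCH pairs)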
+.PHONY: build-%
+build-%:
+ GOOS=$$(echo $* | cut -d- -f 1) GOARCH=$$(echo $* | cut -d- -f 2) GO_BUILD_FLAGS="${GO_BUILD_FLAGS} -v -mod=readonly" ./scripts/build.sh
-# -r is only necessary on GNU xargs.
-ifeq ($(UNAME), Linux)
-XARGS += -r
-endif
-XARGS += rm -r
+.PHONY: tools
+tools:
+ GO_BUILD_FLAGS="${GO_BUILD_FLAGS} -v -mod=readonly" ./scripts/build_tools.sh
-.PHONY: build
-build:
- GO_BUILD_FLAGS="-v" ./build.sh
- ./bin/etcd --version
- ./bin/etcdctl version
- ./bin/etcdutl version
+# Tests
-clean:
- rm -f ./codecov
- rm -rf ./covdir
- rm -f ./bin/Dockerfile-release*
- rm -rf ./bin/etcd*
- rm -rf ./default.etcd
- rm -rf ./tests/e2e/default.etcd
- rm -rf ./release
- rm -rf ./coverage/*.err ./coverage/*.out
- rm -rf ./tests/e2e/default.proxy
- find ./ -name "127.0.0.1:*" -o -name "localhost:*" -o -name "*.log" -o -name "agent-*" -o -name "*.coverprofile" -o -name "testname-proxy-*" | $(XARGS)
+GO_TEST_FLAGS?=
-docker-clean:
- docker images
- docker image prune --force
+.PHONY: test
+test:
+ PASSES="unit integration release e2e" ./scripts/test.sh $(GO_TEST_FLAGS)
-docker-start:
- service docker restart
+.PHONY: test-unit
+test-unit:
+ PASSES="unit" ./scripts/test.sh $(GO_TEST_FLAGS)
+
+.PHONY: test-integration
+test-integration:
+ PASSES="integration" ./scripts/test.sh $(GO_TEST_FLAGS)
+
+.PHONY: test-e2e
+test-e2e: build
+ PASSES="e2e" ./scripts/test.sh $(GO_TEST_FLAGS)
+
+.PHONY: test-grpcproxy-integration
+test-grpcproxy-integration:
+ PASSES="grpcproxy_integration" ./scripts/test.sh $(GO_TEST_FLAGS)
+
+.PHONY: test-grpcproxy-e2e
+test-grpcproxy-e2e: build
+ PASSES="grpcproxy_e2e" ./scripts/test.sh $(GO_TEST_FLAGS)
+
+.PHONY: test-e2e-release
+test-e2e-release: build
+ PASSES="release e2e" ./scripts/test.sh $(GO_TEST_FLAGS)
+
+.PHONY: test-robustness
+test-robustness:
+ PASSES="robustness" ./scripts/test.sh $(GO_TEST_FLAGS)
+
+.PHONY: fuzz
+fuzz:
+ ./scripts/fuzzing.sh
+
+# Static analysis
+.PHONY: verify
+verify: verify-gofmt verify-bom verify-lint verify-dep verify-shellcheck verify-goword \
+ verify-govet verify-license-header verify-mod-tidy \
+ verify-shellws verify-proto-annotations verify-genproto verify-yamllint \
+ verify-govet-shadow verify-markdown-marker verify-go-versions
+
+.PHONY: fix
+fix: fix-bom fix-lint fix-yamllint sync-toolchain-directive
+ ./scripts/fix.sh
+
+.PHONY: verify-gofmt
+verify-gofmt:
+ PASSES="gofmt" ./scripts/test.sh
+
+.PHONY: verify-bom
+verify-bom:
+ PASSES="bom" ./scripts/test.sh
+
+.PHONY: fix-bom
+fix-bom:
+ ./scripts/updatebom.sh
+
+.PHONY: verify-dep
+verify-dep:
+ PASSES="dep" ./scripts/test.sh
+
+.PHONY: verify-lint
+verify-lint: install-golangci-lint
+ PASSES="lint" ./scripts/test.sh
+
+.PHONY: fix-lint
+fix-lint:
+ PASSES="lint_fix" ./scripts/test.sh
+
+.PHONY: verify-shellcheck
+verify-shellcheck:
+ PASSES="shellcheck" ./scripts/test.sh
+
+.PHONY: verify-goword
+verify-goword:
+ PASSES="goword" ./scripts/test.sh
+
+.PHONY: verify-govet
+verify-govet:
+ PASSES="govet" ./scripts/test.sh
+
+.PHONY: verify-license-header
+verify-license-header:
+ PASSES="license_header" ./scripts/test.sh
+
+.PHONY: verify-mod-tidy
+verify-mod-tidy:
+ PASSES="mod_tidy" ./scripts/test.sh
+
+.PHONY: verify-shellws
+verify-shellws:
+ PASSES="shellws" ./scripts/test.sh
+
+.PHONY: verify-proto-annotations
+verify-proto-annotations:
+ PASSES="proto_annotations" ./scripts/test.sh
+
+.PHONY: verify-genproto
+verify-genproto:
+ PASSES="genproto" ./scripts/test.sh
+
+.PHONY: verify-yamllint
+verify-yamllint:
+ifeq (, $(shell which yamllint))
+ @echo "Installing yamllint..."
+ tmpdir=$$(mktemp -d); \
+ trap "rm -rf $$tmpdir" EXIT; \
+ python3 -m venv $$tmpdir; \
+ $$tmpdir/bin/python3 -m pip install yamllint; \
+ $$tmpdir/bin/yamllint --config-file tools/.yamllint .
+else
+ @echo "yamllint already installed..."
+ yamllint --config-file tools/.yamllint .
+endif
-docker-kill:
- docker kill `docker ps -q` || true
+.PHONY: verify-govet-shadow
+verify-govet-shadow:
+ PASSES="govet_shadow" ./scripts/test.sh
-docker-remove:
- docker rm --force `docker ps -a -q` || true
- docker rmi --force `docker images -q` || true
+.PHONY: verify-markdown-marker
+verify-markdown-marker:
+ PASSES="markdown_marker" ./scripts/test.sh
+YAMLFMT_VERSION = $(shell cd tools/mod && go list -m -f '{{.Version}}' github.com/google/yamlfmt)
+.PHONY: fix-yamllint
+fix-yamllint:
+ifeq (, $(shell which yamlfmt))
+ $(shell go install github.com/google/yamlfmt/cmd/yamlfmt@$(YAMLFMT_VERSION))
+endif
+ yamlfmt -conf tools/.yamlfmt .
-GO_VERSION ?= 1.16.3
-ETCD_VERSION ?= $(shell git rev-parse --short HEAD || echo "GitNotFound")
+.PHONY: run-govulncheck
+run-govulncheck:
+ifeq (, $(shell which govulncheck))
+ $(shell go install golang.org/x/vuln/cmd/govulncheck@latest)
+endif
+ PASSES="govuln" ./scripts/test.sh
-TEST_SUFFIX = $(shell date +%s | base64 | head -c 15)
-TEST_OPTS ?= PASSES='unit'
+# Tools
-TMP_DIR_MOUNT_FLAG = --tmpfs=/tmp:exec
-ifdef HOST_TMP_DIR
- TMP_DIR_MOUNT_FLAG = --mount type=bind,source=$(HOST_TMP_DIR),destination=/tmp
+GOLANGCI_LINT_VERSION = $(shell cd tools/mod && go list -m -f {{.Version}} github.com/golangci/golangci-lint)
+.PHONY: install-golangci-lint
+install-golangci-lint:
+ifeq (, $(shell which golangci-lint))
+ $(shell curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin $(GOLANGCI_LINT_VERSION))
endif
+.PHONY: install-lazyfs
+install-lazyfs: bin/lazyfs
+bin/lazyfs:
+ rm /tmp/lazyfs -rf
+ git clone --depth 1 --branch 0.2.0 https://github.com/dsrhaslab/lazyfs /tmp/lazyfs
+ cd /tmp/lazyfs/libs/libpcache; ./build.sh
+ cd /tmp/lazyfs/lazyfs; ./build.sh
+ mkdir -p ./bin
+ cp /tmp/lazyfs/lazyfs/build/lazyfs ./bin/lazyfs
+
+# Cleanup
+.PHONY: clean
+clean:
+ rm -f ./codecov
+ rm -rf ./covdir
+ rm -f ./bin/Dockerfile-release
+ rm -rf ./bin/etcd*
+ rm -rf ./bin/lazyfs
+ rm -rf ./bin/python
+ rm -rf ./default.etcd
+ rm -rf ./tests/e2e/default.etcd
+ rm -rf ./release
+ rm -rf ./coverage/*.err ./coverage/*.out
+ rm -rf ./tests/e2e/default.proxy
+ rm -rf ./bin/shellcheck*
+ find ./ -name "127.0.0.1:*" -o -name "localhost:*" -o -name "*.log" -o -name "agent-*" -o -name "*.coverprofile" -o -name "testname-proxy-*" -delete
-TMP_DOCKERFILE:=$(shell mktemp)
-
-# Example:
-# GO_VERSION=1.14.3 make build-docker-test
-# make build-docker-test
-#
-# gcloud auth configure-docker
-# GO_VERSION=1.14.3 make push-docker-test
-# make push-docker-test
-#
-# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
-# make pull-docker-test
-
-build-docker-test:
- $(info GO_VERSION: $(GO_VERSION))
- @sed 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' ./tests/Dockerfile > $(TMP_DOCKERFILE)
- docker build \
- --network=host \
- --tag gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \
- --file $(TMP_DOCKERFILE) .
-
-push-docker-test:
- $(info GO_VERSION: $(GO_VERSION))
- docker push gcr.io/etcd-development/etcd-test:go$(GO_VERSION)
-
-pull-docker-test:
- $(info GO_VERSION: $(GO_VERSION))
- docker pull gcr.io/etcd-development/etcd-test:go$(GO_VERSION)
-
-
-
-# Example:
-# make build-docker-test
-# make compile-with-docker-test
-# make compile-setup-gopath-with-docker-test
-
-compile-with-docker-test:
- $(info GO_VERSION: $(GO_VERSION))
- docker run \
- --rm \
- --mount type=bind,source=`pwd`,destination=/go/src/go.etcd.io/etcd \
- gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \
- /bin/bash -c "GO_BUILD_FLAGS=-v GOOS=linux GOARCH=amd64 ./build.sh && ./bin/etcd --version"
-
-compile-setup-gopath-with-docker-test:
- $(info GO_VERSION: $(GO_VERSION))
- docker run \
- --rm \
- --mount type=bind,source=`pwd`,destination=/etcd \
- gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && ETCD_SETUP_GOPATH=1 GO_BUILD_FLAGS=-v GOOS=linux GOARCH=amd64 ./build.sh && ./bin/etcd --version && rm -rf ./gopath"
-
-
-
-# Example:
-#
-# Local machine:
-# TEST_OPTS="PASSES='fmt'" make test
-# TEST_OPTS="PASSES='fmt bom dep build unit'" make test
-# TEST_OPTS="PASSES='build unit release integration_e2e functional'" make test
-# TEST_OPTS="PASSES='build grpcproxy'" make test
-#
-# Example (test with docker):
-# make pull-docker-test
-# TEST_OPTS="PASSES='fmt'" make docker-test
-# TEST_OPTS="VERBOSE=2 PASSES='unit'" make docker-test
-#
-# Travis CI (test with docker):
-# TEST_OPTS="PASSES='fmt bom dep build unit'" make docker-test
-#
-# Semaphore CI (test with docker):
-# TEST_OPTS="PASSES='build unit release integration_e2e functional'" make docker-test
-# HOST_TMP_DIR=/tmp TEST_OPTS="PASSES='build unit release integration_e2e functional'" make docker-test
-# TEST_OPTS="GOARCH=386 PASSES='build unit integration_e2e'" make docker-test
-#
-# grpc-proxy tests (test with docker):
-# TEST_OPTS="PASSES='build grpcproxy'" make docker-test
-# HOST_TMP_DIR=/tmp TEST_OPTS="PASSES='build grpcproxy'" make docker-test
+.PHONY: verify-go-versions
+verify-go-versions:
+ ./scripts/verify_go_versions.sh
-.PHONY: test
-test:
- $(info TEST_OPTS: $(TEST_OPTS))
- $(info log-file: test-$(TEST_SUFFIX).log)
- $(TEST_OPTS) ./test.sh 2>&1 | tee test-$(TEST_SUFFIX).log
- ! egrep "(--- FAIL:|DATA RACE|panic: test timed out|appears to have leaked)" -B50 -A10 test-$(TEST_SUFFIX).log
-
-test-smoke:
- $(info log-file: test-$(TEST_SUFFIX).log)
- PASSES="fmt build unit" ./test.sh 2<&1 | tee test-$(TEST_SUFFIX).log
-
-test-full:
- $(info log-file: test-$(TEST_SUFFIX).log)
- PASSES="fmt build release unit integration functional e2e grpcproxy" ./test.sh 2<&1 | tee test-$(TEST_SUFFIX).log
-
-docker-test:
- $(info GO_VERSION: $(GO_VERSION))
- $(info ETCD_VERSION: $(ETCD_VERSION))
- $(info TEST_OPTS: $(TEST_OPTS))
- $(info log-file: test-$(TEST_SUFFIX).log)
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`,destination=/go/src/go.etcd.io/etcd \
- gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \
- /bin/bash -c "$(TEST_OPTS) ./test.sh 2>&1 | tee test-$(TEST_SUFFIX).log"
- ! egrep "(--- FAIL:|DATA RACE|panic: test timed out|appears to have leaked)" -B50 -A10 test-$(TEST_SUFFIX).log
-
-docker-test-coverage:
- $(info GO_VERSION: $(GO_VERSION))
- $(info ETCD_VERSION: $(ETCD_VERSION))
- $(info log-file: docker-test-coverage-$(TEST_SUFFIX).log)
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`,destination=/go/src/go.etcd.io/etcd \
- gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \
- /bin/bash ./scripts/codecov_upload.sh docker-test-coverage-$(TEST_SUFFIX).log \
- ! egrep "(--- FAIL:|DATA RACE|panic: test timed out|appears to have leaked)" -B50 -A10 docker-test-coverage-$(TEST_SUFFIX).log
-
-
-
-# Example:
-# make compile-with-docker-test
-# ETCD_VERSION=v3-test make build-docker-release-main
-# ETCD_VERSION=v3-test make push-docker-release-main
-# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
-
-build-docker-release-main:
- $(info ETCD_VERSION: $(ETCD_VERSION))
- cp ./Dockerfile-release.$(ARCH) ./bin/Dockerfile-release.$(ARCH)
- docker build \
- --network=host \
- --tag gcr.io/etcd-development/etcd:$(ETCD_VERSION) \
- --file ./bin/Dockerfile-release.$(ARCH) \
- ./bin
- rm -f ./bin/Dockerfile-release.$(ARCH)
-
- docker run \
- --rm \
- gcr.io/etcd-development/etcd:$(ETCD_VERSION) \
- /bin/sh -c "/usr/local/bin/etcd --version && /usr/local/bin/etcdctl version && /usr/local/bin/etcdutl version"
-
-push-docker-release-main:
- $(info ETCD_VERSION: $(ETCD_VERSION))
- docker push gcr.io/etcd-development/etcd:$(ETCD_VERSION)
-
-
-
-# Example:
-# make build-docker-test
-# make compile-with-docker-test
-# make build-docker-static-ip-test
-#
-# gcloud auth configure-docker
-# make push-docker-static-ip-test
-#
-# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
-# make pull-docker-static-ip-test
-#
-# make docker-static-ip-test-certs-run
-# make docker-static-ip-test-certs-metrics-proxy-run
-
-build-docker-static-ip-test:
- $(info GO_VERSION: $(GO_VERSION))
- @sed 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' ./tests/docker-static-ip/Dockerfile > $(TMP_DOCKERFILE)
- docker build \
- --network=host \
- --tag gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) \
- --file ./tests/docker-static-ip/Dockerfile \
- $(TMP_DOCKERFILE)
-
-push-docker-static-ip-test:
- $(info GO_VERSION: $(GO_VERSION))
- docker push gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION)
-
-pull-docker-static-ip-test:
- $(info GO_VERSION: $(GO_VERSION))
- docker pull gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION)
-
-docker-static-ip-test-certs-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-static-ip/certs,destination=/certs \
- gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs/run.sh && rm -rf m*.etcd"
-
-docker-static-ip-test-certs-metrics-proxy-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-static-ip/certs-metrics-proxy,destination=/certs-metrics-proxy \
- gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs-metrics-proxy/run.sh && rm -rf m*.etcd"
-
-
-
-# Example:
-# make build-docker-test
-# make compile-with-docker-test
-# make build-docker-dns-test
-#
-# gcloud auth configure-docker
-# make push-docker-dns-test
-#
-# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
-# make pull-docker-dns-test
-#
-# make docker-dns-test-insecure-run
-# make docker-dns-test-certs-run
-# make docker-dns-test-certs-gateway-run
-# make docker-dns-test-certs-wildcard-run
-# make docker-dns-test-certs-common-name-auth-run
-# make docker-dns-test-certs-common-name-multi-run
-# make docker-dns-test-certs-san-dns-run
-
-build-docker-dns-test:
- $(info GO_VERSION: $(GO_VERSION))
- @sed 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' ./tests/docker-dns/Dockerfile > $(TMP_DOCKERFILE)
- docker build \
- --network=host \
- --tag gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
- --file ./tests/docker-dns/Dockerfile \
- $(TMP_DOCKERFILE)
-
- docker run \
- --rm \
- --dns 127.0.0.1 \
- gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
- /bin/bash -c "/etc/init.d/bind9 start && cat /dev/null >/etc/hosts && dig etcd.local"
-
-push-docker-dns-test:
- $(info GO_VERSION: $(GO_VERSION))
- docker push gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION)
-
-pull-docker-dns-test:
- $(info GO_VERSION: $(GO_VERSION))
- docker pull gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION)
-
-docker-dns-test-insecure-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns/insecure,destination=/insecure \
- gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /insecure/run.sh && rm -rf m*.etcd"
-
-docker-dns-test-certs-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns/certs,destination=/certs \
- gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs/run.sh && rm -rf m*.etcd"
-
-docker-dns-test-certs-gateway-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns/certs-gateway,destination=/certs-gateway \
- gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs-gateway/run.sh && rm -rf m*.etcd"
-
-docker-dns-test-certs-wildcard-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns/certs-wildcard,destination=/certs-wildcard \
- gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs-wildcard/run.sh && rm -rf m*.etcd"
-
-docker-dns-test-certs-common-name-auth-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns/certs-common-name-auth,destination=/certs-common-name-auth \
- gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs-common-name-auth/run.sh && rm -rf m*.etcd"
-
-docker-dns-test-certs-common-name-multi-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns/certs-common-name-multi,destination=/certs-common-name-multi \
- gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs-common-name-multi/run.sh && rm -rf m*.etcd"
-
-docker-dns-test-certs-san-dns-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns/certs-san-dns,destination=/certs-san-dns \
- gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs-san-dns/run.sh && rm -rf m*.etcd"
-
-
-# Example:
-# make build-docker-test
-# make compile-with-docker-test
-# make build-docker-dns-srv-test
-# gcloud auth configure-docker
-# make push-docker-dns-srv-test
-# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
-# make pull-docker-dns-srv-test
-# make docker-dns-srv-test-certs-run
-# make docker-dns-srv-test-certs-gateway-run
-# make docker-dns-srv-test-certs-wildcard-run
-
-build-docker-dns-srv-test:
- $(info GO_VERSION: $(GO_VERSION))
- @sed 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' > $(TMP_DOCKERFILE)
- docker build \
- --network=host \
- --tag gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \
- --file ./tests/docker-dns-srv/Dockerfile \
- $(TMP_DOCKERFILE)
-
- docker run \
- --rm \
- --dns 127.0.0.1 \
- gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \
- /bin/bash -c "/etc/init.d/bind9 start && cat /dev/null >/etc/hosts && dig +noall +answer SRV _etcd-client-ssl._tcp.etcd.local && dig +noall +answer SRV _etcd-server-ssl._tcp.etcd.local && dig +noall +answer m1.etcd.local m2.etcd.local m3.etcd.local"
-
-push-docker-dns-srv-test:
- $(info GO_VERSION: $(GO_VERSION))
- docker push gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION)
-
-pull-docker-dns-srv-test:
- $(info GO_VERSION: $(GO_VERSION))
- docker pull gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION)
-
-docker-dns-srv-test-certs-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns-srv/certs,destination=/certs \
- gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs/run.sh && rm -rf m*.etcd"
-
-docker-dns-srv-test-certs-gateway-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns-srv/certs-gateway,destination=/certs-gateway \
- gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs-gateway/run.sh && rm -rf m*.etcd"
-
-docker-dns-srv-test-certs-wildcard-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns-srv/certs-wildcard,destination=/certs-wildcard \
- gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs-wildcard/run.sh && rm -rf m*.etcd"
-
-
-
-# Example:
-# make build-functional
-# make build-docker-functional
-# make push-docker-functional
-# make pull-docker-functional
-
-build-functional:
- $(info GO_VERSION: $(GO_VERSION))
- $(info ETCD_VERSION: $(ETCD_VERSION))
- ./tests/functional/build
- ./bin/etcd-agent -help || true && \
- ./bin/etcd-proxy -help || true && \
- ./bin/etcd-runner --help || true && \
- ./bin/etcd-tester -help || true
-
-build-docker-functional:
- $(info GO_VERSION: $(GO_VERSION))
- $(info ETCD_VERSION: $(ETCD_VERSION))
- @sed 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' > $(TMP_DOCKERFILE)
- docker build \
- --network=host \
- --tag gcr.io/etcd-development/etcd-functional:go$(GO_VERSION) \
- --file ./tests/functional/Dockerfile \
- .
- @mv ./tests/functional/Dockerfile.bak ./tests/functional/Dockerfile
-
- docker run \
- --rm \
- gcr.io/etcd-development/etcd-functional:go$(GO_VERSION) \
- /bin/bash -c "./bin/etcd --version && \
- ./bin/etcd-failpoints --version && \
- ./bin/etcdctl version && \
- ./bin/etcdutl version && \
- ./bin/etcd-agent -help || true && \
- ./bin/etcd-proxy -help || true && \
- ./bin/etcd-runner --help || true && \
- ./bin/etcd-tester -help || true && \
- ./bin/benchmark --help || true"
-
-push-docker-functional:
- $(info GO_VERSION: $(GO_VERSION))
- $(info ETCD_VERSION: $(ETCD_VERSION))
- docker push gcr.io/etcd-development/etcd-functional:go$(GO_VERSION)
-
-pull-docker-functional:
- $(info GO_VERSION: $(GO_VERSION))
- $(info ETCD_VERSION: $(ETCD_VERSION))
- docker pull gcr.io/etcd-development/etcd-functional:go$(GO_VERSION)
+.PHONY: sync-toolchain-directive
+sync-toolchain-directive:
+ ./scripts/sync_go_toolchain_directive.sh
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 00000000000..393aea9670a
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1,12 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+ - ahrtr # Benjamin Wang
+ - jmhbnz # James Blair
+ - serathius # Marek Siarkowicz
+ - spzala # Sahdev Zala
+ - wenjiaswe # Wenjia Zhang
+reviewers:
+ - fuweid # Wei Fu
+ - ivanvc # Ivan Valdes
+ - siyuanfoundation # Siyuan Zhang
diff --git a/Procfile b/Procfile
index 7d6b63e3884..82d58eb73ba 100644
--- a/Procfile
+++ b/Procfile
@@ -1,4 +1,4 @@
-# Use goreman to run `go get github.com/mattn/goreman`
+# Use goreman to run `go install github.com/mattn/goreman@latest`
# Change the path of bin/etcd if etcd is located elsewhere
etcd1: bin/etcd --name infra1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof --logger=zap --log-outputs=stderr
@@ -6,4 +6,21 @@ etcd2: bin/etcd --name infra2 --listen-client-urls http://127.0.0.1:22379 --adve
etcd3: bin/etcd --name infra3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof --logger=zap --log-outputs=stderr
#proxy: bin/etcd grpc-proxy start --endpoints=127.0.0.1:2379,127.0.0.1:22379,127.0.0.1:32379 --listen-addr=127.0.0.1:23790 --advertise-client-url=127.0.0.1:23790 --enable-pprof
-# A learner node can be started using Procfile.learner
+# A learner node can be started using the commented-out learner configuration below (uncomment and run)
+
+# Use goreman to run `go install github.com/mattn/goreman@latest`
+
+# 1. Start the cluster using Procfile
+# 2. Add learner node to the cluster
+# % etcdctl member add infra4 --peer-urls="http://127.0.0.1:42380" --learner=true
+
+# 3. Start learner node with goreman
+# Change the path of bin/etcd if etcd is located elsewhere
+
+# uncomment the line below to set up the learner
+
+# etcd4: bin/etcd --name infra4 --listen-client-urls http://127.0.0.1:42379 --advertise-client-urls http://127.0.0.1:42379 --listen-peer-urls http://127.0.0.1:42380 --initial-advertise-peer-urls http://127.0.0.1:42380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra4=http://127.0.0.1:42380,infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state existing --enable-pprof --logger=zap --log-outputs=stderr
+
+# 4. The learner node can be promoted to voting member by the command
+# % etcdctl member promote
+
diff --git a/Procfile.learner b/Procfile.learner
deleted file mode 100644
index 06e3d72854f..00000000000
--- a/Procfile.learner
+++ /dev/null
@@ -1,12 +0,0 @@
-# Use goreman to run `go get github.com/mattn/goreman`
-
-# 1. Start the cluster using Procfile
-# 2. Add learner node to the cluster
-# % etcdctl member add infra4 --peer-urls="http://127.0.0.1:42380" --learner=true
-
-# 3. Start learner node with goreman
-# Change the path of bin/etcd if etcd is located elsewhere
-etcd4: bin/etcd --name infra4 --listen-client-urls http://127.0.0.1:42379 --advertise-client-urls http://127.0.0.1:42379 --listen-peer-urls http://127.0.0.1:42380 --initial-advertise-peer-urls http://127.0.0.1:42380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra4=http://127.0.0.1:42380,infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state existing --enable-pprof --logger=zap --log-outputs=stderr
-
-# 4. The learner node can be promoted to voting member by the command
-# % etcdctl member promote
diff --git a/Procfile.v2 b/Procfile.v2
deleted file mode 100644
index 00370968631..00000000000
--- a/Procfile.v2
+++ /dev/null
@@ -1,7 +0,0 @@
-# Use goreman to run `go get github.com/mattn/goreman`
-# Change the path of bin/etcd if etcd is located elsewhere
-etcd1: bin/etcd --name infra1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
-etcd2: bin/etcd --name infra2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
-etcd3: bin/etcd --name infra3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
-# in future, use proxy to listen on 2379
-#proxy: bin/etcd --name infra-proxy1 --proxy=on --listen-client-urls http://127.0.0.1:2378 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --enable-pprof
diff --git a/README.md b/README.md
index fc438ae732e..3f3bd97da46 100644
--- a/README.md
+++ b/README.md
@@ -1,19 +1,22 @@
# etcd
[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/etcd?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/etcd)
-[![Coverage](https://codecov.io/gh/etcd-io/etcd/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/etcd)
+[![Coverage](https://codecov.io/gh/etcd-io/etcd/branch/main/graph/badge.svg)](https://app.codecov.io/gh/etcd-io/etcd/tree/main)
[![Tests](https://github.com/etcd-io/etcd/actions/workflows/tests.yaml/badge.svg)](https://github.com/etcd-io/etcd/actions/workflows/tests.yaml)
-[![asset-transparency](https://github.com/etcd-io/etcd/actions/workflows/asset-transparency.yaml/badge.svg)](https://github.com/etcd-io/etcd/actions/workflows/asset-transparency.yaml)
[![codeql-analysis](https://github.com/etcd-io/etcd/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/etcd-io/etcd/actions/workflows/codeql-analysis.yml)
-[![self-hosted-linux-arm64-graviton2-tests](https://github.com/etcd-io/etcd/actions/workflows/self-hosted-linux-arm64-graviton2-tests.yml/badge.svg)](https://github.com/etcd-io/etcd/actions/workflows/self-hosted-linux-arm64-graviton2-tests.yml)
[![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs)
[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/etcd)
[![Releases](https://img.shields.io/github/release/etcd-io/etcd/all.svg?style=flat-square)](https://github.com/etcd-io/etcd/releases)
[![LICENSE](https://img.shields.io/github/license/etcd-io/etcd.svg?style=flat-square)](https://github.com/etcd-io/etcd/blob/main/LICENSE)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/etcd-io/etcd/badge)](https://scorecard.dev/viewer/?uri=github.com/etcd-io/etcd)
**Note**: The `main` branch may be in an *unstable or even broken state* during development. For stable versions, see [releases][github-release].
-![etcd Logo](logos/etcd-horizontal-color.svg)
+<!-- etcd logo: logos/etcd-horizontal-color.svg -->
etcd is a distributed reliable key-value store for the most critical data of a distributed system, with a focus on being:
@@ -24,10 +27,14 @@ etcd is a distributed reliable key-value store for the most critical data of a d
etcd is written in Go and uses the [Raft][] consensus algorithm to manage a highly-available replicated log.
-etcd is used [in production by many companies](./ADOPTERS.md), and the development team stands behind it in critical deployment scenarios, where etcd is frequently teamed with applications such as [Kubernetes][k8s], [locksmith][], [vulcand][], [Doorman][], and many others. Reliability is further ensured by [**rigorous testing**](https://github.com/etcd-io/etcd/tree/main/tests/functional).
+etcd is used [in production by many companies](./ADOPTERS.md), and the development team stands behind it in critical deployment scenarios, where etcd is frequently teamed with applications such as [Kubernetes][k8s], [locksmith][], [vulcand][], [Doorman][], and many others. Reliability is further ensured by rigorous [**robustness testing**](https://github.com/etcd-io/etcd/tree/main/tests/robustness).
See [etcdctl][etcdctl] for a simple command line client.
+![etcd reliability is important](logos/etcd-xkcd-2347.png)
+
+Original image credited to xkcd.com/2347, alterations by Josh Berkus.
+
[raft]: https://raft.github.io/
[k8s]: http://kubernetes.io/
[doorman]: https://github.com/youtube/doorman
@@ -35,46 +42,9 @@ See [etcdctl][etcdctl] for a simple command line client.
[vulcand]: https://github.com/vulcand/vulcand
[etcdctl]: https://github.com/etcd-io/etcd/tree/main/etcdctl
-## Community meetings
-
-etcd contributors and maintainers have monthly (every four weeks) meetings at 11:00 AM (USA Pacific) on Thursday.
-
-An initial agenda will be posted to the [shared Google docs][shared-meeting-notes] a day before each meeting, and everyone is welcome to suggest additional topics or other agendas.
-
-[shared-meeting-notes]: https://docs.google.com/document/d/16XEGyPBisZvmmoIHSZzv__LoyOeluC5a4x353CX0SIM/edit
-
-
-Time:
-- [Jan 10th, 2019 11:00 AM video](https://www.youtube.com/watch?v=0Cphtbd1OSc&feature=youtu.be)
-- [Feb 7th, 2019 11:00 AM video](https://youtu.be/U80b--oAlYM)
-- [Mar 7th, 2019 11:00 AM video](https://youtu.be/w9TI5B7D1zg)
-- [Apr 4th, 2019 11:00 AM video](https://youtu.be/oqQR2XH1L_A)
-- [May 2nd, 2019 11:00 AM video](https://youtu.be/wFwQePuDWVw)
-- [May 30th, 2019 11:00 AM video](https://youtu.be/2t1R5NATYG4)
-- [Jul 11th, 2019 11:00 AM video](https://youtu.be/k_FZEipWD6Y)
-- [Jul 25, 2019 11:00 AM video](https://youtu.be/VSUJTACO93I)
-- [Aug 22, 2019 11:00 AM video](https://youtu.be/6IBQ-VxQmuM)
-- [Sep 19, 2019 11:00 AM video](https://youtu.be/SqfxU9DhBOc)
-- Nov 14, 2019 11:00 AM
-- Dec 12, 2019 11:00 AM
-- Jan 09, 2020 11:00 AM
-- Feb 06, 2020 11:00 AM
-- Mar 05, 2020 11:00 AM
-- Apr 02, 2020 11:00 AM
-- Apr 30, 2020 11:00 AM
-- May 28, 2020 11:00 AM
-- Jun 25, 2020 11:00 AM
-- Jul 23, 2020 11:00 AM
-- Aug 20, 2020 11:00 AM
-- Sep 17, 2020 11:00 AM
-- Oct 15, 2020 11:00 AM
-- Nov 12, 2020 11:00 AM
-- Dec 10, 2020 11:00 AM
-
-Join Hangouts Meet: [meet.google.com/umg-nrxn-qvs](https://meet.google.com/umg-nrxn-qvs)
-
-Join by phone: +1 405-792-0633 PIN: 299 906#
+## Maintainers
+[Maintainers](OWNERS) strive to shape an inclusive open source project culture where users are heard and contributors feel respected and empowered. Maintainers aim to build productive relationships across different companies and disciplines. Read more about the [maintainer role and responsibilities](Documentation/contributor-guide/community-membership.md#maintainers).
## Getting started
@@ -84,11 +54,7 @@ The easiest way to get etcd is to use one of the pre-built release binaries whic
For more installation guides, please check out [play.etcd.io](http://play.etcd.io) and [operating etcd](https://etcd.io/docs/latest/op-guide).
-For those wanting to try the very latest version, [build the latest version of etcd][dl-build] from the `main` branch. This first needs [*Go*](https://golang.org/) installed ([version 1.16+](/go.mod#L3) is required). All development occurs on `main`, including new features and bug fixes. Bug fixes are first targeted at `main` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
-
[github-release]: https://github.com/etcd-io/etcd/releases
-[branch-management]: https://etcd.io/docs/latest/branch_management
-[dl-build]: https://etcd.io/docs/latest/dl-build#build-the-latest-version
### Running etcd
@@ -107,25 +73,19 @@ mv /tmp/etcd-download-test/etcd /usr/local/bin/
etcd
```
-If etcd is [built from the main branch][dl-build], run it as below:
-
-```bash
-./bin/etcd
-```
-
This will bring up etcd listening on port 2379 for client communication and on port 2380 for server-to-server communication.
Next, let's set a single key, and then retrieve it:
-```
+```bash
etcdctl put mykey "this is awesome"
etcdctl get mykey
```
etcd is now running and serving client requests. For more, please check out:
-- [Interactive etcd playground](http://play.etcd.io)
-- [Animated quick demo](https://etcd.io/docs/latest/demo)
+* [Interactive etcd playground](http://play.etcd.io)
+* [Animated quick demo](https://etcd.io/docs/latest/demo)
### etcd TCP ports
@@ -147,23 +107,26 @@ This will bring up 3 etcd members `infra1`, `infra2` and `infra3` and optionally
Every cluster member and proxy accepts key value reads and key value writes.
-Follow the steps in [Procfile.learner](./Procfile.learner) to add a learner node to the cluster. Start the learner node with:
+Follow the comments in the [Procfile](./Procfile) to add a learner node to the cluster.
+
+### Install etcd client v3
```bash
-goreman -f ./Procfile.learner start
+go get go.etcd.io/etcd/client/v3
```
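+
+A minimal sketch of using the client from Go, mirroring the `etcdctl` quick start above; the endpoint, timeouts, and key name here are illustrative assumptions, not project defaults:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	clientv3 "go.etcd.io/etcd/client/v3"
+)
+
+func main() {
+	// Connect to a local etcd member; endpoint and timeouts are placeholders.
+	cli, err := clientv3.New(clientv3.Config{
+		Endpoints:   []string{"127.0.0.1:2379"},
+		DialTimeout: 5 * time.Second,
+	})
+	if err != nil {
+		panic(err)
+	}
+	defer cli.Close()
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+
+	// Write a key, then read it back.
+	if _, err := cli.Put(ctx, "mykey", "this is awesome"); err != nil {
+		panic(err)
+	}
+	resp, err := cli.Get(ctx, "mykey")
+	if err != nil {
+		panic(err)
+	}
+	for _, kv := range resp.Kvs {
+		fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
+	}
+}
+```
+
+This assumes an etcd member is already listening on `127.0.0.1:2379`, as started in the quick start above.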
### Next steps
Now it's time to dig into the full etcd API and other guides.
-- Read the full [documentation][].
-- Explore the full gRPC [API][].
-- Set up a [multi-machine cluster][clustering].
-- Learn the [config format, env variables and flags][configuration].
-- Find [language bindings and tools][integrations].
-- Use TLS to [secure an etcd cluster][security].
-- [Tune etcd][tuning].
+* Read the full [documentation].
+* Review etcd [frequently asked questions].
+* Explore the full gRPC [API].
+* Set up a [multi-machine cluster][clustering].
+* Learn the [config format, env variables and flags][configuration].
+* Find [language bindings and tools][integrations].
+* Use TLS to [secure an etcd cluster][security].
+* [Tune etcd][tuning].
[documentation]: https://etcd.io/docs/latest
[api]: https://etcd.io/docs/latest/learning/api
@@ -175,18 +138,41 @@ Now it's time to dig into the full etcd API and other guides.
## Contact
-- Mailing list: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev)
-- IRC: #[etcd](irc://irc.freenode.org:6667/#etcd) on freenode.org
-- Planning/Roadmap: [milestones](https://github.com/etcd-io/etcd/milestones), [roadmap](./ROADMAP.md)
-- Bugs: [issues](https://github.com/etcd-io/etcd/issues)
+* Email: [etcd-dev](https://groups.google.com/g/etcd-dev)
+* Slack: [#sig-etcd](https://kubernetes.slack.com/archives/C3HD8ARJ5) channel on Kubernetes ([get an invite](http://slack.kubernetes.io/))
+* [Community meetings](#community-meetings)
+
+### Community meetings
+
+etcd contributors and maintainers meet weekly at 11:00 AM (USA Pacific) on Thursdays, alternating between community meetings and issue triage meetings. Meeting agendas are recorded in a [shared Google doc][shared-meeting-notes], and everyone is welcome to suggest additional topics or other agendas.
+
+Issue triage meetings are aimed at getting through our backlog of PRs and Issues. Triage meetings are open to any contributor; you don't have to be a reviewer or approver to help out! They can also be a good way to get started contributing.
+
+The meeting lead role is rotated for each meeting between etcd maintainers or sig-etcd leads and is recorded in a [shared Google sheet][shared-rotation-sheet].
+
+Meeting recordings are uploaded to the official etcd [YouTube channel].
+
+Get calendar invitations by joining the [etcd-dev](https://groups.google.com/g/etcd-dev) mailing list.
+
+Join the CNCF-funded Zoom channel: [zoom.us/my/cncfetcdproject](https://zoom.us/my/cncfetcdproject)
+
+[shared-meeting-notes]: https://docs.google.com/document/d/16XEGyPBisZvmmoIHSZzv__LoyOeluC5a4x353CX0SIM/edit
+[shared-rotation-sheet]: https://docs.google.com/spreadsheets/d/1jodHIO7Dk2VWTs1IRnfMFaRktS9IH8XRyifOnPdSY8I/edit
+[YouTube channel]: https://www.youtube.com/@etcdio
## Contributing
-See [CONTRIBUTING](CONTRIBUTING.md) for details on submitting patches and the contribution workflow.
+See [CONTRIBUTING](CONTRIBUTING.md) for details on setting up your development environment, submitting patches and the contribution workflow.
+
+Please refer to [community-membership.md](Documentation/contributor-guide/community-membership.md#member) for information on becoming an etcd project member. We welcome and look forward to your contributions to the project!
+
+Please also refer to the [roadmap](Documentation/contributor-guide/roadmap.md) for more details on the priorities for the next few major and minor releases.
## Reporting bugs
-See [reporting bugs](https://etcd.io/docs/latest/reporting-bugs) for details about reporting any issues.
+See [reporting bugs](https://github.com/etcd-io/etcd/blob/main/Documentation/contributor-guide/reporting_bugs.md) for details about reporting any issues. Before opening an issue, please check that it is not already covered in our [frequently asked questions].
+
+[frequently asked questions]: https://etcd.io/docs/latest/faq
## Reporting a security vulnerability
@@ -194,16 +180,25 @@ See [security disclosure and release process](security/README.md) for details on
## Issue and PR management
-See [issue triage guidelines](https://etcd.io/docs/current/triage/issues/) for details on how issues are managed.
+See [issue triage guidelines](https://github.com/etcd-io/etcd/blob/main/Documentation/contributor-guide/triage_issues.md) for details on how issues are managed.
-See [PR management](https://etcd.io/docs/current/triage/prs/) for guidelines on how pull requests are managed.
+See [PR management](https://github.com/etcd-io/etcd/blob/main/Documentation/contributor-guide/triage_prs.md) for guidelines on how pull requests are managed.
## etcd Emeritus Maintainers
-These emeritus maintainers dedicated a part of their career to etcd and reviewed code, triaged bugs, and pushed the project forward over a substantial period of time. Their contribution is greatly appreciated.
+These emeritus maintainers dedicated a part of their career to etcd and reviewed code, triaged bugs and pushed the project forward over a substantial period of time. Their contribution is greatly appreciated.
* Fanmin Shi
* Anthony Romano
+* Brandon Philips
+* Joe Betz
+* Gyuho Lee
+* Jingyi Hu
+* Xiang Li
+* Ben Darnell
+* Sam Batschelet
+* Piotr Tabor
+* Hitoshi Mitake
### License
diff --git a/ROADMAP.md b/ROADMAP.md
deleted file mode 100644
index d9898166c32..00000000000
--- a/ROADMAP.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# etcd roadmap
-
-**work in progress**
-
-This document defines a high level roadmap for etcd development.
-
-The dates below should not be considered authoritative, but rather indicative of the projected timeline of the project. The [milestones defined in GitHub](https://github.com/etcd-io/etcd/milestones) represent the most up-to-date and issue-for-issue plans.
-
-etcd 3.3 is our current stable branch. The roadmap below outlines new features that will be added to etcd, and while subject to change, define what future stable will look like.
-
-### etcd 3.4 (2019)
-
-- Stabilization of 3.3 experimental features
-- Support/document downgrade
-- Snapshot restore as Go library
-- Improved client balancer with new gRPC balancer interface
-- Improve single-client put performance
-- Improve large response handling
-- Improve test coverage
-- Decrease test runtime
-- Migrate to Go module for dependency management
diff --git a/api/authpb/auth.pb.go b/api/authpb/auth.pb.go
index 16affcd62cf..37374c5a711 100644
--- a/api/authpb/auth.pb.go
+++ b/api/authpb/auth.pb.go
@@ -232,29 +232,30 @@ func init() {
func init() { proto.RegisterFile("auth.proto", fileDescriptor_8bbd6f3875b0e874) }
var fileDescriptor_8bbd6f3875b0e874 = []byte{
- // 338 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4e, 0xea, 0x40,
- 0x14, 0xc6, 0x3b, 0xb4, 0x70, 0xdb, 0xc3, 0x85, 0x90, 0x13, 0x72, 0x6f, 0x83, 0x49, 0x6d, 0xba,
- 0x6a, 0x5c, 0x54, 0x85, 0x8d, 0x5b, 0x8c, 0x2c, 0x5c, 0x49, 0x26, 0x18, 0x97, 0xa4, 0xa4, 0x13,
- 0x24, 0xc0, 0x4c, 0x33, 0x83, 0x31, 0x6c, 0x7c, 0x0e, 0x17, 0x3e, 0x10, 0x4b, 0x1e, 0x41, 0xf0,
- 0x45, 0x4c, 0x67, 0xf8, 0x13, 0xa2, 0xbb, 0xef, 0x7c, 0xe7, 0xfb, 0x66, 0x7e, 0x99, 0x01, 0x48,
- 0x5f, 0x16, 0xcf, 0x49, 0x2e, 0xc5, 0x42, 0x60, 0xa5, 0xd0, 0xf9, 0xa8, 0xd5, 0x1c, 0x8b, 0xb1,
- 0xd0, 0xd6, 0x65, 0xa1, 0xcc, 0x36, 0xba, 0x86, 0xfa, 0xa3, 0x62, 0xb2, 0x9b, 0x65, 0x0f, 0xf9,
- 0x62, 0x22, 0xb8, 0xc2, 0x73, 0xa8, 0x72, 0x31, 0xcc, 0x53, 0xa5, 0x5e, 0x85, 0xcc, 0x7c, 0x12,
- 0x92, 0xd8, 0xa5, 0xc0, 0x45, 0x7f, 0xe7, 0x44, 0x6f, 0xe0, 0x14, 0x15, 0x44, 0x70, 0x78, 0x3a,
- 0x67, 0x3a, 0xf1, 0x97, 0x6a, 0x8d, 0x2d, 0x70, 0x0f, 0xcd, 0x92, 0xf6, 0x0f, 0x33, 0x36, 0xa1,
- 0x2c, 0xc5, 0x8c, 0x29, 0xdf, 0x0e, 0xed, 0xd8, 0xa3, 0x66, 0xc0, 0x2b, 0xf8, 0x23, 0xcc, 0xcd,
- 0xbe, 0x13, 0x92, 0xb8, 0xda, 0xfe, 0x97, 0x18, 0xe0, 0xe4, 0x94, 0x8b, 0xee, 0x63, 0xd1, 0x07,
- 0x01, 0xe8, 0x33, 0x39, 0x9f, 0x28, 0x35, 0x11, 0x1c, 0x3b, 0xe0, 0xe6, 0x4c, 0xce, 0x07, 0xcb,
- 0xdc, 0xa0, 0xd4, 0xdb, 0xff, 0xf7, 0x27, 0x1c, 0x53, 0x49, 0xb1, 0xa6, 0x87, 0x20, 0x36, 0xc0,
- 0x9e, 0xb2, 0xe5, 0x0e, 0xb1, 0x90, 0x78, 0x06, 0x9e, 0x4c, 0xf9, 0x98, 0x0d, 0x19, 0xcf, 0x7c,
- 0xdb, 0xa0, 0x6b, 0xa3, 0xc7, 0xb3, 0xe8, 0x02, 0x1c, 0x5d, 0x73, 0xc1, 0xa1, 0xbd, 0xee, 0x5d,
- 0xc3, 0x42, 0x0f, 0xca, 0x4f, 0xf4, 0x7e, 0xd0, 0x6b, 0x10, 0xac, 0x81, 0x57, 0x98, 0x66, 0x2c,
- 0x45, 0x03, 0x70, 0xa8, 0x98, 0xb1, 0x5f, 0x9f, 0xe7, 0x06, 0x6a, 0x53, 0xb6, 0x3c, 0x62, 0xf9,
- 0xa5, 0xd0, 0x8e, 0xab, 0x6d, 0xfc, 0x09, 0x4c, 0x4f, 0x83, 0xb7, 0xfe, 0x6a, 0x13, 0x58, 0xeb,
- 0x4d, 0x60, 0xad, 0xb6, 0x01, 0x59, 0x6f, 0x03, 0xf2, 0xb9, 0x0d, 0xc8, 0xfb, 0x57, 0x60, 0x8d,
- 0x2a, 0xfa, 0x23, 0x3b, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x61, 0x66, 0xc6, 0x9d, 0xf4, 0x01,
- 0x00, 0x00,
+ // 359 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4e, 0xc2, 0x40,
+ 0x10, 0xc6, 0xbb, 0xb4, 0x60, 0x3b, 0x08, 0x21, 0x1b, 0xa2, 0x0d, 0xc6, 0xda, 0xf4, 0xd4, 0x78,
+ 0x68, 0x15, 0x0e, 0x7a, 0xc5, 0xc8, 0xc1, 0x93, 0x64, 0x83, 0x31, 0xf1, 0x42, 0x8a, 0xdd, 0xd4,
+ 0x06, 0xd8, 0x6d, 0xda, 0xaa, 0xe1, 0xe2, 0x73, 0x78, 0xf0, 0x81, 0x38, 0xf2, 0x08, 0x82, 0x2f,
+ 0x62, 0xba, 0xcb, 0x9f, 0x10, 0x3d, 0xed, 0x37, 0xdf, 0x7c, 0x33, 0xfb, 0xcb, 0x2e, 0x40, 0xf0,
+ 0x9a, 0xbf, 0x78, 0x49, 0xca, 0x73, 0x8e, 0x2b, 0x85, 0x4e, 0x46, 0xad, 0x66, 0xc4, 0x23, 0x2e,
+ 0x2c, 0xbf, 0x50, 0xb2, 0xeb, 0x5c, 0x42, 0xfd, 0x21, 0xa3, 0x69, 0x37, 0x0c, 0xef, 0x93, 0x3c,
+ 0xe6, 0x2c, 0xc3, 0x67, 0x50, 0x65, 0x7c, 0x98, 0x04, 0x59, 0xf6, 0xce, 0xd3, 0xd0, 0x44, 0x36,
+ 0x72, 0x75, 0x02, 0x8c, 0xf7, 0xd7, 0x8e, 0xf3, 0x01, 0x5a, 0x31, 0x82, 0x31, 0x68, 0x2c, 0x98,
+ 0x52, 0x91, 0x38, 0x24, 0x42, 0xe3, 0x16, 0xe8, 0xdb, 0xc9, 0x92, 0xf0, 0xb7, 0x35, 0x6e, 0x42,
+ 0x39, 0xe5, 0x13, 0x9a, 0x99, 0xaa, 0xad, 0xba, 0x06, 0x91, 0x05, 0xbe, 0x80, 0x03, 0x2e, 0x6f,
+ 0x36, 0x35, 0x1b, 0xb9, 0xd5, 0xf6, 0x91, 0x27, 0x81, 0xbd, 0x7d, 0x2e, 0xb2, 0x89, 0x39, 0x5f,
+ 0x08, 0xa0, 0x4f, 0xd3, 0x69, 0x9c, 0x65, 0x31, 0x67, 0xb8, 0x03, 0x7a, 0x42, 0xd3, 0xe9, 0x60,
+ 0x96, 0x48, 0x94, 0x7a, 0xfb, 0x78, 0xb3, 0x61, 0x97, 0xf2, 0x8a, 0x36, 0xd9, 0x06, 0x71, 0x03,
+ 0xd4, 0x31, 0x9d, 0xad, 0x11, 0x0b, 0x89, 0x4f, 0xc0, 0x48, 0x03, 0x16, 0xd1, 0x21, 0x65, 0xa1,
+ 0xa9, 0x4a, 0x74, 0x61, 0xf4, 0x58, 0xe8, 0x9c, 0x83, 0x26, 0xc6, 0x74, 0xd0, 0x48, 0xaf, 0x7b,
+ 0xdb, 0x50, 0xb0, 0x01, 0xe5, 0x47, 0x72, 0x37, 0xe8, 0x35, 0x10, 0xae, 0x81, 0x51, 0x98, 0xb2,
+ 0x2c, 0x39, 0x03, 0xd0, 0x08, 0x9f, 0xd0, 0x7f, 0x9f, 0xe7, 0x1a, 0x6a, 0x63, 0x3a, 0xdb, 0x61,
+ 0x99, 0x25, 0x5b, 0x75, 0xab, 0x6d, 0xfc, 0x17, 0x98, 0xec, 0x07, 0x6f, 0xae, 0xe6, 0x4b, 0x4b,
+ 0x59, 0x2c, 0x2d, 0x65, 0xbe, 0xb2, 0xd0, 0x62, 0x65, 0xa1, 0xef, 0x95, 0x85, 0x3e, 0x7f, 0x2c,
+ 0xe5, 0xe9, 0x34, 0xe2, 0x1e, 0xcd, 0x9f, 0x43, 0x2f, 0xe6, 0x7e, 0x71, 0xfa, 0x41, 0x12, 0xfb,
+ 0x6f, 0x1d, 0x5f, 0xae, 0x1c, 0x55, 0xc4, 0x3f, 0x77, 0x7e, 0x03, 0x00, 0x00, 0xff, 0xff, 0x61,
+ 0x5a, 0xfe, 0x48, 0x13, 0x02, 0x00, 0x00,
}
func (m *UserAddOptions) Marshal() (dAtA []byte, err error) {
diff --git a/api/authpb/auth.proto b/api/authpb/auth.proto
index 8f82b7cf1e4..5a7856bb73d 100644
--- a/api/authpb/auth.proto
+++ b/api/authpb/auth.proto
@@ -3,6 +3,8 @@ package authpb;
import "gogoproto/gogo.proto";
+option go_package = "go.etcd.io/etcd/api/v3/authpb";
+
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
diff --git a/api/etcdserverpb/etcdserver.pb.go b/api/etcdserverpb/etcdserver.pb.go
index 38434d09c56..eaefa2d2088 100644
--- a/api/etcdserverpb/etcdserver.pb.go
+++ b/api/etcdserverpb/etcdserver.pb.go
@@ -129,31 +129,33 @@ func init() {
func init() { proto.RegisterFile("etcdserver.proto", fileDescriptor_09ffbeb3bebbce7e) }
var fileDescriptor_09ffbeb3bebbce7e = []byte{
- // 380 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30,
- 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb,
- 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58,
- 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f,
- 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79,
- 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d,
- 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a,
- 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89,
- 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93,
- 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe,
- 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c,
- 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70,
- 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab,
- 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11,
- 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7,
- 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89,
- 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82,
- 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6,
- 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63,
- 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6,
- 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff,
- 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea,
- 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f,
- 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00,
+ // 402 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0x41, 0xef, 0xd2, 0x30,
+ 0x14, 0x00, 0x70, 0x0a, 0xfb, 0xff, 0x81, 0x8a, 0x8a, 0x0d, 0x31, 0x2f, 0xc4, 0xcc, 0x05, 0x3d,
+ 0xec, 0xc4, 0x0e, 0x9e, 0xbc, 0xe2, 0x38, 0x2c, 0x11, 0x83, 0xc3, 0x60, 0xe2, 0xad, 0xb2, 0x27,
+ 0x34, 0x01, 0x3a, 0xbb, 0x6e, 0xe1, 0x1b, 0xf8, 0x15, 0xfc, 0x48, 0x1c, 0xfd, 0x04, 0x46, 0xf1,
+ 0x8b, 0x98, 0x8e, 0x8d, 0x55, 0x4f, 0x5b, 0x7e, 0xef, 0xf5, 0xf5, 0xb5, 0x7d, 0x74, 0x88, 0x7a,
+ 0x93, 0x64, 0xa8, 0x0a, 0x54, 0xd3, 0x54, 0x49, 0x2d, 0xd9, 0xa0, 0x91, 0xf4, 0xf3, 0x78, 0xb4,
+ 0x95, 0x5b, 0x59, 0x06, 0x02, 0xf3, 0x77, 0xcd, 0x99, 0x7c, 0x73, 0x68, 0x37, 0xc6, 0xaf, 0x39,
+ 0x66, 0x9a, 0x8d, 0x68, 0x3b, 0x0a, 0x81, 0x78, 0xc4, 0x77, 0x66, 0xce, 0xf9, 0xe7, 0xf3, 0x56,
+ 0xdc, 0x8e, 0x42, 0xf6, 0x8c, 0xde, 0x2f, 0x50, 0xef, 0x64, 0x02, 0x6d, 0x8f, 0xf8, 0xfd, 0x2a,
+ 0x52, 0x19, 0x03, 0xea, 0x2c, 0xb9, 0xde, 0x41, 0xc7, 0x8a, 0x95, 0xc2, 0x9e, 0xd2, 0xce, 0x9a,
+ 0xef, 0xc1, 0xb1, 0x02, 0x06, 0x8c, 0x87, 0x42, 0xc1, 0x9d, 0x47, 0xfc, 0x5e, 0xed, 0xa1, 0x50,
+ 0x6c, 0x42, 0xfb, 0x4b, 0x85, 0xc5, 0x9a, 0xef, 0x73, 0x84, 0x7b, 0x6b, 0x55, 0xc3, 0x75, 0x4e,
+ 0x74, 0x4c, 0xf0, 0x04, 0x5d, 0xab, 0xd1, 0x86, 0xeb, 0x9c, 0xf9, 0x49, 0x64, 0x1a, 0x7a, 0xb7,
+ 0x5d, 0x48, 0xdc, 0x30, 0x7b, 0x49, 0xe9, 0xfc, 0x94, 0x0a, 0xc5, 0xb5, 0x90, 0x47, 0xe8, 0x7b,
+ 0xc4, 0xef, 0x54, 0x85, 0x2c, 0x37, 0x67, 0xfb, 0xc8, 0x85, 0x06, 0x6a, 0xb5, 0x5a, 0x0a, 0x1b,
+ 0xd3, 0xbb, 0x95, 0x38, 0x6e, 0x10, 0x1e, 0x58, 0x3d, 0x5c, 0xc9, 0xec, 0x1f, 0xe3, 0x26, 0x57,
+ 0x99, 0x28, 0x10, 0x06, 0xd6, 0xd2, 0x86, 0xcd, 0x9d, 0xae, 0xa4, 0xd2, 0x98, 0xc0, 0x43, 0x2b,
+ 0xa1, 0x32, 0x13, 0x7d, 0x9f, 0x4b, 0x95, 0x1f, 0xe0, 0x91, 0x1d, 0xbd, 0x9a, 0xe9, 0xea, 0x83,
+ 0x38, 0x20, 0x3c, 0xb6, 0xba, 0x2e, 0xa5, 0xac, 0xaa, 0x15, 0xf2, 0x03, 0x0c, 0xff, 0xa9, 0x5a,
+ 0x1a, 0x73, 0xcd, 0x43, 0x7f, 0x51, 0x98, 0xed, 0xe0, 0x89, 0x75, 0x2b, 0x35, 0x4e, 0xde, 0xd2,
+ 0xde, 0x02, 0x35, 0x4f, 0xb8, 0xe6, 0xa6, 0xd2, 0x3b, 0x99, 0xe0, 0x7f, 0xd3, 0x50, 0x99, 0x39,
+ 0xe1, 0x9b, 0x7d, 0x9e, 0x69, 0x54, 0x51, 0x58, 0x0e, 0xc5, 0xed, 0x15, 0x6e, 0x3c, 0x7b, 0x7d,
+ 0xfe, 0xed, 0xb6, 0xce, 0x17, 0x97, 0xfc, 0xb8, 0xb8, 0xe4, 0xd7, 0xc5, 0x25, 0xdf, 0xff, 0xb8,
+ 0xad, 0x4f, 0x2f, 0xb6, 0x72, 0x6a, 0x86, 0x72, 0x2a, 0x64, 0x60, 0xbe, 0x01, 0x4f, 0x45, 0x50,
+ 0xbc, 0x0a, 0xec, 0x41, 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x79, 0xf9, 0xf5, 0xc9, 0x02,
+ 0x00, 0x00,
}
func (m *Request) Marshal() (dAtA []byte, err error) {
diff --git a/api/etcdserverpb/etcdserver.proto b/api/etcdserverpb/etcdserver.proto
index 25e0aca5d9f..ff639b9c96c 100644
--- a/api/etcdserverpb/etcdserver.proto
+++ b/api/etcdserverpb/etcdserver.proto
@@ -3,6 +3,8 @@ package etcdserverpb;
import "gogoproto/gogo.proto";
+option go_package = "go.etcd.io/etcd/api/v3/etcdserverpb";
+
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
diff --git a/api/etcdserverpb/gw/rpc.pb.gw.go b/api/etcdserverpb/gw/rpc.pb.gw.go
index 2fca126af85..5e8132a2fe7 100644
--- a/api/etcdserverpb/gw/rpc.pb.gw.go
+++ b/api/etcdserverpb/gw/rpc.pb.gw.go
@@ -9,893 +9,665 @@ It translates gRPC into RESTful JSON APIs.
package gw
import (
+ protov1 "github.com/golang/protobuf/proto"
+
"context"
+ "errors"
"go.etcd.io/etcd/api/v3/etcdserverpb"
"io"
"net/http"
- "github.com/golang/protobuf/descriptor"
- "github.com/golang/protobuf/proto"
- "github.com/grpc-ecosystem/grpc-gateway/runtime"
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
+ "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
)
// Suppress "imported and not used" errors
-var _ codes.Code
-var _ io.Reader
-var _ status.Status
-var _ = runtime.String
-var _ = utilities.NewDoubleArray
-var _ = descriptor.ForMessage
+var (
+ _ codes.Code
+ _ io.Reader
+ _ status.Status
+ _ = errors.New
+ _ = runtime.String
+ _ = utilities.NewDoubleArray
+ _ = metadata.Join
+)
func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.RangeRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.RangeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Range(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.KVServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.RangeRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.RangeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Range(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.PutRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.PutRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Put(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.KVServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.PutRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.PutRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Put(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.DeleteRangeRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.DeleteRangeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.DeleteRange(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.KVServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.DeleteRangeRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.DeleteRangeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.DeleteRange(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.TxnRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.TxnRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Txn(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.KVServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.TxnRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.TxnRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Txn(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.CompactionRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.CompactionRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Compact(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.KVServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.CompactionRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.CompactionRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Compact(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
-func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.WatchClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Watch_WatchClient, runtime.ServerMetadata, error) {
+func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.WatchClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Watch_WatchClient, runtime.ServerMetadata, chan error, error) {
var metadata runtime.ServerMetadata
+ errChan := make(chan error, 1)
stream, err := client.Watch(ctx)
if err != nil {
- grpclog.Infof("Failed to start streaming: %v", err)
- return nil, metadata, err
+ grpclog.Errorf("Failed to start streaming: %v", err)
+ close(errChan)
+ return nil, metadata, errChan, err
}
dec := marshaler.NewDecoder(req.Body)
handleSend := func() error {
var protoReq etcdserverpb.WatchRequest
- err := dec.Decode(&protoReq)
- if err == io.EOF {
+ err := dec.Decode(protov1.MessageV2(&protoReq))
+ if errors.Is(err, io.EOF) {
return err
}
if err != nil {
- grpclog.Infof("Failed to decode request: %v", err)
- return err
+ grpclog.Errorf("Failed to decode request: %v", err)
+ return status.Errorf(codes.InvalidArgument, "Failed to decode request: %v", err)
}
if err := stream.Send(&protoReq); err != nil {
- grpclog.Infof("Failed to send request: %v", err)
+ grpclog.Errorf("Failed to send request: %v", err)
return err
}
return nil
}
- if err := handleSend(); err != nil {
- if cerr := stream.CloseSend(); cerr != nil {
- grpclog.Infof("Failed to terminate client stream: %v", cerr)
- }
- if err == io.EOF {
- return stream, metadata, nil
- }
- return nil, metadata, err
- }
go func() {
+ defer close(errChan)
for {
if err := handleSend(); err != nil {
+ errChan <- err
break
}
}
if err := stream.CloseSend(); err != nil {
- grpclog.Infof("Failed to terminate client stream: %v", err)
+ grpclog.Errorf("Failed to terminate client stream: %v", err)
}
}()
header, err := stream.Header()
if err != nil {
- grpclog.Infof("Failed to get header from client: %v", err)
- return nil, metadata, err
+ grpclog.Errorf("Failed to get header from client: %v", err)
+ return nil, metadata, errChan, err
}
metadata.HeaderMD = header
- return stream, metadata, nil
+ return stream, metadata, errChan, nil
}
func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseGrantRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.LeaseGrantRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.LeaseGrant(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseGrantRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.LeaseGrantRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.LeaseGrant(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseRevokeRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.LeaseRevokeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseRevokeRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.LeaseRevokeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.LeaseRevoke(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Lease_LeaseRevoke_1(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseRevokeRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.LeaseRevokeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Lease_LeaseRevoke_1(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseRevokeRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.LeaseRevokeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.LeaseRevoke(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
-func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) {
+func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Lease_LeaseKeepAliveClient, runtime.ServerMetadata, chan error, error) {
var metadata runtime.ServerMetadata
+ errChan := make(chan error, 1)
stream, err := client.LeaseKeepAlive(ctx)
if err != nil {
- grpclog.Infof("Failed to start streaming: %v", err)
- return nil, metadata, err
+ grpclog.Errorf("Failed to start streaming: %v", err)
+ close(errChan)
+ return nil, metadata, errChan, err
}
dec := marshaler.NewDecoder(req.Body)
handleSend := func() error {
var protoReq etcdserverpb.LeaseKeepAliveRequest
- err := dec.Decode(&protoReq)
- if err == io.EOF {
+ err := dec.Decode(protov1.MessageV2(&protoReq))
+ if errors.Is(err, io.EOF) {
return err
}
if err != nil {
- grpclog.Infof("Failed to decode request: %v", err)
- return err
+ grpclog.Errorf("Failed to decode request: %v", err)
+ return status.Errorf(codes.InvalidArgument, "Failed to decode request: %v", err)
}
if err := stream.Send(&protoReq); err != nil {
- grpclog.Infof("Failed to send request: %v", err)
+ grpclog.Errorf("Failed to send request: %v", err)
return err
}
return nil
}
- if err := handleSend(); err != nil {
- if cerr := stream.CloseSend(); cerr != nil {
- grpclog.Infof("Failed to terminate client stream: %v", cerr)
- }
- if err == io.EOF {
- return stream, metadata, nil
- }
- return nil, metadata, err
- }
go func() {
+ defer close(errChan)
for {
if err := handleSend(); err != nil {
+ errChan <- err
break
}
}
if err := stream.CloseSend(); err != nil {
- grpclog.Infof("Failed to terminate client stream: %v", err)
+ grpclog.Errorf("Failed to terminate client stream: %v", err)
}
}()
header, err := stream.Header()
if err != nil {
- grpclog.Infof("Failed to get header from client: %v", err)
- return nil, metadata, err
+ grpclog.Errorf("Failed to get header from client: %v", err)
+ return nil, metadata, errChan, err
}
metadata.HeaderMD = header
- return stream, metadata, nil
+ return stream, metadata, errChan, nil
}
func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseTimeToLiveRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.LeaseTimeToLiveRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseTimeToLiveRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.LeaseTimeToLiveRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.LeaseTimeToLive(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Lease_LeaseTimeToLive_1(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseTimeToLiveRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.LeaseTimeToLiveRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Lease_LeaseTimeToLive_1(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseTimeToLiveRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.LeaseTimeToLiveRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.LeaseTimeToLive(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Lease_LeaseLeases_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseLeasesRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.LeaseLeasesRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.LeaseLeases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Lease_LeaseLeases_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseLeasesRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.LeaseLeasesRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.LeaseLeases(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Lease_LeaseLeases_1(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseLeasesRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.LeaseLeasesRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.LeaseLeases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Lease_LeaseLeases_1(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseLeasesRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.LeaseLeasesRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.LeaseLeases(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MemberAddRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.MemberAddRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.MemberAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.ClusterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MemberAddRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.MemberAddRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.MemberAdd(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MemberRemoveRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.MemberRemoveRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.MemberRemove(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.ClusterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MemberRemoveRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.MemberRemoveRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.MemberRemove(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MemberUpdateRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.MemberUpdateRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.MemberUpdate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.ClusterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MemberUpdateRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.MemberUpdateRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.MemberUpdate(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MemberListRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.MemberListRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.MemberList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.ClusterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MemberListRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.MemberListRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.MemberList(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Cluster_MemberPromote_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MemberPromoteRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.MemberPromoteRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.MemberPromote(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Cluster_MemberPromote_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.ClusterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MemberPromoteRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.MemberPromoteRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.MemberPromote(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AlarmRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AlarmRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Alarm(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AlarmRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AlarmRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Alarm(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.StatusRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.StatusRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Status(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.StatusRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.StatusRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Status(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.DefragmentRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.DefragmentRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Defragment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.DefragmentRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.DefragmentRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Defragment(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.HashRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.HashRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Hash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.HashRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.HashRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Hash(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Maintenance_HashKV_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.HashKVRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.HashKVRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.HashKV(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Maintenance_HashKV_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.HashKVRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.HashKVRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.HashKV(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Maintenance_SnapshotClient, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.SnapshotRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.SnapshotRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
stream, err := client.Snapshot(ctx, &protoReq)
if err != nil {
return nil, metadata, err
@@ -906,758 +678,569 @@ func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marsh
}
metadata.HeaderMD = header
return stream, metadata, nil
-
}
func request_Maintenance_MoveLeader_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MoveLeaderRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.MoveLeaderRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.MoveLeader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Maintenance_MoveLeader_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MoveLeaderRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.MoveLeaderRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.MoveLeader(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Maintenance_Downgrade_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.DowngradeRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.DowngradeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Downgrade(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Maintenance_Downgrade_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.DowngradeRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.DowngradeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Downgrade(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthEnableRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthEnableRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.AuthEnable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthEnableRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthEnableRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.AuthEnable(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthDisableRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthDisableRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.AuthDisable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthDisableRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthDisableRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.AuthDisable(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Auth_AuthStatus_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthStatusRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthStatusRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.AuthStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Auth_AuthStatus_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthStatusRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthStatusRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.AuthStatus(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthenticateRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthenticateRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Authenticate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthenticateRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthenticateRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Authenticate(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserAddRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthUserAddRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.UserAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserAddRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthUserAddRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.UserAdd(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserGetRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthUserGetRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.UserGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserGetRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthUserGetRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.UserGet(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserListRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthUserListRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.UserList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserListRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthUserListRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.UserList(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserDeleteRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthUserDeleteRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.UserDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserDeleteRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthUserDeleteRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.UserDelete(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserChangePasswordRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthUserChangePasswordRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.UserChangePassword(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserChangePasswordRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthUserChangePasswordRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.UserChangePassword(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserGrantRoleRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthUserGrantRoleRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.UserGrantRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
-func local_request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserGrantRoleRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+func local_request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthUserGrantRoleRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.UserGrantRole(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserRevokeRoleRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthUserRevokeRoleRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.UserRevokeRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserRevokeRoleRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthUserRevokeRoleRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.UserRevokeRole(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleAddRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthRoleAddRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.RoleAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleAddRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthRoleAddRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.RoleAdd(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleGetRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthRoleGetRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.RoleGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleGetRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthRoleGetRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.RoleGet(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleListRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthRoleListRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.RoleList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleListRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthRoleListRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.RoleList(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleDeleteRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthRoleDeleteRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.RoleDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleDeleteRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthRoleDeleteRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.RoleDelete(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleGrantPermissionRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthRoleGrantPermissionRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.RoleGrantPermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleGrantPermissionRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthRoleGrantPermissionRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.RoleGrantPermission(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleRevokePermissionRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthRoleRevokePermissionRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.RoleRevokePermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleRevokePermissionRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq etcdserverpb.AuthRoleRevokePermissionRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.RoleRevokePermission(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
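
The regenerated request helpers above decode straight from req.Body and wrap the etcdserverpb request/response messages with protov1.MessageV2 so the grpc-gateway v2 marshalers, which operate on google.golang.org/protobuf messages, can populate and serialize them. A minimal sketch of that bridging outside the generated code, assuming protov1 aliases github.com/golang/protobuf/proto and the usual etcd API import path:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"

	protov1 "github.com/golang/protobuf/proto" // assumed alias for the v1->v2 bridge
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb" // assumed import path
)

func main() {
	marshaler := &runtime.JSONPb{}
	body := strings.NewReader(`{"name": "root"}`)

	var protoReq pb.AuthRoleGrantPermissionRequest
	// Wrapping with MessageV2 lets the v2-aware decoder fill the v1-style struct
	// in place, mirroring what local_request_Auth_RoleGrantPermission_0 does
	// with the HTTP request body.
	if err := marshaler.NewDecoder(body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
		panic(err)
	}
	fmt.Println(protoReq.Name) // "root"
}
```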
// etcdserverpb.RegisterKVHandlerServer registers the http handlers for service KV to "mux".
// UnaryRPC :call etcdserverpb.KVServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterKVHandlerFromEndpoint instead.
+// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterKVHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.KVServer) error {
-
- mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.KV/Range", runtime.WithHTTPPathPattern("/v3/kv/range"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_KV_Range_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_KV_Range_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_KV_Range_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_KV_Range_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_KV_Put_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_KV_Put_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.KV/Put", runtime.WithHTTPPathPattern("/v3/kv/put"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_KV_Put_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_KV_Put_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_KV_Put_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_KV_Put_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_KV_DeleteRange_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_KV_DeleteRange_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.KV/DeleteRange", runtime.WithHTTPPathPattern("/v3/kv/deleterange"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_KV_DeleteRange_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_KV_DeleteRange_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_KV_DeleteRange_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_KV_DeleteRange_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_KV_Txn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_KV_Txn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.KV/Txn", runtime.WithHTTPPathPattern("/v3/kv/txn"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_KV_Txn_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_KV_Txn_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_KV_Txn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_KV_Txn_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_KV_Compact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_KV_Compact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.KV/Compact", runtime.WithHTTPPathPattern("/v3/kv/compaction"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_KV_Compact_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_KV_Compact_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_KV_Compact_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_KV_Compact_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
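
The new doc comment on RegisterKVHandlerServer points callers at RegisterKVHandlerFromEndpoint, which dials the gRPC endpoint instead of invoking the server in-process and therefore keeps interceptors and other gRPC features working. A rough usage sketch, with the endpoint address and the import path of this generated gw package assumed for illustration:

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	gw "go.etcd.io/etcd/api/v3/etcdserverpb/gw" // assumed import path of this generated package
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}

	// Proxies HTTP/JSON calls such as POST /v3/kv/range to the gRPC endpoint.
	if err := gw.RegisterKVHandlerFromEndpoint(ctx, mux, "127.0.0.1:2379", opts); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```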
@@ -1666,9 +1249,10 @@ func RegisterKVHandlerServer(ctx context.Context, mux *runtime.ServeMux, server
// etcdserverpb.RegisterWatchHandlerServer registers the http handlers for service Watch to "mux".
// UnaryRPC :call etcdserverpb.WatchServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterWatchHandlerFromEndpoint instead.
+// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterWatchHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.WatchServer) error {
-
- mux.Handle("POST", pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport")
_, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
@@ -1681,153 +1265,155 @@ func RegisterWatchHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv
// etcdserverpb.RegisterLeaseHandlerServer registers the http handlers for service Lease to "mux".
// UnaryRPC :call etcdserverpb.LeaseServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterLeaseHandlerFromEndpoint instead.
+// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.LeaseServer) error {
-
- mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseGrant", runtime.WithHTTPPathPattern("/v3/lease/grant"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Lease_LeaseGrant_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Lease_LeaseGrant_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lease_LeaseGrant_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Lease_LeaseGrant_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Lease_LeaseRevoke_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseRevoke_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseRevoke", runtime.WithHTTPPathPattern("/v3/lease/revoke"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Lease_LeaseRevoke_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Lease_LeaseRevoke_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lease_LeaseRevoke_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Lease_LeaseRevoke_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Lease_LeaseRevoke_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseRevoke_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseRevoke", runtime.WithHTTPPathPattern("/v3/kv/lease/revoke"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Lease_LeaseRevoke_1(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Lease_LeaseRevoke_1(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lease_LeaseRevoke_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Lease_LeaseRevoke_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
- mux.Handle("POST", pattern_Lease_LeaseKeepAlive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseKeepAlive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport")
_, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
})
-
- mux.Handle("POST", pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseTimeToLive", runtime.WithHTTPPathPattern("/v3/lease/timetolive"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Lease_LeaseTimeToLive_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lease_LeaseTimeToLive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Lease_LeaseTimeToLive_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Lease_LeaseTimeToLive_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseTimeToLive_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseTimeToLive", runtime.WithHTTPPathPattern("/v3/kv/lease/timetolive"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Lease_LeaseTimeToLive_1(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Lease_LeaseTimeToLive_1(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lease_LeaseTimeToLive_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Lease_LeaseTimeToLive_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Lease_LeaseLeases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseLeases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseLeases", runtime.WithHTTPPathPattern("/v3/lease/leases"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Lease_LeaseLeases_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Lease_LeaseLeases_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lease_LeaseLeases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Lease_LeaseLeases_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Lease_LeaseLeases_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseLeases_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseLeases", runtime.WithHTTPPathPattern("/v3/kv/lease/leases"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Lease_LeaseLeases_1(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Lease_LeaseLeases_1(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lease_LeaseLeases_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Lease_LeaseLeases_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
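
Compared with the old handlers, each in-process handler above now installs a runtime.ServerTransportStream in the request context and joins stream.Header()/stream.Trailer() into the ServerMetadata before forwarding the response. That is what lets a locally registered server attach gRPC-style header and trailer metadata and have it reach the HTTP client. A small self-contained sketch of the same wiring (the metadata keys are made up for illustration):

```go
package main

import (
	"context"
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

func main() {
	// What the generated handler does before calling the local method:
	var stream runtime.ServerTransportStream
	ctx := grpc.NewContextWithServerTransportStream(context.Background(), &stream)

	// What a local server method (e.g. LeaseGrant) can then do:
	_ = grpc.SetHeader(ctx, metadata.Pairs("x-demo-header", "granted"))
	_ = grpc.SetTrailer(ctx, metadata.Pairs("x-demo-trailer", "done"))

	// What the generated handler collects afterwards and merges into
	// md.HeaderMD / md.TrailerMD for the HTTP response:
	fmt.Println(stream.Header())  // map[x-demo-header:[granted]]
	fmt.Println(stream.Trailer()) // map[x-demo-trailer:[done]]
}
```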
@@ -1836,106 +1422,108 @@ func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv
// etcdserverpb.RegisterClusterHandlerServer registers the http handlers for service Cluster to "mux".
// UnaryRPC :call etcdserverpb.ClusterServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterClusterHandlerFromEndpoint instead.
+// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterClusterHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.ClusterServer) error {
-
- mux.Handle("POST", pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberAdd", runtime.WithHTTPPathPattern("/v3/cluster/member/add"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Cluster_MemberAdd_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Cluster_MemberAdd_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Cluster_MemberAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Cluster_MemberAdd_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Cluster_MemberRemove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberRemove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberRemove", runtime.WithHTTPPathPattern("/v3/cluster/member/remove"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Cluster_MemberRemove_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Cluster_MemberRemove_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Cluster_MemberRemove_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Cluster_MemberRemove_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Cluster_MemberUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberUpdate", runtime.WithHTTPPathPattern("/v3/cluster/member/update"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Cluster_MemberUpdate_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Cluster_MemberUpdate_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Cluster_MemberUpdate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Cluster_MemberUpdate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Cluster_MemberList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberList", runtime.WithHTTPPathPattern("/v3/cluster/member/list"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Cluster_MemberList_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Cluster_MemberList_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Cluster_MemberList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Cluster_MemberList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Cluster_MemberPromote_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberPromote_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberPromote", runtime.WithHTTPPathPattern("/v3/cluster/member/promote"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Cluster_MemberPromote_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Cluster_MemberPromote_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Cluster_MemberPromote_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Cluster_MemberPromote_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
@@ -1944,153 +1532,155 @@ func RegisterClusterHandlerServer(ctx context.Context, mux *runtime.ServeMux, se
// etcdserverpb.RegisterMaintenanceHandlerServer registers the http handlers for service Maintenance to "mux".
// UnaryRPC :call etcdserverpb.MaintenanceServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterMaintenanceHandlerFromEndpoint instead.
+// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.MaintenanceServer) error {
-
- mux.Handle("POST", pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/Alarm", runtime.WithHTTPPathPattern("/v3/maintenance/alarm"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Maintenance_Alarm_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Maintenance_Alarm_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Maintenance_Alarm_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Maintenance_Alarm_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Maintenance_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Maintenance_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/Status", runtime.WithHTTPPathPattern("/v3/maintenance/status"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Maintenance_Status_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Maintenance_Status_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Maintenance_Status_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Maintenance_Status_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Maintenance_Defragment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Maintenance_Defragment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/Defragment", runtime.WithHTTPPathPattern("/v3/maintenance/defragment"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Maintenance_Defragment_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Maintenance_Defragment_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Maintenance_Defragment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Maintenance_Defragment_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Maintenance_Hash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Maintenance_Hash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/Hash", runtime.WithHTTPPathPattern("/v3/maintenance/hash"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Maintenance_Hash_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Maintenance_Hash_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Maintenance_Hash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Maintenance_Hash_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Maintenance_HashKV_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Maintenance_HashKV_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/HashKV", runtime.WithHTTPPathPattern("/v3/maintenance/hashkv"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Maintenance_HashKV_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Maintenance_HashKV_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Maintenance_HashKV_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Maintenance_HashKV_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
- mux.Handle("POST", pattern_Maintenance_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Maintenance_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport")
_, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
})
-
- mux.Handle("POST", pattern_Maintenance_MoveLeader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Maintenance_MoveLeader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/MoveLeader", runtime.WithHTTPPathPattern("/v3/maintenance/transfer-leadership"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Maintenance_MoveLeader_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Maintenance_MoveLeader_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Maintenance_MoveLeader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Maintenance_MoveLeader_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Maintenance_Downgrade_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Maintenance_Downgrade_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/Downgrade", runtime.WithHTTPPathPattern("/v3/maintenance/downgrade"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Maintenance_Downgrade_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Maintenance_Downgrade_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Maintenance_Downgrade_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Maintenance_Downgrade_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
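
Another behavioural change visible in these hunks: AnnotateIncomingContext is now given the fully qualified RPC method name and a runtime.WithHTTPPathPattern option, so both values become retrievable from the request context via runtime.RPCMethod and runtime.HTTPPathPattern. A sketch of a ServeMux WithMetadata annotator that takes advantage of this; the header names below are made up for illustration:

```go
package main

import (
	"context"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc/metadata"
)

func newGatewayMux() *runtime.ServeMux {
	return runtime.NewServeMux(
		runtime.WithMetadata(func(ctx context.Context, _ *http.Request) metadata.MD {
			pairs := map[string]string{}
			if method, ok := runtime.RPCMethod(ctx); ok {
				pairs["x-gateway-rpc-method"] = method // e.g. "/etcdserverpb.Maintenance/Status"
			}
			if pattern, ok := runtime.HTTPPathPattern(ctx); ok {
				pairs["x-gateway-path-pattern"] = pattern // e.g. "/v3/maintenance/status"
			}
			return metadata.New(pairs)
		}),
	)
}

func main() {
	_ = newGatewayMux() // combine with the registration sketches above before serving
}
```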
@@ -2099,346 +1689,348 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux
// etcdserverpb.RegisterAuthHandlerServer registers the http handlers for service Auth to "mux".
// UnaryRPC :call etcdserverpb.AuthServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterAuthHandlerFromEndpoint instead.
+// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.AuthServer) error {
-
- mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/AuthEnable", runtime.WithHTTPPathPattern("/v3/auth/enable"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Auth_AuthEnable_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Auth_AuthEnable_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_AuthEnable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_AuthEnable_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_AuthDisable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_AuthDisable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/AuthDisable", runtime.WithHTTPPathPattern("/v3/auth/disable"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Auth_AuthDisable_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Auth_AuthDisable_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_AuthDisable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_AuthDisable_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_AuthStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_AuthStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/AuthStatus", runtime.WithHTTPPathPattern("/v3/auth/status"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Auth_AuthStatus_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Auth_AuthStatus_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_AuthStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_AuthStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_Authenticate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_Authenticate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/Authenticate", runtime.WithHTTPPathPattern("/v3/auth/authenticate"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Auth_Authenticate_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Auth_Authenticate_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_Authenticate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_Authenticate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_UserAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_UserAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserAdd", runtime.WithHTTPPathPattern("/v3/auth/user/add"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Auth_UserAdd_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Auth_UserAdd_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_UserAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_UserAdd_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_UserGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_UserGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserGet", runtime.WithHTTPPathPattern("/v3/auth/user/get"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Auth_UserGet_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Auth_UserGet_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_UserGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_UserGet_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_UserList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_UserList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserList", runtime.WithHTTPPathPattern("/v3/auth/user/list"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Auth_UserList_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Auth_UserList_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_UserList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_UserList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_UserDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_UserDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserDelete", runtime.WithHTTPPathPattern("/v3/auth/user/delete"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Auth_UserDelete_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Auth_UserDelete_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_UserDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_UserDelete_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_UserChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_UserChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserChangePassword", runtime.WithHTTPPathPattern("/v3/auth/user/changepw"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Auth_UserChangePassword_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Auth_UserChangePassword_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_UserChangePassword_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_UserChangePassword_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_UserGrantRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_UserGrantRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserGrantRole", runtime.WithHTTPPathPattern("/v3/auth/user/grant"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Auth_UserGrantRole_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Auth_UserGrantRole_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_UserGrantRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_UserGrantRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_UserRevokeRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_UserRevokeRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserRevokeRole", runtime.WithHTTPPathPattern("/v3/auth/user/revoke"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Auth_UserRevokeRole_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Auth_UserRevokeRole_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_UserRevokeRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_UserRevokeRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_RoleAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_RoleAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/RoleAdd", runtime.WithHTTPPathPattern("/v3/auth/role/add"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Auth_RoleAdd_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Auth_RoleAdd_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_RoleAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_RoleAdd_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_RoleGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_RoleGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/RoleGet", runtime.WithHTTPPathPattern("/v3/auth/role/get"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Auth_RoleGet_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Auth_RoleGet_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_RoleGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_RoleGet_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_RoleList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_RoleList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/RoleList", runtime.WithHTTPPathPattern("/v3/auth/role/list"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Auth_RoleList_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Auth_RoleList_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_RoleList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_RoleList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_RoleDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_RoleDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/RoleDelete", runtime.WithHTTPPathPattern("/v3/auth/role/delete"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Auth_RoleDelete_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Auth_RoleDelete_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_RoleDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_RoleDelete_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_RoleGrantPermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_RoleGrantPermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/RoleGrantPermission", runtime.WithHTTPPathPattern("/v3/auth/role/grant"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Auth_RoleGrantPermission_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Auth_RoleGrantPermission_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_RoleGrantPermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_RoleGrantPermission_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_RoleRevokePermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_RoleRevokePermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/RoleRevokePermission", runtime.WithHTTPPathPattern("/v3/auth/role/revoke"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Auth_RoleRevokePermission_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Auth_RoleRevokePermission_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_RoleRevokePermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_RoleRevokePermission_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
@@ -2447,25 +2039,24 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve
// RegisterKVHandlerFromEndpoint is same as RegisterKVHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterKVHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
+ conn, err := grpc.NewClient(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
-
return RegisterKVHandler(ctx, mux, conn)
}
@@ -2479,158 +2070,133 @@ func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.Cl
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "KVClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "KVClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "KVClient" to call the correct interceptors.
+// "KVClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.KVClient) error {
-
- mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.KV/Range", runtime.WithHTTPPathPattern("/v3/kv/range"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_KV_Range_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_KV_Range_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_KV_Range_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_KV_Range_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_KV_Put_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_KV_Put_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.KV/Put", runtime.WithHTTPPathPattern("/v3/kv/put"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_KV_Put_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_KV_Put_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_KV_Put_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_KV_Put_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_KV_DeleteRange_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_KV_DeleteRange_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.KV/DeleteRange", runtime.WithHTTPPathPattern("/v3/kv/deleterange"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_KV_DeleteRange_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_KV_DeleteRange_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_KV_DeleteRange_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_KV_DeleteRange_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_KV_Txn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_KV_Txn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.KV/Txn", runtime.WithHTTPPathPattern("/v3/kv/txn"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_KV_Txn_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_KV_Txn_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
- }
-
- forward_KV_Txn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ }
+ forward_KV_Txn_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_KV_Compact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_KV_Compact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.KV/Compact", runtime.WithHTTPPathPattern("/v3/kv/compaction"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_KV_Compact_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_KV_Compact_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_KV_Compact_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_KV_Compact_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_KV_Range_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "range"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_KV_Put_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "put"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_KV_DeleteRange_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "deleterange"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_KV_Txn_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "txn"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_KV_Compact_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "compaction"}, "", runtime.AssumeColonVerbOpt(true)))
+ pattern_KV_Range_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "range"}, ""))
+ pattern_KV_Put_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "put"}, ""))
+ pattern_KV_DeleteRange_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "deleterange"}, ""))
+ pattern_KV_Txn_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "txn"}, ""))
+ pattern_KV_Compact_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "compaction"}, ""))
)
var (
- forward_KV_Range_0 = runtime.ForwardResponseMessage
-
- forward_KV_Put_0 = runtime.ForwardResponseMessage
-
+ forward_KV_Range_0 = runtime.ForwardResponseMessage
+ forward_KV_Put_0 = runtime.ForwardResponseMessage
forward_KV_DeleteRange_0 = runtime.ForwardResponseMessage
-
- forward_KV_Txn_0 = runtime.ForwardResponseMessage
-
- forward_KV_Compact_0 = runtime.ForwardResponseMessage
+ forward_KV_Txn_0 = runtime.ForwardResponseMessage
+ forward_KV_Compact_0 = runtime.ForwardResponseMessage
)
// RegisterWatchHandlerFromEndpoint is same as RegisterWatchHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterWatchHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
+ conn, err := grpc.NewClient(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
-
return RegisterWatchHandler(ctx, mux, conn)
}
@@ -2644,34 +2210,41 @@ func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "WatchClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WatchClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "WatchClient" to call the correct interceptors.
+// "WatchClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterWatchHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.WatchClient) error {
-
- mux.Handle("POST", pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Watch/Watch", runtime.WithHTTPPathPattern("/v3/watch"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Watch_Watch_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+
+ resp, md, reqErrChan, err := request_Watch_Watch_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Watch_Watch_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
-
+ go func() {
+ for err := range reqErrChan {
+ if err != nil && !errors.Is(err, io.EOF) {
+ runtime.HTTPStreamError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ }
+ }
+ }()
+ forward_Watch_Watch_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) {
+ m1, err := resp.Recv()
+ return protov1.MessageV2(m1), err
+ }, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_Watch_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v3", "watch"}, "", runtime.AssumeColonVerbOpt(true)))
+ pattern_Watch_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v3", "watch"}, ""))
)
var (
@@ -2681,25 +2254,24 @@ var (
// RegisterLeaseHandlerFromEndpoint is same as RegisterLeaseHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterLeaseHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
+ conn, err := grpc.NewClient(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
-
return RegisterLeaseHandler(ctx, mux, conn)
}
@@ -2713,230 +2285,201 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "LeaseClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LeaseClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "LeaseClient" to call the correct interceptors.
+// "LeaseClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.LeaseClient) error {
-
- mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseGrant", runtime.WithHTTPPathPattern("/v3/lease/grant"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Lease_LeaseGrant_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Lease_LeaseGrant_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lease_LeaseGrant_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Lease_LeaseGrant_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Lease_LeaseRevoke_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseRevoke_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseRevoke", runtime.WithHTTPPathPattern("/v3/lease/revoke"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Lease_LeaseRevoke_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Lease_LeaseRevoke_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lease_LeaseRevoke_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Lease_LeaseRevoke_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Lease_LeaseRevoke_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseRevoke_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseRevoke", runtime.WithHTTPPathPattern("/v3/kv/lease/revoke"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Lease_LeaseRevoke_1(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Lease_LeaseRevoke_1(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lease_LeaseRevoke_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Lease_LeaseRevoke_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Lease_LeaseKeepAlive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseKeepAlive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseKeepAlive", runtime.WithHTTPPathPattern("/v3/lease/keepalive"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Lease_LeaseKeepAlive_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+
+ resp, md, reqErrChan, err := request_Lease_LeaseKeepAlive_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lease_LeaseKeepAlive_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
-
+ go func() {
+ for err := range reqErrChan {
+ if err != nil && !errors.Is(err, io.EOF) {
+ runtime.HTTPStreamError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ }
+ }
+ }()
+ forward_Lease_LeaseKeepAlive_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) {
+ m1, err := resp.Recv()
+ return protov1.MessageV2(m1), err
+ }, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseTimeToLive", runtime.WithHTTPPathPattern("/v3/lease/timetolive"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Lease_LeaseTimeToLive_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lease_LeaseTimeToLive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Lease_LeaseTimeToLive_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Lease_LeaseTimeToLive_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseTimeToLive_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseTimeToLive", runtime.WithHTTPPathPattern("/v3/kv/lease/timetolive"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Lease_LeaseTimeToLive_1(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Lease_LeaseTimeToLive_1(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lease_LeaseTimeToLive_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Lease_LeaseTimeToLive_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Lease_LeaseLeases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseLeases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseLeases", runtime.WithHTTPPathPattern("/v3/lease/leases"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Lease_LeaseLeases_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Lease_LeaseLeases_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lease_LeaseLeases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Lease_LeaseLeases_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Lease_LeaseLeases_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseLeases_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseLeases", runtime.WithHTTPPathPattern("/v3/kv/lease/leases"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Lease_LeaseLeases_1(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Lease_LeaseLeases_1(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lease_LeaseLeases_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Lease_LeaseLeases_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_Lease_LeaseGrant_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "grant"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Lease_LeaseRevoke_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "revoke"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Lease_LeaseRevoke_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "revoke"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Lease_LeaseKeepAlive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "keepalive"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Lease_LeaseTimeToLive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "timetolive"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Lease_LeaseTimeToLive_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "timetolive"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Lease_LeaseLeases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "leases"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Lease_LeaseLeases_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "leases"}, "", runtime.AssumeColonVerbOpt(true)))
+ pattern_Lease_LeaseGrant_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "grant"}, ""))
+ pattern_Lease_LeaseRevoke_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "revoke"}, ""))
+ pattern_Lease_LeaseRevoke_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "revoke"}, ""))
+ pattern_Lease_LeaseKeepAlive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "keepalive"}, ""))
+ pattern_Lease_LeaseTimeToLive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "timetolive"}, ""))
+ pattern_Lease_LeaseTimeToLive_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "timetolive"}, ""))
+ pattern_Lease_LeaseLeases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "leases"}, ""))
+ pattern_Lease_LeaseLeases_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "leases"}, ""))
)
var (
- forward_Lease_LeaseGrant_0 = runtime.ForwardResponseMessage
-
- forward_Lease_LeaseRevoke_0 = runtime.ForwardResponseMessage
-
- forward_Lease_LeaseRevoke_1 = runtime.ForwardResponseMessage
-
- forward_Lease_LeaseKeepAlive_0 = runtime.ForwardResponseStream
-
+ forward_Lease_LeaseGrant_0 = runtime.ForwardResponseMessage
+ forward_Lease_LeaseRevoke_0 = runtime.ForwardResponseMessage
+ forward_Lease_LeaseRevoke_1 = runtime.ForwardResponseMessage
+ forward_Lease_LeaseKeepAlive_0 = runtime.ForwardResponseStream
forward_Lease_LeaseTimeToLive_0 = runtime.ForwardResponseMessage
-
forward_Lease_LeaseTimeToLive_1 = runtime.ForwardResponseMessage
-
- forward_Lease_LeaseLeases_0 = runtime.ForwardResponseMessage
-
- forward_Lease_LeaseLeases_1 = runtime.ForwardResponseMessage
+ forward_Lease_LeaseLeases_0 = runtime.ForwardResponseMessage
+ forward_Lease_LeaseLeases_1 = runtime.ForwardResponseMessage
)
// RegisterClusterHandlerFromEndpoint is same as RegisterClusterHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterClusterHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
+ conn, err := grpc.NewClient(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
-
return RegisterClusterHandler(ctx, mux, conn)
}
@@ -2950,158 +2493,133 @@ func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *gr
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ClusterClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ClusterClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "ClusterClient" to call the correct interceptors.
+// "ClusterClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.ClusterClient) error {
-
- mux.Handle("POST", pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberAdd", runtime.WithHTTPPathPattern("/v3/cluster/member/add"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Cluster_MemberAdd_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Cluster_MemberAdd_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Cluster_MemberAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Cluster_MemberAdd_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Cluster_MemberRemove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberRemove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberRemove", runtime.WithHTTPPathPattern("/v3/cluster/member/remove"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Cluster_MemberRemove_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Cluster_MemberRemove_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Cluster_MemberRemove_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Cluster_MemberRemove_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Cluster_MemberUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberUpdate", runtime.WithHTTPPathPattern("/v3/cluster/member/update"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Cluster_MemberUpdate_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Cluster_MemberUpdate_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Cluster_MemberUpdate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Cluster_MemberUpdate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Cluster_MemberList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberList", runtime.WithHTTPPathPattern("/v3/cluster/member/list"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Cluster_MemberList_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Cluster_MemberList_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Cluster_MemberList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Cluster_MemberList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Cluster_MemberPromote_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberPromote_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberPromote", runtime.WithHTTPPathPattern("/v3/cluster/member/promote"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Cluster_MemberPromote_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Cluster_MemberPromote_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Cluster_MemberPromote_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Cluster_MemberPromote_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_Cluster_MemberAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "add"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Cluster_MemberRemove_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "remove"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Cluster_MemberUpdate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "update"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Cluster_MemberList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "list"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Cluster_MemberPromote_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "promote"}, "", runtime.AssumeColonVerbOpt(true)))
+ pattern_Cluster_MemberAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "add"}, ""))
+ pattern_Cluster_MemberRemove_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "remove"}, ""))
+ pattern_Cluster_MemberUpdate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "update"}, ""))
+ pattern_Cluster_MemberList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "list"}, ""))
+ pattern_Cluster_MemberPromote_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "promote"}, ""))
)
var (
- forward_Cluster_MemberAdd_0 = runtime.ForwardResponseMessage
-
- forward_Cluster_MemberRemove_0 = runtime.ForwardResponseMessage
-
- forward_Cluster_MemberUpdate_0 = runtime.ForwardResponseMessage
-
- forward_Cluster_MemberList_0 = runtime.ForwardResponseMessage
-
+ forward_Cluster_MemberAdd_0 = runtime.ForwardResponseMessage
+ forward_Cluster_MemberRemove_0 = runtime.ForwardResponseMessage
+ forward_Cluster_MemberUpdate_0 = runtime.ForwardResponseMessage
+ forward_Cluster_MemberList_0 = runtime.ForwardResponseMessage
forward_Cluster_MemberPromote_0 = runtime.ForwardResponseMessage
)
// RegisterMaintenanceHandlerFromEndpoint is same as RegisterMaintenanceHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterMaintenanceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
+ conn, err := grpc.NewClient(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
-
return RegisterMaintenanceHandler(ctx, mux, conn)
}
@@ -3115,230 +2633,193 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MaintenanceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MaintenanceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "MaintenanceClient" to call the correct interceptors.
+// "MaintenanceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.MaintenanceClient) error {
-
- mux.Handle("POST", pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/Alarm", runtime.WithHTTPPathPattern("/v3/maintenance/alarm"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Maintenance_Alarm_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Maintenance_Alarm_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Maintenance_Alarm_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Maintenance_Alarm_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Maintenance_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Maintenance_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/Status", runtime.WithHTTPPathPattern("/v3/maintenance/status"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Maintenance_Status_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Maintenance_Status_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Maintenance_Status_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Maintenance_Status_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Maintenance_Defragment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Maintenance_Defragment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/Defragment", runtime.WithHTTPPathPattern("/v3/maintenance/defragment"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Maintenance_Defragment_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Maintenance_Defragment_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Maintenance_Defragment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Maintenance_Defragment_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Maintenance_Hash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Maintenance_Hash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/Hash", runtime.WithHTTPPathPattern("/v3/maintenance/hash"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Maintenance_Hash_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Maintenance_Hash_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Maintenance_Hash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Maintenance_Hash_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Maintenance_HashKV_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Maintenance_HashKV_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/HashKV", runtime.WithHTTPPathPattern("/v3/maintenance/hashkv"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Maintenance_HashKV_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Maintenance_HashKV_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Maintenance_HashKV_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Maintenance_HashKV_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Maintenance_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Maintenance_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/Snapshot", runtime.WithHTTPPathPattern("/v3/maintenance/snapshot"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Maintenance_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Maintenance_Snapshot_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Maintenance_Snapshot_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
-
+ forward_Maintenance_Snapshot_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) {
+ m1, err := resp.Recv()
+ return protov1.MessageV2(m1), err
+ }, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Maintenance_MoveLeader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Maintenance_MoveLeader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/MoveLeader", runtime.WithHTTPPathPattern("/v3/maintenance/transfer-leadership"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Maintenance_MoveLeader_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Maintenance_MoveLeader_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Maintenance_MoveLeader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Maintenance_MoveLeader_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Maintenance_Downgrade_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Maintenance_Downgrade_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/Downgrade", runtime.WithHTTPPathPattern("/v3/maintenance/downgrade"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Maintenance_Downgrade_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Maintenance_Downgrade_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Maintenance_Downgrade_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Maintenance_Downgrade_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_Maintenance_Alarm_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "alarm"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Maintenance_Status_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "status"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Maintenance_Defragment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "defragment"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Maintenance_Hash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "hash"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Maintenance_HashKV_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "hash"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Maintenance_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "snapshot"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Maintenance_MoveLeader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "transfer-leadership"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Maintenance_Downgrade_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "downgrade"}, "", runtime.AssumeColonVerbOpt(true)))
+ pattern_Maintenance_Alarm_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "alarm"}, ""))
+ pattern_Maintenance_Status_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "status"}, ""))
+ pattern_Maintenance_Defragment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "defragment"}, ""))
+ pattern_Maintenance_Hash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "hash"}, ""))
+ pattern_Maintenance_HashKV_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "hashkv"}, ""))
+ pattern_Maintenance_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "snapshot"}, ""))
+ pattern_Maintenance_MoveLeader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "transfer-leadership"}, ""))
+ pattern_Maintenance_Downgrade_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "downgrade"}, ""))
)
var (
- forward_Maintenance_Alarm_0 = runtime.ForwardResponseMessage
-
- forward_Maintenance_Status_0 = runtime.ForwardResponseMessage
-
+ forward_Maintenance_Alarm_0 = runtime.ForwardResponseMessage
+ forward_Maintenance_Status_0 = runtime.ForwardResponseMessage
forward_Maintenance_Defragment_0 = runtime.ForwardResponseMessage
-
- forward_Maintenance_Hash_0 = runtime.ForwardResponseMessage
-
- forward_Maintenance_HashKV_0 = runtime.ForwardResponseMessage
-
- forward_Maintenance_Snapshot_0 = runtime.ForwardResponseStream
-
+ forward_Maintenance_Hash_0 = runtime.ForwardResponseMessage
+ forward_Maintenance_HashKV_0 = runtime.ForwardResponseMessage
+ forward_Maintenance_Snapshot_0 = runtime.ForwardResponseStream
forward_Maintenance_MoveLeader_0 = runtime.ForwardResponseMessage
-
- forward_Maintenance_Downgrade_0 = runtime.ForwardResponseMessage
+ forward_Maintenance_Downgrade_0 = runtime.ForwardResponseMessage
)
// RegisterAuthHandlerFromEndpoint is same as RegisterAuthHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterAuthHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
+ conn, err := grpc.NewClient(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
-
return RegisterAuthHandler(ctx, mux, conn)
}
@@ -3352,420 +2833,336 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "AuthClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AuthClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "AuthClient" to call the correct interceptors.
+// "AuthClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.AuthClient) error {
-
- mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/AuthEnable", runtime.WithHTTPPathPattern("/v3/auth/enable"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Auth_AuthEnable_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Auth_AuthEnable_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_AuthEnable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_AuthEnable_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_AuthDisable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_AuthDisable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/AuthDisable", runtime.WithHTTPPathPattern("/v3/auth/disable"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Auth_AuthDisable_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Auth_AuthDisable_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_AuthDisable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_AuthDisable_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_AuthStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_AuthStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/AuthStatus", runtime.WithHTTPPathPattern("/v3/auth/status"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Auth_AuthStatus_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Auth_AuthStatus_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_AuthStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_AuthStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_Authenticate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_Authenticate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/Authenticate", runtime.WithHTTPPathPattern("/v3/auth/authenticate"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Auth_Authenticate_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Auth_Authenticate_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_Authenticate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_Authenticate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_UserAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_UserAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserAdd", runtime.WithHTTPPathPattern("/v3/auth/user/add"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Auth_UserAdd_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Auth_UserAdd_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_UserAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_UserAdd_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_UserGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_UserGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserGet", runtime.WithHTTPPathPattern("/v3/auth/user/get"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Auth_UserGet_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Auth_UserGet_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_UserGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_UserGet_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_UserList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_UserList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserList", runtime.WithHTTPPathPattern("/v3/auth/user/list"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Auth_UserList_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Auth_UserList_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_UserList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_UserList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_UserDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_UserDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserDelete", runtime.WithHTTPPathPattern("/v3/auth/user/delete"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Auth_UserDelete_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Auth_UserDelete_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_UserDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_UserDelete_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_UserChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_UserChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserChangePassword", runtime.WithHTTPPathPattern("/v3/auth/user/changepw"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Auth_UserChangePassword_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Auth_UserChangePassword_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_UserChangePassword_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_UserChangePassword_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_UserGrantRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_UserGrantRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserGrantRole", runtime.WithHTTPPathPattern("/v3/auth/user/grant"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Auth_UserGrantRole_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Auth_UserGrantRole_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_UserGrantRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_UserGrantRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_UserRevokeRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_UserRevokeRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserRevokeRole", runtime.WithHTTPPathPattern("/v3/auth/user/revoke"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Auth_UserRevokeRole_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Auth_UserRevokeRole_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_UserRevokeRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_UserRevokeRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_RoleAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_RoleAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/RoleAdd", runtime.WithHTTPPathPattern("/v3/auth/role/add"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Auth_RoleAdd_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Auth_RoleAdd_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_RoleAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_RoleAdd_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_RoleGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_RoleGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/RoleGet", runtime.WithHTTPPathPattern("/v3/auth/role/get"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Auth_RoleGet_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Auth_RoleGet_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_RoleGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_RoleGet_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_RoleList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_RoleList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/RoleList", runtime.WithHTTPPathPattern("/v3/auth/role/list"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Auth_RoleList_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Auth_RoleList_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_RoleList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_RoleList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_RoleDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_RoleDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/RoleDelete", runtime.WithHTTPPathPattern("/v3/auth/role/delete"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Auth_RoleDelete_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Auth_RoleDelete_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_RoleDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_RoleDelete_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_RoleGrantPermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_RoleGrantPermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/RoleGrantPermission", runtime.WithHTTPPathPattern("/v3/auth/role/grant"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Auth_RoleGrantPermission_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Auth_RoleGrantPermission_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_RoleGrantPermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_RoleGrantPermission_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Auth_RoleRevokePermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Auth_RoleRevokePermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/RoleRevokePermission", runtime.WithHTTPPathPattern("/v3/auth/role/revoke"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Auth_RoleRevokePermission_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Auth_RoleRevokePermission_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Auth_RoleRevokePermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Auth_RoleRevokePermission_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_Auth_AuthEnable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "enable"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Auth_AuthDisable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "disable"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Auth_AuthStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "status"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Auth_Authenticate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "authenticate"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Auth_UserAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "add"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Auth_UserGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "get"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Auth_UserList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "list"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Auth_UserDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "delete"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Auth_UserChangePassword_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "changepw"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Auth_UserGrantRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "grant"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Auth_UserRevokeRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "revoke"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Auth_RoleAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "add"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Auth_RoleGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "get"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Auth_RoleList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "list"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Auth_RoleDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "delete"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Auth_RoleGrantPermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "grant"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Auth_RoleRevokePermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "revoke"}, "", runtime.AssumeColonVerbOpt(true)))
+ pattern_Auth_AuthEnable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "enable"}, ""))
+ pattern_Auth_AuthDisable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "disable"}, ""))
+ pattern_Auth_AuthStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "status"}, ""))
+ pattern_Auth_Authenticate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "authenticate"}, ""))
+ pattern_Auth_UserAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "add"}, ""))
+ pattern_Auth_UserGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "get"}, ""))
+ pattern_Auth_UserList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "list"}, ""))
+ pattern_Auth_UserDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "delete"}, ""))
+ pattern_Auth_UserChangePassword_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "changepw"}, ""))
+ pattern_Auth_UserGrantRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "grant"}, ""))
+ pattern_Auth_UserRevokeRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "revoke"}, ""))
+ pattern_Auth_RoleAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "add"}, ""))
+ pattern_Auth_RoleGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "get"}, ""))
+ pattern_Auth_RoleList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "list"}, ""))
+ pattern_Auth_RoleDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "delete"}, ""))
+ pattern_Auth_RoleGrantPermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "grant"}, ""))
+ pattern_Auth_RoleRevokePermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "revoke"}, ""))
)
var (
- forward_Auth_AuthEnable_0 = runtime.ForwardResponseMessage
-
- forward_Auth_AuthDisable_0 = runtime.ForwardResponseMessage
-
- forward_Auth_AuthStatus_0 = runtime.ForwardResponseMessage
-
- forward_Auth_Authenticate_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserAdd_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserGet_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserList_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserDelete_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserChangePassword_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserGrantRole_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserRevokeRole_0 = runtime.ForwardResponseMessage
-
- forward_Auth_RoleAdd_0 = runtime.ForwardResponseMessage
-
- forward_Auth_RoleGet_0 = runtime.ForwardResponseMessage
-
- forward_Auth_RoleList_0 = runtime.ForwardResponseMessage
-
- forward_Auth_RoleDelete_0 = runtime.ForwardResponseMessage
-
- forward_Auth_RoleGrantPermission_0 = runtime.ForwardResponseMessage
-
+ forward_Auth_AuthEnable_0 = runtime.ForwardResponseMessage
+ forward_Auth_AuthDisable_0 = runtime.ForwardResponseMessage
+ forward_Auth_AuthStatus_0 = runtime.ForwardResponseMessage
+ forward_Auth_Authenticate_0 = runtime.ForwardResponseMessage
+ forward_Auth_UserAdd_0 = runtime.ForwardResponseMessage
+ forward_Auth_UserGet_0 = runtime.ForwardResponseMessage
+ forward_Auth_UserList_0 = runtime.ForwardResponseMessage
+ forward_Auth_UserDelete_0 = runtime.ForwardResponseMessage
+ forward_Auth_UserChangePassword_0 = runtime.ForwardResponseMessage
+ forward_Auth_UserGrantRole_0 = runtime.ForwardResponseMessage
+ forward_Auth_UserRevokeRole_0 = runtime.ForwardResponseMessage
+ forward_Auth_RoleAdd_0 = runtime.ForwardResponseMessage
+ forward_Auth_RoleGet_0 = runtime.ForwardResponseMessage
+ forward_Auth_RoleList_0 = runtime.ForwardResponseMessage
+ forward_Auth_RoleDelete_0 = runtime.ForwardResponseMessage
+ forward_Auth_RoleGrantPermission_0 = runtime.ForwardResponseMessage
forward_Auth_RoleRevokePermission_0 = runtime.ForwardResponseMessage
)
diff --git a/api/etcdserverpb/raft_internal.pb.go b/api/etcdserverpb/raft_internal.pb.go
index b94a7bfd9d9..d39efef7c98 100644
--- a/api/etcdserverpb/raft_internal.pb.go
+++ b/api/etcdserverpb/raft_internal.pb.go
@@ -12,6 +12,7 @@ import (
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/golang/protobuf/proto"
membershippb "go.etcd.io/etcd/api/v3/membershippb"
+ _ "go.etcd.io/etcd/api/v3/versionpb"
)
// Reference imports to suppress errors if they are not otherwise used.
@@ -237,70 +238,74 @@ func init() {
func init() { proto.RegisterFile("raft_internal.proto", fileDescriptor_b4c9a9be0cfca103) }
var fileDescriptor_b4c9a9be0cfca103 = []byte{
- // 1003 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xd9, 0x72, 0x1b, 0x45,
- 0x14, 0x86, 0x23, 0xc5, 0x71, 0xac, 0x96, 0xed, 0x38, 0x6d, 0x87, 0x34, 0x72, 0x95, 0x70, 0x1c,
- 0x12, 0xcc, 0x66, 0x53, 0xca, 0x03, 0x80, 0x90, 0x5c, 0x8e, 0xab, 0x42, 0x70, 0x4d, 0xcc, 0x52,
- 0xc5, 0xc5, 0xd0, 0x9a, 0x39, 0x96, 0x06, 0xcf, 0x46, 0x77, 0x4b, 0x31, 0xef, 0x11, 0x28, 0x1e,
- 0x83, 0xed, 0x21, 0x72, 0xc1, 0x62, 0xe0, 0x05, 0xc0, 0xdc, 0x70, 0x0f, 0xdc, 0x53, 0xbd, 0xcc,
- 0x26, 0xb5, 0x7c, 0xa7, 0xf9, 0xcf, 0x7f, 0xbe, 0x73, 0xba, 0xe7, 0xf4, 0xa8, 0xd1, 0x3a, 0xa3,
- 0x27, 0xc2, 0x0d, 0x62, 0x01, 0x2c, 0xa6, 0xe1, 0x6e, 0xca, 0x12, 0x91, 0xe0, 0x65, 0x10, 0x9e,
- 0xcf, 0x81, 0x4d, 0x80, 0xa5, 0x83, 0xd6, 0xc6, 0x30, 0x19, 0x26, 0x2a, 0xb0, 0x27, 0x7f, 0x69,
- 0x4f, 0x6b, 0xad, 0xf0, 0x18, 0xa5, 0xc1, 0x52, 0xcf, 0xfc, 0xbc, 0x2f, 0x83, 0x7b, 0x34, 0x0d,
- 0xf6, 0x22, 0x88, 0x06, 0xc0, 0xf8, 0x28, 0x48, 0xd3, 0x41, 0xe9, 0x41, 0xfb, 0xb6, 0x3f, 0x45,
- 0x2b, 0x0e, 0x7c, 0x3e, 0x06, 0x2e, 0x1e, 0x02, 0xf5, 0x81, 0xe1, 0x55, 0x54, 0x3f, 0xec, 0x93,
- 0xda, 0x56, 0x6d, 0x67, 0xc1, 0xa9, 0x1f, 0xf6, 0x71, 0x0b, 0x2d, 0x8d, 0xb9, 0x6c, 0x2d, 0x02,
- 0x52, 0xdf, 0xaa, 0xed, 0x34, 0x9c, 0xfc, 0x19, 0xdf, 0x45, 0x2b, 0x74, 0x2c, 0x46, 0x2e, 0x83,
- 0x49, 0xc0, 0x83, 0x24, 0x26, 0x57, 0x55, 0xda, 0xb2, 0x14, 0x1d, 0xa3, 0x6d, 0x3f, 0xc3, 0x68,
- 0xfd, 0xd0, 0xac, 0xce, 0xa1, 0x27, 0xc2, 0x94, 0xc3, 0x0f, 0xd0, 0xe2, 0x48, 0x95, 0x24, 0xfe,
- 0x56, 0x6d, 0xa7, 0xd9, 0xd9, 0xdc, 0x2d, 0xaf, 0x79, 0xb7, 0xd2, 0x95, 0x63, 0xac, 0x33, 0xdd,
- 0xdd, 0x43, 0xf5, 0x49, 0x47, 0xf5, 0xd5, 0xec, 0xdc, 0xb2, 0x02, 0x9c, 0xfa, 0xa4, 0x83, 0xdf,
- 0x42, 0xd7, 0x18, 0x8d, 0x87, 0xa0, 0x1a, 0x6c, 0x76, 0x5a, 0x53, 0x4e, 0x19, 0xca, 0xec, 0xda,
- 0x88, 0x5f, 0x43, 0x57, 0xd3, 0xb1, 0x20, 0x0b, 0xca, 0x4f, 0xaa, 0xfe, 0xa3, 0x71, 0xb6, 0x08,
- 0x47, 0x9a, 0x70, 0x0f, 0x2d, 0xfb, 0x10, 0x82, 0x00, 0x57, 0x17, 0xb9, 0xa6, 0x92, 0xb6, 0xaa,
- 0x49, 0x7d, 0xe5, 0xa8, 0x94, 0x6a, 0xfa, 0x85, 0x26, 0x0b, 0x8a, 0xb3, 0x98, 0x2c, 0xda, 0x0a,
- 0x1e, 0x9f, 0xc5, 0x79, 0x41, 0x71, 0x16, 0xe3, 0xb7, 0x11, 0xf2, 0x92, 0x28, 0xa5, 0x9e, 0x90,
- 0x9b, 0x7e, 0x5d, 0xa5, 0xbc, 0x54, 0x4d, 0xe9, 0xe5, 0xf1, 0x2c, 0xb3, 0x94, 0x82, 0xdf, 0x41,
- 0xcd, 0x10, 0x28, 0x07, 0x77, 0xc8, 0x68, 0x2c, 0xc8, 0x92, 0x8d, 0xf0, 0x48, 0x1a, 0x0e, 0x64,
- 0x3c, 0x27, 0x84, 0xb9, 0x24, 0xd7, 0xac, 0x09, 0x0c, 0x26, 0xc9, 0x29, 0x90, 0x86, 0x6d, 0xcd,
- 0x0a, 0xe1, 0x28, 0x43, 0xbe, 0xe6, 0xb0, 0xd0, 0xe4, 0x6b, 0xa1, 0x21, 0x65, 0x11, 0x41, 0xb6,
- 0xd7, 0xd2, 0x95, 0xa1, 0xfc, 0xb5, 0x28, 0x23, 0x7e, 0x1f, 0xad, 0xe9, 0xb2, 0xde, 0x08, 0xbc,
- 0xd3, 0x34, 0x09, 0x62, 0x41, 0x9a, 0x2a, 0xf9, 0x65, 0x4b, 0xe9, 0x5e, 0x6e, 0xca, 0x30, 0x37,
- 0xc2, 0xaa, 0x8e, 0xbb, 0xa8, 0xa9, 0x46, 0x18, 0x62, 0x3a, 0x08, 0x81, 0xfc, 0x6d, 0xdd, 0xcc,
- 0xee, 0x58, 0x8c, 0xf6, 0x95, 0x21, 0xdf, 0x0a, 0x9a, 0x4b, 0xb8, 0x8f, 0xd4, 0xc0, 0xbb, 0x7e,
- 0xc0, 0x15, 0xe3, 0x9f, 0xeb, 0xb6, 0xbd, 0x90, 0x8c, 0xbe, 0x76, 0xe4, 0x7b, 0x41, 0x0b, 0x2d,
- 0x6f, 0x84, 0x0b, 0x2a, 0xc6, 0x9c, 0xfc, 0x37, 0xb7, 0x91, 0x27, 0xca, 0x50, 0x69, 0x44, 0x4b,
- 0xf8, 0xb1, 0x6e, 0x04, 0x62, 0x11, 0x78, 0x54, 0x00, 0xf9, 0x57, 0x33, 0x5e, 0xad, 0x32, 0xb2,
- 0xb3, 0xd8, 0x2d, 0x59, 0x33, 0x5a, 0x25, 0x1f, 0xef, 0x9b, 0xe3, 0x2d, 0xcf, 0xbb, 0x4b, 0x7d,
- 0x9f, 0xfc, 0xb8, 0x34, 0x6f, 0x65, 0x1f, 0x70, 0x60, 0x5d, 0xdf, 0xaf, 0xac, 0xcc, 0x68, 0xf8,
- 0x31, 0x5a, 0x2b, 0x30, 0x7a, 0xe4, 0xc9, 0x4f, 0x9a, 0x74, 0xd7, 0x4e, 0x32, 0x67, 0xc5, 0xc0,
- 0x56, 0x69, 0x45, 0xae, 0xb6, 0x35, 0x04, 0x41, 0x7e, 0xbe, 0xb4, 0xad, 0x03, 0x10, 0x33, 0x6d,
- 0x1d, 0x80, 0xc0, 0x43, 0xf4, 0x62, 0x81, 0xf1, 0x46, 0xf2, 0x10, 0xba, 0x29, 0xe5, 0xfc, 0x69,
- 0xc2, 0x7c, 0xf2, 0x8b, 0x46, 0xbe, 0x6e, 0x47, 0xf6, 0x94, 0xfb, 0xc8, 0x98, 0x33, 0xfa, 0x0b,
- 0xd4, 0x1a, 0xc6, 0x1f, 0xa3, 0x8d, 0x52, 0xbf, 0xf2, 0xf4, 0xb8, 0x2c, 0x09, 0x81, 0x9c, 0xeb,
- 0x1a, 0xf7, 0xe7, 0xb4, 0xad, 0x4e, 0x5e, 0x52, 0x4c, 0xcb, 0x4d, 0x3a, 0x1d, 0xc1, 0x9f, 0xa0,
- 0x5b, 0x05, 0x59, 0x1f, 0x44, 0x8d, 0xfe, 0x55, 0xa3, 0x5f, 0xb1, 0xa3, 0xcd, 0x89, 0x2c, 0xb1,
- 0x31, 0x9d, 0x09, 0xe1, 0x87, 0x68, 0xb5, 0x80, 0x87, 0x01, 0x17, 0xe4, 0x37, 0x4d, 0xbd, 0x63,
- 0xa7, 0x3e, 0x0a, 0xb8, 0xa8, 0xcc, 0x51, 0x26, 0xe6, 0x24, 0xd9, 0x9a, 0x26, 0xfd, 0x3e, 0x97,
- 0x24, 0x4b, 0xcf, 0x90, 0x32, 0x31, 0x7f, 0xf5, 0x8a, 0x24, 0x27, 0xf2, 0x9b, 0xc6, 0xbc, 0x57,
- 0x2f, 0x73, 0xa6, 0x27, 0xd2, 0x68, 0xf9, 0x44, 0x2a, 0x8c, 0x99, 0xc8, 0x6f, 0x1b, 0xf3, 0x26,
- 0x52, 0x66, 0x59, 0x26, 0xb2, 0x90, 0xab, 0x6d, 0xc9, 0x89, 0xfc, 0xee, 0xd2, 0xb6, 0xa6, 0x27,
- 0xd2, 0x68, 0xf8, 0x33, 0xd4, 0x2a, 0x61, 0xd4, 0xa0, 0xa4, 0xc0, 0xa2, 0x80, 0xab, 0xff, 0xd6,
- 0xef, 0x35, 0xf3, 0x8d, 0x39, 0x4c, 0x69, 0x3f, 0xca, 0xdd, 0x19, 0xff, 0x36, 0xb5, 0xc7, 0x71,
- 0x84, 0x36, 0x8b, 0x5a, 0x66, 0x74, 0x4a, 0xc5, 0x7e, 0xd0, 0xc5, 0xde, 0xb4, 0x17, 0xd3, 0x53,
- 0x32, 0x5b, 0x8d, 0xd0, 0x39, 0x06, 0xfc, 0x11, 0x5a, 0xf7, 0xc2, 0x31, 0x17, 0xc0, 0xdc, 0x09,
- 0x30, 0x29, 0xb9, 0x1c, 0x04, 0x79, 0x86, 0xcc, 0x11, 0x28, 0x5f, 0x52, 0x76, 0x7b, 0xda, 0xf9,
- 0xa1, 0x36, 0x3e, 0x29, 0x76, 0xeb, 0xa6, 0x37, 0x1d, 0xc1, 0x14, 0xdd, 0xce, 0xc0, 0x9a, 0xe1,
- 0x52, 0x21, 0x98, 0x82, 0x7f, 0x89, 0xcc, 0xe7, 0xcf, 0x06, 0x7f, 0x4f, 0x69, 0x5d, 0x21, 0x58,
- 0x89, 0xbf, 0xe1, 0x59, 0x82, 0xf8, 0x18, 0x61, 0x3f, 0x79, 0x1a, 0x0f, 0x19, 0xf5, 0xc1, 0x0d,
- 0xe2, 0x93, 0x44, 0xd1, 0xbf, 0xd2, 0xf4, 0x7b, 0x55, 0x7a, 0x3f, 0x33, 0x1e, 0xc6, 0x27, 0x49,
- 0x89, 0xbc, 0xe6, 0x4f, 0x05, 0xb6, 0x6f, 0xa0, 0x95, 0xfd, 0x28, 0x15, 0x5f, 0x38, 0xc0, 0xd3,
- 0x24, 0xe6, 0xb0, 0x9d, 0xa2, 0xcd, 0x4b, 0x3e, 0xcd, 0x18, 0xa3, 0x05, 0x75, 0x07, 0xab, 0xa9,
- 0x3b, 0x98, 0xfa, 0x2d, 0xef, 0x66, 0xf9, 0x17, 0xcb, 0xdc, 0xcd, 0xb2, 0x67, 0x7c, 0x07, 0x2d,
- 0xf3, 0x20, 0x4a, 0x43, 0x70, 0x45, 0x72, 0x0a, 0xfa, 0x6a, 0xd6, 0x70, 0x9a, 0x5a, 0x3b, 0x96,
- 0xd2, 0xbb, 0x1b, 0xcf, 0xff, 0x6c, 0x5f, 0x79, 0x7e, 0xd1, 0xae, 0x9d, 0x5f, 0xb4, 0x6b, 0x7f,
- 0x5c, 0xb4, 0x6b, 0x5f, 0xff, 0xd5, 0xbe, 0x32, 0x58, 0x54, 0x17, 0xc3, 0x07, 0xff, 0x07, 0x00,
- 0x00, 0xff, 0xff, 0x94, 0x6f, 0x64, 0x0a, 0x98, 0x0a, 0x00, 0x00,
+ // 1072 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x56, 0x4b, 0x73, 0x1b, 0x45,
+ 0x17, 0x8d, 0x6c, 0xc7, 0xb6, 0x5a, 0xb6, 0xe3, 0xb4, 0x9d, 0x2f, 0xfd, 0xd9, 0x55, 0xc6, 0x71,
+ 0x48, 0x30, 0x10, 0xe4, 0x20, 0xc3, 0x02, 0x36, 0xa0, 0x48, 0x2e, 0xc7, 0x54, 0x48, 0xb9, 0x26,
+ 0x81, 0x4a, 0x41, 0x51, 0x43, 0x6b, 0xe6, 0x5a, 0x9a, 0x78, 0x34, 0x33, 0x74, 0xb7, 0x14, 0x67,
+ 0xcb, 0x92, 0x35, 0x50, 0xfc, 0x0c, 0x9e, 0xff, 0x21, 0x45, 0xf1, 0x08, 0xf0, 0x07, 0xc0, 0x6c,
+ 0xd8, 0x03, 0x7b, 0xaa, 0x1f, 0xf3, 0x92, 0x5a, 0xde, 0x8d, 0xee, 0x3d, 0xf7, 0x9c, 0xd3, 0xdd,
+ 0xb7, 0x5b, 0x17, 0xad, 0x30, 0x7a, 0x24, 0xdc, 0x20, 0x12, 0xc0, 0x22, 0x1a, 0xd6, 0x13, 0x16,
+ 0x8b, 0x18, 0x2f, 0x80, 0xf0, 0x7c, 0x0e, 0x6c, 0x08, 0x2c, 0xe9, 0xac, 0xad, 0x76, 0xe3, 0x6e,
+ 0xac, 0x12, 0x3b, 0xf2, 0x4b, 0x63, 0xd6, 0x96, 0x73, 0x8c, 0x89, 0x54, 0x59, 0xe2, 0x99, 0xcf,
+ 0x4d, 0x99, 0xdc, 0xa1, 0x49, 0xb0, 0x33, 0x04, 0xc6, 0x83, 0x38, 0x4a, 0x3a, 0xe9, 0x97, 0x41,
+ 0x5c, 0xcf, 0x10, 0x7d, 0xe8, 0x77, 0x80, 0xf1, 0x5e, 0x90, 0x24, 0x9d, 0xc2, 0x0f, 0x8d, 0xdb,
+ 0x62, 0x68, 0xd1, 0x81, 0x8f, 0x06, 0xc0, 0xc5, 0x6d, 0xa0, 0x3e, 0x30, 0xbc, 0x84, 0xa6, 0x0e,
+ 0xda, 0xa4, 0xb2, 0x59, 0xd9, 0x9e, 0x71, 0xa6, 0x0e, 0xda, 0x78, 0x0d, 0xcd, 0x0f, 0xb8, 0x34,
+ 0xdf, 0x07, 0x32, 0xb5, 0x59, 0xd9, 0xae, 0x3a, 0xd9, 0x6f, 0x7c, 0x03, 0x2d, 0xd2, 0x81, 0xe8,
+ 0xb9, 0x0c, 0x86, 0x81, 0xd4, 0x26, 0xd3, 0xb2, 0xec, 0xd6, 0xdc, 0x27, 0xdf, 0x91, 0xe9, 0xdd,
+ 0xfa, 0xcb, 0xce, 0x82, 0xcc, 0x3a, 0x26, 0xf9, 0xfa, 0xdc, 0xc7, 0x2a, 0x7c, 0x73, 0xeb, 0x7b,
+ 0x8c, 0x56, 0x0e, 0xcc, 0x8e, 0x38, 0xf4, 0x48, 0x18, 0x03, 0x78, 0x17, 0xcd, 0xf6, 0x94, 0x09,
+ 0xe2, 0x6f, 0x56, 0xb6, 0x6b, 0x8d, 0xf5, 0x7a, 0x71, 0x9f, 0xea, 0x25, 0x9f, 0x8e, 0x81, 0x8e,
+ 0xf9, 0xbd, 0x86, 0xa6, 0x86, 0x0d, 0xe5, 0xb4, 0xd6, 0xb8, 0x64, 0x25, 0x70, 0xa6, 0x86, 0x0d,
+ 0x7c, 0x13, 0x9d, 0x67, 0x34, 0xea, 0x82, 0xb2, 0x5c, 0x6b, 0xac, 0x8d, 0x20, 0x65, 0x2a, 0x85,
+ 0x6b, 0x20, 0x7e, 0x01, 0x4d, 0x27, 0x03, 0x41, 0x66, 0x14, 0x9e, 0x94, 0xf1, 0x87, 0x83, 0x74,
+ 0x11, 0x8e, 0x04, 0xe1, 0x16, 0x5a, 0xf0, 0x21, 0x04, 0x01, 0xae, 0x16, 0x39, 0xaf, 0x8a, 0x36,
+ 0xcb, 0x45, 0x6d, 0x85, 0x28, 0x49, 0xd5, 0xfc, 0x3c, 0x26, 0x05, 0xc5, 0x49, 0x44, 0x66, 0x6d,
+ 0x82, 0xf7, 0x4f, 0xa2, 0x4c, 0x50, 0x9c, 0x44, 0xf8, 0x0d, 0x84, 0xbc, 0xb8, 0x9f, 0x50, 0x4f,
+ 0xc8, 0x63, 0x98, 0x53, 0x25, 0xcf, 0x94, 0x4b, 0x5a, 0x59, 0x3e, 0xad, 0x2c, 0x94, 0xe0, 0x37,
+ 0x51, 0x2d, 0x04, 0xca, 0xc1, 0xed, 0x32, 0x1a, 0x09, 0x32, 0x6f, 0x63, 0xb8, 0x23, 0x01, 0xfb,
+ 0x32, 0x9f, 0x31, 0x84, 0x59, 0x48, 0xae, 0x59, 0x33, 0x30, 0x18, 0xc6, 0xc7, 0x40, 0xaa, 0xb6,
+ 0x35, 0x2b, 0x0a, 0x47, 0x01, 0xb2, 0x35, 0x87, 0x79, 0x4c, 0x1e, 0x0b, 0x0d, 0x29, 0xeb, 0x13,
+ 0x64, 0x3b, 0x96, 0xa6, 0x4c, 0x65, 0xc7, 0xa2, 0x80, 0xf8, 0x01, 0x5a, 0xd6, 0xb2, 0x5e, 0x0f,
+ 0xbc, 0xe3, 0x24, 0x0e, 0x22, 0x41, 0x6a, 0xaa, 0xf8, 0x59, 0x8b, 0x74, 0x2b, 0x03, 0x19, 0x9a,
+ 0xb4, 0x59, 0x5f, 0x71, 0x2e, 0x84, 0x65, 0x00, 0x6e, 0xa2, 0x9a, 0xea, 0x6e, 0x88, 0x68, 0x27,
+ 0x04, 0xf2, 0x97, 0x75, 0x57, 0x9b, 0x03, 0xd1, 0xdb, 0x53, 0x80, 0x6c, 0x4f, 0x68, 0x16, 0xc2,
+ 0x6d, 0xa4, 0xae, 0x80, 0xeb, 0x07, 0x5c, 0x71, 0xfc, 0x3d, 0x67, 0xdb, 0x14, 0xc9, 0xd1, 0xd6,
+ 0x88, 0x6c, 0x53, 0x68, 0x1e, 0xc3, 0x6f, 0x19, 0x23, 0x5c, 0x50, 0x31, 0xe0, 0xe4, 0xdf, 0x89,
+ 0x46, 0xee, 0x29, 0xc0, 0xc8, 0xca, 0x5e, 0xd5, 0x8e, 0x74, 0x0e, 0xdf, 0xd5, 0x8e, 0x20, 0x12,
+ 0x81, 0x47, 0x05, 0x90, 0x7f, 0x34, 0xd9, 0xf3, 0x65, 0xb2, 0xf4, 0x76, 0x36, 0x0b, 0xd0, 0xd4,
+ 0x5a, 0xa9, 0x1e, 0xef, 0x99, 0x27, 0x40, 0xbe, 0x09, 0x2e, 0xf5, 0x7d, 0xf2, 0xc3, 0xfc, 0xa4,
+ 0x25, 0xbe, 0xc3, 0x81, 0x35, 0x7d, 0xbf, 0xb4, 0x44, 0x13, 0xc3, 0x77, 0xd1, 0x72, 0x4e, 0xa3,
+ 0x2f, 0x01, 0xf9, 0x51, 0x33, 0x5d, 0xb5, 0x33, 0x99, 0xdb, 0x63, 0xc8, 0x96, 0x68, 0x29, 0x5c,
+ 0xb6, 0xd5, 0x05, 0x41, 0x7e, 0x3a, 0xd3, 0xd6, 0x3e, 0x88, 0x31, 0x5b, 0xfb, 0x20, 0x70, 0x17,
+ 0xfd, 0x3f, 0xa7, 0xf1, 0x7a, 0xf2, 0x5a, 0xba, 0x09, 0xe5, 0xfc, 0x51, 0xcc, 0x7c, 0xf2, 0xb3,
+ 0xa6, 0x7c, 0xd1, 0x4e, 0xd9, 0x52, 0xe8, 0x43, 0x03, 0x4e, 0xd9, 0xff, 0x47, 0xad, 0x69, 0xfc,
+ 0x00, 0xad, 0x16, 0xfc, 0xca, 0xfb, 0xe4, 0xb2, 0x38, 0x04, 0xf2, 0x54, 0x6b, 0x5c, 0x9f, 0x60,
+ 0x5b, 0xdd, 0xc5, 0x38, 0x6f, 0x9b, 0x8b, 0x74, 0x34, 0x83, 0xdf, 0x47, 0x97, 0x72, 0x66, 0x7d,
+ 0x35, 0x35, 0xf5, 0x2f, 0x9a, 0xfa, 0x39, 0x3b, 0xb5, 0xb9, 0xa3, 0x05, 0x6e, 0x4c, 0xc7, 0x52,
+ 0xf8, 0x36, 0x5a, 0xca, 0xc9, 0xc3, 0x80, 0x0b, 0xf2, 0xab, 0x66, 0xbd, 0x62, 0x67, 0xbd, 0x13,
+ 0x70, 0x51, 0xea, 0xa3, 0x34, 0x98, 0x31, 0x49, 0x6b, 0x9a, 0xe9, 0xb7, 0x89, 0x4c, 0x52, 0x7a,
+ 0x8c, 0x29, 0x0d, 0x66, 0x47, 0xaf, 0x98, 0x64, 0x47, 0x7e, 0x59, 0x9d, 0x74, 0xf4, 0xb2, 0x66,
+ 0xb4, 0x23, 0x4d, 0x2c, 0xeb, 0x48, 0x45, 0x63, 0x3a, 0xf2, 0xab, 0xea, 0xa4, 0x8e, 0x94, 0x55,
+ 0x96, 0x8e, 0xcc, 0xc3, 0x65, 0x5b, 0xb2, 0x23, 0xbf, 0x3e, 0xd3, 0xd6, 0x68, 0x47, 0x9a, 0x18,
+ 0x7e, 0x88, 0xd6, 0x0a, 0x34, 0xaa, 0x51, 0x12, 0x60, 0xfd, 0x80, 0xab, 0xff, 0xdf, 0x6f, 0x34,
+ 0xe7, 0x8d, 0x09, 0x9c, 0x12, 0x7e, 0x98, 0xa1, 0x53, 0xfe, 0xcb, 0xd4, 0x9e, 0xc7, 0x7d, 0xb4,
+ 0x9e, 0x6b, 0x99, 0xd6, 0x29, 0x88, 0x7d, 0xab, 0xc5, 0x5e, 0xb2, 0x8b, 0xe9, 0x2e, 0x19, 0x57,
+ 0x23, 0x74, 0x02, 0x00, 0x7f, 0x88, 0x56, 0xbc, 0x70, 0xc0, 0x05, 0x30, 0xd7, 0xcc, 0x32, 0x2e,
+ 0x07, 0x41, 0x3e, 0x45, 0xe6, 0x0a, 0x14, 0x07, 0x99, 0x7a, 0x4b, 0x23, 0xdf, 0xd5, 0xc0, 0x7b,
+ 0x20, 0xc6, 0x5e, 0xbd, 0x8b, 0xde, 0x28, 0x04, 0x3f, 0x44, 0x97, 0x53, 0x05, 0x4d, 0xe6, 0x52,
+ 0x21, 0x98, 0x52, 0xf9, 0x0c, 0x99, 0x77, 0xd0, 0xa6, 0xf2, 0xb6, 0x8a, 0x35, 0x85, 0x60, 0x36,
+ 0xa1, 0x55, 0xcf, 0x82, 0xc2, 0x1f, 0x20, 0xec, 0xc7, 0x8f, 0xa2, 0x2e, 0xa3, 0x3e, 0xb8, 0x41,
+ 0x74, 0x14, 0x2b, 0x99, 0xcf, 0xb5, 0xcc, 0xb5, 0xb2, 0x4c, 0x3b, 0x05, 0x1e, 0x44, 0x47, 0xb1,
+ 0x4d, 0x62, 0xd9, 0x1f, 0x41, 0xe4, 0xc3, 0xd4, 0x05, 0xb4, 0xb8, 0xd7, 0x4f, 0xc4, 0x63, 0x07,
+ 0x78, 0x12, 0x47, 0x1c, 0xb6, 0x1e, 0xa3, 0xf5, 0x33, 0x9e, 0x6f, 0x8c, 0xd1, 0x8c, 0x9a, 0xe5,
+ 0x2a, 0x6a, 0x96, 0x53, 0xdf, 0x72, 0xc6, 0xcb, 0x5e, 0x35, 0x33, 0xe3, 0xa5, 0xbf, 0xf1, 0x15,
+ 0xb4, 0xc0, 0x83, 0x7e, 0x12, 0x82, 0x2b, 0xe2, 0x63, 0xd0, 0x23, 0x5e, 0xd5, 0xa9, 0xe9, 0xd8,
+ 0x7d, 0x19, 0xca, 0xbc, 0xdc, 0x7a, 0xed, 0xc9, 0x1f, 0x1b, 0xe7, 0x9e, 0x9c, 0x6e, 0x54, 0x9e,
+ 0x9e, 0x6e, 0x54, 0x7e, 0x3f, 0xdd, 0xa8, 0x7c, 0xf1, 0xe7, 0xc6, 0xb9, 0xf7, 0xae, 0x76, 0x63,
+ 0xd5, 0x22, 0xf5, 0x20, 0xde, 0xc9, 0xe7, 0xd6, 0xdd, 0x9d, 0x62, 0xdb, 0x74, 0x66, 0xd5, 0x38,
+ 0xba, 0xfb, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x06, 0x59, 0x70, 0x10, 0x30, 0x0b, 0x00, 0x00,
}
func (m *RequestHeader) Marshal() (dAtA []byte, err error) {
diff --git a/api/etcdserverpb/raft_internal.proto b/api/etcdserverpb/raft_internal.proto
index 68926e59f6c..91f7705b739 100644
--- a/api/etcdserverpb/raft_internal.proto
+++ b/api/etcdserverpb/raft_internal.proto
@@ -4,24 +4,31 @@ package etcdserverpb;
import "gogoproto/gogo.proto";
import "etcdserver.proto";
import "rpc.proto";
+import "etcd/api/versionpb/version.proto";
import "etcd/api/membershippb/membership.proto";
+option go_package = "go.etcd.io/etcd/api/v3/etcdserverpb";
+
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;
message RequestHeader {
+ option (versionpb.etcd_version_msg) = "3.0";
+
uint64 ID = 1;
// username is a username that is associated with an auth token of gRPC connection
string username = 2;
// auth_revision is a revision number of auth.authStore. It is not related to mvcc
- uint64 auth_revision = 3;
+ uint64 auth_revision = 3 [(versionpb.etcd_version_field) = "3.1"];
}
// An InternalRaftRequest is the union of all requests which can be
// sent via raft.
message InternalRaftRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
RequestHeader header = 100;
uint64 ID = 1;
@@ -38,11 +45,11 @@ message InternalRaftRequest {
AlarmRequest alarm = 10;
- LeaseCheckpointRequest lease_checkpoint = 11;
+ LeaseCheckpointRequest lease_checkpoint = 11 [(versionpb.etcd_version_field) = "3.4"];
AuthEnableRequest auth_enable = 1000;
AuthDisableRequest auth_disable = 1011;
- AuthStatusRequest auth_status = 1013;
+ AuthStatusRequest auth_status = 1013 [(versionpb.etcd_version_field) = "3.5"];
InternalAuthenticateRequest authenticate = 1012;
@@ -61,9 +68,9 @@ message InternalRaftRequest {
AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203;
AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204;
- membershippb.ClusterVersionSetRequest cluster_version_set = 1300;
- membershippb.ClusterMemberAttrSetRequest cluster_member_attr_set = 1301;
- membershippb.DowngradeInfoSetRequest downgrade_info_set = 1302;
+ membershippb.ClusterVersionSetRequest cluster_version_set = 1300 [(versionpb.etcd_version_field) = "3.5"];
+ membershippb.ClusterMemberAttrSetRequest cluster_member_attr_set = 1301 [(versionpb.etcd_version_field) = "3.5"];
+ membershippb.DowngradeInfoSetRequest downgrade_info_set = 1302 [(versionpb.etcd_version_field) = "3.5"];
}
message EmptyResponse {
@@ -73,6 +80,7 @@ message EmptyResponse {
// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
// For avoiding misusage the field, we have an internal version of AuthenticateRequest.
message InternalAuthenticateRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
string name = 1;
string password = 2;
diff --git a/api/etcdserverpb/raft_internal_stringer.go b/api/etcdserverpb/raft_internal_stringer.go
index 31e121ee0a6..a9431d52542 100644
--- a/api/etcdserverpb/raft_internal_stringer.go
+++ b/api/etcdserverpb/raft_internal_stringer.go
@@ -72,13 +72,13 @@ func (as *InternalRaftStringer) String() string {
return as.Request.String()
}
-// txnRequestStringer implements a custom proto String to replace value bytes fields with value size
-// fields in any nested txn and put operations.
+// txnRequestStringer implements fmt.Stringer; its custom proto String replaces value bytes
+// fields with value size fields in any nested txn and put operations.
type txnRequestStringer struct {
Request *TxnRequest
}
-func NewLoggableTxnRequest(request *TxnRequest) *txnRequestStringer {
+func NewLoggableTxnRequest(request *TxnRequest) fmt.Stringer {
return &txnRequestStringer{request}
}
@@ -155,8 +155,8 @@ func (m *loggableValueCompare) Reset() { *m = loggableValueCompare{} }
func (m *loggableValueCompare) String() string { return proto.CompactTextString(m) }
func (*loggableValueCompare) ProtoMessage() {}
-// loggablePutRequest implements a custom proto String to replace value bytes field with a value
-// size field.
+// loggablePutRequest implements proto.Message; its custom proto String replaces the value bytes
+// field with a value size field.
// To preserve proto encoding of the key bytes, a faked out proto type is used here.
type loggablePutRequest struct {
Key []byte `protobuf:"bytes,1,opt,name=key,proto3"`
@@ -167,7 +167,7 @@ type loggablePutRequest struct {
IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,proto3"`
}
-func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest {
+func NewLoggablePutRequest(request *PutRequest) proto.Message {
return &loggablePutRequest{
request.Key,
int64(len(request.Value)),
diff --git a/api/etcdserverpb/raft_internal_stringer_test.go b/api/etcdserverpb/raft_internal_stringer_test.go
index 35be8782479..9a9c98f9ad6 100644
--- a/api/etcdserverpb/raft_internal_stringer_test.go
+++ b/api/etcdserverpb/raft_internal_stringer_test.go
@@ -17,15 +17,14 @@ package etcdserverpb_test
import (
"testing"
+ "github.com/stretchr/testify/assert"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)
-// TestInvalidGoYypeIntPanic tests conditions that caused
+// TestInvalidGoTypeIntPanic tests conditions that caused
// panic: invalid Go type int for field k8s_io.kubernetes.vendor.go_etcd_io.etcd.etcdserver.etcdserverpb.loggablePutRequest.value_size
// See https://github.com/kubernetes/kubernetes/issues/91937 for more details
func TestInvalidGoTypeIntPanic(t *testing.T) {
- result := pb.NewLoggablePutRequest(&pb.PutRequest{}).String()
- if result != "" {
- t.Errorf("Got result: %s, expected empty string", result)
- }
+ assert.Empty(t, pb.NewLoggablePutRequest(&pb.PutRequest{}).String())
}
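The same assertion style could also cover the txn stringer; the sketch below is illustrative only and not part of this patch (the test name is hypothetical, and the imports mirror the test file above). It relies only on the documented guarantee that nested put values are replaced by their sizes:

package etcdserverpb_test

import (
	"testing"

	"github.com/stretchr/testify/assert"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)

func TestLoggableTxnRequestHidesValues(t *testing.T) {
	put := &pb.PutRequest{Key: []byte("k"), Value: []byte("secret")}
	req := &pb.TxnRequest{Success: []*pb.RequestOp{
		{Request: &pb.RequestOp_RequestPut{RequestPut: put}},
	}}
	// The raw value bytes must not appear in the loggable form.
	assert.NotContains(t, pb.NewLoggableTxnRequest(req).String(), "secret")
}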
diff --git a/api/etcdserverpb/rpc.pb.go b/api/etcdserverpb/rpc.pb.go
index 34c1824426e..87aec5c015a 100644
--- a/api/etcdserverpb/rpc.pb.go
+++ b/api/etcdserverpb/rpc.pb.go
@@ -12,8 +12,10 @@ import (
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/golang/protobuf/proto"
+ _ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
authpb "go.etcd.io/etcd/api/v3/authpb"
mvccpb "go.etcd.io/etcd/api/v3/mvccpb"
+ _ "go.etcd.io/etcd/api/v3/versionpb"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
@@ -274,9 +276,10 @@ type ResponseHeader struct {
ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
// member_id is the ID of the member which sent the response.
MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"`
- // revision is the key-value store revision when the request was applied.
+ // revision is the key-value store revision when the request was applied, and it is
+ // unset (so 0) for calls that do not interact with the key-value store.
// For watch progress responses, the header.revision indicates progress. All future events
- // recieved in this stream are guaranteed to have a higher revision number than the
+ // received in this stream are guaranteed to have a higher revision number than the
// header.revision number.
Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"`
// raft_term is the raft term when the request was applied.
@@ -1630,7 +1633,9 @@ type HashKVResponse struct {
// hash is the hash value computed from the responding member's MVCC keys up to a given revision.
Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
// compact_revision is the compacted revision of key-value store when hash begins.
- CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+ CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+ // hash_revision is the revision up to which the hash is calculated.
+ HashRevision int64 `protobuf:"varint,4,opt,name=hash_revision,json=hashRevision,proto3" json:"hash_revision,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -1690,6 +1695,13 @@ func (m *HashKVResponse) GetCompactRevision() int64 {
return 0
}
+func (m *HashKVResponse) GetHashRevision() int64 {
+ if m != nil {
+ return m.HashRevision
+ }
+ return 0
+}
+
type HashResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
// hash is the hash value computed from the responding member's KV's backend.
@@ -1792,7 +1804,11 @@ type SnapshotResponse struct {
// remaining_bytes is the number of blob bytes to be sent after this message
RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"`
// blob contains the next chunk of the snapshot in the snapshot stream.
- Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"`
+ Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"`
+ // local version of the server that created the snapshot.
+ // In a cluster whose members run different binary versions, each member can return a different result.
+ // Informs which etcd server version should be used when restoring the snapshot.
+ Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -1852,6 +1868,13 @@ func (m *SnapshotResponse) GetBlob() []byte {
return nil
}
+func (m *SnapshotResponse) GetVersion() string {
+ if m != nil {
+ return m.Version
+ }
+ return ""
+}
+
type WatchRequest struct {
// request_union is a request to either create a new watcher or cancel an existing watcher.
//
@@ -2176,7 +2199,8 @@ type WatchResponse struct {
// the created watcher from the same stream.
// All events sent to the created watcher will attach with the same watch_id.
Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"`
- // canceled is set to true if the response is for a cancel watch request.
+ // canceled is set to true if the response is for a cancel watch request
+ // or if the start_revision has already been compacted.
// No further events will be sent to the canceled watcher.
Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"`
// compact_revision is set to the minimum index if a watcher tries to watch
@@ -4223,7 +4247,11 @@ type StatusResponse struct {
// dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member.
DbSizeInUse int64 `protobuf:"varint,9,opt,name=dbSizeInUse,proto3" json:"dbSizeInUse,omitempty"`
// isLearner indicates if the member is raft learner.
- IsLearner bool `protobuf:"varint,10,opt,name=isLearner,proto3" json:"isLearner,omitempty"`
+ IsLearner bool `protobuf:"varint,10,opt,name=isLearner,proto3" json:"isLearner,omitempty"`
+ // storageVersion is the version of the db file. It might be updated with a delay relative to the target cluster version.
+ StorageVersion string `protobuf:"bytes,11,opt,name=storageVersion,proto3" json:"storageVersion,omitempty"`
+ // dbSizeQuota is the configured etcd storage quota in bytes (the value passed to the etcd instance via the --quota-backend-bytes flag).
+ DbSizeQuota int64 `protobuf:"varint,12,opt,name=dbSizeQuota,proto3" json:"dbSizeQuota,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -4332,6 +4360,20 @@ func (m *StatusResponse) GetIsLearner() bool {
return false
}
+func (m *StatusResponse) GetStorageVersion() string {
+ if m != nil {
+ return m.StorageVersion
+ }
+ return ""
+}
+
+func (m *StatusResponse) GetDbSizeQuota() int64 {
+ if m != nil {
+ return m.DbSizeQuota
+ }
+ return 0
+}
+
type AuthEnableRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@@ -6154,264 +6196,289 @@ func init() {
func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) }
var fileDescriptor_77a6da22d6a3feb1 = []byte{
- // 4107 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x73, 0x1b, 0xc9,
- 0x75, 0xe6, 0x00, 0xc4, 0xed, 0xe0, 0x42, 0xb0, 0x79, 0x11, 0x84, 0x95, 0x28, 0x6e, 0x6b, 0xa5,
- 0xe5, 0x4a, 0xbb, 0xc4, 0x9a, 0xb6, 0xb3, 0x55, 0x4a, 0xe2, 0x18, 0x22, 0xb1, 0x12, 0x97, 0x14,
- 0xc9, 0x1d, 0x42, 0xda, 0x4b, 0xb9, 0xc2, 0x1a, 0x02, 0x2d, 0x72, 0x42, 0x60, 0x06, 0x9e, 0x19,
- 0x40, 0xe4, 0xe6, 0xe2, 0x94, 0xcb, 0x71, 0x25, 0xaf, 0x76, 0x55, 0x2a, 0x79, 0x48, 0x5e, 0x52,
- 0x29, 0x97, 0x1f, 0xfc, 0x9c, 0xbf, 0x90, 0xa7, 0x5c, 0x2a, 0x7f, 0x20, 0xb5, 0xf1, 0x4b, 0xf2,
- 0x23, 0x52, 0xae, 0xbe, 0xcd, 0xf4, 0xdc, 0x40, 0xd9, 0xd8, 0xdd, 0x17, 0x11, 0x7d, 0xfa, 0xf4,
- 0xf9, 0x4e, 0x9f, 0xee, 0x3e, 0xe7, 0xf4, 0xe9, 0x11, 0x94, 0x9c, 0x51, 0x6f, 0x73, 0xe4, 0xd8,
- 0x9e, 0x8d, 0x2a, 0xc4, 0xeb, 0xf5, 0x5d, 0xe2, 0x4c, 0x88, 0x33, 0x3a, 0x6d, 0x2e, 0x9f, 0xd9,
- 0x67, 0x36, 0xeb, 0x68, 0xd1, 0x5f, 0x9c, 0xa7, 0xd9, 0xa0, 0x3c, 0x2d, 0x63, 0x64, 0xb6, 0x86,
- 0x93, 0x5e, 0x6f, 0x74, 0xda, 0xba, 0x98, 0x88, 0x9e, 0xa6, 0xdf, 0x63, 0x8c, 0xbd, 0xf3, 0xd1,
- 0x29, 0xfb, 0x23, 0xfa, 0x6e, 0x9d, 0xd9, 0xf6, 0xd9, 0x80, 0xf0, 0x5e, 0xcb, 0xb2, 0x3d, 0xc3,
- 0x33, 0x6d, 0xcb, 0xe5, 0xbd, 0xf8, 0xaf, 0x34, 0xa8, 0xe9, 0xc4, 0x1d, 0xd9, 0x96, 0x4b, 0x9e,
- 0x12, 0xa3, 0x4f, 0x1c, 0x74, 0x1b, 0xa0, 0x37, 0x18, 0xbb, 0x1e, 0x71, 0x4e, 0xcc, 0x7e, 0x43,
- 0x5b, 0xd7, 0x36, 0xe6, 0xf5, 0x92, 0xa0, 0xec, 0xf6, 0xd1, 0x1b, 0x50, 0x1a, 0x92, 0xe1, 0x29,
- 0xef, 0xcd, 0xb0, 0xde, 0x22, 0x27, 0xec, 0xf6, 0x51, 0x13, 0x8a, 0x0e, 0x99, 0x98, 0xae, 0x69,
- 0x5b, 0x8d, 0xec, 0xba, 0xb6, 0x91, 0xd5, 0xfd, 0x36, 0x1d, 0xe8, 0x18, 0x2f, 0xbd, 0x13, 0x8f,
- 0x38, 0xc3, 0xc6, 0x3c, 0x1f, 0x48, 0x09, 0x5d, 0xe2, 0x0c, 0xf1, 0x4f, 0x72, 0x50, 0xd1, 0x0d,
- 0xeb, 0x8c, 0xe8, 0xe4, 0x87, 0x63, 0xe2, 0x7a, 0xa8, 0x0e, 0xd9, 0x0b, 0x72, 0xc5, 0xe0, 0x2b,
- 0x3a, 0xfd, 0xc9, 0xc7, 0x5b, 0x67, 0xe4, 0x84, 0x58, 0x1c, 0xb8, 0x42, 0xc7, 0x5b, 0x67, 0xa4,
- 0x63, 0xf5, 0xd1, 0x32, 0xe4, 0x06, 0xe6, 0xd0, 0xf4, 0x04, 0x2a, 0x6f, 0x84, 0xd4, 0x99, 0x8f,
- 0xa8, 0xb3, 0x0d, 0xe0, 0xda, 0x8e, 0x77, 0x62, 0x3b, 0x7d, 0xe2, 0x34, 0x72, 0xeb, 0xda, 0x46,
- 0x6d, 0xeb, 0xad, 0x4d, 0x75, 0x19, 0x36, 0x55, 0x85, 0x36, 0x8f, 0x6d, 0xc7, 0x3b, 0xa4, 0xbc,
- 0x7a, 0xc9, 0x95, 0x3f, 0xd1, 0x87, 0x50, 0x66, 0x42, 0x3c, 0xc3, 0x39, 0x23, 0x5e, 0x23, 0xcf,
- 0xa4, 0xdc, 0xbb, 0x46, 0x4a, 0x97, 0x31, 0xeb, 0x0c, 0x9e, 0xff, 0x46, 0x18, 0x2a, 0x2e, 0x71,
- 0x4c, 0x63, 0x60, 0x7e, 0x61, 0x9c, 0x0e, 0x48, 0xa3, 0xb0, 0xae, 0x6d, 0x14, 0xf5, 0x10, 0x8d,
- 0xce, 0xff, 0x82, 0x5c, 0xb9, 0x27, 0xb6, 0x35, 0xb8, 0x6a, 0x14, 0x19, 0x43, 0x91, 0x12, 0x0e,
- 0xad, 0xc1, 0x15, 0x5b, 0x34, 0x7b, 0x6c, 0x79, 0xbc, 0xb7, 0xc4, 0x7a, 0x4b, 0x8c, 0xc2, 0xba,
- 0x37, 0xa0, 0x3e, 0x34, 0xad, 0x93, 0xa1, 0xdd, 0x3f, 0xf1, 0x0d, 0x02, 0xcc, 0x20, 0xb5, 0xa1,
- 0x69, 0x3d, 0xb3, 0xfb, 0xba, 0x34, 0x0b, 0xe5, 0x34, 0x2e, 0xc3, 0x9c, 0x65, 0xc1, 0x69, 0x5c,
- 0xaa, 0x9c, 0x9b, 0xb0, 0x44, 0x65, 0xf6, 0x1c, 0x62, 0x78, 0x24, 0x60, 0xae, 0x30, 0xe6, 0xc5,
- 0xa1, 0x69, 0x6d, 0xb3, 0x9e, 0x10, 0xbf, 0x71, 0x19, 0xe3, 0xaf, 0x0a, 0x7e, 0xe3, 0x32, 0xcc,
- 0x8f, 0x37, 0xa1, 0xe4, 0xdb, 0x1c, 0x15, 0x61, 0xfe, 0xe0, 0xf0, 0xa0, 0x53, 0x9f, 0x43, 0x00,
- 0xf9, 0xf6, 0xf1, 0x76, 0xe7, 0x60, 0xa7, 0xae, 0xa1, 0x32, 0x14, 0x76, 0x3a, 0xbc, 0x91, 0xc1,
- 0x8f, 0x01, 0x02, 0xeb, 0xa2, 0x02, 0x64, 0xf7, 0x3a, 0x9f, 0xd5, 0xe7, 0x28, 0xcf, 0x8b, 0x8e,
- 0x7e, 0xbc, 0x7b, 0x78, 0x50, 0xd7, 0xe8, 0xe0, 0x6d, 0xbd, 0xd3, 0xee, 0x76, 0xea, 0x19, 0xca,
- 0xf1, 0xec, 0x70, 0xa7, 0x9e, 0x45, 0x25, 0xc8, 0xbd, 0x68, 0xef, 0x3f, 0xef, 0xd4, 0xe7, 0xf1,
- 0xcf, 0x35, 0xa8, 0x8a, 0xf5, 0xe2, 0x67, 0x02, 0x7d, 0x07, 0xf2, 0xe7, 0xec, 0x5c, 0xb0, 0xad,
- 0x58, 0xde, 0xba, 0x15, 0x59, 0xdc, 0xd0, 0xd9, 0xd1, 0x05, 0x2f, 0xc2, 0x90, 0xbd, 0x98, 0xb8,
- 0x8d, 0xcc, 0x7a, 0x76, 0xa3, 0xbc, 0x55, 0xdf, 0xe4, 0xe7, 0x75, 0x73, 0x8f, 0x5c, 0xbd, 0x30,
- 0x06, 0x63, 0xa2, 0xd3, 0x4e, 0x84, 0x60, 0x7e, 0x68, 0x3b, 0x84, 0xed, 0xd8, 0xa2, 0xce, 0x7e,
- 0xd3, 0x6d, 0xcc, 0x16, 0x4d, 0xec, 0x56, 0xde, 0xc0, 0xbf, 0xd4, 0x00, 0x8e, 0xc6, 0x5e, 0xfa,
- 0xd1, 0x58, 0x86, 0xdc, 0x84, 0x0a, 0x16, 0xc7, 0x82, 0x37, 0xd8, 0x99, 0x20, 0x86, 0x4b, 0xfc,
- 0x33, 0x41, 0x1b, 0xe8, 0x06, 0x14, 0x46, 0x0e, 0x99, 0x9c, 0x5c, 0x4c, 0x18, 0x48, 0x51, 0xcf,
- 0xd3, 0xe6, 0xde, 0x04, 0xbd, 0x09, 0x15, 0xf3, 0xcc, 0xb2, 0x1d, 0x72, 0xc2, 0x65, 0xe5, 0x58,
- 0x6f, 0x99, 0xd3, 0x98, 0xde, 0x0a, 0x0b, 0x17, 0x9c, 0x57, 0x59, 0xf6, 0x29, 0x09, 0x5b, 0x50,
- 0x66, 0xaa, 0xce, 0x64, 0xbe, 0x77, 0x02, 0x1d, 0x33, 0x6c, 0x58, 0xdc, 0x84, 0x42, 0x6b, 0xfc,
- 0x03, 0x40, 0x3b, 0x64, 0x40, 0x3c, 0x32, 0x8b, 0xf7, 0x50, 0x6c, 0x92, 0x55, 0x6d, 0x82, 0x7f,
- 0xa6, 0xc1, 0x52, 0x48, 0xfc, 0x4c, 0xd3, 0x6a, 0x40, 0xa1, 0xcf, 0x84, 0x71, 0x0d, 0xb2, 0xba,
- 0x6c, 0xa2, 0x87, 0x50, 0x14, 0x0a, 0xb8, 0x8d, 0x6c, 0xca, 0xa6, 0x29, 0x70, 0x9d, 0x5c, 0xfc,
- 0xcb, 0x0c, 0x94, 0xc4, 0x44, 0x0f, 0x47, 0xa8, 0x0d, 0x55, 0x87, 0x37, 0x4e, 0xd8, 0x7c, 0x84,
- 0x46, 0xcd, 0x74, 0x27, 0xf4, 0x74, 0x4e, 0xaf, 0x88, 0x21, 0x8c, 0x8c, 0x7e, 0x1f, 0xca, 0x52,
- 0xc4, 0x68, 0xec, 0x09, 0x93, 0x37, 0xc2, 0x02, 0x82, 0xfd, 0xf7, 0x74, 0x4e, 0x07, 0xc1, 0x7e,
- 0x34, 0xf6, 0x50, 0x17, 0x96, 0xe5, 0x60, 0x3e, 0x1b, 0xa1, 0x46, 0x96, 0x49, 0x59, 0x0f, 0x4b,
- 0x89, 0x2f, 0xd5, 0xd3, 0x39, 0x1d, 0x89, 0xf1, 0x4a, 0xa7, 0xaa, 0x92, 0x77, 0xc9, 0x9d, 0x77,
- 0x4c, 0xa5, 0xee, 0xa5, 0x15, 0x57, 0xa9, 0x7b, 0x69, 0x3d, 0x2e, 0x41, 0x41, 0xb4, 0xf0, 0xbf,
- 0x64, 0x00, 0xe4, 0x6a, 0x1c, 0x8e, 0xd0, 0x0e, 0xd4, 0x1c, 0xd1, 0x0a, 0x59, 0xeb, 0x8d, 0x44,
- 0x6b, 0x89, 0x45, 0x9c, 0xd3, 0xab, 0x72, 0x10, 0x57, 0xee, 0x7b, 0x50, 0xf1, 0xa5, 0x04, 0x06,
- 0xbb, 0x99, 0x60, 0x30, 0x5f, 0x42, 0x59, 0x0e, 0xa0, 0x26, 0xfb, 0x04, 0x56, 0xfc, 0xf1, 0x09,
- 0x36, 0x7b, 0x73, 0x8a, 0xcd, 0x7c, 0x81, 0x4b, 0x52, 0x82, 0x6a, 0x35, 0x55, 0xb1, 0xc0, 0x6c,
- 0x37, 0x13, 0xcc, 0x16, 0x57, 0x8c, 0x1a, 0x0e, 0x68, 0xbc, 0xe4, 0x4d, 0xfc, 0xbf, 0x59, 0x28,
- 0x6c, 0xdb, 0xc3, 0x91, 0xe1, 0xd0, 0xd5, 0xc8, 0x3b, 0xc4, 0x1d, 0x0f, 0x3c, 0x66, 0xae, 0xda,
- 0xd6, 0xdd, 0xb0, 0x44, 0xc1, 0x26, 0xff, 0xea, 0x8c, 0x55, 0x17, 0x43, 0xe8, 0x60, 0x11, 0x1e,
- 0x33, 0xaf, 0x31, 0x58, 0x04, 0x47, 0x31, 0x44, 0x1e, 0xe4, 0x6c, 0x70, 0x90, 0x9b, 0x50, 0x98,
- 0x10, 0x27, 0x08, 0xe9, 0x4f, 0xe7, 0x74, 0x49, 0x40, 0xef, 0xc0, 0x42, 0x34, 0xbc, 0xe4, 0x04,
- 0x4f, 0xad, 0x17, 0x8e, 0x46, 0x77, 0xa1, 0x12, 0x8a, 0x71, 0x79, 0xc1, 0x57, 0x1e, 0x2a, 0x21,
- 0x6e, 0x55, 0xfa, 0x55, 0x1a, 0x8f, 0x2b, 0x4f, 0xe7, 0xa4, 0x67, 0x5d, 0x95, 0x9e, 0xb5, 0x28,
- 0x46, 0x09, 0xdf, 0x1a, 0x72, 0x32, 0xdf, 0x0f, 0x3b, 0x19, 0xfc, 0x7d, 0xa8, 0x86, 0x0c, 0x44,
- 0xe3, 0x4e, 0xe7, 0xe3, 0xe7, 0xed, 0x7d, 0x1e, 0xa4, 0x9e, 0xb0, 0xb8, 0xa4, 0xd7, 0x35, 0x1a,
- 0xeb, 0xf6, 0x3b, 0xc7, 0xc7, 0xf5, 0x0c, 0xaa, 0x42, 0xe9, 0xe0, 0xb0, 0x7b, 0xc2, 0xb9, 0xb2,
- 0xf8, 0x89, 0x2f, 0x41, 0x04, 0x39, 0x25, 0xb6, 0xcd, 0x29, 0xb1, 0x4d, 0x93, 0xb1, 0x2d, 0x13,
- 0xc4, 0x36, 0x16, 0xe6, 0xf6, 0x3b, 0xed, 0xe3, 0x4e, 0x7d, 0xfe, 0x71, 0x0d, 0x2a, 0xdc, 0xbe,
- 0x27, 0x63, 0x8b, 0x86, 0xda, 0x7f, 0xd2, 0x00, 0x82, 0xd3, 0x84, 0x5a, 0x50, 0xe8, 0x71, 0x9c,
- 0x86, 0xc6, 0x9c, 0xd1, 0x4a, 0xe2, 0x92, 0xe9, 0x92, 0x0b, 0x7d, 0x0b, 0x0a, 0xee, 0xb8, 0xd7,
- 0x23, 0xae, 0x0c, 0x79, 0x37, 0xa2, 0xfe, 0x50, 0x78, 0x2b, 0x5d, 0xf2, 0xd1, 0x21, 0x2f, 0x0d,
- 0x73, 0x30, 0x66, 0x01, 0x70, 0xfa, 0x10, 0xc1, 0x87, 0xff, 0x5e, 0x83, 0xb2, 0xb2, 0x79, 0x7f,
- 0x47, 0x27, 0x7c, 0x0b, 0x4a, 0x4c, 0x07, 0xd2, 0x17, 0x6e, 0xb8, 0xa8, 0x07, 0x04, 0xf4, 0x7b,
- 0x50, 0x92, 0x27, 0x40, 0x7a, 0xe2, 0x46, 0xb2, 0xd8, 0xc3, 0x91, 0x1e, 0xb0, 0xe2, 0x3d, 0x58,
- 0x64, 0x56, 0xe9, 0xd1, 0xe4, 0x5a, 0xda, 0x51, 0x4d, 0x3f, 0xb5, 0x48, 0xfa, 0xd9, 0x84, 0xe2,
- 0xe8, 0xfc, 0xca, 0x35, 0x7b, 0xc6, 0x40, 0x68, 0xe1, 0xb7, 0xf1, 0x47, 0x80, 0x54, 0x61, 0xb3,
- 0x4c, 0x17, 0x57, 0xa1, 0xfc, 0xd4, 0x70, 0xcf, 0x85, 0x4a, 0xf8, 0x21, 0x54, 0x69, 0x73, 0xef,
- 0xc5, 0x6b, 0xe8, 0xc8, 0x2e, 0x07, 0x92, 0x7b, 0x26, 0x9b, 0x23, 0x98, 0x3f, 0x37, 0xdc, 0x73,
- 0x36, 0xd1, 0xaa, 0xce, 0x7e, 0xa3, 0x77, 0xa0, 0xde, 0xe3, 0x93, 0x3c, 0x89, 0x5c, 0x19, 0x16,
- 0x04, 0xdd, 0xcf, 0x04, 0x3f, 0x85, 0x0a, 0x9f, 0xc3, 0x57, 0xad, 0x04, 0x5e, 0x84, 0x85, 0x63,
- 0xcb, 0x18, 0xb9, 0xe7, 0xb6, 0x8c, 0x6e, 0x74, 0xd2, 0xf5, 0x80, 0x36, 0x13, 0xe2, 0xdb, 0xb0,
- 0xe0, 0x90, 0xa1, 0x61, 0x5a, 0xa6, 0x75, 0x76, 0x72, 0x7a, 0xe5, 0x11, 0x57, 0x5c, 0x98, 0x6a,
- 0x3e, 0xf9, 0x31, 0xa5, 0x52, 0xd5, 0x4e, 0x07, 0xf6, 0xa9, 0x70, 0x73, 0xec, 0x37, 0xfe, 0x69,
- 0x06, 0x2a, 0x9f, 0x18, 0x5e, 0x4f, 0x2e, 0x1d, 0xda, 0x85, 0x9a, 0xef, 0xdc, 0x18, 0x45, 0xe8,
- 0x12, 0x09, 0xb1, 0x6c, 0x8c, 0x4c, 0xa5, 0x65, 0x74, 0xac, 0xf6, 0x54, 0x02, 0x13, 0x65, 0x58,
- 0x3d, 0x32, 0xf0, 0x45, 0x65, 0xd2, 0x45, 0x31, 0x46, 0x55, 0x94, 0x4a, 0x40, 0x87, 0x50, 0x1f,
- 0x39, 0xf6, 0x99, 0x43, 0x5c, 0xd7, 0x17, 0xc6, 0xc3, 0x18, 0x4e, 0x10, 0x76, 0x24, 0x58, 0x03,
- 0x71, 0x0b, 0xa3, 0x30, 0xe9, 0xf1, 0x42, 0x90, 0xcf, 0x70, 0xe7, 0xf4, 0x9f, 0x19, 0x40, 0xf1,
- 0x49, 0xfd, 0xb6, 0x29, 0xde, 0x3d, 0xa8, 0xb9, 0x9e, 0xe1, 0xc4, 0x36, 0x5b, 0x95, 0x51, 0x7d,
- 0x8f, 0xff, 0x36, 0xf8, 0x0a, 0x9d, 0x58, 0xb6, 0x67, 0xbe, 0xbc, 0x12, 0x59, 0x72, 0x4d, 0x92,
- 0x0f, 0x18, 0x15, 0x75, 0xa0, 0xf0, 0xd2, 0x1c, 0x78, 0xc4, 0x71, 0x1b, 0xb9, 0xf5, 0xec, 0x46,
- 0x6d, 0xeb, 0xe1, 0x75, 0xcb, 0xb0, 0xf9, 0x21, 0xe3, 0xef, 0x5e, 0x8d, 0x88, 0x2e, 0xc7, 0xaa,
- 0x99, 0x67, 0x3e, 0x94, 0x8d, 0xdf, 0x84, 0xe2, 0x2b, 0x2a, 0x82, 0xde, 0xb2, 0x0b, 0x3c, 0x59,
- 0x64, 0x6d, 0x7e, 0xc9, 0x7e, 0xe9, 0x18, 0x67, 0x43, 0x62, 0x79, 0xf2, 0x1e, 0x28, 0xdb, 0xf8,
- 0x1e, 0x40, 0x00, 0x43, 0x5d, 0xfe, 0xc1, 0xe1, 0xd1, 0xf3, 0x6e, 0x7d, 0x0e, 0x55, 0xa0, 0x78,
- 0x70, 0xb8, 0xd3, 0xd9, 0xef, 0xd0, 0xf8, 0x80, 0x5b, 0xd2, 0xa4, 0xa1, 0xb5, 0x54, 0x31, 0xb5,
- 0x10, 0x26, 0x5e, 0x85, 0xe5, 0xa4, 0x05, 0xa4, 0xb9, 0x68, 0x55, 0xec, 0xd2, 0x99, 0x8e, 0x8a,
- 0x0a, 0x9d, 0x09, 0x4f, 0xb7, 0x01, 0x05, 0xbe, 0x7b, 0xfb, 0x22, 0x39, 0x97, 0x4d, 0x6a, 0x08,
- 0xbe, 0x19, 0x49, 0x5f, 0xac, 0x92, 0xdf, 0x4e, 0x74, 0x2f, 0xb9, 0x44, 0xf7, 0x82, 0xee, 0x42,
- 0xd5, 0x3f, 0x0d, 0x86, 0x2b, 0x72, 0x81, 0x92, 0x5e, 0x91, 0x1b, 0x9d, 0xd2, 0x42, 0x46, 0x2f,
- 0x84, 0x8d, 0x8e, 0xee, 0x41, 0x9e, 0x4c, 0x88, 0xe5, 0xb9, 0x8d, 0x32, 0x8b, 0x18, 0x55, 0x99,
- 0xbb, 0x77, 0x28, 0x55, 0x17, 0x9d, 0xf8, 0xbb, 0xb0, 0xc8, 0xee, 0x48, 0x4f, 0x1c, 0xc3, 0x52,
- 0x2f, 0x73, 0xdd, 0xee, 0xbe, 0x30, 0x37, 0xfd, 0x89, 0x6a, 0x90, 0xd9, 0xdd, 0x11, 0x46, 0xc8,
- 0xec, 0xee, 0xe0, 0x1f, 0x6b, 0x80, 0xd4, 0x71, 0x33, 0xd9, 0x39, 0x22, 0x5c, 0xc2, 0x67, 0x03,
- 0xf8, 0x65, 0xc8, 0x11, 0xc7, 0xb1, 0x1d, 0x66, 0xd1, 0x92, 0xce, 0x1b, 0xf8, 0x2d, 0xa1, 0x83,
- 0x4e, 0x26, 0xf6, 0x85, 0x7f, 0x06, 0xb9, 0x34, 0xcd, 0x57, 0x75, 0x0f, 0x96, 0x42, 0x5c, 0x33,
- 0x45, 0xae, 0x0f, 0x61, 0x81, 0x09, 0xdb, 0x3e, 0x27, 0xbd, 0x8b, 0x91, 0x6d, 0x5a, 0x31, 0x3c,
- 0xba, 0x72, 0x81, 0x83, 0xa5, 0xf3, 0xe0, 0x13, 0xab, 0xf8, 0xc4, 0x6e, 0x77, 0x1f, 0x7f, 0x06,
- 0xab, 0x11, 0x39, 0x52, 0xfd, 0x3f, 0x82, 0x72, 0xcf, 0x27, 0xba, 0x22, 0xd7, 0xb9, 0x1d, 0x56,
- 0x2e, 0x3a, 0x54, 0x1d, 0x81, 0x0f, 0xe1, 0x46, 0x4c, 0xf4, 0x4c, 0x73, 0x7e, 0x1b, 0x56, 0x98,
- 0xc0, 0x3d, 0x42, 0x46, 0xed, 0x81, 0x39, 0x49, 0xb5, 0xf4, 0x48, 0x4c, 0x4a, 0x61, 0xfc, 0x7a,
- 0xf7, 0x05, 0xfe, 0x03, 0x81, 0xd8, 0x35, 0x87, 0xa4, 0x6b, 0xef, 0xa7, 0xeb, 0x46, 0xa3, 0xd9,
- 0x05, 0xb9, 0x72, 0x45, 0x5a, 0xc3, 0x7e, 0xe3, 0x7f, 0xd6, 0x84, 0xa9, 0xd4, 0xe1, 0x5f, 0xf3,
- 0x4e, 0x5e, 0x03, 0x38, 0xa3, 0x47, 0x86, 0xf4, 0x69, 0x07, 0xaf, 0xa8, 0x28, 0x14, 0x5f, 0x4f,
- 0xea, 0xbf, 0x2b, 0x42, 0xcf, 0x65, 0xb1, 0xcf, 0xd9, 0x3f, 0xbe, 0x97, 0xbb, 0x0d, 0x65, 0x46,
- 0x38, 0xf6, 0x0c, 0x6f, 0xec, 0xc6, 0x16, 0xe3, 0x2f, 0xc4, 0xb6, 0x97, 0x83, 0x66, 0x9a, 0xd7,
- 0xb7, 0x20, 0xcf, 0x2e, 0x13, 0x32, 0x95, 0xbe, 0x99, 0xb0, 0x1f, 0xb9, 0x1e, 0xba, 0x60, 0xc4,
- 0x3f, 0xd5, 0x20, 0xff, 0x8c, 0x95, 0x60, 0x15, 0xd5, 0xe6, 0xe5, 0x5a, 0x58, 0xc6, 0x90, 0x17,
- 0x86, 0x4a, 0x3a, 0xfb, 0xcd, 0x52, 0x4f, 0x42, 0x9c, 0xe7, 0xfa, 0x3e, 0x4f, 0x71, 0x4b, 0xba,
- 0xdf, 0xa6, 0x36, 0xeb, 0x0d, 0x4c, 0x62, 0x79, 0xac, 0x77, 0x9e, 0xf5, 0x2a, 0x14, 0x9a, 0x3d,
- 0x9b, 0xee, 0x3e, 0x31, 0x1c, 0x4b, 0x14, 0x4d, 0x8b, 0x7a, 0x40, 0xc0, 0xfb, 0x50, 0xe7, 0x7a,
- 0xb4, 0xfb, 0x7d, 0x25, 0xc1, 0xf4, 0xd1, 0xb4, 0x08, 0x5a, 0x48, 0x5a, 0x26, 0x2a, 0xed, 0x17,
- 0x1a, 0x2c, 0x2a, 0xe2, 0x66, 0xb2, 0xea, 0xbb, 0x90, 0xe7, 0x45, 0x6a, 0x91, 0xe9, 0x2c, 0x87,
- 0x47, 0x71, 0x18, 0x5d, 0xf0, 0xa0, 0x4d, 0x28, 0xf0, 0x5f, 0xf2, 0x0e, 0x90, 0xcc, 0x2e, 0x99,
- 0xf0, 0x3d, 0x58, 0x12, 0x24, 0x32, 0xb4, 0x93, 0x0e, 0x06, 0x5b, 0x0c, 0xfc, 0x67, 0xb0, 0x1c,
- 0x66, 0x9b, 0x69, 0x4a, 0x8a, 0x92, 0x99, 0xd7, 0x51, 0xb2, 0x2d, 0x95, 0x7c, 0x3e, 0xea, 0x2b,
- 0x79, 0x54, 0x74, 0xc7, 0xa8, 0xeb, 0x95, 0x09, 0xaf, 0x57, 0x30, 0x01, 0x29, 0xe2, 0x1b, 0x9d,
- 0xc0, 0x07, 0x72, 0x3b, 0xec, 0x9b, 0xae, 0xef, 0xc3, 0x31, 0x54, 0x06, 0xa6, 0x45, 0x0c, 0x47,
- 0x54, 0xce, 0x35, 0x5e, 0x39, 0x57, 0x69, 0xf8, 0x0b, 0x40, 0xea, 0xc0, 0x6f, 0x54, 0xe9, 0xfb,
- 0xd2, 0x64, 0x47, 0x8e, 0x3d, 0xb4, 0x53, 0xcd, 0x8e, 0xff, 0x1c, 0x56, 0x22, 0x7c, 0xdf, 0xa8,
- 0x9a, 0x4b, 0xb0, 0xb8, 0x43, 0x64, 0x42, 0x23, 0xdd, 0xde, 0x47, 0x80, 0x54, 0xe2, 0x4c, 0x91,
- 0xad, 0x05, 0x8b, 0xcf, 0xec, 0x09, 0x75, 0x91, 0x94, 0x1a, 0xf8, 0x06, 0x5e, 0x87, 0xf0, 0x4d,
- 0xe1, 0xb7, 0x29, 0xb8, 0x3a, 0x60, 0x26, 0xf0, 0x7f, 0xd7, 0xa0, 0xd2, 0x1e, 0x18, 0xce, 0x50,
- 0x02, 0x7f, 0x0f, 0xf2, 0xfc, 0x76, 0x2d, 0x0a, 0x5a, 0xf7, 0xc3, 0x62, 0x54, 0x5e, 0xde, 0x68,
- 0xf3, 0xbb, 0xb8, 0x18, 0x45, 0x15, 0x17, 0x6f, 0x5e, 0x3b, 0x91, 0x37, 0xb0, 0x1d, 0xf4, 0x1e,
- 0xe4, 0x0c, 0x3a, 0x84, 0x85, 0xa2, 0x5a, 0xb4, 0xae, 0xc1, 0xa4, 0xb1, 0x3b, 0x00, 0xe7, 0xc2,
- 0xdf, 0x81, 0xb2, 0x82, 0x80, 0x0a, 0x90, 0x7d, 0xd2, 0x11, 0x09, 0x7b, 0x7b, 0xbb, 0xbb, 0xfb,
- 0x82, 0x17, 0x74, 0x6a, 0x00, 0x3b, 0x1d, 0xbf, 0x9d, 0xc1, 0x9f, 0x8a, 0x51, 0xc2, 0xed, 0xab,
- 0xfa, 0x68, 0x69, 0xfa, 0x64, 0x5e, 0x4b, 0x9f, 0x4b, 0xa8, 0x8a, 0xe9, 0xcf, 0x1a, 0xc6, 0x98,
- 0xbc, 0x94, 0x30, 0xa6, 0x28, 0xaf, 0x0b, 0x46, 0xfc, 0x2b, 0x0d, 0xea, 0x3b, 0xf6, 0x2b, 0xeb,
- 0xcc, 0x31, 0xfa, 0xfe, 0x39, 0xf9, 0x30, 0xb2, 0x52, 0x9b, 0x91, 0xe2, 0x68, 0x84, 0x3f, 0x20,
- 0x44, 0x56, 0xac, 0x11, 0x94, 0x0d, 0x79, 0x2c, 0x94, 0x4d, 0xfc, 0x01, 0x2c, 0x44, 0x06, 0x51,
- 0xdb, 0xbf, 0x68, 0xef, 0xef, 0xee, 0x50, 0x5b, 0xb3, 0xc2, 0x5a, 0xe7, 0xa0, 0xfd, 0x78, 0xbf,
- 0x23, 0x1e, 0x90, 0xda, 0x07, 0xdb, 0x9d, 0xfd, 0x7a, 0x06, 0xf7, 0x60, 0x51, 0x81, 0x9f, 0xf5,
- 0x65, 0x20, 0x45, 0xbb, 0x05, 0xa8, 0x8a, 0x68, 0x2f, 0x0e, 0xe5, 0xbf, 0x65, 0xa0, 0x26, 0x29,
- 0x5f, 0x0f, 0x26, 0x5a, 0x85, 0x7c, 0xff, 0xf4, 0xd8, 0xfc, 0x42, 0xbe, 0x1c, 0x89, 0x16, 0xa5,
- 0x0f, 0x38, 0x0e, 0x7f, 0xbe, 0x15, 0x2d, 0x1a, 0xc6, 0x1d, 0xe3, 0xa5, 0xb7, 0x6b, 0xf5, 0xc9,
- 0x25, 0x4b, 0x0a, 0xe6, 0xf5, 0x80, 0xc0, 0x2a, 0x4c, 0xe2, 0x99, 0x97, 0xdd, 0xac, 0x94, 0x67,
- 0x5f, 0xf4, 0x00, 0xea, 0xf4, 0x77, 0x7b, 0x34, 0x1a, 0x98, 0xa4, 0xcf, 0x05, 0x14, 0x18, 0x4f,
- 0x8c, 0x4e, 0xd1, 0xd9, 0x5d, 0xc4, 0x6d, 0x14, 0x59, 0x58, 0x12, 0x2d, 0xb4, 0x0e, 0x65, 0xae,
- 0xdf, 0xae, 0xf5, 0xdc, 0x25, 0xec, 0xed, 0x33, 0xab, 0xab, 0xa4, 0x70, 0x9a, 0x01, 0xd1, 0x34,
- 0x63, 0x09, 0x16, 0xdb, 0x63, 0xef, 0xbc, 0x63, 0xd1, 0x58, 0x21, 0xad, 0xbc, 0x0c, 0x88, 0x12,
- 0x77, 0x4c, 0x57, 0xa5, 0x0a, 0xd6, 0xf0, 0x82, 0x74, 0x60, 0x89, 0x12, 0x89, 0xe5, 0x99, 0x3d,
- 0x25, 0xae, 0xca, 0xcc, 0x4b, 0x8b, 0x64, 0x5e, 0x86, 0xeb, 0xbe, 0xb2, 0x9d, 0xbe, 0xb0, 0xb9,
- 0xdf, 0xc6, 0xff, 0xa8, 0x71, 0xc8, 0xe7, 0x6e, 0x28, 0x7d, 0xfa, 0x2d, 0xc5, 0xa0, 0xf7, 0xa1,
- 0x60, 0x8f, 0xd8, 0x0b, 0xbf, 0x28, 0xc3, 0xac, 0x6e, 0xf2, 0x6f, 0x02, 0x36, 0x85, 0xe0, 0x43,
- 0xde, 0xab, 0x4b, 0x36, 0x74, 0x1f, 0x6a, 0xe7, 0x86, 0x7b, 0x4e, 0xfa, 0x47, 0x52, 0x26, 0xbf,
- 0xf9, 0x45, 0xa8, 0x78, 0x23, 0xd0, 0xef, 0x09, 0xf1, 0xa6, 0xe8, 0x87, 0x1f, 0xc2, 0x8a, 0xe4,
- 0x14, 0xaf, 0x13, 0x53, 0x98, 0x5f, 0xc1, 0x6d, 0xc9, 0xbc, 0x7d, 0x6e, 0x58, 0x67, 0x44, 0x02,
- 0xfe, 0xae, 0x16, 0x88, 0xcf, 0x27, 0x9b, 0x38, 0x9f, 0xc7, 0xd0, 0xf0, 0xe7, 0xc3, 0x6e, 0xd6,
- 0xf6, 0x40, 0x55, 0x74, 0xec, 0x8a, 0xf3, 0x54, 0xd2, 0xd9, 0x6f, 0x4a, 0x73, 0xec, 0x81, 0x9f,
- 0x4a, 0xd3, 0xdf, 0x78, 0x1b, 0x6e, 0x4a, 0x19, 0xe2, 0xce, 0x1b, 0x16, 0x12, 0x53, 0x3c, 0x49,
- 0x88, 0x30, 0x2c, 0x1d, 0x3a, 0x7d, 0xe1, 0x55, 0xce, 0xf0, 0x12, 0x30, 0x99, 0x9a, 0x22, 0x73,
- 0x85, 0x6f, 0x4a, 0xaa, 0x98, 0x92, 0x2d, 0x49, 0x32, 0x15, 0xa0, 0x92, 0xc5, 0x82, 0x51, 0x72,
- 0x6c, 0xc1, 0x62, 0xa2, 0x7f, 0x00, 0x6b, 0xbe, 0x12, 0xd4, 0x6e, 0x47, 0xc4, 0x19, 0x9a, 0xae,
- 0xab, 0xd4, 0xbd, 0x93, 0x26, 0x7e, 0x1f, 0xe6, 0x47, 0x44, 0x04, 0xa1, 0xf2, 0x16, 0x92, 0x9b,
- 0x52, 0x19, 0xcc, 0xfa, 0x71, 0x1f, 0xee, 0x48, 0xe9, 0xdc, 0xa2, 0x89, 0xe2, 0xa3, 0x4a, 0xc9,
- 0x6a, 0x60, 0x26, 0xa5, 0x1a, 0x98, 0x8d, 0xbc, 0xc5, 0x7c, 0xc4, 0x0d, 0x29, 0xcf, 0xfc, 0x4c,
- 0xc9, 0xc5, 0x1e, 0xb7, 0xa9, 0xef, 0x2a, 0x66, 0x12, 0xf6, 0xd7, 0xc2, 0x0b, 0x7c, 0x55, 0x1e,
- 0x9e, 0xb0, 0x19, 0xca, 0x87, 0x0e, 0xd9, 0xa4, 0x59, 0x33, 0x5d, 0x00, 0x5d, 0xad, 0x85, 0xce,
- 0xeb, 0x21, 0x1a, 0x3e, 0x85, 0xe5, 0xb0, 0x5f, 0x9b, 0x49, 0x97, 0x65, 0xc8, 0x79, 0xf6, 0x05,
- 0x91, 0xb1, 0x86, 0x37, 0xa4, 0xed, 0x7c, 0x9f, 0x37, 0x93, 0xed, 0x8c, 0x40, 0x18, 0x3b, 0x1d,
- 0xb3, 0xea, 0x4b, 0x37, 0x96, 0xbc, 0x03, 0xf1, 0x06, 0x3e, 0x80, 0xd5, 0xa8, 0x67, 0x9b, 0x49,
- 0xe5, 0x17, 0xfc, 0x2c, 0x25, 0x39, 0xbf, 0x99, 0xe4, 0x7e, 0x1c, 0xf8, 0x25, 0xc5, 0xb7, 0xcd,
- 0x24, 0x52, 0x87, 0x66, 0x92, 0xab, 0xfb, 0x2a, 0x8e, 0x8e, 0xef, 0xf9, 0x66, 0x12, 0xe6, 0x06,
- 0xc2, 0x66, 0x5f, 0xfe, 0xc0, 0x5d, 0x65, 0xa7, 0xba, 0x2b, 0x71, 0x48, 0x02, 0x87, 0xfa, 0x35,
- 0x6c, 0x3a, 0x81, 0x11, 0xf8, 0xf2, 0x59, 0x31, 0x68, 0x38, 0xf3, 0x31, 0x58, 0x43, 0x6e, 0x6c,
- 0x35, 0x02, 0xcc, 0xb4, 0x18, 0x9f, 0x04, 0x6e, 0x3c, 0x16, 0x24, 0x66, 0x12, 0xfc, 0x29, 0xac,
- 0xa7, 0xc7, 0x87, 0x59, 0x24, 0x3f, 0x68, 0x41, 0xc9, 0xbf, 0x0c, 0x29, 0xdf, 0x9b, 0x95, 0xa1,
- 0x70, 0x70, 0x78, 0x7c, 0xd4, 0xde, 0xee, 0xf0, 0x0f, 0xce, 0xb6, 0x0f, 0x75, 0xfd, 0xf9, 0x51,
- 0xb7, 0x9e, 0xd9, 0xfa, 0x75, 0x16, 0x32, 0x7b, 0x2f, 0xd0, 0x67, 0x90, 0xe3, 0x5f, 0x5f, 0x4c,
- 0xf9, 0xe4, 0xa6, 0x39, 0xed, 0x03, 0x13, 0x7c, 0xe3, 0xc7, 0xff, 0xf5, 0xeb, 0x9f, 0x67, 0x16,
- 0x71, 0xa5, 0x35, 0xf9, 0x76, 0xeb, 0x62, 0xd2, 0x62, 0x61, 0xea, 0x91, 0xf6, 0x00, 0x7d, 0x0c,
- 0xd9, 0xa3, 0xb1, 0x87, 0x52, 0x3f, 0xc5, 0x69, 0xa6, 0x7f, 0x73, 0x82, 0x57, 0x98, 0xd0, 0x05,
- 0x0c, 0x42, 0xe8, 0x68, 0xec, 0x51, 0x91, 0x3f, 0x84, 0xb2, 0xfa, 0xc5, 0xc8, 0xb5, 0xdf, 0xe7,
- 0x34, 0xaf, 0xff, 0x1a, 0x05, 0xdf, 0x66, 0x50, 0x37, 0x30, 0x12, 0x50, 0xfc, 0x9b, 0x16, 0x75,
- 0x16, 0xdd, 0x4b, 0x0b, 0xa5, 0x7e, 0xbd, 0xd3, 0x4c, 0xff, 0x40, 0x25, 0x36, 0x0b, 0xef, 0xd2,
- 0xa2, 0x22, 0xff, 0x44, 0x7c, 0x9b, 0xd2, 0xf3, 0xd0, 0x9d, 0x84, 0x6f, 0x13, 0xd4, 0x57, 0xf8,
- 0xe6, 0x7a, 0x3a, 0x83, 0x00, 0xb9, 0xc5, 0x40, 0x56, 0xf1, 0xa2, 0x00, 0xe9, 0xf9, 0x2c, 0x8f,
- 0xb4, 0x07, 0x5b, 0x3d, 0xc8, 0xb1, 0x17, 0x2e, 0xf4, 0xb9, 0xfc, 0xd1, 0x4c, 0x78, 0xea, 0x4b,
- 0x59, 0xe8, 0xd0, 0xdb, 0x18, 0x5e, 0x66, 0x40, 0x35, 0x5c, 0xa2, 0x40, 0xec, 0x7d, 0xeb, 0x91,
- 0xf6, 0x60, 0x43, 0x7b, 0x5f, 0xdb, 0xfa, 0x55, 0x0e, 0x72, 0xac, 0xb4, 0x8b, 0x2e, 0x00, 0x82,
- 0xd7, 0x9e, 0xe8, 0xec, 0x62, 0xef, 0x47, 0xd1, 0xd9, 0xc5, 0x1f, 0x8a, 0x70, 0x93, 0x81, 0x2e,
- 0xe3, 0x05, 0x0a, 0xca, 0x2a, 0xc6, 0x2d, 0x56, 0x04, 0xa7, 0x76, 0xfc, 0x1b, 0x4d, 0x54, 0xb6,
- 0xf9, 0x59, 0x42, 0x49, 0xd2, 0x42, 0x4f, 0x3e, 0xd1, 0xed, 0x90, 0xf0, 0xdc, 0x83, 0xbf, 0xcb,
- 0x00, 0x5b, 0xb8, 0x1e, 0x00, 0x3a, 0x8c, 0xe3, 0x91, 0xf6, 0xe0, 0xf3, 0x06, 0x5e, 0x12, 0x56,
- 0x8e, 0xf4, 0xa0, 0x1f, 0x41, 0x2d, 0xfc, 0xa4, 0x81, 0xee, 0x26, 0x60, 0x45, 0x5f, 0x46, 0x9a,
- 0x6f, 0x4d, 0x67, 0x12, 0x3a, 0xad, 0x31, 0x9d, 0x04, 0x38, 0x47, 0xbe, 0x20, 0x64, 0x64, 0x50,
- 0x26, 0xb1, 0x06, 0xe8, 0x1f, 0x34, 0xf1, 0xe2, 0x14, 0xbc, 0x51, 0xa0, 0x24, 0xe9, 0xb1, 0x17,
- 0x90, 0xe6, 0xbd, 0x6b, 0xb8, 0x84, 0x12, 0x7f, 0xc8, 0x94, 0xf8, 0x00, 0x2f, 0x07, 0x4a, 0x78,
- 0xe6, 0x90, 0x78, 0xb6, 0xd0, 0xe2, 0xf3, 0x5b, 0xf8, 0x46, 0xc8, 0x38, 0xa1, 0xde, 0x60, 0xb1,
- 0xf8, 0x3b, 0x43, 0xe2, 0x62, 0x85, 0xde, 0x2d, 0x12, 0x17, 0x2b, 0xfc, 0x48, 0x91, 0xb4, 0x58,
- 0xfc, 0x55, 0x21, 0x69, 0xb1, 0xfc, 0x9e, 0xad, 0xff, 0x9b, 0x87, 0xc2, 0x36, 0xff, 0x26, 0x1c,
- 0xd9, 0x50, 0xf2, 0xcb, 0xf4, 0x68, 0x2d, 0xa9, 0xce, 0x18, 0x5c, 0x6b, 0x9a, 0x77, 0x52, 0xfb,
- 0x85, 0x42, 0x6f, 0x32, 0x85, 0xde, 0xc0, 0xab, 0x14, 0x59, 0x7c, 0x76, 0xde, 0xe2, 0xc5, 0xac,
- 0x96, 0xd1, 0xef, 0x53, 0x43, 0xfc, 0x29, 0x54, 0xd4, 0x3a, 0x3a, 0x7a, 0x33, 0xb1, 0xb6, 0xa9,
- 0x96, 0xe2, 0x9b, 0x78, 0x1a, 0x8b, 0x40, 0x7e, 0x8b, 0x21, 0xaf, 0xe1, 0x9b, 0x09, 0xc8, 0x0e,
- 0x63, 0x0d, 0x81, 0xf3, 0x1a, 0x78, 0x32, 0x78, 0xa8, 0xc4, 0x9e, 0x0c, 0x1e, 0x2e, 0xa1, 0x4f,
- 0x05, 0x1f, 0x33, 0x56, 0x0a, 0xee, 0x02, 0x04, 0x95, 0x6c, 0x94, 0x68, 0x4b, 0xe5, 0x5e, 0x17,
- 0x75, 0x0e, 0xf1, 0x22, 0x38, 0xc6, 0x0c, 0x56, 0xec, 0xbb, 0x08, 0xec, 0xc0, 0x74, 0x3d, 0x7e,
- 0x30, 0xab, 0xa1, 0xd2, 0x34, 0x4a, 0x9c, 0x4f, 0xb8, 0xbe, 0xdd, 0xbc, 0x3b, 0x95, 0x47, 0xa0,
- 0xdf, 0x63, 0xe8, 0x77, 0x70, 0x33, 0x01, 0x7d, 0xc4, 0x79, 0xe9, 0x66, 0xfb, 0xff, 0x3c, 0x94,
- 0x9f, 0x19, 0xa6, 0xe5, 0x11, 0xcb, 0xb0, 0x7a, 0x04, 0x9d, 0x42, 0x8e, 0x45, 0xea, 0xa8, 0x23,
- 0x56, 0xcb, 0xb6, 0x51, 0x47, 0x1c, 0xaa, 0x69, 0xe2, 0x75, 0x06, 0xdc, 0xc4, 0x2b, 0x14, 0x78,
- 0x18, 0x88, 0x6e, 0xb1, 0x52, 0x24, 0x9d, 0xf4, 0x4b, 0xc8, 0x8b, 0xd7, 0xbe, 0x88, 0xa0, 0x50,
- 0xf1, 0xa7, 0x79, 0x2b, 0xb9, 0x33, 0x69, 0x2f, 0xab, 0x30, 0x2e, 0xe3, 0xa3, 0x38, 0x13, 0x80,
- 0xa0, 0xc6, 0x1e, 0x5d, 0xd1, 0x58, 0x49, 0xbe, 0xb9, 0x9e, 0xce, 0x90, 0x64, 0x53, 0x15, 0xb3,
- 0xef, 0xf3, 0x52, 0xdc, 0x3f, 0x86, 0xf9, 0xa7, 0x86, 0x7b, 0x8e, 0x22, 0xb1, 0x57, 0xf9, 0x56,
- 0xac, 0xd9, 0x4c, 0xea, 0x12, 0x28, 0x77, 0x18, 0xca, 0x4d, 0xee, 0xca, 0x54, 0x94, 0x73, 0xc3,
- 0xa5, 0x41, 0x0d, 0xf5, 0x21, 0xcf, 0x3f, 0x1d, 0x8b, 0xda, 0x2f, 0xf4, 0xf9, 0x59, 0xd4, 0x7e,
- 0xe1, 0xaf, 0xcd, 0xae, 0x47, 0x19, 0x41, 0x51, 0x7e, 0xab, 0x85, 0x22, 0x0f, 0xf7, 0x91, 0xef,
- 0xba, 0x9a, 0x6b, 0x69, 0xdd, 0x02, 0xeb, 0x2e, 0xc3, 0xba, 0x8d, 0x1b, 0xb1, 0xb5, 0x12, 0x9c,
- 0x8f, 0xb4, 0x07, 0xef, 0x6b, 0xe8, 0x47, 0x00, 0xc1, 0xb3, 0x44, 0xec, 0x04, 0x46, 0x5f, 0x38,
- 0x62, 0x27, 0x30, 0xf6, 0xa2, 0x81, 0x37, 0x19, 0xee, 0x06, 0xbe, 0x1b, 0xc5, 0xf5, 0x1c, 0xc3,
- 0x72, 0x5f, 0x12, 0xe7, 0x3d, 0x5e, 0x65, 0x75, 0xcf, 0xcd, 0x11, 0x9d, 0xb2, 0x03, 0x25, 0xbf,
- 0xea, 0x1c, 0xf5, 0xb6, 0xd1, 0x6a, 0x78, 0xd4, 0xdb, 0xc6, 0xca, 0xd5, 0x61, 0xb7, 0x13, 0xda,
- 0x2d, 0x92, 0x95, 0x1e, 0xc0, 0x5f, 0xd4, 0x61, 0x9e, 0x66, 0xdd, 0x34, 0x39, 0x09, 0xea, 0x26,
- 0xd1, 0xd9, 0xc7, 0xaa, 0xa8, 0xd1, 0xd9, 0xc7, 0x4b, 0x2e, 0xe1, 0xe4, 0x84, 0x5e, 0xb2, 0x5a,
- 0xbc, 0x44, 0x41, 0x67, 0x6a, 0x43, 0x59, 0x29, 0xac, 0xa0, 0x04, 0x61, 0xe1, 0xf2, 0x6c, 0x34,
- 0xdc, 0x25, 0x54, 0x65, 0xf0, 0x1b, 0x0c, 0x6f, 0x85, 0x87, 0x3b, 0x86, 0xd7, 0xe7, 0x1c, 0x14,
- 0x50, 0xcc, 0x4e, 0x9c, 0xfb, 0x84, 0xd9, 0x85, 0xcf, 0xfe, 0x7a, 0x3a, 0x43, 0xea, 0xec, 0x82,
- 0x83, 0xff, 0x0a, 0x2a, 0x6a, 0x79, 0x05, 0x25, 0x28, 0x1f, 0x29, 0x29, 0x47, 0xe3, 0x48, 0x52,
- 0x75, 0x26, 0xec, 0xd9, 0x18, 0xa4, 0xa1, 0xb0, 0x51, 0xe0, 0x01, 0x14, 0x44, 0xbd, 0x25, 0xc9,
- 0xa4, 0xe1, 0xf2, 0x73, 0x92, 0x49, 0x23, 0xc5, 0x9a, 0x70, 0xf6, 0xcc, 0x10, 0xe9, 0x95, 0x52,
- 0xc6, 0x6a, 0x81, 0xf6, 0x84, 0x78, 0x69, 0x68, 0x41, 0x25, 0x33, 0x0d, 0x4d, 0xb9, 0xce, 0xa7,
- 0xa1, 0x9d, 0x11, 0x4f, 0xf8, 0x03, 0x79, 0x4d, 0x46, 0x29, 0xc2, 0xd4, 0xf8, 0x88, 0xa7, 0xb1,
- 0x24, 0x5d, 0x6e, 0x02, 0x40, 0x19, 0x1c, 0x2f, 0x01, 0x82, 0x6a, 0x50, 0x34, 0x63, 0x4d, 0xac,
- 0x82, 0x47, 0x33, 0xd6, 0xe4, 0x82, 0x52, 0xd8, 0xf7, 0x05, 0xb8, 0xfc, 0x6e, 0x45, 0x91, 0x7f,
- 0xa6, 0x01, 0x8a, 0x17, 0x8e, 0xd0, 0xc3, 0x64, 0xe9, 0x89, 0xb5, 0xf5, 0xe6, 0xbb, 0xaf, 0xc7,
- 0x9c, 0x14, 0xce, 0x02, 0x95, 0x7a, 0x8c, 0x7b, 0xf4, 0x8a, 0x2a, 0xf5, 0x97, 0x1a, 0x54, 0x43,
- 0x55, 0x27, 0x74, 0x3f, 0x65, 0x4d, 0x23, 0x25, 0xf7, 0xe6, 0xdb, 0xd7, 0xf2, 0x25, 0xa5, 0xf2,
- 0xca, 0x0e, 0x90, 0x77, 0x9a, 0x9f, 0x68, 0x50, 0x0b, 0x57, 0xa9, 0x50, 0x8a, 0xec, 0x58, 0xc9,
- 0xbe, 0xb9, 0x71, 0x3d, 0xe3, 0xf4, 0xe5, 0x09, 0xae, 0x33, 0x03, 0x28, 0x88, 0xba, 0x56, 0xd2,
- 0xc6, 0x0f, 0x17, 0xfb, 0x93, 0x36, 0x7e, 0xa4, 0x28, 0x96, 0xb0, 0xf1, 0x1d, 0x7b, 0x40, 0x94,
- 0x63, 0x26, 0x0a, 0x5f, 0x69, 0x68, 0xd3, 0x8f, 0x59, 0xa4, 0x6a, 0x96, 0x86, 0x16, 0x1c, 0x33,
- 0x59, 0xf1, 0x42, 0x29, 0xc2, 0xae, 0x39, 0x66, 0xd1, 0x82, 0x59, 0xc2, 0x31, 0x63, 0x80, 0xca,
- 0x31, 0x0b, 0x6a, 0x53, 0x49, 0xc7, 0x2c, 0xf6, 0x76, 0x91, 0x74, 0xcc, 0xe2, 0xe5, 0xad, 0x84,
- 0x75, 0x64, 0xb8, 0xa1, 0x63, 0xb6, 0x94, 0x50, 0xc6, 0x42, 0xef, 0xa6, 0x18, 0x31, 0xf1, 0x49,
- 0xa4, 0xf9, 0xde, 0x6b, 0x72, 0xa7, 0xee, 0x71, 0x6e, 0x7e, 0xb9, 0xc7, 0xff, 0x56, 0x83, 0xe5,
- 0xa4, 0x12, 0x18, 0x4a, 0xc1, 0x49, 0x79, 0x4a, 0x69, 0x6e, 0xbe, 0x2e, 0xfb, 0x74, 0x6b, 0xf9,
- 0xbb, 0xfe, 0x71, 0xfd, 0x5f, 0xbf, 0x5c, 0xd3, 0xfe, 0xe3, 0xcb, 0x35, 0xed, 0xbf, 0xbf, 0x5c,
- 0xd3, 0xfe, 0xee, 0x7f, 0xd6, 0xe6, 0x4e, 0xf3, 0xec, 0x3f, 0x1a, 0x7f, 0xfb, 0x37, 0x01, 0x00,
- 0x00, 0xff, 0xff, 0xee, 0x4f, 0x63, 0x90, 0xed, 0x3c, 0x00, 0x00,
+ // 4511 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x3c, 0x5d, 0x6f, 0x1c, 0x47,
+ 0x72, 0x9c, 0x5d, 0x92, 0xcb, 0xad, 0x5d, 0x2e, 0x97, 0x2d, 0x4a, 0x5a, 0xad, 0x25, 0x8a, 0x1e,
+ 0x59, 0xb6, 0x2c, 0x5b, 0x5c, 0x89, 0x94, 0xec, 0x44, 0x81, 0x9d, 0x5b, 0x91, 0x6b, 0x89, 0x11,
+ 0x45, 0xd2, 0xc3, 0x95, 0x7c, 0x56, 0x80, 0x63, 0x86, 0xbb, 0xad, 0xe5, 0x1c, 0x77, 0x67, 0xf6,
+ 0x66, 0x86, 0x2b, 0xd2, 0x79, 0x38, 0xe7, 0x92, 0xcb, 0xe1, 0x12, 0xe0, 0x80, 0x38, 0x40, 0x70,
+ 0x08, 0x92, 0x97, 0x20, 0x40, 0xf2, 0x90, 0x04, 0xc9, 0x43, 0x1e, 0x82, 0x04, 0xc8, 0x43, 0xf2,
+ 0x90, 0x3c, 0x04, 0x08, 0x90, 0x87, 0xbc, 0x26, 0xce, 0x3d, 0xe5, 0x57, 0x1c, 0xfa, 0x6b, 0xba,
+ 0x67, 0xa6, 0x87, 0x94, 0x8f, 0x34, 0xfc, 0x62, 0xed, 0x74, 0x55, 0x57, 0x55, 0x57, 0x75, 0x55,
+ 0x75, 0x57, 0xb5, 0x09, 0x45, 0x7f, 0xd8, 0x59, 0x1c, 0xfa, 0x5e, 0xe8, 0xa1, 0x32, 0x0e, 0x3b,
+ 0xdd, 0x00, 0xfb, 0x23, 0xec, 0x0f, 0x77, 0xeb, 0x73, 0x3d, 0xaf, 0xe7, 0x51, 0x40, 0x83, 0xfc,
+ 0x62, 0x38, 0xf5, 0x1a, 0xc1, 0x69, 0xd8, 0x43, 0xa7, 0x31, 0x18, 0x75, 0x3a, 0xc3, 0xdd, 0xc6,
+ 0xfe, 0x88, 0x43, 0xea, 0x11, 0xc4, 0x3e, 0x08, 0xf7, 0x86, 0xbb, 0xf4, 0x1f, 0x0e, 0x5b, 0x88,
+ 0x60, 0x23, 0xec, 0x07, 0x8e, 0xe7, 0x0e, 0x77, 0xc5, 0x2f, 0x8e, 0x71, 0xb9, 0xe7, 0x79, 0xbd,
+ 0x3e, 0x66, 0xf3, 0x5d, 0xd7, 0x0b, 0xed, 0xd0, 0xf1, 0xdc, 0x80, 0x43, 0xd9, 0x3f, 0x9d, 0x5b,
+ 0x3d, 0xec, 0xde, 0xf2, 0x86, 0xd8, 0xb5, 0x87, 0xce, 0x68, 0xa9, 0xe1, 0x0d, 0x29, 0x4e, 0x1a,
+ 0xdf, 0xfc, 0x89, 0x01, 0x15, 0x0b, 0x07, 0x43, 0xcf, 0x0d, 0xf0, 0x23, 0x6c, 0x77, 0xb1, 0x8f,
+ 0xae, 0x00, 0x74, 0xfa, 0x07, 0x41, 0x88, 0xfd, 0x1d, 0xa7, 0x5b, 0x33, 0x16, 0x8c, 0x1b, 0xe3,
+ 0x56, 0x91, 0x8f, 0xac, 0x75, 0xd1, 0x6b, 0x50, 0x1c, 0xe0, 0xc1, 0x2e, 0x83, 0xe6, 0x28, 0x74,
+ 0x8a, 0x0d, 0xac, 0x75, 0x51, 0x1d, 0xa6, 0x7c, 0x3c, 0x72, 0x88, 0xb8, 0xb5, 0xfc, 0x82, 0x71,
+ 0x23, 0x6f, 0x45, 0xdf, 0x64, 0xa2, 0x6f, 0xbf, 0x08, 0x77, 0x42, 0xec, 0x0f, 0x6a, 0xe3, 0x6c,
+ 0x22, 0x19, 0x68, 0x63, 0x7f, 0x70, 0xbf, 0xf0, 0x83, 0xbf, 0xaf, 0xe5, 0x97, 0x17, 0x6f, 0x9b,
+ 0xff, 0x32, 0x01, 0x65, 0xcb, 0x76, 0x7b, 0xd8, 0xc2, 0xdf, 0x3b, 0xc0, 0x41, 0x88, 0xaa, 0x90,
+ 0xdf, 0xc7, 0x47, 0x54, 0x8e, 0xb2, 0x45, 0x7e, 0x32, 0x42, 0x6e, 0x0f, 0xef, 0x60, 0x97, 0x49,
+ 0x50, 0x26, 0x84, 0xdc, 0x1e, 0x6e, 0xb9, 0x5d, 0x34, 0x07, 0x13, 0x7d, 0x67, 0xe0, 0x84, 0x9c,
+ 0x3d, 0xfb, 0x88, 0xc9, 0x35, 0x9e, 0x90, 0x6b, 0x05, 0x20, 0xf0, 0xfc, 0x70, 0xc7, 0xf3, 0xbb,
+ 0xd8, 0xaf, 0x4d, 0x2c, 0x18, 0x37, 0x2a, 0x4b, 0x6f, 0x2c, 0xaa, 0x16, 0x5e, 0x54, 0x05, 0x5a,
+ 0xdc, 0xf6, 0xfc, 0x70, 0x93, 0xe0, 0x5a, 0xc5, 0x40, 0xfc, 0x44, 0x1f, 0x41, 0x89, 0x12, 0x09,
+ 0x6d, 0xbf, 0x87, 0xc3, 0xda, 0x24, 0xa5, 0x72, 0xfd, 0x04, 0x2a, 0x6d, 0x8a, 0x6c, 0x51, 0xf6,
+ 0xec, 0x37, 0x32, 0xa1, 0x1c, 0x60, 0xdf, 0xb1, 0xfb, 0xce, 0x67, 0xf6, 0x6e, 0x1f, 0xd7, 0x0a,
+ 0x0b, 0xc6, 0x8d, 0x29, 0x2b, 0x36, 0x46, 0xd6, 0xbf, 0x8f, 0x8f, 0x82, 0x1d, 0xcf, 0xed, 0x1f,
+ 0xd5, 0xa6, 0x28, 0xc2, 0x14, 0x19, 0xd8, 0x74, 0xfb, 0x47, 0xd4, 0x7a, 0xde, 0x81, 0x1b, 0x32,
+ 0x68, 0x91, 0x42, 0x8b, 0x74, 0x84, 0x82, 0xef, 0x40, 0x75, 0xe0, 0xb8, 0x3b, 0x03, 0xaf, 0xbb,
+ 0x13, 0x29, 0x04, 0x88, 0x42, 0x1e, 0x14, 0x7e, 0x8f, 0x5a, 0xe0, 0x8e, 0x55, 0x19, 0x38, 0xee,
+ 0x13, 0xaf, 0x6b, 0x09, 0xfd, 0x90, 0x29, 0xf6, 0x61, 0x7c, 0x4a, 0x29, 0x39, 0xc5, 0x3e, 0x54,
+ 0xa7, 0xbc, 0x0f, 0xe7, 0x08, 0x97, 0x8e, 0x8f, 0xed, 0x10, 0xcb, 0x59, 0xe5, 0xf8, 0xac, 0xd9,
+ 0x81, 0xe3, 0xae, 0x50, 0x94, 0xd8, 0x44, 0xfb, 0x30, 0x35, 0x71, 0x3a, 0x39, 0xd1, 0x3e, 0x8c,
+ 0x4f, 0x34, 0xdf, 0x87, 0x62, 0x64, 0x17, 0x34, 0x05, 0xe3, 0x1b, 0x9b, 0x1b, 0xad, 0xea, 0x18,
+ 0x02, 0x98, 0x6c, 0x6e, 0xaf, 0xb4, 0x36, 0x56, 0xab, 0x06, 0x2a, 0x41, 0x61, 0xb5, 0xc5, 0x3e,
+ 0x72, 0xf5, 0xc2, 0x17, 0x7c, 0xbf, 0x3d, 0x06, 0x90, 0xa6, 0x40, 0x05, 0xc8, 0x3f, 0x6e, 0x7d,
+ 0x5a, 0x1d, 0x23, 0xc8, 0xcf, 0x5a, 0xd6, 0xf6, 0xda, 0xe6, 0x46, 0xd5, 0x20, 0x54, 0x56, 0xac,
+ 0x56, 0xb3, 0xdd, 0xaa, 0xe6, 0x08, 0xc6, 0x93, 0xcd, 0xd5, 0x6a, 0x1e, 0x15, 0x61, 0xe2, 0x59,
+ 0x73, 0xfd, 0x69, 0xab, 0x3a, 0x1e, 0x11, 0x93, 0xbb, 0xf8, 0x4f, 0x0c, 0x98, 0xe6, 0xe6, 0x66,
+ 0xbe, 0x85, 0xee, 0xc2, 0xe4, 0x1e, 0xf5, 0x2f, 0xba, 0x93, 0x4b, 0x4b, 0x97, 0x13, 0x7b, 0x23,
+ 0xe6, 0x83, 0x16, 0xc7, 0x45, 0x26, 0xe4, 0xf7, 0x47, 0x41, 0x2d, 0xb7, 0x90, 0xbf, 0x51, 0x5a,
+ 0xaa, 0x2e, 0xb2, 0x48, 0xb2, 0xf8, 0x18, 0x1f, 0x3d, 0xb3, 0xfb, 0x07, 0xd8, 0x22, 0x40, 0x84,
+ 0x60, 0x7c, 0xe0, 0xf9, 0x98, 0x6e, 0xf8, 0x29, 0x8b, 0xfe, 0x26, 0x5e, 0x40, 0x6d, 0xce, 0x37,
+ 0x3b, 0xfb, 0x90, 0xe2, 0xfd, 0x87, 0x01, 0xb0, 0x75, 0x10, 0x66, 0xbb, 0xd8, 0x1c, 0x4c, 0x8c,
+ 0x08, 0x07, 0xee, 0x5e, 0xec, 0x83, 0xfa, 0x16, 0xb6, 0x03, 0x1c, 0xf9, 0x16, 0xf9, 0x40, 0x0b,
+ 0x50, 0x18, 0xfa, 0x78, 0xb4, 0xb3, 0x3f, 0xa2, 0xdc, 0xa6, 0xa4, 0x9d, 0x26, 0xc9, 0xf8, 0xe3,
+ 0x11, 0xba, 0x09, 0x65, 0xa7, 0xe7, 0x7a, 0x3e, 0xde, 0x61, 0x44, 0x27, 0x54, 0xb4, 0x25, 0xab,
+ 0xc4, 0x80, 0x74, 0x49, 0x0a, 0x2e, 0x63, 0x35, 0xa9, 0xc5, 0x5d, 0x27, 0x30, 0xb9, 0x9e, 0xcf,
+ 0x0d, 0x28, 0xd1, 0xf5, 0x9c, 0x4a, 0xd9, 0x4b, 0x72, 0x21, 0x39, 0x3a, 0x2d, 0xa5, 0xf0, 0xd4,
+ 0xd2, 0xa4, 0x08, 0x2e, 0xa0, 0x55, 0xdc, 0xc7, 0x21, 0x3e, 0x4d, 0xf0, 0x52, 0x54, 0x99, 0xd7,
+ 0xaa, 0x52, 0xf2, 0xfb, 0x73, 0x03, 0xce, 0xc5, 0x18, 0x9e, 0x6a, 0xe9, 0x35, 0x28, 0x74, 0x29,
+ 0x31, 0x26, 0x53, 0xde, 0x12, 0x9f, 0xe8, 0x2e, 0x4c, 0x71, 0x91, 0x82, 0x5a, 0x5e, 0xbf, 0x0d,
+ 0xa5, 0x94, 0x05, 0x26, 0x65, 0x20, 0xc5, 0xfc, 0xc7, 0x1c, 0x14, 0xb9, 0x32, 0x36, 0x87, 0xa8,
+ 0x09, 0xd3, 0x3e, 0xfb, 0xd8, 0xa1, 0x6b, 0xe6, 0x32, 0xd6, 0xb3, 0xe3, 0xe4, 0xa3, 0x31, 0xab,
+ 0xcc, 0xa7, 0xd0, 0x61, 0xf4, 0x2b, 0x50, 0x12, 0x24, 0x86, 0x07, 0x21, 0x37, 0x54, 0x2d, 0x4e,
+ 0x40, 0x6e, 0xed, 0x47, 0x63, 0x16, 0x70, 0xf4, 0xad, 0x83, 0x10, 0xb5, 0x61, 0x4e, 0x4c, 0x66,
+ 0xeb, 0xe3, 0x62, 0xe4, 0x29, 0x95, 0x85, 0x38, 0x95, 0xb4, 0x39, 0x1f, 0x8d, 0x59, 0x88, 0xcf,
+ 0x57, 0x80, 0x68, 0x55, 0x8a, 0x14, 0x1e, 0xb2, 0xfc, 0x92, 0x12, 0xa9, 0x7d, 0xe8, 0x72, 0x22,
+ 0x42, 0x5b, 0xcb, 0x8a, 0x6c, 0xed, 0x43, 0x37, 0x52, 0xd9, 0x83, 0x22, 0x14, 0xf8, 0xb0, 0xf9,
+ 0xef, 0x39, 0x00, 0x61, 0xb1, 0xcd, 0x21, 0x5a, 0x85, 0x8a, 0xcf, 0xbf, 0x62, 0xfa, 0x7b, 0x4d,
+ 0xab, 0x3f, 0x6e, 0xe8, 0x31, 0x6b, 0x5a, 0x4c, 0x62, 0xe2, 0x7e, 0x08, 0xe5, 0x88, 0x8a, 0x54,
+ 0xe1, 0x25, 0x8d, 0x0a, 0x23, 0x0a, 0x25, 0x31, 0x81, 0x28, 0xf1, 0x13, 0x38, 0x1f, 0xcd, 0xd7,
+ 0x68, 0xf1, 0xf5, 0x63, 0xb4, 0x18, 0x11, 0x3c, 0x27, 0x28, 0xa8, 0x7a, 0x7c, 0xa8, 0x08, 0x26,
+ 0x15, 0x79, 0x49, 0xa3, 0x48, 0x86, 0xa4, 0x6a, 0x32, 0x92, 0x30, 0xa6, 0x4a, 0x20, 0x69, 0x9f,
+ 0x8d, 0x9b, 0x7f, 0x39, 0x0e, 0x85, 0x15, 0x6f, 0x30, 0xb4, 0x7d, 0xb2, 0x89, 0x26, 0x7d, 0x1c,
+ 0x1c, 0xf4, 0x43, 0xaa, 0xc0, 0xca, 0xd2, 0xb5, 0x38, 0x0f, 0x8e, 0x26, 0xfe, 0xb5, 0x28, 0xaa,
+ 0xc5, 0xa7, 0x90, 0xc9, 0x3c, 0xcb, 0xe7, 0x5e, 0x61, 0x32, 0xcf, 0xf1, 0x7c, 0x8a, 0x08, 0x08,
+ 0x79, 0x19, 0x10, 0xea, 0x50, 0xe0, 0x07, 0x3c, 0x16, 0xac, 0x1f, 0x8d, 0x59, 0x62, 0x00, 0xbd,
+ 0x0d, 0x33, 0xc9, 0x54, 0x38, 0xc1, 0x71, 0x2a, 0x9d, 0x78, 0xe6, 0xbc, 0x06, 0xe5, 0x58, 0x86,
+ 0x9e, 0xe4, 0x78, 0xa5, 0x81, 0x92, 0x97, 0x2f, 0x88, 0xb0, 0x4e, 0x8e, 0x15, 0xe5, 0x47, 0x63,
+ 0x22, 0xb0, 0x5f, 0x15, 0x81, 0x7d, 0x4a, 0x4d, 0xb4, 0x44, 0xaf, 0x3c, 0xc6, 0xbf, 0xa1, 0x46,
+ 0xad, 0x6f, 0x91, 0xc9, 0x11, 0x92, 0x0c, 0x5f, 0xa6, 0x05, 0xd3, 0x31, 0x95, 0x91, 0x1c, 0xd9,
+ 0xfa, 0xf8, 0x69, 0x73, 0x9d, 0x25, 0xd4, 0x87, 0x34, 0x87, 0x5a, 0x55, 0x83, 0x24, 0xe8, 0xf5,
+ 0xd6, 0xf6, 0x76, 0x35, 0x87, 0x2e, 0x40, 0x71, 0x63, 0xb3, 0xbd, 0xc3, 0xb0, 0xf2, 0xf5, 0xc2,
+ 0x1f, 0xb3, 0x48, 0x22, 0xf3, 0xf3, 0xa7, 0x11, 0x4d, 0x9e, 0xa2, 0x95, 0xcc, 0x3c, 0xa6, 0x64,
+ 0x66, 0x43, 0x64, 0xe6, 0x9c, 0xcc, 0xcc, 0x79, 0x84, 0x60, 0x62, 0xbd, 0xd5, 0xdc, 0xa6, 0x49,
+ 0x9a, 0x91, 0x5e, 0x4e, 0x67, 0xeb, 0x07, 0x15, 0x28, 0x33, 0xf3, 0xec, 0x1c, 0xb8, 0xe4, 0x30,
+ 0xf1, 0x57, 0x06, 0x80, 0x74, 0x58, 0xd4, 0x80, 0x42, 0x87, 0x89, 0x50, 0x33, 0x68, 0x04, 0x3c,
+ 0xaf, 0xb5, 0xb8, 0x25, 0xb0, 0xd0, 0x1d, 0x28, 0x04, 0x07, 0x9d, 0x0e, 0x0e, 0x44, 0xe6, 0xbe,
+ 0x98, 0x0c, 0xc2, 0x3c, 0x20, 0x5a, 0x02, 0x8f, 0x4c, 0x79, 0x61, 0x3b, 0xfd, 0x03, 0x9a, 0xc7,
+ 0x8f, 0x9f, 0xc2, 0xf1, 0x64, 0x8c, 0xfd, 0x33, 0x03, 0x4a, 0x8a, 0x5b, 0xfc, 0x82, 0x29, 0xe0,
+ 0x32, 0x14, 0xa9, 0x30, 0xb8, 0xcb, 0x93, 0xc0, 0x94, 0x25, 0x07, 0xd0, 0x7b, 0x50, 0x14, 0x9e,
+ 0x24, 0xf2, 0x40, 0x4d, 0x4f, 0x76, 0x73, 0x68, 0x49, 0x54, 0x29, 0x64, 0x1b, 0x66, 0xa9, 0x9e,
+ 0x3a, 0xe4, 0xf6, 0x21, 0x34, 0xab, 0x1e, 0xcb, 0x8d, 0xc4, 0xb1, 0xbc, 0x0e, 0x53, 0xc3, 0xbd,
+ 0xa3, 0xc0, 0xe9, 0xd8, 0x7d, 0x2e, 0x4e, 0xf4, 0x2d, 0xa9, 0x6e, 0x03, 0x52, 0xa9, 0x9e, 0x46,
+ 0x01, 0x92, 0xe8, 0x05, 0x28, 0x3d, 0xb2, 0x83, 0x3d, 0x2e, 0xa4, 0x1c, 0xbf, 0x0b, 0xd3, 0x64,
+ 0xfc, 0xf1, 0xb3, 0x57, 0x10, 0x5f, 0xcc, 0x5a, 0x36, 0xff, 0xc9, 0x80, 0x8a, 0x98, 0x76, 0x2a,
+ 0x03, 0x21, 0x18, 0xdf, 0xb3, 0x83, 0x3d, 0xaa, 0x8c, 0x69, 0x8b, 0xfe, 0x46, 0x6f, 0x43, 0xb5,
+ 0xc3, 0xd6, 0xbf, 0x93, 0xb8, 0x77, 0xcd, 0xf0, 0xf1, 0xc8, 0xf7, 0xdf, 0x85, 0x69, 0x32, 0x65,
+ 0x27, 0x7e, 0x0f, 0x12, 0x6e, 0xfc, 0x9e, 0x55, 0xde, 0xa3, 0x6b, 0x4e, 0x8a, 0x6f, 0x43, 0x99,
+ 0x29, 0xe3, 0xac, 0x65, 0x97, 0x7a, 0xad, 0xc3, 0xcc, 0xb6, 0x6b, 0x0f, 0x83, 0x3d, 0x2f, 0x4c,
+ 0xe8, 0x7c, 0xd9, 0xfc, 0x3b, 0x03, 0xaa, 0x12, 0x78, 0x2a, 0x19, 0xde, 0x82, 0x19, 0x1f, 0x0f,
+ 0x6c, 0xc7, 0x75, 0xdc, 0xde, 0xce, 0xee, 0x51, 0x88, 0x03, 0x7e, 0x7d, 0xad, 0x44, 0xc3, 0x0f,
+ 0xc8, 0x28, 0x11, 0x76, 0xb7, 0xef, 0xed, 0xf2, 0x20, 0x4d, 0x7f, 0xa3, 0xd7, 0xe3, 0x51, 0xba,
+ 0x28, 0xf5, 0x26, 0xc6, 0xa5, 0xcc, 0x3f, 0xcd, 0x41, 0xf9, 0x13, 0x3b, 0xec, 0x88, 0x1d, 0x84,
+ 0xd6, 0xa0, 0x12, 0x85, 0x71, 0x3a, 0xc2, 0xe5, 0x4e, 0x1c, 0x38, 0xe8, 0x1c, 0x71, 0xaf, 0x11,
+ 0x07, 0x8e, 0xe9, 0x8e, 0x3a, 0x40, 0x49, 0xd9, 0x6e, 0x07, 0xf7, 0x23, 0x52, 0xb9, 0x6c, 0x52,
+ 0x14, 0x51, 0x25, 0xa5, 0x0e, 0xa0, 0x6f, 0x43, 0x75, 0xe8, 0x7b, 0x3d, 0x1f, 0x07, 0x41, 0x44,
+ 0x8c, 0xa5, 0x70, 0x53, 0x43, 0x6c, 0x8b, 0xa3, 0x26, 0x4e, 0x31, 0x77, 0x1f, 0x8d, 0x59, 0x33,
+ 0xc3, 0x38, 0x4c, 0x06, 0xd6, 0x19, 0x79, 0xde, 0x63, 0x91, 0xf5, 0x47, 0x79, 0x40, 0xe9, 0x65,
+ 0x7e, 0xd5, 0x63, 0xf2, 0x75, 0xa8, 0x04, 0xa1, 0xed, 0xa7, 0xf6, 0xfc, 0x34, 0x1d, 0x8d, 0x76,
+ 0xfc, 0x5b, 0x10, 0x49, 0xb6, 0xe3, 0x7a, 0xa1, 0xf3, 0xe2, 0x88, 0x5d, 0x50, 0xac, 0x8a, 0x18,
+ 0xde, 0xa0, 0xa3, 0x68, 0x03, 0x0a, 0x2f, 0x9c, 0x7e, 0x88, 0xfd, 0xa0, 0x36, 0xb1, 0x90, 0xbf,
+ 0x51, 0x59, 0x7a, 0xe7, 0x24, 0xc3, 0x2c, 0x7e, 0x44, 0xf1, 0xdb, 0x47, 0x43, 0xf5, 0xf4, 0xcb,
+ 0x89, 0xa8, 0xc7, 0xf8, 0x49, 0xfd, 0x8d, 0xc8, 0x84, 0xa9, 0x97, 0x84, 0xe8, 0x8e, 0xd3, 0xa5,
+ 0xb9, 0x38, 0xf2, 0xc3, 0xbb, 0x56, 0x81, 0x02, 0xd6, 0xba, 0xe8, 0x1a, 0x4c, 0xbd, 0xf0, 0xed,
+ 0xde, 0x00, 0xbb, 0x21, 0xbb, 0xe5, 0x4b, 0x9c, 0x08, 0x60, 0x2e, 0x02, 0x48, 0x51, 0x48, 0xe6,
+ 0xdb, 0xd8, 0xdc, 0x7a, 0xda, 0xae, 0x8e, 0xa1, 0x32, 0x4c, 0x6d, 0x6c, 0xae, 0xb6, 0xd6, 0x5b,
+ 0x24, 0x37, 0x8a, 0x9c, 0x77, 0x47, 0x3a, 0x5d, 0x53, 0x18, 0x22, 0xb6, 0x27, 0x54, 0xb9, 0x8c,
+ 0xf8, 0xa5, 0x5b, 0xc8, 0x25, 0x48, 0xdc, 0x31, 0xaf, 0xc2, 0x9c, 0x6e, 0x6b, 0x08, 0x84, 0xbb,
+ 0xe6, 0xbf, 0xe6, 0x60, 0x9a, 0x3b, 0xc2, 0xa9, 0x3c, 0xf7, 0x92, 0x22, 0x15, 0xbf, 0x9e, 0x08,
+ 0x25, 0xd5, 0xa0, 0xc0, 0x1c, 0xa4, 0xcb, 0xef, 0xbf, 0xe2, 0x93, 0x04, 0x67, 0xb6, 0xdf, 0x71,
+ 0x97, 0x9b, 0x3d, 0xfa, 0xd6, 0x86, 0xcd, 0x89, 0xcc, 0xb0, 0x19, 0x39, 0x9c, 0x1d, 0xf0, 0x83,
+ 0x55, 0x51, 0x9a, 0xa2, 0x2c, 0x9c, 0x8a, 0x00, 0x63, 0x36, 0x2b, 0x64, 0xd8, 0x0c, 0x5d, 0x87,
+ 0x49, 0x3c, 0xc2, 0x6e, 0x18, 0xd4, 0x4a, 0x34, 0x91, 0x4e, 0x8b, 0x0b, 0x55, 0x8b, 0x8c, 0x5a,
+ 0x1c, 0x28, 0x4d, 0xf5, 0x21, 0xcc, 0xd2, 0xfb, 0xee, 0x43, 0xdf, 0x76, 0xd5, 0x3b, 0x7b, 0xbb,
+ 0xbd, 0xce, 0xd3, 0x0e, 0xf9, 0x89, 0x2a, 0x90, 0x5b, 0x5b, 0xe5, 0xfa, 0xc9, 0xad, 0xad, 0xca,
+ 0xf9, 0xbf, 0x6f, 0x00, 0x52, 0x09, 0x9c, 0xca, 0x16, 0x09, 0x2e, 0x42, 0x8e, 0xbc, 0x94, 0x63,
+ 0x0e, 0x26, 0xb0, 0xef, 0x7b, 0x3e, 0x0b, 0x94, 0x16, 0xfb, 0x90, 0xd2, 0xdc, 0xe2, 0xc2, 0x58,
+ 0x78, 0xe4, 0xed, 0x47, 0x11, 0x80, 0x91, 0x35, 0xd2, 0xc2, 0xb7, 0xe1, 0x5c, 0x0c, 0xfd, 0x6c,
+ 0x52, 0xfc, 0x26, 0xcc, 0x50, 0xaa, 0x2b, 0x7b, 0xb8, 0xb3, 0x3f, 0xf4, 0x1c, 0x37, 0x25, 0x01,
+ 0xba, 0x46, 0x62, 0x97, 0x48, 0x17, 0x64, 0x89, 0x6c, 0xcd, 0xe5, 0x68, 0xb0, 0xdd, 0x5e, 0x97,
+ 0x5b, 0x7d, 0x17, 0x2e, 0x24, 0x08, 0x8a, 0x95, 0xfd, 0x2a, 0x94, 0x3a, 0xd1, 0x60, 0xc0, 0x4f,
+ 0x90, 0x57, 0xe2, 0xe2, 0x26, 0xa7, 0xaa, 0x33, 0x24, 0x8f, 0x6f, 0xc3, 0xc5, 0x14, 0x8f, 0xb3,
+ 0x50, 0xc7, 0x5d, 0xf3, 0x36, 0x9c, 0xa7, 0x94, 0x1f, 0x63, 0x3c, 0x6c, 0xf6, 0x9d, 0xd1, 0xc9,
+ 0x66, 0x39, 0xe2, 0xeb, 0x55, 0x66, 0x7c, 0xbd, 0xdb, 0x4a, 0xb2, 0x6e, 0x71, 0xd6, 0x6d, 0x67,
+ 0x80, 0xdb, 0xde, 0x7a, 0xb6, 0xb4, 0x24, 0x91, 0xef, 0xe3, 0xa3, 0x80, 0x1f, 0x1f, 0xe9, 0x6f,
+ 0x19, 0xbd, 0xfe, 0xc6, 0xe0, 0xea, 0x54, 0xe9, 0x7c, 0xcd, 0xae, 0x31, 0x0f, 0xd0, 0x23, 0x3e,
+ 0x88, 0xbb, 0x04, 0xc0, 0x6a, 0x73, 0xca, 0x48, 0x24, 0x30, 0xc9, 0x42, 0xe5, 0xa4, 0xc0, 0x57,
+ 0xb8, 0xe3, 0xd0, 0xff, 0x04, 0xa9, 0x93, 0xd2, 0x9b, 0x50, 0xa2, 0x90, 0xed, 0xd0, 0x0e, 0x0f,
+ 0x82, 0x2c, 0xcb, 0x2d, 0x9b, 0x3f, 0x32, 0xb8, 0x47, 0x09, 0x3a, 0xa7, 0x5a, 0xf3, 0x1d, 0x98,
+ 0xa4, 0x37, 0x44, 0x71, 0xd3, 0xb9, 0xa4, 0xd9, 0xd8, 0x4c, 0x22, 0x8b, 0x23, 0x2a, 0xe7, 0x24,
+ 0x03, 0x26, 0x9f, 0xd0, 0xce, 0x81, 0x22, 0xed, 0xb8, 0xb0, 0x9c, 0x6b, 0x0f, 0x58, 0xf9, 0xb1,
+ 0x68, 0xd1, 0xdf, 0xf4, 0x42, 0x80, 0xb1, 0xff, 0xd4, 0x5a, 0x67, 0x37, 0x90, 0xa2, 0x15, 0x7d,
+ 0x13, 0xc5, 0x76, 0xfa, 0x0e, 0x76, 0x43, 0x0a, 0x1d, 0xa7, 0x50, 0x65, 0x04, 0x5d, 0x87, 0xa2,
+ 0x13, 0xac, 0x63, 0xdb, 0x77, 0x79, 0x89, 0x5f, 0x09, 0xcc, 0x12, 0x22, 0xf7, 0xd8, 0x77, 0xa0,
+ 0xca, 0x24, 0x6b, 0x76, 0xbb, 0xca, 0x69, 0x3f, 0xe2, 0x6f, 0x24, 0xf8, 0xc7, 0xe8, 0xe7, 0x4e,
+ 0xa6, 0xff, 0xb7, 0x06, 0xcc, 0x2a, 0x0c, 0x4e, 0x65, 0x82, 0x77, 0x61, 0x92, 0xf5, 0x5f, 0xf8,
+ 0x51, 0x70, 0x2e, 0x3e, 0x8b, 0xb1, 0xb1, 0x38, 0x0e, 0x5a, 0x84, 0x02, 0xfb, 0x25, 0xae, 0x71,
+ 0x7a, 0x74, 0x81, 0x24, 0x45, 0x5e, 0x84, 0x73, 0x1c, 0x86, 0x07, 0x9e, 0xce, 0xe7, 0xc6, 0xe3,
+ 0x11, 0xe2, 0x87, 0x06, 0xcc, 0xc5, 0x27, 0x9c, 0x6a, 0x95, 0x8a, 0xdc, 0xb9, 0xaf, 0x24, 0xf7,
+ 0xaf, 0x09, 0xb9, 0x9f, 0x0e, 0xbb, 0xca, 0x91, 0x33, 0xb9, 0xe3, 0x54, 0xeb, 0xe6, 0xe2, 0xd6,
+ 0x95, 0xb4, 0x7e, 0x12, 0xad, 0x49, 0x10, 0x3b, 0xd5, 0x9a, 0xde, 0x7f, 0xa5, 0x35, 0x29, 0x47,
+ 0xb0, 0xd4, 0xe2, 0xd6, 0xc4, 0x36, 0x5a, 0x77, 0x82, 0x28, 0xe3, 0xbc, 0x03, 0xe5, 0xbe, 0xe3,
+ 0x62, 0xdb, 0xe7, 0x3d, 0x24, 0x43, 0xdd, 0x8f, 0xf7, 0xac, 0x18, 0x50, 0x92, 0xfa, 0x6d, 0x03,
+ 0x90, 0x4a, 0xeb, 0x9b, 0xb1, 0x56, 0x43, 0x28, 0x78, 0xcb, 0xf7, 0x06, 0x5e, 0x78, 0xd2, 0x36,
+ 0xbb, 0x6b, 0xfe, 0xae, 0x01, 0xe7, 0x13, 0x33, 0xbe, 0x09, 0xc9, 0xef, 0x9a, 0x97, 0x61, 0x76,
+ 0x15, 0x8b, 0x33, 0x5e, 0xaa, 0x76, 0xb0, 0x0d, 0x48, 0x85, 0x9e, 0xcd, 0x29, 0xe6, 0x97, 0x60,
+ 0xf6, 0x89, 0x37, 0x22, 0x81, 0x9c, 0x80, 0x65, 0x98, 0x62, 0xc5, 0xac, 0x48, 0x5f, 0xd1, 0xb7,
+ 0x0c, 0xbd, 0xdb, 0x80, 0xd4, 0x99, 0x67, 0x21, 0xce, 0xb2, 0xf9, 0xbf, 0x06, 0x94, 0x9b, 0x7d,
+ 0xdb, 0x1f, 0x08, 0x51, 0x3e, 0x84, 0x49, 0x56, 0x99, 0xe1, 0x65, 0xd6, 0x37, 0xe3, 0xf4, 0x54,
+ 0x5c, 0xf6, 0xd1, 0x64, 0x75, 0x1c, 0x3e, 0x8b, 0x2c, 0x85, 0x77, 0x96, 0x57, 0x13, 0x9d, 0xe6,
+ 0x55, 0x74, 0x0b, 0x26, 0x6c, 0x32, 0x85, 0xa6, 0xd7, 0x4a, 0xb2, 0x5c, 0x46, 0xa9, 0x91, 0x2b,
+ 0x91, 0xc5, 0xb0, 0xcc, 0x0f, 0xa0, 0xa4, 0x70, 0x40, 0x05, 0xc8, 0x3f, 0x6c, 0xf1, 0x6b, 0x52,
+ 0x73, 0xa5, 0xbd, 0xf6, 0x8c, 0x95, 0x10, 0x2b, 0x00, 0xab, 0xad, 0xe8, 0x3b, 0xa7, 0x69, 0xec,
+ 0xd9, 0x9c, 0x0e, 0xcf, 0x5b, 0xaa, 0x84, 0x46, 0x96, 0x84, 0xb9, 0x57, 0x91, 0x50, 0xb2, 0xf8,
+ 0x2d, 0x03, 0xa6, 0xb9, 0x6a, 0x4e, 0x9b, 0x9a, 0x29, 0xe5, 0x8c, 0xd4, 0xac, 0x2c, 0xc3, 0xe2,
+ 0x88, 0x52, 0x86, 0x7f, 0x36, 0xa0, 0xba, 0xea, 0xbd, 0x74, 0x7b, 0xbe, 0xdd, 0x8d, 0x7c, 0xf0,
+ 0xa3, 0x84, 0x39, 0x17, 0x13, 0x95, 0xfe, 0x04, 0xbe, 0x1c, 0x48, 0x98, 0xb5, 0x26, 0x6b, 0x29,
+ 0x2c, 0xbf, 0x8b, 0x4f, 0xf3, 0x5b, 0x30, 0x93, 0x98, 0x44, 0x0c, 0xf4, 0xac, 0xb9, 0xbe, 0xb6,
+ 0x4a, 0x0c, 0x42, 0xeb, 0xbd, 0xad, 0x8d, 0xe6, 0x83, 0xf5, 0x16, 0xef, 0xca, 0x36, 0x37, 0x56,
+ 0x5a, 0xeb, 0xd2, 0x50, 0xf7, 0xc4, 0x0a, 0xee, 0x99, 0x7d, 0x98, 0x55, 0x04, 0x3a, 0x6d, 0x73,
+ 0x4c, 0x2f, 0xaf, 0xe4, 0x56, 0x83, 0x69, 0x7e, 0xca, 0x49, 0x3a, 0xfe, 0x7f, 0xe7, 0xa1, 0x22,
+ 0x40, 0x5f, 0x8f, 0x14, 0xe8, 0x02, 0x4c, 0x76, 0x77, 0xb7, 0x9d, 0xcf, 0x44, 0x5f, 0x96, 0x7f,
+ 0x91, 0xf1, 0x3e, 0xe3, 0xc3, 0x5e, 0x5b, 0xf0, 0x2f, 0x74, 0x99, 0x3d, 0xc4, 0x58, 0x73, 0xbb,
+ 0xf8, 0x90, 0x1e, 0x86, 0xc6, 0x2d, 0x39, 0x40, 0x8b, 0x9a, 0xfc, 0x55, 0x06, 0xbd, 0xeb, 0x2a,
+ 0xaf, 0x34, 0xd0, 0x32, 0x54, 0xc9, 0xef, 0xe6, 0x70, 0xd8, 0x77, 0x70, 0x97, 0x11, 0x20, 0xd7,
+ 0xdc, 0x71, 0x79, 0xda, 0x49, 0x21, 0xa0, 0xab, 0x30, 0x49, 0xaf, 0x80, 0x41, 0x6d, 0x8a, 0xe4,
+ 0x55, 0x89, 0xca, 0x87, 0xd1, 0xdb, 0x50, 0x62, 0x12, 0xaf, 0xb9, 0x4f, 0x03, 0x4c, 0xdf, 0x2c,
+ 0x28, 0xf5, 0x10, 0x15, 0x16, 0x3f, 0x67, 0x41, 0xd6, 0x39, 0x0b, 0x35, 0xa0, 0x12, 0x84, 0x9e,
+ 0x6f, 0xf7, 0xf0, 0x33, 0xae, 0xb2, 0x52, 0xbc, 0x68, 0x97, 0x00, 0x4b, 0x11, 0x3e, 0x3e, 0xf0,
+ 0x42, 0x3b, 0xfe, 0x50, 0xe1, 0x3d, 0x4b, 0x85, 0x49, 0xcb, 0x5e, 0x86, 0xd9, 0xe6, 0x41, 0xb8,
+ 0xd7, 0x72, 0x49, 0x1e, 0x4d, 0xd9, 0xfd, 0x0a, 0x20, 0x02, 0x5d, 0x75, 0x02, 0x2d, 0x98, 0x4f,
+ 0xd6, 0x6e, 0x9a, 0x7b, 0xe6, 0x06, 0x9c, 0x23, 0x50, 0xec, 0x86, 0x4e, 0x47, 0x39, 0xb3, 0x88,
+ 0x53, 0xb1, 0x91, 0x38, 0x15, 0xdb, 0x41, 0xf0, 0xd2, 0xf3, 0xbb, 0x7c, 0x5f, 0x44, 0xdf, 0x92,
+ 0xdb, 0x3f, 0x18, 0x4c, 0x9a, 0xa7, 0x41, 0xec, 0x44, 0xfb, 0x15, 0xe9, 0xa1, 0x5f, 0x86, 0x02,
+ 0x7f, 0x49, 0xc4, 0x0b, 0x85, 0x17, 0x16, 0xd9, 0x0b, 0xa6, 0x45, 0x4e, 0x78, 0x93, 0x41, 0x95,
+ 0x62, 0x16, 0xc7, 0x27, 0x16, 0xd9, 0xb3, 0x83, 0x3d, 0xdc, 0xdd, 0x12, 0xc4, 0x63, 0x65, 0xd4,
+ 0x7b, 0x56, 0x02, 0x2c, 0x65, 0xbf, 0x23, 0x45, 0x7f, 0x88, 0xc3, 0x63, 0x44, 0x57, 0x0b, 0xf5,
+ 0xe7, 0xc5, 0x14, 0xde, 0x5f, 0x7c, 0x95, 0x59, 0x3f, 0x36, 0xe0, 0x8a, 0x98, 0xb6, 0xb2, 0x67,
+ 0xbb, 0x3d, 0x2c, 0x84, 0xf9, 0x45, 0xf5, 0x95, 0x5e, 0x74, 0xfe, 0x15, 0x17, 0xfd, 0x18, 0x6a,
+ 0xd1, 0xa2, 0x69, 0xd1, 0xc6, 0xeb, 0xab, 0x8b, 0x38, 0x08, 0x78, 0xf0, 0x28, 0x5a, 0xf4, 0x37,
+ 0x19, 0xf3, 0xbd, 0x7e, 0x74, 0x5f, 0x22, 0xbf, 0x25, 0xb1, 0x75, 0xb8, 0x24, 0x88, 0xf1, 0x2a,
+ 0x4a, 0x9c, 0x5a, 0x6a, 0x4d, 0xc7, 0x52, 0xe3, 0xf6, 0x20, 0x34, 0x8e, 0xdf, 0x4a, 0xda, 0x29,
+ 0x71, 0x13, 0x52, 0x2e, 0x86, 0x8e, 0xcb, 0x3c, 0xf3, 0x00, 0x22, 0xb3, 0x72, 0xb4, 0x4d, 0xc1,
+ 0x09, 0x49, 0x2d, 0x9c, 0x6f, 0x01, 0x02, 0x4f, 0x6d, 0x81, 0x6c, 0xae, 0x18, 0xe6, 0x23, 0x41,
+ 0x89, 0xda, 0xb7, 0xb0, 0x3f, 0x70, 0x82, 0x40, 0xe9, 0x58, 0xe9, 0xd4, 0xf5, 0x26, 0x8c, 0x0f,
+ 0x31, 0xcf, 0xf3, 0xa5, 0x25, 0x24, 0x7c, 0x42, 0x99, 0x4c, 0xe1, 0x92, 0xcd, 0x00, 0xae, 0x0a,
+ 0x36, 0xcc, 0x20, 0x5a, 0x3e, 0x49, 0x31, 0x45, 0x95, 0x3c, 0x97, 0x51, 0x25, 0xcf, 0xc7, 0xab,
+ 0xe4, 0xb1, 0xb3, 0xa7, 0x1a, 0xa8, 0xce, 0xe6, 0xec, 0xd9, 0x66, 0x06, 0x88, 0xe2, 0xdb, 0xd9,
+ 0x50, 0xfd, 0x03, 0x1e, 0xa8, 0xce, 0x2a, 0x63, 0x62, 0xba, 0x66, 0xd1, 0xcf, 0x14, 0x9f, 0xc8,
+ 0x84, 0x32, 0x31, 0x92, 0xa5, 0xb6, 0x0f, 0xc6, 0xad, 0xd8, 0x98, 0x0c, 0xc6, 0xfb, 0x30, 0x17,
+ 0x0f, 0xc6, 0xa7, 0x12, 0x6a, 0x0e, 0x26, 0x42, 0x6f, 0x1f, 0x8b, 0x24, 0xce, 0x3e, 0x52, 0x6a,
+ 0x8d, 0x02, 0xf5, 0xd9, 0xa8, 0xf5, 0xbb, 0x92, 0x2a, 0x75, 0xc0, 0xd3, 0xae, 0x80, 0x6c, 0x47,
+ 0x71, 0x4d, 0x66, 0x1f, 0x92, 0xd7, 0x27, 0x70, 0x21, 0x19, 0x7c, 0xcf, 0x66, 0x11, 0x3b, 0xcc,
+ 0x39, 0x75, 0xe1, 0xf9, 0x6c, 0x18, 0x3c, 0x97, 0x71, 0x52, 0x09, 0xba, 0x67, 0x43, 0xfb, 0xd7,
+ 0xa1, 0xae, 0x8b, 0xc1, 0x67, 0xea, 0x8b, 0x51, 0x48, 0x3e, 0x1b, 0xaa, 0x3f, 0x34, 0x24, 0x59,
+ 0x75, 0xd7, 0x7c, 0xf0, 0x55, 0xc8, 0x8a, 0x5c, 0x77, 0x3b, 0xda, 0x3e, 0x8d, 0x28, 0x5a, 0xe6,
+ 0xf5, 0xd1, 0x52, 0x4e, 0xa1, 0x88, 0xc2, 0xff, 0x64, 0xa8, 0xff, 0x3a, 0x77, 0x2f, 0x67, 0x26,
+ 0xf3, 0xce, 0x69, 0x99, 0x91, 0xf4, 0x1c, 0x31, 0xa3, 0x1f, 0x29, 0x57, 0x51, 0x93, 0xd4, 0xd9,
+ 0x98, 0xee, 0x37, 0x64, 0x82, 0x49, 0xe5, 0xb1, 0xb3, 0xe1, 0x60, 0xc3, 0x42, 0x76, 0x0a, 0x3b,
+ 0x13, 0x16, 0x37, 0x9b, 0x50, 0x8c, 0x2e, 0xc9, 0xca, 0x93, 0xde, 0x12, 0x14, 0x36, 0x36, 0xb7,
+ 0xb7, 0x9a, 0x2b, 0xe4, 0x0e, 0x38, 0x07, 0x85, 0x95, 0x4d, 0xcb, 0x7a, 0xba, 0xd5, 0x26, 0x97,
+ 0xc0, 0xe4, 0x0b, 0x9f, 0xa5, 0x9f, 0xe5, 0x21, 0xf7, 0xf8, 0x19, 0xfa, 0x14, 0x26, 0xd8, 0x0b,
+ 0xb3, 0x63, 0x1e, 0x1a, 0xd6, 0x8f, 0x7b, 0x44, 0x67, 0x5e, 0xfc, 0xc1, 0x7f, 0xfd, 0xec, 0x0f,
+ 0x73, 0xb3, 0x66, 0xb9, 0x31, 0x5a, 0x6e, 0xec, 0x8f, 0x1a, 0x34, 0xc9, 0xde, 0x37, 0x6e, 0xa2,
+ 0x8f, 0x21, 0xbf, 0x75, 0x10, 0xa2, 0xcc, 0x07, 0x88, 0xf5, 0xec, 0x77, 0x75, 0xe6, 0x79, 0x4a,
+ 0x74, 0xc6, 0x04, 0x4e, 0x74, 0x78, 0x10, 0x12, 0x92, 0xdf, 0x83, 0x92, 0xfa, 0x2a, 0xee, 0xc4,
+ 0x57, 0x89, 0xf5, 0x93, 0x5f, 0xdc, 0x99, 0x57, 0x28, 0xab, 0x8b, 0x26, 0xe2, 0xac, 0xd8, 0xbb,
+ 0x3d, 0x75, 0x15, 0xed, 0x43, 0x17, 0x65, 0xbe, 0x59, 0xac, 0x67, 0x3f, 0xc2, 0x4b, 0xad, 0x22,
+ 0x3c, 0x74, 0x09, 0xc9, 0xef, 0xf2, 0xd7, 0x76, 0x9d, 0x10, 0x5d, 0xd5, 0x3c, 0x97, 0x52, 0x9f,
+ 0x01, 0xd5, 0x17, 0xb2, 0x11, 0x38, 0x93, 0xcb, 0x94, 0xc9, 0x05, 0x73, 0x96, 0x33, 0xe9, 0x44,
+ 0x28, 0xf7, 0x8d, 0x9b, 0x4b, 0x1d, 0x98, 0xa0, 0x6d, 0x66, 0xf4, 0x5c, 0xfc, 0xa8, 0x6b, 0x1a,
+ 0xf8, 0x19, 0x86, 0x8e, 0x35, 0xa8, 0xcd, 0x39, 0xca, 0xa8, 0x62, 0x16, 0x09, 0x23, 0xda, 0x64,
+ 0xbe, 0x6f, 0xdc, 0xbc, 0x61, 0xdc, 0x36, 0x96, 0xfe, 0x7a, 0x02, 0x26, 0x68, 0x3b, 0x03, 0xed,
+ 0x03, 0xc8, 0x76, 0x6a, 0x72, 0x75, 0xa9, 0x4e, 0x6d, 0x72, 0x75, 0xe9, 0x4e, 0xac, 0x59, 0xa7,
+ 0x4c, 0xe7, 0xcc, 0x19, 0xc2, 0x94, 0x76, 0x49, 0x1a, 0xb4, 0x29, 0x44, 0xf4, 0xf8, 0x63, 0x83,
+ 0xf7, 0x75, 0x98, 0x9b, 0x21, 0x1d, 0xb5, 0x58, 0x2b, 0x35, 0xb9, 0x1d, 0x34, 0xdd, 0x53, 0xf3,
+ 0x1e, 0x65, 0xd8, 0x30, 0xab, 0x92, 0xa1, 0x4f, 0x31, 0xee, 0x1b, 0x37, 0x9f, 0xd7, 0xcc, 0x73,
+ 0x5c, 0xcb, 0x09, 0x08, 0xfa, 0x3e, 0x54, 0xe2, 0x4d, 0x3f, 0x74, 0x4d, 0xc3, 0x2b, 0xd9, 0x44,
+ 0xac, 0xbf, 0x71, 0x3c, 0x12, 0x97, 0x69, 0x9e, 0xca, 0xc4, 0x99, 0x33, 0xce, 0xfb, 0x18, 0x0f,
+ 0x6d, 0x82, 0xc4, 0x6d, 0x80, 0xfe, 0xd4, 0xe0, 0x7d, 0x5b, 0xd9, 0xb3, 0x43, 0x3a, 0xea, 0xa9,
+ 0xd6, 0x60, 0xfd, 0xfa, 0x09, 0x58, 0x5c, 0x88, 0x0f, 0xa8, 0x10, 0xef, 0x9b, 0x73, 0x52, 0x88,
+ 0xd0, 0x19, 0xe0, 0xd0, 0xe3, 0x52, 0x3c, 0xbf, 0x6c, 0x5e, 0x8c, 0x29, 0x27, 0x06, 0x95, 0xc6,
+ 0x62, 0xbd, 0x35, 0xad, 0xb1, 0x62, 0xed, 0x3b, 0xad, 0xb1, 0xe2, 0x8d, 0x39, 0x9d, 0xb1, 0x78,
+ 0x27, 0x4d, 0x63, 0xac, 0x08, 0xb2, 0xf4, 0xff, 0xe3, 0x50, 0x58, 0x61, 0xff, 0xd7, 0x0e, 0xf2,
+ 0xa0, 0x18, 0x75, 0x9b, 0xd0, 0xbc, 0xae, 0xa0, 0x2d, 0xaf, 0x72, 0xf5, 0xab, 0x99, 0x70, 0x2e,
+ 0xd0, 0xeb, 0x54, 0xa0, 0xd7, 0xcc, 0x0b, 0x84, 0x33, 0xff, 0x1f, 0x83, 0x1a, 0xac, 0xec, 0xd9,
+ 0xb0, 0xbb, 0x5d, 0xa2, 0x88, 0xdf, 0x84, 0xb2, 0xda, 0xfb, 0x41, 0xaf, 0x6b, 0x8b, 0xe8, 0x6a,
+ 0x23, 0xa9, 0x6e, 0x1e, 0x87, 0xc2, 0x39, 0xbf, 0x41, 0x39, 0xcf, 0x9b, 0x97, 0x34, 0x9c, 0x7d,
+ 0x8a, 0x1a, 0x63, 0xce, 0x9a, 0x34, 0x7a, 0xe6, 0xb1, 0x6e, 0x90, 0x9e, 0x79, 0xbc, 0xc7, 0x73,
+ 0x2c, 0xf3, 0x03, 0x8a, 0x4a, 0x98, 0x07, 0x00, 0xb2, 0x8b, 0x82, 0xb4, 0xba, 0x54, 0x2e, 0xac,
+ 0xc9, 0xe0, 0x90, 0x6e, 0xc0, 0x98, 0x26, 0x65, 0xcb, 0xf7, 0x5d, 0x82, 0x6d, 0xdf, 0x09, 0x42,
+ 0xe6, 0x98, 0xd3, 0xb1, 0x1e, 0x08, 0xd2, 0xae, 0x27, 0xde, 0x52, 0xa9, 0x5f, 0x3b, 0x16, 0x87,
+ 0x73, 0xbf, 0x4e, 0xb9, 0x5f, 0x35, 0xeb, 0x1a, 0xee, 0x43, 0x86, 0x4b, 0x36, 0xdb, 0xe7, 0x05,
+ 0x28, 0x3d, 0xb1, 0x1d, 0x37, 0xc4, 0xae, 0xed, 0x76, 0x30, 0xda, 0x85, 0x09, 0x9a, 0xbb, 0x93,
+ 0x81, 0x58, 0x2d, 0xf9, 0x27, 0x03, 0x71, 0xac, 0xe6, 0x6d, 0x2e, 0x50, 0xc6, 0x75, 0xf3, 0x3c,
+ 0x61, 0x3c, 0x90, 0xa4, 0x1b, 0xac, 0x5a, 0x6e, 0xdc, 0x44, 0x2f, 0x60, 0x92, 0xf7, 0xba, 0x13,
+ 0x84, 0x62, 0x45, 0xb5, 0xfa, 0x65, 0x3d, 0x50, 0xb7, 0x97, 0x55, 0x36, 0x01, 0xc5, 0x23, 0x7c,
+ 0x46, 0x00, 0xb2, 0x75, 0x93, 0xb4, 0x68, 0xaa, 0xe5, 0x53, 0x5f, 0xc8, 0x46, 0xd0, 0xe9, 0x54,
+ 0xe5, 0xd9, 0x8d, 0x70, 0x09, 0xdf, 0xef, 0xc0, 0xf8, 0x23, 0x3b, 0xd8, 0x43, 0x89, 0xdc, 0xab,
+ 0x3c, 0x4d, 0xad, 0xd7, 0x75, 0x20, 0xce, 0xe5, 0x2a, 0xe5, 0x72, 0x89, 0x85, 0x32, 0x95, 0x0b,
+ 0x7d, 0x7c, 0xc9, 0xf4, 0xc7, 0xde, 0xa5, 0x26, 0xf5, 0x17, 0x7b, 0xe4, 0x9a, 0xd4, 0x5f, 0xfc,
+ 0x29, 0x6b, 0xb6, 0xfe, 0x08, 0x97, 0xfd, 0x11, 0xe1, 0x33, 0x84, 0x29, 0xf1, 0x82, 0x13, 0x25,
+ 0xde, 0xbd, 0x24, 0x9e, 0x7d, 0xd6, 0xe7, 0xb3, 0xc0, 0x9c, 0xdb, 0x35, 0xca, 0xed, 0x8a, 0x59,
+ 0x4b, 0x59, 0x8b, 0x63, 0xde, 0x37, 0x6e, 0xde, 0x36, 0xd0, 0xf7, 0x01, 0x64, 0x77, 0x2b, 0xe5,
+ 0x83, 0xc9, 0x8e, 0x59, 0xca, 0x07, 0x53, 0x8d, 0x31, 0x73, 0x91, 0xf2, 0xbd, 0x61, 0x5e, 0x4b,
+ 0xf2, 0x0d, 0x7d, 0xdb, 0x0d, 0x5e, 0x60, 0xff, 0x16, 0x2b, 0xad, 0x07, 0x7b, 0xce, 0x90, 0x2c,
+ 0xd9, 0x87, 0x62, 0xd4, 0x7c, 0x48, 0xc6, 0xdb, 0x64, 0x9b, 0x24, 0x19, 0x6f, 0x53, 0x5d, 0x8b,
+ 0x78, 0xe0, 0x89, 0xed, 0x17, 0x81, 0x4a, 0x5c, 0xf0, 0x2f, 0xaa, 0x30, 0x4e, 0x8e, 0xe4, 0xe4,
+ 0x78, 0x22, 0xcb, 0x3d, 0xc9, 0xd5, 0xa7, 0x2a, 0xd6, 0xc9, 0xd5, 0xa7, 0x2b, 0x45, 0xf1, 0xe3,
+ 0x09, 0xb9, 0xae, 0x35, 0x58, 0x1d, 0x85, 0xac, 0xd4, 0x83, 0x92, 0x52, 0x06, 0x42, 0x1a, 0x62,
+ 0xf1, 0x0a, 0x78, 0x32, 0xe1, 0x69, 0x6a, 0x48, 0xe6, 0x6b, 0x94, 0xdf, 0x79, 0x96, 0xf0, 0x28,
+ 0xbf, 0x2e, 0xc3, 0x20, 0x0c, 0xf9, 0xea, 0xb8, 0xe7, 0x6b, 0x56, 0x17, 0xf7, 0xfe, 0x85, 0x6c,
+ 0x84, 0xcc, 0xd5, 0x49, 0xd7, 0x7f, 0x09, 0x65, 0xb5, 0xf4, 0x83, 0x34, 0xc2, 0x27, 0x6a, 0xf4,
+ 0xc9, 0x4c, 0xa2, 0xab, 0x1c, 0xc5, 0x63, 0x1b, 0x65, 0x69, 0x2b, 0x68, 0x84, 0x71, 0x1f, 0x0a,
+ 0xbc, 0x04, 0xa4, 0x53, 0x69, 0xbc, 0x8c, 0xaf, 0x53, 0x69, 0xa2, 0x7e, 0x14, 0x3f, 0x3f, 0x53,
+ 0x8e, 0xe4, 0x2a, 0x2a, 0xb2, 0x35, 0xe7, 0xf6, 0x10, 0x87, 0x59, 0xdc, 0x64, 0xd9, 0x36, 0x8b,
+ 0x9b, 0x52, 0x21, 0xc8, 0xe2, 0xd6, 0xc3, 0x21, 0x8f, 0x07, 0xe2, 0x7a, 0x8d, 0x32, 0x88, 0xa9,
+ 0x19, 0xd2, 0x3c, 0x0e, 0x45, 0x77, 0xbd, 0x91, 0x0c, 0x45, 0x7a, 0x3c, 0x04, 0x90, 0xe5, 0xa8,
+ 0xe4, 0x99, 0x55, 0xdb, 0x29, 0x48, 0x9e, 0x59, 0xf5, 0x15, 0xad, 0x78, 0x8c, 0x95, 0x7c, 0xd9,
+ 0xed, 0x8a, 0x70, 0xfe, 0xc2, 0x00, 0x94, 0x2e, 0x58, 0xa1, 0x77, 0xf4, 0xd4, 0xb5, 0x5d, 0x87,
+ 0xfa, 0xbb, 0xaf, 0x86, 0xac, 0x0b, 0xc8, 0x52, 0xa4, 0x0e, 0xc5, 0x1e, 0xbe, 0x24, 0x42, 0x7d,
+ 0x6e, 0xc0, 0x74, 0xac, 0xc8, 0x85, 0xde, 0xcc, 0xb0, 0x69, 0xa2, 0xf5, 0x50, 0x7f, 0xeb, 0x44,
+ 0x3c, 0xdd, 0x61, 0x5e, 0xd9, 0x01, 0xe2, 0x56, 0xf3, 0x3b, 0x06, 0x54, 0xe2, 0xb5, 0x30, 0x94,
+ 0x41, 0x3b, 0xd5, 0xb1, 0xa8, 0xdf, 0x38, 0x19, 0xf1, 0x78, 0xf3, 0xc8, 0x0b, 0x4d, 0x1f, 0x0a,
+ 0xbc, 0x68, 0xa6, 0xdb, 0xf8, 0xf1, 0x16, 0x87, 0x6e, 0xe3, 0x27, 0x2a, 0x6e, 0x9a, 0x8d, 0xef,
+ 0x7b, 0x7d, 0xac, 0xb8, 0x19, 0xaf, 0xa5, 0x65, 0x71, 0x3b, 0xde, 0xcd, 0x12, 0x85, 0xb8, 0x2c,
+ 0x6e, 0xd2, 0xcd, 0x44, 0xc9, 0x0c, 0x65, 0x10, 0x3b, 0xc1, 0xcd, 0x92, 0x15, 0x37, 0x8d, 0x9b,
+ 0x51, 0x86, 0x8a, 0x9b, 0xc9, 0x52, 0x96, 0xce, 0xcd, 0x52, 0xdd, 0x18, 0x9d, 0x9b, 0xa5, 0xab,
+ 0x61, 0x1a, 0x3b, 0x52, 0xbe, 0x31, 0x37, 0x3b, 0xa7, 0x29, 0x76, 0xa1, 0x77, 0x33, 0x94, 0xa8,
+ 0xed, 0xed, 0xd4, 0x6f, 0xbd, 0x22, 0x76, 0xe6, 0x1e, 0x67, 0xea, 0x17, 0x7b, 0xfc, 0x8f, 0x0c,
+ 0x98, 0xd3, 0xd5, 0xc7, 0x50, 0x06, 0x9f, 0x8c, 0x56, 0x50, 0x7d, 0xf1, 0x55, 0xd1, 0x8f, 0xd7,
+ 0x56, 0xb4, 0xeb, 0x1f, 0xf4, 0xbe, 0x68, 0x36, 0x9e, 0x5f, 0x85, 0x2b, 0x30, 0xd9, 0x1c, 0x3a,
+ 0x8f, 0xf1, 0x11, 0x3a, 0x37, 0x95, 0xab, 0x4f, 0x13, 0xba, 0x9e, 0xef, 0x7c, 0x46, 0xff, 0x3c,
+ 0xc4, 0x42, 0x6e, 0xb7, 0x0c, 0x10, 0x21, 0x8c, 0xfd, 0xdb, 0x97, 0xf3, 0xc6, 0x7f, 0x7e, 0x39,
+ 0x6f, 0xfc, 0xcf, 0x97, 0xf3, 0xc6, 0x4f, 0xff, 0x6f, 0x7e, 0xec, 0xf9, 0xb5, 0x9e, 0x47, 0xc5,
+ 0x5a, 0x74, 0xbc, 0x86, 0xfc, 0x93, 0x15, 0xcb, 0x0d, 0x55, 0xd4, 0xdd, 0x49, 0xfa, 0x37, 0x26,
+ 0x96, 0x7f, 0x1e, 0x00, 0x00, 0xff, 0xff, 0x52, 0x87, 0xa2, 0xad, 0x3a, 0x43, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -9461,6 +9528,11 @@ func (m *HashKVResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
+ if m.HashRevision != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.HashRevision))
+ i--
+ dAtA[i] = 0x20
+ }
if m.CompactRevision != 0 {
i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
i--
@@ -9581,6 +9653,13 @@ func (m *SnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
+ if len(m.Version) > 0 {
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x22
+ }
if len(m.Blob) > 0 {
i -= len(m.Blob)
copy(dAtA[i:], m.Blob)
@@ -11481,6 +11560,18 @@ func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
+ if m.DbSizeQuota != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.DbSizeQuota))
+ i--
+ dAtA[i] = 0x60
+ }
+ if len(m.StorageVersion) > 0 {
+ i -= len(m.StorageVersion)
+ copy(dAtA[i:], m.StorageVersion)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.StorageVersion)))
+ i--
+ dAtA[i] = 0x5a
+ }
if m.IsLearner {
i--
if m.IsLearner {
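The marshal hunks above each hard-code the protobuf key byte written in front of the new field: 0x20 for HashKVResponse.hash_revision (field 4, varint), 0x22 for SnapshotResponse.version (field 4, length-delimited), 0x5a for StatusResponse.storageVersion (field 11, length-delimited) and 0x60 for StatusResponse.dbSizeQuota (field 12, varint). The standalone sketch below is not part of the patch; it just reproduces those constants from the general rule (field_number << 3) | wire_type.

```go
package main

import "fmt"

// Wire types used by the new fields in this patch.
const (
	wireVarint = 0 // int64 fields such as hash_revision and dbSizeQuota
	wireBytes  = 2 // length-delimited fields such as version and storageVersion
)

// tagByte reproduces the key byte the generated marshalers write before a
// field: (field_number << 3) | wire_type, which fits in a single byte for
// field numbers below 16.
func tagByte(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	fmt.Printf("HashKVResponse.hash_revision  -> 0x%02x\n", tagByte(4, wireVarint))  // 0x20
	fmt.Printf("SnapshotResponse.version      -> 0x%02x\n", tagByte(4, wireBytes))   // 0x22
	fmt.Printf("StatusResponse.storageVersion -> 0x%02x\n", tagByte(11, wireBytes))  // 0x5a
	fmt.Printf("StatusResponse.dbSizeQuota    -> 0x%02x\n", tagByte(12, wireVarint)) // 0x60
}
```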
@@ -13459,6 +13550,9 @@ func (m *HashKVResponse) Size() (n int) {
if m.CompactRevision != 0 {
n += 1 + sovRpc(uint64(m.CompactRevision))
}
+ if m.HashRevision != 0 {
+ n += 1 + sovRpc(uint64(m.HashRevision))
+ }
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
@@ -13513,6 +13607,10 @@ func (m *SnapshotResponse) Size() (n int) {
if l > 0 {
n += 1 + l + sovRpc(uint64(l))
}
+ l = len(m.Version)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
@@ -14389,6 +14487,13 @@ func (m *StatusResponse) Size() (n int) {
if m.IsLearner {
n += 2
}
+ l = len(m.StorageVersion)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.DbSizeQuota != 0 {
+ n += 1 + sovRpc(uint64(m.DbSizeQuota))
+ }
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
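The Size() hunks charge one byte for each new tag plus sovRpc(...) bytes for the payload; sovRpc returns the number of bytes a value occupies once varint-encoded. A minimal, self-contained equivalent of that size calculation:

```go
package main

import (
	"fmt"
	"math/bits"
)

// varintSize mirrors what the generated sovRpc helper computes: the number of
// bytes needed to varint-encode x (one byte per 7 significant bits, at least
// one byte).
func varintSize(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	// Size() charges 1 byte for the field tag plus the varint payload,
	// e.g. "n += 1 + sovRpc(uint64(m.HashRevision))" in the hunk above.
	for _, v := range []uint64{0, 1, 127, 128, 300, 1 << 32} {
		fmt.Printf("varint(%d) takes %d byte(s)\n", v, varintSize(v))
	}
}
```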
@@ -17568,6 +17673,25 @@ func (m *HashKVResponse) Unmarshal(dAtA []byte) error {
break
}
}
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HashRevision", wireType)
+ }
+ m.HashRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.HashRevision |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipRpc(dAtA[iNdEx:])
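The unmarshal hunk above decodes hash_revision with the usual varint loop: each byte contributes its low seven bits, and a byte below 0x80 terminates the value. A standalone sketch of the same loop (names are illustrative, not taken from the generated file):

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint is a standalone version of the loop in the Unmarshal hunk
// above: each byte contributes its low 7 bits, and a clear high bit ends the
// value. It returns the value and the number of bytes consumed.
func decodeVarint(data []byte) (uint64, int, error) {
	var v uint64
	for shift, i := uint(0), 0; ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		if i >= len(data) {
			return 0, 0, errors.New("unexpected end of input")
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}

func main() {
	// 0xAC 0x02 is the varint encoding of 300, e.g. a hash_revision payload.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
```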
@@ -17865,6 +17989,38 @@ func (m *SnapshotResponse) Unmarshal(dAtA []byte) error {
m.Blob = []byte{}
}
iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipRpc(dAtA[iNdEx:])
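Decoding the new version string follows the length-delimited pattern seen above: a 0x22 tag, a varint length, then the raw bytes. The toy encoder/decoder below illustrates that layout under the assumption that the string stays shorter than 128 bytes, so the length fits in a single byte:

```go
package main

import "fmt"

// encodeVersionField hand-builds the bytes the new SnapshotResponse marshaler
// emits for field 4 (version): tag 0x22, a varint length, then the raw string.
// Longer strings would need a multi-byte length; a short version string keeps
// this sketch to one length byte.
func encodeVersionField(version string) []byte {
	out := []byte{0x22, byte(len(version))}
	return append(out, version...)
}

func main() {
	buf := encodeVersionField("3.6.0")
	fmt.Printf("% x\n", buf) // 22 05 33 2e 36 2e 30

	// Decoding mirrors the Unmarshal hunk: read the length, then slice it out.
	length := int(buf[1])
	fmt.Println(string(buf[2 : 2+length])) // 3.6.0
}
```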
@@ -22459,6 +22615,57 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error {
}
}
m.IsLearner = bool(v != 0)
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StorageVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.StorageVersion = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 12:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DbSizeQuota", wireType)
+ }
+ m.DbSizeQuota = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DbSizeQuota |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipRpc(dAtA[iNdEx:])
diff --git a/api/etcdserverpb/rpc.proto b/api/etcdserverpb/rpc.proto
index 14391378ada..a4ace71ce2b 100644
--- a/api/etcdserverpb/rpc.proto
+++ b/api/etcdserverpb/rpc.proto
@@ -4,13 +4,36 @@ package etcdserverpb;
import "gogoproto/gogo.proto";
import "etcd/api/mvccpb/kv.proto";
import "etcd/api/authpb/auth.proto";
+import "etcd/api/versionpb/version.proto";
// for grpc-gateway
import "google/api/annotations.proto";
+import "protoc-gen-openapiv2/options/annotations.proto";
+
+option go_package = "go.etcd.io/etcd/api/v3/etcdserverpb";
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
+option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+ security_definitions: {
+ security: {
+ key: "ApiKey";
+ value: {
+ type: TYPE_API_KEY;
+ in: IN_HEADER;
+ name: "Authorization";
+ }
+ }
+ }
+ security: {
+ security_requirement: {
+ key: "ApiKey";
+ value: {};
+ }
+ }
+};
+
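As an aside, the openapiv2 option above only documents the ApiKey scheme; a gateway client is still expected to send the token in the Authorization header. The following sketch is illustrative only; the endpoint URL, the base64 key payload and the token value are assumptions, not taken from this patch.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// "Zm9v" is "foo" base64-encoded, as the gRPC gateway expects for bytes fields.
	body := bytes.NewBufferString(`{"key": "Zm9v"}`)
	req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:2379/v3/kv/range", body)
	if err != nil {
		panic(err)
	}
	// The swagger option marks "Authorization" as an API-key header; a token
	// previously obtained from the authenticate endpoint would go here.
	req.Header.Set("Authorization", "example-token")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}
```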
service KV {
// Range gets the keys in the range from the key-value store.
rpc Range(RangeRequest) returns (RangeResponse) {
@@ -217,7 +240,7 @@ service Maintenance {
// It only iterates "key" bucket in backend storage.
rpc HashKV(HashKVRequest) returns (HashKVResponse) {
option (google.api.http) = {
- post: "/v3/maintenance/hash"
+ post: "/v3/maintenance/hashkv"
body: "*"
};
}
@@ -388,13 +411,16 @@ service Auth {
}
message ResponseHeader {
+ option (versionpb.etcd_version_msg) = "3.0";
+
// cluster_id is the ID of the cluster which sent the response.
uint64 cluster_id = 1;
// member_id is the ID of the member which sent the response.
uint64 member_id = 2;
- // revision is the key-value store revision when the request was applied.
+ // revision is the key-value store revision when the request was applied, and it is
+ // unset (so 0) for calls that do not interact with the key-value store.
// For watch progress responses, the header.revision indicates progress. All future events
- // recieved in this stream are guaranteed to have a higher revision number than the
+ // received in this stream are guaranteed to have a higher revision number than the
// header.revision number.
int64 revision = 3;
// raft_term is the raft term when the request was applied.
@@ -402,17 +428,21 @@ message ResponseHeader {
}
message RangeRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
enum SortOrder {
- NONE = 0; // default, no sorting
- ASCEND = 1; // lowest target value first
- DESCEND = 2; // highest target value first
+ option (versionpb.etcd_version_enum) = "3.0";
+ NONE = 0; // default, no sorting
+ ASCEND = 1; // lowest target value first
+ DESCEND = 2; // highest target value first
}
enum SortTarget {
- KEY = 0;
- VERSION = 1;
- CREATE = 2;
- MOD = 3;
- VALUE = 4;
+ option (versionpb.etcd_version_enum) = "3.0";
+ KEY = 0;
+ VERSION = 1;
+ CREATE = 2;
+ MOD = 3;
+ VALUE = 4;
}
// key is the first key for the range. If range_end is not given, the request only looks up key.
@@ -453,22 +483,24 @@ message RangeRequest {
// min_mod_revision is the lower bound for returned key mod revisions; all keys with
// lesser mod revisions will be filtered away.
- int64 min_mod_revision = 10;
+ int64 min_mod_revision = 10 [(versionpb.etcd_version_field)="3.1"];
// max_mod_revision is the upper bound for returned key mod revisions; all keys with
// greater mod revisions will be filtered away.
- int64 max_mod_revision = 11;
+ int64 max_mod_revision = 11 [(versionpb.etcd_version_field)="3.1"];
// min_create_revision is the lower bound for returned key create revisions; all keys with
// lesser create revisions will be filtered away.
- int64 min_create_revision = 12;
+ int64 min_create_revision = 12 [(versionpb.etcd_version_field)="3.1"];
// max_create_revision is the upper bound for returned key create revisions; all keys with
// greater create revisions will be filtered away.
- int64 max_create_revision = 13;
+ int64 max_create_revision = 13 [(versionpb.etcd_version_field)="3.1"];
}
message RangeResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
// kvs is the list of key-value pairs matched by the range request.
// kvs is empty when count is requested.
@@ -480,6 +512,8 @@ message RangeResponse {
}
message PutRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
// key is the key, in bytes, to put into the key-value store.
bytes key = 1;
// value is the value, in bytes, to associate with the key in the key-value store.
@@ -490,24 +524,28 @@ message PutRequest {
// If prev_kv is set, etcd gets the previous key-value pair before changing it.
// The previous key-value pair will be returned in the put response.
- bool prev_kv = 4;
+ bool prev_kv = 4 [(versionpb.etcd_version_field)="3.1"];
// If ignore_value is set, etcd updates the key using its current value.
// Returns an error if the key does not exist.
- bool ignore_value = 5;
+ bool ignore_value = 5 [(versionpb.etcd_version_field)="3.2"];
// If ignore_lease is set, etcd updates the key using its current lease.
// Returns an error if the key does not exist.
- bool ignore_lease = 6;
+ bool ignore_lease = 6 [(versionpb.etcd_version_field)="3.2"];
}
message PutResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
// if prev_kv is set in the request, the previous key-value pair will be returned.
- mvccpb.KeyValue prev_kv = 2;
+ mvccpb.KeyValue prev_kv = 2 [(versionpb.etcd_version_field)="3.1"];
}
message DeleteRangeRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
// key is the first key to delete in the range.
bytes key = 1;
// range_end is the key following the last key to delete for the range [key, range_end).
@@ -519,50 +557,61 @@ message DeleteRangeRequest {
// If prev_kv is set, etcd gets the previous key-value pairs before deleting it.
// The previous key-value pairs will be returned in the delete response.
- bool prev_kv = 3;
+ bool prev_kv = 3 [(versionpb.etcd_version_field)="3.1"];
}
message DeleteRangeResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
// deleted is the number of keys deleted by the delete range request.
int64 deleted = 2;
// if prev_kv is set in the request, the previous key-value pairs will be returned.
- repeated mvccpb.KeyValue prev_kvs = 3;
+ repeated mvccpb.KeyValue prev_kvs = 3 [(versionpb.etcd_version_field)="3.1"];
}
message RequestOp {
+ option (versionpb.etcd_version_msg) = "3.0";
// request is a union of request types accepted by a transaction.
oneof request {
RangeRequest request_range = 1;
PutRequest request_put = 2;
DeleteRangeRequest request_delete_range = 3;
- TxnRequest request_txn = 4;
+ TxnRequest request_txn = 4 [(versionpb.etcd_version_field)="3.3"];
}
}
message ResponseOp {
+ option (versionpb.etcd_version_msg) = "3.0";
+
// response is a union of response types returned by a transaction.
oneof response {
RangeResponse response_range = 1;
PutResponse response_put = 2;
DeleteRangeResponse response_delete_range = 3;
- TxnResponse response_txn = 4;
+ TxnResponse response_txn = 4 [(versionpb.etcd_version_field)="3.3"];
}
}
message Compare {
+ option (versionpb.etcd_version_msg) = "3.0";
+
enum CompareResult {
+ option (versionpb.etcd_version_enum) = "3.0";
+
EQUAL = 0;
GREATER = 1;
LESS = 2;
- NOT_EQUAL = 3;
+ NOT_EQUAL = 3 [(versionpb.etcd_version_enum_value)="3.1"];
}
enum CompareTarget {
+ option (versionpb.etcd_version_enum) = "3.0";
+
VERSION = 0;
CREATE = 1;
MOD = 2;
VALUE = 3;
- LEASE = 4;
+ LEASE = 4 [(versionpb.etcd_version_enum_value)="3.3"];
}
// result is logical comparison operation for this comparison.
CompareResult result = 1;
@@ -580,13 +629,13 @@ message Compare {
// value is the value of the given key, in bytes.
bytes value = 7;
// lease is the lease id of the given key.
- int64 lease = 8;
+ int64 lease = 8 [(versionpb.etcd_version_field)="3.3"];
// leave room for more target_union field tags, jump to 64
}
// range_end compares the given target to all keys in the range [key, range_end).
// See RangeRequest for more details on key ranges.
- bytes range_end = 64;
+ bytes range_end = 64 [(versionpb.etcd_version_field)="3.3"];
// TODO: fill out with most of the rest of RangeRequest fields when needed.
}
@@ -606,6 +655,8 @@ message Compare {
// true.
// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
message TxnRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
// compare is a list of predicates representing a conjunction of terms.
// If the comparisons succeed, then the success requests will be processed in order,
// and the response will contain their respective responses in order.
@@ -619,6 +670,8 @@ message TxnRequest {
}
message TxnResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
// succeeded is set to true if the compare evaluated to true, or false otherwise.
bool succeeded = 2;
@@ -630,6 +683,8 @@ message TxnResponse {
// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
// with a revision less than the compaction revision will be removed.
message CompactionRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
// revision is the key-value store revision for the compaction operation.
int64 revision = 1;
// physical is set so the RPC will wait until the compaction is physically
@@ -639,35 +694,48 @@ message CompactionRequest {
}
message CompactionResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
}
message HashRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
}
message HashKVRequest {
+ option (versionpb.etcd_version_msg) = "3.3";
// revision is the key-value store revision for the hash operation.
int64 revision = 1;
}
message HashKVResponse {
+ option (versionpb.etcd_version_msg) = "3.3";
+
ResponseHeader header = 1;
// hash is the hash value computed from the responding member's MVCC keys up to a given revision.
uint32 hash = 2;
// compact_revision is the compacted revision of key-value store when hash begins.
int64 compact_revision = 3;
+ // hash_revision is the revision up to which the hash is calculated.
+ int64 hash_revision = 4 [(versionpb.etcd_version_field)="3.6"];
}
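For reference, the new hash_revision should surface through the existing clientv3 Maintenance.HashKV call once the client picks up this message change. A hedged sketch, assuming a local endpoint and relying on the HashRevision field name generated in this patch:

```go
package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // assumption: local member
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// rev == 0 asks the member to hash its keyspace at the current revision.
	resp, err := cli.HashKV(ctx, "127.0.0.1:2379", 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("hash=%d compact_revision=%d hash_revision=%d\n",
		resp.Hash, resp.CompactRevision, resp.HashRevision)
}
```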
message HashResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
// hash is the hash value computed from the responding member's KV's backend.
uint32 hash = 2;
}
message SnapshotRequest {
+ option (versionpb.etcd_version_msg) = "3.3";
}
message SnapshotResponse {
+ option (versionpb.etcd_version_msg) = "3.3";
+
// header has the current key-value store information. The first header in the snapshot
// stream indicates the point in time of the snapshot.
ResponseHeader header = 1;
@@ -677,18 +745,26 @@ message SnapshotResponse {
// blob contains the next chunk of the snapshot in the snapshot stream.
bytes blob = 3;
+
+ // local version of the server that created the snapshot.
+ // In a cluster whose members run different binary versions, each member can return a different result.
+ // Indicates which etcd server version should be used when restoring the snapshot.
+ string version = 4 [(versionpb.etcd_version_field)="3.6"];
}
message WatchRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
// request_union is a request to either create a new watcher or cancel an existing watcher.
oneof request_union {
WatchCreateRequest create_request = 1;
WatchCancelRequest cancel_request = 2;
- WatchProgressRequest progress_request = 3;
+ WatchProgressRequest progress_request = 3 [(versionpb.etcd_version_field)="3.4"];
}
}
message WatchCreateRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
// key is the key to register for watching.
bytes key = 1;
@@ -709,6 +785,8 @@ message WatchCreateRequest {
bool progress_notify = 4;
enum FilterType {
+ option (versionpb.etcd_version_enum) = "3.1";
+
// filter out put event.
NOPUT = 0;
// filter out delete event.
@@ -716,34 +794,38 @@ message WatchCreateRequest {
}
// filters filter the events at server side before it sends back to the watcher.
- repeated FilterType filters = 5;
+ repeated FilterType filters = 5 [(versionpb.etcd_version_field)="3.1"];
// If prev_kv is set, created watcher gets the previous KV before the event happens.
// If the previous KV is already compacted, nothing will be returned.
- bool prev_kv = 6;
+ bool prev_kv = 6 [(versionpb.etcd_version_field)="3.1"];
// If watch_id is provided and non-zero, it will be assigned to this watcher.
// Since creating a watcher in etcd is not a synchronous operation,
+ // this can be used to ensure that ordering is correct when creating multiple
// watchers on the same stream. Creating a watcher with an ID already in
// use on the stream will cause an error to be returned.
- int64 watch_id = 7;
+ int64 watch_id = 7 [(versionpb.etcd_version_field)="3.4"];
// fragment enables splitting large revisions into multiple watch responses.
- bool fragment = 8;
+ bool fragment = 8 [(versionpb.etcd_version_field)="3.4"];
}
message WatchCancelRequest {
+ option (versionpb.etcd_version_msg) = "3.1";
// watch_id is the watcher id to cancel so that no more events are transmitted.
- int64 watch_id = 1;
+ int64 watch_id = 1 [(versionpb.etcd_version_field)="3.1"];
}
// Requests that a watch stream progress status be sent in the watch response stream as soon as
// possible.
message WatchProgressRequest {
+ option (versionpb.etcd_version_msg) = "3.4";
}
message WatchResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
// watch_id is the ID of the watcher that corresponds to the response.
int64 watch_id = 2;
@@ -754,7 +836,8 @@ message WatchResponse {
// All events sent to the created watcher will attach with the same watch_id.
bool created = 3;
- // canceled is set to true if the response is for a cancel watch request.
+ // canceled is set to true if the response is for a cancel watch request
+ // or if the start_revision has already been compacted.
// No further events will be sent to the canceled watcher.
bool canceled = 4;
@@ -769,15 +852,17 @@ message WatchResponse {
int64 compact_revision = 5;
// cancel_reason indicates the reason for canceling the watcher.
- string cancel_reason = 6;
+ string cancel_reason = 6 [(versionpb.etcd_version_field)="3.4"];
// fragment is true if a large watch response was split over multiple responses.
- bool fragment = 7;
+ bool fragment = 7 [(versionpb.etcd_version_field)="3.4"];
repeated mvccpb.Event events = 11;
}
message LeaseGrantRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
// TTL is the advisory time-to-live in seconds. Expired lease will return -1.
int64 TTL = 1;
// ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
@@ -785,6 +870,8 @@ message LeaseGrantRequest {
}
message LeaseGrantResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
// ID is the lease ID for the granted lease.
int64 ID = 2;
@@ -794,15 +881,21 @@ message LeaseGrantResponse {
}
message LeaseRevokeRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
// ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
int64 ID = 1;
}
message LeaseRevokeResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
}
message LeaseCheckpoint {
+ option (versionpb.etcd_version_msg) = "3.4";
+
// ID is the lease ID to checkpoint.
int64 ID = 1;
@@ -811,19 +904,26 @@ message LeaseCheckpoint {
}
message LeaseCheckpointRequest {
+ option (versionpb.etcd_version_msg) = "3.4";
+
repeated LeaseCheckpoint checkpoints = 1;
}
message LeaseCheckpointResponse {
+ option (versionpb.etcd_version_msg) = "3.4";
+
ResponseHeader header = 1;
}
message LeaseKeepAliveRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
// ID is the lease ID for the lease to keep alive.
int64 ID = 1;
}
message LeaseKeepAliveResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
// ID is the lease ID from the keep alive request.
int64 ID = 2;
@@ -832,6 +932,7 @@ message LeaseKeepAliveResponse {
}
message LeaseTimeToLiveRequest {
+ option (versionpb.etcd_version_msg) = "3.1";
// ID is the lease ID for the lease.
int64 ID = 1;
// keys is true to query all the keys attached to this lease.
@@ -839,6 +940,8 @@ message LeaseTimeToLiveRequest {
}
message LeaseTimeToLiveResponse {
+ option (versionpb.etcd_version_msg) = "3.1";
+
ResponseHeader header = 1;
// ID is the lease ID from the keep alive request.
int64 ID = 2;
@@ -851,19 +954,26 @@ message LeaseTimeToLiveResponse {
}
message LeaseLeasesRequest {
+ option (versionpb.etcd_version_msg) = "3.3";
}
message LeaseStatus {
+ option (versionpb.etcd_version_msg) = "3.3";
+
int64 ID = 1;
// TODO: int64 TTL = 2;
}
message LeaseLeasesResponse {
+ option (versionpb.etcd_version_msg) = "3.3";
+
ResponseHeader header = 1;
repeated LeaseStatus leases = 2;
}
message Member {
+ option (versionpb.etcd_version_msg) = "3.0";
+
// ID is the member ID for this member.
uint64 ID = 1;
// name is the human-readable name of the member. If the member is not started, the name will be an empty string.
@@ -873,17 +983,21 @@ message Member {
// clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
repeated string clientURLs = 4;
// isLearner indicates if the member is raft learner.
- bool isLearner = 5;
+ bool isLearner = 5 [(versionpb.etcd_version_field)="3.4"];
}
message MemberAddRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
// peerURLs is the list of URLs the added member will use to communicate with the cluster.
repeated string peerURLs = 1;
// isLearner indicates if the added member is raft learner.
- bool isLearner = 2;
+ bool isLearner = 2 [(versionpb.etcd_version_field)="3.4"];
}
message MemberAddResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
// member is the member information for the added member.
Member member = 2;
@@ -892,17 +1006,22 @@ message MemberAddResponse {
}
message MemberRemoveRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
// ID is the member ID of the member to remove.
uint64 ID = 1;
}
message MemberRemoveResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
// members is a list of all members after removing the member.
repeated Member members = 2;
}
message MemberUpdateRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
// ID is the member ID of the member to update.
uint64 ID = 1;
// peerURLs is the new list of URLs the member will use to communicate with the cluster.
@@ -910,59 +1029,80 @@ message MemberUpdateRequest {
}
message MemberUpdateResponse{
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
// members is a list of all members after updating the member.
- repeated Member members = 2;
+ repeated Member members = 2 [(versionpb.etcd_version_field)="3.1"];
}
message MemberListRequest {
- bool linearizable = 1;
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ bool linearizable = 1 [(versionpb.etcd_version_field)="3.5"];
}
message MemberListResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
// members is a list of all members associated with the cluster.
repeated Member members = 2;
}
message MemberPromoteRequest {
+ option (versionpb.etcd_version_msg) = "3.4";
// ID is the member ID of the member to promote.
uint64 ID = 1;
}
message MemberPromoteResponse {
+ option (versionpb.etcd_version_msg) = "3.4";
+
ResponseHeader header = 1;
// members is a list of all members after promoting the member.
repeated Member members = 2;
}
message DefragmentRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
}
message DefragmentResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
}
message MoveLeaderRequest {
+ option (versionpb.etcd_version_msg) = "3.3";
// targetID is the node ID for the new leader.
uint64 targetID = 1;
}
message MoveLeaderResponse {
+ option (versionpb.etcd_version_msg) = "3.3";
+
ResponseHeader header = 1;
}
enum AlarmType {
+ option (versionpb.etcd_version_enum) = "3.0";
+
NONE = 0; // default, used to query if any alarm is active
NOSPACE = 1; // space quota is exhausted
- CORRUPT = 2; // kv store corruption detected
+ CORRUPT = 2 [(versionpb.etcd_version_enum_value)="3.3"]; // kv store corruption detected
}
message AlarmRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
enum AlarmAction {
- GET = 0;
- ACTIVATE = 1;
- DEACTIVATE = 2;
+ option (versionpb.etcd_version_enum) = "3.0";
+
+ GET = 0;
+ ACTIVATE = 1;
+ DEACTIVATE = 2;
}
// action is the kind of alarm request to issue. The action
// may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
@@ -976,6 +1116,7 @@ message AlarmRequest {
}
message AlarmMember {
+ option (versionpb.etcd_version_msg) = "3.0";
// memberID is the ID of the member associated with the raised alarm.
uint64 memberID = 1;
// alarm is the type of alarm which has been raised.
@@ -983,13 +1124,19 @@ message AlarmMember {
}
message AlarmResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
// alarms is a list of alarms associated with the alarm request.
repeated AlarmMember alarms = 2;
}
message DowngradeRequest {
+ option (versionpb.etcd_version_msg) = "3.5";
+
enum DowngradeAction {
+ option (versionpb.etcd_version_enum) = "3.5";
+
VALIDATE = 0;
ENABLE = 1;
CANCEL = 2;
@@ -1004,15 +1151,20 @@ message DowngradeRequest {
}
message DowngradeResponse {
+ option (versionpb.etcd_version_msg) = "3.5";
+
ResponseHeader header = 1;
// version is the current cluster version.
string version = 2;
}
message StatusRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
}
message StatusResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
// version is the cluster protocol version used by the responding member.
string version = 2;
@@ -1025,55 +1177,73 @@ message StatusResponse {
// raftTerm is the current raft term of the responding member.
uint64 raftTerm = 6;
// raftAppliedIndex is the current raft applied index of the responding member.
- uint64 raftAppliedIndex = 7;
+ uint64 raftAppliedIndex = 7 [(versionpb.etcd_version_field)="3.4"];
// errors contains alarm/health information and status.
- repeated string errors = 8;
+ repeated string errors = 8 [(versionpb.etcd_version_field)="3.4"];
// dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member.
- int64 dbSizeInUse = 9;
+ int64 dbSizeInUse = 9 [(versionpb.etcd_version_field)="3.4"];
// isLearner indicates if the member is raft learner.
- bool isLearner = 10;
+ bool isLearner = 10 [(versionpb.etcd_version_field)="3.4"];
+ // storageVersion is the version of the db file. It might be updated with a delay relative to the target cluster version.
+ string storageVersion = 11 [(versionpb.etcd_version_field)="3.6"];
+ // dbSizeQuota is the configured etcd storage quota in bytes (the value passed to the etcd instance via the --quota-backend-bytes flag).
+ int64 dbSizeQuota = 12 [(versionpb.etcd_version_field)="3.6"];
}
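Similarly, storageVersion and dbSizeQuota should become visible through the existing clientv3 Maintenance.Status call. A hedged sketch, assuming a local endpoint and the generated StorageVersion/DbSizeQuota field names:

```go
package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // assumption: local member
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Status is the existing Maintenance RPC; the last two fields printed
	// below are the ones added by this patch.
	resp, err := cli.Status(ctx, "127.0.0.1:2379")
	if err != nil {
		panic(err)
	}
	fmt.Printf("version=%s dbSize=%d dbSizeQuota=%d storageVersion=%q\n",
		resp.Version, resp.DbSize, resp.DbSizeQuota, resp.StorageVersion)
}
```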
message AuthEnableRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
}
message AuthDisableRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
}
message AuthStatusRequest {
+ option (versionpb.etcd_version_msg) = "3.5";
}
message AuthenticateRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
string name = 1;
string password = 2;
}
message AuthUserAddRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
string name = 1;
string password = 2;
- authpb.UserAddOptions options = 3;
- string hashedPassword = 4;
+ authpb.UserAddOptions options = 3 [(versionpb.etcd_version_field)="3.4"];
+ string hashedPassword = 4 [(versionpb.etcd_version_field)="3.5"];
}
message AuthUserGetRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
string name = 1;
}
message AuthUserDeleteRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
// name is the name of the user to delete.
string name = 1;
}
message AuthUserChangePasswordRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
// name is the name of the user whose password is being changed.
string name = 1;
// password is the new password for the user. Note that this field will be removed in the API layer.
string password = 2;
// hashedPassword is the new password for the user. Note that this field will be initialized in the API layer.
- string hashedPassword = 3;
+ string hashedPassword = 3 [(versionpb.etcd_version_field)="3.5"];
}
message AuthUserGrantRoleRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
// user is the name of the user which should be granted a given role.
string user = 1;
// role is the name of the role to grant to the user.
@@ -1081,30 +1251,42 @@ message AuthUserGrantRoleRequest {
}
message AuthUserRevokeRoleRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
string name = 1;
string role = 2;
}
message AuthRoleAddRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
// name is the name of the role to add to the authentication system.
string name = 1;
}
message AuthRoleGetRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
string role = 1;
}
message AuthUserListRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
}
message AuthRoleListRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
}
message AuthRoleDeleteRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
string role = 1;
}
message AuthRoleGrantPermissionRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
// name is the name of the role which will be granted the permission.
string name = 1;
// perm is the permission to grant to the role.
@@ -1112,20 +1294,28 @@ message AuthRoleGrantPermissionRequest {
}
message AuthRoleRevokePermissionRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
string role = 1;
bytes key = 2;
bytes range_end = 3;
}
message AuthEnableResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
}
message AuthDisableResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
}
message AuthStatusResponse {
+ option (versionpb.etcd_version_msg) = "3.5";
+
ResponseHeader header = 1;
bool enabled = 2;
// authRevision is the current revision of auth store
@@ -1133,67 +1323,93 @@ message AuthStatusResponse {
}
message AuthenticateResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
// token is an authorized token that can be used in succeeding RPCs
string token = 2;
}
message AuthUserAddResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
}
message AuthUserGetResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
repeated string roles = 2;
}
message AuthUserDeleteResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
}
message AuthUserChangePasswordResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
}
message AuthUserGrantRoleResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
}
message AuthUserRevokeRoleResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
}
message AuthRoleAddResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
}
message AuthRoleGetResponse {
- ResponseHeader header = 1;
+ ResponseHeader header = 1 [(versionpb.etcd_version_field)="3.0"];
- repeated authpb.Permission perm = 2;
+ repeated authpb.Permission perm = 2 [(versionpb.etcd_version_field)="3.0"];
}
message AuthRoleListResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
repeated string roles = 2;
}
message AuthUserListResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
repeated string users = 2;
}
message AuthRoleDeleteResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
}
message AuthRoleGrantPermissionResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
}
message AuthRoleRevokePermissionResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
ResponseHeader header = 1;
}
diff --git a/api/go.mod b/api/go.mod
index b14057b4419..54b3847da09 100644
--- a/api/go.mod
+++ b/api/go.mod
@@ -1,14 +1,31 @@
module go.etcd.io/etcd/api/v3
-go 1.16
+go 1.23
+
+toolchain go1.23.4
require (
- github.com/coreos/go-semver v0.3.0
+ github.com/coreos/go-semver v0.3.1
github.com/gogo/protobuf v1.3.2
- github.com/golang/protobuf v1.5.2
- github.com/grpc-ecosystem/grpc-gateway v1.16.0
- google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013
- google.golang.org/grpc v1.37.0
+ github.com/golang/protobuf v1.5.4
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1
+ github.com/stretchr/testify v1.10.0
+ google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb
+ google.golang.org/grpc v1.69.2
+ google.golang.org/protobuf v1.36.1
+)
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/rogpeppe/go-internal v1.13.1 // indirect
+ go.opentelemetry.io/otel v1.33.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.33.0 // indirect
+ golang.org/x/net v0.33.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/text v0.21.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
)
// Bad imports are sometimes causing attempts to pull that code.
diff --git a/api/go.sum b/api/go.sum
index 52d9b2c3ed3..a976d6e014e 100644
--- a/api/go.sum
+++ b/api/go.sum
@@ -1,135 +1,88 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
+go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
+go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
+go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
+go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
+go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
+go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb h1:B7GIB7sr443wZ/EAEl7VZjmh1V6qzkt5V+RYcUYtS1U=
+google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:E5//3O5ZIG2l71Xnt+P/CYUY8Bxs8E7WMoZ9tlcMbAY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb h1:3oy2tynMOP1QbTC0MsNNAV+Se8M2Bd0A5+x1QHyw+pI=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
+google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
+google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
+google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/api/membershippb/membership.pb.go b/api/membershippb/membership.pb.go
index cf0d4281806..85a2a741f31 100644
--- a/api/membershippb/membership.pb.go
+++ b/api/membershippb/membership.pb.go
@@ -11,6 +11,7 @@ import (
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/golang/protobuf/proto"
+ _ "go.etcd.io/etcd/api/v3/versionpb"
)
// Reference imports to suppress errors if they are not otherwise used.
@@ -286,30 +287,34 @@ func init() {
func init() { proto.RegisterFile("membership.proto", fileDescriptor_949fe0d019050ef5) }
var fileDescriptor_949fe0d019050ef5 = []byte{
- // 367 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xc1, 0x4e, 0xf2, 0x40,
- 0x14, 0x85, 0x99, 0x42, 0xf8, 0xdb, 0xcb, 0x1f, 0xc4, 0x09, 0x89, 0x8d, 0x68, 0x25, 0x5d, 0xb1,
- 0x30, 0x98, 0xe8, 0x13, 0xa0, 0xb0, 0x20, 0x81, 0xcd, 0x18, 0xdd, 0x92, 0x56, 0x2e, 0xd8, 0xa4,
- 0x74, 0xea, 0xcc, 0x54, 0xd7, 0xbe, 0x85, 0x4f, 0xe0, 0xb3, 0xb0, 0xf4, 0x11, 0x14, 0x5f, 0xc4,
- 0x74, 0x5a, 0x4a, 0x49, 0xdc, 0xb8, 0xbb, 0x3d, 0xbd, 0xf7, 0x9c, 0xf3, 0x35, 0x85, 0xd6, 0x0a,
- 0x57, 0x3e, 0x0a, 0xf9, 0x18, 0xc4, 0xfd, 0x58, 0x70, 0xc5, 0xe9, 0xff, 0x9d, 0x12, 0xfb, 0xc7,
- 0xed, 0x25, 0x5f, 0x72, 0xfd, 0xe2, 0x22, 0x9d, 0xb2, 0x1d, 0x77, 0x02, 0x4d, 0xe6, 0x2d, 0xd4,
- 0x40, 0x29, 0x11, 0xf8, 0x89, 0x42, 0x49, 0x3b, 0x60, 0xc5, 0x88, 0x62, 0x96, 0x88, 0x50, 0xda,
- 0xa4, 0x5b, 0xed, 0x59, 0xcc, 0x4c, 0x85, 0x3b, 0x11, 0x4a, 0x7a, 0x0a, 0x10, 0xc8, 0x59, 0x88,
- 0x9e, 0x88, 0x50, 0xd8, 0x46, 0x97, 0xf4, 0x4c, 0x66, 0x05, 0x72, 0x92, 0x09, 0xee, 0x00, 0xa0,
- 0xe4, 0x44, 0xa1, 0x16, 0x79, 0x2b, 0xb4, 0x49, 0x97, 0xf4, 0x2c, 0xa6, 0x67, 0x7a, 0x06, 0x8d,
- 0x87, 0x30, 0xc0, 0x48, 0x65, 0xfe, 0x86, 0xf6, 0x87, 0x4c, 0x4a, 0x13, 0xdc, 0x77, 0x02, 0xf5,
- 0xa9, 0xee, 0x4d, 0x9b, 0x60, 0x8c, 0x87, 0xfa, 0xba, 0xc6, 0x8c, 0xf1, 0x90, 0x8e, 0xe0, 0x40,
- 0x78, 0x0b, 0x35, 0xf3, 0x8a, 0x08, 0xdd, 0xa0, 0x71, 0x79, 0xd2, 0x2f, 0x93, 0xf6, 0xf7, 0x81,
- 0x58, 0x53, 0xec, 0x03, 0x8e, 0xe0, 0x30, 0x5b, 0x2f, 0x1b, 0x55, 0xb5, 0x91, 0xbd, 0x6f, 0x54,
- 0x32, 0xc9, 0xbf, 0xee, 0x4e, 0x71, 0xcf, 0xc1, 0xbe, 0x09, 0x13, 0xa9, 0x50, 0xdc, 0xa3, 0x90,
- 0x01, 0x8f, 0x6e, 0x51, 0x31, 0x7c, 0x4a, 0x50, 0x2a, 0xda, 0x82, 0xea, 0x33, 0x8a, 0x1c, 0x3c,
- 0x1d, 0xdd, 0x57, 0x02, 0x9d, 0x7c, 0x7d, 0x5a, 0x38, 0x95, 0x2e, 0x3a, 0x60, 0xe5, 0xa5, 0x0a,
- 0x64, 0x33, 0x13, 0x34, 0xf8, 0x2f, 0x8d, 0x8d, 0x3f, 0x37, 0x1e, 0xc1, 0xd1, 0x90, 0xbf, 0x44,
- 0x4b, 0xe1, 0xcd, 0x71, 0x1c, 0x2d, 0x78, 0x29, 0xde, 0x86, 0x7f, 0x18, 0x79, 0x7e, 0x88, 0x73,
- 0x1d, 0x6e, 0xb2, 0xed, 0xe3, 0x16, 0xc5, 0x28, 0x50, 0xae, 0xdb, 0xeb, 0x2f, 0xa7, 0xb2, 0xde,
- 0x38, 0xe4, 0x63, 0xe3, 0x90, 0xcf, 0x8d, 0x43, 0xde, 0xbe, 0x9d, 0x8a, 0x5f, 0xd7, 0xff, 0xd3,
- 0xd5, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x93, 0x7d, 0x0b, 0x87, 0x02, 0x00, 0x00,
+ // 422 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0xc1, 0x6e, 0xd3, 0x40,
+ 0x10, 0xed, 0x3a, 0x55, 0x6b, 0x4f, 0x51, 0x28, 0x2b, 0x24, 0xac, 0x06, 0x8c, 0x55, 0x2e, 0x39,
+ 0xd9, 0x12, 0x51, 0x0f, 0x70, 0x03, 0xd2, 0x43, 0x10, 0xe5, 0xb0, 0xa8, 0x1c, 0xb8, 0x44, 0xeb,
+ 0x66, 0x62, 0x56, 0x72, 0xbc, 0x66, 0x77, 0x5d, 0xee, 0x1c, 0xf9, 0x02, 0xfe, 0x82, 0x13, 0xff,
+ 0xd0, 0x23, 0x9f, 0x00, 0xe1, 0x47, 0x90, 0x77, 0x9d, 0xd8, 0x11, 0x9c, 0x7a, 0x1b, 0x3f, 0xcf,
+ 0xbc, 0x79, 0xef, 0xed, 0xc0, 0xf1, 0x0a, 0x57, 0x19, 0x2a, 0xfd, 0x51, 0x54, 0x49, 0xa5, 0xa4,
+ 0x91, 0xf4, 0x4e, 0x87, 0x54, 0xd9, 0xc9, 0xfd, 0x5c, 0xe6, 0xd2, 0xfe, 0x48, 0x9b, 0xca, 0xf5,
+ 0x9c, 0xc4, 0x68, 0xae, 0x16, 0x29, 0xaf, 0x44, 0x7a, 0x8d, 0x4a, 0x0b, 0x59, 0x56, 0xd9, 0xa6,
+ 0x72, 0x1d, 0xa7, 0x97, 0x30, 0x64, 0x7c, 0x69, 0x5e, 0x18, 0xa3, 0x44, 0x56, 0x1b, 0xd4, 0x74,
+ 0x04, 0x41, 0x85, 0xa8, 0xe6, 0xb5, 0x2a, 0x74, 0x48, 0xe2, 0xc1, 0x38, 0x60, 0x7e, 0x03, 0x5c,
+ 0xaa, 0x42, 0xd3, 0x47, 0x00, 0x42, 0xcf, 0x0b, 0xe4, 0xaa, 0x44, 0x15, 0x7a, 0x31, 0x19, 0xfb,
+ 0x2c, 0x10, 0xfa, 0x8d, 0x03, 0x9e, 0x1f, 0x7e, 0xf9, 0x11, 0x0e, 0x26, 0xc9, 0xd9, 0xe9, 0x6b,
+ 0x80, 0x1e, 0x25, 0x85, 0xfd, 0x92, 0xaf, 0x30, 0x24, 0x31, 0x19, 0x07, 0xcc, 0xd6, 0xf4, 0x31,
+ 0x1c, 0x5d, 0x15, 0x02, 0x4b, 0xe3, 0x16, 0x79, 0x76, 0x11, 0x38, 0xa8, 0x59, 0xd5, 0x71, 0x7d,
+ 0x27, 0x70, 0x70, 0x61, 0xbd, 0xd2, 0x21, 0x78, 0xb3, 0xa9, 0xa5, 0xd9, 0x67, 0xde, 0x6c, 0x4a,
+ 0xcf, 0xe1, 0xae, 0xe2, 0x4b, 0x33, 0xe7, 0xdb, 0x5d, 0x56, 0xd3, 0xd1, 0xd3, 0x87, 0x49, 0x3f,
+ 0x9d, 0x64, 0xd7, 0x22, 0x1b, 0xaa, 0x5d, 0xcb, 0xe7, 0x70, 0xcf, 0xb5, 0xf7, 0x89, 0x06, 0x96,
+ 0x28, 0xdc, 0x25, 0xea, 0x91, 0xb4, 0x2f, 0xd2, 0x21, 0x9d, 0xe2, 0x33, 0x08, 0x5f, 0x15, 0xb5,
+ 0x36, 0xa8, 0xde, 0xbb, 0xb0, 0xdf, 0xa1, 0x61, 0xf8, 0xa9, 0x46, 0x6d, 0xe8, 0x31, 0x0c, 0xae,
+ 0x51, 0xb5, 0x51, 0x34, 0x65, 0x37, 0xf6, 0x95, 0xc0, 0xa8, 0x9d, 0xbb, 0xd8, 0x72, 0xf7, 0x46,
+ 0x47, 0x10, 0xb4, 0x32, 0xb7, 0x21, 0xf8, 0x0e, 0xb0, 0x51, 0xfc, 0xc7, 0x83, 0x77, 0x7b, 0x0f,
+ 0x6f, 0xe1, 0xc1, 0x54, 0x7e, 0x2e, 0x73, 0xc5, 0x17, 0x38, 0x2b, 0x97, 0xb2, 0xa7, 0x23, 0x84,
+ 0x43, 0x2c, 0x79, 0x56, 0xe0, 0xc2, 0xaa, 0xf0, 0xd9, 0xe6, 0x73, 0x63, 0xce, 0xfb, 0xd7, 0xdc,
+ 0xcb, 0x67, 0x37, 0xbf, 0xa3, 0xbd, 0x9b, 0x75, 0x44, 0x7e, 0xae, 0x23, 0xf2, 0x6b, 0x1d, 0x91,
+ 0x6f, 0x7f, 0xa2, 0xbd, 0x0f, 0x4f, 0x72, 0x99, 0x34, 0x37, 0x9a, 0x08, 0x99, 0x76, 0xb7, 0x3a,
+ 0x49, 0xfb, 0x82, 0xb3, 0x03, 0x7b, 0xaa, 0x93, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x56,
+ 0x21, 0x97, 0x04, 0x03, 0x00, 0x00,
}
func (m *RaftAttributes) Marshal() (dAtA []byte, err error) {
diff --git a/api/membershippb/membership.proto b/api/membershippb/membership.proto
index e63e9ecc994..cbccfefccf4 100644
--- a/api/membershippb/membership.proto
+++ b/api/membershippb/membership.proto
@@ -2,6 +2,9 @@ syntax = "proto3";
package membershippb;
import "gogoproto/gogo.proto";
+import "etcd/api/versionpb/version.proto";
+
+option go_package = "go.etcd.io/etcd/api/v3/membershippb";
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
@@ -10,6 +13,8 @@ option (gogoproto.goproto_getters_all) = false;
// RaftAttributes represents the raft related attributes of an etcd member.
message RaftAttributes {
+ option (versionpb.etcd_version_msg) = "3.5";
+
// peerURLs is the list of peers in the raft cluster.
repeated string peer_urls = 1;
// isLearner indicates if the member is raft learner.
@@ -18,26 +23,36 @@ message RaftAttributes {
// Attributes represents all the non-raft related attributes of an etcd member.
message Attributes {
+ option (versionpb.etcd_version_msg) = "3.5";
+
string name = 1;
repeated string client_urls = 2;
}
message Member {
+ option (versionpb.etcd_version_msg) = "3.5";
+
uint64 ID = 1;
RaftAttributes raft_attributes = 2;
Attributes member_attributes = 3;
}
message ClusterVersionSetRequest {
+ option (versionpb.etcd_version_msg) = "3.5";
+
string ver = 1;
}
message ClusterMemberAttrSetRequest {
+ option (versionpb.etcd_version_msg) = "3.5";
+
uint64 member_ID = 1;
Attributes member_attributes = 2;
}
message DowngradeInfoSetRequest {
+ option (versionpb.etcd_version_msg) = "3.5";
+
bool enabled = 1;
string ver = 2;
}
\ No newline at end of file
diff --git a/api/mvccpb/kv.pb.go b/api/mvccpb/kv.pb.go
index fc258d6c206..2fed4242ccc 100644
--- a/api/mvccpb/kv.pb.go
+++ b/api/mvccpb/kv.pb.go
@@ -164,26 +164,28 @@ func init() {
func init() { proto.RegisterFile("kv.proto", fileDescriptor_2216fe83c9c12408) }
var fileDescriptor_2216fe83c9c12408 = []byte{
- // 303 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40,
- 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18,
- 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94,
- 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa,
- 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3,
- 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae,
- 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7,
- 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3,
- 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d,
- 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b,
- 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23,
- 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36,
- 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34,
- 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad,
- 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30,
- 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a,
- 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94,
- 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff,
- 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00,
+ // 327 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xc1, 0x6a, 0xfa, 0x40,
+ 0x10, 0xc6, 0xb3, 0x46, 0xa3, 0xff, 0x51, 0xfc, 0x87, 0x45, 0x68, 0x28, 0x34, 0xa4, 0x5e, 0x6a,
+ 0x29, 0x24, 0xa0, 0x87, 0xde, 0x4b, 0x73, 0xb2, 0x87, 0x12, 0x6c, 0x0f, 0xbd, 0x48, 0x8c, 0x83,
+ 0x84, 0xa8, 0x1b, 0x62, 0xba, 0x90, 0x37, 0xe9, 0xbd, 0xf7, 0x3e, 0x87, 0x47, 0x1f, 0xa1, 0xda,
+ 0x17, 0x29, 0x3b, 0x5b, 0xed, 0xa5, 0x97, 0xdd, 0x99, 0xef, 0xfb, 0xb1, 0xf3, 0x0d, 0x0b, 0xad,
+ 0x4c, 0xfa, 0x79, 0x21, 0x4a, 0xc1, 0xad, 0x95, 0x4c, 0x92, 0x7c, 0x76, 0xde, 0x5b, 0x88, 0x85,
+ 0x20, 0x29, 0x50, 0x95, 0x76, 0xfb, 0x1f, 0x0c, 0x5a, 0x63, 0xac, 0x9e, 0xe3, 0xe5, 0x2b, 0x72,
+ 0x1b, 0xcc, 0x0c, 0x2b, 0x87, 0x79, 0x6c, 0xd0, 0x89, 0x54, 0xc9, 0xaf, 0xe0, 0x7f, 0x52, 0x60,
+ 0x5c, 0xe2, 0xb4, 0x40, 0x99, 0x6e, 0x52, 0xb1, 0x76, 0x6a, 0x1e, 0x1b, 0x98, 0x51, 0x57, 0xcb,
+ 0xd1, 0x8f, 0xca, 0x2f, 0xa1, 0xb3, 0x12, 0xf3, 0x5f, 0xca, 0x24, 0xaa, 0xbd, 0x12, 0xf3, 0x13,
+ 0xe2, 0x40, 0x53, 0x62, 0x41, 0x6e, 0x9d, 0xdc, 0x63, 0xcb, 0x7b, 0xd0, 0x90, 0x2a, 0x80, 0xd3,
+ 0xa0, 0xc9, 0xba, 0x51, 0xea, 0x12, 0xe3, 0x0d, 0x3a, 0x16, 0xd1, 0xba, 0xe9, 0xbf, 0x33, 0x68,
+ 0x84, 0x12, 0xd7, 0x25, 0xbf, 0x81, 0x7a, 0x59, 0xe5, 0x48, 0x71, 0xbb, 0xc3, 0x33, 0x5f, 0xef,
+ 0xe9, 0x93, 0xa9, 0xcf, 0x49, 0x95, 0x63, 0x44, 0x10, 0xf7, 0xa0, 0x96, 0x49, 0xca, 0xde, 0x1e,
+ 0xda, 0x47, 0xf4, 0xb8, 0x78, 0x54, 0xcb, 0x24, 0xbf, 0x86, 0x66, 0x5e, 0xa0, 0x9c, 0x66, 0x92,
+ 0xc2, 0xff, 0x85, 0x59, 0x0a, 0x18, 0xcb, 0xbe, 0x07, 0xff, 0x4e, 0xef, 0xf3, 0x26, 0x98, 0x8f,
+ 0x4f, 0x13, 0xdb, 0xe0, 0x00, 0xd6, 0x7d, 0xf8, 0x10, 0x4e, 0x42, 0x9b, 0xdd, 0xdd, 0x6e, 0xf7,
+ 0xae, 0xb1, 0xdb, 0xbb, 0xc6, 0xf6, 0xe0, 0xb2, 0xdd, 0xc1, 0x65, 0x9f, 0x07, 0x97, 0xbd, 0x7d,
+ 0xb9, 0xc6, 0xcb, 0xc5, 0x42, 0xf8, 0x58, 0x26, 0x73, 0x3f, 0x15, 0x81, 0xba, 0x83, 0x38, 0x4f,
+ 0x03, 0x39, 0x0a, 0xf4, 0xac, 0x99, 0x45, 0xdf, 0x32, 0xfa, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x78,
+ 0x06, 0x46, 0xf5, 0xc0, 0x01, 0x00, 0x00,
}
func (m *KeyValue) Marshal() (dAtA []byte, err error) {
diff --git a/api/mvccpb/kv.proto b/api/mvccpb/kv.proto
index 23c911b7da8..a93479c69f7 100644
--- a/api/mvccpb/kv.proto
+++ b/api/mvccpb/kv.proto
@@ -3,6 +3,8 @@ package mvccpb;
import "gogoproto/gogo.proto";
+option go_package = "go.etcd.io/etcd/api/v3/mvccpb";
+
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
diff --git a/api/v3rpc/rpctypes/error.go b/api/v3rpc/rpctypes/error.go
index c03e91cfbc8..28f8d7e435f 100644
--- a/api/v3rpc/rpctypes/error.go
+++ b/api/v3rpc/rpctypes/error.go
@@ -21,71 +21,81 @@ import (
// server-side error
var (
- ErrGRPCEmptyKey = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err()
- ErrGRPCKeyNotFound = status.New(codes.InvalidArgument, "etcdserver: key not found").Err()
- ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err()
- ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err()
- ErrGRPCTooManyOps = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err()
- ErrGRPCDuplicateKey = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err()
- ErrGRPCCompacted = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err()
- ErrGRPCFutureRev = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err()
- ErrGRPCNoSpace = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err()
+ ErrGRPCEmptyKey = status.Error(codes.InvalidArgument, "etcdserver: key is not provided")
+ ErrGRPCKeyNotFound = status.Error(codes.InvalidArgument, "etcdserver: key not found")
+ ErrGRPCValueProvided = status.Error(codes.InvalidArgument, "etcdserver: value is provided")
+ ErrGRPCLeaseProvided = status.Error(codes.InvalidArgument, "etcdserver: lease is provided")
+ ErrGRPCTooManyOps = status.Error(codes.InvalidArgument, "etcdserver: too many operations in txn request")
+ ErrGRPCDuplicateKey = status.Error(codes.InvalidArgument, "etcdserver: duplicate key given in txn request")
+ ErrGRPCInvalidClientAPIVersion = status.Error(codes.InvalidArgument, "etcdserver: invalid client api version")
+ ErrGRPCInvalidSortOption = status.Error(codes.InvalidArgument, "etcdserver: invalid sort option")
+ ErrGRPCCompacted = status.Error(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted")
+ ErrGRPCFutureRev = status.Error(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision")
+ ErrGRPCNoSpace = status.Error(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded")
- ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: requested lease not found").Err()
- ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err()
- ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err()
+ ErrGRPCLeaseNotFound = status.Error(codes.NotFound, "etcdserver: requested lease not found")
+ ErrGRPCLeaseExist = status.Error(codes.FailedPrecondition, "etcdserver: lease already exists")
+ ErrGRPCLeaseTTLTooLarge = status.Error(codes.OutOfRange, "etcdserver: too large lease TTL")
- ErrGRPCWatchCanceled = status.New(codes.Canceled, "etcdserver: watch canceled").Err()
+ ErrGRPCWatchCanceled = status.Error(codes.Canceled, "etcdserver: watch canceled")
- ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err()
- ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err()
- ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err()
- ErrGRPCMemberBadURLs = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err()
- ErrGRPCMemberNotFound = status.New(codes.NotFound, "etcdserver: member not found").Err()
- ErrGRPCMemberNotLearner = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member").Err()
- ErrGRPCLearnerNotReady = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member which is in sync with leader").Err()
- ErrGRPCTooManyLearners = status.New(codes.FailedPrecondition, "etcdserver: too many learner members in cluster").Err()
+ ErrGRPCMemberExist = status.Error(codes.FailedPrecondition, "etcdserver: member ID already exist")
+ ErrGRPCPeerURLExist = status.Error(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
+ ErrGRPCMemberNotEnoughStarted = status.Error(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members")
+ ErrGRPCMemberBadURLs = status.Error(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
+ ErrGRPCMemberNotFound = status.Error(codes.NotFound, "etcdserver: member not found")
+ ErrGRPCMemberNotLearner = status.Error(codes.FailedPrecondition, "etcdserver: can only promote a learner member")
+ ErrGRPCLearnerNotReady = status.Error(codes.FailedPrecondition, "etcdserver: can only promote a learner member which is in sync with leader")
+ ErrGRPCTooManyLearners = status.Error(codes.FailedPrecondition, "etcdserver: too many learner members in cluster")
+ ErrGRPCClusterIDMismatch = status.Error(codes.FailedPrecondition, "etcdserver: cluster ID mismatch")
+ //revive:disable:var-naming
+ // Deprecated: Please use ErrGRPCClusterIDMismatch.
+ ErrGRPCClusterIdMismatch = ErrGRPCClusterIDMismatch
+ //revive:enable:var-naming
- ErrGRPCRequestTooLarge = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err()
- ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err()
+ ErrGRPCRequestTooLarge = status.Error(codes.InvalidArgument, "etcdserver: request is too large")
+ ErrGRPCRequestTooManyRequests = status.Error(codes.ResourceExhausted, "etcdserver: too many requests")
- ErrGRPCRootUserNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err()
- ErrGRPCRootRoleNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err()
- ErrGRPCUserAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err()
- ErrGRPCUserEmpty = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err()
- ErrGRPCUserNotFound = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err()
- ErrGRPCRoleAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err()
- ErrGRPCRoleNotFound = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err()
- ErrGRPCRoleEmpty = status.New(codes.InvalidArgument, "etcdserver: role name is empty").Err()
- ErrGRPCAuthFailed = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err()
- ErrGRPCPermissionDenied = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err()
- ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err()
- ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err()
- ErrGRPCAuthNotEnabled = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err()
- ErrGRPCInvalidAuthToken = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err()
- ErrGRPCInvalidAuthMgmt = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err()
+ ErrGRPCRootUserNotExist = status.Error(codes.FailedPrecondition, "etcdserver: root user does not exist")
+ ErrGRPCRootRoleNotExist = status.Error(codes.FailedPrecondition, "etcdserver: root user does not have root role")
+ ErrGRPCUserAlreadyExist = status.Error(codes.FailedPrecondition, "etcdserver: user name already exists")
+ ErrGRPCUserEmpty = status.Error(codes.InvalidArgument, "etcdserver: user name is empty")
+ ErrGRPCUserNotFound = status.Error(codes.FailedPrecondition, "etcdserver: user name not found")
+ ErrGRPCRoleAlreadyExist = status.Error(codes.FailedPrecondition, "etcdserver: role name already exists")
+ ErrGRPCRoleNotFound = status.Error(codes.FailedPrecondition, "etcdserver: role name not found")
+ ErrGRPCRoleEmpty = status.Error(codes.InvalidArgument, "etcdserver: role name is empty")
+ ErrGRPCAuthFailed = status.Error(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password")
+ ErrGRPCPermissionNotGiven = status.Error(codes.InvalidArgument, "etcdserver: permission not given")
+ ErrGRPCPermissionDenied = status.Error(codes.PermissionDenied, "etcdserver: permission denied")
+ ErrGRPCRoleNotGranted = status.Error(codes.FailedPrecondition, "etcdserver: role is not granted to the user")
+ ErrGRPCPermissionNotGranted = status.Error(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
+ ErrGRPCAuthNotEnabled = status.Error(codes.FailedPrecondition, "etcdserver: authentication is not enabled")
+ ErrGRPCInvalidAuthToken = status.Error(codes.Unauthenticated, "etcdserver: invalid auth token")
+ ErrGRPCInvalidAuthMgmt = status.Error(codes.InvalidArgument, "etcdserver: invalid auth management")
+ ErrGRPCAuthOldRevision = status.Error(codes.InvalidArgument, "etcdserver: revision of auth store is old")
- ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err()
- ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err()
- ErrGRPCLeaderChanged = status.New(codes.Unavailable, "etcdserver: leader changed").Err()
- ErrGRPCNotCapable = status.New(codes.Unavailable, "etcdserver: not capable").Err()
- ErrGRPCStopped = status.New(codes.Unavailable, "etcdserver: server stopped").Err()
- ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err()
- ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err()
- ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err()
- ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err()
- ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err()
- ErrGPRCNotSupportedForLearner = status.New(codes.Unavailable, "etcdserver: rpc not supported for learner").Err()
- ErrGRPCBadLeaderTransferee = status.New(codes.FailedPrecondition, "etcdserver: bad leader transferee").Err()
+ ErrGRPCNoLeader = status.Error(codes.Unavailable, "etcdserver: no leader")
+ ErrGRPCNotLeader = status.Error(codes.FailedPrecondition, "etcdserver: not leader")
+ ErrGRPCLeaderChanged = status.Error(codes.Unavailable, "etcdserver: leader changed")
+ ErrGRPCNotCapable = status.Error(codes.FailedPrecondition, "etcdserver: not capable")
+ ErrGRPCStopped = status.Error(codes.Unavailable, "etcdserver: server stopped")
+ ErrGRPCTimeout = status.Error(codes.Unavailable, "etcdserver: request timed out")
+ ErrGRPCTimeoutDueToLeaderFail = status.Error(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure")
+ ErrGRPCTimeoutDueToConnectionLost = status.Error(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost")
+ ErrGRPCTimeoutWaitAppliedIndex = status.Error(codes.Unavailable, "etcdserver: request timed out, waiting for the applied index took too long")
+ ErrGRPCUnhealthy = status.Error(codes.Unavailable, "etcdserver: unhealthy cluster")
+ ErrGRPCCorrupt = status.Error(codes.DataLoss, "etcdserver: corrupt cluster")
+ ErrGRPCNotSupportedForLearner = status.Error(codes.FailedPrecondition, "etcdserver: rpc not supported for learner")
+ ErrGRPCBadLeaderTransferee = status.Error(codes.FailedPrecondition, "etcdserver: bad leader transferee")
- ErrGRPCClusterVersionUnavailable = status.New(codes.Unavailable, "etcdserver: cluster version not found during downgrade").Err()
- ErrGRPCWrongDowngradeVersionFormat = status.New(codes.InvalidArgument, "etcdserver: wrong downgrade target version format").Err()
- ErrGRPCInvalidDowngradeTargetVersion = status.New(codes.InvalidArgument, "etcdserver: invalid downgrade target version").Err()
- ErrGRPCDowngradeInProcess = status.New(codes.FailedPrecondition, "etcdserver: cluster has a downgrade job in progress").Err()
- ErrGRPCNoInflightDowngrade = status.New(codes.FailedPrecondition, "etcdserver: no inflight downgrade job").Err()
+ ErrGRPCWrongDowngradeVersionFormat = status.Error(codes.InvalidArgument, "etcdserver: wrong downgrade target version format")
+ ErrGRPCInvalidDowngradeTargetVersion = status.Error(codes.InvalidArgument, "etcdserver: invalid downgrade target version")
+ ErrGRPCClusterVersionUnavailable = status.Error(codes.FailedPrecondition, "etcdserver: cluster version not found during downgrade")
+ ErrGRPCDowngradeInProcess = status.Error(codes.FailedPrecondition, "etcdserver: cluster has a downgrade job in progress")
+ ErrGRPCNoInflightDowngrade = status.Error(codes.FailedPrecondition, "etcdserver: no inflight downgrade job")
- ErrGRPCCanceled = status.New(codes.Canceled, "etcdserver: request canceled").Err()
- ErrGRPCDeadlineExceeded = status.New(codes.DeadlineExceeded, "etcdserver: context deadline exceeded").Err()
+ ErrGRPCCanceled = status.Error(codes.Canceled, "etcdserver: request canceled")
+ ErrGRPCDeadlineExceeded = status.Error(codes.DeadlineExceeded, "etcdserver: context deadline exceeded")
errStringToError = map[string]error{
ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey,
@@ -93,11 +103,12 @@ var (
ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,
- ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps,
- ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
- ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted,
- ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev,
- ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace,
+ ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps,
+ ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
+ ErrorDesc(ErrGRPCInvalidSortOption): ErrGRPCInvalidSortOption,
+ ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted,
+ ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev,
+ ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace,
ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist,
@@ -111,6 +122,7 @@ var (
ErrorDesc(ErrGRPCMemberNotLearner): ErrGRPCMemberNotLearner,
ErrorDesc(ErrGRPCLearnerNotReady): ErrGRPCLearnerNotReady,
ErrorDesc(ErrGRPCTooManyLearners): ErrGRPCTooManyLearners,
+ ErrorDesc(ErrGRPCClusterIDMismatch): ErrGRPCClusterIDMismatch,
ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge,
ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
@@ -130,6 +142,7 @@ var (
ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled,
ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken,
ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt,
+ ErrorDesc(ErrGRPCAuthOldRevision): ErrGRPCAuthOldRevision,
ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader,
@@ -141,7 +154,7 @@ var (
ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy,
ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt,
- ErrorDesc(ErrGPRCNotSupportedForLearner): ErrGPRCNotSupportedForLearner,
+ ErrorDesc(ErrGRPCNotSupportedForLearner): ErrGRPCNotSupportedForLearner,
ErrorDesc(ErrGRPCBadLeaderTransferee): ErrGRPCBadLeaderTransferee,
ErrorDesc(ErrGRPCClusterVersionUnavailable): ErrGRPCClusterVersionUnavailable,
@@ -154,15 +167,16 @@ var (
// client-side error
var (
- ErrEmptyKey = Error(ErrGRPCEmptyKey)
- ErrKeyNotFound = Error(ErrGRPCKeyNotFound)
- ErrValueProvided = Error(ErrGRPCValueProvided)
- ErrLeaseProvided = Error(ErrGRPCLeaseProvided)
- ErrTooManyOps = Error(ErrGRPCTooManyOps)
- ErrDuplicateKey = Error(ErrGRPCDuplicateKey)
- ErrCompacted = Error(ErrGRPCCompacted)
- ErrFutureRev = Error(ErrGRPCFutureRev)
- ErrNoSpace = Error(ErrGRPCNoSpace)
+ ErrEmptyKey = Error(ErrGRPCEmptyKey)
+ ErrKeyNotFound = Error(ErrGRPCKeyNotFound)
+ ErrValueProvided = Error(ErrGRPCValueProvided)
+ ErrLeaseProvided = Error(ErrGRPCLeaseProvided)
+ ErrTooManyOps = Error(ErrGRPCTooManyOps)
+ ErrDuplicateKey = Error(ErrGRPCDuplicateKey)
+ ErrInvalidSortOption = Error(ErrGRPCInvalidSortOption)
+ ErrCompacted = Error(ErrGRPCCompacted)
+ ErrFutureRev = Error(ErrGRPCFutureRev)
+ ErrNoSpace = Error(ErrGRPCNoSpace)
ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
ErrLeaseExist = Error(ErrGRPCLeaseExist)
@@ -194,7 +208,13 @@ var (
ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled)
ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken)
+ ErrAuthOldRevision = Error(ErrGRPCAuthOldRevision)
ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt)
+ ErrClusterIDMismatch = Error(ErrGRPCClusterIDMismatch)
+ //revive:disable:var-naming
+ // Deprecated: Please use ErrClusterIDMismatch.
+ ErrClusterIdMismatch = ErrClusterIDMismatch
+ //revive:enable:var-naming
ErrNoLeader = Error(ErrGRPCNoLeader)
ErrNotLeader = Error(ErrGRPCNotLeader)
@@ -204,6 +224,7 @@ var (
ErrTimeout = Error(ErrGRPCTimeout)
ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail)
ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost)
+ ErrTimeoutWaitAppliedIndex = Error(ErrGRPCTimeoutWaitAppliedIndex)
ErrUnhealthy = Error(ErrGRPCUnhealthy)
ErrCorrupt = Error(ErrGRPCCorrupt)
ErrBadLeaderTransferee = Error(ErrGRPCBadLeaderTransferee)
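
Switching from status.New(...).Err() to status.Error(...) is equivalent on the wire; the rewritten declarations simply drop the intermediate *status.Status. A minimal sketch of how a caller can map a raw gRPC error back onto these sentinels, assuming the go.etcd.io/etcd/api/v3/v3rpc/rpctypes import path; the classify helper and the simulated wire error are illustrative, not part of this patch:

```go
package main

import (
	"errors"
	"fmt"

	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// classify maps a raw gRPC error, as received over the wire, onto the
// client-side rpctypes sentinel and reports its gRPC code.
func classify(rpcErr error) {
	// rpctypes.Error returns the matching client-side EtcdError for a known
	// server error description; unknown errors pass through unchanged.
	clientErr := rpctypes.Error(rpcErr)

	var etcdErr rpctypes.EtcdError
	if errors.As(clientErr, &etcdErr) {
		fmt.Printf("etcd error %q (code %v)\n", etcdErr.Error(), etcdErr.Code())
		return
	}
	if s, ok := status.FromError(rpcErr); ok {
		fmt.Printf("unrecognized gRPC error: code=%v desc=%q\n", s.Code(), s.Message())
	}
}

func main() {
	// Simulate the wire form of ErrGRPCEmptyKey, exactly as a server returns it.
	classify(status.Error(codes.InvalidArgument, "etcdserver: key is not provided"))
}
```
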
diff --git a/api/v3rpc/rpctypes/error_test.go b/api/v3rpc/rpctypes/error_test.go
index 525d9698311..494385ce8d2 100644
--- a/api/v3rpc/rpctypes/error_test.go
+++ b/api/v3rpc/rpctypes/error_test.go
@@ -15,28 +15,27 @@
package rpctypes
import (
+ "errors"
"testing"
+ "github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func TestConvert(t *testing.T) {
- e1 := status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err()
+ e1 := status.Error(codes.InvalidArgument, "etcdserver: key is not provided")
e2 := ErrGRPCEmptyKey
- e3 := ErrEmptyKey
+ var e3 EtcdError
+ errors.As(ErrEmptyKey, &e3)
- if e1.Error() != e2.Error() {
- t.Fatalf("expected %q == %q", e1.Error(), e2.Error())
- }
- if ev1, ok := status.FromError(e1); ok && ev1.Code() != e3.(EtcdError).Code() {
- t.Fatalf("expected them to be equal, got %v / %v", ev1.Code(), e3.(EtcdError).Code())
+ require.Equal(t, e1.Error(), e2.Error())
+ if ev1, ok := status.FromError(e1); ok {
+ require.Equal(t, ev1.Code(), e3.Code())
}
- if e1.Error() == e3.Error() {
- t.Fatalf("expected %q != %q", e1.Error(), e3.Error())
- }
- if ev2, ok := status.FromError(e2); ok && ev2.Code() != e3.(EtcdError).Code() {
- t.Fatalf("expected them to be equal, got %v / %v", ev2.Code(), e3.(EtcdError).Code())
+ require.NotEqual(t, e1.Error(), e3.Error())
+ if ev2, ok := status.FromError(e2); ok {
+ require.Equal(t, ev2.Code(), e3.Code())
}
}
diff --git a/api/v3rpc/rpctypes/metadatafields.go b/api/v3rpc/rpctypes/metadatafields.go
index 8f8ac60ff22..e5770afb2e8 100644
--- a/api/v3rpc/rpctypes/metadatafields.go
+++ b/api/v3rpc/rpctypes/metadatafields.go
@@ -18,3 +18,6 @@ var (
TokenFieldNameGRPC = "token"
TokenFieldNameSwagger = "authorization"
)
+
+// TokenFieldNameGRPCKey is used as a context key to store the token.
+type TokenFieldNameGRPCKey struct{}
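
TokenFieldNameGRPCKey gives callers a collision-free, struct-typed context key for the auth token, rather than a bare string key. A minimal sketch of the usual pattern; the withToken and tokenFromContext helpers are illustrative, not part of this patch:

```go
package main

import (
	"context"
	"fmt"

	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
)

// withToken stores an auth token in the context under the struct-typed key,
// so it cannot collide with string keys used by other packages.
func withToken(ctx context.Context, token string) context.Context {
	return context.WithValue(ctx, rpctypes.TokenFieldNameGRPCKey{}, token)
}

// tokenFromContext reads the token back, reporting whether one was set.
func tokenFromContext(ctx context.Context) (string, bool) {
	token, ok := ctx.Value(rpctypes.TokenFieldNameGRPCKey{}).(string)
	return token, ok
}

func main() {
	ctx := withToken(context.Background(), "example-token")
	if token, ok := tokenFromContext(ctx); ok {
		fmt.Println("token in context:", token)
	}
}
```
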
diff --git a/api/version/version.go b/api/version/version.go
index af132f821d5..ef3c09e7c11 100644
--- a/api/version/version.go
+++ b/api/version/version.go
@@ -26,13 +26,29 @@ import (
var (
// MinClusterVersion is the min cluster version this etcd binary is compatible with.
MinClusterVersion = "3.0.0"
- Version = "3.5.0-alpha.0"
+ Version = "3.6.0-alpha.0"
APIVersion = "unknown"
// Git SHA Value will be set during build
GitSHA = "Not provided (use ./build instead of go build)"
)
+// Get all constant versions defined in a centralized place.
+var (
+ V3_0 = semver.Version{Major: 3, Minor: 0}
+ V3_1 = semver.Version{Major: 3, Minor: 1}
+ V3_2 = semver.Version{Major: 3, Minor: 2}
+ V3_3 = semver.Version{Major: 3, Minor: 3}
+ V3_4 = semver.Version{Major: 3, Minor: 4}
+ V3_5 = semver.Version{Major: 3, Minor: 5}
+ V3_6 = semver.Version{Major: 3, Minor: 6}
+ V3_7 = semver.Version{Major: 3, Minor: 7}
+ V4_0 = semver.Version{Major: 4, Minor: 0}
+
+ // AllVersions keeps all the versions in ascending order.
+ AllVersions = []semver.Version{V3_0, V3_1, V3_2, V3_3, V3_4, V3_5, V3_6, V3_7, V4_0}
+)
+
func init() {
ver, err := semver.NewVersion(Version)
if err == nil {
@@ -43,6 +59,7 @@ func init() {
type Versions struct {
Server string `json:"etcdserver"`
Cluster string `json:"etcdcluster"`
+ Storage string `json:"storage"`
// TODO: raft state machine version
}
@@ -54,3 +71,15 @@ func Cluster(v string) string {
}
return fmt.Sprintf("%s.%s", vs[0], vs[1])
}
+
+func Compare(ver1, ver2 semver.Version) int {
+ return ver1.Compare(ver2)
+}
+
+func LessThan(ver1, ver2 semver.Version) bool {
+ return ver1.LessThan(ver2)
+}
+
+func Equal(ver1, ver2 semver.Version) bool {
+ return ver1.Equal(ver2)
+}
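
Compare, LessThan and Equal are thin wrappers around the corresponding semver.Version methods, and AllVersions stays in ascending order so callers can pick off the oldest or newest known release. A short illustrative sketch, assuming the go.etcd.io/etcd/api/v3/version import path (the Storage value is made up for the example):

```go
package main

import (
	"encoding/json"
	"fmt"

	"go.etcd.io/etcd/api/v3/version"
)

func main() {
	// The helpers delegate to semver.Version's Compare/LessThan/Equal.
	fmt.Println(version.Compare(version.V3_6, version.V3_5))  // 1
	fmt.Println(version.LessThan(version.V3_5, version.V4_0)) // true
	fmt.Println(version.Equal(version.V3_6, version.V3_6))    // true

	// AllVersions is ordered ascending, so the last element is the newest
	// version this package knows about.
	fmt.Println(version.AllVersions[len(version.AllVersions)-1]) // 4.0.0

	// Versions now reports a storage version alongside server and cluster.
	v := version.Versions{
		Server:  version.Version,
		Cluster: version.Cluster(version.Version),
		Storage: "3.6.0", // illustrative value
	}
	out, _ := json.Marshal(v)
	fmt.Println(string(out))
}
```
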
diff --git a/api/version/version_test.go b/api/version/version_test.go
new file mode 100644
index 00000000000..532e7525a21
--- /dev/null
+++ b/api/version/version_test.go
@@ -0,0 +1,85 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "testing"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestVersionCompare(t *testing.T) {
+ cases := []struct {
+ name string
+ ver1 semver.Version
+ ver2 semver.Version
+ expectedCompareResult int
+ expectedLessThanResult bool
+ expectedEqualResult bool
+ }{
+ {
+ name: "ver1 should be greater than ver2",
+ ver1: V3_5,
+ ver2: V3_4,
+ expectedCompareResult: 1,
+ expectedLessThanResult: false,
+ expectedEqualResult: false,
+ },
+ {
+ name: "ver1 (4.0) should be greater than ver2",
+ ver1: V4_0,
+ ver2: V3_7,
+ expectedCompareResult: 1,
+ expectedLessThanResult: false,
+ expectedEqualResult: false,
+ },
+ {
+ name: "ver1 should be less than ver2",
+ ver1: V3_5,
+ ver2: V3_6,
+ expectedCompareResult: -1,
+ expectedLessThanResult: true,
+ expectedEqualResult: false,
+ },
+ {
+ name: "ver1 should be less than ver2 (4.0)",
+ ver1: V3_5,
+ ver2: V4_0,
+ expectedCompareResult: -1,
+ expectedLessThanResult: true,
+ expectedEqualResult: false,
+ },
+ {
+ name: "ver1 should be equal to ver2",
+ ver1: V3_5,
+ ver2: V3_5,
+ expectedCompareResult: 0,
+ expectedLessThanResult: false,
+ expectedEqualResult: true,
+ },
+ }
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ compareResult := Compare(tc.ver1, tc.ver2)
+ lessThanResult := LessThan(tc.ver1, tc.ver2)
+ equalResult := Equal(tc.ver1, tc.ver2)
+
+ assert.Equal(t, tc.expectedCompareResult, compareResult)
+ assert.Equal(t, tc.expectedLessThanResult, lessThanResult)
+ assert.Equal(t, tc.expectedEqualResult, equalResult)
+ })
+ }
+}
diff --git a/api/versionpb/version.pb.go b/api/versionpb/version.pb.go
new file mode 100644
index 00000000000..71c74eb7192
--- /dev/null
+++ b/api/versionpb/version.pb.go
@@ -0,0 +1,91 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: version.proto
+
+package versionpb
+
+import (
+ fmt "fmt"
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+ proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+var E_EtcdVersionMsg = &proto.ExtensionDesc{
+ ExtendedType: (*descriptor.MessageOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 50000,
+ Name: "versionpb.etcd_version_msg",
+ Tag: "bytes,50000,opt,name=etcd_version_msg",
+ Filename: "version.proto",
+}
+
+var E_EtcdVersionField = &proto.ExtensionDesc{
+ ExtendedType: (*descriptor.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 50001,
+ Name: "versionpb.etcd_version_field",
+ Tag: "bytes,50001,opt,name=etcd_version_field",
+ Filename: "version.proto",
+}
+
+var E_EtcdVersionEnum = &proto.ExtensionDesc{
+ ExtendedType: (*descriptor.EnumOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 50002,
+ Name: "versionpb.etcd_version_enum",
+ Tag: "bytes,50002,opt,name=etcd_version_enum",
+ Filename: "version.proto",
+}
+
+var E_EtcdVersionEnumValue = &proto.ExtensionDesc{
+ ExtendedType: (*descriptor.EnumValueOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 50003,
+ Name: "versionpb.etcd_version_enum_value",
+ Tag: "bytes,50003,opt,name=etcd_version_enum_value",
+ Filename: "version.proto",
+}
+
+func init() {
+ proto.RegisterExtension(E_EtcdVersionMsg)
+ proto.RegisterExtension(E_EtcdVersionField)
+ proto.RegisterExtension(E_EtcdVersionEnum)
+ proto.RegisterExtension(E_EtcdVersionEnumValue)
+}
+
+func init() { proto.RegisterFile("version.proto", fileDescriptor_7d2c07d79758f814) }
+
+var fileDescriptor_7d2c07d79758f814 = []byte{
+ // 284 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xd1, 0xc1, 0x4a, 0xc3, 0x30,
+ 0x18, 0x07, 0x70, 0x83, 0x20, 0x2c, 0xa0, 0xce, 0x30, 0x50, 0x86, 0xd6, 0x7a, 0xf3, 0x94, 0x80,
+ 0xbb, 0xed, 0x28, 0xe8, 0xad, 0x2a, 0x1e, 0x76, 0x10, 0xa4, 0xb4, 0x6b, 0x16, 0x02, 0x6d, 0xbf,
+ 0xd0, 0xb4, 0x7d, 0x04, 0xd9, 0x23, 0xf8, 0x48, 0x1e, 0xa7, 0xbe, 0x80, 0xd4, 0x17, 0x91, 0xa4,
+ 0xa9, 0xac, 0xd6, 0x53, 0xfb, 0x7d, 0xdf, 0xff, 0xff, 0xeb, 0xa1, 0x78, 0xbf, 0xe6, 0x85, 0x96,
+ 0x90, 0x53, 0x55, 0x40, 0x09, 0x64, 0xe4, 0x46, 0x15, 0x4f, 0x27, 0x02, 0x04, 0xd8, 0x2d, 0x33,
+ 0x6f, 0x6d, 0x60, 0xea, 0x0b, 0x00, 0x91, 0x72, 0x66, 0xa7, 0xb8, 0x5a, 0xb1, 0x84, 0xeb, 0x65,
+ 0x21, 0x55, 0x09, 0x45, 0x9b, 0x98, 0xdf, 0xe1, 0x31, 0x2f, 0x97, 0x49, 0xe8, 0xa4, 0x30, 0xd3,
+ 0x82, 0x9c, 0xd3, 0xb6, 0x46, 0xbb, 0x1a, 0x0d, 0xb8, 0xd6, 0x91, 0xe0, 0xf7, 0xaa, 0x94, 0x90,
+ 0xeb, 0x93, 0xcd, 0xcb, 0xae, 0x8f, 0x2e, 0x47, 0x8f, 0x07, 0xa6, 0xba, 0x68, 0x9b, 0x81, 0x16,
+ 0x6b, 0x84, 0xe6, 0x0f, 0x98, 0xf4, 0xbc, 0x95, 0xe4, 0x69, 0x42, 0xce, 0x06, 0xe2, 0xad, 0xd9,
+ 0x77, 0xde, 0xbb, 0xf3, 0xc6, 0x5b, 0x9e, 0x0d, 0x18, 0x31, 0xc0, 0x47, 0x3d, 0x91, 0xe7, 0x55,
+ 0x46, 0x4e, 0x07, 0xe0, 0x4d, 0x5e, 0x65, 0x9d, 0xf7, 0xe1, 0xbc, 0xc3, 0x2d, 0xcf, 0xdc, 0x0d,
+ 0xf7, 0x8c, 0x8f, 0x07, 0x5c, 0x58, 0x47, 0x69, 0xc5, 0xc9, 0xc5, 0xbf, 0xe8, 0xc2, 0xdc, 0x3a,
+ 0xf9, 0xd3, 0xc9, 0x93, 0x3f, 0xb2, 0x0d, 0xad, 0x11, 0xba, 0xbe, 0x7a, 0x6b, 0x3c, 0xb4, 0x69,
+ 0x3c, 0xf4, 0xd5, 0x78, 0xe8, 0xf5, 0xdb, 0xdb, 0x79, 0xf2, 0x05, 0x50, 0x93, 0xa6, 0x12, 0x98,
+ 0x79, 0xb2, 0x48, 0x49, 0x56, 0xcf, 0xd8, 0xef, 0xbf, 0x8b, 0xf7, 0xec, 0xf7, 0x66, 0x3f, 0x01,
+ 0x00, 0x00, 0xff, 0xff, 0xe8, 0x02, 0x15, 0xc0, 0xde, 0x01, 0x00, 0x00,
+}
diff --git a/api/versionpb/version.proto b/api/versionpb/version.proto
new file mode 100644
index 00000000000..c81b2f58a44
--- /dev/null
+++ b/api/versionpb/version.proto
@@ -0,0 +1,30 @@
+syntax = "proto3";
+package versionpb;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/descriptor.proto";
+
+option go_package = "go.etcd.io/etcd/api/v3/versionpb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+// Indicates the etcd version that introduced the message, used to determine the minimal etcd version required to interpret a WAL that includes this message.
+extend google.protobuf.MessageOptions {
+ optional string etcd_version_msg = 50000;
+}
+
+// Indicates the etcd version that introduced the field, used to determine the minimal etcd version required to interpret a WAL that sets this field.
+extend google.protobuf.FieldOptions {
+ optional string etcd_version_field = 50001;
+}
+
+// Indicates the etcd version that introduced the enum, used to determine the minimal etcd version required to interpret a WAL that uses this enum.
+extend google.protobuf.EnumOptions {
+ optional string etcd_version_enum = 50002;
+}
+
+// Indicates the etcd version that introduced the enum value, used to determine the minimal etcd version required to interpret a WAL that sets this enum value.
+extend google.protobuf.EnumValueOptions {
+ optional string etcd_version_enum_value = 50003;
+}
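
These four extensions hang version annotations off the standard descriptor options, letting tooling walk the generated descriptors and work out the minimal etcd version needed to interpret a WAL entry (membership.proto earlier in this patch shows the message-level option in use). A small illustrative sketch that only lists the generated extension descriptors and their field numbers:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	"go.etcd.io/etcd/api/v3/versionpb"
)

func main() {
	// Each extension targets a different descriptor option type; the field
	// numbers match the declarations in version.proto.
	for _, ext := range []*proto.ExtensionDesc{
		versionpb.E_EtcdVersionMsg,       // google.protobuf.MessageOptions
		versionpb.E_EtcdVersionField,     // google.protobuf.FieldOptions
		versionpb.E_EtcdVersionEnum,      // google.protobuf.EnumOptions
		versionpb.E_EtcdVersionEnumValue, // google.protobuf.EnumValueOptions
	} {
		fmt.Printf("%s -> field %d\n", ext.Name, ext.Field)
	}
}
```
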
diff --git a/bill-of-materials.json b/bill-of-materials.json
index 7317583b4d1..3158356aaf4 100644
--- a/bill-of-materials.json
+++ b/bill-of-materials.json
@@ -1,64 +1,64 @@
[
{
- "project": "github.com/beorn7/perks/quantile",
+ "project": "github.com/VividCortex/ewma",
"licenses": [
{
"type": "MIT License",
- "confidence": 0.9891304347826086
+ "confidence": 1
}
]
},
{
- "project": "github.com/bgentry/speakeasy",
+ "project": "github.com/anishathalye/porcupine",
"licenses": [
{
"type": "MIT License",
- "confidence": 0.9441624365482234
+ "confidence": 1
}
]
},
{
- "project": "github.com/certifi/gocertifi",
+ "project": "github.com/beorn7/perks/quantile",
"licenses": [
{
- "type": "Mozilla Public License 2.0",
- "confidence": 1
+ "type": "MIT License",
+ "confidence": 0.9891304347826086
}
]
},
{
- "project": "github.com/cespare/xxhash/v2",
+ "project": "github.com/bgentry/speakeasy",
"licenses": [
{
"type": "MIT License",
- "confidence": 1
+ "confidence": 0.9441624365482234
}
]
},
{
- "project": "github.com/cockroachdb/datadriven",
+ "project": "github.com/cenkalti/backoff/v4",
"licenses": [
{
- "type": "Apache License 2.0",
+ "type": "MIT License",
"confidence": 1
}
]
},
{
- "project": "github.com/cockroachdb/errors",
+ "project": "github.com/cespare/xxhash/v2",
"licenses": [
{
- "type": "Apache License 2.0",
+ "type": "MIT License",
"confidence": 1
}
]
},
{
- "project": "github.com/cockroachdb/logtags",
+ "project": "github.com/cheggaaa/pb/v3",
"licenses": [
{
- "type": "Apache License 2.0",
- "confidence": 1
+ "type": "BSD 3-clause \"New\" or \"Revised\" License",
+ "confidence": 0.9916666666666667
}
]
},
@@ -80,15 +80,6 @@
}
]
},
- {
- "project": "github.com/cpuguy83/go-md2man/v2/md2man",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
{
"project": "github.com/creack/pty",
"licenses": [
@@ -117,29 +108,29 @@
]
},
{
- "project": "github.com/etcd-io/gofail/runtime",
+ "project": "github.com/fatih/color",
"licenses": [
{
- "type": "Apache License 2.0",
+ "type": "MIT License",
"confidence": 1
}
]
},
{
- "project": "github.com/form3tech-oss/jwt-go",
+ "project": "github.com/go-logr/logr",
"licenses": [
{
- "type": "MIT License",
- "confidence": 0.9891304347826086
+ "type": "Apache License 2.0",
+ "confidence": 1
}
]
},
{
- "project": "github.com/getsentry/raven-go",
+ "project": "github.com/go-logr/stdr",
"licenses": [
{
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9663865546218487
+ "type": "Apache License 2.0",
+ "confidence": 1
}
]
},
@@ -152,6 +143,15 @@
}
]
},
+ {
+ "project": "github.com/golang-jwt/jwt/v4",
+ "licenses": [
+ {
+ "type": "MIT License",
+ "confidence": 0.9891304347826086
+ }
+ ]
+ },
{
"project": "github.com/golang/groupcache/lru",
"licenses": [
@@ -179,6 +179,24 @@
}
]
},
+ {
+ "project": "github.com/google/go-cmp/cmp",
+ "licenses": [
+ {
+ "type": "BSD 3-clause \"New\" or \"Revised\" License",
+ "confidence": 0.9663865546218487
+ }
+ ]
+ },
+ {
+ "project": "github.com/google/uuid",
+ "licenses": [
+ {
+ "type": "BSD 3-clause \"New\" or \"Revised\" License",
+ "confidence": 0.9663865546218487
+ }
+ ]
+ },
{
"project": "github.com/gorilla/websocket",
"licenses": [
@@ -207,7 +225,7 @@
]
},
{
- "project": "github.com/grpc-ecosystem/grpc-gateway",
+ "project": "github.com/grpc-ecosystem/grpc-gateway/v2",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",
@@ -234,7 +252,25 @@
]
},
{
- "project": "github.com/json-iterator/go",
+ "project": "github.com/klauspost/compress",
+ "licenses": [
+ {
+ "type": "Apache License 2.0",
+ "confidence": 0.9376299376299376
+ }
+ ]
+ },
+ {
+ "project": "github.com/klauspost/compress/internal/snapref",
+ "licenses": [
+ {
+ "type": "BSD 3-clause \"New\" or \"Revised\" License",
+ "confidence": 0.9663865546218487
+ }
+ ]
+ },
+ {
+ "project": "github.com/klauspost/compress/zstd/internal/xxhash",
"licenses": [
{
"type": "MIT License",
@@ -243,7 +279,7 @@
]
},
{
- "project": "github.com/mattn/go-runewidth",
+ "project": "github.com/mattn/go-colorable",
"licenses": [
{
"type": "MIT License",
@@ -252,29 +288,29 @@
]
},
{
- "project": "github.com/matttproud/golang_protobuf_extensions/pbutil",
+ "project": "github.com/mattn/go-isatty",
"licenses": [
{
- "type": "Apache License 2.0",
- "confidence": 1
+ "type": "MIT License",
+ "confidence": 0.9587628865979382
}
]
},
{
- "project": "github.com/modern-go/concurrent",
+ "project": "github.com/mattn/go-runewidth",
"licenses": [
{
- "type": "Apache License 2.0",
+ "type": "MIT License",
"confidence": 1
}
]
},
{
- "project": "github.com/modern-go/reflect2",
+ "project": "github.com/munnerz/goautoneg",
"licenses": [
{
- "type": "Apache License 2.0",
- "confidence": 1
+ "type": "BSD 3-clause \"New\" or \"Revised\" License",
+ "confidence": 0.9794238683127572
}
]
},
@@ -288,20 +324,20 @@
]
},
{
- "project": "github.com/pkg/errors",
+ "project": "github.com/pmezard/go-difflib/difflib",
"licenses": [
{
- "type": "BSD 2-clause \"Simplified\" License",
- "confidence": 1
+ "type": "BSD 3-clause \"New\" or \"Revised\" License",
+ "confidence": 0.9830508474576272
}
]
},
{
- "project": "github.com/pmezard/go-difflib/difflib",
+ "project": "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9830508474576272
+ "confidence": 0.9663865546218487
}
]
},
@@ -342,16 +378,7 @@
]
},
{
- "project": "github.com/russross/blackfriday/v2",
- "licenses": [
- {
- "type": "BSD 2-clause \"Simplified\" License",
- "confidence": 0.9626168224299065
- }
- ]
- },
- {
- "project": "github.com/shurcooL/sanitized_anchor_name",
+ "project": "github.com/rivo/uniseg",
"licenses": [
{
"type": "MIT License",
@@ -396,7 +423,7 @@
]
},
{
- "project": "github.com/stretchr/testify/assert",
+ "project": "github.com/stretchr/testify",
"licenses": [
{
"type": "MIT License",
@@ -413,15 +440,6 @@
}
]
},
- {
- "project": "github.com/urfave/cli",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
{
"project": "github.com/xiang90/probing",
"licenses": [
@@ -504,7 +522,7 @@
]
},
{
- "project": "go.etcd.io/etcd/raft/v3",
+ "project": "go.etcd.io/etcd/server/v3",
"licenses": [
{
"type": "Apache License 2.0",
@@ -513,7 +531,7 @@
]
},
{
- "project": "go.etcd.io/etcd/server/v3",
+ "project": "go.etcd.io/etcd/tests/v3",
"licenses": [
{
"type": "Apache License 2.0",
@@ -522,7 +540,7 @@
]
},
{
- "project": "go.etcd.io/etcd/tests/v3",
+ "project": "go.etcd.io/etcd/v3",
"licenses": [
{
"type": "Apache License 2.0",
@@ -531,7 +549,7 @@
]
},
{
- "project": "go.etcd.io/etcd/v3",
+ "project": "go.etcd.io/gofail/runtime",
"licenses": [
{
"type": "Apache License 2.0",
@@ -540,7 +558,7 @@
]
},
{
- "project": "go.opentelemetry.io/contrib",
+ "project": "go.etcd.io/raft/v3",
"licenses": [
{
"type": "Apache License 2.0",
@@ -549,7 +567,7 @@
]
},
{
- "project": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+ "project": "go.opentelemetry.io/auto/sdk",
"licenses": [
{
"type": "Apache License 2.0",
@@ -558,7 +576,7 @@
]
},
{
- "project": "go.opentelemetry.io/otel",
+ "project": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
"licenses": [
{
"type": "Apache License 2.0",
@@ -567,7 +585,7 @@
]
},
{
- "project": "go.opentelemetry.io/otel/exporters/otlp",
+ "project": "go.opentelemetry.io/otel",
"licenses": [
{
"type": "Apache License 2.0",
@@ -576,7 +594,7 @@
]
},
{
- "project": "go.opentelemetry.io/otel/metric",
+ "project": "go.opentelemetry.io/otel/exporters/otlp/otlptrace",
"licenses": [
{
"type": "Apache License 2.0",
@@ -585,7 +603,7 @@
]
},
{
- "project": "go.opentelemetry.io/otel/sdk",
+ "project": "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc",
"licenses": [
{
"type": "Apache License 2.0",
@@ -594,7 +612,7 @@
]
},
{
- "project": "go.opentelemetry.io/otel/sdk/export/metric",
+ "project": "go.opentelemetry.io/otel/metric",
"licenses": [
{
"type": "Apache License 2.0",
@@ -603,7 +621,7 @@
]
},
{
- "project": "go.opentelemetry.io/otel/sdk/metric",
+ "project": "go.opentelemetry.io/otel/sdk",
"licenses": [
{
"type": "Apache License 2.0",
@@ -630,7 +648,7 @@
]
},
{
- "project": "go.uber.org/atomic",
+ "project": "go.uber.org/multierr",
"licenses": [
{
"type": "MIT License",
@@ -639,7 +657,7 @@
]
},
{
- "project": "go.uber.org/multierr",
+ "project": "go.uber.org/zap",
"licenses": [
{
"type": "MIT License",
@@ -648,16 +666,16 @@
]
},
{
- "project": "go.uber.org/zap",
+ "project": "golang.org/x/crypto",
"licenses": [
{
- "type": "MIT License",
- "confidence": 0.9891304347826086
+ "type": "BSD 3-clause \"New\" or \"Revised\" License",
+ "confidence": 0.9663865546218487
}
]
},
{
- "project": "golang.org/x/crypto",
+ "project": "golang.org/x/net",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",
@@ -666,7 +684,7 @@
]
},
{
- "project": "golang.org/x/net",
+ "project": "golang.org/x/sync/errgroup",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",
@@ -675,7 +693,7 @@
]
},
{
- "project": "golang.org/x/sys",
+ "project": "golang.org/x/sys/unix",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",
@@ -702,7 +720,7 @@
]
},
{
- "project": "google.golang.org/genproto",
+ "project": "google.golang.org/genproto/googleapis/api",
"licenses": [
{
"type": "Apache License 2.0",
@@ -711,7 +729,7 @@
]
},
{
- "project": "google.golang.org/grpc",
+ "project": "google.golang.org/genproto/googleapis/rpc",
"licenses": [
{
"type": "Apache License 2.0",
@@ -720,20 +738,20 @@
]
},
{
- "project": "google.golang.org/protobuf",
+ "project": "google.golang.org/grpc",
"licenses": [
{
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9663865546218487
+ "type": "Apache License 2.0",
+ "confidence": 1
}
]
},
{
- "project": "gopkg.in/cheggaaa/pb.v1",
+ "project": "google.golang.org/protobuf",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9916666666666667
+ "confidence": 0.9663865546218487
}
]
},
@@ -747,24 +765,20 @@
]
},
{
- "project": "gopkg.in/yaml.v2",
+ "project": "gopkg.in/yaml.v3",
"licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- },
{
"type": "MIT License",
- "confidence": 0.8975609756097561
+ "confidence": 0.7469879518072289
}
]
},
{
- "project": "gopkg.in/yaml.v3",
+ "project": "sigs.k8s.io/json",
"licenses": [
{
- "type": "MIT License",
- "confidence": 0.7469879518072289
+ "type": "Apache License 2.0",
+ "confidence": 0.9617021276595744
}
]
},
@@ -776,5 +790,18 @@
"confidence": 1
}
]
+ },
+ {
+ "project": "sigs.k8s.io/yaml/goyaml.v2",
+ "licenses": [
+ {
+ "type": "Apache License 2.0",
+ "confidence": 1
+ },
+ {
+ "type": "MIT License",
+ "confidence": 0.8975609756097561
+ }
+ ]
}
]
diff --git a/build b/build
deleted file mode 100755
index 60aa15d768d..00000000000
--- a/build
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/env bash
-
-echo -e "\\e[91mDEPRECATED!!! Use build.sh script instead.\\e[0m\\n"
-sleep 1
-
-source ./build.sh
diff --git a/build.bat b/build.bat
deleted file mode 100755
index ff9b209a705..00000000000
--- a/build.bat
+++ /dev/null
@@ -1 +0,0 @@
-powershell -ExecutionPolicy Bypass -File build.ps1
diff --git a/build.ps1 b/build.ps1
deleted file mode 100644
index d1c36ee6437..00000000000
--- a/build.ps1
+++ /dev/null
@@ -1,81 +0,0 @@
-$ORG_PATH="go.etcd.io"
-$REPO_PATH="$ORG_PATH/etcd"
-$PWD = $((Get-Item -Path ".\" -Verbose).FullName)
-$FSROOT = $((Get-Location).Drive.Name+":")
-$FSYS = $((Get-WMIObject win32_logicaldisk -filter "DeviceID = '$FSROOT'").filesystem)
-
-if ($FSYS.StartsWith("FAT","CurrentCultureIgnoreCase")) {
- echo "Error: Cannot build etcd using the $FSYS filesystem (use NTFS instead)"
- exit 1
-}
-
-# Set $Env:GO_LDFLAGS="-s" for building without symbols.
-$GO_LDFLAGS="$Env:GO_LDFLAGS -X $REPO_PATH/version.GitSHA=$GIT_SHA"
-
-# rebuild symlinks
-git ls-files -s cmd | select-string -pattern 120000 | ForEach {
- $l = $_.ToString()
- $lnkname = $l.Split(' ')[1]
- $target = "$(git log -p HEAD -- $lnkname | select -last 2 | select -first 1)"
- $target = $target.SubString(1,$target.Length-1).Replace("/","\")
- $lnkname = $lnkname.Replace("/","\")
-
- $terms = $lnkname.Split("\")
- $dirname = $terms[0..($terms.length-2)] -join "\"
- $lnkname = "$PWD\$lnkname"
- $targetAbs = "$((Get-Item -Path "$dirname\$target").FullName)"
- $targetAbs = $targetAbs.Replace("/", "\")
-
- if (test-path -pathtype container "$targetAbs") {
- if (Test-Path "$lnkname") {
- if ((Get-Item "$lnkname") -is [System.IO.DirectoryInfo]) {
- # rd so deleting junction doesn't take files with it
- cmd /c rd "$lnkname"
- }
- }
- if (Test-Path "$lnkname") {
- if (!((Get-Item "$lnkname") -is [System.IO.DirectoryInfo])) {
- cmd /c del /A /F "$lnkname"
- }
- }
- cmd /c mklink /J "$lnkname" "$targetAbs" ">NUL"
- } else {
- # Remove file with symlink data (first run)
- if (Test-Path "$lnkname") {
- cmd /c del /A /F "$lnkname"
- }
- cmd /c mklink /H "$lnkname" "$targetAbs" ">NUL"
- }
-}
-
-if (-not $env:GOPATH) {
- $orgpath="$PWD\gopath\src\" + $ORG_PATH.Replace("/", "\")
- if (Test-Path "$orgpath\etcd") {
- if ((Get-Item "$orgpath\etcd") -is [System.IO.DirectoryInfo]) {
- # rd so deleting junction doesn't take files with it
- cmd /c rd "$orgpath\etcd"
- }
- }
- if (Test-Path "$orgpath") {
- if ((Get-Item "$orgpath") -is [System.IO.DirectoryInfo]) {
- # rd so deleting junction doesn't take files with it
- cmd /c rd "$orgpath"
- }
- }
- if (Test-Path "$orgpath") {
- if (!((Get-Item "$orgpath") -is [System.IO.DirectoryInfo])) {
- # Remove file with symlink data (first run)
- cmd /c del /A /F "$orgpath"
- }
- }
- cmd /c mkdir "$orgpath"
- cmd /c mklink /J "$orgpath\etcd" "$PWD" ">NUL"
- $env:GOPATH = "$PWD\gopath"
-}
-
-# Static compilation is useful when etcd is run in a container
-$env:CGO_ENABLED = 0
-$env:GO15VENDOREXPERIMENT = 1
-$GIT_SHA="$(git rev-parse --short HEAD)"
-go build -a -installsuffix cgo -ldflags $GO_LDFLAGS -o bin\etcd.exe "$REPO_PATH"
-go build -a -installsuffix cgo -ldflags $GO_LDFLAGS -o bin\etcdctl.exe "$REPO_PATH\etcdctl"
diff --git a/build.sh b/build.sh
deleted file mode 100755
index 7a4f8670e6f..00000000000
--- a/build.sh
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/usr/bin/env bash
-
-source ./scripts/test_lib.sh
-
-GIT_SHA=$(git rev-parse --short HEAD || echo "GitNotFound")
-if [[ -n "$FAILPOINTS" ]]; then
- GIT_SHA="$GIT_SHA"-FAILPOINTS
-fi
-
-VERSION_SYMBOL="${ROOT_MODULE}/api/v3/version.GitSHA"
-
-# Set GO_LDFLAGS="-s" for building without symbols for debugging.
-# shellcheck disable=SC2206
-GO_LDFLAGS=(${GO_LDFLAGS} "-X=${VERSION_SYMBOL}=${GIT_SHA}")
-GO_BUILD_ENV=("CGO_ENABLED=0" "GO_BUILD_FLAGS=${GO_BUILD_FLAGS}" "GOOS=${GOOS}" "GOARCH=${GOARCH}")
-
-# enable/disable failpoints
-toggle_failpoints() {
- mode="$1"
- if command -v gofail >/dev/null 2>&1; then
- run gofail "$mode" server/etcdserver/ server/mvcc/backend/
- elif [[ "$mode" != "disable" ]]; then
- log_error "FAILPOINTS set but gofail not found"
- exit 1
- fi
-}
-
-toggle_failpoints_default() {
- mode="disable"
- if [[ -n "$FAILPOINTS" ]]; then mode="enable"; fi
- toggle_failpoints "$mode"
-}
-
-etcd_build() {
- out="bin"
- if [[ -n "${BINDIR}" ]]; then out="${BINDIR}"; fi
- toggle_failpoints_default
-
- run rm -f "${out}/etcd"
- (
- cd ./server
- # Static compilation is useful when etcd is run in a container. $GO_BUILD_FLAGS is OK
- # shellcheck disable=SC2086
- run env "${GO_BUILD_ENV[@]}" go build $GO_BUILD_FLAGS \
- -installsuffix=cgo \
- "-ldflags=${GO_LDFLAGS[*]}" \
- -o="../${out}/etcd" . || return 2
- ) || return 2
-
- run rm -f "${out}/etcdutl"
- # shellcheck disable=SC2086
- (
- cd ./etcdutl
- run env GO_BUILD_FLAGS="${GO_BUILD_FLAGS}" "${GO_BUILD_ENV[@]}" go build $GO_BUILD_FLAGS \
- -installsuffix=cgo \
- "-ldflags=${GO_LDFLAGS[*]}" \
- -o="../${out}/etcdutl" . || return 2
- ) || return 2
-
- run rm -f "${out}/etcdctl"
- # shellcheck disable=SC2086
- (
- cd ./etcdctl
- run env GO_BUILD_FLAGS="${GO_BUILD_FLAGS}" "${GO_BUILD_ENV[@]}" go build $GO_BUILD_FLAGS \
- -installsuffix=cgo \
- "-ldflags=${GO_LDFLAGS[*]}" \
- -o="../${out}/etcdctl" . || return 2
- ) || return 2
- # Verify whether symbol we overriden exists
- # For cross-compiling we cannot run: ${out}/etcd --version | grep -q "Git SHA: ${GIT_SHA}"
-
- # We need symbols to do this check:
- if [[ "${GO_LDFLAGS[*]}" != *"-s"* ]]; then
- go tool nm "${out}/etcd" | grep "${VERSION_SYMBOL}" > /dev/null
- if [[ "${PIPESTATUS[*]}" != "0 0" ]]; then
- log_error "FAIL: Symbol ${VERSION_SYMBOL} not found in binary: ${out}/etcd"
- return 2
- fi
- fi
-}
-
-tools_build() {
- out="bin"
- if [[ -n "${BINDIR}" ]]; then out="${BINDIR}"; fi
- tools_path="tools/benchmark
- tools/etcd-dump-db
- tools/etcd-dump-logs
- tools/local-tester/bridge"
- for tool in ${tools_path}
- do
- echo "Building" "'${tool}'"...
- run rm -f "${out}/${tool}"
- # shellcheck disable=SC2086
- run env GO_BUILD_FLAGS="${GO_BUILD_FLAGS}" CGO_ENABLED=0 go build ${GO_BUILD_FLAGS} \
- -installsuffix=cgo \
- "-ldflags='${GO_LDFLAGS[*]}'" \
- -o="${out}/${tool}" "./${tool}" || return 2
- done
- tests_build "${@}"
-}
-
-tests_build() {
- out="bin"
- if [[ -n "${BINDIR}" ]]; then out="${BINDIR}"; fi
- tools_path="
- functional/cmd/etcd-agent
- functional/cmd/etcd-proxy
- functional/cmd/etcd-runner
- functional/cmd/etcd-tester"
- (
- cd tests || exit 2
- for tool in ${tools_path}; do
- echo "Building" "'${tool}'"...
- run rm -f "../${out}/${tool}"
-
- # shellcheck disable=SC2086
- run env CGO_ENABLED=0 GO_BUILD_FLAGS="${GO_BUILD_FLAGS}" go build ${GO_BUILD_FLAGS} \
- -installsuffix=cgo \
- "-ldflags='${GO_LDFLAGS[*]}'" \
- -o="../${out}/${tool}" "./${tool}" || return 2
- done
- ) || return 2
-}
-
-toggle_failpoints_default
-
-# only build when called directly, not sourced
-if echo "$0" | grep -E "build(.sh)?$" >/dev/null; then
- if etcd_build; then
- log_success "SUCCESS: etcd_build (GOARCH=${GOARCH})"
- else
- log_error "FAIL: etcd_build (GOARCH=${GOARCH})"
- exit 2
- fi
-fi
diff --git a/client/v2/LICENSE b/client/internal/v2/LICENSE
similarity index 100%
rename from client/v2/LICENSE
rename to client/internal/v2/LICENSE
diff --git a/client/internal/v2/README.md b/client/internal/v2/README.md
new file mode 100644
index 00000000000..ff070c72db2
--- /dev/null
+++ b/client/internal/v2/README.md
@@ -0,0 +1,113 @@
+# etcd/client
+
+etcd/client is the Go client library for etcd.
+
+[![GoDoc](https://godoc.org/go.etcd.io/etcd/client?status.png)](https://godoc.org/go.etcd.io/etcd/client)
+
+For full compatibility, it is recommended to install released versions of clients using go modules.
+
+## Install
+
+```bash
+go get go.etcd.io/etcd/v3/client
+```
+
+## Usage
+
+```go
+package main
+
+import (
+ "context"
+ "log"
+ "time"
+
+ "go.etcd.io/etcd/v3/client"
+)
+
+func main() {
+ cfg := client.Config{
+ Endpoints: []string{"http://127.0.0.1:2379"},
+ Transport: client.DefaultTransport,
+ // set timeout per request to fail fast when the target endpoint is unavailable
+ HeaderTimeoutPerRequest: time.Second,
+ }
+ c, err := client.New(cfg)
+ if err != nil {
+ log.Fatal(err)
+ }
+ kapi := client.NewKeysAPI(c)
+ // set "/foo" key with "bar" value
+ log.Print("Setting '/foo' key with 'bar' value")
+ resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
+ if err != nil {
+ log.Fatal(err)
+ } else {
+ // print common key info
+ log.Printf("Set is done. Metadata is %q\n", resp)
+ }
+ // get "/foo" key's value
+ log.Print("Getting '/foo' key value")
+ resp, err = kapi.Get(context.Background(), "/foo", nil)
+ if err != nil {
+ log.Fatal(err)
+ } else {
+ // print common key info
+ log.Printf("Get is done. Metadata is %q\n", resp)
+ // print value
+ log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
+ }
+}
+```
+
+## Error Handling
+
+The etcd client might return three types of errors.
+
+- context error
+
+Each API call takes a `context` as its first parameter. A context can be canceled or have an attached deadline. If the context is canceled or reaches its deadline, the corresponding context error will be returned regardless of what internal errors the API call has already encountered.
+
+- cluster error
+
+Each API call tries to send its request to the cluster endpoints one by one until it successfully gets a response. If a request to an endpoint fails, due to exceeding the per-request timeout or connection issues, the error is added to a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned.
+
+- response error
+
+If the response received from the cluster is invalid, a plain string error will be returned. For example, it might be an invalid JSON error.
+
+Here is example code for handling client errors:
+
+```go
+cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}}
+c, err := client.New(cfg)
+if err != nil {
+ log.Fatal(err)
+}
+
+kapi := client.NewKeysAPI(c)
+resp, err := kapi.Set(ctx, "test", "bar", nil)
+if err != nil {
+ var cerr *client.ClusterError
+ if errors.Is(err, context.Canceled) {
+ // ctx is canceled by another routine
+ } else if errors.Is(err, context.DeadlineExceeded) {
+ // ctx is attached with a deadline and it exceeded
+ } else if errors.As(err, &cerr) {
+ // process (cerr.Errors)
+ } else {
+ // bad cluster endpoints, which are not etcd servers
+ }
+}
+```
+
+
+## Caveat
+
+1. etcd/client prefers to use the same endpoint as long as that endpoint continues to work well. This saves socket resources and improves efficiency for both the client and the server. This preference doesn't weaken the consistency of the data consumed by the client, because data replicated to each etcd member has already passed through the consensus process.
+
+2. etcd/client rotates round-robin through the other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all of the errors encountered.
+
+3. The default etcd/client currently cannot handle the case in which the remote server is SIGSTOPed. The TCP keepalive mechanism doesn't help here because the operating system keeps answering TCP keep-alive packets on behalf of the stopped process. Over time we'd like to improve this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention.
+
+4. etcd/client cannot detect whether a member is healthy when using watches or non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests (see the sketch below) or monitor the /health endpoint for member health information.
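+
+For example, a quorum read can be requested through `GetOptions` (a minimal sketch that reuses the client `c` and the `/foo` key from the usage example above):
+
+```go
+kapi := client.NewKeysAPI(c)
+// Quorum asks the server to serve this read through consensus instead of
+// the local, possibly stale, copy on the member that happens to answer.
+resp, err := kapi.Get(context.Background(), "/foo", &client.GetOptions{Quorum: true})
+if err != nil {
+ log.Fatal(err)
+}
+log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
+```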
diff --git a/client/v2/auth_role.go b/client/internal/v2/auth_role.go
similarity index 96%
rename from client/v2/auth_role.go
rename to client/internal/v2/auth_role.go
index b6ba7e150dc..b9ef1aae841 100644
--- a/client/v2/auth_role.go
+++ b/client/internal/v2/auth_role.go
@@ -88,7 +88,7 @@ type authRoleAPIList struct{}
func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "roles", "")
- req, _ := http.NewRequest("GET", u.String(), nil)
+ req, _ := http.NewRequest(http.MethodGet, u.String(), nil)
req.Header.Set("Content-Type", "application/json")
return req
}
@@ -135,7 +135,7 @@ func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
Role: rolename,
}
return r.addRemoveRole(ctx, &authRoleAPIAction{
- verb: "PUT",
+ verb: http.MethodPut,
name: rolename,
role: role,
})
@@ -143,7 +143,7 @@ func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error {
return r.addRemoveRole(ctx, &authRoleAPIAction{
- verb: "DELETE",
+ verb: http.MethodDelete,
name: rolename,
})
}
@@ -166,7 +166,7 @@ func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAct
func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) {
return r.modRole(ctx, &authRoleAPIAction{
- verb: "GET",
+ verb: http.MethodGet,
name: rolename,
})
}
@@ -194,7 +194,7 @@ func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, pref
},
}
return r.modRole(ctx, &authRoleAPIAction{
- verb: "PUT",
+ verb: http.MethodPut,
name: rolename,
role: role,
})
@@ -209,7 +209,7 @@ func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, pre
},
}
return r.modRole(ctx, &authRoleAPIAction{
- verb: "PUT",
+ verb: http.MethodPut,
name: rolename,
role: role,
})
diff --git a/client/v2/auth_user.go b/client/internal/v2/auth_user.go
similarity index 94%
rename from client/v2/auth_user.go
rename to client/internal/v2/auth_user.go
index 8e7e2efe833..75b636c0200 100644
--- a/client/v2/auth_user.go
+++ b/client/internal/v2/auth_user.go
@@ -23,9 +23,7 @@ import (
"path"
)
-var (
- defaultV2AuthPrefix = "/v2/auth"
-)
+var defaultV2AuthPrefix = "/v2/auth"
type User struct {
User string `json:"user"`
@@ -76,11 +74,11 @@ type httpAuthAPI struct {
}
func (s *httpAuthAPI) Enable(ctx context.Context) error {
- return s.enableDisable(ctx, &authAPIAction{"PUT"})
+ return s.enableDisable(ctx, &authAPIAction{http.MethodPut})
}
func (s *httpAuthAPI) Disable(ctx context.Context) error {
- return s.enableDisable(ctx, &authAPIAction{"DELETE"})
+ return s.enableDisable(ctx, &authAPIAction{http.MethodDelete})
}
func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error {
@@ -163,7 +161,7 @@ type authUserAPIList struct{}
func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "users", "")
- req, _ := http.NewRequest("GET", u.String(), nil)
+ req, _ := http.NewRequest(http.MethodGet, u.String(), nil)
req.Header.Set("Content-Type", "application/json")
return req
}
@@ -219,7 +217,7 @@ func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password
Password: password,
}
return u.addRemoveUser(ctx, &authUserAPIAction{
- verb: "PUT",
+ verb: http.MethodPut,
username: username,
user: user,
})
@@ -227,7 +225,7 @@ func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password
func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error {
return u.addRemoveUser(ctx, &authUserAPIAction{
- verb: "DELETE",
+ verb: http.MethodDelete,
username: username,
})
}
@@ -250,7 +248,7 @@ func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAct
func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) {
return u.modUser(ctx, &authUserAPIAction{
- verb: "GET",
+ verb: http.MethodGet,
username: username,
})
}
@@ -261,7 +259,7 @@ func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles
Grant: roles,
}
return u.modUser(ctx, &authUserAPIAction{
- verb: "PUT",
+ verb: http.MethodPut,
username: username,
user: user,
})
@@ -273,7 +271,7 @@ func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles
Revoke: roles,
}
return u.modUser(ctx, &authUserAPIAction{
- verb: "PUT",
+ verb: http.MethodPut,
username: username,
user: user,
})
@@ -285,7 +283,7 @@ func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, p
Password: password,
}
return u.modUser(ctx, &authUserAPIAction{
- verb: "PUT",
+ verb: http.MethodPut,
username: username,
user: user,
})
diff --git a/client/v2/cancelreq.go b/client/internal/v2/cancelreq.go
similarity index 81%
rename from client/v2/cancelreq.go
rename to client/internal/v2/cancelreq.go
index 76d1f040198..23e5f89f6b2 100644
--- a/client/v2/cancelreq.go
+++ b/client/internal/v2/cancelreq.go
@@ -8,7 +8,7 @@ package client
import "net/http"
-func requestCanceler(tr CancelableTransport, req *http.Request) func() {
+func requestCanceler(req *http.Request) func() {
ch := make(chan struct{})
req.Cancel = ch
diff --git a/client/internal/v2/client.go b/client/internal/v2/client.go
new file mode 100644
index 00000000000..598c2deb750
--- /dev/null
+++ b/client/internal/v2/client.go
@@ -0,0 +1,719 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "net"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/api/v3/version"
+)
+
+var (
+ ErrNoEndpoints = errors.New("client: no endpoints available")
+ ErrTooManyRedirects = errors.New("client: too many redirects")
+ ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
+ ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
+ errTooManyRedirectChecks = errors.New("client: too many redirect checks")
+
+ // oneShotCtxValue is set on a context using WithValue(&oneShotCtxValue) so
+ // that Do() will not retry a request.
+ oneShotCtxValue any
+)
+
+var DefaultRequestTimeout = 5 * time.Second
+
+var DefaultTransport CancelableTransport = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
+ TLSHandshakeTimeout: 10 * time.Second,
+}
+
+type EndpointSelectionMode int
+
+const (
+ // EndpointSelectionRandom is the default value of the 'SelectionMode'.
+ // As the name implies, the client object will pick a node from the members
+ // of the cluster in a random fashion. If the cluster has three members, A, B,
+ // and C, the client picks any node from its three members as its request
+ // destination.
+ EndpointSelectionRandom EndpointSelectionMode = iota
+
+ // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
+ // requests are sent directly to the cluster leader. This reduces
+ // forwarding roundtrips compared to making requests to etcd followers
+ // who then forward them to the cluster leader. In the event of a leader
+ // failure, however, clients configured this way cannot prioritize among
+ // the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
+ // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
+ // maintain its knowledge of current cluster state.
+ //
+ // This mode should be used with Client.AutoSync().
+ EndpointSelectionPrioritizeLeader
+)
+
+type Config struct {
+ // Endpoints defines a set of URLs (schemes, hosts and ports only)
+ // that can be used to communicate with a logical etcd cluster. For
+ // example, a three-node cluster could be provided like so:
+ //
+ // Endpoints: []string{
+ // "http://node1.example.com:2379",
+ // "http://node2.example.com:2379",
+ // "http://node3.example.com:2379",
+ // }
+ //
+ // If multiple endpoints are provided, the Client will attempt to
+ // use them all in the event that one or more of them are unusable.
+ //
+ // If Client.Sync is ever called, the Client may cache an alternate
+ // set of endpoints to continue operation.
+ Endpoints []string
+
+ // Transport is used by the Client to drive HTTP requests. If not
+ // provided, DefaultTransport will be used.
+ Transport CancelableTransport
+
+ // CheckRedirect specifies the policy for handling HTTP redirects.
+ // If CheckRedirect is not nil, the Client calls it before
+ // following an HTTP redirect. The sole argument is the number of
+ // requests that have already been made. If CheckRedirect returns
+ // an error, Client.Do will not make any further requests and return
+ // the error back to the caller.
+ //
+ // If CheckRedirect is nil, the Client uses its default policy,
+ // which is to stop after 10 consecutive redirects.
+ CheckRedirect CheckRedirectFunc
+
+ // Username specifies the user credential to add as an authorization header
+ Username string
+
+ // Password is the password for the specified user to add as an authorization header
+ // to the request.
+ Password string
+
+ // HeaderTimeoutPerRequest specifies the time limit to wait for response
+ // header in a single request made by the Client. The timeout includes
+ // connection time, any redirects, and header wait time.
+ //
+ // For a non-watch GET request, the server returns the response body immediately.
+ // For a PUT/POST/DELETE request, the server will attempt to commit the request
+ // before responding, which is expected to take `100ms + 2 * RTT`.
+ // For a watch request, the server returns the header immediately to notify the
+ // Client that the watch has started. But if the server is behind some kind of
+ // proxy, the response header may be cached at the proxy, and the Client cannot
+ // rely on this behavior.
+ //
+ // In particular, wait requests ignore this timeout.
+ //
+ // One API call may send multiple requests to different etcd servers until it
+ // succeeds. Use the context of the API call to specify the overall timeout.
+ //
+ // A HeaderTimeoutPerRequest of zero means no timeout.
+ HeaderTimeoutPerRequest time.Duration
+
+ // SelectionMode is an EndpointSelectionMode enum that specifies the
+ // policy for choosing the etcd cluster node to which requests are sent.
+ SelectionMode EndpointSelectionMode
+}
+
+func (cfg *Config) transport() CancelableTransport {
+ if cfg.Transport == nil {
+ return DefaultTransport
+ }
+ return cfg.Transport
+}
+
+func (cfg *Config) checkRedirect() CheckRedirectFunc {
+ if cfg.CheckRedirect == nil {
+ return DefaultCheckRedirect
+ }
+ return cfg.CheckRedirect
+}
+
+// CancelableTransport mimics net/http.Transport, but requires that
+// the object also support request cancellation.
+type CancelableTransport interface {
+ http.RoundTripper
+ CancelRequest(req *http.Request)
+}
+
+type CheckRedirectFunc func(via int) error
+
+// DefaultCheckRedirect follows up to 10 redirects, but no more.
+var DefaultCheckRedirect CheckRedirectFunc = func(via int) error {
+ if via > 10 {
+ return ErrTooManyRedirects
+ }
+ return nil
+}
+
+type Client interface {
+ // Sync updates the internal cache of the etcd cluster's membership.
+ Sync(context.Context) error
+
+ // AutoSync periodically calls Sync() every given interval.
+ // The recommended sync interval is 10 seconds to 1 minute, which does
+ // not add much overhead to the server and lets the client catch up with
+ // cluster changes in time.
+ //
+ // The example to use it:
+ //
+ // for {
+ // err := client.AutoSync(ctx, 10*time.Second)
+ // if err == context.DeadlineExceeded || err == context.Canceled {
+ // break
+ // }
+ // log.Print(err)
+ // }
+ AutoSync(context.Context, time.Duration) error
+
+ // Endpoints returns a copy of the current set of API endpoints used
+ // by Client to resolve HTTP requests. If Sync has ever been called,
+ // this may differ from the initial Endpoints provided in the Config.
+ Endpoints() []string
+
+ // SetEndpoints sets the set of API endpoints used by Client to resolve
+ // HTTP requests. If the given endpoints are not valid, an error will be
+ // returned.
+ SetEndpoints(eps []string) error
+
+ // GetVersion retrieves the current etcd server and cluster version
+ GetVersion(ctx context.Context) (*version.Versions, error)
+
+ httpClient
+}
+
+func New(cfg Config) (Client, error) {
+ c := &httpClusterClient{
+ clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
+ rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
+ selectionMode: cfg.SelectionMode,
+ }
+ if cfg.Username != "" {
+ c.credentials = &credentials{
+ username: cfg.Username,
+ password: cfg.Password,
+ }
+ }
+ if err := c.SetEndpoints(cfg.Endpoints); err != nil {
+ return nil, err
+ }
+ return c, nil
+}
+
+type httpClient interface {
+ Do(context.Context, httpAction) (*http.Response, []byte, error)
+}
+
+func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory {
+ return func(ep url.URL) httpClient {
+ return &redirectFollowingHTTPClient{
+ checkRedirect: cr,
+ client: &simpleHTTPClient{
+ transport: tr,
+ endpoint: ep,
+ headerTimeout: headerTimeout,
+ },
+ }
+ }
+}
+
+type credentials struct {
+ username string
+ password string
+}
+
+type httpClientFactory func(url.URL) httpClient
+
+type httpAction interface {
+ HTTPRequest(url.URL) *http.Request
+}
+
+type httpClusterClient struct {
+ clientFactory httpClientFactory
+ endpoints []url.URL
+ pinned int
+ credentials *credentials
+ sync.RWMutex
+ rand *rand.Rand
+ selectionMode EndpointSelectionMode
+}
+
+func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
+ ceps := make([]url.URL, len(eps))
+ copy(ceps, eps)
+
+ // To perform a lookup on the new endpoint list without using the current
+ // client, we'll copy it
+ clientCopy := &httpClusterClient{
+ clientFactory: c.clientFactory,
+ credentials: c.credentials,
+ rand: c.rand,
+
+ pinned: 0,
+ endpoints: ceps,
+ }
+
+ mAPI := NewMembersAPI(clientCopy)
+ leader, err := mAPI.Leader(ctx)
+ if err != nil {
+ return "", err
+ }
+ if len(leader.ClientURLs) == 0 {
+ return "", ErrNoLeaderEndpoint
+ }
+
+ return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
+}
+
+func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
+ if len(eps) == 0 {
+ return []url.URL{}, ErrNoEndpoints
+ }
+
+ neps := make([]url.URL, len(eps))
+ for i, ep := range eps {
+ u, err := url.Parse(ep)
+ if err != nil {
+ return []url.URL{}, err
+ }
+ neps[i] = *u
+ }
+ return neps, nil
+}
+
+func (c *httpClusterClient) SetEndpoints(eps []string) error {
+ neps, err := c.parseEndpoints(eps)
+ if err != nil {
+ return err
+ }
+
+ c.Lock()
+ defer c.Unlock()
+
+ c.endpoints = shuffleEndpoints(c.rand, neps)
+ // We're not doing anything for PrioritizeLeader here. This is
+ // due to not having a context meaning we can't call getLeaderEndpoint
+ // However, if you're using PrioritizeLeader, you've already been told
+ // to regularly call sync, where we do have a ctx, and can figure the
+ // leader. PrioritizeLeader is also quite a loose guarantee, so deal
+ // with it
+ c.pinned = 0
+
+ return nil
+}
+
+func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
+ action := act
+ c.RLock()
+ leps := len(c.endpoints)
+ eps := make([]url.URL, leps)
+ n := copy(eps, c.endpoints)
+ pinned := c.pinned
+
+ if c.credentials != nil {
+ action = &authedAction{
+ act: act,
+ credentials: *c.credentials,
+ }
+ }
+ c.RUnlock()
+
+ if leps == 0 {
+ return nil, nil, ErrNoEndpoints
+ }
+
+ if leps != n {
+ return nil, nil, errors.New("unable to pick endpoint: copy failed")
+ }
+
+ var resp *http.Response
+ var body []byte
+ var err error
+ cerr := &ClusterError{}
+ isOneShot := ctx.Value(&oneShotCtxValue) != nil
+
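+ // Try the pinned endpoint first and fall through to the remaining
+ // endpoints; retriable failures are collected in cerr, and a successful
+ // response re-pins the client to the endpoint that answered (see below).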
+ for i := pinned; i < leps+pinned; i++ {
+ k := i % leps
+ hc := c.clientFactory(eps[k])
+ resp, body, err = hc.Do(ctx, action)
+ if err != nil {
+ cerr.Errors = append(cerr.Errors, err)
+ if errors.Is(err, ctx.Err()) {
+ return nil, nil, ctx.Err()
+ }
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, nil, err
+ }
+ } else if resp.StatusCode/100 == 5 {
+ switch resp.StatusCode {
+ case http.StatusInternalServerError, http.StatusServiceUnavailable:
+ // TODO: make sure this is a no leader response
+ cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String()))
+ default:
+ cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
+ }
+ err = cerr.Errors[0]
+ }
+ if err != nil {
+ if !isOneShot {
+ continue
+ }
+ c.Lock()
+ c.pinned = (k + 1) % leps
+ c.Unlock()
+ return nil, nil, err
+ }
+ if k != pinned {
+ c.Lock()
+ c.pinned = k
+ c.Unlock()
+ }
+ return resp, body, nil
+ }
+
+ return nil, nil, cerr
+}
+
+func (c *httpClusterClient) Endpoints() []string {
+ c.RLock()
+ defer c.RUnlock()
+
+ eps := make([]string, len(c.endpoints))
+ for i, ep := range c.endpoints {
+ eps[i] = ep.String()
+ }
+
+ return eps
+}
+
+func (c *httpClusterClient) Sync(ctx context.Context) error {
+ mAPI := NewMembersAPI(c)
+ ms, err := mAPI.List(ctx)
+ if err != nil {
+ return err
+ }
+
+ var eps []string
+ for _, m := range ms {
+ eps = append(eps, m.ClientURLs...)
+ }
+
+ neps, err := c.parseEndpoints(eps)
+ if err != nil {
+ return err
+ }
+
+ npin := 0
+
+ switch c.selectionMode {
+ case EndpointSelectionRandom:
+ c.RLock()
+ eq := endpointsEqual(c.endpoints, neps)
+ c.RUnlock()
+
+ if eq {
+ return nil
+ }
+ // When the items in the endpoint list change, we choose a new pin
+ neps = shuffleEndpoints(c.rand, neps)
+ case EndpointSelectionPrioritizeLeader:
+ nle, err := c.getLeaderEndpoint(ctx, neps)
+ if err != nil {
+ return ErrNoLeaderEndpoint
+ }
+
+ for i, n := range neps {
+ if n.String() == nle {
+ npin = i
+ break
+ }
+ }
+ default:
+ return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
+ }
+
+ c.Lock()
+ defer c.Unlock()
+ c.endpoints = neps
+ c.pinned = npin
+
+ return nil
+}
+
+func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+ for {
+ err := c.Sync(ctx)
+ if err != nil {
+ return err
+ }
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-ticker.C:
+ }
+ }
+}
+
+func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
+ act := &getAction{Prefix: "/version"}
+
+ resp, body, err := c.Do(ctx, act)
+ if err != nil {
+ return nil, err
+ }
+
+ switch resp.StatusCode {
+ case http.StatusOK:
+ if len(body) == 0 {
+ return nil, ErrEmptyBody
+ }
+ var vresp version.Versions
+ if err := json.Unmarshal(body, &vresp); err != nil {
+ return nil, ErrInvalidJSON
+ }
+ return &vresp, nil
+ default:
+ var etcdErr Error
+ if err := json.Unmarshal(body, &etcdErr); err != nil {
+ return nil, ErrInvalidJSON
+ }
+ return nil, etcdErr
+ }
+}
+
+type roundTripResponse struct {
+ resp *http.Response
+ err error
+}
+
+type simpleHTTPClient struct {
+ transport CancelableTransport
+ endpoint url.URL
+ headerTimeout time.Duration
+}
+
+// ErrNoRequest indicates that the HTTPRequest object could not be found
+// or was nil. No processing could continue.
+var ErrNoRequest = errors.New("no HTTPRequest was available")
+
+func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
+ req := act.HTTPRequest(c.endpoint)
+ if req == nil {
+ return nil, nil, ErrNoRequest
+ }
+
+ if err := printcURL(req); err != nil {
+ return nil, nil, err
+ }
+
+ isWait := false
+ if req.URL != nil {
+ ws := req.URL.Query().Get("wait")
+ if len(ws) != 0 {
+ var err error
+ isWait, err = strconv.ParseBool(ws)
+ if err != nil {
+ return nil, nil, fmt.Errorf("wrong wait value %s (%w for %+v)", ws, err, req)
+ }
+ }
+ }
+
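+ // Wait (watch-style) requests deliberately skip the header timeout;
+ // all other requests race the round trip against headerTimeout below.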
+ var hctx context.Context
+ var hcancel context.CancelFunc
+ if !isWait && c.headerTimeout > 0 {
+ hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
+ } else {
+ hctx, hcancel = context.WithCancel(ctx)
+ }
+ defer hcancel()
+
+ reqcancel := requestCanceler(req)
+
+ rtchan := make(chan roundTripResponse, 1)
+ go func() {
+ resp, err := c.transport.RoundTrip(req)
+ rtchan <- roundTripResponse{resp: resp, err: err}
+ close(rtchan)
+ }()
+
+ var resp *http.Response
+ var err error
+
+ select {
+ case rtresp := <-rtchan:
+ resp, err = rtresp.resp, rtresp.err
+ case <-hctx.Done():
+ // cancel and wait for request to actually exit before continuing
+ reqcancel()
+ rtresp := <-rtchan
+ resp = rtresp.resp
+ switch {
+ case ctx.Err() != nil:
+ err = ctx.Err()
+ case hctx.Err() != nil:
+ err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
+ default:
+ panic("failed to get error from context")
+ }
+ }
+
+ // always check for resp nil-ness to deal with possible
+ // race conditions between channels above
+ defer func() {
+ if resp != nil {
+ resp.Body.Close()
+ }
+ }()
+
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var body []byte
+ done := make(chan struct{})
+ go func() {
+ body, err = io.ReadAll(resp.Body)
+ done <- struct{}{}
+ }()
+
+ select {
+ case <-ctx.Done():
+ if resp != nil {
+ resp.Body.Close()
+ }
+ <-done
+ return nil, nil, ctx.Err()
+ case <-done:
+ }
+
+ return resp, body, err
+}
+
+type authedAction struct {
+ act httpAction
+ credentials credentials
+}
+
+func (a *authedAction) HTTPRequest(url url.URL) *http.Request {
+ r := a.act.HTTPRequest(url)
+ r.SetBasicAuth(a.credentials.username, a.credentials.password)
+ return r
+}
+
+type redirectFollowingHTTPClient struct {
+ client httpClient
+ checkRedirect CheckRedirectFunc
+}
+
+func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
+ next := act
+ for i := 0; i < 100; i++ {
+ if i > 0 {
+ if err := r.checkRedirect(i); err != nil {
+ return nil, nil, err
+ }
+ }
+ resp, body, err := r.client.Do(ctx, next)
+ if err != nil {
+ return nil, nil, err
+ }
+ if resp.StatusCode/100 == 3 {
+ hdr := resp.Header.Get("Location")
+ if hdr == "" {
+ return nil, nil, errors.New("location header not set")
+ }
+ loc, err := url.Parse(hdr)
+ if err != nil {
+ return nil, nil, fmt.Errorf("location header not valid URL: %s", hdr)
+ }
+ next = &redirectedHTTPAction{
+ action: act,
+ location: *loc,
+ }
+ continue
+ }
+ return resp, body, nil
+ }
+
+ return nil, nil, errTooManyRedirectChecks
+}
+
+type redirectedHTTPAction struct {
+ action httpAction
+ location url.URL
+}
+
+func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
+ orig := r.action.HTTPRequest(ep)
+ orig.URL = &r.location
+ return orig
+}
+
+func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
+ // inside-out Fisher-Yates shuffle, copied from rand.Rand.Perm in Go <= 1.9
+ n := len(eps)
+ p := make([]int, n)
+ for i := 0; i < n; i++ {
+ j := r.Intn(i + 1)
+ p[i] = p[j]
+ p[j] = i
+ }
+ neps := make([]url.URL, n)
+ for i, k := range p {
+ neps[i] = eps[k]
+ }
+ return neps
+}
+
+func endpointsEqual(left, right []url.URL) bool {
+ if len(left) != len(right) {
+ return false
+ }
+
+ sLeft := make([]string, len(left))
+ sRight := make([]string, len(right))
+ for i, l := range left {
+ sLeft[i] = l.String()
+ }
+ for i, r := range right {
+ sRight[i] = r.String()
+ }
+
+ sort.Strings(sLeft)
+ sort.Strings(sRight)
+ for i := range sLeft {
+ if sLeft[i] != sRight[i] {
+ return false
+ }
+ }
+ return true
+}
diff --git a/client/internal/v2/client_test.go b/client/internal/v2/client_test.go
new file mode 100644
index 00000000000..19374ec9362
--- /dev/null
+++ b/client/internal/v2/client_test.go
@@ -0,0 +1,1036 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "context"
+ "errors"
+ "io"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+)
+
+type actionAssertingHTTPClient struct {
+ t *testing.T
+ num int
+ act httpAction
+
+ resp http.Response
+ body []byte
+ err error
+}
+
+func (a *actionAssertingHTTPClient) Do(_ context.Context, act httpAction) (*http.Response, []byte, error) {
+ if !reflect.DeepEqual(a.act, act) {
+ a.t.Errorf("#%d: unexpected httpAction: want=%#v got=%#v", a.num, a.act, act)
+ }
+
+ return &a.resp, a.body, a.err
+}
+
+type staticHTTPClient struct {
+ resp http.Response
+ body []byte
+ err error
+}
+
+func (s *staticHTTPClient) Do(context.Context, httpAction) (*http.Response, []byte, error) {
+ return &s.resp, s.body, s.err
+}
+
+type staticHTTPAction struct {
+ request http.Request
+}
+
+func (s *staticHTTPAction) HTTPRequest(url.URL) *http.Request {
+ return &s.request
+}
+
+type staticHTTPResponse struct {
+ resp http.Response
+ body []byte
+ err error
+}
+
+type multiStaticHTTPClient struct {
+ responses []staticHTTPResponse
+ cur int
+}
+
+func (s *multiStaticHTTPClient) Do(context.Context, httpAction) (*http.Response, []byte, error) {
+ r := s.responses[s.cur]
+ s.cur++
+ return &r.resp, r.body, r.err
+}
+
+func newStaticHTTPClientFactory(responses []staticHTTPResponse) httpClientFactory {
+ var cur int
+ return func(url.URL) httpClient {
+ r := responses[cur]
+ cur++
+ return &staticHTTPClient{resp: r.resp, body: r.body, err: r.err}
+ }
+}
+
+type fakeTransport struct {
+ respchan chan *http.Response
+ errchan chan error
+ startCancel chan struct{}
+ finishCancel chan struct{}
+}
+
+func newFakeTransport() *fakeTransport {
+ return &fakeTransport{
+ respchan: make(chan *http.Response, 1),
+ errchan: make(chan error, 1),
+ startCancel: make(chan struct{}, 1),
+ finishCancel: make(chan struct{}, 1),
+ }
+}
+
+func (t *fakeTransport) CancelRequest(*http.Request) {
+ t.startCancel <- struct{}{}
+}
+
+type fakeAction struct{}
+
+func (a *fakeAction) HTTPRequest(url.URL) *http.Request {
+ return &http.Request{}
+}
+
+func TestSimpleHTTPClientDoSuccess(t *testing.T) {
+ tr := newFakeTransport()
+ c := &simpleHTTPClient{transport: tr}
+
+ tr.respchan <- &http.Response{
+ StatusCode: http.StatusTeapot,
+ Body: io.NopCloser(strings.NewReader("foo")),
+ }
+
+ resp, body, err := c.Do(context.Background(), &fakeAction{})
+ require.NoErrorf(t, err, "incorrect error value")
+ wantCode := http.StatusTeapot
+ require.Equalf(t, wantCode, resp.StatusCode, "invalid response code: want=%d got=%d", wantCode, resp.StatusCode)
+
+ wantBody := []byte("foo")
+ require.Truef(t, reflect.DeepEqual(wantBody, body), "invalid response body: want=%q got=%q", wantBody, body)
+}
+
+func TestSimpleHTTPClientDoError(t *testing.T) {
+ tr := newFakeTransport()
+ c := &simpleHTTPClient{transport: tr}
+
+ tr.errchan <- errors.New("fixture")
+
+ _, _, err := c.Do(context.Background(), &fakeAction{})
+ assert.Errorf(t, err, "expected non-nil error, got nil")
+}
+
+type nilAction struct{}
+
+func (a *nilAction) HTTPRequest(url.URL) *http.Request {
+ return nil
+}
+
+func TestSimpleHTTPClientDoNilRequest(t *testing.T) {
+ tr := newFakeTransport()
+ c := &simpleHTTPClient{transport: tr}
+
+ tr.errchan <- errors.New("fixture")
+
+ _, _, err := c.Do(context.Background(), &nilAction{})
+ require.ErrorIsf(t, err, ErrNoRequest, "expected non-nil error, got nil")
+}
+
+func TestSimpleHTTPClientDoCancelContext(t *testing.T) {
+ tr := newFakeTransport()
+ c := &simpleHTTPClient{transport: tr}
+
+ tr.startCancel <- struct{}{}
+ tr.finishCancel <- struct{}{}
+
+ _, _, err := c.Do(context.Background(), &fakeAction{})
+ assert.Errorf(t, err, "expected non-nil error, got nil")
+}
+
+type checkableReadCloser struct {
+ io.ReadCloser
+ closed bool
+}
+
+func (c *checkableReadCloser) Close() error {
+ if !c.closed {
+ c.closed = true
+ return c.ReadCloser.Close()
+ }
+ return nil
+}
+
+func TestSimpleHTTPClientDoCancelContextResponseBodyClosed(t *testing.T) {
+ tr := newFakeTransport()
+ c := &simpleHTTPClient{transport: tr}
+
+ // create an already-cancelled context
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+
+ body := &checkableReadCloser{ReadCloser: io.NopCloser(strings.NewReader("foo"))}
+ go func() {
+ // wait until simpleHTTPClient knows the context is already timed out,
+ // and calls CancelRequest
+ testutil.WaitSchedule()
+
+ // the response is returned before the cancel takes effect
+ tr.respchan <- &http.Response{Body: body}
+ }()
+
+ _, _, err := c.Do(ctx, &fakeAction{})
+ require.Errorf(t, err, "expected non-nil error, got nil")
+
+ require.Truef(t, body.closed, "expected closed body")
+}
+
+type blockingBody struct {
+ c chan struct{}
+}
+
+func (bb *blockingBody) Read(p []byte) (n int, err error) {
+ <-bb.c
+ return 0, errors.New("closed")
+}
+
+func (bb *blockingBody) Close() error {
+ close(bb.c)
+ return nil
+}
+
+func TestSimpleHTTPClientDoCancelContextResponseBodyClosedWithBlockingBody(t *testing.T) {
+ tr := newFakeTransport()
+ c := &simpleHTTPClient{transport: tr}
+
+ ctx, cancel := context.WithCancel(context.Background())
+ body := &checkableReadCloser{ReadCloser: &blockingBody{c: make(chan struct{})}}
+ go func() {
+ tr.respchan <- &http.Response{Body: body}
+ time.Sleep(2 * time.Millisecond)
+ // cancel after the body is received
+ cancel()
+ }()
+
+ _, _, err := c.Do(ctx, &fakeAction{})
+ require.ErrorIsf(t, err, context.Canceled, "expected %+v, got %+v", context.Canceled, err)
+
+ require.Truef(t, body.closed, "expected closed body")
+}
+
+func TestSimpleHTTPClientDoCancelContextWaitForRoundTrip(t *testing.T) {
+ tr := newFakeTransport()
+ c := &simpleHTTPClient{transport: tr}
+
+ donechan := make(chan struct{})
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ c.Do(ctx, &fakeAction{})
+ close(donechan)
+ }()
+
+ // This should call CancelRequest and begin the cancellation process
+ cancel()
+
+ select {
+ case <-donechan:
+ t.Fatalf("simpleHTTPClient.Do should not have exited yet")
+ default:
+ }
+
+ tr.finishCancel <- struct{}{}
+
+ select {
+ case <-donechan:
+ // expected behavior
+ return
+ case <-time.After(time.Second):
+ t.Fatalf("simpleHTTPClient.Do did not exit within 1s")
+ }
+}
+
+func TestSimpleHTTPClientDoHeaderTimeout(t *testing.T) {
+ tr := newFakeTransport()
+ tr.finishCancel <- struct{}{}
+ c := &simpleHTTPClient{transport: tr, headerTimeout: time.Millisecond}
+
+ errc := make(chan error, 1)
+ go func() {
+ _, _, err := c.Do(context.Background(), &fakeAction{})
+ errc <- err
+ }()
+
+ select {
+ case err := <-errc:
+ require.Errorf(t, err, "expected non-nil error, got nil")
+ case <-time.After(time.Second):
+ t.Fatalf("unexpected timeout when waiting for the test to finish")
+ }
+}
+
+func TestHTTPClusterClientDo(t *testing.T) {
+ fakeErr := errors.New("fake")
+ fakeURL := url.URL{}
+ tests := []struct {
+ client *httpClusterClient
+ ctx context.Context
+
+ wantCode int
+ wantErr error
+ wantPinned int
+ }{
+ // first good response short-circuits Do
+ {
+ client: &httpClusterClient{
+ endpoints: []url.URL{fakeURL, fakeURL},
+ clientFactory: newStaticHTTPClientFactory(
+ []staticHTTPResponse{
+ {resp: http.Response{StatusCode: http.StatusTeapot}},
+ {err: fakeErr},
+ },
+ ),
+ rand: rand.New(rand.NewSource(0)),
+ },
+ wantCode: http.StatusTeapot,
+ },
+
+ // fall through to good endpoint if err is arbitrary
+ {
+ client: &httpClusterClient{
+ endpoints: []url.URL{fakeURL, fakeURL},
+ clientFactory: newStaticHTTPClientFactory(
+ []staticHTTPResponse{
+ {err: fakeErr},
+ {resp: http.Response{StatusCode: http.StatusTeapot}},
+ },
+ ),
+ rand: rand.New(rand.NewSource(0)),
+ },
+ wantCode: http.StatusTeapot,
+ wantPinned: 1,
+ },
+
+ // context.Canceled short-circuits Do
+ {
+ client: &httpClusterClient{
+ endpoints: []url.URL{fakeURL, fakeURL},
+ clientFactory: newStaticHTTPClientFactory(
+ []staticHTTPResponse{
+ {err: context.Canceled},
+ {resp: http.Response{StatusCode: http.StatusTeapot}},
+ },
+ ),
+ rand: rand.New(rand.NewSource(0)),
+ },
+ wantErr: context.Canceled,
+ },
+
+ // return err if there are no endpoints
+ {
+ client: &httpClusterClient{
+ endpoints: []url.URL{},
+ clientFactory: newHTTPClientFactory(nil, nil, 0),
+ rand: rand.New(rand.NewSource(0)),
+ },
+ wantErr: ErrNoEndpoints,
+ },
+
+ // return err if all endpoints return arbitrary errors
+ {
+ client: &httpClusterClient{
+ endpoints: []url.URL{fakeURL, fakeURL},
+ clientFactory: newStaticHTTPClientFactory(
+ []staticHTTPResponse{
+ {err: fakeErr},
+ {err: fakeErr},
+ },
+ ),
+ rand: rand.New(rand.NewSource(0)),
+ },
+ wantErr: &ClusterError{Errors: []error{fakeErr, fakeErr}},
+ },
+
+ // 500-level errors cause Do to fallthrough to next endpoint
+ {
+ client: &httpClusterClient{
+ endpoints: []url.URL{fakeURL, fakeURL},
+ clientFactory: newStaticHTTPClientFactory(
+ []staticHTTPResponse{
+ {resp: http.Response{StatusCode: http.StatusBadGateway}},
+ {resp: http.Response{StatusCode: http.StatusTeapot}},
+ },
+ ),
+ rand: rand.New(rand.NewSource(0)),
+ },
+ wantCode: http.StatusTeapot,
+ wantPinned: 1,
+ },
+
+ // 500-level errors cause one shot Do to fallthrough to next endpoint
+ {
+ client: &httpClusterClient{
+ endpoints: []url.URL{fakeURL, fakeURL},
+ clientFactory: newStaticHTTPClientFactory(
+ []staticHTTPResponse{
+ {resp: http.Response{StatusCode: http.StatusBadGateway}},
+ {resp: http.Response{StatusCode: http.StatusTeapot}},
+ },
+ ),
+ rand: rand.New(rand.NewSource(0)),
+ },
+ ctx: context.WithValue(context.Background(), &oneShotCtxValue, &oneShotCtxValue),
+ wantErr: errors.New("client: etcd member returns server error [Bad Gateway]"),
+ wantPinned: 1,
+ },
+ }
+
+ for i, tt := range tests {
+ if tt.ctx == nil {
+ tt.ctx = context.Background()
+ }
+ resp, _, err := tt.client.Do(tt.ctx, nil)
+ if (tt.wantErr == nil && !errors.Is(err, tt.wantErr)) || (tt.wantErr != nil && tt.wantErr.Error() != err.Error()) {
+ t.Errorf("#%d: got err=%v, want=%v", i, err, tt.wantErr)
+ continue
+ }
+
+ if resp == nil {
+ if tt.wantCode != 0 {
+ t.Errorf("#%d: resp is nil, want=%d", i, tt.wantCode)
+ continue
+ }
+ } else if resp.StatusCode != tt.wantCode {
+ t.Errorf("#%d: resp code=%d, want=%d", i, resp.StatusCode, tt.wantCode)
+ continue
+ }
+
+ if tt.client.pinned != tt.wantPinned {
+ t.Errorf("#%d: pinned=%d, want=%d", i, tt.client.pinned, tt.wantPinned)
+ }
+ }
+}
+
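+// TestHTTPClusterClientDoDeadlineExceedContext verifies that Do surfaces context.DeadlineExceeded
+// when the request context's deadline expires while the transport is still blocked.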
+func TestHTTPClusterClientDoDeadlineExceedContext(t *testing.T) {
+ fakeURL := url.URL{}
+ tr := newFakeTransport()
+ tr.finishCancel <- struct{}{}
+ c := &httpClusterClient{
+ clientFactory: newHTTPClientFactory(tr, DefaultCheckRedirect, 0),
+ endpoints: []url.URL{fakeURL},
+ }
+
+ errc := make(chan error, 1)
+ go func() {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
+ defer cancel()
+ _, _, err := c.Do(ctx, &fakeAction{})
+ errc <- err
+ }()
+
+ select {
+ case err := <-errc:
+ if !errors.Is(err, context.DeadlineExceeded) {
+ t.Errorf("err = %+v, want %+v", err, context.DeadlineExceeded)
+ }
+ case <-time.After(time.Second):
+ t.Fatalf("unexpected timeout when waiting for request to deadline exceed")
+ }
+}
+
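+// fakeCancelContext is a stub context that always reports itself as canceled: Done returns an
+// already-signaled channel and Err returns errFakeCancelContext.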
+type fakeCancelContext struct{}
+
+var errFakeCancelContext = errors.New("fake context canceled")
+
+func (f fakeCancelContext) Deadline() (time.Time, bool) { return time.Time{}, false }
+func (f fakeCancelContext) Done() <-chan struct{} {
+ d := make(chan struct{}, 1)
+ d <- struct{}{}
+ return d
+}
+func (f fakeCancelContext) Err() error { return errFakeCancelContext }
+func (f fakeCancelContext) Value(key any) any { return 1 }
+
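+// withTimeout mirrors the context.WithTimeout signature but ignores the timeout and returns the
+// parent unchanged, letting tests drive cancellation through fakeCancelContext rather than a timer.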
+func withTimeout(parent context.Context, _timeout time.Duration) (
+ ctx context.Context,
+ cancel context.CancelFunc,
+) {
+ ctx = parent
+ cancel = func() {
+ ctx = nil
+ }
+ return ctx, cancel
+}
+
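+// TestHTTPClusterClientDoCanceledContext verifies that Do returns the context's own error when the
+// supplied context is already canceled before the request is issued.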
+func TestHTTPClusterClientDoCanceledContext(t *testing.T) {
+ fakeURL := url.URL{}
+ tr := newFakeTransport()
+ tr.finishCancel <- struct{}{}
+ c := &httpClusterClient{
+ clientFactory: newHTTPClientFactory(tr, DefaultCheckRedirect, 0),
+ endpoints: []url.URL{fakeURL},
+ }
+
+ errc := make(chan error, 1)
+ go func() {
+ ctx, cancel := withTimeout(fakeCancelContext{}, time.Millisecond)
+ cancel()
+ _, _, err := c.Do(ctx, &fakeAction{})
+ errc <- err
+ }()
+
+ select {
+ case err := <-errc:
+ if !errors.Is(err, errFakeCancelContext) {
+ t.Errorf("err = %+v, want %+v", err, errFakeCancelContext)
+ }
+ case <-time.After(time.Second):
+ t.Fatalf("unexpected timeout when waiting for request to fake context canceled")
+ }
+}
+
+func TestRedirectedHTTPAction(t *testing.T) {
+ act := &redirectedHTTPAction{
+ action: &staticHTTPAction{
+ request: http.Request{
+ Method: http.MethodDelete,
+ URL: &url.URL{
+ Scheme: "https",
+ Host: "foo.example.com",
+ Path: "/ping",
+ },
+ },
+ },
+ location: url.URL{
+ Scheme: "https",
+ Host: "bar.example.com",
+ Path: "/pong",
+ },
+ }
+
+ want := &http.Request{
+ Method: http.MethodDelete,
+ URL: &url.URL{
+ Scheme: "https",
+ Host: "bar.example.com",
+ Path: "/pong",
+ },
+ }
+ got := act.HTTPRequest(url.URL{Scheme: "http", Host: "baz.example.com", Path: "/pang"})
+
+	require.Truef(t, reflect.DeepEqual(want, got), "HTTPRequest is %#v, want %#v", got, want)
+}
+
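+// TestRedirectFollowingHTTPClient drives redirectFollowingHTTPClient.Do through canned redirect
+// chains, covering both successful follows and the various failure modes.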
+func TestRedirectFollowingHTTPClient(t *testing.T) {
+ tests := []struct {
+ checkRedirect CheckRedirectFunc
+ client httpClient
+ wantCode int
+ wantErr error
+ }{
+ // errors bubbled up
+ {
+ checkRedirect: func(int) error { return ErrTooManyRedirects },
+ client: &multiStaticHTTPClient{
+ responses: []staticHTTPResponse{
+ {
+ err: errors.New("fail"),
+ },
+ },
+ },
+ wantErr: errors.New("fail"),
+ },
+
+ // no need to follow redirect if none given
+ {
+ checkRedirect: func(int) error { return ErrTooManyRedirects },
+ client: &multiStaticHTTPClient{
+ responses: []staticHTTPResponse{
+ {
+ resp: http.Response{
+ StatusCode: http.StatusTeapot,
+ },
+ },
+ },
+ },
+ wantCode: http.StatusTeapot,
+ },
+
+ // redirects if less than max
+ {
+ checkRedirect: func(via int) error {
+ if via >= 2 {
+ return ErrTooManyRedirects
+ }
+ return nil
+ },
+ client: &multiStaticHTTPClient{
+ responses: []staticHTTPResponse{
+ {
+ resp: http.Response{
+ StatusCode: http.StatusTemporaryRedirect,
+ Header: http.Header{"Location": []string{"http://example.com"}},
+ },
+ },
+ {
+ resp: http.Response{
+ StatusCode: http.StatusTeapot,
+ },
+ },
+ },
+ },
+ wantCode: http.StatusTeapot,
+ },
+
+		// succeed if the redirect count stays within the limit
+ {
+ checkRedirect: func(via int) error {
+ if via >= 3 {
+ return ErrTooManyRedirects
+ }
+ return nil
+ },
+ client: &multiStaticHTTPClient{
+ responses: []staticHTTPResponse{
+ {
+ resp: http.Response{
+ StatusCode: http.StatusTemporaryRedirect,
+ Header: http.Header{"Location": []string{"http://example.com"}},
+ },
+ },
+ {
+ resp: http.Response{
+ StatusCode: http.StatusTemporaryRedirect,
+ Header: http.Header{"Location": []string{"http://example.com"}},
+ },
+ },
+ {
+ resp: http.Response{
+ StatusCode: http.StatusTeapot,
+ },
+ },
+ },
+ },
+ wantCode: http.StatusTeapot,
+ },
+
+ // fail if too many redirects
+ {
+ checkRedirect: func(via int) error {
+ if via >= 2 {
+ return ErrTooManyRedirects
+ }
+ return nil
+ },
+ client: &multiStaticHTTPClient{
+ responses: []staticHTTPResponse{
+ {
+ resp: http.Response{
+ StatusCode: http.StatusTemporaryRedirect,
+ Header: http.Header{"Location": []string{"http://example.com"}},
+ },
+ },
+ {
+ resp: http.Response{
+ StatusCode: http.StatusTemporaryRedirect,
+ Header: http.Header{"Location": []string{"http://example.com"}},
+ },
+ },
+ {
+ resp: http.Response{
+ StatusCode: http.StatusTeapot,
+ },
+ },
+ },
+ },
+ wantErr: ErrTooManyRedirects,
+ },
+
+ // fail if Location header not set
+ {
+ checkRedirect: func(int) error { return ErrTooManyRedirects },
+ client: &multiStaticHTTPClient{
+ responses: []staticHTTPResponse{
+ {
+ resp: http.Response{
+ StatusCode: http.StatusTemporaryRedirect,
+ },
+ },
+ },
+ },
+ wantErr: errors.New("location header not set"),
+ },
+
+ // fail if Location header is invalid
+ {
+ checkRedirect: func(int) error { return ErrTooManyRedirects },
+ client: &multiStaticHTTPClient{
+ responses: []staticHTTPResponse{
+ {
+ resp: http.Response{
+ StatusCode: http.StatusTemporaryRedirect,
+ Header: http.Header{"Location": []string{":"}},
+ },
+ },
+ },
+ },
+ //revive:disable:error-strings
+ wantErr: errors.New("location header not valid URL: :"),
+ //revive:enable:error-strings
+ },
+
+ // fail if redirects checked way too many times
+ {
+ checkRedirect: func(int) error { return nil },
+ client: &staticHTTPClient{
+ resp: http.Response{
+ StatusCode: http.StatusTemporaryRedirect,
+ Header: http.Header{"Location": []string{"http://example.com"}},
+ },
+ },
+ wantErr: errTooManyRedirectChecks,
+ },
+ }
+
+ for i, tt := range tests {
+ client := &redirectFollowingHTTPClient{client: tt.client, checkRedirect: tt.checkRedirect}
+ resp, _, err := client.Do(context.Background(), nil)
+ if (tt.wantErr == nil && !errors.Is(err, tt.wantErr)) || (tt.wantErr != nil && tt.wantErr.Error() != err.Error()) {
+ t.Errorf("#%d: got err=%v, want=%v", i, err, tt.wantErr)
+ continue
+ }
+
+ if resp == nil {
+ if tt.wantCode != 0 {
+ t.Errorf("#%d: resp is nil, want=%d", i, tt.wantCode)
+ }
+ continue
+ }
+
+ if resp.StatusCode != tt.wantCode {
+ t.Errorf("#%d: resp code=%d, want=%d", i, resp.StatusCode, tt.wantCode)
+ continue
+ }
+ }
+}
+
+func TestDefaultCheckRedirect(t *testing.T) {
+ tests := []struct {
+ num int
+ err error
+ }{
+ {0, nil},
+ {5, nil},
+ {10, nil},
+ {11, ErrTooManyRedirects},
+ {29, ErrTooManyRedirects},
+ }
+
+ for i, tt := range tests {
+ err := DefaultCheckRedirect(tt.num)
+ if !reflect.DeepEqual(tt.err, err) {
+ t.Errorf("#%d: want=%#v got=%#v", i, tt.err, err)
+ }
+ }
+}
+
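+// TestHTTPClusterClientSync checks that Sync replaces the client's endpoint list with the member
+// list reported by the cluster, and that SetEndpoints resets it afterwards.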
+func TestHTTPClusterClientSync(t *testing.T) {
+ cf := newStaticHTTPClientFactory([]staticHTTPResponse{
+ {
+ resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
+ body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
+ },
+ })
+
+ hc := &httpClusterClient{
+ clientFactory: cf,
+ rand: rand.New(rand.NewSource(0)),
+ }
+ err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"})
+ require.NoErrorf(t, err, "unexpected error during setup")
+
+ want := []string{"http://127.0.0.1:2379"}
+ got := hc.Endpoints()
+ require.Truef(t, reflect.DeepEqual(want, got), "incorrect endpoints: want=%#v got=%#v", want, got)
+
+ err = hc.Sync(context.Background())
+ require.NoErrorf(t, err, "unexpected error during Sync: %#v", err)
+
+ want = []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002", "http://127.0.0.1:4003"}
+ got = hc.Endpoints()
+ sort.Strings(got)
+ require.Truef(t, reflect.DeepEqual(want, got), "incorrect endpoints post-Sync: want=%#v got=%#v", want, got)
+
+ err = hc.SetEndpoints([]string{"http://127.0.0.1:4009"})
+ require.NoErrorf(t, err, "unexpected error during reset: %#v", err)
+
+ want = []string{"http://127.0.0.1:4009"}
+ got = hc.Endpoints()
+ require.Truef(t, reflect.DeepEqual(want, got), "incorrect endpoints post-reset: want=%#v got=%#v", want, got)
+}
+
+func TestHTTPClusterClientSyncFail(t *testing.T) {
+ cf := newStaticHTTPClientFactory([]staticHTTPResponse{
+ {err: errors.New("fail")},
+ })
+
+ hc := &httpClusterClient{
+ clientFactory: cf,
+ rand: rand.New(rand.NewSource(0)),
+ }
+ err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"})
+ require.NoErrorf(t, err, "unexpected error during setup")
+
+ want := []string{"http://127.0.0.1:2379"}
+ got := hc.Endpoints()
+ require.Truef(t, reflect.DeepEqual(want, got), "incorrect endpoints: want=%#v got=%#v", want, got)
+
+ err = hc.Sync(context.Background())
+ require.Errorf(t, err, "got nil error during Sync")
+
+ got = hc.Endpoints()
+ require.Truef(t, reflect.DeepEqual(want, got), "incorrect endpoints after failed Sync: want=%#v got=%#v", want, got)
+}
+
+func TestHTTPClusterClientAutoSyncCancelContext(t *testing.T) {
+ cf := newStaticHTTPClientFactory([]staticHTTPResponse{
+ {
+ resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
+ body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
+ },
+ })
+
+ hc := &httpClusterClient{
+ clientFactory: cf,
+ rand: rand.New(rand.NewSource(0)),
+ }
+ err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"})
+ require.NoErrorf(t, err, "unexpected error during setup")
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+
+ err = hc.AutoSync(ctx, time.Hour)
+ require.ErrorIsf(t, err, context.Canceled, "incorrect error value: want=%v got=%v", context.Canceled, err)
+}
+
+func TestHTTPClusterClientAutoSyncFail(t *testing.T) {
+ cf := newStaticHTTPClientFactory([]staticHTTPResponse{
+ {err: errors.New("fail")},
+ })
+
+ hc := &httpClusterClient{
+ clientFactory: cf,
+ rand: rand.New(rand.NewSource(0)),
+ }
+ err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"})
+ require.NoErrorf(t, err, "unexpected error during setup")
+
+ err = hc.AutoSync(context.Background(), time.Hour)
+ require.Truef(t, strings.HasPrefix(err.Error(), ErrClusterUnavailable.Error()), "incorrect error value: want=%v got=%v", ErrClusterUnavailable, err)
+}
+
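+// TestHTTPClusterClientGetVersion checks that GetVersion decodes the version payload into a
+// version.Versions value.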
+func TestHTTPClusterClientGetVersion(t *testing.T) {
+ body := []byte(`{"etcdserver":"2.3.2","etcdcluster":"2.3.0"}`)
+ cf := newStaticHTTPClientFactory([]staticHTTPResponse{
+ {
+ resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Length": []string{"44"}}},
+ body: body,
+ },
+ })
+
+ hc := &httpClusterClient{
+ clientFactory: cf,
+ rand: rand.New(rand.NewSource(0)),
+ }
+ err := hc.SetEndpoints([]string{"http://127.0.0.1:4003", "http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"})
+ require.NoErrorf(t, err, "unexpected error during setup")
+
+ actual, err := hc.GetVersion(context.Background())
+ if err != nil {
+ t.Errorf("non-nil error: %#v", err)
+ }
+ expected := version.Versions{Server: "2.3.2", Cluster: "2.3.0"}
+ if !reflect.DeepEqual(&expected, actual) {
+ t.Errorf("incorrect Response: want=%#v got=%#v", expected, actual)
+ }
+}
+
+// TestHTTPClusterClientSyncPinEndpoint tests that Sync() pins the endpoint when
+// it gets exactly the same member list as before.
+func TestHTTPClusterClientSyncPinEndpoint(t *testing.T) {
+ cf := newStaticHTTPClientFactory([]staticHTTPResponse{
+ {
+ resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
+ body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
+ },
+ {
+ resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
+ body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
+ },
+ {
+ resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
+ body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
+ },
+ })
+
+ hc := &httpClusterClient{
+ clientFactory: cf,
+ rand: rand.New(rand.NewSource(0)),
+ }
+ err := hc.SetEndpoints([]string{"http://127.0.0.1:4003", "http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"})
+ require.NoErrorf(t, err, "unexpected error during setup")
+ pinnedEndpoint := hc.endpoints[hc.pinned]
+
+ for i := 0; i < 3; i++ {
+ err = hc.Sync(context.Background())
+ require.NoErrorf(t, err, "#%d: unexpected error during Sync", i)
+
+ if g := hc.endpoints[hc.pinned]; g != pinnedEndpoint {
+ t.Errorf("#%d: pinned endpoint = %v, want %v", i, g, pinnedEndpoint)
+ }
+ }
+}
+
+// TestHTTPClusterClientSyncUnpinEndpoint tests that Sync() unpins the endpoint when
+// it gets a different member list than before.
+func TestHTTPClusterClientSyncUnpinEndpoint(t *testing.T) {
+ cf := newStaticHTTPClientFactory([]staticHTTPResponse{
+ {
+ resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
+ body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
+ },
+ {
+ resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
+ body: []byte(`{"members":[{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
+ },
+ {
+ resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
+ body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
+ },
+ })
+
+ hc := &httpClusterClient{
+ clientFactory: cf,
+ rand: rand.New(rand.NewSource(0)),
+ }
+ err := hc.SetEndpoints([]string{"http://127.0.0.1:4003", "http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"})
+ require.NoErrorf(t, err, "unexpected error during setup")
+ wants := []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"}
+
+ for i := 0; i < 3; i++ {
+ err = hc.Sync(context.Background())
+ require.NoErrorf(t, err, "#%d: unexpected error during Sync", i)
+
+ if g := hc.endpoints[hc.pinned]; g.String() != wants[i] {
+ t.Errorf("#%d: pinned endpoint = %v, want %v", i, g, wants[i])
+ }
+ }
+}
+
+// TestHTTPClusterClientSyncPinLeaderEndpoint tests that Sync() pins the leader
+// when the selection mode is EndpointSelectionPrioritizeLeader.
+func TestHTTPClusterClientSyncPinLeaderEndpoint(t *testing.T) {
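+	// In EndpointSelectionPrioritizeLeader mode each Sync issues two requests, so the canned
+	// responses below are consumed in pairs: a member list followed by the leader for that Sync.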
+ cf := newStaticHTTPClientFactory([]staticHTTPResponse{
+ {
+ resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
+ body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
+ },
+ {
+ resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
+ body: []byte(`{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]}`),
+ },
+ {
+ resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
+ body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
+ },
+ {
+ resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
+ body: []byte(`{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}`),
+ },
+ })
+
+ hc := &httpClusterClient{
+ clientFactory: cf,
+ rand: rand.New(rand.NewSource(0)),
+ selectionMode: EndpointSelectionPrioritizeLeader,
+ endpoints: []url.URL{{}}, // Need somewhere to pretend to send to initially
+ }
+
+ wants := []string{"http://127.0.0.1:4003", "http://127.0.0.1:4002"}
+
+ for i, want := range wants {
+ err := hc.Sync(context.Background())
+ require.NoErrorf(t, err, "#%d: unexpected error during Sync", i)
+
+ pinned := hc.endpoints[hc.pinned].String()
+ if pinned != want {
+ t.Errorf("#%d: pinned endpoint = %v, want %v", i, pinned, want)
+ }
+ }
+}
+
+func TestHTTPClusterClientResetFail(t *testing.T) {
+ tests := [][]string{
+ // need at least one endpoint
+ {},
+
+ // urls must be valid
+ {":"},
+ }
+
+ for i, tt := range tests {
+ hc := &httpClusterClient{rand: rand.New(rand.NewSource(0))}
+ err := hc.SetEndpoints(tt)
+ if err == nil {
+ t.Errorf("#%d: expected non-nil error", i)
+ }
+ }
+}
+
+func TestHTTPClusterClientResetPinRandom(t *testing.T) {
+ round := 2000
+ pinNum := 0
+ for i := 0; i < round; i++ {
+ hc := &httpClusterClient{rand: rand.New(rand.NewSource(int64(i)))}
+ err := hc.SetEndpoints([]string{"http://127.0.0.1:4001", "http://127.0.0.1:4002", "http://127.0.0.1:4003"})
+ require.NoErrorf(t, err, "#%d: reset error", i)
+ if hc.endpoints[hc.pinned].String() == "http://127.0.0.1:4001" {
+ pinNum++
+ }
+ }
+
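+	// With three endpoints and a fresh seed each round, the first endpoint should be pinned
+	// roughly a third of the time; allow 5% slack either way.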
+ min := 1.0/3.0 - 0.05
+ max := 1.0/3.0 + 0.05
+ if ratio := float64(pinNum) / float64(round); ratio > max || ratio < min {
+ t.Errorf("pinned ratio = %v, want [%v, %v]", ratio, min, max)
+ }
+}
diff --git a/client/v2/cluster_error.go b/client/internal/v2/cluster_error.go
similarity index 100%
rename from client/v2/cluster_error.go
rename to client/internal/v2/cluster_error.go
diff --git a/client/internal/v2/curl.go b/client/internal/v2/curl.go
new file mode 100644
index 00000000000..5d5dc57cafe
--- /dev/null
+++ b/client/internal/v2/curl.go
@@ -0,0 +1,68 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+)
+
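+// cURLDebug controls whether printcURL writes the curl equivalent of each request to stderr.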
+var cURLDebug = false
+
+func EnablecURLDebug() {
+ cURLDebug = true
+}
+
+func DisablecURLDebug() {
+ cURLDebug = false
+}
+
+// printcURL prints the cURL equivalent request to stderr.
+// It returns an error if the body of the request cannot
+// be read.
+// The caller MUST cancel the request if there is an error.
+func printcURL(req *http.Request) error {
+ if !cURLDebug {
+ return nil
+ }
+ var (
+ command string
+ b []byte
+ err error
+ )
+
+ if req.URL != nil {
+ command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String())
+ }
+
+ if req.Body != nil {
+ b, err = io.ReadAll(req.Body)
+ if err != nil {
+ return err
+ }
+ command += fmt.Sprintf(" -d %q", string(b))
+ }
+
+ fmt.Fprintf(os.Stderr, "cURL Command: %q\n", command)
+
+ // reset body
+ body := bytes.NewBuffer(b)
+ req.Body = io.NopCloser(body)
+
+ return nil
+}
diff --git a/client/v2/discover.go b/client/internal/v2/discover.go
similarity index 100%
rename from client/v2/discover.go
rename to client/internal/v2/discover.go
diff --git a/client/internal/v2/doc.go b/client/internal/v2/doc.go
new file mode 100644
index 00000000000..68284c20a89
--- /dev/null
+++ b/client/internal/v2/doc.go
@@ -0,0 +1,72 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package client provides bindings for the etcd APIs.
+
+Create a Config and exchange it for a Client:
+
+ import (
+ "net/http"
+ "context"
+
+ "go.etcd.io/etcd/client/v2"
+ )
+
+ cfg := client.Config{
+ Endpoints: []string{"http://127.0.0.1:2379"},
+ Transport: DefaultTransport,
+ }
+
+ c, err := client.New(cfg)
+ if err != nil {
+ // handle error
+ }
+
+Clients are safe for concurrent use by multiple goroutines.
+
+Create a KeysAPI using the Client, then use it to interact with etcd:
+
+ kAPI := client.NewKeysAPI(c)
+
+ // create a new key /foo with the value "bar"
+ _, err = kAPI.Create(context.Background(), "/foo", "bar")
+ if err != nil {
+ // handle error
+ }
+
+ // delete the newly created key only if the value is still "bar"
+ _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"})
+ if err != nil {
+ // handle error
+ }
+
+Use a custom context to set timeouts on your operations:
+
+ import "time"
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ // set a new key, ignoring its previous state
+ _, err := kAPI.Set(ctx, "/ping", "pong", nil)
+ if err != nil {
+ if err == context.DeadlineExceeded {
+ // request took longer than 5s
+ } else {
+ // handle error
+ }
+ }
+*/
+package client
diff --git a/client/v2/fake_transport_test.go b/client/internal/v2/fake_transport_test.go
similarity index 100%
rename from client/v2/fake_transport_test.go
rename to client/internal/v2/fake_transport_test.go
diff --git a/client/internal/v2/go.mod b/client/internal/v2/go.mod
new file mode 100644
index 00000000000..758ecb81ce9
--- /dev/null
+++ b/client/internal/v2/go.mod
@@ -0,0 +1,34 @@
+module go.etcd.io/etcd/client/v2
+
+go 1.23
+
+toolchain go1.23.4
+
+require (
+ github.com/stretchr/testify v1.10.0
+ go.etcd.io/etcd/api/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0
+ sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6
+)
+
+require (
+ github.com/coreos/go-semver v0.3.1 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/kr/text v0.2.0 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+)
+
+replace (
+ go.etcd.io/etcd/api/v3 => ./../../../api
+ go.etcd.io/etcd/client/pkg/v3 => ./../../pkg
+)
+
+// Bad imports sometimes cause attempts to pull that code.
+// This makes the error more explicit.
+replace (
+ go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY
+	go.etcd.io/etcd/pkg/v3 => ./FORBIDDEN_DEPENDENCY
+ go.etcd.io/etcd/tests/v3 => ./FORBIDDEN_DEPENDENCY
+ go.etcd.io/etcd/v3 => ./FORBIDDEN_DEPENDENCY
+)
diff --git a/client/internal/v2/go.sum b/client/internal/v2/go.sum
new file mode 100644
index 00000000000..f3a0fb38708
--- /dev/null
+++ b/client/internal/v2/go.sum
@@ -0,0 +1,22 @@
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
+sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
diff --git a/client/v2/keys.go b/client/internal/v2/keys.go
similarity index 97%
rename from client/v2/keys.go
rename to client/internal/v2/keys.go
index e8f16646174..87783cd30e2 100644
--- a/client/v2/keys.go
+++ b/client/internal/v2/keys.go
@@ -25,6 +25,8 @@ import (
"strings"
"time"
+ kjson "sigs.k8s.io/json"
+
"go.etcd.io/etcd/client/pkg/v3/pathutil"
)
@@ -77,9 +79,7 @@ const (
PrevNoExist = PrevExistType("false")
)
-var (
- defaultV2KeysPrefix = "/v2/keys"
-)
+var defaultV2KeysPrefix = "/v2/keys"
// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value
// API over HTTP.
@@ -457,7 +457,7 @@ func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) {
resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body)
if err != nil {
- if err == ErrEmptyBody {
+ if errors.Is(err, ErrEmptyBody) {
continue
}
return nil, err
@@ -504,7 +504,7 @@ func (g *getAction) HTTPRequest(ep url.URL) *http.Request {
params.Set("quorum", strconv.FormatBool(g.Quorum))
u.RawQuery = params.Encode()
- req, _ := http.NewRequest("GET", u.String(), nil)
+ req, _ := http.NewRequest(http.MethodGet, u.String(), nil)
return req
}
@@ -524,7 +524,7 @@ func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
params.Set("recursive", strconv.FormatBool(w.Recursive))
u.RawQuery = params.Encode()
- req, _ := http.NewRequest("GET", u.String(), nil)
+ req, _ := http.NewRequest(http.MethodGet, u.String(), nil)
return req
}
@@ -579,7 +579,7 @@ func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
u.RawQuery = params.Encode()
body := strings.NewReader(form.Encode())
- req, _ := http.NewRequest("PUT", u.String(), body)
+ req, _ := http.NewRequest(http.MethodPut, u.String(), body)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req
@@ -612,7 +612,7 @@ func (a *deleteAction) HTTPRequest(ep url.URL) *http.Request {
}
u.RawQuery = params.Encode()
- req, _ := http.NewRequest("DELETE", u.String(), nil)
+ req, _ := http.NewRequest(http.MethodDelete, u.String(), nil)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req
@@ -635,7 +635,7 @@ func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request {
}
body := strings.NewReader(form.Encode())
- req, _ := http.NewRequest("POST", u.String(), body)
+ req, _ := http.NewRequest(http.MethodPost, u.String(), body)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req
}
@@ -653,11 +653,9 @@ func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Resp
return res, err
}
-var jsonIterator = caseSensitiveJsonIterator()
-
func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) {
var res Response
- err := jsonIterator.Unmarshal(body, &res)
+ err := kjson.UnmarshalCaseSensitivePreserveInts(body, &res)
if err != nil {
return nil, ErrInvalidJSON
}
diff --git a/client/v2/keys_bench_test.go b/client/internal/v2/keys_bench_test.go
similarity index 96%
rename from client/v2/keys_bench_test.go
rename to client/internal/v2/keys_bench_test.go
index ff136033452..e408e6ce9c3 100644
--- a/client/v2/keys_bench_test.go
+++ b/client/internal/v2/keys_bench_test.go
@@ -20,6 +20,8 @@ import (
"reflect"
"strings"
"testing"
+
+ "github.com/stretchr/testify/require"
)
func createTestNode(size int) *Node {
@@ -49,13 +51,12 @@ func createTestResponse(children, size int) *Response {
}
func benchmarkResponseUnmarshalling(b *testing.B, children, size int) {
+ b.Helper()
header := http.Header{}
header.Add("X-Etcd-Index", "123456")
response := createTestResponse(children, size)
body, err := json.Marshal(response)
- if err != nil {
- b.Fatal(err)
- }
+ require.NoError(b, err)
b.ResetTimer()
newResponse := new(Response)
@@ -63,7 +64,6 @@ func benchmarkResponseUnmarshalling(b *testing.B, children, size int) {
if newResponse, err = unmarshalSuccessfulKeysResponse(header, body); err != nil {
b.Errorf("error unmarshalling response (%v)", err)
}
-
}
if !reflect.DeepEqual(response.Node, newResponse.Node) {
b.Errorf("Unexpected difference in a parsed response: \n%+v\n%+v", response, newResponse)
diff --git a/client/v2/keys_test.go b/client/internal/v2/keys_test.go
similarity index 97%
rename from client/v2/keys_test.go
rename to client/internal/v2/keys_test.go
index 34428bb0a96..1bc6927b68e 100644
--- a/client/v2/keys_test.go
+++ b/client/internal/v2/keys_test.go
@@ -18,7 +18,7 @@ import (
"context"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/url"
"reflect"
@@ -162,7 +162,7 @@ func TestGetAction(t *testing.T) {
wantURL := baseWantURL
wantURL.RawQuery = tt.wantQuery
- err := assertRequest(got, "GET", wantURL, wantHeader, nil)
+ err := assertRequest(got, http.MethodGet, wantURL, wantHeader, nil)
if err != nil {
t.Errorf("#%d: %v", i, err)
}
@@ -211,7 +211,7 @@ func TestWaitAction(t *testing.T) {
wantURL := baseWantURL
wantURL.RawQuery = tt.wantQuery
- err := assertRequest(got, "GET", wantURL, wantHeader, nil)
+ err := assertRequest(got, http.MethodGet, wantURL, wantHeader, nil)
if err != nil {
t.Errorf("#%d: unexpected error: %#v", i, err)
}
@@ -424,7 +424,7 @@ func TestSetAction(t *testing.T) {
}
got := tt.act.HTTPRequest(url.URL{Scheme: "http", Host: "example.com"})
- if err := assertRequest(*got, "PUT", u, wantHeader, []byte(tt.wantBody)); err != nil {
+ if err := assertRequest(*got, http.MethodPut, u, wantHeader, []byte(tt.wantBody)); err != nil {
t.Errorf("#%d: %v", i, err)
}
}
@@ -525,7 +525,7 @@ func TestCreateInOrderAction(t *testing.T) {
}
got := tt.act.HTTPRequest(url.URL{Scheme: "http", Host: "example.com"})
- if err := assertRequest(*got, "POST", u, wantHeader, []byte(tt.wantBody)); err != nil {
+ if err := assertRequest(*got, http.MethodPost, u, wantHeader, []byte(tt.wantBody)); err != nil {
t.Errorf("#%d: %v", i, err)
}
}
@@ -627,7 +627,7 @@ func TestDeleteAction(t *testing.T) {
}
got := tt.act.HTTPRequest(url.URL{Scheme: "http", Host: "example.com"})
- if err := assertRequest(*got, "DELETE", u, wantHeader, nil); err != nil {
+ if err := assertRequest(*got, http.MethodDelete, u, wantHeader, nil); err != nil {
t.Errorf("#%d: %v", i, err)
}
}
@@ -654,7 +654,7 @@ func assertRequest(got http.Request, wantMethod string, wantURL *url.URL, wantHe
if wantBody == nil {
return fmt.Errorf("want.Body=%v got.Body=%s", wantBody, got.Body)
}
- gotBytes, err := ioutil.ReadAll(got.Body)
+ gotBytes, err := io.ReadAll(got.Body)
if err != nil {
return err
}
@@ -830,9 +830,10 @@ func TestUnmarshalFailedKeysResponse(t *testing.T) {
func TestUnmarshalFailedKeysResponseBadJSON(t *testing.T) {
err := unmarshalFailedKeysResponse([]byte(`{"er`))
+ var cErr Error
if err == nil {
t.Errorf("got nil error")
- } else if _, ok := err.(Error); ok {
+ } else if errors.As(err, &cErr) {
t.Errorf("error is of incorrect type *Error: %#v", err)
}
}
@@ -892,7 +893,7 @@ func TestHTTPWatcherNextFail(t *testing.T) {
tests := []httpClient{
// generic HTTP client failure
&staticHTTPClient{
- err: errors.New("fail!"),
+ err: errors.New("fail"),
},
// unusable status code
@@ -994,7 +995,7 @@ func TestHTTPKeysAPIWatcherAction(t *testing.T) {
}
for i, tt := range tests {
- testError := errors.New("fail!")
+ testError := errors.New("fail")
kAPI := &httpKeysAPI{
client: &staticHTTPClient{err: testError},
}
@@ -1080,7 +1081,7 @@ func TestHTTPKeysAPISetError(t *testing.T) {
tests := []httpClient{
// generic HTTP client failure
&staticHTTPClient{
- err: errors.New("fail!"),
+ err: errors.New("fail"),
},
// unusable status code
@@ -1191,7 +1192,7 @@ func TestHTTPKeysAPIGetError(t *testing.T) {
tests := []httpClient{
// generic HTTP client failure
&staticHTTPClient{
- err: errors.New("fail!"),
+ err: errors.New("fail"),
},
// unusable status code
@@ -1310,7 +1311,7 @@ func TestHTTPKeysAPIDeleteError(t *testing.T) {
tests := []httpClient{
// generic HTTP client failure
&staticHTTPClient{
- err: errors.New("fail!"),
+ err: errors.New("fail"),
},
// unusable status code
diff --git a/client/internal/v2/main_test.go b/client/internal/v2/main_test.go
new file mode 100644
index 00000000000..d40d87357e2
--- /dev/null
+++ b/client/internal/v2/main_test.go
@@ -0,0 +1,25 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client_test
+
+import (
+ "testing"
+
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+)
+
+func TestMain(m *testing.M) {
+ testutil.MustTestMainWithLeakDetection(m)
+}
diff --git a/client/v2/members.go b/client/internal/v2/members.go
similarity index 95%
rename from client/v2/members.go
rename to client/internal/v2/members.go
index 5d638487c5c..f53c2dbb2fc 100644
--- a/client/v2/members.go
+++ b/client/internal/v2/members.go
@@ -130,7 +130,7 @@ func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) {
return nil, err
}
- return []Member(mCollection), nil
+ return mCollection, nil
}
func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) {
@@ -225,7 +225,7 @@ type membersAPIActionList struct{}
func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
- req, _ := http.NewRequest("GET", u.String(), nil)
+ req, _ := http.NewRequest(http.MethodGet, u.String(), nil)
return req
}
@@ -236,7 +236,7 @@ type membersAPIActionRemove struct {
func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
u.Path = path.Join(u.Path, d.memberID)
- req, _ := http.NewRequest("DELETE", u.String(), nil)
+ req, _ := http.NewRequest(http.MethodDelete, u.String(), nil)
return req
}
@@ -248,7 +248,7 @@ func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
b, _ := json.Marshal(&m)
- req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b))
+ req, _ := http.NewRequest(http.MethodPost, u.String(), bytes.NewReader(b))
req.Header.Set("Content-Type", "application/json")
return req
}
@@ -263,7 +263,7 @@ func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request {
m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
u.Path = path.Join(u.Path, a.memberID)
b, _ := json.Marshal(&m)
- req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b))
+ req, _ := http.NewRequest(http.MethodPut, u.String(), bytes.NewReader(b))
req.Header.Set("Content-Type", "application/json")
return req
}
@@ -282,7 +282,7 @@ type membersAPIActionLeader struct{}
func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
u.Path = path.Join(u.Path, defaultLeaderSuffix)
- req, _ := http.NewRequest("GET", u.String(), nil)
+ req, _ := http.NewRequest(http.MethodGet, u.String(), nil)
return req
}
diff --git a/client/v2/members_test.go b/client/internal/v2/members_test.go
similarity index 94%
rename from client/v2/members_test.go
rename to client/internal/v2/members_test.go
index ecea78096c8..4d3114a4909 100644
--- a/client/v2/members_test.go
+++ b/client/internal/v2/members_test.go
@@ -23,6 +23,8 @@ import (
"reflect"
"testing"
+ "github.com/stretchr/testify/require"
+
"go.etcd.io/etcd/client/pkg/v3/types"
)
@@ -37,7 +39,7 @@ func TestMembersAPIActionList(t *testing.T) {
}
got := *act.HTTPRequest(ep)
- err := assertRequest(got, "GET", wantURL, http.Header{}, nil)
+ err := assertRequest(got, http.MethodGet, wantURL, http.Header{}, nil)
if err != nil {
t.Error(err.Error())
}
@@ -63,7 +65,7 @@ func TestMembersAPIActionAdd(t *testing.T) {
wantBody := []byte(`{"peerURLs":["https://127.0.0.1:8081","http://127.0.0.1:8080"]}`)
got := *act.HTTPRequest(ep)
- err := assertRequest(got, "POST", wantURL, wantHeader, wantBody)
+ err := assertRequest(got, http.MethodPost, wantURL, wantHeader, wantBody)
if err != nil {
t.Error(err.Error())
}
@@ -90,7 +92,7 @@ func TestMembersAPIActionUpdate(t *testing.T) {
wantBody := []byte(`{"peerURLs":["https://127.0.0.1:8081","http://127.0.0.1:8080"]}`)
got := *act.HTTPRequest(ep)
- err := assertRequest(got, "PUT", wantURL, wantHeader, wantBody)
+ err := assertRequest(got, http.MethodPut, wantURL, wantHeader, wantBody)
if err != nil {
t.Error(err.Error())
}
@@ -107,7 +109,7 @@ func TestMembersAPIActionRemove(t *testing.T) {
}
got := *act.HTTPRequest(ep)
- err := assertRequest(got, "DELETE", wantURL, http.Header{}, nil)
+ err := assertRequest(got, http.MethodDelete, wantURL, http.Header{}, nil)
if err != nil {
t.Error(err.Error())
}
@@ -124,7 +126,7 @@ func TestMembersAPIActionLeader(t *testing.T) {
}
got := *act.HTTPRequest(ep)
- err := assertRequest(got, "GET", wantURL, http.Header{}, nil)
+ err := assertRequest(got, http.MethodGet, wantURL, http.Header{}, nil)
if err != nil {
t.Error(err.Error())
}
@@ -152,9 +154,7 @@ func TestV2MembersURL(t *testing.T) {
Path: "/pants/v2/members",
}
- if !reflect.DeepEqual(want, got) {
- t.Fatalf("v2MembersURL got %#v, want %#v", got, want)
- }
+ require.Truef(t, reflect.DeepEqual(want, got), "v2MembersURL got %#v, want %#v", got, want)
}
func TestMemberUnmarshal(t *testing.T) {
@@ -312,13 +312,9 @@ func TestMemberCreateRequestMarshal(t *testing.T) {
want := []byte(`{"peerURLs":["http://127.0.0.1:8081","https://127.0.0.1:8080"]}`)
got, err := json.Marshal(&req)
- if err != nil {
- t.Fatalf("Marshal returned unexpected err=%v", err)
- }
+ require.NoErrorf(t, err, "Marshal returned unexpected err")
- if !reflect.DeepEqual(want, got) {
- t.Fatalf("Failed to marshal memberCreateRequest: want=%s, got=%s", want, got)
- }
+ require.Truef(t, reflect.DeepEqual(want, got), "Failed to marshal memberCreateRequest: want=%s, got=%s", want, got)
}
func TestHTTPMembersAPIAddSuccess(t *testing.T) {
@@ -371,7 +367,7 @@ func TestHTTPMembersAPIAddError(t *testing.T) {
// generic httpClient failure
{
peerURL: okPeer,
- client: &staticHTTPClient{err: errors.New("fail!")},
+ client: &staticHTTPClient{err: errors.New("fail")},
},
// unrecognized HTTP status code
@@ -456,7 +452,7 @@ func TestHTTPMembersAPIRemoveFail(t *testing.T) {
tests := []httpClient{
// generic error
&staticHTTPClient{
- err: errors.New("fail!"),
+ err: errors.New("fail"),
},
// unexpected HTTP status code
@@ -509,7 +505,7 @@ func TestHTTPMembersAPIListSuccess(t *testing.T) {
func TestHTTPMembersAPIListError(t *testing.T) {
tests := []httpClient{
// generic httpClient failure
- &staticHTTPClient{err: errors.New("fail!")},
+ &staticHTTPClient{err: errors.New("fail")},
// unrecognized HTTP status code
&staticHTTPClient{
@@ -569,7 +565,7 @@ func TestHTTPMembersAPILeaderSuccess(t *testing.T) {
func TestHTTPMembersAPILeaderError(t *testing.T) {
tests := []httpClient{
// generic httpClient failure
- &staticHTTPClient{err: errors.New("fail!")},
+ &staticHTTPClient{err: errors.New("fail")},
// unrecognized HTTP status code
&staticHTTPClient{
diff --git a/client/internal/v2/util.go b/client/internal/v2/util.go
new file mode 100644
index 00000000000..76e4132c06c
--- /dev/null
+++ b/client/internal/v2/util.go
@@ -0,0 +1,48 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "errors"
+ "regexp"
+)
+
+var (
+ roleNotFoundRegExp *regexp.Regexp
+ userNotFoundRegExp *regexp.Regexp
+)
+
+func init() {
+ roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
+ userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
+}
+
+// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
+func IsKeyNotFound(err error) bool {
+ var cErr Error
+ return errors.As(err, &cErr) && cErr.Code == ErrorCodeKeyNotFound
+}
+
+// IsRoleNotFound returns true if the error means role not found of v2 API.
+func IsRoleNotFound(err error) bool {
+ var ae authError
+ return errors.As(err, &ae) && roleNotFoundRegExp.MatchString(ae.Message)
+}
+
+// IsUserNotFound returns true if the error means user not found of v2 API.
+func IsUserNotFound(err error) bool {
+ var ae authError
+ return errors.As(err, &ae) && userNotFoundRegExp.MatchString(ae.Message)
+}
diff --git a/client/pkg/fileutil/dir_unix.go b/client/pkg/fileutil/dir_unix.go
index ca82f765c99..42221f4b90a 100644
--- a/client/pkg/fileutil/dir_unix.go
+++ b/client/pkg/fileutil/dir_unix.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build !windows
-// +build !windows
package fileutil
@@ -21,7 +20,7 @@ import "os"
const (
// PrivateDirMode grants owner to make/remove files inside the directory.
- PrivateDirMode = 0700
+ PrivateDirMode = 0o700
)
// OpenDir opens a directory for syncing.
diff --git a/client/pkg/fileutil/dir_windows.go b/client/pkg/fileutil/dir_windows.go
index 849c63c8769..0cb2280cd86 100644
--- a/client/pkg/fileutil/dir_windows.go
+++ b/client/pkg/fileutil/dir_windows.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build windows
-// +build windows
package fileutil
@@ -24,7 +23,7 @@ import (
const (
// PrivateDirMode grants owner to make/remove files inside the directory.
- PrivateDirMode = 0777
+ PrivateDirMode = 0o777
)
// OpenDir opens a directory in windows with write access for syncing.
diff --git a/client/pkg/fileutil/filereader.go b/client/pkg/fileutil/filereader.go
new file mode 100644
index 00000000000..55248888c60
--- /dev/null
+++ b/client/pkg/fileutil/filereader.go
@@ -0,0 +1,60 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "bufio"
+ "io"
+ "io/fs"
+ "os"
+)
+
+// FileReader is a wrapper of io.Reader. It also provides file info.
+type FileReader interface {
+ io.Reader
+ FileInfo() (fs.FileInfo, error)
+}
+
+type fileReader struct {
+ *os.File
+}
+
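+// NewFileReader wraps an *os.File as a FileReader so callers can read it and still query its FileInfo.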
+func NewFileReader(f *os.File) FileReader {
+ return &fileReader{f}
+}
+
+func (fr *fileReader) FileInfo() (fs.FileInfo, error) {
+ return fr.Stat()
+}
+
+// FileBufReader is a wrapper of bufio.Reader. It also provides file info.
+type FileBufReader struct {
+ *bufio.Reader
+ fi fs.FileInfo
+}
+
+func NewFileBufReader(fr FileReader) *FileBufReader {
+ bufReader := bufio.NewReader(fr)
+ fi, err := fr.FileInfo()
+ if err != nil {
+ // This should never happen.
+ panic(err)
+ }
+ return &FileBufReader{bufReader, fi}
+}
+
+func (fbr *FileBufReader) FileInfo() fs.FileInfo {
+ return fbr.fi
+}
diff --git a/client/pkg/fileutil/filereader_test.go b/client/pkg/fileutil/filereader_test.go
new file mode 100644
index 00000000000..2f863cdcef5
--- /dev/null
+++ b/client/pkg/fileutil/filereader_test.go
@@ -0,0 +1,44 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestFileBufReader(t *testing.T) {
+ f, err := os.CreateTemp(t.TempDir(), "wal")
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ fi, err := f.Stat()
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ fbr := NewFileBufReader(NewFileReader(f))
+
+ if !strings.HasPrefix(fbr.FileInfo().Name(), "wal") {
+ t.Errorf("Unexpected file name: %s", fbr.FileInfo().Name())
+ }
+ assert.Equal(t, fi.Size(), fbr.FileInfo().Size())
+ assert.Equal(t, fi.IsDir(), fbr.FileInfo().IsDir())
+ assert.Equal(t, fi.Mode(), fbr.FileInfo().Mode())
+ assert.Equal(t, fi.ModTime(), fbr.FileInfo().ModTime())
+}
diff --git a/client/pkg/fileutil/fileutil.go b/client/pkg/fileutil/fileutil.go
index e442c3c92e8..b68c50415ee 100644
--- a/client/pkg/fileutil/fileutil.go
+++ b/client/pkg/fileutil/fileutil.go
@@ -17,16 +17,18 @@ package fileutil
import (
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"os"
"path/filepath"
"go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/verify"
)
const (
// PrivateFileMode grants owner to read/write a file.
- PrivateFileMode = 0600
+ PrivateFileMode = 0o600
)
// IsDirWriteable checks if dir is writable by writing and removing a file
@@ -36,7 +38,7 @@ func IsDirWriteable(dir string) error {
if err != nil {
return err
}
- if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
+ if err := os.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
return err
}
return os.Remove(f)
@@ -44,16 +46,13 @@ func IsDirWriteable(dir string) error {
// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
 // does not exist. TouchDirAll also ensures the given directory is writable.
-func TouchDirAll(dir string) error {
+func TouchDirAll(lg *zap.Logger, dir string) error {
+ verify.Assert(lg != nil, "nil log isn't allowed")
// If path is already a directory, MkdirAll does nothing and returns nil, so,
- // first check if dir exist with an expected permission mode.
+ // first check if dir exists with an expected permission mode.
if Exist(dir) {
err := CheckDirPermission(dir, PrivateDirMode)
if err != nil {
- lg, _ := zap.NewProduction()
- if lg == nil {
- lg = zap.NewExample()
- }
lg.Warn("check file permission", zap.Error(err))
}
} else {
@@ -70,8 +69,8 @@ func TouchDirAll(dir string) error {
// CreateDirAll is similar to TouchDirAll but returns error
// if the deepest directory was not empty.
-func CreateDirAll(dir string) error {
- err := TouchDirAll(dir)
+func CreateDirAll(lg *zap.Logger, dir string) error {
+ err := TouchDirAll(lg, dir)
if err == nil {
var ns []string
ns, err = ReadDir(dir)
@@ -126,7 +125,7 @@ func CheckDirPermission(dir string, perm os.FileMode) error {
if !Exist(dir) {
return fmt.Errorf("directory %q empty, cannot check permission", dir)
}
- //check the existing permission on the directory
+ // check the existing permission on the directory
dirInfo, err := os.Stat(dir)
if err != nil {
return err
@@ -170,3 +169,16 @@ func RemoveMatchFile(lg *zap.Logger, dir string, matchFunc func(fileName string)
}
return nil
}
+
+// ListFiles lists the files under dir for which matchFunc returns true.
+// It returns an error if the dir does not exist.
+func ListFiles(dir string, matchFunc func(fileName string) bool) ([]string, error) {
+ var files []string
+ err := filepath.Walk(dir, func(path string, info fs.FileInfo, err error) error {
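+		// Per-entry walk errors are ignored; every path accepted by matchFunc is recorded.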
+ if matchFunc(path) {
+ files = append(files, path)
+ }
+ return nil
+ })
+ return files, err
+}
diff --git a/client/pkg/fileutil/fileutil_test.go b/client/pkg/fileutil/fileutil_test.go
index 3a761ff9a7f..6abba29ca59 100644
--- a/client/pkg/fileutil/fileutil_test.go
+++ b/client/pkg/fileutil/fileutil_test.go
@@ -17,7 +17,6 @@ package fileutil
import (
"fmt"
"io"
- "io/ioutil"
"math/rand"
"os"
"os/user"
@@ -27,21 +26,15 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
)
func TestIsDirWriteable(t *testing.T) {
- tmpdir, err := ioutil.TempDir("", "")
- if err != nil {
- t.Fatalf("unexpected ioutil.TempDir error: %v", err)
- }
- defer os.RemoveAll(tmpdir)
- if err = IsDirWriteable(tmpdir); err != nil {
- t.Fatalf("unexpected IsDirWriteable error: %v", err)
- }
- if err = os.Chmod(tmpdir, 0444); err != nil {
- t.Fatalf("unexpected os.Chmod error: %v", err)
- }
+ tmpdir := t.TempDir()
+ require.NoErrorf(t, IsDirWriteable(tmpdir), "unexpected IsDirWriteable error")
+ require.NoErrorf(t, os.Chmod(tmpdir, 0o444), "unexpected os.Chmod error")
me, err := user.Current()
if err != nil {
// err can be non-nil when cross compiled
@@ -54,28 +47,18 @@ func TestIsDirWriteable(t *testing.T) {
// Chmod is not supported under windows.
t.Skipf("running as a superuser or in windows")
}
- if err := IsDirWriteable(tmpdir); err == nil {
- t.Fatalf("expected IsDirWriteable to error")
- }
+ require.Errorf(t, IsDirWriteable(tmpdir), "expected IsDirWriteable to error")
}
func TestCreateDirAll(t *testing.T) {
- tmpdir, err := ioutil.TempDir(os.TempDir(), "foo")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
tmpdir2 := filepath.Join(tmpdir, "testdir")
- if err = CreateDirAll(tmpdir2); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, CreateDirAll(zaptest.NewLogger(t), tmpdir2))
- if err = ioutil.WriteFile(filepath.Join(tmpdir2, "text.txt"), []byte("test text"), PrivateFileMode); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, os.WriteFile(filepath.Join(tmpdir2, "text.txt"), []byte("test text"), PrivateFileMode))
- if err = CreateDirAll(tmpdir2); err == nil || !strings.Contains(err.Error(), "to be empty, got") {
+ if err := CreateDirAll(zaptest.NewLogger(t), tmpdir2); err == nil || !strings.Contains(err.Error(), "to be empty, got") {
t.Fatalf("unexpected error %v", err)
}
}
@@ -83,18 +66,14 @@ func TestCreateDirAll(t *testing.T) {
func TestExist(t *testing.T) {
fdir := filepath.Join(os.TempDir(), fmt.Sprint(time.Now().UnixNano()+rand.Int63n(1000)))
os.RemoveAll(fdir)
- if err := os.Mkdir(fdir, 0666); err != nil {
+ if err := os.Mkdir(fdir, 0o666); err != nil {
t.Skip(err)
}
defer os.RemoveAll(fdir)
- if !Exist(fdir) {
- t.Fatalf("expected Exist true, got %v", Exist(fdir))
- }
+ require.Truef(t, Exist(fdir), "expected Exist true, got %v", Exist(fdir))
- f, err := ioutil.TempFile(os.TempDir(), "fileutil")
- if err != nil {
- t.Fatal(err)
- }
+ f, err := os.CreateTemp(os.TempDir(), "fileutil")
+ require.NoError(t, err)
f.Close()
if g := Exist(f.Name()); !g {
@@ -108,68 +87,43 @@ func TestExist(t *testing.T) {
}
func TestDirEmpty(t *testing.T) {
- dir, err := ioutil.TempDir(os.TempDir(), "empty_dir")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dir)
+ dir := t.TempDir()
- if !DirEmpty(dir) {
- t.Fatalf("expected DirEmpty true, got %v", DirEmpty(dir))
- }
+ require.Truef(t, DirEmpty(dir), "expected DirEmpty true, got %v", DirEmpty(dir))
- file, err := ioutil.TempFile(dir, "new_file")
- if err != nil {
- t.Fatal(err)
- }
+ file, err := os.CreateTemp(dir, "new_file")
+ require.NoError(t, err)
file.Close()
- if DirEmpty(dir) {
- t.Fatalf("expected DirEmpty false, got %v", DirEmpty(dir))
- }
- if DirEmpty(file.Name()) {
- t.Fatalf("expected DirEmpty false, got %v", DirEmpty(file.Name()))
- }
+ require.Falsef(t, DirEmpty(dir), "expected DirEmpty false, got %v", DirEmpty(dir))
+ require.Falsef(t, DirEmpty(file.Name()), "expected DirEmpty false, got %v", DirEmpty(file.Name()))
}
func TestZeroToEnd(t *testing.T) {
- f, err := ioutil.TempFile(os.TempDir(), "fileutil")
- if err != nil {
- t.Fatal(err)
- }
+ f, err := os.CreateTemp(os.TempDir(), "fileutil")
+ require.NoError(t, err)
defer os.Remove(f.Name())
defer f.Close()
// Ensure 0 size is a nop so zero-to-end on an empty file won't give EINVAL.
- if err = ZeroToEnd(f); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, ZeroToEnd(f))
b := make([]byte, 1024)
for i := range b {
b[i] = 12
}
- if _, err = f.Write(b); err != nil {
- t.Fatal(err)
- }
- if _, err = f.Seek(512, io.SeekStart); err != nil {
- t.Fatal(err)
- }
- if err = ZeroToEnd(f); err != nil {
- t.Fatal(err)
- }
+ _, err = f.Write(b)
+ require.NoError(t, err)
+ _, err = f.Seek(512, io.SeekStart)
+ require.NoError(t, err)
+ require.NoError(t, ZeroToEnd(f))
off, serr := f.Seek(0, io.SeekCurrent)
- if serr != nil {
- t.Fatal(serr)
- }
- if off != 512 {
- t.Fatalf("expected offset 512, got %d", off)
- }
+ require.NoError(t, serr)
+ require.Equalf(t, int64(512), off, "expected offset 512, got %d", off)
b = make([]byte, 512)
- if _, err = f.Read(b); err != nil {
- t.Fatal(err)
- }
+ _, err = f.Read(b)
+ require.NoError(t, err)
for i := range b {
if b[i] != 0 {
t.Errorf("expected b[%d] = 0, got %d", i, b[i])
@@ -178,35 +132,24 @@ func TestZeroToEnd(t *testing.T) {
}
func TestDirPermission(t *testing.T) {
- tmpdir, err := ioutil.TempDir(os.TempDir(), "foo")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
tmpdir2 := filepath.Join(tmpdir, "testpermission")
// create a new dir with 0700
- if err = CreateDirAll(tmpdir2); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, CreateDirAll(zaptest.NewLogger(t), tmpdir2))
// check dir permission with mode different than created dir
- if err = CheckDirPermission(tmpdir2, 0600); err == nil {
+ if err := CheckDirPermission(tmpdir2, 0o600); err == nil {
t.Errorf("expected error, got nil")
}
}
func TestRemoveMatchFile(t *testing.T) {
tmpdir := t.TempDir()
- defer os.RemoveAll(tmpdir)
- f, err := ioutil.TempFile(tmpdir, "tmp")
- if err != nil {
- t.Fatal(err)
- }
+ f, err := os.CreateTemp(tmpdir, "tmp")
+ require.NoError(t, err)
f.Close()
- f, err = ioutil.TempFile(tmpdir, "foo.tmp")
- if err != nil {
- t.Fatal(err)
- }
+ f, err = os.CreateTemp(tmpdir, "foo.tmp")
+ require.NoError(t, err)
f.Close()
err = RemoveMatchFile(zaptest.NewLogger(t), tmpdir, func(fileName string) bool {
@@ -216,17 +159,13 @@ func TestRemoveMatchFile(t *testing.T) {
t.Errorf("expected nil, got error")
}
fnames, err := ReadDir(tmpdir)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
if len(fnames) != 1 {
t.Errorf("expected exist 1 files, got %d", len(fnames))
}
- f, err = ioutil.TempFile(tmpdir, "tmp")
- if err != nil {
- t.Fatal(err)
- }
+ f, err = os.CreateTemp(tmpdir, "tmp")
+ require.NoError(t, err)
f.Close()
err = RemoveMatchFile(zaptest.NewLogger(t), tmpdir, func(fileName string) bool {
os.Remove(filepath.Join(tmpdir, fileName))
@@ -236,3 +175,12 @@ func TestRemoveMatchFile(t *testing.T) {
t.Errorf("expected error, got nil")
}
}
+
+func TestTouchDirAll(t *testing.T) {
+ tmpdir := t.TempDir()
+ assert.Panicsf(t, func() {
+ TouchDirAll(nil, tmpdir)
+ }, "expected panic with nil log")
+
+ assert.NoError(t, TouchDirAll(zaptest.NewLogger(t), tmpdir))
+}
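Note for reviewers (not part of the patch): a minimal caller sketch against the logger-aware signatures exercised above; the import path matches this module and the data directory is illustrative.

package main

import (
	"go.uber.org/zap"

	"go.etcd.io/etcd/client/pkg/v3/fileutil"
)

func main() {
	lg := zap.NewExample()
	// TouchDirAll now requires a non-nil *zap.Logger; passing nil panics (see TestTouchDirAll).
	if err := fileutil.TouchDirAll(lg, "/tmp/etcd-data"); err != nil {
		lg.Fatal("failed to create data dir", zap.Error(err))
	}
}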
diff --git a/client/pkg/fileutil/lock.go b/client/pkg/fileutil/lock.go
index 338627f43c8..dd2fa545d22 100644
--- a/client/pkg/fileutil/lock.go
+++ b/client/pkg/fileutil/lock.go
@@ -19,8 +19,6 @@ import (
"os"
)
-var (
- ErrLocked = errors.New("fileutil: file already locked")
-)
+var ErrLocked = errors.New("fileutil: file already locked")
type LockedFile struct{ *os.File }
diff --git a/client/pkg/fileutil/lock_flock.go b/client/pkg/fileutil/lock_flock.go
index dcdf226cdbf..178c987a4a3 100644
--- a/client/pkg/fileutil/lock_flock.go
+++ b/client/pkg/fileutil/lock_flock.go
@@ -13,11 +13,11 @@
// limitations under the License.
//go:build !windows && !plan9 && !solaris
-// +build !windows,!plan9,!solaris
package fileutil
import (
+ "errors"
"os"
"syscall"
)
@@ -29,7 +29,7 @@ func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, err
}
if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
f.Close()
- if err == syscall.EWOULDBLOCK {
+ if errors.Is(err, syscall.EWOULDBLOCK) {
err = ErrLocked
}
return nil, err
diff --git a/client/pkg/fileutil/lock_linux.go b/client/pkg/fileutil/lock_linux.go
index d8952cc481b..609ac397849 100644
--- a/client/pkg/fileutil/lock_linux.go
+++ b/client/pkg/fileutil/lock_linux.go
@@ -13,11 +13,11 @@
// limitations under the License.
//go:build linux
-// +build linux
package fileutil
import (
+ "errors"
"fmt"
"io"
"os"
@@ -59,13 +59,13 @@ func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
f, err := os.OpenFile(path, flag, perm)
if err != nil {
- return nil, fmt.Errorf("ofdTryLockFile failed to open %q (%v)", path, err)
+ return nil, fmt.Errorf("ofdTryLockFile failed to open %q (%w)", path, err)
}
flock := wrlck
if err = syscall.FcntlFlock(f.Fd(), unix.F_OFD_SETLK, &flock); err != nil {
f.Close()
- if err == syscall.EWOULDBLOCK {
+ if errors.Is(err, syscall.EWOULDBLOCK) {
err = ErrLocked
}
return nil, err
@@ -80,7 +80,7 @@ func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
f, err := os.OpenFile(path, flag, perm)
if err != nil {
- return nil, fmt.Errorf("ofdLockFile failed to open %q (%v)", path, err)
+ return nil, fmt.Errorf("ofdLockFile failed to open %q (%w)", path, err)
}
flock := wrlck
diff --git a/client/pkg/fileutil/lock_linux_test.go b/client/pkg/fileutil/lock_linux_test.go
index c1101c59cd0..65dd96b91f5 100644
--- a/client/pkg/fileutil/lock_linux_test.go
+++ b/client/pkg/fileutil/lock_linux_test.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build linux
-// +build linux
package fileutil
diff --git a/client/pkg/fileutil/lock_solaris.go b/client/pkg/fileutil/lock_solaris.go
index 683cc1db9c4..2e892fecc65 100644
--- a/client/pkg/fileutil/lock_solaris.go
+++ b/client/pkg/fileutil/lock_solaris.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build solaris
-// +build solaris
package fileutil
diff --git a/client/pkg/fileutil/lock_test.go b/client/pkg/fileutil/lock_test.go
index ded6051dccb..0c5da9855ad 100644
--- a/client/pkg/fileutil/lock_test.go
+++ b/client/pkg/fileutil/lock_test.go
@@ -15,40 +15,31 @@
package fileutil
import (
- "io/ioutil"
"os"
"testing"
"time"
+
+ "github.com/stretchr/testify/require"
)
func TestLockAndUnlock(t *testing.T) {
- f, err := ioutil.TempFile("", "lock")
- if err != nil {
- t.Fatal(err)
- }
+ f, err := os.CreateTemp("", "lock")
+ require.NoError(t, err)
f.Close()
defer func() {
- err = os.Remove(f.Name())
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, os.Remove(f.Name()))
}()
// lock the file
l, err := LockFile(f.Name(), os.O_WRONLY, PrivateFileMode)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
// try lock a locked file
- if _, err = TryLockFile(f.Name(), os.O_WRONLY, PrivateFileMode); err != ErrLocked {
- t.Fatal(err)
- }
+ _, err = TryLockFile(f.Name(), os.O_WRONLY, PrivateFileMode)
+ require.ErrorIs(t, err, ErrLocked)
// unlock the file
- if err = l.Close(); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, l.Close())
// try lock the unlocked file
dupl, err := TryLockFile(f.Name(), os.O_WRONLY, PrivateFileMode)
@@ -76,9 +67,7 @@ func TestLockAndUnlock(t *testing.T) {
}
// unlock
- if err = dupl.Close(); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, dupl.Close())
// the previously blocked routine should be unblocked
select {
diff --git a/client/pkg/fileutil/lock_unix.go b/client/pkg/fileutil/lock_unix.go
index d89027e1fad..05db5367410 100644
--- a/client/pkg/fileutil/lock_unix.go
+++ b/client/pkg/fileutil/lock_unix.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build !windows && !plan9 && !solaris && !linux
-// +build !windows,!plan9,!solaris,!linux
package fileutil
diff --git a/client/pkg/fileutil/lock_windows.go b/client/pkg/fileutil/lock_windows.go
index 5cbf2bc3d5e..51010bdf81c 100644
--- a/client/pkg/fileutil/lock_windows.go
+++ b/client/pkg/fileutil/lock_windows.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build windows
-// +build windows
package fileutil
@@ -22,31 +21,18 @@ import (
"fmt"
"os"
"syscall"
- "unsafe"
-)
-
-var (
- modkernel32 = syscall.NewLazyDLL("kernel32.dll")
- procLockFileEx = modkernel32.NewProc("LockFileEx")
- errLocked = errors.New("the process cannot access the file because another process has locked a portion of the file")
+ "golang.org/x/sys/windows"
)
-const (
- // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
- LOCKFILE_EXCLUSIVE_LOCK = 2
- LOCKFILE_FAIL_IMMEDIATELY = 1
-
- // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
- errLockViolation syscall.Errno = 0x21
-)
+var errLocked = errors.New("the process cannot access the file because another process has locked a portion of the file")
func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
f, err := open(path, flag, perm)
if err != nil {
return nil, err
}
- if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil {
+ if err := lockFile(windows.Handle(f.Fd()), windows.LOCKFILE_FAIL_IMMEDIATELY); err != nil {
f.Close()
return nil, err
}
@@ -58,7 +44,7 @@ func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
if err != nil {
return nil, err
}
- if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil {
+ if err := lockFile(windows.Handle(f.Fd()), 0); err != nil {
f.Close()
return nil, err
}
@@ -67,7 +53,7 @@ func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
func open(path string, flag int, perm os.FileMode) (*os.File, error) {
if path == "" {
- return nil, fmt.Errorf("cannot open empty filename")
+ return nil, errors.New("cannot open empty filename")
}
var access uint32
switch flag {
@@ -95,32 +81,17 @@ func open(path string, flag int, perm os.FileMode) (*os.File, error) {
return os.NewFile(uintptr(fd), path), nil
}
-func lockFile(fd syscall.Handle, flags uint32) error {
- var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK
- flag |= flags
- if fd == syscall.InvalidHandle {
+func lockFile(fd windows.Handle, flags uint32) error {
+ if fd == windows.InvalidHandle {
return nil
}
- err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{})
+ err := windows.LockFileEx(fd, flags|windows.LOCKFILE_EXCLUSIVE_LOCK, 0, 1, 0, &windows.Overlapped{})
if err == nil {
return nil
} else if err.Error() == errLocked.Error() {
return ErrLocked
- } else if err != errLockViolation {
+ } else if err != windows.ERROR_LOCK_VIOLATION {
return err
}
return nil
}
-
-func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
- var reserved uint32 = 0
- r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
- if r1 == 0 {
- if e1 != 0 {
- err = error(e1)
- } else {
- err = syscall.EINVAL
- }
- }
- return err
-}
diff --git a/client/pkg/fileutil/preallocate.go b/client/pkg/fileutil/preallocate.go
index c747b7cf81f..aadbff7131d 100644
--- a/client/pkg/fileutil/preallocate.go
+++ b/client/pkg/fileutil/preallocate.go
@@ -19,9 +19,9 @@ import (
"os"
)
-// Preallocate tries to allocate the space for given
-// file. This operation is only supported on linux by a
-// few filesystems (btrfs, ext4, etc.).
+// Preallocate tries to allocate the space for the given file. This
+// operation is only supported on darwin and linux by a few
+// filesystems (APFS, btrfs, ext4, etc.).
// If the operation is unsupported, no error will be returned.
// Otherwise, the error encountered will be returned.
func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error {
diff --git a/client/pkg/fileutil/preallocate_darwin.go b/client/pkg/fileutil/preallocate_darwin.go
index caab143dd30..72430ec273b 100644
--- a/client/pkg/fileutil/preallocate_darwin.go
+++ b/client/pkg/fileutil/preallocate_darwin.go
@@ -13,11 +13,11 @@
// limitations under the License.
//go:build darwin
-// +build darwin
package fileutil
import (
+ "errors"
"os"
"syscall"
@@ -40,7 +40,7 @@ func preallocFixed(f *os.File, sizeInBytes int64) error {
Length: sizeInBytes,
}
err := unix.FcntlFstore(f.Fd(), unix.F_PREALLOCATE, fstore)
- if err == nil || err == unix.ENOTSUP {
+ if err == nil || errors.Is(err, unix.ENOTSUP) {
return nil
}
diff --git a/client/pkg/fileutil/preallocate_test.go b/client/pkg/fileutil/preallocate_test.go
index c132b8510d0..8bd7410dfa2 100644
--- a/client/pkg/fileutil/preallocate_test.go
+++ b/client/pkg/fileutil/preallocate_test.go
@@ -15,32 +15,36 @@
package fileutil
import (
- "io/ioutil"
"os"
"testing"
+
+ "github.com/stretchr/testify/require"
)
func TestPreallocateExtend(t *testing.T) {
pf := func(f *os.File, sz int64) error { return Preallocate(f, sz, true) }
- tf := func(t *testing.T, f *os.File) { testPreallocateExtend(t, f, pf) }
+ tf := func(t *testing.T, f *os.File) {
+ t.Helper()
+ testPreallocateExtend(t, f, pf)
+ }
runPreallocTest(t, tf)
}
func TestPreallocateExtendTrunc(t *testing.T) {
- tf := func(t *testing.T, f *os.File) { testPreallocateExtend(t, f, preallocExtendTrunc) }
+ tf := func(t *testing.T, f *os.File) {
+ t.Helper()
+ testPreallocateExtend(t, f, preallocExtendTrunc)
+ }
runPreallocTest(t, tf)
}
func testPreallocateExtend(t *testing.T, f *os.File, pf func(*os.File, int64) error) {
+ t.Helper()
size := int64(64 * 1000)
- if err := pf(f, size); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, pf(f, size))
stat, err := f.Stat()
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
if stat.Size() != size {
t.Errorf("size = %d, want %d", stat.Size(), size)
}
@@ -48,30 +52,22 @@ func testPreallocateExtend(t *testing.T, f *os.File, pf func(*os.File, int64) er
func TestPreallocateFixed(t *testing.T) { runPreallocTest(t, testPreallocateFixed) }
func testPreallocateFixed(t *testing.T, f *os.File) {
+ t.Helper()
size := int64(64 * 1000)
- if err := Preallocate(f, size, false); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, Preallocate(f, size, false))
stat, err := f.Stat()
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
if stat.Size() != 0 {
t.Errorf("size = %d, want %d", stat.Size(), 0)
}
}
func runPreallocTest(t *testing.T, test func(*testing.T, *os.File)) {
- p, err := ioutil.TempDir(os.TempDir(), "preallocateTest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
+ t.Helper()
+ p := t.TempDir()
- f, err := ioutil.TempFile(p, "")
- if err != nil {
- t.Fatal(err)
- }
+ f, err := os.CreateTemp(p, "")
+ require.NoError(t, err)
test(t, f)
}
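Note (not part of the patch): a usage sketch of the extend vs. keep-size behavior documented in preallocate.go; the temp file and 64 KB size are illustrative.

package main

import (
	"log"
	"os"

	"go.etcd.io/etcd/client/pkg/v3/fileutil"
)

func main() {
	f, err := os.CreateTemp("", "prealloc")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	// extendFile=true grows the file to 64 KB (falling back to truncate where
	// fallocate is unsupported); extendFile=false reserves space but keeps size 0.
	if err := fileutil.Preallocate(f, 64*1024, true); err != nil {
		log.Fatal(err)
	}
}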
diff --git a/client/pkg/fileutil/preallocate_unix.go b/client/pkg/fileutil/preallocate_unix.go
index ebb8207c340..b0a8166ae14 100644
--- a/client/pkg/fileutil/preallocate_unix.go
+++ b/client/pkg/fileutil/preallocate_unix.go
@@ -13,11 +13,11 @@
// limitations under the License.
//go:build linux
-// +build linux
package fileutil
import (
+ "errors"
"os"
"syscall"
)
@@ -26,10 +26,10 @@ func preallocExtend(f *os.File, sizeInBytes int64) error {
// use mode = 0 to change size
err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes)
if err != nil {
- errno, ok := err.(syscall.Errno)
+ var errno syscall.Errno
// not supported; fallback
// fallocate EINTRs frequently in some environments; fallback
- if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) {
+ if errors.As(err, &errno) && (errno == syscall.ENOTSUP || errno == syscall.EINTR) {
return preallocExtendTrunc(f, sizeInBytes)
}
}
@@ -40,9 +40,9 @@ func preallocFixed(f *os.File, sizeInBytes int64) error {
// use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE
err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes)
if err != nil {
- errno, ok := err.(syscall.Errno)
+ var errno syscall.Errno
// treat not supported as nil error
- if ok && errno == syscall.ENOTSUP {
+ if errors.As(err, &errno) && errno == syscall.ENOTSUP {
return nil
}
}
diff --git a/client/pkg/fileutil/preallocate_unsupported.go b/client/pkg/fileutil/preallocate_unsupported.go
index 2c46dd49075..e7fd937a436 100644
--- a/client/pkg/fileutil/preallocate_unsupported.go
+++ b/client/pkg/fileutil/preallocate_unsupported.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build !linux && !darwin
-// +build !linux,!darwin
package fileutil
diff --git a/client/pkg/fileutil/purge.go b/client/pkg/fileutil/purge.go
index e8ac0ca6f58..026ea03230f 100644
--- a/client/pkg/fileutil/purge.go
+++ b/client/pkg/fileutil/purge.go
@@ -17,7 +17,6 @@ package fileutil
import (
"os"
"path/filepath"
- "sort"
"strings"
"time"
@@ -25,61 +24,74 @@ import (
)
func PurgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
- return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil)
+ return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil, true)
}
func PurgeFileWithDoneNotify(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) {
doneC := make(chan struct{})
- errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC)
+ errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC, true)
+ return doneC, errC
+}
+
+func PurgeFileWithoutFlock(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) {
+ doneC := make(chan struct{})
+ errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC, false)
return doneC, errC
}
// purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil.
// if donec is non-nil, the function closes it to notify its exit.
-func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}) <-chan error {
+func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}, flock bool) <-chan error {
if lg == nil {
lg = zap.NewNop()
}
errC := make(chan error, 1)
+ lg.Info("started to purge file",
+ zap.String("dir", dirname),
+ zap.String("suffix", suffix),
+ zap.Uint("max", max),
+ zap.Duration("interval", interval))
+
go func() {
if donec != nil {
defer close(donec)
}
for {
- fnames, err := ReadDir(dirname)
+ fnamesWithSuffix, err := readDirWithSuffix(dirname, suffix)
if err != nil {
errC <- err
return
}
- newfnames := make([]string, 0)
- for _, fname := range fnames {
- if strings.HasSuffix(fname, suffix) {
- newfnames = append(newfnames, fname)
- }
- }
- sort.Strings(newfnames)
- fnames = newfnames
- for len(newfnames) > int(max) {
- f := filepath.Join(dirname, newfnames[0])
- l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode)
- if err != nil {
- break
+ nPurged := 0
+ for nPurged < len(fnamesWithSuffix)-int(max) {
+ f := filepath.Join(dirname, fnamesWithSuffix[nPurged])
+ var l *LockedFile
+ if flock {
+ l, err = TryLockFile(f, os.O_WRONLY, PrivateFileMode)
+ if err != nil {
+ lg.Warn("failed to lock file", zap.String("path", f), zap.Error(err))
+ break
+ }
}
if err = os.Remove(f); err != nil {
+ lg.Error("failed to remove file", zap.String("path", f), zap.Error(err))
errC <- err
return
}
- if err = l.Close(); err != nil {
- lg.Warn("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err))
- errC <- err
- return
+ if flock {
+ if err = l.Close(); err != nil {
+ lg.Error("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err))
+ errC <- err
+ return
+ }
}
lg.Info("purged", zap.String("path", f))
- newfnames = newfnames[1:]
+ nPurged++
}
+
if purgec != nil {
- for i := 0; i < len(fnames)-len(newfnames); i++ {
- purgec <- fnames[i]
+ for i := 0; i < nPurged; i++ {
+ purgec <- fnamesWithSuffix[i]
}
}
select {
@@ -91,3 +103,18 @@ func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval
}()
return errC
}
+
+func readDirWithSuffix(dirname string, suffix string) ([]string, error) {
+ fnames, err := ReadDir(dirname)
+ if err != nil {
+ return nil, err
+ }
+ // filter in place (ref. https://go.dev/wiki/SliceTricks#filtering-without-allocating)
+ fnamesWithSuffix := fnames[:0]
+ for _, fname := range fnames {
+ if strings.HasSuffix(fname, suffix) {
+ fnamesWithSuffix = append(fnamesWithSuffix, fname)
+ }
+ }
+ return fnamesWithSuffix, nil
+}
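Note (not part of the patch): a sketch of the new no-flock purge entry point; the WAL directory path and retention values are illustrative, and the caller is assumed to serialize access to the directory itself.

package main

import (
	"time"

	"go.uber.org/zap"

	"go.etcd.io/etcd/client/pkg/v3/fileutil"
)

func main() {
	lg := zap.NewExample()
	stop := make(chan struct{})

	// Keep at most 5 files ending in "wal", checking every 30s, without taking
	// a per-file flock before removal.
	donec, errc := fileutil.PurgeFileWithoutFlock(lg, "/var/lib/etcd/member/wal", "wal", 5, 30*time.Second, stop)

	go func() {
		if err := <-errc; err != nil {
			lg.Error("purge loop failed", zap.Error(err))
		}
	}()

	// On shutdown: stop the loop and wait for it to exit.
	close(stop)
	<-donec
}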
diff --git a/client/pkg/fileutil/purge_test.go b/client/pkg/fileutil/purge_test.go
index f0e7d6b566f..8f2f5fd037a 100644
--- a/client/pkg/fileutil/purge_test.go
+++ b/client/pkg/fileutil/purge_test.go
@@ -16,36 +16,30 @@ package fileutil
import (
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"reflect"
"testing"
"time"
- "go.uber.org/zap"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
)
func TestPurgeFile(t *testing.T) {
- dir, err := ioutil.TempDir("", "purgefile")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dir)
+ dir := t.TempDir()
// minimal file set
for i := 0; i < 3; i++ {
f, ferr := os.Create(filepath.Join(dir, fmt.Sprintf("%d.test", i)))
- if ferr != nil {
- t.Fatal(err)
- }
+ require.NoError(t, ferr)
f.Close()
}
stop, purgec := make(chan struct{}), make(chan string, 10)
// keep 3 most recent files
- errch := purgeFile(zap.NewExample(), dir, "test", 3, time.Millisecond, stop, purgec, nil)
+ errch := purgeFile(zaptest.NewLogger(t), dir, "test", 3, time.Millisecond, stop, purgec, nil, false)
select {
case f := <-purgec:
t.Errorf("unexpected purge on %q", f)
@@ -57,7 +51,7 @@ func TestPurgeFile(t *testing.T) {
go func(n int) {
f, ferr := os.Create(filepath.Join(dir, fmt.Sprintf("%d.test", n)))
if ferr != nil {
- t.Error(err)
+ t.Error(ferr)
}
f.Close()
}(i)
@@ -73,9 +67,7 @@ func TestPurgeFile(t *testing.T) {
}
fnames, rerr := ReadDir(dir)
- if rerr != nil {
- t.Fatal(rerr)
- }
+ require.NoError(t, rerr)
wnames := []string{"7.test", "8.test", "9.test"}
if !reflect.DeepEqual(fnames, wnames) {
t.Errorf("filenames = %v, want %v", fnames, wnames)
@@ -93,30 +85,22 @@ func TestPurgeFile(t *testing.T) {
}
func TestPurgeFileHoldingLockFile(t *testing.T) {
- dir, err := ioutil.TempDir("", "purgefile")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dir)
+ dir := t.TempDir()
for i := 0; i < 10; i++ {
var f *os.File
- f, err = os.Create(filepath.Join(dir, fmt.Sprintf("%d.test", i)))
- if err != nil {
- t.Fatal(err)
- }
+ f, err := os.Create(filepath.Join(dir, fmt.Sprintf("%d.test", i)))
+ require.NoError(t, err)
f.Close()
}
// create a purge barrier at 5
p := filepath.Join(dir, fmt.Sprintf("%d.test", 5))
l, err := LockFile(p, os.O_WRONLY, PrivateFileMode)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
stop, purgec := make(chan struct{}), make(chan string, 10)
- errch := purgeFile(zap.NewExample(), dir, "test", 3, time.Millisecond, stop, purgec, nil)
+ errch := purgeFile(zaptest.NewLogger(t), dir, "test", 3, time.Millisecond, stop, purgec, nil, true)
for i := 0; i < 5; i++ {
select {
@@ -127,9 +111,7 @@ func TestPurgeFileHoldingLockFile(t *testing.T) {
}
fnames, rerr := ReadDir(dir)
- if rerr != nil {
- t.Fatal(rerr)
- }
+ require.NoError(t, rerr)
wnames := []string{"5.test", "6.test", "7.test", "8.test", "9.test"}
if !reflect.DeepEqual(fnames, wnames) {
@@ -145,9 +127,7 @@ func TestPurgeFileHoldingLockFile(t *testing.T) {
}
// remove the purge barrier
- if err = l.Close(); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, l.Close())
// wait for rest of purges (5, 6)
for i := 0; i < 2; i++ {
@@ -159,9 +139,7 @@ func TestPurgeFileHoldingLockFile(t *testing.T) {
}
fnames, rerr = ReadDir(dir)
- if rerr != nil {
- t.Fatal(rerr)
- }
+ require.NoError(t, rerr)
wnames = []string{"7.test", "8.test", "9.test"}
if !reflect.DeepEqual(fnames, wnames) {
t.Errorf("filenames = %v, want %v", fnames, wnames)
diff --git a/client/pkg/fileutil/read_dir_test.go b/client/pkg/fileutil/read_dir_test.go
index 6080ce750bd..18a5ab26151 100644
--- a/client/pkg/fileutil/read_dir_test.go
+++ b/client/pkg/fileutil/read_dir_test.go
@@ -15,53 +15,40 @@
package fileutil
import (
- "io/ioutil"
"os"
"path/filepath"
"reflect"
"testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestReadDir(t *testing.T) {
- tmpdir, err := ioutil.TempDir("", "")
- defer os.RemoveAll(tmpdir)
- if err != nil {
- t.Fatalf("unexpected ioutil.TempDir error: %v", err)
- }
+ tmpdir := t.TempDir()
files := []string{"def", "abc", "xyz", "ghi"}
for _, f := range files {
writeFunc(t, filepath.Join(tmpdir, f))
}
fs, err := ReadDir(tmpdir)
- if err != nil {
- t.Fatalf("error calling ReadDir: %v", err)
- }
+ require.NoErrorf(t, err, "error calling ReadDir")
wfs := []string{"abc", "def", "ghi", "xyz"}
- if !reflect.DeepEqual(fs, wfs) {
- t.Fatalf("ReadDir: got %v, want %v", fs, wfs)
- }
+ require.Truef(t, reflect.DeepEqual(fs, wfs), "ReadDir: got %v, want %v", fs, wfs)
files = []string{"def.wal", "abc.wal", "xyz.wal", "ghi.wal"}
for _, f := range files {
writeFunc(t, filepath.Join(tmpdir, f))
}
fs, err = ReadDir(tmpdir, WithExt(".wal"))
- if err != nil {
- t.Fatalf("error calling ReadDir: %v", err)
- }
+ require.NoErrorf(t, err, "error calling ReadDir")
wfs = []string{"abc.wal", "def.wal", "ghi.wal", "xyz.wal"}
- if !reflect.DeepEqual(fs, wfs) {
- t.Fatalf("ReadDir: got %v, want %v", fs, wfs)
- }
+ require.Truef(t, reflect.DeepEqual(fs, wfs), "ReadDir: got %v, want %v", fs, wfs)
}
func writeFunc(t *testing.T, path string) {
+ t.Helper()
fh, err := os.Create(path)
- if err != nil {
- t.Fatalf("error creating file: %v", err)
- }
- if err = fh.Close(); err != nil {
- t.Fatalf("error closing file: %v", err)
- }
+ require.NoErrorf(t, err, "error creating file")
+ assert.NoErrorf(t, fh.Close(), "error closing file")
}
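Note (not part of the patch): ReadDir with the WithExt filter, as exercised above; the directory path is illustrative.

package main

import (
	"fmt"
	"log"

	"go.etcd.io/etcd/client/pkg/v3/fileutil"
)

func main() {
	// Returns only entries ending in ".wal", sorted by name.
	names, err := fileutil.ReadDir("/var/lib/etcd/member/wal", fileutil.WithExt(".wal"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(names)
}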
diff --git a/client/pkg/fileutil/sync.go b/client/pkg/fileutil/sync.go
index 0a0855309e9..670d01fadcc 100644
--- a/client/pkg/fileutil/sync.go
+++ b/client/pkg/fileutil/sync.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build !linux && !darwin
-// +build !linux,!darwin
package fileutil
diff --git a/client/pkg/fileutil/sync_darwin.go b/client/pkg/fileutil/sync_darwin.go
index 1923b276ea0..7affa78ea64 100644
--- a/client/pkg/fileutil/sync_darwin.go
+++ b/client/pkg/fileutil/sync_darwin.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build darwin
-// +build darwin
package fileutil
diff --git a/client/pkg/fileutil/sync_linux.go b/client/pkg/fileutil/sync_linux.go
index b9398c23f94..a3172382e5a 100644
--- a/client/pkg/fileutil/sync_linux.go
+++ b/client/pkg/fileutil/sync_linux.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build linux
-// +build linux
package fileutil
diff --git a/client/pkg/go.mod b/client/pkg/go.mod
index 4cbe1ceb626..95cd8c22a7b 100644
--- a/client/pkg/go.mod
+++ b/client/pkg/go.mod
@@ -1,10 +1,22 @@
module go.etcd.io/etcd/client/pkg/v3
-go 1.16
+go 1.23
+
+toolchain go1.23.4
+
+require (
+ github.com/coreos/go-systemd/v22 v22.5.0
+ github.com/stretchr/testify v1.10.0
+ go.uber.org/zap v1.27.0
+ golang.org/x/sys v0.28.0
+)
require (
- github.com/coreos/go-systemd/v22 v22.3.1
- go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19
- golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57
- google.golang.org/grpc v1.37.0
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/kr/pretty v0.3.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/rogpeppe/go-internal v1.13.1 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/client/pkg/go.sum b/client/pkg/go.sum
index 14c70b3330f..6060d6e87a1 100644
--- a/client/pkg/go.sum
+++ b/client/pkg/go.sum
@@ -1,96 +1,34 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/coreos/go-systemd/v22 v22.3.1 h1:7OO2CXWMYNDdaAzP51t4lCCZWwpQHmvPbm9sxWjm3So=
-github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19 h1:040c3dLNhgFQkoojH2AMpHCy4SrvhmxdU72d9GLGGE0=
-go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19/go.mod h1:aMfIlz3TDBfB0BwTCKFU1XbEmj9zevr5S5LcBr85MXw=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 h1:F5Gozwx4I1xtr/sr/8CFbb57iKi3297KFs0QDbGN60A=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/client/pkg/logutil/log_format.go b/client/pkg/logutil/log_format.go
new file mode 100644
index 00000000000..286d385ba39
--- /dev/null
+++ b/client/pkg/logutil/log_format.go
@@ -0,0 +1,42 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import "fmt"
+
+const (
+ JSONLogFormat = "json"
+ ConsoleLogFormat = "console"
+ //revive:disable:var-naming
+ // Deprecated: Please use JSONLogFormat.
+ JsonLogFormat = JSONLogFormat
+ //revive:enable:var-naming
+)
+
+var DefaultLogFormat = JSONLogFormat
+
+// ConvertToZapFormat converts and validates the log format string.
+func ConvertToZapFormat(format string) (string, error) {
+ switch format {
+ case ConsoleLogFormat:
+ return ConsoleLogFormat, nil
+ case JSONLogFormat:
+ return JSONLogFormat, nil
+ case "":
+ return DefaultLogFormat, nil
+ default:
+ return "", fmt.Errorf("unknown log format: %s, supported values json, console", format)
+ }
+}
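Note (not part of the patch): a small sketch of how a flag value would flow through ConvertToZapFormat; the flag name is hypothetical.

package main

import (
	"flag"
	"fmt"
	"log"

	"go.etcd.io/etcd/client/pkg/v3/logutil"
)

func main() {
	logFormat := flag.String("log-format", "", "json, console, or empty for the default")
	flag.Parse()

	// "" falls back to DefaultLogFormat ("json"); unknown values return an error.
	format, err := logutil.ConvertToZapFormat(*logFormat)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("using log format:", format)
}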
diff --git a/client/pkg/logutil/log_format_test.go b/client/pkg/logutil/log_format_test.go
new file mode 100644
index 00000000000..7b77e256d33
--- /dev/null
+++ b/client/pkg/logutil/log_format_test.go
@@ -0,0 +1,45 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+ "testing"
+)
+
+func TestLogFormat(t *testing.T) {
+ tests := []struct {
+ given string
+ want string
+ errExpected bool
+ }{
+ {"json", JSONLogFormat, false},
+ {"console", ConsoleLogFormat, false},
+ {"", JSONLogFormat, false},
+ {"konsole", "", true},
+ }
+
+ for i, tt := range tests {
+ got, err := ConvertToZapFormat(tt.given)
+ if got != tt.want {
+ t.Errorf("#%d: ConvertToZapFormat failure: want=%v, got=%v", i, tt.want, got)
+ }
+
+ if err != nil {
+ if !tt.errExpected {
+ t.Errorf("#%d: ConvertToZapFormat unexpected error: %v", i, err)
+ }
+ }
+ }
+}
diff --git a/client/pkg/logutil/zap.go b/client/pkg/logutil/zap.go
index 8fc6e03b77b..1a502d6d169 100644
--- a/client/pkg/logutil/zap.go
+++ b/client/pkg/logutil/zap.go
@@ -16,11 +16,23 @@ package logutil
import (
"sort"
+ "time"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
+// CreateDefaultZapLogger creates a logger with the default zap configuration.
+func CreateDefaultZapLogger(level zapcore.Level) (*zap.Logger, error) {
+ lcfg := DefaultZapLoggerConfig
+ lcfg.Level = zap.NewAtomicLevelAt(level)
+ c, err := lcfg.Build()
+ if err != nil {
+ return nil, err
+ }
+ return c, nil
+}
+
// DefaultZapLoggerConfig defines default zap logger configuration.
var DefaultZapLoggerConfig = zap.Config{
Level: zap.NewAtomicLevelAt(ConvertToZapLevel(DefaultLogLevel)),
@@ -31,19 +43,24 @@ var DefaultZapLoggerConfig = zap.Config{
Thereafter: 100,
},
- Encoding: "json",
+ Encoding: DefaultLogFormat,
// copied from "zap.NewProductionEncoderConfig" with some updates
EncoderConfig: zapcore.EncoderConfig{
- TimeKey: "ts",
- LevelKey: "level",
- NameKey: "logger",
- CallerKey: "caller",
- MessageKey: "msg",
- StacktraceKey: "stacktrace",
- LineEnding: zapcore.DefaultLineEnding,
- EncodeLevel: zapcore.LowercaseLevelEncoder,
- EncodeTime: zapcore.ISO8601TimeEncoder,
+ TimeKey: "ts",
+ LevelKey: "level",
+ NameKey: "logger",
+ CallerKey: "caller",
+ MessageKey: "msg",
+ StacktraceKey: "stacktrace",
+ LineEnding: zapcore.DefaultLineEnding,
+ EncodeLevel: zapcore.LowercaseLevelEncoder,
+
+ // Custom EncodeTime function to ensure we match format and precision of historic capnslog timestamps
+ EncodeTime: func(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
+ enc.AppendString(t.Format("2006-01-02T15:04:05.000000Z0700"))
+ },
+
EncodeDuration: zapcore.StringDurationEncoder,
EncodeCaller: zapcore.ShortCallerEncoder,
},
diff --git a/client/pkg/logutil/zap_grpc.go b/client/pkg/logutil/zap_grpc.go
deleted file mode 100644
index fb5cb4e4bc1..00000000000
--- a/client/pkg/logutil/zap_grpc.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
- "google.golang.org/grpc/grpclog"
-)
-
-// NewGRPCLoggerV2 converts "*zap.Logger" to "grpclog.LoggerV2".
-// It discards all INFO level logging in gRPC, if debug level
-// is not enabled in "*zap.Logger".
-func NewGRPCLoggerV2(lcfg zap.Config) (grpclog.LoggerV2, error) {
- lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
- if err != nil {
- return nil, err
- }
- return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}, nil
-}
-
-// NewGRPCLoggerV2FromZapCore creates "grpclog.LoggerV2" from "zap.Core"
-// and "zapcore.WriteSyncer". It discards all INFO level logging in gRPC,
-// if debug level is not enabled in "*zap.Logger".
-func NewGRPCLoggerV2FromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) grpclog.LoggerV2 {
- // "AddCallerSkip" to annotate caller outside of "logutil"
- lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer)).Named("grpc")
- return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}
-}
-
-type zapGRPCLogger struct {
- lg *zap.Logger
- sugar *zap.SugaredLogger
-}
-
-func (zl *zapGRPCLogger) Info(args ...interface{}) {
- if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
- return
- }
- zl.sugar.Info(args...)
-}
-
-func (zl *zapGRPCLogger) Infoln(args ...interface{}) {
- if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
- return
- }
- zl.sugar.Info(args...)
-}
-
-func (zl *zapGRPCLogger) Infof(format string, args ...interface{}) {
- if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
- return
- }
- zl.sugar.Infof(format, args...)
-}
-
-func (zl *zapGRPCLogger) Warning(args ...interface{}) {
- zl.sugar.Warn(args...)
-}
-
-func (zl *zapGRPCLogger) Warningln(args ...interface{}) {
- zl.sugar.Warn(args...)
-}
-
-func (zl *zapGRPCLogger) Warningf(format string, args ...interface{}) {
- zl.sugar.Warnf(format, args...)
-}
-
-func (zl *zapGRPCLogger) Error(args ...interface{}) {
- zl.sugar.Error(args...)
-}
-
-func (zl *zapGRPCLogger) Errorln(args ...interface{}) {
- zl.sugar.Error(args...)
-}
-
-func (zl *zapGRPCLogger) Errorf(format string, args ...interface{}) {
- zl.sugar.Errorf(format, args...)
-}
-
-func (zl *zapGRPCLogger) Fatal(args ...interface{}) {
- zl.sugar.Fatal(args...)
-}
-
-func (zl *zapGRPCLogger) Fatalln(args ...interface{}) {
- zl.sugar.Fatal(args...)
-}
-
-func (zl *zapGRPCLogger) Fatalf(format string, args ...interface{}) {
- zl.sugar.Fatalf(format, args...)
-}
-
-func (zl *zapGRPCLogger) V(l int) bool {
- // infoLog == 0
- if l <= 0 { // debug level, then we ignore info level in gRPC
- return !zl.lg.Core().Enabled(zapcore.DebugLevel)
- }
- return true
-}
diff --git a/client/pkg/logutil/zap_grpc_test.go b/client/pkg/logutil/zap_grpc_test.go
deleted file mode 100644
index 9e028cac8e6..00000000000
--- a/client/pkg/logutil/zap_grpc_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
- "testing"
- "time"
-
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
-)
-
-func TestNewGRPCLoggerV2(t *testing.T) {
- logPath := filepath.Join(os.TempDir(), fmt.Sprintf("test-log-%d", time.Now().UnixNano()))
- defer os.RemoveAll(logPath)
-
- lcfg := zap.Config{
- Level: zap.NewAtomicLevelAt(zap.InfoLevel),
- Development: false,
- Sampling: &zap.SamplingConfig{
- Initial: 100,
- Thereafter: 100,
- },
- Encoding: "json",
- EncoderConfig: DefaultZapLoggerConfig.EncoderConfig,
- OutputPaths: []string{logPath},
- ErrorOutputPaths: []string{logPath},
- }
- gl, err := NewGRPCLoggerV2(lcfg)
- if err != nil {
- t.Fatal(err)
- }
-
- // debug level is not enabled,
- // so info level gRPC-side logging is discarded
- gl.Info("etcd-logutil-1")
- data, err := ioutil.ReadFile(logPath)
- if err != nil {
- t.Fatal(err)
- }
- if bytes.Contains(data, []byte("etcd-logutil-1")) {
- t.Fatalf("unexpected line %q", string(data))
- }
-
- gl.Warning("etcd-logutil-2")
- data, err = ioutil.ReadFile(logPath)
- if err != nil {
- t.Fatal(err)
- }
- if !bytes.Contains(data, []byte("etcd-logutil-2")) {
- t.Fatalf("can't find data in log %q", string(data))
- }
- if !bytes.Contains(data, []byte("logutil/zap_grpc_test.go:")) {
- t.Fatalf("unexpected caller; %q", string(data))
- }
-}
-
-func TestNewGRPCLoggerV2FromZapCore(t *testing.T) {
- buf := bytes.NewBuffer(nil)
- syncer := zapcore.AddSync(buf)
- cr := zapcore.NewCore(
- zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
- syncer,
- zap.NewAtomicLevelAt(zap.InfoLevel),
- )
-
- lg := NewGRPCLoggerV2FromZapCore(cr, syncer)
- lg.Warning("TestNewGRPCLoggerV2FromZapCore")
- txt := buf.String()
- if !strings.Contains(txt, "TestNewGRPCLoggerV2FromZapCore") {
- t.Fatalf("unexpected log %q", txt)
- }
-}
diff --git a/client/pkg/logutil/zap_journal.go b/client/pkg/logutil/zap_journal.go
index 9daa3e0aab1..06dc40dacd9 100644
--- a/client/pkg/logutil/zap_journal.go
+++ b/client/pkg/logutil/zap_journal.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build !windows
-// +build !windows
package logutil
@@ -25,10 +24,10 @@ import (
"os"
"path/filepath"
- "go.etcd.io/etcd/client/pkg/v3/systemd"
-
"github.com/coreos/go-systemd/v22/journal"
"go.uber.org/zap/zapcore"
+
+ "go.etcd.io/etcd/client/pkg/v3/systemd"
)
// NewJournalWriter wraps "io.Writer" to redirect log output
diff --git a/client/pkg/logutil/zap_journal_test.go b/client/pkg/logutil/zap_journal_test.go
index 292c1440ebd..be5efd5d3ec 100644
--- a/client/pkg/logutil/zap_journal_test.go
+++ b/client/pkg/logutil/zap_journal_test.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build !windows
-// +build !windows
package logutil
diff --git a/client/pkg/logutil/zap_test.go b/client/pkg/logutil/zap_test.go
new file mode 100644
index 00000000000..5f699822df1
--- /dev/null
+++ b/client/pkg/logutil/zap_test.go
@@ -0,0 +1,58 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+ "bytes"
+ "encoding/json"
+ "regexp"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+type commonLogFields struct {
+ Level string `json:"level"`
+ Timestamp string `json:"ts"`
+ Message string `json:"msg"`
+}
+
+const (
+ fractionSecondsPrecision = 6 // MicroSeconds
+)
+
+func TestEncodeTimePrecisionToMicroSeconds(t *testing.T) {
+ buf := bytes.NewBuffer(nil)
+ syncer := zapcore.AddSync(buf)
+ zc := zapcore.NewCore(
+ zapcore.NewJSONEncoder(DefaultZapLoggerConfig.EncoderConfig),
+ syncer,
+ zap.NewAtomicLevelAt(zap.InfoLevel),
+ )
+
+ lg := zap.New(zc)
+ lg.Info("TestZapLog")
+ fields := commonLogFields{}
+ require.NoError(t, json.Unmarshal(buf.Bytes(), &fields))
+ // example 1: 2024-06-06T23:37:21.948385Z
+ // example 2 with zone offset: 2024-06-06T16:16:44.176778-0700
+ regex := `\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.(\d+)(Z|[+-]\d{4})`
+ re := regexp.MustCompile(regex)
+ matches := re.FindStringSubmatch(fields.Timestamp)
+ require.Len(t, matches, 3)
+ require.Lenf(t, matches[1], fractionSecondsPrecision, "unexpected timestamp %s", fields.Timestamp)
+}
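Note (not part of the patch): a sketch of the new CreateDefaultZapLogger helper, which builds a JSON logger using the microsecond timestamp encoder verified by the test above.

package main

import (
	"go.uber.org/zap/zapcore"

	"go.etcd.io/etcd/client/pkg/v3/logutil"
)

func main() {
	lg, err := logutil.CreateDefaultZapLogger(zapcore.InfoLevel)
	if err != nil {
		panic(err)
	}
	defer lg.Sync()
	lg.Info("logger ready")
}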
diff --git a/client/pkg/srv/srv.go b/client/pkg/srv/srv.go
index 948c6834909..e0a1ccef35c 100644
--- a/client/pkg/srv/srv.go
+++ b/client/pkg/srv/srv.go
@@ -33,7 +33,6 @@ var (
// GetCluster gets the cluster information via DNS discovery.
// Also sees each entry as a separate instance.
func GetCluster(serviceScheme, service, name, dns string, apurls types.URLs) ([]string, error) {
- tempName := int(0)
tcp2ap := make(map[string]url.URL)
// First, resolve the apurls
@@ -45,7 +44,10 @@ func GetCluster(serviceScheme, service, name, dns string, apurls types.URLs) ([]
tcp2ap[tcpAddr.String()] = url
}
- stringParts := []string{}
+ var (
+ tempName int
+ stringParts []string
+ )
updateNodeMap := func(service, scheme string) error {
_, addrs, err := lookupSRV(service, "tcp", dns)
if err != nil {
@@ -85,7 +87,7 @@ func GetCluster(serviceScheme, service, name, dns string, apurls types.URLs) ([]
err := updateNodeMap(service, serviceScheme)
if err != nil {
- return nil, fmt.Errorf("error querying DNS SRV records for _%s %s", service, err)
+ return nil, fmt.Errorf("error querying DNS SRV records for _%s %w", service, err)
}
return stringParts, nil
}
@@ -97,8 +99,10 @@ type SRVClients struct {
// GetClient looks up the client endpoints for a service and domain.
func GetClient(service, domain string, serviceName string) (*SRVClients, error) {
- var urls []*url.URL
- var srvs []*net.SRV
+ var (
+ urls []*url.URL
+ srvs []*net.SRV
+ )
updateURLs := func(service, scheme string) error {
_, addrs, err := lookupSRV(service, "tcp", domain)
@@ -119,7 +123,7 @@ func GetClient(service, domain string, serviceName string) (*SRVClients, error)
errHTTP := updateURLs(GetSRVService(service, serviceName, "http"), "http")
if errHTTPS != nil && errHTTP != nil {
- return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
+ return nil, fmt.Errorf("dns lookup errors: %w and %w", errHTTPS, errHTTP)
}
endpoints := make([]string, len(urls))
diff --git a/client/pkg/srv/srv_test.go b/client/pkg/srv/srv_test.go
index c2ccf9485d9..a0662bb8b49 100644
--- a/client/pkg/srv/srv_test.go
+++ b/client/pkg/srv/srv_test.go
@@ -22,6 +22,8 @@ import (
"strings"
"testing"
+ "github.com/stretchr/testify/require"
+
"go.etcd.io/etcd/client/pkg/v3/testutil"
)
@@ -52,7 +54,7 @@ func TestSRVGetCluster(t *testing.T) {
{Target: "2.example.com.", Port: 2480},
{Target: "3.example.com.", Port: 2480},
}
- srvNone := []*net.SRV{}
+ var srvNone []*net.SRV
tests := []struct {
service string
@@ -152,12 +154,8 @@ func TestSRVGetCluster(t *testing.T) {
urls := testutil.MustNewURLs(t, tt.urls)
str, err := GetCluster(tt.scheme, tt.service, name, "example.com", urls)
- if hasErr(err) != tt.werr {
- t.Fatalf("%d: err = %#v, want = %#v", i, err, tt.werr)
- }
- if strings.Join(str, ",") != tt.expected {
- t.Errorf("#%d: cluster = %s, want %s", i, str, tt.expected)
- }
+ require.Equalf(t, tt.werr, hasErr(err), "%d: err = %#v, want = %#v", i, err, tt.werr)
+ require.Equalf(t, tt.expected, strings.Join(str, ","), "#%d: cluster = %s, want %s", i, str, tt.expected)
}
}
@@ -228,10 +226,10 @@ func TestSRVDiscover(t *testing.T) {
[]*net.SRV{
{Target: "a.example.com", Port: 2480},
{Target: "b.example.com", Port: 2480},
- {Target: "c.example.com", Port: 2480},
+ {Target: "c.example.com.", Port: 2480},
},
[]*net.SRV{},
- []string{"https://a.example.com:2480", "https://b.example.com:2480", "https://c.example.com:2480"},
+ []string{"https://a.example.com:2480", "https://b.example.com:2480", "https://c.example.com.:2480"},
false,
},
}
@@ -255,9 +253,7 @@ func TestSRVDiscover(t *testing.T) {
srvs, err := GetClient("etcd-client", "example.com", "")
- if hasErr(err) != tt.werr {
- t.Fatalf("%d: err = %#v, want = %#v", i, err, tt.werr)
- }
+ require.Equalf(t, tt.werr, hasErr(err), "%d: err = %#v, want = %#v", i, err, tt.werr)
if srvs == nil {
if len(tt.expected) > 0 {
t.Errorf("#%d: srvs = nil, want non-nil", i)
diff --git a/client/pkg/testutil/assert.go b/client/pkg/testutil/assert.go
index e8e042021e9..f421328f101 100644
--- a/client/pkg/testutil/assert.go
+++ b/client/pkg/testutil/assert.go
@@ -15,53 +15,37 @@
package testutil
import (
- "fmt"
- "reflect"
"testing"
-)
-func AssertEqual(t *testing.T, e, a interface{}, msg ...string) {
- t.Helper()
- if (e == nil || a == nil) && (isNil(e) && isNil(a)) {
- return
- }
- if reflect.DeepEqual(e, a) {
- return
- }
- s := ""
- if len(msg) > 1 {
- s = msg[0] + ": "
- }
- s = fmt.Sprintf("%sexpected %+v, got %+v", s, e, a)
- FatalStack(t, s)
-}
+ "github.com/stretchr/testify/assert"
+)
-func AssertNil(t *testing.T, v interface{}) {
+// AssertNil asserts that the value is nil.
+//
+// Deprecated: use github.com/stretchr/testify/assert.Nil instead.
+func AssertNil(t *testing.T, v any) {
t.Helper()
- AssertEqual(t, nil, v)
+ assert.Nil(t, v)
}
-func AssertNotNil(t *testing.T, v interface{}) {
+// AssertNotNil asserts that the value is not nil.
+//
+// Deprecated: use github.com/stretchr/testify/require.NotNil instead.
+func AssertNotNil(t *testing.T, v any) {
t.Helper()
if v == nil {
t.Fatalf("expected non-nil, got %+v", v)
}
}
+// AssertTrue asserts that the value is true.
+//
+// Deprecated: use github.com/stretchr/testify/assert.True instead.
func AssertTrue(t *testing.T, v bool, msg ...string) {
t.Helper()
- AssertEqual(t, true, v, msg...)
+ assert.True(t, v, msg) //nolint:testifylint
}
+// AssertFalse asserts that the value is false.
+//
+// Deprecated: use github.com/stretchr/testify/assert.False instead.
func AssertFalse(t *testing.T, v bool, msg ...string) {
t.Helper()
- AssertEqual(t, false, v, msg...)
-}
-
-func isNil(v interface{}) bool {
- if v == nil {
- return true
- }
- rv := reflect.ValueOf(v)
- return rv.Kind() != reflect.Struct && rv.IsNil()
+ assert.False(t, v, msg) //nolint:testifylint
}
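
Since the helpers above are now thin deprecated wrappers, call sites can move to testify directly. A minimal sketch of the one-to-one replacements the Deprecated notices point to (the test body is illustrative):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestMigratedAssertions(t *testing.T) {
	var v any
	cond := true

	// testutil.AssertNil(t, v)       -> assert.Nil(t, v)
	// testutil.AssertNotNil(t, &v)   -> require.NotNil(t, &v)
	// testutil.AssertTrue(t, cond)   -> assert.True(t, cond)
	// testutil.AssertFalse(t, !cond) -> assert.False(t, !cond)
	assert.Nil(t, v)
	require.NotNil(t, &v)
	assert.True(t, cond)
	assert.False(t, !cond)
}
```
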
diff --git a/client/pkg/testutil/before.go b/client/pkg/testutil/before.go
new file mode 100644
index 00000000000..155a491ad40
--- /dev/null
+++ b/client/pkg/testutil/before.go
@@ -0,0 +1,62 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testutil
+
+import (
+ "log"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+)
+
+func BeforeTest(tb testing.TB) {
+ tb.Helper()
+ RegisterLeakDetection(tb)
+
+ revertVerifyFunc := verify.EnableAllVerifications()
+
+ path, err := os.Getwd()
+ require.NoError(tb, err)
+ tempDir := tb.TempDir()
+ require.NoError(tb, os.Chdir(tempDir))
+ tb.Logf("Changing working directory to: %s", tempDir)
+
+ tb.Cleanup(func() {
+ revertVerifyFunc()
+ assert.NoError(tb, os.Chdir(path))
+ })
+}
+
+func BeforeIntegrationExamples(*testing.M) func() {
+ ExitInShortMode("Skipping: the tests require a real cluster")
+
+ tempDir, err := os.MkdirTemp(os.TempDir(), "etcd-integration")
+ if err != nil {
+ log.Printf("Failed to obtain tempDir: %v", tempDir)
+ os.Exit(1)
+ }
+
+ err = os.Chdir(tempDir)
+ if err != nil {
+ log.Printf("Failed to change working dir to: %s: %v", tempDir, err)
+ os.Exit(1)
+ }
+ log.Printf("Running tests (examples) in dir(%v): ...", tempDir)
+ return func() { os.RemoveAll(tempDir) }
+}
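
A hedged sketch of how these helpers are typically wired into a test package (the package path is the one imported elsewhere in this diff; the test bodies are placeholders):

```go
package example_test

import (
	"os"
	"testing"

	"go.etcd.io/etcd/client/pkg/v3/testutil"
)

// TestMain uses BeforeIntegrationExamples to skip under -short and to run the
// examples from a throwaway directory; the returned func removes that directory.
func TestMain(m *testing.M) {
	cleanup := testutil.BeforeIntegrationExamples(m)
	code := m.Run()
	cleanup()
	os.Exit(code)
}

// Individual tests call BeforeTest to get leak detection, verification, and an
// isolated working directory; teardown is registered via t.Cleanup.
func TestSomething(t *testing.T) {
	testutil.BeforeTest(t)
	// ... test body ...
}
```
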
diff --git a/client/pkg/testutil/leak.go b/client/pkg/testutil/leak.go
index f786b5ccd44..ce859ef530f 100644
--- a/client/pkg/testutil/leak.go
+++ b/client/pkg/testutil/leak.go
@@ -16,6 +16,8 @@ import (
"time"
)
+// TODO: Replace with https://github.com/uber-go/goleak.
+
/*
CheckLeakedGoroutine verifies tests do not leave any leaky
goroutines. It returns true when there are goroutines still
@@ -28,11 +30,12 @@ running(leaking) after all tests.
}
func TestSample(t *testing.T) {
- BeforeTest(t)
+ RegisterLeakDetection(t)
...
}
-
*/
+var normalizedRegexp = regexp.MustCompile(`\(0[0-9a-fx, ]*\)`)
+
func CheckLeakedGoroutine() bool {
gs := interestingGoroutines()
if len(gs) == 0 {
@@ -40,14 +43,13 @@ func CheckLeakedGoroutine() bool {
}
stackCount := make(map[string]int)
- re := regexp.MustCompile(`\(0[0-9a-fx, ]*\)`)
for _, g := range gs {
// strip out pointer arguments in first function of stack dump
- normalized := string(re.ReplaceAll([]byte(g), []byte("(...)")))
+ normalized := string(normalizedRegexp.ReplaceAll([]byte(g), []byte("(...)")))
stackCount[normalized]++
}
- fmt.Fprintf(os.Stderr, "Unexpected goroutines running after all test(s).\n")
+ fmt.Fprint(os.Stderr, "Unexpected goroutines running after all test(s).\n")
for stack, count := range stackCount {
fmt.Fprintf(os.Stderr, "%d instances of:\n%s\n", count, stack)
}
@@ -94,22 +96,22 @@ func CheckAfterTest(d time.Duration) error {
return fmt.Errorf("appears to have leaked %s:\n%s", bad, stacks)
}
-// BeforeTest is a convenient way to register before-and-after code to a test.
-// If you execute BeforeTest, you don't need to explicitly register AfterTest.
-func BeforeTest(t TB) {
+// RegisterLeakDetection is a convenient way to register before-and-after code to a test.
+// If you execute RegisterLeakDetection, you don't need to explicitly register AfterTest.
+func RegisterLeakDetection(t TB) {
if err := CheckAfterTest(10 * time.Millisecond); err != nil {
t.Skip("Found leaked goroutined BEFORE test", err)
return
}
t.Cleanup(func() {
- AfterTest(t)
+ afterTest(t)
})
}
-// AfterTest is meant to run in a defer that executes after a test completes.
+// afterTest is meant to run in a defer that executes after a test completes.
// It will detect common goroutine leaks, retrying in case there are goroutines
// not synchronously torn down, and fail the test if any goroutines are stuck.
-func AfterTest(t TB) {
+func afterTest(t TB) {
// If test-failed the leaked goroutines list is hidding the real
// source of problem.
if !t.Failed() {
@@ -128,28 +130,45 @@ func interestingGoroutines() (gs []string) {
continue
}
stack := strings.TrimSpace(sl[1])
- if stack == "" ||
- strings.Contains(stack, "sync.(*WaitGroup).Done") ||
- strings.Contains(stack, "os.(*file).close") ||
- strings.Contains(stack, "os.(*Process).Release") ||
- strings.Contains(stack, "created by os/signal.init") ||
- strings.Contains(stack, "runtime/panic.go") ||
- strings.Contains(stack, "created by testing.RunTests") ||
- strings.Contains(stack, "created by testing.runTests") ||
- strings.Contains(stack, "created by testing.(*T).Run") ||
- strings.Contains(stack, "testing.Main(") ||
- strings.Contains(stack, "runtime.goexit") ||
- strings.Contains(stack, "go.etcd.io/etcd/client/pkg/v3/testutil.interestingGoroutines") ||
- strings.Contains(stack, "go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop") ||
- strings.Contains(stack, "github.com/golang/glog.(*loggingT).flushDaemon") ||
- strings.Contains(stack, "created by runtime.gc") ||
- strings.Contains(stack, "created by text/template/parse.lex") ||
- strings.Contains(stack, "runtime.MHeap_Scavenger") ||
- strings.Contains(stack, "rcrypto/internal/boring.(*PublicKeyRSA).finalize") ||
- strings.Contains(stack, "net.(*netFD).Close(") ||
- strings.Contains(stack, "testing.(*T).Run") {
+ if stack == "" {
+ continue
+ }
+
+ shouldSkip := func() bool {
+ uninterestingMsgs := [...]string{
+ "sync.(*WaitGroup).Done",
+ "os.(*file).close",
+ "os.(*Process).Release",
+ "created by os/signal.init",
+ "runtime/panic.go",
+ "created by testing.RunTests",
+ "created by testing.runTests",
+ "created by testing.(*T).Run",
+ "testing.Main(",
+ "runtime.goexit",
+ "go.etcd.io/etcd/client/pkg/v3/testutil.interestingGoroutines",
+ "go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop",
+ "github.com/golang/glog.(*loggingT).flushDaemon",
+ "created by runtime.gc",
+ "created by text/template/parse.lex",
+ "runtime.MHeap_Scavenger",
+ "rcrypto/internal/boring.(*PublicKeyRSA).finalize",
+ "net.(*netFD).Close(",
+ "testing.(*T).Run",
+ "crypto/tls.(*certCache).evict",
+ }
+ for _, msg := range uninterestingMsgs {
+ if strings.Contains(stack, msg) {
+ return true
+ }
+ }
+ return false
+ }()
+
+ if shouldSkip {
continue
}
+
gs = append(gs, stack)
}
sort.Strings(gs)
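
The TODO above suggests replacing this hand-maintained allow-list with uber-go/goleak. A hedged sketch of what that replacement might look like (the ignore rule reuses one entry from the list above):

```go
package example_test

import (
	"testing"

	"go.uber.org/goleak"
)

// Package-level check, roughly equivalent to running afterTest once after all tests.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}

// Per-test check, comparable to RegisterLeakDetection; known-benign goroutines
// are ignored explicitly instead of via the string allow-list above.
func TestNoLeakedGoroutines(t *testing.T) {
	defer goleak.VerifyNone(t, goleak.IgnoreTopFunction("crypto/tls.(*certCache).evict"))
	// ... test body ...
}
```
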
diff --git a/client/pkg/testutil/leak_test.go b/client/pkg/testutil/leak_test.go
index fa70782f3d6..71b1c7bf3e6 100644
--- a/client/pkg/testutil/leak_test.go
+++ b/client/pkg/testutil/leak_test.go
@@ -35,7 +35,7 @@ func TestMain(m *testing.M) {
func TestSample(t *testing.T) {
SkipTestIfShortMode(t, "Counting leaked routines is disabled in --short tests")
- defer AfterTest(t)
+ defer afterTest(t)
ranSample = true
for range make([]struct{}, 100) {
go func() {
diff --git a/client/pkg/testutil/recorder.go b/client/pkg/testutil/recorder.go
index 41349fec52d..0863593ef7a 100644
--- a/client/pkg/testutil/recorder.go
+++ b/client/pkg/testutil/recorder.go
@@ -23,7 +23,7 @@ import (
type Action struct {
Name string
- Params []interface{}
+ Params []any
}
type Recorder interface {
@@ -87,7 +87,7 @@ type recorderStream struct {
}
func NewRecorderStream() Recorder {
- return NewRecorderStreamWithWaitTimout(time.Duration(5 * time.Second))
+ return NewRecorderStreamWithWaitTimout(5 * time.Second)
}
func NewRecorderStreamWithWaitTimout(waitTimeout time.Duration) Recorder {
@@ -115,7 +115,10 @@ func (r *recorderStream) Chan() <-chan Action {
func (r *recorderStream) Wait(n int) ([]Action, error) {
acts := make([]Action, n)
- timeoutC := time.After(r.waitTimeout)
+ var timeoutC <-chan time.Time
+ if r.waitTimeout != 0 {
+ timeoutC = time.After(r.waitTimeout)
+ }
for i := 0; i < n; i++ {
select {
case acts[i] = <-r.ch:
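
The change to Wait relies on the fact that receiving from a nil channel blocks forever, so a zero waitTimeout now means "no timeout" instead of timing out immediately. A standalone sketch of the same pattern (names are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

func waitWithOptionalTimeout(ch <-chan string, timeout time.Duration) (string, error) {
	// A nil channel never becomes ready, so when timeout == 0 the timeout case
	// below can never fire and we wait indefinitely -- the same trick
	// recorderStream.Wait now uses.
	var timeoutC <-chan time.Time
	if timeout != 0 {
		timeoutC = time.After(timeout)
	}
	select {
	case v := <-ch:
		return v, nil
	case <-timeoutC:
		return "", fmt.Errorf("timed out after %v", timeout)
	}
}

func main() {
	ch := make(chan string, 1)
	ch <- "action"
	v, err := waitWithOptionalTimeout(ch, time.Second)
	fmt.Println(v, err)
}
```
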
diff --git a/client/pkg/testutil/testingtb.go b/client/pkg/testutil/testingtb.go
index 970542c0405..cd2047299a6 100644
--- a/client/pkg/testutil/testingtb.go
+++ b/client/pkg/testutil/testingtb.go
@@ -15,7 +15,6 @@
package testutil
import (
- "io/ioutil"
"log"
"os"
)
@@ -24,18 +23,18 @@ import (
// We cannot implement testing.TB due to protection, so we expose this simplified interface.
type TB interface {
Cleanup(func())
- Error(args ...interface{})
- Errorf(format string, args ...interface{})
+ Error(args ...any)
+ Errorf(format string, args ...any)
Fail()
FailNow()
Failed() bool
- Fatal(args ...interface{})
- Fatalf(format string, args ...interface{})
- Logf(format string, args ...interface{})
+ Fatal(args ...any)
+ Fatalf(format string, args ...any)
+ Logf(format string, args ...any)
Name() string
TempDir() string
Helper()
- Skip(args ...interface{})
+ Skip(args ...any)
}
// NewTestingTBProthesis creates a fake variant of testing.TB implementation.
@@ -60,20 +59,20 @@ func (t *testingTBProthesis) Helper() {
// Ignored
}
-func (t *testingTBProthesis) Skip(args ...interface{}) {
- t.Log(append([]interface{}{"Skipping due to: "}, args...))
+func (t *testingTBProthesis) Skip(args ...any) {
+ t.Log(append([]any{"Skipping due to: "}, args...))
}
func (t *testingTBProthesis) Cleanup(f func()) {
t.cleanups = append(t.cleanups, f)
}
-func (t *testingTBProthesis) Error(args ...interface{}) {
+func (t *testingTBProthesis) Error(args ...any) {
log.Println(args...)
t.Fail()
}
-func (t *testingTBProthesis) Errorf(format string, args ...interface{}) {
+func (t *testingTBProthesis) Errorf(format string, args ...any) {
log.Printf(format, args...)
t.Fail()
}
@@ -91,19 +90,19 @@ func (t *testingTBProthesis) Failed() bool {
return t.failed
}
-func (t *testingTBProthesis) Fatal(args ...interface{}) {
+func (t *testingTBProthesis) Fatal(args ...any) {
log.Fatalln(args...)
}
-func (t *testingTBProthesis) Fatalf(format string, args ...interface{}) {
+func (t *testingTBProthesis) Fatalf(format string, args ...any) {
log.Fatalf(format, args...)
}
-func (t *testingTBProthesis) Logf(format string, args ...interface{}) {
+func (t *testingTBProthesis) Logf(format string, args ...any) {
log.Printf(format, args...)
}
-func (t *testingTBProthesis) Log(args ...interface{}) {
+func (t *testingTBProthesis) Log(args ...any) {
log.Println(args...)
}
@@ -112,7 +111,7 @@ func (t *testingTBProthesis) Name() string {
}
func (t *testingTBProthesis) TempDir() string {
- dir, err := ioutil.TempDir("", t.name)
+ dir, err := os.MkdirTemp("", t.name)
if err != nil {
t.Fatal(err)
}
diff --git a/client/pkg/testutil/testutil.go b/client/pkg/testutil/testutil.go
index 6dc55d0dfa3..425c9f3b6b5 100644
--- a/client/pkg/testutil/testutil.go
+++ b/client/pkg/testutil/testutil.go
@@ -30,6 +30,7 @@ func WaitSchedule() {
}
func MustNewURLs(t *testing.T, urls []string) []url.URL {
+ t.Helper()
if urls == nil {
return nil
}
@@ -42,6 +43,7 @@ func MustNewURLs(t *testing.T, urls []string) []url.URL {
}
func MustNewURL(t *testing.T, s string) *url.URL {
+ t.Helper()
u, err := url.Parse(s)
if err != nil {
t.Fatalf("parse %v error: %v", s, err)
@@ -51,6 +53,7 @@ func MustNewURL(t *testing.T, s string) *url.URL {
// FatalStack helps to fatal the test and print out the stacks of all running goroutines.
func FatalStack(t *testing.T, s string) {
+ t.Helper()
stackTrace := make([]byte, 1024*1024)
n := runtime.Stack(stackTrace, true)
t.Errorf("---> Test failed: %s", s)
diff --git a/client/pkg/tlsutil/cipher_suites.go b/client/pkg/tlsutil/cipher_suites.go
index f278a61f8a0..e1f21755d4b 100644
--- a/client/pkg/tlsutil/cipher_suites.go
+++ b/client/pkg/tlsutil/cipher_suites.go
@@ -14,7 +14,10 @@
package tlsutil
-import "crypto/tls"
+import (
+ "crypto/tls"
+ "fmt"
+)
// GetCipherSuite returns the corresponding cipher suite,
// and boolean value if it is supported.
@@ -37,3 +40,17 @@ func GetCipherSuite(s string) (uint16, bool) {
}
return 0, false
}
+
+// GetCipherSuites returns the list of corresponding cipher suite IDs.
+func GetCipherSuites(ss []string) ([]uint16, error) {
+ cs := make([]uint16, len(ss))
+ for i, s := range ss {
+ var ok bool
+ cs[i], ok = GetCipherSuite(s)
+ if !ok {
+ return nil, fmt.Errorf("unexpected TLS cipher suite %q", s)
+ }
+ }
+
+ return cs, nil
+}
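
A hedged sketch of how the new GetCipherSuites helper might be used to turn user-supplied cipher names into a tls.Config (the names are illustrative; whether a given name is accepted depends on GetCipherSuite):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"log"

	"go.etcd.io/etcd/client/pkg/v3/tlsutil"
)

func main() {
	// Names as they might arrive from a --cipher-suites style flag.
	names := []string{
		"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
		"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
	}

	ids, err := tlsutil.GetCipherSuites(names)
	if err != nil {
		log.Fatalf("invalid cipher suite: %v", err)
	}

	cfg := &tls.Config{CipherSuites: ids, MinVersion: tls.VersionTLS12}
	fmt.Printf("configured %d cipher suites\n", len(cfg.CipherSuites))
}
```
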
diff --git a/client/pkg/tlsutil/cipher_suites_test.go b/client/pkg/tlsutil/cipher_suites_test.go
index a17b46c2fee..54e83a5baa7 100644
--- a/client/pkg/tlsutil/cipher_suites_test.go
+++ b/client/pkg/tlsutil/cipher_suites_test.go
@@ -17,22 +17,23 @@ package tlsutil
import (
"crypto/tls"
"testing"
+
+ "github.com/stretchr/testify/require"
)
func TestGetCipherSuite_not_existing(t *testing.T) {
_, ok := GetCipherSuite("not_existing")
- if ok {
- t.Fatal("Expected not ok")
- }
+ require.Falsef(t, ok, "Expected not ok")
}
-func CipherSuiteExpectedToExist(tb testing.TB, cipher string, expectedId uint16) {
+func CipherSuiteExpectedToExist(tb testing.TB, cipher string, expectedID uint16) {
+ tb.Helper()
vid, ok := GetCipherSuite(cipher)
if !ok {
tb.Errorf("Expected %v cipher to exist", cipher)
}
- if vid != expectedId {
- tb.Errorf("For %v expected=%v found=%v", cipher, expectedId, vid)
+ if vid != expectedID {
+ tb.Errorf("For %v expected=%v found=%v", cipher, expectedID, vid)
}
}
diff --git a/client/pkg/tlsutil/tlsutil.go b/client/pkg/tlsutil/tlsutil.go
index 3a5aef089a7..0f79865e805 100644
--- a/client/pkg/tlsutil/tlsutil.go
+++ b/client/pkg/tlsutil/tlsutil.go
@@ -18,7 +18,7 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
- "io/ioutil"
+ "os"
)
// NewCertPool creates x509 certPool with provided CA files.
@@ -26,7 +26,7 @@ func NewCertPool(CAFiles []string) (*x509.CertPool, error) {
certPool := x509.NewCertPool()
for _, CAFile := range CAFiles {
- pemByte, err := ioutil.ReadFile(CAFile)
+ pemByte, err := os.ReadFile(CAFile)
if err != nil {
return nil, err
}
@@ -51,12 +51,12 @@ func NewCertPool(CAFiles []string) (*x509.CertPool, error) {
// NewCert generates TLS cert by using the given cert,key and parse function.
func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) {
- cert, err := ioutil.ReadFile(certfile)
+ cert, err := os.ReadFile(certfile)
if err != nil {
return nil, err
}
- key, err := ioutil.ReadFile(keyfile)
+ key, err := os.ReadFile(keyfile)
if err != nil {
return nil, err
}
diff --git a/client/pkg/tlsutil/versions.go b/client/pkg/tlsutil/versions.go
new file mode 100644
index 00000000000..ffcecd8c670
--- /dev/null
+++ b/client/pkg/tlsutil/versions.go
@@ -0,0 +1,47 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tlsutil
+
+import (
+ "crypto/tls"
+ "fmt"
+)
+
+type TLSVersion string
+
+// Constants for TLS versions.
+const (
+ TLSVersionDefault TLSVersion = ""
+ TLSVersion12 TLSVersion = "TLS1.2"
+ TLSVersion13 TLSVersion = "TLS1.3"
+)
+
+// GetTLSVersion returns the corresponding tls.Version or error.
+func GetTLSVersion(version string) (uint16, error) {
+ var v uint16
+
+ switch version {
+ case string(TLSVersionDefault):
+ v = 0 // 0 means let Go decide.
+ case string(TLSVersion12):
+ v = tls.VersionTLS12
+ case string(TLSVersion13):
+ v = tls.VersionTLS13
+ default:
+ return 0, fmt.Errorf("unexpected TLS version %q (must be one of: TLS1.2, TLS1.3)", version)
+ }
+
+ return v, nil
+}
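
A brief usage sketch for GetTLSVersion as defined above (an empty string means "let Go decide"; anything other than TLS1.2/TLS1.3 is rejected):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"log"

	"go.etcd.io/etcd/client/pkg/v3/tlsutil"
)

func main() {
	minVer, err := tlsutil.GetTLSVersion("TLS1.2")
	if err != nil {
		log.Fatal(err)
	}
	maxVer, err := tlsutil.GetTLSVersion("TLS1.3")
	if err != nil {
		log.Fatal(err)
	}

	cfg := &tls.Config{MinVersion: minVer, MaxVersion: maxVer}
	fmt.Println(cfg.MinVersion == tls.VersionTLS12, cfg.MaxVersion == tls.VersionTLS13)
}
```
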
diff --git a/client/pkg/tlsutil/versions_test.go b/client/pkg/tlsutil/versions_test.go
new file mode 100644
index 00000000000..f6820de424a
--- /dev/null
+++ b/client/pkg/tlsutil/versions_test.go
@@ -0,0 +1,63 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tlsutil
+
+import (
+ "crypto/tls"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestGetVersion(t *testing.T) {
+ tests := []struct {
+ name string
+ version string
+ want uint16
+ expectError bool
+ }{
+ {
+ name: "TLS1.2",
+ version: "TLS1.2",
+ want: tls.VersionTLS12,
+ },
+ {
+ name: "TLS1.3",
+ version: "TLS1.3",
+ want: tls.VersionTLS13,
+ },
+ {
+ name: "Empty version",
+ version: "",
+ want: 0,
+ },
+ {
+ name: "Converting invalid version string to TLS version",
+ version: "not_existing",
+ expectError: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := GetTLSVersion(tt.version)
+ if err != nil {
+ assert.Truef(t, tt.expectError, "GetTLSVersion() returned error while expecting success: %v", err)
+ return
+ }
+ assert.Equal(t, tt.want, got)
+ })
+ }
+}
diff --git a/client/pkg/transport/keepalive_listener.go b/client/pkg/transport/keepalive_listener.go
index 4ff8e7f0010..d43ac4f078a 100644
--- a/client/pkg/transport/keepalive_listener.go
+++ b/client/pkg/transport/keepalive_listener.go
@@ -16,31 +16,35 @@ package transport
import (
"crypto/tls"
+ "errors"
"fmt"
"net"
"time"
)
-type keepAliveConn interface {
- SetKeepAlive(bool) error
- SetKeepAlivePeriod(d time.Duration) error
-}
-
// NewKeepAliveListener returns a listener that listens on the given address.
// Be careful when wrap around KeepAliveListener with another Listener if TLSInfo is not nil.
// Some pkgs (like go/http) might expect Listener to return TLSConn type to start TLS handshake.
// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html
+//
+// Note(ahrtr):
+// only `net.TCPConn` supports `SetKeepAlive` and `SetKeepAlivePeriod`
+// by default, so when wrapping multiple layers of net.Listener, the
+// `keepaliveListener` should be the layer closest to the original
+// `net.Listener` implementation, namely `TCPListener`.
func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) {
+ kal := &keepaliveListener{
+ Listener: l,
+ }
+
if scheme == "https" {
if tlscfg == nil {
- return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented")
+ return nil, errors.New("cannot listen on TLS for given listener: KeyFile and CertFile are not presented")
}
- return newTLSKeepaliveListener(l, tlscfg), nil
+ return newTLSKeepaliveListener(kal, tlscfg), nil
}
- return &keepaliveListener{
- Listener: l,
- }, nil
+ return kal, nil
}
type keepaliveListener struct{ net.Listener }
@@ -50,13 +54,38 @@ func (kln *keepaliveListener) Accept() (net.Conn, error) {
if err != nil {
return nil, err
}
- kac := c.(keepAliveConn)
+
+ kac, err := createKeepaliveConn(c)
+ if err != nil {
+ return nil, fmt.Errorf("create keepalive connection failed, %w", err)
+ }
// detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
// default on linux: 30 + 8 * 30
// default on osx: 30 + 8 * 75
- kac.SetKeepAlive(true)
- kac.SetKeepAlivePeriod(30 * time.Second)
- return c, nil
+ if err := kac.SetKeepAlive(true); err != nil {
+ return nil, fmt.Errorf("SetKeepAlive failed, %w", err)
+ }
+ if err := kac.SetKeepAlivePeriod(30 * time.Second); err != nil {
+ return nil, fmt.Errorf("SetKeepAlivePeriod failed, %w", err)
+ }
+ return kac, nil
+}
+
+func createKeepaliveConn(c net.Conn) (*keepAliveConn, error) {
+ tcpc, ok := c.(*net.TCPConn)
+ if !ok {
+ return nil, ErrNotTCP
+ }
+ return &keepAliveConn{tcpc}, nil
+}
+
+type keepAliveConn struct {
+ *net.TCPConn
+}
+
+// SetKeepAlive sets keepalive
+func (l *keepAliveConn) SetKeepAlive(doKeepAlive bool) error {
+ return l.TCPConn.SetKeepAlive(doKeepAlive)
}
// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections.
@@ -67,22 +96,17 @@ type tlsKeepaliveListener struct {
// Accept waits for and returns the next incoming TLS connection.
// The returned connection c is a *tls.Conn.
-func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) {
- c, err = l.Listener.Accept()
+func (l *tlsKeepaliveListener) Accept() (net.Conn, error) {
+ c, err := l.Listener.Accept()
if err != nil {
- return
+ return nil, err
}
- kac := c.(keepAliveConn)
- // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
- // default on linux: 30 + 8 * 30
- // default on osx: 30 + 8 * 75
- kac.SetKeepAlive(true)
- kac.SetKeepAlivePeriod(30 * time.Second)
+
c = tls.Server(c, l.config)
return c, nil
}
-// NewListener creates a Listener which accepts connections from an inner
+// newTLSKeepaliveListener creates a Listener which accepts connections from an inner
// Listener and wraps each connection with Server.
// The configuration config must be non-nil and must have
// at least one certificate.
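
Per the note above, the keepalive wrapper has to sit directly on the raw TCP listener so that Accept still sees a *net.TCPConn. A hedged layering sketch (LimitListener is the helper from limit_listen.go; the address and connection limit are placeholders):

```go
package main

import (
	"log"
	"net"

	"go.etcd.io/etcd/client/pkg/v3/transport"
)

func main() {
	// The keepalive wrapper goes directly on the TCP listener; other wrappers
	// (connection limits, TLS, ...) are layered outside it.
	tcpLn, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}

	kaLn, err := transport.NewKeepAliveListener(tcpLn, "http", nil)
	if err != nil {
		log.Fatal(err)
	}

	limited := transport.LimitListener(kaLn, 100) // outermost wrapper
	defer limited.Close()
	log.Println("listening on", limited.Addr())
}
```
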
diff --git a/client/pkg/transport/keepalive_listener_openbsd.go b/client/pkg/transport/keepalive_listener_openbsd.go
new file mode 100644
index 00000000000..024c6c23639
--- /dev/null
+++ b/client/pkg/transport/keepalive_listener_openbsd.go
@@ -0,0 +1,26 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build openbsd
+
+package transport
+
+import "time"
+
+// SetKeepAlivePeriod sets keepalive period
+func (l *keepAliveConn) SetKeepAlivePeriod(d time.Duration) error {
+ // OpenBSD has no user-settable per-socket TCP keepalive options.
+ // Refer to https://github.com/etcd-io/etcd/issues/15811.
+ return nil
+}
diff --git a/client/pkg/transport/keepalive_listener_test.go b/client/pkg/transport/keepalive_listener_test.go
index 425f53368b5..dabcf639c05 100644
--- a/client/pkg/transport/keepalive_listener_test.go
+++ b/client/pkg/transport/keepalive_listener_test.go
@@ -19,6 +19,8 @@ import (
"net"
"net/http"
"testing"
+
+ "github.com/stretchr/testify/require"
)
// TestNewKeepAliveListener tests NewKeepAliveListener returns a listener
@@ -26,50 +28,35 @@ import (
// TODO: verify the keepalive option is set correctly
func TestNewKeepAliveListener(t *testing.T) {
ln, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("unexpected listen error: %v", err)
- }
+ require.NoErrorf(t, err, "unexpected listen error")
ln, err = NewKeepAliveListener(ln, "http", nil)
- if err != nil {
- t.Fatalf("unexpected NewKeepAliveListener error: %v", err)
- }
+ require.NoErrorf(t, err, "unexpected NewKeepAliveListener error")
go http.Get("http://" + ln.Addr().String())
conn, err := ln.Accept()
- if err != nil {
- t.Fatalf("unexpected Accept error: %v", err)
- }
+ require.NoErrorf(t, err, "unexpected Accept error")
+ _, ok := conn.(*keepAliveConn)
+ require.Truef(t, ok, "Unexpected conn type: %T, wanted *keepAliveConn", conn)
conn.Close()
ln.Close()
ln, err = net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("unexpected Listen error: %v", err)
- }
+ require.NoErrorf(t, err, "unexpected Listen error")
// tls
- tlsinfo, del, err := createSelfCert()
- if err != nil {
- t.Fatalf("unable to create tmpfile: %v", err)
- }
- defer del()
+ tlsinfo, err := createSelfCert(t)
+ require.NoErrorf(t, err, "unable to create tmpfile")
tlsInfo := TLSInfo{CertFile: tlsinfo.CertFile, KeyFile: tlsinfo.KeyFile}
- tlsInfo.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, nil)
+ tlsInfo.parseFunc = fakeCertificateParserFunc(nil)
tlscfg, err := tlsInfo.ServerConfig()
- if err != nil {
- t.Fatalf("unexpected serverConfig error: %v", err)
- }
+ require.NoErrorf(t, err, "unexpected serverConfig error")
tlsln, err := NewKeepAliveListener(ln, "https", tlscfg)
- if err != nil {
- t.Fatalf("unexpected NewKeepAliveListener error: %v", err)
- }
+ require.NoErrorf(t, err, "unexpected NewKeepAliveListener error")
go http.Get("https://" + tlsln.Addr().String())
conn, err = tlsln.Accept()
- if err != nil {
- t.Fatalf("unexpected Accept error: %v", err)
- }
+ require.NoErrorf(t, err, "unexpected Accept error")
if _, ok := conn.(*tls.Conn); !ok {
t.Errorf("failed to accept *tls.Conn")
}
@@ -79,9 +66,7 @@ func TestNewKeepAliveListener(t *testing.T) {
func TestNewKeepAliveListenerTLSEmptyConfig(t *testing.T) {
ln, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("unexpected listen error: %v", err)
- }
+ require.NoErrorf(t, err, "unexpected listen error")
_, err = NewKeepAliveListener(ln, "https", nil)
if err == nil {
diff --git a/client/pkg/transport/keepalive_listener_unix.go b/client/pkg/transport/keepalive_listener_unix.go
new file mode 100644
index 00000000000..08061f7267b
--- /dev/null
+++ b/client/pkg/transport/keepalive_listener_unix.go
@@ -0,0 +1,24 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !openbsd
+
+package transport
+
+import "time"
+
+// SetKeepAlivePeriod sets keepalive period
+func (l *keepAliveConn) SetKeepAlivePeriod(d time.Duration) error {
+ return l.TCPConn.SetKeepAlivePeriod(d)
+}
diff --git a/client/pkg/transport/limit_listen.go b/client/pkg/transport/limit_listen.go
index 930c542066f..bf4c4e104a2 100644
--- a/client/pkg/transport/limit_listen.go
+++ b/client/pkg/transport/limit_listen.go
@@ -23,9 +23,7 @@ import (
"time"
)
-var (
- ErrNotTCP = errors.New("only tcp connections have keepalive")
-)
+var ErrNotTCP = errors.New("only tcp connections have keepalive")
// LimitListener returns a Listener that accepts at most n simultaneous
// connections from the provided Listener.
@@ -63,6 +61,9 @@ func (l *limitListenerConn) Close() error {
return err
}
+// SetKeepAlive sets keepalive
+//
+// Deprecated: use (*keepAliveConn) SetKeepAlive instead.
func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error {
tcpc, ok := l.Conn.(*net.TCPConn)
if !ok {
@@ -71,6 +72,9 @@ func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error {
return tcpc.SetKeepAlive(doKeepAlive)
}
+// SetKeepAlivePeriod sets keepalive period
+//
+// Deprecated: use (*keepAliveConn) SetKeepAlivePeriod instead.
func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error {
tcpc, ok := l.Conn.(*net.TCPConn)
if !ok {
diff --git a/client/pkg/transport/listener.go b/client/pkg/transport/listener.go
index 992c773eaac..9c2d29ba998 100644
--- a/client/pkg/transport/listener.go
+++ b/client/pkg/transport/listener.go
@@ -32,10 +32,11 @@ import (
"strings"
"time"
+ "go.uber.org/zap"
+
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/client/pkg/v3/tlsutil"
-
- "go.uber.org/zap"
+ "go.etcd.io/etcd/client/pkg/v3/verify"
)
// NewListener creates a new listner.
@@ -43,7 +44,7 @@ func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err err
return newListener(addr, scheme, WithTLSInfo(tlsinfo))
}
-// NewListenerWithOpts creates a new listener which accpets listener options.
+// NewListenerWithOpts creates a new listener which accepts listener options.
func NewListenerWithOpts(addr, scheme string, opts ...ListenerOption) (net.Listener, error) {
return newListener(addr, scheme, opts...)
}
@@ -59,16 +60,12 @@ func newListener(addr, scheme string, opts ...ListenerOption) (net.Listener, err
switch {
case lnOpts.IsSocketOpts():
// new ListenConfig with socket options.
- config, err := newListenConfig(lnOpts.socketOpts)
- if err != nil {
- return nil, err
- }
- lnOpts.ListenConfig = config
+ lnOpts.ListenConfig = newListenConfig(lnOpts.socketOpts)
// check for timeout
fallthrough
case lnOpts.IsTimeout(), lnOpts.IsSocketOpts():
// timeout listener with socket options.
- ln, err := lnOpts.ListenConfig.Listen(context.TODO(), "tcp", addr)
+ ln, err := newKeepAliveListener(&lnOpts.ListenConfig, addr)
if err != nil {
return nil, err
}
@@ -78,7 +75,7 @@ func newListener(addr, scheme string, opts ...ListenerOption) (net.Listener, err
writeTimeout: lnOpts.writeTimeout,
}
case lnOpts.IsTimeout():
- ln, err := net.Listen("tcp", addr)
+ ln, err := newKeepAliveListener(nil, addr)
if err != nil {
return nil, err
}
@@ -88,7 +85,7 @@ func newListener(addr, scheme string, opts ...ListenerOption) (net.Listener, err
writeTimeout: lnOpts.writeTimeout,
}
default:
- ln, err := net.Listen("tcp", addr)
+ ln, err := newKeepAliveListener(nil, addr)
if err != nil {
return nil, err
}
@@ -102,6 +99,22 @@ func newListener(addr, scheme string, opts ...ListenerOption) (net.Listener, err
return wrapTLS(scheme, lnOpts.tlsInfo, lnOpts.Listener)
}
+func newKeepAliveListener(cfg *net.ListenConfig, addr string) (net.Listener, error) {
+ var ln net.Listener
+ var err error
+
+ if cfg != nil {
+ ln, err = cfg.Listen(context.TODO(), "tcp", addr)
+ } else {
+ ln, err = net.Listen("tcp", addr)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return NewKeepAliveListener(ln, "tcp", nil)
+}
+
func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) {
if scheme != "https" && scheme != "unixs" {
return l, nil
@@ -112,7 +125,7 @@ func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, err
return newTLSListener(l, tlsinfo, checkSAN)
}
-func newListenConfig(sopts *SocketOpts) (net.ListenConfig, error) {
+func newListenConfig(sopts *SocketOpts) net.ListenConfig {
lc := net.ListenConfig{}
if sopts != nil {
ctls := getControls(sopts)
@@ -120,7 +133,7 @@ func newListenConfig(sopts *SocketOpts) (net.ListenConfig, error) {
lc.Control = ctls.Control
}
}
- return lc, nil
+ return lc
}
type TLSInfo struct {
@@ -152,6 +165,14 @@ type TLSInfo struct {
// Note that cipher suites are prioritized in the given order.
CipherSuites []uint16
+ // MinVersion is the minimum TLS version that is acceptable.
+ // If not set, the minimum version is TLS 1.2.
+ MinVersion uint16
+
+ // MaxVersion is the maximum TLS version that is acceptable.
+ // If not set, the default used by Go is selected (see tls.Config.MaxVersion).
+ MaxVersion uint16
+
selfCert bool
// parseFunc exists to simplify testing. Typically, parseFunc
@@ -159,12 +180,23 @@ type TLSInfo struct {
parseFunc func([]byte, []byte) (tls.Certificate, error)
// AllowedCN is a CN which must be provided by a client.
+ //
+ // Deprecated: use AllowedCNs instead.
AllowedCN string
// AllowedHostname is an IP address or hostname that must match the TLS
// certificate provided by a client.
+ //
+ // Deprecated: use AllowedHostnames instead.
AllowedHostname string
+ // AllowedCNs is a list of acceptable CNs which must be provided by a client.
+ AllowedCNs []string
+
+ // AllowedHostnames is a list of acceptable IP addresses or hostnames that must match the
+ // TLS certificate provided by a client.
+ AllowedHostnames []string
+
// Logger logs TLS errors.
// If nil, all logs are discarded.
Logger *zap.Logger
@@ -172,6 +204,9 @@ type TLSInfo struct {
// EmptyCN indicates that the cert must have empty CN.
// If true, ClientConfig() will return an error for a cert with non empty CN.
EmptyCN bool
+
+ // LocalAddr is the local IP address to use when communicating with a peer.
+ LocalAddr string
}
func (info TLSInfo) String() string {
@@ -182,34 +217,35 @@ func (info TLSInfo) Empty() bool {
return info.CertFile == "" && info.KeyFile == ""
}
-func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertValidity uint, additionalUsages ...x509.ExtKeyUsage) (info TLSInfo, err error) {
- info.Logger = lg
+func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertValidity uint, additionalUsages ...x509.ExtKeyUsage) (TLSInfo, error) {
+ verify.Assert(lg != nil, "nil log isn't allowed")
+
+ var err error
+ info := TLSInfo{Logger: lg}
if selfSignedCertValidity == 0 {
- err = fmt.Errorf("selfSignedCertValidity is invalid,it should be greater than 0")
+ err = errors.New("selfSignedCertValidity is invalid,it should be greater than 0")
info.Logger.Warn(
"cannot generate cert",
zap.Error(err),
)
- return
+ return info, err
}
- err = fileutil.TouchDirAll(dirpath)
+ err = fileutil.TouchDirAll(lg, dirpath)
if err != nil {
- if info.Logger != nil {
- info.Logger.Warn(
- "cannot create cert directory",
- zap.Error(err),
- )
- }
- return
+ info.Logger.Warn(
+ "cannot create cert directory",
+ zap.Error(err),
+ )
+ return info, err
}
certPath, err := filepath.Abs(filepath.Join(dirpath, "cert.pem"))
if err != nil {
- return
+ return info, err
}
keyPath, err := filepath.Abs(filepath.Join(dirpath, "key.pem"))
if err != nil {
- return
+ return info, err
}
_, errcert := os.Stat(certPath)
_, errkey := os.Stat(keyPath)
@@ -219,19 +255,17 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali
info.ClientCertFile = certPath
info.ClientKeyFile = keyPath
info.selfCert = true
- return
+ return info, err
}
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
- if info.Logger != nil {
- info.Logger.Warn(
- "cannot generate random number",
- zap.Error(err),
- )
- }
- return
+ info.Logger.Warn(
+ "cannot generate random number",
+ zap.Error(err),
+ )
+ return info, err
}
tmpl := x509.Certificate{
@@ -240,17 +274,16 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali
NotBefore: time.Now(),
NotAfter: time.Now().Add(time.Duration(selfSignedCertValidity) * 365 * (24 * time.Hour)),
- KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCRLSign,
ExtKeyUsage: append([]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, additionalUsages...),
BasicConstraintsValid: true,
+ IsCA: true,
}
- if info.Logger != nil {
- info.Logger.Warn(
- "automatically generate certificates",
- zap.Time("certificate-validity-bound-not-after", tmpl.NotAfter),
- )
- }
+ info.Logger.Warn(
+ "automatically generate certificates",
+ zap.Time("certificate-validity-bound-not-after", tmpl.NotAfter),
+ )
for _, host := range hosts {
h, _, _ := net.SplitHostPort(host)
@@ -263,24 +296,20 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali
priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
if err != nil {
- if info.Logger != nil {
- info.Logger.Warn(
- "cannot generate ECDSA key",
- zap.Error(err),
- )
- }
- return
+ info.Logger.Warn(
+ "cannot generate ECDSA key",
+ zap.Error(err),
+ )
+ return info, err
}
derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv)
if err != nil {
- if info.Logger != nil {
- info.Logger.Warn(
- "cannot generate x509 certificate",
- zap.Error(err),
- )
- }
- return
+ info.Logger.Warn(
+ "cannot generate x509 certificate",
+ zap.Error(err),
+ )
+ return info, err
}
certOut, err := os.Create(certPath)
@@ -290,34 +319,29 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali
zap.String("path", certPath),
zap.Error(err),
)
- return
+ return info, err
}
pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
certOut.Close()
- if info.Logger != nil {
- info.Logger.Info("created cert file", zap.String("path", certPath))
- }
+
+ info.Logger.Info("created cert file", zap.String("path", certPath))
b, err := x509.MarshalECPrivateKey(priv)
if err != nil {
- return
+ return info, err
}
- keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+ keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
if err != nil {
- if info.Logger != nil {
- info.Logger.Warn(
- "cannot key file",
- zap.String("path", keyPath),
- zap.Error(err),
- )
- }
- return
+ info.Logger.Warn(
+ "cannot key file",
+ zap.String("path", keyPath),
+ zap.Error(err),
+ )
+ return info, err
}
pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b})
keyOut.Close()
- if info.Logger != nil {
- info.Logger.Info("created key file", zap.String("path", keyPath))
- }
+ info.Logger.Info("created key file", zap.String("path", keyPath))
return SelfCert(lg, dirpath, hosts, selfSignedCertValidity)
}
@@ -326,8 +350,8 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali
// Previously,
// 1. Server has non-empty (*tls.Config).Certificates on client hello
// 2. Server calls (*tls.Config).GetCertificate iff:
-// - Server's (*tls.Config).Certificates is not empty, or
-// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName
+// - Server's (*tls.Config).Certificates is not empty, or
+// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName
//
// When (*tls.Config).Certificates is always populated on initial handshake,
// client is expected to provide a valid matching SNI to pass the TLS
@@ -365,8 +389,17 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) {
}
}
+ var minVersion uint16
+ if info.MinVersion != 0 {
+ minVersion = info.MinVersion
+ } else {
+ // Default minimum version is TLS 1.2; earlier versions are insecure and deprecated.
+ minVersion = tls.VersionTLS12
+ }
+
cfg := &tls.Config{
- MinVersion: tls.VersionTLS12,
+ MinVersion: minVersion,
+ MaxVersion: info.MaxVersion,
ServerName: info.ServerName,
}
@@ -377,19 +410,52 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) {
// Client certificates may be verified by either an exact match on the CN,
// or a more general check of the CN and SANs.
var verifyCertificate func(*x509.Certificate) bool
+
+ if info.AllowedCN != "" && len(info.AllowedCNs) > 0 {
+ return nil, fmt.Errorf("AllowedCN and AllowedCNs are mutually exclusive (cn=%q, cns=%q)", info.AllowedCN, info.AllowedCNs)
+ }
+ if info.AllowedHostname != "" && len(info.AllowedHostnames) > 0 {
+ return nil, fmt.Errorf("AllowedHostname and AllowedHostnames are mutually exclusive (hostname=%q, hostnames=%q)", info.AllowedHostname, info.AllowedHostnames)
+ }
+ if info.AllowedCN != "" && info.AllowedHostname != "" {
+ return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname)
+ }
+ if len(info.AllowedCNs) > 0 && len(info.AllowedHostnames) > 0 {
+ return nil, fmt.Errorf("AllowedCNs and AllowedHostnames are mutually exclusive (cns=%q, hostnames=%q)", info.AllowedCNs, info.AllowedHostnames)
+ }
+
if info.AllowedCN != "" {
- if info.AllowedHostname != "" {
- return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname)
- }
+ info.Logger.Warn("AllowedCN is deprecated, use AllowedCNs instead")
verifyCertificate = func(cert *x509.Certificate) bool {
return info.AllowedCN == cert.Subject.CommonName
}
}
if info.AllowedHostname != "" {
+ info.Logger.Warn("AllowedHostname is deprecated, use AllowedHostnames instead")
verifyCertificate = func(cert *x509.Certificate) bool {
return cert.VerifyHostname(info.AllowedHostname) == nil
}
}
+ if len(info.AllowedCNs) > 0 {
+ verifyCertificate = func(cert *x509.Certificate) bool {
+ for _, allowedCN := range info.AllowedCNs {
+ if allowedCN == cert.Subject.CommonName {
+ return true
+ }
+ }
+ return false
+ }
+ }
+ if len(info.AllowedHostnames) > 0 {
+ verifyCertificate = func(cert *x509.Certificate) bool {
+ for _, allowedHostname := range info.AllowedHostnames {
+ if cert.VerifyHostname(allowedHostname) == nil {
+ return true
+ }
+ }
+ return false
+ }
+ }
if verifyCertificate != nil {
cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
for _, chains := range verifiedChains {
@@ -408,23 +474,19 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) {
cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (cert *tls.Certificate, err error) {
cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
if os.IsNotExist(err) {
- if info.Logger != nil {
- info.Logger.Warn(
- "failed to find peer cert files",
- zap.String("cert-file", info.CertFile),
- zap.String("key-file", info.KeyFile),
- zap.Error(err),
- )
- }
+ info.Logger.Warn(
+ "failed to find peer cert files",
+ zap.String("cert-file", info.CertFile),
+ zap.String("key-file", info.KeyFile),
+ zap.Error(err),
+ )
} else if err != nil {
- if info.Logger != nil {
- info.Logger.Warn(
- "failed to create peer certificate",
- zap.String("cert-file", info.CertFile),
- zap.String("key-file", info.KeyFile),
- zap.Error(err),
- )
- }
+ info.Logger.Warn(
+ "failed to create peer certificate",
+ zap.String("cert-file", info.CertFile),
+ zap.String("key-file", info.KeyFile),
+ zap.Error(err),
+ )
}
return cert, err
}
@@ -435,23 +497,19 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) {
}
cert, err = tlsutil.NewCert(certfile, keyfile, info.parseFunc)
if os.IsNotExist(err) {
- if info.Logger != nil {
- info.Logger.Warn(
- "failed to find client cert files",
- zap.String("cert-file", certfile),
- zap.String("key-file", keyfile),
- zap.Error(err),
- )
- }
+ info.Logger.Warn(
+ "failed to find client cert files",
+ zap.String("cert-file", certfile),
+ zap.String("key-file", keyfile),
+ zap.Error(err),
+ )
} else if err != nil {
- if info.Logger != nil {
- info.Logger.Warn(
- "failed to create client certificate",
- zap.String("cert-file", certfile),
- zap.String("key-file", keyfile),
- zap.Error(err),
- )
- }
+ info.Logger.Warn(
+ "failed to create client certificate",
+ zap.String("cert-file", certfile),
+ zap.String("key-file", keyfile),
+ zap.Error(err),
+ )
}
return cert, err
}
@@ -497,11 +555,6 @@ func (info TLSInfo) ServerConfig() (*tls.Config, error) {
// "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server
cfg.NextProtos = []string{"h2"}
- // go1.13 enables TLS 1.3 by default
- // and in TLS 1.3, cipher suites are not configurable
- // setting Max TLS version to TLS 1.2 for go 1.13
- cfg.MaxVersion = tls.VersionTLS12
-
return cfg, nil
}
@@ -556,11 +609,6 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) {
}
}
- // go1.13 enables TLS 1.3 by default
- // and in TLS 1.3, cipher suites are not configurable
- // setting Max TLS version to TLS 1.2 for go 1.13
- cfg.MaxVersion = tls.VersionTLS12
-
return cfg, nil
}
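
A hedged sketch showing the new TLSInfo knobs together (the file paths are placeholders; only fields visible in this diff are set, and MinVersion still defaults to TLS 1.2 when left at zero):

```go
package main

import (
	"crypto/tls"
	"log"

	"go.uber.org/zap"

	"go.etcd.io/etcd/client/pkg/v3/transport"
)

func main() {
	info := transport.TLSInfo{
		CertFile:         "/path/to/server.crt", // placeholder paths
		KeyFile:          "/path/to/server.key",
		TrustedCAFile:    "/path/to/ca.crt",
		AllowedCNs:       []string{"etcd-peer", "etcd-client"}, // replaces the deprecated AllowedCN
		AllowedHostnames: nil,                                  // mutually exclusive with AllowedCNs
		MinVersion:       tls.VersionTLS12,
		MaxVersion:       tls.VersionTLS13,
		Logger:           zap.NewExample(),
	}

	cfg, err := info.ServerConfig()
	if err != nil {
		log.Fatalf("building server TLS config: %v", err) // fails here if the placeholder files are missing
	}
	log.Printf("min TLS version: %#x", cfg.MinVersion)
}
```
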
diff --git a/client/pkg/transport/listener_opts.go b/client/pkg/transport/listener_opts.go
index ad4f6904da9..7536f6aff46 100644
--- a/client/pkg/transport/listener_opts.go
+++ b/client/pkg/transport/listener_opts.go
@@ -1,3 +1,17 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package transport
import (
diff --git a/client/pkg/transport/listener_test.go b/client/pkg/transport/listener_test.go
index 00657648ece..275f8d41486 100644
--- a/client/pkg/transport/listener_test.go
+++ b/client/pkg/transport/listener_test.go
@@ -15,59 +15,57 @@
package transport
import (
+ "crypto/rand"
"crypto/tls"
"crypto/x509"
+ "encoding/pem"
"errors"
- "io/ioutil"
+ "math/big"
"net"
"net/http"
"os"
+ "path/filepath"
+ "sync"
"testing"
"time"
- "go.uber.org/zap"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
)
-func createSelfCert(hosts ...string) (*TLSInfo, func(), error) {
- return createSelfCertEx("127.0.0.1")
+func createSelfCert(t *testing.T) (*TLSInfo, error) {
+ t.Helper()
+ return createSelfCertEx(t, "127.0.0.1")
}
-func createSelfCertEx(host string, additionalUsages ...x509.ExtKeyUsage) (*TLSInfo, func(), error) {
- d, terr := ioutil.TempDir("", "etcd-test-tls-")
- if terr != nil {
- return nil, nil, terr
- }
- info, err := SelfCert(zap.NewExample(), d, []string{host + ":0"}, 1, additionalUsages...)
+func createSelfCertEx(t *testing.T, host string, additionalUsages ...x509.ExtKeyUsage) (*TLSInfo, error) {
+ t.Helper()
+ d := t.TempDir()
+ info, err := SelfCert(zaptest.NewLogger(t), d, []string{host + ":0"}, 1, additionalUsages...)
if err != nil {
- return nil, nil, err
+ return nil, err
}
- return &info, func() { os.RemoveAll(d) }, nil
+ return &info, nil
}
-func fakeCertificateParserFunc(cert tls.Certificate, err error) func(certPEMBlock, keyPEMBlock []byte) (tls.Certificate, error) {
+func fakeCertificateParserFunc(err error) func(certPEMBlock, keyPEMBlock []byte) (tls.Certificate, error) {
return func(certPEMBlock, keyPEMBlock []byte) (tls.Certificate, error) {
- return cert, err
+ return tls.Certificate{}, err
}
}
// TestNewListenerTLSInfo tests that NewListener with valid TLSInfo returns
// a TLS listener that accepts TLS connections.
func TestNewListenerTLSInfo(t *testing.T) {
- tlsInfo, del, err := createSelfCert()
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
- defer del()
+ tlsInfo, err := createSelfCert(t)
+ require.NoErrorf(t, err, "unable to create cert")
testNewListenerTLSInfoAccept(t, *tlsInfo)
}
func TestNewListenerWithOpts(t *testing.T) {
- tlsInfo, del, err := createSelfCert()
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
- defer del()
+ tlsInfo, err := createSelfCert(t)
+ require.NoErrorf(t, err, "unable to create cert")
tests := map[string]struct {
opts []ListenerOption
@@ -114,22 +112,17 @@ func TestNewListenerWithOpts(t *testing.T) {
if ln != nil {
defer ln.Close()
}
- if test.expectedErr && err == nil {
- t.Fatalf("expected error")
- }
- if !test.expectedErr && err != nil {
- t.Fatalf("unexpected error: %v", err)
+ require.Falsef(t, test.expectedErr && err == nil, "expected error")
+ if !test.expectedErr {
+ require.NoErrorf(t, err, "unexpected error: %v", err)
}
})
}
}
func TestNewListenerWithSocketOpts(t *testing.T) {
- tlsInfo, del, err := createSelfCert()
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
- defer del()
+ tlsInfo, err := createSelfCert(t)
+ require.NoErrorf(t, err, "unable to create cert")
tests := map[string]struct {
opts []ListenerOption
@@ -199,29 +192,34 @@ func TestNewListenerWithSocketOpts(t *testing.T) {
for testName, test := range tests {
t.Run(testName, func(t *testing.T) {
ln, err := NewListenerWithOpts("127.0.0.1:0", test.scheme, test.opts...)
- if err != nil {
- t.Fatalf("unexpected NewListenerWithSocketOpts error: %v", err)
- }
+ require.NoErrorf(t, err, "unexpected NewListenerWithSocketOpts error")
defer ln.Close()
ln2, err := NewListenerWithOpts(ln.Addr().String(), test.scheme, test.opts...)
if ln2 != nil {
ln2.Close()
}
- if test.expectedErr && err == nil {
- t.Fatalf("expected error")
+ if test.expectedErr {
+ require.Errorf(t, err, "expected error")
}
- if !test.expectedErr && err != nil {
- t.Fatalf("unexpected error: %v", err)
+ if !test.expectedErr {
+ require.NoErrorf(t, err, "unexpected error: %v", err)
+ }
+
+ if test.scheme == "http" {
+ lnOpts := newListenOpts(test.opts...)
+ if !lnOpts.IsSocketOpts() && !lnOpts.IsTimeout() {
+ _, ok := ln.(*keepaliveListener)
+ require.Truef(t, ok, "ln: unexpected listener type: %T, wanted *keepaliveListener", ln)
+ }
}
})
}
}
func testNewListenerTLSInfoAccept(t *testing.T, tlsInfo TLSInfo) {
+ t.Helper()
ln, err := NewListener("127.0.0.1:0", "https", &tlsInfo)
- if err != nil {
- t.Fatalf("unexpected NewListener error: %v", err)
- }
+ require.NoErrorf(t, err, "unexpected NewListener error")
defer ln.Close()
tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
@@ -229,9 +227,7 @@ func testNewListenerTLSInfoAccept(t *testing.T, tlsInfo TLSInfo) {
go cli.Get("https://" + ln.Addr().String())
conn, err := ln.Accept()
- if err != nil {
- t.Fatalf("unexpected Accept error: %v", err)
- }
+ require.NoErrorf(t, err, "unexpected Accept error")
defer conn.Close()
if _, ok := conn.(*tls.Conn); !ok {
t.Error("failed to accept *tls.Conn")
@@ -258,36 +254,27 @@ func TestNewListenerTLSInfoSkipClientSANVerify(t *testing.T) {
}
func testNewListenerTLSInfoClientCheck(t *testing.T, skipClientSANVerify, goodClientHost, acceptExpected bool) {
- tlsInfo, del, err := createSelfCert()
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
- defer del()
+ t.Helper()
+ tlsInfo, err := createSelfCert(t)
+ require.NoErrorf(t, err, "unable to create cert")
host := "127.0.0.222"
if goodClientHost {
host = "127.0.0.1"
}
- clientTLSInfo, del2, err := createSelfCertEx(host, x509.ExtKeyUsageClientAuth)
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
- defer del2()
+ clientTLSInfo, err := createSelfCertEx(t, host, x509.ExtKeyUsageClientAuth)
+ require.NoErrorf(t, err, "unable to create cert")
tlsInfo.SkipClientSANVerify = skipClientSANVerify
tlsInfo.TrustedCAFile = clientTLSInfo.CertFile
rootCAs := x509.NewCertPool()
- loaded, err := ioutil.ReadFile(tlsInfo.CertFile)
- if err != nil {
- t.Fatalf("unexpected missing certfile: %v", err)
- }
+ loaded, err := os.ReadFile(tlsInfo.CertFile)
+ require.NoErrorf(t, err, "unexpected missing certfile")
rootCAs.AppendCertsFromPEM(loaded)
clientCert, err := tls.LoadX509KeyPair(clientTLSInfo.CertFile, clientTLSInfo.KeyFile)
- if err != nil {
- t.Fatalf("unable to create peer cert: %v", err)
- }
+ require.NoErrorf(t, err, "unable to create peer cert")
tlsConfig := &tls.Config{}
tlsConfig.InsecureSkipVerify = false
@@ -295,9 +282,7 @@ func testNewListenerTLSInfoClientCheck(t *testing.T, skipClientSANVerify, goodCl
tlsConfig.RootCAs = rootCAs
ln, err := NewListener("127.0.0.1:0", "https", tlsInfo)
- if err != nil {
- t.Fatalf("unexpected NewListener error: %v", err)
- }
+ require.NoErrorf(t, err, "unexpected NewListener error")
defer ln.Close()
tr := &http.Transport{TLSClientConfig: tlsConfig}
@@ -345,11 +330,8 @@ func TestNewListenerTLSEmptyInfo(t *testing.T) {
}
func TestNewTransportTLSInfo(t *testing.T) {
- tlsinfo, del, err := createSelfCert()
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
- defer del()
+ tlsinfo, err := createSelfCert(t)
+ require.NoErrorf(t, err, "unable to create cert")
tests := []TLSInfo{
{},
@@ -368,15 +350,10 @@ func TestNewTransportTLSInfo(t *testing.T) {
}
for i, tt := range tests {
- tt.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, nil)
+ tt.parseFunc = fakeCertificateParserFunc(nil)
trans, err := NewTransport(tt, time.Second)
- if err != nil {
- t.Fatalf("Received unexpected error from NewTransport: %v", err)
- }
-
- if trans.TLSClientConfig == nil {
- t.Fatalf("#%d: want non-nil TLSClientConfig", i)
- }
+ require.NoErrorf(t, err, "Received unexpected error from NewTransport")
+ require.NotNilf(t, trans.TLSClientConfig, "#%d: want non-nil TLSClientConfig", i)
}
}
@@ -417,11 +394,8 @@ func TestTLSInfoEmpty(t *testing.T) {
}
func TestTLSInfoMissingFields(t *testing.T) {
- tlsinfo, del, err := createSelfCert()
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
- defer del()
+ tlsinfo, err := createSelfCert(t)
+ require.NoErrorf(t, err, "unable to create cert")
tests := []TLSInfo{
{CertFile: tlsinfo.CertFile},
@@ -442,11 +416,8 @@ func TestTLSInfoMissingFields(t *testing.T) {
}
func TestTLSInfoParseFuncError(t *testing.T) {
- tlsinfo, del, err := createSelfCert()
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
- defer del()
+ tlsinfo, err := createSelfCert(t)
+ require.NoErrorf(t, err, "unable to create cert")
tests := []struct {
info TLSInfo
@@ -461,7 +432,7 @@ func TestTLSInfoParseFuncError(t *testing.T) {
}
for i, tt := range tests {
- tt.info.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, errors.New("fake"))
+ tt.info.parseFunc = fakeCertificateParserFunc(errors.New("fake"))
if _, err = tt.info.ServerConfig(); err == nil {
t.Errorf("#%d: expected non-nil error from ServerConfig()", i)
@@ -475,11 +446,8 @@ func TestTLSInfoParseFuncError(t *testing.T) {
func TestTLSInfoConfigFuncs(t *testing.T) {
ln := zaptest.NewLogger(t)
- tlsinfo, del, err := createSelfCert()
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
- defer del()
+ tlsinfo, err := createSelfCert(t)
+ require.NoErrorf(t, err, "unable to create cert")
tests := []struct {
info TLSInfo
@@ -500,7 +468,7 @@ func TestTLSInfoConfigFuncs(t *testing.T) {
}
for i, tt := range tests {
- tt.info.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, nil)
+ tt.info.parseFunc = fakeCertificateParserFunc(nil)
sCfg, err := tt.info.ServerConfig()
if err != nil {
@@ -532,19 +500,16 @@ func TestNewListenerUnixSocket(t *testing.T) {
// TestNewListenerTLSInfoSelfCert tests that a new certificate accepts connections.
func TestNewListenerTLSInfoSelfCert(t *testing.T) {
- tmpdir, err := ioutil.TempDir(os.TempDir(), "tlsdir")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(tmpdir)
- tlsinfo, err := SelfCert(zap.NewExample(), tmpdir, []string{"127.0.0.1"}, 1)
- if err != nil {
- t.Fatal(err)
- }
- if tlsinfo.Empty() {
- t.Fatalf("tlsinfo should have certs (%+v)", tlsinfo)
- }
+ tmpdir := t.TempDir()
+
+ tlsinfo, err := SelfCert(zaptest.NewLogger(t), tmpdir, []string{"127.0.0.1"}, 1)
+ require.NoError(t, err)
+ require.Falsef(t, tlsinfo.Empty(), "tlsinfo should have certs (%+v)", tlsinfo)
testNewListenerTLSInfoAccept(t, tlsinfo)
+
+ assert.Panicsf(t, func() {
+ SelfCert(nil, tmpdir, []string{"127.0.0.1"}, 1)
+ }, "expected panic with nil log")
}
func TestIsClosedConnError(t *testing.T) {
@@ -554,9 +519,7 @@ func TestIsClosedConnError(t *testing.T) {
}
l.Close()
_, err = l.Accept()
- if !IsClosedConnError(err) {
- t.Fatalf("expect true, got false (%v)", err)
- }
+ require.Truef(t, IsClosedConnError(err), "expect true, got false (%v)", err)
}
func TestSocktOptsEmpty(t *testing.T) {
@@ -576,3 +539,138 @@ func TestSocktOptsEmpty(t *testing.T) {
}
}
}
+
+// TestNewListenerWithACRLFile tests listener behavior when a certificate revocation list (CRL) file is configured.
+func TestNewListenerWithACRLFile(t *testing.T) {
+ clientTLSInfo, err := createSelfCertEx(t, "127.0.0.1", x509.ExtKeyUsageClientAuth)
+ require.NoErrorf(t, err, "unable to create client cert")
+
+ loadFileAsPEM := func(fileName string) []byte {
+ loaded, readErr := os.ReadFile(fileName)
+ require.NoErrorf(t, readErr, "unable to read file %q", fileName)
+ block, _ := pem.Decode(loaded)
+ return block.Bytes
+ }
+
+ clientCert, err := x509.ParseCertificate(loadFileAsPEM(clientTLSInfo.CertFile))
+ require.NoErrorf(t, err, "unable to parse client cert")
+
+ tests := map[string]struct {
+ expectHandshakeError bool
+ revokedCertificateEntries []x509.RevocationListEntry
+ revocationListContents []byte
+ }{
+ "empty revocation list": {
+ expectHandshakeError: false,
+ },
+ "client cert is revoked": {
+ expectHandshakeError: true,
+ revokedCertificateEntries: []x509.RevocationListEntry{
+ {
+ SerialNumber: clientCert.SerialNumber,
+ RevocationTime: time.Now(),
+ },
+ },
+ },
+ "invalid CRL file content": {
+ expectHandshakeError: true,
+ revocationListContents: []byte("@invalidcontent"),
+ },
+ }
+
+ for testName, test := range tests {
+ t.Run(testName, func(t *testing.T) {
+ tmpdir := t.TempDir()
+ tlsInfo, err := createSelfCert(t)
+ require.NoErrorf(t, err, "unable to create server cert")
+ tlsInfo.TrustedCAFile = clientTLSInfo.CertFile
+ tlsInfo.CRLFile = filepath.Join(tmpdir, "revoked.r0")
+
+ cert, err := x509.ParseCertificate(loadFileAsPEM(tlsInfo.CertFile))
+ require.NoErrorf(t, err, "unable to decode server cert")
+
+ key, err := x509.ParseECPrivateKey(loadFileAsPEM(tlsInfo.KeyFile))
+ require.NoErrorf(t, err, "unable to parse server key")
+
+ revocationListContents := test.revocationListContents
+ if len(revocationListContents) == 0 {
+ tmpl := &x509.RevocationList{
+ RevokedCertificateEntries: test.revokedCertificateEntries,
+ ThisUpdate: time.Now(),
+ NextUpdate: time.Now().Add(time.Hour),
+ Number: big.NewInt(1),
+ }
+ revocationListContents, err = x509.CreateRevocationList(rand.Reader, tmpl, cert, key)
+ require.NoErrorf(t, err, "unable to create revocation list")
+ }
+
+ err = os.WriteFile(tlsInfo.CRLFile, revocationListContents, 0o600)
+ require.NoErrorf(t, err, "unable to write revocation list")
+
+ chHandshakeFailure := make(chan error, 1)
+ tlsInfo.HandshakeFailure = func(_ *tls.Conn, err error) {
+ if err != nil {
+ chHandshakeFailure <- err
+ }
+ }
+
+ rootCAs := x509.NewCertPool()
+ rootCAs.AddCert(cert)
+
+ clientCert, err := tls.LoadX509KeyPair(clientTLSInfo.CertFile, clientTLSInfo.KeyFile)
+ require.NoErrorf(t, err, "unable to create peer cert")
+
+ ln, err := NewListener("127.0.0.1:0", "https", tlsInfo)
+ require.NoErrorf(t, err, "unable to start listener")
+
+ tlsConfig := &tls.Config{}
+ tlsConfig.InsecureSkipVerify = false
+ tlsConfig.Certificates = []tls.Certificate{clientCert}
+ tlsConfig.RootCAs = rootCAs
+
+ tr := &http.Transport{TLSClientConfig: tlsConfig}
+ cli := &http.Client{Transport: tr, Timeout: 5 * time.Second}
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+ if _, gerr := cli.Get("https://" + ln.Addr().String()); gerr != nil {
+ t.Logf("http GET failed: %v", gerr)
+ }
+ }()
+
+ chAcceptConn := make(chan net.Conn, 1)
+ go func() {
+ defer wg.Done()
+ conn, err := ln.Accept()
+ if err == nil {
+ chAcceptConn <- conn
+ }
+ }()
+
+ timer := time.NewTimer(5 * time.Second)
+ defer func() {
+ if !timer.Stop() {
+ <-timer.C
+ }
+ }()
+
+ select {
+ case err := <-chHandshakeFailure:
+ if !test.expectHandshakeError {
+ t.Errorf("expecting no handshake error, got: %v", err)
+ }
+ case conn := <-chAcceptConn:
+ if test.expectHandshakeError {
+ t.Errorf("expecting handshake error, got nothing")
+ }
+ conn.Close()
+ case <-timer.C:
+ t.Error("timed out waiting for closed connection or handshake error")
+ }
+
+ ln.Close()
+ wg.Wait()
+ })
+ }
+}
diff --git a/client/pkg/transport/listener_tls.go b/client/pkg/transport/listener_tls.go
index 6f1600945cc..2c94841625b 100644
--- a/client/pkg/transport/listener_tls.go
+++ b/client/pkg/transport/listener_tls.go
@@ -19,8 +19,8 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
- "io/ioutil"
"net"
+ "os"
"strings"
"sync"
)
@@ -168,16 +168,16 @@ func (l *tlsListener) acceptLoop() {
func checkCRL(crlPath string, cert []*x509.Certificate) error {
// TODO: cache
- crlBytes, err := ioutil.ReadFile(crlPath)
+ crlBytes, err := os.ReadFile(crlPath)
if err != nil {
return err
}
- certList, err := x509.ParseCRL(crlBytes)
+ certList, err := x509.ParseRevocationList(crlBytes)
if err != nil {
return err
}
revokedSerials := make(map[string]struct{})
- for _, rc := range certList.TBSCertList.RevokedCertificates {
+ for _, rc := range certList.RevokedCertificateEntries {
revokedSerials[string(rc.SerialNumber.Bytes())] = struct{}{}
}
for _, c := range cert {
@@ -222,7 +222,8 @@ func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string
func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) {
// reverse lookup
- wildcards, names := []string{}, []string{}
+ var names []string
+ var wildcards []string
for _, dns := range dnsNames {
if strings.HasPrefix(dns, "*.") {
wildcards = append(wildcards, dns[1:])
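The checkCRL change above swaps the deprecated x509.ParseCRL/TBSCertList API for x509.ParseRevocationList and RevokedCertificateEntries. Below is a minimal standalone sketch of the same revocation check, assuming a DER-encoded CRL file and Go 1.21+; isRevoked, crlPath and the package name are illustrative and not part of this change:

```go
// Package crlcheck sketches the revocation lookup used by checkCRL above.
package crlcheck

import (
	"crypto/x509"
	"fmt"
	"os"
)

// isRevoked reports whether cert's serial number appears in the DER-encoded
// CRL stored at crlPath, mirroring the RevokedCertificateEntries loop above.
func isRevoked(crlPath string, cert *x509.Certificate) (bool, error) {
	der, err := os.ReadFile(crlPath)
	if err != nil {
		return false, err
	}
	crl, err := x509.ParseRevocationList(der)
	if err != nil {
		return false, fmt.Errorf("parse CRL: %w", err)
	}
	for _, entry := range crl.RevokedCertificateEntries {
		if entry.SerialNumber.Cmp(cert.SerialNumber) == 0 {
			return true, nil
		}
	}
	return false, nil
}
```

Unlike the deprecated ParseCRL, ParseRevocationList accepts raw DER only, so a PEM-wrapped CRL would need a pem.Decode step first (as the new test above does for the certificate and key files).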
diff --git a/client/pkg/transport/sockopt.go b/client/pkg/transport/sockopt.go
index 38548ddd713..49b48dc8767 100644
--- a/client/pkg/transport/sockopt.go
+++ b/client/pkg/transport/sockopt.go
@@ -1,3 +1,17 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package transport
import (
@@ -21,12 +35,12 @@ type SocketOpts struct {
// in which case lock on data file could result in unexpected
// condition. User should take caution to protect against lock race.
// [1] https://man7.org/linux/man-pages/man7/socket.7.html
- ReusePort bool
+ ReusePort bool `json:"reuse-port"`
// ReuseAddress enables a socket option SO_REUSEADDR which allows
// binding to an address in `TIME_WAIT` state. Useful to improve MTTR
// in cases where etcd slow to restart due to excessive `TIME_WAIT`.
// [1] https://man7.org/linux/man-pages/man7/socket.7.html
- ReuseAddress bool
+ ReuseAddress bool `json:"reuse-address"`
}
func getControls(sopts *SocketOpts) Controls {
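The json tags added to SocketOpts above let the struct round-trip through configuration files. A small sketch under that assumption; the local SocketOpts copy below exists only to keep the snippet self-contained and is not the transport package's type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// SocketOpts mirrors the tagged fields added in the hunk above.
type SocketOpts struct {
	ReusePort    bool `json:"reuse-port"`
	ReuseAddress bool `json:"reuse-address"`
}

func main() {
	// A config fragment using the tag names from the diff.
	raw := []byte(`{"reuse-port": true, "reuse-address": true}`)

	var opts SocketOpts
	if err := json.Unmarshal(raw, &opts); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", opts) // {ReusePort:true ReuseAddress:true}
}
```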
diff --git a/client/pkg/transport/sockopt_solaris.go b/client/pkg/transport/sockopt_solaris.go
new file mode 100644
index 00000000000..149ad510240
--- /dev/null
+++ b/client/pkg/transport/sockopt_solaris.go
@@ -0,0 +1,34 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build solaris
+
+package transport
+
+import (
+ "errors"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+func setReusePort(network, address string, c syscall.RawConn) error {
+ return errors.New("port reuse is not supported on Solaris")
+}
+
+func setReuseAddress(network, address string, conn syscall.RawConn) error {
+ return conn.Control(func(fd uintptr) {
+ syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1)
+ })
+}
diff --git a/client/pkg/transport/sockopt_unix.go b/client/pkg/transport/sockopt_unix.go
index 432b52e0fce..385eadb0074 100644
--- a/client/pkg/transport/sockopt_unix.go
+++ b/client/pkg/transport/sockopt_unix.go
@@ -1,5 +1,18 @@
-//go:build !windows
-// +build !windows
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows && !solaris && !wasm && !js
package transport
diff --git a/client/pkg/transport/sockopt_wasm.go b/client/pkg/transport/sockopt_wasm.go
new file mode 100644
index 00000000000..c6590b1d469
--- /dev/null
+++ b/client/pkg/transport/sockopt_wasm.go
@@ -0,0 +1,30 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build wasm || js
+
+package transport
+
+import (
+ "errors"
+ "syscall"
+)
+
+func setReusePort(network, address string, c syscall.RawConn) error {
+ return errors.New("port reuse is not supported on WASM")
+}
+
+func setReuseAddress(network, addr string, conn syscall.RawConn) error {
+ return errors.New("address reuse is not supported on WASM")
+}
diff --git a/client/pkg/transport/sockopt_windows.go b/client/pkg/transport/sockopt_windows.go
index 4e5af70b11e..2670b4dc7b5 100644
--- a/client/pkg/transport/sockopt_windows.go
+++ b/client/pkg/transport/sockopt_windows.go
@@ -1,19 +1,32 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
//go:build windows
-// +build windows
package transport
import (
- "fmt"
+ "errors"
"syscall"
)
func setReusePort(network, address string, c syscall.RawConn) error {
- return fmt.Errorf("port reuse is not supported on Windows")
+ return errors.New("port reuse is not supported on Windows")
}
// Windows supports SO_REUSEADDR, but it may cause undefined behavior, as
// there is no protection against port hijacking.
func setReuseAddress(network, addr string, conn syscall.RawConn) error {
- return fmt.Errorf("address reuse is not supported on Windows")
+ return errors.New("address reuse is not supported on Windows")
}
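The platform-specific setReusePort/setReuseAddress variants above are socket control callbacks. The sketch below shows how such a callback is typically attached to a listener through net.ListenConfig on a unix-like platform; it mirrors the SO_REUSEADDR call from the unix/solaris files but is illustrative rather than the actual getControls wiring in sockopt.go:

```go
package main

import (
	"context"
	"fmt"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

// setReuseAddress enables SO_REUSEADDR on the raw socket, matching the
// unix/solaris variants in this diff.
func setReuseAddress(network, address string, conn syscall.RawConn) error {
	return conn.Control(func(fd uintptr) {
		unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEADDR, 1)
	})
}

func main() {
	// ListenConfig invokes the Control callback before bind, which is how
	// the SocketOpts controls end up applied to the listener socket.
	lc := net.ListenConfig{Control: setReuseAddress}
	ln, err := lc.Listen(context.Background(), "tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	fmt.Println("listening on", ln.Addr())
}
```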
diff --git a/client/pkg/transport/timeout_dialer_test.go b/client/pkg/transport/timeout_dialer_test.go
index a2ff0021473..8cb3da9c645 100644
--- a/client/pkg/transport/timeout_dialer_test.go
+++ b/client/pkg/transport/timeout_dialer_test.go
@@ -15,18 +15,19 @@
package transport
import (
+ "errors"
"net"
"testing"
"time"
+
+ "github.com/stretchr/testify/require"
)
func TestReadWriteTimeoutDialer(t *testing.T) {
stop := make(chan struct{})
ln, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("unexpected listen error: %v", err)
- }
+ require.NoErrorf(t, err, "unexpected listen error")
defer func() {
stop <- struct{}{}
}()
@@ -38,9 +39,7 @@ func TestReadWriteTimeoutDialer(t *testing.T) {
rdtimeoutd: 10 * time.Millisecond,
}
conn, err := d.Dial("tcp", ln.Addr().String())
- if err != nil {
- t.Fatalf("unexpected dial error: %v", err)
- }
+ require.NoErrorf(t, err, "unexpected dial error")
defer conn.Close()
// fill the socket buffer
@@ -59,14 +58,13 @@ func TestReadWriteTimeoutDialer(t *testing.T) {
t.Fatal("wait timeout")
}
- if operr, ok := err.(*net.OpError); !ok || operr.Op != "write" || !operr.Timeout() {
+ var operr *net.OpError
+ if !errors.As(err, &operr) || operr.Op != "write" || !operr.Timeout() {
t.Errorf("err = %v, want write i/o timeout error", err)
}
conn, err = d.Dial("tcp", ln.Addr().String())
- if err != nil {
- t.Fatalf("unexpected dial error: %v", err)
- }
+ require.NoErrorf(t, err, "unexpected dial error")
defer conn.Close()
buf := make([]byte, 10)
@@ -81,8 +79,8 @@ func TestReadWriteTimeoutDialer(t *testing.T) {
t.Fatal("wait timeout")
}
- if operr, ok := err.(*net.OpError); !ok || operr.Op != "read" || !operr.Timeout() {
- t.Errorf("err = %v, want write i/o timeout error", err)
+ if !errors.As(err, &operr) || operr.Op != "read" || !operr.Timeout() {
+ t.Errorf("err = %v, want read i/o timeout error", err)
}
}
@@ -93,6 +91,7 @@ type testBlockingServer struct {
}
func (ts *testBlockingServer) Start(t *testing.T) {
+ t.Helper()
for i := 0; i < ts.n; i++ {
conn, err := ts.ln.Accept()
if err != nil {
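The timeout tests above move from err.(*net.OpError) type assertions to errors.As, which also matches wrapped errors. A minimal sketch of the pattern; the dial target is just one convenient way to provoke a timeout-flavoured *net.OpError:

```go
package main

import (
	"errors"
	"fmt"
	"net"
	"time"
)

func main() {
	// Dialing a non-routable address with a tiny timeout is one way to
	// obtain a *net.OpError whose Timeout() is true.
	_, err := net.DialTimeout("tcp", "10.255.255.1:80", 10*time.Millisecond)

	// errors.As walks the wrap chain, unlike a direct type assertion.
	var operr *net.OpError
	if errors.As(err, &operr) && operr.Timeout() {
		fmt.Println("timeout on op:", operr.Op)
	} else {
		fmt.Println("some other error:", err)
	}
}
```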
diff --git a/client/pkg/transport/timeout_listener_test.go b/client/pkg/transport/timeout_listener_test.go
index 0c4f2083730..387a4610e7e 100644
--- a/client/pkg/transport/timeout_listener_test.go
+++ b/client/pkg/transport/timeout_listener_test.go
@@ -15,18 +15,19 @@
package transport
import (
+ "errors"
"net"
"testing"
"time"
+
+ "github.com/stretchr/testify/require"
)
// TestNewTimeoutListener tests that NewTimeoutListener returns a
// rwTimeoutListener struct with timeouts set.
func TestNewTimeoutListener(t *testing.T) {
l, err := NewTimeoutListener("127.0.0.1:0", "http", nil, time.Hour, time.Hour)
- if err != nil {
- t.Fatalf("unexpected NewTimeoutListener error: %v", err)
- }
+ require.NoErrorf(t, err, "unexpected NewTimeoutListener error")
defer l.Close()
tln := l.(*rwTimeoutListener)
if tln.readTimeout != time.Hour {
@@ -39,30 +40,29 @@ func TestNewTimeoutListener(t *testing.T) {
func TestWriteReadTimeoutListener(t *testing.T) {
ln, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("unexpected listen error: %v", err)
- }
+ require.NoErrorf(t, err, "unexpected listen error")
wln := rwTimeoutListener{
Listener: ln,
writeTimeout: 10 * time.Millisecond,
readTimeout: 10 * time.Millisecond,
}
- stop := make(chan struct{}, 1)
- blocker := func() {
+ blocker := func(stopCh <-chan struct{}) {
conn, derr := net.Dial("tcp", ln.Addr().String())
if derr != nil {
t.Errorf("unexpected dail error: %v", derr)
}
defer conn.Close()
// block the receiver until the writer timeout
- <-stop
+ <-stopCh
}
- go blocker()
+
+ writerStopCh := make(chan struct{}, 1)
+ go blocker(writerStopCh)
conn, err := wln.Accept()
if err != nil {
- stop <- struct{}{}
+ writerStopCh <- struct{}{}
t.Fatalf("unexpected accept error: %v", err)
}
defer conn.Close()
@@ -79,20 +79,22 @@ func TestWriteReadTimeoutListener(t *testing.T) {
case <-done:
// It waits 1s more to avoid delay in low-end system.
case <-time.After(wln.writeTimeout*10 + time.Second):
- stop <- struct{}{}
+ writerStopCh <- struct{}{}
t.Fatal("wait timeout")
}
- if operr, ok := err.(*net.OpError); !ok || operr.Op != "write" || !operr.Timeout() {
+ var operr *net.OpError
+ if !errors.As(err, &operr) || operr.Op != "write" || !operr.Timeout() {
t.Errorf("err = %v, want write i/o timeout error", err)
}
- stop <- struct{}{}
+ writerStopCh <- struct{}{}
- go blocker()
+ readerStopCh := make(chan struct{}, 1)
+ go blocker(readerStopCh)
conn, err = wln.Accept()
if err != nil {
- stop <- struct{}{}
+ readerStopCh <- struct{}{}
t.Fatalf("unexpected accept error: %v", err)
}
buf := make([]byte, 10)
@@ -105,12 +107,12 @@ func TestWriteReadTimeoutListener(t *testing.T) {
select {
case <-done:
case <-time.After(wln.readTimeout * 10):
- stop <- struct{}{}
+ readerStopCh <- struct{}{}
t.Fatal("wait timeout")
}
- if operr, ok := err.(*net.OpError); !ok || operr.Op != "read" || !operr.Timeout() {
- t.Errorf("err = %v, want write i/o timeout error", err)
+ if !errors.As(err, &operr) || operr.Op != "read" || !operr.Timeout() {
+ t.Errorf("err = %v, want read i/o timeout error", err)
}
- stop <- struct{}{}
+ readerStopCh <- struct{}{}
}
diff --git a/client/pkg/transport/timeout_transport_test.go b/client/pkg/transport/timeout_transport_test.go
index d2dfe5f6f9b..d5b4bcaf9c8 100644
--- a/client/pkg/transport/timeout_transport_test.go
+++ b/client/pkg/transport/timeout_transport_test.go
@@ -16,20 +16,20 @@ package transport
import (
"bytes"
- "io/ioutil"
+ "io"
"net/http"
"net/http/httptest"
"testing"
"time"
+
+ "github.com/stretchr/testify/require"
)
// TestNewTimeoutTransport tests that NewTimeoutTransport returns a transport
// that can dial out timeout connections.
func TestNewTimeoutTransport(t *testing.T) {
tr, err := NewTimeoutTransport(TLSInfo{}, time.Hour, time.Hour, time.Hour)
- if err != nil {
- t.Fatalf("unexpected NewTimeoutTransport error: %v", err)
- }
+ require.NoError(t, err)
remoteAddr := func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(r.RemoteAddr))
@@ -38,15 +38,11 @@ func TestNewTimeoutTransport(t *testing.T) {
defer srv.Close()
conn, err := tr.Dial("tcp", srv.Listener.Addr().String())
- if err != nil {
- t.Fatalf("unexpected dial error: %v", err)
- }
+ require.NoError(t, err)
defer conn.Close()
tconn, ok := conn.(*timeoutConn)
- if !ok {
- t.Fatalf("failed to dial out *timeoutConn")
- }
+ require.Truef(t, ok, "failed to dial out *timeoutConn")
if tconn.readTimeout != time.Hour {
t.Errorf("read timeout = %s, want %s", tconn.readTimeout, time.Hour)
}
@@ -55,31 +51,21 @@ func TestNewTimeoutTransport(t *testing.T) {
}
// ensure not reuse timeout connection
- req, err := http.NewRequest("GET", srv.URL, nil)
- if err != nil {
- t.Fatalf("unexpected err %v", err)
- }
+ req, err := http.NewRequest(http.MethodGet, srv.URL, nil)
+ require.NoError(t, err)
resp, err := tr.RoundTrip(req)
- if err != nil {
- t.Fatalf("unexpected err %v", err)
- }
- addr0, err := ioutil.ReadAll(resp.Body)
+ require.NoError(t, err)
+ addr0, err := io.ReadAll(resp.Body)
resp.Body.Close()
- if err != nil {
- t.Fatalf("unexpected err %v", err)
- }
+ require.NoError(t, err)
resp, err = tr.RoundTrip(req)
- if err != nil {
- t.Fatalf("unexpected err %v", err)
- }
- addr1, err := ioutil.ReadAll(resp.Body)
+ require.NoError(t, err)
+ addr1, err := io.ReadAll(resp.Body)
resp.Body.Close()
- if err != nil {
- t.Fatalf("unexpected err %v", err)
- }
+ require.NoError(t, err)
if bytes.Equal(addr0, addr1) {
- t.Errorf("addr0 = %s addr1= %s, want not equal", string(addr0), string(addr1))
+ t.Errorf("addr0 = %s addr1= %s, want not equal", addr0, addr1)
}
}
diff --git a/client/pkg/transport/tls.go b/client/pkg/transport/tls.go
index 62fe0d38519..d5375863fd5 100644
--- a/client/pkg/transport/tls.go
+++ b/client/pkg/transport/tls.go
@@ -15,6 +15,8 @@
package transport
import (
+ "context"
+ "errors"
"fmt"
"strings"
"time"
@@ -27,6 +29,8 @@ func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) {
if err != nil {
return nil, err
}
+ defer t.CloseIdleConnections()
+
var errs []string
var endpoints []string
for _, ep := range eps {
@@ -34,7 +38,7 @@ func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) {
errs = append(errs, fmt.Sprintf("%q is insecure", ep))
continue
}
- conn, cerr := t.Dial("tcp", ep[len("https://"):])
+ conn, cerr := t.DialContext(context.Background(), "tcp", ep[len("https://"):])
if cerr != nil {
errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr))
continue
@@ -43,7 +47,7 @@ func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) {
endpoints = append(endpoints, ep)
}
if len(errs) != 0 {
- err = fmt.Errorf("%s", strings.Join(errs, ","))
+ err = errors.New(strings.Join(errs, ","))
}
return endpoints, err
}
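ValidateSecureEndpoints now dials with a context and closes idle connections on the probe transport when it returns. A hedged usage sketch, assuming the client/pkg/v3 module path and placeholder certificate file names:

```go
package main

import (
	"fmt"
	"log"

	"go.etcd.io/etcd/client/pkg/v3/transport"
)

func main() {
	tlsInfo := transport.TLSInfo{
		CertFile:      "client.crt", // placeholder paths
		KeyFile:       "client.key",
		TrustedCAFile: "ca.crt",
	}
	eps := []string{"https://127.0.0.1:2379", "http://127.0.0.1:2379"}

	// Endpoints that answer a TLS dial are returned; the http:// endpoint
	// is reported through the joined error instead.
	secure, err := transport.ValidateSecureEndpoints(tlsInfo, eps)
	if err != nil {
		log.Printf("some endpoints were rejected: %v", err)
	}
	fmt.Println("usable endpoints:", secure)
}
```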
diff --git a/client/pkg/transport/tls_test.go b/client/pkg/transport/tls_test.go
new file mode 100644
index 00000000000..9beaa4d923e
--- /dev/null
+++ b/client/pkg/transport/tls_test.go
@@ -0,0 +1,89 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestValidateSecureEndpoints(t *testing.T) {
+ tlsInfo, err := createSelfCert(t)
+ require.NoErrorf(t, err, "unable to create cert")
+
+ remoteAddr := func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte(r.RemoteAddr))
+ }
+ srv := httptest.NewServer(http.HandlerFunc(remoteAddr))
+ defer srv.Close()
+
+ tests := map[string]struct {
+ endPoints []string
+ expectedEndpoints []string
+ expectedErr bool
+ }{
+ "invalidEndPoints": {
+ endPoints: []string{
+ "invalid endpoint",
+ },
+ expectedEndpoints: nil,
+ expectedErr: true,
+ },
+ "insecureEndpoints": {
+ endPoints: []string{
+ "http://127.0.0.1:8000",
+ "http://" + srv.Listener.Addr().String(),
+ },
+ expectedEndpoints: nil,
+ expectedErr: true,
+ },
+ "secureEndPoints": {
+ endPoints: []string{
+ "https://" + srv.Listener.Addr().String(),
+ },
+ expectedEndpoints: []string{
+ "https://" + srv.Listener.Addr().String(),
+ },
+ expectedErr: false,
+ },
+ "mixEndPoints": {
+ endPoints: []string{
+ "https://" + srv.Listener.Addr().String(),
+ "http://" + srv.Listener.Addr().String(),
+ "invalid end points",
+ },
+ expectedEndpoints: []string{
+ "https://" + srv.Listener.Addr().String(),
+ },
+ expectedErr: true,
+ },
+ }
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ secureEps, err := ValidateSecureEndpoints(*tlsInfo, test.endPoints)
+ if test.expectedErr != (err != nil) {
+ t.Errorf("Unexpected error, got: %v, want: %v", err, test.expectedErr)
+ }
+
+ if !reflect.DeepEqual(test.expectedEndpoints, secureEps) {
+ t.Errorf("expected endpoints %v, got %v", test.expectedEndpoints, secureEps)
+ }
+ })
+ }
+}
diff --git a/client/pkg/transport/transport.go b/client/pkg/transport/transport.go
index 648512772d3..67170d7436d 100644
--- a/client/pkg/transport/transport.go
+++ b/client/pkg/transport/transport.go
@@ -30,10 +30,19 @@ func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, er
return nil, err
}
+ var ipAddr net.Addr
+ if info.LocalAddr != "" {
+ ipAddr, err = net.ResolveTCPAddr("tcp", info.LocalAddr+":0")
+ if err != nil {
+ return nil, err
+ }
+ }
+
t := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
- Timeout: dialtimeoutd,
+ Timeout: dialtimeoutd,
+ LocalAddr: ipAddr,
// value taken from http.DefaultTransport
KeepAlive: 30 * time.Second,
}).DialContext,
@@ -57,7 +66,7 @@ func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, er
TLSClientConfig: cfg,
// Cost of reopening connection on sockets is low, and they are mostly used in testing.
// Long living unix-transport connections were leading to 'leak' test flakes.
- // Alternativly the returned Transport (t) should override CloseIdleConnections to
+ // Alternatively the returned Transport (t) should override CloseIdleConnections to
// forward it to 'tu' as well.
IdleConnTimeout: time.Microsecond,
}
diff --git a/client/pkg/transport/transport_test.go b/client/pkg/transport/transport_test.go
index afd325da4f6..8d3e66cb36a 100644
--- a/client/pkg/transport/transport_test.go
+++ b/client/pkg/transport/transport_test.go
@@ -20,16 +20,15 @@ import (
"strings"
"testing"
"time"
+
+ "github.com/stretchr/testify/require"
)
// TestNewTransportTLSInvalidCipherSuitesTLS12 expects a client with invalid
// cipher suites fail to handshake with the server.
func TestNewTransportTLSInvalidCipherSuitesTLS12(t *testing.T) {
- tlsInfo, del, err := createSelfCert()
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
- defer del()
+ tlsInfo, err := createSelfCert(t)
+ require.NoErrorf(t, err, "unable to create cert")
cipherSuites := []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
@@ -45,9 +44,7 @@ func TestNewTransportTLSInvalidCipherSuitesTLS12(t *testing.T) {
srvTLS.CipherSuites, cliTLS.CipherSuites = cipherSuites[:2], cipherSuites[2:]
ln, err := NewListener("127.0.0.1:0", "https", &srvTLS)
- if err != nil {
- t.Fatalf("unexpected NewListener error: %v", err)
- }
+ require.NoError(t, err)
defer ln.Close()
donec := make(chan struct{})
diff --git a/client/pkg/types/id.go b/client/pkg/types/id.go
index ae00388dde0..7a09647b5d5 100644
--- a/client/pkg/types/id.go
+++ b/client/pkg/types/id.go
@@ -14,7 +14,10 @@
package types
-import "strconv"
+import (
+ "strconv"
+ "strings"
+)
// ID represents a generic identifier which is canonically
// stored as a uint64 but is typically represented as a
@@ -37,3 +40,17 @@ type IDSlice []ID
func (p IDSlice) Len() int { return len(p) }
func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) }
func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p IDSlice) String() string {
+ var b strings.Builder
+ if p.Len() > 0 {
+ b.WriteString(p[0].String())
+ }
+
+ for i := 1; i < p.Len(); i++ {
+ b.WriteString(",")
+ b.WriteString(p[i].String())
+ }
+
+ return b.String()
+}
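The new IDSlice.String joins the slice members with commas using each ID's String method. A short sketch of the expected output, assuming ID.String keeps its existing lowercase-hex formatting:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/client/pkg/v3/types"
)

func main() {
	ids := types.IDSlice{0xe7cd2f00d, 0x1, 0xcafe}

	// Each element is formatted by ID.String and the results are
	// comma-joined by the new IDSlice.String method.
	fmt.Println(ids.String()) // e7cd2f00d,1,cafe  (assuming hex formatting)
}
```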
diff --git a/client/pkg/types/id_test.go b/client/pkg/types/id_test.go
index bec2853432b..6759b119ac1 100644
--- a/client/pkg/types/id_test.go
+++ b/client/pkg/types/id_test.go
@@ -18,6 +18,8 @@ import (
"reflect"
"sort"
"testing"
+
+ "github.com/stretchr/testify/require"
)
func TestIDString(t *testing.T) {
@@ -79,9 +81,7 @@ func TestIDFromStringFail(t *testing.T) {
for i, tt := range tests {
_, err := IDFromString(tt)
- if err == nil {
- t.Fatalf("#%d: IDFromString expected error, but err=nil", i)
- }
+ require.Errorf(t, err, "#%d: IDFromString expected error", i)
}
}
diff --git a/client/pkg/types/set.go b/client/pkg/types/set.go
index e7a3cdc9ab6..3e69c8d8b94 100644
--- a/client/pkg/types/set.go
+++ b/client/pkg/types/set.go
@@ -90,7 +90,7 @@ func (us *unsafeSet) Length() int {
// Values returns the values of the Set in an unspecified order.
func (us *unsafeSet) Values() (values []string) {
- values = make([]string, 0)
+ values = make([]string, 0, len(us.d))
for val := range us.d {
values = append(values, val)
}
diff --git a/client/pkg/types/set_test.go b/client/pkg/types/set_test.go
index 7596577e9a5..6c761b9ce36 100644
--- a/client/pkg/types/set_test.go
+++ b/client/pkg/types/set_test.go
@@ -18,6 +18,8 @@ import (
"reflect"
"sort"
"testing"
+
+ "github.com/stretchr/testify/require"
)
func TestUnsafeSet(t *testing.T) {
@@ -38,19 +40,14 @@ func equal(a, b []string) bool {
}
func driveSetTests(t *testing.T, s Set) {
+ t.Helper()
// Verify operations on an empty set
- eValues := []string{}
values := s.Values()
- if !reflect.DeepEqual(values, eValues) {
- t.Fatalf("Expect values=%v got %v", eValues, values)
- }
- if l := s.Length(); l != 0 {
- t.Fatalf("Expected length=0, got %d", l)
- }
+ require.Emptyf(t, values, "Expect values=%v got %v", []string{}, values)
+ l := s.Length()
+ require.Equalf(t, 0, l, "Expected length=0, got %d", l)
for _, v := range []string{"foo", "bar", "baz"} {
- if s.Contains(v) {
- t.Fatalf("Expect s.Contains(%q) to be fale, got true", v)
- }
+ require.Falsef(t, s.Contains(v), "Expect s.Contains(%q) to be false, got true", v)

}
// Add three items, ensure they show up
@@ -58,32 +55,24 @@ func driveSetTests(t *testing.T, s Set) {
s.Add("bar")
s.Add("baz")
- eValues = []string{"foo", "bar", "baz"}
+ eValues := []string{"foo", "bar", "baz"}
values = s.Values()
- if !equal(values, eValues) {
- t.Fatalf("Expect values=%v got %v", eValues, values)
- }
+ require.Truef(t, equal(values, eValues), "Expect values=%v got %v", eValues, values)
for _, v := range eValues {
- if !s.Contains(v) {
- t.Fatalf("Expect s.Contains(%q) to be true, got false", v)
- }
+ require.Truef(t, s.Contains(v), "Expect s.Contains(%q) to be true, got false", v)
}
- if l := s.Length(); l != 3 {
- t.Fatalf("Expected length=3, got %d", l)
- }
+ l = s.Length()
+ require.Equalf(t, 3, l, "Expected length=3, got %d", l)
// Add the same item a second time, ensuring it is not duplicated
s.Add("foo")
values = s.Values()
- if !equal(values, eValues) {
- t.Fatalf("Expect values=%v got %v", eValues, values)
- }
- if l := s.Length(); l != 3 {
- t.Fatalf("Expected length=3, got %d", l)
- }
+ require.Truef(t, equal(values, eValues), "Expect values=%v got %v", eValues, values)
+ l = s.Length()
+ require.Equalf(t, 3, l, "Expected length=3, got %d", l)
// Remove all items, ensure they are gone
s.Remove("foo")
@@ -92,13 +81,10 @@ func driveSetTests(t *testing.T, s Set) {
eValues = []string{}
values = s.Values()
- if !equal(values, eValues) {
- t.Fatalf("Expect values=%v got %v", eValues, values)
- }
+ require.Truef(t, equal(values, eValues), "Expect values=%v got %v", eValues, values)
- if l := s.Length(); l != 0 {
- t.Fatalf("Expected length=0, got %d", l)
- }
+ l = s.Length()
+ require.Equalf(t, 0, l, "Expected length=0, got %d", l)
// Create new copies of the set, and ensure they are unlinked to the
// original Set by making modifications
@@ -119,9 +105,7 @@ func driveSetTests(t *testing.T, s Set) {
{[]string{"foo", "bar"}, cp2.Values()},
{[]string{"bar"}, cp3.Values()},
} {
- if !equal(tt.want, tt.got) {
- t.Fatalf("case %d: expect values=%v got %v", i, tt.want, tt.got)
- }
+ require.Truef(t, equal(tt.want, tt.got), "case %d: expect values=%v got %v", i, tt.want, tt.got)
}
for i, tt := range []struct {
@@ -136,10 +120,7 @@ func driveSetTests(t *testing.T, s Set) {
{false, cp2.Equals(s)},
{false, cp2.Equals(cp1)},
} {
- if tt.got != tt.want {
- t.Fatalf("case %d: want %t, got %t", i, tt.want, tt.got)
-
- }
+ require.Equalf(t, tt.want, tt.got, "case %d: want %t, got %t", i, tt.want, tt.got)
}
// Subtract values from a Set, ensuring a new Set is created and
@@ -157,9 +138,7 @@ func driveSetTests(t *testing.T, s Set) {
{[]string{"foo", "baz"}, sub1.Values()},
{[]string{}, sub2.Values()},
} {
- if !equal(tt.want, tt.got) {
- t.Fatalf("case %d: expect values=%v got %v", i, tt.want, tt.got)
- }
+ require.Truef(t, equal(tt.want, tt.got), "case %d: expect values=%v got %v", i, tt.want, tt.got)
}
}
diff --git a/client/pkg/types/urls.go b/client/pkg/types/urls.go
index 9e5d03ff645..49a38967e64 100644
--- a/client/pkg/types/urls.go
+++ b/client/pkg/types/urls.go
@@ -36,20 +36,25 @@ func NewURLs(strs []string) (URLs, error) {
if err != nil {
return nil, err
}
- if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" {
+
+ switch u.Scheme {
+ case "http", "https":
+ if _, _, err := net.SplitHostPort(u.Host); err != nil {
+ return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
+ }
+
+ if u.Path != "" {
+ return nil, fmt.Errorf("URL must not contain a path: %s", in)
+ }
+ case "unix", "unixs":
+ break
+ default:
return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in)
}
- if _, _, err := net.SplitHostPort(u.Host); err != nil {
- return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
- }
- if u.Path != "" {
- return nil, fmt.Errorf("URL must not contain a path: %s", in)
- }
all[i] = *u
}
us := URLs(all)
us.Sort()
-
return us, nil
}
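The restructured NewURLs validation above applies the host:port and empty-path checks only to http/https URLs and lets unix/unixs URLs through without them. The sketch below walks a few illustrative inputs through that switch:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/client/pkg/v3/types"
)

func main() {
	inputs := [][]string{
		{"https://10.0.0.1:2379"},   // ok: host:port, no path
		{"http://10.0.0.1:2379/v2"}, // error: path not allowed for http(s)
		{"http://10.0.0.1"},         // error: missing port
		{"unix:///tmp/etcd.sock"},   // ok: unix scheme skips the host:port check
		{"ftp://10.0.0.1:2379"},     // error: unsupported scheme
	}
	for _, in := range inputs {
		_, err := types.NewURLs(in)
		fmt.Printf("%-30v -> err=%v\n", in, err)
	}
}
```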
diff --git a/client/pkg/types/urlsmap_test.go b/client/pkg/types/urlsmap_test.go
index da184282e79..6f8110d674f 100644
--- a/client/pkg/types/urlsmap_test.go
+++ b/client/pkg/types/urlsmap_test.go
@@ -18,14 +18,14 @@ import (
"reflect"
"testing"
+ "github.com/stretchr/testify/require"
+
"go.etcd.io/etcd/client/pkg/v3/testutil"
)
func TestParseInitialCluster(t *testing.T) {
c, err := NewURLsMap("mem1=http://10.0.0.1:2379,mem1=http://128.193.4.20:2379,mem2=http://10.0.0.2:2379,default=http://127.0.0.1:2379")
- if err != nil {
- t.Fatalf("unexpected parse error: %v", err)
- }
+ require.NoError(t, err)
wc := URLsMap(map[string]URLs{
"mem1": testutil.MustNewURLs(t, []string{"http://10.0.0.1:2379", "http://128.193.4.20:2379"}),
"mem2": testutil.MustNewURLs(t, []string{"http://10.0.0.2:2379"}),
@@ -63,9 +63,8 @@ func TestNameURLPairsString(t *testing.T) {
"five": testutil.MustNewURLs(t, nil),
})
w := "abc=http://0.0.0.0:0000,abc=http://1.1.1.1:1111,def=http://2.2.2.2:2222,ghi=http://127.0.0.1:2380,ghi=http://3.3.3.3:1234"
- if g := cls.String(); g != w {
- t.Fatalf("NameURLPairs.String():\ngot %#v\nwant %#v", g, w)
- }
+ g := cls.String()
+ require.Equalf(t, g, w, "NameURLPairs.String():\ngot %#v\nwant %#v", g, w)
}
func TestParse(t *testing.T) {
@@ -102,9 +101,7 @@ func TestParse(t *testing.T) {
// URI (https://github.com/golang/go/issues/6530).
func TestNewURLsMapIPV6(t *testing.T) {
c, err := NewURLsMap("mem1=http://[2001:db8::1]:2380,mem1=http://[fe80::6e40:8ff:feb1:58e4%25en0]:2380,mem2=http://[fe80::92e2:baff:fe7c:3224%25ext0]:2380")
- if err != nil {
- t.Fatalf("unexpected parse error: %v", err)
- }
+ require.NoError(t, err)
wc := URLsMap(map[string]URLs{
"mem1": testutil.MustNewURLs(t, []string{"http://[2001:db8::1]:2380", "http://[fe80::6e40:8ff:feb1:58e4%25en0]:2380"}),
"mem2": testutil.MustNewURLs(t, []string{"http://[fe80::92e2:baff:fe7c:3224%25ext0]:2380"}),
diff --git a/client/pkg/verify/verify.go b/client/pkg/verify/verify.go
new file mode 100644
index 00000000000..a7b2097bed9
--- /dev/null
+++ b/client/pkg/verify/verify.go
@@ -0,0 +1,80 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+ "fmt"
+ "os"
+ "strings"
+)
+
+const envVerify = "ETCD_VERIFY"
+
+type VerificationType string
+
+const (
+ envVerifyValueAll VerificationType = "all"
+ envVerifyValueAssert VerificationType = "assert"
+)
+
+func getEnvVerify() string {
+ return strings.ToLower(os.Getenv(envVerify))
+}
+
+func IsVerificationEnabled(verification VerificationType) bool {
+ env := getEnvVerify()
+ return env == string(envVerifyValueAll) || env == strings.ToLower(string(verification))
+}
+
+// EnableVerifications sets `envVerify` and returns a function that
+// can be used to restore the original settings.
+func EnableVerifications(verification VerificationType) func() {
+ previousEnv := getEnvVerify()
+ os.Setenv(envVerify, string(verification))
+ return func() {
+ os.Setenv(envVerify, previousEnv)
+ }
+}
+
+// EnableAllVerifications enables verification and returns a function
+// that can be used to restore the original settings.
+func EnableAllVerifications() func() {
+ return EnableVerifications(envVerifyValueAll)
+}
+
+// DisableVerifications unsets `envVerify` and returns a function that
+// can be used to restore the original settings.
+func DisableVerifications() func() {
+ previousEnv := getEnvVerify()
+ os.Unsetenv(envVerify)
+ return func() {
+ os.Setenv(envVerify, previousEnv)
+ }
+}
+
+// Verify performs verification if the assertions are enabled.
+// In the default setup it runs in tests and is skipped in production code.
+func Verify(f func()) {
+ if IsVerificationEnabled(envVerifyValueAssert) {
+ f()
+ }
+}
+
+// Assert will panic with a given formatted message if the given condition is false.
+func Assert(condition bool, msg string, v ...any) {
+ if !condition {
+ panic(fmt.Sprintf("assertion failed: "+msg, v...))
+ }
+}
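The new verify package gates invariant checks behind the ETCD_VERIFY environment variable. A hedged usage sketch showing how a caller might wrap an assertion in Verify and enable checks temporarily; the applied/committed values are made up for the example:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/client/pkg/v3/verify"
)

func main() {
	// Enable all verifications (equivalent to ETCD_VERIFY=all) and restore
	// the previous value when done.
	revert := verify.EnableAllVerifications()
	defer revert()

	applied, committed := uint64(10), uint64(7)

	// The closure only runs when verification is enabled, so the (possibly
	// expensive) invariant check costs nothing in production.
	verify.Verify(func() {
		verify.Assert(committed <= applied,
			"committed index (%d) must not exceed applied index (%d)", committed, applied)
	})

	fmt.Println("invariant held")
}
```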
diff --git a/client/v2/README.md b/client/v2/README.md
deleted file mode 100644
index 5ecb67820f4..00000000000
--- a/client/v2/README.md
+++ /dev/null
@@ -1,112 +0,0 @@
-# etcd/client
-
-etcd/client is the Go client library for etcd.
-
-[![GoDoc](https://godoc.org/go.etcd.io/etcd/client?status.png)](https://godoc.org/go.etcd.io/etcd/client)
-
-For full compatibility, it is recommended to install released versions of clients using go modules.
-
-## Install
-
-```bash
-go get go.etcd.io/etcd/v3/client
-```
-
-## Usage
-
-```go
-package main
-
-import (
- "log"
- "time"
- "context"
-
- "go.etcd.io/etcd/v3/client"
-)
-
-func main() {
- cfg := client.Config{
- Endpoints: []string{"http://127.0.0.1:2379"},
- Transport: client.DefaultTransport,
- // set timeout per request to fail fast when the target endpoint is unavailable
- HeaderTimeoutPerRequest: time.Second,
- }
- c, err := client.New(cfg)
- if err != nil {
- log.Fatal(err)
- }
- kapi := client.NewKeysAPI(c)
- // set "/foo" key with "bar" value
- log.Print("Setting '/foo' key with 'bar' value")
- resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
- if err != nil {
- log.Fatal(err)
- } else {
- // print common key info
- log.Printf("Set is done. Metadata is %q\n", resp)
- }
- // get "/foo" key's value
- log.Print("Getting '/foo' key value")
- resp, err = kapi.Get(context.Background(), "/foo", nil)
- if err != nil {
- log.Fatal(err)
- } else {
- // print common key info
- log.Printf("Get is done. Metadata is %q\n", resp)
- // print value
- log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
- }
-}
-```
-
-## Error Handling
-
-etcd client might return three types of errors.
-
-- context error
-
-Each API call has its first parameter as `context`. A context can be canceled or have an attached deadline. If the context is canceled or reaches its deadline, the responding context error will be returned no matter what internal errors the API call has already encountered.
-
-- cluster error
-
-Each API call tries to send request to the cluster endpoints one by one until it successfully gets a response. If a requests to an endpoint fails, due to exceeding per request timeout or connection issues, the error will be added into a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned.
-
-- response error
-
-If the response gets from the cluster is invalid, a plain string error will be returned. For example, it might be a invalid JSON error.
-
-Here is the example code to handle client errors:
-
-```go
-cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}}
-c, err := client.New(cfg)
-if err != nil {
- log.Fatal(err)
-}
-
-kapi := client.NewKeysAPI(c)
-resp, err := kapi.Set(ctx, "test", "bar", nil)
-if err != nil {
- if err == context.Canceled {
- // ctx is canceled by another routine
- } else if err == context.DeadlineExceeded {
- // ctx is attached with a deadline and it exceeded
- } else if cerr, ok := err.(*client.ClusterError); ok {
- // process (cerr.Errors)
- } else {
- // bad cluster endpoints, which are not etcd servers
- }
-}
-```
-
-
-## Caveat
-
-1. etcd/client prefers to use the same endpoint as long as the endpoint continues to work well. This saves socket resources, and improves efficiency for both client and server side. This preference doesn't remove consistency from the data consumed by the client because data replicated to each etcd member has already passed through the consensus process.
-
-2. etcd/client does round-robin rotation on other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all errors happened.
-
-3. Default etcd/client cannot handle the case that the remote server is SIGSTOPed now. TCP keepalive mechanism doesn't help in this scenario because operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention.
-
-4. etcd/client cannot detect whether a member is healthy with watches and non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information.
diff --git a/client/v2/client.go b/client/v2/client.go
deleted file mode 100644
index fda25988f6b..00000000000
--- a/client/v2/client.go
+++ /dev/null
@@ -1,717 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io/ioutil"
- "math/rand"
- "net"
- "net/http"
- "net/url"
- "sort"
- "strconv"
- "sync"
- "time"
-
- "go.etcd.io/etcd/api/v3/version"
-)
-
-var (
- ErrNoEndpoints = errors.New("client: no endpoints available")
- ErrTooManyRedirects = errors.New("client: too many redirects")
- ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
- ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
- errTooManyRedirectChecks = errors.New("client: too many redirect checks")
-
- // oneShotCtxValue is set on a context using WithValue(&oneShotValue) so
- // that Do() will not retry a request
- oneShotCtxValue interface{}
-)
-
-var DefaultRequestTimeout = 5 * time.Second
-
-var DefaultTransport CancelableTransport = &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- DialContext: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }).DialContext,
- TLSHandshakeTimeout: 10 * time.Second,
-}
-
-type EndpointSelectionMode int
-
-const (
- // EndpointSelectionRandom is the default value of the 'SelectionMode'.
- // As the name implies, the client object will pick a node from the members
- // of the cluster in a random fashion. If the cluster has three members, A, B,
- // and C, the client picks any node from its three members as its request
- // destination.
- EndpointSelectionRandom EndpointSelectionMode = iota
-
- // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
- // requests are sent directly to the cluster leader. This reduces
- // forwarding roundtrips compared to making requests to etcd followers
- // who then forward them to the cluster leader. In the event of a leader
- // failure, however, clients configured this way cannot prioritize among
- // the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
- // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
- // maintain its knowledge of current cluster state.
- //
- // This mode should be used with Client.AutoSync().
- EndpointSelectionPrioritizeLeader
-)
-
-type Config struct {
- // Endpoints defines a set of URLs (schemes, hosts and ports only)
- // that can be used to communicate with a logical etcd cluster. For
- // example, a three-node cluster could be provided like so:
- //
- // Endpoints: []string{
- // "http://node1.example.com:2379",
- // "http://node2.example.com:2379",
- // "http://node3.example.com:2379",
- // }
- //
- // If multiple endpoints are provided, the Client will attempt to
- // use them all in the event that one or more of them are unusable.
- //
- // If Client.Sync is ever called, the Client may cache an alternate
- // set of endpoints to continue operation.
- Endpoints []string
-
- // Transport is used by the Client to drive HTTP requests. If not
- // provided, DefaultTransport will be used.
- Transport CancelableTransport
-
- // CheckRedirect specifies the policy for handling HTTP redirects.
- // If CheckRedirect is not nil, the Client calls it before
- // following an HTTP redirect. The sole argument is the number of
- // requests that have already been made. If CheckRedirect returns
- // an error, Client.Do will not make any further requests and return
- // the error back it to the caller.
- //
- // If CheckRedirect is nil, the Client uses its default policy,
- // which is to stop after 10 consecutive requests.
- CheckRedirect CheckRedirectFunc
-
- // Username specifies the user credential to add as an authorization header
- Username string
-
- // Password is the password for the specified user to add as an authorization header
- // to the request.
- Password string
-
- // HeaderTimeoutPerRequest specifies the time limit to wait for response
- // header in a single request made by the Client. The timeout includes
- // connection time, any redirects, and header wait time.
- //
- // For non-watch GET request, server returns the response body immediately.
- // For PUT/POST/DELETE request, server will attempt to commit request
- // before responding, which is expected to take `100ms + 2 * RTT`.
- // For watch request, server returns the header immediately to notify Client
- // watch start. But if server is behind some kind of proxy, the response
- // header may be cached at proxy, and Client cannot rely on this behavior.
- //
- // Especially, wait request will ignore this timeout.
- //
- // One API call may send multiple requests to different etcd servers until it
- // succeeds. Use context of the API to specify the overall timeout.
- //
- // A HeaderTimeoutPerRequest of zero means no timeout.
- HeaderTimeoutPerRequest time.Duration
-
- // SelectionMode is an EndpointSelectionMode enum that specifies the
- // policy for choosing the etcd cluster node to which requests are sent.
- SelectionMode EndpointSelectionMode
-}
-
-func (cfg *Config) transport() CancelableTransport {
- if cfg.Transport == nil {
- return DefaultTransport
- }
- return cfg.Transport
-}
-
-func (cfg *Config) checkRedirect() CheckRedirectFunc {
- if cfg.CheckRedirect == nil {
- return DefaultCheckRedirect
- }
- return cfg.CheckRedirect
-}
-
-// CancelableTransport mimics net/http.Transport, but requires that
-// the object also support request cancellation.
-type CancelableTransport interface {
- http.RoundTripper
- CancelRequest(req *http.Request)
-}
-
-type CheckRedirectFunc func(via int) error
-
-// DefaultCheckRedirect follows up to 10 redirects, but no more.
-var DefaultCheckRedirect CheckRedirectFunc = func(via int) error {
- if via > 10 {
- return ErrTooManyRedirects
- }
- return nil
-}
-
-type Client interface {
- // Sync updates the internal cache of the etcd cluster's membership.
- Sync(context.Context) error
-
- // AutoSync periodically calls Sync() every given interval.
- // The recommended sync interval is 10 seconds to 1 minute, which does
- // not bring too much overhead to server and makes client catch up the
- // cluster change in time.
- //
- // The example to use it:
- //
- // for {
- // err := client.AutoSync(ctx, 10*time.Second)
- // if err == context.DeadlineExceeded || err == context.Canceled {
- // break
- // }
- // log.Print(err)
- // }
- AutoSync(context.Context, time.Duration) error
-
- // Endpoints returns a copy of the current set of API endpoints used
- // by Client to resolve HTTP requests. If Sync has ever been called,
- // this may differ from the initial Endpoints provided in the Config.
- Endpoints() []string
-
- // SetEndpoints sets the set of API endpoints used by Client to resolve
- // HTTP requests. If the given endpoints are not valid, an error will be
- // returned
- SetEndpoints(eps []string) error
-
- // GetVersion retrieves the current etcd server and cluster version
- GetVersion(ctx context.Context) (*version.Versions, error)
-
- httpClient
-}
-
-func New(cfg Config) (Client, error) {
- c := &httpClusterClient{
- clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
- rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
- selectionMode: cfg.SelectionMode,
- }
- if cfg.Username != "" {
- c.credentials = &credentials{
- username: cfg.Username,
- password: cfg.Password,
- }
- }
- if err := c.SetEndpoints(cfg.Endpoints); err != nil {
- return nil, err
- }
- return c, nil
-}
-
-type httpClient interface {
- Do(context.Context, httpAction) (*http.Response, []byte, error)
-}
-
-func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory {
- return func(ep url.URL) httpClient {
- return &redirectFollowingHTTPClient{
- checkRedirect: cr,
- client: &simpleHTTPClient{
- transport: tr,
- endpoint: ep,
- headerTimeout: headerTimeout,
- },
- }
- }
-}
-
-type credentials struct {
- username string
- password string
-}
-
-type httpClientFactory func(url.URL) httpClient
-
-type httpAction interface {
- HTTPRequest(url.URL) *http.Request
-}
-
-type httpClusterClient struct {
- clientFactory httpClientFactory
- endpoints []url.URL
- pinned int
- credentials *credentials
- sync.RWMutex
- rand *rand.Rand
- selectionMode EndpointSelectionMode
-}
-
-func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
- ceps := make([]url.URL, len(eps))
- copy(ceps, eps)
-
- // To perform a lookup on the new endpoint list without using the current
- // client, we'll copy it
- clientCopy := &httpClusterClient{
- clientFactory: c.clientFactory,
- credentials: c.credentials,
- rand: c.rand,
-
- pinned: 0,
- endpoints: ceps,
- }
-
- mAPI := NewMembersAPI(clientCopy)
- leader, err := mAPI.Leader(ctx)
- if err != nil {
- return "", err
- }
- if len(leader.ClientURLs) == 0 {
- return "", ErrNoLeaderEndpoint
- }
-
- return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
-}
-
-func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
- if len(eps) == 0 {
- return []url.URL{}, ErrNoEndpoints
- }
-
- neps := make([]url.URL, len(eps))
- for i, ep := range eps {
- u, err := url.Parse(ep)
- if err != nil {
- return []url.URL{}, err
- }
- neps[i] = *u
- }
- return neps, nil
-}
-
-func (c *httpClusterClient) SetEndpoints(eps []string) error {
- neps, err := c.parseEndpoints(eps)
- if err != nil {
- return err
- }
-
- c.Lock()
- defer c.Unlock()
-
- c.endpoints = shuffleEndpoints(c.rand, neps)
- // We're not doing anything for PrioritizeLeader here. This is
- // due to not having a context meaning we can't call getLeaderEndpoint
- // However, if you're using PrioritizeLeader, you've already been told
- // to regularly call sync, where we do have a ctx, and can figure the
- // leader. PrioritizeLeader is also quite a loose guarantee, so deal
- // with it
- c.pinned = 0
-
- return nil
-}
-
-func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
- action := act
- c.RLock()
- leps := len(c.endpoints)
- eps := make([]url.URL, leps)
- n := copy(eps, c.endpoints)
- pinned := c.pinned
-
- if c.credentials != nil {
- action = &authedAction{
- act: act,
- credentials: *c.credentials,
- }
- }
- c.RUnlock()
-
- if leps == 0 {
- return nil, nil, ErrNoEndpoints
- }
-
- if leps != n {
- return nil, nil, errors.New("unable to pick endpoint: copy failed")
- }
-
- var resp *http.Response
- var body []byte
- var err error
- cerr := &ClusterError{}
- isOneShot := ctx.Value(&oneShotCtxValue) != nil
-
- for i := pinned; i < leps+pinned; i++ {
- k := i % leps
- hc := c.clientFactory(eps[k])
- resp, body, err = hc.Do(ctx, action)
- if err != nil {
- cerr.Errors = append(cerr.Errors, err)
- if err == ctx.Err() {
- return nil, nil, ctx.Err()
- }
- if err == context.Canceled || err == context.DeadlineExceeded {
- return nil, nil, err
- }
- } else if resp.StatusCode/100 == 5 {
- switch resp.StatusCode {
- case http.StatusInternalServerError, http.StatusServiceUnavailable:
- // TODO: make sure this is a no leader response
- cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String()))
- default:
- cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
- }
- err = cerr.Errors[0]
- }
- if err != nil {
- if !isOneShot {
- continue
- }
- c.Lock()
- c.pinned = (k + 1) % leps
- c.Unlock()
- return nil, nil, err
- }
- if k != pinned {
- c.Lock()
- c.pinned = k
- c.Unlock()
- }
- return resp, body, nil
- }
-
- return nil, nil, cerr
-}
-
-func (c *httpClusterClient) Endpoints() []string {
- c.RLock()
- defer c.RUnlock()
-
- eps := make([]string, len(c.endpoints))
- for i, ep := range c.endpoints {
- eps[i] = ep.String()
- }
-
- return eps
-}
-
-func (c *httpClusterClient) Sync(ctx context.Context) error {
- mAPI := NewMembersAPI(c)
- ms, err := mAPI.List(ctx)
- if err != nil {
- return err
- }
-
- var eps []string
- for _, m := range ms {
- eps = append(eps, m.ClientURLs...)
- }
-
- neps, err := c.parseEndpoints(eps)
- if err != nil {
- return err
- }
-
- npin := 0
-
- switch c.selectionMode {
- case EndpointSelectionRandom:
- c.RLock()
- eq := endpointsEqual(c.endpoints, neps)
- c.RUnlock()
-
- if eq {
- return nil
- }
- // When items in the endpoint list changes, we choose a new pin
- neps = shuffleEndpoints(c.rand, neps)
- case EndpointSelectionPrioritizeLeader:
- nle, err := c.getLeaderEndpoint(ctx, neps)
- if err != nil {
- return ErrNoLeaderEndpoint
- }
-
- for i, n := range neps {
- if n.String() == nle {
- npin = i
- break
- }
- }
- default:
- return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
- }
-
- c.Lock()
- defer c.Unlock()
- c.endpoints = neps
- c.pinned = npin
-
- return nil
-}
-
-func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
- ticker := time.NewTicker(interval)
- defer ticker.Stop()
- for {
- err := c.Sync(ctx)
- if err != nil {
- return err
- }
- select {
- case <-ctx.Done():
- return ctx.Err()
- case <-ticker.C:
- }
- }
-}
-
-func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
- act := &getAction{Prefix: "/version"}
-
- resp, body, err := c.Do(ctx, act)
- if err != nil {
- return nil, err
- }
-
- switch resp.StatusCode {
- case http.StatusOK:
- if len(body) == 0 {
- return nil, ErrEmptyBody
- }
- var vresp version.Versions
- if err := json.Unmarshal(body, &vresp); err != nil {
- return nil, ErrInvalidJSON
- }
- return &vresp, nil
- default:
- var etcdErr Error
- if err := json.Unmarshal(body, &etcdErr); err != nil {
- return nil, ErrInvalidJSON
- }
- return nil, etcdErr
- }
-}
-
-type roundTripResponse struct {
- resp *http.Response
- err error
-}
-
-type simpleHTTPClient struct {
- transport CancelableTransport
- endpoint url.URL
- headerTimeout time.Duration
-}
-
-// ErrNoRequest indicates that the HTTPRequest object could not be found
-// or was nil. No processing could continue.
-var ErrNoRequest = errors.New("no HTTPRequest was available")
-
-func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
- req := act.HTTPRequest(c.endpoint)
- if req == nil {
- return nil, nil, ErrNoRequest
- }
-
- if err := printcURL(req); err != nil {
- return nil, nil, err
- }
-
- isWait := false
- if req.URL != nil {
- ws := req.URL.Query().Get("wait")
- if len(ws) != 0 {
- var err error
- isWait, err = strconv.ParseBool(ws)
- if err != nil {
- return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
- }
- }
- }
-
- var hctx context.Context
- var hcancel context.CancelFunc
- if !isWait && c.headerTimeout > 0 {
- hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
- } else {
- hctx, hcancel = context.WithCancel(ctx)
- }
- defer hcancel()
-
- reqcancel := requestCanceler(c.transport, req)
-
- rtchan := make(chan roundTripResponse, 1)
- go func() {
- resp, err := c.transport.RoundTrip(req)
- rtchan <- roundTripResponse{resp: resp, err: err}
- close(rtchan)
- }()
-
- var resp *http.Response
- var err error
-
- select {
- case rtresp := <-rtchan:
- resp, err = rtresp.resp, rtresp.err
- case <-hctx.Done():
- // cancel and wait for request to actually exit before continuing
- reqcancel()
- rtresp := <-rtchan
- resp = rtresp.resp
- switch {
- case ctx.Err() != nil:
- err = ctx.Err()
- case hctx.Err() != nil:
- err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
- default:
- panic("failed to get error from context")
- }
- }
-
- // always check for resp nil-ness to deal with possible
- // race conditions between channels above
- defer func() {
- if resp != nil {
- resp.Body.Close()
- }
- }()
-
- if err != nil {
- return nil, nil, err
- }
-
- var body []byte
- done := make(chan struct{})
- go func() {
- body, err = ioutil.ReadAll(resp.Body)
- done <- struct{}{}
- }()
-
- select {
- case <-ctx.Done():
- resp.Body.Close()
- <-done
- return nil, nil, ctx.Err()
- case <-done:
- }
-
- return resp, body, err
-}
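
simpleHTTPClient.Do above runs the round trip in its own goroutine, races it against a header-timeout context, and always drains the channel after cancelling so the transport goroutine cannot leak. A reduced sketch of the same pattern on top of net/http follows; doWithHeaderTimeout and the endpoint URL are illustrative, not part of the deleted package.

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"time"
)

// doWithHeaderTimeout races an HTTP round trip against a header timeout, in
// the style of simpleHTTPClient.Do: the request runs in its own goroutine,
// and on timeout the caller waits for it to exit before returning.
func doWithHeaderTimeout(ctx context.Context, client *http.Client, url string, headerTimeout time.Duration) ([]byte, error) {
	hctx, hcancel := context.WithTimeout(ctx, headerTimeout)
	defer hcancel()

	req, err := http.NewRequestWithContext(hctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}

	type result struct {
		resp *http.Response
		err  error
	}
	ch := make(chan result, 1)
	go func() {
		resp, err := client.Do(req)
		ch <- result{resp, err}
	}()

	select {
	case r := <-ch:
		if r.err != nil {
			return nil, r.err
		}
		defer r.resp.Body.Close()
		return io.ReadAll(r.resp.Body)
	case <-hctx.Done():
		r := <-ch // wait for the round trip to actually exit
		if r.resp != nil {
			r.resp.Body.Close()
		}
		if ctx.Err() != nil {
			return nil, ctx.Err()
		}
		return nil, fmt.Errorf("endpoint %s exceeded header timeout", url)
	}
}

func main() {
	body, err := doWithHeaderTimeout(context.Background(), http.DefaultClient, "http://127.0.0.1:2379/version", time.Second)
	fmt.Println(len(body), err)
}
```
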
-
-type authedAction struct {
- act httpAction
- credentials credentials
-}
-
-func (a *authedAction) HTTPRequest(url url.URL) *http.Request {
- r := a.act.HTTPRequest(url)
- r.SetBasicAuth(a.credentials.username, a.credentials.password)
- return r
-}
-
-type redirectFollowingHTTPClient struct {
- client httpClient
- checkRedirect CheckRedirectFunc
-}
-
-func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
- next := act
- for i := 0; i < 100; i++ {
- if i > 0 {
- if err := r.checkRedirect(i); err != nil {
- return nil, nil, err
- }
- }
- resp, body, err := r.client.Do(ctx, next)
- if err != nil {
- return nil, nil, err
- }
- if resp.StatusCode/100 == 3 {
- hdr := resp.Header.Get("Location")
- if hdr == "" {
- return nil, nil, fmt.Errorf("location header not set")
- }
- loc, err := url.Parse(hdr)
- if err != nil {
- return nil, nil, fmt.Errorf("location header not valid URL: %s", hdr)
- }
- next = &redirectedHTTPAction{
- action: act,
- location: *loc,
- }
- continue
- }
- return resp, body, nil
- }
-
- return nil, nil, errTooManyRedirectChecks
-}
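
The redirect follower above gives up after 100 hops, consults checkRedirect before each retry, and rebuilds the request from the Location header. A compact sketch of that header handling follows; nextLocation and the sample URLs are made up for illustration.

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// nextLocation pulls the redirect target out of a 3xx response the way the
// deleted client does: a missing or unparsable Location header is an error.
func nextLocation(resp *http.Response) (*url.URL, error) {
	hdr := resp.Header.Get("Location")
	if hdr == "" {
		return nil, fmt.Errorf("location header not set")
	}
	loc, err := url.Parse(hdr)
	if err != nil {
		return nil, fmt.Errorf("location header not valid URL: %s", hdr)
	}
	return loc, nil
}

func main() {
	resp := &http.Response{
		StatusCode: http.StatusTemporaryRedirect,
		Header:     http.Header{"Location": []string{"http://127.0.0.1:4001/v2/keys/foo"}},
	}
	loc, err := nextLocation(resp)
	fmt.Println(loc, err) // http://127.0.0.1:4001/v2/keys/foo <nil>

	// The follow-up request reuses the original request with its URL swapped
	// for loc, which is what redirectedHTTPAction.HTTPRequest does above.
	req, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1:2379/v2/keys/foo", nil)
	req.URL = loc
	fmt.Println(req.URL)
}
```
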
-
-type redirectedHTTPAction struct {
- action httpAction
- location url.URL
-}
-
-func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
- orig := r.action.HTTPRequest(ep)
- orig.URL = &r.location
- return orig
-}
-
-func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
-	// copied from rand.Rand.Perm in Go <= 1.9
- n := len(eps)
- p := make([]int, n)
- for i := 0; i < n; i++ {
- j := r.Intn(i + 1)
- p[i] = p[j]
- p[j] = i
- }
- neps := make([]url.URL, n)
- for i, k := range p {
- neps[i] = eps[k]
- }
- return neps
-}
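
shuffleEndpoints re-implements the Go <= 1.9 rand.Perm so the pin chosen for a given seed stays stable across Go releases. On Go 1.10+ the same shuffle can be written with rand.Shuffle; the sketch below is only an equivalent idiom, not what the deleted code did, and it produces a different order for the same seed.

```go
package main

import (
	"fmt"
	"math/rand"
	"net/url"
)

// shuffleWithRandShuffle returns a shuffled copy of eps using the Go 1.10+
// math/rand API. For the same seed it yields a different order than the
// deleted shuffleEndpoints, which reproduced the Go <= 1.9 rand.Perm.
func shuffleWithRandShuffle(r *rand.Rand, eps []url.URL) []url.URL {
	neps := make([]url.URL, len(eps))
	copy(neps, eps)
	r.Shuffle(len(neps), func(i, j int) {
		neps[i], neps[j] = neps[j], neps[i]
	})
	return neps
}

func main() {
	eps := []url.URL{
		{Scheme: "http", Host: "127.0.0.1:2379"},
		{Scheme: "http", Host: "127.0.0.1:4001"},
		{Scheme: "http", Host: "127.0.0.1:4002"},
	}
	for _, ep := range shuffleWithRandShuffle(rand.New(rand.NewSource(0)), eps) {
		fmt.Println(ep.String())
	}
}
```
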
-
-func endpointsEqual(left, right []url.URL) bool {
- if len(left) != len(right) {
- return false
- }
-
- sLeft := make([]string, len(left))
- sRight := make([]string, len(right))
- for i, l := range left {
- sLeft[i] = l.String()
- }
- for i, r := range right {
- sRight[i] = r.String()
- }
-
- sort.Strings(sLeft)
- sort.Strings(sRight)
- for i := range sLeft {
- if sLeft[i] != sRight[i] {
- return false
- }
- }
- return true
-}
diff --git a/client/v2/client_test.go b/client/v2/client_test.go
deleted file mode 100644
index dd6bbc69850..00000000000
--- a/client/v2/client_test.go
+++ /dev/null
@@ -1,1098 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "math/rand"
- "net/http"
- "net/url"
- "reflect"
- "sort"
- "strings"
- "testing"
- "time"
-
- "go.etcd.io/etcd/api/v3/version"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
-)
-
-type actionAssertingHTTPClient struct {
- t *testing.T
- num int
- act httpAction
-
- resp http.Response
- body []byte
- err error
-}
-
-func (a *actionAssertingHTTPClient) Do(_ context.Context, act httpAction) (*http.Response, []byte, error) {
- if !reflect.DeepEqual(a.act, act) {
- a.t.Errorf("#%d: unexpected httpAction: want=%#v got=%#v", a.num, a.act, act)
- }
-
- return &a.resp, a.body, a.err
-}
-
-type staticHTTPClient struct {
- resp http.Response
- body []byte
- err error
-}
-
-func (s *staticHTTPClient) Do(context.Context, httpAction) (*http.Response, []byte, error) {
- return &s.resp, s.body, s.err
-}
-
-type staticHTTPAction struct {
- request http.Request
-}
-
-func (s *staticHTTPAction) HTTPRequest(url.URL) *http.Request {
- return &s.request
-}
-
-type staticHTTPResponse struct {
- resp http.Response
- body []byte
- err error
-}
-
-type multiStaticHTTPClient struct {
- responses []staticHTTPResponse
- cur int
-}
-
-func (s *multiStaticHTTPClient) Do(context.Context, httpAction) (*http.Response, []byte, error) {
- r := s.responses[s.cur]
- s.cur++
- return &r.resp, r.body, r.err
-}
-
-func newStaticHTTPClientFactory(responses []staticHTTPResponse) httpClientFactory {
- var cur int
- return func(url.URL) httpClient {
- r := responses[cur]
- cur++
- return &staticHTTPClient{resp: r.resp, body: r.body, err: r.err}
- }
-}
-
-type fakeTransport struct {
- respchan chan *http.Response
- errchan chan error
- startCancel chan struct{}
- finishCancel chan struct{}
-}
-
-func newFakeTransport() *fakeTransport {
- return &fakeTransport{
- respchan: make(chan *http.Response, 1),
- errchan: make(chan error, 1),
- startCancel: make(chan struct{}, 1),
- finishCancel: make(chan struct{}, 1),
- }
-}
-
-func (t *fakeTransport) CancelRequest(*http.Request) {
- t.startCancel <- struct{}{}
-}
-
-type fakeAction struct{}
-
-func (a *fakeAction) HTTPRequest(url.URL) *http.Request {
- return &http.Request{}
-}
-
-func TestSimpleHTTPClientDoSuccess(t *testing.T) {
- tr := newFakeTransport()
- c := &simpleHTTPClient{transport: tr}
-
- tr.respchan <- &http.Response{
- StatusCode: http.StatusTeapot,
- Body: ioutil.NopCloser(strings.NewReader("foo")),
- }
-
- resp, body, err := c.Do(context.Background(), &fakeAction{})
- if err != nil {
- t.Fatalf("incorrect error value: want=nil got=%v", err)
- }
-
- wantCode := http.StatusTeapot
- if wantCode != resp.StatusCode {
- t.Fatalf("invalid response code: want=%d got=%d", wantCode, resp.StatusCode)
- }
-
- wantBody := []byte("foo")
- if !reflect.DeepEqual(wantBody, body) {
- t.Fatalf("invalid response body: want=%q got=%q", wantBody, body)
- }
-}
-
-func TestSimpleHTTPClientDoError(t *testing.T) {
- tr := newFakeTransport()
- c := &simpleHTTPClient{transport: tr}
-
- tr.errchan <- errors.New("fixture")
-
- _, _, err := c.Do(context.Background(), &fakeAction{})
- if err == nil {
- t.Fatalf("expected non-nil error, got nil")
- }
-}
-
-type nilAction struct{}
-
-func (a *nilAction) HTTPRequest(url.URL) *http.Request {
- return nil
-}
-
-func TestSimpleHTTPClientDoNilRequest(t *testing.T) {
- tr := newFakeTransport()
- c := &simpleHTTPClient{transport: tr}
-
- tr.errchan <- errors.New("fixture")
-
- _, _, err := c.Do(context.Background(), &nilAction{})
- if err != ErrNoRequest {
-		t.Fatalf("incorrect error value: want=%v got=%v", ErrNoRequest, err)
- }
-}
-
-func TestSimpleHTTPClientDoCancelContext(t *testing.T) {
- tr := newFakeTransport()
- c := &simpleHTTPClient{transport: tr}
-
- tr.startCancel <- struct{}{}
- tr.finishCancel <- struct{}{}
-
- _, _, err := c.Do(context.Background(), &fakeAction{})
- if err == nil {
- t.Fatalf("expected non-nil error, got nil")
- }
-}
-
-type checkableReadCloser struct {
- io.ReadCloser
- closed bool
-}
-
-func (c *checkableReadCloser) Close() error {
- if !c.closed {
- c.closed = true
- return c.ReadCloser.Close()
- }
- return nil
-}
-
-func TestSimpleHTTPClientDoCancelContextResponseBodyClosed(t *testing.T) {
- tr := newFakeTransport()
- c := &simpleHTTPClient{transport: tr}
-
- // create an already-cancelled context
- ctx, cancel := context.WithCancel(context.Background())
- cancel()
-
- body := &checkableReadCloser{ReadCloser: ioutil.NopCloser(strings.NewReader("foo"))}
- go func() {
-		// wait until simpleHTTPClient notices the context is already canceled
-		// and calls CancelRequest
- testutil.WaitSchedule()
-
-		// the response is returned before the cancel takes effect
- tr.respchan <- &http.Response{Body: body}
- }()
-
- _, _, err := c.Do(ctx, &fakeAction{})
- if err == nil {
- t.Fatalf("expected non-nil error, got nil")
- }
-
- if !body.closed {
- t.Fatalf("expected closed body")
- }
-}
-
-type blockingBody struct {
- c chan struct{}
-}
-
-func (bb *blockingBody) Read(p []byte) (n int, err error) {
- <-bb.c
- return 0, errors.New("closed")
-}
-
-func (bb *blockingBody) Close() error {
- close(bb.c)
- return nil
-}
-
-func TestSimpleHTTPClientDoCancelContextResponseBodyClosedWithBlockingBody(t *testing.T) {
- tr := newFakeTransport()
- c := &simpleHTTPClient{transport: tr}
-
- ctx, cancel := context.WithCancel(context.Background())
- body := &checkableReadCloser{ReadCloser: &blockingBody{c: make(chan struct{})}}
- go func() {
- tr.respchan <- &http.Response{Body: body}
- time.Sleep(2 * time.Millisecond)
- // cancel after the body is received
- cancel()
- }()
-
- _, _, err := c.Do(ctx, &fakeAction{})
- if err != context.Canceled {
- t.Fatalf("expected %+v, got %+v", context.Canceled, err)
- }
-
- if !body.closed {
- t.Fatalf("expected closed body")
- }
-}
-
-func TestSimpleHTTPClientDoCancelContextWaitForRoundTrip(t *testing.T) {
- tr := newFakeTransport()
- c := &simpleHTTPClient{transport: tr}
-
- donechan := make(chan struct{})
- ctx, cancel := context.WithCancel(context.Background())
- go func() {
- c.Do(ctx, &fakeAction{})
- close(donechan)
- }()
-
- // This should call CancelRequest and begin the cancellation process
- cancel()
-
- select {
- case <-donechan:
- t.Fatalf("simpleHTTPClient.Do should not have exited yet")
- default:
- }
-
- tr.finishCancel <- struct{}{}
-
- select {
- case <-donechan:
-		// expected behavior
- return
- case <-time.After(time.Second):
- t.Fatalf("simpleHTTPClient.Do did not exit within 1s")
- }
-}
-
-func TestSimpleHTTPClientDoHeaderTimeout(t *testing.T) {
- tr := newFakeTransport()
- tr.finishCancel <- struct{}{}
- c := &simpleHTTPClient{transport: tr, headerTimeout: time.Millisecond}
-
- errc := make(chan error, 1)
- go func() {
- _, _, err := c.Do(context.Background(), &fakeAction{})
- errc <- err
- }()
-
- select {
- case err := <-errc:
- if err == nil {
- t.Fatalf("expected non-nil error, got nil")
- }
- case <-time.After(time.Second):
- t.Fatalf("unexpected timeout when waiting for the test to finish")
- }
-}
-
-func TestHTTPClusterClientDo(t *testing.T) {
- fakeErr := errors.New("fake!")
- fakeURL := url.URL{}
- tests := []struct {
- client *httpClusterClient
- ctx context.Context
-
- wantCode int
- wantErr error
- wantPinned int
- }{
- // first good response short-circuits Do
- {
- client: &httpClusterClient{
- endpoints: []url.URL{fakeURL, fakeURL},
- clientFactory: newStaticHTTPClientFactory(
- []staticHTTPResponse{
- {resp: http.Response{StatusCode: http.StatusTeapot}},
- {err: fakeErr},
- },
- ),
- rand: rand.New(rand.NewSource(0)),
- },
- wantCode: http.StatusTeapot,
- },
-
- // fall through to good endpoint if err is arbitrary
- {
- client: &httpClusterClient{
- endpoints: []url.URL{fakeURL, fakeURL},
- clientFactory: newStaticHTTPClientFactory(
- []staticHTTPResponse{
- {err: fakeErr},
- {resp: http.Response{StatusCode: http.StatusTeapot}},
- },
- ),
- rand: rand.New(rand.NewSource(0)),
- },
- wantCode: http.StatusTeapot,
- wantPinned: 1,
- },
-
- // context.Canceled short-circuits Do
- {
- client: &httpClusterClient{
- endpoints: []url.URL{fakeURL, fakeURL},
- clientFactory: newStaticHTTPClientFactory(
- []staticHTTPResponse{
- {err: context.Canceled},
- {resp: http.Response{StatusCode: http.StatusTeapot}},
- },
- ),
- rand: rand.New(rand.NewSource(0)),
- },
- wantErr: context.Canceled,
- },
-
- // return err if there are no endpoints
- {
- client: &httpClusterClient{
- endpoints: []url.URL{},
- clientFactory: newHTTPClientFactory(nil, nil, 0),
- rand: rand.New(rand.NewSource(0)),
- },
- wantErr: ErrNoEndpoints,
- },
-
- // return err if all endpoints return arbitrary errors
- {
- client: &httpClusterClient{
- endpoints: []url.URL{fakeURL, fakeURL},
- clientFactory: newStaticHTTPClientFactory(
- []staticHTTPResponse{
- {err: fakeErr},
- {err: fakeErr},
- },
- ),
- rand: rand.New(rand.NewSource(0)),
- },
- wantErr: &ClusterError{Errors: []error{fakeErr, fakeErr}},
- },
-
- // 500-level errors cause Do to fallthrough to next endpoint
- {
- client: &httpClusterClient{
- endpoints: []url.URL{fakeURL, fakeURL},
- clientFactory: newStaticHTTPClientFactory(
- []staticHTTPResponse{
- {resp: http.Response{StatusCode: http.StatusBadGateway}},
- {resp: http.Response{StatusCode: http.StatusTeapot}},
- },
- ),
- rand: rand.New(rand.NewSource(0)),
- },
- wantCode: http.StatusTeapot,
- wantPinned: 1,
- },
-
- // 500-level errors cause one shot Do to fallthrough to next endpoint
- {
- client: &httpClusterClient{
- endpoints: []url.URL{fakeURL, fakeURL},
- clientFactory: newStaticHTTPClientFactory(
- []staticHTTPResponse{
- {resp: http.Response{StatusCode: http.StatusBadGateway}},
- {resp: http.Response{StatusCode: http.StatusTeapot}},
- },
- ),
- rand: rand.New(rand.NewSource(0)),
- },
- ctx: context.WithValue(context.Background(), &oneShotCtxValue, &oneShotCtxValue),
- wantErr: fmt.Errorf("client: etcd member returns server error [Bad Gateway]"),
- wantPinned: 1,
- },
- }
-
- for i, tt := range tests {
- if tt.ctx == nil {
- tt.ctx = context.Background()
- }
- resp, _, err := tt.client.Do(tt.ctx, nil)
- if (tt.wantErr == nil && tt.wantErr != err) || (tt.wantErr != nil && tt.wantErr.Error() != err.Error()) {
- t.Errorf("#%d: got err=%v, want=%v", i, err, tt.wantErr)
- continue
- }
-
- if resp == nil {
- if tt.wantCode != 0 {
- t.Errorf("#%d: resp is nil, want=%d", i, tt.wantCode)
- continue
- }
- } else if resp.StatusCode != tt.wantCode {
- t.Errorf("#%d: resp code=%d, want=%d", i, resp.StatusCode, tt.wantCode)
- continue
- }
-
- if tt.client.pinned != tt.wantPinned {
- t.Errorf("#%d: pinned=%d, want=%d", i, tt.client.pinned, tt.wantPinned)
- }
- }
-}
-
-func TestHTTPClusterClientDoDeadlineExceedContext(t *testing.T) {
- fakeURL := url.URL{}
- tr := newFakeTransport()
- tr.finishCancel <- struct{}{}
- c := &httpClusterClient{
- clientFactory: newHTTPClientFactory(tr, DefaultCheckRedirect, 0),
- endpoints: []url.URL{fakeURL},
- }
-
- errc := make(chan error, 1)
- go func() {
- ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
- defer cancel()
- _, _, err := c.Do(ctx, &fakeAction{})
- errc <- err
- }()
-
- select {
- case err := <-errc:
- if err != context.DeadlineExceeded {
- t.Errorf("err = %+v, want %+v", err, context.DeadlineExceeded)
- }
- case <-time.After(time.Second):
- t.Fatalf("unexpected timeout when waiting for request to deadline exceed")
- }
-}
-
-type fakeCancelContext struct{}
-
-var errFakeCancelContext = errors.New("fake context canceled")
-
-func (f fakeCancelContext) Deadline() (time.Time, bool) { return time.Time{}, false }
-func (f fakeCancelContext) Done() <-chan struct{} {
- d := make(chan struct{}, 1)
- d <- struct{}{}
- return d
-}
-func (f fakeCancelContext) Err() error { return errFakeCancelContext }
-func (f fakeCancelContext) Value(key interface{}) interface{} { return 1 }
-
-func withTimeout(parent context.Context, timeout time.Duration) (
- ctx context.Context,
- cancel context.CancelFunc) {
- ctx = parent
- cancel = func() {
- ctx = nil
- }
- return ctx, cancel
-}
-
-func TestHTTPClusterClientDoCanceledContext(t *testing.T) {
- fakeURL := url.URL{}
- tr := newFakeTransport()
- tr.finishCancel <- struct{}{}
- c := &httpClusterClient{
- clientFactory: newHTTPClientFactory(tr, DefaultCheckRedirect, 0),
- endpoints: []url.URL{fakeURL},
- }
-
- errc := make(chan error, 1)
- go func() {
- ctx, cancel := withTimeout(fakeCancelContext{}, time.Millisecond)
- cancel()
- _, _, err := c.Do(ctx, &fakeAction{})
- errc <- err
- }()
-
- select {
- case err := <-errc:
- if err != errFakeCancelContext {
- t.Errorf("err = %+v, want %+v", err, errFakeCancelContext)
- }
- case <-time.After(time.Second):
- t.Fatalf("unexpected timeout when waiting for request to fake context canceled")
- }
-}
-
-func TestRedirectedHTTPAction(t *testing.T) {
- act := &redirectedHTTPAction{
- action: &staticHTTPAction{
- request: http.Request{
- Method: "DELETE",
- URL: &url.URL{
- Scheme: "https",
- Host: "foo.example.com",
- Path: "/ping",
- },
- },
- },
- location: url.URL{
- Scheme: "https",
- Host: "bar.example.com",
- Path: "/pong",
- },
- }
-
- want := &http.Request{
- Method: "DELETE",
- URL: &url.URL{
- Scheme: "https",
- Host: "bar.example.com",
- Path: "/pong",
- },
- }
- got := act.HTTPRequest(url.URL{Scheme: "http", Host: "baz.example.com", Path: "/pang"})
-
- if !reflect.DeepEqual(want, got) {
- t.Fatalf("HTTPRequest is %#v, want %#v", want, got)
- }
-}
-
-func TestRedirectFollowingHTTPClient(t *testing.T) {
- tests := []struct {
- checkRedirect CheckRedirectFunc
- client httpClient
- wantCode int
- wantErr error
- }{
- // errors bubbled up
- {
- checkRedirect: func(int) error { return ErrTooManyRedirects },
- client: &multiStaticHTTPClient{
- responses: []staticHTTPResponse{
- {
- err: errors.New("fail!"),
- },
- },
- },
- wantErr: errors.New("fail!"),
- },
-
- // no need to follow redirect if none given
- {
- checkRedirect: func(int) error { return ErrTooManyRedirects },
- client: &multiStaticHTTPClient{
- responses: []staticHTTPResponse{
- {
- resp: http.Response{
- StatusCode: http.StatusTeapot,
- },
- },
- },
- },
- wantCode: http.StatusTeapot,
- },
-
- // redirects if less than max
- {
- checkRedirect: func(via int) error {
- if via >= 2 {
- return ErrTooManyRedirects
- }
- return nil
- },
- client: &multiStaticHTTPClient{
- responses: []staticHTTPResponse{
- {
- resp: http.Response{
- StatusCode: http.StatusTemporaryRedirect,
- Header: http.Header{"Location": []string{"http://example.com"}},
- },
- },
- {
- resp: http.Response{
- StatusCode: http.StatusTeapot,
- },
- },
- },
- },
- wantCode: http.StatusTeapot,
- },
-
- // succeed after reaching max redirects
- {
- checkRedirect: func(via int) error {
- if via >= 3 {
- return ErrTooManyRedirects
- }
- return nil
- },
- client: &multiStaticHTTPClient{
- responses: []staticHTTPResponse{
- {
- resp: http.Response{
- StatusCode: http.StatusTemporaryRedirect,
- Header: http.Header{"Location": []string{"http://example.com"}},
- },
- },
- {
- resp: http.Response{
- StatusCode: http.StatusTemporaryRedirect,
- Header: http.Header{"Location": []string{"http://example.com"}},
- },
- },
- {
- resp: http.Response{
- StatusCode: http.StatusTeapot,
- },
- },
- },
- },
- wantCode: http.StatusTeapot,
- },
-
- // fail if too many redirects
- {
- checkRedirect: func(via int) error {
- if via >= 2 {
- return ErrTooManyRedirects
- }
- return nil
- },
- client: &multiStaticHTTPClient{
- responses: []staticHTTPResponse{
- {
- resp: http.Response{
- StatusCode: http.StatusTemporaryRedirect,
- Header: http.Header{"Location": []string{"http://example.com"}},
- },
- },
- {
- resp: http.Response{
- StatusCode: http.StatusTemporaryRedirect,
- Header: http.Header{"Location": []string{"http://example.com"}},
- },
- },
- {
- resp: http.Response{
- StatusCode: http.StatusTeapot,
- },
- },
- },
- },
- wantErr: ErrTooManyRedirects,
- },
-
- // fail if Location header not set
- {
- checkRedirect: func(int) error { return ErrTooManyRedirects },
- client: &multiStaticHTTPClient{
- responses: []staticHTTPResponse{
- {
- resp: http.Response{
- StatusCode: http.StatusTemporaryRedirect,
- },
- },
- },
- },
- wantErr: errors.New("location header not set"),
- },
-
- // fail if Location header is invalid
- {
- checkRedirect: func(int) error { return ErrTooManyRedirects },
- client: &multiStaticHTTPClient{
- responses: []staticHTTPResponse{
- {
- resp: http.Response{
- StatusCode: http.StatusTemporaryRedirect,
- Header: http.Header{"Location": []string{":"}},
- },
- },
- },
- },
- wantErr: errors.New("location header not valid URL: :"),
- },
-
- // fail if redirects checked way too many times
- {
- checkRedirect: func(int) error { return nil },
- client: &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusTemporaryRedirect,
- Header: http.Header{"Location": []string{"http://example.com"}},
- },
- },
- wantErr: errTooManyRedirectChecks,
- },
- }
-
- for i, tt := range tests {
- client := &redirectFollowingHTTPClient{client: tt.client, checkRedirect: tt.checkRedirect}
- resp, _, err := client.Do(context.Background(), nil)
- if (tt.wantErr == nil && tt.wantErr != err) || (tt.wantErr != nil && tt.wantErr.Error() != err.Error()) {
- t.Errorf("#%d: got err=%v, want=%v", i, err, tt.wantErr)
- continue
- }
-
- if resp == nil {
- if tt.wantCode != 0 {
- t.Errorf("#%d: resp is nil, want=%d", i, tt.wantCode)
- }
- continue
- }
-
- if resp.StatusCode != tt.wantCode {
- t.Errorf("#%d: resp code=%d, want=%d", i, resp.StatusCode, tt.wantCode)
- continue
- }
- }
-}
-
-func TestDefaultCheckRedirect(t *testing.T) {
- tests := []struct {
- num int
- err error
- }{
- {0, nil},
- {5, nil},
- {10, nil},
- {11, ErrTooManyRedirects},
- {29, ErrTooManyRedirects},
- }
-
- for i, tt := range tests {
- err := DefaultCheckRedirect(tt.num)
- if !reflect.DeepEqual(tt.err, err) {
- t.Errorf("#%d: want=%#v got=%#v", i, tt.err, err)
- }
- }
-}
-
-func TestHTTPClusterClientSync(t *testing.T) {
- cf := newStaticHTTPClientFactory([]staticHTTPResponse{
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- })
-
- hc := &httpClusterClient{
- clientFactory: cf,
- rand: rand.New(rand.NewSource(0)),
- }
- err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"})
- if err != nil {
- t.Fatalf("unexpected error during setup: %#v", err)
- }
-
- want := []string{"http://127.0.0.1:2379"}
- got := hc.Endpoints()
- if !reflect.DeepEqual(want, got) {
- t.Fatalf("incorrect endpoints: want=%#v got=%#v", want, got)
- }
-
- err = hc.Sync(context.Background())
- if err != nil {
- t.Fatalf("unexpected error during Sync: %#v", err)
- }
-
- want = []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002", "http://127.0.0.1:4003"}
- got = hc.Endpoints()
- sort.Strings(got)
- if !reflect.DeepEqual(want, got) {
- t.Fatalf("incorrect endpoints post-Sync: want=%#v got=%#v", want, got)
- }
-
- err = hc.SetEndpoints([]string{"http://127.0.0.1:4009"})
- if err != nil {
- t.Fatalf("unexpected error during reset: %#v", err)
- }
-
- want = []string{"http://127.0.0.1:4009"}
- got = hc.Endpoints()
- if !reflect.DeepEqual(want, got) {
- t.Fatalf("incorrect endpoints post-reset: want=%#v got=%#v", want, got)
- }
-}
-
-func TestHTTPClusterClientSyncFail(t *testing.T) {
- cf := newStaticHTTPClientFactory([]staticHTTPResponse{
- {err: errors.New("fail!")},
- })
-
- hc := &httpClusterClient{
- clientFactory: cf,
- rand: rand.New(rand.NewSource(0)),
- }
- err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"})
- if err != nil {
- t.Fatalf("unexpected error during setup: %#v", err)
- }
-
- want := []string{"http://127.0.0.1:2379"}
- got := hc.Endpoints()
- if !reflect.DeepEqual(want, got) {
- t.Fatalf("incorrect endpoints: want=%#v got=%#v", want, got)
- }
-
- err = hc.Sync(context.Background())
- if err == nil {
- t.Fatalf("got nil error during Sync")
- }
-
- got = hc.Endpoints()
- if !reflect.DeepEqual(want, got) {
- t.Fatalf("incorrect endpoints after failed Sync: want=%#v got=%#v", want, got)
- }
-}
-
-func TestHTTPClusterClientAutoSyncCancelContext(t *testing.T) {
- cf := newStaticHTTPClientFactory([]staticHTTPResponse{
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- })
-
- hc := &httpClusterClient{
- clientFactory: cf,
- rand: rand.New(rand.NewSource(0)),
- }
- err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"})
- if err != nil {
- t.Fatalf("unexpected error during setup: %#v", err)
- }
- ctx, cancel := context.WithCancel(context.Background())
- cancel()
-
- err = hc.AutoSync(ctx, time.Hour)
- if err != context.Canceled {
- t.Fatalf("incorrect error value: want=%v got=%v", context.Canceled, err)
- }
-}
-
-func TestHTTPClusterClientAutoSyncFail(t *testing.T) {
- cf := newStaticHTTPClientFactory([]staticHTTPResponse{
- {err: errors.New("fail!")},
- })
-
- hc := &httpClusterClient{
- clientFactory: cf,
- rand: rand.New(rand.NewSource(0)),
- }
- err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"})
- if err != nil {
- t.Fatalf("unexpected error during setup: %#v", err)
- }
-
- err = hc.AutoSync(context.Background(), time.Hour)
- if !strings.HasPrefix(err.Error(), ErrClusterUnavailable.Error()) {
- t.Fatalf("incorrect error value: want=%v got=%v", ErrClusterUnavailable, err)
- }
-}
-
-func TestHTTPClusterClientGetVersion(t *testing.T) {
- body := []byte(`{"etcdserver":"2.3.2","etcdcluster":"2.3.0"}`)
- cf := newStaticHTTPClientFactory([]staticHTTPResponse{
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Length": []string{"44"}}},
- body: body,
- },
- })
-
- hc := &httpClusterClient{
- clientFactory: cf,
- rand: rand.New(rand.NewSource(0)),
- }
- err := hc.SetEndpoints([]string{"http://127.0.0.1:4003", "http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"})
- if err != nil {
- t.Fatalf("unexpected error during setup: %#v", err)
- }
-
- actual, err := hc.GetVersion(context.Background())
- if err != nil {
- t.Errorf("non-nil error: %#v", err)
- }
- expected := version.Versions{Server: "2.3.2", Cluster: "2.3.0"}
- if !reflect.DeepEqual(&expected, actual) {
- t.Errorf("incorrect Response: want=%#v got=%#v", expected, actual)
- }
-}
-
-// TestHTTPClusterClientSyncPinEndpoint tests that Sync() pins the endpoint when
-// it gets the exactly same member list as before.
-func TestHTTPClusterClientSyncPinEndpoint(t *testing.T) {
- cf := newStaticHTTPClientFactory([]staticHTTPResponse{
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- })
-
- hc := &httpClusterClient{
- clientFactory: cf,
- rand: rand.New(rand.NewSource(0)),
- }
- err := hc.SetEndpoints([]string{"http://127.0.0.1:4003", "http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"})
- if err != nil {
- t.Fatalf("unexpected error during setup: %#v", err)
- }
- pinnedEndpoint := hc.endpoints[hc.pinned]
-
- for i := 0; i < 3; i++ {
- err = hc.Sync(context.Background())
- if err != nil {
- t.Fatalf("#%d: unexpected error during Sync: %#v", i, err)
- }
-
- if g := hc.endpoints[hc.pinned]; g != pinnedEndpoint {
- t.Errorf("#%d: pinned endpoint = %v, want %v", i, g, pinnedEndpoint)
- }
- }
-}
-
-// TestHTTPClusterClientSyncUnpinEndpoint tests that Sync() unpins the endpoint when
-// it gets a different member list than before.
-func TestHTTPClusterClientSyncUnpinEndpoint(t *testing.T) {
- cf := newStaticHTTPClientFactory([]staticHTTPResponse{
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- })
-
- hc := &httpClusterClient{
- clientFactory: cf,
- rand: rand.New(rand.NewSource(0)),
- }
- err := hc.SetEndpoints([]string{"http://127.0.0.1:4003", "http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"})
- if err != nil {
- t.Fatalf("unexpected error during setup: %#v", err)
- }
- wants := []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"}
-
- for i := 0; i < 3; i++ {
- err = hc.Sync(context.Background())
- if err != nil {
- t.Fatalf("#%d: unexpected error during Sync: %#v", i, err)
- }
-
- if g := hc.endpoints[hc.pinned]; g.String() != wants[i] {
- t.Errorf("#%d: pinned endpoint = %v, want %v", i, g, wants[i])
- }
- }
-}
-
-// TestHTTPClusterClientSyncPinLeaderEndpoint tests that Sync() pins the leader
-// when the selection mode is EndpointSelectionPrioritizeLeader
-func TestHTTPClusterClientSyncPinLeaderEndpoint(t *testing.T) {
- cf := newStaticHTTPClientFactory([]staticHTTPResponse{
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]}`),
- },
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}`),
- },
- })
-
- hc := &httpClusterClient{
- clientFactory: cf,
- rand: rand.New(rand.NewSource(0)),
- selectionMode: EndpointSelectionPrioritizeLeader,
- endpoints: []url.URL{{}}, // Need somewhere to pretend to send to initially
- }
-
- wants := []string{"http://127.0.0.1:4003", "http://127.0.0.1:4002"}
-
- for i, want := range wants {
- err := hc.Sync(context.Background())
- if err != nil {
- t.Fatalf("#%d: unexpected error during Sync: %#v", i, err)
- }
-
- pinned := hc.endpoints[hc.pinned].String()
- if pinned != want {
- t.Errorf("#%d: pinned endpoint = %v, want %v", i, pinned, want)
- }
- }
-}
-
-func TestHTTPClusterClientResetFail(t *testing.T) {
- tests := [][]string{
- // need at least one endpoint
- {},
-
- // urls must be valid
- {":"},
- }
-
- for i, tt := range tests {
- hc := &httpClusterClient{rand: rand.New(rand.NewSource(0))}
- err := hc.SetEndpoints(tt)
- if err == nil {
- t.Errorf("#%d: expected non-nil error", i)
- }
- }
-}
-
-func TestHTTPClusterClientResetPinRandom(t *testing.T) {
- round := 2000
- pinNum := 0
- for i := 0; i < round; i++ {
- hc := &httpClusterClient{rand: rand.New(rand.NewSource(int64(i)))}
- err := hc.SetEndpoints([]string{"http://127.0.0.1:4001", "http://127.0.0.1:4002", "http://127.0.0.1:4003"})
- if err != nil {
- t.Fatalf("#%d: reset error (%v)", i, err)
- }
- if hc.endpoints[hc.pinned].String() == "http://127.0.0.1:4001" {
- pinNum++
- }
- }
-
- min := 1.0/3.0 - 0.05
- max := 1.0/3.0 + 0.05
- if ratio := float64(pinNum) / float64(round); ratio > max || ratio < min {
- t.Errorf("pinned ratio = %v, want [%v, %v]", ratio, min, max)
- }
-}
diff --git a/client/v2/curl.go b/client/v2/curl.go
deleted file mode 100644
index c8bc9fba20e..00000000000
--- a/client/v2/curl.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "net/http"
- "os"
-)
-
-var (
- cURLDebug = false
-)
-
-func EnablecURLDebug() {
- cURLDebug = true
-}
-
-func DisablecURLDebug() {
- cURLDebug = false
-}
-
-// printcURL prints the cURL equivalent request to stderr.
-// It returns an error if the body of the request cannot
-// be read.
-// The caller MUST cancel the request if there is an error.
-func printcURL(req *http.Request) error {
- if !cURLDebug {
- return nil
- }
- var (
- command string
- b []byte
- err error
- )
-
- if req.URL != nil {
- command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String())
- }
-
- if req.Body != nil {
- b, err = ioutil.ReadAll(req.Body)
- if err != nil {
- return err
- }
- command += fmt.Sprintf(" -d %q", string(b))
- }
-
- fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command)
-
- // reset body
- body := bytes.NewBuffer(b)
- req.Body = ioutil.NopCloser(body)
-
- return nil
-}
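
printcURL has to consume req.Body to render the -d flag, so it puts the bytes back with a fresh reader before returning. Below is a small standalone sketch of that read-then-reset pattern; logAndRestoreBody and the sample request are invented for the example.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// logAndRestoreBody shows the read-then-reset pattern printcURL relies on:
// the body is consumed once to build the log line, then replaced so the
// transport can still send the request afterwards.
func logAndRestoreBody(req *http.Request) error {
	if req.Body == nil {
		return nil
	}
	b, err := io.ReadAll(req.Body)
	if err != nil {
		return err
	}
	fmt.Printf("curl -X %s %s -d %q\n", req.Method, req.URL.String(), string(b))
	req.Body = io.NopCloser(bytes.NewBuffer(b)) // reset so the body is readable again
	return nil
}

func main() {
	req, _ := http.NewRequest(http.MethodPut, "http://127.0.0.1:2379/v2/keys/foo", strings.NewReader("value=bar"))
	if err := logAndRestoreBody(req); err != nil {
		fmt.Println("log error:", err)
	}
	b, _ := io.ReadAll(req.Body) // still readable after logging
	fmt.Println(string(b))       // value=bar
}
```
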
diff --git a/client/v2/doc.go b/client/v2/doc.go
deleted file mode 100644
index 5250758b017..00000000000
--- a/client/v2/doc.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package client provides bindings for the etcd APIs.
-
-Create a Config and exchange it for a Client:
-
- import (
- "net/http"
- "context"
-
- "go.etcd.io/etcd/client/v2"
- )
-
- cfg := client.Config{
- Endpoints: []string{"http://127.0.0.1:2379"},
- Transport: DefaultTransport,
- }
-
- c, err := client.New(cfg)
- if err != nil {
- // handle error
- }
-
-Clients are safe for concurrent use by multiple goroutines.
-
-Create a KeysAPI using the Client, then use it to interact with etcd:
-
- kAPI := client.NewKeysAPI(c)
-
- // create a new key /foo with the value "bar"
- _, err = kAPI.Create(context.Background(), "/foo", "bar")
- if err != nil {
- // handle error
- }
-
- // delete the newly created key only if the value is still "bar"
- _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"})
- if err != nil {
- // handle error
- }
-
-Use a custom context to set timeouts on your operations:
-
- import "time"
-
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
-
- // set a new key, ignoring its previous state
- _, err := kAPI.Set(ctx, "/ping", "pong", nil)
- if err != nil {
- if err == context.DeadlineExceeded {
- // request took longer than 5s
- } else {
- // handle error
- }
- }
-
-*/
-package client
diff --git a/client/v2/example_keys_test.go b/client/v2/example_keys_test.go
deleted file mode 120000
index 1e4520ddfe9..00000000000
--- a/client/v2/example_keys_test.go
+++ /dev/null
@@ -1 +0,0 @@
-../../tests/integration/client/examples/example_keys_test.go
\ No newline at end of file
diff --git a/client/v2/go.mod b/client/v2/go.mod
deleted file mode 100644
index d6880b3f85b..00000000000
--- a/client/v2/go.mod
+++ /dev/null
@@ -1,24 +0,0 @@
-module go.etcd.io/etcd/client/v2
-
-go 1.16
-
-require (
- github.com/json-iterator/go v1.1.10
- github.com/modern-go/reflect2 v1.0.1
- go.etcd.io/etcd/api/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/client/pkg/v3 v3.5.0-alpha.0
-)
-
-replace (
- go.etcd.io/etcd/api/v3 => ../../api
- go.etcd.io/etcd/client/pkg/v3 => ../pkg
-)
-
-// Bad imports sometimes cause attempts to pull that code.
-// This makes the error more explicit.
-replace (
- go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY
-	go.etcd.io/etcd/pkg/v3 => ./FORBIDDEN_DEPENDENCY
- go.etcd.io/etcd/tests/v3 => ./FORBIDDEN_DEPENDENCY
- go.etcd.io/etcd/v3 => ./FORBIDDEN_DEPENDENCY
-)
diff --git a/client/v2/go.sum b/client/v2/go.sum
deleted file mode 100644
index ef8988b078e..00000000000
--- a/client/v2/go.sum
+++ /dev/null
@@ -1,145 +0,0 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19/go.mod h1:aMfIlz3TDBfB0BwTCKFU1XbEmj9zevr5S5LcBr85MXw=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/client/v2/json.go b/client/v2/json.go
deleted file mode 100644
index d5be690a171..00000000000
--- a/client/v2/json.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "strconv"
- "unsafe"
-
- "github.com/json-iterator/go"
- "github.com/modern-go/reflect2"
-)
-
-type customNumberExtension struct {
- jsoniter.DummyExtension
-}
-
-func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder {
- if typ.String() == "interface {}" {
- return customNumberDecoder{}
- }
- return nil
-}
-
-type customNumberDecoder struct {
-}
-
-func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
- switch iter.WhatIsNext() {
- case jsoniter.NumberValue:
- var number jsoniter.Number
- iter.ReadVal(&number)
- i64, err := strconv.ParseInt(string(number), 10, 64)
- if err == nil {
- *(*interface{})(ptr) = i64
- return
- }
- f64, err := strconv.ParseFloat(string(number), 64)
- if err == nil {
- *(*interface{})(ptr) = f64
- return
- }
- iter.ReportError("DecodeNumber", err.Error())
- default:
- *(*interface{})(ptr) = iter.Read()
- }
-}
-
-// caseSensitiveJsonIterator returns a jsoniterator API that's configured to be
-// case-sensitive when unmarshalling, and otherwise compatible with
-// the encoding/json standard library.
-func caseSensitiveJsonIterator() jsoniter.API {
- config := jsoniter.Config{
- EscapeHTML: true,
- SortMapKeys: true,
- ValidateJsonRawMessage: true,
- CaseSensitive: true,
- }.Froze()
- // Force jsoniter to decode number to interface{} via int64/float64, if possible.
- config.RegisterExtension(&customNumberExtension{})
- return config
-}
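
The decoder deleted above configured jsoniter to stay case-sensitive when unmarshalling and to prefer int64 over float64 for JSON numbers. For orientation, a minimal sketch of the case-sensitive part using only the public jsoniter API that appears in the removed file; the payload, struct, and names are illustrative, and the custom number extension is omitted:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type kv struct {
	Value string `json:"value"`
}

func main() {
	// Same configuration the removed caseSensitiveJsonIterator used,
	// minus the custom number extension.
	api := jsoniter.Config{
		EscapeHTML:             true,
		SortMapKeys:            true,
		ValidateJsonRawMessage: true,
		CaseSensitive:          true,
	}.Froze()

	var out kv
	if err := api.Unmarshal([]byte(`{"Value":"x"}`), &out); err != nil {
		panic(err)
	}
	// With CaseSensitive set, "Value" does not match the `json:"value"` tag,
	// so out.Value stays empty; encoding/json's case-insensitive matching
	// would have filled it.
	fmt.Printf("%q\n", out.Value)
}
```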
diff --git a/client/v2/main_test.go b/client/v2/main_test.go
deleted file mode 100644
index 2a0195aadd6..00000000000
--- a/client/v2/main_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client_test
-
-import (
- "net/http"
- "testing"
-
- "go.etcd.io/etcd/client/pkg/v3/testutil"
-)
-
-func exampleEndpoints() []string { return nil }
-func exampleTransport() *http.Transport { return nil }
-
-func forUnitTestsRunInMockedContext(mocking func(), example func()) {
- mocking()
- // TODO: Call 'example' when mocking() provides realistic mocking of transport.
-
- // The real testing logic of examples gets executed
- // as part of ./tests/integration/client/example/...
-}
-
-func TestMain(m *testing.M) {
- testutil.MustTestMainWithLeakDetection(m)
-}
diff --git a/client/v2/util.go b/client/v2/util.go
deleted file mode 100644
index 15a8babff4d..00000000000
--- a/client/v2/util.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "regexp"
-)
-
-var (
- roleNotFoundRegExp *regexp.Regexp
- userNotFoundRegExp *regexp.Regexp
-)
-
-func init() {
- roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
- userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
-}
-
-// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
-func IsKeyNotFound(err error) bool {
- if cErr, ok := err.(Error); ok {
- return cErr.Code == ErrorCodeKeyNotFound
- }
- return false
-}
-
-// IsRoleNotFound returns true if the error means role not found of v2 API.
-func IsRoleNotFound(err error) bool {
- if ae, ok := err.(authError); ok {
- return roleNotFoundRegExp.MatchString(ae.Message)
- }
- return false
-}
-
-// IsUserNotFound returns true if the error means user not found of v2 API.
-func IsUserNotFound(err error) bool {
- if ae, ok := err.(authError); ok {
- return userNotFoundRegExp.MatchString(ae.Message)
- }
- return false
-}
diff --git a/client/v3/OWNERS b/client/v3/OWNERS
new file mode 100644
index 00000000000..2b7f28b7939
--- /dev/null
+++ b/client/v3/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+ - area/clientv3
diff --git a/client/v3/README.md b/client/v3/README.md
index 1e037d7eb6b..af0087ebcc0 100644
--- a/client/v3/README.md
+++ b/client/v3/README.md
@@ -1,7 +1,7 @@
-# etcd/clientv3
+# etcd/client/v3
[![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs)
-[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/clientv3)
+[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/client/v3)
`etcd/clientv3` is the official Go etcd client for v3.
@@ -11,26 +11,23 @@
go get go.etcd.io/etcd/client/v3
```
-Warning: As etcd 3.5.0 was not yet released, the command above does not work.
-After first pre-release of 3.5.0 [#12498](https://github.com/etcd-io/etcd/issues/12498),
-etcd can be referenced using:
-```
-go get go.etcd.io/etcd/client/v3@v3.5.0-pre
-```
-
## Get started
Create client using `clientv3.New`:
```go
-cli, err := clientv3.New(clientv3.Config{
- Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
- DialTimeout: 5 * time.Second,
-})
-if err != nil {
- // handle error!
+import clientv3 "go.etcd.io/etcd/client/v3"
+
+func main() {
+ cli, err := clientv3.New(clientv3.Config{
+ Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
+ DialTimeout: 5 * time.Second,
+ })
+ if err != nil {
+ // handle error!
+ }
+ defer cli.Close()
}
-defer cli.Close()
```
etcd v3 uses [`gRPC`](https://www.grpc.io) for remote procedure calls. And `clientv3` uses
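
For readers following the updated README snippet above, a complete, compilable variant might look like the sketch below; the key/value names and the 5-second per-request timeout are illustrative, and the endpoints assume a locally running cluster:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379", "localhost:22379", "localhost:32379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Each request carries its own timeout so a slow or partitioned
	// member cannot block the caller indefinitely.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	if _, err := cli.Put(ctx, "sample_key", "sample_value"); err != nil {
		log.Fatal(err)
	}
	resp, err := cli.Get(ctx, "sample_key")
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
	}
}
```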
diff --git a/client/v3/auth.go b/client/v3/auth.go
index a6f75d32159..382172b21bf 100644
--- a/client/v3/auth.go
+++ b/client/v3/auth.go
@@ -19,9 +19,10 @@ import (
"fmt"
"strings"
+ "google.golang.org/grpc"
+
"go.etcd.io/etcd/api/v3/authpb"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "google.golang.org/grpc"
)
type (
@@ -134,67 +135,67 @@ func NewAuthFromAuthClient(remote pb.AuthClient, c *Client) Auth {
func (auth *authClient) Authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...)
- return (*AuthenticateResponse)(resp), toErr(ctx, err)
+ return (*AuthenticateResponse)(resp), ContextError(ctx, err)
}
func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...)
- return (*AuthEnableResponse)(resp), toErr(ctx, err)
+ return (*AuthEnableResponse)(resp), ContextError(ctx, err)
}
func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...)
- return (*AuthDisableResponse)(resp), toErr(ctx, err)
+ return (*AuthDisableResponse)(resp), ContextError(ctx, err)
}
func (auth *authClient) AuthStatus(ctx context.Context) (*AuthStatusResponse, error) {
resp, err := auth.remote.AuthStatus(ctx, &pb.AuthStatusRequest{}, auth.callOpts...)
- return (*AuthStatusResponse)(resp), toErr(ctx, err)
+ return (*AuthStatusResponse)(resp), ContextError(ctx, err)
}
func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...)
- return (*AuthUserAddResponse)(resp), toErr(ctx, err)
+ return (*AuthUserAddResponse)(resp), ContextError(ctx, err)
}
func (auth *authClient) UserAddWithOptions(ctx context.Context, name string, password string, options *UserAddOptions) (*AuthUserAddResponse, error) {
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: (*authpb.UserAddOptions)(options)}, auth.callOpts...)
- return (*AuthUserAddResponse)(resp), toErr(ctx, err)
+ return (*AuthUserAddResponse)(resp), ContextError(ctx, err)
}
func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...)
- return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
+ return (*AuthUserDeleteResponse)(resp), ContextError(ctx, err)
}
func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...)
- return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
+ return (*AuthUserChangePasswordResponse)(resp), ContextError(ctx, err)
}
func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...)
- return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
+ return (*AuthUserGrantRoleResponse)(resp), ContextError(ctx, err)
}
func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...)
- return (*AuthUserGetResponse)(resp), toErr(ctx, err)
+ return (*AuthUserGetResponse)(resp), ContextError(ctx, err)
}
func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) {
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...)
- return (*AuthUserListResponse)(resp), toErr(ctx, err)
+ return (*AuthUserListResponse)(resp), ContextError(ctx, err)
}
func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...)
- return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
+ return (*AuthUserRevokeRoleResponse)(resp), ContextError(ctx, err)
}
func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
- return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
+ return (*AuthRoleAddResponse)(resp), ContextError(ctx, err)
}
func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
@@ -204,27 +205,27 @@ func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, ke
PermType: authpb.Permission_Type(permType),
}
resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...)
- return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
+ return (*AuthRoleGrantPermissionResponse)(resp), ContextError(ctx, err)
}
func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...)
- return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
+ return (*AuthRoleGetResponse)(resp), ContextError(ctx, err)
}
func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...)
- return (*AuthRoleListResponse)(resp), toErr(ctx, err)
+ return (*AuthRoleListResponse)(resp), ContextError(ctx, err)
}
func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...)
- return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
+ return (*AuthRoleRevokePermissionResponse)(resp), ContextError(ctx, err)
}
func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...)
- return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
+ return (*AuthRoleDeleteResponse)(resp), ContextError(ctx, err)
}
func StrToPermissionType(s string) (PermissionType, error) {
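
The auth.go hunk above is a mechanical rename of the internal toErr helper to the exported ContextError; callers still compare against the rpctypes sentinel errors. A hedged sketch of how a caller might branch on those sentinels (the role name and handling are illustrative):

```go
package example

import (
	"context"
	"errors"
	"log"

	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
	clientv3 "go.etcd.io/etcd/client/v3"
)

// ensureRole creates the role if it does not already exist.
func ensureRole(ctx context.Context, cli *clientv3.Client, role string) error {
	_, err := cli.RoleAdd(ctx, role)
	switch {
	// errors.Is works because ContextError maps gRPC status errors back
	// to the rpctypes sentinel values before returning them.
	case err == nil, errors.Is(err, rpctypes.ErrRoleAlreadyExist):
		return nil
	case errors.Is(err, rpctypes.ErrPermissionDenied):
		log.Printf("current user may not manage roles")
		return err
	default:
		return err
	}
}
```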
diff --git a/client/v3/client.go b/client/v3/client.go
index 75e3c97e22a..24f5988986d 100644
--- a/client/v3/client.go
+++ b/client/v3/client.go
@@ -18,21 +18,26 @@ import (
"context"
"errors"
"fmt"
- "strconv"
"strings"
"sync"
"time"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3/credentials"
- "go.etcd.io/etcd/client/v3/internal/endpoint"
- "go.etcd.io/etcd/client/v3/internal/resolver"
+ "github.com/coreos/go-semver/semver"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
grpccredentials "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/status"
+
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+ "go.etcd.io/etcd/client/v3/credentials"
+ "go.etcd.io/etcd/client/v3/internal/endpoint"
+ "go.etcd.io/etcd/client/v3/internal/resolver"
)
var (
@@ -54,7 +59,9 @@ type Client struct {
cfg Config
creds grpccredentials.TransportCredentials
resolver *resolver.EtcdManualResolver
- mu *sync.RWMutex
+
+ epMu *sync.RWMutex
+ endpoints []string
ctx context.Context
cancel context.CancelFunc
@@ -63,7 +70,7 @@ type Client struct {
Username string
// Password is a password for authentication.
Password string
- authTokenBundle credentials.Bundle
+ authTokenBundle credentials.PerRPCCredentialsBundle
callOpts []grpc.CallOption
@@ -85,7 +92,7 @@ func New(cfg Config) (*Client, error) {
// service interface implementations and do not need connection management.
func NewCtxClient(ctx context.Context, opts ...Option) *Client {
cctx, cancel := context.WithCancel(ctx)
- c := &Client{ctx: cctx, cancel: cancel, lgMu: new(sync.RWMutex)}
+ c := &Client{ctx: cctx, cancel: cancel, lgMu: new(sync.RWMutex), epMu: new(sync.RWMutex)}
for _, opt := range opts {
opt(c)
}
@@ -147,7 +154,7 @@ func (c *Client) Close() error {
c.Lease.Close()
}
if c.conn != nil {
- return toErr(c.ctx, c.conn.Close())
+ return ContextError(c.ctx, c.conn.Close())
}
return c.ctx.Err()
}
@@ -160,18 +167,18 @@ func (c *Client) Ctx() context.Context { return c.ctx }
// Endpoints lists the registered endpoints for the client.
func (c *Client) Endpoints() []string {
// copy the slice; protect original endpoints from being changed
- c.mu.RLock()
- defer c.mu.RUnlock()
- eps := make([]string, len(c.cfg.Endpoints))
- copy(eps, c.cfg.Endpoints)
+ c.epMu.RLock()
+ defer c.epMu.RUnlock()
+ eps := make([]string, len(c.endpoints))
+ copy(eps, c.endpoints)
return eps
}
// SetEndpoints updates client's endpoints.
func (c *Client) SetEndpoints(eps ...string) {
- c.mu.Lock()
- defer c.mu.Unlock()
- c.cfg.Endpoints = eps
+ c.epMu.Lock()
+ defer c.epMu.Unlock()
+ c.endpoints = eps
c.resolver.SetEndpoints(eps)
}
@@ -184,9 +191,19 @@ func (c *Client) Sync(ctx context.Context) error {
}
var eps []string
for _, m := range mresp.Members {
- eps = append(eps, m.ClientURLs...)
+ if len(m.Name) != 0 && !m.IsLearner {
+ eps = append(eps, m.ClientURLs...)
+ }
}
+ // The linearizable `MemberList` returned successfully, so the
+ // endpoints shouldn't be empty.
+ verify.Verify(func() {
+ if len(eps) == 0 {
+ panic("empty endpoints returned from etcd cluster")
+ }
+ })
c.SetEndpoints(eps...)
+ c.lg.Debug("set etcd endpoints by autoSync", zap.Strings("endpoints", eps))
return nil
}
@@ -203,7 +220,7 @@ func (c *Client) autoSync() {
ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
err := c.Sync(ctx)
cancel()
- if err != nil && err != c.ctx.Err() {
+ if err != nil && !errors.Is(err, c.ctx.Err()) {
c.lg.Info("Auto sync endpoints failed.", zap.Error(err))
}
}
@@ -211,7 +228,9 @@ func (c *Client) autoSync() {
}
// dialSetupOpts gives the dial opts prior to any authentication.
-func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) {
+func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) []grpc.DialOption {
+ var opts []grpc.DialOption
+
if c.cfg.DialKeepAliveTime > 0 {
params := keepalive.ClientParameters{
Time: c.cfg.DialKeepAliveTime,
@@ -225,21 +244,36 @@ func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts
if creds != nil {
opts = append(opts, grpc.WithTransportCredentials(creds))
} else {
- opts = append(opts, grpc.WithInsecure())
+ opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ }
+
+ unaryMaxRetries := defaultUnaryMaxRetries
+ if c.cfg.MaxUnaryRetries > 0 {
+ unaryMaxRetries = c.cfg.MaxUnaryRetries
+ }
+
+ backoffWaitBetween := defaultBackoffWaitBetween
+ if c.cfg.BackoffWaitBetween > 0 {
+ backoffWaitBetween = c.cfg.BackoffWaitBetween
+ }
+
+ backoffJitterFraction := defaultBackoffJitterFraction
+ if c.cfg.BackoffJitterFraction > 0 {
+ backoffJitterFraction = c.cfg.BackoffJitterFraction
}
// Interceptor retry and backoff.
// TODO: Replace all of clientv3/retry.go with RetryPolicy:
// https://github.com/grpc/grpc-proto/blob/cdd9ed5c3d3f87aef62f373b93361cf7bddc620d/grpc/service_config/service_config.proto#L130
- rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction))
+ rrBackoff := withBackoff(c.roundRobinQuorumBackoff(backoffWaitBetween, backoffJitterFraction))
opts = append(opts,
// Disable stream retry by default since go-grpc-middleware/retry does not support client streams.
// Streams that are safe to retry are enabled individually.
grpc.WithStreamInterceptor(c.streamClientInterceptor(withMax(0), rrBackoff)),
- grpc.WithUnaryInterceptor(c.unaryClientInterceptor(withMax(defaultUnaryMaxRetries), rrBackoff)),
+ grpc.WithUnaryInterceptor(c.unaryClientInterceptor(withMax(unaryMaxRetries), rrBackoff)),
)
- return opts, nil
+ return opts
}
// Dial connects to a single endpoint using the client's config.
@@ -260,7 +294,8 @@ func (c *Client) getToken(ctx context.Context) error {
resp, err := c.Auth.Authenticate(ctx, c.Username, c.Password)
if err != nil {
- if err == rpctypes.ErrAuthNotEnabled {
+ if errors.Is(err, rpctypes.ErrAuthNotEnabled) {
+ c.authTokenBundle.UpdateAuthToken("")
return nil
}
return err
@@ -279,12 +314,9 @@ func (c *Client) dialWithBalancer(dopts ...grpc.DialOption) (*grpc.ClientConn, e
// dial configures and dials any grpc balancer target.
func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
- opts, err := c.dialSetupOpts(creds, dopts...)
- if err != nil {
- return nil, fmt.Errorf("failed to configure dialer: %v", err)
- }
- if c.Username != "" && c.Password != "" {
- c.authTokenBundle = credentials.NewBundle(credentials.Config{})
+ opts := c.dialSetupOpts(creds, dopts...)
+
+ if c.authTokenBundle != nil {
opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials()))
}
@@ -296,9 +328,7 @@ func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc.
dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options?
}
-
- initialEndpoints := strings.Join(c.cfg.Endpoints, ";")
- target := fmt.Sprintf("%s://%p/#initially=[%s]", resolver.Schema, c, initialEndpoints)
+ target := fmt.Sprintf("%s://%p/%s", resolver.Schema, c, authority(c.endpoints[0]))
conn, err := grpc.DialContext(dctx, target, opts...)
if err != nil {
return nil, err
@@ -306,18 +336,32 @@ func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc.
return conn, nil
}
+func authority(endpoint string) string {
+ spl := strings.SplitN(endpoint, "://", 2)
+ if len(spl) < 2 {
+ if strings.HasPrefix(endpoint, "unix:") {
+ return endpoint[len("unix:"):]
+ }
+ if strings.HasPrefix(endpoint, "unixs:") {
+ return endpoint[len("unixs:"):]
+ }
+ return endpoint
+ }
+ return spl[1]
+}
+
func (c *Client) credentialsForEndpoint(ep string) grpccredentials.TransportCredentials {
r := endpoint.RequiresCredentials(ep)
switch r {
- case endpoint.CREDS_DROP:
+ case endpoint.CredsDrop:
return nil
- case endpoint.CREDS_OPTIONAL:
+ case endpoint.CredsOptional:
return c.creds
- case endpoint.CREDS_REQUIRE:
+ case endpoint.CredsRequire:
if c.creds != nil {
return c.creds
}
- return credentials.NewBundle(credentials.Config{}).TransportCredentials()
+ return credentials.NewTransportCredential(nil)
default:
panic(fmt.Errorf("unsupported CredsRequirement: %v", r))
}
@@ -329,7 +373,7 @@ func newClient(cfg *Config) (*Client, error) {
}
var creds grpccredentials.TransportCredentials
if cfg.TLS != nil {
- creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials()
+ creds = credentials.NewTransportCredential(cfg.TLS)
}
// use a temporary skeleton client to bootstrap first connection
@@ -345,7 +389,7 @@ func newClient(cfg *Config) (*Client, error) {
creds: creds,
ctx: ctx,
cancel: cancel,
- mu: new(sync.RWMutex),
+ epMu: new(sync.RWMutex),
callOpts: defaultCallOpts,
lgMu: new(sync.RWMutex),
}
@@ -356,7 +400,10 @@ func newClient(cfg *Config) (*Client, error) {
} else if cfg.LogConfig != nil {
client.lg, err = cfg.LogConfig.Build()
} else {
- client.lg, err = createDefaultZapLogger()
+ client.lg, err = logutil.CreateDefaultZapLogger(etcdClientDebugLevel())
+ if client.lg != nil {
+ client.lg = client.lg.Named("etcd-client")
+ }
}
if err != nil {
return nil, err
@@ -365,6 +412,7 @@ func newClient(cfg *Config) (*Client, error) {
if cfg.Username != "" && cfg.Password != "" {
client.Username = cfg.Username
client.Password = cfg.Password
+ client.authTokenBundle = credentials.NewPerRPCCredentialBundle()
}
if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
@@ -388,8 +436,10 @@ func newClient(cfg *Config) (*Client, error) {
if len(cfg.Endpoints) < 1 {
client.cancel()
- return nil, fmt.Errorf("at least one Endpoint is required in client config")
+ return nil, errors.New("at least one Endpoint is required in client config")
}
+ client.SetEndpoints(cfg.Endpoints...)
+
// Use a provided endpoint target so that for https:// without any tls config given, then
// grpc will assume the certificate server name is the endpoint host.
conn, err := client.dialWithBalancer()
@@ -408,7 +458,7 @@ func newClient(cfg *Config) (*Client, error) {
client.Auth = NewAuth(client)
client.Maintenance = NewMaintenance(client)
- //get token with established connection
+ // get token with established connection
ctx, cancel = client.ctx, func() {}
if client.cfg.DialTimeout > 0 {
ctx, cancel = context.WithTimeout(ctx, client.cfg.DialTimeout)
@@ -417,7 +467,7 @@ func newClient(cfg *Config) (*Client, error) {
if err != nil {
client.Close()
cancel()
- //TODO: Consider fmt.Errorf("communicating with [%s] failed: %v", strings.Join(cfg.Endpoints, ";"), err)
+ // TODO: Consider fmt.Errorf("communicating with [%s] failed: %v", strings.Join(cfg.Endpoints, ";"), err)
return nil, err
}
cancel()
@@ -449,6 +499,22 @@ func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFracti
}
}
+// minSupportedVersion returns the minimum version supported, which is the previous minor release.
+func minSupportedVersion() *semver.Version {
+ ver := semver.Must(semver.NewVersion(version.Version))
+ // consider only major and minor version
+ ver = &semver.Version{Major: ver.Major, Minor: ver.Minor}
+ for i := range version.AllVersions {
+ if version.AllVersions[i].Equal(*ver) {
+ if i == 0 {
+ return ver
+ }
+ return &version.AllVersions[i-1]
+ }
+ }
+ panic("current version is not in the version list")
+}
+
func (c *Client) checkVersion() (err error) {
var wg sync.WaitGroup
@@ -470,20 +536,13 @@ func (c *Client) checkVersion() (err error) {
errc <- rerr
return
}
- vs := strings.Split(resp.Version, ".")
- maj, min := 0, 0
- if len(vs) >= 2 {
- var serr error
- if maj, serr = strconv.Atoi(vs[0]); serr != nil {
- errc <- serr
- return
- }
- if min, serr = strconv.Atoi(vs[1]); serr != nil {
- errc <- serr
- return
- }
+ vs, serr := semver.NewVersion(resp.Version)
+ if serr != nil {
+ errc <- serr
+ return
}
- if maj < 3 || (maj == 3 && min < 2) {
+
+ if vs.LessThan(*minSupportedVersion()) {
rerr = ErrOldCluster
}
errc <- rerr
@@ -491,7 +550,7 @@ func (c *Client) checkVersion() (err error) {
}
// wait for success
for range eps {
- if err = <-errc; err == nil {
+ if err = <-errc; err != nil {
break
}
}
@@ -539,12 +598,15 @@ func isUnavailableErr(ctx context.Context, err error) bool {
return false
}
-func toErr(ctx context.Context, err error) error {
+// ContextError converts the error into an EtcdError if the error message matches one of
+// the defined messages; otherwise, it tries to retrieve the context error.
+func ContextError(ctx context.Context, err error) error {
if err == nil {
return nil
}
err = rpctypes.Error(err)
- if _, ok := err.(rpctypes.EtcdError); ok {
+ var serverErr rpctypes.EtcdError
+ if errors.As(err, &serverErr) {
return err
}
if ev, ok := status.FromError(err); ok {
@@ -566,7 +628,7 @@ func canceledByCaller(stopCtx context.Context, err error) bool {
return false
}
- return err == context.Canceled || err == context.DeadlineExceeded
+ return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded)
}
// IsConnCanceled returns true, if error is from a closed gRPC connection.
@@ -584,7 +646,7 @@ func IsConnCanceled(err error) bool {
}
// >= gRPC v1.10.x
- if err == context.Canceled {
+ if errors.Is(err, context.Canceled) {
return true
}
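
dialSetupOpts above now honors per-client retry and backoff knobs instead of the package defaults. A hedged example of wiring them up; the values are illustrative and the field names are the ones introduced in the config.go hunk later in this diff:

```go
package example

import (
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func newRetryTunedClient() (*clientv3.Client, error) {
	return clientv3.New(clientv3.Config{
		Endpoints:             []string{"localhost:2379"},
		DialTimeout:           5 * time.Second,
		MaxUnaryRetries:       5,                      // cap on automatic retries of unary RPCs
		BackoffWaitBetween:    100 * time.Millisecond, // base wait between retries
		BackoffJitterFraction: 0.5,                    // fraction used to jitter that wait
	})
}
```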
diff --git a/client/v3/client_test.go b/client/v3/client_test.go
index b2ff7ef177c..cbe94a411b3 100644
--- a/client/v3/client_test.go
+++ b/client/v3/client_test.go
@@ -16,42 +16,49 @@ package clientv3
import (
"context"
- "fmt"
- "go.uber.org/zap"
+ "errors"
+ "io"
"net"
+ "sync"
"testing"
"time"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
+ "github.com/coreos/go-semver/semver"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
"go.uber.org/zap/zaptest"
-
"google.golang.org/grpc"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
)
func NewClient(t *testing.T, cfg Config) (*Client, error) {
- cfg.Logger = zaptest.NewLogger(t)
+ t.Helper()
+ if cfg.Logger == nil {
+ cfg.Logger = zaptest.NewLogger(t).Named("client")
+ }
return New(cfg)
}
func TestDialCancel(t *testing.T) {
- testutil.BeforeTest(t)
+ testutil.RegisterLeakDetection(t)
// accept first connection so client is created with dial timeout
ln, err := net.Listen("unix", "dialcancel:12345")
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
defer ln.Close()
ep := "unix://dialcancel:12345"
cfg := Config{
Endpoints: []string{ep},
- DialTimeout: 30 * time.Second}
- c, err := NewClient(t, cfg)
- if err != nil {
- t.Fatal(err)
+ DialTimeout: 30 * time.Second,
}
+ c, err := NewClient(t, cfg)
+ require.NoError(t, err)
// connect to ipv4 black hole so dial blocks
c.SetEndpoints("http://254.0.0.1:12345")
@@ -87,7 +94,7 @@ func TestDialCancel(t *testing.T) {
}
func TestDialTimeout(t *testing.T) {
- testutil.BeforeTest(t)
+ testutil.RegisterLeakDetection(t)
wantError := context.DeadlineExceeded
@@ -109,14 +116,14 @@ func TestDialTimeout(t *testing.T) {
for i, cfg := range testCfgs {
donec := make(chan error, 1)
- go func(cfg Config) {
+ go func(cfg Config, i int) {
// without timeout, dial continues forever on ipv4 black hole
c, err := NewClient(t, cfg)
if c != nil || err == nil {
t.Errorf("#%d: new client should fail", i)
}
donec <- err
- }(cfg)
+ }(cfg, i)
time.Sleep(10 * time.Millisecond)
@@ -140,30 +147,101 @@ func TestDialTimeout(t *testing.T) {
func TestDialNoTimeout(t *testing.T) {
cfg := Config{Endpoints: []string{"127.0.0.1:12345"}}
c, err := NewClient(t, cfg)
- if c == nil || err != nil {
- t.Fatalf("new client with DialNoWait should succeed, got %v", err)
- }
+ require.NotNilf(t, c, "new client with DialNoWait should succeed, got %v", err)
+ require.NoErrorf(t, err, "new client with DialNoWait should succeed")
c.Close()
}
-func TestIsHaltErr(t *testing.T) {
- if !isHaltErr(context.TODO(), fmt.Errorf("etcdserver: some etcdserver error")) {
- t.Errorf(`error prefixed with "etcdserver: " should be Halted by default`)
+func TestMaxUnaryRetries(t *testing.T) {
+ maxUnaryRetries := uint(10)
+ cfg := Config{
+ Endpoints: []string{"127.0.0.1:12345"},
+ MaxUnaryRetries: maxUnaryRetries,
}
- if isHaltErr(context.TODO(), rpctypes.ErrGRPCStopped) {
- t.Errorf("error %v should not halt", rpctypes.ErrGRPCStopped)
+ c, err := NewClient(t, cfg)
+ require.NoError(t, err)
+ require.NotNil(t, c)
+ defer c.Close()
+
+ require.Equal(t, maxUnaryRetries, c.cfg.MaxUnaryRetries)
+}
+
+func TestBackoff(t *testing.T) {
+ backoffWaitBetween := 100 * time.Millisecond
+ cfg := Config{
+ Endpoints: []string{"127.0.0.1:12345"},
+ BackoffWaitBetween: backoffWaitBetween,
}
- if isHaltErr(context.TODO(), rpctypes.ErrGRPCNoLeader) {
- t.Errorf("error %v should not halt", rpctypes.ErrGRPCNoLeader)
+ c, err := NewClient(t, cfg)
+ require.NoError(t, err)
+ require.NotNil(t, c)
+ defer c.Close()
+
+ require.Equal(t, backoffWaitBetween, c.cfg.BackoffWaitBetween)
+}
+
+func TestBackoffJitterFraction(t *testing.T) {
+ backoffJitterFraction := float64(0.9)
+ cfg := Config{
+ Endpoints: []string{"127.0.0.1:12345"},
+ BackoffJitterFraction: backoffJitterFraction,
}
+ c, err := NewClient(t, cfg)
+ require.NoError(t, err)
+ require.NotNil(t, c)
+ defer c.Close()
+
+ require.InDelta(t, backoffJitterFraction, c.cfg.BackoffJitterFraction, 0.01)
+}
+
+func TestIsHaltErr(t *testing.T) {
+ assert.Truef(t,
+ isHaltErr(context.TODO(), errors.New("etcdserver: some etcdserver error")),
+ "error created by errors.New should be unavailable error",
+ )
+ assert.Falsef(t,
+ isHaltErr(context.TODO(), rpctypes.ErrGRPCStopped),
+ `error "%v" should not be halt error`, rpctypes.ErrGRPCStopped,
+ )
+ assert.Falsef(t,
+ isHaltErr(context.TODO(), rpctypes.ErrGRPCNoLeader),
+ `error "%v" should not be halt error`, rpctypes.ErrGRPCNoLeader,
+ )
ctx, cancel := context.WithCancel(context.TODO())
- if isHaltErr(ctx, nil) {
- t.Errorf("no error and active context should not be Halted")
- }
+ assert.Falsef(t,
+ isHaltErr(ctx, nil),
+ "no error and active context should be halt error",
+ )
cancel()
- if !isHaltErr(ctx, nil) {
- t.Errorf("cancel on context should be Halted")
- }
+ assert.Truef(t,
+ isHaltErr(ctx, nil),
+ "cancel on context should be halt error",
+ )
+}
+
+func TestIsUnavailableErr(t *testing.T) {
+ assert.Falsef(t,
+ isUnavailableErr(context.TODO(), errors.New("etcdserver: some etcdserver error")),
+ "error created by errors.New should not be unavailable error",
+ )
+ assert.Truef(t,
+ isUnavailableErr(context.TODO(), rpctypes.ErrGRPCStopped),
+ `error "%v" should be unavailable error`, rpctypes.ErrGRPCStopped,
+ )
+ assert.Falsef(t,
+ isUnavailableErr(context.TODO(), rpctypes.ErrGRPCNotCapable),
+ "error %v should not be unavailable error", rpctypes.ErrGRPCNotCapable,
+ )
+ ctx, cancel := context.WithCancel(context.TODO())
+ assert.Falsef(t,
+ isUnavailableErr(ctx, nil),
+ "no error and active context should not be unavailable error",
+ )
+ cancel()
+ assert.Falsef(t,
+ isUnavailableErr(ctx, nil),
+ "cancel on context should not be unavailable error",
+ )
}
func TestCloseCtxClient(t *testing.T) {
@@ -198,3 +276,238 @@ func TestZapWithLogger(t *testing.T) {
t.Errorf("WithZapLogger should modify *zap.Logger")
}
}
+
+func TestAuthTokenBundleNoOverwrite(t *testing.T) {
+ // This call in particular changes working directory to the tmp dir of
+ // the test. The `etcd-auth-test:0` can be created in local directory,
+ // not exceeding the longest allowed path on OsX.
+ testutil.BeforeTest(t)
+
+ // Create a mock AuthServer to handle Authenticate RPCs.
+ lis, err := net.Listen("unix", "etcd-auth-test:0")
+ require.NoError(t, err)
+ defer lis.Close()
+ addr := "unix://" + lis.Addr().String()
+ srv := grpc.NewServer()
+ etcdserverpb.RegisterAuthServer(srv, mockAuthServer{})
+ go srv.Serve(lis)
+ defer srv.Stop()
+
+ // Create a client, which should call Authenticate on the mock server to
+ // exchange username/password for an auth token.
+ c, err := NewClient(t, Config{
+ DialTimeout: 5 * time.Second,
+ Endpoints: []string{addr},
+ Username: "foo",
+ Password: "bar",
+ })
+ require.NoError(t, err)
+ defer c.Close()
+ oldTokenBundle := c.authTokenBundle
+
+ // Call the public Dial again, which should preserve the original
+ // authTokenBundle.
+ gc, err := c.Dial(addr)
+ require.NoError(t, err)
+ defer gc.Close()
+ newTokenBundle := c.authTokenBundle
+
+ if oldTokenBundle != newTokenBundle {
+ t.Error("Client.authTokenBundle has been overwritten during Client.Dial")
+ }
+}
+
+func TestSyncFiltersMembers(t *testing.T) {
+ c, _ := NewClient(t, Config{Endpoints: []string{"http://254.0.0.1:12345"}})
+ defer c.Close()
+ c.Cluster = &mockCluster{
+ []*etcdserverpb.Member{
+ {ID: 0, Name: "", ClientURLs: []string{"http://254.0.0.1:12345"}, IsLearner: false},
+ {ID: 1, Name: "isStarted", ClientURLs: []string{"http://254.0.0.2:12345"}, IsLearner: true},
+ {ID: 2, Name: "isStartedAndNotLearner", ClientURLs: []string{"http://254.0.0.3:12345"}, IsLearner: false},
+ },
+ }
+ c.Sync(context.Background())
+
+ endpoints := c.Endpoints()
+ if len(endpoints) != 1 || endpoints[0] != "http://254.0.0.3:12345" {
+ t.Error("Client.Sync uses learner and/or non-started member client URLs")
+ }
+}
+
+func TestMinSupportedVersion(t *testing.T) {
+ testutil.BeforeTest(t)
+ tests := []struct {
+ name string
+ currentVersion semver.Version
+ minSupportedVersion semver.Version
+ }{
+ {
+ name: "v3.6 client should accept v3.5",
+ currentVersion: version.V3_6,
+ minSupportedVersion: version.V3_5,
+ },
+ {
+ name: "v3.7 client should accept v3.6",
+ currentVersion: version.V3_7,
+ minSupportedVersion: version.V3_6,
+ },
+ {
+ name: "first minor version should accept its previous version",
+ currentVersion: version.V4_0,
+ minSupportedVersion: version.V3_7,
+ },
+ {
+ name: "first version in list should not accept previous versions",
+ currentVersion: version.V3_0,
+ minSupportedVersion: version.V3_0,
+ },
+ }
+
+ versionBackup := version.Version
+ t.Cleanup(func() {
+ version.Version = versionBackup
+ })
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ version.Version = tt.currentVersion.String()
+ require.True(t, minSupportedVersion().Equal(tt.minSupportedVersion))
+ })
+ }
+}
+
+func TestClientRejectOldCluster(t *testing.T) {
+ testutil.BeforeTest(t)
+ tests := []struct {
+ name string
+ endpoints []string
+ versions []string
+ expectedError error
+ }{
+ {
+ name: "all new versions with the same value",
+ endpoints: []string{"192.168.3.41:22379", "192.168.3.41:22479", "192.168.3.41:22579"},
+ versions: []string{version.Version, version.Version, version.Version},
+ expectedError: nil,
+ },
+ {
+ name: "all new versions with different values",
+ endpoints: []string{"192.168.3.41:22379", "192.168.3.41:22479", "192.168.3.41:22579"},
+ versions: []string{version.Version, minSupportedVersion().String(), minSupportedVersion().String()},
+ expectedError: nil,
+ },
+ {
+ name: "all old versions with different values",
+ endpoints: []string{"192.168.3.41:22379", "192.168.3.41:22479", "192.168.3.41:22579"},
+ versions: []string{"3.3.0", "3.3.0", "3.4.0"},
+ expectedError: ErrOldCluster,
+ },
+ {
+ name: "all old versions with the same value",
+ endpoints: []string{"192.168.3.41:22379", "192.168.3.41:22479", "192.168.3.41:22579"},
+ versions: []string{"3.3.0", "3.3.0", "3.3.0"},
+ expectedError: ErrOldCluster,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if len(tt.endpoints) != len(tt.versions) || len(tt.endpoints) == 0 {
+ t.Errorf("Unexpected endpoints and versions length, len(endpoints):%d, len(versions):%d", len(tt.endpoints), len(tt.versions))
+ return
+ }
+ endpointToVersion := make(map[string]string)
+ for j := range tt.endpoints {
+ endpointToVersion[tt.endpoints[j]] = tt.versions[j]
+ }
+ c := &Client{
+ ctx: context.Background(),
+ endpoints: tt.endpoints,
+ epMu: new(sync.RWMutex),
+ Maintenance: &mockMaintenance{
+ Version: endpointToVersion,
+ },
+ }
+
+ if err := c.checkVersion(); !errors.Is(err, tt.expectedError) {
+ t.Errorf("checkVersion err:%v", err)
+ }
+ })
+ }
+}
+
+type mockMaintenance struct {
+ Version map[string]string
+}
+
+func (mm mockMaintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
+ return &StatusResponse{Version: mm.Version[endpoint]}, nil
+}
+
+func (mm mockMaintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
+ return nil, nil
+}
+
+func (mm mockMaintenance) AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) {
+ return nil, nil
+}
+
+func (mm mockMaintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
+ return nil, nil
+}
+
+func (mm mockMaintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
+ return nil, nil
+}
+
+func (mm mockMaintenance) SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error) {
+ return nil, nil
+}
+
+func (mm mockMaintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
+ return nil, nil
+}
+
+func (mm mockMaintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
+ return nil, nil
+}
+
+func (mm mockMaintenance) Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error) {
+ return nil, nil
+}
+
+type mockAuthServer struct {
+ *etcdserverpb.UnimplementedAuthServer
+}
+
+func (mockAuthServer) Authenticate(context.Context, *etcdserverpb.AuthenticateRequest) (*etcdserverpb.AuthenticateResponse, error) {
+ return &etcdserverpb.AuthenticateResponse{Token: "mock-token"}, nil
+}
+
+type mockCluster struct {
+ members []*etcdserverpb.Member
+}
+
+func (mc *mockCluster) MemberList(ctx context.Context, opts ...OpOption) (*MemberListResponse, error) {
+ return &MemberListResponse{Members: mc.members}, nil
+}
+
+func (mc *mockCluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
+ return nil, nil
+}
+
+func (mc *mockCluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
+ return nil, nil
+}
+
+func (mc *mockCluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
+ return nil, nil
+}
+
+func (mc *mockCluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
+ return nil, nil
+}
+
+func (mc *mockCluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) {
+ return nil, nil
+}
diff --git a/client/v3/clientv3util/example_key_test.go b/client/v3/clientv3util/example_key_test.go
index 0d7cd3d19bd..fbbbe417260 100644
--- a/client/v3/clientv3util/example_key_test.go
+++ b/client/v3/clientv3util/example_key_test.go
@@ -18,7 +18,7 @@ import (
"context"
"log"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/clientv3util"
)
diff --git a/client/v3/clientv3util/util.go b/client/v3/clientv3util/util.go
index c8d5098a520..144777bd2c7 100644
--- a/client/v3/clientv3util/util.go
+++ b/client/v3/clientv3util/util.go
@@ -16,7 +16,7 @@
package clientv3util
import (
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
// KeyExists returns a comparison operation that evaluates to true iff the given
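
clientv3util's comparisons pair naturally with transactions; a short sketch of a create-if-missing write using KeyMissing (the key and value names are illustrative):

```go
package example

import (
	"context"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/clientv3util"
)

// putIfAbsent writes key only when it does not exist yet and reports
// whether the write actually happened.
func putIfAbsent(ctx context.Context, cli *clientv3.Client, key, val string) (bool, error) {
	resp, err := cli.Txn(ctx).
		If(clientv3util.KeyMissing(key)).
		Then(clientv3.OpPut(key, val)).
		Commit()
	if err != nil {
		return false, err
	}
	return resp.Succeeded, nil
}
```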
diff --git a/client/v3/cluster.go b/client/v3/cluster.go
index 92d7cdb56b0..1b7e83375c3 100644
--- a/client/v3/cluster.go
+++ b/client/v3/cluster.go
@@ -17,10 +17,10 @@ package clientv3
import (
"context"
+ "google.golang.org/grpc"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/pkg/v3/types"
-
- "google.golang.org/grpc"
)
type (
@@ -34,7 +34,7 @@ type (
type Cluster interface {
// MemberList lists the current cluster membership.
- MemberList(ctx context.Context) (*MemberListResponse, error)
+ MemberList(ctx context.Context, opts ...OpOption) (*MemberListResponse, error)
// MemberAdd adds a new member into the cluster.
MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
@@ -93,7 +93,7 @@ func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner b
}
resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
if err != nil {
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
return (*MemberAddResponse)(resp), nil
}
@@ -102,7 +102,7 @@ func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveRes
r := &pb.MemberRemoveRequest{ID: id}
resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...)
if err != nil {
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
return (*MemberRemoveResponse)(resp), nil
}
@@ -119,23 +119,23 @@ func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []strin
if err == nil {
return (*MemberUpdateResponse)(resp), nil
}
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
-func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
- // it is safe to retry on list.
- resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{Linearizable: true}, c.callOpts...)
+func (c *cluster) MemberList(ctx context.Context, opts ...OpOption) (*MemberListResponse, error) {
+ opt := OpGet("", opts...)
+ resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{Linearizable: !opt.serializable}, c.callOpts...)
if err == nil {
return (*MemberListResponse)(resp), nil
}
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) {
r := &pb.MemberPromoteRequest{ID: id}
resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...)
if err != nil {
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
return (*MemberPromoteResponse)(resp), nil
}
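
With the variadic OpOption added to MemberList above, callers can trade linearizability for a cheaper local read. A hedged sketch, assuming the existing WithSerializable option is the one used to flip the flag:

```go
package example

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func printMembers(ctx context.Context, cli *clientv3.Client) error {
	// The default remains a linearizable member list; WithSerializable()
	// turns the request into a serializable (possibly stale) read.
	resp, err := cli.MemberList(ctx, clientv3.WithSerializable())
	if err != nil {
		return err
	}
	for _, m := range resp.Members {
		fmt.Printf("%x %s %v\n", m.ID, m.Name, m.ClientURLs)
	}
	return nil
}
```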
diff --git a/client/v3/compact_op_test.go b/client/v3/compact_op_test.go
index f483322adf5..914be10a908 100644
--- a/client/v3/compact_op_test.go
+++ b/client/v3/compact_op_test.go
@@ -18,13 +18,13 @@ import (
"reflect"
"testing"
+ "github.com/stretchr/testify/require"
+
"go.etcd.io/etcd/api/v3/etcdserverpb"
)
func TestCompactOp(t *testing.T) {
req1 := OpCompact(100, WithCompactPhysical()).toRequest()
req2 := &etcdserverpb.CompactionRequest{Revision: 100, Physical: true}
- if !reflect.DeepEqual(req1, req2) {
- t.Fatalf("expected %+v, got %+v", req2, req1)
- }
+ require.Truef(t, reflect.DeepEqual(req1, req2), "expected %+v, got %+v", req2, req1)
}
diff --git a/client/v3/compare.go b/client/v3/compare.go
index e2967cf38ed..663fdb4d206 100644
--- a/client/v3/compare.go
+++ b/client/v3/compare.go
@@ -18,8 +18,10 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)
-type CompareTarget int
-type CompareResult int
+type (
+ CompareTarget int
+ CompareResult int
+)
const (
CompareVersion CompareTarget = iota
@@ -30,7 +32,7 @@ const (
type Cmp pb.Compare
-func Compare(cmp Cmp, result string, v interface{}) Cmp {
+func Compare(cmp Cmp, result string, v any) Cmp {
var r pb.Compare_CompareResult
switch result {
@@ -120,7 +122,7 @@ func (cmp Cmp) WithPrefix() Cmp {
}
// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
-func mustInt64(val interface{}) int64 {
+func mustInt64(val any) int64 {
if v, ok := val.(int64); ok {
return v
}
@@ -132,7 +134,7 @@ func mustInt64(val interface{}) int64 {
// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
// int64 otherwise.
-func mustInt64orLeaseID(val interface{}) int64 {
+func mustInt64orLeaseID(val any) int64 {
if v, ok := val.(LeaseID); ok {
return int64(v)
}
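
Cmp values built by Compare form the guard side of a transaction; a short example of the common compare-and-swap shape (the key and expected values are illustrative):

```go
package example

import (
	"context"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// casValue swaps key from oldVal to newVal only if it still holds oldVal,
// and reports whether the swap took effect.
func casValue(ctx context.Context, cli *clientv3.Client, key, oldVal, newVal string) (bool, error) {
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.Value(key), "=", oldVal)).
		Then(clientv3.OpPut(key, newVal)).
		Commit()
	if err != nil {
		return false, err
	}
	return resp.Succeeded, nil
}
```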
diff --git a/client/v3/concurrency/election.go b/client/v3/concurrency/election.go
index 31e93d24280..ac1303dd8b4 100644
--- a/client/v3/concurrency/election.go
+++ b/client/v3/concurrency/election.go
@@ -90,7 +90,7 @@ func (e *Election) Campaign(ctx context.Context, val string) error {
}
}
- _, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
+ err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
if err != nil {
// clean up in case of context cancel
select {
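
Campaign only ever used the error from waitDeletes, so the dropped header return value above does not change the caller-facing API. For orientation, a typical election loop looks roughly like the sketch below; the prefix, candidate value, and session TTL are illustrative:

```go
package example

import (
	"context"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

func runForLeader(ctx context.Context, cli *clientv3.Client) error {
	s, err := concurrency.NewSession(cli, concurrency.WithTTL(15))
	if err != nil {
		return err
	}
	defer s.Close()

	e := concurrency.NewElection(s, "/my-election/")
	// Campaign blocks until this candidate becomes leader or ctx is done.
	if err := e.Campaign(ctx, "candidate-1"); err != nil {
		return err
	}
	log.Println("elected leader")
	defer e.Resign(context.Background())

	<-s.Done() // lease lost; leadership is gone
	return nil
}
```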
diff --git a/client/v3/concurrency/key.go b/client/v3/concurrency/key.go
index 20825950f30..92e365c4715 100644
--- a/client/v3/concurrency/key.go
+++ b/client/v3/concurrency/key.go
@@ -16,9 +16,8 @@ package concurrency
import (
"context"
- "fmt"
+ "errors"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/mvccpb"
v3 "go.etcd.io/etcd/client/v3"
)
@@ -42,24 +41,24 @@ func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) e
if err := ctx.Err(); err != nil {
return err
}
- return fmt.Errorf("lost watcher waiting for delete")
+ return errors.New("lost watcher waiting for delete")
}
// waitDeletes efficiently waits until all keys matching the prefix and no greater
-// than the create revision.
-func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
+// than the create revision are deleted.
+func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) error {
getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
for {
resp, err := client.Get(ctx, pfx, getOpts...)
if err != nil {
- return nil, err
+ return err
}
if len(resp.Kvs) == 0 {
- return resp.Header, nil
+ return nil
}
lastKey := string(resp.Kvs[0].Key)
if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
- return nil, err
+ return err
}
}
}
diff --git a/client/v3/concurrency/main_test.go b/client/v3/concurrency/main_test.go
index d8819be04d2..42cce0dd85f 100644
--- a/client/v3/concurrency/main_test.go
+++ b/client/v3/concurrency/main_test.go
@@ -22,12 +22,12 @@ import (
func exampleEndpoints() []string { return nil }
-func forUnitTestsRunInMockedContext(mocking func(), example func()) {
+func forUnitTestsRunInMockedContext(mocking func(), _example func()) {
mocking()
// TODO: Call 'example' when mocking() provides realistic mocking of transport.
// The real testing logic of examples gets executed
- // as part of ./tests/integration/clientv3/integration/...
+ // as part of ./tests/integration/clientv3/concurrency/...
}
func TestMain(m *testing.M) {
diff --git a/client/v3/concurrency/mutex.go b/client/v3/concurrency/mutex.go
index c3800d6282a..6898bbcec41 100644
--- a/client/v3/concurrency/mutex.go
+++ b/client/v3/concurrency/mutex.go
@@ -18,6 +18,7 @@ import (
"context"
"errors"
"fmt"
+ "strings"
"sync"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
@@ -25,8 +26,11 @@ import (
)
// ErrLocked is returned by TryLock when Mutex is already locked by another session.
-var ErrLocked = errors.New("mutex: Locked by another session")
-var ErrSessionExpired = errors.New("mutex: session is expired")
+var (
+ ErrLocked = errors.New("mutex: Locked by another session")
+ ErrSessionExpired = errors.New("mutex: session is expired")
+ ErrLockReleased = errors.New("mutex: lock has already been released")
+)
// Mutex implements the sync Locker interface with etcd
type Mutex struct {
@@ -82,7 +86,7 @@ func (m *Mutex) Lock(ctx context.Context) error {
client := m.s.Client()
// wait for deletion revisions prior to myKey
// TODO: early termination if the session key is deleted before other session keys with smaller revisions.
- _, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
+ werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
// release lock key if wait failed
if werr != nil {
m.Unlock(client.Ctx())
@@ -128,6 +132,14 @@ func (m *Mutex) tryAcquire(ctx context.Context) (*v3.TxnResponse, error) {
}
func (m *Mutex) Unlock(ctx context.Context) error {
+ if m.myKey == "" || m.myRev <= 0 || m.myKey == "\x00" {
+ return ErrLockReleased
+ }
+
+ if !strings.HasPrefix(m.myKey, m.pfx) {
+ return fmt.Errorf("invalid key %q, it should have prefix %q", m.myKey, m.pfx)
+ }
+
client := m.s.Client()
if _, err := client.Delete(ctx, m.myKey); err != nil {
return err
@@ -154,6 +166,7 @@ func (lm *lockerMutex) Lock() {
panic(err)
}
}
+
func (lm *lockerMutex) Unlock() {
client := lm.s.Client()
if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
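
The new Unlock guard above makes a repeated Unlock return ErrLockReleased instead of silently issuing a delete for a stale or empty key. A hedged usage sketch showing the non-blocking TryLock path and the double-unlock behavior (the lock prefix is illustrative):

```go
package example

import (
	"context"
	"errors"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

func withLock(ctx context.Context, cli *clientv3.Client) error {
	s, err := concurrency.NewSession(cli)
	if err != nil {
		return err
	}
	defer s.Close()

	m := concurrency.NewMutex(s, "/my-lock/")
	if err := m.TryLock(ctx); err != nil {
		if errors.Is(err, concurrency.ErrLocked) {
			log.Println("lock held by another session, skipping")
			return nil
		}
		return err
	}

	// ... critical section ...

	if err := m.Unlock(ctx); err != nil {
		return err
	}
	// A second Unlock now fails fast rather than deleting anything.
	if err := m.Unlock(ctx); !errors.Is(err, concurrency.ErrLockReleased) {
		log.Printf("unexpected error on double unlock: %v", err)
	}
	return nil
}
```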
diff --git a/client/v3/concurrency/session.go b/client/v3/concurrency/session.go
index 7143cc47471..2275e96c972 100644
--- a/client/v3/concurrency/session.go
+++ b/client/v3/concurrency/session.go
@@ -18,6 +18,8 @@ import (
"context"
"time"
+ "go.uber.org/zap"
+
v3 "go.etcd.io/etcd/client/v3"
)
@@ -30,15 +32,17 @@ type Session struct {
opts *sessionOptions
id v3.LeaseID
+ ctx context.Context
cancel context.CancelFunc
donec <-chan struct{}
}
// NewSession gets the leased session for a client.
func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
+ lg := client.GetLogger()
ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
for _, opt := range opts {
- opt(ops)
+ opt(ops, lg)
}
id := ops.leaseID
@@ -58,11 +62,14 @@ func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
}
donec := make(chan struct{})
- s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}
+ s := &Session{client: client, opts: ops, id: id, ctx: ctx, cancel: cancel, donec: donec}
// keep the lease alive until client error or cancelled context
go func() {
- defer close(donec)
+ defer func() {
+ close(donec)
+ cancel()
+ }()
for range keepAlive {
// eat messages until keep alive channel closes
}
@@ -79,6 +86,12 @@ func (s *Session) Client() *v3.Client {
// Lease is the lease ID for keys bound to the session.
func (s *Session) Lease() v3.LeaseID { return s.id }
+// Ctx is the context attached to the session; it is canceled when the lease is orphaned, expires, or
+// is otherwise no longer being refreshed.
+func (s *Session) Ctx() context.Context {
+ return s.ctx
+}
+
// Done returns a channel that closes when the lease is orphaned, expires, or
// is otherwise no longer being refreshed.
func (s *Session) Done() <-chan struct{} { return s.donec }
@@ -108,14 +121,16 @@ type sessionOptions struct {
}
// SessionOption configures Session.
-type SessionOption func(*sessionOptions)
+type SessionOption func(*sessionOptions, *zap.Logger)
// WithTTL configures the session's TTL in seconds.
// If TTL is <= 0, the default 60 seconds TTL will be used.
func WithTTL(ttl int) SessionOption {
- return func(so *sessionOptions) {
+ return func(so *sessionOptions, lg *zap.Logger) {
if ttl > 0 {
so.ttl = ttl
+ } else {
+ lg.Warn("WithTTL(): TTL should be > 0, preserving current TTL", zap.Int64("current-session-ttl", int64(so.ttl)))
}
}
}
@@ -124,7 +139,7 @@ func WithTTL(ttl int) SessionOption {
// This is useful in process restart scenario, for example, to reclaim
// leadership from an election prior to restart.
func WithLease(leaseID v3.LeaseID) SessionOption {
- return func(so *sessionOptions) {
+ return func(so *sessionOptions, _ *zap.Logger) {
so.leaseID = leaseID
}
}
@@ -135,7 +150,7 @@ func WithLease(leaseID v3.LeaseID) SessionOption {
// context is canceled before Close() completes, the session's lease will be
// abandoned and left to expire instead of being revoked.
func WithContext(ctx context.Context) SessionOption {
- return func(so *sessionOptions) {
+ return func(so *sessionOptions, _ *zap.Logger) {
so.ctx = ctx
}
}
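
Session now exposes Ctx(), a context canceled together with the keep-alive loop, and WithTTL logs a warning instead of silently ignoring a non-positive TTL. A brief sketch of passing the session context into lease-scoped work; the TTL and key names are illustrative:

```go
package example

import (
	"context"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

func leaseScopedWork(cli *clientv3.Client) error {
	s, err := concurrency.NewSession(cli, concurrency.WithTTL(30))
	if err != nil {
		return err
	}
	defer s.Close()

	// s.Ctx() is canceled when the lease is orphaned, expires, or stops
	// being refreshed, so work derived from it stops with the session.
	go func(ctx context.Context) {
		<-ctx.Done()
		log.Println("session lease is gone, stopping background work")
	}(s.Ctx())

	_, err = cli.Put(s.Ctx(), "ephemeral-key", "v", clientv3.WithLease(s.Lease()))
	return err
}
```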
diff --git a/client/v3/concurrency/stm.go b/client/v3/concurrency/stm.go
index ba7303d0977..49a82181db7 100644
--- a/client/v3/concurrency/stm.go
+++ b/client/v3/concurrency/stm.go
@@ -303,6 +303,10 @@ type stmSerializable struct {
}
func (s *stmSerializable) Get(keys ...string) string {
+ if len(keys) == 0 {
+ return ""
+ }
+
if wv := s.wset.get(keys...); wv != nil {
return wv.val
}
diff --git a/client/v3/concurrency/stm_test.go b/client/v3/concurrency/stm_test.go
new file mode 100644
index 00000000000..90a9c07f36e
--- /dev/null
+++ b/client/v3/concurrency/stm_test.go
@@ -0,0 +1,52 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestGet(t *testing.T) {
+ tests := []struct {
+ name string
+ stm *stmSerializable
+ in []string
+ resp string
+ }{
+ {
+ name: "Empty keys returns empty string",
+ stm: &stmSerializable{},
+ in: []string{},
+ resp: "",
+ },
+ {
+ name: "Nil keys returns empty string",
+ stm: &stmSerializable{},
+ in: nil,
+ resp: "",
+ },
+ }
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ resp := test.stm.Get(test.in...)
+
+ assert.Equal(t, test.resp, resp)
+ })
+ }
+}
diff --git a/client/v3/config.go b/client/v3/config.go
index 335a288732b..8351828d2f9 100644
--- a/client/v3/config.go
+++ b/client/v3/config.go
@@ -21,6 +21,8 @@ import (
"go.uber.org/zap"
"google.golang.org/grpc"
+
+ "go.etcd.io/etcd/client/pkg/v3/transport"
)
type Config struct {
@@ -52,7 +54,7 @@ type Config struct {
// If 0, it defaults to "math.MaxInt32", because range response can
// easily exceed request send limits.
// Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit.
- // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
+ // ("--max-recv-bytes" flag to etcd).
MaxCallRecvMsgSize int
// TLS holds the client secure credentials, if any.
@@ -88,5 +90,139 @@ type Config struct {
// PermitWithoutStream when set will allow client to send keepalive pings to server without any active streams(RPCs).
PermitWithoutStream bool `json:"permit-without-stream"`
+ // MaxUnaryRetries is the maximum number of retries for unary RPCs.
+ MaxUnaryRetries uint `json:"max-unary-retries"`
+
+ // BackoffWaitBetween is the wait time before retrying an RPC.
+ BackoffWaitBetween time.Duration `json:"backoff-wait-between"`
+
+ // BackoffJitterFraction is the jitter fraction to randomize backoff wait time.
+ BackoffJitterFraction float64 `json:"backoff-jitter-fraction"`
+
// TODO: support custom balancer picker
}
+
+// ConfigSpec is the configuration from users, which comes from command-line flags,
+// environment variables or config file. It is a fully declarative configuration,
+// and can be serialized & deserialized to/from JSON.
+type ConfigSpec struct {
+ Endpoints []string `json:"endpoints"`
+ RequestTimeout time.Duration `json:"request-timeout"`
+ DialTimeout time.Duration `json:"dial-timeout"`
+ KeepAliveTime time.Duration `json:"keepalive-time"`
+ KeepAliveTimeout time.Duration `json:"keepalive-timeout"`
+ MaxCallSendMsgSize int `json:"max-request-bytes"`
+ MaxCallRecvMsgSize int `json:"max-recv-bytes"`
+ Secure *SecureConfig `json:"secure"`
+ Auth *AuthConfig `json:"auth"`
+}
+
+type SecureConfig struct {
+ Cert string `json:"cert"`
+ Key string `json:"key"`
+ Cacert string `json:"cacert"`
+ ServerName string `json:"server-name"`
+
+ InsecureTransport bool `json:"insecure-transport"`
+ InsecureSkipVerify bool `json:"insecure-skip-tls-verify"`
+}
+
+type AuthConfig struct {
+ Username string `json:"username"`
+ Password string `json:"password"`
+}
+
+func (cs *ConfigSpec) Clone() *ConfigSpec {
+ if cs == nil {
+ return nil
+ }
+
+ clone := *cs
+
+ if len(cs.Endpoints) > 0 {
+ clone.Endpoints = make([]string, len(cs.Endpoints))
+ copy(clone.Endpoints, cs.Endpoints)
+ }
+
+ if cs.Secure != nil {
+ clone.Secure = &SecureConfig{}
+ *clone.Secure = *cs.Secure
+ }
+ if cs.Auth != nil {
+ clone.Auth = &AuthConfig{}
+ *clone.Auth = *cs.Auth
+ }
+
+ return &clone
+}
+
+func (cfg AuthConfig) Empty() bool {
+ return cfg.Username == "" && cfg.Password == ""
+}
+
+// NewClientConfig creates a Config based on the provided ConfigSpec.
+func NewClientConfig(confSpec *ConfigSpec, lg *zap.Logger) (*Config, error) {
+ tlsCfg, err := newTLSConfig(confSpec.Secure, lg)
+ if err != nil {
+ return nil, err
+ }
+
+ cfg := &Config{
+ Endpoints: confSpec.Endpoints,
+ DialTimeout: confSpec.DialTimeout,
+ DialKeepAliveTime: confSpec.KeepAliveTime,
+ DialKeepAliveTimeout: confSpec.KeepAliveTimeout,
+ MaxCallSendMsgSize: confSpec.MaxCallSendMsgSize,
+ MaxCallRecvMsgSize: confSpec.MaxCallRecvMsgSize,
+ TLS: tlsCfg,
+ }
+
+ if confSpec.Auth != nil {
+ cfg.Username = confSpec.Auth.Username
+ cfg.Password = confSpec.Auth.Password
+ }
+
+ return cfg, nil
+}
+
+func newTLSConfig(scfg *SecureConfig, lg *zap.Logger) (*tls.Config, error) {
+ var (
+ tlsCfg *tls.Config
+ err error
+ )
+
+ if scfg == nil {
+ return nil, nil
+ }
+
+ if scfg.Cert != "" || scfg.Key != "" || scfg.Cacert != "" || scfg.ServerName != "" {
+ cfgtls := &transport.TLSInfo{
+ CertFile: scfg.Cert,
+ KeyFile: scfg.Key,
+ TrustedCAFile: scfg.Cacert,
+ ServerName: scfg.ServerName,
+ Logger: lg,
+ }
+ if tlsCfg, err = cfgtls.ClientConfig(); err != nil {
+ return nil, err
+ }
+ }
+
+ // If no key/cert is given but the user wants a secure connection, we
+ // still set up an empty TLS configuration so that gRPC establishes a
+ // secure connection.
+ if tlsCfg == nil && !scfg.InsecureTransport {
+ tlsCfg = &tls.Config{}
+ }
+
+ // If the user wants to skip TLS verification then we should set
+ // the InsecureSkipVerify flag in tls configuration.
+ if scfg.InsecureSkipVerify {
+ if tlsCfg == nil {
+ tlsCfg = &tls.Config{}
+ }
+ tlsCfg.InsecureSkipVerify = scfg.InsecureSkipVerify
+ }
+
+ return tlsCfg, nil
+}
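
A brief sketch of how the new declarative `ConfigSpec` is intended to be consumed; the endpoint, certificate paths, and credentials below are placeholders, not values from this change:

```go
// A minimal sketch; endpoint, certificate paths and credentials are placeholders.
package main

import (
	"log"
	"time"

	"go.uber.org/zap"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	spec := &clientv3.ConfigSpec{
		Endpoints:        []string{"https://10.0.0.1:2379"},
		DialTimeout:      2 * time.Second,
		KeepAliveTime:    3 * time.Second,
		KeepAliveTimeout: 5 * time.Second,
		Secure: &clientv3.SecureConfig{
			Cert:   "/etc/etcd/client.crt",
			Key:    "/etc/etcd/client.key",
			Cacert: "/etc/etcd/ca.crt",
		},
		Auth: &clientv3.AuthConfig{Username: "root", Password: "changeme"},
	}

	// NewClientConfig builds the TLS config from Secure and copies Auth over.
	cfg, err := clientv3.NewClientConfig(spec, zap.NewExample())
	if err != nil {
		log.Fatal(err)
	}

	cli, err := clientv3.New(*cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}
```
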
diff --git a/client/v3/config_test.go b/client/v3/config_test.go
new file mode 100644
index 00000000000..1fe2fb2d391
--- /dev/null
+++ b/client/v3/config_test.go
@@ -0,0 +1,302 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "crypto/tls"
+ "encoding/json"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+)
+
+func TestNewClientConfig(t *testing.T) {
+ cases := []struct {
+ name string
+ spec ConfigSpec
+ expectedConf Config
+ }{
+ {
+ name: "only has basic info",
+ spec: ConfigSpec{
+ Endpoints: []string{"http://192.168.0.10:2379"},
+ DialTimeout: 2 * time.Second,
+ KeepAliveTime: 3 * time.Second,
+ KeepAliveTimeout: 5 * time.Second,
+ },
+ expectedConf: Config{
+ Endpoints: []string{"http://192.168.0.10:2379"},
+ DialTimeout: 2 * time.Second,
+ DialKeepAliveTime: 3 * time.Second,
+ DialKeepAliveTimeout: 5 * time.Second,
+ },
+ },
+ {
+ name: "auth enabled",
+ spec: ConfigSpec{
+ Endpoints: []string{"http://192.168.0.12:2379"},
+ DialTimeout: 1 * time.Second,
+ KeepAliveTime: 4 * time.Second,
+ KeepAliveTimeout: 6 * time.Second,
+ Auth: &AuthConfig{
+ Username: "test",
+ Password: "changeme",
+ },
+ },
+ expectedConf: Config{
+ Endpoints: []string{"http://192.168.0.12:2379"},
+ DialTimeout: 1 * time.Second,
+ DialKeepAliveTime: 4 * time.Second,
+ DialKeepAliveTimeout: 6 * time.Second,
+ Username: "test",
+ Password: "changeme",
+ },
+ },
+ {
+ name: "default secure transport",
+ spec: ConfigSpec{
+ Endpoints: []string{"http://192.168.0.10:2379"},
+ DialTimeout: 2 * time.Second,
+ KeepAliveTime: 3 * time.Second,
+ KeepAliveTimeout: 5 * time.Second,
+ Secure: &SecureConfig{
+ InsecureTransport: false,
+ },
+ },
+ expectedConf: Config{
+ Endpoints: []string{"http://192.168.0.10:2379"},
+ DialTimeout: 2 * time.Second,
+ DialKeepAliveTime: 3 * time.Second,
+ DialKeepAliveTimeout: 5 * time.Second,
+ TLS: &tls.Config{},
+ },
+ },
+ {
+ name: "default secure transport and skip TLS verification",
+ spec: ConfigSpec{
+ Endpoints: []string{"http://192.168.0.13:2379"},
+ DialTimeout: 1 * time.Second,
+ KeepAliveTime: 3 * time.Second,
+ KeepAliveTimeout: 5 * time.Second,
+ Secure: &SecureConfig{
+ InsecureTransport: false,
+ InsecureSkipVerify: true,
+ },
+ },
+ expectedConf: Config{
+ Endpoints: []string{"http://192.168.0.13:2379"},
+ DialTimeout: 1 * time.Second,
+ DialKeepAliveTime: 3 * time.Second,
+ DialKeepAliveTimeout: 5 * time.Second,
+ TLS: &tls.Config{
+ InsecureSkipVerify: true,
+ },
+ },
+ },
+ {
+ name: "insecure transport and skip TLS verification",
+ spec: ConfigSpec{
+ Endpoints: []string{"http://192.168.0.13:2379"},
+ DialTimeout: 1 * time.Second,
+ KeepAliveTime: 3 * time.Second,
+ KeepAliveTimeout: 5 * time.Second,
+ Secure: &SecureConfig{
+ InsecureTransport: true,
+ InsecureSkipVerify: true,
+ },
+ },
+ expectedConf: Config{
+ Endpoints: []string{"http://192.168.0.13:2379"},
+ DialTimeout: 1 * time.Second,
+ DialKeepAliveTime: 3 * time.Second,
+ DialKeepAliveTimeout: 5 * time.Second,
+ TLS: &tls.Config{
+ InsecureSkipVerify: true,
+ },
+ },
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ lg, _ := logutil.CreateDefaultZapLogger(zap.InfoLevel)
+
+ cfg, err := NewClientConfig(&tc.spec, lg)
+ require.NoError(t, err)
+
+ assert.Equal(t, tc.expectedConf, *cfg)
+ })
+ }
+}
+
+func TestNewClientConfigWithSecureCfg(t *testing.T) {
+ tls, err := transport.SelfCert(zap.NewNop(), t.TempDir(), []string{"localhost"}, 1)
+ require.NoError(t, err)
+
+ scfg := &SecureConfig{
+ Cert: tls.CertFile,
+ Key: tls.KeyFile,
+ Cacert: tls.TrustedCAFile,
+ }
+
+ cfg, err := NewClientConfig(&ConfigSpec{
+ Endpoints: []string{"http://192.168.0.13:2379"},
+ DialTimeout: 2 * time.Second,
+ KeepAliveTime: 3 * time.Second,
+ KeepAliveTimeout: 5 * time.Second,
+ Secure: scfg,
+ }, nil)
+ require.NoErrorf(t, err, "Unexpected result client config")
+ if cfg == nil || cfg.TLS == nil {
+ t.Fatalf("Unexpected result client config: %v", err)
+ }
+}
+
+func TestConfigSpecClone(t *testing.T) {
+ cfgSpec := &ConfigSpec{
+ Endpoints: []string{"ep1", "ep2", "ep3"},
+ RequestTimeout: 10 * time.Second,
+ DialTimeout: 2 * time.Second,
+ KeepAliveTime: 5 * time.Second,
+ KeepAliveTimeout: 2 * time.Second,
+
+ Secure: &SecureConfig{
+ Cert: "path/2/cert",
+ Key: "path/2/key",
+ Cacert: "path/2/cacert",
+ InsecureTransport: true,
+ InsecureSkipVerify: false,
+ },
+
+ Auth: &AuthConfig{
+ Username: "foo",
+ Password: "changeme",
+ },
+ }
+
+ testCases := []struct {
+ name string
+ cs *ConfigSpec
+ newEp []string
+ newSecure *SecureConfig
+ newAuth *AuthConfig
+ expectedEqual bool
+ }{
+ {
+ name: "normal case",
+ cs: cfgSpec,
+ expectedEqual: true,
+ },
+ {
+ name: "point to a new slice of endpoint, but with the same data",
+ cs: cfgSpec,
+ newEp: []string{"ep1", "ep2", "ep3"},
+ expectedEqual: true,
+ },
+ {
+ name: "update endpoint",
+ cs: cfgSpec,
+ newEp: []string{"ep1", "newep2", "ep3"},
+ expectedEqual: false,
+ },
+ {
+ name: "point to a new secureConfig, but with the same data",
+ cs: cfgSpec,
+ newSecure: &SecureConfig{
+ Cert: "path/2/cert",
+ Key: "path/2/key",
+ Cacert: "path/2/cacert",
+ InsecureTransport: true,
+ InsecureSkipVerify: false,
+ },
+ expectedEqual: true,
+ },
+ {
+ name: "update key in secureConfig",
+ cs: cfgSpec,
+ newSecure: &SecureConfig{
+ Cert: "path/2/cert",
+ Key: "newPath/2/key",
+ Cacert: "path/2/cacert",
+ InsecureTransport: true,
+ InsecureSkipVerify: false,
+ },
+ expectedEqual: false,
+ },
+ {
+ name: "update bool values in secureConfig",
+ cs: cfgSpec,
+ newSecure: &SecureConfig{
+ Cert: "path/2/cert",
+ Key: "path/2/key",
+ Cacert: "path/2/cacert",
+ InsecureTransport: false,
+ InsecureSkipVerify: true,
+ },
+ expectedEqual: false,
+ },
+ {
+ name: "point to a new authConfig, but with the same data",
+ cs: cfgSpec,
+ newAuth: &AuthConfig{
+ Username: "foo",
+ Password: "changeme",
+ },
+ expectedEqual: true,
+ },
+ {
+ name: "update authConfig",
+ cs: cfgSpec,
+ newAuth: &AuthConfig{
+ Username: "newUser",
+ Password: "newPassword",
+ },
+ expectedEqual: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ dataBeforeTest, err := json.Marshal(tc.cs)
+ require.NoError(t, err)
+
+ clonedCfgSpec := tc.cs.Clone()
+ if len(tc.newEp) > 0 {
+ clonedCfgSpec.Endpoints = tc.newEp
+ }
+ if tc.newSecure != nil {
+ clonedCfgSpec.Secure = tc.newSecure
+ }
+ if tc.newAuth != nil {
+ clonedCfgSpec.Auth = tc.newAuth
+ }
+
+ actualEqual := reflect.DeepEqual(tc.cs, clonedCfgSpec)
+ require.Equal(t, tc.expectedEqual, actualEqual)
+
+ // double-check the original ConfigSpec isn't updated
+ dataAfterTest, err := json.Marshal(tc.cs)
+ require.NoError(t, err)
+ require.True(t, reflect.DeepEqual(dataBeforeTest, dataAfterTest))
+ })
+ }
+}
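
To summarize what the test above exercises: `Clone` copies the `Endpoints` slice and the `Secure`/`Auth` structs, so mutating the clone never leaks back into the original. A tiny sketch with placeholder values:

```go
// A minimal sketch of ConfigSpec.Clone copy semantics; values are placeholders.
package main

import (
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	orig := &clientv3.ConfigSpec{
		Endpoints: []string{"ep1", "ep2"},
		Auth:      &clientv3.AuthConfig{Username: "foo", Password: "changeme"},
	}

	c := orig.Clone()
	c.Endpoints[0] = "other" // safe: Clone copied the slice
	c.Auth.Username = "bar"  // safe: Clone copied the AuthConfig struct

	fmt.Println(orig.Endpoints[0], orig.Auth.Username) // ep1 foo
}
```
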
diff --git a/client/v3/credentials/credentials.go b/client/v3/credentials/credentials.go
index 42f688eb359..a2d8b45e86a 100644
--- a/client/v3/credentials/credentials.go
+++ b/client/v3/credentials/credentials.go
@@ -19,92 +19,51 @@ package credentials
import (
"context"
"crypto/tls"
- "net"
"sync"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
grpccredentials "google.golang.org/grpc/credentials"
+
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
)
-// Config defines gRPC credential configuration.
-type Config struct {
- TLSConfig *tls.Config
+func NewTransportCredential(cfg *tls.Config) grpccredentials.TransportCredentials {
+ return grpccredentials.NewTLS(cfg)
}
-// Bundle defines gRPC credential interface.
-type Bundle interface {
- grpccredentials.Bundle
+// PerRPCCredentialsBundle defines gRPC credential interface.
+type PerRPCCredentialsBundle interface {
UpdateAuthToken(token string)
+ PerRPCCredentials() grpccredentials.PerRPCCredentials
}
-// NewBundle constructs a new gRPC credential bundle.
-func NewBundle(cfg Config) Bundle {
- return &bundle{
- tc: newTransportCredential(cfg.TLSConfig),
- rc: newPerRPCCredential(),
+func NewPerRPCCredentialBundle() PerRPCCredentialsBundle {
+ return &perRPCCredentialBundle{
+ rc: &perRPCCredential{},
}
}
-// bundle implements "grpccredentials.Bundle" interface.
-type bundle struct {
- tc *transportCredential
+// perRPCCredentialBundle implements `PerRPCCredentialsBundle` interface.
+type perRPCCredentialBundle struct {
rc *perRPCCredential
}
-func (b *bundle) TransportCredentials() grpccredentials.TransportCredentials {
- return b.tc
-}
-
-func (b *bundle) PerRPCCredentials() grpccredentials.PerRPCCredentials {
- return b.rc
-}
-
-func (b *bundle) NewWithMode(mode string) (grpccredentials.Bundle, error) {
- // no-op
- return nil, nil
-}
-
-// transportCredential implements "grpccredentials.TransportCredentials" interface.
-type transportCredential struct {
- gtc grpccredentials.TransportCredentials
-}
-
-func newTransportCredential(cfg *tls.Config) *transportCredential {
- return &transportCredential{
- gtc: grpccredentials.NewTLS(cfg),
- }
-}
-
-func (tc *transportCredential) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
- return tc.gtc.ClientHandshake(ctx, authority, rawConn)
-}
-
-func (tc *transportCredential) ServerHandshake(rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
- return tc.gtc.ServerHandshake(rawConn)
-}
-
-func (tc *transportCredential) Info() grpccredentials.ProtocolInfo {
- return tc.gtc.Info()
-}
-
-func (tc *transportCredential) Clone() grpccredentials.TransportCredentials {
- return &transportCredential{
- gtc: tc.gtc.Clone(),
+func (b *perRPCCredentialBundle) UpdateAuthToken(token string) {
+ if b.rc == nil {
+ return
}
+ b.rc.UpdateAuthToken(token)
}
-func (tc *transportCredential) OverrideServerName(serverNameOverride string) error {
- return tc.gtc.OverrideServerName(serverNameOverride)
+func (b *perRPCCredentialBundle) PerRPCCredentials() grpccredentials.PerRPCCredentials {
+ return b.rc
}
-// perRPCCredential implements "grpccredentials.PerRPCCredentials" interface.
+// perRPCCredential implements `grpccredentials.PerRPCCredentials` interface.
type perRPCCredential struct {
authToken string
authTokenMu sync.RWMutex
}
-func newPerRPCCredential() *perRPCCredential { return &perRPCCredential{} }
-
func (rc *perRPCCredential) RequireTransportSecurity() bool { return false }
func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
@@ -117,13 +76,6 @@ func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string)
return map[string]string{rpctypes.TokenFieldNameGRPC: authToken}, nil
}
-func (b *bundle) UpdateAuthToken(token string) {
- if b.rc == nil {
- return
- }
- b.rc.UpdateAuthToken(token)
-}
-
func (rc *perRPCCredential) UpdateAuthToken(token string) {
rc.authTokenMu.Lock()
rc.authToken = token
diff --git a/client/v3/credentials/credentials_test.go b/client/v3/credentials/credentials_test.go
new file mode 100644
index 00000000000..0db241e3c41
--- /dev/null
+++ b/client/v3/credentials/credentials_test.go
@@ -0,0 +1,37 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credentials
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+)
+
+func TestUpdateAuthToken(t *testing.T) {
+ bundle := NewPerRPCCredentialBundle()
+ ctx := context.TODO()
+
+ metadataBeforeUpdate, _ := bundle.PerRPCCredentials().GetRequestMetadata(ctx)
+ assert.Empty(t, metadataBeforeUpdate)
+
+ bundle.UpdateAuthToken("abcdefg")
+
+ metadataAfterUpdate, _ := bundle.PerRPCCredentials().GetRequestMetadata(ctx)
+ assert.Equal(t, "abcdefg", metadataAfterUpdate[rpctypes.TokenFieldNameGRPC])
+}
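
For reference, a hedged sketch of how the two pieces left after this refactor compose into gRPC dial options; the client wires this up internally during dial, so the standalone function below is only illustrative:

```go
// A minimal sketch; the client performs this wiring internally during dial.
package sketch

import (
	"crypto/tls"

	"google.golang.org/grpc"

	"go.etcd.io/etcd/client/v3/credentials"
)

func dialOptions(tlsCfg *tls.Config) ([]grpc.DialOption, credentials.PerRPCCredentialsBundle) {
	bundle := credentials.NewPerRPCCredentialBundle()
	opts := []grpc.DialOption{
		// Transport security now comes straight from grpccredentials.NewTLS.
		grpc.WithTransportCredentials(credentials.NewTransportCredential(tlsCfg)),
		// The auth token is attached per RPC and can be refreshed later via
		// bundle.UpdateAuthToken(...) after (re-)authentication.
		grpc.WithPerRPCCredentials(bundle.PerRPCCredentials()),
	}
	return opts, bundle
}
```
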
diff --git a/client/v3/ctx.go b/client/v3/ctx.go
index 56b69cf2ede..38cee6c27e4 100644
--- a/client/v3/ctx.go
+++ b/client/v3/ctx.go
@@ -17,9 +17,10 @@ package clientv3
import (
"context"
+ "google.golang.org/grpc/metadata"
+
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/api/v3/version"
- "google.golang.org/grpc/metadata"
)
// WithRequireLeader requires client requests to only succeed
diff --git a/client/v3/ctx_test.go b/client/v3/ctx_test.go
index 8ba616b76a4..2df734e4ea2 100644
--- a/client/v3/ctx_test.go
+++ b/client/v3/ctx_test.go
@@ -19,17 +19,17 @@ import (
"reflect"
"testing"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc/metadata"
+
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/api/v3/version"
- "google.golang.org/grpc/metadata"
)
func TestMetadataWithRequireLeader(t *testing.T) {
ctx := context.TODO()
_, ok := metadata.FromOutgoingContext(ctx)
- if ok {
- t.Fatal("expected no outgoing metadata ctx key")
- }
+ require.Falsef(t, ok, "expected no outgoing metadata ctx key")
// add a conflicting key with some other value
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, "invalid")
@@ -40,28 +40,20 @@ func TestMetadataWithRequireLeader(t *testing.T) {
// expect overwrites but still keep other keys
ctx = WithRequireLeader(ctx)
md, ok = metadata.FromOutgoingContext(ctx)
- if !ok {
- t.Fatal("expected outgoing metadata ctx key")
- }
- if ss := md.Get(rpctypes.MetadataRequireLeaderKey); !reflect.DeepEqual(ss, []string{rpctypes.MetadataHasLeader}) {
- t.Fatalf("unexpected metadata for %q %v", rpctypes.MetadataRequireLeaderKey, ss)
- }
- if ss := md.Get("hello"); !reflect.DeepEqual(ss, []string{"1", "2"}) {
- t.Fatalf("unexpected metadata for 'hello' %v", ss)
- }
+ require.Truef(t, ok, "expected outgoing metadata ctx key")
+ ss := md.Get(rpctypes.MetadataRequireLeaderKey)
+ require.Truef(t, reflect.DeepEqual(ss, []string{rpctypes.MetadataHasLeader}), "unexpected metadata for %q %v", rpctypes.MetadataRequireLeaderKey, ss)
+ ss = md.Get("hello")
+ require.Truef(t, reflect.DeepEqual(ss, []string{"1", "2"}), "unexpected metadata for 'hello' %v", ss)
}
func TestMetadataWithClientAPIVersion(t *testing.T) {
ctx := withVersion(WithRequireLeader(context.TODO()))
md, ok := metadata.FromOutgoingContext(ctx)
- if !ok {
- t.Fatal("expected outgoing metadata ctx key")
- }
- if ss := md.Get(rpctypes.MetadataRequireLeaderKey); !reflect.DeepEqual(ss, []string{rpctypes.MetadataHasLeader}) {
- t.Fatalf("unexpected metadata for %q %v", rpctypes.MetadataRequireLeaderKey, ss)
- }
- if ss := md.Get(rpctypes.MetadataClientAPIVersionKey); !reflect.DeepEqual(ss, []string{version.APIVersion}) {
- t.Fatalf("unexpected metadata for %q %v", rpctypes.MetadataClientAPIVersionKey, ss)
- }
+ require.Truef(t, ok, "expected outgoing metadata ctx key")
+ ss := md.Get(rpctypes.MetadataRequireLeaderKey)
+ require.Truef(t, reflect.DeepEqual(ss, []string{rpctypes.MetadataHasLeader}), "unexpected metadata for %q %v", rpctypes.MetadataRequireLeaderKey, ss)
+ ss = md.Get(rpctypes.MetadataClientAPIVersionKey)
+ require.Truef(t, reflect.DeepEqual(ss, []string{version.APIVersion}), "unexpected metadata for %q %v", rpctypes.MetadataClientAPIVersionKey, ss)
}
diff --git a/client/v3/doc.go b/client/v3/doc.go
index 645d744a5a7..bd820d3d79e 100644
--- a/client/v3/doc.go
+++ b/client/v3/doc.go
@@ -47,8 +47,8 @@
// To specify a client request timeout, wrap the context with context.WithTimeout:
//
// ctx, cancel := context.WithTimeout(context.Background(), timeout)
+// defer cancel()
// resp, err := kvc.Put(ctx, "sample_key", "sample_value")
-// cancel()
// if err != nil {
// // handle error!
// }
@@ -61,7 +61,7 @@
//
// 1. context error: canceled or deadline exceeded.
// 2. gRPC error: e.g. when clock drifts in server-side before client's context deadline exceeded.
-// See https://github.com/etcd-io/etcd/blob/main/api/v3rpc/rpctypes/error.go
+// See https://github.com/etcd-io/etcd/blob/main/api/v3rpc/rpctypes/error.go
//
// Here is the example code to handle client errors:
//
@@ -102,5 +102,4 @@
// The grpc load balancer is registered statically and is shared across etcd clients.
// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment
// variable. E.g. "ETCD_CLIENT_DEBUG=1".
-//
package clientv3
diff --git a/client/v3/experimental/recipes/barrier.go b/client/v3/experimental/recipes/barrier.go
index 7e950a3e385..daf4abefb3a 100644
--- a/client/v3/experimental/recipes/barrier.go
+++ b/client/v3/experimental/recipes/barrier.go
@@ -49,7 +49,7 @@ func (b *Barrier) Release() error {
// Wait blocks on the barrier key until it is deleted. If there is no key, Wait
// assumes Release has already been called and returns immediately.
func (b *Barrier) Wait() error {
- resp, err := b.client.Get(b.ctx, b.key, v3.WithFirstKey()...)
+ resp, err := b.client.Get(b.ctx, b.key)
if err != nil {
return err
}
@@ -60,7 +60,7 @@ func (b *Barrier) Wait() error {
_, err = WaitEvents(
b.client,
b.key,
- resp.Header.Revision,
- []mvccpb.Event_EventType{mvccpb.PUT, mvccpb.DELETE})
+ resp.Header.Revision+1,
+ []mvccpb.Event_EventType{mvccpb.DELETE})
return err
}
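
The rewritten `Wait` reads the single barrier key and, if it still exists, watches from `resp.Header.Revision+1` for DELETE events only, so the barrier's own PUT no longer wakes waiters and a deletion issued right after the Get is still observed. A usage sketch, with the endpoint and key as placeholders:

```go
// A minimal sketch of Barrier usage; endpoint and key are placeholders.
package main

import (
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	b := recipe.NewBarrier(cli, "/my-barrier")
	if err := b.Hold(); err != nil { // create the barrier key
		log.Fatal(err)
	}

	// Other processes block on the same key until it is deleted:
	//   recipe.NewBarrier(cli, "/my-barrier").Wait()

	if err := b.Release(); err != nil { // delete the key, unblocking waiters
		log.Fatal(err)
	}
}
```
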
diff --git a/client/v3/experimental/recipes/double_barrier.go b/client/v3/experimental/recipes/double_barrier.go
index eac5d4f7fdb..e0d00247f1c 100644
--- a/client/v3/experimental/recipes/double_barrier.go
+++ b/client/v3/experimental/recipes/double_barrier.go
@@ -18,7 +18,7 @@ import (
"context"
"go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
)
@@ -45,25 +45,47 @@ func NewDoubleBarrier(s *concurrency.Session, key string, count int) *DoubleBarr
// Enter waits for "count" processes to enter the barrier then returns
func (b *DoubleBarrier) Enter() error {
client := b.s.Client()
+
+ // Check the entered clients before creating the UniqueEphemeralKey,
+ // fail the request if there are already too many clients.
+ if resp1, err := b.enteredClients(client); err != nil {
+ return err
+ } else if len(resp1.Kvs) >= b.count {
+ return ErrTooManyClients
+ }
+
ek, err := newUniqueEphemeralKey(b.s, b.key+"/waiters")
if err != nil {
return err
}
b.myKey = ek
- resp, err := client.Get(b.ctx, b.key+"/waiters", clientv3.WithPrefix())
+ // Check the entered clients after creating the UniqueEphemeralKey
+ resp2, err := b.enteredClients(client)
if err != nil {
return err
}
+ if len(resp2.Kvs) >= b.count {
+ lastWaiter := resp2.Kvs[b.count-1]
+ if ek.rev > lastWaiter.CreateRevision {
+ // Delete its own key now; otherwise other processes may need to
+ // wait until the key is automatically removed when the associated
+ // lease expires.
+ //nolint:staticcheck // SA9003 disable empty branch checker to keep the comment for why we ignore error
+ if err = b.myKey.Delete(); err != nil {
+ // Nothing to do here. We have to wait for the key to be
+ // deleted when the lease expires.
+ }
+ return ErrTooManyClients
+ }
- if len(resp.Kvs) > b.count {
- return ErrTooManyClients
- }
-
- if len(resp.Kvs) == b.count {
- // unblock waiters
- _, err = client.Put(b.ctx, b.key+"/ready", "")
- return err
+ if ek.rev == lastWaiter.CreateRevision {
+ // TODO(ahrtr): we might need to compare ek.key and
+ // string(lastWaiter.Key), they should be equal.
+ // unblock all other waiters
+ _, err = client.Put(b.ctx, b.key+"/ready", "")
+ return err
+ }
}
_, err = WaitEvents(
@@ -74,6 +96,18 @@ func (b *DoubleBarrier) Enter() error {
return err
}
+// enteredClients gets all the entered clients, which are ordered by the
+// createRevision in ascending order.
+func (b *DoubleBarrier) enteredClients(cli *clientv3.Client) (*clientv3.GetResponse, error) {
+ resp, err := cli.Get(b.ctx, b.key+"/waiters", clientv3.WithPrefix(),
+ clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortAscend))
+ if err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
+
// Leave waits for "count" processes to leave the barrier then returns
func (b *DoubleBarrier) Leave() error {
client := b.s.Client()
@@ -96,7 +130,7 @@ func (b *DoubleBarrier) Leave() error {
}
isLowest := string(lowest.Key) == b.myKey.Key()
- if len(resp.Kvs) == 1 {
+ if len(resp.Kvs) == 1 && isLowest {
// this is the only node in the barrier; finish up
if _, err = client.Delete(b.ctx, b.key+"/ready"); err != nil {
return err
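
`Enter` now inspects the waiter list (sorted by create revision) both before and after creating its own ephemeral key: a latecomer whose key sorts after the count-th waiter deletes its key and returns `ErrTooManyClients`, while the exact count-th waiter publishes the `ready` key. A usage sketch; the endpoint, key, and participant count are placeholders:

```go
// A minimal sketch of DoubleBarrier usage; endpoint, key and count are placeholders.
package main

import (
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	// Rendezvous of 3 participants; a 4th Enter() now fails fast with ErrTooManyClients.
	db := recipe.NewDoubleBarrier(s, "/my-double-barrier", 3)
	if err := db.Enter(); err != nil {
		log.Fatal(err)
	}
	// ... coordinated work ...
	if err := db.Leave(); err != nil {
		log.Fatal(err)
	}
}
```
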
diff --git a/client/v3/experimental/recipes/key.go b/client/v3/experimental/recipes/key.go
index 10362c18fbe..ce90010538b 100644
--- a/client/v3/experimental/recipes/key.go
+++ b/client/v3/experimental/recipes/key.go
@@ -16,6 +16,7 @@ package recipe
import (
"context"
+ "errors"
"fmt"
"strings"
"time"
@@ -51,7 +52,7 @@ func newUniqueKV(kv v3.KV, prefix string, val string) (*RemoteKV, error) {
if err == nil {
return &RemoteKV{kv, newKey, rev, val}, nil
}
- if err != ErrKeyExists {
+ if !errors.Is(err, ErrKeyExists) {
return nil, err
}
}
@@ -155,7 +156,7 @@ func newUniqueEphemeralKV(s *concurrency.Session, prefix, val string) (ek *Ephem
for {
newKey := fmt.Sprintf("%s/%v", prefix, time.Now().UnixNano())
ek, err = newEphemeralKV(s, newKey, val)
- if err == nil || err != ErrKeyExists {
+ if err == nil || !errors.Is(err, ErrKeyExists) {
break
}
}
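
The switch from `err != ErrKeyExists` to `!errors.Is(err, ErrKeyExists)` matters once an error is wrapped anywhere along the call chain; a quick self-contained illustration (the sentinel and wrapping below are constructed only for the example):

```go
// A minimal sketch; the sentinel error and the wrapping are illustrative only.
package main

import (
	"errors"
	"fmt"
)

var ErrKeyExists = errors.New("key already exists")

func main() {
	wrapped := fmt.Errorf("put %q: %w", "/my-key", ErrKeyExists)

	fmt.Println(wrapped == ErrKeyExists)          // false: direct comparison misses wrapped errors
	fmt.Println(errors.Is(wrapped, ErrKeyExists)) // true: errors.Is walks the unwrap chain
}
```
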
diff --git a/client/v3/experimental/recipes/rwmutex.go b/client/v3/experimental/recipes/rwmutex.go
index 9f520baf48b..d961ead3f04 100644
--- a/client/v3/experimental/recipes/rwmutex.go
+++ b/client/v3/experimental/recipes/rwmutex.go
@@ -63,7 +63,7 @@ func (rwm *RWMutex) Lock() error {
}
}
-// waitOnLowest will wait on the last key with a revision < rwm.myKey.Revision with a
+// waitOnLastRev will wait on the last key with a revision < rwm.myKey.Revision with a
// given prefix. If there are no keys left to wait on, return true.
func (rwm *RWMutex) waitOnLastRev(pfx string) (bool, error) {
client := rwm.s.Client()
diff --git a/client/v3/experimental/recipes/watch.go b/client/v3/experimental/recipes/watch.go
index fd4a8717a96..92e7bc648f4 100644
--- a/client/v3/experimental/recipes/watch.go
+++ b/client/v3/experimental/recipes/watch.go
@@ -18,7 +18,7 @@ import (
"context"
"go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
// WaitEvents waits on a key until it observes the given events and returns the final one.
diff --git a/client/v3/go.mod b/client/v3/go.mod
index bf703a27975..f1781d92370 100644
--- a/client/v3/go.mod
+++ b/client/v3/go.mod
@@ -1,16 +1,46 @@
module go.etcd.io/etcd/client/v3
-go 1.16
+go 1.23
+
+toolchain go1.23.4
require (
- github.com/dustin/go-humanize v1.0.0
+ github.com/coreos/go-semver v0.3.1
+ github.com/dustin/go-humanize v1.0.1
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
- github.com/prometheus/client_golang v1.5.1
- go.etcd.io/etcd/api/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/client/pkg/v3 v3.5.0-alpha.0
- go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19
- google.golang.org/grpc v1.37.0
- sigs.k8s.io/yaml v1.2.0
+ github.com/prometheus/client_golang v1.20.5
+ github.com/stretchr/testify v1.10.0
+ go.etcd.io/etcd/api/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0
+ go.uber.org/zap v1.27.0
+ google.golang.org/grpc v1.69.2
+ sigs.k8s.io/yaml v1.4.0
+)
+
+require (
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect
+ github.com/klauspost/compress v1.17.9 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/prometheus/client_model v0.6.1 // indirect
+ github.com/prometheus/common v0.61.0 // indirect
+ github.com/prometheus/procfs v0.15.1 // indirect
+ go.opentelemetry.io/otel/metric v1.33.0 // indirect
+ go.opentelemetry.io/otel/trace v1.33.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ golang.org/x/net v0.33.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/text v0.21.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb // indirect
+ google.golang.org/protobuf v1.36.1 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
)
replace (
diff --git a/client/v3/go.sum b/client/v3/go.sum
index 6ac5bd1bd3c..8ce688b25a2 100644
--- a/client/v3/go.sum
+++ b/client/v3/go.sum
@@ -1,227 +1,122 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd/v22 v22.3.1 h1:7OO2CXWMYNDdaAzP51t4lCCZWwpQHmvPbm9sxWjm3So=
-github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA=
-github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
+github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19 h1:040c3dLNhgFQkoojH2AMpHCy4SrvhmxdU72d9GLGGE0=
-go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19/go.mod h1:aMfIlz3TDBfB0BwTCKFU1XbEmj9zevr5S5LcBr85MXw=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
+go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
+go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
+go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
+go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
+go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
+go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 h1:F5Gozwx4I1xtr/sr/8CFbb57iKi3297KFs0QDbGN60A=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb h1:B7GIB7sr443wZ/EAEl7VZjmh1V6qzkt5V+RYcUYtS1U=
+google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:E5//3O5ZIG2l71Xnt+P/CYUY8Bxs8E7WMoZ9tlcMbAY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb h1:3oy2tynMOP1QbTC0MsNNAV+Se8M2Bd0A5+x1QHyw+pI=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
+google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
+google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
+google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/client/v3/internal/endpoint/endpoint.go b/client/v3/internal/endpoint/endpoint.go
index 1d3f1a7a2c7..2c45b5e3067 100644
--- a/client/v3/internal/endpoint/endpoint.go
+++ b/client/v3/internal/endpoint/endpoint.go
@@ -25,12 +25,12 @@ import (
type CredsRequirement int
const (
- // CREDS_REQUIRE - Credentials/certificate required for thi type of connection.
- CREDS_REQUIRE CredsRequirement = iota
- // CREDS_DROP - Credentials/certificate not needed and should get ignored.
- CREDS_DROP
- // CREDS_OPTIONAL - Credentials/certificate might be used if supplied
- CREDS_OPTIONAL
+ // CredsRequire - Credentials/certificate required for this type of connection.
+ CredsRequire CredsRequirement = iota
+ // CredsDrop - Credentials/certificate not needed and should get ignored.
+ CredsDrop
+ // CredsOptional - Credentials/certificate might be used if supplied
+ CredsOptional
)
func extractHostFromHostPort(ep string) string {
@@ -41,12 +41,8 @@ func extractHostFromHostPort(ep string) string {
return host
}
-func extractHostFromPath(pathStr string) string {
- return extractHostFromHostPort(path.Base(pathStr))
-}
-
-//mustSplit2 returns the values from strings.SplitN(s, sep, 2).
-//If sep is not found, it returns ("", "", false) instead.
+// mustSplit2 returns the values from strings.SplitN(s, sep, 2).
+// If sep is not found, it returns ("", "", false) instead.
func mustSplit2(s, sep string) (string, string) {
spl := strings.SplitN(s, sep, 2)
if len(spl) < 2 {
@@ -58,20 +54,20 @@ func mustSplit2(s, sep string) (string, string) {
func schemeToCredsRequirement(schema string) CredsRequirement {
switch schema {
case "https", "unixs":
- return CREDS_REQUIRE
+ return CredsRequire
case "http":
- return CREDS_DROP
+ return CredsDrop
case "unix":
// Preserving previous behavior from:
// https://github.com/etcd-io/etcd/blob/dae29bb719dd69dc119146fc297a0628fcc1ccf8/client/v3/client.go#L212
// that likely was a bug due to missing 'fallthrough'.
// At the same time it seems legit to let the users decide whether they
// want credential control or not (and 'unixs' schema is not a standard thing).
- return CREDS_OPTIONAL
+ return CredsOptional
case "":
- return CREDS_OPTIONAL
+ return CredsOptional
default:
- return CREDS_OPTIONAL
+ return CredsOptional
}
}
@@ -81,11 +77,12 @@ func schemeToCredsRequirement(schema string) CredsRequirement {
// The main differences:
// - etcd supports unixs & https names as opposed to unix & http to
// distinguish need to configure certificates.
-// - etcd support http(s) names as opposed to tcp supported by grpc/dial method.
-// - etcd supports unix(s)://local-file naming schema
+// - etcd supports http(s) names as opposed to tcp supported by grpc/dial method.
+// - etcd supports unix(s)://local-file naming schema
// (as opposed to unix:local-file canonical name used by grpc for current dir files).
-// - Within the unix(s) schemas, the last segment (filename) without 'port' (content after colon)
-// is considered serverName - to allow local testing of cert-protected communication.
+// - Within the unix(s) schemas, the last segment (filename) without 'port' (content after colon)
+// is considered serverName - to allow local testing of cert-protected communication.
+//
// See more:
// - https://github.com/grpc/grpc-go/blob/26c143bd5f59344a4b8a1e491e0f5e18aa97abc7/internal/grpcutil/target.go#L47
// - https://golang.org/pkg/net/#Dial
@@ -95,29 +92,29 @@ func translateEndpoint(ep string) (addr string, serverName string, requireCreds
if strings.HasPrefix(ep, "unix:///") || strings.HasPrefix(ep, "unixs:///") {
// absolute path case
schema, absolutePath := mustSplit2(ep, "://")
- return "unix://" + absolutePath, extractHostFromPath(absolutePath), schemeToCredsRequirement(schema)
+ return "unix://" + absolutePath, path.Base(absolutePath), schemeToCredsRequirement(schema)
}
if strings.HasPrefix(ep, "unix://") || strings.HasPrefix(ep, "unixs://") {
// legacy etcd local path
schema, localPath := mustSplit2(ep, "://")
- return "unix:" + localPath, extractHostFromPath(localPath), schemeToCredsRequirement(schema)
+ return "unix:" + localPath, path.Base(localPath), schemeToCredsRequirement(schema)
}
schema, localPath := mustSplit2(ep, ":")
- return "unix:" + localPath, extractHostFromPath(localPath), schemeToCredsRequirement(schema)
+ return "unix:" + localPath, path.Base(localPath), schemeToCredsRequirement(schema)
}
if strings.Contains(ep, "://") {
url, err := url.Parse(ep)
if err != nil {
- return ep, extractHostFromHostPort(ep), CREDS_OPTIONAL
+ return ep, ep, CredsOptional
}
if url.Scheme == "http" || url.Scheme == "https" {
- return url.Host, url.Hostname(), schemeToCredsRequirement(url.Scheme)
+ return url.Host, url.Host, schemeToCredsRequirement(url.Scheme)
}
- return ep, url.Hostname(), schemeToCredsRequirement(url.Scheme)
+ return ep, url.Host, schemeToCredsRequirement(url.Scheme)
}
// Handles plain addresses like 10.0.0.44:437.
- return ep, extractHostFromHostPort(ep), CREDS_OPTIONAL
+ return ep, ep, CredsOptional
}
// RequiresCredentials returns whether given endpoint requires
diff --git a/client/v3/internal/endpoint/endpoint_test.go b/client/v3/internal/endpoint/endpoint_test.go
index bc6cd71399c..95e8c73954c 100644
--- a/client/v3/internal/endpoint/endpoint_test.go
+++ b/client/v3/internal/endpoint/endpoint_test.go
@@ -25,38 +25,38 @@ func Test_interpret(t *testing.T) {
wantServerName string
wantRequiresCreds CredsRequirement
}{
- {"127.0.0.1", "127.0.0.1", "127.0.0.1", CREDS_OPTIONAL},
- {"localhost", "localhost", "localhost", CREDS_OPTIONAL},
- {"localhost:8080", "localhost:8080", "localhost", CREDS_OPTIONAL},
+ {"127.0.0.1", "127.0.0.1", "127.0.0.1", CredsOptional},
+ {"localhost", "localhost", "localhost", CredsOptional},
+ {"localhost:8080", "localhost:8080", "localhost:8080", CredsOptional},
- {"unix:127.0.0.1", "unix:127.0.0.1", "127.0.0.1", CREDS_OPTIONAL},
- {"unix:127.0.0.1:8080", "unix:127.0.0.1:8080", "127.0.0.1", CREDS_OPTIONAL},
+ {"unix:127.0.0.1", "unix:127.0.0.1", "127.0.0.1", CredsOptional},
+ {"unix:127.0.0.1:8080", "unix:127.0.0.1:8080", "127.0.0.1:8080", CredsOptional},
- {"unix://127.0.0.1", "unix:127.0.0.1", "127.0.0.1", CREDS_OPTIONAL},
- {"unix://127.0.0.1:8080", "unix:127.0.0.1:8080", "127.0.0.1", CREDS_OPTIONAL},
+ {"unix://127.0.0.1", "unix:127.0.0.1", "127.0.0.1", CredsOptional},
+ {"unix://127.0.0.1:8080", "unix:127.0.0.1:8080", "127.0.0.1:8080", CredsOptional},
- {"unixs:127.0.0.1", "unix:127.0.0.1", "127.0.0.1", CREDS_REQUIRE},
- {"unixs:127.0.0.1:8080", "unix:127.0.0.1:8080", "127.0.0.1", CREDS_REQUIRE},
- {"unixs://127.0.0.1", "unix:127.0.0.1", "127.0.0.1", CREDS_REQUIRE},
- {"unixs://127.0.0.1:8080", "unix:127.0.0.1:8080", "127.0.0.1", CREDS_REQUIRE},
+ {"unixs:127.0.0.1", "unix:127.0.0.1", "127.0.0.1", CredsRequire},
+ {"unixs:127.0.0.1:8080", "unix:127.0.0.1:8080", "127.0.0.1:8080", CredsRequire},
+ {"unixs://127.0.0.1", "unix:127.0.0.1", "127.0.0.1", CredsRequire},
+ {"unixs://127.0.0.1:8080", "unix:127.0.0.1:8080", "127.0.0.1:8080", CredsRequire},
- {"http://127.0.0.1", "127.0.0.1", "127.0.0.1", CREDS_DROP},
- {"http://127.0.0.1:8080", "127.0.0.1:8080", "127.0.0.1", CREDS_DROP},
- {"https://127.0.0.1", "127.0.0.1", "127.0.0.1", CREDS_REQUIRE},
- {"https://127.0.0.1:8080", "127.0.0.1:8080", "127.0.0.1", CREDS_REQUIRE},
- {"https://localhost:20000", "localhost:20000", "localhost", CREDS_REQUIRE},
+ {"http://127.0.0.1", "127.0.0.1", "127.0.0.1", CredsDrop},
+ {"http://127.0.0.1:8080", "127.0.0.1:8080", "127.0.0.1:8080", CredsDrop},
+ {"https://127.0.0.1", "127.0.0.1", "127.0.0.1", CredsRequire},
+ {"https://127.0.0.1:8080", "127.0.0.1:8080", "127.0.0.1:8080", CredsRequire},
+ {"https://localhost:20000", "localhost:20000", "localhost:20000", CredsRequire},
- {"unix:///tmp/abc", "unix:///tmp/abc", "abc", CREDS_OPTIONAL},
- {"unixs:///tmp/abc", "unix:///tmp/abc", "abc", CREDS_REQUIRE},
- {"unix:///tmp/abc:1234", "unix:///tmp/abc:1234", "abc", CREDS_OPTIONAL},
- {"unixs:///tmp/abc:1234", "unix:///tmp/abc:1234", "abc", CREDS_REQUIRE},
- {"etcd.io", "etcd.io", "etcd.io", CREDS_OPTIONAL},
- {"http://etcd.io/abc", "etcd.io", "etcd.io", CREDS_DROP},
- {"dns://something-other", "dns://something-other", "something-other", CREDS_OPTIONAL},
+ {"unix:///tmp/abc", "unix:///tmp/abc", "abc", CredsOptional},
+ {"unixs:///tmp/abc", "unix:///tmp/abc", "abc", CredsRequire},
+ {"unix:///tmp/abc:1234", "unix:///tmp/abc:1234", "abc:1234", CredsOptional},
+ {"unixs:///tmp/abc:1234", "unix:///tmp/abc:1234", "abc:1234", CredsRequire},
+ {"etcd.io", "etcd.io", "etcd.io", CredsOptional},
+ {"http://etcd.io/abc", "etcd.io", "etcd.io", CredsDrop},
+ {"dns://something-other", "dns://something-other", "something-other", CredsOptional},
- {"http://[2001:db8:1f70::999:de8:7648:6e8]:100/", "[2001:db8:1f70::999:de8:7648:6e8]:100", "2001:db8:1f70::999:de8:7648:6e8", CREDS_DROP},
- {"[2001:db8:1f70::999:de8:7648:6e8]:100", "[2001:db8:1f70::999:de8:7648:6e8]:100", "2001:db8:1f70::999:de8:7648:6e8", CREDS_OPTIONAL},
- {"unix:unexpected-file_name#123$456", "unix:unexpected-file_name#123$456", "unexpected-file_name#123$456", CREDS_OPTIONAL},
+ {"http://[2001:db8:1f70::999:de8:7648:6e8]:100/", "[2001:db8:1f70::999:de8:7648:6e8]:100", "[2001:db8:1f70::999:de8:7648:6e8]:100", CredsDrop},
+ {"[2001:db8:1f70::999:de8:7648:6e8]:100", "[2001:db8:1f70::999:de8:7648:6e8]:100", "[2001:db8:1f70::999:de8:7648:6e8]:100", CredsOptional},
+ {"unix:unexpected-file_name#123$456", "unix:unexpected-file_name#123$456", "unexpected-file_name#123$456", CredsOptional},
}
for _, tt := range tests {
t.Run("Interpret_"+tt.endpoint, func(t *testing.T) {
diff --git a/client/v3/internal/resolver/resolver.go b/client/v3/internal/resolver/resolver.go
index 3ee3cb8e2bb..b5c9de00786 100644
--- a/client/v3/internal/resolver/resolver.go
+++ b/client/v3/internal/resolver/resolver.go
@@ -15,10 +15,11 @@
package resolver
import (
- "go.etcd.io/etcd/client/v3/internal/endpoint"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/resolver/manual"
"google.golang.org/grpc/serviceconfig"
+
+ "go.etcd.io/etcd/client/v3/internal/endpoint"
)
const (
diff --git a/client/v3/kubernetes/client.go b/client/v3/kubernetes/client.go
new file mode 100644
index 00000000000..11f2a456447
--- /dev/null
+++ b/client/v3/kubernetes/client.go
@@ -0,0 +1,136 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "context"
+ "fmt"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+)
+
+// New creates Client from config.
+// The caller is responsible for calling Close() to clean up the client.
+func New(cfg clientv3.Config) (*Client, error) {
+ c, err := clientv3.New(cfg)
+ if err != nil {
+ return nil, err
+ }
+ kc := &Client{
+ Client: c,
+ }
+ kc.Kubernetes = kc
+ return kc, nil
+}
+
+type Client struct {
+ *clientv3.Client
+ Kubernetes Interface
+}
+
+var _ Interface = (*Client)(nil)
+
+func (k Client) Get(ctx context.Context, key string, opts GetOptions) (resp GetResponse, err error) {
+ rangeResp, err := k.KV.Get(ctx, key, clientv3.WithRev(opts.Revision), clientv3.WithLimit(1))
+ if err != nil {
+ return resp, err
+ }
+ resp.Revision = rangeResp.Header.Revision
+ if len(rangeResp.Kvs) == 1 {
+ resp.KV = rangeResp.Kvs[0]
+ }
+ return resp, nil
+}
+
+func (k Client) List(ctx context.Context, prefix string, opts ListOptions) (resp ListResponse, err error) {
+ rangeStart := prefix
+ if opts.Continue != "" {
+ rangeStart = opts.Continue
+ }
+ rangeEnd := clientv3.GetPrefixRangeEnd(prefix)
+ rangeResp, err := k.KV.Get(ctx, rangeStart, clientv3.WithRange(rangeEnd), clientv3.WithLimit(opts.Limit), clientv3.WithRev(opts.Revision))
+ if err != nil {
+ return resp, err
+ }
+ resp.Kvs = rangeResp.Kvs
+ resp.Count = rangeResp.Count
+ resp.Revision = rangeResp.Header.Revision
+ return resp, nil
+}
+
+func (k Client) Count(ctx context.Context, prefix string, _ CountOptions) (int64, error) {
+ resp, err := k.KV.Get(ctx, prefix, clientv3.WithPrefix(), clientv3.WithCountOnly())
+ if err != nil {
+ return 0, err
+ }
+ return resp.Count, nil
+}
+
+func (k Client) OptimisticPut(ctx context.Context, key string, value []byte, expectedRevision int64, opts PutOptions) (resp PutResponse, err error) {
+ txn := k.KV.Txn(ctx).If(
+ clientv3.Compare(clientv3.ModRevision(key), "=", expectedRevision),
+ ).Then(
+ clientv3.OpPut(key, string(value), clientv3.WithLease(opts.LeaseID)),
+ )
+
+ if opts.GetOnFailure {
+ txn = txn.Else(clientv3.OpGet(key))
+ }
+
+ txnResp, err := txn.Commit()
+ if err != nil {
+ return resp, err
+ }
+ resp.Succeeded = txnResp.Succeeded
+ resp.Revision = txnResp.Header.Revision
+ if opts.GetOnFailure && !txnResp.Succeeded {
+ if len(txnResp.Responses) == 0 {
+ return resp, fmt.Errorf("invalid OptimisticPut response: %v", txnResp.Responses)
+ }
+ resp.KV = kvFromTxnResponse(txnResp.Responses[0])
+ }
+ return resp, nil
+}
+
+func (k Client) OptimisticDelete(ctx context.Context, key string, expectedRevision int64, opts DeleteOptions) (resp DeleteResponse, err error) {
+ txn := k.KV.Txn(ctx).If(
+ clientv3.Compare(clientv3.ModRevision(key), "=", expectedRevision),
+ ).Then(
+ clientv3.OpDelete(key),
+ )
+ if opts.GetOnFailure {
+ txn = txn.Else(clientv3.OpGet(key))
+ }
+ txnResp, err := txn.Commit()
+ if err != nil {
+ return resp, err
+ }
+ resp.Succeeded = txnResp.Succeeded
+ resp.Revision = txnResp.Header.Revision
+ if opts.GetOnFailure && !txnResp.Succeeded {
+ resp.KV = kvFromTxnResponse(txnResp.Responses[0])
+ }
+ return resp, nil
+}
+
+func kvFromTxnResponse(resp *pb.ResponseOp) *mvccpb.KeyValue {
+ getResponse := resp.GetResponseRange()
+ if len(getResponse.Kvs) == 1 {
+ return getResponse.Kvs[0]
+ }
+ return nil
+}
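Illustrative sketch (not part of this patch): one way a caller could exercise the new kubernetes client added above. The endpoint address and the /registry/... key are placeholders.

package main

import (
    "context"
    "fmt"
    "log"

    clientv3 "go.etcd.io/etcd/client/v3"
    "go.etcd.io/etcd/client/v3/kubernetes"
)

func main() {
    // Placeholder endpoint; point this at a real cluster.
    kc, err := kubernetes.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
    if err != nil {
        log.Fatal(err)
    }
    defer kc.Close()

    ctx := context.Background()

    // Create the key only if it does not exist yet (ModRevision == 0),
    // returning the conflicting key-value pair on failure.
    put, err := kc.Kubernetes.OptimisticPut(ctx, "/registry/pods/default/p1", []byte("v1"), 0,
        kubernetes.PutOptions{GetOnFailure: true})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("put succeeded:", put.Succeeded, "store revision:", put.Revision)

    // Read the key back at the latest revision.
    got, err := kc.Kubernetes.Get(ctx, "/registry/pods/default/p1", kubernetes.GetOptions{})
    if err != nil {
        log.Fatal(err)
    }
    if got.KV != nil {
        fmt.Printf("value=%s modRevision=%d\n", got.KV.Value, got.KV.ModRevision)
    }
}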
diff --git a/client/v3/kubernetes/interface.go b/client/v3/kubernetes/interface.go
new file mode 100644
index 00000000000..19b82a62927
--- /dev/null
+++ b/client/v3/kubernetes/interface.go
@@ -0,0 +1,140 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "context"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+)
+
+// Interface defines the minimal client-side interface that Kubernetes requires
+// to interact with etcd. Methods below are standard etcd operations with
+// semantics adjusted to better suit Kubernetes' needs.
+type Interface interface {
+ // Get retrieves a single key-value pair from etcd.
+ //
+ // If opts.Revision is set to a non-zero value, the key-value pair is retrieved at the specified revision.
+ // If the required revision has been compacted, the request will fail with ErrCompacted.
+ Get(ctx context.Context, key string, opts GetOptions) (GetResponse, error)
+
+ // List retrieves key-value pairs with the specified prefix, ordered lexicographically by key.
+ //
+ // If opts.Revision is non-zero, the key-value pairs are retrieved at the specified revision.
+ // If the required revision has been compacted, the request will fail with ErrCompacted.
+ // If opts.Limit is greater than zero, the number of returned key-value pairs is bounded by the limit.
+ // If opts.Continue is not empty, the listing will start from the key immediately after the one specified by Continue.
+ // The Continue value should be the last key returned in a previous paginated ListResponse.
+ List(ctx context.Context, prefix string, opts ListOptions) (ListResponse, error)
+
+ // Count returns the number of keys with the specified prefix.
+ //
+ // Currently, there are no options for the Count operation. However, a placeholder options struct (CountOptions)
+ // is provided for future extensibility in case options become necessary.
+ Count(ctx context.Context, prefix string, opts CountOptions) (int64, error)
+
+ // OptimisticPut creates or updates a key-value pair if the key has not been modified or created
+ // since the revision specified in expectedRevision.
+ //
+ // An OptimisticPut fails if the key has been modified since expectedRevision.
+ OptimisticPut(ctx context.Context, key string, value []byte, expectedRevision int64, opts PutOptions) (PutResponse, error)
+
+ // OptimisticDelete deletes the key-value pair if it hasn't been modified since the revision
+ // specified in expectedRevision.
+ //
+ // An OptimisticDelete fails if the key has been modified since expectedRevision.
+ OptimisticDelete(ctx context.Context, key string, expectedRevision int64, opts DeleteOptions) (DeleteResponse, error)
+}
+
+type GetOptions struct {
+ // Revision is the point-in-time of the etcd key-value store to use for the Get operation.
+ // If Revision is 0, it gets the latest value.
+ Revision int64
+}
+
+type ListOptions struct {
+ // Revision is the point-in-time of the etcd key-value store to use for the List operation.
+ // If Revision is 0, it gets the latest values.
+ Revision int64
+
+ // Limit is the maximum number of keys to return for a List operation.
+ // 0 means no limitation.
+ Limit int64
+
+ // Continue is a key from which to resume the List operation, excluding the given key.
+ // It should be set to the last key from a previous ListResponse when paginating.
+ Continue string
+}
+
+// CountOptions is a placeholder for potential future options for the Count operation.
+type CountOptions struct{}
+
+type PutOptions struct {
+ // GetOnFailure specifies whether to return the modified key-value pair if the Put operation fails due to a revision mismatch.
+ GetOnFailure bool
+
+ // LeaseID is the ID of a lease to associate with the key, allowing for automatic deletion after the lease expires at the end of its TTL (time to live).
+ // Deprecated: Should be replaced with TTL when Interface starts using one lease per object.
+ LeaseID clientv3.LeaseID
+}
+
+type DeleteOptions struct {
+ // GetOnFailure specifies whether to return the modified key-value pair if the Delete operation fails due to a revision mismatch.
+ GetOnFailure bool
+}
+
+type GetResponse struct {
+ // KV is the key-value pair retrieved from etcd.
+ KV *mvccpb.KeyValue
+
+ // Revision is the revision of the key-value store at the time of the Get operation.
+ Revision int64
+}
+
+type ListResponse struct {
+ // Kvs is the list of key-value pairs retrieved from etcd, ordered lexicographically by key.
+ Kvs []*mvccpb.KeyValue
+
+ // Count is the total number of keys with the specified prefix, even if not all were returned due to a limit.
+ Count int64
+
+ // Revision is the revision of the key-value store at the time of the List operation.
+ Revision int64
+}
+
+type PutResponse struct {
+ // KV is the created or updated key-value pair. If the Put operation failed and GetOnFailure was true, this
+ // will be the modified key-value pair that caused the failure.
+ KV *mvccpb.KeyValue
+
+ // Succeeded indicates whether the Put operation was successful.
+ Succeeded bool
+
+ // Revision is the revision of the key-value store after the Put operation.
+ Revision int64
+}
+
+type DeleteResponse struct {
+ // KV is the deleted key-value pair. If the Delete operation failed and GetOnFailure was true, this
+ // will be the modified key-value pair that caused the failure.
+ KV *mvccpb.KeyValue
+
+ // Succeeded indicates whether the Delete operation was successful.
+ Succeeded bool
+
+ // Revision is the revision of the key-value store after the Delete operation.
+ Revision int64
+}
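A minimal pagination sketch (not part of this patch) showing how Limit, Continue, and Revision are meant to combine, assuming a *kubernetes.Client named kc as above. It treats Continue as the inclusive range start, matching the List implementation in client.go, and therefore appends "\x00" to skip the key already returned.

package sketch

import (
    "context"

    "go.etcd.io/etcd/api/v3/mvccpb"
    "go.etcd.io/etcd/client/v3/kubernetes"
)

// listAllPaginated walks every key under prefix in pages of up to limit keys,
// pinning later pages to the revision of the first response so the whole
// listing is a consistent snapshot of the keyspace.
func listAllPaginated(ctx context.Context, kc *kubernetes.Client, prefix string, limit int64) ([]*mvccpb.KeyValue, error) {
    var all []*mvccpb.KeyValue
    opts := kubernetes.ListOptions{Limit: limit}
    for {
        resp, err := kc.Kubernetes.List(ctx, prefix, opts)
        if err != nil {
            return nil, err
        }
        all = append(all, resp.Kvs...)
        // Count is the number of keys remaining in the range at this revision;
        // if this page held them all, we are done.
        if int64(len(resp.Kvs)) == resp.Count {
            return all, nil
        }
        // Resume just after the last returned key, at the same revision.
        opts.Continue = string(resp.Kvs[len(resp.Kvs)-1].Key) + "\x00"
        opts.Revision = resp.Revision
    }
}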
diff --git a/client/v3/kv.go b/client/v3/kv.go
index 5e9fb7d4589..8d0c595d1e4 100644
--- a/client/v3/kv.go
+++ b/client/v3/kv.go
@@ -17,9 +17,10 @@ package clientv3
import (
"context"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-
"google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
)
type (
@@ -79,12 +80,15 @@ func (op OpResponse) Txn() *TxnResponse { return op.txn }
func (resp *PutResponse) OpResponse() OpResponse {
return OpResponse{put: resp}
}
+
func (resp *GetResponse) OpResponse() OpResponse {
return OpResponse{get: resp}
}
+
func (resp *DeleteResponse) OpResponse() OpResponse {
return OpResponse{del: resp}
}
+
func (resp *TxnResponse) OpResponse() OpResponse {
return OpResponse{txn: resp}
}
@@ -112,23 +116,23 @@ func NewKVFromKVClient(remote pb.KVClient, c *Client) KV {
func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
r, err := kv.Do(ctx, OpPut(key, val, opts...))
- return r.put, toErr(ctx, err)
+ return r.put, ContextError(ctx, err)
}
func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
r, err := kv.Do(ctx, OpGet(key, opts...))
- return r.get, toErr(ctx, err)
+ return r.get, ContextError(ctx, err)
}
func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
r, err := kv.Do(ctx, OpDelete(key, opts...))
- return r.del, toErr(ctx, err)
+ return r.del, ContextError(ctx, err)
}
func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...)
if err != nil {
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
return (*CompactResponse)(resp), err
}
@@ -145,10 +149,14 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
var err error
switch op.t {
case tRange:
- var resp *pb.RangeResponse
- resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...)
- if err == nil {
- return OpResponse{get: (*GetResponse)(resp)}, nil
+ if op.IsSortOptionValid() {
+ var resp *pb.RangeResponse
+ resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...)
+ if err == nil {
+ return OpResponse{get: (*GetResponse)(resp)}, nil
+ }
+ } else {
+ err = rpctypes.ErrInvalidSortOption
}
case tPut:
var resp *pb.PutResponse
@@ -173,5 +181,5 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
default:
panic("Unknown op")
}
- return OpResponse{}, toErr(ctx, err)
+ return OpResponse{}, ContextError(ctx, err)
}
diff --git a/client/v3/lease.go b/client/v3/lease.go
index bd31e6b4a5b..11b58348286 100644
--- a/client/v3/lease.go
+++ b/client/v3/lease.go
@@ -16,15 +16,16 @@ package clientv3
import (
"context"
+ "errors"
"sync"
"time"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
-
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
)
type (
@@ -198,12 +199,12 @@ func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout
keepAlives: make(map[LeaseID]*keepAlive),
remote: remote,
firstKeepAliveTimeout: keepAliveTimeout,
- lg: c.lg,
}
if l.firstKeepAliveTimeout == time.Second {
l.firstKeepAliveTimeout = defaultTTL
}
if c != nil {
+ l.lg = c.lg
l.callOpts = c.callOpts
}
reqLeaderCtx := WithRequireLeader(context.Background())
@@ -223,7 +224,7 @@ func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, err
}
return gresp, nil
}
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
@@ -232,14 +233,14 @@ func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse,
if err == nil {
return (*LeaseRevokeResponse)(resp), nil
}
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
r := toLeaseTimeToLiveRequest(id, opts...)
resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
if err != nil {
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
gresp := &LeaseTimeToLiveResponse{
ResponseHeader: resp.GetHeader(),
@@ -260,9 +261,15 @@ func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
}
return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
}
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
+// To identify the context passed to `KeepAlive`, a key/value pair is
+// attached to the context. The key is a `keepAliveCtxKey` object, and
+// the value is the pointer to the context object itself, ensuring
+// uniqueness as each context has a unique memory address.
+type keepAliveCtxKey struct{}
+
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize)
@@ -277,6 +284,10 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl
default:
}
ka, ok := l.keepAlives[id]
+
+ if ctx.Done() != nil {
+ ctx = context.WithValue(ctx, keepAliveCtxKey{}, &ctx)
+ }
if !ok {
// create fresh keep alive
ka = &keepAlive{
@@ -294,7 +305,9 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl
}
l.mu.Unlock()
- go l.keepAliveCtxCloser(ctx, id, ka.donec)
+ if ctx.Done() != nil {
+ go l.keepAliveCtxCloser(ctx, id, ka.donec)
+ }
l.firstKeepAliveOnce.Do(func() {
go l.recvKeepAliveLoop()
go l.deadlineLoop()
@@ -313,7 +326,7 @@ func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
return resp, err
}
if isHaltErr(ctx, err) {
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
}
}
@@ -345,7 +358,7 @@ func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-cha
// close channel and remove context if still associated with keep alive
for i, c := range ka.ctxs {
- if c == ctx {
+ if c.Value(keepAliveCtxKey{}) == ctx.Value(keepAliveCtxKey{}) {
close(ka.chs[i])
ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
@@ -397,26 +410,35 @@ func (l *lessor) closeRequireLeader() {
}
}
-func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
+func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (karesp *LeaseKeepAliveResponse, ferr error) {
cctx, cancel := context.WithCancel(ctx)
defer cancel()
stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
if err != nil {
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
+ defer func() {
+ if cerr := stream.CloseSend(); cerr != nil {
+ if ferr == nil {
+ ferr = ContextError(ctx, cerr)
+ }
+ return
+ }
+ }()
+
err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
if err != nil {
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
resp, rerr := stream.Recv()
if rerr != nil {
- return nil, toErr(ctx, rerr)
+ return nil, ContextError(ctx, rerr)
}
- karesp := &LeaseKeepAliveResponse{
+ karesp = &LeaseKeepAliveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
@@ -439,6 +461,9 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) {
for {
stream, err := l.resetRecv()
if err != nil {
+ l.lg.Warn("error occurred during lease keep alive loop",
+ zap.Error(err),
+ )
if canceledByCaller(l.stopCtx, err) {
return err
}
@@ -450,7 +475,7 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) {
return err
}
- if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
+ if errors.Is(ContextError(l.stopCtx, err), rpctypes.ErrNoLeader) {
l.closeRequireLeader()
}
break
@@ -535,9 +560,12 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
// deadlineLoop reaps any keep alive channels that have not received a response
// within the lease TTL
func (l *lessor) deadlineLoop() {
+ timer := time.NewTimer(time.Second)
+ defer timer.Stop()
for {
+ timer.Reset(time.Second)
select {
- case <-time.After(time.Second):
+ case <-timer.C:
case <-l.donec:
return
}
@@ -571,7 +599,9 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
for _, id := range tosend {
r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
if err := stream.Send(r); err != nil {
- // TODO do something with this error?
+ l.lg.Warn("error occurred during lease keep alive request sending",
+ zap.Error(err),
+ )
return
}
}
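A small sketch (not part of this patch), assuming cli is an already-established *clientv3.Client, of the per-caller behaviour the keepAliveCtxKey bookkeeping above preserves: two callers keep the same lease alive with independent contexts, and cancelling one context closes only that caller's channel.

package sketch

import (
    "context"
    "log"

    clientv3 "go.etcd.io/etcd/client/v3"
)

func demoKeepAlive(cli *clientv3.Client) {
    grant, err := cli.Grant(context.Background(), 10) // 10s TTL
    if err != nil {
        log.Fatal(err)
    }

    ctx1, cancel1 := context.WithCancel(context.Background())
    ctx2, cancel2 := context.WithCancel(context.Background())
    defer cancel2()

    ch1, err := cli.KeepAlive(ctx1, grant.ID)
    if err != nil {
        log.Fatal(err)
    }
    ch2, err := cli.KeepAlive(ctx2, grant.ID)
    if err != nil {
        log.Fatal(err)
    }

    // Cancelling ctx1 closes only ch1; ch2 keeps receiving keep-alive
    // responses because its context is still live.
    cancel1()
    for range ch1 {
        // drain until the client closes the channel
    }
    log.Println("first keep-alive channel closed")
    if resp := <-ch2; resp != nil {
        log.Println("second channel still alive, TTL:", resp.TTL)
    }
}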
diff --git a/client/v3/leasing/doc.go b/client/v3/leasing/doc.go
index fc97fc88268..c38af3562b7 100644
--- a/client/v3/leasing/doc.go
+++ b/client/v3/leasing/doc.go
@@ -19,28 +19,27 @@
//
// First, create a leasing KV from a clientv3.Client 'cli':
//
-// lkv, err := leasing.NewKV(cli, "leasing-prefix")
-// if err != nil {
-// // handle error
-// }
+// lkv, err := leasing.NewKV(cli, "leasing-prefix")
+// if err != nil {
+// // handle error
+// }
//
// A range request for a key "abc" tries to acquire a leasing key so it can cache the range's
// key locally. On the server, the leasing key is stored to "leasing-prefix/abc":
//
-// resp, err := lkv.Get(context.TODO(), "abc")
+// resp, err := lkv.Get(context.TODO(), "abc")
//
// Future linearized read requests using 'lkv' will be served locally for the lease's lifetime:
//
-// resp, err = lkv.Get(context.TODO(), "abc")
+// resp, err = lkv.Get(context.TODO(), "abc")
//
// If another leasing client writes to a leased key, then the owner relinquishes its exclusive
// access, permitting the writer to modify the key:
//
-// lkv2, err := leasing.NewKV(cli, "leasing-prefix")
-// if err != nil {
-// // handle error
-// }
-// lkv2.Put(context.TODO(), "abc", "456")
-// resp, err = lkv.Get("abc")
-//
+// lkv2, err := leasing.NewKV(cli, "leasing-prefix")
+// if err != nil {
+// // handle error
+// }
+// lkv2.Put(context.TODO(), "abc", "456")
+// resp, err = lkv.Get("abc")
package leasing
diff --git a/client/v3/leasing/kv.go b/client/v3/leasing/kv.go
index f0cded20fea..c14af78d629 100644
--- a/client/v3/leasing/kv.go
+++ b/client/v3/leasing/kv.go
@@ -16,18 +16,19 @@ package leasing
import (
"context"
+ "errors"
"strings"
"sync"
"time"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
v3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
)
type leasingKV struct {
@@ -282,7 +283,8 @@ func (lkv *leasingKV) acquire(ctx context.Context, key string, op v3.Op) (*v3.Tx
return resp, nil
}
// retry if transient error
- if _, ok := err.(rpctypes.EtcdError); ok {
+ var serverErr rpctypes.EtcdError
+ if errors.As(err, &serverErr) {
return nil, err
}
if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable {
diff --git a/client/v3/logger.go b/client/v3/logger.go
index 0c019df6c52..300363cd25b 100644
--- a/client/v3/logger.go
+++ b/client/v3/logger.go
@@ -18,24 +18,23 @@ import (
"log"
"os"
- "go.etcd.io/etcd/client/pkg/v3/logutil"
- "go.uber.org/zap"
"go.uber.org/zap/zapcore"
+ "go.uber.org/zap/zapgrpc"
"google.golang.org/grpc/grpclog"
+
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
)
func init() {
// We override grpc logger only when the environment variable is set
// in order to not interfere by default with user's code or other libraries.
if os.Getenv("ETCD_CLIENT_DEBUG") != "" {
- // We don't use grpc_zap.ReplaceGrpcLoggerV2(lg) to not push (wide) set
- // of grpc-ecosystem/go-grpc-middleware dependencies on etcd-client users.
- lg, err := logutil.NewGRPCLoggerV2(createDefaultZapLoggerConfig())
+ lg, err := logutil.CreateDefaultZapLogger(etcdClientDebugLevel())
if err != nil {
panic(err)
}
-
- grpclog.SetLoggerV2(lg)
+ lg = lg.Named("etcd-client")
+ grpclog.SetLoggerV2(zapgrpc.NewLogger(lg))
}
}
@@ -53,23 +52,9 @@ func etcdClientDebugLevel() zapcore.Level {
return zapcore.InfoLevel
}
var l zapcore.Level
- if err := l.Set(envLevel); err == nil {
- log.Printf("Deprecated env ETCD_CLIENT_DEBUG value. Using default level: 'info'")
+ if err := l.Set(envLevel); err != nil {
+ log.Print("Invalid value for environment variable 'ETCD_CLIENT_DEBUG'. Using default level: 'info'")
return zapcore.InfoLevel
}
return l
}
-
-func createDefaultZapLoggerConfig() zap.Config {
- lcfg := logutil.DefaultZapLoggerConfig
- lcfg.Level = zap.NewAtomicLevelAt(etcdClientDebugLevel())
- return lcfg
-}
-
-func createDefaultZapLogger() (*zap.Logger, error) {
- c, err := createDefaultZapLoggerConfig().Build()
- if err != nil {
- return nil, err
- }
- return c.Named("etcd-client"), nil
-}
diff --git a/client/v3/main_test.go b/client/v3/main_test.go
index 4007d77bc5a..998b73ae53f 100644
--- a/client/v3/main_test.go
+++ b/client/v3/main_test.go
@@ -28,7 +28,7 @@ const (
func exampleEndpoints() []string { return nil }
-func forUnitTestsRunInMockedContext(mocking func(), example func()) {
+func forUnitTestsRunInMockedContext(mocking func(), _example func()) {
mocking()
// TODO: Call 'example' when mocking() provides realistic mocking of transport.
diff --git a/client/v3/maintenance.go b/client/v3/maintenance.go
index dbea530e66a..00aaacd15fd 100644
--- a/client/v3/maintenance.go
+++ b/client/v3/maintenance.go
@@ -16,12 +16,14 @@ package clientv3
import (
"context"
+ "errors"
"fmt"
"io"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.uber.org/zap"
"google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)
type (
@@ -31,6 +33,15 @@ type (
StatusResponse pb.StatusResponse
HashKVResponse pb.HashKVResponse
MoveLeaderResponse pb.MoveLeaderResponse
+ DowngradeResponse pb.DowngradeResponse
+
+ DowngradeAction pb.DowngradeRequest_DowngradeAction
+)
+
+const (
+ DowngradeValidate = DowngradeAction(pb.DowngradeRequest_VALIDATE)
+ DowngradeEnable = DowngradeAction(pb.DowngradeRequest_ENABLE)
+ DowngradeCancel = DowngradeAction(pb.DowngradeRequest_CANCEL)
)
type Maintenance interface {
@@ -57,14 +68,40 @@ type Maintenance interface {
// is non-zero, the hash is computed on all keys at or below the given revision.
HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
+ // SnapshotWithVersion returns a reader for a point-in-time snapshot and the version of etcd that created it.
+ // If the context "ctx" is canceled or timed out, reading from returned
+ // "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded).
+ SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error)
+
// Snapshot provides a reader for a point-in-time snapshot of etcd.
// If the context "ctx" is canceled or timed out, reading from returned
// "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded).
+ // Deprecated: use SnapshotWithVersion instead.
Snapshot(ctx context.Context) (io.ReadCloser, error)
// MoveLeader requests current leader to transfer its leadership to the transferee.
// Request must be made to the leader.
MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
+
+ // Downgrade requests a downgrade, validates its feasibility, or cancels an ongoing
+ // downgrade of the cluster version.
+ // Supported since etcd 3.5.
+ Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error)
+}
+
+// SnapshotResponse is the aggregated response from the snapshot stream.
+// The consumer is responsible for closing the stream by calling .Snapshot.Close()
+type SnapshotResponse struct {
+ // Header is the first header in the snapshot stream, has the current key-value store information
+ // and indicates the point in time of the snapshot.
+ Header *pb.ResponseHeader
+ // Snapshot exposes the ReadCloser interface for the data stored in the Blob field of the snapshot stream.
+ Snapshot io.ReadCloser
+ // Version is the local version of the server that created the snapshot.
+ // In a cluster whose members run different binary versions, each member can return a different result.
+ // It indicates which etcd server version should be used when restoring the snapshot.
+ // Supported on etcd >= v3.6.
+ Version string
}
type maintenance struct {
@@ -80,21 +117,10 @@ func NewMaintenance(c *Client) Maintenance {
dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
conn, err := c.Dial(endpoint)
if err != nil {
- return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err)
+ return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %w", endpoint, err)
}
- //get token with established connection
- dctx := c.ctx
- cancel := func() {}
- if c.cfg.DialTimeout > 0 {
- dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
- }
- err = c.getToken(dctx)
- cancel()
- if err != nil {
- return nil, nil, fmt.Errorf("failed to getToken from endpoint %s with maintenance client: %v", endpoint, err)
- }
- cancel = func() { conn.Close() }
+ cancel := func() { conn.Close() }
return RetryMaintenanceClient(c, conn), cancel, nil
},
remote: RetryMaintenanceClient(c, c.conn),
@@ -107,7 +133,6 @@ func NewMaintenance(c *Client) Maintenance {
func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
api := &maintenance{
- lg: c.lg,
dial: func(string) (pb.MaintenanceClient, func(), error) {
return remote, func() {}, nil
},
@@ -115,6 +140,7 @@ func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client)
}
if c != nil {
api.callOpts = c.callOpts
+ api.lg = c.lg
}
return api
}
@@ -129,7 +155,7 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
if err == nil {
return (*AlarmResponse)(resp), nil
}
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
@@ -142,13 +168,13 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR
if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
ar, err := m.AlarmList(ctx)
if err != nil {
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
ret := AlarmResponse{}
for _, am := range ar.Alarms {
dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
if derr != nil {
- return nil, toErr(ctx, derr)
+ return nil, ContextError(ctx, derr)
}
ret.Alarms = append(ret.Alarms, dresp.Alarms...)
}
@@ -159,18 +185,18 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR
if err == nil {
return (*AlarmResponse)(resp), nil
}
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
remote, cancel, err := m.dial(endpoint)
if err != nil {
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
defer cancel()
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)
if err != nil {
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
return (*DefragmentResponse)(resp), nil
}
@@ -178,12 +204,12 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm
func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
remote, cancel, err := m.dial(endpoint)
if err != nil {
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
defer cancel()
resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)
if err != nil {
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
return (*StatusResponse)(resp), nil
}
@@ -191,46 +217,78 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo
func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
remote, cancel, err := m.dial(endpoint)
if err != nil {
-
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
defer cancel()
resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)
if err != nil {
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
return (*HashKVResponse)(resp), nil
}
-func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
+func (m *maintenance) SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error) {
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)
if err != nil {
- return nil, toErr(ctx, err)
+ return nil, ContextError(ctx, err)
}
m.lg.Info("opened snapshot stream; downloading")
pr, pw := io.Pipe()
+
+ resp, err := ss.Recv()
+ if err != nil {
+ m.logAndCloseWithError(err, pw)
+ return nil, err
+ }
go func() {
+ // Saving response is blocking
+ err := m.save(resp, pw)
+ if err != nil {
+ m.logAndCloseWithError(err, pw)
+ return
+ }
for {
- resp, err := ss.Recv()
+ sresp, err := ss.Recv()
+ if err != nil {
+ m.logAndCloseWithError(err, pw)
+ return
+ }
+
+ err = m.save(sresp, pw)
if err != nil {
- switch err {
- case io.EOF:
- m.lg.Info("completed snapshot read; closing")
- default:
- m.lg.Warn("failed to receive from snapshot stream; closing", zap.Error(err))
- }
- pw.CloseWithError(err)
+ m.logAndCloseWithError(err, pw)
return
}
+ }
+ }()
+
+ return &SnapshotResponse{
+ Header: resp.GetHeader(),
+ Snapshot: &snapshotReadCloser{ctx: ctx, ReadCloser: pr},
+ Version: resp.GetVersion(),
+ }, nil
+}
+
+func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
+ ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)
+ if err != nil {
+ return nil, ContextError(ctx, err)
+ }
- // can "resp == nil && err == nil"
- // before we receive snapshot SHA digest?
- // No, server sends EOF with an empty response
- // after it sends SHA digest at the end
+ m.lg.Info("opened snapshot stream; downloading")
+ pr, pw := io.Pipe()
- if _, werr := pw.Write(resp.Blob); werr != nil {
- pw.CloseWithError(werr)
+ go func() {
+ for {
+ resp, err := ss.Recv()
+ if err != nil {
+ m.logAndCloseWithError(err, pw)
+ return
+ }
+ err = m.save(resp, pw)
+ if err != nil {
+ m.logAndCloseWithError(err, pw)
return
}
}
@@ -238,6 +296,28 @@ func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
}
+func (m *maintenance) logAndCloseWithError(err error, pw *io.PipeWriter) {
+ switch {
+ case errors.Is(err, io.EOF):
+ m.lg.Info("completed snapshot read; closing")
+ default:
+ m.lg.Warn("failed to receive from snapshot stream; closing", zap.Error(err))
+ }
+ pw.CloseWithError(err)
+}
+
+func (m *maintenance) save(resp *pb.SnapshotResponse, pw *io.PipeWriter) error {
+ // can "resp == nil && err == nil"
+ // before we receive snapshot SHA digest?
+ // No, server sends EOF with an empty response
+ // after it sends SHA digest at the end
+
+ if _, werr := pw.Write(resp.Blob); werr != nil {
+ return werr
+ }
+ return nil
+}
+
type snapshotReadCloser struct {
ctx context.Context
io.ReadCloser
@@ -245,10 +325,26 @@ type snapshotReadCloser struct {
func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {
n, err = rc.ReadCloser.Read(p)
- return n, toErr(rc.ctx, err)
+ return n, ContextError(rc.ctx, err)
}
func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)
- return (*MoveLeaderResponse)(resp), toErr(ctx, err)
+ return (*MoveLeaderResponse)(resp), ContextError(ctx, err)
+}
+
+func (m *maintenance) Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error) {
+ var actionType pb.DowngradeRequest_DowngradeAction
+ switch action {
+ case DowngradeValidate:
+ actionType = pb.DowngradeRequest_VALIDATE
+ case DowngradeEnable:
+ actionType = pb.DowngradeRequest_ENABLE
+ case DowngradeCancel:
+ actionType = pb.DowngradeRequest_CANCEL
+ default:
+ return nil, errors.New("etcdclient: unknown downgrade action")
+ }
+ resp, err := m.remote.Downgrade(ctx, &pb.DowngradeRequest{Action: actionType, Version: version}, m.callOpts...)
+ return (*DowngradeResponse)(resp), ContextError(ctx, err)
}
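Sketch (not part of this patch) of the new SnapshotWithVersion call, assuming cli is an established *clientv3.Client and path is a placeholder file name: stream the snapshot to disk and return the server version that produced it.

package sketch

import (
    "context"
    "io"
    "os"

    clientv3 "go.etcd.io/etcd/client/v3"
)

// saveSnapshot streams a snapshot to path and returns the version of the
// etcd server that produced it (the field is only populated by etcd >= v3.6).
func saveSnapshot(ctx context.Context, cli *clientv3.Client, path string) (string, error) {
    resp, err := cli.SnapshotWithVersion(ctx)
    if err != nil {
        return "", err
    }
    defer resp.Snapshot.Close()

    f, err := os.Create(path)
    if err != nil {
        return "", err
    }
    defer f.Close()

    if _, err := io.Copy(f, resp.Snapshot); err != nil {
        return "", err
    }
    return resp.Version, nil
}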
diff --git a/client/v3/mirror/syncer.go b/client/v3/mirror/syncer.go
index c25ee9b0c55..55775dc5135 100644
--- a/client/v3/mirror/syncer.go
+++ b/client/v3/mirror/syncer.go
@@ -18,7 +18,7 @@ package mirror
import (
"context"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
const (
@@ -52,7 +52,13 @@ func (s *syncer) SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, cha
// if rev is not specified, we will choose the most recent revision.
if s.rev == 0 {
- resp, err := s.c.Get(ctx, "foo")
+ // If len(s.prefix) == 0, we check an arbitrary key ("foo") to fetch the most
+ // recent revision; otherwise we use the provided prefix.
+ checkPath := "foo"
+ if len(s.prefix) != 0 {
+ checkPath = s.prefix
+ }
+ resp, err := s.c.Get(ctx, checkPath)
if err != nil {
errchan <- err
close(respchan)
@@ -68,7 +74,10 @@ func (s *syncer) SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, cha
var key string
- opts := []clientv3.OpOption{clientv3.WithLimit(batchLimit), clientv3.WithRev(s.rev)}
+ opts := []clientv3.OpOption{
+ clientv3.WithLimit(batchLimit), clientv3.WithRev(s.rev),
+ clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend),
+ }
if len(s.prefix) == 0 {
// If len(s.prefix) == 0, we will sync the entire key-value space.
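A short usage sketch (not part of this patch) of the syncer this hunk adjusts, assuming cli is an established *clientv3.Client and the prefix is a placeholder; with this change SyncBase returns keys in ascending key order and, when a prefix is set, derives the base revision from a Get on that prefix instead of on the literal key "foo".

package sketch

import (
    "context"
    "fmt"

    clientv3 "go.etcd.io/etcd/client/v3"
    "go.etcd.io/etcd/client/v3/mirror"
)

// mirrorPrefix dumps the current contents under prefix, then follows updates.
func mirrorPrefix(ctx context.Context, cli *clientv3.Client, prefix string) error {
    s := mirror.NewSyncer(cli, prefix, 0)

    respCh, errCh := s.SyncBase(ctx)
    for resp := range respCh {
        for _, kv := range resp.Kvs {
            fmt.Printf("%s=%s\n", kv.Key, kv.Value)
        }
    }
    if err := <-errCh; err != nil {
        return err
    }

    // Follow subsequent changes under the prefix.
    for wresp := range s.SyncUpdates(ctx) {
        for _, ev := range wresp.Events {
            fmt.Println(ev.Type, string(ev.Kv.Key))
        }
    }
    return nil
}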
diff --git a/client/v3/mock/mockserver/mockserver.go b/client/v3/mock/mockserver/mockserver.go
index 21a8b013b40..0467cfd5df5 100644
--- a/client/v3/mock/mockserver/mockserver.go
+++ b/client/v3/mock/mockserver/mockserver.go
@@ -17,15 +17,14 @@ package mockserver
import (
"context"
"fmt"
- "io/ioutil"
"net"
"os"
"sync"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-
"google.golang.org/grpc"
"google.golang.org/grpc/resolver"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)
// MockServer provides a mocked out grpc server of the etcdserver interface.
@@ -33,7 +32,7 @@ type MockServer struct {
ln net.Listener
Network string
Address string
- GrpcServer *grpc.Server
+ GRPCServer *grpc.Server
}
func (ms *MockServer) ResolverAddress() resolver.Address {
@@ -64,7 +63,7 @@ func StartMockServers(count int) (ms *MockServers, err error) {
func StartMockServersOnNetwork(count int, network string) (ms *MockServers, err error) {
switch network {
case "tcp":
- return startMockServersTcp(count)
+ return startMockServersTCP(count)
case "unix":
return startMockServersUnix(count)
default:
@@ -72,7 +71,7 @@ func StartMockServersOnNetwork(count int, network string) (ms *MockServers, err
}
}
-func startMockServersTcp(count int) (ms *MockServers, err error) {
+func startMockServersTCP(count int) (ms *MockServers, err error) {
addrs := make([]string, 0, count)
for i := 0; i < count; i++ {
addrs = append(addrs, "localhost:0")
@@ -84,14 +83,14 @@ func startMockServersUnix(count int) (ms *MockServers, err error) {
dir := os.TempDir()
addrs := make([]string, 0, count)
for i := 0; i < count; i++ {
- f, err := ioutil.TempFile(dir, "etcd-unix-so-")
+ f, err := os.CreateTemp(dir, "etcd-unix-so-")
if err != nil {
- return nil, fmt.Errorf("failed to allocate temp file for unix socket: %v", err)
+ return nil, fmt.Errorf("failed to allocate temp file for unix socket: %w", err)
}
fn := f.Name()
err = os.Remove(fn)
if err != nil {
- return nil, fmt.Errorf("failed to remove temp file before creating unix socket: %v", err)
+ return nil, fmt.Errorf("failed to remove temp file before creating unix socket: %w", err)
}
addrs = append(addrs, fn)
}
@@ -111,7 +110,7 @@ func startMockServers(network string, addrs []string) (ms *MockServers, err erro
for idx, addr := range addrs {
ln, err := net.Listen(network, addr)
if err != nil {
- return nil, fmt.Errorf("failed to listen %v", err)
+ return nil, fmt.Errorf("failed to listen %w", err)
}
ms.Servers[idx] = &MockServer{ln: ln, Network: network, Address: ln.Addr().String()}
ms.StartAt(idx)
@@ -127,18 +126,19 @@ func (ms *MockServers) StartAt(idx int) (err error) {
if ms.Servers[idx].ln == nil {
ms.Servers[idx].ln, err = net.Listen(ms.Servers[idx].Network, ms.Servers[idx].Address)
if err != nil {
- return fmt.Errorf("failed to listen %v", err)
+ return fmt.Errorf("failed to listen %w", err)
}
}
svr := grpc.NewServer()
pb.RegisterKVServer(svr, &mockKVServer{})
- ms.Servers[idx].GrpcServer = svr
+ pb.RegisterLeaseServer(svr, &mockLeaseServer{})
+ ms.Servers[idx].GRPCServer = svr
ms.wg.Add(1)
go func(svr *grpc.Server, l net.Listener) {
svr.Serve(l)
- }(ms.Servers[idx].GrpcServer, ms.Servers[idx].ln)
+ }(ms.Servers[idx].GRPCServer, ms.Servers[idx].ln)
return nil
}
@@ -151,8 +151,8 @@ func (ms *MockServers) StopAt(idx int) {
return
}
- ms.Servers[idx].GrpcServer.Stop()
- ms.Servers[idx].GrpcServer = nil
+ ms.Servers[idx].GRPCServer.Stop()
+ ms.Servers[idx].GRPCServer = nil
ms.Servers[idx].ln = nil
ms.wg.Done()
}
@@ -186,3 +186,29 @@ func (m *mockKVServer) Txn(context.Context, *pb.TxnRequest) (*pb.TxnResponse, er
func (m *mockKVServer) Compact(context.Context, *pb.CompactionRequest) (*pb.CompactionResponse, error) {
return &pb.CompactionResponse{}, nil
}
+
+func (m *mockKVServer) Lease(context.Context, *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ return &pb.LeaseGrantResponse{}, nil
+}
+
+type mockLeaseServer struct{}
+
+func (s mockLeaseServer) LeaseGrant(context.Context, *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ return &pb.LeaseGrantResponse{}, nil
+}
+
+func (s *mockLeaseServer) LeaseRevoke(context.Context, *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ return &pb.LeaseRevokeResponse{}, nil
+}
+
+func (s *mockLeaseServer) LeaseKeepAlive(pb.Lease_LeaseKeepAliveServer) error {
+ return nil
+}
+
+func (s *mockLeaseServer) LeaseTimeToLive(context.Context, *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
+ return &pb.LeaseTimeToLiveResponse{}, nil
+}
+
+func (s *mockLeaseServer) LeaseLeases(context.Context, *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
+ return &pb.LeaseLeasesResponse{}, nil
+}
diff --git a/client/v3/namespace/doc.go b/client/v3/namespace/doc.go
index 01849b150ab..689e0e0bb38 100644
--- a/client/v3/namespace/doc.go
+++ b/client/v3/namespace/doc.go
@@ -39,5 +39,4 @@
// resp, _ = cli.Get(context.TODO(), "abc")
// fmt.Printf("%s\n", resp.Kvs[0].Value)
// // Output: 456
-//
package namespace
diff --git a/client/v3/namespace/kv.go b/client/v3/namespace/kv.go
index f745225cacd..aa338d5356d 100644
--- a/client/v3/namespace/kv.go
+++ b/client/v3/namespace/kv.go
@@ -19,7 +19,7 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
type kvPrefix struct {
@@ -51,7 +51,11 @@ func (kv *kvPrefix) Get(ctx context.Context, key string, opts ...clientv3.OpOpti
if len(key) == 0 && !(clientv3.IsOptsWithFromKey(opts) || clientv3.IsOptsWithPrefix(opts)) {
return nil, rpctypes.ErrEmptyKey
}
- r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpGet(key, opts...)))
+ getOp := clientv3.OpGet(key, opts...)
+ if !getOp.IsSortOptionValid() {
+ return nil, rpctypes.ErrInvalidSortOption
+ }
+ r, err := kv.KV.Do(ctx, kv.prefixOp(getOp))
if err != nil {
return nil, err
}
diff --git a/client/v3/namespace/lease.go b/client/v3/namespace/lease.go
index f274de5e998..b80b530467c 100644
--- a/client/v3/namespace/lease.go
+++ b/client/v3/namespace/lease.go
@@ -18,7 +18,7 @@ import (
"bytes"
"context"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
type leasePrefix struct {
diff --git a/client/v3/namespace/watch.go b/client/v3/namespace/watch.go
index 12362856d09..edf1af87b58 100644
--- a/client/v3/namespace/watch.go
+++ b/client/v3/namespace/watch.go
@@ -18,7 +18,7 @@ import (
"context"
"sync"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
type watcherPrefix struct {
diff --git a/client/v3/naming/doc.go b/client/v3/naming/doc.go
index 55130fa5b14..f2050a6aa6c 100644
--- a/client/v3/naming/doc.go
+++ b/client/v3/naming/doc.go
@@ -13,10 +13,10 @@
// limitations under the License.
// Package naming provides:
-// - subpackage endpoints: an abstraction layer to store and read endpoints
-// information from etcd.
-// - subpackage resolver: an etcd-backed gRPC resolver for discovering gRPC
-// services based on the endpoints configuration
+// - subpackage endpoints: an abstraction layer to store and read endpoints
+// information from etcd.
+// - subpackage resolver: an etcd-backed gRPC resolver for discovering gRPC
+// services based on the endpoints configuration
//
// To use, first import the packages:
//
@@ -55,5 +55,4 @@
// em := endpoints.NewManager(c, service)
// return em.AddEndpoint(c.Ctx(), service+"/"+addr, endpoints.Endpoint{Addr:addr}, clientv3.WithLease(lid));
// }
-//
package naming
diff --git a/client/v3/naming/endpoints/endpoints.go b/client/v3/naming/endpoints/endpoints.go
index 72bd2278749..bcc96b14f16 100644
--- a/client/v3/naming/endpoints/endpoints.go
+++ b/client/v3/naming/endpoints/endpoints.go
@@ -1,3 +1,17 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package endpoints
import (
@@ -18,7 +32,7 @@ type Endpoint struct {
// Metadata is the information associated with Addr, which may be used
// to make load balancing decision.
// Since etcd 3.1
- Metadata interface{}
+ Metadata any
}
type Operation uint8
diff --git a/client/v3/naming/endpoints/endpoints_impl.go b/client/v3/naming/endpoints/endpoints_impl.go
index 37f04803e1e..5193d986751 100644
--- a/client/v3/naming/endpoints/endpoints_impl.go
+++ b/client/v3/naming/endpoints/endpoints_impl.go
@@ -1,3 +1,17 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package endpoints
// TODO: The API is not yet implemented.
@@ -8,12 +22,12 @@ import (
"errors"
"strings"
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/naming/endpoints/internal"
-
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/client/v3/naming/endpoints/internal"
)
type endpointManager struct {
@@ -78,7 +92,8 @@ func (m *endpointManager) DeleteEndpoint(ctx context.Context, key string, opts .
}
func (m *endpointManager) NewWatchChannel(ctx context.Context) (WatchChannel, error) {
- resp, err := m.client.Get(ctx, m.target, clientv3.WithPrefix(), clientv3.WithSerializable())
+ key := m.target + "/"
+ resp, err := m.client.Get(ctx, key, clientv3.WithPrefix(), clientv3.WithSerializable())
if err != nil {
return nil, err
}
@@ -112,7 +127,8 @@ func (m *endpointManager) watch(ctx context.Context, rev int64, upch chan []*Upd
lg := m.client.GetLogger()
opts := []clientv3.OpOption{clientv3.WithRev(rev), clientv3.WithPrefix()}
- wch := m.client.Watch(ctx, m.target, opts...)
+ key := m.target + "/"
+ wch := m.client.Watch(ctx, key, opts...)
for {
select {
case <-ctx.Done():
@@ -157,7 +173,8 @@ func (m *endpointManager) watch(ctx context.Context, rev int64, upch chan []*Upd
}
func (m *endpointManager) List(ctx context.Context) (Key2EndpointMap, error) {
- resp, err := m.client.Get(ctx, m.target, clientv3.WithPrefix(), clientv3.WithSerializable())
+ key := m.target + "/"
+ resp, err := m.client.Get(ctx, key, clientv3.WithPrefix(), clientv3.WithSerializable())
if err != nil {
return nil, err
}
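
The hunks above scope every Get and Watch to the prefix `<target>/` (note the trailing slash), so a manager for `foo/bar.my-service` no longer matches a sibling such as `foo/bar.my-service-v2`. A small sketch of reading addresses back under that convention, assuming a connected client; the helper name is ours, not part of the package.

```go
package endpointsdemo

import (
	"context"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// listServiceAddrs mirrors the prefix handling above: the range is scoped to
// "<target>/" so that only keys registered for this exact service are returned.
func listServiceAddrs(ctx context.Context, cli *clientv3.Client, target string) ([]string, error) {
	key := target + "/"
	resp, err := cli.Get(ctx, key, clientv3.WithPrefix(), clientv3.WithSerializable())
	if err != nil {
		return nil, err
	}
	addrs := make([]string, 0, len(resp.Kvs))
	for _, kv := range resp.Kvs {
		// Keys have the form "<target>/<addr>"; strip the prefix to recover the address.
		addrs = append(addrs, string(kv.Key)[len(key):])
	}
	return addrs, nil
}
```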
diff --git a/client/v3/naming/endpoints/internal/update.go b/client/v3/naming/endpoints/internal/update.go
index 71aa83fed4c..365eb854cd1 100644
--- a/client/v3/naming/endpoints/internal/update.go
+++ b/client/v3/naming/endpoints/internal/update.go
@@ -1,3 +1,17 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package internal
// Operation describes action performed on endpoint (addition vs deletion).
@@ -34,5 +48,5 @@ type Update struct {
// Metadata is the updated metadata. It is nil if there is no metadata update.
// Metadata is not required for a custom naming implementation.
// Since etcd 3.1.
- Metadata interface{}
+ Metadata any
}
diff --git a/client/v3/naming/resolver/resolver.go b/client/v3/naming/resolver/resolver.go
index a44f61ae04c..f42b2b0b6e7 100644
--- a/client/v3/naming/resolver/resolver.go
+++ b/client/v3/naming/resolver/resolver.go
@@ -1,15 +1,30 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package resolver
import (
"context"
+ "strings"
"sync"
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/naming/endpoints"
-
"google.golang.org/grpc/codes"
gresolver "google.golang.org/grpc/resolver"
"google.golang.org/grpc/status"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/client/v3/naming/endpoints"
)
type builder struct {
@@ -17,9 +32,15 @@ type builder struct {
}
func (b builder) Build(target gresolver.Target, cc gresolver.ClientConn, opts gresolver.BuildOptions) (gresolver.Resolver, error) {
+ // Refer to https://github.com/grpc/grpc-go/blob/16d3df80f029f57cff5458f1d6da6aedbc23545d/clientconn.go#L1587-L1611
+ endpoint := target.URL.Path
+ if endpoint == "" {
+ endpoint = target.URL.Opaque
+ }
+ endpoint = strings.TrimPrefix(endpoint, "/")
r := &resolver{
c: b.c,
- target: target.Endpoint,
+ target: endpoint,
cc: cc,
}
r.ctx, r.cancel = context.WithCancel(context.Background())
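
With the URL handling above, both `etcd:///foo/bar.my-service` and `etcd:foo/bar.my-service` resolve to the key prefix `foo/bar.my-service`. A hedged sketch of dialing through the resolver; the endpoint address and service name are illustrative.

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/naming/resolver"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	etcdResolver, err := resolver.NewBuilder(cli)
	if err != nil {
		log.Fatal(err)
	}

	// The target's path (or opaque part) becomes the etcd key prefix, so this
	// discovers endpoints registered under "foo/bar.my-service/...".
	conn, err := grpc.Dial("etcd:///foo/bar.my-service",
		grpc.WithResolvers(etcdResolver),
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```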
diff --git a/client/v3/op.go b/client/v3/op.go
index bd0f1f2f213..e19f2e5e44f 100644
--- a/client/v3/op.go
+++ b/client/v3/op.go
@@ -77,6 +77,9 @@ type Op struct {
cmps []Cmp
thenOps []Op
elseOps []Op
+
+ isOptsWithFromKey bool
+ isOptsWithPrefix bool
}
// accessors / mutators
@@ -121,6 +124,10 @@ func (op Op) IsKeysOnly() bool { return op.keysOnly }
// IsCountOnly returns whether countOnly is set.
func (op Op) IsCountOnly() bool { return op.countOnly }
+func (op Op) IsOptsWithFromKey() bool { return op.isOptsWithFromKey }
+
+func (op Op) IsOptsWithPrefix() bool { return op.isOptsWithPrefix }
+
// MinModRev returns the operation's minimum modify revision.
func (op Op) MinModRev() int64 { return op.minModRev }
@@ -216,6 +223,10 @@ func (op Op) isWrite() bool {
return op.t != tRange
}
+func NewOp() *Op {
+ return &Op{key: []byte("")}
+}
+
// OpGet returns "get" operation based on given key and operation options.
func OpGet(key string, opts ...OpOption) Op {
// WithPrefix and WithFromKey are not supported together
@@ -382,6 +393,7 @@ func getPrefix(key []byte) []byte {
// can return 'foo1', 'foo2', and so on.
func WithPrefix() OpOption {
return func(op *Op) {
+ op.isOptsWithPrefix = true
if len(op.key) == 0 {
op.key, op.end = []byte{0}, []byte{0}
return
@@ -406,12 +418,19 @@ func WithFromKey() OpOption {
op.key = []byte{0}
}
op.end = []byte("\x00")
+ op.isOptsWithFromKey = true
}
}
-// WithSerializable makes 'Get' request serializable. By default,
-// it's linearizable. Serializable requests are better for lower latency
-// requirement.
+// WithSerializable makes `Get` and `MemberList` requests serializable.
+// By default, they are linearizable. Serializable requests are better
+// for lower latency requirements, but users should be aware that they
+// could get stale data with serializable requests.
+//
+// In some situations users may want to use serializable requests. For
+// example, when adding a new member to a one-node cluster, it's reasonable
+// and safe to use a serializable request before the newly added member gets
+// started.
func WithSerializable() OpOption {
return func(op *Op) { op.serializable = true }
}
@@ -554,7 +573,37 @@ func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLi
}
// IsOptsWithPrefix returns true if WithPrefix option is called in the given opts.
-func IsOptsWithPrefix(opts []OpOption) bool { return isOpFuncCalled("WithPrefix", opts) }
+func IsOptsWithPrefix(opts []OpOption) bool {
+ ret := NewOp()
+ for _, opt := range opts {
+ opt(ret)
+ }
+
+ return ret.isOptsWithPrefix
+}
// IsOptsWithFromKey returns true if WithFromKey option is called in the given opts.
-func IsOptsWithFromKey(opts []OpOption) bool { return isOpFuncCalled("WithFromKey", opts) }
+func IsOptsWithFromKey(opts []OpOption) bool {
+ ret := NewOp()
+ for _, opt := range opts {
+ opt(ret)
+ }
+
+ return ret.isOptsWithFromKey
+}
+
+func (op Op) IsSortOptionValid() bool {
+ if op.sort != nil {
+ sortOrder := int32(op.sort.Order)
+ sortTarget := int32(op.sort.Target)
+
+ if _, ok := pb.RangeRequest_SortOrder_name[sortOrder]; !ok {
+ return false
+ }
+
+ if _, ok := pb.RangeRequest_SortTarget_name[sortTarget]; !ok {
+ return false
+ }
+ }
+ return true
+}
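
A short illustration of the reworked option detection: the helpers now apply the options to a scratch `Op` and read back the recorded flags, and the same flags are exposed on constructed operations.

```go
package main

import (
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	opts := []clientv3.OpOption{clientv3.WithPrefix()}
	fmt.Println(clientv3.IsOptsWithPrefix(opts))  // true
	fmt.Println(clientv3.IsOptsWithFromKey(opts)) // false

	// The flags recorded by WithPrefix/WithFromKey are also visible on the Op itself.
	op := clientv3.OpGet("foo", clientv3.WithPrefix(), clientv3.WithSerializable())
	fmt.Println(op.IsOptsWithPrefix(), op.IsOptsWithFromKey()) // true false
}
```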
diff --git a/client/v3/op_test.go b/client/v3/op_test.go
index 762044fc5e2..8933a1b935b 100644
--- a/client/v3/op_test.go
+++ b/client/v3/op_test.go
@@ -36,3 +36,70 @@ func TestOpWithSort(t *testing.T) {
t.Fatalf("expected %+v, got %+v", wreq, req)
}
}
+
+func TestIsSortOptionValid(t *testing.T) {
+ rangeReqs := []struct {
+ sortOrder pb.RangeRequest_SortOrder
+ sortTarget pb.RangeRequest_SortTarget
+ expectedValid bool
+ }{
+ {
+ sortOrder: pb.RangeRequest_ASCEND,
+ sortTarget: pb.RangeRequest_CREATE,
+ expectedValid: true,
+ },
+ {
+ sortOrder: pb.RangeRequest_ASCEND,
+ sortTarget: 100,
+ expectedValid: false,
+ },
+ {
+ sortOrder: 200,
+ sortTarget: pb.RangeRequest_MOD,
+ expectedValid: false,
+ },
+ }
+
+ for _, req := range rangeReqs {
+ getOp := Op{
+ sort: &SortOption{
+ Order: SortOrder(req.sortOrder),
+ Target: SortTarget(req.sortTarget),
+ },
+ }
+
+ actualRet := getOp.IsSortOptionValid()
+ if actualRet != req.expectedValid {
+ t.Errorf("expected sortOrder (%d) and sortTarget (%d) to be %t, but got %t",
+ req.sortOrder, req.sortTarget, req.expectedValid, actualRet)
+ }
+ }
+}
+
+func TestIsOptsWithPrefix(t *testing.T) {
+ optswithprefix := []OpOption{WithPrefix()}
+ op := OpGet("key", optswithprefix...)
+ if !IsOptsWithPrefix(optswithprefix) || !op.IsOptsWithPrefix() {
+ t.Errorf("IsOptsWithPrefix = false, expected true")
+ }
+
+ optswithfromkey := []OpOption{WithFromKey()}
+ op = OpGet("key", optswithfromkey...)
+ if IsOptsWithPrefix(optswithfromkey) || op.IsOptsWithPrefix() {
+ t.Errorf("IsOptsWithPrefix = true, expected false")
+ }
+}
+
+func TestIsOptsWithFromKey(t *testing.T) {
+ optswithfromkey := []OpOption{WithFromKey()}
+ op := OpGet("key", optswithfromkey...)
+ if !IsOptsWithFromKey(optswithfromkey) || !op.IsOptsWithFromKey() {
+ t.Errorf("IsOptsWithFromKey = false, expected true")
+ }
+
+ optswithprefix := []OpOption{WithPrefix()}
+ op = OpGet("key", optswithprefix...)
+ if IsOptsWithFromKey(optswithprefix) || op.IsOptsWithFromKey() {
+ t.Errorf("IsOptsWithFromKey = true, expected false")
+ }
+}
diff --git a/client/v3/options.go b/client/v3/options.go
index cdae1b16a2a..cc10a03d76d 100644
--- a/client/v3/options.go
+++ b/client/v3/options.go
@@ -23,7 +23,7 @@ import (
var (
// client-side handling retrying of request failures where data was not written to the wire or
- // where server indicates it did not process the data. gRPC default is default is "WaitForReady(false)"
+ // where server indicates it did not process the data. gRPC default is "WaitForReady(false)"
// but for etcd we default to "WaitForReady(true)" to minimize client request error responses due to
// transient failures.
defaultWaitForReady = grpc.WaitForReady(true)
diff --git a/client/v3/ordering/doc.go b/client/v3/ordering/doc.go
index 856f3305801..03588248bd6 100644
--- a/client/v3/ordering/doc.go
+++ b/client/v3/ordering/doc.go
@@ -38,5 +38,4 @@
// cli.KV = ordering.NewKV(cli.KV, vf)
//
// Now calls using 'cli' will reject order violations with an error.
-//
package ordering
diff --git a/client/v3/ordering/kv.go b/client/v3/ordering/kv.go
index 7914fc4b9c5..9075cbf9890 100644
--- a/client/v3/ordering/kv.go
+++ b/client/v3/ordering/kv.go
@@ -18,7 +18,7 @@ import (
"context"
"sync"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
// kvOrdering ensures that serialized requests do not return
diff --git a/client/v3/ordering/kv_test.go b/client/v3/ordering/kv_test.go
index cc5c03bfa6b..cb6d0d3909e 100644
--- a/client/v3/ordering/kv_test.go
+++ b/client/v3/ordering/kv_test.go
@@ -16,12 +16,11 @@ package ordering
import (
"context"
- gContext "context"
"sync"
"testing"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
type mockKV struct {
@@ -29,7 +28,7 @@ type mockKV struct {
response clientv3.OpResponse
}
-func (kv *mockKV) Do(ctx gContext.Context, op clientv3.Op) (clientv3.OpResponse, error) {
+func (kv *mockKV) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse, error) {
return kv.response, nil
}
diff --git a/client/v3/ordering/util.go b/client/v3/ordering/util.go
index f8f65c4c9b6..cd8333cba72 100644
--- a/client/v3/ordering/util.go
+++ b/client/v3/ordering/util.go
@@ -18,7 +18,7 @@ import (
"errors"
"sync/atomic"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
type OrderViolationFunc func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error
@@ -29,7 +29,7 @@ func NewOrderViolationSwitchEndpointClosure(c *clientv3.Client) OrderViolationFu
violationCount := int32(0)
return func(_ clientv3.Op, _ clientv3.OpResponse, _ int64) error {
// Each request is assigned by round-robin load-balancer's picker to a different
- // endpoints. If we cycled them 5 times (even with some level of concurrency),
+ // endpoint. If we cycled them 5 times (even with some level of concurrency),
// with high probability no endpoint points on a member with fresh data.
// TODO: Ideally we should track members (resp.opp.Header) that returned
// stale result and explicitly temporarily disable them in 'picker'.
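
The closure above is meant to be plugged into the ordering wrapper shown in `ordering/doc.go`: on a detected revision-order violation it rotates the endpoint list so the next attempt likely reaches a member with fresh data. A sketch of the wiring, with illustrative endpoints.

```go
package main

import (
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/ordering"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379", "localhost:22379", "localhost:32379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Reject out-of-order serialized responses by switching endpoints and retrying.
	vf := ordering.NewOrderViolationSwitchEndpointClosure(cli)
	cli.KV = ordering.NewKV(cli.KV, vf)
}
```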
diff --git a/client/v3/retry.go b/client/v3/retry.go
index 69ecc631471..9152c53a7d4 100644
--- a/client/v3/retry.go
+++ b/client/v3/retry.go
@@ -16,13 +16,14 @@ package clientv3
import (
"context"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "errors"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
)
type retryPolicy uint8
@@ -52,7 +53,8 @@ func (rp retryPolicy) String() string {
// handle itself even with retries.
func isSafeRetryImmutableRPC(err error) bool {
eErr := rpctypes.Error(err)
- if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
+ var serverErr rpctypes.EtcdError
+ if errors.As(eErr, &serverErr) && serverErr.Code() != codes.Unavailable {
// interrupted by non-transient server-side or gRPC-side error
// client cannot handle itself (e.g. rpctypes.ErrCompacted)
return false
@@ -101,8 +103,9 @@ func RetryKVClient(c *Client) pb.KVClient {
kc: pb.NewKVClient(c.conn),
}
}
+
func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
- return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+ return rkv.kc.Range(ctx, in, append(opts, withRepeatablePolicy())...)
}
func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
@@ -133,23 +136,23 @@ func RetryLeaseClient(c *Client) pb.LeaseClient {
}
func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
- return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+ return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRepeatablePolicy())...)
}
func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
- return rlc.lc.LeaseLeases(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+ return rlc.lc.LeaseLeases(ctx, in, append(opts, withRepeatablePolicy())...)
}
func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
- return rlc.lc.LeaseGrant(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+ return rlc.lc.LeaseGrant(ctx, in, append(opts, withRepeatablePolicy())...)
}
func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
- return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+ return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRepeatablePolicy())...)
}
func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
- return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRetryPolicy(repeatable))...)
+ return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRepeatablePolicy())...)
}
type retryClusterClient struct {
@@ -164,7 +167,7 @@ func RetryClusterClient(c *Client) pb.ClusterClient {
}
func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
- return rcc.cc.MemberList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+ return rcc.cc.MemberList(ctx, in, append(opts, withRepeatablePolicy())...)
}
func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
@@ -195,27 +198,27 @@ func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClie
}
func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
- return rmc.mc.Alarm(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+ return rmc.mc.Alarm(ctx, in, append(opts, withRepeatablePolicy())...)
}
func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
- return rmc.mc.Status(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+ return rmc.mc.Status(ctx, in, append(opts, withRepeatablePolicy())...)
}
func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
- return rmc.mc.Hash(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+ return rmc.mc.Hash(ctx, in, append(opts, withRepeatablePolicy())...)
}
func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
- return rmc.mc.HashKV(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+ return rmc.mc.HashKV(ctx, in, append(opts, withRepeatablePolicy())...)
}
func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
- return rmc.mc.Snapshot(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+ return rmc.mc.Snapshot(ctx, in, append(opts, withRepeatablePolicy())...)
}
func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
- return rmc.mc.MoveLeader(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+ return rmc.mc.MoveLeader(ctx, in, append(opts, withRepeatablePolicy())...)
}
func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
@@ -238,19 +241,19 @@ func RetryAuthClient(c *Client) pb.AuthClient {
}
func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
- return rac.ac.UserList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+ return rac.ac.UserList(ctx, in, append(opts, withRepeatablePolicy())...)
}
func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
- return rac.ac.UserGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+ return rac.ac.UserGet(ctx, in, append(opts, withRepeatablePolicy())...)
}
func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
- return rac.ac.RoleGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+ return rac.ac.RoleGet(ctx, in, append(opts, withRepeatablePolicy())...)
}
func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
- return rac.ac.RoleList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+ return rac.ac.RoleList(ctx, in, append(opts, withRepeatablePolicy())...)
}
func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
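
For reference, a standalone sketch of the functional-option pattern these retry wrappers rely on; the type and option names below are illustrative stand-ins, not the client's internals.

```go
package main

import "fmt"

// Illustrative stand-ins for the retry options used above.
type retryOptions struct {
	repeatable bool
	maxRetries uint
}

type retryOption func(*retryOptions)

// withRepeatable plays the role of withRepeatablePolicy: it marks the call as
// a read-only RPC that is safe to retry.
func withRepeatable() retryOption {
	return func(o *retryOptions) { o.repeatable = true }
}

func withMax(n uint) retryOption {
	return func(o *retryOptions) { o.maxRetries = n }
}

func apply(opts ...retryOption) retryOptions {
	var o retryOptions
	for _, opt := range opts {
		opt(&o)
	}
	return o
}

func main() {
	fmt.Printf("%+v\n", apply(withRepeatable(), withMax(3))) // {repeatable:true maxRetries:3}
}
```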
diff --git a/client/v3/retry_interceptor.go b/client/v3/retry_interceptor.go
index 9586c334a3d..2b9301a5808 100644
--- a/client/v3/retry_interceptor.go
+++ b/client/v3/retry_interceptor.go
@@ -19,16 +19,18 @@ package clientv3
import (
"context"
+ "errors"
"io"
"sync"
"time"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
+
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
)
// unaryClientInterceptor returns a new retrying unary client interceptor.
@@ -37,7 +39,7 @@ import (
// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClientInterceptor {
intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
- return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
ctx = withVersion(ctx)
grpcOpts, retryOpts := filterCallOptions(opts)
callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
@@ -53,6 +55,7 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien
c.GetLogger().Debug(
"retrying of unary invoker",
zap.String("target", cc.Target()),
+ zap.String("method", method),
zap.Uint("attempt", attempt),
)
lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...)
@@ -62,6 +65,7 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien
c.GetLogger().Warn(
"retrying of unary invoker failed",
zap.String("target", cc.Target()),
+ zap.String("method", method),
zap.Uint("attempt", attempt),
zap.Error(lastErr),
)
@@ -73,25 +77,19 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien
// its the callCtx deadline or cancellation, in which case try again.
continue
}
- if callOpts.retryAuth && rpctypes.Error(lastErr) == rpctypes.ErrInvalidAuthToken {
- // clear auth token before refreshing it.
- // call c.Auth.Authenticate with an invalid token will always fail the auth check on the server-side,
- // if the server has not apply the patch of pr #12165 (https://github.com/etcd-io/etcd/pull/12165)
- // and a rpctypes.ErrInvalidAuthToken will recursively call c.getToken until system run out of resource.
- c.authTokenBundle.UpdateAuthToken("")
-
- gterr := c.getToken(ctx)
- if gterr != nil {
+ if c.shouldRefreshToken(lastErr, callOpts) {
+ gtErr := c.refreshToken(ctx)
+ if gtErr != nil {
c.GetLogger().Warn(
"retrying of unary invoker failed to fetch new auth token",
zap.String("target", cc.Target()),
- zap.Error(gterr),
+ zap.Error(gtErr),
)
- return gterr // lastErr must be invalid auth token
+ return gtErr // lastErr must be invalid auth token
}
continue
}
- if !isSafeRetry(c.lg, lastErr, callOpts) {
+ if !isSafeRetry(c, lastErr, callOpts) {
return lastErr
}
}
@@ -111,15 +109,12 @@ func (c *Client) streamClientInterceptor(optFuncs ...retryOption) grpc.StreamCli
intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
ctx = withVersion(ctx)
- // getToken automatically
- // TODO(cfc4n): keep this code block, remove codes about getToken in client.go after pr #12165 merged.
- if c.authTokenBundle != nil {
- // equal to c.Username != "" && c.Password != ""
- err := c.getToken(ctx)
- if err != nil && rpctypes.Error(err) != rpctypes.ErrAuthNotEnabled {
- c.GetLogger().Error("clientv3/retry_interceptor: getToken failed", zap.Error(err))
- return nil, err
- }
+ // getToken automatically. Otherwise, auth token may be invalid after watch reconnection because the token has expired
+ // (see https://github.com/etcd-io/etcd/issues/11954 for more).
+ err := c.getToken(ctx)
+ if err != nil {
+ c.GetLogger().Error("clientv3/retry_interceptor: getToken failed", zap.Error(err))
+ return nil, err
}
grpcOpts, retryOpts := filterCallOptions(opts)
callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
@@ -148,15 +143,45 @@ func (c *Client) streamClientInterceptor(optFuncs ...retryOption) grpc.StreamCli
}
}
+// shouldRefreshToken checks whether there's a need to refresh the token based on the error and callOptions,
+// and returns a boolean value.
+func (c *Client) shouldRefreshToken(err error, callOpts *options) bool {
+ if errors.Is(rpctypes.Error(err), rpctypes.ErrUserEmpty) {
+ // refresh the token when username, password is present but the server returns ErrUserEmpty
+ // which is possible when the client token is cleared somehow
+ return c.authTokenBundle != nil // equal to c.Username != "" && c.Password != ""
+ }
+
+ return callOpts.retryAuth &&
+ (errors.Is(rpctypes.Error(err), rpctypes.ErrInvalidAuthToken) || errors.Is(rpctypes.Error(err), rpctypes.ErrAuthOldRevision))
+}
+
+func (c *Client) refreshToken(ctx context.Context) error {
+ if c.authTokenBundle == nil {
+ // c.authTokenBundle will be initialized only when
+ // c.Username != "" && c.Password != "".
+ //
+ // When users use the TLS CommonName based authentication, the
+ // authTokenBundle is always nil. But it's possible for the clients
+ // to get `rpctypes.ErrAuthOldRevision` response when the clients
+ // concurrently modify auth data (e.g., addUser, deleteUser, etc.).
+ // In this case, there is no need to refresh the token; instead the
+ // clients just need to retry the operations (e.g. Put, Delete, etc.).
+ return nil
+ }
+
+ return c.getToken(ctx)
+}
+
// type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a
// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
// a new ClientStream according to the retry policy.
type serverStreamingRetryingStream struct {
grpc.ClientStream
client *Client
- bufferedSends []interface{} // single message that the client can sen
- receivedGood bool // indicates whether any prior receives were successful
- wasClosedSend bool // indicates that CloseSend was closed
+ bufferedSends []any // single message that the client can send
+ receivedGood bool // indicates whether any prior receives were successful
+ wasClosedSend bool // indicates that CloseSend was called
ctx context.Context
callOpts *options
streamerCall func(ctx context.Context) (grpc.ClientStream, error)
@@ -175,7 +200,7 @@ func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream {
return s.ClientStream
}
-func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error {
+func (s *serverStreamingRetryingStream) SendMsg(m any) error {
s.mu.Lock()
s.bufferedSends = append(s.bufferedSends, m)
s.mu.Unlock()
@@ -197,7 +222,7 @@ func (s *serverStreamingRetryingStream) Trailer() metadata.MD {
return s.getStream().Trailer()
}
-func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
+func (s *serverStreamingRetryingStream) RecvMsg(m any) error {
attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m)
if !attemptRetry {
return lastErr // success or hard failure
@@ -224,12 +249,12 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
return lastErr
}
-func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) {
+func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m any) (bool, error) {
s.mu.RLock()
wasGood := s.receivedGood
s.mu.RUnlock()
err := s.getStream().RecvMsg(m)
- if err == nil || err == io.EOF {
+ if err == nil || errors.Is(err, io.EOF) {
s.mu.Lock()
s.receivedGood = true
s.mu.Unlock()
@@ -245,19 +270,15 @@ func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}
// its the callCtx deadline or cancellation, in which case try again.
return true, err
}
- if s.callOpts.retryAuth && rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
- // clear auth token to avoid failure when call getToken
- s.client.authTokenBundle.UpdateAuthToken("")
-
- gterr := s.client.getToken(s.ctx)
- if gterr != nil {
- s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr))
+ if s.client.shouldRefreshToken(err, s.callOpts) {
+ gtErr := s.client.refreshToken(s.ctx)
+ if gtErr != nil {
+ s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gtErr))
return false, err // return the original error for simplicity
}
return true, err
-
}
- return isSafeRetry(s.client.lg, err, s.callOpts), err
+ return isSafeRetry(s.client, err, s.callOpts), err
}
func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) {
@@ -289,7 +310,7 @@ func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) erro
select {
case <-ctx.Done():
timer.Stop()
- return contextErrToGrpcErr(ctx.Err())
+ return contextErrToGRPCErr(ctx.Err())
case <-timer.C:
}
}
@@ -297,17 +318,28 @@ func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) erro
}
// isSafeRetry returns "true", if request is safe for retry with the given error.
-func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool {
+func isSafeRetry(c *Client, err error, callOpts *options) bool {
if isContextError(err) {
return false
}
+
+ // A learner refusing an RPC it is not supposed to serve is, from the server's
+ // perspective, not retryable.
+ // But for backward-compatibility reasons we need to support the situation where a
+ // customer provides a mix of learners (not yet voters) and voters, with the
+ // expectation that a voter is picked on the next attempt.
+ // TODO: Ideally client should be 'aware' which endpoint represents: leader/voter/learner with high probability.
+ if errors.Is(err, rpctypes.ErrGRPCNotSupportedForLearner) && len(c.Endpoints()) > 1 {
+ return true
+ }
+
switch callOpts.retryPolicy {
case repeatable:
return isSafeRetryImmutableRPC(err)
case nonRepeatable:
return isSafeRetryMutableRPC(err)
default:
- lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String()))
+ c.lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String()))
return false
}
}
@@ -316,25 +348,23 @@ func isContextError(err error) bool {
return status.Code(err) == codes.DeadlineExceeded || status.Code(err) == codes.Canceled
}
-func contextErrToGrpcErr(err error) error {
- switch err {
- case context.DeadlineExceeded:
+func contextErrToGRPCErr(err error) error {
+ switch {
+ case errors.Is(err, context.DeadlineExceeded):
return status.Errorf(codes.DeadlineExceeded, err.Error())
- case context.Canceled:
+ case errors.Is(err, context.Canceled):
return status.Errorf(codes.Canceled, err.Error())
default:
return status.Errorf(codes.Unknown, err.Error())
}
}
-var (
- defaultOptions = &options{
- retryPolicy: nonRepeatable,
- max: 0, // disable
- backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10),
- retryAuth: true,
- }
-)
+var defaultOptions = &options{
+ retryPolicy: nonRepeatable,
+ max: 0, // disable
+ backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10),
+ retryAuth: true,
+}
// backoffFunc denotes a family of functions that control the backoff duration between call retries.
//
@@ -344,10 +374,10 @@ var (
// with the next iteration.
type backoffFunc func(attempt uint) time.Duration
-// withRetryPolicy sets the retry policy of this call.
-func withRetryPolicy(rp retryPolicy) retryOption {
+// withRepeatablePolicy sets the repeatable policy of this call.
+func withRepeatablePolicy() retryOption {
return retryOption{applyFunc: func(o *options) {
- o.retryPolicy = rp
+ o.retryPolicy = repeatable
}}
}
@@ -358,7 +388,7 @@ func withMax(maxRetries uint) retryOption {
}}
}
-// WithBackoff sets the `BackoffFunc `used to control time between retries.
+// WithBackoff sets the `BackoffFunc` used to control time between retries.
func withBackoff(bf backoffFunc) retryOption {
return retryOption{applyFunc: func(o *options) {
o.backoffFunc = bf
diff --git a/client/v3/retry_interceptor_test.go b/client/v3/retry_interceptor_test.go
new file mode 100644
index 00000000000..393bb9bbe5c
--- /dev/null
+++ b/client/v3/retry_interceptor_test.go
@@ -0,0 +1,132 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "testing"
+
+ grpccredentials "google.golang.org/grpc/credentials"
+
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/client/v3/credentials"
+)
+
+type dummyAuthTokenBundle struct{}
+
+func (d dummyAuthTokenBundle) PerRPCCredentials() grpccredentials.PerRPCCredentials {
+ return nil
+}
+
+func (d dummyAuthTokenBundle) UpdateAuthToken(token string) {
+}
+
+func TestClientShouldRefreshToken(t *testing.T) {
+ type fields struct {
+ authTokenBundle credentials.PerRPCCredentialsBundle
+ }
+ type args struct {
+ err error
+ callOpts *options
+ }
+
+ optsWithTrue := &options{
+ retryAuth: true,
+ }
+ optsWithFalse := &options{
+ retryAuth: false,
+ }
+
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ want bool
+ }{
+ {
+ name: "ErrUserEmpty and non nil authTokenBundle",
+ fields: fields{
+ authTokenBundle: &dummyAuthTokenBundle{},
+ },
+ args: args{rpctypes.ErrGRPCUserEmpty, optsWithTrue},
+ want: true,
+ },
+ {
+ name: "ErrUserEmpty and nil authTokenBundle",
+ fields: fields{
+ authTokenBundle: nil,
+ },
+ args: args{rpctypes.ErrGRPCUserEmpty, optsWithTrue},
+ want: false,
+ },
+ {
+ name: "ErrGRPCInvalidAuthToken and retryAuth",
+ fields: fields{
+ authTokenBundle: nil,
+ },
+ args: args{rpctypes.ErrGRPCInvalidAuthToken, optsWithTrue},
+ want: true,
+ },
+ {
+ name: "ErrGRPCInvalidAuthToken and !retryAuth",
+ fields: fields{
+ authTokenBundle: nil,
+ },
+ args: args{rpctypes.ErrGRPCInvalidAuthToken, optsWithFalse},
+ want: false,
+ },
+ {
+ name: "ErrGRPCAuthOldRevision and retryAuth",
+ fields: fields{
+ authTokenBundle: nil,
+ },
+ args: args{rpctypes.ErrGRPCAuthOldRevision, optsWithTrue},
+ want: true,
+ },
+ {
+ name: "ErrGRPCAuthOldRevision and !retryAuth",
+ fields: fields{
+ authTokenBundle: nil,
+ },
+ args: args{rpctypes.ErrGRPCAuthOldRevision, optsWithFalse},
+ want: false,
+ },
+ {
+ name: "Other error and retryAuth",
+ fields: fields{
+ authTokenBundle: nil,
+ },
+ args: args{rpctypes.ErrGRPCAuthFailed, optsWithTrue},
+ want: false,
+ },
+ {
+ name: "Other error and !retryAuth",
+ fields: fields{
+ authTokenBundle: nil,
+ },
+ args: args{rpctypes.ErrGRPCAuthFailed, optsWithFalse},
+ want: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := &Client{
+ authTokenBundle: tt.fields.authTokenBundle,
+ }
+ if got := c.shouldRefreshToken(tt.args.err, tt.args.callOpts); got != tt.want {
+ t.Errorf("shouldRefreshToken() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/client/v3/snapshot/v3_snapshot.go b/client/v3/snapshot/v3_snapshot.go
index b6eb592dda3..324e497cfd0 100644
--- a/client/v3/snapshot/v3_snapshot.go
+++ b/client/v3/snapshot/v3_snapshot.go
@@ -23,9 +23,10 @@ import (
"time"
"github.com/dustin/go-humanize"
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
- "go.etcd.io/etcd/client/v3"
"go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
// hasChecksum returns "true" if the file size "n"
@@ -36,24 +37,22 @@ func hasChecksum(n int64) bool {
return (n % 512) == sha256.Size
}
-// Save fetches snapshot from remote etcd server and saves data
-// to target path. If the context "ctx" is canceled or timed out,
+// SaveWithVersion fetches snapshot from remote etcd server, saves data
+// to target path and returns server version. If the context "ctx" is canceled or timed out,
// snapshot save stream will error out (e.g. context.Canceled,
// context.DeadlineExceeded). Make sure to specify only one endpoint
// in client configuration. Snapshot API must be requested to a
// selected node, and saved snapshot is the point-in-time state of
// the selected node.
-func Save(ctx context.Context, lg *zap.Logger, cfg clientv3.Config, dbPath string) error {
- if lg == nil {
- lg = zap.NewExample()
- }
+// Etcd ", v1),
-// Compare(Version(k1), "=", 2)
-// ).Then(
-// OpPut(k2,v2), OpPut(k3,v3)
-// ).Else(
-// OpPut(k4,v4), OpPut(k5,v5)
-// ).Commit()
-//
+// Txn(context.TODO()).If(
+// Compare(Value(k1), ">", v1),
+// Compare(Version(k1), "=", 2)
+// ).Then(
+// OpPut(k2,v2), OpPut(k3,v3)
+// ).Else(
+// OpPut(k4,v4), OpPut(k5,v5)
+// ).Commit()
type Txn interface {
// If takes a list of comparison. If all comparisons passed in succeed,
// the operations passed into Then() will be executed. Or the operations
@@ -145,7 +144,7 @@ func (txn *txn) Commit() (*TxnResponse, error) {
var err error
resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...)
if err != nil {
- return nil, toErr(txn.ctx, err)
+ return nil, ContextError(txn.ctx, err)
}
return (*TxnResponse)(resp), nil
}
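
A runnable form of the If/Then/Else example from the Txn doc comment above, assuming an etcd endpoint at localhost:2379; the keys and values are placeholders.

```go
package main

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// If k1's value is greater than "v1" and its version is 2, write k2/k3;
	// otherwise write k4/k5, exactly as in the doc comment example.
	_, err = cli.Txn(context.TODO()).If(
		clientv3.Compare(clientv3.Value("k1"), ">", "v1"),
		clientv3.Compare(clientv3.Version("k1"), "=", 2),
	).Then(
		clientv3.OpPut("k2", "v2"), clientv3.OpPut("k3", "v3"),
	).Else(
		clientv3.OpPut("k4", "v4"), clientv3.OpPut("k5", "v5"),
	).Commit()
	if err != nil {
		log.Fatal(err)
	}
}
```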
diff --git a/client/v3/txn_test.go b/client/v3/txn_test.go
index 4be01001368..de7aec3b8e9 100644
--- a/client/v3/txn_test.go
+++ b/client/v3/txn_test.go
@@ -23,12 +23,11 @@ import (
)
func TestTxnPanics(t *testing.T) {
- testutil.BeforeTest(t)
+ testutil.RegisterLeakDetection(t)
kv := &kv{}
- errc := make(chan string, 1)
- df := func() {
+ df := func(errc chan string) {
if s := recover(); s != nil {
errc <- s.(string)
}
@@ -38,53 +37,53 @@ func TestTxnPanics(t *testing.T) {
op := OpPut("foo", "bar")
tests := []struct {
- f func()
+ f func(chan string)
err string
}{
{
- f: func() {
- defer df()
+ f: func(errc chan string) {
+ defer df(errc)
kv.Txn(context.TODO()).If(cmp).If(cmp)
},
err: "cannot call If twice!",
},
{
- f: func() {
- defer df()
+ f: func(errc chan string) {
+ defer df(errc)
kv.Txn(context.TODO()).Then(op).If(cmp)
},
err: "cannot call If after Then!",
},
{
- f: func() {
- defer df()
+ f: func(errc chan string) {
+ defer df(errc)
kv.Txn(context.TODO()).Else(op).If(cmp)
},
err: "cannot call If after Else!",
},
{
- f: func() {
- defer df()
+ f: func(errc chan string) {
+ defer df(errc)
kv.Txn(context.TODO()).Then(op).Then(op)
},
err: "cannot call Then twice!",
},
{
- f: func() {
- defer df()
+ f: func(errc chan string) {
+ defer df(errc)
kv.Txn(context.TODO()).Else(op).Then(op)
},
err: "cannot call Then after Else!",
},
{
- f: func() {
- defer df()
+ f: func(errc chan string) {
+ defer df(errc)
kv.Txn(context.TODO()).Else(op).Else(op)
},
@@ -93,7 +92,8 @@ func TestTxnPanics(t *testing.T) {
}
for i, tt := range tests {
- go tt.f()
+ errc := make(chan string, 1)
+ go tt.f(errc)
select {
case err := <-errc:
if err != tt.err {
diff --git a/client/v3/utils.go b/client/v3/utils.go
index b998c41b90f..850275877d3 100644
--- a/client/v3/utils.go
+++ b/client/v3/utils.go
@@ -16,9 +16,6 @@ package clientv3
import (
"math/rand"
- "reflect"
- "runtime"
- "strings"
"time"
)
@@ -32,18 +29,3 @@ func jitterUp(duration time.Duration, jitter float64) time.Duration {
multiplier := jitter * (rand.Float64()*2 - 1)
return time.Duration(float64(duration) * (1 + multiplier))
}
-
-// Check if the provided function is being called in the op options.
-func isOpFuncCalled(op string, opts []OpOption) bool {
- for _, opt := range opts {
- v := reflect.ValueOf(opt)
- if v.Kind() == reflect.Func {
- if opFunc := runtime.FuncForPC(v.Pointer()); opFunc != nil {
- if strings.Contains(opFunc.Name(), op) {
- return true
- }
- }
- }
- }
- return false
-}
diff --git a/client/v3/watch.go b/client/v3/watch.go
index b73925ba128..a46f98b8e28 100644
--- a/client/v3/watch.go
+++ b/client/v3/watch.go
@@ -21,15 +21,15 @@ import (
"sync"
"time"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/mvccpb"
- v3rpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
-
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ v3rpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
)
const (
@@ -37,6 +37,13 @@ const (
EventTypePut = mvccpb.PUT
closeSendErrTimeout = 250 * time.Millisecond
+
+ // AutoWatchID is the watcher ID passed in WatchStream.Watch when no
+ // user-provided ID is available. If passed, an ID will automatically be assigned.
+ AutoWatchID = 0
+
+ // InvalidWatchID represents an invalid watch ID and prevents duplication with an existing watch.
+ InvalidWatchID = -1
)
type Event mvccpb.Event
@@ -142,12 +149,12 @@ type watcher struct {
mu sync.Mutex
// streams holds all the active grpc streams keyed by ctx value.
- streams map[string]*watchGrpcStream
+ streams map[string]*watchGRPCStream
lg *zap.Logger
}
-// watchGrpcStream tracks all watch resources attached to a single grpc stream.
-type watchGrpcStream struct {
+// watchGRPCStream tracks all watch resources attached to a single grpc stream.
+type watchGRPCStream struct {
owner *watcher
remote pb.WatchClient
callOpts []grpc.CallOption
@@ -214,8 +221,7 @@ type watchRequest struct {
}
// progressRequest is issued by the subscriber to request watch progress
-type progressRequest struct {
-}
+type progressRequest struct{}
// watcherStream represents a registered watcher
type watcherStream struct {
@@ -244,7 +250,7 @@ func NewWatcher(c *Client) Watcher {
func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
w := &watcher{
remote: wc,
- streams: make(map[string]*watchGrpcStream),
+ streams: make(map[string]*watchGRPCStream),
}
if c != nil {
w.callOpts = c.callOpts
@@ -254,8 +260,10 @@ func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
}
// never closes
-var valCtxCh = make(chan struct{})
-var zeroTime = time.Unix(0, 0)
+var (
+ valCtxCh = make(chan struct{})
+ zeroTime = time.Unix(0, 0)
+)
// ctx with only the values; never Done
type valCtx struct{ context.Context }
@@ -264,9 +272,9 @@ func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
func (vc *valCtx) Done() <-chan struct{} { return valCtxCh }
func (vc *valCtx) Err() error { return nil }
-func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
+func (w *watcher) newWatcherGRPCStream(inctx context.Context) *watchGRPCStream {
ctx, cancel := context.WithCancel(&valCtx{inctx})
- wgs := &watchGrpcStream{
+ wgs := &watchGRPCStream{
owner: w,
remote: w.remote,
callOpts: w.callOpts,
@@ -327,7 +335,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch
}
wgs := w.streams[ctxKey]
if wgs == nil {
- wgs = w.newWatcherGrpcStream(ctx)
+ wgs = w.newWatcherGRPCStream(ctx)
w.streams[ctxKey] = wgs
}
donec := wgs.donec
@@ -388,7 +396,7 @@ func (w *watcher) Close() (err error) {
}
}
// Consider context.Canceled as a successful close
- if err == context.Canceled {
+ if errors.Is(err, context.Canceled) {
err = nil
}
return err
@@ -401,11 +409,11 @@ func (w *watcher) RequestProgress(ctx context.Context) (err error) {
w.mu.Lock()
if w.streams == nil {
w.mu.Unlock()
- return fmt.Errorf("no stream found for context")
+ return errors.New("no stream found for context")
}
wgs := w.streams[ctxKey]
if wgs == nil {
- wgs = w.newWatcherGrpcStream(ctx)
+ wgs = w.newWatcherGRPCStream(ctx)
w.streams[ctxKey] = wgs
}
donec := wgs.donec
@@ -428,17 +436,17 @@ func (w *watcher) RequestProgress(ctx context.Context) (err error) {
}
}
-func (w *watchGrpcStream) close() (err error) {
+func (w *watchGRPCStream) close() (err error) {
w.cancel()
<-w.donec
select {
case err = <-w.errc:
default:
}
- return toErr(w.ctx, err)
+ return ContextError(w.ctx, err)
}
-func (w *watcher) closeStream(wgs *watchGrpcStream) {
+func (w *watcher) closeStream(wgs *watchGRPCStream) {
w.mu.Lock()
close(wgs.donec)
wgs.cancel()
@@ -448,9 +456,9 @@ func (w *watcher) closeStream(wgs *watchGrpcStream) {
w.mu.Unlock()
}
-func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
+func (w *watchGRPCStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
// check watch ID for backward compatibility (<= v3.3)
- if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") {
+ if resp.WatchId == InvalidWatchID || (resp.Canceled && resp.CancelReason != "") {
w.closeErr = v3rpc.Error(errors.New(resp.CancelReason))
// failed; no channel
close(ws.recvc)
@@ -460,7 +468,7 @@ func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream
w.substreams[ws.id] = ws
}
-func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) {
+func (w *watchGRPCStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) {
select {
case ws.outc <- *resp:
case <-ws.initReq.ctx.Done():
@@ -469,7 +477,7 @@ func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchRespo
close(ws.outc)
}
-func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
+func (w *watchGRPCStream) closeSubstream(ws *watcherStream) {
// send channel response in case stream was never established
select {
case ws.initReq.retc <- ws.outc:
@@ -481,7 +489,7 @@ func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
} else if ws.outc != nil {
close(ws.outc)
}
- if ws.id != -1 {
+ if ws.id != InvalidWatchID {
delete(w.substreams, ws.id)
return
}
@@ -494,7 +502,7 @@ func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
}
// run is the root of the goroutines for managing a watcher client
-func (w *watchGrpcStream) run() {
+func (w *watchGRPCStream) run() {
var wc pb.Watch_WatchClient
var closeErr error
@@ -533,6 +541,7 @@ func (w *watchGrpcStream) run() {
cancelSet := make(map[int64]struct{})
var cur *pb.WatchResponse
+ backoff := time.Millisecond
for {
select {
// Watch() requested
@@ -543,7 +552,7 @@ func (w *watchGrpcStream) run() {
// TODO: pass custom watch ID?
ws := &watcherStream{
initReq: *wreq,
- id: -1,
+ id: InvalidWatchID,
outc: outc,
// unbuffered so resumes won't cause repeat events
recvc: make(chan *WatchResponse),
@@ -645,10 +654,11 @@ func (w *watchGrpcStream) run() {
// watch client failed on Recv; spawn another if possible
case err := <-w.errc:
- if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
+ if isHaltErr(w.ctx, err) || errors.Is(ContextError(w.ctx, err), v3rpc.ErrNoLeader) {
closeErr = err
return
}
+ backoff = w.backoffIfUnavailable(backoff, err)
if wc, closeErr = w.newWatchClient(); closeErr != nil {
return
}
@@ -669,7 +679,7 @@ func (w *watchGrpcStream) run() {
if len(w.substreams)+len(w.resuming) == 0 {
return
}
- if ws.id != -1 {
+ if ws.id != InvalidWatchID {
// client is closing an established watch; close it on the server proactively instead of waiting
// to close when the next message arrives
cancelSet[ws.id] = struct{}{}
@@ -690,7 +700,7 @@ func (w *watchGrpcStream) run() {
// nextResume chooses the next resuming to register with the grpc stream. Abandoned
// streams are marked as nil in the queue since the head must wait for its inflight registration.
-func (w *watchGrpcStream) nextResume() *watcherStream {
+func (w *watchGRPCStream) nextResume() *watcherStream {
for len(w.resuming) != 0 {
if w.resuming[0] != nil {
return w.resuming[0]
@@ -701,7 +711,7 @@ func (w *watchGrpcStream) nextResume() *watcherStream {
}
// dispatchEvent sends a WatchResponse to the appropriate watcher stream
-func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
+func (w *watchGRPCStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
events := make([]*Event, len(pbresp.Events))
for i, ev := range pbresp.Events {
events[i] = (*Event)(ev)
@@ -716,18 +726,17 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
cancelReason: pbresp.CancelReason,
}
- // watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of -1 to
+ // watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of InvalidWatchID to
// indicate they should be broadcast.
- if wr.IsProgressNotify() && pbresp.WatchId == -1 {
+ if wr.IsProgressNotify() && pbresp.WatchId == InvalidWatchID {
return w.broadcastResponse(wr)
}
return w.unicastResponse(wr, pbresp.WatchId)
-
}
// broadcastResponse send a watch response to all watch substreams.
-func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool {
+func (w *watchGRPCStream) broadcastResponse(wr *WatchResponse) bool {
for _, ws := range w.substreams {
select {
case ws.recvc <- wr:
@@ -738,8 +747,8 @@ func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool {
}
// unicastResponse sends a watch response to a specific watch substream.
-func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool {
- ws, ok := w.substreams[watchId]
+func (w *watchGRPCStream) unicastResponse(wr *WatchResponse, watchID int64) bool {
+ ws, ok := w.substreams[watchID]
if !ok {
return false
}
@@ -752,7 +761,7 @@ func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool
}
// serveWatchClient forwards messages from the grpc stream to run()
-func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
+func (w *watchGRPCStream) serveWatchClient(wc pb.Watch_WatchClient) {
for {
resp, err := wc.Recv()
if err != nil {
@@ -771,7 +780,7 @@ func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
}
// serveSubstream forwards watch responses from run() to the subscriber
-func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) {
+func (w *watchGRPCStream) serveSubstream(ws *watcherStream, resumec chan struct{}) {
if ws.closing {
panic("created substream goroutine but substream is closing")
}
@@ -839,12 +848,13 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
}
} else {
// current progress of watch; <= store revision
- nextRev = wr.Header.Revision
+ nextRev = wr.Header.Revision + 1
}
if len(wr.Events) > 0 {
nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
}
+
ws.initReq.rev = nextRev
// created event is already sent above,
@@ -867,13 +877,13 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
// lazily send cancel message if events on missing id
}
-func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
+func (w *watchGRPCStream) newWatchClient() (pb.Watch_WatchClient, error) {
// mark all substreams as resuming
close(w.resumec)
w.resumec = make(chan struct{})
w.joinSubstreams()
for _, ws := range w.substreams {
- ws.id = -1
+ ws.id = InvalidWatchID
w.resuming = append(w.resuming, ws)
}
// strip out nils, if any
@@ -913,7 +923,7 @@ func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
return wc, nil
}
-func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
+func (w *watchGRPCStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
var wg sync.WaitGroup
wg.Add(len(w.resuming))
donec := make(chan struct{})
@@ -950,7 +960,7 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str
}
// joinSubstreams waits for all substream goroutines to complete.
-func (w *watchGrpcStream) joinSubstreams() {
+func (w *watchGRPCStream) joinSubstreams() {
for _, ws := range w.substreams {
<-ws.donec
}
@@ -963,10 +973,25 @@ func (w *watchGrpcStream) joinSubstreams() {
var maxBackoff = 100 * time.Millisecond
+func (w *watchGRPCStream) backoffIfUnavailable(backoff time.Duration, err error) time.Duration {
+ if isUnavailableErr(w.ctx, err) {
+ // retry, but backoff
+ if backoff < maxBackoff {
+ // 25% backoff factor
+ backoff = backoff + backoff/4
+ if backoff > maxBackoff {
+ backoff = maxBackoff
+ }
+ }
+ time.Sleep(backoff)
+ }
+ return backoff
+}
+
// openWatchClient retries opening a watch client until success or halt.
// manually retry in case "ws==nil && err==nil"
// TODO: remove FailFast=false
-func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
+func (w *watchGRPCStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
backoff := time.Millisecond
for {
select {
@@ -983,17 +1008,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error)
if isHaltErr(w.ctx, err) {
return nil, v3rpc.Error(err)
}
- if isUnavailableErr(w.ctx, err) {
- // retry, but backoff
- if backoff < maxBackoff {
- // 25% backoff factor
- backoff = backoff + backoff/4
- if backoff > maxBackoff {
- backoff = maxBackoff
- }
- }
- time.Sleep(backoff)
- }
+ backoff = w.backoffIfUnavailable(backoff, err)
}
return ws, nil
}
@@ -1022,7 +1037,7 @@ func (pr *progressRequest) toPB() *pb.WatchRequest {
func streamKeyFromCtx(ctx context.Context) string {
if md, ok := metadata.FromOutgoingContext(ctx); ok {
- return fmt.Sprintf("%+v", md)
+ return fmt.Sprintf("%+v", map[string][]string(md))
}
return ""
}
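
A self-contained sketch of the backoff progression that `backoffIfUnavailable` applies on repeated Unavailable errors: grow by 25% per failure, starting from 1ms and capped at `maxBackoff` (100ms).

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const maxBackoff = 100 * time.Millisecond
	backoff := time.Millisecond
	for i := 0; i < 25; i++ {
		// Same growth rule as backoffIfUnavailable: +25% per unavailable error,
		// clamped to maxBackoff.
		if backoff < maxBackoff {
			backoff = backoff + backoff/4
			if backoff > maxBackoff {
				backoff = maxBackoff
			}
		}
		fmt.Println(i+1, backoff) // 1.25ms, 1.5625ms, ... until it saturates at 100ms
	}
}
```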
diff --git a/client/v3/watch_test.go b/client/v3/watch_test.go
index 2a56ca4a938..721fc4a8a23 100644
--- a/client/v3/watch_test.go
+++ b/client/v3/watch_test.go
@@ -15,8 +15,11 @@
package clientv3
import (
+ "context"
"testing"
+ "google.golang.org/grpc/metadata"
+
"go.etcd.io/etcd/api/v3/mvccpb"
)
@@ -53,3 +56,53 @@ func TestEvent(t *testing.T) {
}
}
}
+
+// TestStreamKeyFromCtx tests the streamKeyFromCtx function to ensure it correctly
+// formats metadata as a map[string][]string when extracting metadata from the context.
+//
+// The fmt package in Go guarantees that maps are printed in a consistent order,
+// sorted by the keys. This test verifies that the streamKeyFromCtx function
+// produces the expected formatted string representation of metadata maps when called with
+// various context scenarios.
+func TestStreamKeyFromCtx(t *testing.T) {
+ tests := []struct {
+ name string
+ ctx context.Context
+ expected string
+ }{
+ {
+ name: "multiple keys",
+ ctx: metadata.NewOutgoingContext(context.Background(), metadata.MD{
+ "key1": []string{"value1"},
+ "key2": []string{"value2a", "value2b"},
+ }),
+ expected: "map[key1:[value1] key2:[value2a value2b]]",
+ },
+ {
+ name: "no keys",
+ ctx: metadata.NewOutgoingContext(context.Background(), metadata.MD{}),
+ expected: "map[]",
+ },
+ {
+ name: "only one key",
+ ctx: metadata.NewOutgoingContext(context.Background(), metadata.MD{
+ "key1": []string{"value1", "value1a"},
+ }),
+ expected: "map[key1:[value1 value1a]]",
+ },
+ {
+ name: "no metadata",
+ ctx: context.Background(),
+ expected: "",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ actual := streamKeyFromCtx(tt.ctx)
+ if actual != tt.expected {
+ t.Errorf("streamKeyFromCtx() = %v, expected %v", actual, tt.expected)
+ }
+ })
+ }
+}
diff --git a/client/v3/yaml/config.go b/client/v3/yaml/config.go
index 2937286d0d4..99d07236433 100644
--- a/client/v3/yaml/config.go
+++ b/client/v3/yaml/config.go
@@ -18,12 +18,12 @@ package yaml
import (
"crypto/tls"
"crypto/x509"
- "io/ioutil"
+ "os"
"sigs.k8s.io/yaml"
"go.etcd.io/etcd/client/pkg/v3/tlsutil"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
type yamlConfig struct {
@@ -42,7 +42,7 @@ type yamlConfig struct {
// NewConfig creates a new clientv3.Config from a yaml file.
func NewConfig(fpath string) (*clientv3.Config, error) {
- b, err := ioutil.ReadFile(fpath)
+ b, err := os.ReadFile(fpath)
if err != nil {
return nil, err
}
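
A brief usage sketch for `NewConfig`; the file path is illustrative, and the YAML keys are assumed to follow the field tags declared on `yamlConfig` (endpoints, TLS file paths, and so on).

```go
package main

import (
	"fmt"
	"log"

	yamlcfg "go.etcd.io/etcd/client/v3/yaml"
)

func main() {
	// "client-config.yaml" is an illustrative path to a file using the fields
	// declared in yamlConfig.
	cfg, err := yamlcfg.NewConfig("client-config.yaml")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Endpoints)
}
```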
diff --git a/client/v3/yaml/config_test.go b/client/v3/yaml/config_test.go
index dd689323703..4d23f27494e 100644
--- a/client/v3/yaml/config_test.go
+++ b/client/v3/yaml/config_test.go
@@ -15,12 +15,12 @@
package yaml
import (
- "io/ioutil"
"log"
"os"
"reflect"
"testing"
+ "github.com/stretchr/testify/require"
"sigs.k8s.io/yaml"
)
@@ -73,24 +73,17 @@ func TestConfigFromFile(t *testing.T) {
}
for i, tt := range tests {
- tmpfile, err := ioutil.TempFile("", "clientcfg")
+ tmpfile, err := os.CreateTemp("", "clientcfg")
if err != nil {
log.Fatal(err)
}
b, err := yaml.Marshal(tt.ym)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
_, err = tmpfile.Write(b)
- if err != nil {
- t.Fatal(err)
- }
- err = tmpfile.Close()
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
+ require.NoError(t, tmpfile.Close())
cfg, cerr := NewConfig(tmpfile.Name())
if cerr != nil && !tt.werr {
diff --git a/codecov.yml b/codecov.yml
index a4b3b7f2773..26d5841622c 100644
--- a/codecov.yml
+++ b/codecov.yml
@@ -1,18 +1,43 @@
+---
+# https://docs.codecov.com/docs/codecovyml-reference
codecov:
- token: "6040de41-c073-4d6f-bbf8-d89256ef31e1"
+ token: 6040de41-c073-4d6f-bbf8-d89256ef31e1
disable_default_path_fixes: true
-
+ require_ci_to_pass: false
+ notify:
+ wait_for_ci: false
fixes:
- - "go.etcd.io/etcd/api/v3/::api/"
- - "go.etcd.io/etcd/client/v3/::client/v3/"
- - "go.etcd.io/etcd/client/v2/::client/v2/"
- - "go.etcd.io/etcd/etcdctl/v3/::etcdctl/"
- - "go.etcd.io/etcd/pkg/v3/::pkg/"
- - "go.etcd.io/etcd/raft/v3/::raft/"
- - "go.etcd.io/etcd/server/v3/::server/"
-
+ - go.etcd.io/etcd/api/v3/::api/
+ - go.etcd.io/etcd/client/v3/::client/v3/
+ - go.etcd.io/etcd/client/v2/::client/v2/
+ - go.etcd.io/etcd/etcdctl/v3/::etcdctl/
+ - go.etcd.io/etcd/etcdutl/v3/::etcdutl/
+ - go.etcd.io/etcd/pkg/v3/::pkg/
+ - go.etcd.io/etcd/server/v3/::server/
ignore:
- - "**/*.pb.go"
- - "**/*.pb.gw.go"
- - "tests/**/*"
- - "go.etcd.io/etcd/tests/**/*"
+ - '**/*.pb.go'
+ - '**/*.pb.gw.go'
+ - tests/**/*
+ - go.etcd.io/etcd/tests/**/*
+coverage:
+ range: 60..80
+ round: down
+ precision: 2
+ status:
+ project:
+ default:
+ target: auto
+ # allow some coverage reductions within a threshold
+ # this allows a 1% drop from the previous base commit coverage
+ threshold: 1%
+ patch:
+ default:
+ target: auto
+ threshold: 80%
+comment:
+ layout: "header, files, diff, footer"
+ behavior: default # default: update, if exists. Otherwise post new; new: delete old and post new
+ require_changes: false # if true: only post the comment if coverage changes
+ require_base: false # [true :: must have a base report to post]
+ require_head: true # [true :: must have a head report to post]
+ hide_project_coverage: false # [true :: only show coverage on the git diff]
diff --git a/contrib/OWNERS b/contrib/OWNERS
new file mode 100644
index 00000000000..e75163c5098
--- /dev/null
+++ b/contrib/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+ - area/contrib
diff --git a/contrib/README.md b/contrib/README.md
index 33af884c1f1..b9a4d7ae3eb 100644
--- a/contrib/README.md
+++ b/contrib/README.md
@@ -2,6 +2,8 @@
Scripts and files which may be useful but aren't part of the core etcd project.
-* [systemd](systemd) - an example unit file for deploying etcd on systemd-based distributions
+* [lock](lock) - example addressing the expired lease problem of distributed locking with etcd
+* [mixin](mixin) - customisable Grafana dashboard and Prometheus alerts for etcd
* [raftexample](raftexample) - an example distributed key-value store using raft
+* [systemd](systemd) - an example unit file for deploying etcd on systemd-based distributions
* [systemd/etcd3-multinode](systemd/etcd3-multinode) - multi-node cluster setup with systemd
diff --git a/contrib/lock/README.md b/contrib/lock/README.md
index e1e408a0a2f..d33630e25fa 100644
--- a/contrib/lock/README.md
+++ b/contrib/lock/README.md
@@ -15,10 +15,10 @@ For building `client` and `storage`, just execute `go build` in each directory.
## How to try
-At first you need to start an etcd cluster, which works as lock service in the figures. On top of the etcd source directory, execute commands like below:
+At first, you need to start an etcd cluster, which works as the lock service in the figures. In the top-level directory of the etcd source tree, execute the commands below:
```
-$ ./build # build etcd
-$ goreman start
+$ make # build etcd
+$ bin/etcd # start etcd
```
Then run `storage` command in `storage` directory:
@@ -28,33 +28,45 @@ $ ./storage
Now client processes ("Client 1" and "Client 2" in the figures) can be started. At first, execute below command for starting a client process which corresponds to "Client 1":
```
-$ GODEBUG=gcstoptheworld=2 ./client 1
+$ ./client 1
```
It will show an output like this:
```
client 1 starts
-creted etcd client
-acquired lock, version: 1029195466614598192
-took 6.771998255s for allocation, took 36.217205ms for GC
-emulated stop the world GC, make sure the /lock/* key disappeared and hit any key after executing client 2:
+created etcd client and session
+acquired lock, version: 694d82254d5fa305
+please manually revoke the lease using 'etcdctl lease revoke 694d82254d5fa305' or wait for it to expire, then start executing client 2 and hit any key...
```
-The process causes stop the world GC pause for making lease expiration intentionally and waits a keyboard input. Now another client process can be started like this:
+
+Verify the lease was created using:
+```
+$ bin/etcdctl lease list
+found 1 leases
+694d82254d5fa305
+```
+
+Then proceed to manually revoke the lease using:
+```
+$ bin/etcdctl lease revoke 694d82254d5fa305
+lease 694d82254d5fa305 revoked
+```
+
+Now another client process can be started like this:
```
$ ./client 2
client 2 starts
-creted etcd client
-acquired lock, version: 4703569812595502727
+created etcd client and session
+acquired lock, version: 694d82254e18770a
this is client 2, continuing
```
-If things go well the second client process invoked as `./client 2` finishes soon. It successfully writes a key to `storage` process. After checking this, please hit any key for `./client 1` and resume the process. It will show an output like below:
+If things go well, the second client process invoked as `./client 2` finishes soon. It successfully writes a key to the `storage` process.
+
+After checking this, hit any key in the `./client 1` terminal to resume the process. It will show output like below:
```
resuming client 1
-failed to write to storage: error: given version (4703569812595502721) differ from the existing version (4703569812595502727)
+expected failure to write to storage with old lease version: error: given version (694d82254d5fa305) is different from the existing version (694d82254e18770a)
```
-### Notes on the parameters related to stop the world GC pause
-`client` program includes two constant values: `nrGarbageObjects` and `sessionTTL`. These parameters are configured for causing lease expiration with stop the world GC pause of go runtime. They heavily rely on resources of a machine for executing the example. If lease expiration doesn't happen on your machine, update these parameters and try again.
-
[fencing]: https://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html
[fencing-tokens]: https://martin.kleppmann.com/2016/02/fencing-tokens.png
[unsafe-lock]: https://martin.kleppmann.com/2016/02/unsafe-lock.png
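
The `client` and `storage` sources below implement the full flow; distilled to its core, the fencing pattern the README describes looks roughly like this sketch (single endpoint and 1s TTL matching the demo setup; illustrative only, not a drop-in replacement for the demo client):

```go
package main

import (
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

func main() {
	// Endpoint matches the single-node demo setup started with `bin/etcd`.
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"http://127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// The session's lease (TTL 1s, as in the demo client) acts as the fencing token.
	session, err := concurrency.NewSession(cli, concurrency.WithTTL(1))
	if err != nil {
		log.Fatal(err)
	}

	locker := concurrency.NewLocker(session, "/lock")
	locker.Lock()
	defer locker.Unlock()

	version := session.Lease()
	log.Printf("acquired lock, fencing token (lease ID): %x", version)
	// The demo's write helper in client.go sends this version to the storage
	// server, which rejects the write if its recorded version differs.
}
```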
diff --git a/contrib/lock/client/client.go b/contrib/lock/client/client.go
index a3e562be7cb..ddb5498e55b 100644
--- a/contrib/lock/client/client.go
+++ b/contrib/lock/client/client.go
@@ -15,56 +15,25 @@
// An example distributed locking with fencing in the case of etcd
// Based on https://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html
-// Important usage:
-// If you are invoking this program as client 1, you need to configure GODEBUG env var like below:
-// GODEBUG=gcstoptheworld=2 ./client 1
-
package main
import (
"bufio"
"bytes"
+ "context"
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
+ "log"
"net/http"
"os"
- "runtime"
"strconv"
"time"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
)
-type node struct {
- next *node
-}
-
-const (
- // These const values might be need adjustment.
- nrGarbageObjects = 100 * 1000 * 1000
- sessionTTL = 1
-)
-
-func stopTheWorld() {
- n := new(node)
- root := n
- allocStart := time.Now()
- for i := 0; i < nrGarbageObjects; i++ {
- n.next = new(node)
- n = n.next
- }
- func(n *node) {}(root) // dummy usage of root for removing a compiler error
- root = nil
- allocDur := time.Since(allocStart)
-
- gcStart := time.Now()
- runtime.GC()
- gcDur := time.Since(gcStart)
- fmt.Printf("took %v for allocation, took %v for GC\n", allocDur, gcDur)
-}
-
type request struct {
Op string `json:"op"`
Key string `json:"key"`
@@ -88,27 +57,24 @@ func write(key string, value string, version int64) error {
reqBytes, err := json.Marshal(&req)
if err != nil {
- fmt.Printf("failed to marshal request: %s\n", err)
- os.Exit(1)
+ log.Fatalf("failed to marshal request: %s", err)
}
httpResp, err := http.Post("http://localhost:8080", "application/json", bytes.NewReader(reqBytes))
if err != nil {
- fmt.Printf("failed to send a request to storage: %s\n", err)
- os.Exit(1)
+ log.Fatalf("failed to send a request to storage: %s", err)
}
- respBytes, err := ioutil.ReadAll(httpResp.Body)
+ respBytes, err := io.ReadAll(httpResp.Body)
if err != nil {
- fmt.Printf("failed to read request body: %s\n", err)
- os.Exit(1)
+ log.Fatalf("failed to read request body: %s", err)
}
+ httpResp.Body.Close()
resp := new(response)
err = json.Unmarshal(respBytes, resp)
if err != nil {
- fmt.Printf("failed to unmarshal response json: %s\n", err)
- os.Exit(1)
+ log.Fatalf("failed to unmarshal response json: %s", err)
}
if resp.Err != "" {
@@ -118,90 +84,63 @@ func write(key string, value string, version int64) error {
return nil
}
-func read(key string) (string, int64) {
- req := request{
- Op: "read",
- Key: key,
- }
-
- reqBytes, err := json.Marshal(&req)
- if err != nil {
- fmt.Printf("failed to marshal request: %s\n", err)
- os.Exit(1)
- }
-
- httpResp, err := http.Post("http://localhost:8080", "application/json", bytes.NewReader(reqBytes))
- if err != nil {
- fmt.Printf("failed to send a request to storage: %s\n", err)
- os.Exit(1)
- }
-
- respBytes, err := ioutil.ReadAll(httpResp.Body)
- if err != nil {
- fmt.Printf("failed to read request body: %s\n", err)
- os.Exit(1)
- }
-
- resp := new(response)
- err = json.Unmarshal(respBytes, resp)
- if err != nil {
- fmt.Printf("failed to unmarshal response json: %s\n", err)
- os.Exit(1)
- }
-
- return resp.Val, resp.Version
-}
-
func main() {
if len(os.Args) != 2 {
- fmt.Printf("usage: %s <1 or 2>\n", os.Args[0])
- return
+ log.Fatalf("usage: %s <1 or 2>", os.Args[0])
}
mode, err := strconv.Atoi(os.Args[1])
if err != nil || mode != 1 && mode != 2 {
- fmt.Printf("mode should be 1 or 2 (given value is %s)\n", os.Args[1])
- return
+ log.Fatalf("mode should be 1 or 2 (given value is %s)", os.Args[1])
}
- fmt.Printf("client %d starts\n", mode)
+ log.Printf("client %d starts\n", mode)
client, err := clientv3.New(clientv3.Config{
Endpoints: []string{"http://127.0.0.1:2379", "http://127.0.0.1:22379", "http://127.0.0.1:32379"},
})
if err != nil {
- fmt.Printf("failed to create an etcd client: %s\n", err)
- os.Exit(1)
+ log.Fatalf("failed to create an etcd client: %s", err)
}
- fmt.Printf("creted etcd client\n")
+ // do a connection check first, otherwise it will hang indefinitely on NewSession
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ _, err = client.MemberList(ctx)
+ if err != nil {
+ log.Fatalf("failed to reach etcd: %s", err)
+ }
- session, err := concurrency.NewSession(client, concurrency.WithTTL(sessionTTL))
+ session, err := concurrency.NewSession(client, concurrency.WithTTL(1))
if err != nil {
- fmt.Printf("failed to create a session: %s\n", err)
- os.Exit(1)
+ log.Fatalf("failed to create a session: %s", err)
}
+ log.Print("created etcd client and session")
+
locker := concurrency.NewLocker(session, "/lock")
locker.Lock()
defer locker.Unlock()
version := session.Lease()
- fmt.Printf("acquired lock, version: %d\n", version)
+ log.Printf("acquired lock, version: %x", version)
if mode == 1 {
- stopTheWorld()
- fmt.Printf("emulated stop the world GC, make sure the /lock/* key disappeared and hit any key after executing client 2: ")
+ log.Printf("please manually revoke the lease using 'etcdctl lease revoke %x' or wait for it to expire, then start executing client 2 and hit any key...", version)
reader := bufio.NewReader(os.Stdin)
- reader.ReadByte()
- fmt.Printf("resuming client 1\n")
+ _, _ = reader.ReadByte()
+ log.Print("resuming client 1")
} else {
- fmt.Printf("this is client 2, continuing\n")
+ log.Print("this is client 2, continuing\n")
}
- err = write("key0", fmt.Sprintf("value from client %d", mode), int64(version))
+ err = write("key0", fmt.Sprintf("value from client %x", mode), int64(version))
if err != nil {
- fmt.Printf("failed to write to storage: %s\n", err) // client 1 should show this message
+ if mode == 1 {
+ log.Printf("expected fail to write to storage with old lease version: %s\n", err) // client 1 should show this message
+ } else {
+ log.Fatalf("unexpected fail to write to storage: %s\n", err)
+ }
} else {
- fmt.Printf("successfully write a key to storage\n")
+ log.Printf("successfully write a key to storage using lease %x\n", int64(version))
}
}
diff --git a/contrib/lock/storage/storage.go b/contrib/lock/storage/storage.go
index 66ed6a7f2e9..7e39e38f62d 100644
--- a/contrib/lock/storage/storage.go
+++ b/contrib/lock/storage/storage.go
@@ -17,7 +17,7 @@ package main
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"os"
"strings"
@@ -57,7 +57,7 @@ func writeResponse(resp response, w http.ResponseWriter) {
}
func handler(w http.ResponseWriter, r *http.Request) {
- rBytes, err := ioutil.ReadAll(r.Body)
+ rBytes, err := io.ReadAll(r.Body)
if err != nil {
fmt.Printf("failed to read http request: %s\n", err)
os.Exit(1)
@@ -79,7 +79,7 @@ func handler(w http.ResponseWriter, r *http.Request) {
} else if strings.Compare(req.Op, "write") == 0 {
if val, ok := data[req.Key]; ok {
if req.Version != val.version {
- writeResponse(response{"", -1, fmt.Sprintf("given version (%d) is different from the existing version (%d)", req.Version, val.version)}, w)
+ writeResponse(response{"", -1, fmt.Sprintf("given version (%x) is different from the existing version (%x)", req.Version, val.version)}, w)
} else {
data[req.Key].val = req.Val
data[req.Key].version = req.Version
@@ -90,12 +90,22 @@ func handler(w http.ResponseWriter, r *http.Request) {
writeResponse(response{req.Val, req.Version, ""}, w)
}
} else {
- fmt.Printf("unknown op: %s\n", req.Op)
+ fmt.Printf("unknown op: %s\n", escape(req.Op))
return
}
}
+func escape(s string) string {
+ escaped := strings.ReplaceAll(s, "\n", " ")
+ escaped = strings.ReplaceAll(escaped, "\r", " ")
+ return escaped
+}
+
func main() {
http.HandleFunc("/", handler)
- http.ListenAndServe(":8080", nil)
+ err := http.ListenAndServe(":8080", nil)
+ if err != nil {
+ fmt.Printf("failed to listen and serve: %s\n", err)
+ os.Exit(1)
+ }
}
diff --git a/contrib/mixin/.gitignore b/contrib/mixin/.gitignore
new file mode 100644
index 00000000000..22d0d82f809
--- /dev/null
+++ b/contrib/mixin/.gitignore
@@ -0,0 +1 @@
+vendor
diff --git a/contrib/mixin/.lint b/contrib/mixin/.lint
new file mode 100644
index 00000000000..d373b8e2f1d
--- /dev/null
+++ b/contrib/mixin/.lint
@@ -0,0 +1,18 @@
+---
+exclusions:
+ template-instance-rule:
+ reason: The mixin only uses `instance` for alerts, and `cluster` for dashboard queries
+ template-job-rule:
+ reason: The dashboards use 'cluster' label as selector, rather than 'job'
+ target-job-rule:
+ reason: The mixin uses 'cluster' instead of 'job'
+ target-instance-rule:
+ reason: The mixin only uses `instance` for alerts, and `cluster` for dashboard queries
+ alert-name-camelcase:
+ reason: etcd is spelled all lowercase, meaning all alert names start with a lowercase letter
+ alert-summary-style:
+ reason: etcd is spelled all lowercase, meaning summaries starting with 'etcd' are still valid
+ panel-units-rule:
+ reason: Stat panels have no unit, and some panels use custom unit or text
+ panel-title-description-rule:
+ reason: Suppress noisy linting rule until we can address minor tech debt like this
diff --git a/contrib/mixin/Makefile b/contrib/mixin/Makefile
new file mode 100644
index 00000000000..dbbcb1363eb
--- /dev/null
+++ b/contrib/mixin/Makefile
@@ -0,0 +1,27 @@
+.PHONY: tools manifests test clean jb_install
+
+OS := linux
+ARCH ?= amd64
+PROMETHEUS_VERSION := 2.33.1
+
+tools:
+ go install github.com/google/go-jsonnet/cmd/jsonnet@latest
+ go install github.com/brancz/gojsontoyaml@latest
+ go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest
+ wget -qO- "https://github.com/prometheus/prometheus/releases/download/v${PROMETHEUS_VERSION}/prometheus-${PROMETHEUS_VERSION}.${OS}-${ARCH}.tar.gz" |\
+ tar xvz --strip-components=1 -C "$$(go env GOPATH)/bin" prometheus-${PROMETHEUS_VERSION}.${OS}-${ARCH}/promtool
+
+manifests: manifests/etcd-prometheusRules.yaml
+
+manifests/etcd-prometheusRules.yaml:
+ mkdir -p manifests
+ jsonnet -e '(import "mixin.libsonnet").prometheusAlerts' | gojsontoyaml > manifests/etcd-prometheusRules.yaml
+
+test: manifests/etcd-prometheusRules.yaml
+ promtool test rules test.yaml
+
+jb_install:
+ jb install
+
+clean:
+ rm -rf manifests/*.yaml
diff --git a/contrib/mixin/OWNERS b/contrib/mixin/OWNERS
new file mode 100644
index 00000000000..c299e9b517c
--- /dev/null
+++ b/contrib/mixin/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+ - area/observability
diff --git a/contrib/mixin/README.md b/contrib/mixin/README.md
index 224066f457d..0da7ea8fc37 100644
--- a/contrib/mixin/README.md
+++ b/contrib/mixin/README.md
@@ -2,21 +2,35 @@
> NOTE: This project is *alpha* stage. Flags, configuration, behaviour and design may change significantly in following releases.
-A set of customisable Prometheus alerts for etcd.
+A customisable Grafana dashboard and set of Prometheus alerts for etcd.
Instructions for use are the same as the [kubernetes-mixin](https://github.com/kubernetes-monitoring/kubernetes-mixin).
+## Grafana 7.x support
+
+By default, this mixin generates a dashboard compatible with Grafana 8.x or newer.
+To generate a dashboard for Grafana 7.x, set the following in config.libsonnet:
+
+```
+// set to true if dashboards should be compatible with Grafana 7x or earlier
+grafana7x: true,
+```
+
## Background
* For more information about monitoring mixins, see this [design doc](https://docs.google.com/document/d/1A9xvzwqnFVSOZ5fD3blKODXfsat5fg6ZhnKu9LK3lB4/edit#).
## Testing alerts
-Make sure to have [jsonnet](https://jsonnet.org/) and [gojsontoyaml](https://github.com/brancz/gojsontoyaml) installed.
+Make sure to have [jsonnet](https://jsonnet.org/) and [gojsontoyaml](https://github.com/brancz/gojsontoyaml) installed. You can fetch them via
+
+```
+make tools
+```
First compile the mixin to a YAML file, which the promtool will read:
```
-jsonnet -e '(import "mixin.libsonnet").prometheusAlerts' | gojsontoyaml > mixin.yaml
+make manifests
```
Then run the unit test:
diff --git a/contrib/mixin/alerts/alerts.libsonnet b/contrib/mixin/alerts/alerts.libsonnet
new file mode 100644
index 00000000000..856fe4c1e38
--- /dev/null
+++ b/contrib/mixin/alerts/alerts.libsonnet
@@ -0,0 +1,240 @@
+{
+ prometheusAlerts+:: {
+ groups+: [
+ {
+ name: 'etcd',
+ rules: [
+ {
+ alert: 'etcdMembersDown',
+ expr: |||
+ max without (endpoint) (
+ sum without (%(etcd_instance_labels)s) (up{%(etcd_selector)s} == bool 0)
+ or
+ count without (To) (
+ sum without (%(etcd_instance_labels)s) (rate(etcd_network_peer_sent_failures_total{%(etcd_selector)s}[%(network_failure_range)ss])) > 0.01
+ )
+ )
+ > 0
+ ||| % { etcd_instance_labels: $._config.etcd_instance_labels, etcd_selector: $._config.etcd_selector, network_failure_range: $._config.scrape_interval_seconds * 4 },
+ 'for': '10m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ description: 'etcd cluster "{{ $labels.%s }}": members are down ({{ $value }}).' % $._config.clusterLabel,
+ summary: 'etcd cluster members are down.',
+ },
+ },
+ {
+ alert: 'etcdInsufficientMembers',
+ expr: |||
+ sum(up{%(etcd_selector)s} == bool 1) without (%(etcd_instance_labels)s) < ((count(up{%(etcd_selector)s}) without (%(etcd_instance_labels)s) + 1) / 2)
+ ||| % $._config,
+ 'for': '3m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ description: 'etcd cluster "{{ $labels.%s }}": insufficient members ({{ $value }}).' % $._config.clusterLabel,
+ summary: 'etcd cluster has insufficient number of members.',
+ },
+ },
+ {
+ alert: 'etcdNoLeader',
+ expr: |||
+ etcd_server_has_leader{%(etcd_selector)s} == 0
+ ||| % $._config,
+ 'for': '1m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ description: 'etcd cluster "{{ $labels.%s }}": member {{ $labels.instance }} has no leader.' % $._config.clusterLabel,
+ summary: 'etcd cluster has no leader.',
+ },
+ },
+ {
+ alert: 'etcdHighNumberOfLeaderChanges',
+ expr: |||
+ increase((max without (%(etcd_instance_labels)s) (etcd_server_leader_changes_seen_total{%(etcd_selector)s}) or 0*absent(etcd_server_leader_changes_seen_total{%(etcd_selector)s}))[15m:1m]) >= 4
+ ||| % $._config,
+ 'for': '5m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'etcd cluster "{{ $labels.%s }}": {{ $value }} leader changes within the last 15 minutes. Frequent elections may be a sign of insufficient resources, high network latency, or disruptions by other components and should be investigated.' % $._config.clusterLabel,
+ summary: 'etcd cluster has high number of leader changes.',
+ },
+ },
+ {
+ alert: 'etcdHighNumberOfFailedGRPCRequests',
+ expr: |||
+ 100 * sum(rate(grpc_server_handled_total{%(etcd_selector)s, grpc_code=~"Unknown|FailedPrecondition|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded"}[5m])) without (grpc_type, grpc_code)
+ /
+ sum(rate(grpc_server_handled_total{%(etcd_selector)s}[5m])) without (grpc_type, grpc_code)
+ > 1
+ ||| % $._config,
+ 'for': '10m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'etcd cluster "{{ $labels.%s }}": {{ $value }}%% of requests for {{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}.' % $._config.clusterLabel,
+ summary: 'etcd cluster has high number of failed grpc requests.',
+ },
+ },
+ {
+ alert: 'etcdHighNumberOfFailedGRPCRequests',
+ expr: |||
+ 100 * sum(rate(grpc_server_handled_total{%(etcd_selector)s, grpc_code=~"Unknown|FailedPrecondition|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded"}[5m])) without (grpc_type, grpc_code)
+ /
+ sum(rate(grpc_server_handled_total{%(etcd_selector)s}[5m])) without (grpc_type, grpc_code)
+ > 5
+ ||| % $._config,
+ 'for': '5m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ description: 'etcd cluster "{{ $labels.%s }}": {{ $value }}%% of requests for {{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}.' % $._config.clusterLabel,
+ summary: 'etcd cluster has high number of failed grpc requests.',
+ },
+ },
+ {
+ alert: 'etcdGRPCRequestsSlow',
+ expr: |||
+ histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{%(etcd_selector)s, grpc_method!="Defragment", grpc_type="unary"}[5m])) without(grpc_type))
+ > 0.15
+ ||| % $._config,
+ 'for': '10m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ description: 'etcd cluster "{{ $labels.%s }}": 99th percentile of gRPC requests is {{ $value }}s on etcd instance {{ $labels.instance }} for {{ $labels.grpc_method }} method.' % $._config.clusterLabel,
+ summary: 'etcd grpc requests are slow',
+ },
+ },
+ {
+ alert: 'etcdMemberCommunicationSlow',
+ expr: |||
+ histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket{%(etcd_selector)s}[5m]))
+ > 0.15
+ ||| % $._config,
+ 'for': '10m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'etcd cluster "{{ $labels.%s }}": member communication with {{ $labels.To }} is taking {{ $value }}s on etcd instance {{ $labels.instance }}.' % $._config.clusterLabel,
+ summary: 'etcd cluster member communication is slow.',
+ },
+ },
+ {
+ alert: 'etcdHighNumberOfFailedProposals',
+ expr: |||
+ rate(etcd_server_proposals_failed_total{%(etcd_selector)s}[15m]) > 5
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'etcd cluster "{{ $labels.%s }}": {{ $value }} proposal failures within the last 30 minutes on etcd instance {{ $labels.instance }}.' % $._config.clusterLabel,
+ summary: 'etcd cluster has high number of proposal failures.',
+ },
+ },
+ {
+ alert: 'etcdHighFsyncDurations',
+ expr: |||
+ histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{%(etcd_selector)s}[5m]))
+ > 0.5
+ ||| % $._config,
+ 'for': '10m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'etcd cluster "{{ $labels.%s }}": 99th percentile fsync durations are {{ $value }}s on etcd instance {{ $labels.instance }}.' % $._config.clusterLabel,
+ summary: 'etcd cluster 99th percentile fsync durations are too high.',
+ },
+ },
+ {
+ alert: 'etcdHighFsyncDurations',
+ expr: |||
+ histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{%(etcd_selector)s}[5m]))
+ > 1
+ ||| % $._config,
+ 'for': '10m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ description: 'etcd cluster "{{ $labels.%s }}": 99th percentile fsync durations are {{ $value }}s on etcd instance {{ $labels.instance }}.' % $._config.clusterLabel,
+ summary: 'etcd cluster 99th percentile fsync durations are too high.',
+ },
+ },
+ {
+ alert: 'etcdHighCommitDurations',
+ expr: |||
+ histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket{%(etcd_selector)s}[5m]))
+ > 0.25
+ ||| % $._config,
+ 'for': '10m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'etcd cluster "{{ $labels.%s }}": 99th percentile commit durations {{ $value }}s on etcd instance {{ $labels.instance }}.' % $._config.clusterLabel,
+ summary: 'etcd cluster 99th percentile commit durations are too high.',
+ },
+ },
+ {
+ alert: 'etcdDatabaseQuotaLowSpace',
+ expr: |||
+ (last_over_time(etcd_mvcc_db_total_size_in_bytes{%(etcd_selector)s}[5m]) / last_over_time(etcd_server_quota_backend_bytes{%(etcd_selector)s}[5m]))*100 > 95
+ ||| % $._config,
+ 'for': '10m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ description: 'etcd cluster "{{ $labels.%s }}": database size exceeds the defined quota on etcd instance {{ $labels.instance }}, please defrag or increase the quota as the writes to etcd will be disabled when it is full.' % $._config.clusterLabel,
+ summary: 'etcd cluster database is running full.',
+ },
+ },
+ {
+ alert: 'etcdExcessiveDatabaseGrowth',
+ expr: |||
+ predict_linear(etcd_mvcc_db_total_size_in_bytes{%(etcd_selector)s}[4h], 4*60*60) > etcd_server_quota_backend_bytes{%(etcd_selector)s}
+ ||| % $._config,
+ 'for': '10m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'etcd cluster "{{ $labels.%s }}": Predicting running out of disk space in the next four hours, based on write observations within the past four hours on etcd instance {{ $labels.instance }}, please check as it might be disruptive.' % $._config.clusterLabel,
+ summary: 'etcd cluster database growing very fast.',
+ },
+ },
+ {
+ alert: 'etcdDatabaseHighFragmentationRatio',
+ expr: |||
+ (last_over_time(etcd_mvcc_db_total_size_in_use_in_bytes{%(etcd_selector)s}[5m]) / last_over_time(etcd_mvcc_db_total_size_in_bytes{%(etcd_selector)s}[5m])) < 0.5 and etcd_mvcc_db_total_size_in_use_in_bytes{%(etcd_selector)s} > 104857600
+ ||| % $._config,
+ 'for': '10m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'etcd cluster "{{ $labels.%s }}": database size in use on instance {{ $labels.instance }} is {{ $value | humanizePercentage }} of the actual allocated disk space, please run defragmentation (e.g. etcdctl defrag) to retrieve the unused fragmented disk space.' % $._config.clusterLabel,
+ summary: 'etcd database size in use is less than 50% of the actual allocated storage.',
+ runbook_url: 'https://etcd.io/docs/v3.5/op-guide/maintenance/#defragmentation',
+ },
+ },
+ ],
+ },
+ ],
+ },
+}
diff --git a/contrib/mixin/config.libsonnet b/contrib/mixin/config.libsonnet
new file mode 100644
index 00000000000..d27b13091e9
--- /dev/null
+++ b/contrib/mixin/config.libsonnet
@@ -0,0 +1,26 @@
+{
+
+ _config+:: {
+
+ // set to true if dashboards should be compatible with Grafana 7x or earlier
+ grafana7x: false,
+
+ etcd_selector: 'job=~".*etcd.*"',
+ // etcd_instance_labels are the label names that uniquely
+ // identify an instance and need to be aggregated away for alerts
+ // that are about an etcd cluster as a whole. For example, if etcd
+ // instances are deployed on K8s, you will likely want to change
+ // this to 'instance, pod'.
+ etcd_instance_labels: 'instance',
+ // scrape_interval_seconds is the global scrape interval which can be
+ // used to dynamically adjust rate windows as a function of the interval.
+ scrape_interval_seconds: 30,
+ // Dashboard variable refresh option on Grafana (https://grafana.com/docs/grafana/latest/datasources/prometheus/).
+ // 0 : Never (never refresh the dashboard variables)
+ // 1 : On Dashboard Load (refresh the dashboard variables when the dashboard loads)
+ // 2 : On Time Range Change (refresh the dashboard variables when the time range changes)
+ dashboard_var_refresh: 2,
+ // clusterLabel is used to identify a cluster.
+ clusterLabel: 'job',
+ },
+}
diff --git a/contrib/mixin/dashboards/dashboards.libsonnet b/contrib/mixin/dashboards/dashboards.libsonnet
new file mode 100644
index 00000000000..af089a68f55
--- /dev/null
+++ b/contrib/mixin/dashboards/dashboards.libsonnet
@@ -0,0 +1,2 @@
+(import "etcd.libsonnet") +
+(import "etcd-grafana7x.libsonnet")
diff --git a/contrib/mixin/dashboards/etcd-grafana7x.libsonnet b/contrib/mixin/dashboards/etcd-grafana7x.libsonnet
new file mode 100644
index 00000000000..38d5085c927
--- /dev/null
+++ b/contrib/mixin/dashboards/etcd-grafana7x.libsonnet
@@ -0,0 +1,1186 @@
+{
+ grafanaDashboards+:: if $._config.grafana7x then {
+ 'etcd.json': {
+ uid: std.md5('etcd.json'),
+ title: 'etcd',
+ description: 'etcd sample Grafana dashboard with Prometheus',
+ tags: ['etcd-mixin'],
+ style: 'dark',
+ timezone: 'browser',
+ editable: true,
+ hideControls: false,
+ sharedCrosshair: false,
+ rows: [
+ {
+ collapse: false,
+ editable: true,
+ height: '250px',
+ panels: [
+ {
+ cacheTimeout: null,
+ colorBackground: false,
+ colorValue: false,
+ colors: [
+ 'rgba(245, 54, 54, 0.9)',
+ 'rgba(237, 129, 40, 0.89)',
+ 'rgba(50, 172, 45, 0.97)',
+ ],
+ datasource: '$datasource',
+ editable: true,
+ 'error': false,
+ format: 'none',
+ gauge: {
+ maxValue: 100,
+ minValue: 0,
+ show: false,
+ thresholdLabels: false,
+ thresholdMarkers: true,
+ },
+ id: 28,
+ interval: null,
+ isNew: true,
+ links: [],
+ mappingType: 1,
+ mappingTypes: [
+ {
+ name: 'value to text',
+ value: 1,
+ },
+ {
+ name: 'range to text',
+ value: 2,
+ },
+ ],
+ maxDataPoints: 100,
+ nullPointMode: 'connected',
+ nullText: null,
+ postfix: '',
+ postfixFontSize: '50%',
+ prefix: '',
+ prefixFontSize: '50%',
+ rangeMaps: [{
+ from: 'null',
+ text: 'N/A',
+ to: 'null',
+ }],
+ span: 3,
+ sparkline: {
+ fillColor: 'rgba(31, 118, 189, 0.18)',
+ full: false,
+ lineColor: 'rgb(31, 120, 193)',
+ show: false,
+ },
+ targets: [{
+ expr: 'sum(etcd_server_has_leader{%s, %s="$cluster"})' % [$._config.etcd_selector, $._config.clusterLabel],
+ intervalFactor: 2,
+ legendFormat: '',
+ metric: 'etcd_server_has_leader',
+ refId: 'A',
+ step: 20,
+ }],
+ thresholds: '',
+ title: 'Up',
+ type: 'singlestat',
+ valueFontSize: '200%',
+ valueMaps: [{
+ op: '=',
+ text: 'N/A',
+ value: 'null',
+ }],
+ valueName: 'avg',
+ },
+ {
+ aliasColors: {},
+ bars: false,
+ datasource: '$datasource',
+ editable: true,
+ 'error': false,
+ fill: 0,
+ id: 23,
+ isNew: true,
+ legend: {
+ avg: false,
+ current: false,
+ max: false,
+ min: false,
+ show: false,
+ total: false,
+ values: false,
+ },
+ lines: true,
+ linewidth: 2,
+ links: [],
+ nullPointMode: 'connected',
+ percentage: false,
+ pointradius: 5,
+ points: false,
+ renderer: 'flot',
+ seriesOverrides: [],
+ span: 5,
+ stack: false,
+ steppedLine: false,
+ targets: [
+ {
+ expr: 'sum(rate(grpc_server_started_total{%s, %s="$cluster",grpc_type="unary"}[$__rate_interval]))' % [$._config.etcd_selector, $._config.clusterLabel],
+ format: 'time_series',
+ intervalFactor: 2,
+ legendFormat: 'RPC Rate',
+ metric: 'grpc_server_started_total',
+ refId: 'A',
+ step: 2,
+ },
+ {
+ expr: 'sum(rate(grpc_server_handled_total{%s, %s="$cluster",grpc_type="unary",grpc_code=~"Unknown|FailedPrecondition|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded"}[$__rate_interval]))' % [$._config.etcd_selector, $._config.clusterLabel],
+ format: 'time_series',
+ intervalFactor: 2,
+ legendFormat: 'RPC Failed Rate',
+ metric: 'grpc_server_handled_total',
+ refId: 'B',
+ step: 2,
+ },
+ ],
+ thresholds: [],
+ timeFrom: null,
+ timeShift: null,
+ title: 'RPC Rate',
+ tooltip: {
+ msResolution: false,
+ shared: true,
+ sort: 0,
+ value_type: 'individual',
+ },
+ type: 'graph',
+ xaxis: {
+ mode: 'time',
+ name: null,
+ show: true,
+ values: [],
+ },
+ yaxes: [
+ {
+ format: 'ops',
+ label: null,
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ {
+ format: 'short',
+ label: null,
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ ],
+ },
+ {
+ aliasColors: {},
+ bars: false,
+ datasource: '$datasource',
+ editable: true,
+ 'error': false,
+ fill: 0,
+ id: 41,
+ isNew: true,
+ legend: {
+ avg: false,
+ current: false,
+ max: false,
+ min: false,
+ show: false,
+ total: false,
+ values: false,
+ },
+ lines: true,
+ linewidth: 2,
+ links: [],
+ nullPointMode: 'connected',
+ percentage: false,
+ pointradius: 5,
+ points: false,
+ renderer: 'flot',
+ seriesOverrides: [],
+ span: 4,
+ stack: true,
+ steppedLine: false,
+ targets: [
+ {
+ expr: 'sum(grpc_server_started_total{%(etcd_selector)s,%(clusterLabel)s="$cluster",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{%(clusterLabel)s="$cluster",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"})' % $._config,
+ intervalFactor: 2,
+ legendFormat: 'Watch Streams',
+ metric: 'grpc_server_handled_total',
+ refId: 'A',
+ step: 4,
+ },
+ {
+ expr: 'sum(grpc_server_started_total{%(etcd_selector)s,%(clusterLabel)s="$cluster",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{%(clusterLabel)s="$cluster",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"})' % $._config,
+ intervalFactor: 2,
+ legendFormat: 'Lease Streams',
+ metric: 'grpc_server_handled_total',
+ refId: 'B',
+ step: 4,
+ },
+ ],
+ thresholds: [],
+ timeFrom: null,
+ timeShift: null,
+ title: 'Active Streams',
+ tooltip: {
+ msResolution: false,
+ shared: true,
+ sort: 0,
+ value_type: 'individual',
+ },
+ type: 'graph',
+ xaxis: {
+ mode: 'time',
+ name: null,
+ show: true,
+ values: [],
+ },
+ yaxes: [
+ {
+ format: 'short',
+ label: '',
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ {
+ format: 'short',
+ label: null,
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ ],
+ },
+ ],
+ showTitle: false,
+ title: 'Row',
+ },
+ {
+ collapse: false,
+ editable: true,
+ height: '250px',
+ panels: [
+ {
+ aliasColors: {},
+ bars: false,
+ datasource: '$datasource',
+ decimals: null,
+ editable: true,
+ 'error': false,
+ fill: 0,
+ grid: {},
+ id: 1,
+ legend: {
+ avg: false,
+ current: false,
+ max: false,
+ min: false,
+ show: false,
+ total: false,
+ values: false,
+ },
+ lines: true,
+ linewidth: 2,
+ links: [],
+ nullPointMode: 'connected',
+ percentage: false,
+ pointradius: 5,
+ points: false,
+ renderer: 'flot',
+ seriesOverrides: [],
+ span: 4,
+ stack: false,
+ steppedLine: false,
+ targets: [{
+ expr: 'etcd_mvcc_db_total_size_in_bytes{%s, %s="$cluster"}' % [$._config.etcd_selector, $._config.clusterLabel],
+ hide: false,
+ interval: '',
+ intervalFactor: 2,
+ legendFormat: '{{instance}} DB Size',
+ metric: '',
+ refId: 'A',
+ step: 4,
+ }],
+ thresholds: [],
+ timeFrom: null,
+ timeShift: null,
+ title: 'DB Size',
+ tooltip: {
+ msResolution: false,
+ shared: true,
+ sort: 0,
+ value_type: 'cumulative',
+ },
+ type: 'graph',
+ xaxis: {
+ mode: 'time',
+ name: null,
+ show: true,
+ values: [],
+ },
+ yaxes: [
+ {
+ format: 'bytes',
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ {
+ format: 'short',
+ logBase: 1,
+ max: null,
+ min: null,
+ show: false,
+ },
+ ],
+ },
+ {
+ aliasColors: {},
+ bars: false,
+ datasource: '$datasource',
+ editable: true,
+ 'error': false,
+ fill: 0,
+ grid: {},
+ id: 3,
+ legend: {
+ avg: false,
+ current: false,
+ max: false,
+ min: false,
+ show: false,
+ total: false,
+ values: false,
+ },
+ lines: true,
+ linewidth: 2,
+ links: [],
+ nullPointMode: 'connected',
+ percentage: false,
+ pointradius: 1,
+ points: false,
+ renderer: 'flot',
+ seriesOverrides: [],
+ span: 4,
+ stack: false,
+ steppedLine: true,
+ targets: [
+ {
+ expr: 'histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket{%s, %s="$cluster"}[$__rate_interval])) by (instance, le))' % [$._config.etcd_selector, $._config.clusterLabel],
+ hide: false,
+ intervalFactor: 2,
+ legendFormat: '{{instance}} WAL fsync',
+ metric: 'etcd_disk_wal_fsync_duration_seconds_bucket',
+ refId: 'A',
+ step: 4,
+ },
+ {
+ expr: 'histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket{%s, %s="$cluster"}[$__rate_interval])) by (instance, le))' % [$._config.etcd_selector, $._config.clusterLabel],
+ intervalFactor: 2,
+ legendFormat: '{{instance}} DB fsync',
+ metric: 'etcd_disk_backend_commit_duration_seconds_bucket',
+ refId: 'B',
+ step: 4,
+ },
+ ],
+ thresholds: [],
+ timeFrom: null,
+ timeShift: null,
+ title: 'Disk Sync Duration',
+ tooltip: {
+ msResolution: false,
+ shared: true,
+ sort: 0,
+ value_type: 'cumulative',
+ },
+ type: 'graph',
+ xaxis: {
+ mode: 'time',
+ name: null,
+ show: true,
+ values: [],
+ },
+ yaxes: [
+ {
+ format: 's',
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ {
+ format: 'short',
+ logBase: 1,
+ max: null,
+ min: null,
+ show: false,
+ },
+ ],
+ },
+ {
+ aliasColors: {},
+ bars: false,
+ datasource: '$datasource',
+ editable: true,
+ 'error': false,
+ fill: 0,
+ id: 29,
+ isNew: true,
+ legend: {
+ avg: false,
+ current: false,
+ max: false,
+ min: false,
+ show: false,
+ total: false,
+ values: false,
+ },
+ lines: true,
+ linewidth: 2,
+ links: [],
+ nullPointMode: 'connected',
+ percentage: false,
+ pointradius: 5,
+ points: false,
+ renderer: 'flot',
+ seriesOverrides: [],
+ span: 4,
+ stack: false,
+ steppedLine: false,
+ targets: [{
+ expr: 'process_resident_memory_bytes{%s, %s="$cluster"}' % [$._config.etcd_selector, $._config.clusterLabel],
+ intervalFactor: 2,
+ legendFormat: '{{instance}} Resident Memory',
+ metric: 'process_resident_memory_bytes',
+ refId: 'A',
+ step: 4,
+ }],
+ thresholds: [],
+ timeFrom: null,
+ timeShift: null,
+ title: 'Memory',
+ tooltip: {
+ msResolution: false,
+ shared: true,
+ sort: 0,
+ value_type: 'individual',
+ },
+ type: 'graph',
+ xaxis: {
+ mode: 'time',
+ name: null,
+ show: true,
+ values: [],
+ },
+ yaxes: [
+ {
+ format: 'bytes',
+ label: null,
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ {
+ format: 'short',
+ label: null,
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ ],
+ },
+ ],
+ title: 'New row',
+ },
+ {
+ collapse: false,
+ editable: true,
+ height: '250px',
+ panels: [
+ {
+ aliasColors: {},
+ bars: false,
+ datasource: '$datasource',
+ editable: true,
+ 'error': false,
+ fill: 5,
+ id: 22,
+ isNew: true,
+ legend: {
+ avg: false,
+ current: false,
+ max: false,
+ min: false,
+ show: false,
+ total: false,
+ values: false,
+ },
+ lines: true,
+ linewidth: 2,
+ links: [],
+ nullPointMode: 'connected',
+ percentage: false,
+ pointradius: 5,
+ points: false,
+ renderer: 'flot',
+ seriesOverrides: [],
+ span: 3,
+ stack: true,
+ steppedLine: false,
+ targets: [{
+ expr: 'rate(etcd_network_client_grpc_received_bytes_total{%s, %s="$cluster"}[$__rate_interval])' % [$._config.etcd_selector, $._config.clusterLabel],
+ intervalFactor: 2,
+ legendFormat: '{{instance}} Client Traffic In',
+ metric: 'etcd_network_client_grpc_received_bytes_total',
+ refId: 'A',
+ step: 4,
+ }],
+ thresholds: [],
+ timeFrom: null,
+ timeShift: null,
+ title: 'Client Traffic In',
+ tooltip: {
+ msResolution: false,
+ shared: true,
+ sort: 0,
+ value_type: 'individual',
+ },
+ type: 'graph',
+ xaxis: {
+ mode: 'time',
+ name: null,
+ show: true,
+ values: [],
+ },
+ yaxes: [
+ {
+ format: 'Bps',
+ label: null,
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ {
+ format: 'short',
+ label: null,
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ ],
+ },
+ {
+ aliasColors: {},
+ bars: false,
+ datasource: '$datasource',
+ editable: true,
+ 'error': false,
+ fill: 5,
+ id: 21,
+ isNew: true,
+ legend: {
+ avg: false,
+ current: false,
+ max: false,
+ min: false,
+ show: false,
+ total: false,
+ values: false,
+ },
+ lines: true,
+ linewidth: 2,
+ links: [],
+ nullPointMode: 'connected',
+ percentage: false,
+ pointradius: 5,
+ points: false,
+ renderer: 'flot',
+ seriesOverrides: [],
+ span: 3,
+ stack: true,
+ steppedLine: false,
+ targets: [{
+ expr: 'rate(etcd_network_client_grpc_sent_bytes_total{%s, %s="$cluster"}[$__rate_interval])' % [$._config.etcd_selector, $._config.clusterLabel],
+ intervalFactor: 2,
+ legendFormat: '{{instance}} Client Traffic Out',
+ metric: 'etcd_network_client_grpc_sent_bytes_total',
+ refId: 'A',
+ step: 4,
+ }],
+ thresholds: [],
+ timeFrom: null,
+ timeShift: null,
+ title: 'Client Traffic Out',
+ tooltip: {
+ msResolution: false,
+ shared: true,
+ sort: 0,
+ value_type: 'individual',
+ },
+ type: 'graph',
+ xaxis: {
+ mode: 'time',
+ name: null,
+ show: true,
+ values: [],
+ },
+ yaxes: [
+ {
+ format: 'Bps',
+ label: null,
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ {
+ format: 'short',
+ label: null,
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ ],
+ },
+ {
+ aliasColors: {},
+ bars: false,
+ datasource: '$datasource',
+ editable: true,
+ 'error': false,
+ fill: 0,
+ id: 20,
+ isNew: true,
+ legend: {
+ avg: false,
+ current: false,
+ max: false,
+ min: false,
+ show: false,
+ total: false,
+ values: false,
+ },
+ lines: true,
+ linewidth: 2,
+ links: [],
+ nullPointMode: 'connected',
+ percentage: false,
+ pointradius: 5,
+ points: false,
+ renderer: 'flot',
+ seriesOverrides: [],
+ span: 3,
+ stack: false,
+ steppedLine: false,
+ targets: [{
+ expr: 'sum(rate(etcd_network_peer_received_bytes_total{%s, %s="$cluster"}[$__rate_interval])) by (instance)' % [$._config.etcd_selector, $._config.clusterLabel],
+ intervalFactor: 2,
+ legendFormat: '{{instance}} Peer Traffic In',
+ metric: 'etcd_network_peer_received_bytes_total',
+ refId: 'A',
+ step: 4,
+ }],
+ thresholds: [],
+ timeFrom: null,
+ timeShift: null,
+ title: 'Peer Traffic In',
+ tooltip: {
+ msResolution: false,
+ shared: true,
+ sort: 0,
+ value_type: 'individual',
+ },
+ type: 'graph',
+ xaxis: {
+ mode: 'time',
+ name: null,
+ show: true,
+ values: [],
+ },
+ yaxes: [
+ {
+ format: 'Bps',
+ label: null,
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ {
+ format: 'short',
+ label: null,
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ ],
+ },
+ {
+ aliasColors: {},
+ bars: false,
+ datasource: '$datasource',
+ decimals: null,
+ editable: true,
+ 'error': false,
+ fill: 0,
+ grid: {},
+ id: 16,
+ legend: {
+ avg: false,
+ current: false,
+ max: false,
+ min: false,
+ show: false,
+ total: false,
+ values: false,
+ },
+ lines: true,
+ linewidth: 2,
+ links: [],
+ nullPointMode: 'connected',
+ percentage: false,
+ pointradius: 5,
+ points: false,
+ renderer: 'flot',
+ seriesOverrides: [],
+ span: 3,
+ stack: false,
+ steppedLine: false,
+ targets: [{
+ expr: 'sum(rate(etcd_network_peer_sent_bytes_total{%s, %s="$cluster"}[$__rate_interval])) by (instance)' % [$._config.etcd_selector, $._config.clusterLabel],
+ hide: false,
+ interval: '',
+ intervalFactor: 2,
+ legendFormat: '{{instance}} Peer Traffic Out',
+ metric: 'etcd_network_peer_sent_bytes_total',
+ refId: 'A',
+ step: 4,
+ }],
+ thresholds: [],
+ timeFrom: null,
+ timeShift: null,
+ title: 'Peer Traffic Out',
+ tooltip: {
+ msResolution: false,
+ shared: true,
+ sort: 0,
+ value_type: 'cumulative',
+ },
+ type: 'graph',
+ xaxis: {
+ mode: 'time',
+ name: null,
+ show: true,
+ values: [],
+ },
+ yaxes: [
+ {
+ format: 'Bps',
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ {
+ format: 'short',
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ ],
+ },
+ ],
+ title: 'New row',
+ },
+ {
+ collapse: false,
+ editable: true,
+ height: '250px',
+ panels: [
+ {
+ aliasColors: {},
+ bars: false,
+ datasource: '$datasource',
+ editable: true,
+ 'error': false,
+ fill: 0,
+ id: 40,
+ isNew: true,
+ legend: {
+ avg: false,
+ current: false,
+ max: false,
+ min: false,
+ show: false,
+ total: false,
+ values: false,
+ },
+ lines: true,
+ linewidth: 2,
+ links: [],
+ nullPointMode: 'connected',
+ percentage: false,
+ pointradius: 5,
+ points: false,
+ renderer: 'flot',
+ seriesOverrides: [],
+ span: 6,
+ stack: false,
+ steppedLine: false,
+ targets: [
+ {
+ expr: 'sum(rate(etcd_server_proposals_failed_total{%s, %s="$cluster"}[$__rate_interval]))' % [$._config.etcd_selector, $._config.clusterLabel],
+ intervalFactor: 2,
+ legendFormat: 'Proposal Failure Rate',
+ metric: 'etcd_server_proposals_failed_total',
+ refId: 'A',
+ step: 2,
+ },
+ {
+ expr: 'sum(etcd_server_proposals_pending{%s, %s="$cluster"})' % [$._config.etcd_selector, $._config.clusterLabel],
+ intervalFactor: 2,
+ legendFormat: 'Proposal Pending Total',
+ metric: 'etcd_server_proposals_pending',
+ refId: 'B',
+ step: 2,
+ },
+ {
+ expr: 'sum(rate(etcd_server_proposals_committed_total{%s, %s="$cluster"}[$__rate_interval]))' % [$._config.etcd_selector, $._config.clusterLabel],
+ intervalFactor: 2,
+ legendFormat: 'Proposal Commit Rate',
+ metric: 'etcd_server_proposals_committed_total',
+ refId: 'C',
+ step: 2,
+ },
+ {
+ expr: 'sum(rate(etcd_server_proposals_applied_total{%s, %s="$cluster"}[$__rate_interval]))' % [$._config.etcd_selector, $._config.clusterLabel],
+ intervalFactor: 2,
+ legendFormat: 'Proposal Apply Rate',
+ refId: 'D',
+ step: 2,
+ },
+ ],
+ thresholds: [],
+ timeFrom: null,
+ timeShift: null,
+ title: 'Raft Proposals',
+ tooltip: {
+ msResolution: false,
+ shared: true,
+ sort: 0,
+ value_type: 'individual',
+ },
+ type: 'graph',
+ xaxis: {
+ mode: 'time',
+ name: null,
+ show: true,
+ values: [],
+ },
+ yaxes: [
+ {
+ format: 'short',
+ label: '',
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ {
+ format: 'short',
+ label: null,
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ ],
+ },
+ {
+ aliasColors: {},
+ bars: false,
+ datasource: '$datasource',
+ decimals: 0,
+ editable: true,
+ 'error': false,
+ fill: 0,
+ id: 19,
+ isNew: true,
+ legend: {
+ alignAsTable: false,
+ avg: false,
+ current: false,
+ max: false,
+ min: false,
+ rightSide: false,
+ show: false,
+ total: false,
+ values: false,
+ },
+ lines: true,
+ linewidth: 2,
+ links: [],
+ nullPointMode: 'connected',
+ percentage: false,
+ pointradius: 5,
+ points: false,
+ renderer: 'flot',
+ seriesOverrides: [],
+ span: 6,
+ stack: false,
+ steppedLine: false,
+ targets: [{
+ expr: 'changes(etcd_server_leader_changes_seen_total{%s, %s="$cluster"}[1d])' % [$._config.etcd_selector, $._config.clusterLabel],
+ intervalFactor: 2,
+ legendFormat: '{{instance}} Total Leader Elections Per Day',
+ metric: 'etcd_server_leader_changes_seen_total',
+ refId: 'A',
+ step: 2,
+ }],
+ thresholds: [],
+ timeFrom: null,
+ timeShift: null,
+ title: 'Total Leader Elections Per Day',
+ tooltip: {
+ msResolution: false,
+ shared: true,
+ sort: 0,
+ value_type: 'individual',
+ },
+ type: 'graph',
+ xaxis: {
+ mode: 'time',
+ name: null,
+ show: true,
+ values: [],
+ },
+ yaxes: [
+ {
+ format: 'short',
+ label: null,
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ {
+ format: 'short',
+ label: null,
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ ],
+ },
+ {
+ aliasColors: {},
+ bars: false,
+ dashLength: 10,
+ dashes: false,
+ datasource: '$datasource',
+ decimals: 0,
+ editable: true,
+ 'error': false,
+ fieldConfig: {
+ defaults: {
+ custom: {},
+ },
+ overrides: [],
+ },
+ fill: 0,
+ fillGradient: 0,
+ gridPos: {
+ h: 7,
+ w: 12,
+ x: 0,
+ y: 28,
+ },
+ hiddenSeries: false,
+ id: 42,
+ isNew: true,
+ legend: {
+ alignAsTable: false,
+ avg: false,
+ current: false,
+ max: false,
+ min: false,
+ rightSide: false,
+ show: false,
+ total: false,
+ values: false,
+ },
+ lines: true,
+ linewidth: 2,
+ links: [],
+ nullPointMode: 'connected',
+ options: {
+ alertThreshold: true,
+ },
+ percentage: false,
+ pluginVersion: '7.4.3',
+ pointradius: 5,
+ points: false,
+ renderer: 'flot',
+ seriesOverrides: [],
+ spaceLength: 10,
+ stack: false,
+ steppedLine: false,
+ targets: [
+ {
+ expr: 'histogram_quantile(0.99, sum by (instance, le) (rate(etcd_network_peer_round_trip_time_seconds_bucket{%s, %s="$cluster"}[$__rate_interval])))' % [$._config.etcd_selector, $._config.clusterLabel],
+ interval: '',
+ intervalFactor: 2,
+ legendFormat: '{{instance}} Peer round trip time',
+ metric: 'etcd_network_peer_round_trip_time_seconds_bucket',
+ refId: 'A',
+ step: 2,
+ },
+ ],
+ thresholds: [],
+ timeFrom: null,
+ timeRegions: [],
+ timeShift: null,
+ title: 'Peer round trip time',
+ tooltip: {
+ msResolution: false,
+ shared: true,
+ sort: 0,
+ value_type: 'individual',
+ },
+ type: 'graph',
+ xaxis: {
+ buckets: null,
+ mode: 'time',
+ name: null,
+ show: true,
+ values: [],
+ },
+ yaxes: [
+ {
+ '$$hashKey': 'object:925',
+ decimals: null,
+ format: 's',
+ label: null,
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ {
+ '$$hashKey': 'object:926',
+ format: 'short',
+ label: null,
+ logBase: 1,
+ max: null,
+ min: null,
+ show: true,
+ },
+ ],
+ yaxis: {
+ align: false,
+ alignLevel: null,
+ },
+ },
+ ],
+ title: 'New row',
+ },
+ ],
+ time: {
+ from: 'now-15m',
+ to: 'now',
+ },
+ timepicker: {
+ now: true,
+ refresh_intervals: [
+ '5s',
+ '10s',
+ '30s',
+ '1m',
+ '5m',
+ '15m',
+ '30m',
+ '1h',
+ '2h',
+ '1d',
+ ],
+ time_options: [
+ '5m',
+ '15m',
+ '1h',
+ '6h',
+ '12h',
+ '24h',
+ '2d',
+ '7d',
+ '30d',
+ ],
+ },
+ templating: {
+ list: [
+ {
+ current: {
+ text: 'Prometheus',
+ value: 'Prometheus',
+ },
+ hide: 0,
+ label: 'Data Source',
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: '',
+ type: 'datasource',
+ },
+ {
+ allValue: null,
+ current: {
+ text: 'prod',
+ value: 'prod',
+ },
+ datasource: '$datasource',
+ hide: 0,
+ includeAll: false,
+ label: 'cluster',
+ multi: false,
+ name: 'cluster',
+ options: [],
+ query: 'label_values(etcd_server_has_leader{%s}, %s)' % [$._config.etcd_selector, $._config.clusterLabel],
+ refresh: $._config.dashboard_var_refresh,
+ regex: '',
+ sort: 2,
+ tagValuesQuery: '',
+ tags: [],
+ tagsQuery: '',
+ type: 'query',
+ useTags: false,
+ },
+ ],
+ },
+ annotations: {
+ list: [],
+ },
+ refresh: '10s',
+ schemaVersion: 13,
+ version: 215,
+ links: [],
+ gnetId: null,
+ },
+ } else {},
+}
diff --git a/contrib/mixin/dashboards/etcd.libsonnet b/contrib/mixin/dashboards/etcd.libsonnet
new file mode 100644
index 00000000000..57d69998ac8
--- /dev/null
+++ b/contrib/mixin/dashboards/etcd.libsonnet
@@ -0,0 +1,40 @@
+{
+ grafanaDashboards+:: if !$._config.grafana7x then {
+ local g = import './g.libsonnet',
+ local panels = import './panels.libsonnet',
+ local variables = import './variables.libsonnet',
+ local targets = import './targets.libsonnet',
+ local v = variables($._config),
+ local t = targets(v, $._config),
+
+ 'etcd.json':
+ g.dashboard.new('etcd')
+ + g.dashboard.withUid(std.md5('etcd.json'))
+ + g.dashboard.withRefresh('10s')
+ + g.dashboard.time.withFrom('now-15m')
+ + g.dashboard.time.withTo('now')
+ + g.dashboard.withDescription('etcd sample Grafana dashboard with Prometheus')
+ + g.dashboard.withTags(['etcd-mixin'])
+ + g.dashboard.withVariables([
+ v.datasource,
+ v.cluster,
+ ])
+ + g.dashboard.withPanels(
+ [
+ panels.stat.up('Up', t.up) { gridPos: { x: 0, h: 7, w: 6, y: 0 } },
+ panels.timeSeries.rpcRate('RPC rate', [t.rpcRate, t.rpcFailedRate]) { gridPos: { x: 6, h: 7, w: 10, y: 0 } },
+ panels.timeSeries.activeStreams('Active streams', [t.watchStreams, t.leaseStreams]) { gridPos: { x: 16, h: 7, w: 8, y: 0 } },
+ panels.timeSeries.dbSize('DB size', [t.dbSize]) { gridPos: { x: 0, h: 7, w: 8, y: 25 } },
+ panels.timeSeries.diskSync('Disk sync duration', [t.walFsync, t.dbFsync]) { gridPos: { x: 8, h: 7, w: 8, y: 25 } },
+ panels.timeSeries.memory('Memory', [t.memory]) { gridPos: { x: 16, h: 7, w: 8, y: 25 } },
+ panels.timeSeries.traffic('Client traffic in', [t.clientTrafficIn]) { gridPos: { x: 0, h: 7, w: 6, y: 50 } },
+ panels.timeSeries.traffic('Client traffic out', [t.clientTrafficOut]) { gridPos: { x: 6, h: 7, w: 6, y: 50 } },
+ panels.timeSeries.traffic('Peer traffic in', [t.peerTrafficIn]) { gridPos: { x: 12, h: 7, w: 6, y: 50 } },
+ panels.timeSeries.traffic('Peer traffic out', [t.peerTrafficOut]) { gridPos: { x: 18, h: 7, w: 6, y: 50 } },
+ panels.timeSeries.raftProposals('Raft proposals', [t.raftProposals]) { gridPos: { x: 0, h: 7, w: 8, y: 75 } },
+ panels.timeSeries.leaderElections('Total leader elections per day', [t.leaderElections]) { gridPos: { x: 8, h: 7, w: 8, y: 75 } },
+ panels.timeSeries.peerRtt('Peer round trip time', [t.peerRtt]) { gridPos: { x: 16, h: 7, w: 8, y: 75 } },
+ ]
+ ),
+ } else {},
+}
diff --git a/contrib/mixin/dashboards/g.libsonnet b/contrib/mixin/dashboards/g.libsonnet
new file mode 100644
index 00000000000..6da9f4eef90
--- /dev/null
+++ b/contrib/mixin/dashboards/g.libsonnet
@@ -0,0 +1 @@
+import 'github.com/grafana/grafonnet/gen/grafonnet-v10.0.0/main.libsonnet'
diff --git a/contrib/mixin/dashboards/panels.libsonnet b/contrib/mixin/dashboards/panels.libsonnet
new file mode 100644
index 00000000000..81455675faa
--- /dev/null
+++ b/contrib/mixin/dashboards/panels.libsonnet
@@ -0,0 +1,59 @@
+local g = import 'g.libsonnet';
+
+{
+ stat: {
+ local stat = g.panel.stat,
+ base(title, targets):
+ stat.new(title)
+ + stat.queryOptions.withTargets(targets)
+ + stat.queryOptions.withInterval('1m'),
+ up(title, targets):
+ self.base(title, targets)
+ + stat.options.withColorMode('none')
+ + stat.options.withGraphMode('none')
+ + stat.options.reduceOptions.withCalcs([
+ 'lastNotNull',
+ ]),
+ },
+ timeSeries: {
+ local timeSeries = g.panel.timeSeries,
+ local fieldOverride = g.panel.timeSeries.fieldOverride,
+ local custom = timeSeries.fieldConfig.defaults.custom,
+ local defaults = timeSeries.fieldConfig.defaults,
+ local options = timeSeries.options,
+
+
+ base(title, targets):
+ timeSeries.new(title)
+ + timeSeries.queryOptions.withTargets(targets)
+ + timeSeries.queryOptions.withInterval('1m')
+ + custom.withLineWidth(2)
+ + custom.withFillOpacity(0)
+ + custom.withShowPoints('never'),
+
+ rpcRate(title, targets):
+ self.base(title, targets)
+ + timeSeries.standardOptions.withUnit('ops'),
+ activeStreams(title, targets):
+ self.base(title, targets),
+ dbSize(title, targets):
+ self.base(title, targets)
+ + timeSeries.standardOptions.withUnit('bytes'),
+ diskSync(title, targets):
+ self.base(title, targets)
+ + timeSeries.standardOptions.withUnit('s'),
+ memory(title, targets):
+ self.base(title, targets)
+ + timeSeries.standardOptions.withUnit('bytes'),
+ traffic(title, targets):
+ self.base(title, targets)
+ + timeSeries.standardOptions.withUnit('Bps'),
+ raftProposals(title, targets):
+ self.base(title, targets),
+ leaderElections(title, targets):
+ self.base(title, targets),
+ peerRtt(title, targets):
+ self.base(title, targets)
+ + timeSeries.standardOptions.withUnit('s'),
+ },
+}
diff --git a/contrib/mixin/dashboards/targets.libsonnet b/contrib/mixin/dashboards/targets.libsonnet
new file mode 100644
index 00000000000..3eb3158952f
--- /dev/null
+++ b/contrib/mixin/dashboards/targets.libsonnet
@@ -0,0 +1,104 @@
+local g = import './g.libsonnet';
+local prometheusQuery = g.query.prometheus;
+
+function(variables, config) {
+ up:
+ prometheusQuery.new(
+ '$' + variables.datasource.name,
+ 'sum(etcd_server_has_leader{%s, %s="$cluster"})' % [config.etcd_selector, config.clusterLabel]
+ )
+ + prometheusQuery.withLegendFormat(|||
+ {{cluster}} - {{namespace}}
+ |||),
+
+ rpcRate:
+ prometheusQuery.new(
+ '$' + variables.datasource.name,
+ 'sum(rate(grpc_server_started_total{%s, %s="$cluster",grpc_type="unary"}[$__rate_interval]))' % [config.etcd_selector, config.clusterLabel]
+ )
+ + prometheusQuery.withLegendFormat('RPC rate'),
+ rpcFailedRate:
+ prometheusQuery.new(
+ '$' + variables.datasource.name,
+ 'sum(rate(grpc_server_handled_total{%s, %s="$cluster",grpc_type="unary",grpc_code=~"Unknown|FailedPrecondition|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded"}[$__rate_interval]))' % [config.etcd_selector, config.clusterLabel]
+ )
+ + prometheusQuery.withLegendFormat('RPC failed rate'),
+ watchStreams:
+ prometheusQuery.new(
+ '$' + variables.datasource.name,
+ 'sum(grpc_server_started_total{%(etcd_selector)s,%(clusterLabel)s="$cluster",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{%(clusterLabel)s="$cluster",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"})' % config
+ )
+ + prometheusQuery.withLegendFormat('Watch streams'),
+ leaseStreams:
+ prometheusQuery.new(
+ '$' + variables.datasource.name,
+ 'sum(grpc_server_started_total{%(etcd_selector)s,%(clusterLabel)s="$cluster",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{%(clusterLabel)s="$cluster",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"})' % config
+ )
+ + prometheusQuery.withLegendFormat('Lease streams'),
+ dbSize:
+ prometheusQuery.new(
+ '$' + variables.datasource.name,
+ 'etcd_mvcc_db_total_size_in_bytes{%s, %s="$cluster"}' % [config.etcd_selector, config.clusterLabel],
+ )
+ + prometheusQuery.withLegendFormat('{{instance}} DB size'),
+ walFsync:
+ prometheusQuery.new(
+ '$' + variables.datasource.name,
+ 'histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket{%s, %s="$cluster"}[$__rate_interval])) by (instance, le))' % [config.etcd_selector, config.clusterLabel],
+ )
+ + prometheusQuery.withLegendFormat('{{instance}} WAL fsync'),
+ dbFsync:
+ prometheusQuery.new(
+ '$' + variables.datasource.name,
+ 'histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket{%s, %s="$cluster"}[$__rate_interval])) by (instance, le))' % [config.etcd_selector, config.clusterLabel],
+ )
+ + prometheusQuery.withLegendFormat('{{instance}} DB fsync'),
+ memory:
+ prometheusQuery.new(
+ '$' + variables.datasource.name,
+ 'process_resident_memory_bytes{%s, %s="$cluster"}' % [config.etcd_selector, config.clusterLabel],
+ )
+ + prometheusQuery.withLegendFormat('{{instance}} resident memory'),
+ clientTrafficIn:
+ prometheusQuery.new(
+ '$' + variables.datasource.name,
+ 'rate(etcd_network_client_grpc_received_bytes_total{%s, %s="$cluster"}[$__rate_interval])' % [config.etcd_selector, config.clusterLabel],
+ )
+ + prometheusQuery.withLegendFormat('{{instance}} client traffic in'),
+ clientTrafficOut:
+ prometheusQuery.new(
+ '$' + variables.datasource.name,
+ 'rate(etcd_network_client_grpc_sent_bytes_total{%s, %s="$cluster"}[$__rate_interval])' % [config.etcd_selector, config.clusterLabel],
+ )
+ + prometheusQuery.withLegendFormat('{{instance}} client traffic out'),
+ peerTrafficIn:
+ prometheusQuery.new(
+ '$' + variables.datasource.name,
+ 'sum(rate(etcd_network_peer_received_bytes_total{%s, %s="$cluster"}[$__rate_interval])) by (instance)' % [config.etcd_selector, config.clusterLabel],
+ )
+ + prometheusQuery.withLegendFormat('{{instance}} peer traffic in'),
+ peerTrafficOut:
+ prometheusQuery.new(
+ '$' + variables.datasource.name,
+ 'sum(rate(etcd_network_peer_sent_bytes_total{%s, %s="$cluster"}[$__rate_interval])) by (instance)' % [config.etcd_selector, config.clusterLabel],
+ )
+ + prometheusQuery.withLegendFormat('{{instance}} peer traffic out'),
+ raftProposals:
+ prometheusQuery.new(
+ '$' + variables.datasource.name,
+ 'sum(rate(etcd_server_proposals_committed_total{%s, %s="$cluster"}[$__rate_interval]))' % [config.etcd_selector, config.clusterLabel],
+ )
+ + prometheusQuery.withLegendFormat('Proposal commit rate'),
+ leaderElections:
+ prometheusQuery.new(
+ '$' + variables.datasource.name,
+ 'changes(etcd_server_leader_changes_seen_total{%s, %s="$cluster"}[1d])' % [config.etcd_selector, config.clusterLabel],
+ )
+ + prometheusQuery.withLegendFormat('{{instance}} total leader elections per day'),
+ peerRtt:
+ prometheusQuery.new(
+ '$' + variables.datasource.name,
+ 'histogram_quantile(0.99, sum by (instance, le) (rate(etcd_network_peer_round_trip_time_seconds_bucket{%s, %s="$cluster"}[$__rate_interval])))' % [config.etcd_selector, config.clusterLabel],
+ )
+ + prometheusQuery.withLegendFormat('{{instance}} peer round trip time'),
+}
diff --git a/contrib/mixin/dashboards/variables.libsonnet b/contrib/mixin/dashboards/variables.libsonnet
new file mode 100644
index 00000000000..7dafc63a91b
--- /dev/null
+++ b/contrib/mixin/dashboards/variables.libsonnet
@@ -0,0 +1,21 @@
+// variables.libsonnet
+local g = import './g.libsonnet';
+local var = g.dashboard.variable;
+
+
+function(config) {
+ datasource:
+ var.datasource.new('datasource', 'prometheus')
+ + var.datasource.generalOptions.withLabel('Data Source'),
+
+ cluster:
+ var.query.new('cluster')
+ + var.query.generalOptions.withLabel('cluster')
+ + var.query.withDatasourceFromVariable(self.datasource)
+ + { refresh: config.dashboard_var_refresh }
+ + var.query.queryTypes.withLabelValues(
+ config.clusterLabel,
+ 'etcd_server_has_leader{%s}' % [config.etcd_selector]
+ ),
+
+}
diff --git a/contrib/mixin/jsonnetfile.json b/contrib/mixin/jsonnetfile.json
new file mode 100644
index 00000000000..8c5b5e48a76
--- /dev/null
+++ b/contrib/mixin/jsonnetfile.json
@@ -0,0 +1,15 @@
+{
+ "version": 1,
+ "dependencies": [
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/grafana/grafonnet.git",
+ "subdir": "gen/grafonnet-v10.0.0"
+ }
+ },
+ "version": "main"
+ }
+ ],
+ "legacyImports": true
+}
\ No newline at end of file
diff --git a/contrib/mixin/jsonnetfile.lock.json b/contrib/mixin/jsonnetfile.lock.json
new file mode 100644
index 00000000000..cc41cc3d6f3
--- /dev/null
+++ b/contrib/mixin/jsonnetfile.lock.json
@@ -0,0 +1,36 @@
+{
+ "version": 1,
+ "dependencies": [
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/grafana/grafonnet.git",
+ "subdir": "gen/grafonnet-v10.0.0"
+ }
+ },
+ "version": "e85299323fd8808187d30865cc5c7a38a347399a",
+ "sum": "uJCTMGtY/7c5HSLQ7UQD38TOPmuSYrIKLIKmdSF/Htk="
+ },
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/jsonnet-libs/docsonnet.git",
+ "subdir": "doc-util"
+ }
+ },
+ "version": "fd8de9039b3c06da77d635a3a8289809a5bfb542",
+ "sum": "mFebrE9fhyAKW4zbnidcjVFupziN5LPA/Z7ii94uCzs="
+ },
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/jsonnet-libs/xtd.git",
+ "subdir": ""
+ }
+ },
+ "version": "0256a910ac71f0f842696d7bca0bf01ea77eb654",
+ "sum": "zBOpb1oTNvXdq9RF6yzTHill5r1YTJLBBoqyx4JYtAg="
+ }
+ ],
+ "legacyImports": false
+}
diff --git a/contrib/mixin/mixin.libsonnet b/contrib/mixin/mixin.libsonnet
index 1bb0f10da1e..edb8e14e92a 100644
--- a/contrib/mixin/mixin.libsonnet
+++ b/contrib/mixin/mixin.libsonnet
@@ -1,1316 +1,3 @@
-{
- _config+:: {
- etcd_selector: 'job=~".*etcd.*"',
- // etcd_instance_labels are the label names that are uniquely
- // identifying an instance and need to be aggreated away for alerts
- // that are about an etcd cluster as a whole. For example, if etcd
- // instances are deployed on K8s, you will likely want to change
- // this to 'instance, pod'.
- etcd_instance_labels: 'instance',
- // scrape_interval_seconds is the global scrape interval which can be
- // used to dynamically adjust rate windows as a function of the interval.
- scrape_interval_seconds: 30,
- // Dashboard variable refresh option on Grafana (https://grafana.com/docs/grafana/latest/datasources/prometheus/).
- // 0 : Never (Will never refresh the Dashboard variables values)
- // 1 : On Dashboard Load (Will refresh Dashboards variables when dashboard are loaded)
- // 2 : On Time Range Change (Will refresh Dashboards variables when time range will be changed)
- dashboard_var_refresh: 2,
- },
-
- prometheusAlerts+:: {
- groups+: [
- {
- name: 'etcd',
- rules: [
- {
- alert: 'etcdMembersDown',
- expr: |||
- max without (endpoint) (
- sum without (%(etcd_instance_labels)s) (up{%(etcd_selector)s} == bool 0)
- or
- count without (To) (
- sum without (%(etcd_instance_labels)s) (rate(etcd_network_peer_sent_failures_total{%(etcd_selector)s}[%(network_failure_range)ss])) > 0.01
- )
- )
- > 0
- ||| % {etcd_instance_labels: $._config.etcd_instance_labels, etcd_selector: $._config.etcd_selector, network_failure_range: $._config.scrape_interval_seconds*4},
- 'for': '10m',
- labels: {
- severity: 'critical',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.job }}": members are down ({{ $value }}).',
- summary: 'etcd cluster members are down.',
- },
- },
- {
- alert: 'etcdInsufficientMembers',
- expr: |||
- sum(up{%(etcd_selector)s} == bool 1) without (%(etcd_instance_labels)s) < ((count(up{%(etcd_selector)s}) without (%(etcd_instance_labels)s) + 1) / 2)
- ||| % $._config,
- 'for': '3m',
- labels: {
- severity: 'critical',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.job }}": insufficient members ({{ $value }}).',
- summary: 'etcd cluster has insufficient number of members.',
- },
- },
- {
- alert: 'etcdNoLeader',
- expr: |||
- etcd_server_has_leader{%(etcd_selector)s} == 0
- ||| % $._config,
- 'for': '1m',
- labels: {
- severity: 'critical',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.job }}": member {{ $labels.instance }} has no leader.',
- summary: 'etcd cluster has no leader.',
- },
- },
- {
- alert: 'etcdHighNumberOfLeaderChanges',
- expr: |||
- increase((max without (%(etcd_instance_labels)s) (etcd_server_leader_changes_seen_total{%(etcd_selector)s}) or 0*absent(etcd_server_leader_changes_seen_total{%(etcd_selector)s}))[15m:1m]) >= 4
- ||| % $._config,
- 'for': '5m',
- labels: {
- severity: 'warning',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.job }}": {{ $value }} leader changes within the last 15 minutes. Frequent elections may be a sign of insufficient resources, high network latency, or disruptions by other components and should be investigated.',
- summary: 'etcd cluster has high number of leader changes.',
- },
- },
- {
- alert: 'etcdHighNumberOfFailedGRPCRequests',
- expr: |||
- 100 * sum(rate(grpc_server_handled_total{%(etcd_selector)s, grpc_code!="OK"}[5m])) without (grpc_type, grpc_code)
- /
- sum(rate(grpc_server_handled_total{%(etcd_selector)s}[5m])) without (grpc_type, grpc_code)
- > 1
- ||| % $._config,
- 'for': '10m',
- labels: {
- severity: 'warning',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.job }}": {{ $value }}% of requests for {{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}.',
- summary: 'etcd cluster has high number of failed grpc requests.',
- },
- },
- {
- alert: 'etcdHighNumberOfFailedGRPCRequests',
- expr: |||
- 100 * sum(rate(grpc_server_handled_total{%(etcd_selector)s, grpc_code!="OK"}[5m])) without (grpc_type, grpc_code)
- /
- sum(rate(grpc_server_handled_total{%(etcd_selector)s}[5m])) without (grpc_type, grpc_code)
- > 5
- ||| % $._config,
- 'for': '5m',
- labels: {
- severity: 'critical',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.job }}": {{ $value }}% of requests for {{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}.',
- summary: 'etcd cluster has high number of failed grpc requests.',
- },
- },
- {
- alert: 'etcdGRPCRequestsSlow',
- expr: |||
- histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{%(etcd_selector)s, grpc_type="unary"}[5m])) without(grpc_type))
- > 0.15
- ||| % $._config,
- 'for': '10m',
- labels: {
- severity: 'critical',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.job }}": gRPC requests to {{ $labels.grpc_method }} are taking {{ $value }}s on etcd instance {{ $labels.instance }}.',
- summary: 'etcd grpc requests are slow',
- },
- },
- {
- alert: 'etcdMemberCommunicationSlow',
- expr: |||
- histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket{%(etcd_selector)s}[5m]))
- > 0.15
- ||| % $._config,
- 'for': '10m',
- labels: {
- severity: 'warning',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.job }}": member communication with {{ $labels.To }} is taking {{ $value }}s on etcd instance {{ $labels.instance }}.',
- summary: 'etcd cluster member communication is slow.',
- },
- },
- {
- alert: 'etcdHighNumberOfFailedProposals',
- expr: |||
- rate(etcd_server_proposals_failed_total{%(etcd_selector)s}[15m]) > 5
- ||| % $._config,
- 'for': '15m',
- labels: {
- severity: 'warning',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.job }}": {{ $value }} proposal failures within the last 30 minutes on etcd instance {{ $labels.instance }}.',
- summary: 'etcd cluster has high number of proposal failures.',
- },
- },
- {
- alert: 'etcdHighFsyncDurations',
- expr: |||
- histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{%(etcd_selector)s}[5m]))
- > 0.5
- ||| % $._config,
- 'for': '10m',
- labels: {
- severity: 'warning',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.job }}": 99th percentile fsync durations are {{ $value }}s on etcd instance {{ $labels.instance }}.',
- summary: 'etcd cluster 99th percentile fsync durations are too high.',
- },
- },
- {
- alert: 'etcdHighFsyncDurations',
- expr: |||
- histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{%(etcd_selector)s}[5m]))
- > 1
- ||| % $._config,
- 'for': '10m',
- labels: {
- severity: 'critical',
- },
- annotations: {
- message: 'etcd cluster "{{ $labels.job }}": 99th percentile fsync durations are {{ $value }}s on etcd instance {{ $labels.instance }}.',
- },
- },
- {
- alert: 'etcdHighCommitDurations',
- expr: |||
- histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket{%(etcd_selector)s}[5m]))
- > 0.25
- ||| % $._config,
- 'for': '10m',
- labels: {
- severity: 'warning',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.job }}": 99th percentile commit durations {{ $value }}s on etcd instance {{ $labels.instance }}.',
- summary: 'etcd cluster 99th percentile commit durations are too high.',
- },
- },
- {
- alert: 'etcdBackendQuotaLowSpace',
- expr: |||
- (etcd_mvcc_db_total_size_in_bytes/etcd_server_quota_backend_bytes)*100 > 95
- ||| % $._config,
- 'for': '10m',
- labels: {
- severity: 'critical',
- },
- annotations: {
- message: 'etcd cluster "{{ $labels.job }}": database size exceeds the defined quota on etcd instance {{ $labels.instance }}, please defrag or increase the quota as the writes to etcd will be disabled when it is full.',
- },
- },
- {
- alert: 'etcdExcessiveDatabaseGrowth',
- expr: |||
- increase(((etcd_mvcc_db_total_size_in_bytes/etcd_server_quota_backend_bytes)*100)[240m:1m]) > 50
- ||| % $._config,
- 'for': '10m',
- labels: {
- severity: 'warning',
- },
- annotations: {
- message: 'etcd cluster "{{ $labels.job }}": Observed surge in etcd writes leading to 50% increase in database size over the past four hours on etcd instance {{ $labels.instance }}, please check as it might be disruptive.',
- },
- },
- ],
- },
- ],
- },
-
- grafanaDashboards+:: {
- 'etcd.json': {
- uid: std.md5('etcd.json'),
- title: 'etcd',
- description: 'etcd sample Grafana dashboard with Prometheus',
- tags: [ 'etcd-mixin' ],
- style: 'dark',
- timezone: 'browser',
- editable: true,
- hideControls: false,
- sharedCrosshair: false,
- rows: [
- {
- collapse: false,
- editable: true,
- height: '250px',
- panels: [
- {
- cacheTimeout: null,
- colorBackground: false,
- colorValue: false,
- colors: [
- 'rgba(245, 54, 54, 0.9)',
- 'rgba(237, 129, 40, 0.89)',
- 'rgba(50, 172, 45, 0.97)',
- ],
- datasource: '$datasource',
- editable: true,
- 'error': false,
- format: 'none',
- gauge: {
- maxValue: 100,
- minValue: 0,
- show: false,
- thresholdLabels: false,
- thresholdMarkers: true,
- },
- id: 28,
- interval: null,
- isNew: true,
- links: [],
- mappingType: 1,
- mappingTypes: [
- {
- name: 'value to text',
- value: 1,
- },
- {
- name: 'range to text',
- value: 2,
- },
- ],
- maxDataPoints: 100,
- nullPointMode: 'connected',
- nullText: null,
- postfix: '',
- postfixFontSize: '50%',
- prefix: '',
- prefixFontSize: '50%',
- rangeMaps: [{
- from: 'null',
- text: 'N/A',
- to: 'null',
- }],
- span: 3,
- sparkline: {
- fillColor: 'rgba(31, 118, 189, 0.18)',
- full: false,
- lineColor: 'rgb(31, 120, 193)',
- show: false,
- },
- targets: [{
- expr: 'sum(etcd_server_has_leader{job="$cluster"})',
- intervalFactor: 2,
- legendFormat: '',
- metric: 'etcd_server_has_leader',
- refId: 'A',
- step: 20,
- }],
- thresholds: '',
- title: 'Up',
- type: 'singlestat',
- valueFontSize: '200%',
- valueMaps: [{
- op: '=',
- text: 'N/A',
- value: 'null',
- }],
- valueName: 'avg',
- },
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- editable: true,
- 'error': false,
- fill: 0,
- id: 23,
- isNew: true,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 5,
- stack: false,
- steppedLine: false,
- targets: [
- {
- expr: 'sum(rate(grpc_server_started_total{job="$cluster",grpc_type="unary"}[5m]))',
- format: 'time_series',
- intervalFactor: 2,
- legendFormat: 'RPC Rate',
- metric: 'grpc_server_started_total',
- refId: 'A',
- step: 2,
- },
- {
- expr: 'sum(rate(grpc_server_handled_total{job="$cluster",grpc_type="unary",grpc_code!="OK"}[5m]))',
- format: 'time_series',
- intervalFactor: 2,
- legendFormat: 'RPC Failed Rate',
- metric: 'grpc_server_handled_total',
- refId: 'B',
- step: 2,
- },
- ],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'RPC Rate',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'individual',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'ops',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- },
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- editable: true,
- 'error': false,
- fill: 0,
- id: 41,
- isNew: true,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 4,
- stack: true,
- steppedLine: false,
- targets: [
- {
- expr: 'sum(grpc_server_started_total{job="$cluster",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{job="$cluster",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"})',
- intervalFactor: 2,
- legendFormat: 'Watch Streams',
- metric: 'grpc_server_handled_total',
- refId: 'A',
- step: 4,
- },
- {
- expr: 'sum(grpc_server_started_total{job="$cluster",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{job="$cluster",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"})',
- intervalFactor: 2,
- legendFormat: 'Lease Streams',
- metric: 'grpc_server_handled_total',
- refId: 'B',
- step: 4,
- },
- ],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'Active Streams',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'individual',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'short',
- label: '',
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- },
- ],
- showTitle: false,
- title: 'Row',
- },
- {
- collapse: false,
- editable: true,
- height: '250px',
- panels: [
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- decimals: null,
- editable: true,
- 'error': false,
- fill: 0,
- grid: {},
- id: 1,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 4,
- stack: false,
- steppedLine: false,
- targets: [{
- expr: 'etcd_mvcc_db_total_size_in_bytes{job="$cluster"}',
- hide: false,
- interval: '',
- intervalFactor: 2,
- legendFormat: '{{instance}} DB Size',
- metric: '',
- refId: 'A',
- step: 4,
- }],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'DB Size',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'cumulative',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'bytes',
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- logBase: 1,
- max: null,
- min: null,
- show: false,
- },
- ],
- },
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- editable: true,
- 'error': false,
- fill: 0,
- grid: {},
- id: 3,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 1,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 4,
- stack: false,
- steppedLine: true,
- targets: [
- {
- expr: 'histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket{job="$cluster"}[5m])) by (instance, le))',
- hide: false,
- intervalFactor: 2,
- legendFormat: '{{instance}} WAL fsync',
- metric: 'etcd_disk_wal_fsync_duration_seconds_bucket',
- refId: 'A',
- step: 4,
- },
- {
- expr: 'histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket{job="$cluster"}[5m])) by (instance, le))',
- intervalFactor: 2,
- legendFormat: '{{instance}} DB fsync',
- metric: 'etcd_disk_backend_commit_duration_seconds_bucket',
- refId: 'B',
- step: 4,
- },
- ],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'Disk Sync Duration',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'cumulative',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 's',
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- logBase: 1,
- max: null,
- min: null,
- show: false,
- },
- ],
- },
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- editable: true,
- 'error': false,
- fill: 0,
- id: 29,
- isNew: true,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 4,
- stack: false,
- steppedLine: false,
- targets: [{
- expr: 'process_resident_memory_bytes{job="$cluster"}',
- intervalFactor: 2,
- legendFormat: '{{instance}} Resident Memory',
- metric: 'process_resident_memory_bytes',
- refId: 'A',
- step: 4,
- }],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'Memory',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'individual',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'bytes',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- },
- ],
- title: 'New row',
- },
- {
- collapse: false,
- editable: true,
- height: '250px',
- panels: [
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- editable: true,
- 'error': false,
- fill: 5,
- id: 22,
- isNew: true,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 3,
- stack: true,
- steppedLine: false,
- targets: [{
- expr: 'rate(etcd_network_client_grpc_received_bytes_total{job="$cluster"}[5m])',
- intervalFactor: 2,
- legendFormat: '{{instance}} Client Traffic In',
- metric: 'etcd_network_client_grpc_received_bytes_total',
- refId: 'A',
- step: 4,
- }],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'Client Traffic In',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'individual',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'Bps',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- },
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- editable: true,
- 'error': false,
- fill: 5,
- id: 21,
- isNew: true,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 3,
- stack: true,
- steppedLine: false,
- targets: [{
- expr: 'rate(etcd_network_client_grpc_sent_bytes_total{job="$cluster"}[5m])',
- intervalFactor: 2,
- legendFormat: '{{instance}} Client Traffic Out',
- metric: 'etcd_network_client_grpc_sent_bytes_total',
- refId: 'A',
- step: 4,
- }],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'Client Traffic Out',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'individual',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'Bps',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- },
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- editable: true,
- 'error': false,
- fill: 0,
- id: 20,
- isNew: true,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 3,
- stack: false,
- steppedLine: false,
- targets: [{
- expr: 'sum(rate(etcd_network_peer_received_bytes_total{job="$cluster"}[5m])) by (instance)',
- intervalFactor: 2,
- legendFormat: '{{instance}} Peer Traffic In',
- metric: 'etcd_network_peer_received_bytes_total',
- refId: 'A',
- step: 4,
- }],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'Peer Traffic In',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'individual',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'Bps',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- },
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- decimals: null,
- editable: true,
- 'error': false,
- fill: 0,
- grid: {},
- id: 16,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 3,
- stack: false,
- steppedLine: false,
- targets: [{
- expr: 'sum(rate(etcd_network_peer_sent_bytes_total{job="$cluster"}[5m])) by (instance)',
- hide: false,
- interval: '',
- intervalFactor: 2,
- legendFormat: '{{instance}} Peer Traffic Out',
- metric: 'etcd_network_peer_sent_bytes_total',
- refId: 'A',
- step: 4,
- }],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'Peer Traffic Out',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'cumulative',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'Bps',
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- },
- ],
- title: 'New row',
- },
- {
- collapse: false,
- editable: true,
- height: '250px',
- panels: [
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- editable: true,
- 'error': false,
- fill: 0,
- id: 40,
- isNew: true,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 6,
- stack: false,
- steppedLine: false,
- targets: [
- {
- expr: 'sum(rate(etcd_server_proposals_failed_total{job="$cluster"}[5m]))',
- intervalFactor: 2,
- legendFormat: 'Proposal Failure Rate',
- metric: 'etcd_server_proposals_failed_total',
- refId: 'A',
- step: 2,
- },
- {
- expr: 'sum(etcd_server_proposals_pending{job="$cluster"})',
- intervalFactor: 2,
- legendFormat: 'Proposal Pending Total',
- metric: 'etcd_server_proposals_pending',
- refId: 'B',
- step: 2,
- },
- {
- expr: 'sum(rate(etcd_server_proposals_committed_total{job="$cluster"}[5m]))',
- intervalFactor: 2,
- legendFormat: 'Proposal Commit Rate',
- metric: 'etcd_server_proposals_committed_total',
- refId: 'C',
- step: 2,
- },
- {
- expr: 'sum(rate(etcd_server_proposals_applied_total{job="$cluster"}[5m]))',
- intervalFactor: 2,
- legendFormat: 'Proposal Apply Rate',
- refId: 'D',
- step: 2,
- },
- ],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'Raft Proposals',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'individual',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'short',
- label: '',
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- },
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- decimals: 0,
- editable: true,
- 'error': false,
- fill: 0,
- id: 19,
- isNew: true,
- legend: {
- alignAsTable: false,
- avg: false,
- current: false,
- max: false,
- min: false,
- rightSide: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 6,
- stack: false,
- steppedLine: false,
- targets: [{
- expr: 'changes(etcd_server_leader_changes_seen_total{job="$cluster"}[1d])',
- intervalFactor: 2,
- legendFormat: '{{instance}} Total Leader Elections Per Day',
- metric: 'etcd_server_leader_changes_seen_total',
- refId: 'A',
- step: 2,
- }],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'Total Leader Elections Per Day',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'individual',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- },
- ],
- title: 'New row',
- },
- ],
- time: {
- from: 'now-15m',
- to: 'now',
- },
- timepicker: {
- now: true,
- refresh_intervals: [
- '5s',
- '10s',
- '30s',
- '1m',
- '5m',
- '15m',
- '30m',
- '1h',
- '2h',
- '1d',
- ],
- time_options: [
- '5m',
- '15m',
- '1h',
- '6h',
- '12h',
- '24h',
- '2d',
- '7d',
- '30d',
- ],
- },
- templating: {
- list: [
- {
- current: {
- text: 'Prometheus',
- value: 'Prometheus',
- },
- hide: 0,
- label: null,
- name: 'datasource',
- options: [],
- query: 'prometheus',
- refresh: 1,
- regex: '',
- type: 'datasource',
- },
- {
- allValue: null,
- current: {
- text: 'prod',
- value: 'prod',
- },
- datasource: '$datasource',
- hide: 0,
- includeAll: false,
- label: 'cluster',
- multi: false,
- name: 'cluster',
- options: [],
- query: 'label_values(etcd_server_has_leader, job)',
- refresh: $._config.dashboard_var_refresh,
- regex: '',
- sort: 2,
- tagValuesQuery: '',
- tags: [],
- tagsQuery: '',
- type: 'query',
- useTags: false,
- },
- ],
- },
- annotations: {
- list: [],
- },
- refresh: '10s',
- schemaVersion: 13,
- version: 215,
- links: [],
- gnetId: null,
- },
- },
-}
+(import './config.libsonnet') +
+(import './dashboards/dashboards.libsonnet') +
+(import './alerts/alerts.libsonnet')
diff --git a/contrib/mixin/test.yaml b/contrib/mixin/test.yaml
index 24162bd4d4e..bfb50d8af3c 100644
--- a/contrib/mixin/test.yaml
+++ b/contrib/mixin/test.yaml
@@ -1,17 +1,15 @@
-rule_files:
- - mixin.yaml
-
+---
+rule_files: [manifests/etcd-prometheusRules.yaml]
evaluation_interval: 1m
-
tests:
- interval: 1m
input_series:
- - series: 'up{job="etcd",instance="10.10.10.0"}'
- values: '1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0'
- - series: 'up{job="etcd",instance="10.10.10.1"}'
- values: '1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0'
- - series: 'up{job="etcd",instance="10.10.10.2"}'
- values: '1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0'
+ - series: up{job="etcd",instance="10.10.10.0"}
+ values: 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0
+ - series: up{job="etcd",instance="10.10.10.1"}
+ values: 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0
+ - series: up{job="etcd",instance="10.10.10.2"}
+ values: 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0
alert_rule_test:
- eval_time: 3m
alertname: etcdInsufficientMembers
@@ -27,7 +25,7 @@ tests:
severity: critical
exp_annotations:
description: 'etcd cluster "etcd": members are down (3).'
- summary: 'etcd cluster members are down.'
+ summary: etcd cluster members are down.
- eval_time: 7m
alertname: etcdInsufficientMembers
- eval_time: 11m
@@ -38,7 +36,7 @@ tests:
severity: critical
exp_annotations:
description: 'etcd cluster "etcd": insufficient members (1).'
- summary: 'etcd cluster has insufficient number of members.'
+ summary: etcd cluster has insufficient number of members.
- eval_time: 15m
alertname: etcdInsufficientMembers
exp_alerts:
@@ -47,16 +45,15 @@ tests:
severity: critical
exp_annotations:
description: 'etcd cluster "etcd": insufficient members (0).'
- summary: 'etcd cluster has insufficient number of members.'
-
+ summary: etcd cluster has insufficient number of members.
- interval: 1m
input_series:
- - series: 'up{job="etcd",instance="10.10.10.0"}'
- values: '1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0'
- - series: 'up{job="etcd",instance="10.10.10.1"}'
- values: '1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0'
- - series: 'up{job="etcd",instance="10.10.10.2"}'
- values: '1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0'
+ - series: up{job="etcd",instance="10.10.10.0"}
+ values: 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0
+ - series: up{job="etcd",instance="10.10.10.1"}
+ values: 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0
+ - series: up{job="etcd",instance="10.10.10.2"}
+ values: 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
alert_rule_test:
- eval_time: 14m
alertname: etcdMembersDown
@@ -66,16 +63,15 @@ tests:
severity: critical
exp_annotations:
description: 'etcd cluster "etcd": members are down (3).'
- summary: 'etcd cluster members are down.'
-
+ summary: etcd cluster members are down.
- interval: 1m
input_series:
- - series: 'up{job="etcd",instance="10.10.10.0"}'
- values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0'
- - series: 'up{job="etcd",instance="10.10.10.1"}'
- values: '1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0'
- - series: 'etcd_network_peer_sent_failures_total{To="member-1",job="etcd",endpoint="test"}'
- values: '0 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18'
+ - series: up{job="etcd",instance="10.10.10.0"}
+ values: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0
+ - series: up{job="etcd",instance="10.10.10.1"}
+ values: 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0
+ - series: etcd_network_peer_sent_failures_total{To="member-1",job="etcd",endpoint="test"}
+ values: 0 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
alert_rule_test:
- eval_time: 13m
alertname: etcdMembersDown
@@ -85,15 +81,15 @@ tests:
severity: critical
exp_annotations:
description: 'etcd cluster "etcd": members are down (1).'
- summary: 'etcd cluster members are down.'
+ summary: etcd cluster members are down.
- interval: 1m
input_series:
- - series: 'etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.0"}'
- values: '0 0 2 0 0 1 0 0 0 0 0 0 0 0 0 0'
- - series: 'etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.1"}'
- values: '0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0'
- - series: 'etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.2"}'
- values: '0 0 0 0 0 0 0 0'
+ - series: etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.0"}
+ values: 0 0 2 0 0 1 0 0 0 0 0 0 0 0 0 0
+ - series: etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.1"}
+ values: 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0
+ - series: etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.2"}
+ values: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
alert_rule_test:
- eval_time: 10m
alertname: etcdHighNumberOfLeaderChanges
@@ -103,33 +99,59 @@ tests:
severity: warning
exp_annotations:
description: 'etcd cluster "etcd": 4 leader changes within the last 15 minutes. Frequent elections may be a sign of insufficient resources, high network latency, or disruptions by other components and should be investigated.'
- summary: 'etcd cluster has high number of leader changes.'
+ summary: etcd cluster has high number of leader changes.
- interval: 1m
input_series:
- - series: 'etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.0"}'
- values: '0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0'
- - series: 'etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.1"}'
- values: '0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0'
- - series: 'etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.2"}'
- values: '0 0 0 0 0 0 0 0'
+ - series: etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.0"}
+ values: 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0
+ - series: etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.1"}
+ values: 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0
+ - series: etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.2"}
+ values: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
alert_rule_test:
- eval_time: 10m
alertname: etcdHighNumberOfLeaderChanges
exp_alerts:
- interval: 1m
input_series:
- - series: '((etcd_mvcc_db_total_size_in_bytes/etcd_server_quota_backend_bytes)*100){job="etcd",instance="10.10.10.0"}'
- values: '0 10 20 0 0 10 0 0 30 0 0 0 0 0 0 0'
- - series: '((etcd_mvcc_db_total_size_in_bytes/etcd_server_quota_backend_bytes)*100){job="etcd",instance="10.10.10.1"}'
- values: '0 0 10 0 20 0 0 0 0 0 0 0 0 0 0 0'
- - series: '((etcd_mvcc_db_total_size_in_bytes/etcd_server_quota_backend_bytes)*100){job="etcd",instance="10.10.10.2"}'
- values: '0 0 0 0 0 0 0 0'
+ - series: etcd_mvcc_db_total_size_in_bytes{job="etcd",instance="10.10.10.0"}
+ values: 0+8192x240
+ - series: etcd_server_quota_backend_bytes{job="etcd",instance="10.10.10.0"}
+ values: 524288+0x240
+ - series: etcd_mvcc_db_total_size_in_bytes{job="etcd",instance="10.10.10.1"}
+ values: 0+1024x240
+ - series: etcd_server_quota_backend_bytes{job="etcd",instance="10.10.10.1"}
+ values: 524288+0x240
alert_rule_test:
- - eval_time: 10m
+ - eval_time: 11m
alertname: etcdExcessiveDatabaseGrowth
exp_alerts:
- exp_labels:
+ instance: 10.10.10.0
+ job: etcd
+ severity: warning
+ exp_annotations:
+ description: 'etcd cluster "etcd": Predicting running out of disk space in the next four hours, based on write observations within the past four hours on etcd instance 10.10.10.0, please check as it might be disruptive.'
+ summary: etcd cluster database growing very fast.
+ - interval: 1m
+ input_series:
+ - series: etcd_mvcc_db_total_size_in_use_in_bytes{job="etcd",instance="10.10.10.0"}
+ values: 300000000+0x10
+ - series: etcd_mvcc_db_total_size_in_bytes{job="etcd",instance="10.10.10.0"}
+ values: 1000000000+0x10
+ - series: etcd_mvcc_db_total_size_in_use_in_bytes{job="etcd",instance="10.10.10.1"}
+ values: 700000000+0x10
+ - series: etcd_mvcc_db_total_size_in_bytes{job="etcd",instance="10.10.10.1"}
+ values: 1000000000+0x10
+ alert_rule_test:
+ - eval_time: 11m
+ alertname: etcdDatabaseHighFragmentationRatio
+ exp_alerts:
+ - exp_labels:
+ instance: 10.10.10.0
job: etcd
severity: warning
exp_annotations:
- message: 'etcd cluster "etcd": Observed surge in etcd writes leading to 50% increase in database size over the past four hours, please check as it might be disruptive.'
+ description: 'etcd cluster "etcd": database size in use on instance 10.10.10.0 is 30% of the actual allocated disk space, please run defragmentation (e.g. etcdctl defrag) to retrieve the unused fragmented disk space.'
+ runbook_url: https://etcd.io/docs/v3.5/op-guide/maintenance/#defragmentation
+ summary: etcd database size in use is less than 50% of the actual allocated storage.
diff --git a/contrib/raftexample/Procfile b/contrib/raftexample/Procfile
index 6b2f7ccf03c..f6e87132693 100644
--- a/contrib/raftexample/Procfile
+++ b/contrib/raftexample/Procfile
@@ -1,4 +1,4 @@
-# Use goreman to run `go get github.com/mattn/goreman`
+# Use goreman to run this Procfile; install goreman with `go install github.com/mattn/goreman@latest`
raftexample1: ./raftexample --id 1 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 12380
raftexample2: ./raftexample --id 2 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 22380
raftexample3: ./raftexample --id 3 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 32380
diff --git a/contrib/raftexample/README.md b/contrib/raftexample/README.md
index 2e73996a6a6..900a91e0f0d 100644
--- a/contrib/raftexample/README.md
+++ b/contrib/raftexample/README.md
@@ -1,6 +1,6 @@
# raftexample
-raftexample is an example usage of etcd's [raft library](../../raft). It provides a simple REST API for a key-value store cluster backed by the [Raft][raft] consensus algorithm.
+raftexample is an example usage of etcd's [raft library](https://github.com/etcd-io/raft). It provides a simple REST API for a key-value store cluster backed by the [Raft][raft] consensus algorithm.
[raft]: http://raftconsensus.github.io/
diff --git a/contrib/raftexample/httpapi.go b/contrib/raftexample/httpapi.go
index 13445597eaf..dbe226add33 100644
--- a/contrib/raftexample/httpapi.go
+++ b/contrib/raftexample/httpapi.go
@@ -15,12 +15,12 @@
package main
import (
- "io/ioutil"
+ "io"
"log"
"net/http"
"strconv"
- "go.etcd.io/etcd/raft/v3/raftpb"
+ "go.etcd.io/raft/v3/raftpb"
)
// Handler for a http based key-value store backed by raft
@@ -32,9 +32,9 @@ type httpKVAPI struct {
func (h *httpKVAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
key := r.RequestURI
defer r.Body.Close()
- switch {
- case r.Method == "PUT":
- v, err := ioutil.ReadAll(r.Body)
+ switch r.Method {
+ case http.MethodPut:
+ v, err := io.ReadAll(r.Body)
if err != nil {
log.Printf("Failed to read on PUT (%v)\n", err)
http.Error(w, "Failed on PUT", http.StatusBadRequest)
@@ -46,21 +46,21 @@ func (h *httpKVAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Optimistic-- no waiting for ack from raft. Value is not yet
// committed so a subsequent GET on the key may return old value
w.WriteHeader(http.StatusNoContent)
- case r.Method == "GET":
+ case http.MethodGet:
if v, ok := h.store.Lookup(key); ok {
w.Write([]byte(v))
} else {
http.Error(w, "Failed to GET", http.StatusNotFound)
}
- case r.Method == "POST":
- url, err := ioutil.ReadAll(r.Body)
+ case http.MethodPost:
+ url, err := io.ReadAll(r.Body)
if err != nil {
log.Printf("Failed to read on POST (%v)\n", err)
http.Error(w, "Failed on POST", http.StatusBadRequest)
return
}
- nodeId, err := strconv.ParseUint(key[1:], 0, 64)
+ nodeID, err := strconv.ParseUint(key[1:], 0, 64)
if err != nil {
log.Printf("Failed to convert ID for conf change (%v)\n", err)
http.Error(w, "Failed on POST", http.StatusBadRequest)
@@ -69,15 +69,14 @@ func (h *httpKVAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
cc := raftpb.ConfChange{
Type: raftpb.ConfChangeAddNode,
- NodeID: nodeId,
+ NodeID: nodeID,
Context: url,
}
h.confChangeC <- cc
-
// As above, optimistic that raft will apply the conf change
w.WriteHeader(http.StatusNoContent)
- case r.Method == "DELETE":
- nodeId, err := strconv.ParseUint(key[1:], 0, 64)
+ case http.MethodDelete:
+ nodeID, err := strconv.ParseUint(key[1:], 0, 64)
if err != nil {
log.Printf("Failed to convert ID for conf change (%v)\n", err)
http.Error(w, "Failed on DELETE", http.StatusBadRequest)
@@ -86,23 +85,23 @@ func (h *httpKVAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
cc := raftpb.ConfChange{
Type: raftpb.ConfChangeRemoveNode,
- NodeID: nodeId,
+ NodeID: nodeID,
}
h.confChangeC <- cc
// As above, optimistic that raft will apply the conf change
w.WriteHeader(http.StatusNoContent)
default:
- w.Header().Set("Allow", "PUT")
- w.Header().Add("Allow", "GET")
- w.Header().Add("Allow", "POST")
- w.Header().Add("Allow", "DELETE")
+ w.Header().Set("Allow", http.MethodPut)
+ w.Header().Add("Allow", http.MethodGet)
+ w.Header().Add("Allow", http.MethodPost)
+ w.Header().Add("Allow", http.MethodDelete)
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
}
}
-// serveHttpKVAPI starts a key-value server with a GET/PUT API and listens.
-func serveHttpKVAPI(kv *kvstore, port int, confChangeC chan<- raftpb.ConfChange, errorC <-chan error) {
+// serveHTTPKVAPI starts a key-value server with a GET/PUT API and listens.
+func serveHTTPKVAPI(kv *kvstore, port int, confChangeC chan<- raftpb.ConfChange, errorC <-chan error) {
srv := http.Server{
Addr: ":" + strconv.Itoa(port),
Handler: &httpKVAPI{
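
The httpapi.go changes above replace bare method strings with the net/http method constants and move from the deprecated ioutil.ReadAll to io.ReadAll. Below is a minimal, self-contained sketch of the same switch-on-method idiom; the echoStore type and its fields are hypothetical stand-ins for the raftexample handler, included only so the example builds on its own.

```go
package main

import (
	"io"
	"log"
	"net/http"
	"sync"
)

// echoStore is a hypothetical in-memory store used only to illustrate the
// switch r.Method pattern with the net/http method constants.
type echoStore struct {
	mu sync.RWMutex
	m  map[string]string
}

func (s *echoStore) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	key := r.URL.Path
	defer r.Body.Close()
	switch r.Method {
	case http.MethodPut:
		v, err := io.ReadAll(r.Body) // io.ReadAll replaces the deprecated ioutil.ReadAll
		if err != nil {
			http.Error(w, "failed on PUT", http.StatusBadRequest)
			return
		}
		s.mu.Lock()
		s.m[key] = string(v)
		s.mu.Unlock()
		w.WriteHeader(http.StatusNoContent)
	case http.MethodGet:
		s.mu.RLock()
		v, ok := s.m[key]
		s.mu.RUnlock()
		if !ok {
			http.Error(w, "not found", http.StatusNotFound)
			return
		}
		w.Write([]byte(v))
	default:
		// Advertise the supported methods, mirroring the Allow header handling above.
		w.Header().Set("Allow", http.MethodPut)
		w.Header().Add("Allow", http.MethodGet)
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
	}
}

func main() {
	log.Fatal(http.ListenAndServe(":8080", &echoStore{m: map[string]string{}}))
}
```

Using the constants keeps the Allow header in sync with the cases actually handled and avoids typos that a raw string comparison would silently accept.
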
diff --git a/contrib/raftexample/kvstore.go b/contrib/raftexample/kvstore.go
index ba49d00ee55..11bbf1fc6e7 100644
--- a/contrib/raftexample/kvstore.go
+++ b/contrib/raftexample/kvstore.go
@@ -18,11 +18,13 @@ import (
"bytes"
"encoding/gob"
"encoding/json"
+ "errors"
"log"
+ "strings"
"sync"
- "go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
+ "go.etcd.io/raft/v3/raftpb"
)
// a key-value store backed by raft
@@ -63,7 +65,7 @@ func (s *kvstore) Lookup(key string) (string, bool) {
}
func (s *kvstore) Propose(k string, v string) {
- var buf bytes.Buffer
+ var buf strings.Builder
if err := gob.NewEncoder(&buf).Encode(kv{k, v}); err != nil {
log.Fatal(err)
}
@@ -112,7 +114,7 @@ func (s *kvstore) getSnapshot() ([]byte, error) {
func (s *kvstore) loadSnapshot() (*raftpb.Snapshot, error) {
snapshot, err := s.snapshotter.Load()
- if err == snap.ErrNoSnapshot {
+ if errors.Is(err, snap.ErrNoSnapshot) {
return nil, nil
}
if err != nil {
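
Two idioms appear in the kvstore.go hunks above: Propose now gob-encodes into a strings.Builder rather than a bytes.Buffer, so the encoded proposal is already a string, and sentinel-error checks use errors.Is instead of ==. A small standalone sketch of both follows; the kv shape mirrors the one in kvstore.go, while errNoData is a hypothetical sentinel used only to demonstrate the errors.Is idiom.

```go
package main

import (
	"encoding/gob"
	"errors"
	"fmt"
	"strings"
)

// kv mirrors the value shape proposed by kvstore.Propose.
type kv struct {
	Key string
	Val string
}

var errNoData = errors.New("no data available")

func encode(p kv) string {
	// strings.Builder implements io.Writer, so gob can encode into it directly
	// and the result is a string without an extra []byte-to-string copy.
	var buf strings.Builder
	if err := gob.NewEncoder(&buf).Encode(p); err != nil {
		panic(err)
	}
	return buf.String()
}

func decode(s string) kv {
	var p kv
	if err := gob.NewDecoder(strings.NewReader(s)).Decode(&p); err != nil {
		panic(err)
	}
	return p
}

func main() {
	fmt.Printf("%+v\n", decode(encode(kv{Key: "foo", Val: "bar"})))

	// errors.Is still matches the sentinel after wrapping with %w,
	// where a plain == comparison would not.
	wrapped := fmt.Errorf("loading state: %w", errNoData)
	fmt.Println(errors.Is(wrapped, errNoData)) // true
}
```
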
diff --git a/contrib/raftexample/kvstore_test.go b/contrib/raftexample/kvstore_test.go
index 231f778f2ee..7ebef32ee79 100644
--- a/contrib/raftexample/kvstore_test.go
+++ b/contrib/raftexample/kvstore_test.go
@@ -17,6 +17,8 @@ package main
import (
"reflect"
"testing"
+
+ "github.com/stretchr/testify/require"
)
func Test_kvstore_snapshot(t *testing.T) {
@@ -24,24 +26,15 @@ func Test_kvstore_snapshot(t *testing.T) {
s := &kvstore{kvStore: tm}
v, _ := s.Lookup("foo")
- if v != "bar" {
- t.Fatalf("foo has unexpected value, got %s", v)
- }
+ require.Equalf(t, "bar", v, "foo has unexpected value, got %s", v)
data, err := s.getSnapshot()
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
s.kvStore = nil
- if err := s.recoverFromSnapshot(data); err != nil {
- t.Fatal(err)
- }
+ err = s.recoverFromSnapshot(data)
+ require.NoError(t, err)
v, _ = s.Lookup("foo")
- if v != "bar" {
- t.Fatalf("foo has unexpected value, got %s", v)
- }
- if !reflect.DeepEqual(s.kvStore, tm) {
- t.Fatalf("store expected %+v, got %+v", tm, s.kvStore)
- }
+ require.Equalf(t, "bar", v, "foo has unexpected value, got %s", v)
+ require.Truef(t, reflect.DeepEqual(s.kvStore, tm), "store expected %+v, got %+v", tm, s.kvStore)
}
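
As a side note rather than part of this patch, the final assertion could rely on testify's own deep comparison instead of wrapping reflect.DeepEqual; require.Equal compares maps and structs deeply, so (using the same variables as the test above) the last line could be written as:

```go
// Deep comparison is handled by testify itself; reflect.DeepEqual is used internally.
require.Equal(t, tm, s.kvStore)
```
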
diff --git a/contrib/raftexample/main.go b/contrib/raftexample/main.go
index b52a1fb6498..73f02787a35 100644
--- a/contrib/raftexample/main.go
+++ b/contrib/raftexample/main.go
@@ -18,7 +18,7 @@ import (
"flag"
"strings"
- "go.etcd.io/etcd/raft/v3/raftpb"
+ "go.etcd.io/raft/v3/raftpb"
)
func main() {
@@ -41,5 +41,5 @@ func main() {
kvs = newKVStore(<-snapshotterReady, proposeC, commitC, errorC)
// the key-value http handler will propose updates to raft
- serveHttpKVAPI(kvs, *kvport, confChangeC, errorC)
+ serveHTTPKVAPI(kvs, *kvport, confChangeC, errorC)
}
diff --git a/contrib/raftexample/raft.go b/contrib/raftexample/raft.go
index b2bcb25626a..5e5dc010bbe 100644
--- a/contrib/raftexample/raft.go
+++ b/contrib/raftexample/raft.go
@@ -16,6 +16,7 @@ package main
import (
"context"
+ "errors"
"fmt"
"log"
"net/http"
@@ -26,13 +27,13 @@ import (
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/raft/v3"
- "go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
- "go.etcd.io/etcd/server/v3/wal"
- "go.etcd.io/etcd/server/v3/wal/walpb"
+ "go.etcd.io/etcd/server/v3/storage/wal"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
"go.uber.org/zap"
)
@@ -85,8 +86,8 @@ var defaultSnapshotCount uint64 = 10000
// commit channel, followed by a nil message (to indicate the channel is
// current), then new log entries. To shutdown, close proposeC and read errorC.
func newRaftNode(id int, peers []string, join bool, getSnapshot func() ([]byte, error), proposeC <-chan string,
- confChangeC <-chan raftpb.ConfChange) (<-chan *commit, <-chan error, <-chan *snap.Snapshotter) {
-
+ confChangeC <-chan raftpb.ConfChange,
+) (<-chan *commit, <-chan error, <-chan *snap.Snapshotter) {
commitC := make(chan *commit)
errorC := make(chan error)
@@ -207,7 +208,7 @@ func (rc *raftNode) loadSnapshot() *raftpb.Snapshot {
log.Fatalf("raftexample: error listing snapshots (%v)", err)
}
snapshot, err := rc.snapshotter.LoadNewestAvailable(walSnaps)
- if err != nil && err != snap.ErrNoSnapshot {
+ if err != nil && !errors.Is(err, snap.ErrNoSnapshot) {
log.Fatalf("raftexample: error loading snapshot (%v)", err)
}
return snapshot
@@ -218,7 +219,7 @@ func (rc *raftNode) loadSnapshot() *raftpb.Snapshot {
// openWAL returns a WAL ready for reading.
func (rc *raftNode) openWAL(snapshot *raftpb.Snapshot) *wal.WAL {
if !wal.Exist(rc.waldir) {
- if err := os.Mkdir(rc.waldir, 0750); err != nil {
+ if err := os.Mkdir(rc.waldir, 0o750); err != nil {
log.Fatalf("raftexample: cannot create dir for wal (%v)", err)
}
@@ -273,7 +274,7 @@ func (rc *raftNode) writeError(err error) {
func (rc *raftNode) startRaft() {
if !fileutil.Exist(rc.snapdir) {
- if err := os.Mkdir(rc.snapdir, 0750); err != nil {
+ if err := os.Mkdir(rc.snapdir, 0o750); err != nil {
log.Fatalf("raftexample: cannot create dir for snapshot (%v)", err)
}
}
@@ -392,10 +393,13 @@ func (rc *raftNode) maybeTriggerSnapshot(applyDoneC <-chan struct{}) {
compactIndex = rc.appliedIndex - snapshotCatchUpEntriesN
}
if err := rc.raftStorage.Compact(compactIndex); err != nil {
- panic(err)
+ if !errors.Is(err, raft.ErrCompacted) {
+ panic(err)
+ }
+ } else {
+ log.Printf("compacted log at index %d", compactIndex)
}
- log.Printf("compacted log at index %d", compactIndex)
rc.snapshotIndex = rc.appliedIndex
}
@@ -449,14 +453,18 @@ func (rc *raftNode) serveChannels() {
// store raft entries to wal, then publish over commit channel
case rd := <-rc.node.Ready():
- rc.wal.Save(rd.HardState, rd.Entries)
+ // Must save the snapshot file and WAL snapshot entry before saving any other entries
+ // or hardstate to ensure that recovery after a snapshot restore is possible.
if !raft.IsEmptySnap(rd.Snapshot) {
rc.saveSnap(rd.Snapshot)
+ }
+ rc.wal.Save(rd.HardState, rd.Entries)
+ if !raft.IsEmptySnap(rd.Snapshot) {
rc.raftStorage.ApplySnapshot(rd.Snapshot)
rc.publishSnapshot(rd.Snapshot)
}
rc.raftStorage.Append(rd.Entries)
- rc.transport.Send(rd.Messages)
+ rc.transport.Send(rc.processMessages(rd.Messages))
applyDoneC, ok := rc.publishEntries(rc.entriesToApply(rd.CommittedEntries))
if !ok {
rc.stop()
@@ -476,6 +484,18 @@ func (rc *raftNode) serveChannels() {
}
}
+// When there is a `raftpb.EntryConfChange` after creating the snapshot,
+// the confState included in the snapshot is out of date, so we need
+// to update the confState before sending a snapshot to a follower.
+func (rc *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
+ for i := 0; i < len(ms); i++ {
+ if ms[i].Type == raftpb.MsgSnap {
+ ms[i].Snapshot.Metadata.ConfState = rc.confState
+ }
+ }
+ return ms
+}
+
func (rc *raftNode) serveRaft() {
url, err := url.Parse(rc.peers[rc.id-1])
if err != nil {
@@ -499,7 +519,7 @@ func (rc *raftNode) serveRaft() {
func (rc *raftNode) Process(ctx context.Context, m raftpb.Message) error {
return rc.node.Step(ctx, m)
}
-func (rc *raftNode) IsIDRemoved(id uint64) bool { return false }
+func (rc *raftNode) IsIDRemoved(_ uint64) bool { return false }
func (rc *raftNode) ReportUnreachable(id uint64) { rc.node.ReportUnreachable(id) }
func (rc *raftNode) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
rc.node.ReportSnapshot(id, status)
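
The serveChannels hunk above reorders persistence so that the snapshot file and its WAL record are written before any new entries or hard state, and maybeTriggerSnapshot now tolerates raft.ErrCompacted. The sketch below restates that ordering contract in isolation; the snapSaver, walSaver, and memStorage interfaces are hypothetical stand-ins rather than etcd APIs, while raft.Ready, raft.IsEmptySnap, and the raftpb types are the real go.etcd.io/raft/v3 ones already used in this diff.

```go
// Package raftpersist is a library-style sketch of the persistence ordering.
package raftpersist

import (
	"go.etcd.io/raft/v3"
	"go.etcd.io/raft/v3/raftpb"
)

// Hypothetical stand-ins for the snapshotter, WAL, and in-memory raft storage
// used by raftexample; only the call order matters here.
type snapSaver interface {
	SaveSnap(sn raftpb.Snapshot) error
}

type walSaver interface {
	Save(st raftpb.HardState, ents []raftpb.Entry) error
}

type memStorage interface {
	ApplySnapshot(sn raftpb.Snapshot) error
	Append(ents []raftpb.Entry) error
}

// persistReady shows the ordering contract: snapshot (file plus WAL record)
// first, then hard state and entries, then the in-memory storage updates.
// Persisting entries before the snapshot could leave a WAL that cannot be
// replayed after a restart that restores from the snapshot.
func persistReady(rd raft.Ready, snaps snapSaver, w walSaver, st memStorage) error {
	if !raft.IsEmptySnap(rd.Snapshot) {
		if err := snaps.SaveSnap(rd.Snapshot); err != nil {
			return err
		}
	}
	if err := w.Save(rd.HardState, rd.Entries); err != nil {
		return err
	}
	if !raft.IsEmptySnap(rd.Snapshot) {
		if err := st.ApplySnapshot(rd.Snapshot); err != nil {
			return err
		}
	}
	return st.Append(rd.Entries)
}
```
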
diff --git a/contrib/raftexample/raft_test.go b/contrib/raftexample/raft_test.go
new file mode 100644
index 00000000000..a686c8c0707
--- /dev/null
+++ b/contrib/raftexample/raft_test.go
@@ -0,0 +1,129 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+func TestProcessMessages(t *testing.T) {
+ cases := []struct {
+ name string
+ confState raftpb.ConfState
+ InputMessages []raftpb.Message
+ ExpectedMessages []raftpb.Message
+ }{
+ {
+ name: "only one snapshot message",
+ confState: raftpb.ConfState{
+ Voters: []uint64{2, 6, 8, 10},
+ },
+ InputMessages: []raftpb.Message{
+ {
+ Type: raftpb.MsgSnap,
+ To: 8,
+ Snapshot: &raftpb.Snapshot{
+ Metadata: raftpb.SnapshotMetadata{
+ Index: 100,
+ Term: 3,
+ ConfState: raftpb.ConfState{
+ Voters: []uint64{2, 6, 8},
+ AutoLeave: true,
+ },
+ },
+ },
+ },
+ },
+ ExpectedMessages: []raftpb.Message{
+ {
+ Type: raftpb.MsgSnap,
+ To: 8,
+ Snapshot: &raftpb.Snapshot{
+ Metadata: raftpb.SnapshotMetadata{
+ Index: 100,
+ Term: 3,
+ ConfState: raftpb.ConfState{
+ Voters: []uint64{2, 6, 8, 10},
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "one snapshot message and one other message",
+ confState: raftpb.ConfState{
+ Voters: []uint64{2, 7, 8, 12},
+ },
+ InputMessages: []raftpb.Message{
+ {
+ Type: raftpb.MsgSnap,
+ To: 8,
+ Snapshot: &raftpb.Snapshot{
+ Metadata: raftpb.SnapshotMetadata{
+ Index: 100,
+ Term: 3,
+ ConfState: raftpb.ConfState{
+ Voters: []uint64{2, 6, 8},
+ AutoLeave: true,
+ },
+ },
+ },
+ },
+ {
+ Type: raftpb.MsgApp,
+ From: 6,
+ To: 8,
+ },
+ },
+ ExpectedMessages: []raftpb.Message{
+ {
+ Type: raftpb.MsgSnap,
+ To: 8,
+ Snapshot: &raftpb.Snapshot{
+ Metadata: raftpb.SnapshotMetadata{
+ Index: 100,
+ Term: 3,
+ ConfState: raftpb.ConfState{
+ Voters: []uint64{2, 7, 8, 12},
+ },
+ },
+ },
+ },
+ {
+ Type: raftpb.MsgApp,
+ From: 6,
+ To: 8,
+ },
+ },
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ rn := &raftNode{
+ confState: tc.confState,
+ }
+
+ outputMessages := rn.processMessages(tc.InputMessages)
+ require.Truef(t, reflect.DeepEqual(outputMessages, tc.ExpectedMessages), "Unexpected messages, expected: %v, got %v", tc.ExpectedMessages, outputMessages)
+ })
+ }
+}
diff --git a/contrib/raftexample/raftexample_test.go b/contrib/raftexample/raftexample_test.go
index de3e48a739a..cda01204a18 100644
--- a/contrib/raftexample/raftexample_test.go
+++ b/contrib/raftexample/raftexample_test.go
@@ -17,14 +17,17 @@ package main
import (
"bytes"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/http/httptest"
"os"
+ "sync"
"testing"
"time"
- "go.etcd.io/etcd/raft/v3/raftpb"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/raft/v3/raftpb"
)
func getSnapshotFn() (func() ([]byte, error), <-chan struct{}) {
@@ -77,7 +80,7 @@ func newCluster(n int) *cluster {
func (clus *cluster) Close() (err error) {
for i := range clus.peers {
go func(i int) {
- for range clus.commitC[i] {
+ for range clus.commitC[i] { //revive:disable-line:empty-block
// drain pending commits
}
}(i)
@@ -95,9 +98,8 @@ func (clus *cluster) Close() (err error) {
func (clus *cluster) closeNoErrors(t *testing.T) {
t.Log("closing cluster...")
- if err := clus.Close(); err != nil {
- t.Fatal(err)
- }
+ err := clus.Close()
+ require.NoError(t, err)
t.Log("closing cluster [done]")
}
@@ -124,7 +126,7 @@ func TestProposeOnCommit(t *testing.T) {
}
}
donec <- struct{}{}
- for range cC {
+ for range cC { //revive:disable-line:empty-block
// acknowledge the commits from other nodes so
// raft continues to make progress
}
@@ -152,8 +154,12 @@ func TestCloseProposerInflight(t *testing.T) {
clus := newCluster(1)
defer clus.closeNoErrors(t)
+ var wg sync.WaitGroup
+ wg.Add(1)
+
// some inflight ops
go func() {
+ defer wg.Done()
clus.proposeC[0] <- "foo"
clus.proposeC[0] <- "bar"
}()
@@ -162,6 +168,8 @@ func TestCloseProposerInflight(t *testing.T) {
if c, ok := <-clus.commitC[0]; !ok || c.data[0] != "foo" {
t.Fatalf("Commit failed")
}
+
+ wg.Wait()
}
func TestPutAndGetKeyValue(t *testing.T) {
@@ -193,33 +201,24 @@ func TestPutAndGetKeyValue(t *testing.T) {
body := bytes.NewBufferString(wantValue)
cli := srv.Client()
- req, err := http.NewRequest("PUT", url, body)
- if err != nil {
- t.Fatal(err)
- }
+ req, err := http.NewRequest(http.MethodPut, url, body)
+ require.NoError(t, err)
req.Header.Set("Content-Type", "text/html; charset=utf-8")
_, err = cli.Do(req)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
	// wait a moment for the message to be processed, otherwise the get would fail.
<-time.After(time.Second)
resp, err := cli.Get(url)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
- data, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatal(err)
- }
+ data, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
defer resp.Body.Close()
- if gotValue := string(data); wantValue != gotValue {
- t.Fatalf("expect %s, got %s", wantValue, gotValue)
- }
+ gotValue := string(data)
+ require.Equalf(t, wantValue, gotValue, "expect %s, got %s", wantValue, gotValue)
}
// TestAddNewNode tests adding new node to the existing cluster.
diff --git a/etcd.conf.yml.sample b/etcd.conf.yml.sample
index 0d7a2c6b3d1..40cda38310f 100644
--- a/etcd.conf.yml.sample
+++ b/etcd.conf.yml.sample
@@ -57,7 +57,8 @@ discovery-proxy:
# DNS domain used to bootstrap initial cluster.
discovery-srv:
-# Initial cluster configuration for bootstrapping.
+# Comma-separated string of initial cluster configuration for bootstrapping.
+# Example: initial-cluster: "infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380"
initial-cluster:
# Initial cluster token for the etcd cluster during bootstrap.
@@ -69,9 +70,6 @@ initial-cluster-state: 'new'
# Reject reconfiguration requests that would cause quorum loss.
strict-reconfig-check: false
-# Accept etcd V2 client requests
-enable-v2: true
-
# Enable runtime profiling data via HTTP server
enable-pprof: true
@@ -125,6 +123,15 @@ peer-transport-security:
# Peer TLS using generated certificates.
auto-tls: false
+ # Allowed CN for inter peer authentication.
+ allowed-cn:
+
+ # Allowed TLS hostname for inter peer authentication.
+ allowed-hostname:
+
+# The validity period of the self-signed certificate, in years.
+self-signed-cert-validity: 1
+
# Enable debug-level logging for etcd.
log-level: debug
@@ -138,3 +145,13 @@ force-new-cluster: false
auto-compaction-mode: periodic
auto-compaction-retention: "1"
+
+# Limit etcd to a specific set of tls cipher suites
+cipher-suites: [
+ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
+]
+
+# Limit etcd to specific TLS protocol versions
+tls-min-version: 'TLS1.2'
+tls-max-version: 'TLS1.3'
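
For readers wiring the same limits in Go, here is an illustrative sketch of how the `cipher-suites`, `tls-min-version` and `tls-max-version` values above map onto a standard-library `tls.Config`. This is not etcd's own configuration code, just the corresponding `crypto/tls` constants; `buildTLSConfig` is a hypothetical helper.

```go
package main

import (
	"crypto/tls"
	"fmt"
)

// buildTLSConfig is a hypothetical helper showing what the sample's
// cipher-suites, tls-min-version and tls-max-version entries constrain.
func buildTLSConfig() *tls.Config {
	return &tls.Config{
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
		},
		MinVersion: tls.VersionTLS12, // tls-min-version: 'TLS1.2'
		MaxVersion: tls.VersionTLS13, // tls-max-version: 'TLS1.3'
	}
}

func main() {
	cfg := buildTLSConfig()
	fmt.Printf("%d cipher suites, min=%x max=%x\n",
		len(cfg.CipherSuites), cfg.MinVersion, cfg.MaxVersion)
}
```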
diff --git a/etcdctl/OWNERS b/etcdctl/OWNERS
new file mode 100644
index 00000000000..6ea7c455ff4
--- /dev/null
+++ b/etcdctl/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+ - area/etcdctl
diff --git a/etcdctl/README.md b/etcdctl/README.md
index 8dc9d2a13d7..40e93c4d1c0 100644
--- a/etcdctl/README.md
+++ b/etcdctl/README.md
@@ -119,15 +119,30 @@ RPC: Range
- print-value-only -- print only value when used with write-out=simple
-- consistency -- Linearizable(l) or Serializable(s)
+- consistency -- Linearizable(l) or Serializable(s), defaults to Linearizable(l).
- from-key -- Get keys that are greater than or equal to the given key using byte compare
- keys-only -- Get only the keys
-#### Output
+- max-create-revision -- restrict results to kvs with create revision less than or equal to the supplied revision
+
+- min-create-revision -- restrict results to kvs with create revision greater than or equal to the supplied revision
+
+- max-mod-revision -- restrict results to kvs with modified revision less than or equal to the supplied revision
+
+- min-mod-revision -- restrict results to kvs with modified revision greater than or equal to the supplied revision
+
+#### Output
+Prints the data in the format below:
+```
+<key>\n<value>\n<next_key>\n<next_value>\n...
+```
+
+Note that serializable requests offer lower latency, but stale data might be
+returned if the serializable option (`--consistency=s`) is specified.
+
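
The same consistency and revision filters are available from the Go client. A minimal sketch using `go.etcd.io/etcd/client/v3`, assuming a local endpoint at 127.0.0.1:2379 and an illustrative key `foo`; `WithSerializable` corresponds to `--consistency=s`, and the `With*Rev` options correspond to the revision flags above.

```go
package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // assumed local endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Serializable read (may be stale) restricted by mod revision,
	// mirroring `--consistency=s` and `--min-mod-revision` above.
	resp, err := cli.Get(ctx, "foo",
		clientv3.WithSerializable(),
		clientv3.WithMinModRev(2),
	)
	if err != nil {
		panic(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value)
	}
}
```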
#### Examples
@@ -376,7 +391,7 @@ Prints the compacted revision.
### WATCH [options] [key or prefix] [range_end] [--] [exec-command arg1 arg2 ...]
-Watch watches events stream on keys or prefixes, [key or prefix, range_end) if range_end is given. The watch command runs until it encounters an error or is terminated by the user. If range_end is given, it must be lexicographically greater than key or "\x00".
+Watch watches events stream on keys or prefixes, [key or prefix, range_end) if range_end is given. The watch command runs until it encounters an error or is terminated by the user. If range_end is given, it must be lexicographically greater than key or "\x00".
RPC: Watch
@@ -711,10 +726,19 @@ MEMBER LIST prints the member details for all members associated with an etcd cl
RPC: MemberList
+#### Options
+- consistency -- Linearizable(l) or Serializable(s), defaults to Linearizable(l).
+
#### Output
Prints a humanized table of the member IDs, statuses, names, peer addresses, and client addresses.
+Note that serializable requests offer lower latency, but a stale member list
+might be returned if the serializable option (`--consistency=s`) is specified.
+In some situations a serializable request is preferable. For example, when
+adding a new member to a one-node cluster, it is reasonable and safe to use a
+serializable request before the newly added member has started.
+
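
From the Go client, the equivalent call is the cluster API's member list; a minimal sketch, assuming a local endpoint at 127.0.0.1:2379 (the consistency flag shown above is an etcdctl option; this sketch uses the client's defaults).

```go
package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // assumed local endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := cli.MemberList(ctx)
	if err != nil {
		panic(err)
	}
	for _, m := range resp.Members {
		fmt.Printf("%x: name=%s peerURLs=%v clientURLs=%v\n",
			m.ID, m.Name, m.PeerURLs, m.ClientURLs)
	}
}
```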
#### Examples
```bash
@@ -809,13 +833,13 @@ Get the status for all endpoints in the cluster associated with the default endp
```bash
./etcdctl -w table endpoint --cluster status
-+------------------------+------------------+----------------+---------+-----------+-----------+------------+
-| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX |
-+------------------------+------------------+----------------+---------+-----------+-----------+------------+
-| http://127.0.0.1:2379 | 8211f1d0f64f3269 | 3.2.0-rc.1+git | 25 kB | false | 2 | 8 |
-| http://127.0.0.1:22379 | 91bc3c398fb3c146 | 3.2.0-rc.1+git | 25 kB | false | 2 | 8 |
-| http://127.0.0.1:32379 | fd422379fda50e48 | 3.2.0-rc.1+git | 25 kB | true | 2 | 8 |
-+------------------------+------------------+----------------+---------+-----------+-----------+------------+
++------------------------+------------------+---------------+-----------------+---------+----------------+-----------+------------+-----------+------------+--------------------+--------+
+| ENDPOINT | ID | VERSION | STORAGE VERSION | DB SIZE | DB SIZE IN USE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
++------------------------+------------------+---------------+-----------------+---------+----------------+-----------+------------+-----------+------------+--------------------+--------+
+| http://127.0.0.1:2379 | 8211f1d0f64f3269 | 3.6.0-alpha.0 | 3.6.0 | 25 kB | 25 kB | false | false | 2 | 8 | 8 | |
+| http://127.0.0.1:22379 | 91bc3c398fb3c146 | 3.6.0-alpha.0 | 3.6.0 | 25 kB | 25 kB | true | false | 2 | 8 | 8 | |
+| http://127.0.0.1:32379 | fd422379fda50e48 | 3.6.0-alpha.0 | 3.6.0 | 25 kB | 25 kB | false | false | 2 | 8 | 8 | |
++------------------------+------------------+---------------+-----------------+---------+----------------+-----------+------------+-----------+------------+--------------------+--------+
```
### ENDPOINT HASHKV
@@ -837,28 +861,73 @@ Prints a line of JSON encoding each endpoint URL and KV history hash.
Get the hash for the default endpoint:
```bash
-./etcdctl endpoint hashkv
-# 127.0.0.1:2379, 1084519789
+./etcdctl endpoint hashkv --cluster
+http://127.0.0.1:2379, 2064120424, 13
+http://127.0.0.1:22379, 2064120424, 13
+http://127.0.0.1:32379, 2064120424, 13
```
Get the status for the default endpoint as JSON:
```bash
-./etcdctl -w json endpoint hashkv
-# [{"Endpoint":"127.0.0.1:2379","Hash":{"header":{"cluster_id":14841639068965178418,"member_id":10276657743932975437,"revision":1,"raft_term":3},"hash":1084519789,"compact_revision":-1}}]
+./etcdctl endpoint hashkv --cluster -w json | jq
+[
+ {
+ "Endpoint": "http://127.0.0.1:2379",
+ "HashKV": {
+ "header": {
+ "cluster_id": 17237436991929494000,
+ "member_id": 9372538179322590000,
+ "revision": 13,
+ "raft_term": 2
+ },
+ "hash": 2064120424,
+ "compact_revision": -1,
+ "hash_revision": 13
+ }
+ },
+ {
+ "Endpoint": "http://127.0.0.1:22379",
+ "HashKV": {
+ "header": {
+ "cluster_id": 17237436991929494000,
+ "member_id": 10501334649042878000,
+ "revision": 13,
+ "raft_term": 2
+ },
+ "hash": 2064120424,
+ "compact_revision": -1,
+ "hash_revision": 13
+ }
+ },
+ {
+ "Endpoint": "http://127.0.0.1:32379",
+ "HashKV": {
+ "header": {
+ "cluster_id": 17237436991929494000,
+ "member_id": 18249187646912140000,
+ "revision": 13,
+ "raft_term": 2
+ },
+ "hash": 2064120424,
+ "compact_revision": -1,
+ "hash_revision": 13
+ }
+ }
+]
```
Get the status for all endpoints in the cluster associated with the default endpoint:
```bash
-./etcdctl -w table endpoint --cluster hashkv
-+------------------------+------------+
-| ENDPOINT | HASH |
-+------------------------+------------+
-| http://127.0.0.1:2379 | 1084519789 |
-| http://127.0.0.1:22379 | 1084519789 |
-| http://127.0.0.1:32379 | 1084519789 |
-+------------------------+------------+
+./etcdctl endpoint hashkv --cluster -w table
++------------------------+-----------+---------------+
+| ENDPOINT | HASH | HASH REVISION |
++------------------------+-----------+---------------+
+| http://127.0.0.1:2379 | 784522900 | 16 |
+| http://127.0.0.1:22379 | 784522900 | 16 |
+| http://127.0.0.1:32379 | 784522900 | 16 |
++------------------------+-----------+---------------+
```
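
The same hash can be fetched from the Go client through the maintenance API; a minimal sketch, assuming a single endpoint at http://127.0.0.1:2379 (revision 0 hashes the keyspace up to the latest revision).

```go
package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	endpoint := "http://127.0.0.1:2379" // assumed local endpoint
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{endpoint},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// rev=0 hashes the whole keyspace up to the latest revision.
	resp, err := cli.HashKV(ctx, endpoint, 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("hash=%d compact_revision=%d\n", resp.Hash, resp.CompactRevision)
}
```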
### ALARM \<subcommand\>
@@ -913,7 +982,7 @@ If NOSPACE alarm is present:
### DEFRAG [options]
-DEFRAG defragments the backend database file for a set of given endpoints while etcd is running, ~~or directly defragments an etcd data directory while etcd is not running~~. When an etcd member reclaims storage space from deleted and compacted keys, the space is kept in a free list and the database file remains the same size. By defragmenting the database, the etcd member releases this free space back to the file system.
+DEFRAG defragments the backend database file for a set of given endpoints while etcd is running. When an etcd member reclaims storage space from deleted and compacted keys, the space is kept in a free list and the database file remains the same size. By defragmenting the database, the etcd member releases this free space back to the file system.
**Note: to defragment offline (`--data-dir` flag), use: `etcdutl defrag` instead**
@@ -921,9 +990,6 @@ DEFRAG defragments the backend database file for a set of given endpoints while
**Note that defragmentation request does not get replicated over cluster. That is, the request is only applied to the local node. Specify all members in `--endpoints` flag or `--cluster` flag to automatically find all cluster members.**
-#### Options
-
-- data-dir -- Optional. **Deprecated**. If present, defragments a data directory not in use by etcd. To be removed in v3.6.
#### Output
@@ -946,16 +1012,6 @@ Finished defragmenting etcd member[http://127.0.0.1:22379]
Finished defragmenting etcd member[http://127.0.0.1:32379]
```
-To defragment a data directory directly, use the `etcdutl` with `--data-dir` flag
-(`etcdctl` will remove this flag in v3.6):
-
-``` bash
-# Defragment while etcd is not running
-./etcdutl defrag --data-dir default.etcd
-# success (exit status 0)
-# Error: cannot open database at default.etcd/member/snap/db
-```
-
#### Remarks
DEFRAG returns a zero exit code only if it succeeded in defragmenting all given endpoints.
@@ -981,106 +1037,97 @@ Save a snapshot to "snapshot.db":
### SNAPSHOT RESTORE [options] \<filename\>
-Note: Deprecated. Use `etcdutl snapshot restore` instead. To be removed in v3.6.
+Removed in v3.6. Use `etcdutl snapshot restore` instead.
-SNAPSHOT RESTORE creates an etcd data directory for an etcd cluster member from a backend database snapshot and a new cluster configuration. Restoring the snapshot into each member for a new cluster configuration will initialize a new etcd cluster preloaded by the snapshot data.
-#### Options
-
-The snapshot restore options closely resemble to those used in the `etcd` command for defining a cluster.
+### SNAPSHOT STATUS \<filename\>
-- data-dir -- Path to the data directory. Uses \.etcd if none given.
+Removed in v3.6. Use `etcdutl snapshot status` instead.
-- wal-dir -- Path to the WAL directory. Uses data directory if none given.
+### MOVE-LEADER \<transferee-id\>
-- initial-cluster -- The initial cluster configuration for the restored etcd cluster.
+MOVE-LEADER transfers leadership from the leader to another member in the cluster.
-- initial-cluster-token -- Initial cluster token for the restored etcd cluster.
+#### Example
-- initial-advertise-peer-urls -- List of peer URLs for the member being restored.
+```bash
+# to choose transferee
+transferee_id=$(./etcdctl \
+ --endpoints localhost:2379,localhost:22379,localhost:32379 \
+ endpoint status | grep -m 1 "false" | awk -F', ' '{print $2}')
+echo ${transferee_id}
+# c89feb932daef420
-- name -- Human-readable name for the etcd cluster member being restored.
+# endpoints should include leader node
+./etcdctl --endpoints ${transferee_ep} move-leader ${transferee_id}
+# Error: no leader endpoint given at [localhost:22379 localhost:32379]
-- skip-hash-check -- Ignore snapshot integrity hash value (required if copied from data directory)
+# request to leader with target node ID
+./etcdctl --endpoints ${leader_ep} move-leader ${transferee_id}
+# Leadership transferred from 45ddc0e800e20b93 to c89feb932daef420
+```
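
Programmatically, the transfer maps to the Go client's maintenance API; a minimal sketch, assuming the client is connected to the current leader's endpoint and reusing the transferee ID from the example above.

```go
package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	// Assumes 127.0.0.1:2379 is the current leader's client URL.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	var transfereeID uint64 = 0xc89feb932daef420 // member ID from the example above
	if _, err := cli.MoveLeader(ctx, transfereeID); err != nil {
		panic(err)
	}
	fmt.Println("leadership transferred")
}
```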
-#### Output
+### DOWNGRADE \<subcommand\>
-A new etcd data directory initialized with the snapshot.
+NOTICE: Downgrade is an experimental feature in v3.6 and is not recommended for production clusters.
-#### Example
+Downgrade provides commands to downgrade a cluster.
+Normally etcd members cannot be downgraded due to the cluster version mechanism.
-Save a snapshot, restore into a new 3 node cluster, and start the cluster:
-```
-./etcdctl snapshot save snapshot.db
+After the initial bootstrap, cluster members agree on the cluster version. Every 5 seconds, the leader checks the versions of all members and picks the lowest minor version.
+New members will refuse to join a cluster whose cluster version is newer than theirs, thus preventing the cluster from being downgraded.
+Downgrade commands allow a cluster administrator to force the cluster version down to the previous minor version, making it possible to downgrade the cluster.
-# restore members
-bin/etcdctl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:12380 --name sshot1 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380'
-bin/etcdctl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:22380 --name sshot2 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380'
-bin/etcdctl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:32380 --name sshot3 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380'
+Downgrade should be executed in stages:
+1. Verify that the cluster is ready to be downgraded by running `etcdctl downgrade validate <TARGET_VERSION>`
+2. Start the downgrade process by running `etcdctl downgrade enable <TARGET_VERSION>`
+3. For each cluster member:
+   1. Ensure that the member is ready for downgrade by confirming that it wrote the `The server is ready to downgrade` log line.
+   2. Replace the member binary with one of the older version.
+   3. Confirm that the member has started correctly and joined the cluster.
+4. Ensure that the downgrade process has succeeded by checking the leader log for `the cluster has been downgraded`
-# launch members
-bin/etcd --name sshot1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 &
-bin/etcd --name sshot2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 &
-bin/etcd --name sshot3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 &
-```
+Downgrade can be canceled by running the `etcdctl downgrade cancel` command.
-### SNAPSHOT STATUS \
+If the downgrade is canceled, the cluster version returns to its normal behavior (picking the lowest member minor version).
+If no members were downgraded, the cluster version will return to its original value.
+If at least one member was downgraded, the cluster version will stay at the `<TARGET_VERSION>` until the downgraded members are upgraded back.
-Note: Deprecated. Use `etcdutl snapshot restore` instead. To be removed in v3.6.
+### DOWNGRADE VALIDATE \<TARGET_VERSION\>
-SNAPSHOT STATUS lists information about a given backend database snapshot file.
+DOWNGRADE VALIDATE validates the downgrade capability before starting a downgrade.
-#### Output
+#### Example
-##### Simple format
+```bash
+./etcdctl downgrade validate 3.5
+Downgrade validate success, cluster version 3.6
-Prints a humanized table of the database hash, revision, total keys, and size.
+./etcdctl downgrade validate 3.4
+Error: etcdserver: invalid downgrade target version
-##### JSON format
+```
-Prints a line of JSON encoding the database hash, revision, total keys, and size.
+### DOWNGRADE ENABLE \<TARGET_VERSION\>
-#### Examples
-```bash
-./etcdctl snapshot status file.db
-# cf1550fb, 3, 3, 25 kB
-```
+DOWNGRADE ENABLE starts a downgrade action on the cluster.
-```bash
-./etcdctl --write-out=json snapshot status file.db
-# {"hash":3474280699,"revision":3,"totalKey":3,"totalSize":24576}
-```
+#### Example
```bash
-./etcdctl --write-out=table snapshot status file.db
-+----------+----------+------------+------------+
-| HASH | REVISION | TOTAL KEYS | TOTAL SIZE |
-+----------+----------+------------+------------+
-| cf1550fb | 3 | 3 | 25 kB |
-+----------+----------+------------+------------+
+./etcdctl downgrade enable 3.5
+Downgrade enable success, cluster version 3.6
```
-### MOVE-LEADER \
+### DOWNGRADE CANCEL
-MOVE-LEADER transfers leadership from the leader to another member in the cluster.
+DOWNGRADE CANCEL cancels the ongoing downgrade action in the cluster.
#### Example
```bash
-# to choose transferee
-transferee_id=$(./etcdctl \
- --endpoints localhost:2379,localhost:22379,localhost:32379 \
- endpoint status | grep -m 1 "false" | awk -F', ' '{print $2}')
-echo ${transferee_id}
-# c89feb932daef420
-
-# endpoints should include leader node
-./etcdctl --endpoints ${transferee_ep} move-leader ${transferee_id}
-# Error: no leader endpoint given at [localhost:22379 localhost:32379]
-
-# request to leader with target node ID
-./etcdctl --endpoints ${leader_ep} move-leader ${transferee_id}
-# Leadership transferred from 45ddc0e800e20b93 to c89feb932daef420
+./etcdctl downgrade cancel
+Downgrade cancel success, cluster version 3.5
```
## Concurrency commands
@@ -1155,7 +1202,7 @@ Whenever a leader is elected, its proposal is given as output.
ELECT returns a zero exit code only if it is terminated by a signal and can revoke its candidacy or leadership, if any.
-If a candidate is abnormally terminated, election rogress may be delayed by up to the default lease length of 60 seconds.
+If a candidate is abnormally terminated, election progress may be delayed by up to the default lease length of 60 seconds.
## Authentication commands
@@ -1488,6 +1535,8 @@ RPC: UserRevokeRole
- dest-insecure-transport -- Disable transport security for client connections
+- max-txn-ops -- Maximum number of operations permitted in a transaction during syncing updates
+
#### Output
The approximate total number of keys transferred to the destination cluster, updated every 30 seconds.
@@ -1527,6 +1576,26 @@ CHECK provides commands for checking properties of the etcd cluster.
CHECK PERF checks the performance of the etcd cluster for 60 seconds. Running the `check perf` often can create a large keyspace history which can be auto compacted and defragmented using the `--auto-compact` and `--auto-defrag` options as described below.
+Note that different workload models use different configurations in terms of the number of clients and the request rate. Here is the configuration for each load:
+
+
+| Load | Number of clients | Number of put requests (requests/sec) |
+|---------|------|---------|
+| Small | 50 | 10000 |
+| Medium | 200 | 100000 |
+| Large | 500 | 1000000 |
+| xLarge | 1000 | 3000000 |
+
+The test checks for the following conditions:
+
+- The throughput should be at least 90% of the issued requests
+- All requests should complete in less than 500 ms
+- The standard deviation of the request latencies should be less than 100 ms
+
+Hence, one workload model may pass while another fails, as sketched below.
+
+
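
As a rough illustration of how the three thresholds above combine, here is a hypothetical helper (not etcdctl's implementation) that evaluates a summarized run; `perfResult` and its fields are assumptions for the sketch.

```go
package main

import "fmt"

// perfResult is a hypothetical summary of a check perf run.
type perfResult struct {
	issued    int     // requests issued
	completed int     // requests completed within the run
	slowest   float64 // slowest request latency, in seconds
	stddev    float64 // standard deviation of latencies, in seconds
}

// pass mirrors the three documented conditions.
func pass(r perfResult) bool {
	return float64(r.completed) >= 0.9*float64(r.issued) && // >= 90% throughput
		r.slowest < 0.5 && // all requests under 500 ms
		r.stddev < 0.1 // standard deviation under 100 ms
}

func main() {
	fmt.Println(pass(perfResult{issued: 10000, completed: 9800, slowest: 0.42, stddev: 0.06}))
}
```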
RPC: CheckPerf
#### Options
diff --git a/etcdctl/ctlv2/command/auth_commands.go b/etcdctl/ctlv2/command/auth_commands.go
deleted file mode 100644
index fa19200ecfd..00000000000
--- a/etcdctl/ctlv2/command/auth_commands.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "fmt"
- "os"
- "strings"
-
- "github.com/urfave/cli"
- "go.etcd.io/etcd/client/v2"
-)
-
-func NewAuthCommands() cli.Command {
- return cli.Command{
- Name: "auth",
- Usage: "overall auth controls",
- Subcommands: []cli.Command{
- {
- Name: "enable",
- Usage: "enable auth access controls",
- ArgsUsage: " ",
- Action: actionAuthEnable,
- },
- {
- Name: "disable",
- Usage: "disable auth access controls",
- ArgsUsage: " ",
- Action: actionAuthDisable,
- },
- },
- }
-}
-
-func actionAuthEnable(c *cli.Context) error {
- authEnableDisable(c, true)
- return nil
-}
-
-func actionAuthDisable(c *cli.Context) error {
- authEnableDisable(c, false)
- return nil
-}
-
-func mustNewAuthAPI(c *cli.Context) client.AuthAPI {
- hc := mustNewClient(c)
-
- if c.GlobalBool("debug") {
- fmt.Fprintf(os.Stderr, "Cluster-Endpoints: %s\n", strings.Join(hc.Endpoints(), ", "))
- }
-
- return client.NewAuthAPI(hc)
-}
-
-func authEnableDisable(c *cli.Context, enable bool) {
- if len(c.Args()) != 0 {
- fmt.Fprintln(os.Stderr, "No arguments accepted")
- os.Exit(1)
- }
- s := mustNewAuthAPI(c)
- ctx, cancel := contextWithTotalTimeout(c)
- var err error
- if enable {
- err = s.Enable(ctx)
- } else {
- err = s.Disable(ctx)
- }
- cancel()
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
- if enable {
- fmt.Println("Authentication Enabled")
- } else {
- fmt.Println("Authentication Disabled")
- }
-}
diff --git a/etcdctl/ctlv2/command/backup_command.go b/etcdctl/ctlv2/command/backup_command.go
deleted file mode 100644
index be77ea7a906..00000000000
--- a/etcdctl/ctlv2/command/backup_command.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "github.com/urfave/cli"
- "go.etcd.io/etcd/etcdutl/v3/etcdutl"
-)
-
-const (
- description = "Performs an offline backup of etcd directory.\n\n" +
- "Moved to `./etcdutl backup` and going to be decomissioned in v3.5\n\n" +
- "The recommended (online) backup command is: `./etcdctl snapshot save ...`.\n\n"
-)
-
-func NewBackupCommand() cli.Command {
- return cli.Command{
- Name: "backup",
- Usage: "--data-dir=... --backup-dir={output}",
- UsageText: "[deprecated] offline backup an etcd directory.",
- Description: description,
- Flags: []cli.Flag{
- cli.StringFlag{Name: "data-dir", Value: "", Usage: "Path to the etcd data dir"},
- cli.StringFlag{Name: "wal-dir", Value: "", Usage: "Path to the etcd wal dir"},
- cli.StringFlag{Name: "backup-dir", Value: "", Usage: "Path to the backup dir"},
- cli.StringFlag{Name: "backup-wal-dir", Value: "", Usage: "Path to the backup wal dir"},
- cli.BoolFlag{Name: "with-v3", Usage: "Backup v3 backend data"},
- },
- Action: handleBackup,
- }
-}
-
-func handleBackup(c *cli.Context) error {
- etcdutl.HandleBackup(
- c.Bool("with-v3"),
- c.String("data-dir"),
- c.String("backup-dir"),
- c.String("wal-dir"),
- c.String("backup-wal-dir"),
- )
- return nil
-}
diff --git a/etcdctl/ctlv2/command/cluster_health.go b/etcdctl/ctlv2/command/cluster_health.go
deleted file mode 100644
index a89646b9f54..00000000000
--- a/etcdctl/ctlv2/command/cluster_health.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "os"
- "os/signal"
- "time"
-
- "go.etcd.io/etcd/client/v2"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-
- "github.com/urfave/cli"
-)
-
-func NewClusterHealthCommand() cli.Command {
- return cli.Command{
- Name: "cluster-health",
- Usage: "check the health of the etcd cluster",
- ArgsUsage: " ",
- Flags: []cli.Flag{
- cli.BoolFlag{Name: "forever, f", Usage: "forever check the health every 10 second until CTRL+C"},
- },
- Action: handleClusterHealth,
- }
-}
-
-func handleClusterHealth(c *cli.Context) error {
- forever := c.Bool("forever")
- if forever {
- sigch := make(chan os.Signal, 1)
- signal.Notify(sigch, os.Interrupt)
-
- go func() {
- <-sigch
- os.Exit(0)
- }()
- }
-
- tr, err := getTransport(c)
- if err != nil {
- handleError(c, cobrautl.ExitServerError, err)
- }
-
- hc := http.Client{
- Transport: tr,
- }
-
- cln := mustNewClientNoSync(c)
- mi := client.NewMembersAPI(cln)
- ms, err := mi.List(context.TODO())
- if err != nil {
- fmt.Println("cluster may be unhealthy: failed to list members")
- handleError(c, cobrautl.ExitServerError, err)
- }
-
- for {
- healthyMembers := 0
- for _, m := range ms {
- if len(m.ClientURLs) == 0 {
- fmt.Printf("member %s is unreachable: no available published client urls\n", m.ID)
- continue
- }
-
- checked := false
- for _, url := range m.ClientURLs {
- resp, err := hc.Get(url + "/health")
- if err != nil {
- fmt.Printf("failed to check the health of member %s on %s: %v\n", m.ID, url, err)
- continue
- }
-
- result := struct{ Health string }{}
- nresult := struct{ Health bool }{}
- bytes, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- fmt.Printf("failed to check the health of member %s on %s: %v\n", m.ID, url, err)
- continue
- }
- resp.Body.Close()
-
- err = json.Unmarshal(bytes, &result)
- if err != nil {
- err = json.Unmarshal(bytes, &nresult)
- }
- if err != nil {
- fmt.Printf("failed to check the health of member %s on %s: %v\n", m.ID, url, err)
- continue
- }
-
- checked = true
- if result.Health == "true" || nresult.Health {
- fmt.Printf("member %s is healthy: got healthy result from %s\n", m.ID, url)
- healthyMembers++
- } else {
- fmt.Printf("member %s is unhealthy: got unhealthy result from %s\n", m.ID, url)
- }
- break
- }
- if !checked {
- fmt.Printf("member %s is unreachable: %v are all unreachable\n", m.ID, m.ClientURLs)
- }
- }
- switch healthyMembers {
- case len(ms):
- fmt.Println("cluster is healthy")
- case 0:
- fmt.Println("cluster is unavailable")
- default:
- fmt.Println("cluster is degraded")
- }
-
- if !forever {
- if healthyMembers == len(ms) {
- os.Exit(cobrautl.ExitSuccess)
- }
- os.Exit(cobrautl.ExitClusterNotHealthy)
- }
-
- fmt.Printf("\nnext check after 10 second...\n\n")
- time.Sleep(10 * time.Second)
- }
-}
diff --git a/etcdctl/ctlv2/command/doc.go b/etcdctl/ctlv2/command/doc.go
deleted file mode 100644
index cedf3f762f6..00000000000
--- a/etcdctl/ctlv2/command/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package command is a set of libraries for etcdctl commands.
-package command
diff --git a/etcdctl/ctlv2/command/error.go b/etcdctl/ctlv2/command/error.go
deleted file mode 100644
index 4403150af8c..00000000000
--- a/etcdctl/ctlv2/command/error.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "encoding/json"
- "fmt"
- "os"
-
- "github.com/urfave/cli"
- "go.etcd.io/etcd/client/v2"
-)
-
-func handleError(c *cli.Context, code int, err error) {
- if c.GlobalString("output") == "json" {
- if err, ok := err.(*client.Error); ok {
- b, err := json.Marshal(err)
- if err != nil {
- panic(err)
- }
- fmt.Fprintln(os.Stderr, string(b))
- os.Exit(code)
- }
- }
-
- fmt.Fprintln(os.Stderr, "Error: ", err)
- if cerr, ok := err.(*client.ClusterError); ok {
- fmt.Fprintln(os.Stderr, cerr.Detail())
- }
- os.Exit(code)
-}
diff --git a/etcdctl/ctlv2/command/exec_watch_command.go b/etcdctl/ctlv2/command/exec_watch_command.go
deleted file mode 100644
index 2b52d8a76c9..00000000000
--- a/etcdctl/ctlv2/command/exec_watch_command.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "context"
- "errors"
- "fmt"
- "os"
- "os/exec"
- "os/signal"
-
- "go.etcd.io/etcd/client/v2"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-
- "github.com/urfave/cli"
-)
-
-// NewExecWatchCommand returns the CLI command for "exec-watch".
-func NewExecWatchCommand() cli.Command {
- return cli.Command{
- Name: "exec-watch",
- Usage: "watch a key for changes and exec an executable",
- ArgsUsage: " [args...]",
- Flags: []cli.Flag{
- cli.IntFlag{Name: "after-index", Value: 0, Usage: "watch after the given index"},
- cli.BoolFlag{Name: "recursive, r", Usage: "watch all values for key and child keys"},
- },
- Action: func(c *cli.Context) error {
- execWatchCommandFunc(c, mustNewKeyAPI(c))
- return nil
- },
- }
-}
-
-// execWatchCommandFunc executes the "exec-watch" command.
-func execWatchCommandFunc(c *cli.Context, ki client.KeysAPI) {
- args := c.Args()
- argslen := len(args)
-
- if argslen < 2 {
- handleError(c, cobrautl.ExitBadArgs, errors.New("key and command to exec required"))
- }
-
- var (
- key string
- cmdArgs []string
- )
-
- foundSep := false
- for i := range args {
- if args[i] == "--" && i != 0 {
- foundSep = true
- break
- }
- }
-
- if foundSep {
- key = args[0]
- cmdArgs = args[2:]
- } else {
- // If no flag is parsed, the order of key and cmdArgs will be switched and
- // args will not contain `--`.
- key = args[argslen-1]
- cmdArgs = args[:argslen-1]
- }
-
- index := c.Uint64("after-index")
-
- recursive := c.Bool("recursive")
-
- sigch := make(chan os.Signal, 1)
- signal.Notify(sigch, os.Interrupt)
-
- go func() {
- <-sigch
- os.Exit(0)
- }()
-
- w := ki.Watcher(key, &client.WatcherOptions{AfterIndex: uint64(index), Recursive: recursive})
-
- for {
- resp, err := w.Next(context.TODO())
- if err != nil {
- handleError(c, cobrautl.ExitServerError, err)
- }
- if resp.Node.Dir {
- fmt.Fprintf(os.Stderr, "Ignored dir %s change\n", resp.Node.Key)
- continue
- }
-
- cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
- cmd.Env = environResponse(resp, os.Environ())
-
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
-
- go func() {
- err := cmd.Start()
- if err != nil {
- fmt.Fprint(os.Stderr, err.Error())
- os.Exit(1)
- }
- cmd.Wait()
- }()
- }
-}
-
-func environResponse(resp *client.Response, env []string) []string {
- env = append(env, "ETCD_WATCH_ACTION="+resp.Action)
- env = append(env, "ETCD_WATCH_MODIFIED_INDEX="+fmt.Sprintf("%d", resp.Node.ModifiedIndex))
- env = append(env, "ETCD_WATCH_KEY="+resp.Node.Key)
- env = append(env, "ETCD_WATCH_VALUE="+resp.Node.Value)
- return env
-}
diff --git a/etcdctl/ctlv2/command/format.go b/etcdctl/ctlv2/command/format.go
deleted file mode 100644
index 58ca0eff838..00000000000
--- a/etcdctl/ctlv2/command/format.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "encoding/json"
- "fmt"
- "os"
-
- "go.etcd.io/etcd/client/v2"
-)
-
-// printResponseKey only supports to print key correctly.
-func printResponseKey(resp *client.Response, format string) {
- // Format the result.
- switch format {
- case "simple":
- if resp.Action != "delete" {
- fmt.Println(resp.Node.Value)
- } else {
- fmt.Println("PrevNode.Value:", resp.PrevNode.Value)
- }
- case "extended":
- // Extended prints in a rfc2822 style format
- fmt.Println("Key:", resp.Node.Key)
- fmt.Println("Created-Index:", resp.Node.CreatedIndex)
- fmt.Println("Modified-Index:", resp.Node.ModifiedIndex)
-
- if resp.PrevNode != nil {
- fmt.Println("PrevNode.Value:", resp.PrevNode.Value)
- }
-
- fmt.Println("TTL:", resp.Node.TTL)
- fmt.Println("Index:", resp.Index)
- if resp.Action != "delete" {
- fmt.Println("")
- fmt.Println(resp.Node.Value)
- }
- case "json":
- b, err := json.Marshal(resp)
- if err != nil {
- panic(err)
- }
- fmt.Println(string(b))
- default:
- fmt.Fprintln(os.Stderr, "Unsupported output format:", format)
- }
-}
diff --git a/etcdctl/ctlv2/command/get_command.go b/etcdctl/ctlv2/command/get_command.go
deleted file mode 100644
index 9f158c9c909..00000000000
--- a/etcdctl/ctlv2/command/get_command.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "errors"
- "fmt"
- "os"
-
- "github.com/urfave/cli"
- "go.etcd.io/etcd/client/v2"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-)
-
-// NewGetCommand returns the CLI command for "get".
-func NewGetCommand() cli.Command {
- return cli.Command{
- Name: "get",
- Usage: "retrieve the value of a key",
- ArgsUsage: "",
- Flags: []cli.Flag{
- cli.BoolFlag{Name: "sort", Usage: "returns result in sorted order"},
- cli.BoolFlag{Name: "quorum, q", Usage: "require quorum for get request"},
- },
- Action: func(c *cli.Context) error {
- getCommandFunc(c, mustNewKeyAPI(c))
- return nil
- },
- }
-}
-
-// getCommandFunc executes the "get" command.
-func getCommandFunc(c *cli.Context, ki client.KeysAPI) {
- if len(c.Args()) == 0 {
- handleError(c, cobrautl.ExitBadArgs, errors.New("key required"))
- }
-
- key := c.Args()[0]
- sorted := c.Bool("sort")
- quorum := c.Bool("quorum")
-
- ctx, cancel := contextWithTotalTimeout(c)
- resp, err := ki.Get(ctx, key, &client.GetOptions{Sort: sorted, Quorum: quorum})
- cancel()
- if err != nil {
- handleError(c, cobrautl.ExitServerError, err)
- }
-
- if resp.Node.Dir {
- fmt.Fprintln(os.Stderr, fmt.Sprintf("%s: is a directory", resp.Node.Key))
- os.Exit(1)
- }
-
- printResponseKey(resp, c.GlobalString("output"))
-}
diff --git a/etcdctl/ctlv2/command/ls_command.go b/etcdctl/ctlv2/command/ls_command.go
deleted file mode 100644
index cbc76ba1876..00000000000
--- a/etcdctl/ctlv2/command/ls_command.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "fmt"
-
- "github.com/urfave/cli"
- "go.etcd.io/etcd/client/v2"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-)
-
-func NewLsCommand() cli.Command {
- return cli.Command{
- Name: "ls",
- Usage: "retrieve a directory",
- ArgsUsage: "[key]",
- Flags: []cli.Flag{
- cli.BoolFlag{Name: "sort", Usage: "returns result in sorted order"},
- cli.BoolFlag{Name: "recursive, r", Usage: "returns all key names recursively for the given path"},
- cli.BoolFlag{Name: "p", Usage: "append slash (/) to directories"},
- cli.BoolFlag{Name: "quorum, q", Usage: "require quorum for get request"},
- },
- Action: func(c *cli.Context) error {
- lsCommandFunc(c, mustNewKeyAPI(c))
- return nil
- },
- }
-}
-
-// lsCommandFunc executes the "ls" command.
-func lsCommandFunc(c *cli.Context, ki client.KeysAPI) {
- key := "/"
- if len(c.Args()) != 0 {
- key = c.Args()[0]
- }
-
- sort := c.Bool("sort")
- recursive := c.Bool("recursive")
- quorum := c.Bool("quorum")
-
- ctx, cancel := contextWithTotalTimeout(c)
- resp, err := ki.Get(ctx, key, &client.GetOptions{Sort: sort, Recursive: recursive, Quorum: quorum})
- cancel()
- if err != nil {
- handleError(c, cobrautl.ExitServerError, err)
- }
-
- printLs(c, resp)
-}
-
-// printLs writes a response out in a manner similar to the `ls` command in unix.
-// Non-empty directories list their contents and files list their name.
-func printLs(c *cli.Context, resp *client.Response) {
- if c.GlobalString("output") == "simple" {
- if !resp.Node.Dir {
- fmt.Println(resp.Node.Key)
- }
- for _, node := range resp.Node.Nodes {
- rPrint(c, node)
- }
- } else {
- // user wants JSON or extended output
- printResponseKey(resp, c.GlobalString("output"))
- }
-}
-
-// rPrint recursively prints out the nodes in the node structure.
-func rPrint(c *cli.Context, n *client.Node) {
- if n.Dir && c.Bool("p") {
- fmt.Println(fmt.Sprintf("%v/", n.Key))
- } else {
- fmt.Println(n.Key)
- }
-
- for _, node := range n.Nodes {
- rPrint(c, node)
- }
-}
diff --git a/etcdctl/ctlv2/command/member_commands.go b/etcdctl/ctlv2/command/member_commands.go
deleted file mode 100644
index 0fc99fcdca0..00000000000
--- a/etcdctl/ctlv2/command/member_commands.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "fmt"
- "os"
- "strings"
-
- "github.com/urfave/cli"
-)
-
-func NewMemberCommand() cli.Command {
- return cli.Command{
- Name: "member",
- Usage: "member add, remove and list subcommands",
- Subcommands: []cli.Command{
- {
- Name: "list",
- Usage: "enumerate existing cluster members",
- ArgsUsage: " ",
- Action: actionMemberList,
- },
- {
- Name: "add",
- Usage: "add a new member to the etcd cluster",
- ArgsUsage: " ",
- Action: actionMemberAdd,
- },
- {
- Name: "remove",
- Usage: "remove an existing member from the etcd cluster",
- ArgsUsage: "",
- Action: actionMemberRemove,
- },
- {
- Name: "update",
- Usage: "update an existing member in the etcd cluster",
- ArgsUsage: " ",
- Action: actionMemberUpdate,
- },
- },
- }
-}
-
-func actionMemberList(c *cli.Context) error {
- if len(c.Args()) != 0 {
- fmt.Fprintln(os.Stderr, "No arguments accepted")
- os.Exit(1)
- }
- mAPI := mustNewMembersAPI(c)
- ctx, cancel := contextWithTotalTimeout(c)
- defer cancel()
-
- members, err := mAPI.List(ctx)
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
- leader, err := mAPI.Leader(ctx)
- if err != nil {
- fmt.Fprintln(os.Stderr, "Failed to get leader: ", err)
- os.Exit(1)
- }
-
- for _, m := range members {
- isLeader := false
- if m.ID == leader.ID {
- isLeader = true
- }
- if len(m.Name) == 0 {
- fmt.Printf("%s[unstarted]: peerURLs=%s\n", m.ID, strings.Join(m.PeerURLs, ","))
- } else {
- fmt.Printf("%s: name=%s peerURLs=%s clientURLs=%s isLeader=%v\n", m.ID, m.Name, strings.Join(m.PeerURLs, ","), strings.Join(m.ClientURLs, ","), isLeader)
- }
- }
-
- return nil
-}
-
-func actionMemberAdd(c *cli.Context) error {
- args := c.Args()
- if len(args) != 2 {
- fmt.Fprintln(os.Stderr, "Provide a name and a single member peerURL")
- os.Exit(1)
- }
-
- mAPI := mustNewMembersAPI(c)
-
- url := args[1]
- ctx, cancel := contextWithTotalTimeout(c)
- defer cancel()
-
- m, err := mAPI.Add(ctx, url)
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- newID := m.ID
- newName := args[0]
- fmt.Printf("Added member named %s with ID %s to cluster\n", newName, newID)
-
- members, err := mAPI.List(ctx)
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- conf := []string{}
- for _, memb := range members {
- for _, u := range memb.PeerURLs {
- n := memb.Name
- if memb.ID == newID {
- n = newName
- }
- conf = append(conf, fmt.Sprintf("%s=%s", n, u))
- }
- }
-
- fmt.Print("\n")
- fmt.Printf("ETCD_NAME=%q\n", newName)
- fmt.Printf("ETCD_INITIAL_CLUSTER=%q\n", strings.Join(conf, ","))
- fmt.Printf("ETCD_INITIAL_CLUSTER_STATE=\"existing\"\n")
- return nil
-}
-
-func actionMemberRemove(c *cli.Context) error {
- args := c.Args()
- if len(args) != 1 {
- fmt.Fprintln(os.Stderr, "Provide a single member ID")
- os.Exit(1)
- }
- removalID := args[0]
-
- mAPI := mustNewMembersAPI(c)
-
- ctx, cancel := contextWithTotalTimeout(c)
- defer cancel()
- // Get the list of members.
- members, err := mAPI.List(ctx)
- if err != nil {
- fmt.Fprintln(os.Stderr, "Error while verifying ID against known members:", err.Error())
- os.Exit(1)
- }
- // Sanity check the input.
- foundID := false
- for _, m := range members {
- if m.ID == removalID {
- foundID = true
- }
- if m.Name == removalID {
- // Note that, so long as it's not ambiguous, we *could* do the right thing by name here.
- fmt.Fprintf(os.Stderr, "Found a member named %s; if this is correct, please use its ID, eg:\n\tetcdctl member remove %s\n", m.Name, m.ID)
- fmt.Fprintf(os.Stderr, "For more details, read the documentation at https://github.com/etcd-io/etcd/blob/main/Documentation/runtime-configuration.md#remove-a-member\n\n")
- }
- }
- if !foundID {
- fmt.Fprintf(os.Stderr, "Couldn't find a member in the cluster with an ID of %s.\n", removalID)
- os.Exit(1)
- }
-
- // Actually attempt to remove the member.
- err = mAPI.Remove(ctx, removalID)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Received an error trying to remove member %s: %s", removalID, err.Error())
- os.Exit(1)
- }
-
- fmt.Printf("Removed member %s from cluster\n", removalID)
- return nil
-}
-
-func actionMemberUpdate(c *cli.Context) error {
- args := c.Args()
- if len(args) != 2 {
- fmt.Fprintln(os.Stderr, "Provide an ID and a list of comma separated peerURL (0xabcd http://example.com,http://example1.com)")
- os.Exit(1)
- }
-
- mAPI := mustNewMembersAPI(c)
-
- mid := args[0]
- urls := args[1]
- ctx, cancel := contextWithTotalTimeout(c)
- err := mAPI.Update(ctx, mid, strings.Split(urls, ","))
- cancel()
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- fmt.Printf("Updated member with ID %s in cluster\n", mid)
- return nil
-}
diff --git a/etcdctl/ctlv2/command/mk_command.go b/etcdctl/ctlv2/command/mk_command.go
deleted file mode 100644
index 1db24f16a64..00000000000
--- a/etcdctl/ctlv2/command/mk_command.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "errors"
- "os"
- "time"
-
- "github.com/urfave/cli"
- "go.etcd.io/etcd/client/v2"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-)
-
-// NewMakeCommand returns the CLI command for "mk".
-func NewMakeCommand() cli.Command {
- return cli.Command{
- Name: "mk",
- Usage: "make a new key with a given value",
- ArgsUsage: " ",
- Flags: []cli.Flag{
- cli.BoolFlag{Name: "in-order", Usage: "create in-order key under directory "},
- cli.IntFlag{Name: "ttl", Value: 0, Usage: "key time-to-live in seconds"},
- },
- Action: func(c *cli.Context) error {
- mkCommandFunc(c, mustNewKeyAPI(c))
- return nil
- },
- }
-}
-
-// mkCommandFunc executes the "mk" command.
-func mkCommandFunc(c *cli.Context, ki client.KeysAPI) {
- if len(c.Args()) == 0 {
- handleError(c, cobrautl.ExitBadArgs, errors.New("key required"))
- }
- key := c.Args()[0]
- value, err := argOrStdin(c.Args(), os.Stdin, 1)
- if err != nil {
- handleError(c, cobrautl.ExitBadArgs, errors.New("value required"))
- }
-
- ttl := c.Int("ttl")
- inorder := c.Bool("in-order")
-
- var resp *client.Response
- ctx, cancel := contextWithTotalTimeout(c)
- if !inorder {
- // Since PrevNoExist means that the Node must not exist previously,
- // this Set method always creates a new key. Therefore, mk command
- // succeeds only if the key did not previously exist, and the command
- // prevents one from overwriting values accidentally.
- resp, err = ki.Set(ctx, key, value, &client.SetOptions{TTL: time.Duration(ttl) * time.Second, PrevExist: client.PrevNoExist})
- } else {
- // If in-order flag is specified then create an inorder key under
- // the directory identified by the key argument.
- resp, err = ki.CreateInOrder(ctx, key, value, &client.CreateInOrderOptions{TTL: time.Duration(ttl) * time.Second})
- }
- cancel()
- if err != nil {
- handleError(c, cobrautl.ExitServerError, err)
- }
-
- printResponseKey(resp, c.GlobalString("output"))
-}
diff --git a/etcdctl/ctlv2/command/mkdir_command.go b/etcdctl/ctlv2/command/mkdir_command.go
deleted file mode 100644
index 1a307601e92..00000000000
--- a/etcdctl/ctlv2/command/mkdir_command.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "errors"
- "time"
-
- "github.com/urfave/cli"
- "go.etcd.io/etcd/client/v2"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-)
-
-// NewMakeDirCommand returns the CLI command for "mkdir".
-func NewMakeDirCommand() cli.Command {
- return cli.Command{
- Name: "mkdir",
- Usage: "make a new directory",
- ArgsUsage: "",
- Flags: []cli.Flag{
- cli.IntFlag{Name: "ttl", Value: 0, Usage: "key time-to-live in seconds"},
- },
- Action: func(c *cli.Context) error {
- mkdirCommandFunc(c, mustNewKeyAPI(c), client.PrevNoExist)
- return nil
- },
- }
-}
-
-// mkdirCommandFunc executes the "mkdir" command.
-func mkdirCommandFunc(c *cli.Context, ki client.KeysAPI, prevExist client.PrevExistType) {
- if len(c.Args()) == 0 {
- handleError(c, cobrautl.ExitBadArgs, errors.New("key required"))
- }
-
- key := c.Args()[0]
- ttl := c.Int("ttl")
-
- ctx, cancel := contextWithTotalTimeout(c)
- resp, err := ki.Set(ctx, key, "", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: prevExist})
- cancel()
- if err != nil {
- handleError(c, cobrautl.ExitServerError, err)
- }
- if c.GlobalString("output") != "simple" {
- printResponseKey(resp, c.GlobalString("output"))
- }
-}
diff --git a/etcdctl/ctlv2/command/rm_command.go b/etcdctl/ctlv2/command/rm_command.go
deleted file mode 100644
index 45ccf4632fb..00000000000
--- a/etcdctl/ctlv2/command/rm_command.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "errors"
-
- "github.com/urfave/cli"
- "go.etcd.io/etcd/client/v2"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-)
-
-// NewRemoveCommand returns the CLI command for "rm".
-func NewRemoveCommand() cli.Command {
- return cli.Command{
- Name: "rm",
- Usage: "remove a key or a directory",
- ArgsUsage: "",
- Flags: []cli.Flag{
- cli.BoolFlag{Name: "dir", Usage: "removes the key if it is an empty directory or a key-value pair"},
- cli.BoolFlag{Name: "recursive, r", Usage: "removes the key and all child keys(if it is a directory)"},
- cli.StringFlag{Name: "with-value", Value: "", Usage: "previous value"},
- cli.IntFlag{Name: "with-index", Value: 0, Usage: "previous index"},
- },
- Action: func(c *cli.Context) error {
- rmCommandFunc(c, mustNewKeyAPI(c))
- return nil
- },
- }
-}
-
-// rmCommandFunc executes the "rm" command.
-func rmCommandFunc(c *cli.Context, ki client.KeysAPI) {
- if len(c.Args()) == 0 {
- handleError(c, cobrautl.ExitBadArgs, errors.New("key required"))
- }
- key := c.Args()[0]
- recursive := c.Bool("recursive")
- dir := c.Bool("dir")
- prevValue := c.String("with-value")
- prevIndex := c.Int("with-index")
-
- ctx, cancel := contextWithTotalTimeout(c)
- resp, err := ki.Delete(ctx, key, &client.DeleteOptions{PrevIndex: uint64(prevIndex), PrevValue: prevValue, Dir: dir, Recursive: recursive})
- cancel()
- if err != nil {
- handleError(c, cobrautl.ExitServerError, err)
- }
- if !resp.Node.Dir || c.GlobalString("output") != "simple" {
- printResponseKey(resp, c.GlobalString("output"))
- }
-}
diff --git a/etcdctl/ctlv2/command/rmdir_command.go b/etcdctl/ctlv2/command/rmdir_command.go
deleted file mode 100644
index c7f5c8edd69..00000000000
--- a/etcdctl/ctlv2/command/rmdir_command.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "errors"
-
- "github.com/urfave/cli"
- "go.etcd.io/etcd/client/v2"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-)
-
-// NewRemoveDirCommand returns the CLI command for "rmdir".
-func NewRemoveDirCommand() cli.Command {
- return cli.Command{
- Name: "rmdir",
- Usage: "removes the key if it is an empty directory or a key-value pair",
- ArgsUsage: "",
- Action: func(c *cli.Context) error {
- rmdirCommandFunc(c, mustNewKeyAPI(c))
- return nil
- },
- }
-}
-
-// rmdirCommandFunc executes the "rmdir" command.
-func rmdirCommandFunc(c *cli.Context, ki client.KeysAPI) {
- if len(c.Args()) == 0 {
- handleError(c, cobrautl.ExitBadArgs, errors.New("key required"))
- }
- key := c.Args()[0]
-
- ctx, cancel := contextWithTotalTimeout(c)
- resp, err := ki.Delete(ctx, key, &client.DeleteOptions{Dir: true})
- cancel()
- if err != nil {
- handleError(c, cobrautl.ExitServerError, err)
- }
-
- if !resp.Node.Dir || c.GlobalString("output") != "simple" {
- printResponseKey(resp, c.GlobalString("output"))
- }
-}
diff --git a/etcdctl/ctlv2/command/role_commands.go b/etcdctl/ctlv2/command/role_commands.go
deleted file mode 100644
index 498739c4583..00000000000
--- a/etcdctl/ctlv2/command/role_commands.go
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "fmt"
- "os"
- "reflect"
- "strings"
-
- "github.com/urfave/cli"
- "go.etcd.io/etcd/client/pkg/v3/pathutil"
- "go.etcd.io/etcd/client/v2"
-)
-
-func NewRoleCommands() cli.Command {
- return cli.Command{
- Name: "role",
- Usage: "role add, grant and revoke subcommands",
- Subcommands: []cli.Command{
- {
- Name: "add",
- Usage: "add a new role for the etcd cluster",
- ArgsUsage: " ",
- Action: actionRoleAdd,
- },
- {
- Name: "get",
- Usage: "get details for a role",
- ArgsUsage: "",
- Action: actionRoleGet,
- },
- {
- Name: "list",
- Usage: "list all roles",
- ArgsUsage: " ",
- Action: actionRoleList,
- },
- {
- Name: "remove",
- Usage: "remove a role from the etcd cluster",
- ArgsUsage: "",
- Action: actionRoleRemove,
- },
- {
- Name: "grant",
- Usage: "grant path matches to an etcd role",
- ArgsUsage: "",
- Flags: []cli.Flag{
- cli.StringFlag{Name: "path", Value: "", Usage: "Path granted for the role to access"},
- cli.BoolFlag{Name: "read", Usage: "Grant read-only access"},
- cli.BoolFlag{Name: "write", Usage: "Grant write-only access"},
- cli.BoolFlag{Name: "readwrite, rw", Usage: "Grant read-write access"},
- },
- Action: actionRoleGrant,
- },
- {
- Name: "revoke",
- Usage: "revoke path matches for an etcd role",
- ArgsUsage: "",
- Flags: []cli.Flag{
- cli.StringFlag{Name: "path", Value: "", Usage: "Path revoked for the role to access"},
- cli.BoolFlag{Name: "read", Usage: "Revoke read access"},
- cli.BoolFlag{Name: "write", Usage: "Revoke write access"},
- cli.BoolFlag{Name: "readwrite, rw", Usage: "Revoke read-write access"},
- },
- Action: actionRoleRevoke,
- },
- },
- }
-}
-
-func mustNewAuthRoleAPI(c *cli.Context) client.AuthRoleAPI {
- hc := mustNewClient(c)
-
- if c.GlobalBool("debug") {
- fmt.Fprintf(os.Stderr, "Cluster-Endpoints: %s\n", strings.Join(hc.Endpoints(), ", "))
- }
-
- return client.NewAuthRoleAPI(hc)
-}
-
-func actionRoleList(c *cli.Context) error {
- if len(c.Args()) != 0 {
- fmt.Fprintln(os.Stderr, "No arguments accepted")
- os.Exit(1)
- }
- r := mustNewAuthRoleAPI(c)
- ctx, cancel := contextWithTotalTimeout(c)
- roles, err := r.ListRoles(ctx)
- cancel()
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- for _, role := range roles {
- fmt.Printf("%s\n", role)
- }
-
- return nil
-}
-
-func actionRoleAdd(c *cli.Context) error {
- api, role := mustRoleAPIAndName(c)
- ctx, cancel := contextWithTotalTimeout(c)
- defer cancel()
- currentRole, _ := api.GetRole(ctx, role)
- if currentRole != nil {
- fmt.Fprintf(os.Stderr, "Role %s already exists\n", role)
- os.Exit(1)
- }
-
- err := api.AddRole(ctx, role)
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- fmt.Printf("Role %s created\n", role)
- return nil
-}
-
-func actionRoleRemove(c *cli.Context) error {
- api, role := mustRoleAPIAndName(c)
- ctx, cancel := contextWithTotalTimeout(c)
- err := api.RemoveRole(ctx, role)
- cancel()
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- fmt.Printf("Role %s removed\n", role)
- return nil
-}
-
-func actionRoleGrant(c *cli.Context) error {
- roleGrantRevoke(c, true)
- return nil
-}
-
-func actionRoleRevoke(c *cli.Context) error {
- roleGrantRevoke(c, false)
- return nil
-}
-
-func roleGrantRevoke(c *cli.Context, grant bool) {
- path := c.String("path")
- if path == "" {
- fmt.Fprintln(os.Stderr, "No path specified; please use `--path`")
- os.Exit(1)
- }
- if pathutil.CanonicalURLPath(path) != path {
- fmt.Fprintf(os.Stderr, "Not canonical path; please use `--path=%s`\n", pathutil.CanonicalURLPath(path))
- os.Exit(1)
- }
-
- read := c.Bool("read")
- write := c.Bool("write")
- rw := c.Bool("readwrite")
- permcount := 0
- for _, v := range []bool{read, write, rw} {
- if v {
- permcount++
- }
- }
- if permcount != 1 {
- fmt.Fprintln(os.Stderr, "Please specify exactly one of --read, --write or --readwrite")
- os.Exit(1)
- }
- var permType client.PermissionType
- switch {
- case read:
- permType = client.ReadPermission
- case write:
- permType = client.WritePermission
- case rw:
- permType = client.ReadWritePermission
- }
-
- api, role := mustRoleAPIAndName(c)
- ctx, cancel := contextWithTotalTimeout(c)
- defer cancel()
- currentRole, err := api.GetRole(ctx, role)
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
- var newRole *client.Role
- if grant {
- newRole, err = api.GrantRoleKV(ctx, role, []string{path}, permType)
- } else {
- newRole, err = api.RevokeRoleKV(ctx, role, []string{path}, permType)
- }
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
- if reflect.DeepEqual(newRole, currentRole) {
- if grant {
- fmt.Printf("Role unchanged; already granted")
- } else {
- fmt.Printf("Role unchanged; already revoked")
- }
- }
-
- fmt.Printf("Role %s updated\n", role)
-}
-
-func actionRoleGet(c *cli.Context) error {
- api, rolename := mustRoleAPIAndName(c)
-
- ctx, cancel := contextWithTotalTimeout(c)
- role, err := api.GetRole(ctx, rolename)
- cancel()
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
- fmt.Printf("Role: %s\n", role.Role)
- fmt.Printf("KV Read:\n")
- for _, v := range role.Permissions.KV.Read {
- fmt.Printf("\t%s\n", v)
- }
- fmt.Printf("KV Write:\n")
- for _, v := range role.Permissions.KV.Write {
- fmt.Printf("\t%s\n", v)
- }
- return nil
-}
-
-func mustRoleAPIAndName(c *cli.Context) (client.AuthRoleAPI, string) {
- args := c.Args()
- if len(args) != 1 {
- fmt.Fprintln(os.Stderr, "Please provide a role name")
- os.Exit(1)
- }
-
- name := args[0]
- api := mustNewAuthRoleAPI(c)
- return api, name
-}
diff --git a/etcdctl/ctlv2/command/set_command.go b/etcdctl/ctlv2/command/set_command.go
deleted file mode 100644
index 372c5b96cac..00000000000
--- a/etcdctl/ctlv2/command/set_command.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "errors"
- "os"
- "time"
-
- "github.com/urfave/cli"
- "go.etcd.io/etcd/client/v2"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-)
-
-// NewSetCommand returns the CLI command for "set".
-func NewSetCommand() cli.Command {
- return cli.Command{
- Name: "set",
- Usage: "set the value of a key",
- ArgsUsage: " ",
- Description: `Set sets the value of a key.
-
- When begins with '-', is interpreted as a flag.
- Insert '--' for workaround:
-
- $ set -- `,
- Flags: []cli.Flag{
- cli.IntFlag{Name: "ttl", Value: 0, Usage: "key time-to-live in seconds"},
- cli.StringFlag{Name: "swap-with-value", Value: "", Usage: "previous value"},
- cli.IntFlag{Name: "swap-with-index", Value: 0, Usage: "previous index"},
- },
- Action: func(c *cli.Context) error {
- setCommandFunc(c, mustNewKeyAPI(c))
- return nil
- },
- }
-}
-
-// setCommandFunc executes the "set" command.
-func setCommandFunc(c *cli.Context, ki client.KeysAPI) {
- if len(c.Args()) == 0 {
- handleError(c, cobrautl.ExitBadArgs, errors.New("key required"))
- }
- key := c.Args()[0]
- value, err := argOrStdin(c.Args(), os.Stdin, 1)
- if err != nil {
- handleError(c, cobrautl.ExitBadArgs, errors.New("value required"))
- }
-
- ttl := c.Int("ttl")
- prevValue := c.String("swap-with-value")
- prevIndex := c.Int("swap-with-index")
-
- ctx, cancel := contextWithTotalTimeout(c)
- resp, err := ki.Set(ctx, key, value, &client.SetOptions{TTL: time.Duration(ttl) * time.Second, PrevIndex: uint64(prevIndex), PrevValue: prevValue})
- cancel()
- if err != nil {
- handleError(c, cobrautl.ExitServerError, err)
- }
-
- printResponseKey(resp, c.GlobalString("output"))
-}
diff --git a/etcdctl/ctlv2/command/set_dir_command.go b/etcdctl/ctlv2/command/set_dir_command.go
deleted file mode 100644
index ed85ad57aa5..00000000000
--- a/etcdctl/ctlv2/command/set_dir_command.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "github.com/urfave/cli"
- "go.etcd.io/etcd/client/v2"
-)
-
-// NewSetDirCommand returns the CLI command for "setDir".
-func NewSetDirCommand() cli.Command {
- return cli.Command{
- Name: "setdir",
- Usage: "create a new directory or update an existing directory TTL",
- ArgsUsage: "",
- Flags: []cli.Flag{
- cli.IntFlag{Name: "ttl", Value: 0, Usage: "key time-to-live in seconds"},
- },
- Action: func(c *cli.Context) error {
- mkdirCommandFunc(c, mustNewKeyAPI(c), client.PrevIgnore)
- return nil
- },
- }
-}
diff --git a/etcdctl/ctlv2/command/update_command.go b/etcdctl/ctlv2/command/update_command.go
deleted file mode 100644
index 71cd6ce662d..00000000000
--- a/etcdctl/ctlv2/command/update_command.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "errors"
- "os"
- "time"
-
- "github.com/urfave/cli"
- "go.etcd.io/etcd/client/v2"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-)
-
-// NewUpdateCommand returns the CLI command for "update".
-func NewUpdateCommand() cli.Command {
- return cli.Command{
- Name: "update",
- Usage: "update an existing key with a given value",
- ArgsUsage: " ",
- Flags: []cli.Flag{
- cli.IntFlag{Name: "ttl", Value: 0, Usage: "key time-to-live in seconds"},
- },
- Action: func(c *cli.Context) error {
- updateCommandFunc(c, mustNewKeyAPI(c))
- return nil
- },
- }
-}
-
-// updateCommandFunc executes the "update" command.
-func updateCommandFunc(c *cli.Context, ki client.KeysAPI) {
- if len(c.Args()) == 0 {
- handleError(c, cobrautl.ExitBadArgs, errors.New("key required"))
- }
- key := c.Args()[0]
- value, err := argOrStdin(c.Args(), os.Stdin, 1)
- if err != nil {
- handleError(c, cobrautl.ExitBadArgs, errors.New("value required"))
- }
-
- ttl := c.Int("ttl")
-
- ctx, cancel := contextWithTotalTimeout(c)
- resp, err := ki.Set(ctx, key, value, &client.SetOptions{TTL: time.Duration(ttl) * time.Second, PrevExist: client.PrevExist})
- cancel()
- if err != nil {
- handleError(c, cobrautl.ExitServerError, err)
- }
-
- printResponseKey(resp, c.GlobalString("output"))
-}
diff --git a/etcdctl/ctlv2/command/update_dir_command.go b/etcdctl/ctlv2/command/update_dir_command.go
deleted file mode 100644
index e1fd61777b6..00000000000
--- a/etcdctl/ctlv2/command/update_dir_command.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "errors"
- "time"
-
- "github.com/urfave/cli"
- "go.etcd.io/etcd/client/v2"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-)
-
-// NewUpdateDirCommand returns the CLI command for "updatedir".
-func NewUpdateDirCommand() cli.Command {
- return cli.Command{
- Name: "updatedir",
- Usage: "update an existing directory",
- ArgsUsage: " ",
- Flags: []cli.Flag{
- cli.IntFlag{Name: "ttl", Value: 0, Usage: "key time-to-live in seconds"},
- },
- Action: func(c *cli.Context) error {
- updatedirCommandFunc(c, mustNewKeyAPI(c))
- return nil
- },
- }
-}
-
-// updatedirCommandFunc executes the "updatedir" command.
-func updatedirCommandFunc(c *cli.Context, ki client.KeysAPI) {
- if len(c.Args()) == 0 {
- handleError(c, cobrautl.ExitBadArgs, errors.New("key required"))
- }
- key := c.Args()[0]
- ttl := c.Int("ttl")
- ctx, cancel := contextWithTotalTimeout(c)
- resp, err := ki.Set(ctx, key, "", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: client.PrevExist})
- cancel()
- if err != nil {
- handleError(c, cobrautl.ExitServerError, err)
- }
- if c.GlobalString("output") != "simple" {
- printResponseKey(resp, c.GlobalString("output"))
- }
-}
diff --git a/etcdctl/ctlv2/command/user_commands.go b/etcdctl/ctlv2/command/user_commands.go
deleted file mode 100644
index 5da11753bfb..00000000000
--- a/etcdctl/ctlv2/command/user_commands.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "fmt"
- "os"
- "strings"
-
- "github.com/bgentry/speakeasy"
- "github.com/urfave/cli"
- "go.etcd.io/etcd/client/v2"
-)
-
-func NewUserCommands() cli.Command {
- return cli.Command{
- Name: "user",
- Usage: "user add, grant and revoke subcommands",
- Subcommands: []cli.Command{
- {
- Name: "add",
- Usage: "add a new user for the etcd cluster",
- ArgsUsage: "",
- Action: actionUserAdd,
- },
- {
- Name: "get",
- Usage: "get details for a user",
- ArgsUsage: "",
- Action: actionUserGet,
- },
- {
- Name: "list",
- Usage: "list all current users",
- ArgsUsage: "",
- Action: actionUserList,
- },
- {
- Name: "remove",
- Usage: "remove a user for the etcd cluster",
- ArgsUsage: "",
- Action: actionUserRemove,
- },
- {
- Name: "grant",
- Usage: "grant roles to an etcd user",
- ArgsUsage: "",
- Flags: []cli.Flag{cli.StringSliceFlag{Name: "roles", Value: new(cli.StringSlice), Usage: "List of roles to grant or revoke"}},
- Action: actionUserGrant,
- },
- {
- Name: "revoke",
- Usage: "revoke roles for an etcd user",
- ArgsUsage: "",
- Flags: []cli.Flag{cli.StringSliceFlag{Name: "roles", Value: new(cli.StringSlice), Usage: "List of roles to grant or revoke"}},
- Action: actionUserRevoke,
- },
- {
- Name: "passwd",
- Usage: "change password for a user",
- ArgsUsage: "",
- Action: actionUserPasswd,
- },
- },
- }
-}
-
-func mustNewAuthUserAPI(c *cli.Context) client.AuthUserAPI {
- hc := mustNewClient(c)
-
- if c.GlobalBool("debug") {
- fmt.Fprintf(os.Stderr, "Cluster-Endpoints: %s\n", strings.Join(hc.Endpoints(), ", "))
- }
-
- return client.NewAuthUserAPI(hc)
-}
-
-func actionUserList(c *cli.Context) error {
- if len(c.Args()) != 0 {
- fmt.Fprintln(os.Stderr, "No arguments accepted")
- os.Exit(1)
- }
- u := mustNewAuthUserAPI(c)
- ctx, cancel := contextWithTotalTimeout(c)
- users, err := u.ListUsers(ctx)
- cancel()
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- for _, user := range users {
- fmt.Printf("%s\n", user)
- }
- return nil
-}
-
-func actionUserAdd(c *cli.Context) error {
- api, userarg := mustUserAPIAndName(c)
- ctx, cancel := contextWithTotalTimeout(c)
- defer cancel()
- user, _, _ := getUsernamePassword("", userarg+":")
-
- _, pass, err := getUsernamePassword("New password: ", userarg)
- if err != nil {
- fmt.Fprintln(os.Stderr, "Error reading password:", err)
- os.Exit(1)
- }
- err = api.AddUser(ctx, user, pass)
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- fmt.Printf("User %s created\n", user)
- return nil
-}
-
-func actionUserRemove(c *cli.Context) error {
- api, user := mustUserAPIAndName(c)
- ctx, cancel := contextWithTotalTimeout(c)
- err := api.RemoveUser(ctx, user)
- cancel()
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- fmt.Printf("User %s removed\n", user)
- return nil
-}
-
-func actionUserPasswd(c *cli.Context) error {
- api, user := mustUserAPIAndName(c)
- ctx, cancel := contextWithTotalTimeout(c)
- defer cancel()
- pass, err := speakeasy.Ask("New password: ")
- if err != nil {
- fmt.Fprintln(os.Stderr, "Error reading password:", err)
- os.Exit(1)
- }
-
- _, err = api.ChangePassword(ctx, user, pass)
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- fmt.Printf("Password updated\n")
- return nil
-}
-
-func actionUserGrant(c *cli.Context) error {
- userGrantRevoke(c, true)
- return nil
-}
-
-func actionUserRevoke(c *cli.Context) error {
- userGrantRevoke(c, false)
- return nil
-}
-
-func userGrantRevoke(c *cli.Context, grant bool) {
- roles := c.StringSlice("roles")
- if len(roles) == 0 {
- fmt.Fprintln(os.Stderr, "No roles specified; please use `--roles`")
- os.Exit(1)
- }
-
- ctx, cancel := contextWithTotalTimeout(c)
- defer cancel()
-
- api, user := mustUserAPIAndName(c)
- var err error
- if grant {
- _, err = api.GrantUser(ctx, user, roles)
- } else {
- _, err = api.RevokeUser(ctx, user, roles)
- }
-
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- fmt.Printf("User %s updated\n", user)
-}
-
-func actionUserGet(c *cli.Context) error {
- api, username := mustUserAPIAndName(c)
- ctx, cancel := contextWithTotalTimeout(c)
- user, err := api.GetUser(ctx, username)
- cancel()
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
- fmt.Printf("User: %s\n", user.User)
- fmt.Printf("Roles: %s\n", strings.Join(user.Roles, " "))
- return nil
-}
-
-func mustUserAPIAndName(c *cli.Context) (client.AuthUserAPI, string) {
- args := c.Args()
- if len(args) != 1 {
- fmt.Fprintln(os.Stderr, "Please provide a username")
- os.Exit(1)
- }
-
- api := mustNewAuthUserAPI(c)
- username := args[0]
- return api, username
-}
diff --git a/etcdctl/ctlv2/command/util.go b/etcdctl/ctlv2/command/util.go
deleted file mode 100644
index b80486b7ec0..00000000000
--- a/etcdctl/ctlv2/command/util.go
+++ /dev/null
@@ -1,342 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/url"
- "os"
- "strings"
- "syscall"
- "time"
-
- "go.etcd.io/etcd/client/pkg/v3/transport"
- "go.etcd.io/etcd/client/v2"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-
- "github.com/bgentry/speakeasy"
- "github.com/urfave/cli"
-)
-
-var (
- ErrNoAvailSrc = errors.New("no available argument and stdin")
-
- // the maximum amount of time a dial will wait for a connection to setup.
- // 30s is long enough for most of the network conditions.
- defaultDialTimeout = 30 * time.Second
-)
-
-func argOrStdin(args []string, stdin io.Reader, i int) (string, error) {
- if i < len(args) {
- return args[i], nil
- }
- bytes, err := ioutil.ReadAll(stdin)
- if string(bytes) == "" || err != nil {
- return "", ErrNoAvailSrc
- }
- return string(bytes), nil
-}
-
-func getPeersFlagValue(c *cli.Context) []string {
- peerstr := c.GlobalString("endpoints")
-
- if peerstr == "" {
- peerstr = os.Getenv("ETCDCTL_ENDPOINTS")
- }
-
- if peerstr == "" {
- peerstr = c.GlobalString("endpoint")
- }
-
- if peerstr == "" {
- peerstr = os.Getenv("ETCDCTL_ENDPOINT")
- }
-
- if peerstr == "" {
- peerstr = c.GlobalString("peers")
- }
-
- if peerstr == "" {
- peerstr = os.Getenv("ETCDCTL_PEERS")
- }
-
- // If we still don't have peers, use a default
- if peerstr == "" {
- peerstr = "http://127.0.0.1:2379,http://127.0.0.1:4001"
- }
-
- return strings.Split(peerstr, ",")
-}
-
-func getDomainDiscoveryFlagValue(c *cli.Context) ([]string, error) {
- domainstr, insecure, serviceName := getDiscoveryDomain(c)
-
- // If we still don't have domain discovery, return nothing
- if domainstr == "" {
- return []string{}, nil
- }
-
- discoverer := client.NewSRVDiscover()
- eps, err := discoverer.Discover(domainstr, serviceName)
- if err != nil {
- return nil, err
- }
- if insecure {
- return eps, err
- }
- // strip insecure connections
- ret := []string{}
- for _, ep := range eps {
- if strings.HasPrefix(ep, "http://") {
- fmt.Fprintf(os.Stderr, "ignoring discovered insecure endpoint %q\n", ep)
- continue
- }
- ret = append(ret, ep)
- }
- return ret, err
-}
-
-func getDiscoveryDomain(c *cli.Context) (domainstr string, insecure bool, serviceName string) {
- domainstr = c.GlobalString("discovery-srv")
- // Use an environment variable if nothing was supplied on the
- // command line
- if domainstr == "" {
- domainstr = os.Getenv("ETCDCTL_DISCOVERY_SRV")
- }
- insecure = c.GlobalBool("insecure-discovery") || (os.Getenv("ETCDCTL_INSECURE_DISCOVERY") != "")
- serviceName = c.GlobalString("discovery-srv-name")
- if serviceName == "" {
- serviceName = os.Getenv("ETCDCTL_DISCOVERY_SRV_NAME")
- }
- return domainstr, insecure, serviceName
-}
-
-func getEndpoints(c *cli.Context) ([]string, error) {
- eps, err := getDomainDiscoveryFlagValue(c)
- if err != nil {
- return nil, err
- }
-
- // If domain discovery returns no endpoints, check peer flag
- if len(eps) == 0 {
- eps = getPeersFlagValue(c)
- }
-
- for i, ep := range eps {
- u, err := url.Parse(ep)
- if err != nil {
- return nil, err
- }
-
- if u.Scheme == "" {
- u.Scheme = "http"
- }
-
- eps[i] = u.String()
- }
-
- return eps, nil
-}
-
-func getTransport(c *cli.Context) (*http.Transport, error) {
- cafile := c.GlobalString("ca-file")
- certfile := c.GlobalString("cert-file")
- keyfile := c.GlobalString("key-file")
-
- // Use an environment variable if nothing was supplied on the
- // command line
- if cafile == "" {
- cafile = os.Getenv("ETCDCTL_CA_FILE")
- }
- if certfile == "" {
- certfile = os.Getenv("ETCDCTL_CERT_FILE")
- }
- if keyfile == "" {
- keyfile = os.Getenv("ETCDCTL_KEY_FILE")
- }
-
- discoveryDomain, insecure, _ := getDiscoveryDomain(c)
- if insecure {
- discoveryDomain = ""
- }
- tls := transport.TLSInfo{
- CertFile: certfile,
- KeyFile: keyfile,
- ServerName: discoveryDomain,
- TrustedCAFile: cafile,
- }
-
- dialTimeout := defaultDialTimeout
- totalTimeout := c.GlobalDuration("total-timeout")
- if totalTimeout != 0 && totalTimeout < dialTimeout {
- dialTimeout = totalTimeout
- }
- return transport.NewTransport(tls, dialTimeout)
-}
-
-func getUsernamePasswordFromFlag(usernameFlag string) (username string, password string, err error) {
- return getUsernamePassword("Password: ", usernameFlag)
-}
-
-func getUsernamePassword(prompt, usernameFlag string) (username string, password string, err error) {
- colon := strings.Index(usernameFlag, ":")
- if colon == -1 {
- username = usernameFlag
- // Prompt for the password.
- password, err = speakeasy.Ask(prompt)
- if err != nil {
- return "", "", err
- }
- } else {
- username = usernameFlag[:colon]
- password = usernameFlag[colon+1:]
- }
- return username, password, nil
-}
-
-func mustNewKeyAPI(c *cli.Context) client.KeysAPI {
- return client.NewKeysAPI(mustNewClient(c))
-}
-
-func mustNewMembersAPI(c *cli.Context) client.MembersAPI {
- return client.NewMembersAPI(mustNewClient(c))
-}
-
-func mustNewClient(c *cli.Context) client.Client {
- hc, err := newClient(c)
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- debug := c.GlobalBool("debug")
- if debug {
- client.EnablecURLDebug()
- }
-
- if !c.GlobalBool("no-sync") {
- if debug {
- fmt.Fprintf(os.Stderr, "start to sync cluster using endpoints(%s)\n", strings.Join(hc.Endpoints(), ","))
- }
- ctx, cancel := contextWithTotalTimeout(c)
- err := hc.Sync(ctx)
- cancel()
- if err != nil {
- if err == client.ErrNoEndpoints {
- fmt.Fprintf(os.Stderr, "etcd cluster has no published client endpoints.\n")
- fmt.Fprintf(os.Stderr, "Try '--no-sync' if you want to access non-published client endpoints(%s).\n", strings.Join(hc.Endpoints(), ","))
- handleError(c, cobrautl.ExitServerError, err)
- }
- if isConnectionError(err) {
- handleError(c, cobrautl.ExitBadConnection, err)
- }
- }
- if debug {
- fmt.Fprintf(os.Stderr, "got endpoints(%s) after sync\n", strings.Join(hc.Endpoints(), ","))
- }
- }
-
- if debug {
- fmt.Fprintf(os.Stderr, "Cluster-Endpoints: %s\n", strings.Join(hc.Endpoints(), ", "))
- }
-
- return hc
-}
-
-func isConnectionError(err error) bool {
- switch t := err.(type) {
- case *client.ClusterError:
- for _, cerr := range t.Errors {
- if !isConnectionError(cerr) {
- return false
- }
- }
- return true
- case *net.OpError:
- if t.Op == "dial" || t.Op == "read" {
- return true
- }
- return isConnectionError(t.Err)
- case syscall.Errno:
- if t == syscall.ECONNREFUSED {
- return true
- }
- case net.Error:
- if t.Timeout() {
- return true
- }
- }
- return false
-}
-
-func mustNewClientNoSync(c *cli.Context) client.Client {
- hc, err := newClient(c)
- if err != nil {
- fmt.Fprintln(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- if c.GlobalBool("debug") {
- fmt.Fprintf(os.Stderr, "Cluster-Endpoints: %s\n", strings.Join(hc.Endpoints(), ", "))
- client.EnablecURLDebug()
- }
-
- return hc
-}
-
-func newClient(c *cli.Context) (client.Client, error) {
- eps, err := getEndpoints(c)
- if err != nil {
- return nil, err
- }
-
- tr, err := getTransport(c)
- if err != nil {
- return nil, err
- }
-
- cfg := client.Config{
- Transport: tr,
- Endpoints: eps,
- HeaderTimeoutPerRequest: c.GlobalDuration("timeout"),
- }
-
- uFlag := c.GlobalString("username")
-
- if uFlag == "" {
- uFlag = os.Getenv("ETCDCTL_USERNAME")
- }
-
- if uFlag != "" {
- username, password, err := getUsernamePasswordFromFlag(uFlag)
- if err != nil {
- return nil, err
- }
- cfg.Username = username
- cfg.Password = password
- }
-
- return client.New(cfg)
-}
-
-func contextWithTotalTimeout(c *cli.Context) (context.Context, context.CancelFunc) {
- return context.WithTimeout(context.Background(), c.GlobalDuration("total-timeout"))
-}
diff --git a/etcdctl/ctlv2/command/util_test.go b/etcdctl/ctlv2/command/util_test.go
deleted file mode 100644
index 7f9bed8dcd2..00000000000
--- a/etcdctl/ctlv2/command/util_test.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "bytes"
- "testing"
-)
-
-func TestArgOrStdin(t *testing.T) {
- tests := []struct {
- args []string
- stdin string
- i int
- w string
- we error
- }{
- {
- args: []string{
- "a",
- },
- stdin: "b",
- i: 0,
- w: "a",
- we: nil,
- },
- {
- args: []string{
- "a",
- },
- stdin: "b",
- i: 1,
- w: "b",
- we: nil,
- },
- {
- args: []string{
- "a",
- },
- stdin: "",
- i: 1,
- w: "",
- we: ErrNoAvailSrc,
- },
- }
-
- for i, tt := range tests {
- var b bytes.Buffer
- b.Write([]byte(tt.stdin))
- g, ge := argOrStdin(tt.args, &b, tt.i)
- if g != tt.w {
- t.Errorf("#%d: expect %v, not %v", i, tt.w, g)
- }
- if ge != tt.we {
- t.Errorf("#%d: expect %v, not %v", i, tt.we, ge)
- }
- }
-}
diff --git a/etcdctl/ctlv2/command/watch_command.go b/etcdctl/ctlv2/command/watch_command.go
deleted file mode 100644
index 3af28934216..00000000000
--- a/etcdctl/ctlv2/command/watch_command.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "context"
- "errors"
- "fmt"
- "os"
- "os/signal"
-
- "go.etcd.io/etcd/client/v2"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-
- "github.com/urfave/cli"
-)
-
-// NewWatchCommand returns the CLI command for "watch".
-func NewWatchCommand() cli.Command {
- return cli.Command{
- Name: "watch",
- Usage: "watch a key for changes",
- ArgsUsage: "",
- Flags: []cli.Flag{
- cli.BoolFlag{Name: "forever, f", Usage: "forever watch a key until CTRL+C"},
- cli.IntFlag{Name: "after-index", Value: 0, Usage: "watch after the given index"},
- cli.BoolFlag{Name: "recursive, r", Usage: "returns all values for key and child keys"},
- },
- Action: func(c *cli.Context) error {
- watchCommandFunc(c, mustNewKeyAPI(c))
- return nil
- },
- }
-}
-
-// watchCommandFunc executes the "watch" command.
-func watchCommandFunc(c *cli.Context, ki client.KeysAPI) {
- if len(c.Args()) == 0 {
- handleError(c, cobrautl.ExitBadArgs, errors.New("key required"))
- }
- key := c.Args()[0]
- recursive := c.Bool("recursive")
- forever := c.Bool("forever")
- index := c.Int("after-index")
-
- stop := false
- w := ki.Watcher(key, &client.WatcherOptions{AfterIndex: uint64(index), Recursive: recursive})
-
- sigch := make(chan os.Signal, 1)
- signal.Notify(sigch, os.Interrupt)
-
- go func() {
- <-sigch
- os.Exit(0)
- }()
-
- for !stop {
- resp, err := w.Next(context.TODO())
- if err != nil {
- handleError(c, cobrautl.ExitServerError, err)
- }
- if resp.Node.Dir {
- continue
- }
- if recursive {
- fmt.Printf("[%s] %s\n", resp.Action, resp.Node.Key)
- }
-
- printResponseKey(resp, c.GlobalString("output"))
-
- if !forever {
- stop = true
- }
- }
-}
diff --git a/etcdctl/ctlv2/ctl.go b/etcdctl/ctlv2/ctl.go
deleted file mode 100644
index 981da0d9f8a..00000000000
--- a/etcdctl/ctlv2/ctl.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ctlv2 contains the main entry point for the etcdctl for v2 API.
-package ctlv2
-
-import (
- "fmt"
- "os"
- "time"
-
- "go.etcd.io/etcd/api/v3/version"
- "go.etcd.io/etcd/etcdctl/v3/ctlv2/command"
-
- "github.com/urfave/cli"
-)
-
-func Start() error {
- app := cli.NewApp()
- app.Name = "etcdctl"
- app.Version = version.Version
- cli.VersionPrinter = func(c *cli.Context) {
- fmt.Fprintf(c.App.Writer, "etcdctl version: %v\n", c.App.Version)
- fmt.Fprintln(c.App.Writer, "API version: 2")
- }
- app.Usage = "A simple command line client for etcd."
-
- app.Flags = []cli.Flag{
- cli.BoolFlag{Name: "debug", Usage: "output cURL commands which can be used to reproduce the request"},
- cli.BoolFlag{Name: "no-sync", Usage: "don't synchronize cluster information before sending request"},
- cli.StringFlag{Name: "output, o", Value: "simple", Usage: "output response in the given format (`simple`, `extended` or `json`)"},
- cli.StringFlag{Name: "discovery-srv, D", Usage: "domain name to query for SRV records describing cluster endpoints"},
- cli.BoolFlag{Name: "insecure-discovery", Usage: "accept insecure SRV records describing cluster endpoints"},
- cli.StringFlag{Name: "peers, C", Value: "", Usage: "DEPRECATED - \"--endpoints\" should be used instead"},
- cli.StringFlag{Name: "endpoint", Value: "", Usage: "DEPRECATED - \"--endpoints\" should be used instead"},
- cli.StringFlag{Name: "endpoints", Value: "", Usage: "a comma-delimited list of machine addresses in the cluster (default: \"http://127.0.0.1:2379,http://127.0.0.1:4001\")"},
- cli.StringFlag{Name: "cert-file", Value: "", Usage: "identify HTTPS client using this SSL certificate file"},
- cli.StringFlag{Name: "key-file", Value: "", Usage: "identify HTTPS client using this SSL key file"},
- cli.StringFlag{Name: "ca-file", Value: "", Usage: "verify certificates of HTTPS-enabled servers using this CA bundle"},
- cli.StringFlag{Name: "username, u", Value: "", Usage: "provide username[:password] and prompt if password is not supplied."},
- cli.DurationFlag{Name: "timeout", Value: 2 * time.Second, Usage: "connection timeout per request"},
- cli.DurationFlag{Name: "total-timeout", Value: 5 * time.Second, Usage: "timeout for the command execution (except watch)"},
- }
- app.Commands = []cli.Command{
- command.NewBackupCommand(),
- command.NewClusterHealthCommand(),
- command.NewMakeCommand(),
- command.NewMakeDirCommand(),
- command.NewRemoveCommand(),
- command.NewRemoveDirCommand(),
- command.NewGetCommand(),
- command.NewLsCommand(),
- command.NewSetCommand(),
- command.NewSetDirCommand(),
- command.NewUpdateCommand(),
- command.NewUpdateDirCommand(),
- command.NewWatchCommand(),
- command.NewExecWatchCommand(),
- command.NewMemberCommand(),
- command.NewUserCommands(),
- command.NewRoleCommands(),
- command.NewAuthCommands(),
- }
- return app.Run(os.Args)
-}
-
-func MustStart() {
- err := Start()
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
-}
diff --git a/etcdctl/ctlv3/command/alarm_command.go b/etcdctl/ctlv3/command/alarm_command.go
index 556ae11e3d0..679f9d98f27 100644
--- a/etcdctl/ctlv3/command/alarm_command.go
+++ b/etcdctl/ctlv3/command/alarm_command.go
@@ -18,6 +18,7 @@ import (
"fmt"
"github.com/spf13/cobra"
+
v3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
)
diff --git a/etcdctl/ctlv3/command/auth_command.go b/etcdctl/ctlv3/command/auth_command.go
index e163dc68198..13ccda44d16 100644
--- a/etcdctl/ctlv3/command/auth_command.go
+++ b/etcdctl/ctlv3/command/auth_command.go
@@ -15,9 +15,11 @@
package command
import (
+ "errors"
"fmt"
"github.com/spf13/cobra"
+
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/pkg/v3/cobrautl"
)
@@ -81,7 +83,7 @@ func authEnableCommandFunc(cmd *cobra.Command, args []string) {
if _, err = cli.AuthEnable(ctx); err == nil {
break
}
- if err == rpctypes.ErrRootRoleNotExist {
+ if errors.Is(err, rpctypes.ErrRootRoleNotExist) {
if _, err = cli.RoleAdd(ctx, "root"); err != nil {
break
}
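
The errors.Is change above matters because once an error has been wrapped with %w anywhere along the call path, a direct == comparison against a sentinel stops matching, while errors.Is still finds it by walking the wrap chain. A minimal, self-contained sketch of the difference (illustration only, not part of this patch; the sentinel below is a stand-in for rpctypes.ErrRootRoleNotExist):

    package main

    import (
        "errors"
        "fmt"
    )

    // errRootRoleNotExist stands in for a library sentinel error.
    var errRootRoleNotExist = errors.New("root role does not exist")

    func main() {
        // Some layer adds context and wraps the sentinel with %w.
        wrapped := fmt.Errorf("auth enable: %w", errRootRoleNotExist)

        fmt.Println(wrapped == errRootRoleNotExist)          // false: wrapping defeats ==
        fmt.Println(errors.Is(wrapped, errRootRoleNotExist)) // true: errors.Is unwraps the chain
    }
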
diff --git a/etcdctl/ctlv3/command/check.go b/etcdctl/ctlv3/command/check.go
index a2a5ca31593..3678f32c0aa 100644
--- a/etcdctl/ctlv3/command/check.go
+++ b/etcdctl/ctlv3/command/check.go
@@ -26,13 +26,13 @@ import (
"sync"
"time"
+ "github.com/cheggaaa/pb/v3"
+ "github.com/spf13/cobra"
+ "golang.org/x/time/rate"
+
v3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
"go.etcd.io/etcd/pkg/v3/report"
-
- "github.com/spf13/cobra"
- "golang.org/x/time/rate"
- "gopkg.in/cheggaaa/pb.v1"
)
var (
@@ -126,17 +126,20 @@ func NewCheckPerfCommand() *cobra.Command {
}
// TODO: support customized configuration
- cmd.Flags().StringVar(&checkPerfLoad, "load", "s", "The performance check's workload model. Accepted workloads: s(small), m(medium), l(large), xl(xLarge)")
+ cmd.Flags().StringVar(&checkPerfLoad, "load", "s", "The performance check's workload model. Accepted workloads: s(small), m(medium), l(large), xl(xLarge). Different workload models use different configurations in terms of number of clients and expected throughput.")
cmd.Flags().StringVar(&checkPerfPrefix, "prefix", "/etcdctl-check-perf/", "The prefix for writing the performance check's keys.")
cmd.Flags().BoolVar(&autoCompact, "auto-compact", false, "Compact storage with last revision after test is finished.")
cmd.Flags().BoolVar(&autoDefrag, "auto-defrag", false, "Defragment storage after test is finished.")
+ cmd.RegisterFlagCompletionFunc("load", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+ return []string{"small", "medium", "large", "xLarge"}, cobra.ShellCompDirectiveDefault
+ })
return cmd
}
// newCheckPerfCommand executes the "check perf" command.
func newCheckPerfCommand(cmd *cobra.Command, args []string) {
- var checkPerfAlias = map[string]string{
+ checkPerfAlias := map[string]string{
"s": "s", "small": "s",
"m": "m", "medium": "m",
"l": "l", "large": "l",
@@ -155,7 +158,7 @@ func newCheckPerfCommand(cmd *cobra.Command, args []string) {
cc := clientConfigFromCmd(cmd)
clients := make([]*v3.Client, cfg.clients)
for i := 0; i < cfg.clients; i++ {
- clients[i] = cc.mustClient()
+ clients[i] = mustClient(cc)
}
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(cfg.duration)*time.Second)
@@ -177,7 +180,6 @@ func newCheckPerfCommand(cmd *cobra.Command, args []string) {
k, v := make([]byte, ksize), string(make([]byte, vsize))
bar := pb.New(cfg.duration)
- bar.Format("Bom !")
bar.Start()
r := report.NewReport("%4.4f")
@@ -310,7 +312,7 @@ func NewCheckDatascaleCommand() *cobra.Command {
// newCheckDatascaleCommand executes the "check datascale" command.
func newCheckDatascaleCommand(cmd *cobra.Command, args []string) {
- var checkDatascaleAlias = map[string]string{
+ checkDatascaleAlias := map[string]string{
"s": "s", "small": "s",
"m": "m", "medium": "m",
"l": "l", "large": "l",
@@ -328,7 +330,7 @@ func newCheckDatascaleCommand(cmd *cobra.Command, args []string) {
cc := clientConfigFromCmd(cmd)
clients := make([]*v3.Client, cfg.clients)
for i := 0; i < cfg.clients; i++ {
- clients[i] = cc.mustClient()
+ clients[i] = mustClient(cc)
}
// get endpoints
@@ -363,9 +365,8 @@ func newCheckDatascaleCommand(cmd *cobra.Command, args []string) {
os.Exit(cobrautl.ExitError)
}
- fmt.Println(fmt.Sprintf("Start data scale check for work load [%v key-value pairs, %v bytes per key-value, %v concurrent clients].", cfg.limit, cfg.kvSize, cfg.clients))
+ fmt.Printf("Start data scale check for work load [%v key-value pairs, %v bytes per key-value, %v concurrent clients].\n", cfg.limit, cfg.kvSize, cfg.clients)
bar := pb.New(cfg.limit)
- bar.Format("Bom !")
bar.Start()
for i := range clients {
@@ -434,6 +435,6 @@ func newCheckDatascaleCommand(cmd *cobra.Command, args []string) {
}
os.Exit(cobrautl.ExitError)
} else {
- fmt.Println(fmt.Sprintf("PASS: Approximate system memory used : %v MB.", strconv.FormatFloat(mbUsed, 'f', 2, 64)))
+ fmt.Printf("PASS: Approximate system memory used : %v MB.\n", strconv.FormatFloat(mbUsed, 'f', 2, 64))
}
}
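
The RegisterFlagCompletionFunc call added above hands the shell a fixed candidate list when completing --load. A stand-alone sketch of the same cobra pattern (names and values here are placeholders, not etcd code):

    package main

    import "github.com/spf13/cobra"

    func main() {
        cmd := &cobra.Command{Use: "demo", Run: func(*cobra.Command, []string) {}}
        cmd.Flags().String("format", "table", "output format")
        // Candidates offered when the user presses tab after --format.
        cmd.RegisterFlagCompletionFunc("format", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
            return []string{"table", "json", "yaml"}, cobra.ShellCompDirectiveNoFileComp
        })
        cmd.Execute()
    }

These candidates only appear once shell completion itself is installed, which is what the new completion command elsewhere in this patch generates.
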
diff --git a/etcdctl/ctlv3/command/compaction_command.go b/etcdctl/ctlv3/command/compaction_command.go
index b0b7c3118f3..5c0bb1019a9 100644
--- a/etcdctl/ctlv3/command/compaction_command.go
+++ b/etcdctl/ctlv3/command/compaction_command.go
@@ -19,7 +19,8 @@ import (
"strconv"
"github.com/spf13/cobra"
- "go.etcd.io/etcd/client/v3"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
)
diff --git a/etcdctl/ctlv3/command/completion_command.go b/etcdctl/ctlv3/command/completion_command.go
new file mode 100644
index 00000000000..cb98d4a7d69
--- /dev/null
+++ b/etcdctl/ctlv3/command/completion_command.go
@@ -0,0 +1,84 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package command
+
+import (
+ "os"
+
+ "github.com/spf13/cobra"
+)
+
+func NewCompletionCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "completion [bash|zsh|fish|powershell]",
+ Short: "Generate completion script",
+ Long: `To load completions:
+
+Bash:
+
+ $ source <(etcdctl completion bash)
+
+ # To load completions for each session, execute once:
+ # Linux:
+ $ etcdctl completion bash > /etc/bash_completion.d/etcdctl
+ # macOS:
+ $ etcdctl completion bash > /usr/local/etc/bash_completion.d/etcdctl
+
+Zsh:
+
+ # If shell completion is not already enabled in your environment,
+ # you will need to enable it. You can execute the following once:
+
+ $ echo "autoload -U compinit; compinit" >> ~/.zshrc
+
+ # To load completions for each session, execute once:
+ $ etcdctl completion zsh > "${fpath[1]}/_etcdctl"
+
+ # You will need to start a new shell for this setup to take effect.
+
+fish:
+
+ $ etcdctl completion fish | source
+
+ # To load completions for each session, execute once:
+ $ etcdctl completion fish > ~/.config/fish/completions/etcdctl.fish
+
+PowerShell:
+
+ PS> etcdctl completion powershell | Out-String | Invoke-Expression
+
+ # To load completions for every new session, run:
+ PS> etcdctl completion powershell > etcdctl.ps1
+ # and source this file from your PowerShell profile.
+`,
+ DisableFlagsInUseLine: true,
+ ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
+ Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
+ Run: func(cmd *cobra.Command, args []string) {
+ switch args[0] {
+ case "bash":
+ cmd.Root().GenBashCompletion(os.Stdout)
+ case "zsh":
+ cmd.Root().GenZshCompletion(os.Stdout)
+ case "fish":
+ cmd.Root().GenFishCompletion(os.Stdout, true)
+ case "powershell":
+ cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
+ }
+ },
+ }
+
+ return cmd
+}
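
For the generated script to know about etcdctl's subcommands and flags, this command has to be attached to the root cobra command; the usual wiring is a one-line rootCmd.AddCommand(command.NewCompletionCommand()) in the ctlv3 command setup (the registration itself is assumed to happen elsewhere in this patch). Once sourced per the instructions in the Long text above, tab completion also picks up the flag candidates registered via RegisterFlagCompletionFunc in check.go and get_command.go.
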
diff --git a/etcdctl/ctlv3/command/defrag_command.go b/etcdctl/ctlv3/command/defrag_command.go
index 42e47cbb905..36116d4888c 100644
--- a/etcdctl/ctlv3/command/defrag_command.go
+++ b/etcdctl/ctlv3/command/defrag_command.go
@@ -17,14 +17,11 @@ package command
import (
"fmt"
"os"
+ "time"
"github.com/spf13/cobra"
- "go.etcd.io/etcd/etcdutl/v3/etcdutl"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-)
-var (
- defragDataDir string
+ "go.etcd.io/etcd/pkg/v3/cobrautl"
)
// NewDefragCommand returns the cobra command for "Defrag".
@@ -35,31 +32,27 @@ func NewDefragCommand() *cobra.Command {
Run: defragCommandFunc,
}
cmd.PersistentFlags().BoolVar(&epClusterEndpoints, "cluster", false, "use all endpoints from the cluster member list")
- cmd.Flags().StringVar(&defragDataDir, "data-dir", "", "Optional. If present, defragments a data directory not in use by etcd.")
return cmd
}
func defragCommandFunc(cmd *cobra.Command, args []string) {
- if len(defragDataDir) > 0 {
- fmt.Fprintf(os.Stderr, "Use `etcdutl defrag` instead. The --data-dir is going to be decomissioned in v3.6.\n\n")
- err := etcdutl.DefragData(defragDataDir)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
- }
-
failures := 0
- c := mustClientFromCmd(cmd)
+ cfg := clientConfigFromCmd(cmd)
for _, ep := range endpointsFromCluster(cmd) {
+ cfg.Endpoints = []string{ep}
+ c := mustClient(cfg)
ctx, cancel := commandCtx(cmd)
+ start := time.Now()
_, err := c.Defragment(ctx, ep)
+ d := time.Since(start)
cancel()
if err != nil {
- fmt.Fprintf(os.Stderr, "Failed to defragment etcd member[%s] (%v)\n", ep, err)
+ fmt.Fprintf(os.Stderr, "Failed to defragment etcd member[%s]. took %s. (%v)\n", ep, d.String(), err)
failures++
} else {
- fmt.Printf("Finished defragmenting etcd member[%s]\n", ep)
+ fmt.Printf("Finished defragmenting etcd member[%s]. took %s\n", ep, d.String())
}
+ c.Close()
}
if failures != 0 {
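
The rewritten loop above builds a dedicated single-endpoint client per member and times each defragment call. A self-contained sketch of the same per-member pattern written directly against clientv3 (endpoints and timeouts are placeholders):

    package main

    import (
        "context"
        "fmt"
        "os"
        "time"

        clientv3 "go.etcd.io/etcd/client/v3"
    )

    func main() {
        // Placeholder endpoints; a real tool would take these from flags or the member list.
        endpoints := []string{"http://127.0.0.1:2379"}
        ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
        defer cancel()

        for _, ep := range endpoints {
            // A dedicated client dialed to this endpoint only.
            cli, err := clientv3.New(clientv3.Config{Endpoints: []string{ep}, DialTimeout: 5 * time.Second})
            if err != nil {
                fmt.Fprintf(os.Stderr, "dial %s: %v\n", ep, err)
                continue
            }
            start := time.Now()
            if _, derr := cli.Defragment(ctx, ep); derr != nil {
                fmt.Fprintf(os.Stderr, "defragment %s failed after %s: %v\n", ep, time.Since(start), derr)
            } else {
                fmt.Printf("defragmented %s in %s\n", ep, time.Since(start))
            }
            cli.Close()
        }
    }
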
diff --git a/etcdctl/ctlv3/command/del_command.go b/etcdctl/ctlv3/command/del_command.go
index dbfd186dbad..51b7abb3edf 100644
--- a/etcdctl/ctlv3/command/del_command.go
+++ b/etcdctl/ctlv3/command/del_command.go
@@ -16,9 +16,12 @@ package command
import (
"fmt"
+ "os"
+ "time"
"github.com/spf13/cobra"
- "go.etcd.io/etcd/client/v3"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
)
@@ -26,6 +29,7 @@ var (
delPrefix bool
delPrevKV bool
delFromKey bool
+ delRange bool
)
// NewDelCommand returns the cobra command for "del".
@@ -39,6 +43,7 @@ func NewDelCommand() *cobra.Command {
cmd.Flags().BoolVar(&delPrefix, "prefix", false, "delete keys with matching prefix")
cmd.Flags().BoolVar(&delPrevKV, "prev-kv", false, "return deleted key-value pairs")
cmd.Flags().BoolVar(&delFromKey, "from-key", false, "delete keys that are greater than or equal to the given key using byte compare")
+ cmd.Flags().BoolVar(&delRange, "range", false, "delete range of keys")
return cmd
}
@@ -63,13 +68,18 @@ func getDelOp(args []string) (string, []clientv3.OpOption) {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--prefix` and `--from-key` cannot be set at the same time, choose one"))
}
- opts := []clientv3.OpOption{}
+ var opts []clientv3.OpOption
key := args[0]
if len(args) > 1 {
if delPrefix || delFromKey {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("too many arguments, only accept one argument when `--prefix` or `--from-key` is set"))
}
opts = append(opts, clientv3.WithRange(args[1]))
+ if !delRange {
+ fmt.Fprintf(os.Stderr, "Warning: Keys between %q and %q will be deleted. Please interrupt the command within next 2 seconds to cancel. "+
+ "You can provide `--range` flag to avoid the delay.\n", args[0], args[1])
+ time.Sleep(2 * time.Second)
+ }
}
if delPrefix {
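
The new warning and --range flag guard a common etcdctl pitfall: invoking del with two arguments deletes the whole range [key, range_end), not just the two named keys. Illustrative invocations (key names made up):

    $ etcdctl del foo foo9           # warns that keys in [foo, foo9) will be deleted, waits 2s, then proceeds
    $ etcdctl del --range foo foo9   # same deletion, acknowledged up front, no delay
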
diff --git a/etcdctl/ctlv3/command/downgrade_command.go b/etcdctl/ctlv3/command/downgrade_command.go
new file mode 100644
index 00000000000..8b6ab9cd19e
--- /dev/null
+++ b/etcdctl/ctlv3/command/downgrade_command.go
@@ -0,0 +1,137 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package command
+
+import (
+ "errors"
+
+ "github.com/spf13/cobra"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/cobrautl"
+)
+
+// NewDowngradeCommand returns the cobra command for "downgrade".
+func NewDowngradeCommand() *cobra.Command {
+ dc := &cobra.Command{
+ Use: "downgrade ",
+ Short: "Downgrade related commands",
+ }
+
+ dc.AddCommand(NewDowngradeValidateCommand())
+ dc.AddCommand(NewDowngradeEnableCommand())
+ dc.AddCommand(NewDowngradeCancelCommand())
+
+ return dc
+}
+
+// NewDowngradeValidateCommand returns the cobra command for "downgrade validate".
+func NewDowngradeValidateCommand() *cobra.Command {
+ cc := &cobra.Command{
+ Use: "validate ",
+ Short: "Validate downgrade capability before starting downgrade",
+
+ Run: downgradeValidateCommandFunc,
+ }
+ return cc
+}
+
+// NewDowngradeEnableCommand returns the cobra command for "downgrade enable".
+func NewDowngradeEnableCommand() *cobra.Command {
+ cc := &cobra.Command{
+ Use: "enable ",
+ Short: "Start a downgrade action to cluster",
+
+ Run: downgradeEnableCommandFunc,
+ }
+ return cc
+}
+
+// NewDowngradeCancelCommand returns the cobra command for "downgrade cancel".
+func NewDowngradeCancelCommand() *cobra.Command {
+ cc := &cobra.Command{
+ Use: "cancel",
+ Short: "Cancel the ongoing downgrade action to cluster",
+
+ Run: downgradeCancelCommandFunc,
+ }
+ return cc
+}
+
+// downgradeValidateCommandFunc executes the "downgrade validate" command.
+func downgradeValidateCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) < 1 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("TARGET_VERSION not provided"))
+ }
+ if len(args) > 1 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("too many arguments"))
+ }
+ targetVersion := args[0]
+
+ if len(targetVersion) == 0 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("target version not provided"))
+ }
+
+ ctx, cancel := commandCtx(cmd)
+ cli := mustClientFromCmd(cmd)
+
+ resp, err := cli.Downgrade(ctx, clientv3.DowngradeValidate, targetVersion)
+ cancel()
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+
+ display.DowngradeValidate(*resp)
+}
+
+// downgradeEnableCommandFunc executes the "downgrade enable" command.
+func downgradeEnableCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) < 1 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("TARGET_VERSION not provided"))
+ }
+ if len(args) > 1 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("too many arguments"))
+ }
+ targetVersion := args[0]
+
+ if len(targetVersion) == 0 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("target version not provided"))
+ }
+
+ ctx, cancel := commandCtx(cmd)
+ cli := mustClientFromCmd(cmd)
+
+ resp, err := cli.Downgrade(ctx, clientv3.DowngradeEnable, targetVersion)
+ cancel()
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+
+ display.DowngradeEnable(*resp)
+}
+
+// downgradeCancelCommandFunc executes the "downgrade cancel" command.
+func downgradeCancelCommandFunc(cmd *cobra.Command, args []string) {
+ ctx, cancel := commandCtx(cmd)
+ cli := mustClientFromCmd(cmd)
+
+ resp, err := cli.Downgrade(ctx, clientv3.DowngradeCancel, "")
+ cancel()
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+
+ display.DowngradeCancel(*resp)
+}
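
The three subcommands are intended to be run in order; an illustrative session (the 3.5 target version is only an example):

    $ etcdctl downgrade validate 3.5   # check that the cluster can be downgraded to 3.5
    $ etcdctl downgrade enable 3.5     # start the downgrade across the cluster
    $ etcdctl downgrade cancel         # cancel a downgrade that was enabled but not completed
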
diff --git a/etcdctl/ctlv3/command/elect_command.go b/etcdctl/ctlv3/command/elect_command.go
index ae412f99e17..eee4289e710 100644
--- a/etcdctl/ctlv3/command/elect_command.go
+++ b/etcdctl/ctlv3/command/elect_command.go
@@ -21,16 +21,14 @@ import (
"os/signal"
"syscall"
- "go.etcd.io/etcd/client/v3"
+ "github.com/spf13/cobra"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
"go.etcd.io/etcd/pkg/v3/cobrautl"
-
- "github.com/spf13/cobra"
)
-var (
- electListen bool
-)
+var electListen bool
// NewElectCommand returns the cobra command for "elect".
func NewElectCommand() *cobra.Command {
diff --git a/etcdctl/ctlv3/command/ep_command.go b/etcdctl/ctlv3/command/ep_command.go
index 9e4aad04d8f..65df3dff372 100644
--- a/etcdctl/ctlv3/command/ep_command.go
+++ b/etcdctl/ctlv3/command/ep_command.go
@@ -15,23 +15,27 @@
package command
import (
+ "errors"
"fmt"
"os"
"sync"
"time"
+ "github.com/spf13/cobra"
+ "go.uber.org/zap"
+
"go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- v3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
"go.etcd.io/etcd/pkg/v3/flags"
-
- "github.com/spf13/cobra"
- "go.uber.org/zap"
)
-var epClusterEndpoints bool
-var epHashKVRev int64
+var (
+ epClusterEndpoints bool
+ epHashKVRev int64
+)
// NewEndpointCommand returns the cobra command for "endpoint".
func NewEndpointCommand() *cobra.Command {
@@ -75,7 +79,7 @@ func newEpHashKVCommand() *cobra.Command {
Short: "Prints the KV history hash for each endpoint in --endpoints",
Run: epHashKVCommandFunc,
}
- hc.PersistentFlags().Int64Var(&epHashKVRev, "rev", 0, "maximum revision to hash (default: all revisions)")
+ hc.PersistentFlags().Int64Var(&epHashKVRev, "rev", 0, "maximum revision to hash (default: latest revision)")
return hc
}
@@ -88,21 +92,20 @@ type epHealth struct {
// epHealthCommandFunc executes the "endpoint-health" command.
func epHealthCommandFunc(cmd *cobra.Command, args []string) {
- lg, err := zap.NewProduction()
+ lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel)
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
flags.SetPflagsFromEnv(lg, "ETCDCTL", cmd.InheritedFlags())
initDisplayFromCmd(cmd)
- sec := secureCfgFromCmd(cmd)
- dt := dialTimeoutFromCmd(cmd)
- ka := keepAliveTimeFromCmd(cmd)
- kat := keepAliveTimeoutFromCmd(cmd)
- auth := authCfgFromCmd(cmd)
- cfgs := []*v3.Config{}
+ cfgSpec := clientConfigFromCmd(cmd)
+
+ var cfgs []*clientv3.Config
for _, ep := range endpointsFromCluster(cmd) {
- cfg, err := newClientCfg([]string{ep}, dt, ka, kat, sec, auth)
+ cloneCfgSpec := cfgSpec.Clone()
+ cloneCfgSpec.Endpoints = []string{ep}
+ cfg, err := clientv3.NewClientConfig(cloneCfgSpec, lg)
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
}
@@ -113,11 +116,11 @@ func epHealthCommandFunc(cmd *cobra.Command, args []string) {
hch := make(chan epHealth, len(cfgs))
for _, cfg := range cfgs {
wg.Add(1)
- go func(cfg *v3.Config) {
+ go func(cfg *clientv3.Config) {
defer wg.Done()
ep := cfg.Endpoints[0]
cfg.Logger = lg.Named("client")
- cli, err := v3.New(*cfg)
+ cli, err := clientv3.New(*cfg)
if err != nil {
hch <- epHealth{Ep: ep, Health: false, Error: err.Error()}
return
@@ -129,7 +132,7 @@ func epHealthCommandFunc(cmd *cobra.Command, args []string) {
_, err = cli.Get(ctx, "health")
eh := epHealth{Ep: ep, Health: false, Took: time.Since(st).String()}
// permission denied is OK since proposal goes through consensus to get it
- if err == nil || err == rpctypes.ErrPermissionDenied {
+ if err == nil || errors.Is(err, rpctypes.ErrPermissionDenied) {
eh.Health = true
} else {
eh.Error = err.Error()
@@ -164,7 +167,7 @@ func epHealthCommandFunc(cmd *cobra.Command, args []string) {
close(hch)
errs := false
- healthList := []epHealth{}
+ var healthList []epHealth
for h := range hch {
healthList = append(healthList, h)
if h.Error != "" {
@@ -178,19 +181,22 @@ func epHealthCommandFunc(cmd *cobra.Command, args []string) {
}
type epStatus struct {
- Ep string `json:"Endpoint"`
- Resp *v3.StatusResponse `json:"Status"`
+ Ep string `json:"Endpoint"`
+ Resp *clientv3.StatusResponse `json:"Status"`
}
func epStatusCommandFunc(cmd *cobra.Command, args []string) {
- c := mustClientFromCmd(cmd)
+ cfg := clientConfigFromCmd(cmd)
- statusList := []epStatus{}
+ var statusList []epStatus
var err error
for _, ep := range endpointsFromCluster(cmd) {
+ cfg.Endpoints = []string{ep}
+ c := mustClient(cfg)
ctx, cancel := commandCtx(cmd)
resp, serr := c.Status(ctx, ep)
cancel()
+ c.Close()
if serr != nil {
err = serr
fmt.Fprintf(os.Stderr, "Failed to get the status of endpoint %s (%v)\n", ep, serr)
@@ -207,19 +213,22 @@ func epStatusCommandFunc(cmd *cobra.Command, args []string) {
}
type epHashKV struct {
- Ep string `json:"Endpoint"`
- Resp *v3.HashKVResponse `json:"HashKV"`
+ Ep string `json:"Endpoint"`
+ Resp *clientv3.HashKVResponse `json:"HashKV"`
}
func epHashKVCommandFunc(cmd *cobra.Command, args []string) {
- c := mustClientFromCmd(cmd)
+ cfg := clientConfigFromCmd(cmd)
- hashList := []epHashKV{}
+ var hashList []epHashKV
var err error
for _, ep := range endpointsFromCluster(cmd) {
+ cfg.Endpoints = []string{ep}
+ c := mustClient(cfg)
ctx, cancel := commandCtx(cmd)
resp, serr := c.HashKV(ctx, ep, epHashKVRev)
cancel()
+ c.Close()
if serr != nil {
err = serr
fmt.Fprintf(os.Stderr, "Failed to get the hash of endpoint %s (%v)\n", ep, serr)
@@ -244,21 +253,14 @@ func endpointsFromCluster(cmd *cobra.Command) []string {
return endpoints
}
- sec := secureCfgFromCmd(cmd)
- dt := dialTimeoutFromCmd(cmd)
- ka := keepAliveTimeFromCmd(cmd)
- kat := keepAliveTimeoutFromCmd(cmd)
- eps, err := endpointsFromCmd(cmd)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
+ cfgSpec := clientConfigFromCmd(cmd)
// exclude auth for not asking needless password (MemberList() doesn't need authentication)
-
- cfg, err := newClientCfg(eps, dt, ka, kat, sec, nil)
+ lg, _ := logutil.CreateDefaultZapLogger(zap.InfoLevel)
+ cfg, err := clientv3.NewClientConfig(cfgSpec, lg)
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
- c, err := v3.New(*cfg)
+ c, err := clientv3.New(*cfg)
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
@@ -270,11 +272,11 @@ func endpointsFromCluster(cmd *cobra.Command) []string {
}()
membs, err := c.MemberList(ctx)
if err != nil {
- err = fmt.Errorf("failed to fetch endpoints from etcd cluster member list: %v", err)
+ err = fmt.Errorf("failed to fetch endpoints from etcd cluster member list: %w", err)
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
- ret := []string{}
+ var ret []string
for _, m := range membs.Members {
ret = append(ret, m.ClientURLs...)
}
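
The reworked endpoint commands above build one short-lived client per endpoint from a single shared `clientv3.ConfigSpec`, cloned and narrowed to that endpoint, instead of re-assembling timeout, TLS and auth settings by hand. A minimal sketch of that pattern, assuming a local cluster at placeholder addresses; the health probe here is simplified to a bare `Get` (the real command additionally treats a permission-denied error as healthy):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"go.uber.org/zap"

	"go.etcd.io/etcd/client/pkg/v3/logutil"
	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel)
	if err != nil {
		panic(err)
	}

	// Shared spec; endpoints and timeout are placeholder values.
	spec := &clientv3.ConfigSpec{
		Endpoints:   []string{"127.0.0.1:2379", "127.0.0.1:22379"},
		DialTimeout: 2 * time.Second,
	}

	for _, ep := range spec.Endpoints {
		// Clone the shared spec and narrow it to a single endpoint.
		single := spec.Clone()
		single.Endpoints = []string{ep}

		cfg, err := clientv3.NewClientConfig(single, lg)
		if err != nil {
			panic(err)
		}
		cli, err := clientv3.New(*cfg)
		if err != nil {
			fmt.Printf("%s: unhealthy: %v\n", ep, err)
			continue
		}

		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		_, gerr := cli.Get(ctx, "health")
		cancel()
		cli.Close()

		fmt.Printf("%s: healthy=%v\n", ep, gerr == nil)
	}
}
```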
diff --git a/etcdctl/ctlv3/command/get_command.go b/etcdctl/ctlv3/command/get_command.go
index c94ac08b987..7d687aa5c90 100644
--- a/etcdctl/ctlv3/command/get_command.go
+++ b/etcdctl/ctlv3/command/get_command.go
@@ -19,21 +19,26 @@ import (
"strings"
"github.com/spf13/cobra"
- "go.etcd.io/etcd/client/v3"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
)
var (
- getConsistency string
- getLimit int64
- getSortOrder string
- getSortTarget string
- getPrefix bool
- getFromKey bool
- getRev int64
- getKeysOnly bool
- getCountOnly bool
- printValueOnly bool
+ getConsistency string
+ getLimit int64
+ getSortOrder string
+ getSortTarget string
+ getPrefix bool
+ getFromKey bool
+ getRev int64
+ getKeysOnly bool
+ getCountOnly bool
+ printValueOnly bool
+ getMinCreateRev int64
+ getMaxCreateRev int64
+ getMinModRev int64
+ getMaxModRev int64
)
// NewGetCommand returns the cobra command for "get".
@@ -54,6 +59,21 @@ func NewGetCommand() *cobra.Command {
cmd.Flags().BoolVar(&getKeysOnly, "keys-only", false, "Get only the keys")
cmd.Flags().BoolVar(&getCountOnly, "count-only", false, "Get only the count")
cmd.Flags().BoolVar(&printValueOnly, "print-value-only", false, `Only write values when using the "simple" output format`)
+ cmd.Flags().Int64Var(&getMinCreateRev, "min-create-rev", 0, "Minimum create revision")
+ cmd.Flags().Int64Var(&getMaxCreateRev, "max-create-rev", 0, "Maximum create revision")
+ cmd.Flags().Int64Var(&getMinModRev, "min-mod-rev", 0, "Minimum modification revision")
+ cmd.Flags().Int64Var(&getMaxModRev, "max-mod-rev", 0, "Maximum modification revision")
+
+ cmd.RegisterFlagCompletionFunc("consistency", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+ return []string{"l", "s"}, cobra.ShellCompDirectiveDefault
+ })
+ cmd.RegisterFlagCompletionFunc("order", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+ return []string{"ASCEND", "DESCEND"}, cobra.ShellCompDirectiveDefault
+ })
+ cmd.RegisterFlagCompletionFunc("sort-by", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+ return []string{"CREATE", "KEY", "MODIFY", "VALUE", "VERSION"}, cobra.ShellCompDirectiveDefault
+ })
+
return cmd
}
@@ -96,13 +116,9 @@ func getGetOp(args []string) (string, []clientv3.OpOption) {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--keys-only` and `--count-only` cannot be set at the same time, choose one"))
}
- opts := []clientv3.OpOption{}
- switch getConsistency {
- case "s":
+ var opts []clientv3.OpOption
+ if IsSerializable(getConsistency) {
opts = append(opts, clientv3.WithSerializable())
- case "l":
- default:
- cobrautl.ExitWithError(cobrautl.ExitBadFeature, fmt.Errorf("unknown consistency flag %q", getConsistency))
}
key := args[0]
@@ -176,5 +192,29 @@ func getGetOp(args []string) (string, []clientv3.OpOption) {
opts = append(opts, clientv3.WithCountOnly())
}
+ if getMinCreateRev > 0 {
+ opts = append(opts, clientv3.WithMinCreateRev(getMinCreateRev))
+ }
+
+ if getMaxCreateRev > 0 {
+ if getMinCreateRev > getMaxCreateRev {
+ cobrautl.ExitWithError(cobrautl.ExitBadFeature,
+ fmt.Errorf("getMinCreateRev(=%v) > getMaxCreateRev(=%v)", getMinCreateRev, getMaxCreateRev))
+ }
+ opts = append(opts, clientv3.WithMaxCreateRev(getMaxCreateRev))
+ }
+
+ if getMinModRev > 0 {
+ opts = append(opts, clientv3.WithMinModRev(getMinModRev))
+ }
+
+ if getMaxModRev > 0 {
+ if getMinModRev > getMaxModRev {
+ cobrautl.ExitWithError(cobrautl.ExitBadFeature,
+ fmt.Errorf("getMinModRev(=%v) > getMaxModRev(=%v)", getMinModRev, getMaxModRev))
+ }
+ opts = append(opts, clientv3.WithMaxModRev(getMaxModRev))
+ }
+
return key, opts
}
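
The four new `--min-create-rev`, `--max-create-rev`, `--min-mod-rev` and `--max-mod-rev` flags map directly onto existing clientv3 range options. A short sketch of the equivalent client-side call, with a placeholder endpoint, key prefix and revision numbers:

```go
package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 2 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Keys under "foo/" whose create revision is at least 5 and whose
	// last modification is no newer than revision 100.
	resp, err := cli.Get(ctx, "foo/",
		clientv3.WithPrefix(),
		clientv3.WithMinCreateRev(5),
		clientv3.WithMaxModRev(100),
	)
	if err != nil {
		panic(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
	}
}
```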
diff --git a/etcdctl/ctlv3/command/global.go b/etcdctl/ctlv3/command/global.go
index c50ab5963a5..019b638d394 100644
--- a/etcdctl/ctlv3/command/global.go
+++ b/etcdctl/ctlv3/command/global.go
@@ -15,26 +15,25 @@
package command
import (
- "crypto/tls"
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"strings"
"time"
"github.com/bgentry/speakeasy"
- "go.etcd.io/etcd/client/pkg/v3/srv"
- "go.etcd.io/etcd/client/pkg/v3/transport"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
- "go.etcd.io/etcd/pkg/v3/flags"
-
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"go.uber.org/zap"
"google.golang.org/grpc/grpclog"
+
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
+ "go.etcd.io/etcd/client/pkg/v3/srv"
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/cobrautl"
+ "go.etcd.io/etcd/pkg/v3/flags"
)
// GlobalFlags are flags that defined globally
@@ -48,6 +47,8 @@ type GlobalFlags struct {
CommandTimeOut time.Duration
KeepAliveTime time.Duration
KeepAliveTimeout time.Duration
+ MaxCallSendMsgSize int
+ MaxCallRecvMsgSize int
DNSClusterServiceName string
TLS transport.TLSInfo
@@ -61,21 +62,6 @@ type GlobalFlags struct {
Debug bool
}
-type secureCfg struct {
- cert string
- key string
- cacert string
- serverName string
-
- insecureTransport bool
- insecureSkipVerify bool
-}
-
-type authCfg struct {
- username string
- password string
-}
-
type discoveryCfg struct {
domain string
insecure bool
@@ -98,23 +84,14 @@ func initDisplayFromCmd(cmd *cobra.Command) {
}
}
-type clientConfig struct {
- endpoints []string
- dialTimeout time.Duration
- keepAliveTime time.Duration
- keepAliveTimeout time.Duration
- scfg *secureCfg
- acfg *authCfg
-}
-
type discardValue struct{}
func (*discardValue) String() string { return "" }
func (*discardValue) Set(string) error { return nil }
func (*discardValue) Type() string { return "" }
-func clientConfigFromCmd(cmd *cobra.Command) *clientConfig {
- lg, err := zap.NewProduction()
+func clientConfigFromCmd(cmd *cobra.Command) *clientv3.ConfigSpec {
+ lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel)
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
@@ -141,21 +118,23 @@ func clientConfigFromCmd(cmd *cobra.Command) *clientConfig {
// too many routine connection disconnects to turn on by default.
//
// See https://github.com/etcd-io/etcd/pull/9623 for background
- grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, os.Stderr))
+ grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, io.Discard, os.Stderr))
}
- cfg := &clientConfig{}
- cfg.endpoints, err = endpointsFromCmd(cmd)
+ cfg := &clientv3.ConfigSpec{}
+ cfg.Endpoints, err = endpointsFromCmd(cmd)
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
- cfg.dialTimeout = dialTimeoutFromCmd(cmd)
- cfg.keepAliveTime = keepAliveTimeFromCmd(cmd)
- cfg.keepAliveTimeout = keepAliveTimeoutFromCmd(cmd)
+ cfg.DialTimeout = dialTimeoutFromCmd(cmd)
+ cfg.KeepAliveTime = keepAliveTimeFromCmd(cmd)
+ cfg.KeepAliveTimeout = keepAliveTimeoutFromCmd(cmd)
+ cfg.MaxCallSendMsgSize = maxCallSendMsgSizeFromCmd(cmd)
+ cfg.MaxCallRecvMsgSize = maxCallRecvMsgSizeFromCmd(cmd)
- cfg.scfg = secureCfgFromCmd(cmd)
- cfg.acfg = authCfgFromCmd(cmd)
+ cfg.Secure = secureCfgFromCmd(cmd)
+ cfg.Auth = authCfgFromCmd(cmd)
initDisplayFromCmd(cmd)
return cfg
@@ -163,7 +142,8 @@ func clientConfigFromCmd(cmd *cobra.Command) *clientConfig {
func mustClientCfgFromCmd(cmd *cobra.Command) *clientv3.Config {
cc := clientConfigFromCmd(cmd)
- cfg, err := newClientCfg(cc.endpoints, cc.dialTimeout, cc.keepAliveTime, cc.keepAliveTimeout, cc.scfg, cc.acfg)
+ lg, _ := logutil.CreateDefaultZapLogger(zap.InfoLevel)
+ cfg, err := clientv3.NewClientConfig(cc, lg)
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
}
@@ -172,11 +152,12 @@ func mustClientCfgFromCmd(cmd *cobra.Command) *clientv3.Config {
func mustClientFromCmd(cmd *cobra.Command) *clientv3.Client {
cfg := clientConfigFromCmd(cmd)
- return cfg.mustClient()
+ return mustClient(cfg)
}
-func (cc *clientConfig) mustClient() *clientv3.Client {
- cfg, err := newClientCfg(cc.endpoints, cc.dialTimeout, cc.keepAliveTime, cc.keepAliveTimeout, cc.scfg, cc.acfg)
+func mustClient(cc *clientv3.ConfigSpec) *clientv3.Client {
+ lg, _ := logutil.CreateDefaultZapLogger(zap.InfoLevel)
+ cfg, err := clientv3.NewClientConfig(cc, lg)
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
}
@@ -189,72 +170,11 @@ func (cc *clientConfig) mustClient() *clientv3.Client {
return client
}
-func newClientCfg(endpoints []string, dialTimeout, keepAliveTime, keepAliveTimeout time.Duration, scfg *secureCfg, acfg *authCfg) (*clientv3.Config, error) {
- // set tls if any one tls option set
- var cfgtls *transport.TLSInfo
- tlsinfo := transport.TLSInfo{}
- tlsinfo.Logger, _ = zap.NewProduction()
- if scfg.cert != "" {
- tlsinfo.CertFile = scfg.cert
- cfgtls = &tlsinfo
- }
-
- if scfg.key != "" {
- tlsinfo.KeyFile = scfg.key
- cfgtls = &tlsinfo
- }
-
- if scfg.cacert != "" {
- tlsinfo.TrustedCAFile = scfg.cacert
- cfgtls = &tlsinfo
- }
-
- if scfg.serverName != "" {
- tlsinfo.ServerName = scfg.serverName
- cfgtls = &tlsinfo
- }
-
- cfg := &clientv3.Config{
- Endpoints: endpoints,
- DialTimeout: dialTimeout,
- DialKeepAliveTime: keepAliveTime,
- DialKeepAliveTimeout: keepAliveTimeout,
- }
-
- if cfgtls != nil {
- clientTLS, err := cfgtls.ClientConfig()
- if err != nil {
- return nil, err
- }
- cfg.TLS = clientTLS
- }
-
- // if key/cert is not given but user wants secure connection, we
- // should still setup an empty tls configuration for gRPC to setup
- // secure connection.
- if cfg.TLS == nil && !scfg.insecureTransport {
- cfg.TLS = &tls.Config{}
- }
-
- // If the user wants to skip TLS verification then we should set
- // the InsecureSkipVerify flag in tls configuration.
- if scfg.insecureSkipVerify && cfg.TLS != nil {
- cfg.TLS.InsecureSkipVerify = true
- }
-
- if acfg != nil {
- cfg.Username = acfg.username
- cfg.Password = acfg.password
- }
-
- return cfg, nil
-}
-
func argOrStdin(args []string, stdin io.Reader, i int) (string, error) {
if i < len(args) {
return args[i], nil
}
- bytes, err := ioutil.ReadAll(stdin)
+ bytes, err := io.ReadAll(stdin)
if string(bytes) == "" || err != nil {
return "", errors.New("no available argument and stdin")
}
@@ -285,7 +205,23 @@ func keepAliveTimeoutFromCmd(cmd *cobra.Command) time.Duration {
return keepAliveTimeout
}
-func secureCfgFromCmd(cmd *cobra.Command) *secureCfg {
+func maxCallSendMsgSizeFromCmd(cmd *cobra.Command) int {
+ maxRequestBytes, err := cmd.Flags().GetInt("max-request-bytes")
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+ return maxRequestBytes
+}
+
+func maxCallRecvMsgSizeFromCmd(cmd *cobra.Command) int {
+ maxReceiveBytes, err := cmd.Flags().GetInt("max-recv-bytes")
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+ return maxReceiveBytes
+}
+
+func secureCfgFromCmd(cmd *cobra.Command) *clientv3.SecureConfig {
cert, key, cacert := keyAndCertFromCmd(cmd)
insecureTr := insecureTransportFromCmd(cmd)
skipVerify := insecureSkipVerifyFromCmd(cmd)
@@ -295,14 +231,14 @@ func secureCfgFromCmd(cmd *cobra.Command) *secureCfg {
discoveryCfg.domain = ""
}
- return &secureCfg{
- cert: cert,
- key: key,
- cacert: cacert,
- serverName: discoveryCfg.domain,
+ return &clientv3.SecureConfig{
+ Cert: cert,
+ Key: key,
+ Cacert: cacert,
+ ServerName: discoveryCfg.domain,
- insecureTransport: insecureTr,
- insecureSkipVerify: skipVerify,
+ InsecureTransport: insecureTr,
+ InsecureSkipVerify: skipVerify,
}
}
@@ -345,7 +281,7 @@ func keyAndCertFromCmd(cmd *cobra.Command) (cert, key, cacert string) {
return cert, key, cacert
}
-func authCfgFromCmd(cmd *cobra.Command) *authCfg {
+func authCfgFromCmd(cmd *cobra.Command) *clientv3.AuthConfig {
userFlag, err := cmd.Flags().GetString("user")
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
@@ -359,23 +295,23 @@ func authCfgFromCmd(cmd *cobra.Command) *authCfg {
return nil
}
- var cfg authCfg
+ var cfg clientv3.AuthConfig
if passwordFlag == "" {
splitted := strings.SplitN(userFlag, ":", 2)
if len(splitted) < 2 {
- cfg.username = userFlag
- cfg.password, err = speakeasy.Ask("Password: ")
+ cfg.Username = userFlag
+ cfg.Password, err = speakeasy.Ask("Password: ")
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
} else {
- cfg.username = splitted[0]
- cfg.password = splitted[1]
+ cfg.Username = splitted[0]
+ cfg.Password = splitted[1]
}
} else {
- cfg.username = userFlag
- cfg.password = passwordFlag
+ cfg.Username = userFlag
+ cfg.Password = passwordFlag
}
return &cfg
@@ -447,7 +383,7 @@ func endpointsFromFlagValue(cmd *cobra.Command) ([]string, error) {
return eps, err
}
// strip insecure connections
- ret := []string{}
+ var ret []string
for _, ep := range eps {
if strings.HasPrefix(ep, "http://") {
fmt.Fprintf(os.Stderr, "ignoring discovered insecure endpoint %q\n", ep)
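
With `newClientCfg` and the private `clientConfig`/`secureCfg`/`authCfg` structs removed, connection settings now travel through `clientv3.ConfigSpec`, and `clientv3.NewClientConfig` turns the spec (including TLS and credentials) into a dialable `clientv3.Config`. A sketch of that flow under placeholder certificate paths and credentials:

```go
package main

import (
	"time"

	"go.uber.org/zap"

	"go.etcd.io/etcd/client/pkg/v3/logutil"
	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	// All values below are illustrative placeholders.
	spec := &clientv3.ConfigSpec{
		Endpoints:        []string{"https://127.0.0.1:2379"},
		DialTimeout:      2 * time.Second,
		KeepAliveTime:    2 * time.Second,
		KeepAliveTimeout: 6 * time.Second,
		Secure: &clientv3.SecureConfig{
			Cert:   "/etc/etcd/etcd.crt",
			Key:    "/etc/etcd/etcd.key",
			Cacert: "/etc/etcd/ca.crt",
		},
		Auth: &clientv3.AuthConfig{
			Username: "root",
			Password: "password123",
		},
	}

	lg, _ := logutil.CreateDefaultZapLogger(zap.InfoLevel)

	// NewClientConfig resolves the TLS and auth portions of the spec.
	cfg, err := clientv3.NewClientConfig(spec, lg)
	if err != nil {
		panic(err)
	}
	cli, err := clientv3.New(*cfg)
	if err != nil {
		panic(err)
	}
	defer cli.Close()
}
```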
diff --git a/etcdctl/ctlv3/command/lease_command.go b/etcdctl/ctlv3/command/lease_command.go
index 97cacdfaf30..8e70670b638 100644
--- a/etcdctl/ctlv3/command/lease_command.go
+++ b/etcdctl/ctlv3/command/lease_command.go
@@ -19,10 +19,10 @@ import (
"fmt"
"strconv"
+ "github.com/spf13/cobra"
+
v3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
-
- "github.com/spf13/cobra"
)
// NewLeaseCommand returns the cobra command for "lease".
@@ -61,14 +61,14 @@ func leaseGrantCommandFunc(cmd *cobra.Command, args []string) {
ttl, err := strconv.ParseInt(args[0], 10, 64)
if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad TTL (%v)", err))
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad TTL (%w)", err))
}
ctx, cancel := commandCtx(cmd)
resp, err := mustClientFromCmd(cmd).Grant(ctx, ttl)
cancel()
if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, fmt.Errorf("failed to grant lease (%v)", err))
+ cobrautl.ExitWithError(cobrautl.ExitError, fmt.Errorf("failed to grant lease (%w)", err))
}
display.Grant(*resp)
}
@@ -96,7 +96,7 @@ func leaseRevokeCommandFunc(cmd *cobra.Command, args []string) {
resp, err := mustClientFromCmd(cmd).Revoke(ctx, id)
cancel()
if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, fmt.Errorf("failed to revoke lease (%v)", err))
+ cobrautl.ExitWithError(cobrautl.ExitError, fmt.Errorf("failed to revoke lease (%w)", err))
}
display.Revoke(id, *resp)
}
@@ -151,9 +151,7 @@ func leaseListCommandFunc(cmd *cobra.Command, args []string) {
display.Leases(*resp)
}
-var (
- leaseKeepAliveOnce bool
-)
+var leaseKeepAliveOnce bool
// NewLeaseKeepAliveCommand returns the cobra command for "lease keep-alive".
func NewLeaseKeepAliveCommand() *cobra.Command {
@@ -202,7 +200,7 @@ func leaseKeepAliveCommandFunc(cmd *cobra.Command, args []string) {
func leaseFromArgs(arg string) v3.LeaseID {
id, err := strconv.ParseInt(arg, 16, 64)
if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad lease ID arg (%v), expecting ID in Hex", err))
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad lease ID arg (%w), expecting ID in Hex", err))
}
return v3.LeaseID(id)
}
diff --git a/etcdctl/ctlv3/command/lock_command.go b/etcdctl/ctlv3/command/lock_command.go
index 3b383ec9a2e..64fb0ee8192 100644
--- a/etcdctl/ctlv3/command/lock_command.go
+++ b/etcdctl/ctlv3/command/lock_command.go
@@ -23,11 +23,11 @@ import (
"os/signal"
"syscall"
- "go.etcd.io/etcd/client/v3"
+ "github.com/spf13/cobra"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
"go.etcd.io/etcd/pkg/v3/cobrautl"
-
- "github.com/spf13/cobra"
)
var lockTTL = 10
@@ -59,7 +59,8 @@ func getExitCodeFromError(err error) int {
return cobrautl.ExitSuccess
}
- if exitErr, ok := err.(*exec.ExitError); ok {
+ var exitErr *exec.ExitError
+ if errors.As(err, &exitErr) {
if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
return status.ExitStatus()
}
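
The exit-code handling above switches from a direct type assertion to `errors.As`, so an `*exec.ExitError` is still recognized even when it arrives wrapped. A standalone sketch of the same idea, using the portable `ExitCode()` helper instead of the `syscall.WaitStatus` assertion the command keeps:

```go
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func exitCode(err error) int {
	if err == nil {
		return 0
	}
	// errors.As walks the wrap chain, unlike err.(*exec.ExitError).
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) {
		return exitErr.ExitCode()
	}
	return -1
}

func main() {
	err := exec.Command("sh", "-c", "exit 3").Run()
	fmt.Println(exitCode(err)) // 3
}
```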
diff --git a/etcdctl/ctlv3/command/make_mirror_command.go b/etcdctl/ctlv3/command/make_mirror_command.go
index aaa51eae9e7..c8b6220d596 100644
--- a/etcdctl/ctlv3/command/make_mirror_command.go
+++ b/etcdctl/ctlv3/command/make_mirror_command.go
@@ -23,14 +23,17 @@ import (
"time"
"github.com/bgentry/speakeasy"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
+ "github.com/spf13/cobra"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/mirror"
+ "go.etcd.io/etcd/pkg/v3/cobrautl"
+)
- "github.com/spf13/cobra"
+const (
+ defaultMaxTxnOps = uint(128)
)
var (
@@ -43,6 +46,8 @@ var (
mmuser string
mmpassword string
mmnodestprefix bool
+ mmrev int64
+ mmmaxTxnOps uint
)
// NewMakeMirrorCommand returns the cobra command for "makeMirror".
@@ -54,6 +59,8 @@ func NewMakeMirrorCommand() *cobra.Command {
}
c.Flags().StringVar(&mmprefix, "prefix", "", "Key-value prefix to mirror")
+ c.Flags().Int64Var(&mmrev, "rev", 0, "Specify the KV revision at which to start mirroring")
+ c.Flags().UintVar(&mmmaxTxnOps, "max-txn-ops", defaultMaxTxnOps, "Maximum number of operations permitted in a transaction during syncing updates.")
c.Flags().StringVar(&mmdestprefix, "dest-prefix", "", "destination prefix to mirror a prefix to a different prefix in the destination cluster")
c.Flags().BoolVar(&mmnodestprefix, "no-dest-prefix", false, "mirror key-values to the root of the destination cluster")
c.Flags().StringVar(&mmcert, "dest-cert", "", "Identify secure client using this TLS certificate file for the destination cluster")
@@ -67,29 +74,29 @@ func NewMakeMirrorCommand() *cobra.Command {
return c
}
-func authDestCfg() *authCfg {
+func authDestCfg() *clientv3.AuthConfig {
if mmuser == "" {
return nil
}
- var cfg authCfg
+ var cfg clientv3.AuthConfig
if mmpassword == "" {
splitted := strings.SplitN(mmuser, ":", 2)
if len(splitted) < 2 {
var err error
- cfg.username = mmuser
- cfg.password, err = speakeasy.Ask("Destination Password: ")
+ cfg.Username = mmuser
+ cfg.Password, err = speakeasy.Ask("Destination Password: ")
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
} else {
- cfg.username = splitted[0]
- cfg.password = splitted[1]
+ cfg.Username = splitted[0]
+ cfg.Password = splitted[1]
}
} else {
- cfg.username = mmuser
- cfg.password = mmpassword
+ cfg.Username = mmuser
+ cfg.Password = mmpassword
}
return &cfg
@@ -103,24 +110,28 @@ func makeMirrorCommandFunc(cmd *cobra.Command, args []string) {
dialTimeout := dialTimeoutFromCmd(cmd)
keepAliveTime := keepAliveTimeFromCmd(cmd)
keepAliveTimeout := keepAliveTimeoutFromCmd(cmd)
- sec := &secureCfg{
- cert: mmcert,
- key: mmkey,
- cacert: mmcacert,
- insecureTransport: mminsecureTr,
+ maxCallSendMsgSize := maxCallSendMsgSizeFromCmd(cmd)
+ maxCallRecvMsgSize := maxCallRecvMsgSizeFromCmd(cmd)
+ sec := &clientv3.SecureConfig{
+ Cert: mmcert,
+ Key: mmkey,
+ Cacert: mmcacert,
+ InsecureTransport: mminsecureTr,
}
auth := authDestCfg()
- cc := &clientConfig{
- endpoints: []string{args[0]},
- dialTimeout: dialTimeout,
- keepAliveTime: keepAliveTime,
- keepAliveTimeout: keepAliveTimeout,
- scfg: sec,
- acfg: auth,
+ cc := &clientv3.ConfigSpec{
+ Endpoints: []string{args[0]},
+ DialTimeout: dialTimeout,
+ KeepAliveTime: keepAliveTime,
+ KeepAliveTimeout: keepAliveTimeout,
+ MaxCallSendMsgSize: maxCallSendMsgSize,
+ MaxCallRecvMsgSize: maxCallRecvMsgSize,
+ Secure: sec,
+ Auth: auth,
}
- dc := cc.mustClient()
+ dc := mustClient(cc)
c := mustClientFromCmd(cmd)
err := makeMirror(context.TODO(), c, dc)
@@ -130,6 +141,11 @@ func makeMirrorCommandFunc(cmd *cobra.Command, args []string) {
func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) error {
total := int64(0)
+ // if destination prefix is specified and remove destination prefix is true return error
+ if mmnodestprefix && len(mmdestprefix) > 0 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("`--dest-prefix` and `--no-dest-prefix` cannot be set at the same time, choose one"))
+ }
+
go func() {
for {
time.Sleep(30 * time.Second)
@@ -137,33 +153,37 @@ func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) er
}
}()
- s := mirror.NewSyncer(c, mmprefix, 0)
+ startRev := mmrev - 1
+ if startRev < 0 {
+ startRev = 0
+ }
- rc, errc := s.SyncBase(ctx)
+ s := mirror.NewSyncer(c, mmprefix, startRev)
- // if destination prefix is specified and remove destination prefix is true return error
- if mmnodestprefix && len(mmdestprefix) > 0 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--dest-prefix` and `--no-dest-prefix` cannot be set at the same time, choose one"))
- }
+ // If a rev is provided, then do not sync the whole key space.
+ // Instead, just start watching the key space starting from the rev
+ if startRev == 0 {
+ rc, errc := s.SyncBase(ctx)
- // if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix
- if !mmnodestprefix && len(mmdestprefix) == 0 {
- mmdestprefix = mmprefix
- }
+ // if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix
+ if !mmnodestprefix && len(mmdestprefix) == 0 {
+ mmdestprefix = mmprefix
+ }
- for r := range rc {
- for _, kv := range r.Kvs {
- _, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))
- if err != nil {
- return err
+ for r := range rc {
+ for _, kv := range r.Kvs {
+ _, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))
+ if err != nil {
+ return err
+ }
+ atomic.AddInt64(&total, 1)
}
- atomic.AddInt64(&total, 1)
}
- }
- err := <-errc
- if err != nil {
- return err
+ err := <-errc
+ if err != nil {
+ return err
+ }
}
wc := s.SyncUpdates(ctx)
@@ -174,7 +194,7 @@ func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) er
}
var lastRev int64
- ops := []clientv3.Op{}
+ var ops []clientv3.Op
for _, ev := range wr.Events {
nextRev := ev.Kv.ModRevision
@@ -186,6 +206,15 @@ func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) er
ops = []clientv3.Op{}
}
lastRev = nextRev
+
+ if len(ops) == int(mmmaxTxnOps) {
+ _, err := dc.Txn(ctx).Then(ops...).Commit()
+ if err != nil {
+ return err
+ }
+ ops = []clientv3.Op{}
+ }
+
switch ev.Type {
case mvccpb.PUT:
ops = append(ops, clientv3.OpPut(modifyPrefix(string(ev.Kv.Key)), string(ev.Kv.Value)))
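
Two behaviours are added to `make-mirror` here: with `--rev` the initial `SyncBase` pass is skipped and watching starts from the given revision, and during update syncing the pending operations are flushed whenever the batch reaches `--max-txn-ops`. A simplified sketch of that batching shape, with a placeholder destination endpoint, key list and batch size:

```go
package main

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// flushBatched commits the accumulated ops as a transaction whenever the
// batch reaches maxTxnOps, then flushes any remainder at the end.
func flushBatched(ctx context.Context, dc *clientv3.Client, keys []string, maxTxnOps int) error {
	var ops []clientv3.Op
	flush := func() error {
		if len(ops) == 0 {
			return nil
		}
		if _, err := dc.Txn(ctx).Then(ops...).Commit(); err != nil {
			return err
		}
		ops = ops[:0]
		return nil
	}
	for _, k := range keys {
		ops = append(ops, clientv3.OpPut(k, "mirrored"))
		if len(ops) == maxTxnOps {
			if err := flush(); err != nil {
				return err
			}
		}
	}
	return flush()
}

func main() {
	dc, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		panic(err)
	}
	defer dc.Close()

	if err := flushBatched(context.Background(), dc, []string{"a", "b", "c"}, 2); err != nil {
		fmt.Println("mirror flush failed:", err)
	}
}
```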
diff --git a/etcdctl/ctlv3/command/member_command.go b/etcdctl/ctlv3/command/member_command.go
index 752f151673c..256bf2b9c7b 100644
--- a/etcdctl/ctlv3/command/member_command.go
+++ b/etcdctl/ctlv3/command/member_command.go
@@ -21,13 +21,15 @@ import (
"strings"
"github.com/spf13/cobra"
- "go.etcd.io/etcd/client/v3"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
)
var (
- memberPeerURLs string
- isLearner bool
+ memberPeerURLs string
+ isLearner bool
+ memberConsistency string
)
// NewMemberCommand returns the cobra command for "member".
@@ -99,6 +101,8 @@ The items in the lists are ID, Status, Name, Peer Addrs, Client Addrs, Is Learne
Run: memberListCommandFunc,
}
+ cc.Flags().StringVar(&memberConsistency, "consistency", "l", "Linearizable(l) or Serializable(s)")
+
return cc
}
@@ -157,7 +161,7 @@ func memberAddCommandFunc(cmd *cobra.Command, args []string) {
display.MemberAdd(*resp)
if _, ok := (display).(*simplePrinter); ok {
- conf := []string{}
+ var conf []string
for _, memb := range resp.Members {
for _, u := range memb.PeerURLs {
n := memb.Name
@@ -172,7 +176,7 @@ func memberAddCommandFunc(cmd *cobra.Command, args []string) {
fmt.Printf("ETCD_NAME=%q\n", newMemberName)
fmt.Printf("ETCD_INITIAL_CLUSTER=%q\n", strings.Join(conf, ","))
fmt.Printf("ETCD_INITIAL_ADVERTISE_PEER_URLS=%q\n", memberPeerURLs)
- fmt.Printf("ETCD_INITIAL_CLUSTER_STATE=\"existing\"\n")
+ fmt.Print("ETCD_INITIAL_CLUSTER_STATE=\"existing\"\n")
}
}
@@ -184,7 +188,7 @@ func memberRemoveCommandFunc(cmd *cobra.Command, args []string) {
id, err := strconv.ParseUint(args[0], 16, 64)
if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad member ID arg (%v), expecting ID in Hex", err))
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad member ID arg (%w), expecting ID in Hex", err))
}
ctx, cancel := commandCtx(cmd)
@@ -204,7 +208,7 @@ func memberUpdateCommandFunc(cmd *cobra.Command, args []string) {
id, err := strconv.ParseUint(args[0], 16, 64)
if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad member ID arg (%v), expecting ID in Hex", err))
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad member ID arg (%w), expecting ID in Hex", err))
}
if len(memberPeerURLs) == 0 {
@@ -225,8 +229,12 @@ func memberUpdateCommandFunc(cmd *cobra.Command, args []string) {
// memberListCommandFunc executes the "member list" command.
func memberListCommandFunc(cmd *cobra.Command, args []string) {
+ var opts []clientv3.OpOption
+ if IsSerializable(memberConsistency) {
+ opts = append(opts, clientv3.WithSerializable())
+ }
ctx, cancel := commandCtx(cmd)
- resp, err := mustClientFromCmd(cmd).MemberList(ctx)
+ resp, err := mustClientFromCmd(cmd).MemberList(ctx, opts...)
cancel()
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
@@ -243,7 +251,7 @@ func memberPromoteCommandFunc(cmd *cobra.Command, args []string) {
id, err := strconv.ParseUint(args[0], 16, 64)
if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad member ID arg (%v), expecting ID in Hex", err))
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad member ID arg (%w), expecting ID in Hex", err))
}
ctx, cancel := commandCtx(cmd)
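
`member list` gains a `--consistency` flag; when set to `s` the request carries `clientv3.WithSerializable()` and is answered from the contacted member's local state rather than going through quorum. A sketch of the equivalent direct call; note that `MemberList` accepting option arguments is part of the newer client API this patch targets (older clientv3 releases expose `MemberList(ctx)` only), and the endpoint is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 2 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Serializable read: served locally by the contacted member.
	resp, err := cli.MemberList(ctx, clientv3.WithSerializable())
	if err != nil {
		panic(err)
	}
	for _, m := range resp.Members {
		fmt.Printf("%x %s\n", m.ID, m.Name)
	}
}
```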
diff --git a/etcdctl/ctlv3/command/move_leader_command.go b/etcdctl/ctlv3/command/move_leader_command.go
index 098c897cd7a..a7b4f397b1c 100644
--- a/etcdctl/ctlv3/command/move_leader_command.go
+++ b/etcdctl/ctlv3/command/move_leader_command.go
@@ -19,7 +19,8 @@ import (
"strconv"
"github.com/spf13/cobra"
- "go.etcd.io/etcd/client/v3"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
)
@@ -43,9 +44,10 @@ func transferLeadershipCommandFunc(cmd *cobra.Command, args []string) {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
}
- c := mustClientFromCmd(cmd)
- eps := c.Endpoints()
- c.Close()
+ cfg := clientConfigFromCmd(cmd)
+ cli := mustClient(cfg)
+ eps := cli.Endpoints()
+ cli.Close()
ctx, cancel := commandCtx(cmd)
@@ -53,9 +55,8 @@ func transferLeadershipCommandFunc(cmd *cobra.Command, args []string) {
var leaderCli *clientv3.Client
var leaderID uint64
for _, ep := range eps {
- cfg := clientConfigFromCmd(cmd)
- cfg.endpoints = []string{ep}
- cli := cfg.mustClient()
+ cfg.Endpoints = []string{ep}
+ cli := mustClient(cfg)
resp, serr := cli.Status(ctx, ep)
if serr != nil {
cobrautl.ExitWithError(cobrautl.ExitError, serr)
diff --git a/etcdctl/ctlv3/command/printer.go b/etcdctl/ctlv3/command/printer.go
index 2d31d9ec8c6..90a76ebfc31 100644
--- a/etcdctl/ctlv3/command/printer.go
+++ b/etcdctl/ctlv3/command/printer.go
@@ -19,11 +19,11 @@ import (
"fmt"
"strings"
+ "github.com/dustin/go-humanize"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
v3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
-
- "github.com/dustin/go-humanize"
)
type printer interface {
@@ -50,6 +50,10 @@ type printer interface {
EndpointHashKV([]epHashKV)
MoveLeader(leader, target uint64, r v3.MoveLeaderResponse)
+ DowngradeValidate(r v3.DowngradeResponse)
+ DowngradeEnable(r v3.DowngradeResponse)
+ DowngradeCancel(r v3.DowngradeResponse)
+
Alarm(v3.AlarmResponse)
RoleAdd(role string, r v3.AuthRoleAddResponse)
@@ -75,7 +79,7 @@ func NewPrinter(printerType string, isHex bool) printer {
case "simple":
return &simplePrinter{isHex: isHex}
case "fields":
- return &fieldsPrinter{newPrinterUnsupported("fields")}
+ return &fieldsPrinter{printer: newPrinterUnsupported("fields"), isHex: isHex}
case "json":
return newJSONPrinter(isHex)
case "protobuf":
@@ -88,7 +92,7 @@ func NewPrinter(printerType string, isHex bool) printer {
type printerRPC struct {
printer
- p func(interface{})
+ p func(any)
}
func (p *printerRPC) Del(r v3.DeleteResponse) { p.p((*pb.DeleteRangeResponse)(&r)) }
@@ -107,14 +111,22 @@ func (p *printerRPC) MemberAdd(r v3.MemberAddResponse) { p.p((*pb.MemberAddRespo
func (p *printerRPC) MemberRemove(id uint64, r v3.MemberRemoveResponse) {
p.p((*pb.MemberRemoveResponse)(&r))
}
+
func (p *printerRPC) MemberUpdate(id uint64, r v3.MemberUpdateResponse) {
p.p((*pb.MemberUpdateResponse)(&r))
}
+
+func (p *printerRPC) MemberPromote(id uint64, r v3.MemberPromoteResponse) {
+ p.p((*pb.MemberPromoteResponse)(&r))
+}
func (p *printerRPC) MemberList(r v3.MemberListResponse) { p.p((*pb.MemberListResponse)(&r)) }
func (p *printerRPC) Alarm(r v3.AlarmResponse) { p.p((*pb.AlarmResponse)(&r)) }
func (p *printerRPC) MoveLeader(leader, target uint64, r v3.MoveLeaderResponse) {
p.p((*pb.MoveLeaderResponse)(&r))
}
+func (p *printerRPC) DowngradeValidate(r v3.DowngradeResponse) { p.p((*pb.DowngradeResponse)(&r)) }
+func (p *printerRPC) DowngradeEnable(r v3.DowngradeResponse) { p.p((*pb.DowngradeResponse)(&r)) }
+func (p *printerRPC) DowngradeCancel(r v3.DowngradeResponse) { p.p((*pb.DowngradeResponse)(&r)) }
func (p *printerRPC) RoleAdd(_ string, r v3.AuthRoleAddResponse) { p.p((*pb.AuthRoleAddResponse)(&r)) }
func (p *printerRPC) RoleGet(_ string, r v3.AuthRoleGetResponse) { p.p((*pb.AuthRoleGetResponse)(&r)) }
func (p *printerRPC) RoleDelete(_ string, r v3.AuthRoleDeleteResponse) {
@@ -124,6 +136,7 @@ func (p *printerRPC) RoleList(r v3.AuthRoleListResponse) { p.p((*pb.AuthRoleList
func (p *printerRPC) RoleGrantPermission(_ string, r v3.AuthRoleGrantPermissionResponse) {
p.p((*pb.AuthRoleGrantPermissionResponse)(&r))
}
+
func (p *printerRPC) RoleRevokePermission(_ string, _ string, _ string, r v3.AuthRoleRevokePermissionResponse) {
p.p((*pb.AuthRoleRevokePermissionResponse)(&r))
}
@@ -133,15 +146,19 @@ func (p *printerRPC) UserList(r v3.AuthUserListResponse) { p.p((*pb.Auth
func (p *printerRPC) UserChangePassword(r v3.AuthUserChangePasswordResponse) {
p.p((*pb.AuthUserChangePasswordResponse)(&r))
}
+
func (p *printerRPC) UserGrantRole(_ string, _ string, r v3.AuthUserGrantRoleResponse) {
p.p((*pb.AuthUserGrantRoleResponse)(&r))
}
+
func (p *printerRPC) UserRevokeRole(_ string, _ string, r v3.AuthUserRevokeRoleResponse) {
p.p((*pb.AuthUserRevokeRoleResponse)(&r))
}
+
func (p *printerRPC) UserDelete(_ string, r v3.AuthUserDeleteResponse) {
p.p((*pb.AuthUserDeleteResponse)(&r))
}
+
func (p *printerRPC) AuthStatus(r v3.AuthStatusResponse) {
p.p((*pb.AuthStatusResponse)(&r))
}
@@ -149,7 +166,7 @@ func (p *printerRPC) AuthStatus(r v3.AuthStatusResponse) {
type printerUnsupported struct{ printerRPC }
func newPrinterUnsupported(n string) printer {
- f := func(interface{}) {
+ f := func(any) {
cobrautl.ExitWithError(cobrautl.ExitBadFeature, errors.New(n+" not supported as output format"))
}
return &printerUnsupported{printerRPC{nil, f}}
@@ -160,6 +177,9 @@ func (p *printerUnsupported) EndpointStatus([]epStatus) { p.p(nil) }
func (p *printerUnsupported) EndpointHashKV([]epHashKV) { p.p(nil) }
func (p *printerUnsupported) MoveLeader(leader, target uint64, r v3.MoveLeaderResponse) { p.p(nil) }
+func (p *printerUnsupported) DowngradeValidate(r v3.DowngradeResponse) { p.p(nil) }
+func (p *printerUnsupported) DowngradeEnable(r v3.DowngradeResponse) { p.p(nil) }
+func (p *printerUnsupported) DowngradeCancel(r v3.DowngradeResponse) { p.p(nil) }
func makeMemberListTable(r v3.MemberListResponse) (hdr []string, rows [][]string) {
hdr = []string{"ID", "Status", "Name", "Peer Addrs", "Client Addrs", "Is Learner"}
@@ -198,14 +218,20 @@ func makeEndpointHealthTable(healthList []epHealth) (hdr []string, rows [][]stri
}
func makeEndpointStatusTable(statusList []epStatus) (hdr []string, rows [][]string) {
- hdr = []string{"endpoint", "ID", "version", "db size", "is leader", "is learner", "raft term",
- "raft index", "raft applied index", "errors"}
+ hdr = []string{
+ "endpoint", "ID", "version", "storage version", "db size", "in use", "percentage not in use", "quota", "is leader", "is learner", "raft term",
+ "raft index", "raft applied index", "errors",
+ }
for _, status := range statusList {
rows = append(rows, []string{
status.Ep,
fmt.Sprintf("%x", status.Resp.Header.MemberId),
status.Resp.Version,
+ status.Resp.StorageVersion,
humanize.Bytes(uint64(status.Resp.DbSize)),
+ humanize.Bytes(uint64(status.Resp.DbSizeInUse)),
+ fmt.Sprintf("%d%%", int(float64(100-(status.Resp.DbSizeInUse*100/status.Resp.DbSize)))),
+ humanize.Bytes(uint64(status.Resp.DbSizeQuota)),
fmt.Sprint(status.Resp.Leader == status.Resp.Header.MemberId),
fmt.Sprint(status.Resp.IsLearner),
fmt.Sprint(status.Resp.RaftTerm),
@@ -218,11 +244,12 @@ func makeEndpointStatusTable(statusList []epStatus) (hdr []string, rows [][]stri
}
func makeEndpointHashKVTable(hashList []epHashKV) (hdr []string, rows [][]string) {
- hdr = []string{"endpoint", "hash"}
+ hdr = []string{"endpoint", "hash", "hash_revision"}
for _, h := range hashList {
rows = append(rows, []string{
h.Ep,
fmt.Sprint(h.Resp.Hash),
+ fmt.Sprint(h.Resp.HashRevision),
})
}
return hdr, rows
diff --git a/etcdctl/ctlv3/command/printer_fields.go b/etcdctl/ctlv3/command/printer_fields.go
index ca4611c735c..86f1c70d71e 100644
--- a/etcdctl/ctlv3/command/printer_fields.go
+++ b/etcdctl/ctlv3/command/printer_fields.go
@@ -19,10 +19,14 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
spb "go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/client/pkg/v3/types"
v3 "go.etcd.io/etcd/client/v3"
)
-type fieldsPrinter struct{ printer }
+type fieldsPrinter struct {
+ printer
+ isHex bool
+}
func (p *fieldsPrinter) kv(pfx string, kv *spb.KeyValue) {
fmt.Printf("\"%sKey\" : %q\n", pfx, string(kv.Key))
@@ -30,13 +34,27 @@ func (p *fieldsPrinter) kv(pfx string, kv *spb.KeyValue) {
fmt.Printf("\"%sModRevision\" : %d\n", pfx, kv.ModRevision)
fmt.Printf("\"%sVersion\" : %d\n", pfx, kv.Version)
fmt.Printf("\"%sValue\" : %q\n", pfx, string(kv.Value))
- fmt.Printf("\"%sLease\" : %d\n", pfx, kv.Lease)
+ if p.isHex {
+ fmt.Printf("\"%sLease\" : %016x\n", pfx, kv.Lease)
+ } else {
+ fmt.Printf("\"%sLease\" : %d\n", pfx, kv.Lease)
+ }
}
func (p *fieldsPrinter) hdr(h *pb.ResponseHeader) {
- fmt.Println(`"ClusterID" :`, h.ClusterId)
- fmt.Println(`"MemberID" :`, h.MemberId)
- fmt.Println(`"Revision" :`, h.Revision)
+ if p.isHex {
+ fmt.Println(`"ClusterID" :`, types.ID(h.ClusterId))
+ fmt.Println(`"MemberID" :`, types.ID(h.MemberId))
+ } else {
+ fmt.Println(`"ClusterID" :`, h.ClusterId)
+ fmt.Println(`"MemberID" :`, h.MemberId)
+ }
+ // Revision only makes sense for k/v responses. For other kinds of
+ // responses, e.g. MemberList, usually the revision isn't populated
+ // at all; so it would be better to hide this field in these cases.
+ if h.Revision > 0 {
+ fmt.Println(`"Revision" :`, h.Revision)
+ }
fmt.Println(`"RaftTerm" :`, h.RaftTerm)
}
@@ -94,7 +112,11 @@ func (p *fieldsPrinter) Watch(resp v3.WatchResponse) {
func (p *fieldsPrinter) Grant(r v3.LeaseGrantResponse) {
p.hdr(r.ResponseHeader)
- fmt.Println(`"ID" :`, r.ID)
+ if p.isHex {
+ fmt.Printf("\"ID\" : %016x\n", r.ID)
+ } else {
+ fmt.Println(`"ID" :`, r.ID)
+ }
fmt.Println(`"TTL" :`, r.TTL)
}
@@ -104,13 +126,21 @@ func (p *fieldsPrinter) Revoke(id v3.LeaseID, r v3.LeaseRevokeResponse) {
func (p *fieldsPrinter) KeepAlive(r v3.LeaseKeepAliveResponse) {
p.hdr(r.ResponseHeader)
- fmt.Println(`"ID" :`, r.ID)
+ if p.isHex {
+ fmt.Printf("\"ID\" : %016x\n", r.ID)
+ } else {
+ fmt.Println(`"ID" :`, r.ID)
+ }
fmt.Println(`"TTL" :`, r.TTL)
}
func (p *fieldsPrinter) TimeToLive(r v3.LeaseTimeToLiveResponse, keys bool) {
p.hdr(r.ResponseHeader)
- fmt.Println(`"ID" :`, r.ID)
+ if p.isHex {
+ fmt.Printf("\"ID\" : %016x\n", r.ID)
+ } else {
+ fmt.Println(`"ID" :`, r.ID)
+ }
fmt.Println(`"TTL" :`, r.TTL)
fmt.Println(`"GrantedTTL" :`, r.GrantedTTL)
for _, k := range r.Keys {
@@ -121,14 +151,22 @@ func (p *fieldsPrinter) TimeToLive(r v3.LeaseTimeToLiveResponse, keys bool) {
func (p *fieldsPrinter) Leases(r v3.LeaseLeasesResponse) {
p.hdr(r.ResponseHeader)
for _, item := range r.Leases {
- fmt.Println(`"ID" :`, item.ID)
+ if p.isHex {
+ fmt.Printf("\"ID\" : %016x\n", item.ID)
+ } else {
+ fmt.Println(`"ID" :`, item.ID)
+ }
}
}
func (p *fieldsPrinter) MemberList(r v3.MemberListResponse) {
p.hdr(r.Header)
for _, m := range r.Members {
- fmt.Println(`"ID" :`, m.ID)
+ if p.isHex {
+ fmt.Println(`"ID" :`, types.ID(m.ID))
+ } else {
+ fmt.Println(`"ID" :`, m.ID)
+ }
fmt.Printf("\"Name\" : %q\n", m.Name)
for _, u := range m.PeerURLs {
fmt.Printf("\"PeerURL\" : %q\n", u)
@@ -155,7 +193,9 @@ func (p *fieldsPrinter) EndpointStatus(eps []epStatus) {
for _, ep := range eps {
p.hdr(ep.Resp.Header)
fmt.Printf("\"Version\" : %q\n", ep.Resp.Version)
+ fmt.Printf("\"StorageVersion\" : %q\n", ep.Resp.StorageVersion)
fmt.Println(`"DBSize" :`, ep.Resp.DbSize)
+ fmt.Println(`"DBSizeInUse" :`, ep.Resp.DbSizeInUse)
fmt.Println(`"Leader" :`, ep.Resp.Leader)
fmt.Println(`"IsLearner" :`, ep.Resp.IsLearner)
fmt.Println(`"RaftIndex" :`, ep.Resp.RaftIndex)
@@ -172,6 +212,7 @@ func (p *fieldsPrinter) EndpointHashKV(hs []epHashKV) {
p.hdr(h.Resp.Header)
fmt.Printf("\"Endpoint\" : %q\n", h.Ep)
fmt.Println(`"Hash" :`, h.Resp.Hash)
+ fmt.Println(`"HashRevision" :`, h.Resp.HashRevision)
fmt.Println()
}
}
@@ -179,7 +220,11 @@ func (p *fieldsPrinter) EndpointHashKV(hs []epHashKV) {
func (p *fieldsPrinter) Alarm(r v3.AlarmResponse) {
p.hdr(r.Header)
for _, a := range r.Alarms {
- fmt.Println(`"MemberID" :`, a.MemberID)
+ if p.isHex {
+ fmt.Println(`"MemberID" :`, types.ID(a.MemberID))
+ } else {
+ fmt.Println(`"MemberID" :`, a.MemberID)
+ }
fmt.Println(`"AlarmType" :`, a.Alarm)
fmt.Println()
}
@@ -197,15 +242,17 @@ func (p *fieldsPrinter) RoleGet(role string, r v3.AuthRoleGetResponse) {
func (p *fieldsPrinter) RoleDelete(role string, r v3.AuthRoleDeleteResponse) { p.hdr(r.Header) }
func (p *fieldsPrinter) RoleList(r v3.AuthRoleListResponse) {
p.hdr(r.Header)
- fmt.Printf(`"Roles" :`)
+ fmt.Print(`"Roles" :`)
for _, r := range r.Roles {
fmt.Printf(" %q", r)
}
fmt.Println()
}
+
func (p *fieldsPrinter) RoleGrantPermission(role string, r v3.AuthRoleGrantPermissionResponse) {
p.hdr(r.Header)
}
+
func (p *fieldsPrinter) RoleRevokePermission(role string, key string, end string, r v3.AuthRoleRevokePermissionResponse) {
p.hdr(r.Header)
}
@@ -214,6 +261,7 @@ func (p *fieldsPrinter) UserChangePassword(r v3.AuthUserChangePasswordResponse)
func (p *fieldsPrinter) UserGrantRole(user string, role string, r v3.AuthUserGrantRoleResponse) {
p.hdr(r.Header)
}
+
func (p *fieldsPrinter) UserRevokeRole(user string, role string, r v3.AuthUserRevokeRoleResponse) {
p.hdr(r.Header)
}
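
The fields printer now threads the `--hex` flag through and renders IDs via `types.ID`, whose `String` method formats the underlying uint64 in base 16. A tiny sketch of the difference, with an arbitrary example value:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/client/pkg/v3/types"
)

func main() {
	var memberID uint64 = 305419896 // arbitrary example value

	fmt.Println(`"MemberID" :`, memberID)           // decimal: 305419896
	fmt.Println(`"MemberID" :`, types.ID(memberID)) // hex: 12345678
}
```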
diff --git a/etcdctl/ctlv3/command/printer_json.go b/etcdctl/ctlv3/command/printer_json.go
index ca90a4a311c..49abb35ff5f 100644
--- a/etcdctl/ctlv3/command/printer_json.go
+++ b/etcdctl/ctlv3/command/printer_json.go
@@ -15,13 +15,13 @@
package command
import (
- "bytes"
"encoding/json"
"fmt"
"os"
"strconv"
+ "strings"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
type jsonPrinter struct {
@@ -48,7 +48,7 @@ func (p *jsonPrinter) MemberList(r clientv3.MemberListResponse) {
}
}
-func printJSON(v interface{}) {
+func printJSON(v any) {
b, err := json.Marshal(v)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
@@ -58,7 +58,7 @@ func printJSON(v interface{}) {
}
func printMemberListWithHexJSON(r clientv3.MemberListResponse) {
- var buffer bytes.Buffer
+ var buffer strings.Builder
var b []byte
buffer.WriteString("{\"header\":{\"cluster_id\":\"")
b = strconv.AppendUint(nil, r.Header.ClusterId, 16)
@@ -67,7 +67,7 @@ func printMemberListWithHexJSON(r clientv3.MemberListResponse) {
b = strconv.AppendUint(nil, r.Header.MemberId, 16)
buffer.Write(b)
buffer.WriteString("\",\"raft_term\":")
- b = strconv.AppendUint(nil, r.Header.RaftTerm, 16)
+ b = strconv.AppendUint(nil, r.Header.RaftTerm, 10)
buffer.Write(b)
buffer.WriteByte('}')
for i := 0; i < len(r.Members); i++ {
@@ -84,7 +84,7 @@ func printMemberListWithHexJSON(r clientv3.MemberListResponse) {
return
}
buffer.Write(b)
- buffer.WriteString(",\"clientURLS\":")
+ buffer.WriteString(",\"clientURLs\":")
b, err = json.Marshal(r.Members[i].ClientURLs)
if err != nil {
return
@@ -97,5 +97,4 @@ func printMemberListWithHexJSON(r clientv3.MemberListResponse) {
}
buffer.WriteString("}")
fmt.Println(buffer.String())
-
}
diff --git a/etcdctl/ctlv3/command/printer_protobuf.go b/etcdctl/ctlv3/command/printer_protobuf.go
index da1da9f3441..3c839bffc8c 100644
--- a/etcdctl/ctlv3/command/printer_protobuf.go
+++ b/etcdctl/ctlv3/command/printer_protobuf.go
@@ -51,7 +51,7 @@ func (p *pbPrinter) Watch(r v3.WatchResponse) {
printPB(&wr)
}
-func printPB(v interface{}) {
+func printPB(v any) {
m, ok := v.(pbMarshal)
if !ok {
cobrautl.ExitWithError(cobrautl.ExitBadFeature, fmt.Errorf("marshal unsupported for type %T (%v)", v, v))
diff --git a/etcdctl/ctlv3/command/printer_simple.go b/etcdctl/ctlv3/command/printer_simple.go
index c5939fa4728..c7e4fab725e 100644
--- a/etcdctl/ctlv3/command/printer_simple.go
+++ b/etcdctl/ctlv3/command/printer_simple.go
@@ -24,6 +24,8 @@ import (
v3 "go.etcd.io/etcd/client/v3"
)
+const rootRole = "root"
+
type simplePrinter struct {
isHex bool
valueOnly bool
@@ -124,7 +126,11 @@ func (s *simplePrinter) Alarm(resp v3.AlarmResponse) {
}
func (s *simplePrinter) MemberAdd(r v3.MemberAddResponse) {
- fmt.Printf("Member %16x added to cluster %16x\n", r.Member.ID, r.Header.ClusterId)
+ asLearner := " "
+ if r.Member.IsLearner {
+ asLearner = " as learner "
+ }
+ fmt.Printf("Member %16x added%sto cluster %16x\n", r.Member.ID, asLearner, r.Header.ClusterId)
}
func (s *simplePrinter) MemberRemove(id uint64, r v3.MemberRemoveResponse) {
@@ -174,12 +180,32 @@ func (s *simplePrinter) MoveLeader(leader, target uint64, r v3.MoveLeaderRespons
fmt.Printf("Leadership transferred from %s to %s\n", types.ID(leader), types.ID(target))
}
+func (s *simplePrinter) DowngradeValidate(r v3.DowngradeResponse) {
+ fmt.Printf("Downgrade validate success, cluster version %s\n", r.Version)
+}
+
+func (s *simplePrinter) DowngradeEnable(r v3.DowngradeResponse) {
+ fmt.Printf("Downgrade enable success, cluster version %s\n", r.Version)
+}
+
+func (s *simplePrinter) DowngradeCancel(r v3.DowngradeResponse) {
+ fmt.Printf("Downgrade cancel success, cluster version %s\n", r.Version)
+}
+
func (s *simplePrinter) RoleAdd(role string, r v3.AuthRoleAddResponse) {
fmt.Printf("Role %s created\n", role)
}
func (s *simplePrinter) RoleGet(role string, r v3.AuthRoleGetResponse) {
fmt.Printf("Role %s\n", role)
+ if rootRole == role && r.Perm == nil {
+ fmt.Println("KV Read:")
+ fmt.Println("\t[, ")
+ fmt.Println("KV Write:")
+ fmt.Println("\t[, ")
+ return
+ }
+
fmt.Println("KV Read:")
printRange := func(perm *v3.Permission) {
@@ -190,16 +216,16 @@ func (s *simplePrinter) RoleGet(role string, r v3.AuthRoleGetResponse) {
} else {
fmt.Printf("\t[%s, ", sKey)
}
- if v3.GetPrefixRangeEnd(sKey) == sRangeEnd {
+ if v3.GetPrefixRangeEnd(sKey) == sRangeEnd && len(sKey) > 0 {
fmt.Printf(" (prefix %s)", sKey)
}
- fmt.Printf("\n")
+ fmt.Print("\n")
}
for _, perm := range r.Perm {
if perm.PermType == v3.PermRead || perm.PermType == v3.PermReadWrite {
if len(perm.RangeEnd) == 0 {
- fmt.Printf("\t%s\n", string(perm.Key))
+ fmt.Printf("\t%s\n", perm.Key)
} else {
printRange((*v3.Permission)(perm))
}
@@ -209,7 +235,7 @@ func (s *simplePrinter) RoleGet(role string, r v3.AuthRoleGetResponse) {
for _, perm := range r.Perm {
if perm.PermType == v3.PermWrite || perm.PermType == v3.PermReadWrite {
if len(perm.RangeEnd) == 0 {
- fmt.Printf("\t%s\n", string(perm.Key))
+ fmt.Printf("\t%s\n", perm.Key)
} else {
printRange((*v3.Permission)(perm))
}
@@ -249,11 +275,11 @@ func (s *simplePrinter) UserAdd(name string, r v3.AuthUserAddResponse) {
func (s *simplePrinter) UserGet(name string, r v3.AuthUserGetResponse) {
fmt.Printf("User: %s\n", name)
- fmt.Printf("Roles:")
+ fmt.Print("Roles:")
for _, role := range r.Roles {
fmt.Printf(" %s", role)
}
- fmt.Printf("\n")
+ fmt.Print("\n")
}
func (s *simplePrinter) UserChangePassword(v3.AuthUserChangePasswordResponse) {
diff --git a/etcdctl/ctlv3/command/printer_table.go b/etcdctl/ctlv3/command/printer_table.go
index 2bc6cfcf603..c576231ed5b 100644
--- a/etcdctl/ctlv3/command/printer_table.go
+++ b/etcdctl/ctlv3/command/printer_table.go
@@ -17,9 +17,9 @@ package command
import (
"os"
- v3 "go.etcd.io/etcd/client/v3"
-
"github.com/olekukonko/tablewriter"
+
+ v3 "go.etcd.io/etcd/client/v3"
)
type tablePrinter struct{ printer }
@@ -34,6 +34,7 @@ func (tp *tablePrinter) MemberList(r v3.MemberListResponse) {
table.SetAlignment(tablewriter.ALIGN_RIGHT)
table.Render()
}
+
func (tp *tablePrinter) EndpointHealth(r []epHealth) {
hdr, rows := makeEndpointHealthTable(r)
table := tablewriter.NewWriter(os.Stdout)
@@ -44,6 +45,7 @@ func (tp *tablePrinter) EndpointHealth(r []epHealth) {
table.SetAlignment(tablewriter.ALIGN_RIGHT)
table.Render()
}
+
func (tp *tablePrinter) EndpointStatus(r []epStatus) {
hdr, rows := makeEndpointStatusTable(r)
table := tablewriter.NewWriter(os.Stdout)
@@ -54,6 +56,7 @@ func (tp *tablePrinter) EndpointStatus(r []epStatus) {
table.SetAlignment(tablewriter.ALIGN_RIGHT)
table.Render()
}
+
func (tp *tablePrinter) EndpointHashKV(r []epHashKV) {
hdr, rows := makeEndpointHashKVTable(r)
table := tablewriter.NewWriter(os.Stdout)
diff --git a/etcdctl/ctlv3/command/put_command.go b/etcdctl/ctlv3/command/put_command.go
index 35eb32148d6..4814db55990 100644
--- a/etcdctl/ctlv3/command/put_command.go
+++ b/etcdctl/ctlv3/command/put_command.go
@@ -20,7 +20,8 @@ import (
"strconv"
"github.com/spf13/cobra"
- "go.etcd.io/etcd/client/v3"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
)
@@ -98,10 +99,10 @@ func getPutOp(args []string) (string, string, []clientv3.OpOption) {
id, err := strconv.ParseInt(leaseStr, 16, 64)
if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad lease ID (%v), expecting ID in Hex", err))
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad lease ID (%w), expecting ID in Hex", err))
}
- opts := []clientv3.OpOption{}
+ var opts []clientv3.OpOption
if id != 0 {
opts = append(opts, clientv3.WithLease(clientv3.LeaseID(id)))
}
diff --git a/etcdctl/ctlv3/command/role_command.go b/etcdctl/ctlv3/command/role_command.go
index 885f11fdb34..705d88d886f 100644
--- a/etcdctl/ctlv3/command/role_command.go
+++ b/etcdctl/ctlv3/command/role_command.go
@@ -19,7 +19,8 @@ import (
"fmt"
"github.com/spf13/cobra"
- "go.etcd.io/etcd/client/v3"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
)
diff --git a/etcdctl/ctlv3/command/snapshot_command.go b/etcdctl/ctlv3/command/snapshot_command.go
index e5d3f3f1c56..66934bede37 100644
--- a/etcdctl/ctlv3/command/snapshot_command.go
+++ b/etcdctl/ctlv3/command/snapshot_command.go
@@ -17,88 +17,62 @@ package command
import (
"context"
"fmt"
- "os"
"github.com/spf13/cobra"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
snapshot "go.etcd.io/etcd/client/v3/snapshot"
- "go.etcd.io/etcd/etcdutl/v3/etcdutl"
+ "go.etcd.io/etcd/etcdctl/v3/util"
"go.etcd.io/etcd/pkg/v3/cobrautl"
- "go.uber.org/zap"
)
-const (
- defaultName = "default"
- defaultInitialAdvertisePeerURLs = "http://localhost:2380"
-)
+var snapshotExample = util.Normalize(`
+ # Save snapshot to a given file
+ etcdctl snapshot save /backup/etcd-snapshot.db
-var (
- restoreCluster string
- restoreClusterToken string
- restoreDataDir string
- restoreWalDir string
- restorePeerURLs string
- restoreName string
- skipHashCheck bool
-)
+ # Get snapshot from a given address and save it to a file
+ etcdctl snapshot save --endpoints=127.0.0.1:3000 /backup/etcd-snapshot.db
+
+ # Get snapshot from a given address with certificates
+ etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/etcd/ca.crt --cert=/etc/etcd/etcd.crt --key=/etc/etcd/etcd.key snapshot save /backup/etcd-snapshot.db
+
+ # Get snapshot with a given user and password
+ etcdctl --user=root --password=password123 snapshot save /backup/etcd-snapshot.db
+
+ # Get snapshot from a given address with a timeout
+ etcdctl --endpoints=https://127.0.0.1:2379 --dial-timeout=20s snapshot save /backup/etcd-snapshot.db
+
+ # Save snapshot with a timestamp in the file name
+ etcdctl snapshot save /mnt/backup/etcd/backup_$(date +%Y%m%d_%H%M%S).db`)
// NewSnapshotCommand returns the cobra command for "snapshot".
func NewSnapshotCommand() *cobra.Command {
cmd := &cobra.Command{
- Use: "snapshot ",
- Short: "Manages etcd node snapshots",
+ Use: "snapshot ",
+ Short: "Manages etcd node snapshots",
+ Example: snapshotExample,
}
cmd.AddCommand(NewSnapshotSaveCommand())
- cmd.AddCommand(NewSnapshotRestoreCommand())
- cmd.AddCommand(newSnapshotStatusCommand())
return cmd
}
func NewSnapshotSaveCommand() *cobra.Command {
return &cobra.Command{
- Use: "save ",
- Short: "Stores an etcd node backend snapshot to a given file",
- Run: snapshotSaveCommandFunc,
- }
-}
-
-func newSnapshotStatusCommand() *cobra.Command {
- return &cobra.Command{
- Use: "status ",
- Short: "[deprecated] Gets backend snapshot status of a given file",
- Long: `When --write-out is set to simple, this command prints out comma-separated status lists for each endpoint.
-The items in the lists are hash, revision, total keys, total size.
-
-Moved to 'etcdctl snapshot status ...'
-`,
- Run: snapshotStatusCommandFunc,
+ Use: "save ",
+ Short: "Stores an etcd node backend snapshot to a given file",
+ Run: snapshotSaveCommandFunc,
+ Example: snapshotExample,
}
}
-func NewSnapshotRestoreCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "restore [options]",
- Short: "Restores an etcd member snapshot to an etcd directory",
- Run: snapshotRestoreCommandFunc,
- Long: "Moved to `etcdctl snapshot restore ...`\n",
- }
- cmd.Flags().StringVar(&restoreDataDir, "data-dir", "", "Path to the data directory")
- cmd.Flags().StringVar(&restoreWalDir, "wal-dir", "", "Path to the WAL directory (use --data-dir if none given)")
- cmd.Flags().StringVar(&restoreCluster, "initial-cluster", initialClusterFromName(defaultName), "Initial cluster configuration for restore bootstrap")
- cmd.Flags().StringVar(&restoreClusterToken, "initial-cluster-token", "etcd-cluster", "Initial cluster token for the etcd cluster during restore bootstrap")
- cmd.Flags().StringVar(&restorePeerURLs, "initial-advertise-peer-urls", defaultInitialAdvertisePeerURLs, "List of this member's peer URLs to advertise to the rest of the cluster")
- cmd.Flags().StringVar(&restoreName, "name", defaultName, "Human-readable name for this member")
- cmd.Flags().BoolVar(&skipHashCheck, "skip-hash-check", false, "Ignore snapshot integrity hash value (required if copied from data directory)")
-
- return cmd
-}
-
func snapshotSaveCommandFunc(cmd *cobra.Command, args []string) {
if len(args) != 1 {
- err := fmt.Errorf("snapshot save expects one argument")
+ err := fmt.Errorf("snapshot save expects one argument ")
cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
}
- lg, err := zap.NewProduction()
+ lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel)
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
@@ -112,27 +86,12 @@ func snapshotSaveCommandFunc(cmd *cobra.Command, args []string) {
defer cancel()
path := args[0]
- if err := snapshot.Save(ctx, lg, *cfg, path); err != nil {
+ version, err := snapshot.SaveWithVersion(ctx, lg, *cfg, path)
+ if err != nil {
cobrautl.ExitWithError(cobrautl.ExitInterrupted, err)
}
fmt.Printf("Snapshot saved at %s\n", path)
-}
-
-func snapshotStatusCommandFunc(cmd *cobra.Command, args []string) {
- fmt.Fprintf(os.Stderr, "Deprecated: Use `etcdutl snapshot status` instead.\n\n")
- etcdutl.SnapshotStatusCommandFunc(cmd, args)
-}
-
-func snapshotRestoreCommandFunc(cmd *cobra.Command, args []string) {
- fmt.Fprintf(os.Stderr, "Deprecated: Use `etcdutl snapshot restore` instead.\n\n")
- etcdutl.SnapshotRestoreCommandFunc(restoreCluster, restoreClusterToken, restoreDataDir, restoreWalDir,
- restorePeerURLs, restoreName, skipHashCheck, args)
-}
-
-func initialClusterFromName(name string) string {
- n := name
- if name == "" {
- n = defaultName
+ if version != "" {
+ fmt.Printf("Server version %s\n", version)
}
- return fmt.Sprintf("%s=http://localhost:2380", n)
}
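
`snapshot save` now goes through `snapshot.SaveWithVersion`, which writes the snapshot file and also returns the server's storage version (printed when non-empty). A minimal standalone sketch of that call, with a placeholder endpoint, timeout and output path:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"go.uber.org/zap"

	"go.etcd.io/etcd/client/pkg/v3/logutil"
	clientv3 "go.etcd.io/etcd/client/v3"
	snapshot "go.etcd.io/etcd/client/v3/snapshot"
)

func main() {
	lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel)
	if err != nil {
		panic(err)
	}

	cfg := clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 20 * time.Second,
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// Saves the snapshot and reports the server version stored in it.
	version, err := snapshot.SaveWithVersion(ctx, lg, cfg, "/backup/etcd-snapshot.db")
	if err != nil {
		panic(err)
	}
	fmt.Println("snapshot saved, server version:", version)
}
```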
diff --git a/etcdctl/ctlv3/command/txn_command.go b/etcdctl/ctlv3/command/txn_command.go
index e20b6d95a8e..5433f82bda6 100644
--- a/etcdctl/ctlv3/command/txn_command.go
+++ b/etcdctl/ctlv3/command/txn_command.go
@@ -22,11 +22,11 @@ import (
"strconv"
"strings"
+ "github.com/spf13/cobra"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
-
- "github.com/spf13/cobra"
)
var txnInteractive bool
@@ -36,7 +36,30 @@ func NewTxnCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "txn [options]",
Short: "Txn processes all the requests in one transaction",
- Run: txnCommandFunc,
+ Long: `Txn reads multiple etcd requests from standard input and applies them as a single atomic transaction.
+
+A transaction consists of three components:
+1) a list of conditions,
+2) a list of requests to apply if all the conditions are true,
+3) a list of requests to apply if any condition is false.
+
+Example interactive stdin usage:
+
+---
+etcdctl txn -i
+# compares:
+mod("key1") > "0"
+
+# success requests (get, put, delete):
+put key1 "overwrote-key1"
+
+# failure requests (get, put, delete):
+put key1 "created-key1"
+put key2 "some extra key"
+---
+
+Refer to https://github.com/etcd-io/etcd/blob/main/etcdctl/README.md#txn-options.`,
+ Run: txnCommandFunc,
}
cmd.Flags().BoolVarP(&txnInteractive, "interactive", "i", false, "Input transaction in interactive mode")
return cmd
@@ -85,7 +108,7 @@ func readCompares(r *bufio.Reader) (cmps []clientv3.Cmp) {
break
}
- cmp, err := parseCompare(line)
+ cmp, err := ParseCompare(line)
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitInvalidInput, err)
}
@@ -119,7 +142,7 @@ func readOps(r *bufio.Reader) (ops []clientv3.Op) {
}
func parseRequestUnion(line string) (*clientv3.Op, error) {
- args := argify(line)
+ args := Argify(line)
if len(args) < 2 {
return nil, fmt.Errorf("invalid txn compare request: %s", line)
}
@@ -153,7 +176,7 @@ func parseRequestUnion(line string) (*clientv3.Op, error) {
return &op, nil
}
-func parseCompare(line string) (*clientv3.Cmp, error) {
+func ParseCompare(line string) (*clientv3.Cmp, error) {
var (
key string
op string
@@ -171,7 +194,7 @@ func parseCompare(line string) (*clientv3.Cmp, error) {
return nil, fmt.Errorf("malformed comparison: %s; got %s(%q) %s %q", line, target, key, op, val)
}
if serr != nil {
- return nil, fmt.Errorf("malformed comparison: %s (%v)", line, serr)
+ return nil, fmt.Errorf("malformed comparison: %s (%w)", line, serr)
}
var (
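
The new long help for `txn` describes the compare / success / failure structure of a transaction. For comparison, the same interactive example expressed directly against the client API (endpoint and timeout are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 2 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// If key1 has been modified at least once, overwrite it;
	// otherwise create it and an extra key.
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.ModRevision("key1"), ">", 0)).
		Then(clientv3.OpPut("key1", "overwrote-key1")).
		Else(
			clientv3.OpPut("key1", "created-key1"),
			clientv3.OpPut("key2", "some extra key"),
		).
		Commit()
	if err != nil {
		panic(err)
	}
	fmt.Println("conditions held:", resp.Succeeded)
}
```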
diff --git a/etcdctl/ctlv3/command/user_command.go b/etcdctl/ctlv3/command/user_command.go
index ced3af72190..f3e59a7048b 100644
--- a/etcdctl/ctlv3/command/user_command.go
+++ b/etcdctl/ctlv3/command/user_command.go
@@ -21,13 +21,12 @@ import (
"github.com/bgentry/speakeasy"
"github.com/spf13/cobra"
- "go.etcd.io/etcd/client/v3"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
)
-var (
- userShowDetail bool
-)
+var userShowDetail bool
// NewUserCommand returns the cobra command for "user".
func NewUserCommand() *cobra.Command {
@@ -199,7 +198,7 @@ func userGetCommandFunc(cmd *cobra.Command, args []string) {
if userShowDetail {
fmt.Printf("User: %s\n", name)
for _, role := range resp.Roles {
- fmt.Printf("\n")
+ fmt.Print("\n")
roleResp, err := client.Auth.RoleGet(context.TODO(), role)
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
@@ -279,7 +278,7 @@ func readPasswordInteractive(name string) string {
prompt1 := fmt.Sprintf("Password of %s: ", name)
password1, err1 := speakeasy.Ask(prompt1)
if err1 != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("failed to ask password: %s", err1))
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("failed to ask password: %w", err1))
}
if len(password1) == 0 {
@@ -289,7 +288,7 @@ func readPasswordInteractive(name string) string {
prompt2 := fmt.Sprintf("Type password of %s again for confirmation: ", name)
password2, err2 := speakeasy.Ask(prompt2)
if err2 != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("failed to ask password: %s", err2))
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("failed to ask password: %w", err2))
}
if password1 != password2 {
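Several hunks in this file (and elsewhere in the patch) switch fmt.Errorf verbs from %s/%v to %w. A minimal standalone sketch, not taken from this patch, of why that matters: wrapping with %w keeps the original error reachable for errors.Is and errors.As.

```go
// Sketch only: %w wraps the underlying error so callers can still inspect it.
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func open(path string) error {
	if _, err := os.Open(path); err != nil {
		return fmt.Errorf("failed to open %q: %w", path, err) // %w preserves the chain
	}
	return nil
}

func main() {
	err := open("/does/not/exist")
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true with %w; false with %s or %v
}
```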
diff --git a/etcdctl/ctlv3/command/util.go b/etcdctl/ctlv3/command/util.go
index cd15fd33952..05fa19bc49e 100644
--- a/etcdctl/ctlv3/command/util.go
+++ b/etcdctl/ctlv3/command/util.go
@@ -19,18 +19,18 @@ import (
"crypto/tls"
"encoding/hex"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"regexp"
"strconv"
"strings"
"time"
+ "github.com/spf13/cobra"
+
pb "go.etcd.io/etcd/api/v3/mvccpb"
- v3 "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
-
- "github.com/spf13/cobra"
)
func printKV(isHex bool, valueOnly bool, kv *pb.KeyValue) {
@@ -56,9 +56,10 @@ func addHexPrefix(s string) string {
return string(ns)
}
-func argify(s string) []string {
- r := regexp.MustCompile(`"(?:[^"\\]|\\.)*"|'[^']*'|[^'"\s]\S*[^'"\s]?`)
- args := r.FindAllString(s, -1)
+var argsRegexp = regexp.MustCompile(`"(?:[^"\\]|\\.)*"|'[^']*'|[^'"\s]\S*[^'"\s]?`)
+
+func Argify(s string) []string {
+ args := argsRegexp.FindAllString(s, -1)
for i := range args {
if len(args[i]) == 0 {
continue
@@ -93,7 +94,7 @@ func isCommandTimeoutFlagSet(cmd *cobra.Command) bool {
}
// get the process_resident_memory_bytes from /metrics
-func endpointMemoryMetrics(host string, scfg *secureCfg) float64 {
+func endpointMemoryMetrics(host string, scfg *clientv3.SecureConfig) float64 {
residentMemoryKey := "process_resident_memory_bytes"
var residentMemoryValue string
if !strings.HasPrefix(host, "http://") && !strings.HasPrefix(host, "https://") {
@@ -102,25 +103,25 @@ func endpointMemoryMetrics(host string, scfg *secureCfg) float64 {
url := host + "/metrics"
if strings.HasPrefix(host, "https://") {
// load client certificate
- cert, err := tls.LoadX509KeyPair(scfg.cert, scfg.key)
+ cert, err := tls.LoadX509KeyPair(scfg.Cert, scfg.Key)
if err != nil {
- fmt.Println(fmt.Sprintf("client certificate error: %v", err))
+ fmt.Printf("client certificate error: %v\n", err)
return 0.0
}
http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
Certificates: []tls.Certificate{cert},
- InsecureSkipVerify: scfg.insecureSkipVerify,
+ InsecureSkipVerify: scfg.InsecureSkipVerify,
}
}
resp, err := http.Get(url)
if err != nil {
- fmt.Println(fmt.Sprintf("fetch error: %v", err))
+ fmt.Printf("fetch error: %v\n", err)
return 0.0
}
- byts, readerr := ioutil.ReadAll(resp.Body)
+ byts, readerr := io.ReadAll(resp.Body)
resp.Body.Close()
if readerr != nil {
- fmt.Println(fmt.Sprintf("fetch error: reading %s: %v", url, readerr))
+ fmt.Printf("fetch error: reading %s: %v\n", url, readerr)
return 0.0
}
@@ -131,12 +132,12 @@ func endpointMemoryMetrics(host string, scfg *secureCfg) float64 {
}
}
if residentMemoryValue == "" {
- fmt.Println(fmt.Sprintf("could not find: %v", residentMemoryKey))
+ fmt.Printf("could not find: %v\n", residentMemoryKey)
return 0.0
}
residentMemoryBytes, parseErr := strconv.ParseFloat(residentMemoryValue, 64)
if parseErr != nil {
- fmt.Println(fmt.Sprintf("parse error: %v", parseErr))
+ fmt.Printf("parse error: %v\n", parseErr)
return 0.0
}
@@ -144,10 +145,10 @@ func endpointMemoryMetrics(host string, scfg *secureCfg) float64 {
}
// compact keyspace history to a provided revision
-func compact(c *v3.Client, rev int64) {
+func compact(c *clientv3.Client, rev int64) {
fmt.Printf("Compacting with revision %d\n", rev)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
- _, err := c.Compact(ctx, rev, v3.WithCompactPhysical())
+ _, err := c.Compact(ctx, rev, clientv3.WithCompactPhysical())
cancel()
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
@@ -156,7 +157,7 @@ func compact(c *v3.Client, rev int64) {
}
// defrag a given endpoint
-func defrag(c *v3.Client, ep string) {
+func defrag(c *clientv3.Client, ep string) {
fmt.Printf("Defragmenting %q\n", ep)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
_, err := c.Defragment(ctx, ep)
@@ -166,3 +167,14 @@ func defrag(c *v3.Client, ep string) {
}
fmt.Printf("Defragmented %q\n", ep)
}
+
+func IsSerializable(option string) bool {
+ switch option {
+ case "s":
+ return true
+ case "l":
+ default:
+ cobrautl.ExitWithError(cobrautl.ExitBadFeature, fmt.Errorf("unknown consistency flag %q", option))
+ }
+ return false
+}
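The hunk above hoists the argument-splitting regexp into a package-level argsRegexp and exports Argify for reuse by the txn and watch commands. A small standalone sketch, assuming nothing beyond the regexp shown in the diff, of what the raw tokenizer produces (Argify additionally unquotes the matched tokens):

```go
// Illustration only: the same pattern now stored in argsRegexp, applied to one
// of the txn example lines. This shows raw tokens; quote handling is separate.
package main

import (
	"fmt"
	"regexp"
)

var argsRegexp = regexp.MustCompile(`"(?:[^"\\]|\\.)*"|'[^']*'|[^'"\s]\S*[^'"\s]?`)

func main() {
	line := `put key1 "overwrote-key1"`
	fmt.Printf("%q\n", argsRegexp.FindAllString(line, -1))
	// Output: ["put" "key1" "\"overwrote-key1\""]
}
```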
diff --git a/etcdctl/ctlv3/command/version_command.go b/etcdctl/ctlv3/command/version_command.go
index b65c299048b..e57567d6a3d 100644
--- a/etcdctl/ctlv3/command/version_command.go
+++ b/etcdctl/ctlv3/command/version_command.go
@@ -17,9 +17,9 @@ package command
import (
"fmt"
- "go.etcd.io/etcd/api/v3/version"
-
"github.com/spf13/cobra"
+
+ "go.etcd.io/etcd/api/v3/version"
)
// NewVersionCommand prints out the version of etcd.
diff --git a/etcdctl/ctlv3/command/watch_command.go b/etcdctl/ctlv3/command/watch_command.go
index 28826500483..fa8fc72d82f 100644
--- a/etcdctl/ctlv3/command/watch_command.go
+++ b/etcdctl/ctlv3/command/watch_command.go
@@ -23,10 +23,10 @@ import (
"os/exec"
"strings"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-
"github.com/spf13/cobra"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/cobrautl"
)
var (
@@ -99,11 +99,11 @@ func watchInteractiveFunc(cmd *cobra.Command, osArgs []string, envKey, envRange
for {
l, err := reader.ReadString('\n')
if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitInvalidInput, fmt.Errorf("error reading watch request line: %v", err))
+ cobrautl.ExitWithError(cobrautl.ExitInvalidInput, fmt.Errorf("error reading watch request line: %w", err))
}
l = strings.TrimSuffix(l, "\n")
- args := argify(l)
+ args := Argify(l)
if len(args) < 1 {
fmt.Fprintf(os.Stderr, "Invalid command: %s (watch and progress supported)\n", l)
continue
@@ -134,7 +134,6 @@ func watchInteractiveFunc(cmd *cobra.Command, osArgs []string, envKey, envRange
fmt.Fprintf(os.Stderr, "Invalid command %s (only support watch)\n", l)
continue
}
-
}
}
diff --git a/etcdctl/ctlv3/command/watch_command_test.go b/etcdctl/ctlv3/command/watch_command_test.go
index 2292deadcbb..5a98b563878 100644
--- a/etcdctl/ctlv3/command/watch_command_test.go
+++ b/etcdctl/ctlv3/command/watch_command_test.go
@@ -17,6 +17,8 @@ package command
import (
"reflect"
"testing"
+
+ "github.com/stretchr/testify/require"
)
func Test_parseWatchArgs(t *testing.T) {
@@ -534,25 +536,13 @@ func Test_parseWatchArgs(t *testing.T) {
}
for i, ts := range tt {
watchArgs, execArgs, err := parseWatchArgs(ts.osArgs, ts.commandArgs, ts.envKey, ts.envRange, ts.interactive)
- if err != ts.err {
- t.Fatalf("#%d: error expected %v, got %v", i, ts.err, err)
- }
- if !reflect.DeepEqual(watchArgs, ts.watchArgs) {
- t.Fatalf("#%d: watchArgs expected %q, got %v", i, ts.watchArgs, watchArgs)
- }
- if !reflect.DeepEqual(execArgs, ts.execArgs) {
- t.Fatalf("#%d: execArgs expected %q, got %v", i, ts.execArgs, execArgs)
- }
+ require.ErrorIsf(t, err, ts.err, "#%d: error expected %v, got %v", i, ts.err, err)
+ require.Truef(t, reflect.DeepEqual(watchArgs, ts.watchArgs), "#%d: watchArgs expected %q, got %v", i, ts.watchArgs, watchArgs)
+ require.Truef(t, reflect.DeepEqual(execArgs, ts.execArgs), "#%d: execArgs expected %q, got %v", i, ts.execArgs, execArgs)
if ts.interactive {
- if ts.interactiveWatchPrefix != watchPrefix {
- t.Fatalf("#%d: interactive watchPrefix expected %v, got %v", i, ts.interactiveWatchPrefix, watchPrefix)
- }
- if ts.interactiveWatchRev != watchRev {
- t.Fatalf("#%d: interactive watchRev expected %d, got %d", i, ts.interactiveWatchRev, watchRev)
- }
- if ts.interactiveWatchPrevKey != watchPrevKey {
- t.Fatalf("#%d: interactive watchPrevKey expected %v, got %v", i, ts.interactiveWatchPrevKey, watchPrevKey)
- }
+ require.Equalf(t, ts.interactiveWatchPrefix, watchPrefix, "#%d: interactive watchPrefix expected %v, got %v", i, ts.interactiveWatchPrefix, watchPrefix)
+ require.Equalf(t, ts.interactiveWatchRev, watchRev, "#%d: interactive watchRev expected %d, got %d", i, ts.interactiveWatchRev, watchRev)
+ require.Equalf(t, ts.interactiveWatchPrevKey, watchPrevKey, "#%d: interactive watchPrevKey expected %v, got %v", i, ts.interactiveWatchPrevKey, watchPrevKey)
}
}
}
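The test above is converted from hand-rolled t.Fatalf checks to testify's require helpers, which stop the test at the first failed assertion. A tiny illustrative test, with made-up names rather than the real parseWatchArgs fixtures, showing the same before/after pattern:

```go
// Sketch only: the t.Fatalf -> require conversion pattern used in the hunk above.
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestSum(t *testing.T) {
	got := 2 + 2
	want := 4

	// Before: hand-rolled comparison and failure message.
	// if got != want {
	// 	t.Fatalf("expected %d, got %d", want, got)
	// }

	// After: require fails and halts the test immediately on mismatch.
	require.Equalf(t, want, got, "expected %d, got %d", want, got)
}
```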
diff --git a/etcdctl/ctlv3/ctl.go b/etcdctl/ctlv3/ctl.go
index d25263c734d..168386fb0a7 100644
--- a/etcdctl/ctlv3/ctl.go
+++ b/etcdctl/ctlv3/ctl.go
@@ -16,13 +16,14 @@
package ctlv3
import (
+ "os"
"time"
+ "github.com/spf13/cobra"
+
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/etcdctl/v3/ctlv3/command"
"go.etcd.io/etcd/pkg/v3/cobrautl"
-
- "github.com/spf13/cobra"
)
const (
@@ -37,10 +38,7 @@ const (
var (
globalFlags = command.GlobalFlags{}
-)
-
-var (
- rootCmd = &cobra.Command{
+ rootCmd = &cobra.Command{
Use: cliName,
Short: cliDescription,
SuggestFor: []string{"etcdctl"},
@@ -53,11 +51,16 @@ func init() {
rootCmd.PersistentFlags().StringVarP(&globalFlags.OutputFormat, "write-out", "w", "simple", "set the output format (fields, json, protobuf, simple, table)")
rootCmd.PersistentFlags().BoolVar(&globalFlags.IsHex, "hex", false, "print byte strings as hex encoded strings")
+ rootCmd.RegisterFlagCompletionFunc("write-out", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+ return []string{"fields", "json", "protobuf", "simple", "table"}, cobra.ShellCompDirectiveDefault
+ })
rootCmd.PersistentFlags().DurationVar(&globalFlags.DialTimeout, "dial-timeout", defaultDialTimeout, "dial timeout for client connections")
rootCmd.PersistentFlags().DurationVar(&globalFlags.CommandTimeOut, "command-timeout", defaultCommandTimeOut, "timeout for short running command (excluding dial timeout)")
rootCmd.PersistentFlags().DurationVar(&globalFlags.KeepAliveTime, "keepalive-time", defaultKeepAliveTime, "keepalive time for client connections")
rootCmd.PersistentFlags().DurationVar(&globalFlags.KeepAliveTimeout, "keepalive-timeout", defaultKeepAliveTimeOut, "keepalive timeout for client connections")
+ rootCmd.PersistentFlags().IntVar(&globalFlags.MaxCallSendMsgSize, "max-request-bytes", 0, "client-side request send limit in bytes (if 0, it defaults to 2.0 MiB (2 * 1024 * 1024).)")
+ rootCmd.PersistentFlags().IntVar(&globalFlags.MaxCallRecvMsgSize, "max-recv-bytes", 0, "client-side response receive limit in bytes (if 0, it defaults to \"math.MaxInt32\")")
// TODO: secure by default when etcd enables secure gRPC by default.
rootCmd.PersistentFlags().BoolVar(&globalFlags.Insecure, "insecure-transport", true, "disable transport security for client connections")
@@ -93,6 +96,8 @@ func init() {
command.NewUserCommand(),
command.NewRoleCommand(),
command.NewCheckCommand(),
+ command.NewCompletionCommand(),
+ command.NewDowngradeCommand(),
)
}
@@ -109,7 +114,11 @@ func Start() error {
func MustStart() {
if err := Start(); err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
+ if rootCmd.SilenceErrors {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ } else {
+ os.Exit(cobrautl.ExitError)
+ }
}
}
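ctl.go now registers shell completion values for --write-out via RegisterFlagCompletionFunc. A self-contained sketch, using a hypothetical demo command rather than etcdctl's real root command, of the same registration pattern:

```go
// Sketch only: registering fixed completions for a flag value with cobra.
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	var format string

	cmd := &cobra.Command{
		Use: "demo",
		Run: func(_ *cobra.Command, _ []string) { fmt.Println("format:", format) },
	}
	cmd.Flags().StringVarP(&format, "write-out", "w", "simple", "set the output format")

	// Offer fixed completions for the flag value (e.g. `demo -w <TAB>`).
	if err := cmd.RegisterFlagCompletionFunc("write-out", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
		return []string{"fields", "json", "protobuf", "simple", "table"}, cobra.ShellCompDirectiveDefault
	}); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```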
diff --git a/etcdctl/go.mod b/etcdctl/go.mod
index 04001e5cee8..c5d7a64b653 100644
--- a/etcdctl/go.mod
+++ b/etcdctl/go.mod
@@ -1,35 +1,57 @@
module go.etcd.io/etcd/etcdctl/v3
-go 1.16
+go 1.23
+
+toolchain go1.23.4
require (
- github.com/bgentry/speakeasy v0.1.0
- github.com/dustin/go-humanize v1.0.0
+ github.com/bgentry/speakeasy v0.2.0
+ github.com/cheggaaa/pb/v3 v3.1.5
+ github.com/dustin/go-humanize v1.0.1
github.com/olekukonko/tablewriter v0.0.5
- github.com/spf13/cobra v1.1.3
+ github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
- github.com/urfave/cli v1.22.4
- go.etcd.io/etcd/api/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/client/pkg/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/client/v2 v2.305.0-alpha.0
- go.etcd.io/etcd/client/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/etcdutl/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0
- go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19
- golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba
- google.golang.org/grpc v1.37.0
- gopkg.in/cheggaaa/pb.v1 v1.0.28
+ github.com/stretchr/testify v1.10.0
+ go.etcd.io/etcd/api/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/client/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0
+ go.uber.org/zap v1.27.0
+ golang.org/x/time v0.8.0
+ google.golang.org/grpc v1.69.2
+)
+
+require (
+ github.com/VividCortex/ewma v1.2.0 // indirect
+ github.com/coreos/go-semver v0.3.1 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/fatih/color v1.18.0 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/mattn/go-colorable v0.1.13 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mattn/go-runewidth v0.0.16 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/rivo/uniseg v0.4.7 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ golang.org/x/net v0.33.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/text v0.21.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb // indirect
+ google.golang.org/protobuf v1.36.1 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
)
replace (
go.etcd.io/etcd/api/v3 => ../api
go.etcd.io/etcd/client/pkg/v3 => ../client/pkg
- go.etcd.io/etcd/client/v2 => ../client/v2
go.etcd.io/etcd/client/v3 => ../client/v3
- go.etcd.io/etcd/etcdutl/v3 => ../etcdutl
go.etcd.io/etcd/pkg/v3 => ../pkg
- go.etcd.io/etcd/raft/v3 => ../raft
- go.etcd.io/etcd/server/v3 => ../server
)
// Bad imports are sometimes causing attempts to pull that code.
diff --git a/etcdctl/go.sum b/etcdctl/go.sum
index 90c5668201b..cadbe35f017 100644
--- a/etcdctl/go.sum
+++ b/etcdctl/go.sum
@@ -1,520 +1,150 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
+github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
-github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs=
-github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.3.1 h1:7OO2CXWMYNDdaAzP51t4lCCZWwpQHmvPbm9sxWjm3So=
-github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/bgentry/speakeasy v0.2.0 h1:tgObeVOf8WAvtuAX6DhJ4xks4CFNwPDZiqzGqIHE51E=
+github.com/bgentry/speakeasy v0.2.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cheggaaa/pb/v3 v3.1.5 h1:QuuUzeM2WsAqG2gMqtzaWithDJv0i+i6UlnwSCI4QLk=
+github.com/cheggaaa/pb/v3 v3.1.5/go.mod h1:CrxkeghYTXi1lQBEI7jSn+3svI3cuc19haAj6jM60XI=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
-github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
-github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
-github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA=
-github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
-github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
+github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/urfave/cli v1.22.4 h1:u7tSpNPPswAFymm8IehJhy4uJMlUuU/GmqSkvJ1InXA=
-github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.6-0.20210426205525-9c92be978ae0 h1:FPuyGXkE6qPKJ71PyS0sdXuxUvYGXAXxV0XHpx0qjHE=
-go.etcd.io/bbolt v1.3.6-0.20210426205525-9c92be978ae0/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
-go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 h1:sO4WKdPAudZGKPcpZT4MJn6JaDmpyLrMPDGGyA1SttE=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
-go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g=
-go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
-go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8=
-go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
-go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw=
-go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
-go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
-go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
-go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw=
-go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19 h1:040c3dLNhgFQkoojH2AMpHCy4SrvhmxdU72d9GLGGE0=
-go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19/go.mod h1:aMfIlz3TDBfB0BwTCKFU1XbEmj9zevr5S5LcBr85MXw=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
+go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
+go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
+go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
+go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
+go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
+go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
-golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 h1:F5Gozwx4I1xtr/sr/8CFbb57iKi3297KFs0QDbGN60A=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
+golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb h1:B7GIB7sr443wZ/EAEl7VZjmh1V6qzkt5V+RYcUYtS1U=
+google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:E5//3O5ZIG2l71Xnt+P/CYUY8Bxs8E7WMoZ9tlcMbAY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb h1:3oy2tynMOP1QbTC0MsNNAV+Se8M2Bd0A5+x1QHyw+pI=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
+google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
+google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
+google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk=
-gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/etcdctl/main.go b/etcdctl/main.go
index 7fbd771f648..f4a17434eb2 100644
--- a/etcdctl/main.go
+++ b/etcdctl/main.go
@@ -16,55 +16,9 @@
package main
import (
- "fmt"
- "os"
-
- "go.etcd.io/etcd/etcdctl/v3/ctlv2"
"go.etcd.io/etcd/etcdctl/v3/ctlv3"
)
-const (
- apiEnv = "ETCDCTL_API"
-)
-
-/**
-mainWithError is fully analogous to main, but instead of signaling errors
-by os.Exit, it exposes the error explicitly, such that test-logic can intercept
-control to e.g. dump coverage data (even for test-for-failure scenarios).
-*/
-func mainWithError() error {
- apiv := os.Getenv(apiEnv)
-
- // unset apiEnv to avoid side-effect for future env and flag parsing.
- os.Unsetenv(apiEnv)
-
- if len(apiv) == 0 || apiv == "3" {
- return ctlv3.Start()
- }
-
- if apiv == "2" {
- return ctlv2.Start()
- }
-
- fmt.Fprintf(os.Stderr, "unsupported API version: %s\n", apiv)
- return fmt.Errorf("unsupported API version: %s", apiv)
-}
-
func main() {
- apiv := os.Getenv(apiEnv)
-
- // unset apiEnv to avoid side-effect for future env and flag parsing.
- os.Unsetenv(apiEnv)
- if len(apiv) == 0 || apiv == "3" {
- ctlv3.MustStart()
- return
- }
-
- if apiv == "2" {
- ctlv2.MustStart()
- return
- }
-
- fmt.Fprintf(os.Stderr, "unsupported API version: %v\n", apiv)
- os.Exit(1)
+ ctlv3.MustStart()
}
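With this change `main` no longer branches on `ETCDCTL_API`: it always hands control to the v3 command tree via `ctlv3.MustStart()`. A quick sanity check of that behaviour, assuming no additional version gating inside `ctlv3` itself:

```bash
# Both invocations now take the same v3 code path; there is no v2 mode to select.
etcdctl version
ETCDCTL_API=3 etcdctl version
```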
diff --git a/etcdctl/main_test.go b/etcdctl/main_test.go
deleted file mode 100644
index 604241cacaf..00000000000
--- a/etcdctl/main_test.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "log"
- "os"
- "strings"
- "testing"
-)
-
-func SplitTestArgs(args []string) (testArgs, appArgs []string) {
- for i, arg := range os.Args {
- switch {
- case strings.HasPrefix(arg, "-test."):
- testArgs = append(testArgs, arg)
- case i == 0:
- appArgs = append(appArgs, arg)
- testArgs = append(testArgs, arg)
- default:
- appArgs = append(appArgs, arg)
- }
- }
- return
-}
-
-// Empty test to avoid no-tests warning.
-func TestEmpty(t *testing.T) {}
-
-/**
- * The purpose of this "test" is to run etcdctl with code-coverage
- * collection turned on. The technique is documented here:
- *
- * https://www.cyphar.com/blog/post/20170412-golang-integration-coverage
- */
-func TestMain(m *testing.M) {
- // don't launch etcdctl when invoked via go test
- if strings.HasSuffix(os.Args[0], "etcdctl.test") {
- return
- }
-
- testArgs, appArgs := SplitTestArgs(os.Args)
-
- os.Args = appArgs
-
- err := mainWithError()
- if err != nil {
- log.Fatalf("etcdctl failed with: %v", err)
- }
-
- // This will generate coverage files:
- os.Args = testArgs
- m.Run()
-}
diff --git a/etcdctl/util/normalizer.go b/etcdctl/util/normalizer.go
new file mode 100644
index 00000000000..99e11e077ce
--- /dev/null
+++ b/etcdctl/util/normalizer.go
@@ -0,0 +1,49 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import "strings"
+
+const indentation = " "
+
+// Normalize normalizes a string:
+// 1. trim the leading and trailing space
+// 2. add an indentation before each line
+func Normalize(s string) string {
+ if len(s) == 0 {
+ return s
+ }
+ return normalizer{s}.trim().indent().string
+}
+
+type normalizer struct {
+ string
+}
+
+func (n normalizer) trim() normalizer {
+ n.string = strings.TrimSpace(n.string)
+ return n
+}
+
+func (n normalizer) indent() normalizer {
+ indentedLines := []string{}
+ for _, line := range strings.Split(n.string, "\n") {
+ trimmed := strings.TrimSpace(line)
+ indented := indentation + trimmed
+ indentedLines = append(indentedLines, indented)
+ }
+ n.string = strings.Join(indentedLines, "\n")
+ return n
+}
diff --git a/etcdutl/OWNERS b/etcdutl/OWNERS
new file mode 100644
index 00000000000..1c529743605
--- /dev/null
+++ b/etcdutl/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+ - area/etcdutl
diff --git a/etcdutl/README.md b/etcdutl/README.md
index 966a615110e..06cf85d7e9b 100644
--- a/etcdutl/README.md
+++ b/etcdutl/README.md
@@ -59,6 +59,10 @@ The snapshot restore options closely resemble to those used in the `etcd` comman
- skip-hash-check -- Ignore snapshot integrity hash value (required if copied from data directory)
+- bump-revision -- How much to increase the latest revision after restore
+
+- mark-compacted -- Mark the latest revision after restore as the point of scheduled compaction (required if --bump-revision > 0, disallowed otherwise)
+
#### Output
A new etcd data directory initialized with the snapshot.
@@ -67,17 +71,18 @@ A new etcd data directory initialized with the snapshot.
Save a snapshot, restore into a new 3 node cluster, and start the cluster:
```
-./etcdutl snapshot save snapshot.db
+# save snapshot
+./etcdctl snapshot save snapshot.db
# restore members
-bin/etcdutl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:12380 --name sshot1 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380'
-bin/etcdutl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:22380 --name sshot2 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380'
-bin/etcdutl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:32380 --name sshot3 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380'
+./etcdutl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:12380 --name sshot1 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380'
+./etcdutl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:22380 --name sshot2 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380'
+./etcdutl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:32380 --name sshot3 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380'
# launch members
-bin/etcd --name sshot1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 &
-bin/etcd --name sshot2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 &
-bin/etcd --name sshot3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 &
+./etcd --name sshot1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 &
+./etcd --name sshot2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 &
+./etcd --name sshot3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 &
```
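To exercise the new `--bump-revision` and `--mark-compacted` options documented above, a hypothetical single-member restore could look like the following (the bump value and data directory are illustrative):

```bash
# Restore while raising the latest revision and marking it compacted,
# e.g. when clients may already have observed revisions newer than the snapshot.
./etcdutl snapshot restore snapshot.db \
  --data-dir /var/lib/etcd-restored \
  --bump-revision 1000000000 \
  --mark-compacted
```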
 ### SNAPSHOT STATUS \<filename\>
@@ -114,25 +119,47 @@ Prints a line of JSON encoding the database hash, revision, total keys, and size
+----------+----------+------------+------------+
```
-### VERSION
+### HASHKV [options] \<filename\>
-Prints the version of etcdutl.
+HASHKV prints the hash of keys and values up to a given revision.
+
+#### Options
+
+- rev -- Revision number. Default is 0 which means the latest revision.
#### Output
-Prints etcd version and API version.
+##### Simple format
+
+Prints a humanized table of the KV hash, hash revision and compact revision.
+
+##### JSON format
+
+Prints a line of JSON encoding the KV hash, hash revision and compact revision.
#### Examples
+```bash
+./etcdutl hashkv file.db
+# 35c86e9b, 214, 150
+```
```bash
-./etcdutl version
-# etcdutl version: 3.1.0-alpha.0+git
-# API version: 3.1
+./etcdutl --write-out=json hashkv file.db
+# {"hash":902327963,"hashRevision":214,"compactRevision":150}
+```
+
+```bash
+./etcdutl --write-out=table hashkv file.db
++----------+---------------+------------------+
+| HASH | HASH REVISION | COMPACT REVISION |
++----------+---------------+------------------+
+| 35c86e9b | 214 | 150 |
++----------+---------------+------------------+
```
### VERSION
-Prints the version of etcdctl.
+Prints the version of etcdutl.
#### Output
@@ -140,6 +167,7 @@ Prints etcd version and API version.
#### Examples
+
```bash
./etcdutl version
# etcdutl version: 3.5.0
diff --git a/etcdutl/ctl.go b/etcdutl/ctl.go
index a044547c63c..8418729bfd1 100644
--- a/etcdutl/ctl.go
+++ b/etcdutl/ctl.go
@@ -17,6 +17,7 @@ package main
import (
"github.com/spf13/cobra"
+
"go.etcd.io/etcd/etcdutl/v3/etcdutl"
)
@@ -25,22 +26,25 @@ const (
cliDescription = "An administrative command line tool for etcd3."
)
-var (
- rootCmd = &cobra.Command{
- Use: cliName,
- Short: cliDescription,
- SuggestFor: []string{"etcdutl"},
- }
-)
+var rootCmd = &cobra.Command{
+ Use: cliName,
+ Short: cliDescription,
+ SuggestFor: []string{"etcdutl"},
+}
func init() {
rootCmd.PersistentFlags().StringVarP(&etcdutl.OutputFormat, "write-out", "w", "simple", "set the output format (fields, json, protobuf, simple, table)")
+ rootCmd.RegisterFlagCompletionFunc("write-out", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+ return []string{"fields", "json", "protobuf", "simple", "table"}, cobra.ShellCompDirectiveDefault
+ })
rootCmd.AddCommand(
- etcdutl.NewBackupCommand(),
etcdutl.NewDefragCommand(),
etcdutl.NewSnapshotCommand(),
+ etcdutl.NewHashKVCommand(),
etcdutl.NewVersionCommand(),
+ etcdutl.NewCompletionCommand(),
+ etcdutl.NewMigrateCommand(),
)
}
diff --git a/etcdutl/etcdutl/backup_command.go b/etcdutl/etcdutl/backup_command.go
deleted file mode 100644
index c09bcf14a79..00000000000
--- a/etcdutl/etcdutl/backup_command.go
+++ /dev/null
@@ -1,331 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdutl
-
-import (
- "os"
- "path"
- "regexp"
- "time"
-
- "github.com/spf13/cobra"
- "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
- "go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/pkg/v3/idutil"
- "go.etcd.io/etcd/pkg/v3/pbutil"
- "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/server/v3/datadir"
- "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
- "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
- "go.etcd.io/etcd/server/v3/etcdserver/cindex"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
- "go.etcd.io/etcd/server/v3/verify"
- "go.etcd.io/etcd/server/v3/wal"
- "go.etcd.io/etcd/server/v3/wal/walpb"
-
- bolt "go.etcd.io/bbolt"
- "go.uber.org/zap"
-)
-
-var (
- withV3 bool
- dataDir string
- backupDir string
- walDir string
- backupWalDir string
-)
-
-func NewBackupCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "backup",
- Short: "[legacy] offline backup of etcd directory",
-
- Long: "Prefer: `etcdctl snapshot save` instead.",
- Run: doBackup,
- }
- cmd.Flags().StringVar(&dataDir, "data-dir", "", "Path to the etcd data dir")
- cmd.Flags().StringVar(&walDir, "wal-dir", "", "Path to the etcd wal dir")
- cmd.Flags().StringVar(&backupDir, "backup-dir", "", "Path to the backup dir")
- cmd.Flags().StringVar(&backupWalDir, "backup-wal-dir", "", "Path to the backup wal dir")
- cmd.Flags().BoolVar(&withV3, "with-v3", true, "Backup v3 backend data")
- cmd.MarkFlagRequired("data-dir")
- cmd.MarkFlagRequired("backup-dir")
- return cmd
-}
-
-func doBackup(cmd *cobra.Command, args []string) {
- HandleBackup(withV3, dataDir, backupDir, walDir, backupWalDir)
-}
-
-type desiredCluster struct {
- clusterId types.ID
- nodeId types.ID
- members []*membership.Member
- confState raftpb.ConfState
-}
-
-func newDesiredCluster() desiredCluster {
- idgen := idutil.NewGenerator(0, time.Now())
- nodeID := idgen.Next()
- clusterID := idgen.Next()
-
- return desiredCluster{
- clusterId: types.ID(clusterID),
- nodeId: types.ID(nodeID),
- members: []*membership.Member{
- {
- ID: types.ID(nodeID),
- Attributes: membership.Attributes{
- Name: "etcdctl-v2-backup",
- },
- RaftAttributes: membership.RaftAttributes{
- PeerURLs: []string{"http://use-flag--force-new-cluster:2080"},
- }}},
- confState: raftpb.ConfState{Voters: []uint64{nodeID}},
- }
-}
-
-// HandleBackup handles a request that intends to do a backup.
-func HandleBackup(withV3 bool, srcDir string, destDir string, srcWAL string, destWAL string) error {
- lg := GetLogger()
-
- srcSnap := datadir.ToSnapDir(srcDir)
- destSnap := datadir.ToSnapDir(destDir)
-
- if srcWAL == "" {
- srcWAL = datadir.ToWalDir(srcDir)
- }
-
- if destWAL == "" {
- destWAL = datadir.ToWalDir(destDir)
- }
-
- if err := fileutil.CreateDirAll(destSnap); err != nil {
- lg.Fatal("failed creating backup snapshot dir", zap.String("dest-snap", destSnap), zap.Error(err))
- }
-
- destDbPath := datadir.ToBackendFileName(destDir)
- srcDbPath := datadir.ToBackendFileName(srcDir)
- desired := newDesiredCluster()
-
- walsnap := saveSnap(lg, destSnap, srcSnap, &desired)
- metadata, state, ents := translateWAL(lg, srcWAL, walsnap, withV3)
- saveDB(lg, destDbPath, srcDbPath, state.Commit, state.Term, &desired, withV3)
-
- neww, err := wal.Create(lg, destWAL, pbutil.MustMarshal(&metadata))
- if err != nil {
- lg.Fatal("wal.Create failed", zap.Error(err))
- }
- defer neww.Close()
- if err := neww.Save(state, ents); err != nil {
- lg.Fatal("wal.Save failed ", zap.Error(err))
- }
- if err := neww.SaveSnapshot(walsnap); err != nil {
- lg.Fatal("SaveSnapshot", zap.Error(err))
- }
-
- verify.MustVerifyIfEnabled(verify.Config{
- Logger: lg,
- DataDir: destDir,
- ExactIndex: false,
- })
-
- return nil
-}
-
-func saveSnap(lg *zap.Logger, destSnap, srcSnap string, desired *desiredCluster) (walsnap walpb.Snapshot) {
- ss := snap.New(lg, srcSnap)
- snapshot, err := ss.Load()
- if err != nil && err != snap.ErrNoSnapshot {
- lg.Fatal("saveSnap(Snapshoter.Load) failed", zap.Error(err))
- }
- if snapshot != nil {
- walsnap.Index, walsnap.Term, walsnap.ConfState = snapshot.Metadata.Index, snapshot.Metadata.Term, &desired.confState
- newss := snap.New(lg, destSnap)
- snapshot.Metadata.ConfState = desired.confState
- snapshot.Data = mustTranslateV2store(lg, snapshot.Data, desired)
- if err = newss.SaveSnap(*snapshot); err != nil {
- lg.Fatal("saveSnap(Snapshoter.SaveSnap) failed", zap.Error(err))
- }
- }
- return walsnap
-}
-
-// mustTranslateV2store processes storeData such that they match 'desiredCluster'.
-// In particular the method overrides membership information.
-func mustTranslateV2store(lg *zap.Logger, storeData []byte, desired *desiredCluster) []byte {
- st := v2store.New()
- if err := st.Recovery(storeData); err != nil {
- lg.Panic("cannot translate v2store", zap.Error(err))
- }
-
- raftCluster := membership.NewClusterFromMembers(lg, desired.clusterId, desired.members)
- raftCluster.SetID(desired.nodeId, desired.clusterId)
- raftCluster.SetStore(st)
- raftCluster.PushMembershipToStorage()
-
- outputData, err := st.Save()
- if err != nil {
- lg.Panic("cannot save v2store", zap.Error(err))
- }
- return outputData
-}
-
-func translateWAL(lg *zap.Logger, srcWAL string, walsnap walpb.Snapshot, v3 bool) (etcdserverpb.Metadata, raftpb.HardState, []raftpb.Entry) {
- w, err := wal.OpenForRead(lg, srcWAL, walsnap)
- if err != nil {
- lg.Fatal("wal.OpenForRead failed", zap.Error(err))
- }
- defer w.Close()
- wmetadata, state, ents, err := w.ReadAll()
- switch err {
- case nil:
- case wal.ErrSnapshotNotFound:
- lg.Warn("failed to find the match snapshot record", zap.Any("walsnap", walsnap), zap.String("srcWAL", srcWAL))
- lg.Warn("etcdctl will add it back. Start auto fixing...")
- default:
- lg.Fatal("unexpected error while reading WAL", zap.Error(err))
- }
-
- re := path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes")
- memberAttrRE := regexp.MustCompile(re)
-
- for i := 0; i < len(ents); i++ {
-
- // Replacing WAL entries with 'dummy' entries allows to avoid
- // complicated entries shifting and risk of other data (like consistent_index)
- // running out of sync.
- // Also moving entries and computing offsets would get complicated if
- // TERM changes (so there are superflous entries from previous term).
-
- if ents[i].Type == raftpb.EntryConfChange {
- lg.Info("ignoring EntryConfChange raft entry")
- raftEntryToNoOp(&ents[i])
- continue
- }
-
- var raftReq etcdserverpb.InternalRaftRequest
- var v2Req *etcdserverpb.Request
- if pbutil.MaybeUnmarshal(&raftReq, ents[i].Data) {
- v2Req = raftReq.V2
- } else {
- v2Req = &etcdserverpb.Request{}
- pbutil.MustUnmarshal(v2Req, ents[i].Data)
- }
-
- if v2Req != nil && v2Req.Method == "PUT" && memberAttrRE.MatchString(v2Req.Path) {
- lg.Info("ignoring member attribute update on",
- zap.Stringer("entry", &ents[i]),
- zap.String("v2Req.Path", v2Req.Path))
- raftEntryToNoOp(&ents[i])
- continue
- }
-
- if v2Req != nil {
- lg.Debug("preserving log entry", zap.Stringer("entry", &ents[i]))
- }
-
- if raftReq.ClusterMemberAttrSet != nil {
- lg.Info("ignoring cluster_member_attr_set")
- raftEntryToNoOp(&ents[i])
- continue
- }
-
- if v3 || raftReq.Header == nil {
- lg.Debug("preserving log entry", zap.Stringer("entry", &ents[i]))
- continue
- }
- lg.Info("ignoring v3 raft entry")
- raftEntryToNoOp(&ents[i])
- }
- var metadata etcdserverpb.Metadata
- pbutil.MustUnmarshal(&metadata, wmetadata)
- return metadata, state, ents
-}
-
-func raftEntryToNoOp(entry *raftpb.Entry) {
- // Empty (dummy) entries are send by RAFT when new leader is getting elected.
- // They do not cary any change to data-model so its safe to replace entries
- // to be ignored with them.
- *entry = raftpb.Entry{Term: entry.Term, Index: entry.Index, Type: raftpb.EntryNormal, Data: nil}
-}
-
-// saveDB copies the v3 backend and strips cluster information.
-func saveDB(lg *zap.Logger, destDB, srcDB string, idx uint64, term uint64, desired *desiredCluster, v3 bool) {
-
- // open src db to safely copy db state
- if v3 {
- var src *bolt.DB
- ch := make(chan *bolt.DB, 1)
- go func() {
- db, err := bolt.Open(srcDB, 0444, &bolt.Options{ReadOnly: true})
- if err != nil {
- lg.Fatal("bolt.Open FAILED", zap.Error(err))
- }
- ch <- db
- }()
- select {
- case src = <-ch:
- case <-time.After(time.Second):
- lg.Fatal("timed out waiting to acquire lock on", zap.String("srcDB", srcDB))
- src = <-ch
- }
- defer src.Close()
-
- tx, err := src.Begin(false)
- if err != nil {
- lg.Fatal("bbolt.BeginTx failed", zap.Error(err))
- }
-
- // copy srcDB to destDB
- dest, err := os.Create(destDB)
- if err != nil {
- lg.Fatal("creation of destination file failed", zap.String("dest", destDB), zap.Error(err))
- }
- if _, err := tx.WriteTo(dest); err != nil {
- lg.Fatal("bbolt write to destination file failed", zap.String("dest", destDB), zap.Error(err))
- }
- dest.Close()
- if err := tx.Rollback(); err != nil {
- lg.Fatal("bbolt tx.Rollback failed", zap.String("dest", destDB), zap.Error(err))
- }
- }
-
- be := backend.NewDefaultBackend(destDB)
- defer be.Close()
-
- if err := membership.TrimClusterFromBackend(be); err != nil {
- lg.Fatal("bbolt tx.Membership failed", zap.Error(err))
- }
-
- raftCluster := membership.NewClusterFromMembers(lg, desired.clusterId, desired.members)
- raftCluster.SetID(desired.nodeId, desired.clusterId)
- raftCluster.SetBackend(be)
- raftCluster.PushMembershipToStorage()
-
- if !v3 {
- tx := be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
- cindex.UnsafeCreateMetaBucket(tx)
- cindex.UnsafeUpdateConsistentIndex(tx, idx, term, false)
- } else {
- // Thanks to translateWAL not moving entries, but just replacing them with
- // 'empty', there is no need to update the consistency index.
- }
-
-}
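The legacy offline `backup` command is removed here; its own help text already pointed at `etcdctl snapshot save` as the replacement. A sketch of the replacement flow, with the endpoint and paths as illustrative values:

```bash
# Take a snapshot from a live member, then restore it offline with etcdutl.
etcdctl --endpoints=http://127.0.0.1:2379 snapshot save snapshot.db
./etcdutl snapshot restore snapshot.db --data-dir /var/lib/etcd-restored
```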
diff --git a/etcdutl/etcdutl/common.go b/etcdutl/etcdutl/common.go
index 4b4a198aaf8..d54827d0457 100644
--- a/etcdutl/etcdutl/common.go
+++ b/etcdutl/etcdutl/common.go
@@ -15,13 +15,15 @@
package etcdutl
import (
- "go.etcd.io/etcd/pkg/v3/cobrautl"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
+
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
+ "go.etcd.io/etcd/pkg/v3/cobrautl"
)
func GetLogger() *zap.Logger {
- config := zap.NewProductionConfig()
+ config := logutil.DefaultZapLoggerConfig
config.Encoding = "console"
config.EncoderConfig.EncodeTime = zapcore.RFC3339TimeEncoder
lg, err := config.Build()
diff --git a/etcdutl/etcdutl/completion_commmand.go b/etcdutl/etcdutl/completion_commmand.go
new file mode 100644
index 00000000000..b723337e1a8
--- /dev/null
+++ b/etcdutl/etcdutl/completion_commmand.go
@@ -0,0 +1,84 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdutl
+
+import (
+ "os"
+
+ "github.com/spf13/cobra"
+)
+
+func NewCompletionCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "completion [bash|zsh|fish|powershell]",
+ Short: "Generate completion script",
+ Long: `To load completions:
+
+Bash:
+
+ $ source <(etcdutl completion bash)
+
+ # To load completions for each session, execute once:
+ # Linux:
+ $ etcdutl completion bash > /etc/bash_completion.d/etcdutl
+ # macOS:
+ $ etcdutl completion bash > /usr/local/etc/bash_completion.d/etcdutl
+
+Zsh:
+
+ # If shell completion is not already enabled in your environment,
+ # you will need to enable it. You can execute the following once:
+
+ $ echo "autoload -U compinit; compinit" >> ~/.zshrc
+
+ # To load completions for each session, execute once:
+ $ etcdutl completion zsh > "${fpath[1]}/_etcdutl"
+
+ # You will need to start a new shell for this setup to take effect.
+
+fish:
+
+ $ etcdutl completion fish | source
+
+ # To load completions for each session, execute once:
+ $ etcdutl completion fish > ~/.config/fish/completions/etcdutl.fish
+
+PowerShell:
+
+ PS> etcdutl completion powershell | Out-String | Invoke-Expression
+
+ # To load completions for every new session, run:
+ PS> etcdutl completion powershell > etcdutl.ps1
+ # and source this file from your PowerShell profile.
+`,
+ DisableFlagsInUseLine: true,
+ ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
+ Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
+ Run: func(cmd *cobra.Command, args []string) {
+ switch args[0] {
+ case "bash":
+ cmd.Root().GenBashCompletion(os.Stdout)
+ case "zsh":
+ cmd.Root().GenZshCompletion(os.Stdout)
+ case "fish":
+ cmd.Root().GenFishCompletion(os.Stdout, true)
+ case "powershell":
+ cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
+ }
+ },
+ }
+
+ return cmd
+}
diff --git a/etcdutl/etcdutl/defrag_command.go b/etcdutl/etcdutl/defrag_command.go
index 1660dd7071a..5135f5f5eb8 100644
--- a/etcdutl/etcdutl/defrag_command.go
+++ b/etcdutl/etcdutl/defrag_command.go
@@ -20,14 +20,13 @@ import (
"time"
"github.com/spf13/cobra"
+
"go.etcd.io/etcd/pkg/v3/cobrautl"
- "go.etcd.io/etcd/server/v3/datadir"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/datadir"
)
-var (
- defragDataDir string
-)
+var defragDataDir string
// NewDefragCommand returns the cobra command for "Defrag".
func NewDefragCommand() *cobra.Command {
@@ -38,6 +37,7 @@ func NewDefragCommand() *cobra.Command {
}
cmd.Flags().StringVar(&defragDataDir, "data-dir", "", "Required. Defragments a data directory not in use by etcd.")
cmd.MarkFlagRequired("data-dir")
+ cmd.MarkFlagDirname("data-dir")
return cmd
}
@@ -45,7 +45,7 @@ func defragCommandFunc(cmd *cobra.Command, args []string) {
err := DefragData(defragDataDir)
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError,
- fmt.Errorf("Failed to defragment etcd data[%s] (%v)", defragDataDir, err))
+ fmt.Errorf("Failed to defragment etcd data[%s] (%w)", defragDataDir, err))
}
}
@@ -56,7 +56,7 @@ func DefragData(dataDir string) error {
dbDir := datadir.ToBackendFileName(dataDir)
go func() {
defer close(bch)
- cfg := backend.DefaultBackendConfig()
+ cfg := backend.DefaultBackendConfig(lg)
cfg.Logger = lg
cfg.Path = dbDir
be = backend.New(cfg)
@@ -65,7 +65,7 @@ func DefragData(dataDir string) error {
case <-bch:
case <-time.After(time.Second):
fmt.Fprintf(os.Stderr, "waiting for etcd to close and release its lock on %q. "+
- "To defrag a running etcd instance, omit --data-dir.\n", dbDir)
+ "To defrag a running etcd instance, use `etcdctl defrag` instead.\n", dbDir)
<-bch
}
return be.Defrag()
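As the updated wait message above notes, `etcdutl defrag` only operates on a data directory that no etcd process holds open; a running member should be defragmented with `etcdctl defrag`. A minimal offline example (the path is illustrative):

```bash
# Defragment an offline data dir; if etcd still holds the bbolt lock,
# the command prints a warning and waits for the lock to be released.
./etcdutl defrag --data-dir /var/lib/etcd
```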
diff --git a/etcdutl/etcdutl/hashkv_command.go b/etcdutl/etcdutl/hashkv_command.go
new file mode 100644
index 00000000000..20c47a51452
--- /dev/null
+++ b/etcdutl/etcdutl/hashkv_command.go
@@ -0,0 +1,72 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdutl
+
+import (
+ "github.com/spf13/cobra"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/pkg/v3/cobrautl"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+)
+
+var hashKVRevision int64
+
+// NewHashKVCommand returns the cobra command for "hashkv".
+func NewHashKVCommand() *cobra.Command {
+ cmd := &cobra.Command{
+		Use:   "hashkv <filename>",
+ Short: "Prints the KV history hash of a given file",
+ Args: cobra.ExactArgs(1),
+ Run: hashKVCommandFunc,
+ }
+ cmd.Flags().Int64Var(&hashKVRevision, "rev", 0, "maximum revision to hash (default: latest revision)")
+ return cmd
+}
+
+func hashKVCommandFunc(cmd *cobra.Command, args []string) {
+ printer := initPrinterFromCmd(cmd)
+
+ ds, err := calculateHashKV(args[0], hashKVRevision)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+ printer.DBHashKV(ds)
+}
+
+type HashKV struct {
+ Hash uint32 `json:"hash"`
+ HashRevision int64 `json:"hashRevision"`
+ CompactRevision int64 `json:"compactRevision"`
+}
+
+func calculateHashKV(dbPath string, rev int64) (HashKV, error) {
+ cfg := backend.DefaultBackendConfig(zap.NewNop())
+ cfg.Path = dbPath
+ b := backend.New(cfg)
+ st := mvcc.NewStore(zap.NewNop(), b, nil, mvcc.StoreConfig{})
+ hst := mvcc.NewHashStorage(zap.NewNop(), st)
+
+ h, _, err := hst.HashByRev(rev)
+ if err != nil {
+ return HashKV{}, err
+ }
+ return HashKV{
+ Hash: h.Hash,
+ HashRevision: h.Revision,
+ CompactRevision: h.CompactRevision,
+ }, nil
+}
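A hypothetical `hashkv` invocation combining the `--rev` flag registered above with the root command's `--write-out` flag (the backend path and revision are illustrative); the JSON keys come from the `HashKV` struct tags:

```bash
./etcdutl hashkv --rev 214 --write-out=json /var/lib/etcd/member/snap/db
# => {"hash":...,"hashRevision":...,"compactRevision":...}
```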
diff --git a/etcdutl/etcdutl/migrate_command.go b/etcdutl/etcdutl/migrate_command.go
new file mode 100644
index 00000000000..15188984f85
--- /dev/null
+++ b/etcdutl/etcdutl/migrate_command.go
@@ -0,0 +1,158 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdutl
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/spf13/cobra"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/pkg/v3/cobrautl"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/datadir"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ "go.etcd.io/etcd/server/v3/storage/wal"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+)
+
+// NewMigrateCommand returns the cobra command for "migrate".
+func NewMigrateCommand() *cobra.Command {
+ o := newMigrateOptions()
+ cmd := &cobra.Command{
+ Use: "migrate",
+ Short: "Migrates schema of etcd data dir files to make them compatible with different etcd version",
+ Run: func(cmd *cobra.Command, args []string) {
+ cfg, err := o.Config()
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
+ }
+ err = migrateCommandFunc(cfg)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+ },
+ }
+ o.AddFlags(cmd)
+ return cmd
+}
+
+type migrateOptions struct {
+ dataDir string
+ targetVersion string
+ force bool
+}
+
+func newMigrateOptions() *migrateOptions {
+ return &migrateOptions{}
+}
+
+func (o *migrateOptions) AddFlags(cmd *cobra.Command) {
+ cmd.Flags().StringVar(&o.dataDir, "data-dir", o.dataDir, "Path to the etcd data dir")
+ cmd.MarkFlagRequired("data-dir")
+ cmd.MarkFlagDirname("data-dir")
+
+ cmd.Flags().StringVar(&o.targetVersion, "target-version", o.targetVersion, `Target etcd version to migrate contents of data dir. Minimal value 3.5. Format "X.Y" for example 3.6.`)
+ cmd.MarkFlagRequired("target-version")
+
+ cmd.Flags().BoolVar(&o.force, "force", o.force, "Ignore migration failure and forcefully override storage version. Not recommended.")
+}
+
+func (o *migrateOptions) Config() (*migrateConfig, error) {
+ c := &migrateConfig{
+ force: o.force,
+ lg: GetLogger(),
+ }
+ var err error
+ dotCount := strings.Count(o.targetVersion, ".")
+ if dotCount != 1 {
+ return nil, fmt.Errorf(`wrong target version format, expected "X.Y", got %q`, o.targetVersion)
+ }
+ c.targetVersion, err = semver.NewVersion(o.targetVersion + ".0")
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse target version: %w", err)
+ }
+ if c.targetVersion.LessThan(version.V3_5) {
+ return nil, fmt.Errorf(`target version %q not supported. Minimal "3.5"`, storageVersionToString(c.targetVersion))
+ }
+
+ dbPath := datadir.ToBackendFileName(o.dataDir)
+ c.be = backend.NewDefaultBackend(GetLogger(), dbPath)
+
+ walPath := datadir.ToWALDir(o.dataDir)
+ w, err := wal.OpenForRead(c.lg, walPath, walpb.Snapshot{})
+ if err != nil {
+ return nil, fmt.Errorf(`failed to open wal: %w`, err)
+ }
+ defer w.Close()
+ c.walVersion, err = wal.ReadWALVersion(w)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to read wal: %w`, err)
+ }
+
+ return c, nil
+}
+
+type migrateConfig struct {
+ lg *zap.Logger
+ be backend.Backend
+ targetVersion *semver.Version
+ walVersion schema.WALVersion
+ force bool
+}
+
+func migrateCommandFunc(c *migrateConfig) error {
+ defer c.be.Close()
+ tx := c.be.BatchTx()
+ current, err := schema.DetectSchemaVersion(c.lg, c.be.ReadTx())
+ if err != nil {
+ c.lg.Error("failed to detect storage version. Please make sure you are using data dir from etcd v3.5 and older")
+ return err
+ }
+ if current == *c.targetVersion {
+		c.lg.Info("storage version up-to-date", zap.String("storage-version", storageVersionToString(&current)))
+ return nil
+ }
+ err = schema.Migrate(c.lg, tx, c.walVersion, *c.targetVersion)
+ if err != nil {
+ if !c.force {
+ return err
+ }
+ c.lg.Info("normal migrate failed, trying with force", zap.Error(err))
+ migrateForce(c.lg, tx, c.targetVersion)
+ }
+ c.be.ForceCommit()
+ return nil
+}
+
+func migrateForce(lg *zap.Logger, tx backend.BatchTx, target *semver.Version) {
+ tx.LockOutsideApply()
+ defer tx.Unlock()
+ // Storage version is only supported since v3.6
+ if target.LessThan(version.V3_6) {
+ schema.UnsafeClearStorageVersion(tx)
+ lg.Warn("forcefully cleared storage version")
+ } else {
+ schema.UnsafeSetStorageVersion(tx, target)
+ lg.Warn("forcefully set storage version", zap.String("storage-version", storageVersionToString(target)))
+ }
+}
+
+func storageVersionToString(ver *semver.Version) string {
+ return fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
+}
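A hypothetical offline invocation of the new `migrate` command using the flags registered above (the data directory is illustrative; `--target-version` takes an `X.Y` value such as `3.6`):

```bash
# Migrate the storage schema of an offline data dir to the 3.6 layout.
# Add --force only to override the storage version when a normal migration fails.
./etcdutl migrate --data-dir /var/lib/etcd --target-version 3.6
```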
diff --git a/etcdutl/etcdutl/printer.go b/etcdutl/etcdutl/printer.go
index ad4e60246e6..739928dc5ff 100644
--- a/etcdutl/etcdutl/printer.go
+++ b/etcdutl/etcdutl/printer.go
@@ -18,19 +18,18 @@ import (
"errors"
"fmt"
+ "github.com/dustin/go-humanize"
"github.com/spf13/cobra"
+
"go.etcd.io/etcd/etcdutl/v3/snapshot"
"go.etcd.io/etcd/pkg/v3/cobrautl"
-
- "github.com/dustin/go-humanize"
)
-var (
- OutputFormat string
-)
+var OutputFormat string
type printer interface {
DBStatus(snapshot.Status)
+ DBHashKV(HashKV)
}
func NewPrinter(printerType string) printer {
@@ -51,27 +50,39 @@ func NewPrinter(printerType string) printer {
type printerRPC struct {
printer
- p func(interface{})
+ p func(any)
}
type printerUnsupported struct{ printerRPC }
func newPrinterUnsupported(n string) printer {
- f := func(interface{}) {
+ f := func(any) {
cobrautl.ExitWithError(cobrautl.ExitBadFeature, errors.New(n+" not supported as output format"))
}
return &printerUnsupported{printerRPC{nil, f}}
}
func (p *printerUnsupported) DBStatus(snapshot.Status) { p.p(nil) }
+func (p *printerUnsupported) DBHashKV(HashKV) { p.p(nil) }
func makeDBStatusTable(ds snapshot.Status) (hdr []string, rows [][]string) {
- hdr = []string{"hash", "revision", "total keys", "total size"}
+ hdr = []string{"hash", "revision", "total keys", "total size", "version"}
rows = append(rows, []string{
fmt.Sprintf("%x", ds.Hash),
fmt.Sprint(ds.Revision),
fmt.Sprint(ds.TotalKey),
humanize.Bytes(uint64(ds.TotalSize)),
+ ds.Version,
+ })
+ return hdr, rows
+}
+
+func makeDBHashKVTable(ds HashKV) (hdr []string, rows [][]string) {
+ hdr = []string{"hash", "hash revision", "compact revision"}
+ rows = append(rows, []string{
+ fmt.Sprint(ds.Hash),
+ fmt.Sprint(ds.HashRevision),
+ fmt.Sprint(ds.CompactRevision),
})
return hdr, rows
}
diff --git a/etcdutl/etcdutl/printer_fields.go b/etcdutl/etcdutl/printer_fields.go
index 374312cf5d9..68f50014162 100644
--- a/etcdutl/etcdutl/printer_fields.go
+++ b/etcdutl/etcdutl/printer_fields.go
@@ -27,4 +27,11 @@ func (p *fieldsPrinter) DBStatus(r snapshot.Status) {
fmt.Println(`"Revision" :`, r.Revision)
fmt.Println(`"Keys" :`, r.TotalKey)
fmt.Println(`"Size" :`, r.TotalSize)
+ fmt.Println(`"Version" :`, r.Version)
+}
+
+func (p *fieldsPrinter) DBHashKV(r HashKV) {
+ fmt.Println(`"Hash" :`, r.Hash)
+ fmt.Println(`"Hash revision" :`, r.HashRevision)
+ fmt.Println(`"Compact revision" :`, r.CompactRevision)
}
diff --git a/etcdutl/etcdutl/printer_json.go b/etcdutl/etcdutl/printer_json.go
index 38fe3e4548e..ffe3a35f4c4 100644
--- a/etcdutl/etcdutl/printer_json.go
+++ b/etcdutl/etcdutl/printer_json.go
@@ -33,9 +33,10 @@ func newJSONPrinter() printer {
}
func (p *jsonPrinter) DBStatus(r snapshot.Status) { printJSON(r) }
+func (p *jsonPrinter) DBHashKV(r HashKV) { printJSON(r) }
// !!! Share ??
-func printJSON(v interface{}) {
+func printJSON(v any) {
b, err := json.Marshal(v)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
diff --git a/etcdutl/etcdutl/printer_protobuf.go b/etcdutl/etcdutl/printer_protobuf.go
index 0a9003b475d..f640a391edc 100644
--- a/etcdutl/etcdutl/printer_protobuf.go
+++ b/etcdutl/etcdutl/printer_protobuf.go
@@ -33,7 +33,7 @@ func newPBPrinter() printer {
}
}
-func printPB(v interface{}) {
+func printPB(v any) {
m, ok := v.(pbMarshal)
if !ok {
cobrautl.ExitWithError(cobrautl.ExitBadFeature, fmt.Errorf("marshal unsupported for type %T (%v)", v, v))
diff --git a/etcdutl/etcdutl/printer_simple.go b/etcdutl/etcdutl/printer_simple.go
index 306ebf0c7f3..b55c501dcf5 100644
--- a/etcdutl/etcdutl/printer_simple.go
+++ b/etcdutl/etcdutl/printer_simple.go
@@ -21,8 +21,7 @@ import (
"go.etcd.io/etcd/etcdutl/v3/snapshot"
)
-type simplePrinter struct {
-}
+type simplePrinter struct{}
func (s *simplePrinter) DBStatus(ds snapshot.Status) {
_, rows := makeDBStatusTable(ds)
@@ -30,3 +29,10 @@ func (s *simplePrinter) DBStatus(ds snapshot.Status) {
fmt.Println(strings.Join(row, ", "))
}
}
+
+func (s *simplePrinter) DBHashKV(ds HashKV) {
+ _, rows := makeDBHashKVTable(ds)
+ for _, row := range rows {
+ fmt.Println(strings.Join(row, ", "))
+ }
+}
diff --git a/etcdutl/etcdutl/printer_table.go b/etcdutl/etcdutl/printer_table.go
index 2f8f81d4e6a..ec66ea38f76 100644
--- a/etcdutl/etcdutl/printer_table.go
+++ b/etcdutl/etcdutl/printer_table.go
@@ -17,9 +17,9 @@ package etcdutl
import (
"os"
- "go.etcd.io/etcd/etcdutl/v3/snapshot"
-
"github.com/olekukonko/tablewriter"
+
+ "go.etcd.io/etcd/etcdutl/v3/snapshot"
)
type tablePrinter struct{ printer }
@@ -34,3 +34,14 @@ func (tp *tablePrinter) DBStatus(r snapshot.Status) {
table.SetAlignment(tablewriter.ALIGN_RIGHT)
table.Render()
}
+
+func (tp *tablePrinter) DBHashKV(r HashKV) {
+ hdr, rows := makeDBHashKVTable(r)
+ table := tablewriter.NewWriter(os.Stdout)
+ table.SetHeader(hdr)
+ for _, row := range rows {
+ table.Append(row)
+ }
+ table.SetAlignment(tablewriter.ALIGN_RIGHT)
+ table.Render()
+}
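
The table output path relies only on tablewriter calls that already appear above; here is a self-contained sketch with placeholder values (the numbers are hypothetical, not taken from a real database).

```go
package main

import (
	"os"

	"github.com/olekukonko/tablewriter"
)

func main() {
	// Render the same three columns the new DBHashKV table printer emits,
	// using placeholder values.
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"hash", "hash revision", "compact revision"})
	table.Append([]string{"3700027710", "12", "5"})
	table.SetAlignment(tablewriter.ALIGN_RIGHT)
	table.Render()
}
```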
diff --git a/etcdutl/etcdutl/snapshot_command.go b/etcdutl/etcdutl/snapshot_command.go
index 94ab2a5ac92..52cb4a1bbdd 100644
--- a/etcdutl/etcdutl/snapshot_command.go
+++ b/etcdutl/etcdutl/snapshot_command.go
@@ -18,11 +18,12 @@ import (
"fmt"
"strings"
+ "github.com/spf13/cobra"
+
"go.etcd.io/etcd/etcdutl/v3/snapshot"
"go.etcd.io/etcd/pkg/v3/cobrautl"
- "go.etcd.io/etcd/server/v3/datadir"
-
- "github.com/spf13/cobra"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/datadir"
)
const (
@@ -34,10 +35,13 @@ var (
restoreCluster string
restoreClusterToken string
restoreDataDir string
- restoreWalDir string
+ restoreWALDir string
restorePeerURLs string
restoreName string
skipHashCheck bool
+ initialMmapSize = backend.InitialMmapSize
+ markCompacted bool
+ revisionBump uint64
)
// NewSnapshotCommand returns the cobra command for "snapshot".
@@ -46,27 +50,11 @@ func NewSnapshotCommand() *cobra.Command {
Use: "snapshot ",
Short: "Manages etcd node snapshots",
}
- cmd.AddCommand(NewSnapshotSaveCommand())
cmd.AddCommand(NewSnapshotRestoreCommand())
cmd.AddCommand(newSnapshotStatusCommand())
return cmd
}
-func NewSnapshotSaveCommand() *cobra.Command {
- return &cobra.Command{
- Use: "save ",
- Short: "Stores an etcd node backend snapshot to a given file",
- Hidden: true,
- DisableFlagsInUseLine: true,
- Run: func(cmd *cobra.Command, args []string) {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs,
- fmt.Errorf("In order to download snapshot use: "+
- "`etcdctl snapshot save ...`"))
- },
- Deprecated: "Use `etcdctl snapshot save` to download snapshot",
- }
-}
-
func newSnapshotStatusCommand() *cobra.Command {
return &cobra.Command{
Use: "status ",
@@ -85,14 +73,18 @@ func NewSnapshotRestoreCommand() *cobra.Command {
Run: snapshotRestoreCommandFunc,
}
cmd.Flags().StringVar(&restoreDataDir, "data-dir", "", "Path to the output data directory")
- cmd.Flags().StringVar(&restoreWalDir, "wal-dir", "", "Path to the WAL directory (use --data-dir if none given)")
+ cmd.Flags().StringVar(&restoreWALDir, "wal-dir", "", "Path to the WAL directory (use --data-dir if none given)")
cmd.Flags().StringVar(&restoreCluster, "initial-cluster", initialClusterFromName(defaultName), "Initial cluster configuration for restore bootstrap")
cmd.Flags().StringVar(&restoreClusterToken, "initial-cluster-token", "etcd-cluster", "Initial cluster token for the etcd cluster during restore bootstrap")
cmd.Flags().StringVar(&restorePeerURLs, "initial-advertise-peer-urls", defaultInitialAdvertisePeerURLs, "List of this member's peer URLs to advertise to the rest of the cluster")
cmd.Flags().StringVar(&restoreName, "name", defaultName, "Human-readable name for this member")
cmd.Flags().BoolVar(&skipHashCheck, "skip-hash-check", false, "Ignore snapshot integrity hash value (required if copied from data directory)")
+ cmd.Flags().Uint64Var(&initialMmapSize, "initial-memory-map-size", initialMmapSize, "Initial memory map size of the database in bytes. It uses the default value if not defined or defined to 0")
+ cmd.Flags().Uint64Var(&revisionBump, "bump-revision", 0, "How much to increase the latest revision after restore")
+ cmd.Flags().BoolVar(&markCompacted, "mark-compacted", false, "Mark the latest revision after restore as the point of scheduled compaction (required if --bump-revision > 0, disallowed otherwise)")
- cmd.MarkFlagRequired("data-dir")
+ cmd.MarkFlagDirname("data-dir")
+ cmd.MarkFlagDirname("wal-dir")
return cmd
}
@@ -114,31 +106,40 @@ func SnapshotStatusCommandFunc(cmd *cobra.Command, args []string) {
}
func snapshotRestoreCommandFunc(_ *cobra.Command, args []string) {
- SnapshotRestoreCommandFunc(restoreCluster, restoreClusterToken, restoreDataDir, restoreWalDir,
- restorePeerURLs, restoreName, skipHashCheck, args)
+ SnapshotRestoreCommandFunc(restoreCluster, restoreClusterToken, restoreDataDir, restoreWALDir,
+ restorePeerURLs, restoreName, skipHashCheck, initialMmapSize, revisionBump, markCompacted, args)
}
func SnapshotRestoreCommandFunc(restoreCluster string,
restoreClusterToken string,
restoreDataDir string,
- restoreWalDir string,
+ restoreWALDir string,
restorePeerURLs string,
restoreName string,
skipHashCheck bool,
- args []string) {
+ initialMmapSize uint64,
+ revisionBump uint64,
+ markCompacted bool,
+ args []string,
+) {
if len(args) != 1 {
err := fmt.Errorf("snapshot restore requires exactly one argument")
cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
}
+ if (revisionBump == 0 && markCompacted) || (revisionBump > 0 && !markCompacted) {
+ err := fmt.Errorf("--mark-compacted required if --revision-bump > 0")
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
+ }
+
dataDir := restoreDataDir
if dataDir == "" {
dataDir = restoreName + ".etcd"
}
- walDir := restoreWalDir
+ walDir := restoreWALDir
if walDir == "" {
- walDir = datadir.ToWalDir(dataDir)
+ walDir = datadir.ToWALDir(dataDir)
}
lg := GetLogger()
@@ -153,6 +154,9 @@ func SnapshotRestoreCommandFunc(restoreCluster string,
InitialCluster: restoreCluster,
InitialClusterToken: restoreClusterToken,
SkipHashCheck: skipHashCheck,
+ InitialMmapSize: initialMmapSize,
+ RevisionBump: revisionBump,
+ MarkCompacted: markCompacted,
}); err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
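
For callers that drive the restore path programmatically, a hedged sketch of the widened call shape. The import paths and the `backend.InitialMmapSize` default follow the diff above; every other value is a placeholder, and the function exits the process on error, so treat this as illustration rather than a drop-in program.

```go
package main

import (
	"go.etcd.io/etcd/etcdutl/v3/etcdutl"
	"go.etcd.io/etcd/server/v3/storage/backend"
)

func main() {
	// Placeholder values throughout; the call exits the process if the restore fails.
	etcdutl.SnapshotRestoreCommandFunc(
		"default=http://localhost:2380", // initial cluster
		"etcd-cluster",                  // initial cluster token
		"restored.etcd",                 // output data directory
		"",                              // WAL directory (derived from data dir when empty)
		"http://localhost:2380",         // advertised peer URLs
		"default",                       // member name
		false,                           // skip hash check
		backend.InitialMmapSize,         // initial memory map size (default)
		1000,                            // bump the latest revision by 1000
		true,                            // mark compacted (required when bumping)
		[]string{"snapshot.db"},         // snapshot file to restore from
	)
}
```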
diff --git a/etcdutl/etcdutl/version_command.go b/etcdutl/etcdutl/version_command.go
index 1cb1a146b4b..a8ecd3e6a4c 100644
--- a/etcdutl/etcdutl/version_command.go
+++ b/etcdutl/etcdutl/version_command.go
@@ -17,9 +17,9 @@ package etcdutl
import (
"fmt"
- "go.etcd.io/etcd/api/v3/version"
-
"github.com/spf13/cobra"
+
+ "go.etcd.io/etcd/api/v3/version"
)
// NewVersionCommand prints out the version of etcd.
diff --git a/etcdutl/go.mod b/etcdutl/go.mod
index 6e61a0e4ca7..f5be9382170 100644
--- a/etcdutl/go.mod
+++ b/etcdutl/go.mod
@@ -1,14 +1,15 @@
module go.etcd.io/etcd/etcdutl/v3
-go 1.16
+go 1.23
+
+toolchain go1.23.4
replace (
go.etcd.io/etcd/api/v3 => ../api
go.etcd.io/etcd/client/pkg/v3 => ../client/pkg
- go.etcd.io/etcd/client/v2 => ../client/v2
+ go.etcd.io/etcd/client/v2 => ./../client/internal/v2
go.etcd.io/etcd/client/v3 => ../client/v3
go.etcd.io/etcd/pkg/v3 => ../pkg
- go.etcd.io/etcd/raft/v3 => ../raft
go.etcd.io/etcd/server/v3 => ../server
)
@@ -21,15 +22,76 @@ replace (
)
require (
- github.com/dustin/go-humanize v1.0.0
+ github.com/coreos/go-semver v0.3.1
+ github.com/dustin/go-humanize v1.0.1
github.com/olekukonko/tablewriter v0.0.5
- github.com/spf13/cobra v1.1.3
- go.etcd.io/bbolt v1.3.6-0.20210426205525-9c92be978ae0
- go.etcd.io/etcd/api/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/client/pkg/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/client/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/raft/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/server/v3 v3.5.0-alpha.0
- go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19
+ github.com/spf13/cobra v1.8.1
+ github.com/stretchr/testify v1.10.0
+ go.etcd.io/bbolt v1.4.0-beta.0
+ go.etcd.io/etcd/api/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/client/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/server/v3 v3.6.0-alpha.0
+ go.etcd.io/raft/v3 v3.6.0-beta.0
+ go.uber.org/zap v1.27.0
+)
+
+require (
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang-jwt/jwt/v4 v4.5.1 // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
+ github.com/google/btree v1.1.3 // indirect
+ github.com/google/go-cmp v0.6.0 // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/gorilla/websocket v1.4.2 // indirect
+ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/jonboulle/clockwork v0.4.0 // indirect
+ github.com/klauspost/compress v1.17.9 // indirect
+ github.com/mattn/go-runewidth v0.0.16 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/prometheus/client_golang v1.20.5 // indirect
+ github.com/prometheus/client_model v0.6.1 // indirect
+ github.com/prometheus/common v0.61.0 // indirect
+ github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/rivo/uniseg v0.4.7 // indirect
+ github.com/sirupsen/logrus v1.9.3 // indirect
+ github.com/soheilhy/cmux v0.1.5 // indirect
+ github.com/spf13/pflag v1.0.5 // indirect
+ github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect
+ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
+ go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
+ go.opentelemetry.io/otel v1.33.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect
+ go.opentelemetry.io/otel/metric v1.33.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.33.0 // indirect
+ go.opentelemetry.io/otel/trace v1.33.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.4.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ golang.org/x/crypto v0.31.0 // indirect
+ golang.org/x/net v0.33.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/text v0.21.0 // indirect
+ golang.org/x/time v0.8.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb // indirect
+ google.golang.org/grpc v1.69.2 // indirect
+ google.golang.org/protobuf v1.36.1 // indirect
+ gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+ sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
+ sigs.k8s.io/yaml v1.4.0 // indirect
)
diff --git a/etcdutl/go.sum b/etcdutl/go.sum
index d44b8a0a84d..aebba98e094 100644
--- a/etcdutl/go.sum
+++ b/etcdutl/go.sum
@@ -1,508 +1,187 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
-github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs=
-github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.3.1 h1:7OO2CXWMYNDdaAzP51t4lCCZWwpQHmvPbm9sxWjm3So=
-github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
+github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
-github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
+github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
-github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
+github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
-github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA=
-github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
+github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
-github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.6-0.20210426205525-9c92be978ae0 h1:FPuyGXkE6qPKJ71PyS0sdXuxUvYGXAXxV0XHpx0qjHE=
-go.etcd.io/bbolt v1.3.6-0.20210426205525-9c92be978ae0/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
-go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 h1:sO4WKdPAudZGKPcpZT4MJn6JaDmpyLrMPDGGyA1SttE=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
-go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g=
-go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
-go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8=
-go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
-go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw=
-go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
-go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
-go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
-go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw=
-go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19 h1:040c3dLNhgFQkoojH2AMpHCy4SrvhmxdU72d9GLGGE0=
-go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19/go.mod h1:aMfIlz3TDBfB0BwTCKFU1XbEmj9zevr5S5LcBr85MXw=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.etcd.io/bbolt v1.4.0-beta.0 h1:U7Y9yH6ZojEo5/BDFMXDXD1RNx9L7iKxudzqR68jLaM=
+go.etcd.io/bbolt v1.4.0-beta.0/go.mod h1:Qv5yHB6jkQESXT/uVfxJgUPMqgAyhL0GLxcQaz9bSec=
+go.etcd.io/raft/v3 v3.6.0-beta.0 h1:MZFQVjCQxPJj5K9oS69Y+atNvYnGNyOQBnroTdw56jQ=
+go.etcd.io/raft/v3 v3.6.0-beta.0/go.mod h1:C2JoekRXfvImSrk5GnqD0aZ3a+cGVRnyem9qqn2DCEw=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
+go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
+go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
+go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
+go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
+go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
+go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
+go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
+go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
+go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 h1:F5Gozwx4I1xtr/sr/8CFbb57iKi3297KFs0QDbGN60A=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
+golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb h1:B7GIB7sr443wZ/EAEl7VZjmh1V6qzkt5V+RYcUYtS1U=
+google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:E5//3O5ZIG2l71Xnt+P/CYUY8Bxs8E7WMoZ9tlcMbAY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb h1:3oy2tynMOP1QbTC0MsNNAV+Se8M2Bd0A5+x1QHyw+pI=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
+google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
+google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
+google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
+sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/etcdutl/main_test.go b/etcdutl/main_test.go
deleted file mode 100644
index b54b2ba234f..00000000000
--- a/etcdutl/main_test.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "log"
- "os"
- "strings"
- "testing"
-)
-
-func SplitTestArgs(args []string) (testArgs, appArgs []string) {
- for i, arg := range os.Args {
- switch {
- case strings.HasPrefix(arg, "-test."):
- testArgs = append(testArgs, arg)
- case i == 0:
- appArgs = append(appArgs, arg)
- testArgs = append(testArgs, arg)
- default:
- appArgs = append(appArgs, arg)
- }
- }
- return
-}
-
-// Empty test to avoid no-tests warning.
-func TestEmpty(t *testing.T) {}
-
-/**
- * The purpose of this "test" is to run etcdctl with code-coverage
- * collection turned on. The technique is documented here:
- *
- * https://www.cyphar.com/blog/post/20170412-golang-integration-coverage
- */
-func TestMain(m *testing.M) {
- // don't launch etcdutl when invoked via go test
- if strings.HasSuffix(os.Args[0], "etcdutl.test") {
- return
- }
-
- testArgs, appArgs := SplitTestArgs(os.Args)
-
- os.Args = appArgs
-
- err := Start()
- if err != nil {
- log.Fatalf("etcdctl failed with: %v", err)
- }
-
- // This will generate coverage files:
- os.Args = testArgs
- m.Run()
-}
diff --git a/etcdutl/snapshot/util.go b/etcdutl/snapshot/util.go
deleted file mode 100644
index 2c1fae21fa1..00000000000
--- a/etcdutl/snapshot/util.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package snapshot
-
-import (
- "encoding/binary"
-)
-
-type revision struct {
- main int64
- sub int64
-}
-
-func bytesToRev(bytes []byte) revision {
- return revision{
- main: int64(binary.BigEndian.Uint64(bytes[0:8])),
- sub: int64(binary.BigEndian.Uint64(bytes[9:])),
- }
-}
diff --git a/etcdutl/snapshot/v3_snapshot.go b/etcdutl/snapshot/v3_snapshot.go
index 9272a8f0b13..3d17dcbae51 100644
--- a/etcdutl/snapshot/v3_snapshot.go
+++ b/etcdutl/snapshot/v3_snapshot.go
@@ -15,6 +15,7 @@
package snapshot
import (
+ "bytes"
"context"
"crypto/sha256"
"encoding/json"
@@ -26,37 +27,40 @@ import (
"reflect"
"strings"
+ "go.uber.org/zap"
+
bolt "go.etcd.io/bbolt"
"go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/snapshot"
- "go.etcd.io/etcd/raft/v3"
- "go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/config"
"go.etcd.io/etcd/server/v3/etcdserver"
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
"go.etcd.io/etcd/server/v3/etcdserver/cindex"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ "go.etcd.io/etcd/server/v3/storage/wal"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
"go.etcd.io/etcd/server/v3/verify"
- "go.etcd.io/etcd/server/v3/wal"
- "go.etcd.io/etcd/server/v3/wal/walpb"
- "go.uber.org/zap"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
)
// Manager defines snapshot methods.
type Manager interface {
- // Save fetches snapshot from remote etcd server and saves data
- // to target path. If the context "ctx" is canceled or timed out,
+ // Save fetches snapshot from remote etcd server, saves data
+ // to target path and returns server version. If the context "ctx" is canceled or timed out,
// snapshot save stream will error out (e.g. context.Canceled,
// context.DeadlineExceeded). Make sure to specify only one endpoint
// in client configuration. Snapshot API must be requested to a
// selected node, and saved snapshot is the point-in-time state of
// the selected node.
- Save(ctx context.Context, cfg clientv3.Config, dbPath string) error
+ Save(ctx context.Context, cfg clientv3.Config, dbPath string) (version string, err error)
// Status returns the snapshot file information.
Status(dbPath string) (Status, error)
@@ -69,9 +73,6 @@ type Manager interface {
// NewV3 returns a new snapshot Manager for v3.x snapshot.
func NewV3(lg *zap.Logger) Manager {
- if lg == nil {
- lg = zap.NewExample()
- }
return &v3Manager{lg: lg}
}
@@ -84,7 +85,8 @@ type v3Manager struct {
snapDir string
cl *membership.RaftCluster
- skipHashCheck bool
+ skipHashCheck bool
+ initialMmapSize uint64
}
// hasChecksum returns "true" if the file size "n"
@@ -96,8 +98,8 @@ func hasChecksum(n int64) bool {
}
// Save fetches snapshot from remote etcd server and saves data to target path.
-func (s *v3Manager) Save(ctx context.Context, cfg clientv3.Config, dbPath string) error {
- return snapshot.Save(ctx, s.lg, cfg, dbPath)
+func (s *v3Manager) Save(ctx context.Context, cfg clientv3.Config, dbPath string) (version string, err error) {
+ return snapshot.SaveWithVersion(ctx, s.lg, cfg, dbPath)
}
// Status is the snapshot file status.
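For context, the Save method now returns the server version alongside the error. A minimal sketch of calling it through the exported Manager (not part of this patch; the endpoint and output path are hypothetical placeholders):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"go.uber.org/zap"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/etcdutl/v3/snapshot"
)

func main() {
	// NewV3 no longer falls back to a default logger, so pass one explicitly.
	sm := snapshot.NewV3(zap.NewExample())

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Save must be pointed at exactly one endpoint; the resulting file is the
	// point-in-time state of that node. It now also reports the server version.
	version, err := sm.Save(ctx,
		clientv3.Config{Endpoints: []string{"http://127.0.0.1:2379"}},
		"/tmp/snapshot.db")
	if err != nil {
		log.Fatalf("snapshot save failed: %v", err)
	}
	fmt.Println("snapshot saved from server version:", version)
}
```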
@@ -106,6 +108,9 @@ type Status struct {
Revision int64 `json:"revision"`
TotalKey int `json:"totalKey"`
TotalSize int64 `json:"totalSize"`
+ // Version is equal to storageVersion of the snapshot
+ // Empty if the server does not support versioned snapshots (<v3.6)
+ Version string `json:"version"`
}
// Restore restores a new etcd data directory from given snapshot file.
@@ -247,6 +292,7 @@ func (s *v3Manager) Restore(cfg RestoreConfig) error {
s.walDir = walDir
s.snapDir = filepath.Join(dataDir, "member", "snap")
s.skipHashCheck = cfg.SkipHashCheck
+ s.initialMmapSize = cfg.InitialMmapSize
s.lg.Info(
"restoring snapshot",
@@ -254,12 +300,19 @@ func (s *v3Manager) Restore(cfg RestoreConfig) error {
zap.String("wal-dir", s.walDir),
zap.String("data-dir", dataDir),
zap.String("snap-dir", s.snapDir),
- zap.Stack("stack"),
+ zap.Uint64("initial-memory-map-size", s.initialMmapSize),
)
if err = s.saveDB(); err != nil {
return err
}
+
+ if cfg.MarkCompacted && cfg.RevisionBump > 0 {
+ if err = s.modifyLatestRevision(cfg.RevisionBump); err != nil {
+ return err
+ }
+ }
+
hardstate, err := s.saveWALAndSnap()
if err != nil {
return err
@@ -275,6 +328,7 @@ func (s *v3Manager) Restore(cfg RestoreConfig) error {
zap.String("wal-dir", s.walDir),
zap.String("data-dir", dataDir),
zap.String("snap-dir", s.snapDir),
+ zap.Uint64("initial-memory-map-size", s.initialMmapSize),
)
return verify.VerifyIfEnabled(verify.Config{
@@ -295,17 +349,81 @@ func (s *v3Manager) saveDB() error {
return err
}
- be := backend.NewDefaultBackend(s.outDbPath())
+ be := backend.NewDefaultBackend(s.lg, s.outDbPath(), backend.WithMmapSize(s.initialMmapSize))
defer be.Close()
- err = membership.TrimMembershipFromBackend(s.lg, be)
+ err = schema.NewMembershipBackend(s.lg, be).TrimMembershipFromBackend()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// modifyLatestRevision increases the latest revision by the given amount and sets the scheduled compaction
+// to that revision so that the server will consider this revision compacted.
+func (s *v3Manager) modifyLatestRevision(bumpAmount uint64) error {
+ be := backend.NewDefaultBackend(s.lg, s.outDbPath())
+ defer func() {
+ be.ForceCommit()
+ be.Close()
+ }()
+
+ tx := be.BatchTx()
+ tx.LockOutsideApply()
+ defer tx.Unlock()
+
+ latest, err := s.unsafeGetLatestRevision(tx)
if err != nil {
return err
}
+ latest = s.unsafeBumpBucketsRevision(tx, latest, int64(bumpAmount))
+ s.unsafeMarkRevisionCompacted(tx, latest)
+
return nil
}
+func (s *v3Manager) unsafeBumpBucketsRevision(tx backend.UnsafeWriter, latest mvcc.Revision, amount int64) mvcc.Revision {
+ s.lg.Info(
+ "bumping latest revision",
+ zap.Int64("latest-revision", latest.Main),
+ zap.Int64("bump-amount", amount),
+ zap.Int64("new-latest-revision", latest.Main+amount),
+ )
+
+ latest.Main += amount
+ latest.Sub = 0
+ k := mvcc.NewRevBytes()
+ k = mvcc.RevToBytes(latest, k)
+ tx.UnsafePut(schema.Key, k, []byte{})
+
+ return latest
+}
+
+func (s *v3Manager) unsafeMarkRevisionCompacted(tx backend.UnsafeWriter, latest mvcc.Revision) {
+ s.lg.Info(
+ "marking revision compacted",
+ zap.Int64("revision", latest.Main),
+ )
+
+ mvcc.UnsafeSetScheduledCompact(tx, latest.Main)
+}
+
+func (s *v3Manager) unsafeGetLatestRevision(tx backend.UnsafeReader) (mvcc.Revision, error) {
+ var latest mvcc.Revision
+ err := tx.UnsafeForEach(schema.Key, func(k, _ []byte) (err error) {
+ rev := mvcc.BytesToRev(k)
+
+ if rev.GreaterThan(latest) {
+ latest = rev
+ }
+
+ return nil
+ })
+ return latest, err
+}
+
func (s *v3Manager) copyAndVerifyDB() error {
srcf, ferr := os.Open(s.srcDbPath)
if ferr != nil {
@@ -325,23 +443,18 @@ func (s *v3Manager) copyAndVerifyDB() error {
return err
}
- if err := fileutil.CreateDirAll(s.snapDir); err != nil {
+ if err := fileutil.CreateDirAll(s.lg, s.snapDir); err != nil {
return err
}
outDbPath := s.outDbPath()
- db, dberr := os.OpenFile(outDbPath, os.O_RDWR|os.O_CREATE, 0600)
+ db, dberr := os.OpenFile(outDbPath, os.O_RDWR|os.O_CREATE, 0o600)
if dberr != nil {
return dberr
}
- dbClosed := false
- defer func() {
- if !dbClosed {
- db.Close()
- dbClosed = true
- }
- }()
+ defer db.Close()
+
if _, err := io.Copy(db, srcf); err != nil {
return err
}
@@ -378,7 +491,7 @@ func (s *v3Manager) copyAndVerifyDB() error {
}
// db hash is OK, can now modify DB so it can be part of a new cluster
- db.Close()
+
return nil
}
@@ -386,16 +499,14 @@ func (s *v3Manager) copyAndVerifyDB() error {
//
// TODO: This code ignores learners !!!
func (s *v3Manager) saveWALAndSnap() (*raftpb.HardState, error) {
- if err := fileutil.CreateDirAll(s.walDir); err != nil {
+ if err := fileutil.CreateDirAll(s.lg, s.walDir); err != nil {
return nil, err
}
- // add members again to persist them to the store we create.
- st := v2store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix)
- s.cl.SetStore(st)
- be := backend.NewDefaultBackend(s.outDbPath())
+ // add members again to persist them to the backend we create.
+ be := backend.NewDefaultBackend(s.lg, s.outDbPath(), backend.WithMmapSize(s.initialMmapSize))
defer be.Close()
- s.cl.SetBackend(be)
+ s.cl.SetBackend(schema.NewMembershipBackend(s.lg, be))
for _, m := range s.cl.Members() {
s.cl.AddMember(m, true)
}
@@ -452,15 +563,11 @@ func (s *v3Manager) saveWALAndSnap() (*raftpb.HardState, error) {
return nil, err
}
- b, berr := st.Save()
- if berr != nil {
- return nil, berr
- }
confState := raftpb.ConfState{
Voters: nodeIDs,
}
raftSnap := raftpb.Snapshot{
- Data: b,
+ Data: etcdserver.GetMembershipInfoInV2Format(s.lg, s.cl),
Metadata: raftpb.SnapshotMetadata{
Index: commit,
Term: term,
@@ -476,9 +583,9 @@ func (s *v3Manager) saveWALAndSnap() (*raftpb.HardState, error) {
}
func (s *v3Manager) updateCIndex(commit uint64, term uint64) error {
- be := backend.NewDefaultBackend(s.outDbPath())
+ be := backend.NewDefaultBackend(s.lg, s.outDbPath(), backend.WithMmapSize(s.initialMmapSize))
defer be.Close()
- cindex.UpdateConsistentIndex(be.BatchTx(), commit, term, false)
+ cindex.UpdateConsistentIndexForce(be.BatchTx(), commit, term)
return nil
}
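The v3_snapshot.go changes above add three restore knobs: InitialMmapSize, RevisionBump and MarkCompacted. A minimal sketch of a restore call that uses them follows (not part of this patch); the RestoreConfig fields that do not appear in this diff, such as SnapshotPath, Name, OutputDataDir, PeerURLs, InitialCluster and InitialClusterToken, are assumed to keep their existing names, and every path and URL is a placeholder:

```go
package main

import (
	"log"

	"go.uber.org/zap"

	"go.etcd.io/etcd/etcdutl/v3/snapshot"
)

func main() {
	sm := snapshot.NewV3(zap.NewExample())

	err := sm.Restore(snapshot.RestoreConfig{
		SnapshotPath:        "/tmp/snapshot.db",       // placeholder
		Name:                "m1",                     // placeholder member name
		OutputDataDir:       "/var/lib/etcd-restored", // placeholder
		PeerURLs:            []string{"http://127.0.0.1:2380"},
		InitialCluster:      "m1=http://127.0.0.1:2380",
		InitialClusterToken: "etcd-cluster",
		SkipHashCheck:       false,

		// New knobs from this change: pre-size the bbolt mmap region, bump the
		// latest revision by a fixed amount, and mark that revision as compacted.
		InitialMmapSize: 10 * 1024 * 1024 * 1024, // 10 GiB
		RevisionBump:    1000000,
		MarkCompacted:   true,
	})
	if err != nil {
		log.Fatalf("snapshot restore failed: %v", err)
	}
}
```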
diff --git a/etcdutl/snapshot/v3_snapshot_test.go b/etcdutl/snapshot/v3_snapshot_test.go
new file mode 100644
index 00000000000..c2b3d5202a1
--- /dev/null
+++ b/etcdutl/snapshot/v3_snapshot_test.go
@@ -0,0 +1,160 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snapshot
+
+import (
+ "context"
+ "errors"
+ "path/filepath"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+
+ "go.etcd.io/bbolt"
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/server/v3/embed"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+// TestSnapshotStatus is the happy case.
+// It inserts a pre-defined number of keys and asserts the output hash of the status command.
+// The expected hash value must not be changed.
+// If it changes, a backwards-incompatible change must have been introduced.
+func TestSnapshotStatus(t *testing.T) {
+ dbpath := createDB(t, insertKeys(t, 10, 100))
+
+ status, err := NewV3(zap.NewNop()).Status(dbpath)
+ require.NoError(t, err)
+
+ assert.Equal(t, uint32(0x62132b4d), status.Hash)
+ assert.Equal(t, int64(11), status.Revision)
+}
+
+// TestSnapshotStatusCorruptRevision tests that the snapshot status command fails when there is an unexpected revision in the "key" bucket.
+func TestSnapshotStatusCorruptRevision(t *testing.T) {
+ dbpath := createDB(t, insertKeys(t, 1, 0))
+
+ db, err := bbolt.Open(dbpath, 0o600, nil)
+ require.NoError(t, err)
+ defer db.Close()
+
+ err = db.Update(func(tx *bbolt.Tx) error {
+ b := tx.Bucket([]byte("key"))
+ if b == nil {
+ return errors.New("key bucket not found")
+ }
+ return b.Put([]byte("0"), []byte{})
+ })
+ require.NoError(t, err)
+ db.Close()
+
+ _, err = NewV3(zap.NewNop()).Status(dbpath)
+ require.ErrorContains(t, err, "invalid revision length")
+}
+
+// TestSnapshotStatusNegativeRevisionMain tests that the snapshot status command fails when the main revision number is negative.
+func TestSnapshotStatusNegativeRevisionMain(t *testing.T) {
+ dbpath := createDB(t, insertKeys(t, 1, 0))
+
+ db, err := bbolt.Open(dbpath, 0o666, nil)
+ require.NoError(t, err)
+ defer db.Close()
+
+ err = db.Update(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(schema.Key.Name())
+ if b == nil {
+ return errors.New("key bucket not found")
+ }
+ bytes := mvcc.NewRevBytes()
+ mvcc.RevToBytes(mvcc.Revision{Main: -1}, bytes)
+ return b.Put(bytes, []byte{})
+ })
+ require.NoError(t, err)
+ db.Close()
+
+ _, err = NewV3(zap.NewNop()).Status(dbpath)
+ require.ErrorContains(t, err, "negative revision")
+}
+
+// TestSnapshotStatusNegativeRevisionSub tests that the snapshot status command fails when the sub revision number is negative.
+func TestSnapshotStatusNegativeRevisionSub(t *testing.T) {
+ dbpath := createDB(t, insertKeys(t, 1, 0))
+
+ db, err := bbolt.Open(dbpath, 0o666, nil)
+ require.NoError(t, err)
+ defer db.Close()
+
+ err = db.Update(func(tx *bbolt.Tx) error {
+ b := tx.Bucket([]byte("key"))
+ if b == nil {
+ return errors.New("key bucket not found")
+ }
+ bytes := mvcc.NewRevBytes()
+ mvcc.RevToBytes(mvcc.Revision{Sub: -1}, bytes)
+ return b.Put(bytes, []byte{})
+ })
+ require.NoError(t, err)
+ db.Close()
+
+ _, err = NewV3(zap.NewNop()).Status(dbpath)
+ require.ErrorContains(t, err, "negative revision")
+}
+
+// insertKeys inserts `numKeys` keys, each with a `valueSize`-byte value, into a running etcd server.
+func insertKeys(t *testing.T, numKeys, valueSize int) func(*etcdserver.EtcdServer) {
+ t.Helper()
+ return func(srv *etcdserver.EtcdServer) {
+ val := make([]byte, valueSize)
+ for i := 0; i < numKeys; i++ {
+ req := etcdserverpb.PutRequest{
+ Key: []byte(strconv.Itoa(i)),
+ Value: val,
+ }
+ _, err := srv.Put(context.TODO(), &req)
+ require.NoError(t, err)
+ }
+ }
+}
+
+// createDB creates a bbolt database file by running an embedded etcd server.
+// While the server is running, the `generateContent` function is called to insert values.
+// It returns the path of the bbolt database file.
+func createDB(t *testing.T, generateContent func(*etcdserver.EtcdServer)) string {
+ t.Helper()
+
+ cfg := embed.NewConfig()
+ cfg.LogLevel = "fatal"
+ cfg.Dir = t.TempDir()
+
+ etcd, err := embed.StartEtcd(cfg)
+ require.NoError(t, err)
+ defer etcd.Close()
+
+ select {
+ case <-etcd.Server.ReadyNotify():
+ case <-time.After(10 * time.Second):
+ t.FailNow()
+ }
+
+ generateContent(etcd.Server)
+
+ return filepath.Join(cfg.Dir, "member", "snap", "db")
+}
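The tests above exercise Status against a freshly generated bbolt file. Outside the test harness the same call can be made directly on a saved snapshot; a minimal sketch, assuming a hypothetical ./snapshot.db produced by an earlier save:

```go
package main

import (
	"fmt"
	"log"

	"go.uber.org/zap"

	"go.etcd.io/etcd/etcdutl/v3/snapshot"
)

func main() {
	// Status reads the bbolt file directly; no running etcd server is required.
	st, err := snapshot.NewV3(zap.NewNop()).Status("./snapshot.db")
	if err != nil {
		log.Fatalf("snapshot status failed: %v", err)
	}
	fmt.Printf("hash=%x revision=%d keys=%d size=%d bytes\n",
		st.Hash, st.Revision, st.TotalKey, st.TotalSize)
}
```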
diff --git a/go.mod b/go.mod
index 4e847849da8..07f352223f3 100644
--- a/go.mod
+++ b/go.mod
@@ -1,37 +1,104 @@
module go.etcd.io/etcd/v3
-go 1.16
+go 1.23
+
+toolchain go1.23.4
replace (
go.etcd.io/etcd/api/v3 => ./api
go.etcd.io/etcd/client/pkg/v3 => ./client/pkg
- go.etcd.io/etcd/client/v2 => ./client/v2
+ go.etcd.io/etcd/client/v2 => ./client/internal/v2
go.etcd.io/etcd/client/v3 => ./client/v3
go.etcd.io/etcd/etcdctl/v3 => ./etcdctl
go.etcd.io/etcd/etcdutl/v3 => ./etcdutl
go.etcd.io/etcd/pkg/v3 => ./pkg
- go.etcd.io/etcd/raft/v3 => ./raft
go.etcd.io/etcd/server/v3 => ./server
go.etcd.io/etcd/tests/v3 => ./tests
)
require (
- github.com/bgentry/speakeasy v0.1.0
- github.com/dustin/go-humanize v1.0.0
- github.com/spf13/cobra v1.1.3
- go.etcd.io/bbolt v1.3.6-0.20210426205525-9c92be978ae0
- go.etcd.io/etcd/api/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/client/pkg/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/client/v2 v2.305.0-alpha.0
- go.etcd.io/etcd/client/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/etcdctl/v3 v3.0.0-00010101000000-000000000000
- go.etcd.io/etcd/etcdutl/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/raft/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/server/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/tests/v3 v3.5.0-alpha.0
- go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19
- golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba
- google.golang.org/grpc v1.37.0
- gopkg.in/cheggaaa/pb.v1 v1.0.28
+ github.com/bgentry/speakeasy v0.2.0
+ github.com/cheggaaa/pb/v3 v3.1.5
+ github.com/coreos/go-semver v0.3.1
+ github.com/dustin/go-humanize v1.0.1
+ github.com/spf13/cobra v1.8.1
+ github.com/stretchr/testify v1.10.0
+ go.etcd.io/bbolt v1.4.0-beta.0
+ go.etcd.io/etcd/api/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/client/v2 v2.306.0-alpha.0
+ go.etcd.io/etcd/client/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/etcdctl/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/etcdutl/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/server/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/tests/v3 v3.0.0-00010101000000-000000000000
+ go.etcd.io/raft/v3 v3.6.0-beta.0
+ go.uber.org/zap v1.27.0
+ golang.org/x/time v0.8.0
+ google.golang.org/grpc v1.69.2
+ google.golang.org/protobuf v1.36.1
+)
+
+require (
+ github.com/VividCortex/ewma v1.2.0 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/fatih/color v1.18.0 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang-jwt/jwt/v4 v4.5.1 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
+ github.com/google/btree v1.1.3 // indirect
+ github.com/google/go-cmp v0.6.0 // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/gorilla/websocket v1.4.2 // indirect
+ github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
+ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/jonboulle/clockwork v0.4.0 // indirect
+ github.com/klauspost/compress v1.17.9 // indirect
+ github.com/mattn/go-colorable v0.1.13 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mattn/go-runewidth v0.0.16 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/olekukonko/tablewriter v0.0.5 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/prometheus/client_golang v1.20.5 // indirect
+ github.com/prometheus/client_model v0.6.1 // indirect
+ github.com/prometheus/common v0.61.0 // indirect
+ github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/rivo/uniseg v0.4.7 // indirect
+ github.com/sirupsen/logrus v1.9.3 // indirect
+ github.com/soheilhy/cmux v0.1.5 // indirect
+ github.com/spf13/pflag v1.0.5 // indirect
+ github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect
+ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
+ go.etcd.io/gofail v0.2.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
+ go.opentelemetry.io/otel v1.33.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect
+ go.opentelemetry.io/otel/metric v1.33.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.33.0 // indirect
+ go.opentelemetry.io/otel/trace v1.33.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.4.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ golang.org/x/crypto v0.31.0 // indirect
+ golang.org/x/net v0.33.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/text v0.21.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb // indirect
+ gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+ sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
+ sigs.k8s.io/yaml v1.4.0 // indirect
)
diff --git a/go.sum b/go.sum
index d35fb315f0c..b7475fd319e 100644
--- a/go.sum
+++ b/go.sum
@@ -1,537 +1,268 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg=
-github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
+github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/bgentry/speakeasy v0.2.0 h1:tgObeVOf8WAvtuAX6DhJ4xks4CFNwPDZiqzGqIHE51E=
+github.com/bgentry/speakeasy v0.2.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cheggaaa/pb/v3 v3.1.5 h1:QuuUzeM2WsAqG2gMqtzaWithDJv0i+i6UlnwSCI4QLk=
+github.com/cheggaaa/pb/v3 v3.1.5/go.mod h1:CrxkeghYTXi1lQBEI7jSn+3svI3cuc19haAj6jM60XI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
-github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs=
-github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.3.1 h1:7OO2CXWMYNDdaAzP51t4lCCZWwpQHmvPbm9sxWjm3So=
-github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
+github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca/go.mod h1:49H/RkXP8pKaZy4h0d+NW16rSLhyVBt4o6VLJbmOqDE=
-github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
-github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
+github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
-github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
+github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA=
-github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
+github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
-github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.6-0.20210426205525-9c92be978ae0 h1:FPuyGXkE6qPKJ71PyS0sdXuxUvYGXAXxV0XHpx0qjHE=
-go.etcd.io/bbolt v1.3.6-0.20210426205525-9c92be978ae0/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
-go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 h1:sO4WKdPAudZGKPcpZT4MJn6JaDmpyLrMPDGGyA1SttE=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
-go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g=
-go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
-go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8=
-go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
-go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw=
-go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
-go.opentelemetry.io/otel/sdk v0.20.0 h1:JsxtGXd06J8jrnya7fdI/U/MR6yXA5DtbZy+qoHQlr8=
-go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0 h1:c5VRjxCXdQlx1HjzwGdQHzZaVI82b5EbBgOu2ljD92g=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
-go.opentelemetry.io/otel/sdk/metric v0.20.0 h1:7ao1wpzHRVKf0OQ7GIxiQJA6X7DLX9o14gmVon7mMK8=
-go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
-go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw=
-go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
-go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.etcd.io/bbolt v1.4.0-beta.0 h1:U7Y9yH6ZojEo5/BDFMXDXD1RNx9L7iKxudzqR68jLaM=
+go.etcd.io/bbolt v1.4.0-beta.0/go.mod h1:Qv5yHB6jkQESXT/uVfxJgUPMqgAyhL0GLxcQaz9bSec=
+go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA=
+go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o=
+go.etcd.io/raft/v3 v3.6.0-beta.0 h1:MZFQVjCQxPJj5K9oS69Y+atNvYnGNyOQBnroTdw56jQ=
+go.etcd.io/raft/v3 v3.6.0-beta.0/go.mod h1:C2JoekRXfvImSrk5GnqD0aZ3a+cGVRnyem9qqn2DCEw=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
+go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
+go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
+go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
+go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
+go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
+go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
+go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
+go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
+go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19 h1:040c3dLNhgFQkoojH2AMpHCy4SrvhmxdU72d9GLGGE0=
-go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19/go.mod h1:aMfIlz3TDBfB0BwTCKFU1XbEmj9zevr5S5LcBr85MXw=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 h1:F5Gozwx4I1xtr/sr/8CFbb57iKi3297KFs0QDbGN60A=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
+golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb h1:B7GIB7sr443wZ/EAEl7VZjmh1V6qzkt5V+RYcUYtS1U=
+google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:E5//3O5ZIG2l71Xnt+P/CYUY8Bxs8E7WMoZ9tlcMbAY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb h1:3oy2tynMOP1QbTC0MsNNAV+Se8M2Bd0A5+x1QHyw+pI=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
+google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
+google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk=
-gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
+sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/hack/kubernetes-deploy/etcd.yml b/hack/kubernetes-deploy/etcd.yml
index 84bf6be95ad..13c24a2b274 100644
--- a/hack/kubernetes-deploy/etcd.yml
+++ b/hack/kubernetes-deploy/etcd.yml
@@ -1,18 +1,17 @@
+---
apiVersion: v1
kind: Service
metadata:
name: etcd-client
spec:
ports:
- - name: etcd-client-port
- port: 2379
- protocol: TCP
- targetPort: 2379
+ - name: etcd-client-port
+ port: 2379
+ protocol: TCP
+ targetPort: 2379
selector:
app: etcd
-
---
-
apiVersion: v1
kind: Pod
metadata:
@@ -22,35 +21,33 @@ metadata:
name: etcd0
spec:
containers:
- - command:
- - /usr/local/bin/etcd
- - --name
- - etcd0
- - --initial-advertise-peer-urls
- - http://etcd0:2380
- - --listen-peer-urls
- - http://0.0.0.0:2380
- - --listen-client-urls
- - http://0.0.0.0:2379
- - --advertise-client-urls
- - http://etcd0:2379
- - --initial-cluster
- - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380
- - --initial-cluster-state
- - new
- image: quay.io/coreos/etcd:latest
- name: etcd0
- ports:
- - containerPort: 2379
- name: client
- protocol: TCP
- - containerPort: 2380
- name: server
- protocol: TCP
+ - command:
+ - /usr/local/bin/etcd
+ - --name
+ - etcd0
+ - --initial-advertise-peer-urls
+ - http://etcd0:2380
+ - --listen-peer-urls
+ - http://0.0.0.0:2380
+ - --listen-client-urls
+ - http://0.0.0.0:2379
+ - --advertise-client-urls
+ - http://etcd0:2379
+ - --initial-cluster
+ - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380
+ - --initial-cluster-state
+ - new
+ image: quay.io/coreos/etcd:latest
+ name: etcd0
+ ports:
+ - containerPort: 2379
+ name: client
+ protocol: TCP
+ - containerPort: 2380
+ name: server
+ protocol: TCP
restartPolicy: Always
-
---
-
apiVersion: v1
kind: Service
metadata:
@@ -59,19 +56,17 @@ metadata:
name: etcd0
spec:
ports:
- - name: client
- port: 2379
- protocol: TCP
- targetPort: 2379
- - name: server
- port: 2380
- protocol: TCP
- targetPort: 2380
+ - name: client
+ port: 2379
+ protocol: TCP
+ targetPort: 2379
+ - name: server
+ port: 2380
+ protocol: TCP
+ targetPort: 2380
selector:
etcd_node: etcd0
-
---
-
apiVersion: v1
kind: Pod
metadata:
@@ -81,35 +76,33 @@ metadata:
name: etcd1
spec:
containers:
- - command:
- - /usr/local/bin/etcd
- - --name
- - etcd1
- - --initial-advertise-peer-urls
- - http://etcd1:2380
- - --listen-peer-urls
- - http://0.0.0.0:2380
- - --listen-client-urls
- - http://0.0.0.0:2379
- - --advertise-client-urls
- - http://etcd1:2379
- - --initial-cluster
- - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380
- - --initial-cluster-state
- - new
- image: quay.io/coreos/etcd:latest
- name: etcd1
- ports:
- - containerPort: 2379
- name: client
- protocol: TCP
- - containerPort: 2380
- name: server
- protocol: TCP
+ - command:
+ - /usr/local/bin/etcd
+ - --name
+ - etcd1
+ - --initial-advertise-peer-urls
+ - http://etcd1:2380
+ - --listen-peer-urls
+ - http://0.0.0.0:2380
+ - --listen-client-urls
+ - http://0.0.0.0:2379
+ - --advertise-client-urls
+ - http://etcd1:2379
+ - --initial-cluster
+ - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380
+ - --initial-cluster-state
+ - new
+ image: quay.io/coreos/etcd:latest
+ name: etcd1
+ ports:
+ - containerPort: 2379
+ name: client
+ protocol: TCP
+ - containerPort: 2380
+ name: server
+ protocol: TCP
restartPolicy: Always
-
---
-
apiVersion: v1
kind: Service
metadata:
@@ -118,19 +111,17 @@ metadata:
name: etcd1
spec:
ports:
- - name: client
- port: 2379
- protocol: TCP
- targetPort: 2379
- - name: server
- port: 2380
- protocol: TCP
- targetPort: 2380
+ - name: client
+ port: 2379
+ protocol: TCP
+ targetPort: 2379
+ - name: server
+ port: 2380
+ protocol: TCP
+ targetPort: 2380
selector:
etcd_node: etcd1
-
---
-
apiVersion: v1
kind: Pod
metadata:
@@ -140,35 +131,33 @@ metadata:
name: etcd2
spec:
containers:
- - command:
- - /usr/local/bin/etcd
- - --name
- - etcd2
- - --initial-advertise-peer-urls
- - http://etcd2:2380
- - --listen-peer-urls
- - http://0.0.0.0:2380
- - --listen-client-urls
- - http://0.0.0.0:2379
- - --advertise-client-urls
- - http://etcd2:2379
- - --initial-cluster
- - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380
- - --initial-cluster-state
- - new
- image: quay.io/coreos/etcd:latest
- name: etcd2
- ports:
- - containerPort: 2379
- name: client
- protocol: TCP
- - containerPort: 2380
- name: server
- protocol: TCP
+ - command:
+ - /usr/local/bin/etcd
+ - --name
+ - etcd2
+ - --initial-advertise-peer-urls
+ - http://etcd2:2380
+ - --listen-peer-urls
+ - http://0.0.0.0:2380
+ - --listen-client-urls
+ - http://0.0.0.0:2379
+ - --advertise-client-urls
+ - http://etcd2:2379
+ - --initial-cluster
+ - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380
+ - --initial-cluster-state
+ - new
+ image: quay.io/coreos/etcd:latest
+ name: etcd2
+ ports:
+ - containerPort: 2379
+ name: client
+ protocol: TCP
+ - containerPort: 2380
+ name: server
+ protocol: TCP
restartPolicy: Always
-
---
-
apiVersion: v1
kind: Service
metadata:
@@ -177,13 +166,13 @@ metadata:
name: etcd2
spec:
ports:
- - name: client
- port: 2379
- protocol: TCP
- targetPort: 2379
- - name: server
- port: 2380
- protocol: TCP
- targetPort: 2380
+ - name: client
+ port: 2379
+ protocol: TCP
+ targetPort: 2379
+ - name: server
+ port: 2380
+ protocol: TCP
+ targetPort: 2380
selector:
etcd_node: etcd2
diff --git a/hack/kubernetes-deploy/vulcand.yml b/hack/kubernetes-deploy/vulcand.yml
index bb61eec461b..0a09a1e56df 100644
--- a/hack/kubernetes-deploy/vulcand.yml
+++ b/hack/kubernetes-deploy/vulcand.yml
@@ -1,3 +1,4 @@
+---
apiVersion: v1
kind: Pod
metadata:
@@ -6,17 +7,17 @@ metadata:
name: vulcand
spec:
containers:
- - command:
- - /go/bin/vulcand
- - -apiInterface=0.0.0.0
- - --etcd=http://etcd-client:2379
- image: mailgun/vulcand:v0.8.0-beta.2
- name: vulcand
- ports:
- - containerPort: 8081
- name: api
- protocol: TCP
- - containerPort: 8082
- name: server
- protocol: TCP
+ - command:
+ - /go/bin/vulcand
+ - -apiInterface=0.0.0.0
+ - --etcd=http://etcd-client:2379
+ image: mailgun/vulcand:v0.8.0-beta.2
+ name: vulcand
+ ports:
+ - containerPort: 8081
+ name: api
+ protocol: TCP
+ - containerPort: 8082
+ name: server
+ protocol: TCP
restartPolicy: Always
diff --git a/logos/etcd-xkcd-2347.png b/logos/etcd-xkcd-2347.png
new file mode 100644
index 00000000000..3873f8b1668
Binary files /dev/null and b/logos/etcd-xkcd-2347.png differ
diff --git a/pkg/adt/interval_tree.go b/pkg/adt/interval_tree.go
index 74a9aeb141e..3c1c3ea8322 100644
--- a/pkg/adt/interval_tree.go
+++ b/pkg/adt/interval_tree.go
@@ -176,7 +176,7 @@ func (x *intervalNode) visit(iv *Interval, sentinel *intervalNode, nv nodeVisito
// IntervalValue represents a range tree node that contains a range and a value.
type IntervalValue struct {
Ivl Interval
- Val interface{}
+ Val any
}
// IntervalTree represents a (mostly) textbook implementation of the
@@ -184,7 +184,7 @@ type IntervalValue struct {
// and chapter 14.3 interval tree with search supporting "stabbing queries".
type IntervalTree interface {
// Insert adds a node with the given interval into the tree.
- Insert(ivl Interval, val interface{})
+ Insert(ivl Interval, val any)
// Delete removes the node with the given interval from the tree, returning
// true if a node is in fact removed.
Delete(ivl Interval) bool
@@ -241,34 +241,34 @@ type intervalTree struct {
//
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p324
//
-// 0. RB-DELETE(T, z)
-// 1.
-// 2. y = z
-// 3. y-original-color = y.color
-// 4.
-// 5. if z.left == T.nil
-// 6. x = z.right
-// 7. RB-TRANSPLANT(T, z, z.right)
-// 8. else if z.right == T.nil
-// 9. x = z.left
-// 10. RB-TRANSPLANT(T, z, z.left)
-// 11. else
-// 12. y = TREE-MINIMUM(z.right)
-// 13. y-original-color = y.color
-// 14. x = y.right
-// 15. if y.p == z
-// 16. x.p = y
-// 17. else
-// 18. RB-TRANSPLANT(T, y, y.right)
-// 19. y.right = z.right
-// 20. y.right.p = y
-// 21. RB-TRANSPLANT(T, z, y)
-// 22. y.left = z.left
-// 23. y.left.p = y
-// 24. y.color = z.color
-// 25.
-// 26. if y-original-color == BLACK
-// 27. RB-DELETE-FIXUP(T, x)
+// RB-DELETE(T, z)
+//
+// y = z
+// y-original-color = y.color
+//
+// if z.left == T.nil
+// x = z.right
+// RB-TRANSPLANT(T, z, z.right)
+// else if z.right == T.nil
+// x = z.left
+// RB-TRANSPLANT(T, z, z.left)
+// else
+// y = TREE-MINIMUM(z.right)
+// y-original-color = y.color
+// x = y.right
+// if y.p == z
+// x.p = y
+// else
+// RB-TRANSPLANT(T, y, y.right)
+// y.right = z.right
+// y.right.p = y
+// RB-TRANSPLANT(T, z, y)
+// y.left = z.left
+// y.left.p = y
+// y.color = z.color
+//
+// if y-original-color == BLACK
+// RB-DELETE-FIXUP(T, x)
// Delete removes the node with the given interval from the tree, returning
// true if a node is in fact removed.
@@ -317,48 +317,47 @@ func (ivt *intervalTree) Delete(ivl Interval) bool {
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p326
//
-// 0. RB-DELETE-FIXUP(T, z)
-// 1.
-// 2. while x ≠ T.root and x.color == BLACK
-// 3. if x == x.p.left
-// 4. w = x.p.right
-// 5. if w.color == RED
-// 6. w.color = BLACK
-// 7. x.p.color = RED
-// 8. LEFT-ROTATE(T, x, p)
-// 9. if w.left.color == BLACK and w.right.color == BLACK
-// 10. w.color = RED
-// 11. x = x.p
-// 12. else if w.right.color == BLACK
-// 13. w.left.color = BLACK
-// 14. w.color = RED
-// 15. RIGHT-ROTATE(T, w)
-// 16. w = w.p.right
-// 17. w.color = x.p.color
-// 18. x.p.color = BLACK
-// 19. LEFT-ROTATE(T, w.p)
-// 20. x = T.root
-// 21. else
-// 22. w = x.p.left
-// 23. if w.color == RED
-// 24. w.color = BLACK
-// 25. x.p.color = RED
-// 26. RIGHT-ROTATE(T, x, p)
-// 27. if w.right.color == BLACK and w.left.color == BLACK
-// 28. w.color = RED
-// 29. x = x.p
-// 30. else if w.left.color == BLACK
-// 31. w.right.color = BLACK
-// 32. w.color = RED
-// 33. LEFT-ROTATE(T, w)
-// 34. w = w.p.left
-// 35. w.color = x.p.color
-// 36. x.p.color = BLACK
-// 37. RIGHT-ROTATE(T, w.p)
-// 38. x = T.root
-// 39.
-// 40. x.color = BLACK
+// RB-DELETE-FIXUP(T, z)
//
+// while x ≠ T.root and x.color == BLACK
+// if x == x.p.left
+// w = x.p.right
+// if w.color == RED
+// w.color = BLACK
+// x.p.color = RED
+// LEFT-ROTATE(T, x.p)
+// if w.left.color == BLACK and w.right.color == BLACK
+// w.color = RED
+// x = x.p
+// else if w.right.color == BLACK
+// w.left.color = BLACK
+// w.color = RED
+// RIGHT-ROTATE(T, w)
+// w = w.p.right
+// w.color = x.p.color
+// x.p.color = BLACK
+// LEFT-ROTATE(T, w.p)
+// x = T.root
+// else
+// w = x.p.left
+// if w.color == RED
+// w.color = BLACK
+// x.p.color = RED
+// RIGHT-ROTATE(T, x.p)
+// if w.right.color == BLACK and w.left.color == BLACK
+// w.color = RED
+// x = x.p
+// else if w.left.color == BLACK
+// w.right.color = BLACK
+// w.color = RED
+// LEFT-ROTATE(T, w)
+// w = w.p.left
+// w.color = x.p.color
+// x.p.color = BLACK
+// RIGHT-ROTATE(T, w.p)
+// x = T.root
+//
+// x.color = BLACK
func (ivt *intervalTree) deleteFixup(x *intervalNode) {
for x != ivt.root && x.color(ivt.sentinel) == black {
if x == x.parent.left { // line 3-20
@@ -424,7 +423,7 @@ func (ivt *intervalTree) deleteFixup(x *intervalNode) {
}
}
-func (ivt *intervalTree) createIntervalNode(ivl Interval, val interface{}) *intervalNode {
+func (ivt *intervalTree) createIntervalNode(ivl Interval, val any) *intervalNode {
return &intervalNode{
iv: IntervalValue{ivl, val},
max: ivl.End,
@@ -439,35 +438,35 @@ func (ivt *intervalTree) createIntervalNode(ivl Interval, val interface{}) *inte
//
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p315
//
-// 0. RB-INSERT(T, z)
-// 1.
-// 2. y = T.nil
-// 3. x = T.root
-// 4.
-// 5. while x ≠ T.nil
-// 6. y = x
-// 7. if z.key < x.key
-// 8. x = x.left
-// 9. else
-// 10. x = x.right
-// 11.
-// 12. z.p = y
-// 13.
-// 14. if y == T.nil
-// 15. T.root = z
-// 16. else if z.key < y.key
-// 17. y.left = z
-// 18. else
-// 19. y.right = z
-// 20.
-// 21. z.left = T.nil
-// 22. z.right = T.nil
-// 23. z.color = RED
-// 24.
-// 25. RB-INSERT-FIXUP(T, z)
+// RB-INSERT(T, z)
+//
+// y = T.nil
+// x = T.root
+//
+// while x ≠ T.nil
+// y = x
+// if z.key < x.key
+// x = x.left
+// else
+// x = x.right
+//
+// z.p = y
+//
+// if y == T.nil
+// T.root = z
+// else if z.key < y.key
+// y.left = z
+// else
+// y.right = z
+//
+// z.left = T.nil
+// z.right = T.nil
+// z.color = RED
+//
+// RB-INSERT-FIXUP(T, z)
// Insert adds a node with the given interval into the tree.
-func (ivt *intervalTree) Insert(ivl Interval, val interface{}) {
+func (ivt *intervalTree) Insert(ivl Interval, val any) {
y := ivt.sentinel
z := ivt.createIntervalNode(ivl, val)
x := ivt.root
@@ -499,42 +498,40 @@ func (ivt *intervalTree) Insert(ivl Interval, val interface{}) {
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p316
//
-// 0. RB-INSERT-FIXUP(T, z)
-// 1.
-// 2. while z.p.color == RED
-// 3. if z.p == z.p.p.left
-// 4. y = z.p.p.right
-// 5. if y.color == RED
-// 6. z.p.color = BLACK
-// 7. y.color = BLACK
-// 8. z.p.p.color = RED
-// 9. z = z.p.p
-// 10. else if z == z.p.right
-// 11. z = z.p
-// 12. LEFT-ROTATE(T, z)
-// 13. z.p.color = BLACK
-// 14. z.p.p.color = RED
-// 15. RIGHT-ROTATE(T, z.p.p)
-// 16. else
-// 17. y = z.p.p.left
-// 18. if y.color == RED
-// 19. z.p.color = BLACK
-// 20. y.color = BLACK
-// 21. z.p.p.color = RED
-// 22. z = z.p.p
-// 23. else if z == z.p.right
-// 24. z = z.p
-// 25. RIGHT-ROTATE(T, z)
-// 26. z.p.color = BLACK
-// 27. z.p.p.color = RED
-// 28. LEFT-ROTATE(T, z.p.p)
-// 29.
-// 30. T.root.color = BLACK
+// RB-INSERT-FIXUP(T, z)
//
+// while z.p.color == RED
+// if z.p == z.p.p.left
+// y = z.p.p.right
+// if y.color == RED
+// z.p.color = BLACK
+// y.color = BLACK
+// z.p.p.color = RED
+// z = z.p.p
+// else if z == z.p.right
+// z = z.p
+// LEFT-ROTATE(T, z)
+// z.p.color = BLACK
+// z.p.p.color = RED
+// RIGHT-ROTATE(T, z.p.p)
+// else
+// y = z.p.p.left
+// if y.color == RED
+// z.p.color = BLACK
+// y.color = BLACK
+// z.p.p.color = RED
+// z = z.p.p
+// else if z == z.p.right
+// z = z.p
+// RIGHT-ROTATE(T, z)
+// z.p.color = BLACK
+// z.p.p.color = RED
+// LEFT-ROTATE(T, z.p.p)
+//
+// T.root.color = BLACK
func (ivt *intervalTree) insertFixup(z *intervalNode) {
for z.parent.color(ivt.sentinel) == red {
if z.parent == z.parent.parent.left { // line 3-15
-
y := z.parent.parent.right
if y.color(ivt.sentinel) == red {
y.c = black
@@ -578,26 +575,25 @@ func (ivt *intervalTree) insertFixup(z *intervalNode) {
//
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.2, p313
//
-// 0. LEFT-ROTATE(T, x)
-// 1.
-// 2. y = x.right
-// 3. x.right = y.left
-// 4.
-// 5. if y.left ≠ T.nil
-// 6. y.left.p = x
-// 7.
-// 8. y.p = x.p
-// 9.
-// 10. if x.p == T.nil
-// 11. T.root = y
-// 12. else if x == x.p.left
-// 13. x.p.left = y
-// 14. else
-// 15. x.p.right = y
-// 16.
-// 17. y.left = x
-// 18. x.p = y
+// LEFT-ROTATE(T, x)
+//
+// y = x.right
+// x.right = y.left
+//
+// if y.left ≠ T.nil
+// y.left.p = x
+//
+// y.p = x.p
//
+// if x.p == T.nil
+// T.root = y
+// else if x == x.p.left
+// x.p.left = y
+// else
+// x.p.right = y
+//
+// y.left = x
+// x.p = y
func (ivt *intervalTree) rotateLeft(x *intervalNode) {
// rotateLeft x must have right child
if x.right == ivt.sentinel {
@@ -624,26 +620,25 @@ func (ivt *intervalTree) rotateLeft(x *intervalNode) {
// rotateRight moves x so it is right of its left child
//
-// 0. RIGHT-ROTATE(T, x)
-// 1.
-// 2. y = x.left
-// 3. x.left = y.right
-// 4.
-// 5. if y.right ≠ T.nil
-// 6. y.right.p = x
-// 7.
-// 8. y.p = x.p
-// 9.
-// 10. if x.p == T.nil
-// 11. T.root = y
-// 12. else if x == x.p.right
-// 13. x.p.right = y
-// 14. else
-// 15. x.p.left = y
-// 16.
-// 17. y.right = x
-// 18. x.p = y
+// RIGHT-ROTATE(T, x)
+//
+// y = x.left
+// x.left = y.right
+//
+// if y.right ≠ T.nil
+// y.right.p = x
+//
+// y.p = x.p
+//
+// if x.p == T.nil
+// T.root = y
+// else if x == x.p.right
+// x.p.right = y
+// else
+// x.p.left = y
//
+// y.right = x
+// x.p = y
func (ivt *intervalTree) rotateRight(x *intervalNode) {
// rotateRight x must have left child
if x.left == ivt.sentinel {
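For orientation, a minimal usage sketch of the IntervalTree API touched above (the `interface{}` → `any` change). This is illustrative only — the import path and values are assumptions, not part of the diff:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/v3/adt" // assumed import path
)

func main() {
	// Values may now be of any type, since Insert takes `any` instead of `interface{}`.
	ivt := adt.NewIntervalTree()
	ivt.Insert(adt.NewInt64Interval(1, 5), "lease-1")
	ivt.Insert(adt.NewInt64Interval(4, 10), "lease-2")

	// Stab returns every stored interval containing the given point.
	for _, iv := range ivt.Stab(adt.NewInt64Point(4)) {
		fmt.Printf("stab(4) hit %v => %v\n", iv.Ivl, iv.Val)
	}

	// Delete removes an exact interval and reports whether a node was removed.
	fmt.Println("deleted:", ivt.Delete(adt.NewInt64Interval(1, 5)))
}
```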
diff --git a/pkg/adt/interval_tree_test.go b/pkg/adt/interval_tree_test.go
index 608841f92f3..7c9e8c69a30 100644
--- a/pkg/adt/interval_tree_test.go
+++ b/pkg/adt/interval_tree_test.go
@@ -18,7 +18,8 @@ import (
"math/rand"
"reflect"
"testing"
- "time"
+
+ "github.com/stretchr/testify/require"
)
// TestIntervalTreeInsert tests interval tree insertion.
@@ -54,36 +55,35 @@ func TestIntervalTreeInsert(t *testing.T) {
tr := ivt.(*intervalTree)
visits := tr.visitLevel()
- if !reflect.DeepEqual(expected, visits) {
- t.Fatalf("level order expected %v, got %v", expected, visits)
- }
+ require.Truef(t, reflect.DeepEqual(expected, visits), "level order expected %v, got %v", expected, visits)
}
// TestIntervalTreeSelfBalanced ensures range tree is self-balanced after inserting ranges to the tree.
// Use https://www.cs.usfca.edu/~galles/visualization/RedBlack.html for test case creation.
//
// Regular Binary Search Tree
-// [0,1]
-// \
-// [1,2]
-// \
-// [3,4]
-// \
-// [5,6]
-// \
-// [7,8]
-// \
-// [8,9]
+//
+// [0,1]
+// \
+// [1,2]
+// \
+// [3,4]
+// \
+// [5,6]
+// \
+// [7,8]
+// \
+// [8,9]
//
// Self-Balancing Binary Search Tree
-// [1,2]
-// / \
-// [0,1] [5,6]
-// / \
-// [3,4] [7,8]
-// \
-// [8,9]
//
+// [1,2]
+// / \
+// [0,1] [5,6]
+// / \
+// [3,4] [7,8]
+// \
+// [8,9]
func TestIntervalTreeSelfBalanced(t *testing.T) {
ivt := NewIntervalTree()
ivt.Insert(NewInt64Interval(0, 1), 0)
@@ -107,71 +107,65 @@ func TestIntervalTreeSelfBalanced(t *testing.T) {
tr := ivt.(*intervalTree)
visits := tr.visitLevel()
- if !reflect.DeepEqual(expected, visits) {
- t.Fatalf("level order expected %v, got %v", expected, visits)
- }
+ require.Truef(t, reflect.DeepEqual(expected, visits), "level order expected %v, got %v", expected, visits)
- if visits[len(visits)-1].depth != 3 {
- t.Fatalf("expected self-balanced tree with last level 3, but last level got %d", visits[len(visits)-1].depth)
- }
+ require.Equalf(t, 3, visits[len(visits)-1].depth, "expected self-balanced tree with last level 3, but last level got %d", visits[len(visits)-1].depth)
}
// TestIntervalTreeDelete ensures delete operation maintains red-black tree properties.
// Use https://www.cs.usfca.edu/~galles/visualization/RedBlack.html for test case creation.
// See https://github.com/etcd-io/etcd/issues/10877 for more detail.
//
-//
// After insertion:
-// [510,511]
-// / \
-// ---------- -----------------------
-// / \
-// [82,83] [830,831]
-// / \ / \
-// / \ / \
-// [11,12] [383,384](red) [647,648] [899,900](red)
-// / \ / \ / \
-// / \ / \ / \
-// [261,262] [410,411] [514,515](red) [815,816](red) [888,889] [972,973]
-// / \ /
-// / \ /
-// [238,239](red) [292,293](red) [953,954](red)
//
+// [510,511]
+// / \
+// ---------- -----------------------
+// / \
+// [82,83] [830,831]
+// / \ / \
+// / \ / \
+// [11,12] [383,384](red) [647,648] [899,900](red)
+// / \ / \ / \
+// / \ / \ / \
+// [261,262] [410,411] [514,515](red) [815,816](red) [888,889] [972,973]
+// / \ /
+// / \ /
+// [238,239](red) [292,293](red) [953,954](red)
//
// After deleting 514 (no rebalance):
-// [510,511]
-// / \
-// ---------- -----------------------
-// / \
-// [82,83] [830,831]
-// / \ / \
-// / \ / \
-// [11,12] [383,384](red) [647,648] [899,900](red)
-// / \ \ / \
-// / \ \ / \
-// [261,262] [410,411] [815,816](red) [888,889] [972,973]
-// / \ /
-// / \ /
-// [238,239](red) [292,293](red) [953,954](red)
//
+// [510,511]
+// / \
+// ---------- -----------------------
+// / \
+// [82,83] [830,831]
+// / \ / \
+// / \ / \
+// [11,12] [383,384](red) [647,648] [899,900](red)
+// / \ \ / \
+// / \ \ / \
+// [261,262] [410,411] [815,816](red) [888,889] [972,973]
+// / \ /
+// / \ /
+// [238,239](red) [292,293](red) [953,954](red)
//
// After deleting 11 (requires rebalancing):
-// [510,511]
-// / \
-// ---------- --------------------------
-// / \
-// [383,384] [830,831]
-// / \ / \
-// / \ / \
-// [261,262](red) [410,411] [647,648] [899,900](red)
-// / \ \ / \
-// / \ \ / \
-// [82,83] [292,293] [815,816](red) [888,889] [972,973]
-// \ /
-// \ /
-// [238,239](red) [953,954](red)
-//
//
+// [510,511]
+// / \
+// ---------- --------------------------
+// / \
+// [383,384] [830,831]
+// / \ / \
+// / \ / \
+// [261,262](red) [410,411] [647,648] [899,900](red)
+// / \ \ / \
+// / \ \ / \
+// [82,83] [292,293] [815,816](red) [888,889] [972,973]
+// \ /
+// \ /
+// [238,239](red) [953,954](red)
func TestIntervalTreeDelete(t *testing.T) {
ivt := NewIntervalTree()
ivt.Insert(NewInt64Interval(510, 511), 0)
@@ -216,15 +210,11 @@ func TestIntervalTreeDelete(t *testing.T) {
{root: NewInt64Interval(953, 954), color: red, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 4},
}
visitsBeforeDelete := tr.visitLevel()
- if !reflect.DeepEqual(expectedBeforeDelete, visitsBeforeDelete) {
- t.Fatalf("level order after insertion expected %v, got %v", expectedBeforeDelete, visitsBeforeDelete)
- }
+ require.Truef(t, reflect.DeepEqual(expectedBeforeDelete, visitsBeforeDelete), "level order after insertion expected %v, got %v", expectedBeforeDelete, visitsBeforeDelete)
// delete the node "514"
range514 := NewInt64Interval(514, 515)
- if deleted := tr.Delete(NewInt64Interval(514, 515)); !deleted {
- t.Fatalf("range %v not deleted", range514)
- }
+ require.Truef(t, tr.Delete(NewInt64Interval(514, 515)), "range %v not deleted", range514)
expectedAfterDelete514 := []visitedInterval{
{root: NewInt64Interval(510, 511), color: black, left: NewInt64Interval(82, 83), right: NewInt64Interval(830, 831), depth: 0},
@@ -248,15 +238,11 @@ func TestIntervalTreeDelete(t *testing.T) {
{root: NewInt64Interval(953, 954), color: red, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 4},
}
visitsAfterDelete514 := tr.visitLevel()
- if !reflect.DeepEqual(expectedAfterDelete514, visitsAfterDelete514) {
- t.Fatalf("level order after deleting '514' expected %v, got %v", expectedAfterDelete514, visitsAfterDelete514)
- }
+ require.Truef(t, reflect.DeepEqual(expectedAfterDelete514, visitsAfterDelete514), "level order after deleting '514' expected %v, got %v", expectedAfterDelete514, visitsAfterDelete514)
// delete the node "11"
range11 := NewInt64Interval(11, 12)
- if deleted := tr.Delete(NewInt64Interval(11, 12)); !deleted {
- t.Fatalf("range %v not deleted", range11)
- }
+ require.Truef(t, tr.Delete(NewInt64Interval(11, 12)), "range %v not deleted", range11)
expectedAfterDelete11 := []visitedInterval{
{root: NewInt64Interval(510, 511), color: black, left: NewInt64Interval(383, 384), right: NewInt64Interval(830, 831), depth: 0},
@@ -279,9 +265,7 @@ func TestIntervalTreeDelete(t *testing.T) {
{root: NewInt64Interval(953, 954), color: red, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 4},
}
visitsAfterDelete11 := tr.visitLevel()
- if !reflect.DeepEqual(expectedAfterDelete11, visitsAfterDelete11) {
- t.Fatalf("level order after deleting '11' expected %v, got %v", expectedAfterDelete11, visitsAfterDelete11)
- }
+ require.Truef(t, reflect.DeepEqual(expectedAfterDelete11, visitsAfterDelete11), "level order after deleting '11' expected %v, got %v", expectedAfterDelete11, visitsAfterDelete11)
}
func TestIntervalTreeIntersects(t *testing.T) {
@@ -325,9 +309,7 @@ func TestIntervalTreeStab(t *testing.T) {
ivt.Insert(NewStringInterval("0", "3"), 0)
tr := ivt.(*intervalTree)
- if tr.root.max.Compare(StringComparable("8")) != 0 {
- t.Fatalf("wrong root max got %v, expected 8", tr.root.max)
- }
+ require.Equalf(t, 0, tr.root.max.Compare(StringComparable("8")), "wrong root max got %v, expected 8", tr.root.max)
if x := len(ivt.Stab(NewStringPoint("0"))); x != 3 {
t.Errorf("got %d, expected 3", x)
}
@@ -361,7 +343,6 @@ func TestIntervalTreeRandom(t *testing.T) {
ivs := make(map[xy]struct{})
ivt := NewIntervalTree()
maxv := 128
- rand.Seed(time.Now().UnixNano())
for i := rand.Intn(maxv) + 1; i != 0; i-- {
x, y := int64(rand.Intn(maxv)), int64(rand.Intn(maxv))
@@ -384,12 +365,8 @@ func TestIntervalTreeRandom(t *testing.T) {
for ab := range ivs {
for xy := range ivs {
v := xy.x + int64(rand.Intn(int(xy.y-xy.x)))
- if slen := len(ivt.Stab(NewInt64Point(v))); slen == 0 {
- t.Fatalf("expected %v stab non-zero for [%+v)", v, xy)
- }
- if !ivt.Intersects(NewInt64Point(v)) {
- t.Fatalf("did not get %d as expected for [%+v)", v, xy)
- }
+ require.NotEmptyf(t, ivt.Stab(NewInt64Point(v)), "expected %v stab non-zero for [%+v)", v, xy)
+ require.Truef(t, ivt.Intersects(NewInt64Point(v)), "did not get %d as expected for [%+v)", v, xy)
}
if !ivt.Delete(NewInt64Interval(ab.x, ab.y)) {
t.Errorf("did not delete %v as expected", ab)
diff --git a/pkg/cobrautl/help.go b/pkg/cobrautl/help.go
index 44cdc9aa886..574578199e7 100644
--- a/pkg/cobrautl/help.go
+++ b/pkg/cobrautl/help.go
@@ -17,7 +17,6 @@
package cobrautl
import (
- "bytes"
"fmt"
"io"
"os"
@@ -44,6 +43,10 @@ var (
}
return strings.Join(parts, " ")
},
+ "indent": func(s string) string {
+ pad := strings.Repeat(" ", 2)
+ return pad + strings.Replace(s, "\n", "\n"+pad, -1)
+ },
}
)
@@ -52,39 +55,43 @@ func init() {
{{ $cmd := .Cmd }}\
{{ $cmdname := cmdName .Cmd .Cmd.Root }}\
NAME:
-{{ if not .Cmd.HasParent }}\
-{{printf "\t%s - %s" .Cmd.Name .Cmd.Short}}
+{{if not .Cmd.HasParent}}\
+{{printf "%s - %s" .Cmd.Name .Cmd.Short | indent}}
{{else}}\
-{{printf "\t%s - %s" $cmdname .Cmd.Short}}
+{{printf "%s - %s" $cmdname .Cmd.Short | indent}}
{{end}}\
USAGE:
-{{printf "\t%s" .Cmd.UseLine}}
+{{printf "%s" .Cmd.UseLine | indent}}
{{ if not .Cmd.HasParent }}\
VERSION:
-{{printf "\t%s" .Version}}
+{{printf "%s" .Version | indent}}
{{end}}\
{{if .Cmd.HasSubCommands}}\
API VERSION:
-{{printf "\t%s" .APIVersion}}
+{{.APIVersion | indent}}
{{end}}\
-{{if .Cmd.HasSubCommands}}\
+{{if .Cmd.HasExample}}\
+Examples:
+{{.Cmd.Example}}
+{{end}}\
+{{if .Cmd.HasSubCommands}}\
COMMANDS:
{{range .SubCommands}}\
{{ $cmdname := cmdName . $cmd }}\
{{ if .Runnable }}\
-{{printf "\t%s\t%s" $cmdname .Short}}
+{{printf "%s\t%s" $cmdname .Short | indent}}
{{end}}\
{{end}}\
{{end}}\
{{ if .Cmd.Long }}\
DESCRIPTION:
-{{range $line := descToLines .Cmd.Long}}{{printf "\t%s" $line}}
+{{range $line := descToLines .Cmd.Long}}{{printf "%s" $line | indent}}
{{end}}\
{{end}}\
{{if .Cmd.HasLocalFlags}}\
@@ -99,11 +106,11 @@ GLOBAL OPTIONS:
{{end}}
`[1:]
- commandUsageTemplate = template.Must(template.New("command_usage").Funcs(templFuncs).Parse(strings.Replace(commandUsage, "\\\n", "", -1)))
+ commandUsageTemplate = template.Must(template.New("command_usage").Funcs(templFuncs).Parse(strings.ReplaceAll(commandUsage, "\\\n", "")))
}
func etcdFlagUsages(flagSet *pflag.FlagSet) string {
- x := new(bytes.Buffer)
+ x := new(strings.Builder)
flagSet.VisitAll(func(flag *pflag.Flag) {
if len(flag.Deprecated) > 0 {
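The template rewrite above replaces hard tabs with a new `indent` helper. A standalone sketch of the same idea using text/template — the names and strings here are illustrative, not the cobrautl template itself:

```go
package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		// indent prefixes every line of s with two spaces, like the helper added above.
		"indent": func(s string) string {
			pad := strings.Repeat(" ", 2)
			return pad + strings.ReplaceAll(s, "\n", "\n"+pad)
		},
	}

	const usage = "NAME:\n{{printf \"%s - %s\" .Name .Short | indent}}\n"
	t := template.Must(template.New("usage").Funcs(funcs).Parse(usage))

	data := struct{ Name, Short string }{"etcdctl", "A simple command line client for etcd"}
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```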
diff --git a/pkg/contention/contention.go b/pkg/contention/contention.go
index 26ce9a2f347..d883eb3de9a 100644
--- a/pkg/contention/contention.go
+++ b/pkg/contention/contention.go
@@ -27,8 +27,7 @@ import (
type TimeoutDetector struct {
mu sync.Mutex // protects all
maxDuration time.Duration
- // map from event to time
- // time is the last seen time of the event.
+ // map from event to last seen time of event.
records map[uint64]time.Time
}
@@ -40,7 +39,7 @@ func NewTimeoutDetector(maxDuration time.Duration) *TimeoutDetector {
}
}
-// Reset resets the NewTimeoutDetector.
+// Reset resets the TimeoutDetector.
func (td *TimeoutDetector) Reset() {
td.mu.Lock()
defer td.mu.Unlock()
@@ -48,9 +47,11 @@ func (td *TimeoutDetector) Reset() {
td.records = make(map[uint64]time.Time)
}
-// Observe observes an event for given id. It returns false and exceeded duration
-// if the interval is longer than the expectation.
-func (td *TimeoutDetector) Observe(which uint64) (bool, time.Duration) {
+// Observe observes an event with the given id. It computes
+// the time elapsed since the previous event with the same id.
+// It returns false if that elapsed time exceeds the expectation,
+// along with the amount by which it exceeds the expectation.
+func (td *TimeoutDetector) Observe(id uint64) (bool, time.Duration) {
td.mu.Lock()
defer td.mu.Unlock()
@@ -58,12 +59,12 @@ func (td *TimeoutDetector) Observe(which uint64) (bool, time.Duration) {
now := time.Now()
exceed := time.Duration(0)
- if pt, found := td.records[which]; found {
+ if pt, found := td.records[id]; found {
exceed = now.Sub(pt) - td.maxDuration
if exceed > 0 {
ok = false
}
}
- td.records[which] = now
+ td.records[id] = now
return ok, exceed
}
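A hedged usage sketch of the renamed `Observe` parameter — how a caller would detect a delayed event; the import path is an assumption:

```go
package main

import (
	"fmt"
	"time"

	"go.etcd.io/etcd/pkg/v3/contention" // assumed import path
)

func main() {
	// Expect successive observations of the same id to arrive at most 100ms apart.
	td := contention.NewTimeoutDetector(100 * time.Millisecond)

	td.Observe(1) // the first observation only records a timestamp
	time.Sleep(150 * time.Millisecond)

	// The second observation is ~50ms late, so ok is false and exceeded reports the overshoot.
	if ok, exceeded := td.Observe(1); !ok {
		fmt.Printf("event 1 was delayed %v beyond the expectation\n", exceeded)
	}
}
```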
diff --git a/pkg/cpuutil/endian.go b/pkg/cpuutil/endian.go
index 06c06cd4a5f..d654b747664 100644
--- a/pkg/cpuutil/endian.go
+++ b/pkg/cpuutil/endian.go
@@ -19,7 +19,7 @@ import (
"unsafe"
)
-const intWidth int = int(unsafe.Sizeof(0))
+const intWidth = int(unsafe.Sizeof(0))
var byteOrder binary.ByteOrder
@@ -27,7 +27,7 @@ var byteOrder binary.ByteOrder
func ByteOrder() binary.ByteOrder { return byteOrder }
func init() {
- i := int(0x1)
+ i := 0x1
if v := (*[intWidth]byte)(unsafe.Pointer(&i)); v[0] == 0 {
byteOrder = binary.BigEndian
} else {
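The `init` above infers host endianness from the lowest-addressed byte of an int. A self-contained restatement of the trick, for illustration only:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

const intWidth = int(unsafe.Sizeof(0)) // size of int in bytes on this platform

// hostByteOrder inspects the first byte of an int set to 1:
// it is 1 on little-endian machines and 0 on big-endian machines.
func hostByteOrder() binary.ByteOrder {
	i := 0x1
	if b := (*[intWidth]byte)(unsafe.Pointer(&i)); b[0] == 0 {
		return binary.BigEndian
	}
	return binary.LittleEndian
}

func main() {
	fmt.Println("host byte order:", hostByteOrder())
}
```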
diff --git a/pkg/crc/crc_test.go b/pkg/crc/crc_test.go
index 45759640904..3c9cc3a280c 100644
--- a/pkg/crc/crc_test.go
+++ b/pkg/crc/crc_test.go
@@ -8,15 +8,16 @@ import (
"hash/crc32"
"reflect"
"testing"
+
+ "github.com/stretchr/testify/require"
)
// TestHash32 tests that Hash32 provided by this package can take an initial
// crc and behaves exactly the same as the standard one in the following calls.
func TestHash32(t *testing.T) {
stdhash := crc32.New(crc32.IEEETable)
- if _, err := stdhash.Write([]byte("test data")); err != nil {
- t.Fatalf("unexpected write error: %v", err)
- }
+ _, err := stdhash.Write([]byte("test data"))
+ require.NoErrorf(t, err, "unexpected write error: %v", err)
// create a new hash with stdhash.Sum32() as initial crc
hash := New(stdhash.Sum32(), crc32.IEEETable)
@@ -38,12 +39,10 @@ func TestHash32(t *testing.T) {
}
// write something
- if _, err := stdhash.Write([]byte("test data")); err != nil {
- t.Fatalf("unexpected write error: %v", err)
- }
- if _, err := hash.Write([]byte("test data")); err != nil {
- t.Fatalf("unexpected write error: %v", err)
- }
+ _, err = stdhash.Write([]byte("test data"))
+ require.NoErrorf(t, err, "unexpected write error: %v", err)
+ _, err = hash.Write([]byte("test data"))
+ require.NoErrorf(t, err, "unexpected write error: %v", err)
wsum32 = stdhash.Sum32()
if g := hash.Sum32(); g != wsum32 {
t.Errorf("Sum32 after write = %d, want %d", g, wsum32)
diff --git a/pkg/expect/expect.go b/pkg/expect/expect.go
index 12f95f98d9f..8aab16e52a4 100644
--- a/pkg/expect/expect.go
+++ b/pkg/expect/expect.go
@@ -13,121 +13,243 @@
// limitations under the License.
// Package expect implements a small expect-style interface
-// TODO(ptab): Consider migration to https://github.com/google/goexpect.
package expect
import (
"bufio"
+ "context"
+ "errors"
"fmt"
"io"
"os"
"os/exec"
+ "regexp"
"strings"
"sync"
"syscall"
+ "time"
"github.com/creack/pty"
)
-const DEBUG_LINES_TAIL = 40
+const debugLinesTail = 40
+
+var ErrProcessRunning = fmt.Errorf("process is still running")
+
+type ExpectedResponse struct {
+ Value string
+ IsRegularExpr bool
+}
type ExpectProcess struct {
+ cfg expectConfig
+
cmd *exec.Cmd
fpty *os.File
wg sync.WaitGroup
- cond *sync.Cond // for broadcasting updates are available
- mu sync.Mutex // protects lines and err
- lines []string
- count int // increment whenever new line gets added
- err error
+ readCloseCh chan struct{} // closed when the async read goroutine exits
- // StopSignal is the signal Stop sends to the process; defaults to SIGKILL.
- StopSignal os.Signal
+ mu sync.Mutex // protects lines, count, cur, exitErr and exitCode
+ lines []string
+ count int // increment whenever new line gets added
+ cur int // current read position
+ exitErr error // process exit error
+ exitCode int
}
// NewExpect creates a new process for expect testing.
func NewExpect(name string, arg ...string) (ep *ExpectProcess, err error) {
- // if env[] is nil, use current system env
- return NewExpectWithEnv(name, arg, nil)
+ // if env[] is nil, use the current system env; the command itself is used as the config name
+ return NewExpectWithEnv(name, arg, nil, name)
}
// NewExpectWithEnv creates a new process with user defined env variables for expect testing.
-func NewExpectWithEnv(name string, args []string, env []string) (ep *ExpectProcess, err error) {
- cmd := exec.Command(name, args...)
- cmd.Env = env
+func NewExpectWithEnv(name string, args []string, env []string, serverProcessConfigName string) (ep *ExpectProcess, err error) {
ep = &ExpectProcess{
- cmd: cmd,
- StopSignal: syscall.SIGKILL,
+ cfg: expectConfig{
+ name: serverProcessConfigName,
+ cmd: name,
+ args: args,
+ env: env,
+ },
+ readCloseCh: make(chan struct{}),
}
- ep.cond = sync.NewCond(&ep.mu)
- ep.cmd.Stderr = ep.cmd.Stdout
- ep.cmd.Stdin = nil
+ ep.cmd = commandFromConfig(ep.cfg)
if ep.fpty, err = pty.Start(ep.cmd); err != nil {
return nil, err
}
- ep.wg.Add(1)
+ ep.wg.Add(2)
go ep.read()
+ go ep.waitSaveExitErr()
return ep, nil
}
+type expectConfig struct {
+ name string
+ cmd string
+ args []string
+ env []string
+}
+
+func commandFromConfig(config expectConfig) *exec.Cmd {
+ cmd := exec.Command(config.cmd, config.args...)
+ cmd.Env = config.env
+ cmd.Stderr = cmd.Stdout
+ cmd.Stdin = nil
+ return cmd
+}
+
+func (ep *ExpectProcess) Pid() int {
+ return ep.cmd.Process.Pid
+}
+
func (ep *ExpectProcess) read() {
- defer ep.wg.Done()
- printDebugLines := os.Getenv("EXPECT_DEBUG") != ""
+ defer func() {
+ ep.wg.Done()
+ close(ep.readCloseCh)
+ }()
+ defer func(fpty *os.File) {
+ err := fpty.Close()
+ if err != nil {
+ // we deliberately only log the error here; errors from closing the PTY are mostly (expected) broken pipes
+ fmt.Printf("error while closing fpty: %v", err)
+ }
+ }(ep.fpty)
+
r := bufio.NewReader(ep.fpty)
- for ep.err == nil {
- l, rerr := r.ReadString('\n')
- ep.mu.Lock()
- ep.err = rerr
- if l != "" {
- if printDebugLines {
- fmt.Printf("%s-%d: %s", ep.cmd.Path, ep.cmd.Process.Pid, l)
- }
- ep.lines = append(ep.lines, l)
- ep.count++
- if len(ep.lines) == 1 {
- ep.cond.Signal()
- }
+ for {
+ err := ep.tryReadNextLine(r)
+ if err != nil {
+ break
}
- ep.mu.Unlock()
}
- ep.cond.Signal()
}
-// ExpectFunc returns the first line satisfying the function f.
-func (ep *ExpectProcess) ExpectFunc(f func(string) bool) (string, error) {
- lastLinesBuffer := make([]string, 0)
+func (ep *ExpectProcess) tryReadNextLine(r *bufio.Reader) error {
+ printDebugLines := os.Getenv("EXPECT_DEBUG") != ""
+ l, err := r.ReadString('\n')
+
+ ep.mu.Lock()
+ defer ep.mu.Unlock()
+
+ if l != "" {
+ if printDebugLines {
+ fmt.Printf("%s (%s) (%d): %s", ep.cmd.Path, ep.cfg.name, ep.cmd.Process.Pid, l)
+ }
+ ep.lines = append(ep.lines, l)
+ ep.count++
+ }
+
+ // we're checking the error here at the bottom to ensure any leftover reads are still taken into account
+ return err
+}
+
+func (ep *ExpectProcess) waitSaveExitErr() {
+ defer ep.wg.Done()
+ err := ep.waitProcess()
ep.mu.Lock()
+ defer ep.mu.Unlock()
+ if err != nil {
+ ep.exitErr = err
+ }
+}
+
+// ExpectFunc returns the first line satisfying the function f.
+func (ep *ExpectProcess) ExpectFunc(ctx context.Context, f func(string) bool) (string, error) {
+ i := 0
for {
- for len(ep.lines) == 0 && ep.err == nil {
- ep.cond.Wait()
+ line, errsFound := func() (string, bool) {
+ ep.mu.Lock()
+ defer ep.mu.Unlock()
+
+ // check if this expect has already been closed
+ if ep.cmd == nil {
+ return "", true
+ }
+
+ for i < len(ep.lines) {
+ line := ep.lines[i]
+ i++
+ if f(line) {
+ return line, false
+ }
+ }
+ return "", ep.exitErr != nil
+ }()
+
+ if line != "" {
+ return line, nil
}
- if len(ep.lines) == 0 {
+
+ if errsFound {
break
}
- l := ep.lines[0]
- ep.lines = ep.lines[1:]
- lastLinesBuffer = append(lastLinesBuffer, l)
- if l := len(lastLinesBuffer); l > DEBUG_LINES_TAIL {
- lastLinesBuffer = lastLinesBuffer[l-DEBUG_LINES_TAIL : l-1]
+
+ select {
+ case <-ctx.Done():
+ return "", fmt.Errorf("context done before matching log found: %w", ctx.Err())
+ case <-time.After(time.Millisecond * 10):
+ // continue loop
}
- if f(l) {
- ep.mu.Unlock()
- return l, nil
+ }
+
+ select {
+ // NOTE: we wait on readCloseCh for ep.read() to finish draining the log before acquiring the lock.
+ case <-ep.readCloseCh:
+ case <-ctx.Done():
+ return "", fmt.Errorf("context done before to found matching log")
+ }
+
+ ep.mu.Lock()
+ defer ep.mu.Unlock()
+
+ // retry the match now that all the log data has been drained
+ for i < len(ep.lines) {
+ line := ep.lines[i]
+ i++
+ if f(line) {
+ return line, nil
}
}
- ep.mu.Unlock()
- return "", fmt.Errorf("match not found."+
- " Set EXPECT_DEBUG for more info Err: %v, last lines:\n%s",
- ep.err, strings.Join(lastLinesBuffer, ""))
+
+ lastLinesIndex := len(ep.lines) - debugLinesTail
+ if lastLinesIndex < 0 {
+ lastLinesIndex = 0
+ }
+ lastLines := strings.Join(ep.lines[lastLinesIndex:], "")
+ return "", fmt.Errorf("match not found. "+
+ " Set EXPECT_DEBUG for more info Errs: [%v], last lines:\n%s",
+ ep.exitErr, lastLines)
+}
+
+// ExpectWithContext returns the first line matching the given ExpectedResponse, either as a plain substring or as a regular expression.
+func (ep *ExpectProcess) ExpectWithContext(ctx context.Context, s ExpectedResponse) (string, error) {
+ var (
+ expr *regexp.Regexp
+ err error
+ )
+ if s.IsRegularExpr {
+ expr, err = regexp.Compile(s.Value)
+ if err != nil {
+ return "", err
+ }
+ }
+ return ep.ExpectFunc(ctx, func(txt string) bool {
+ if expr != nil {
+ return expr.MatchString(txt)
+ }
+ return strings.Contains(txt, s.Value)
+ })
}
// Expect returns the first line containing the given string.
+// Deprecated: please use ExpectWithContext instead.
func (ep *ExpectProcess) Expect(s string) (string, error) {
- return ep.ExpectFunc(func(txt string) bool { return strings.Contains(txt, s) })
+ return ep.ExpectWithContext(context.Background(), ExpectedResponse{Value: s})
}
// LineCount returns the number of recorded lines since
@@ -138,42 +260,111 @@ func (ep *ExpectProcess) LineCount() int {
return ep.count
}
-// Stop kills the expect process and waits for it to exit.
-func (ep *ExpectProcess) Stop() error { return ep.close(true) }
+// ExitCode returns the exit code of this process.
+// If the process is still running, it returns exit code 0 and ErrProcessRunning.
+func (ep *ExpectProcess) ExitCode() (int, error) {
+ ep.mu.Lock()
+ defer ep.mu.Unlock()
+
+ if ep.cmd == nil {
+ return ep.exitCode, nil
+ }
+
+ if ep.exitErr != nil {
+ // If the child process panics or is killed, for instance when a
+ // go failpoint triggers the exit event, ep.cmd isn't nil and
+ // exitCode describes the case.
+ if ep.exitCode != 0 {
+ return ep.exitCode, nil
+ }
+
+ // If the wait4(2) in waitProcess returns an error, the child
+ // process might already have been reaped if SIGCHLD is handled
+ // in another goroutine. That's unlikely in this repo, but we
+ // should still return the error for logging even if the child
+ // process is still running.
+ return 0, ep.exitErr
+ }
+
+ return 0, ErrProcessRunning
+}
+
+// ExitError returns the exit error of this process (if any).
+// If the process is still running, it returns ErrProcessRunning instead.
+func (ep *ExpectProcess) ExitError() error {
+ ep.mu.Lock()
+ defer ep.mu.Unlock()
+
+ if ep.cmd == nil {
+ return ep.exitErr
+ }
+
+ return ErrProcessRunning
+}
+
+// Stop signals the process to terminate via SIGTERM
+func (ep *ExpectProcess) Stop() error {
+ err := ep.Signal(syscall.SIGTERM)
+ if err != nil && errors.Is(err, os.ErrProcessDone) {
+ return nil
+ }
+ return err
+}
// Signal sends a signal to the expect process
func (ep *ExpectProcess) Signal(sig os.Signal) error {
+ ep.mu.Lock()
+ defer ep.mu.Unlock()
+
+ if ep.cmd == nil {
+ return errors.New("expect process already closed")
+ }
+
return ep.cmd.Process.Signal(sig)
}
-// Close waits for the expect process to exit.
-// Close currently does not return error if process exited with !=0 status.
-// TODO: Close should expose underlying proces failure by default.
-func (ep *ExpectProcess) Close() error { return ep.close(false) }
+func (ep *ExpectProcess) waitProcess() error {
+ state, err := ep.cmd.Process.Wait()
+ if err != nil {
+ return err
+ }
-func (ep *ExpectProcess) close(kill bool) error {
- if ep.cmd == nil {
- return ep.err
+ ep.mu.Lock()
+ defer ep.mu.Unlock()
+ ep.exitCode = exitCode(state)
+
+ if !state.Success() {
+ return fmt.Errorf("unexpected exit code [%d] after running [%s]", ep.exitCode, ep.cmd.String())
}
- if kill {
- ep.Signal(ep.StopSignal)
+
+ return nil
+}
+
+// exitCode returns the correct exit code for a process, depending on whether it was signaled or exited normally.
+func exitCode(state *os.ProcessState) int {
+ status := state.Sys().(syscall.WaitStatus)
+
+ if status.Signaled() {
+ return 128 + int(status.Signal())
}
+ return status.ExitStatus()
+}
- err := ep.cmd.Wait()
- ep.fpty.Close()
+// Wait waits for the process to finish.
+func (ep *ExpectProcess) Wait() {
ep.wg.Wait()
+}
- if err != nil {
- if !kill && strings.Contains(err.Error(), "exit status") {
- // non-zero exit code
- err = nil
- } else if kill && strings.Contains(err.Error(), "signal:") {
- err = nil
- }
- }
+// Close waits for the expect process to exit and returns its exit error, if any.
+func (ep *ExpectProcess) Close() error {
+ ep.wg.Wait()
+ ep.mu.Lock()
+ defer ep.mu.Unlock()
+
+ // this signals to other funcs that the process has finished
ep.cmd = nil
- return err
+ return ep.exitErr
}
func (ep *ExpectProcess) Send(command string) error {
@@ -181,11 +372,20 @@ func (ep *ExpectProcess) Send(command string) error {
return err
}
-func (ep *ExpectProcess) ProcessError() error {
- if strings.Contains(ep.err.Error(), "input/output error") {
- // TODO: The expect library should not return
- // `/dev/ptmx: input/output error` when process just exits.
- return nil
+func (ep *ExpectProcess) Lines() []string {
+ ep.mu.Lock()
+ defer ep.mu.Unlock()
+ return ep.lines
+}
+
+// ReadLine returns the next unread line, or an empty string if no new line is available.
+func (ep *ExpectProcess) ReadLine() string {
+ ep.mu.Lock()
+ defer ep.mu.Unlock()
+ if ep.count > ep.cur {
+ line := ep.lines[ep.cur]
+ ep.cur++
+ return line
}
- return ep.err
+ return ""
}
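Pulling the reworked expect API together — a hedged usage sketch of ExpectWithContext with a regular expression, plus Stop, Close, and ExitCode; the import path and the echoed text are assumptions:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/pkg/v3/expect" // assumed import path
)

func main() {
	ep, err := expect.NewExpect("echo", "etcd Version: 3.6.0")
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Match the first output line against a regular expression instead of a plain substring.
	line, err := ep.ExpectWithContext(ctx, expect.ExpectedResponse{
		Value:         `Version:\s+\d+\.\d+\.\d+`,
		IsRegularExpr: true,
	})
	if err != nil {
		fmt.Println("no match:", err)
	} else {
		fmt.Print("matched: ", line)
	}

	// Stop sends SIGTERM (a no-op if the process already exited);
	// Close waits for the reader goroutines and returns the exit error, if any.
	_ = ep.Stop()
	if cerr := ep.Close(); cerr != nil {
		fmt.Println("close:", cerr)
	}

	// Signaled processes report 128 + signal number as their exit code,
	// e.g. SIGTERM (15) -> 143, SIGKILL (9) -> 137, SIGINT (2) -> 130.
	code, _ := ep.ExitCode()
	fmt.Println("exit code:", code)
}
```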
diff --git a/pkg/expect/expect_test.go b/pkg/expect/expect_test.go
index c5ed18ec60b..3d621b9aacb 100644
--- a/pkg/expect/expect_test.go
+++ b/pkg/expect/expect_test.go
@@ -17,100 +17,131 @@
package expect
import (
+ "context"
"os"
+ "strings"
"testing"
"time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestExpectFunc(t *testing.T) {
- ep, err := NewExpect("/bin/echo", "hello world")
- if err != nil {
- t.Fatal(err)
- }
+ ep, err := NewExpect("echo", "hello world")
+ require.NoError(t, err)
wstr := "hello world\r\n"
- l, eerr := ep.ExpectFunc(func(a string) bool { return len(a) > 10 })
- if eerr != nil {
- t.Fatal(eerr)
- }
- if l != wstr {
- t.Fatalf(`got "%v", expected "%v"`, l, wstr)
- }
- if cerr := ep.Close(); cerr != nil {
- t.Fatal(cerr)
- }
+ l, eerr := ep.ExpectFunc(context.Background(), func(a string) bool { return len(a) > 10 })
+ require.NoError(t, eerr)
+ require.Equalf(t, l, wstr, `got "%v", expected "%v"`, l, wstr)
+ require.NoError(t, ep.Close())
+}
+
+func TestExpectFuncTimeout(t *testing.T) {
+ ep, err := NewExpect("tail", "-f", "/dev/null")
+ require.NoError(t, err)
+ go func() {
+ // A "talkative" process is enough to keep the reader stuck in an infinite read loop
+ for {
+ if serr := ep.Send("new line\n"); serr != nil {
+ return
+ }
+ }
+ }()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
+ defer cancel()
+
+ _, err = ep.ExpectFunc(ctx, func(a string) bool { return false })
+
+ require.ErrorIs(t, err, context.DeadlineExceeded)
+
+ require.NoError(t, ep.Stop())
+ require.ErrorContains(t, ep.Close(), "unexpected exit code [143]")
+ require.Equal(t, 143, ep.exitCode)
+}
+
+func TestExpectFuncExitFailure(t *testing.T) {
+ // the -x flag does not exist, so tail should fail with a non-zero exit code
+ ep, err := NewExpect("tail", "-x")
+ require.NoError(t, err)
+
+ ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
+ defer cancel()
+
+ _, err = ep.ExpectFunc(ctx, func(s string) bool {
+ return strings.Contains(s, "something entirely unexpected")
+ })
+ require.ErrorContains(t, err, "unexpected exit code [1]")
+ require.Equal(t, 1, ep.exitCode)
+}
+
+func TestExpectFuncExitFailureStop(t *testing.T) {
+ // the -x flag does not exist, so tail should fail with a non-zero exit code
+ ep, err := NewExpect("tail", "-x")
+ require.NoError(t, err)
+
+ ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
+ defer cancel()
+
+ _, err = ep.ExpectFunc(ctx, func(s string) bool {
+ return strings.Contains(s, "something entirely unexpected")
+ })
+ require.ErrorContains(t, err, "unexpected exit code [1]")
+ exitCode, err := ep.ExitCode()
+ require.Equal(t, 1, exitCode)
+ require.NoError(t, err)
+ require.NoError(t, ep.Stop())
+ require.ErrorContains(t, ep.Close(), "unexpected exit code [1]")
+ exitCode, err = ep.ExitCode()
+ require.Equal(t, 1, exitCode)
+ require.NoError(t, err)
}
func TestEcho(t *testing.T) {
- ep, err := NewExpect("/bin/echo", "hello world")
- if err != nil {
- t.Fatal(err)
- }
- l, eerr := ep.Expect("world")
- if eerr != nil {
- t.Fatal(eerr)
- }
+ ep, err := NewExpect("echo", "hello world")
+ require.NoError(t, err)
+ ctx := context.Background()
+ l, eerr := ep.ExpectWithContext(ctx, ExpectedResponse{Value: "world"})
+ require.NoError(t, eerr)
wstr := "hello world"
- if l[:len(wstr)] != wstr {
- t.Fatalf(`got "%v", expected "%v"`, l, wstr)
- }
- if cerr := ep.Close(); cerr != nil {
- t.Fatal(cerr)
- }
- if _, eerr = ep.Expect("..."); eerr == nil {
- t.Fatalf("expected error on closed expect process")
- }
+ require.Equalf(t, l[:len(wstr)], wstr, `got "%v", expected "%v"`, l, wstr)
+ require.NoError(t, ep.Close())
+ _, eerr = ep.ExpectWithContext(ctx, ExpectedResponse{Value: "..."})
+ require.Errorf(t, eerr, "expected error on closed expect process")
}
func TestLineCount(t *testing.T) {
- ep, err := NewExpect("/usr/bin/printf", "1\n2\n3")
- if err != nil {
- t.Fatal(err)
- }
+ ep, err := NewExpect("printf", "1\n2\n3")
+ require.NoError(t, err)
wstr := "3"
- l, eerr := ep.Expect(wstr)
- if eerr != nil {
- t.Fatal(eerr)
- }
- if l != wstr {
- t.Fatalf(`got "%v", expected "%v"`, l, wstr)
- }
- if ep.LineCount() != 3 {
- t.Fatalf("got %d, expected 3", ep.LineCount())
- }
- if cerr := ep.Close(); cerr != nil {
- t.Fatal(cerr)
- }
+ l, eerr := ep.ExpectWithContext(context.Background(), ExpectedResponse{Value: wstr})
+ require.NoError(t, eerr)
+ require.Equalf(t, l, wstr, `got "%v", expected "%v"`, l, wstr)
+ require.Equalf(t, 3, ep.LineCount(), "got %d, expected 3", ep.LineCount())
+ require.NoError(t, ep.Close())
}
func TestSend(t *testing.T) {
- ep, err := NewExpect("/usr/bin/tr", "a", "b")
- if err != nil {
- t.Fatal(err)
- }
- if err := ep.Send("a\r"); err != nil {
- t.Fatal(err)
- }
- if _, err := ep.Expect("b"); err != nil {
- t.Fatal(err)
- }
- if err := ep.Stop(); err != nil {
- t.Fatal(err)
- }
+ ep, err := NewExpect("tr", "a", "b")
+ require.NoError(t, err)
+ err = ep.Send("a\r")
+ require.NoError(t, err)
+ _, err = ep.ExpectWithContext(context.Background(), ExpectedResponse{Value: "b"})
+ require.NoError(t, err)
+ require.NoError(t, ep.Stop())
}
func TestSignal(t *testing.T) {
- ep, err := NewExpect("/bin/sleep", "100")
- if err != nil {
- t.Fatal(err)
- }
+ ep, err := NewExpect("sleep", "100")
+ require.NoError(t, err)
ep.Signal(os.Interrupt)
donec := make(chan struct{})
go func() {
defer close(donec)
- werr := "signal: interrupt"
- if cerr := ep.Close(); cerr == nil || cerr.Error() != werr {
- t.Errorf("got error %v, wanted error %s", cerr, werr)
- }
+ err = ep.Close()
+ assert.ErrorContains(t, err, "unexpected exit code [130]")
+ assert.ErrorContains(t, err, "sleep 100")
}()
select {
case <-time.After(5 * time.Second):
@@ -118,3 +149,76 @@ func TestSignal(t *testing.T) {
case <-donec:
}
}
+
+func TestExitCodeAfterKill(t *testing.T) {
+ ep, err := NewExpect("sleep", "100")
+ require.NoError(t, err)
+
+ ep.Signal(os.Kill)
+ ep.Wait()
+ code, err := ep.ExitCode()
+ assert.Equal(t, 137, code)
+ assert.NoError(t, err)
+}
+
+func TestExpectForFailFastCommand(t *testing.T) {
+ ep, err := NewExpect("sh", "-c", `echo "curl: (59) failed setting cipher list"; exit 59`)
+ require.NoError(t, err)
+
+ _, err = ep.Expect("failed setting cipher list")
+ require.NoError(t, err)
+}
+
+func TestResponseMatchRegularExpr(t *testing.T) {
+ testCases := []struct {
+ name string
+ mockOutput string
+ expectedResp ExpectedResponse
+ expectMatch bool
+ }{
+ {
+ name: "exact match",
+ mockOutput: "hello world",
+ expectedResp: ExpectedResponse{Value: "hello world"},
+ expectMatch: true,
+ },
+ {
+ name: "not exact match",
+ mockOutput: "hello world",
+ expectedResp: ExpectedResponse{Value: "hello wld"},
+ expectMatch: false,
+ },
+ {
+ name: "match regular expression",
+ mockOutput: "hello world",
+ expectedResp: ExpectedResponse{Value: `.*llo\sworld`, IsRegularExpr: true},
+ expectMatch: true,
+ },
+ {
+ name: "not match regular expression",
+ mockOutput: "hello world",
+ expectedResp: ExpectedResponse{Value: `.*llo wrld`, IsRegularExpr: true},
+ expectMatch: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ ep, err := NewExpect("echo", "-n", tc.mockOutput)
+ require.NoError(t, err)
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ l, err := ep.ExpectWithContext(ctx, tc.expectedResp)
+
+ if tc.expectMatch {
+ require.Equal(t, tc.mockOutput, l)
+ } else {
+ require.Error(t, err)
+ }
+
+ require.NoError(t, ep.Close())
+ })
+ }
+}
diff --git a/pkg/featuregate/feature_gate.go b/pkg/featuregate/feature_gate.go
new file mode 100644
index 00000000000..cb77017b5a7
--- /dev/null
+++ b/pkg/featuregate/feature_gate.go
@@ -0,0 +1,420 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package featuregate is copied from k8s.io/component-base@v0.30.1 to avoid any potential circular dependency between k8s and etcd.
+package featuregate
+
+import (
+ "flag"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "github.com/spf13/pflag"
+ "go.uber.org/zap"
+)
+
+type Feature string
+
+const (
+ defaultFlagName = "feature-gates"
+
+ // allAlphaGate is a global toggle for alpha features. Per-feature key
+ // values override the default set by allAlphaGate. Examples:
+ // AllAlpha=false,NewFeature=true will result in NewFeature=true
+ // AllAlpha=true,NewFeature=false will result in NewFeature=false
+ allAlphaGate Feature = "AllAlpha"
+
+ // allBetaGate is a global toggle for beta features. Per-feature key
+ // values override the default set by allBetaGate. Examples:
+ // AllBeta=false,NewFeature=true will result in NewFeature=true
+ // AllBeta=true,NewFeature=false will result in NewFeature=false
+ allBetaGate Feature = "AllBeta"
+)
+
+var (
+ // The generic features.
+ defaultFeatures = map[Feature]FeatureSpec{
+ allAlphaGate: {Default: false, PreRelease: Alpha},
+ allBetaGate: {Default: false, PreRelease: Beta},
+ }
+
+ // Special handling for a few gates.
+ specialFeatures = map[Feature]func(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool){
+ allAlphaGate: setUnsetAlphaGates,
+ allBetaGate: setUnsetBetaGates,
+ }
+)
+
+type FeatureSpec struct {
+ // Default is the default enablement state for the feature
+ Default bool
+ // LockToDefault indicates that the feature is locked to its default and cannot be changed
+ LockToDefault bool
+ // PreRelease indicates the maturity level of the feature
+ PreRelease prerelease
+}
+
+type prerelease string
+
+const (
+ // Values for PreRelease.
+ Alpha = prerelease("ALPHA")
+ Beta = prerelease("BETA")
+ GA = prerelease("")
+
+ // Deprecated
+ Deprecated = prerelease("DEPRECATED")
+)
+
+// FeatureGate indicates whether a given feature is enabled or not
+type FeatureGate interface {
+ // Enabled returns true if the key is enabled.
+ Enabled(key Feature) bool
+ // KnownFeatures returns a slice of strings describing the FeatureGate's known features.
+ KnownFeatures() []string
+ // DeepCopy returns a deep copy of the FeatureGate object, such that gates can be
+ // set on the copy without mutating the original. This is useful for validating
+ // config against potential feature gate changes before committing those changes.
+ DeepCopy() MutableFeatureGate
+ // String returns a string containing all enabled feature gates, formatted as "key1=value1,key2=value2,...".
+ String() string
+}
+
+// MutableFeatureGate parses and stores flag gates for known features from
+// a string like feature1=true,feature2=false,...
+type MutableFeatureGate interface {
+ FeatureGate
+
+ // AddFlag adds a flag for setting global feature gates to the specified FlagSet.
+ AddFlag(fs *flag.FlagSet, flagName string)
+ // Set parses and stores flag gates for known features
+ // from a string like feature1=true,feature2=false,...
+ Set(value string) error
+ // SetFromMap stores flag gates for known features from a map[string]bool or returns an error
+ SetFromMap(m map[string]bool) error
+ // Add adds features to the featureGate.
+ Add(features map[Feature]FeatureSpec) error
+ // GetAll returns a copy of the map of known feature names to feature specs.
+ GetAll() map[Feature]FeatureSpec
+ // AddMetrics adds feature enablement metrics
+ AddMetrics()
+ // OverrideDefault sets a local override for the registered default value of a named
+ // feature. If the feature has not been previously registered (e.g. by a call to Add), has a
+ // locked default, or if the gate has already registered itself with a FlagSet, a non-nil
+ // error is returned.
+ //
+ // When two or more components consume a common feature, one component can override its
+ // default at runtime in order to adopt new defaults before or after the other
+ // components. For example, a new feature can be evaluated with a limited blast radius by
+ // overriding its default to true for a limited number of components without simultaneously
+ // changing its default for all consuming components.
+ OverrideDefault(name Feature, override bool) error
+}
+
+// featureGate implements FeatureGate as well as pflag.Value for flag parsing.
+type featureGate struct {
+ lg *zap.Logger
+
+ featureGateName string
+
+ special map[Feature]func(map[Feature]FeatureSpec, map[Feature]bool, bool)
+
+ // lock guards writes to known, enabled, and reads/writes of closed
+ lock sync.Mutex
+ // known holds a map[Feature]FeatureSpec
+ known atomic.Value
+ // enabled holds a map[Feature]bool
+ enabled atomic.Value
+ // closed is set to true when AddFlag is called, and prevents subsequent calls to Add
+ closed bool
+}
+
+func setUnsetAlphaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) {
+ for k, v := range known {
+ if v.PreRelease == Alpha {
+ if _, found := enabled[k]; !found {
+ enabled[k] = val
+ }
+ }
+ }
+}
+
+func setUnsetBetaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) {
+ for k, v := range known {
+ if v.PreRelease == Beta {
+ if _, found := enabled[k]; !found {
+ enabled[k] = val
+ }
+ }
+ }
+}
+
+// Set, String, and Type implement pflag.Value
+var _ pflag.Value = &featureGate{}
+
+func New(name string, lg *zap.Logger) *featureGate {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ known := map[Feature]FeatureSpec{}
+ for k, v := range defaultFeatures {
+ known[k] = v
+ }
+
+ f := &featureGate{
+ lg: lg,
+ featureGateName: name,
+ special: specialFeatures,
+ }
+ f.known.Store(known)
+ f.enabled.Store(map[Feature]bool{})
+
+ return f
+}
+
+// Set parses a string of the form "key1=value1,key2=value2,..." into a
+// map[string]bool of known keys or returns an error.
+func (f *featureGate) Set(value string) error {
+ m := make(map[string]bool)
+ for _, s := range strings.Split(value, ",") {
+ if len(s) == 0 {
+ continue
+ }
+ arr := strings.SplitN(s, "=", 2)
+ k := strings.TrimSpace(arr[0])
+ if len(arr) != 2 {
+ return fmt.Errorf("missing bool value for %s", k)
+ }
+ v := strings.TrimSpace(arr[1])
+ boolValue, err := strconv.ParseBool(v)
+ if err != nil {
+ return fmt.Errorf("invalid value of %s=%s, err: %w", k, v, err)
+ }
+ m[k] = boolValue
+ }
+ return f.SetFromMap(m)
+}
+
+// SetFromMap stores flag gates for known features from a map[string]bool or returns an error
+func (f *featureGate) SetFromMap(m map[string]bool) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ // Copy existing state
+ known := map[Feature]FeatureSpec{}
+ for k, v := range f.known.Load().(map[Feature]FeatureSpec) {
+ known[k] = v
+ }
+ enabled := map[Feature]bool{}
+ for k, v := range f.enabled.Load().(map[Feature]bool) {
+ enabled[k] = v
+ }
+
+ for k, v := range m {
+ k := Feature(k)
+ featureSpec, ok := known[k]
+ if !ok {
+ return fmt.Errorf("unrecognized feature gate: %s", k)
+ }
+ if featureSpec.LockToDefault && featureSpec.Default != v {
+ return fmt.Errorf("cannot set feature gate %v to %v, feature is locked to %v", k, v, featureSpec.Default)
+ }
+ enabled[k] = v
+ // Handle "special" features like "all alpha gates"
+ if fn, found := f.special[k]; found {
+ fn(known, enabled, v)
+ }
+
+ if featureSpec.PreRelease == Deprecated {
+ f.lg.Warn(fmt.Sprintf("Setting deprecated feature gate %s=%t. It will be removed in a future release.", k, v))
+ } else if featureSpec.PreRelease == GA {
+ f.lg.Warn(fmt.Sprintf("Setting GA feature gate %s=%t. It will be removed in a future release.", k, v))
+ }
+ }
+
+ // Persist changes
+ f.known.Store(known)
+ f.enabled.Store(enabled)
+
+ f.lg.Info(fmt.Sprintf("feature gates: %v", f.enabled))
+ return nil
+}
+
+// String returns a string containing all enabled feature gates, formatted as "key1=value1,key2=value2,...".
+func (f *featureGate) String() string {
+ pairs := []string{}
+ for k, v := range f.enabled.Load().(map[Feature]bool) {
+ pairs = append(pairs, fmt.Sprintf("%s=%t", k, v))
+ }
+ sort.Strings(pairs)
+ return strings.Join(pairs, ",")
+}
+
+func (f *featureGate) Type() string {
+ return "mapStringBool"
+}
+
+// Add adds features to the featureGate.
+func (f *featureGate) Add(features map[Feature]FeatureSpec) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.closed {
+ return fmt.Errorf("cannot add a feature gate after adding it to the flag set")
+ }
+
+ // Copy existing state
+ known := map[Feature]FeatureSpec{}
+ for k, v := range f.known.Load().(map[Feature]FeatureSpec) {
+ known[k] = v
+ }
+
+ for name, spec := range features {
+ if existingSpec, found := known[name]; found {
+ if existingSpec == spec {
+ continue
+ }
+ return fmt.Errorf("feature gate %q with different spec already exists: %v", name, existingSpec)
+ }
+
+ known[name] = spec
+ }
+
+ // Persist updated state
+ f.known.Store(known)
+
+ return nil
+}
+
+func (f *featureGate) OverrideDefault(name Feature, override bool) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.closed {
+ return fmt.Errorf("cannot override default for feature %q: gates already added to a flag set", name)
+ }
+
+ known := map[Feature]FeatureSpec{}
+ for name, spec := range f.known.Load().(map[Feature]FeatureSpec) {
+ known[name] = spec
+ }
+
+ spec, ok := known[name]
+ switch {
+ case !ok:
+ return fmt.Errorf("cannot override default: feature %q is not registered", name)
+ case spec.LockToDefault:
+ return fmt.Errorf("cannot override default: feature %q default is locked to %t", name, spec.Default)
+ case spec.PreRelease == Deprecated:
+ f.lg.Warn(fmt.Sprintf("Overriding default of deprecated feature gate %s=%t. It will be removed in a future release.", name, override))
+ case spec.PreRelease == GA:
+ f.lg.Warn(fmt.Sprintf("Overriding default of GA feature gate %s=%t. It will be removed in a future release.", name, override))
+ }
+
+ spec.Default = override
+ known[name] = spec
+ f.known.Store(known)
+
+ return nil
+}
+
+// GetAll returns a copy of the map of known feature names to feature specs.
+func (f *featureGate) GetAll() map[Feature]FeatureSpec {
+ retval := map[Feature]FeatureSpec{}
+ for k, v := range f.known.Load().(map[Feature]FeatureSpec) {
+ retval[k] = v
+ }
+ return retval
+}
+
+// Enabled returns true if the key is enabled. If the key is not known, this call will panic.
+func (f *featureGate) Enabled(key Feature) bool {
+ if v, ok := f.enabled.Load().(map[Feature]bool)[key]; ok {
+ return v
+ }
+ if v, ok := f.known.Load().(map[Feature]FeatureSpec)[key]; ok {
+ return v.Default
+ }
+
+ panic(fmt.Errorf("feature %q is not registered in FeatureGate %q", key, f.featureGateName))
+}
+
+// AddFlag adds a flag for setting global feature gates to the specified FlagSet.
+func (f *featureGate) AddFlag(fs *flag.FlagSet, flagName string) {
+ if flagName == "" {
+ flagName = defaultFlagName
+ }
+ f.lock.Lock()
+ // TODO(mtaufen): Shouldn't we just close it on the first Set/SetFromMap instead?
+ // Not all components expose a feature gates flag using this AddFlag method, and
+ // in the future, all components will completely stop exposing a feature gates flag,
+ // in favor of componentconfig.
+ f.closed = true
+ f.lock.Unlock()
+
+ known := f.KnownFeatures()
+ fs.Var(f, flagName, ""+
+ "A set of key=value pairs that describe feature gates for alpha/experimental features. "+
+ "Options are:\n"+strings.Join(known, "\n"))
+}
+
+func (f *featureGate) AddMetrics() {
+ // TODO(henrybear327): implement this.
+}
+
+// KnownFeatures returns a slice of strings describing the FeatureGate's known features.
+// Deprecated and GA features are hidden from the list.
+func (f *featureGate) KnownFeatures() []string {
+ var known []string
+ for k, v := range f.known.Load().(map[Feature]FeatureSpec) {
+ if v.PreRelease == GA || v.PreRelease == Deprecated {
+ continue
+ }
+ known = append(known, fmt.Sprintf("%s=true|false (%s - default=%t)", k, v.PreRelease, v.Default))
+ }
+ sort.Strings(known)
+ return known
+}
+
+// DeepCopy returns a deep copy of the FeatureGate object, such that gates can be
+// set on the copy without mutating the original. This is useful for validating
+// config against potential feature gate changes before committing those changes.
+func (f *featureGate) DeepCopy() MutableFeatureGate {
+ // Copy existing state.
+ known := map[Feature]FeatureSpec{}
+ for k, v := range f.known.Load().(map[Feature]FeatureSpec) {
+ known[k] = v
+ }
+ enabled := map[Feature]bool{}
+ for k, v := range f.enabled.Load().(map[Feature]bool) {
+ enabled[k] = v
+ }
+
+ // Construct a new featureGate around the copied state.
+ // Note that specialFeatures is treated as immutable by convention,
+ // and we maintain the value of f.closed across the copy.
+ fg := &featureGate{
+ special: specialFeatures,
+ closed: f.closed,
+ }
+
+ fg.known.Store(known)
+ fg.enabled.Store(enabled)
+
+ return fg
+}
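A hedged sketch of wiring the new feature gate into a flag set; `SampleAlphaFeature` and the import path are made up for illustration:

```go
package main

import (
	"flag"
	"fmt"

	"go.uber.org/zap"

	"go.etcd.io/etcd/pkg/v3/featuregate" // assumed import path
)

// SampleAlphaFeature is a hypothetical gate, not a real etcd feature.
const SampleAlphaFeature featuregate.Feature = "SampleAlphaFeature"

func main() {
	fg := featuregate.New("etcd", zap.NewNop())

	// Register gates before exposing the flag; Add fails once AddFlag has been called.
	if err := fg.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		SampleAlphaFeature: {Default: false, PreRelease: featuregate.Alpha},
	}); err != nil {
		panic(err)
	}

	fs := flag.NewFlagSet("etcd", flag.ExitOnError)
	fg.AddFlag(fs, "feature-gates")

	// Per-feature settings override the AllAlpha/AllBeta toggles, in either order.
	if err := fs.Parse([]string{"--feature-gates=AllAlpha=true,SampleAlphaFeature=false"}); err != nil {
		panic(err)
	}

	fmt.Println("SampleAlphaFeature enabled:", fg.Enabled(SampleAlphaFeature))
	fmt.Println("gates:", fg.String())
}
```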
diff --git a/pkg/featuregate/feature_gate_test.go b/pkg/featuregate/feature_gate_test.go
new file mode 100644
index 00000000000..7411f363fa3
--- /dev/null
+++ b/pkg/featuregate/feature_gate_test.go
@@ -0,0 +1,613 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package featuregate
+
+import (
+ "flag"
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/zap/zaptest"
+)
+
+func TestFeatureGateFlag(t *testing.T) {
+ // gates for testing
+ const testAlphaGate Feature = "TestAlpha"
+ const testBetaGate Feature = "TestBeta"
+
+ tests := []struct {
+ arg string
+ expect map[Feature]bool
+ parseError string
+ }{
+ {
+ arg: "",
+ expect: map[Feature]bool{
+ allAlphaGate: false,
+ allBetaGate: false,
+ testAlphaGate: false,
+ testBetaGate: false,
+ },
+ },
+ {
+ arg: "fooBarBaz=true",
+ expect: map[Feature]bool{
+ allAlphaGate: false,
+ allBetaGate: false,
+ testAlphaGate: false,
+ testBetaGate: false,
+ },
+ parseError: "unrecognized feature gate: fooBarBaz",
+ },
+ {
+ arg: "AllAlpha=false",
+ expect: map[Feature]bool{
+ allAlphaGate: false,
+ allBetaGate: false,
+ testAlphaGate: false,
+ testBetaGate: false,
+ },
+ },
+ {
+ arg: "AllAlpha=true",
+ expect: map[Feature]bool{
+ allAlphaGate: true,
+ allBetaGate: false,
+ testAlphaGate: true,
+ testBetaGate: false,
+ },
+ },
+ {
+ arg: "AllAlpha=banana",
+ expect: map[Feature]bool{
+ allAlphaGate: false,
+ allBetaGate: false,
+ testAlphaGate: false,
+ testBetaGate: false,
+ },
+ parseError: "invalid value of AllAlpha",
+ },
+ {
+ arg: "AllAlpha=false,TestAlpha=true",
+ expect: map[Feature]bool{
+ allAlphaGate: false,
+ allBetaGate: false,
+ testAlphaGate: true,
+ testBetaGate: false,
+ },
+ },
+ {
+ arg: "TestAlpha=true,AllAlpha=false",
+ expect: map[Feature]bool{
+ allAlphaGate: false,
+ allBetaGate: false,
+ testAlphaGate: true,
+ testBetaGate: false,
+ },
+ },
+ {
+ arg: "AllAlpha=true,TestAlpha=false",
+ expect: map[Feature]bool{
+ allAlphaGate: true,
+ allBetaGate: false,
+ testAlphaGate: false,
+ testBetaGate: false,
+ },
+ },
+ {
+ arg: "TestAlpha=false,AllAlpha=true",
+ expect: map[Feature]bool{
+ allAlphaGate: true,
+ allBetaGate: false,
+ testAlphaGate: false,
+ testBetaGate: false,
+ },
+ },
+ {
+ arg: "TestBeta=true,AllAlpha=false",
+ expect: map[Feature]bool{
+ allAlphaGate: false,
+ allBetaGate: false,
+ testAlphaGate: false,
+ testBetaGate: true,
+ },
+ },
+
+ {
+ arg: "AllBeta=false",
+ expect: map[Feature]bool{
+ allAlphaGate: false,
+ allBetaGate: false,
+ testAlphaGate: false,
+ testBetaGate: false,
+ },
+ },
+ {
+ arg: "AllBeta=true",
+ expect: map[Feature]bool{
+ allAlphaGate: false,
+ allBetaGate: true,
+ testAlphaGate: false,
+ testBetaGate: true,
+ },
+ },
+ {
+ arg: "AllBeta=banana",
+ expect: map[Feature]bool{
+ allAlphaGate: false,
+ allBetaGate: false,
+ testAlphaGate: false,
+ testBetaGate: false,
+ },
+ parseError: "invalid value of AllBeta",
+ },
+ {
+ arg: "AllBeta=false,TestBeta=true",
+ expect: map[Feature]bool{
+ allAlphaGate: false,
+ allBetaGate: false,
+ testAlphaGate: false,
+ testBetaGate: true,
+ },
+ },
+ {
+ arg: "TestBeta=true,AllBeta=false",
+ expect: map[Feature]bool{
+ allAlphaGate: false,
+ allBetaGate: false,
+ testAlphaGate: false,
+ testBetaGate: true,
+ },
+ },
+ {
+ arg: "AllBeta=true,TestBeta=false",
+ expect: map[Feature]bool{
+ allAlphaGate: false,
+ allBetaGate: true,
+ testAlphaGate: false,
+ testBetaGate: false,
+ },
+ },
+ {
+ arg: "TestBeta=false,AllBeta=true",
+ expect: map[Feature]bool{
+ allAlphaGate: false,
+ allBetaGate: true,
+ testAlphaGate: false,
+ testBetaGate: false,
+ },
+ },
+ {
+ arg: "TestAlpha=true,AllBeta=false",
+ expect: map[Feature]bool{
+ allAlphaGate: false,
+ allBetaGate: false,
+ testAlphaGate: true,
+ testBetaGate: false,
+ },
+ },
+ }
+ for i, test := range tests {
+ t.Run(test.arg, func(t *testing.T) {
+ fs := flag.NewFlagSet("testfeaturegateflag", flag.ContinueOnError)
+ f := New("test", zaptest.NewLogger(t))
+ f.Add(map[Feature]FeatureSpec{
+ testAlphaGate: {Default: false, PreRelease: Alpha},
+ testBetaGate: {Default: false, PreRelease: Beta},
+ })
+ f.AddFlag(fs, defaultFlagName)
+
+ err := fs.Parse([]string{fmt.Sprintf("--%s=%s", defaultFlagName, test.arg)})
+ if test.parseError != "" {
+ if !strings.Contains(err.Error(), test.parseError) {
+ t.Errorf("%d: Parse() Expected %v, Got %v", i, test.parseError, err)
+ }
+ } else if err != nil {
+ t.Errorf("%d: Parse() Expected nil, Got %v", i, err)
+ }
+ for k, v := range test.expect {
+ if actual := f.enabled.Load().(map[Feature]bool)[k]; actual != v {
+ t.Errorf("%d: expected %s=%v, Got %v", i, k, v, actual)
+ }
+ }
+ })
+ }
+}
+
+func TestFeatureGateOverride(t *testing.T) {
+ const testAlphaGate Feature = "TestAlpha"
+ const testBetaGate Feature = "TestBeta"
+
+ // Don't parse the flag, assert defaults are used.
+ f := New("test", zaptest.NewLogger(t))
+ f.Add(map[Feature]FeatureSpec{
+ testAlphaGate: {Default: false, PreRelease: Alpha},
+ testBetaGate: {Default: false, PreRelease: Beta},
+ })
+
+ f.Set("TestAlpha=true,TestBeta=true")
+ if f.Enabled(testAlphaGate) != true {
+ t.Errorf("Expected true")
+ }
+ if f.Enabled(testBetaGate) != true {
+ t.Errorf("Expected true")
+ }
+
+ f.Set("TestAlpha=false")
+ if f.Enabled(testAlphaGate) != false {
+ t.Errorf("Expected false")
+ }
+ if f.Enabled(testBetaGate) != true {
+ t.Errorf("Expected true")
+ }
+}
+
+func TestFeatureGateFlagDefaults(t *testing.T) {
+ // gates for testing
+ const testAlphaGate Feature = "TestAlpha"
+ const testBetaGate Feature = "TestBeta"
+
+ // Don't parse the flag, assert defaults are used.
+ f := New("test", zaptest.NewLogger(t))
+ f.Add(map[Feature]FeatureSpec{
+ testAlphaGate: {Default: false, PreRelease: Alpha},
+ testBetaGate: {Default: true, PreRelease: Beta},
+ })
+
+ if f.Enabled(testAlphaGate) != false {
+ t.Errorf("Expected false")
+ }
+ if f.Enabled(testBetaGate) != true {
+ t.Errorf("Expected true")
+ }
+}
+
+func TestFeatureGateKnownFeatures(t *testing.T) {
+ // gates for testing
+ const (
+ testAlphaGate Feature = "TestAlpha"
+ testBetaGate Feature = "TestBeta"
+ testGAGate Feature = "TestGA"
+ testDeprecatedGate Feature = "TestDeprecated"
+ )
+
+ // Don't parse the flag, assert defaults are used.
+ f := New("test", zaptest.NewLogger(t))
+ f.Add(map[Feature]FeatureSpec{
+ testAlphaGate: {Default: false, PreRelease: Alpha},
+ testBetaGate: {Default: true, PreRelease: Beta},
+ testGAGate: {Default: true, PreRelease: GA},
+ testDeprecatedGate: {Default: false, PreRelease: Deprecated},
+ })
+
+ known := strings.Join(f.KnownFeatures(), " ")
+
+ assert.Contains(t, known, testAlphaGate)
+ assert.Contains(t, known, testBetaGate)
+ assert.NotContains(t, known, testGAGate)
+ assert.NotContains(t, known, testDeprecatedGate)
+}
+
+func TestFeatureGateSetFromMap(t *testing.T) {
+ // gates for testing
+ const testAlphaGate Feature = "TestAlpha"
+ const testBetaGate Feature = "TestBeta"
+ const testLockedTrueGate Feature = "TestLockedTrue"
+ const testLockedFalseGate Feature = "TestLockedFalse"
+
+ tests := []struct {
+ name string
+ setmap map[string]bool
+ expect map[Feature]bool
+ setmapError string
+ }{
+ {
+ name: "set TestAlpha and TestBeta true",
+ setmap: map[string]bool{
+ "TestAlpha": true,
+ "TestBeta": true,
+ },
+ expect: map[Feature]bool{
+ testAlphaGate: true,
+ testBetaGate: true,
+ },
+ },
+ {
+ name: "set TestBeta true",
+ setmap: map[string]bool{
+ "TestBeta": true,
+ },
+ expect: map[Feature]bool{
+ testAlphaGate: false,
+ testBetaGate: true,
+ },
+ },
+ {
+ name: "set TestAlpha false",
+ setmap: map[string]bool{
+ "TestAlpha": false,
+ },
+ expect: map[Feature]bool{
+ testAlphaGate: false,
+ testBetaGate: false,
+ },
+ },
+ {
+ name: "set TestInvalid true",
+ setmap: map[string]bool{
+ "TestInvalid": true,
+ },
+ expect: map[Feature]bool{
+ testAlphaGate: false,
+ testBetaGate: false,
+ },
+ setmapError: "unrecognized feature gate:",
+ },
+ {
+ name: "set locked gates to their defaults",
+ setmap: map[string]bool{
+ "TestLockedTrue": true,
+ "TestLockedFalse": false,
+ },
+ expect: map[Feature]bool{
+ testAlphaGate: false,
+ testBetaGate: false,
+ },
+ },
+ {
+ name: "set locked true gate to false",
+ setmap: map[string]bool{
+ "TestLockedTrue": false,
+ },
+ expect: map[Feature]bool{
+ testAlphaGate: false,
+ testBetaGate: false,
+ },
+ setmapError: "cannot set feature gate TestLockedTrue to false, feature is locked to true",
+ },
+ {
+ name: "set locked false gate to true",
+ setmap: map[string]bool{
+ "TestLockedFalse": true,
+ },
+ expect: map[Feature]bool{
+ testAlphaGate: false,
+ testBetaGate: false,
+ },
+ setmapError: "cannot set feature gate TestLockedFalse to true, feature is locked to false",
+ },
+ }
+ for i, test := range tests {
+ t.Run(fmt.Sprintf("SetFromMap %s", test.name), func(t *testing.T) {
+ f := New("test", zaptest.NewLogger(t))
+ f.Add(map[Feature]FeatureSpec{
+ testAlphaGate: {Default: false, PreRelease: Alpha},
+ testBetaGate: {Default: false, PreRelease: Beta},
+ testLockedTrueGate: {Default: true, PreRelease: GA, LockToDefault: true},
+ testLockedFalseGate: {Default: false, PreRelease: GA, LockToDefault: true},
+ })
+ err := f.SetFromMap(test.setmap)
+ if test.setmapError != "" {
+ if err == nil {
+ t.Errorf("expected error, got none")
+ } else if !strings.Contains(err.Error(), test.setmapError) {
+ t.Errorf("%d: SetFromMap(%#v) Expected err:%v, Got err:%v", i, test.setmap, test.setmapError, err)
+ }
+ } else if err != nil {
+ t.Errorf("%d: SetFromMap(%#v) Expected success, Got err:%v", i, test.setmap, err)
+ }
+ for k, v := range test.expect {
+ if actual := f.Enabled(k); actual != v {
+ t.Errorf("%d: SetFromMap(%#v) Expected %s=%v, Got %s=%v", i, test.setmap, k, v, k, actual)
+ }
+ }
+ })
+ }
+}
+
+func TestFeatureGateMetrics(t *testing.T) {
+ // TODO(henrybear327): Add tests once feature gate metrics are added.
+}
+
+func TestFeatureGateString(t *testing.T) {
+ // gates for testing
+ const testAlphaGate Feature = "TestAlpha"
+ const testBetaGate Feature = "TestBeta"
+ const testGAGate Feature = "TestGA"
+
+ featuremap := map[Feature]FeatureSpec{
+ testGAGate: {Default: true, PreRelease: GA},
+ testAlphaGate: {Default: false, PreRelease: Alpha},
+ testBetaGate: {Default: true, PreRelease: Beta},
+ }
+
+ tests := []struct {
+ setmap map[string]bool
+ expect string
+ }{
+ {
+ setmap: map[string]bool{
+ "TestAlpha": false,
+ },
+ expect: "TestAlpha=false",
+ },
+ {
+ setmap: map[string]bool{
+ "TestAlpha": false,
+ "TestBeta": true,
+ },
+ expect: "TestAlpha=false,TestBeta=true",
+ },
+ {
+ setmap: map[string]bool{
+ "TestGA": true,
+ "TestAlpha": false,
+ "TestBeta": true,
+ },
+ expect: "TestAlpha=false,TestBeta=true,TestGA=true",
+ },
+ }
+ for i, test := range tests {
+ t.Run(fmt.Sprintf("SetFromMap %s", test.expect), func(t *testing.T) {
+ f := New("test", zaptest.NewLogger(t))
+ f.Add(featuremap)
+ f.SetFromMap(test.setmap)
+ result := f.String()
+ if result != test.expect {
+ t.Errorf("%d: SetFromMap(%#v) Expected %s, Got %s", i, test.setmap, test.expect, result)
+ }
+ })
+ }
+}
+
+func TestFeatureGateOverrideDefault(t *testing.T) {
+ t.Run("overrides take effect", func(t *testing.T) {
+ f := New("test", zaptest.NewLogger(t))
+ if err := f.Add(map[Feature]FeatureSpec{
+ "TestFeature1": {Default: true},
+ "TestFeature2": {Default: false},
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := f.OverrideDefault("TestFeature1", false); err != nil {
+ t.Fatal(err)
+ }
+ if err := f.OverrideDefault("TestFeature2", true); err != nil {
+ t.Fatal(err)
+ }
+ if f.Enabled("TestFeature1") {
+ t.Error("expected TestFeature1 to have effective default of false")
+ }
+ if !f.Enabled("TestFeature2") {
+ t.Error("expected TestFeature2 to have effective default of true")
+ }
+ })
+
+ t.Run("overrides are preserved across deep copies", func(t *testing.T) {
+ f := New("test", zaptest.NewLogger(t))
+ if err := f.Add(map[Feature]FeatureSpec{"TestFeature": {Default: false}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := f.OverrideDefault("TestFeature", true); err != nil {
+ t.Fatal(err)
+ }
+ fcopy := f.DeepCopy()
+ if !fcopy.Enabled("TestFeature") {
+ t.Error("default override was not preserved by deep copy")
+ }
+ })
+
+ t.Run("reflected in known features", func(t *testing.T) {
+ f := New("test", zaptest.NewLogger(t))
+ if err := f.Add(map[Feature]FeatureSpec{"TestFeature": {
+ Default: false,
+ PreRelease: Alpha,
+ }}); err != nil {
+ t.Fatal(err)
+ }
+ if err := f.OverrideDefault("TestFeature", true); err != nil {
+ t.Fatal(err)
+ }
+ var found bool
+ for _, s := range f.KnownFeatures() {
+ if !strings.Contains(s, "TestFeature") {
+ continue
+ }
+ found = true
+ if !strings.Contains(s, "default=true") {
+ t.Errorf("expected override of default to be reflected in known feature description %q", s)
+ }
+ }
+ if !found {
+ t.Error("found no entry for TestFeature in known features")
+ }
+ })
+
+ t.Run("may not change default for specs with locked defaults", func(t *testing.T) {
+ f := New("test", zaptest.NewLogger(t))
+ if err := f.Add(map[Feature]FeatureSpec{
+ "LockedFeature": {
+ Default: true,
+ LockToDefault: true,
+ },
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if f.OverrideDefault("LockedFeature", false) == nil {
+ t.Error("expected error when attempting to override the default for a feature with a locked default")
+ }
+ if f.OverrideDefault("LockedFeature", true) == nil {
+ t.Error("expected error when attempting to override the default for a feature with a locked default")
+ }
+ })
+
+ t.Run("does not supersede explicitly-set value", func(t *testing.T) {
+ f := New("test", zaptest.NewLogger(t))
+ if err := f.Add(map[Feature]FeatureSpec{"TestFeature": {Default: true}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := f.OverrideDefault("TestFeature", false); err != nil {
+ t.Fatal(err)
+ }
+ if err := f.SetFromMap(map[string]bool{"TestFeature": true}); err != nil {
+ t.Fatal(err)
+ }
+ if !f.Enabled("TestFeature") {
+ t.Error("expected feature to be effectively enabled despite default override")
+ }
+ })
+
+ t.Run("prevents re-registration of feature spec after overriding default", func(t *testing.T) {
+ f := New("test", zaptest.NewLogger(t))
+ if err := f.Add(map[Feature]FeatureSpec{
+ "TestFeature": {
+ Default: true,
+ PreRelease: Alpha,
+ },
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := f.OverrideDefault("TestFeature", false); err != nil {
+ t.Fatal(err)
+ }
+ if err := f.Add(map[Feature]FeatureSpec{
+ "TestFeature": {
+ Default: true,
+ PreRelease: Alpha,
+ },
+ }); err == nil {
+ t.Error("expected re-registration to return a non-nil error after overriding its default")
+ }
+ })
+
+ t.Run("does not allow override for an unknown feature", func(t *testing.T) {
+ f := New("test", zaptest.NewLogger(t))
+ if err := f.OverrideDefault("TestFeature", true); err == nil {
+ t.Error("expected an error to be returned in attempt to override default for unregistered feature")
+ }
+ })
+
+ t.Run("returns error if already added to flag set", func(t *testing.T) {
+ f := New("test", zaptest.NewLogger(t))
+ fs := flag.NewFlagSet("test", flag.ContinueOnError)
+ f.AddFlag(fs, defaultFlagName)
+
+ if err := f.OverrideDefault("TestFeature", true); err == nil {
+ t.Error("expected a non-nil error to be returned")
+ }
+ })
+}
diff --git a/pkg/flags/flag.go b/pkg/flags/flag.go
index 76a51a89019..b48921c6dcd 100644
--- a/pkg/flags/flag.go
+++ b/pkg/flags/flag.go
@@ -19,6 +19,7 @@ import (
"flag"
"fmt"
"os"
+ "strconv"
"strings"
"github.com/spf13/pflag"
@@ -66,7 +67,7 @@ func SetPflagsFromEnv(lg *zap.Logger, prefix string, fs *pflag.FlagSet) error {
// FlagToEnv converts flag string to upper-case environment variable key string.
func FlagToEnv(prefix, name string) string {
- return prefix + "_" + strings.ToUpper(strings.Replace(name, "-", "_", -1))
+ return prefix + "_" + strings.ToUpper(strings.ReplaceAll(name, "-", "_"))
}
func verifyEnv(lg *zap.Logger, prefix string, usedEnvKey, alreadySet map[string]bool) {
@@ -107,7 +108,7 @@ func setFlagFromEnv(lg *zap.Logger, fs flagSetter, prefix, fname string, usedEnv
if val != "" {
usedEnvKey[key] = true
if serr := fs.Set(fname, val); serr != nil {
- return fmt.Errorf("invalid value %q for %s: %v", val, key, serr)
+ return fmt.Errorf("invalid value %q for %s: %w", val, key, serr)
}
if log && lg != nil {
lg.Info(
@@ -130,3 +131,16 @@ func IsSet(fs *flag.FlagSet, name string) bool {
})
return set
}
+
+// GetBoolFlagVal returns the value of the given bool flag if it is explicitly set
+// in the command line arguments, otherwise returns nil.
+func GetBoolFlagVal(fs *flag.FlagSet, flagName string) (*bool, error) {
+ if !IsSet(fs, flagName) {
+ return nil, nil
+ }
+ flagVal, parseErr := strconv.ParseBool(fs.Lookup(flagName).Value.String())
+ if parseErr != nil {
+ return nil, parseErr
+ }
+ return &flagVal, nil
+}
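A hedged usage sketch of GetBoolFlagVal (the `enable-foo` flag name is made up for illustration): a nil result means the flag was never passed on the command line, which lets the caller distinguish "not set" from an explicit true or false.

```go
package main

import (
	"flag"
	"fmt"

	"go.etcd.io/etcd/pkg/v3/flags"
)

func main() {
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	fs.Bool("enable-foo", true, "illustrative boolean flag")
	if err := fs.Parse([]string{"--enable-foo=false"}); err != nil {
		panic(err)
	}

	v, err := flags.GetBoolFlagVal(fs, "enable-foo")
	if err != nil {
		panic(err)
	}
	if v == nil {
		// The flag was not given on the command line; fall back elsewhere.
		fmt.Println("--enable-foo not set")
		return
	}
	fmt.Println("--enable-foo explicitly set to", *v)
}
```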
diff --git a/pkg/flags/flag_test.go b/pkg/flags/flag_test.go
index a176497ae50..f2b8ce1eb71 100644
--- a/pkg/flags/flag_test.go
+++ b/pkg/flags/flag_test.go
@@ -16,11 +16,11 @@ package flags
import (
"flag"
- "os"
"strings"
"testing"
- "go.uber.org/zap"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
)
func TestSetFlagsFromEnv(t *testing.T) {
@@ -30,9 +30,8 @@ func TestSetFlagsFromEnv(t *testing.T) {
fs.String("c", "", "")
fs.Parse([]string{})
- os.Clearenv()
// flags should be settable using env vars
- os.Setenv("ETCD_A", "foo")
+ t.Setenv("ETCD_A", "foo")
// and command-line flags
if err := fs.Set("b", "bar"); err != nil {
t.Fatal(err)
@@ -43,13 +42,12 @@ func TestSetFlagsFromEnv(t *testing.T) {
"a": "",
"b": "bar",
} {
- if got := fs.Lookup(f).Value.String(); got != want {
- t.Fatalf("flag %q=%q, want %q", f, got, want)
- }
+ got := fs.Lookup(f).Value.String()
+ require.Equalf(t, want, got, "flag %q=%q, want %q", f, got, want)
}
// now read the env and verify flags were updated as expected
- err := SetFlagsFromEnv(zap.NewExample(), "ETCD", fs)
+ err := SetFlagsFromEnv(zaptest.NewLogger(t), "ETCD", fs)
if err != nil {
t.Errorf("err=%v, want nil", err)
}
@@ -67,8 +65,8 @@ func TestSetFlagsFromEnvBad(t *testing.T) {
// now verify that an error is propagated
fs := flag.NewFlagSet("testing", flag.ExitOnError)
fs.Int("x", 0, "")
- os.Setenv("ETCD_X", "not_a_number")
- if err := SetFlagsFromEnv(zap.NewExample(), "ETCD", fs); err == nil {
+ t.Setenv("ETCD_X", "not_a_number")
+ if err := SetFlagsFromEnv(zaptest.NewLogger(t), "ETCD", fs); err == nil {
t.Errorf("err=nil, want != nil")
}
}
@@ -78,19 +76,14 @@ func TestSetFlagsFromEnvParsingError(t *testing.T) {
var tickMs uint
fs.UintVar(&tickMs, "heartbeat-interval", 0, "Time (in milliseconds) of a heartbeat interval.")
- if oerr := os.Setenv("ETCD_HEARTBEAT_INTERVAL", "100 # ms"); oerr != nil {
- t.Fatal(oerr)
- }
- defer os.Unsetenv("ETCD_HEARTBEAT_INTERVAL")
+ t.Setenv("ETCD_HEARTBEAT_INTERVAL", "100 # ms")
- err := SetFlagsFromEnv(zap.NewExample(), "ETCD", fs)
+ err := SetFlagsFromEnv(zaptest.NewLogger(t), "ETCD", fs)
for _, v := range []string{"invalid syntax", "parse error"} {
if strings.Contains(err.Error(), v) {
err = nil
break
}
}
- if err != nil {
- t.Fatalf("unexpected error %v", err)
- }
+ require.NoErrorf(t, err, "unexpected error %v", err)
}
diff --git a/pkg/flags/strings.go b/pkg/flags/strings.go
index a80190658e4..e3d131f7902 100644
--- a/pkg/flags/strings.go
+++ b/pkg/flags/strings.go
@@ -49,5 +49,5 @@ func NewStringsValue(s string) (ss *StringsValue) {
// StringsFromFlag returns a string slice from the flag.
func StringsFromFlag(fs *flag.FlagSet, flagName string) []string {
- return []string(*fs.Lookup(flagName).Value.(*StringsValue))
+ return *fs.Lookup(flagName).Value.(*StringsValue)
}
diff --git a/pkg/flags/strings_test.go b/pkg/flags/strings_test.go
index 3835612b052..5d4ed6db568 100644
--- a/pkg/flags/strings_test.go
+++ b/pkg/flags/strings_test.go
@@ -17,6 +17,8 @@ package flags
import (
"reflect"
"testing"
+
+ "github.com/stretchr/testify/require"
)
func TestStringsValue(t *testing.T) {
@@ -30,8 +32,6 @@ func TestStringsValue(t *testing.T) {
}
for i := range tests {
ss := []string(*NewStringsValue(tests[i].s))
- if !reflect.DeepEqual(tests[i].exp, ss) {
- t.Fatalf("#%d: expected %q, got %q", i, tests[i].exp, ss)
- }
+ require.Truef(t, reflect.DeepEqual(tests[i].exp, ss), "#%d: expected %q, got %q", i, tests[i].exp, ss)
}
}
diff --git a/pkg/flags/uint32.go b/pkg/flags/uint32.go
new file mode 100644
index 00000000000..496730a4549
--- /dev/null
+++ b/pkg/flags/uint32.go
@@ -0,0 +1,45 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flags
+
+import (
+ "flag"
+ "strconv"
+)
+
+type uint32Value uint32
+
+// NewUint32Value creates a new uint32 flag value initialized with the provided value.
+func NewUint32Value(v uint32) *uint32Value {
+ val := new(uint32Value)
+ *val = uint32Value(v)
+ return val
+}
+
+// Set parses a command line uint32 value.
+// Implements "flag.Value" interface.
+func (i *uint32Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 32)
+ *i = uint32Value(v)
+ return err
+}
+
+func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+// Uint32FromFlag returns the uint32 value of the flag with the given name.
+func Uint32FromFlag(fs *flag.FlagSet, name string) uint32 {
+ val := *fs.Lookup(name).Value.(*uint32Value)
+ return uint32(val)
+}
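A small usage sketch of the new uint32 flag value (the flag name mirrors the test below; everything else is illustrative). Because Set parses with strconv.ParseUint and base 0, prefixed literals such as "0x80" are accepted alongside plain decimals.

```go
package main

import (
	"flag"
	"fmt"

	"go.etcd.io/etcd/pkg/v3/flags"
)

func main() {
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	fs.Var(flags.NewUint32Value(100), "max-concurrent-streams",
		"maximum concurrent streams per client")

	// Base-0 parsing accepts the hexadecimal form 0x80 (decimal 128).
	if err := fs.Parse([]string{"--max-concurrent-streams", "0x80"}); err != nil {
		panic(err)
	}
	fmt.Println(flags.Uint32FromFlag(fs, "max-concurrent-streams")) // 128
}
```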
diff --git a/pkg/flags/uint32_test.go b/pkg/flags/uint32_test.go
new file mode 100644
index 00000000000..949fbefb671
--- /dev/null
+++ b/pkg/flags/uint32_test.go
@@ -0,0 +1,110 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flags
+
+import (
+ "flag"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestUint32Value(t *testing.T) {
+ cases := []struct {
+ name string
+ s string
+ expectedVal uint32
+ expectError bool
+ }{
+ {
+ name: "normal uint32 value",
+ s: "200",
+ expectedVal: 200,
+ },
+ {
+ name: "zero value",
+ s: "0",
+ expectedVal: 0,
+ },
+ {
+ name: "negative int value",
+ s: "-200",
+ expectError: true,
+ },
+ {
+ name: "invalid integer value",
+ s: "invalid",
+ expectError: true,
+ },
+ }
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ var val uint32Value
+ err := val.Set(tc.s)
+
+ if tc.expectError {
+ if err == nil {
+ t.Errorf("Expected failure on parsing uint32 value from %s", tc.s)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Unexpected error when parsing %s: %v", tc.s, err)
+ }
+ assert.Equal(t, tc.expectedVal, uint32(val))
+ }
+ })
+ }
+}
+
+func TestUint32FromFlag(t *testing.T) {
+ const flagName = "max-concurrent-streams"
+
+ cases := []struct {
+ name string
+ defaultVal uint32
+ arguments []string
+ expectedVal uint32
+ }{
+ {
+ name: "only default value",
+ defaultVal: 15,
+ arguments: []string{},
+ expectedVal: 15,
+ },
+ {
+ name: "argument has different value from the default one",
+ defaultVal: 16,
+ arguments: []string{"--max-concurrent-streams", "200"},
+ expectedVal: 200,
+ },
+ {
+ name: "argument has the same value from the default one",
+ defaultVal: 105,
+ arguments: []string{"--max-concurrent-streams", "105"},
+ expectedVal: 105,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ fs := flag.NewFlagSet("etcd", flag.ContinueOnError)
+ fs.Var(NewUint32Value(tc.defaultVal), flagName, "Maximum concurrent streams that each client can open at a time.")
+ require.NoError(t, fs.Parse(tc.arguments))
+ actualMaxStream := Uint32FromFlag(fs, flagName)
+ assert.Equal(t, tc.expectedVal, actualMaxStream)
+ })
+ }
+}
diff --git a/pkg/flags/unique_strings_test.go b/pkg/flags/unique_strings_test.go
index 86d2b0fc2b0..7bd9c3a2118 100644
--- a/pkg/flags/unique_strings_test.go
+++ b/pkg/flags/unique_strings_test.go
@@ -17,6 +17,8 @@ package flags
import (
"reflect"
"testing"
+
+ "github.com/stretchr/testify/require"
)
func TestNewUniqueStrings(t *testing.T) {
@@ -58,11 +60,7 @@ func TestNewUniqueStrings(t *testing.T) {
}
for i := range tests {
uv := NewUniqueStringsValue(tests[i].s)
- if !reflect.DeepEqual(tests[i].exp, uv.Values) {
- t.Fatalf("#%d: expected %+v, got %+v", i, tests[i].exp, uv.Values)
- }
- if uv.String() != tests[i].rs {
- t.Fatalf("#%d: expected %q, got %q", i, tests[i].rs, uv.String())
- }
+ require.Truef(t, reflect.DeepEqual(tests[i].exp, uv.Values), "#%d: expected %+v, got %+v", i, tests[i].exp, uv.Values)
+ require.Equalf(t, tests[i].rs, uv.String(), "#%d: expected %q, got %q", i, tests[i].rs, uv.String())
}
}
diff --git a/pkg/flags/unique_urls.go b/pkg/flags/unique_urls.go
index 5b22ef21ad7..cc9b13294b3 100644
--- a/pkg/flags/unique_urls.go
+++ b/pkg/flags/unique_urls.go
@@ -50,7 +50,11 @@ func (us *UniqueURLs) Set(s string) error {
us.Values = make(map[string]struct{})
us.uss = make([]url.URL, 0)
for _, v := range ss {
- us.Values[v.String()] = struct{}{}
+ x := v.String()
+ if _, exists := us.Values[x]; exists {
+ continue
+ }
+ us.Values[x] = struct{}{}
us.uss = append(us.uss, v)
}
return nil
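To illustrate the de-duplication added to Set above, a hypothetical sketch (the flag name and URLs are made up): a URL repeated in the comma-separated value is now kept only once, so the slice returned by UniqueURLsFromFlag matches the Values set.

```go
package main

import (
	"flag"
	"fmt"

	"go.etcd.io/etcd/pkg/v3/flags"
)

func main() {
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	u := flags.NewUniqueURLsWithExceptions("http://127.0.0.1:2379")
	fs.Var(u, "listen-urls", "illustrative URL list flag")

	// The duplicate of http://10.0.0.1:2379 is dropped by Set.
	args := []string{"--listen-urls", "http://10.0.0.1:2379,http://10.0.0.1:2379,http://10.0.0.2:2379"}
	if err := fs.Parse(args); err != nil {
		panic(err)
	}
	fmt.Println(len(flags.UniqueURLsFromFlag(fs, "listen-urls"))) // 2
}
```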
diff --git a/pkg/flags/unique_urls_test.go b/pkg/flags/unique_urls_test.go
index adc4a6b5a19..a37e9ca35fa 100644
--- a/pkg/flags/unique_urls_test.go
+++ b/pkg/flags/unique_urls_test.go
@@ -15,8 +15,11 @@
package flags
import (
- "reflect"
+ "flag"
+ "strings"
"testing"
+
+ "github.com/stretchr/testify/require"
)
func TestNewUniqueURLsWithExceptions(t *testing.T) {
@@ -83,11 +86,29 @@ func TestNewUniqueURLsWithExceptions(t *testing.T) {
}
for i := range tests {
uv := NewUniqueURLsWithExceptions(tests[i].s, tests[i].exception)
- if !reflect.DeepEqual(tests[i].exp, uv.Values) {
- t.Fatalf("#%d: expected %+v, got %+v", i, tests[i].exp, uv.Values)
- }
- if uv.String() != tests[i].rs {
- t.Fatalf("#%d: expected %q, got %q", i, tests[i].rs, uv.String())
- }
+ require.Equal(t, tests[i].exp, uv.Values)
+ require.Equal(t, tests[i].rs, uv.String())
+ }
+}
+
+func TestUniqueURLsFromFlag(t *testing.T) {
+ const name = "test"
+ urls := []string{
+ "https://1.2.3.4:1",
+ "https://1.2.3.4:2",
+ "https://1.2.3.4:3",
+ "https://1.2.3.4:1",
+ }
+ fs := flag.NewFlagSet(name, flag.ExitOnError)
+ u := NewUniqueURLsWithExceptions(strings.Join(urls, ","))
+ fs.Var(u, name, "usage")
+ uss := UniqueURLsFromFlag(fs, name)
+
+ require.Equal(t, len(u.Values), len(uss))
+
+ um := make(map[string]struct{})
+ for _, x := range uss {
+ um[x.String()] = struct{}{}
}
+ require.Equal(t, u.Values, um)
}
diff --git a/pkg/flags/urls.go b/pkg/flags/urls.go
index 885d32f457a..27db58743be 100644
--- a/pkg/flags/urls.go
+++ b/pkg/flags/urls.go
@@ -62,5 +62,5 @@ func NewURLsValue(s string) *URLsValue {
// URLsFromFlag returns a slice of URLs parsed from the flag.
func URLsFromFlag(fs *flag.FlagSet, urlsFlagName string) []url.URL {
- return []url.URL(*fs.Lookup(urlsFlagName).Value.(*URLsValue))
+ return *fs.Lookup(urlsFlagName).Value.(*URLsValue)
}
diff --git a/pkg/flags/urls_test.go b/pkg/flags/urls_test.go
index ff4bda8d407..4b8429264bf 100644
--- a/pkg/flags/urls_test.go
+++ b/pkg/flags/urls_test.go
@@ -18,6 +18,8 @@ import (
"net/url"
"reflect"
"testing"
+
+ "github.com/stretchr/testify/require"
)
func TestValidateURLsValueBad(t *testing.T) {
@@ -29,9 +31,6 @@ func TestValidateURLsValueBad(t *testing.T) {
// bad port specification
"127.0.0.1:foo",
"127.0.0.1:",
- // unix sockets not supported
- "unix://",
- "unix://tmp/etcd.sock",
// bad strings
"somewhere",
"234#$",
@@ -56,6 +55,9 @@ func TestNewURLsValue(t *testing.T) {
{s: "http://10.1.1.1:80", exp: []url.URL{{Scheme: "http", Host: "10.1.1.1:80"}}},
{s: "http://localhost:80", exp: []url.URL{{Scheme: "http", Host: "localhost:80"}}},
{s: "http://:80", exp: []url.URL{{Scheme: "http", Host: ":80"}}},
+ {s: "unix://tmp/etcd.sock", exp: []url.URL{{Scheme: "unix", Host: "tmp", Path: "/etcd.sock"}}},
+ {s: "unix:///tmp/127.27.84.4:23432", exp: []url.URL{{Scheme: "unix", Path: "/tmp/127.27.84.4:23432"}}},
+ {s: "unix://127.0.0.5:1456", exp: []url.URL{{Scheme: "unix", Host: "127.0.0.5:1456"}}},
{
s: "http://localhost:1,https://localhost:2",
exp: []url.URL{
@@ -66,8 +68,6 @@ func TestNewURLsValue(t *testing.T) {
}
for i := range tests {
uu := []url.URL(*NewURLsValue(tests[i].s))
- if !reflect.DeepEqual(tests[i].exp, uu) {
- t.Fatalf("#%d: expected %+v, got %+v", i, tests[i].exp, uu)
- }
+ require.Truef(t, reflect.DeepEqual(tests[i].exp, uu), "#%d: expected %+v, got %+v", i, tests[i].exp, uu)
}
}
diff --git a/pkg/go.mod b/pkg/go.mod
index afb2b9e1923..5648eeb3707 100644
--- a/pkg/go.mod
+++ b/pkg/go.mod
@@ -1,23 +1,45 @@
module go.etcd.io/etcd/pkg/v3
-go 1.16
+go 1.23
+
+toolchain go1.23.4
require (
- github.com/creack/pty v1.1.11
- github.com/dustin/go-humanize v1.0.0
- github.com/golang/protobuf v1.5.1 // indirect
- github.com/spf13/cobra v1.1.3
+ github.com/creack/pty v1.1.18
+ github.com/dustin/go-humanize v1.0.1
+ github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
- github.com/stretchr/testify v1.7.0
- go.etcd.io/etcd/client/pkg/v3 v3.5.0-alpha.0
- go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19
- google.golang.org/grpc v1.37.0
+ github.com/stretchr/testify v1.10.0
+ go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0
+ go.uber.org/zap v1.27.0
+ google.golang.org/grpc v1.69.2
)
+require (
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ go.opentelemetry.io/otel v1.33.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.33.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ golang.org/x/net v0.33.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/text v0.21.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb // indirect
+ google.golang.org/protobuf v1.36.1 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+)
+
+replace go.etcd.io/etcd/client/pkg/v3 => ../client/pkg
+
+// Bad imports can cause Go to attempt to pull in that code.
+// These replace directives make the resulting error more explicit.
+// etcd contains many packages with a complex dependency graph,
+// so unnecessary dependencies should not be imported.
replace (
go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY
go.etcd.io/etcd/api/v3 => ./FORBIDDEN_DEPENDENCY
- go.etcd.io/etcd/client/pkg/v3 => ../client/pkg
go.etcd.io/etcd/tests/v3 => ./FORBIDDEN_DEPENDENCY
go.etcd.io/etcd/v3 => ./FORBIDDEN_DEPENDENCY
)
diff --git a/pkg/go.sum b/pkg/go.sum
index a0c5c50c2b1..511e5bd673d 100644
--- a/pkg/go.sum
+++ b/pkg/go.sum
@@ -1,362 +1,72 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
-github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1 h1:jAbXjIeW2ZSW2AwFxlGTDoc2CjI2XujLkV3ArsZFCvc=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
-github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19 h1:040c3dLNhgFQkoojH2AMpHCy4SrvhmxdU72d9GLGGE0=
-go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19/go.mod h1:aMfIlz3TDBfB0BwTCKFU1XbEmj9zevr5S5LcBr85MXw=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 h1:F5Gozwx4I1xtr/sr/8CFbb57iKi3297KFs0QDbGN60A=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
+go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
+go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
+go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
+go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
+go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
+go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb h1:3oy2tynMOP1QbTC0MsNNAV+Se8M2Bd0A5+x1QHyw+pI=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
+google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
+google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
+google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/pkg/grpc_testing/stub_server.go b/pkg/grpc_testing/stub_server.go
deleted file mode 100644
index e7e8c49d495..00000000000
--- a/pkg/grpc_testing/stub_server.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package grpc_testing
-
-import (
- "context"
- "fmt"
- "net"
-
- "google.golang.org/grpc"
- testpb "google.golang.org/grpc/test/grpc_testing"
-)
-
-// StubServer is borrowed from the interal package of grpc-go.
-// See https://github.com/grpc/grpc-go/blob/master/internal/stubserver/stubserver.go
-// Since it cannot be imported directly, we have to copy and paste it here,
-// and useless code for our testing is removed.
-
-// StubServer is a server that is easy to customize within individual test
-// cases.
-type StubServer struct {
- testService testpb.TestServiceServer
-
- // Network and Address are parameters for Listen. Defaults will be used if these are empty before Start.
- Network string
- Address string
-
- s *grpc.Server
-
- cleanups []func() // Lambdas executed in Stop(); populated by Start().
-}
-
-func New(testService testpb.TestServiceServer) *StubServer {
- return &StubServer{testService: testService}
-}
-
-// Start starts the server and creates a client connected to it.
-func (ss *StubServer) Start(sopts []grpc.ServerOption, dopts ...grpc.DialOption) error {
- if ss.Network == "" {
- ss.Network = "tcp"
- }
- if ss.Address == "" {
- ss.Address = "localhost:0"
- }
-
- lis, err := net.Listen(ss.Network, ss.Address)
- if err != nil {
- return fmt.Errorf("net.Listen(%q, %q) = %v", ss.Network, ss.Address, err)
- }
- ss.Address = lis.Addr().String()
- ss.cleanups = append(ss.cleanups, func() { lis.Close() })
-
- s := grpc.NewServer(sopts...)
- testpb.RegisterTestServiceServer(s, ss.testService)
- go s.Serve(lis)
- ss.cleanups = append(ss.cleanups, s.Stop)
- ss.s = s
-
- return nil
-}
-
-// Stop stops ss and cleans up all resources it consumed.
-func (ss *StubServer) Stop() {
- for i := len(ss.cleanups) - 1; i >= 0; i-- {
- ss.cleanups[i]()
- }
-}
-
-// Addr gets the address the server listening on.
-func (ss *StubServer) Addr() string {
- return ss.Address
-}
-
-type dummyStubServer struct {
- testpb.UnimplementedTestServiceServer
- body []byte
-}
-
-func (d dummyStubServer) UnaryCall(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
- return &testpb.SimpleResponse{
- Payload: &testpb.Payload{
- Type: testpb.PayloadType_COMPRESSABLE,
- Body: d.body,
- },
- }, nil
-}
-
-// NewDummyStubServer creates a simple test server that serves Unary calls with
-// responses with the given payload.
-func NewDummyStubServer(body []byte) *StubServer {
- return New(dummyStubServer{body: body})
-}
diff --git a/pkg/grpctesting/recorder.go b/pkg/grpctesting/recorder.go
new file mode 100644
index 00000000000..046f1ca0f30
--- /dev/null
+++ b/pkg/grpctesting/recorder.go
@@ -0,0 +1,69 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpctesting
+
+import (
+ "context"
+ "sync"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/metadata"
+)
+
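+// GRPCRecorder records the unary gRPC requests received by a test server so
+// that tests can later assert on them, e.g. by registering UnaryInterceptor
+// via grpc.UnaryInterceptor when constructing the server. It is safe for
+// concurrent use.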
+type GRPCRecorder struct {
+ mux sync.RWMutex
+ requests []RequestInfo
+}
+
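+// RequestInfo captures the full method name and the ":authority" header of a
+// single recorded request.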
+type RequestInfo struct {
+ FullMethod string
+ Authority string
+}
+
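+// UnaryInterceptor returns a grpc.UnaryServerInterceptor that records every
+// incoming request before delegating to the actual handler.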
+func (ri *GRPCRecorder) UnaryInterceptor() grpc.UnaryServerInterceptor {
+ return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
+ ri.record(toRequestInfo(ctx, info))
+ resp, err := handler(ctx, req)
+ return resp, err
+ }
+}
+
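+// RecordedRequests returns a copy of all requests recorded so far.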
+func (ri *GRPCRecorder) RecordedRequests() []RequestInfo {
+ ri.mux.RLock()
+ defer ri.mux.RUnlock()
+ reqs := make([]RequestInfo, len(ri.requests))
+ copy(reqs, ri.requests)
+ return reqs
+}
+
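+// toRequestInfo extracts the full method name and the ":authority" metadata
+// from the incoming request context.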
+func toRequestInfo(ctx context.Context, info *grpc.UnaryServerInfo) RequestInfo {
+ req := RequestInfo{
+ FullMethod: info.FullMethod,
+ }
+ md, ok := metadata.FromIncomingContext(ctx)
+ if ok {
+ as := md.Get(":authority")
+ if len(as) != 0 {
+ req.Authority = as[0]
+ }
+ }
+ return req
+}
+
+func (ri *GRPCRecorder) record(r RequestInfo) {
+ ri.mux.Lock()
+ defer ri.mux.Unlock()
+ ri.requests = append(ri.requests, r)
+}
diff --git a/pkg/grpctesting/stub_server.go b/pkg/grpctesting/stub_server.go
new file mode 100644
index 00000000000..bdc991cbed2
--- /dev/null
+++ b/pkg/grpctesting/stub_server.go
@@ -0,0 +1,116 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpctesting
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "strconv"
+ "sync/atomic"
+
+ "google.golang.org/grpc"
+ testpb "google.golang.org/grpc/interop/grpc_testing"
+)
+
+// StubServer is borrowed from the internal package of grpc-go.
+// See https://github.com/grpc/grpc-go/blob/master/internal/stubserver/stubserver.go
+// Since it cannot be imported directly, we copy it here and strip the code
+// that is not needed for our tests.
+
+// StubServer is a server that is easy to customize within individual test
+// cases.
+type StubServer struct {
+ testService testpb.TestServiceServer
+
+ // Network and Address are parameters for Listen. Defaults will be used if these are empty before Start.
+ Network string
+ Address string
+
+ s *grpc.Server
+
+ cleanups []func() // Lambdas executed in Stop(); populated by Start().
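+	// started is closed by Start once the serving goroutine has been launched;
+	// Stop blocks on it so the server is never stopped before it has started.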
+ started chan struct{}
+}
+
+func New(testService testpb.TestServiceServer) *StubServer {
+ return &StubServer{
+ testService: testService,
+ started: make(chan struct{}),
+ }
+}
+
+// Start starts the server and creates a client connected to it.
+func (ss *StubServer) Start(sopts []grpc.ServerOption, dopts ...grpc.DialOption) error {
+ if ss.Network == "" {
+ ss.Network = "tcp"
+ }
+ if ss.Address == "" {
+ ss.Address = "localhost:0"
+ }
+
+ lis, err := net.Listen(ss.Network, ss.Address)
+ if err != nil {
+ return fmt.Errorf("net.Listen(%q, %q) = %w", ss.Network, ss.Address, err)
+ }
+ ss.Address = lis.Addr().String()
+ ss.cleanups = append(ss.cleanups, func() { lis.Close() })
+
+ s := grpc.NewServer(sopts...)
+ testpb.RegisterTestServiceServer(s, ss.testService)
+ go func() {
+ close(ss.started)
+ s.Serve(lis)
+ }()
+ ss.cleanups = append(ss.cleanups, s.Stop)
+ ss.s = s
+
+ return nil
+}
+
+// Stop stops ss and cleans up all resources it consumed.
+func (ss *StubServer) Stop() {
+ <-ss.started
+ for i := len(ss.cleanups) - 1; i >= 0; i-- {
+ ss.cleanups[i]()
+ }
+}
+
+// Addr gets the address the server is listening on.
+func (ss *StubServer) Addr() string {
+ return ss.Address
+}
+
+type dummyStubServer struct {
+ testpb.UnimplementedTestServiceServer
+ counter uint64
+}
+
+func (d *dummyStubServer) UnaryCall(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
+ newCount := atomic.AddUint64(&d.counter, 1)
+
+ return &testpb.SimpleResponse{
+ Payload: &testpb.Payload{
+ Type: testpb.PayloadType_COMPRESSABLE,
+ Body: []byte(strconv.FormatUint(newCount, 10)),
+ },
+ }, nil
+}
+
+// NewDummyStubServer creates a simple test server that serves Unary calls with
+// responses whose payload carries an incrementing per-call counter; the body
+// argument is currently unused.
+func NewDummyStubServer(body []byte) *StubServer {
+ return New(&dummyStubServer{})
+}
diff --git a/pkg/httputil/httputil.go b/pkg/httputil/httputil.go
index 3bf58a3a1dc..41758138a47 100644
--- a/pkg/httputil/httputil.go
+++ b/pkg/httputil/httputil.go
@@ -21,7 +21,6 @@ package httputil
import (
"io"
- "io/ioutil"
"net"
"net/http"
)
@@ -31,7 +30,7 @@ import (
// therefore available for reuse.
// Borrowed from golang/net/context/ctxhttp/cancelreq.go.
func GracefulClose(resp *http.Response) {
- io.Copy(ioutil.Discard, resp.Body)
+ io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}
diff --git a/pkg/ioutil/pagewriter.go b/pkg/ioutil/pagewriter.go
index cf9a8dc664d..ebab6487e08 100644
--- a/pkg/ioutil/pagewriter.go
+++ b/pkg/ioutil/pagewriter.go
@@ -16,6 +16,8 @@ package ioutil
import (
"io"
+
+ "go.etcd.io/etcd/client/pkg/v3/verify"
)
var defaultBufferBytes = 128 * 1024
@@ -41,6 +43,7 @@ type PageWriter struct {
// NewPageWriter creates a new PageWriter. pageBytes is the number of bytes
// to write per page. pageOffset is the starting offset of io.Writer.
func NewPageWriter(w io.Writer, pageBytes, pageOffset int) *PageWriter {
+ verify.Assert(pageBytes > 0, "invalid pageBytes (%d) value, it must be greater than 0", pageBytes)
return &PageWriter{
w: w,
pageOffset: pageOffset,
@@ -101,11 +104,6 @@ func (pw *PageWriter) Flush() error {
return err
}
-// FlushN flushes buffered data and returns the number of written bytes.
-func (pw *PageWriter) FlushN() (int, error) {
- return pw.flush()
-}
-
func (pw *PageWriter) flush() (int, error) {
if pw.bufferedBytes == 0 {
return 0, nil
diff --git a/pkg/ioutil/pagewriter_test.go b/pkg/ioutil/pagewriter_test.go
index 10610691c3c..77f1336bbe8 100644
--- a/pkg/ioutil/pagewriter_test.go
+++ b/pkg/ioutil/pagewriter_test.go
@@ -17,6 +17,9 @@ package ioutil
import (
"math/rand"
"testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestPageWriterRandom(t *testing.T) {
@@ -34,17 +37,14 @@ func TestPageWriterRandom(t *testing.T) {
}
n += c
}
- if cw.writeBytes > n {
- t.Fatalf("wrote %d bytes to io.Writer, but only wrote %d bytes", cw.writeBytes, n)
- }
- if n-cw.writeBytes > pageBytes {
- t.Fatalf("got %d bytes pending, expected less than %d bytes", n-cw.writeBytes, pageBytes)
- }
+ require.LessOrEqualf(t, cw.writeBytes, n, "wrote %d bytes to io.Writer, but only wrote %d bytes", cw.writeBytes, n)
+ maxPendingBytes := pageBytes + defaultBufferBytes
+ require.LessOrEqualf(t, n-cw.writeBytes, maxPendingBytes, "got %d bytes pending, expected less than %d bytes", n-cw.writeBytes, maxPendingBytes)
t.Logf("total writes: %d", cw.writes)
t.Logf("total write bytes: %d (of %d)", cw.writeBytes, n)
}
-// TestPageWriterPariallack tests the case where a write overflows the buffer
+// TestPageWriterPartialSlack tests the case where a write overflows the buffer
// but there is not enough data to complete the slack write.
func TestPageWriterPartialSlack(t *testing.T) {
defaultBufferBytes = 1024
@@ -59,9 +59,7 @@ func TestPageWriterPartialSlack(t *testing.T) {
if err := w.Flush(); err != nil {
t.Fatal(err)
}
- if cw.writes != 1 {
- t.Fatalf("got %d writes, expected 1", cw.writes)
- }
+ require.Equalf(t, 1, cw.writes, "got %d writes, expected 1", cw.writes)
// nearly fill buffer
if _, err := w.Write(buf[:1022]); err != nil {
t.Fatal(err)
@@ -70,16 +68,12 @@ func TestPageWriterPartialSlack(t *testing.T) {
if _, err := w.Write(buf[:8]); err != nil {
t.Fatal(err)
}
- if cw.writes != 1 {
- t.Fatalf("got %d writes, expected 1", cw.writes)
- }
+ require.Equalf(t, 1, cw.writes, "got %d writes, expected 1", cw.writes)
// finish writing slack space
if _, err := w.Write(buf[:128]); err != nil {
t.Fatal(err)
}
- if cw.writes != 2 {
- t.Fatalf("got %d writes, expected 2", cw.writes)
- }
+ require.Equalf(t, 2, cw.writes, "got %d writes, expected 2", cw.writes)
}
// TestPageWriterOffset tests if page writer correctly repositions when offset is given.
@@ -95,9 +89,7 @@ func TestPageWriterOffset(t *testing.T) {
if err := w.Flush(); err != nil {
t.Fatal(err)
}
- if w.pageOffset != 64 {
- t.Fatalf("w.pageOffset expected 64, got %d", w.pageOffset)
- }
+ require.Equalf(t, 64, w.pageOffset, "w.pageOffset expected 64, got %d", w.pageOffset)
w = NewPageWriter(cw, w.pageOffset, pageBytes)
if _, err := w.Write(buf[:64]); err != nil {
@@ -106,8 +98,45 @@ func TestPageWriterOffset(t *testing.T) {
if err := w.Flush(); err != nil {
t.Fatal(err)
}
- if w.pageOffset != 0 {
- t.Fatalf("w.pageOffset expected 0, got %d", w.pageOffset)
+ require.Equalf(t, 0, w.pageOffset, "w.pageOffset expected 0, got %d", w.pageOffset)
+}
+
+func TestPageWriterPageBytes(t *testing.T) {
+ cases := []struct {
+ name string
+ pageBytes int
+ expectPanic bool
+ }{
+ {
+ name: "normal page bytes",
+ pageBytes: 4096,
+ expectPanic: false,
+ },
+ {
+ name: "negative page bytes",
+ pageBytes: -1,
+ expectPanic: true,
+ },
+ {
+ name: "zero page bytes",
+ pageBytes: 0,
+ expectPanic: true,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ defaultBufferBytes = 1024
+ cw := &checkPageWriter{pageBytes: tc.pageBytes, t: t}
+ if tc.expectPanic {
+ assert.Panicsf(t, func() {
+ NewPageWriter(cw, tc.pageBytes, 0)
+ }, "expected panic when pageBytes is %d", tc.pageBytes)
+ } else {
+ pw := NewPageWriter(cw, tc.pageBytes, 0)
+ assert.NotNil(t, pw)
+ }
+ })
}
}
@@ -120,9 +149,7 @@ type checkPageWriter struct {
}
func (cw *checkPageWriter) Write(p []byte) (int, error) {
- if len(p)%cw.pageBytes != 0 {
- cw.t.Fatalf("got write len(p) = %d, expected len(p) == k*cw.pageBytes", len(p))
- }
+ require.Equalf(cw.t, 0, len(p)%cw.pageBytes, "got write len(p) = %d, expected len(p) == k*cw.pageBytes", len(p))
cw.writes++
cw.writeBytes += len(p)
return len(p), nil
diff --git a/pkg/ioutil/readcloser_test.go b/pkg/ioutil/readcloser_test.go
index 6d13bdcec02..4b79fb44a76 100644
--- a/pkg/ioutil/readcloser_test.go
+++ b/pkg/ioutil/readcloser_test.go
@@ -18,6 +18,8 @@ import (
"bytes"
"io"
"testing"
+
+ "github.com/stretchr/testify/require"
)
type readerNilCloser struct{ io.Reader }
@@ -28,19 +30,15 @@ func (rc *readerNilCloser) Close() error { return nil }
func TestExactReadCloserExpectEOF(t *testing.T) {
buf := bytes.NewBuffer(make([]byte, 10))
rc := NewExactReadCloser(&readerNilCloser{buf}, 1)
- if _, err := rc.Read(make([]byte, 10)); err != ErrExpectEOF {
- t.Fatalf("expected %v, got %v", ErrExpectEOF, err)
- }
+ _, err := rc.Read(make([]byte, 10))
+ require.ErrorIsf(t, err, ErrExpectEOF, "expected %v, got %v", ErrExpectEOF, err)
}
// TestExactReadCloserShort expects an eof when reading too little
func TestExactReadCloserShort(t *testing.T) {
buf := bytes.NewBuffer(make([]byte, 5))
rc := NewExactReadCloser(&readerNilCloser{buf}, 10)
- if _, err := rc.Read(make([]byte, 10)); err != nil {
- t.Fatalf("Read expected nil err, got %v", err)
- }
- if err := rc.Close(); err != ErrShortRead {
- t.Fatalf("Close expected %v, got %v", ErrShortRead, err)
- }
+ _, err := rc.Read(make([]byte, 10))
+ require.NoErrorf(t, err, "Read expected nil err, got %v", err)
+ require.ErrorIs(t, rc.Close(), ErrShortRead)
}
diff --git a/pkg/netutil/netutil.go b/pkg/netutil/netutil.go
index bf737a4d942..0f1a685855c 100644
--- a/pkg/netutil/netutil.go
+++ b/pkg/netutil/netutil.go
@@ -16,6 +16,7 @@ package netutil
import (
"context"
+ "errors"
"fmt"
"net"
"net/url"
@@ -23,9 +24,9 @@ import (
"sort"
"time"
- "go.etcd.io/etcd/client/pkg/v3/types"
-
"go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
)
// indirection for testing
@@ -70,14 +71,14 @@ func resolveTCPAddrs(ctx context.Context, lg *zap.Logger, urls [][]url.URL) ([][
for i, u := range us {
nu, err := url.Parse(u.String())
if err != nil {
- return nil, fmt.Errorf("failed to parse %q (%v)", u.String(), err)
+ return nil, fmt.Errorf("failed to parse %q (%w)", u.String(), err)
}
nus[i] = *nu
}
for i, u := range nus {
h, err := resolveURL(ctx, lg, u)
if err != nil {
- return nil, fmt.Errorf("failed to resolve %q (%v)", u.String(), err)
+ return nil, fmt.Errorf("failed to resolve %q (%w)", u.String(), err)
}
if h != "" {
nus[i].Host = h
@@ -103,7 +104,7 @@ func resolveURL(ctx context.Context, lg *zap.Logger, u url.URL) (string, error)
)
return "", err
}
- if host == "localhost" || net.ParseIP(host) != nil {
+ if host == "localhost" {
return "", nil
}
for ctx.Err() == nil {
@@ -148,20 +149,31 @@ func urlsEqual(ctx context.Context, lg *zap.Logger, a []url.URL, b []url.URL) (b
if len(a) != len(b) {
return false, fmt.Errorf("len(%q) != len(%q)", urlsToStrings(a), urlsToStrings(b))
}
+
+ sort.Sort(types.URLs(a))
+ sort.Sort(types.URLs(b))
+ var needResolve bool
+ for i := range a {
+ if !reflect.DeepEqual(a[i], b[i]) {
+ needResolve = true
+ break
+ }
+ }
+ if !needResolve {
+ return true, nil
+ }
+
+	// If the URLs are not equal, try to resolve them and compare again.
urls, err := resolveTCPAddrs(ctx, lg, [][]url.URL{a, b})
if err != nil {
return false, err
}
- preva, prevb := a, b
a, b = urls[0], urls[1]
sort.Sort(types.URLs(a))
sort.Sort(types.URLs(b))
for i := range a {
if !reflect.DeepEqual(a[i], b[i]) {
- return false, fmt.Errorf("%q(resolved from %q) != %q(resolved from %q)",
- a[i].String(), preva[i].String(),
- b[i].String(), prevb[i].String(),
- )
+ return false, fmt.Errorf("resolved urls: %q != %q", a[i].String(), b[i].String())
}
}
return true, nil
@@ -174,27 +186,13 @@ func URLStringsEqual(ctx context.Context, lg *zap.Logger, a []string, b []string
if len(a) != len(b) {
return false, fmt.Errorf("len(%q) != len(%q)", a, b)
}
- urlsA := make([]url.URL, 0)
- for _, str := range a {
- u, err := url.Parse(str)
- if err != nil {
- return false, fmt.Errorf("failed to parse %q", str)
- }
- urlsA = append(urlsA, *u)
- }
- urlsB := make([]url.URL, 0)
- for _, str := range b {
- u, err := url.Parse(str)
- if err != nil {
- return false, fmt.Errorf("failed to parse %q", str)
- }
- urlsB = append(urlsB, *u)
+ urlsA, err := stringsToURLs(a)
+ if err != nil {
+ return false, err
}
- if lg == nil {
- lg, _ = zap.NewProduction()
- if lg == nil {
- lg = zap.NewExample()
- }
+ urlsB, err := stringsToURLs(b)
+ if err != nil {
+ return false, err
}
return urlsEqual(ctx, lg, urlsA, urlsB)
}
@@ -207,7 +205,19 @@ func urlsToStrings(us []url.URL) []string {
return rs
}
+func stringsToURLs(us []string) ([]url.URL, error) {
+ urls := make([]url.URL, 0, len(us))
+ for _, str := range us {
+ u, err := url.Parse(str)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse string to URL: %q", str)
+ }
+ urls = append(urls, *u)
+ }
+ return urls, nil
+}
+
func IsNetworkTimeoutError(err error) bool {
- nerr, ok := err.(net.Error)
- return ok && nerr.Timeout()
+ var nerr net.Error
+ return errors.As(err, &nerr) && nerr.Timeout()
}
diff --git a/pkg/netutil/netutil_test.go b/pkg/netutil/netutil_test.go
index 42b05ca295a..5b4551e1fc7 100644
--- a/pkg/netutil/netutil_test.go
+++ b/pkg/netutil/netutil_test.go
@@ -17,6 +17,7 @@ package netutil
import (
"context"
"errors"
+ "fmt"
"net"
"net/url"
"reflect"
@@ -24,7 +25,7 @@ import (
"testing"
"time"
- "go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
)
func TestResolveTCPAddrs(t *testing.T) {
@@ -119,17 +120,20 @@ func TestResolveTCPAddrs(t *testing.T) {
if err != nil {
return nil, err
}
- if tt.hostMap[host] == "" {
- return nil, errors.New("cannot resolve host")
- }
i, err := strconv.Atoi(port)
if err != nil {
return nil, err
}
+ if ip := net.ParseIP(host); ip != nil {
+ return &net.TCPAddr{IP: ip, Port: i, Zone: ""}, nil
+ }
+ if tt.hostMap[host] == "" {
+ return nil, errors.New("cannot resolve host")
+ }
return &net.TCPAddr{IP: net.ParseIP(tt.hostMap[host]), Port: i, Zone: ""}, nil
}
ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
- urls, err := resolveTCPAddrs(ctx, zap.NewExample(), tt.urls)
+ urls, err := resolveTCPAddrs(ctx, zaptest.NewLogger(t), tt.urls)
cancel()
if tt.hasError {
if err == nil {
@@ -151,128 +155,151 @@ func TestURLsEqual(t *testing.T) {
"second.com": "10.0.11.2",
}
resolveTCPAddr = func(ctx context.Context, addr string) (*net.TCPAddr, error) {
- host, port, herr := net.SplitHostPort(addr)
- if herr != nil {
- return nil, herr
- }
- if _, ok := hostm[host]; !ok {
- return nil, errors.New("cannot resolve host.")
+ host, port, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
}
i, err := strconv.Atoi(port)
if err != nil {
return nil, err
}
+ if ip := net.ParseIP(host); ip != nil {
+ return &net.TCPAddr{IP: ip, Port: i, Zone: ""}, nil
+ }
+ if hostm[host] == "" {
+ return nil, errors.New("cannot resolve host")
+ }
return &net.TCPAddr{IP: net.ParseIP(hostm[host]), Port: i, Zone: ""}, nil
}
tests := []struct {
+ n int
a []url.URL
b []url.URL
expect bool
err error
}{
{
+ n: 0,
a: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}},
b: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}},
expect: true,
},
{
+ n: 1,
a: []url.URL{{Scheme: "http", Host: "example.com:2379"}},
b: []url.URL{{Scheme: "http", Host: "10.0.10.1:2379"}},
expect: true,
},
{
+ n: 2,
a: []url.URL{{Scheme: "http", Host: "example.com:2379"}},
b: []url.URL{{Scheme: "https", Host: "10.0.10.1:2379"}},
expect: false,
- err: errors.New(`"http://10.0.10.1:2379"(resolved from "http://example.com:2379") != "https://10.0.10.1:2379"(resolved from "https://10.0.10.1:2379")`),
+ err: errors.New(`resolved urls: "http://10.0.10.1:2379" != "https://10.0.10.1:2379"`),
},
{
+ n: 3,
a: []url.URL{{Scheme: "https", Host: "example.com:2379"}},
b: []url.URL{{Scheme: "http", Host: "10.0.10.1:2379"}},
expect: false,
- err: errors.New(`"https://10.0.10.1:2379"(resolved from "https://example.com:2379") != "http://10.0.10.1:2379"(resolved from "http://10.0.10.1:2379")`),
+ err: errors.New(`resolved urls: "https://10.0.10.1:2379" != "http://10.0.10.1:2379"`),
},
{
+ n: 4,
a: []url.URL{{Scheme: "unix", Host: "abc:2379"}},
b: []url.URL{{Scheme: "unix", Host: "abc:2379"}},
expect: true,
},
{
+ n: 5,
a: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
b: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
expect: true,
},
{
+ n: 6,
a: []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
b: []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
expect: true,
},
{
+ n: 7,
a: []url.URL{{Scheme: "http", Host: "10.0.10.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
b: []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
expect: true,
},
{
+ n: 8,
a: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}},
b: []url.URL{{Scheme: "http", Host: "127.0.0.1:2380"}},
expect: false,
- err: errors.New(`"http://127.0.0.1:2379"(resolved from "http://127.0.0.1:2379") != "http://127.0.0.1:2380"(resolved from "http://127.0.0.1:2380")`),
+ err: errors.New(`resolved urls: "http://127.0.0.1:2379" != "http://127.0.0.1:2380"`),
},
{
+ n: 9,
a: []url.URL{{Scheme: "http", Host: "example.com:2380"}},
b: []url.URL{{Scheme: "http", Host: "10.0.10.1:2379"}},
expect: false,
- err: errors.New(`"http://10.0.10.1:2380"(resolved from "http://example.com:2380") != "http://10.0.10.1:2379"(resolved from "http://10.0.10.1:2379")`),
+ err: errors.New(`resolved urls: "http://10.0.10.1:2380" != "http://10.0.10.1:2379"`),
},
{
+ n: 10,
a: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}},
b: []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}},
expect: false,
- err: errors.New(`"http://127.0.0.1:2379"(resolved from "http://127.0.0.1:2379") != "http://10.0.0.1:2379"(resolved from "http://10.0.0.1:2379")`),
+ err: errors.New(`resolved urls: "http://127.0.0.1:2379" != "http://10.0.0.1:2379"`),
},
{
+ n: 11,
a: []url.URL{{Scheme: "http", Host: "example.com:2379"}},
b: []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}},
expect: false,
- err: errors.New(`"http://10.0.10.1:2379"(resolved from "http://example.com:2379") != "http://10.0.0.1:2379"(resolved from "http://10.0.0.1:2379")`),
+ err: errors.New(`resolved urls: "http://10.0.10.1:2379" != "http://10.0.0.1:2379"`),
},
{
+ n: 12,
a: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
b: []url.URL{{Scheme: "http", Host: "127.0.0.1:2380"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
expect: false,
- err: errors.New(`"http://127.0.0.1:2379"(resolved from "http://127.0.0.1:2379") != "http://127.0.0.1:2380"(resolved from "http://127.0.0.1:2380")`),
+ err: errors.New(`resolved urls: "http://127.0.0.1:2379" != "http://127.0.0.1:2380"`),
},
{
+ n: 13,
a: []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
b: []url.URL{{Scheme: "http", Host: "127.0.0.1:2380"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
expect: false,
- err: errors.New(`"http://10.0.10.1:2379"(resolved from "http://example.com:2379") != "http://127.0.0.1:2380"(resolved from "http://127.0.0.1:2380")`),
+ err: errors.New(`resolved urls: "http://10.0.10.1:2379" != "http://127.0.0.1:2380"`),
},
{
+ n: 14,
a: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
b: []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
expect: false,
- err: errors.New(`"http://127.0.0.1:2379"(resolved from "http://127.0.0.1:2379") != "http://10.0.0.1:2379"(resolved from "http://10.0.0.1:2379")`),
+ err: errors.New(`resolved urls: "http://127.0.0.1:2379" != "http://10.0.0.1:2379"`),
},
{
+ n: 15,
a: []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
b: []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
expect: false,
- err: errors.New(`"http://10.0.10.1:2379"(resolved from "http://example.com:2379") != "http://10.0.0.1:2379"(resolved from "http://10.0.0.1:2379")`),
+ err: errors.New(`resolved urls: "http://10.0.10.1:2379" != "http://10.0.0.1:2379"`),
},
{
+ n: 16,
a: []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}},
b: []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
expect: false,
err: errors.New(`len(["http://10.0.0.1:2379"]) != len(["http://10.0.0.1:2379" "http://127.0.0.1:2380"])`),
},
{
+ n: 17,
a: []url.URL{{Scheme: "http", Host: "first.com:2379"}, {Scheme: "http", Host: "second.com:2380"}},
b: []url.URL{{Scheme: "http", Host: "10.0.11.1:2379"}, {Scheme: "http", Host: "10.0.11.2:2380"}},
expect: true,
},
{
+ n: 18,
a: []url.URL{{Scheme: "http", Host: "second.com:2380"}, {Scheme: "http", Host: "first.com:2379"}},
b: []url.URL{{Scheme: "http", Host: "10.0.11.1:2379"}, {Scheme: "http", Host: "10.0.11.2:2380"}},
expect: true,
@@ -280,23 +307,51 @@ func TestURLsEqual(t *testing.T) {
}
for i, test := range tests {
- result, err := urlsEqual(context.TODO(), zap.NewExample(), test.a, test.b)
+ result, err := urlsEqual(context.TODO(), zaptest.NewLogger(t), test.a, test.b)
if result != test.expect {
- t.Errorf("#%d: a:%v b:%v, expected %v but %v", i, test.a, test.b, test.expect, result)
+ t.Errorf("idx=%d #%d: a:%v b:%v, expected %v but %v", i, test.n, test.a, test.b, test.expect, result)
}
if test.err != nil {
if err.Error() != test.err.Error() {
- t.Errorf("#%d: err expected %v but %v", i, test.err, err)
+ t.Errorf("idx=%d #%d: err expected %v but %v", i, test.n, test.err, err)
}
}
}
}
+
func TestURLStringsEqual(t *testing.T) {
- result, err := URLStringsEqual(context.TODO(), zap.NewExample(), []string{"http://127.0.0.1:8080"}, []string{"http://127.0.0.1:8080"})
- if !result {
- t.Errorf("unexpected result %v", result)
+ defer func() { resolveTCPAddr = resolveTCPAddrDefault }()
+ errOnResolve := func(ctx context.Context, addr string) (*net.TCPAddr, error) {
+ return nil, fmt.Errorf("unexpected attempt to resolve: %q", addr)
+ }
+ cases := []struct {
+ urlsA []string
+ urlsB []string
+ resolver func(ctx context.Context, addr string) (*net.TCPAddr, error)
+ }{
+ {[]string{"http://127.0.0.1:8080"}, []string{"http://127.0.0.1:8080"}, resolveTCPAddrDefault},
+ {[]string{
+ "http://host1:8080",
+ "http://host2:8080",
+ }, []string{
+ "http://host1:8080",
+ "http://host2:8080",
+ }, errOnResolve},
+ {
+ urlsA: []string{"https://[c262:266f:fa53:0ee6:966e:e3f0:d68f:b046]:2380"},
+ urlsB: []string{"https://[c262:266f:fa53:ee6:966e:e3f0:d68f:b046]:2380"},
+ resolver: resolveTCPAddrDefault,
+ },
}
- if err != nil {
- t.Errorf("unexpected error %v", err)
+ for idx, c := range cases {
+ t.Logf("TestURLStringsEqual, case #%d", idx)
+ resolveTCPAddr = c.resolver
+ result, err := URLStringsEqual(context.TODO(), zaptest.NewLogger(t), c.urlsA, c.urlsB)
+ if !result {
+ t.Errorf("unexpected result %v", result)
+ }
+ if err != nil {
+ t.Errorf("unexpected error %v", err)
+ }
}
}
diff --git a/pkg/netutil/routes.go b/pkg/netutil/routes.go
index f66719ea163..a7d67df3d41 100644
--- a/pkg/netutil/routes.go
+++ b/pkg/netutil/routes.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build !linux
-// +build !linux
package netutil
diff --git a/pkg/netutil/routes_linux.go b/pkg/netutil/routes_linux.go
index 5f8bd0c4889..b00ce457ec7 100644
--- a/pkg/netutil/routes_linux.go
+++ b/pkg/netutil/routes_linux.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build linux
-// +build linux
package netutil
@@ -22,15 +21,17 @@ import (
"encoding/binary"
"fmt"
"net"
- "sort"
+ "slices"
"syscall"
"go.etcd.io/etcd/pkg/v3/cpuutil"
)
-var errNoDefaultRoute = fmt.Errorf("could not find default route")
-var errNoDefaultHost = fmt.Errorf("could not find default host")
-var errNoDefaultInterface = fmt.Errorf("could not find default interface")
+var (
+ errNoDefaultRoute = fmt.Errorf("could not find default route")
+ errNoDefaultHost = fmt.Errorf("could not find default host")
+ errNoDefaultInterface = fmt.Errorf("could not find default interface")
+)
// GetDefaultHost obtains the first IP address of machine from the routing table and returns the IP address as string.
// An IPv4 address is preferred to an IPv6 address for backward compatibility.
@@ -49,14 +50,13 @@ func GetDefaultHost() (string, error) {
}
// sort so choice is deterministic
- var families []int
+ var families []uint8
for family := range rmsgs {
- families = append(families, int(family))
+ families = append(families, family)
}
- sort.Ints(families)
+ slices.Sort(families)
- for _, f := range families {
- family := uint8(f)
+ for _, family := range families {
if host, err := chooseHost(family, rmsgs[family]); host != "" || err != nil {
return host, err
}
@@ -154,7 +154,6 @@ func getIfaceAddr(idx uint32, family uint8) (*syscall.NetlinkMessage, error) {
}
return nil, fmt.Errorf("could not find address for interface index %v", idx)
-
}
// Used to get a name of interface.
diff --git a/pkg/netutil/routes_linux_test.go b/pkg/netutil/routes_linux_test.go
index 2d0dade6aff..a0056e990e7 100644
--- a/pkg/netutil/routes_linux_test.go
+++ b/pkg/netutil/routes_linux_test.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build linux
-// +build linux
package netutil
diff --git a/pkg/notify/notify.go b/pkg/notify/notify.go
new file mode 100644
index 00000000000..8925a1ea218
--- /dev/null
+++ b/pkg/notify/notify.go
@@ -0,0 +1,52 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package notify
+
+import (
+ "sync"
+)
+
+// Notifier is a thread-safe struct that can be used to send notifications about
+// some event to multiple consumers.
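+//
+// A minimal usage sketch: consumers subscribe via Receive before the event,
+// and Notify wakes every subscriber holding the previously returned channel.
+//
+//	n := NewNotifier()
+//	ch := n.Receive() // subscribe
+//	go func() {
+//		<-ch // unblocks once Notify is called
+//	}()
+//	n.Notify()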
+type Notifier struct {
+ mu sync.RWMutex
+ channel chan struct{}
+}
+
+// NewNotifier returns a new Notifier.
+func NewNotifier() *Notifier {
+ return &Notifier{
+ channel: make(chan struct{}),
+ }
+}
+
+// Receive returns a channel that can be used to wait for the next notification.
+// Consumers are informed by the channel being closed.
+func (n *Notifier) Receive() <-chan struct{} {
+ n.mu.RLock()
+ defer n.mu.RUnlock()
+ return n.channel
+}
+
+// Notify closes the channel previously handed out to consumers and creates a
+// new channel to be used for the next notification.
+func (n *Notifier) Notify() {
+ newChannel := make(chan struct{})
+ n.mu.Lock()
+ channelToClose := n.channel
+ n.channel = newChannel
+ n.mu.Unlock()
+ close(channelToClose)
+}
diff --git a/pkg/osutil/interrupt_unix.go b/pkg/osutil/interrupt_unix.go
index 78161d4961c..ec9dc7b67a6 100644
--- a/pkg/osutil/interrupt_unix.go
+++ b/pkg/osutil/interrupt_unix.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build !windows && !plan9
-// +build !windows,!plan9
package osutil
@@ -23,6 +22,8 @@ import (
"sync"
"syscall"
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+
"go.uber.org/zap"
)
@@ -34,7 +35,7 @@ var (
interruptRegisterMu, interruptExitMu sync.Mutex
// interruptHandlers holds all registered InterruptHandlers in order
// they will be executed.
- interruptHandlers = []InterruptHandler{}
+ interruptHandlers []InterruptHandler
)
// RegisterInterruptHandler registers a new InterruptHandler. Handlers registered
@@ -47,6 +48,7 @@ func RegisterInterruptHandler(h InterruptHandler) {
// HandleInterrupts calls the handler functions on receiving a SIGINT or SIGTERM.
func HandleInterrupts(lg *zap.Logger) {
+ verify.Assert(lg != nil, "the logger should not be nil")
notifier := make(chan os.Signal, 1)
signal.Notify(notifier, syscall.SIGINT, syscall.SIGTERM)
@@ -60,9 +62,7 @@ func HandleInterrupts(lg *zap.Logger) {
interruptExitMu.Lock()
- if lg != nil {
- lg.Info("received signal; shutting down", zap.String("signal", sig.String()))
- }
+ lg.Info("received signal; shutting down", zap.String("signal", sig.String()))
for _, h := range ihs {
h()
diff --git a/pkg/osutil/interrupt_windows.go b/pkg/osutil/interrupt_windows.go
index 7572690d2dd..a4c82b99e06 100644
--- a/pkg/osutil/interrupt_windows.go
+++ b/pkg/osutil/interrupt_windows.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build windows
-// +build windows
package osutil
diff --git a/pkg/osutil/osutil.go b/pkg/osutil/osutil.go
index cbf96e2e04d..f79108a6870 100644
--- a/pkg/osutil/osutil.go
+++ b/pkg/osutil/osutil.go
@@ -15,27 +15,5 @@
// Package osutil implements operating system-related utility functions.
package osutil
-import (
- "os"
- "strings"
-)
-
-var (
- // support to override setting SIG_DFL so tests don't terminate early
- setDflSignal = dflSignal
-)
-
-func Unsetenv(key string) error {
- envs := os.Environ()
- os.Clearenv()
- for _, e := range envs {
- strs := strings.SplitN(e, "=", 2)
- if strs[0] == key {
- continue
- }
- if err := os.Setenv(strs[0], strs[1]); err != nil {
- return err
- }
- }
- return nil
-}
+// setDflSignal supports overriding the SIG_DFL reset so tests don't terminate early.
+var setDflSignal = dflSignal
diff --git a/pkg/osutil/osutil_test.go b/pkg/osutil/osutil_test.go
index c03895b8ac9..28fcc7288b1 100644
--- a/pkg/osutil/osutil_test.go
+++ b/pkg/osutil/osutil_test.go
@@ -17,40 +17,15 @@ package osutil
import (
"os"
"os/signal"
- "reflect"
"syscall"
"testing"
"time"
- "go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
)
func init() { setDflSignal = func(syscall.Signal) {} }
-func TestUnsetenv(t *testing.T) {
- tests := []string{
- "data",
- "space data",
- "equal=data",
- }
- for i, tt := range tests {
- key := "ETCD_UNSETENV_TEST"
- if os.Getenv(key) != "" {
- t.Fatalf("#%d: cannot get empty %s", i, key)
- }
- env := os.Environ()
- if err := os.Setenv(key, tt); err != nil {
- t.Fatalf("#%d: cannot set %s: %v", i, key, err)
- }
- if err := Unsetenv(key); err != nil {
- t.Errorf("#%d: unsetenv %s error: %v", i, key, err)
- }
- if g := os.Environ(); !reflect.DeepEqual(g, env) {
- t.Errorf("#%d: env = %+v, want %+v", i, g, env)
- }
- }
-}
-
func waitSig(t *testing.T, c <-chan os.Signal, sig os.Signal) {
select {
case s := <-c:
@@ -71,7 +46,7 @@ func TestHandleInterrupts(t *testing.T) {
c := make(chan os.Signal, 2)
signal.Notify(c, sig)
- HandleInterrupts(zap.NewExample())
+ HandleInterrupts(zaptest.NewLogger(t))
syscall.Kill(syscall.Getpid(), sig)
// we should receive the signal once from our own kill and
diff --git a/pkg/osutil/signal.go b/pkg/osutil/signal.go
index c324ea16ec1..154f88204dd 100644
--- a/pkg/osutil/signal.go
+++ b/pkg/osutil/signal.go
@@ -12,8 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build !linux || cov
-// +build !linux cov
+//go:build !linux
package osutil
diff --git a/pkg/osutil/signal_linux.go b/pkg/osutil/signal_linux.go
index 93e0f350805..a392de83dfe 100644
--- a/pkg/osutil/signal_linux.go
+++ b/pkg/osutil/signal_linux.go
@@ -12,8 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build linux && !cov
-// +build linux,!cov
+//go:build linux
package osutil
diff --git a/pkg/proxy/fixtures/gencerts.sh b/pkg/proxy/fixtures/gencerts.sh
index 50147b62183..d61160d2a6c 100755
--- a/pkg/proxy/fixtures/gencerts.sh
+++ b/pkg/proxy/fixtures/gencerts.sh
@@ -1,12 +1,21 @@
#!/bin/bash
+set -euo pipefail
+
if ! [[ "$0" =~ "./gencerts.sh" ]]; then
echo "must be run from 'fixtures'"
exit 255
fi
-if ! which cfssl; then
+if ! command -v cfssl; then
echo "cfssl is not installed"
+ echo 'use: bash -c "cd ../../../tools/mod; go install github.com/cloudflare/cfssl/cmd/cfssl"'
+ exit 255
+fi
+
+if ! command -v cfssljson; then
+ echo "cfssljson is not installed"
+ echo 'use: bash -c "cd ../../../tools/mod; go install github.com/cloudflare/cfssl/cmd/cfssljson"'
exit 255
fi
diff --git a/pkg/proxy/server.go b/pkg/proxy/server.go
index 9a7b105f9a7..bc71c3a1660 100644
--- a/pkg/proxy/server.go
+++ b/pkg/proxy/server.go
@@ -16,6 +16,7 @@ package proxy
import (
"context"
+ "errors"
"fmt"
"io"
mrand "math/rand"
@@ -27,27 +28,18 @@ import (
"sync"
"time"
- "go.etcd.io/etcd/client/pkg/v3/transport"
-
humanize "github.com/dustin/go-humanize"
"go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/transport"
)
var (
defaultDialTimeout = 3 * time.Second
defaultBufferSize = 48 * 1024
defaultRetryInterval = 10 * time.Millisecond
- defaultLogger *zap.Logger
)
-func init() {
- var err error
- defaultLogger, err = zap.NewProduction()
- if err != nil {
- panic(err)
- }
-}
-
// Server defines proxy server layer that simulates common network faults:
// latency spikes and packet drop or corruption. The proxy overhead is very
// small (<500μs per request). Please run tests to compute actual
@@ -240,9 +232,6 @@ func NewServer(cfg ServerConfig) Server {
if s.retryInterval == 0 {
s.retryInterval = defaultRetryInterval
}
- if s.lg == nil {
- s.lg = defaultLogger
- }
close(s.pauseAcceptc)
close(s.pauseTxc)
@@ -439,7 +428,7 @@ func (s *server) ioCopy(dst io.Writer, src io.Reader, ptype proxyType) {
for {
nr1, err := src.Read(buf)
if err != nil {
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
return
}
// connection already closed
@@ -557,7 +546,7 @@ func (s *server) ioCopy(dst io.Writer, src io.Reader, ptype proxyType) {
var nw int
nw, err = dst.Write(data)
if err != nil {
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
return
}
select {
@@ -818,7 +807,6 @@ func computeLatency(lat, rv time.Duration) time.Duration {
rv = lat / 10
}
now := time.Now()
- mrand.Seed(int64(now.Nanosecond()))
sign := 1
if now.Second()%2 == 0 {
sign = -1
diff --git a/pkg/proxy/server_test.go b/pkg/proxy/server_test.go
index 686a8c362b3..c1d3805cfb0 100644
--- a/pkg/proxy/server_test.go
+++ b/pkg/proxy/server_test.go
@@ -19,7 +19,7 @@ import (
"context"
"crypto/tls"
"fmt"
- "io/ioutil"
+ "io"
"log"
"math/rand"
"net"
@@ -30,11 +30,11 @@ import (
"testing"
"time"
- "github.com/stretchr/testify/assert"
- "go.etcd.io/etcd/client/pkg/v3/transport"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
"go.uber.org/zap/zaptest"
- "go.uber.org/zap"
+ "go.etcd.io/etcd/client/pkg/v3/transport"
)
func TestServer_Unix_Insecure(t *testing.T) { testServer(t, "unix", false, false) }
@@ -73,7 +73,9 @@ func testServer(t *testing.T, scheme string, secure bool, delayTx bool) {
cfg.TLSInfo = tlsInfo
}
p := NewServer(cfg)
- <-p.Ready()
+
+ waitForServer(t, p)
+
defer p.Close()
data1 := []byte("Hello World!")
@@ -96,6 +98,7 @@ func testServer(t *testing.T, scheme string, secure bool, delayTx bool) {
writec <- data1
now := time.Now()
if d := <-recvc; !bytes.Equal(data1, d) {
+ close(writec)
t.Fatalf("expected %q, got %q", string(data1), string(d))
}
took1 := time.Since(now)
@@ -110,6 +113,7 @@ func testServer(t *testing.T, scheme string, secure bool, delayTx bool) {
writec <- data2
now = time.Now()
if d := <-recvc; !bytes.Equal(data2, d) {
+ close(writec)
t.Fatalf("expected %q, got %q", string(data2), string(d))
}
took2 := time.Since(now)
@@ -122,6 +126,7 @@ func testServer(t *testing.T, scheme string, secure bool, delayTx bool) {
if delayTx {
p.UndelayTx()
if took2 < lat-rv {
+ close(writec)
t.Fatalf("expected took2 %v (with latency) > delay: %v", took2, lat-rv)
}
}
@@ -193,7 +198,9 @@ func testServerDelayAccept(t *testing.T, secure bool) {
cfg.TLSInfo = tlsInfo
}
p := NewServer(cfg)
- <-p.Ready()
+
+ waitForServer(t, p)
+
defer p.Close()
data := []byte("Hello World!")
@@ -243,7 +250,9 @@ func TestServer_PauseTx(t *testing.T) {
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
})
- <-p.Ready()
+
+ waitForServer(t, p)
+
defer p.Close()
p.PauseTx()
@@ -290,7 +299,9 @@ func TestServer_ModifyTx_corrupt(t *testing.T) {
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
})
- <-p.Ready()
+
+ waitForServer(t, p)
+
defer p.Close()
p.ModifyTx(func(d []byte) []byte {
@@ -326,7 +337,9 @@ func TestServer_ModifyTx_packet_loss(t *testing.T) {
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
})
- <-p.Ready()
+
+ waitForServer(t, p)
+
defer p.Close()
// 50% packet loss
@@ -363,7 +376,9 @@ func TestServer_BlackholeTx(t *testing.T) {
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
})
- <-p.Ready()
+
+ waitForServer(t, p)
+
defer p.Close()
p.BlackholeTx()
@@ -414,7 +429,9 @@ func TestServer_Shutdown(t *testing.T) {
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
})
- <-p.Ready()
+
+ waitForServer(t, p)
+
defer p.Close()
s, _ := p.(*server)
@@ -445,7 +462,9 @@ func TestServer_ShutdownListener(t *testing.T) {
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
})
- <-p.Ready()
+
+ waitForServer(t, p)
+
defer p.Close()
// shut down destination
@@ -476,7 +495,7 @@ func testServerHTTP(t *testing.T, secure, delayTx bool) {
mux := http.NewServeMux()
mux.HandleFunc("/hello", func(w http.ResponseWriter, req *http.Request) {
- d, err := ioutil.ReadAll(req.Body)
+ d, err := io.ReadAll(req.Body)
req.Body.Close()
if err != nil {
t.Fatal(err)
@@ -497,7 +516,7 @@ func testServerHTTP(t *testing.T, secure, delayTx bool) {
Addr: dstAddr,
Handler: mux,
TLSConfig: tlsConfig,
- ErrorLog: log.New(ioutil.Discard, "net/http", 0),
+ ErrorLog: log.New(io.Discard, "net/http", 0),
}
donec := make(chan struct{})
@@ -524,7 +543,9 @@ func testServerHTTP(t *testing.T, secure, delayTx bool) {
cfg.TLSInfo = tlsInfo
}
p := NewServer(cfg)
- <-p.Ready()
+
+ waitForServer(t, p)
+
defer func() {
lg.Info("closing Proxy server...")
p.Close()
@@ -538,7 +559,7 @@ func testServerHTTP(t *testing.T, secure, delayTx bool) {
now := time.Now()
if secure {
tp, terr := transport.NewTransport(tlsInfo, 3*time.Second)
- assert.NoError(t, terr)
+ require.NoError(t, terr)
cli := &http.Client{Transport: tp}
resp, err = cli.Post("https://"+srcAddr+"/hello", "", strings.NewReader(data))
defer cli.CloseIdleConnections()
@@ -547,8 +568,8 @@ func testServerHTTP(t *testing.T, secure, delayTx bool) {
resp, err = http.Post("http://"+srcAddr+"/hello", "", strings.NewReader(data))
defer http.DefaultClient.CloseIdleConnections()
}
- assert.NoError(t, err)
- d, err := ioutil.ReadAll(resp.Body)
+ require.NoError(t, err)
+ d, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
@@ -588,7 +609,7 @@ func testServerHTTP(t *testing.T, secure, delayTx bool) {
if err != nil {
t.Fatal(err)
}
- d, err = ioutil.ReadAll(resp.Body)
+ d, err = io.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
@@ -607,7 +628,6 @@ func testServerHTTP(t *testing.T, secure, delayTx bool) {
func newUnixAddr() string {
now := time.Now().UnixNano()
- rand.Seed(now)
addr := fmt.Sprintf("%X%X.unix-conn", now, rand.Intn(35000))
os.RemoveAll(addr)
return addr
@@ -667,3 +687,13 @@ func receive(t *testing.T, ln net.Listener) (data []byte) {
}
return buf.Bytes()
}
+
+// waitForServer waits until a proxy is ready to serve.
+// It aborts the test on a proxy start-up error.
+func waitForServer(t *testing.T, s Server) {
+ select {
+ case <-s.Ready():
+ case err := <-s.Error():
+ t.Fatal(err)
+ }
+}
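Aside: this helper replaces the bare `<-p.Ready()` waits above so that a proxy which fails to start aborts the test instead of blocking it forever. The same ready-or-error pattern in isolation, as a sketch (the `waitReadyOrErr` name is made up here; only `Ready()`/`Error()` come from the proxy `Server` interface):

```go
package example

import "testing"

// waitReadyOrErr blocks until either the ready channel is closed or an error
// arrives, failing the test in the error case so it cannot hang indefinitely.
func waitReadyOrErr(t *testing.T, ready <-chan struct{}, errc <-chan error) {
	t.Helper()
	select {
	case <-ready:
	case err := <-errc:
		t.Fatalf("start-up failed: %v", err)
	}
}
```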
diff --git a/pkg/report/report.go b/pkg/report/report.go
index 36d09b9d508..4d138f9744e 100644
--- a/pkg/report/report.go
+++ b/pkg/report/report.go
@@ -138,7 +138,7 @@ func copyFloats(s []float64) (c []float64) {
func (r *report) String() (s string) {
if len(r.stats.Lats) > 0 {
- s += fmt.Sprintf("\nSummary:\n")
+ s += "\nSummary:\n"
s += fmt.Sprintf(" Total:\t%s.\n", r.sec2str(r.stats.Total.Seconds()))
s += fmt.Sprintf(" Slowest:\t%s.\n", r.sec2str(r.stats.Slowest))
s += fmt.Sprintf(" Fastest:\t%s.\n", r.sec2str(r.stats.Fastest))
@@ -226,7 +226,7 @@ func percentiles(nums []float64) (data []float64) {
func (r *report) sprintLatencies() string {
data := percentiles(r.stats.Lats)
- s := fmt.Sprintf("\nLatency distribution:\n")
+ s := "\nLatency distribution:\n"
for i := 0; i < len(pctls); i++ {
if data[i] > 0 {
s += fmt.Sprintf(" %v%% in %s.\n", pctls[i], r.sec2str(data[i]))
@@ -257,7 +257,7 @@ func (r *report) histogram() string {
bi++
}
}
- s := fmt.Sprintf("\nResponse time histogram:\n")
+ s := "\nResponse time histogram:\n"
for i := 0; i < len(buckets); i++ {
// Normalize bar lengths.
var barLen int
@@ -270,7 +270,7 @@ func (r *report) histogram() string {
}
func (r *report) errors() string {
- s := fmt.Sprintf("\nError distribution:\n")
+ s := "\nError distribution:\n"
for err, num := range r.stats.ErrorDist {
s += fmt.Sprintf(" [%d]\t%s\n", num, err)
}
diff --git a/pkg/report/report_test.go b/pkg/report/report_test.go
index 6f073f3e8d3..d6bdc3bf95c 100644
--- a/pkg/report/report_test.go
+++ b/pkg/report/report_test.go
@@ -20,6 +20,8 @@ import (
"strings"
"testing"
"time"
+
+ "github.com/stretchr/testify/require"
)
func TestPercentiles(t *testing.T) {
@@ -64,9 +66,7 @@ func TestReport(t *testing.T) {
ErrorDist: map[string]int{"oops": 1},
Lats: []float64{1.0, 1.0, 1.0, 1.0, 1.0},
}
- if !reflect.DeepEqual(stats, wStats) {
- t.Fatalf("got %+v, want %+v", stats, wStats)
- }
+ require.Truef(t, reflect.DeepEqual(stats, wStats), "got %+v, want %+v", stats, wStats)
wstrs := []string{
"Stddev:\t0",
@@ -108,7 +108,5 @@ func TestWeightedReport(t *testing.T) {
ErrorDist: map[string]int{"oops": 1},
Lats: []float64{0.5, 0.5, 0.5, 0.5, 0.5},
}
- if !reflect.DeepEqual(stats, wStats) {
- t.Fatalf("got %+v, want %+v", stats, wStats)
- }
+ require.Truef(t, reflect.DeepEqual(stats, wStats), "got %+v, want %+v", stats, wStats)
}
diff --git a/pkg/report/timeseries.go b/pkg/report/timeseries.go
index a999c2dcc77..6f5ed8c4a54 100644
--- a/pkg/report/timeseries.go
+++ b/pkg/report/timeseries.go
@@ -15,12 +15,12 @@
package report
import (
- "bytes"
"encoding/csv"
"fmt"
"log"
"math"
"sort"
+ "strings"
"sync"
"time"
)
@@ -64,9 +64,9 @@ func (sp *secondPoints) Add(ts time.Time, lat time.Duration) {
sp.tm[tk] = secondPoint{minLatency: lat, maxLatency: lat, totalLatency: lat, count: 1}
} else {
if lat != time.Duration(0) {
- v.minLatency = minDuration(v.minLatency, lat)
+ v.minLatency = min(v.minLatency, lat)
}
- v.maxLatency = maxDuration(v.maxLatency, lat)
+ v.maxLatency = max(v.maxLatency, lat)
v.totalLatency += lat
v.count++
sp.tm[tk] = v
@@ -119,12 +119,12 @@ func (sp *secondPoints) getTimeSeries() TimeSeries {
}
func (t TimeSeries) String() string {
- buf := new(bytes.Buffer)
+ buf := new(strings.Builder)
wr := csv.NewWriter(buf)
if err := wr.Write([]string{"UNIX-SECOND", "MIN-LATENCY-MS", "AVG-LATENCY-MS", "MAX-LATENCY-MS", "AVG-THROUGHPUT"}); err != nil {
log.Fatal(err)
}
- rows := [][]string{}
+ var rows [][]string
for i := range t {
row := []string{
fmt.Sprintf("%d", t[i].Timestamp),
@@ -144,17 +144,3 @@ func (t TimeSeries) String() string {
}
return fmt.Sprintf("\nSample in one second (unix latency throughput):\n%s", buf.String())
}
-
-func minDuration(a, b time.Duration) time.Duration {
- if a < b {
- return a
- }
- return b
-}
-
-func maxDuration(a, b time.Duration) time.Duration {
- if a > b {
- return a
- }
- return b
-}
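Aside: the hand-written `minDuration`/`maxDuration` helpers are replaced by the generic `min`/`max` built-ins added in Go 1.21, which accept any ordered type, including `time.Duration`. A quick standalone check of the equivalence:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Built-in generic min/max (Go 1.21+) operate directly on time.Duration.
	a, b := 3*time.Millisecond, 7*time.Millisecond
	fmt.Println(min(a, b), max(a, b)) // prints "3ms 7ms"
}
```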
diff --git a/pkg/report/timeseries_test.go b/pkg/report/timeseries_test.go
index 13fcbfa397d..6cdae2ef6fe 100644
--- a/pkg/report/timeseries_test.go
+++ b/pkg/report/timeseries_test.go
@@ -17,6 +17,8 @@ package report
import (
"testing"
"time"
+
+ "github.com/stretchr/testify/require"
)
func TestGetTimeseries(t *testing.T) {
@@ -25,20 +27,12 @@ func TestGetTimeseries(t *testing.T) {
sp.Add(now, time.Second)
sp.Add(now.Add(5*time.Second), time.Second)
n := sp.getTimeSeries().Len()
- if n < 3 {
- t.Fatalf("expected at 6 points of time series, got %s", sp.getTimeSeries())
- }
+ require.GreaterOrEqualf(t, n, 3, "expected at least 3 points of time series, got %s", sp.getTimeSeries())
// add a point with duplicate timestamp
sp.Add(now, 3*time.Second)
ts := sp.getTimeSeries()
- if ts[0].MinLatency != time.Second {
- t.Fatalf("ts[0] min latency expected %v, got %s", time.Second, ts[0].MinLatency)
- }
- if ts[0].AvgLatency != 2*time.Second {
- t.Fatalf("ts[0] average latency expected %v, got %s", 2*time.Second, ts[0].AvgLatency)
- }
- if ts[0].MaxLatency != 3*time.Second {
- t.Fatalf("ts[0] max latency expected %v, got %s", 3*time.Second, ts[0].MaxLatency)
- }
+ require.Equalf(t, time.Second, ts[0].MinLatency, "ts[0] min latency expected %v, got %s", time.Second, ts[0].MinLatency)
+ require.Equalf(t, 2*time.Second, ts[0].AvgLatency, "ts[0] average latency expected %v, got %s", 2*time.Second, ts[0].AvgLatency)
+ require.Equalf(t, 3*time.Second, ts[0].MaxLatency, "ts[0] max latency expected %v, got %s", 3*time.Second, ts[0].MaxLatency)
}
diff --git a/pkg/runtime/fds_other.go b/pkg/runtime/fds_other.go
index 034f3d42646..2311bb19725 100644
--- a/pkg/runtime/fds_other.go
+++ b/pkg/runtime/fds_other.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build !linux
-// +build !linux
package runtime
diff --git a/pkg/schedule/schedule.go b/pkg/schedule/schedule.go
index 234d01989df..06a243df914 100644
--- a/pkg/schedule/schedule.go
+++ b/pkg/schedule/schedule.go
@@ -17,9 +17,36 @@ package schedule
import (
"context"
"sync"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/verify"
)
-type Job func(context.Context)
+type Job interface {
+ Name() string
+ Do(context.Context)
+}
+
+type job struct {
+ name string
+ do func(context.Context)
+}
+
+func (j job) Name() string {
+ return j.name
+}
+
+func (j job) Do(ctx context.Context) {
+ j.do(ctx)
+}
+
+func NewJob(name string, do func(ctx context.Context)) Job {
+ return job{
+ name: name,
+ do: do,
+ }
+}
// Scheduler can schedule jobs.
type Scheduler interface {
@@ -56,14 +83,18 @@ type fifo struct {
finishCond *sync.Cond
donec chan struct{}
+ lg *zap.Logger
}
// NewFIFOScheduler returns a Scheduler that schedules jobs in FIFO
// order sequentially
-func NewFIFOScheduler() Scheduler {
+func NewFIFOScheduler(lg *zap.Logger) Scheduler {
+ verify.Assert(lg != nil, "the logger should not be nil")
+
f := &fifo{
resume: make(chan struct{}, 1),
donec: make(chan struct{}, 1),
+ lg: lg,
}
f.finishCond = sync.NewCond(&f.mu)
f.ctx, f.cancel = context.WithCancel(context.Background())
@@ -125,7 +156,6 @@ func (f *fifo) Stop() {
}
func (f *fifo) run() {
- // TODO: recover from job panic?
defer func() {
close(f.donec)
close(f.resume)
@@ -149,17 +179,29 @@ func (f *fifo) run() {
f.mu.Unlock()
// clean up pending jobs
for _, todo := range pendings {
- todo(f.ctx)
+ f.executeJob(todo, true)
}
return
}
} else {
- todo(f.ctx)
+ f.executeJob(todo, false)
+ }
+ }
+}
+
+func (f *fifo) executeJob(todo Job, updatedFinishedStats bool) {
+ defer func() {
+ if !updatedFinishedStats {
f.finishCond.L.Lock()
f.finished++
f.pendings = f.pendings[1:]
f.finishCond.Broadcast()
f.finishCond.L.Unlock()
}
- }
+ if err := recover(); err != nil {
+ f.lg.Panic("execute job failed", zap.String("job", todo.Name()), zap.Any("panic", err))
+ }
+ }()
+
+ todo.Do(f.ctx)
}
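Aside: with this change jobs carry a name and the scheduler recovers from a panicking job inside `executeJob`, logging the job name instead of crashing the process. A rough usage sketch, assuming the import path `go.etcd.io/etcd/pkg/v3/schedule` and the `Schedule`/`WaitFinish`/`Stop` methods exercised by the test below:

```go
package main

import (
	"context"
	"fmt"

	"go.uber.org/zap"

	"go.etcd.io/etcd/pkg/v3/schedule"
)

func main() {
	// NewFIFOScheduler now requires a non-nil logger; named jobs make the
	// panic log emitted by executeJob attributable to a specific job.
	s := schedule.NewFIFOScheduler(zap.NewExample())
	defer s.Stop()

	s.Schedule(schedule.NewJob("print-hello", func(ctx context.Context) {
		fmt.Println("hello from a named job")
	}))
	s.WaitFinish(1)
}
```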
diff --git a/pkg/schedule/schedule_test.go b/pkg/schedule/schedule_test.go
index aa9c709f819..af0b5e613ea 100644
--- a/pkg/schedule/schedule_test.go
+++ b/pkg/schedule/schedule_test.go
@@ -16,21 +16,31 @@ package schedule
import (
"context"
+ "fmt"
"testing"
+
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
)
func TestFIFOSchedule(t *testing.T) {
- s := NewFIFOScheduler()
+ s := NewFIFOScheduler(zaptest.NewLogger(t))
defer s.Stop()
next := 0
jobCreator := func(i int) Job {
- return func(ctx context.Context) {
- if next != i {
- t.Fatalf("job#%d: got %d, want %d", i, next, i)
- }
+ return NewJob(fmt.Sprintf("i_%d_increase", i), func(ctx context.Context) {
+ defer func() {
+ if err := recover(); err != nil {
+ fmt.Println("err: ", err)
+ }
+ }()
+ require.Equalf(t, next, i, "job#%d: got %d, want %d", i, next, i)
next = i + 1
- }
+ if next%3 == 0 {
+ panic("fifo panic")
+ }
+ })
}
var jobs []Job
@@ -43,7 +53,7 @@ func TestFIFOSchedule(t *testing.T) {
}
s.WaitFinish(100)
- if s.Scheduled() != 100 {
- t.Errorf("scheduled = %d, want %d", s.Scheduled(), 100)
+ if s.Finished() != 100 {
+ t.Errorf("finished = %d, want %d", s.Finished(), 100)
}
}
diff --git a/pkg/stringutil/rand.go b/pkg/stringutil/rand.go
index a15b0de0c08..347ee7c2b46 100644
--- a/pkg/stringutil/rand.go
+++ b/pkg/stringutil/rand.go
@@ -16,7 +16,6 @@ package stringutil
import (
"math/rand"
- "time"
)
// UniqueStrings returns a slice of randomly generated unique strings.
@@ -24,7 +23,7 @@ func UniqueStrings(slen uint, n int) (ss []string) {
exist := make(map[string]struct{})
ss = make([]string, 0, n)
for len(ss) < n {
- s := randString(slen)
+ s := RandString(slen)
if _, ok := exist[s]; !ok {
ss = append(ss, s)
exist[s] = struct{}{}
@@ -37,15 +36,14 @@ func UniqueStrings(slen uint, n int) (ss []string) {
func RandomStrings(slen uint, n int) (ss []string) {
ss = make([]string, 0, n)
for i := 0; i < n; i++ {
- ss = append(ss, randString(slen))
+ ss = append(ss, RandString(slen))
}
return ss
}
const chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
-func randString(l uint) string {
- rand.Seed(time.Now().UnixNano())
+func RandString(l uint) string {
s := make([]byte, l)
for i := 0; i < int(l); i++ {
s[i] = chars[rand.Intn(len(chars))]
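Aside: exporting `RandString` (and dropping the per-call `rand.Seed`, which is unnecessary on Go 1.20+) lets callers generate a single random string directly. A small sketch, assuming the import path `go.etcd.io/etcd/pkg/v3/stringutil`:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/v3/stringutil"
)

func main() {
	// One 8-character random string from the newly exported helper, plus the
	// existing batch API for comparison.
	fmt.Println(stringutil.RandString(8))
	fmt.Println(stringutil.UniqueStrings(8, 3))
}
```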
diff --git a/pkg/stringutil/rand_test.go b/pkg/stringutil/rand_test.go
index 1b4a9dfd2c8..5ecd0ba50a1 100644
--- a/pkg/stringutil/rand_test.go
+++ b/pkg/stringutil/rand_test.go
@@ -15,16 +15,16 @@
package stringutil
import (
- "fmt"
+ "sort"
"testing"
+
+ "github.com/stretchr/testify/require"
)
func TestUniqueStrings(t *testing.T) {
ss := UniqueStrings(10, 50)
+ sort.Strings(ss)
for i := 1; i < len(ss); i++ {
- if ss[i-1] == ss[i] {
- t.Fatalf("ss[i-1] %q == ss[i] %q", ss[i-1], ss[i])
- }
+ require.NotEqualf(t, ss[i-1], ss[i], "ss[i-1] %q == ss[i] %q", ss[i-1], ss[i])
}
- fmt.Println(ss)
}
diff --git a/pkg/traceutil/trace.go b/pkg/traceutil/trace.go
index bdd8e9b66a2..abf5cf1d229 100644
--- a/pkg/traceutil/trace.go
+++ b/pkg/traceutil/trace.go
@@ -16,24 +16,25 @@
package traceutil
import (
- "bytes"
"context"
"fmt"
"math/rand"
+ "strings"
"time"
"go.uber.org/zap"
)
-const (
- TraceKey = "trace"
- StartTimeKey = "startTime"
-)
+// TraceKey is used as a key of context for Trace.
+type TraceKey struct{}
+
+// StartTimeKey is used as a key of context for start time of operation.
+type StartTimeKey struct{}
// Field is a kv pair to record additional details of the trace.
type Field struct {
Key string
- Value interface{}
+ Value any
}
func (f *Field) format() string {
@@ -44,7 +45,7 @@ func writeFields(fields []Field) string {
if len(fields) == 0 {
return ""
}
- var buf bytes.Buffer
+ var buf strings.Builder
buf.WriteString("{")
for _, f := range fields {
buf.WriteString(f.format())
@@ -81,7 +82,7 @@ func TODO() *Trace {
}
func Get(ctx context.Context) *Trace {
- if trace, ok := ctx.Value(TraceKey).(*Trace); ok && trace != nil {
+ if trace, ok := ctx.Value(TraceKey{}).(*Trace); ok && trace != nil {
return trace
}
return TODO()
@@ -181,41 +182,43 @@ func (t *Trace) logInfo(threshold time.Duration) (string, []zap.Field) {
var steps []string
lastStepTime := t.startTime
for i := 0; i < len(t.steps); i++ {
- step := t.steps[i]
+ tstep := t.steps[i]
// add subtrace common fields which defined at the beginning to each sub-steps
- if step.isSubTraceStart {
+ if tstep.isSubTraceStart {
for j := i + 1; j < len(t.steps) && !t.steps[j].isSubTraceEnd; j++ {
- t.steps[j].fields = append(step.fields, t.steps[j].fields...)
+ t.steps[j].fields = append(tstep.fields, t.steps[j].fields...)
}
continue
}
// add subtrace common fields which defined at the end to each sub-steps
- if step.isSubTraceEnd {
+ if tstep.isSubTraceEnd {
for j := i - 1; j >= 0 && !t.steps[j].isSubTraceStart; j-- {
- t.steps[j].fields = append(step.fields, t.steps[j].fields...)
+ t.steps[j].fields = append(tstep.fields, t.steps[j].fields...)
}
continue
}
}
for i := 0; i < len(t.steps); i++ {
- step := t.steps[i]
- if step.isSubTraceStart || step.isSubTraceEnd {
+ tstep := t.steps[i]
+ if tstep.isSubTraceStart || tstep.isSubTraceEnd {
continue
}
- stepDuration := step.time.Sub(lastStepTime)
+ stepDuration := tstep.time.Sub(lastStepTime)
if stepDuration > threshold {
steps = append(steps, fmt.Sprintf("trace[%d] '%v' %s (duration: %v)",
- traceNum, step.msg, writeFields(step.fields), stepDuration))
+ traceNum, tstep.msg, writeFields(tstep.fields), stepDuration))
}
- lastStepTime = step.time
+ lastStepTime = tstep.time
}
- fs := []zap.Field{zap.String("detail", writeFields(t.fields)),
+ fs := []zap.Field{
+ zap.String("detail", writeFields(t.fields)),
zap.Duration("duration", totalDuration),
zap.Time("start", t.startTime),
zap.Time("end", endTime),
zap.Strings("steps", steps),
- zap.Int("step_count", len(steps))}
+ zap.Int("step_count", len(steps)),
+ }
return msg, fs
}
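Aside: switching `TraceKey`/`StartTimeKey` from string constants to empty struct types is the standard Go idiom for context keys: a value stored under `TraceKey{}` cannot collide with a string key set by an unrelated package. The idiom in isolation (a generic sketch, not the traceutil API):

```go
package main

import (
	"context"
	"fmt"
)

// An empty struct type as a context key: only code that can name this type can
// read or write the value, so collisions with string keys are impossible.
type traceKey struct{}

func main() {
	ctx := context.WithValue(context.Background(), traceKey{}, "trace-42")
	if v, ok := ctx.Value(traceKey{}).(string); ok {
		fmt.Println("found:", v)
	}
}
```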
diff --git a/pkg/traceutil/trace_test.go b/pkg/traceutil/trace_test.go
index d629d0b89c4..4d6d3513f3a 100644
--- a/pkg/traceutil/trace_test.go
+++ b/pkg/traceutil/trace_test.go
@@ -18,13 +18,14 @@ import (
"bytes"
"context"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"testing"
"time"
- "go.uber.org/zap"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
)
func TestGet(t *testing.T) {
@@ -41,7 +42,7 @@ func TestGet(t *testing.T) {
},
{
name: "When the context has trace",
- inputCtx: context.WithValue(context.Background(), TraceKey, traceForTest),
+ inputCtx: context.WithValue(context.Background(), TraceKey{}, traceForTest),
outputTrace: traceForTest,
},
}
@@ -52,7 +53,7 @@ func TestGet(t *testing.T) {
if trace == nil {
t.Errorf("Expected %v; Got nil", tt.outputTrace)
}
- if trace.operation != tt.outputTrace.operation {
+ if tt.outputTrace == nil || trace.operation != tt.outputTrace.operation {
t.Errorf("Expected %v; Got %v", tt.outputTrace, trace)
}
})
@@ -205,7 +206,7 @@ func TestLog(t *testing.T) {
logPath := filepath.Join(os.TempDir(), fmt.Sprintf("test-log-%d", time.Now().UnixNano()))
defer os.RemoveAll(logPath)
- lcfg := zap.NewProductionConfig()
+ lcfg := logutil.DefaultZapLoggerConfig
lcfg.OutputPaths = []string{logPath}
lcfg.ErrorOutputPaths = []string{logPath}
lg, _ := lcfg.Build()
@@ -215,10 +216,8 @@ func TestLog(t *testing.T) {
}
tt.trace.lg = lg
tt.trace.Log()
- data, err := ioutil.ReadFile(logPath)
- if err != nil {
- t.Fatal(err)
- }
+ data, err := os.ReadFile(logPath)
+ require.NoError(t, err)
for _, msg := range tt.expectedMsg {
if !bytes.Contains(data, []byte(msg)) {
@@ -238,7 +237,7 @@ func TestLogIfLong(t *testing.T) {
}{
{
name: "When the duration is smaller than threshold",
- threshold: time.Duration(200 * time.Millisecond),
+ threshold: 200 * time.Millisecond,
trace: &Trace{
operation: "Test",
startTime: time.Now().Add(-100 * time.Millisecond),
@@ -251,7 +250,7 @@ func TestLogIfLong(t *testing.T) {
},
{
name: "When the duration is longer than threshold",
- threshold: time.Duration(50 * time.Millisecond),
+ threshold: 50 * time.Millisecond,
trace: &Trace{
operation: "Test",
startTime: time.Now().Add(-100 * time.Millisecond),
@@ -266,7 +265,7 @@ func TestLogIfLong(t *testing.T) {
},
{
name: "When not all steps are longer than step threshold",
- threshold: time.Duration(50 * time.Millisecond),
+ threshold: 50 * time.Millisecond,
trace: &Trace{
operation: "Test",
startTime: time.Now().Add(-100 * time.Millisecond),
@@ -286,17 +285,15 @@ func TestLogIfLong(t *testing.T) {
logPath := filepath.Join(os.TempDir(), fmt.Sprintf("test-log-%d", time.Now().UnixNano()))
defer os.RemoveAll(logPath)
- lcfg := zap.NewProductionConfig()
+ lcfg := logutil.DefaultZapLoggerConfig
lcfg.OutputPaths = []string{logPath}
lcfg.ErrorOutputPaths = []string{logPath}
lg, _ := lcfg.Build()
tt.trace.lg = lg
tt.trace.LogIfLong(tt.threshold)
- data, err := ioutil.ReadFile(logPath)
- if err != nil {
- t.Fatal(err)
- }
+ data, err := os.ReadFile(logPath)
+ require.NoError(t, err)
for _, msg := range tt.expectedMsg {
if !bytes.Contains(data, []byte(msg)) {
t.Errorf("Expected to find %v in log", msg)
diff --git a/pkg/wait/wait.go b/pkg/wait/wait.go
index a3e2aec7cb5..8989f32d579 100644
--- a/pkg/wait/wait.go
+++ b/pkg/wait/wait.go
@@ -34,9 +34,9 @@ type Wait interface {
// Register waits returns a chan that waits on the given ID.
// The chan will be triggered when Trigger is called with
// the same ID.
- Register(id uint64) <-chan interface{}
+ Register(id uint64) <-chan any
// Trigger triggers the waiting chans with the given ID.
- Trigger(id uint64, x interface{})
+ Trigger(id uint64, x any)
IsRegistered(id uint64) bool
}
@@ -46,7 +46,7 @@ type list struct {
type listElement struct {
l sync.RWMutex
- m map[uint64]chan interface{}
+ m map[uint64]chan any
}
// New creates a Wait.
@@ -55,14 +55,14 @@ func New() Wait {
e: make([]listElement, defaultListElementLength),
}
for i := 0; i < len(res.e); i++ {
- res.e[i].m = make(map[uint64]chan interface{})
+ res.e[i].m = make(map[uint64]chan any)
}
return &res
}
-func (w *list) Register(id uint64) <-chan interface{} {
+func (w *list) Register(id uint64) <-chan any {
idx := id % defaultListElementLength
- newCh := make(chan interface{}, 1)
+ newCh := make(chan any, 1)
w.e[idx].l.Lock()
defer w.e[idx].l.Unlock()
if _, ok := w.e[idx].m[id]; !ok {
@@ -73,7 +73,7 @@ func (w *list) Register(id uint64) <-chan interface{} {
return newCh
}
-func (w *list) Trigger(id uint64, x interface{}) {
+func (w *list) Trigger(id uint64, x any) {
idx := id % defaultListElementLength
w.e[idx].l.Lock()
ch := w.e[idx].m[id]
@@ -94,17 +94,17 @@ func (w *list) IsRegistered(id uint64) bool {
}
type waitWithResponse struct {
- ch <-chan interface{}
+ ch <-chan any
}
-func NewWithResponse(ch <-chan interface{}) Wait {
+func NewWithResponse(ch <-chan any) Wait {
return &waitWithResponse{ch: ch}
}
-func (w *waitWithResponse) Register(id uint64) <-chan interface{} {
+func (w *waitWithResponse) Register(id uint64) <-chan any {
return w.ch
}
-func (w *waitWithResponse) Trigger(id uint64, x interface{}) {}
+func (w *waitWithResponse) Trigger(id uint64, x any) {}
func (w *waitWithResponse) IsRegistered(id uint64) bool {
panic("waitWithResponse.IsRegistered() shouldn't be called")
}
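Aside: `any` is simply an alias for `interface{}` (Go 1.18+), so the Wait contract is unchanged: `Trigger(id, x)` delivers `x` on the channel previously returned by `Register(id)`. A minimal sketch, assuming the import path `go.etcd.io/etcd/pkg/v3/wait`:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/v3/wait"
)

func main() {
	w := wait.New()
	ch := w.Register(1)  // channel that waits on id 1
	w.Trigger(1, "done") // deliver a value to the registered waiter
	fmt.Println(<-ch)    // prints "done"
}
```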
diff --git a/pkg/wait/wait_time.go b/pkg/wait/wait_time.go
index 297e48a47d7..131788959e6 100644
--- a/pkg/wait/wait_time.go
+++ b/pkg/wait/wait_time.go
@@ -19,9 +19,9 @@ import "sync"
type WaitTime interface {
// Wait returns a chan that waits on the given logical deadline.
// The chan will be triggered when Trigger is called with a
- // deadline that is later than the one it is waiting for.
+ // deadline that is later than or equal to the one it is waiting for.
Wait(deadline uint64) <-chan struct{}
- // Trigger triggers all the waiting chans with an earlier logical deadline.
+ // Trigger triggers all the waiting chans with an equal or earlier logical deadline.
Trigger(deadline uint64)
}
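Aside: the reworded comments pin down the inclusive semantics that the adjusted tests below rely on: `Trigger(d)` releases a waiter registered at deadline `d`, not only waiters with strictly earlier deadlines. A minimal sketch, again assuming `go.etcd.io/etcd/pkg/v3/wait`:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/v3/wait"
)

func main() {
	wt := wait.NewTimeList()
	ch := wt.Wait(10) // wait on logical deadline 10
	wt.Trigger(10)    // an equal deadline is enough to release the waiter
	<-ch
	fmt.Println("released")
}
```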
diff --git a/pkg/wait/wait_time_test.go b/pkg/wait/wait_time_test.go
index 26164c4acee..20475582897 100644
--- a/pkg/wait/wait_time_test.go
+++ b/pkg/wait/wait_time_test.go
@@ -54,10 +54,10 @@ func TestWaitTime(t *testing.T) {
func TestWaitTestStress(t *testing.T) {
chs := make([]<-chan struct{}, 0)
wt := NewTimeList()
- for i := 0; i < 10000; i++ {
+ for i := 0; i <= 10000; i++ {
chs = append(chs, wt.Wait(uint64(i)))
}
- wt.Trigger(10000 + 1)
+ wt.Trigger(10000)
for _, ch := range chs {
select {
@@ -78,9 +78,9 @@ func BenchmarkWaitTime(b *testing.B) {
func BenchmarkTriggerAnd10KWaitTime(b *testing.B) {
for i := 0; i < b.N; i++ {
wt := NewTimeList()
- for j := 0; j < 10000; j++ {
+ for j := 0; j <= 10000; j++ {
wt.Wait(uint64(j))
}
- wt.Trigger(10000 + 1)
+ wt.Trigger(10000)
}
}
diff --git a/raft/LICENSE b/raft/LICENSE
deleted file mode 100644
index d6456956733..00000000000
--- a/raft/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/raft/OWNERS b/raft/OWNERS
deleted file mode 100644
index ab781066e23..00000000000
--- a/raft/OWNERS
+++ /dev/null
@@ -1,19 +0,0 @@
-approvers:
-- heyitsanthony
-- philips
-- fanminshi
-- gyuho
-- mitake
-- jpbetz
-- xiang90
-- bdarnell
-reviewers:
-- heyitsanthony
-- philips
-- fanminshi
-- gyuho
-- mitake
-- jpbetz
-- xiang90
-- bdarnell
-- tschottdorf
diff --git a/raft/README.md b/raft/README.md
deleted file mode 100644
index fbd8b4d49b5..00000000000
--- a/raft/README.md
+++ /dev/null
@@ -1,201 +0,0 @@
-# Raft library
-
-Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
-The state machine is kept in sync through the use of a replicated log.
-For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
-(https://raft.github.io/raft.pdf) by Diego Ongaro and John Ousterhout.
-
-This Raft library is stable and feature complete. As of 2016, it is **the most widely used** Raft library in production, serving tens of thousands clusters each day. It powers distributed systems such as etcd, Kubernetes, Docker Swarm, Cloud Foundry Diego, CockroachDB, TiDB, Project Calico, Flannel, Hyperledger and more.
-
-Most Raft implementations have a monolithic design, including storage handling, messaging serialization, and network transport. This library instead follows a minimalistic design philosophy by only implementing the core raft algorithm. This minimalism buys flexibility, determinism, and performance.
-
-To keep the codebase small as well as provide flexibility, the library only implements the Raft algorithm; both network and disk IO are left to the user. Library users must implement their own transportation layer for message passing between Raft peers over the wire. Similarly, users must implement their own storage layer to persist the Raft log and state.
-
-In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine. The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output.
-
-A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/etcd-io/etcd/tree/main/contrib/raftexample
-
-# Features
-
-This raft implementation is a full feature implementation of Raft protocol. Features includes:
-
-- Leader election
-- Log replication
-- Log compaction
-- Membership changes
-- Leadership transfer extension
-- Efficient linearizable read-only queries served by both the leader and followers
- - leader checks with quorum and bypasses Raft log before processing read-only queries
- - followers asks leader to get a safe read index before processing read-only queries
-- More efficient lease-based linearizable read-only queries served by both the leader and followers
- - leader bypasses Raft log and processing read-only queries locally
- - followers asks leader to get a safe read index before processing read-only queries
- - this approach relies on the clock of the all the machines in raft group
-
-This raft implementation also includes a few optional enhancements:
-
-- Optimistic pipelining to reduce log replication latency
-- Flow control for log replication
-- Batching Raft messages to reduce synchronized network I/O calls
-- Batching log entries to reduce disk synchronized I/O
-- Writing to leader's disk in parallel
-- Internal proposal redirection from followers to leader
-- Automatic stepping down when the leader loses quorum
-- Protection against unbounded log growth when quorum is lost
-
-## Notable Users
-
-- [cockroachdb](https://github.com/cockroachdb/cockroach) A Scalable, Survivable, Strongly-Consistent SQL Database
-- [dgraph](https://github.com/dgraph-io/dgraph) A Scalable, Distributed, Low Latency, High Throughput Graph Database
-- [etcd](https://github.com/etcd-io/etcd) A distributed reliable key-value store
-- [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft
-- [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale.
-- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks
-
-## Usage
-
-The primary object in raft is a Node. Either start a Node from scratch using raft.StartNode or start a Node from some initial state using raft.RestartNode.
-
-To start a three-node cluster
-```go
- storage := raft.NewMemoryStorage()
- c := &raft.Config{
- ID: 0x01,
- ElectionTick: 10,
- HeartbeatTick: 1,
- Storage: storage,
- MaxSizePerMsg: 4096,
- MaxInflightMsgs: 256,
- }
- // Set peer list to the other nodes in the cluster.
- // Note that they need to be started separately as well.
- n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
-```
-
-Start a single node cluster, like so:
-```go
- // Create storage and config as shown above.
- // Set peer list to itself, so this node can become the leader of this single-node cluster.
- peers := []raft.Peer{{ID: 0x01}}
- n := raft.StartNode(c, peers)
-```
-
-To allow a new node to join this cluster, do not pass in any peers. First, add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, start the node with an empty peer list, like so:
-```go
- // Create storage and config as shown above.
- n := raft.StartNode(c, nil)
-```
-
-To restart a node from previous state:
-```go
- storage := raft.NewMemoryStorage()
-
- // Recover the in-memory storage from persistent snapshot, state and entries.
- storage.ApplySnapshot(snapshot)
- storage.SetHardState(state)
- storage.Append(entries)
-
- c := &raft.Config{
- ID: 0x01,
- ElectionTick: 10,
- HeartbeatTick: 1,
- Storage: storage,
- MaxSizePerMsg: 4096,
- MaxInflightMsgs: 256,
- }
-
- // Restart raft without peer information.
- // Peer information is already included in the storage.
- n := raft.RestartNode(c)
-```
-
-After creating a Node, the user has a few responsibilities:
-
-First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2.
-
-1. Write Entries, HardState and Snapshot to persistent storage in order, i.e. Entries first, then HardState and Snapshot if they are not empty. If persistent storage supports atomic writes then all of them can be written together. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded.
-
-2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to make leader write to disk in parallel with its followers (as explained at section 10.2.1 in Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialise the messages directly inside the main raft loop.
-
-3. Apply Snapshot (if any) and CommittedEntries to the state machine. If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() to apply it to the node. The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange (but ApplyConfChange must be called one way or the other, and the decision to cancel must be based solely on the state machine and not external information such as the observed health of the node).
-
-4. Call Node.Advance() to signal readiness for the next batch of updates. This may be done at any time after step 1, although all updates must be processed in the order they were returned by Ready.
-
-Second, all persisted log entries must be made available via an implementation of the Storage interface. The provided MemoryStorage type can be used for this (if repopulating its state upon a restart), or a custom disk-backed implementation can be supplied.
-
-Third, after receiving a message from another node, pass it to Node.Step:
-
-```go
- func recvRaftRPC(ctx context.Context, m raftpb.Message) {
- n.Step(ctx, m)
- }
-```
-
-Finally, call `Node.Tick()` at regular intervals (probably via a `time.Ticker`). Raft has two important timeouts: heartbeat and the election timeout. However, internally to the raft package time is represented by an abstract "tick".
-
-The total state machine handling loop will look something like this:
-
-```go
- for {
- select {
- case <-s.Ticker:
- n.Tick()
- case rd := <-s.Node.Ready():
- saveToStorage(rd.HardState, rd.Entries, rd.Snapshot)
- send(rd.Messages)
- if !raft.IsEmptySnap(rd.Snapshot) {
- processSnapshot(rd.Snapshot)
- }
- for _, entry := range rd.CommittedEntries {
- process(entry)
- if entry.Type == raftpb.EntryConfChange {
- var cc raftpb.ConfChange
- cc.Unmarshal(entry.Data)
- s.Node.ApplyConfChange(cc)
- }
- }
- s.Node.Advance()
- case <-s.done:
- return
- }
- }
-```
-
-To propose changes to the state machine from the node to take application data, serialize it into a byte slice and call:
-
-```go
- n.Propose(ctx, data)
-```
-
-If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout.
-
-To add or remove node in a cluster, build ConfChange struct 'cc' and call:
-
-```go
- n.ProposeConfChange(ctx, cc)
-```
-
-After config change is committed, some committed entry with type raftpb.EntryConfChange will be returned. This must be applied to node through:
-
-```go
- var cc raftpb.ConfChange
- cc.Unmarshal(data)
- n.ApplyConfChange(cc)
-```
-
-Note: An ID represents a unique node in a cluster for all time. A
-given ID MUST be used only once even if the old node has been removed.
-This means that for example IP addresses make poor node IDs since they
-may be reused. Node IDs must be non-zero.
-
-## Implementation notes
-
-This implementation is up to date with the final Raft thesis (https://github.com/ongardie/dissertation/blob/master/stanford.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap.
-
-To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log.
-
-This approach introduces a problem when removing a member from a two-member cluster: If one of the members dies before the other one receives the commit of the confchange entry, then the member cannot be removed any more since the cluster cannot make progress. For this reason it is highly recommended to use three or more nodes in every cluster.
-
-## Go docs
-
-More detailed development documentation can be found in go docs: https://pkg.go.dev/go.etcd.io/etcd/raft/v3.
\ No newline at end of file
diff --git a/raft/bootstrap.go b/raft/bootstrap.go
deleted file mode 100644
index 824bd5f51bc..00000000000
--- a/raft/bootstrap.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "errors"
-
- pb "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-// Bootstrap initializes the RawNode for first use by appending configuration
-// changes for the supplied peers. This method returns an error if the Storage
-// is nonempty.
-//
-// It is recommended that instead of calling this method, applications bootstrap
-// their state manually by setting up a Storage that has a first index > 1 and
-// which stores the desired ConfState as its InitialState.
-func (rn *RawNode) Bootstrap(peers []Peer) error {
- if len(peers) == 0 {
- return errors.New("must provide at least one peer to Bootstrap")
- }
- lastIndex, err := rn.raft.raftLog.storage.LastIndex()
- if err != nil {
- return err
- }
-
- if lastIndex != 0 {
- return errors.New("can't bootstrap a nonempty Storage")
- }
-
- // We've faked out initial entries above, but nothing has been
- // persisted. Start with an empty HardState (thus the first Ready will
- // emit a HardState update for the app to persist).
- rn.prevHardSt = emptyState
-
- // TODO(tbg): remove StartNode and give the application the right tools to
- // bootstrap the initial membership in a cleaner way.
- rn.raft.becomeFollower(1, None)
- ents := make([]pb.Entry, len(peers))
- for i, peer := range peers {
- cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
- data, err := cc.Marshal()
- if err != nil {
- return err
- }
-
- ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data}
- }
- rn.raft.raftLog.append(ents...)
-
- // Now apply them, mainly so that the application can call Campaign
- // immediately after StartNode in tests. Note that these nodes will
- // be added to raft twice: here and when the application's Ready
- // loop calls ApplyConfChange. The calls to addNode must come after
- // all calls to raftLog.append so progress.next is set after these
- // bootstrapping entries (it is an error if we try to append these
- // entries since they have already been committed).
- // We do not set raftLog.applied so the application will be able
- // to observe all conf changes via Ready.CommittedEntries.
- //
- // TODO(bdarnell): These entries are still unstable; do we need to preserve
- // the invariant that committed < unstable?
- rn.raft.raftLog.committed = uint64(len(ents))
- for _, peer := range peers {
- rn.raft.applyConfChange(pb.ConfChange{NodeID: peer.ID, Type: pb.ConfChangeAddNode}.AsV2())
- }
- return nil
-}
diff --git a/raft/confchange/confchange.go b/raft/confchange/confchange.go
deleted file mode 100644
index 55e6830ce8a..00000000000
--- a/raft/confchange/confchange.go
+++ /dev/null
@@ -1,421 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package confchange
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "go.etcd.io/etcd/raft/v3/quorum"
- pb "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/raft/v3/tracker"
-)
-
-// Changer facilitates configuration changes. It exposes methods to handle
-// simple and joint consensus while performing the proper validation that allows
-// refusing invalid configuration changes before they affect the active
-// configuration.
-type Changer struct {
- Tracker tracker.ProgressTracker
- LastIndex uint64
-}
-
-// EnterJoint verifies that the outgoing (=right) majority config of the joint
-// config is empty and initializes it with a copy of the incoming (=left)
-// majority config. That is, it transitions from
-//
-// (1 2 3)&&()
-// to
-// (1 2 3)&&(1 2 3).
-//
-// The supplied changes are then applied to the incoming majority config,
-// resulting in a joint configuration that in terms of the Raft thesis[1]
-// (Section 4.3) corresponds to `C_{new,old}`.
-//
-// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf
-func (c Changer) EnterJoint(autoLeave bool, ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) {
- cfg, prs, err := c.checkAndCopy()
- if err != nil {
- return c.err(err)
- }
- if joint(cfg) {
- err := errors.New("config is already joint")
- return c.err(err)
- }
- if len(incoming(cfg.Voters)) == 0 {
- // We allow adding nodes to an empty config for convenience (testing and
- // bootstrap), but you can't enter a joint state.
- err := errors.New("can't make a zero-voter config joint")
- return c.err(err)
- }
- // Clear the outgoing config.
- *outgoingPtr(&cfg.Voters) = quorum.MajorityConfig{}
- // Copy incoming to outgoing.
- for id := range incoming(cfg.Voters) {
- outgoing(cfg.Voters)[id] = struct{}{}
- }
-
- if err := c.apply(&cfg, prs, ccs...); err != nil {
- return c.err(err)
- }
- cfg.AutoLeave = autoLeave
- return checkAndReturn(cfg, prs)
-}
-
-// LeaveJoint transitions out of a joint configuration. It is an error to call
-// this method if the configuration is not joint, i.e. if the outgoing majority
-// config Voters[1] is empty.
-//
-// The outgoing majority config of the joint configuration will be removed,
-// that is, the incoming config is promoted as the sole decision maker. In the
-// notation of the Raft thesis[1] (Section 4.3), this method transitions from
-// `C_{new,old}` into `C_new`.
-//
-// At the same time, any staged learners (LearnersNext) the addition of which
-// was held back by an overlapping voter in the former outgoing config will be
-// inserted into Learners.
-//
-// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf
-func (c Changer) LeaveJoint() (tracker.Config, tracker.ProgressMap, error) {
- cfg, prs, err := c.checkAndCopy()
- if err != nil {
- return c.err(err)
- }
- if !joint(cfg) {
- err := errors.New("can't leave a non-joint config")
- return c.err(err)
- }
- if len(outgoing(cfg.Voters)) == 0 {
- err := fmt.Errorf("configuration is not joint: %v", cfg)
- return c.err(err)
- }
- for id := range cfg.LearnersNext {
- nilAwareAdd(&cfg.Learners, id)
- prs[id].IsLearner = true
- }
- cfg.LearnersNext = nil
-
- for id := range outgoing(cfg.Voters) {
- _, isVoter := incoming(cfg.Voters)[id]
- _, isLearner := cfg.Learners[id]
-
- if !isVoter && !isLearner {
- delete(prs, id)
- }
- }
- *outgoingPtr(&cfg.Voters) = nil
- cfg.AutoLeave = false
-
- return checkAndReturn(cfg, prs)
-}
-
-// Simple carries out a series of configuration changes that (in aggregate)
-// mutates the incoming majority config Voters[0] by at most one. This method
-// will return an error if that is not the case, if the resulting quorum is
-// zero, or if the configuration is in a joint state (i.e. if there is an
-// outgoing configuration).
-func (c Changer) Simple(ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) {
- cfg, prs, err := c.checkAndCopy()
- if err != nil {
- return c.err(err)
- }
- if joint(cfg) {
- err := errors.New("can't apply simple config change in joint config")
- return c.err(err)
- }
- if err := c.apply(&cfg, prs, ccs...); err != nil {
- return c.err(err)
- }
- if n := symdiff(incoming(c.Tracker.Voters), incoming(cfg.Voters)); n > 1 {
- return tracker.Config{}, nil, errors.New("more than one voter changed without entering joint config")
- }
-
- return checkAndReturn(cfg, prs)
-}
-
-// apply a change to the configuration. By convention, changes to voters are
-// always made to the incoming majority config Voters[0]. Voters[1] is either
-// empty or preserves the outgoing majority configuration while in a joint state.
-func (c Changer) apply(cfg *tracker.Config, prs tracker.ProgressMap, ccs ...pb.ConfChangeSingle) error {
- for _, cc := range ccs {
- if cc.NodeID == 0 {
- // etcd replaces the NodeID with zero if it decides (downstream of
- // raft) to not apply a change, so we have to have explicit code
- // here to ignore these.
- continue
- }
- switch cc.Type {
- case pb.ConfChangeAddNode:
- c.makeVoter(cfg, prs, cc.NodeID)
- case pb.ConfChangeAddLearnerNode:
- c.makeLearner(cfg, prs, cc.NodeID)
- case pb.ConfChangeRemoveNode:
- c.remove(cfg, prs, cc.NodeID)
- case pb.ConfChangeUpdateNode:
- default:
- return fmt.Errorf("unexpected conf type %d", cc.Type)
- }
- }
- if len(incoming(cfg.Voters)) == 0 {
- return errors.New("removed all voters")
- }
- return nil
-}
-
-// makeVoter adds or promotes the given ID to be a voter in the incoming
-// majority config.
-func (c Changer) makeVoter(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) {
- pr := prs[id]
- if pr == nil {
- c.initProgress(cfg, prs, id, false /* isLearner */)
- return
- }
-
- pr.IsLearner = false
- nilAwareDelete(&cfg.Learners, id)
- nilAwareDelete(&cfg.LearnersNext, id)
- incoming(cfg.Voters)[id] = struct{}{}
-}
-
-// makeLearner makes the given ID a learner or stages it to be a learner once
-// an active joint configuration is exited.
-//
-// The former happens when the peer is not a part of the outgoing config, in
-// which case we either add a new learner or demote a voter in the incoming
-// config.
-//
-// The latter case occurs when the configuration is joint and the peer is a
-// voter in the outgoing config. In that case, we do not want to add the peer
-// as a learner because then we'd have to track a peer as a voter and learner
-// simultaneously. Instead, we add the learner to LearnersNext, so that it will
-// be added to Learners the moment the outgoing config is removed by
-// LeaveJoint().
-func (c Changer) makeLearner(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) {
- pr := prs[id]
- if pr == nil {
- c.initProgress(cfg, prs, id, true /* isLearner */)
- return
- }
- if pr.IsLearner {
- return
- }
- // Remove any existing voter in the incoming config...
- c.remove(cfg, prs, id)
- // ... but save the Progress.
- prs[id] = pr
- // Use LearnersNext if we can't add the learner to Learners directly, i.e.
- // if the peer is still tracked as a voter in the outgoing config. It will
- // be turned into a learner in LeaveJoint().
- //
- // Otherwise, add a regular learner right away.
- if _, onRight := outgoing(cfg.Voters)[id]; onRight {
- nilAwareAdd(&cfg.LearnersNext, id)
- } else {
- pr.IsLearner = true
- nilAwareAdd(&cfg.Learners, id)
- }
-}
-
-// remove this peer as a voter or learner from the incoming config.
-func (c Changer) remove(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) {
- if _, ok := prs[id]; !ok {
- return
- }
-
- delete(incoming(cfg.Voters), id)
- nilAwareDelete(&cfg.Learners, id)
- nilAwareDelete(&cfg.LearnersNext, id)
-
- // If the peer is still a voter in the outgoing config, keep the Progress.
- if _, onRight := outgoing(cfg.Voters)[id]; !onRight {
- delete(prs, id)
- }
-}
-
-// initProgress initializes a new progress for the given node or learner.
-func (c Changer) initProgress(cfg *tracker.Config, prs tracker.ProgressMap, id uint64, isLearner bool) {
- if !isLearner {
- incoming(cfg.Voters)[id] = struct{}{}
- } else {
- nilAwareAdd(&cfg.Learners, id)
- }
- prs[id] = &tracker.Progress{
- // Initializing the Progress with the last index means that the follower
- // can be probed (with the last index).
- //
- // TODO(tbg): seems awfully optimistic. Using the first index would be
- // better. The general expectation here is that the follower has no log
- // at all (and will thus likely need a snapshot), though the app may
- // have applied a snapshot out of band before adding the replica (thus
- // making the first index the better choice).
- Next: c.LastIndex,
- Match: 0,
- Inflights: tracker.NewInflights(c.Tracker.MaxInflight),
- IsLearner: isLearner,
- // When a node is first added, we should mark it as recently active.
- // Otherwise, CheckQuorum may cause us to step down if it is invoked
- // before the added node has had a chance to communicate with us.
- RecentActive: true,
- }
-}
-
-// checkInvariants makes sure that the config and progress are compatible with
-// each other. This is used to check both what the Changer is initialized with,
-// as well as what it returns.
-func checkInvariants(cfg tracker.Config, prs tracker.ProgressMap) error {
- // NB: intentionally allow the empty config. In production we'll never see an
- // empty config (we prevent it from being created) but we will need to be
- // able to *create* an initial config, for example during bootstrap (or
- // during tests). Instead of having to hand-code this, we allow
- // transitioning from an empty config into any other legal and non-empty
- // config.
- for _, ids := range []map[uint64]struct{}{
- cfg.Voters.IDs(),
- cfg.Learners,
- cfg.LearnersNext,
- } {
- for id := range ids {
- if _, ok := prs[id]; !ok {
- return fmt.Errorf("no progress for %d", id)
- }
- }
- }
-
- // Any staged learner was staged because it could not be directly added due
- // to a conflicting voter in the outgoing config.
- for id := range cfg.LearnersNext {
- if _, ok := outgoing(cfg.Voters)[id]; !ok {
- return fmt.Errorf("%d is in LearnersNext, but not Voters[1]", id)
- }
- if prs[id].IsLearner {
- return fmt.Errorf("%d is in LearnersNext, but is already marked as learner", id)
- }
- }
- // Conversely, Learners and Voters don't intersect at all.
- for id := range cfg.Learners {
- if _, ok := outgoing(cfg.Voters)[id]; ok {
- return fmt.Errorf("%d is in Learners and Voters[1]", id)
- }
- if _, ok := incoming(cfg.Voters)[id]; ok {
- return fmt.Errorf("%d is in Learners and Voters[0]", id)
- }
- if !prs[id].IsLearner {
- return fmt.Errorf("%d is in Learners, but is not marked as learner", id)
- }
- }
-
- if !joint(cfg) {
- // We enforce that empty maps are nil instead of zero.
- if outgoing(cfg.Voters) != nil {
- return fmt.Errorf("cfg.Voters[1] must be nil when not joint")
- }
- if cfg.LearnersNext != nil {
- return fmt.Errorf("cfg.LearnersNext must be nil when not joint")
- }
- if cfg.AutoLeave {
- return fmt.Errorf("AutoLeave must be false when not joint")
- }
- }
-
- return nil
-}
-
-// checkAndCopy copies the tracker's config and progress map (deeply enough for
-// the purposes of the Changer) and returns those copies. It returns an error
-// if checkInvariants does.
-func (c Changer) checkAndCopy() (tracker.Config, tracker.ProgressMap, error) {
- cfg := c.Tracker.Config.Clone()
- prs := tracker.ProgressMap{}
-
- for id, pr := range c.Tracker.Progress {
- // A shallow copy is enough because we only mutate the Learner field.
- ppr := *pr
- prs[id] = &ppr
- }
- return checkAndReturn(cfg, prs)
-}
-
-// checkAndReturn calls checkInvariants on the input and returns either the
-// resulting error or the input.
-func checkAndReturn(cfg tracker.Config, prs tracker.ProgressMap) (tracker.Config, tracker.ProgressMap, error) {
- if err := checkInvariants(cfg, prs); err != nil {
- return tracker.Config{}, tracker.ProgressMap{}, err
- }
- return cfg, prs, nil
-}
-
-// err returns zero values and an error.
-func (c Changer) err(err error) (tracker.Config, tracker.ProgressMap, error) {
- return tracker.Config{}, nil, err
-}
-
-// nilAwareAdd populates a map entry, creating the map if necessary.
-func nilAwareAdd(m *map[uint64]struct{}, id uint64) {
- if *m == nil {
- *m = map[uint64]struct{}{}
- }
- (*m)[id] = struct{}{}
-}
-
-// nilAwareDelete deletes from a map, nil'ing the map itself if it is empty after.
-func nilAwareDelete(m *map[uint64]struct{}, id uint64) {
- if *m == nil {
- return
- }
- delete(*m, id)
- if len(*m) == 0 {
- *m = nil
- }
-}
-
-// symdiff returns the count of the symmetric difference between the sets of
-// uint64s, i.e. len( (l - r) \union (r - l)).
-func symdiff(l, r map[uint64]struct{}) int {
- var n int
- pairs := [][2]quorum.MajorityConfig{
- {l, r}, // count elems in l but not in r
- {r, l}, // count elems in r but not in l
- }
- for _, p := range pairs {
- for id := range p[0] {
- if _, ok := p[1][id]; !ok {
- n++
- }
- }
- }
- return n
-}
-
-func joint(cfg tracker.Config) bool {
- return len(outgoing(cfg.Voters)) > 0
-}
-
-func incoming(voters quorum.JointConfig) quorum.MajorityConfig { return voters[0] }
-func outgoing(voters quorum.JointConfig) quorum.MajorityConfig { return voters[1] }
-func outgoingPtr(voters *quorum.JointConfig) *quorum.MajorityConfig { return &voters[1] }
-
-// Describe prints the type and NodeID of the configuration changes as a
-// space-delimited string.
-func Describe(ccs ...pb.ConfChangeSingle) string {
- var buf strings.Builder
- for _, cc := range ccs {
- if buf.Len() > 0 {
- buf.WriteByte(' ')
- }
- fmt.Fprintf(&buf, "%s(%d)", cc.Type, cc.NodeID)
- }
- return buf.String()
-}
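For readers reviewing this removal, the Changer API deleted above is driven the same way the tests below drive it. A minimal, illustrative sketch (not part of the original sources; it assumes the raft/v3 import paths used throughout this diff):

```
package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/v3/confchange"
	pb "go.etcd.io/etcd/raft/v3/raftpb"
	"go.etcd.io/etcd/raft/v3/tracker"
)

func main() {
	c := confchange.Changer{Tracker: tracker.MakeProgressTracker(10), LastIndex: 10}

	// Simple (non-joint) change: add voter 1. At most one voter may change per call.
	cfg, prs, err := c.Simple(pb.ConfChangeSingle{Type: pb.ConfChangeAddNode, NodeID: 1})
	if err != nil {
		panic(err)
	}
	c.Tracker.Config, c.Tracker.Progress = cfg, prs

	// Joint change: add voters 2 and 3 atomically, then leave the joint config.
	cfg, prs, err = c.EnterJoint(false /* autoLeave */,
		pb.ConfChangeSingle{Type: pb.ConfChangeAddNode, NodeID: 2},
		pb.ConfChangeSingle{Type: pb.ConfChangeAddNode, NodeID: 3},
	)
	if err != nil {
		panic(err)
	}
	c.Tracker.Config, c.Tracker.Progress = cfg, prs

	if cfg, prs, err = c.LeaveJoint(); err != nil {
		panic(err)
	}
	c.Tracker.Config, c.Tracker.Progress = cfg, prs

	fmt.Println(c.Tracker.Config) // e.g. voters=(1 2 3)
}
```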
diff --git a/raft/confchange/datadriven_test.go b/raft/confchange/datadriven_test.go
deleted file mode 100644
index ab1524091c5..00000000000
--- a/raft/confchange/datadriven_test.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package confchange
-
-import (
- "errors"
- "fmt"
- "strconv"
- "strings"
- "testing"
-
- "github.com/cockroachdb/datadriven"
- pb "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/raft/v3/tracker"
-)
-
-func TestConfChangeDataDriven(t *testing.T) {
- datadriven.Walk(t, "testdata", func(t *testing.T, path string) {
- tr := tracker.MakeProgressTracker(10)
- c := Changer{
- Tracker: tr,
- LastIndex: 0, // incremented in this test with each cmd
- }
-
- // The test files use the commands
- // - simple: run a simple conf change (i.e. no joint consensus),
- // - enter-joint: enter a joint config, and
- // - leave-joint: leave a joint config.
- // The first two take a list of config changes, which have the following
- // syntax:
- // - vn: make n a voter,
- // - ln: make n a learner,
- // - rn: remove n, and
- // - un: update n.
- datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string {
- defer func() {
- c.LastIndex++
- }()
- var ccs []pb.ConfChangeSingle
- toks := strings.Split(strings.TrimSpace(d.Input), " ")
- if toks[0] == "" {
- toks = nil
- }
- for _, tok := range toks {
- if len(tok) < 2 {
- return fmt.Sprintf("unknown token %s", tok)
- }
- var cc pb.ConfChangeSingle
- switch tok[0] {
- case 'v':
- cc.Type = pb.ConfChangeAddNode
- case 'l':
- cc.Type = pb.ConfChangeAddLearnerNode
- case 'r':
- cc.Type = pb.ConfChangeRemoveNode
- case 'u':
- cc.Type = pb.ConfChangeUpdateNode
- default:
- return fmt.Sprintf("unknown input: %s", tok)
- }
- id, err := strconv.ParseUint(tok[1:], 10, 64)
- if err != nil {
- return err.Error()
- }
- cc.NodeID = id
- ccs = append(ccs, cc)
- }
-
- var cfg tracker.Config
- var prs tracker.ProgressMap
- var err error
- switch d.Cmd {
- case "simple":
- cfg, prs, err = c.Simple(ccs...)
- case "enter-joint":
- var autoLeave bool
- if len(d.CmdArgs) > 0 {
- d.ScanArgs(t, "autoleave", &autoLeave)
- }
- cfg, prs, err = c.EnterJoint(autoLeave, ccs...)
- case "leave-joint":
- if len(ccs) > 0 {
- err = errors.New("this command takes no input")
- } else {
- cfg, prs, err = c.LeaveJoint()
- }
- default:
- return "unknown command"
- }
- if err != nil {
- return err.Error() + "\n"
- }
- c.Tracker.Config, c.Tracker.Progress = cfg, prs
- return fmt.Sprintf("%s\n%s", c.Tracker.Config, c.Tracker.Progress)
- })
- })
-}
diff --git a/raft/confchange/quick_test.go b/raft/confchange/quick_test.go
deleted file mode 100644
index 16d72c199ba..00000000000
--- a/raft/confchange/quick_test.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package confchange
-
-import (
- "fmt"
- "math/rand"
- "reflect"
- "testing"
- "testing/quick"
-
- pb "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/raft/v3/tracker"
-)
-
-// TestConfChangeQuick uses quickcheck to verify that simple and joint config
-// changes arrive at the same result.
-func TestConfChangeQuick(t *testing.T) {
- cfg := &quick.Config{
- MaxCount: 1000,
- }
-
- // Log the first couple of runs to give some indication of things working
- // as intended.
- const infoCount = 5
-
- runWithJoint := func(c *Changer, ccs []pb.ConfChangeSingle) error {
- cfg, prs, err := c.EnterJoint(false /* autoLeave */, ccs...)
- if err != nil {
- return err
- }
- // Also do this with autoLeave on, just to check that we'd get the same
- // result.
- cfg2a, prs2a, err := c.EnterJoint(true /* autoLeave */, ccs...)
- if err != nil {
- return err
- }
- cfg2a.AutoLeave = false
- if !reflect.DeepEqual(cfg, cfg2a) || !reflect.DeepEqual(prs, prs2a) {
- return fmt.Errorf("cfg: %+v\ncfg2a: %+v\nprs: %+v\nprs2a: %+v",
- cfg, cfg2a, prs, prs2a)
- }
- c.Tracker.Config = cfg
- c.Tracker.Progress = prs
- cfg2b, prs2b, err := c.LeaveJoint()
- if err != nil {
- return err
- }
- // Reset back to the main branch with autoLeave=false.
- c.Tracker.Config = cfg
- c.Tracker.Progress = prs
- cfg, prs, err = c.LeaveJoint()
- if err != nil {
- return err
- }
- if !reflect.DeepEqual(cfg, cfg2b) || !reflect.DeepEqual(prs, prs2b) {
- return fmt.Errorf("cfg: %+v\ncfg2b: %+v\nprs: %+v\nprs2b: %+v",
- cfg, cfg2b, prs, prs2b)
- }
- c.Tracker.Config = cfg
- c.Tracker.Progress = prs
- return nil
- }
-
- runWithSimple := func(c *Changer, ccs []pb.ConfChangeSingle) error {
- for _, cc := range ccs {
- cfg, prs, err := c.Simple(cc)
- if err != nil {
- return err
- }
- c.Tracker.Config, c.Tracker.Progress = cfg, prs
- }
- return nil
- }
-
- type testFunc func(*Changer, []pb.ConfChangeSingle) error
-
- wrapper := func(invoke testFunc) func(setup initialChanges, ccs confChanges) (*Changer, error) {
- return func(setup initialChanges, ccs confChanges) (*Changer, error) {
- tr := tracker.MakeProgressTracker(10)
- c := &Changer{
- Tracker: tr,
- LastIndex: 10,
- }
-
- if err := runWithSimple(c, setup); err != nil {
- return nil, err
- }
-
- err := invoke(c, ccs)
- return c, err
- }
- }
-
- var n int
- f1 := func(setup initialChanges, ccs confChanges) *Changer {
- c, err := wrapper(runWithSimple)(setup, ccs)
- if err != nil {
- t.Fatal(err)
- }
- if n < infoCount {
- t.Log("initial setup:", Describe(setup...))
- t.Log("changes:", Describe(ccs...))
- t.Log(c.Tracker.Config)
- t.Log(c.Tracker.Progress)
- }
- n++
- return c
- }
- f2 := func(setup initialChanges, ccs confChanges) *Changer {
- c, err := wrapper(runWithJoint)(setup, ccs)
- if err != nil {
- t.Fatal(err)
- }
- return c
- }
- err := quick.CheckEqual(f1, f2, cfg)
- if err == nil {
- return
- }
- cErr, ok := err.(*quick.CheckEqualError)
- if !ok {
- t.Fatal(err)
- }
-
- t.Error("setup:", Describe(cErr.In[0].([]pb.ConfChangeSingle)...))
- t.Error("ccs:", Describe(cErr.In[1].([]pb.ConfChangeSingle)...))
- t.Errorf("out1: %+v\nout2: %+v", cErr.Out1, cErr.Out2)
-}
-
-type confChangeTyp pb.ConfChangeType
-
-func (confChangeTyp) Generate(rand *rand.Rand, _ int) reflect.Value {
- return reflect.ValueOf(confChangeTyp(rand.Intn(4)))
-}
-
-type confChanges []pb.ConfChangeSingle
-
-func genCC(num func() int, id func() uint64, typ func() pb.ConfChangeType) []pb.ConfChangeSingle {
- var ccs []pb.ConfChangeSingle
- n := num()
- for i := 0; i < n; i++ {
- ccs = append(ccs, pb.ConfChangeSingle{Type: typ(), NodeID: id()})
- }
- return ccs
-}
-
-func (confChanges) Generate(rand *rand.Rand, _ int) reflect.Value {
- num := func() int {
- return 1 + rand.Intn(9)
- }
- id := func() uint64 {
- // Note that num() >= 1, so this method never returns 1, meaning that we
- // never touch NodeID one, which is kept special to avoid voterless
- // configs altogether in this test.
- return 1 + uint64(num())
- }
- typ := func() pb.ConfChangeType {
- return pb.ConfChangeType(rand.Intn(len(pb.ConfChangeType_name)))
- }
- return reflect.ValueOf(genCC(num, id, typ))
-}
-
-type initialChanges []pb.ConfChangeSingle
-
-func (initialChanges) Generate(rand *rand.Rand, _ int) reflect.Value {
- num := func() int {
- return 1 + rand.Intn(5)
- }
- id := func() uint64 { return uint64(num()) }
- typ := func() pb.ConfChangeType {
- return pb.ConfChangeAddNode
- }
- // NodeID one is special - it's in the initial config and will be a voter
- // always (this is to avoid uninteresting edge cases where the simple conf
- // changes can't easily make progress).
- ccs := append([]pb.ConfChangeSingle{{Type: pb.ConfChangeAddNode, NodeID: 1}}, genCC(num, id, typ)...)
- return reflect.ValueOf(ccs)
-}
diff --git a/raft/confchange/restore.go b/raft/confchange/restore.go
deleted file mode 100644
index ea317fc289a..00000000000
--- a/raft/confchange/restore.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package confchange
-
-import (
- pb "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/raft/v3/tracker"
-)
-
-// toConfChangeSingle translates a conf state into 1) a slice of operations creating
-// first the config that will become the outgoing one, and then the incoming one, and
-// 2) another slice that, when applied to the config resulting from 1), represents the
-// ConfState.
-func toConfChangeSingle(cs pb.ConfState) (out []pb.ConfChangeSingle, in []pb.ConfChangeSingle) {
- // Example to follow along this code:
- // voters=(1 2 3) learners=(5) outgoing=(1 2 4 6) learners_next=(4)
- //
- // This means that before entering the joint config, the configuration
- // had voters (1 2 4 6) and perhaps some learners that are already gone.
- // The new set of voters is (1 2 3), i.e. (1 2) were kept around, and (4 6)
- // are no longer voters; however 4 is poised to become a learner upon leaving
- // the joint state.
- // We can't tell whether 5 was a learner before entering the joint config,
- // but it doesn't matter (we'll pretend that it wasn't).
- //
- // The code below will construct
- // outgoing = add 1; add 2; add 4; add 6
- // incoming = remove 1; remove 2; remove 4; remove 6
- // add 1; add 2; add 3;
- // add-learner 5;
- // add-learner 4;
- //
- // So, when starting with an empty config, after applying 'outgoing' we have
- //
- // quorum=(1 2 4 6)
- //
- // From which we enter a joint state via 'incoming'
- //
- // quorum=(1 2 3)&&(1 2 4 6) learners=(5) learners_next=(4)
- //
- // as desired.
-
- for _, id := range cs.VotersOutgoing {
- // If there are outgoing voters, first add them one by one so that the
- // (non-joint) config has them all.
- out = append(out, pb.ConfChangeSingle{
- Type: pb.ConfChangeAddNode,
- NodeID: id,
- })
-
- }
-
- // We're done constructing the outgoing slice, now on to the incoming one
- // (which will apply on top of the config created by the outgoing slice).
-
- // First, we'll remove all of the outgoing voters.
- for _, id := range cs.VotersOutgoing {
- in = append(in, pb.ConfChangeSingle{
- Type: pb.ConfChangeRemoveNode,
- NodeID: id,
- })
- }
- // Then we'll add the incoming voters and learners.
- for _, id := range cs.Voters {
- in = append(in, pb.ConfChangeSingle{
- Type: pb.ConfChangeAddNode,
- NodeID: id,
- })
- }
- for _, id := range cs.Learners {
- in = append(in, pb.ConfChangeSingle{
- Type: pb.ConfChangeAddLearnerNode,
- NodeID: id,
- })
- }
- // Same for LearnersNext; these are nodes we want to be learners but which
- // are currently voters in the outgoing config.
- for _, id := range cs.LearnersNext {
- in = append(in, pb.ConfChangeSingle{
- Type: pb.ConfChangeAddLearnerNode,
- NodeID: id,
- })
- }
- return out, in
-}
-
-func chain(chg Changer, ops ...func(Changer) (tracker.Config, tracker.ProgressMap, error)) (tracker.Config, tracker.ProgressMap, error) {
- for _, op := range ops {
- cfg, prs, err := op(chg)
- if err != nil {
- return tracker.Config{}, nil, err
- }
- chg.Tracker.Config = cfg
- chg.Tracker.Progress = prs
- }
- return chg.Tracker.Config, chg.Tracker.Progress, nil
-}
-
-// Restore takes a Changer (which must represent an empty configuration), and
-// runs a sequence of changes enacting the configuration described in the
-// ConfState.
-//
-// TODO(tbg) it's silly that this takes a Changer. Unravel this by making sure
-// the Changer only needs a ProgressMap (not a whole Tracker) at which point
-// this can just take LastIndex and MaxInflight directly instead and cook up
-// the results from that alone.
-func Restore(chg Changer, cs pb.ConfState) (tracker.Config, tracker.ProgressMap, error) {
- outgoing, incoming := toConfChangeSingle(cs)
-
- var ops []func(Changer) (tracker.Config, tracker.ProgressMap, error)
-
- if len(outgoing) == 0 {
- // No outgoing config, so just apply the incoming changes one by one.
- for _, cc := range incoming {
- cc := cc // loop-local copy
- ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) {
- return chg.Simple(cc)
- })
- }
- } else {
- // The ConfState describes a joint configuration.
- //
- // First, apply all of the changes of the outgoing config one by one, so
- // that it temporarily becomes the incoming active config. For example,
- // if the config is (1 2 3)&(2 3 4), this will establish (2 3 4)&().
- for _, cc := range outgoing {
- cc := cc // loop-local copy
- ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) {
- return chg.Simple(cc)
- })
- }
- // Now enter the joint state, which rotates the above additions into the
- // outgoing config, and adds the incoming config in. Continuing the
- // example above, we'd get (1 2 3)&(2 3 4), i.e. the incoming operations
- // would be removing 2,3,4 and then adding in 1,2,3 while transitioning
- // into a joint state.
- ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) {
- return chg.EnterJoint(cs.AutoLeave, incoming...)
- })
- }
-
- return chain(chg, ops...)
-}
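To make the restore path above concrete, here is a small, illustrative round trip (again not part of the original sources), using the same joint ConfState as the worked example in toConfChangeSingle and the unit test in the file that follows:

```
package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/v3/confchange"
	pb "go.etcd.io/etcd/raft/v3/raftpb"
	"go.etcd.io/etcd/raft/v3/tracker"
)

func main() {
	// voters=(1 2 3) learners=(5) outgoing=(1 2 4 6) learners_next=(4)
	cs := pb.ConfState{
		Voters:         []uint64{1, 2, 3},
		Learners:       []uint64{5},
		VotersOutgoing: []uint64{1, 2, 4, 6},
		LearnersNext:   []uint64{4},
	}

	chg := confchange.Changer{Tracker: tracker.MakeProgressTracker(20), LastIndex: 10}
	cfg, prs, err := confchange.Restore(chg, cs)
	if err != nil {
		panic(err)
	}
	chg.Tracker.Config, chg.Tracker.Progress = cfg, prs

	// The tracker should now report a ConfState equivalent to the input.
	fmt.Printf("%+v\n", chg.Tracker.ConfState())
}
```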
diff --git a/raft/confchange/restore_test.go b/raft/confchange/restore_test.go
deleted file mode 100644
index 50712c7941f..00000000000
--- a/raft/confchange/restore_test.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package confchange
-
-import (
- "math/rand"
- "reflect"
- "sort"
- "testing"
- "testing/quick"
-
- pb "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/raft/v3/tracker"
-)
-
-type rndConfChange pb.ConfState
-
-// Generate creates a random (valid) ConfState for use with quickcheck.
-func (rndConfChange) Generate(rand *rand.Rand, _ int) reflect.Value {
- conv := func(sl []int) []uint64 {
- // We want IDs but the incoming slice is zero-indexed, so add one to
- // each.
- out := make([]uint64, len(sl))
- for i := range sl {
- out[i] = uint64(sl[i] + 1)
- }
- return out
- }
- var cs pb.ConfState
- // NB: never generate the empty ConfState, that one should be unit tested.
- nVoters := 1 + rand.Intn(5)
-
- nLearners := rand.Intn(5)
- // The number of voters that are in the outgoing config but not in the
- // incoming one. (We'll additionally retain a random number of the
- // incoming voters below).
- nRemovedVoters := rand.Intn(3)
-
- // Voters, learners, and removed voters must not overlap. A "removed voter"
- // is one that we have in the outgoing config but not the incoming one.
- ids := conv(rand.Perm(2 * (nVoters + nLearners + nRemovedVoters)))
-
- cs.Voters = ids[:nVoters]
- ids = ids[nVoters:]
-
- if nLearners > 0 {
- cs.Learners = ids[:nLearners]
- ids = ids[nLearners:]
- }
-
- // Roll the dice on how many of the incoming voters we decide were also
- // previously voters.
- //
- // NB: this code avoids creating non-nil empty slices (here and below).
- nOutgoingRetainedVoters := rand.Intn(nVoters + 1)
- if nOutgoingRetainedVoters > 0 || nRemovedVoters > 0 {
- cs.VotersOutgoing = append([]uint64(nil), cs.Voters[:nOutgoingRetainedVoters]...)
- cs.VotersOutgoing = append(cs.VotersOutgoing, ids[:nRemovedVoters]...)
- }
- // Only outgoing voters that are not also incoming voters can be in
- // LearnersNext (they represent demotions).
- if nRemovedVoters > 0 {
- if nLearnersNext := rand.Intn(nRemovedVoters + 1); nLearnersNext > 0 {
- cs.LearnersNext = ids[:nLearnersNext]
- }
- }
-
- cs.AutoLeave = len(cs.VotersOutgoing) > 0 && rand.Intn(2) == 1
- return reflect.ValueOf(rndConfChange(cs))
-}
-
-func TestRestore(t *testing.T) {
- cfg := quick.Config{MaxCount: 1000}
-
- f := func(cs pb.ConfState) bool {
- chg := Changer{
- Tracker: tracker.MakeProgressTracker(20),
- LastIndex: 10,
- }
- cfg, prs, err := Restore(chg, cs)
- if err != nil {
- t.Error(err)
- return false
- }
- chg.Tracker.Config = cfg
- chg.Tracker.Progress = prs
-
- for _, sl := range [][]uint64{
- cs.Voters,
- cs.Learners,
- cs.VotersOutgoing,
- cs.LearnersNext,
- } {
- sort.Slice(sl, func(i, j int) bool { return sl[i] < sl[j] })
- }
-
- cs2 := chg.Tracker.ConfState()
- // NB: cs.Equivalent does the same "sorting" dance internally, but let's
- // test it a bit here instead of relying on it.
- if reflect.DeepEqual(cs, cs2) && cs.Equivalent(cs2) == nil && cs2.Equivalent(cs) == nil {
- return true // success
- }
- t.Errorf(`
-before: %+#v
-after: %+#v`, cs, cs2)
- return false
- }
-
- ids := func(sl ...uint64) []uint64 {
- return sl
- }
-
- // Unit tests.
- for _, cs := range []pb.ConfState{
- {},
- {Voters: ids(1, 2, 3)},
- {Voters: ids(1, 2, 3), Learners: ids(4, 5, 6)},
- {Voters: ids(1, 2, 3), Learners: ids(5), VotersOutgoing: ids(1, 2, 4, 6), LearnersNext: ids(4)},
- } {
- if !f(cs) {
- t.FailNow() // f() already logged a nice t.Error()
- }
- }
-
- if err := quick.Check(func(cs rndConfChange) bool {
- return f(pb.ConfState(cs))
- }, &cfg); err != nil {
- t.Error(err)
- }
-}
diff --git a/raft/confchange/testdata/joint_autoleave.txt b/raft/confchange/testdata/joint_autoleave.txt
deleted file mode 100644
index 9ec8cb0a467..00000000000
--- a/raft/confchange/testdata/joint_autoleave.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-# Test the autoleave argument to EnterJoint. It defaults to false in the
-# datadriven tests. The flag has no associated semantics in this package;
-# it is simply passed through.
-simple
-v1
-----
-voters=(1)
-1: StateProbe match=0 next=0
-
-# Autoleave is reflected in the config.
-enter-joint autoleave=true
-v2 v3
-----
-voters=(1 2 3)&&(1) autoleave
-1: StateProbe match=0 next=0
-2: StateProbe match=0 next=1
-3: StateProbe match=0 next=1
-
-# Can't enter-joint twice, even if autoleave changes.
-enter-joint autoleave=false
-----
-config is already joint
-
-leave-joint
-----
-voters=(1 2 3)
-1: StateProbe match=0 next=0
-2: StateProbe match=0 next=1
-3: StateProbe match=0 next=1
diff --git a/raft/confchange/testdata/joint_idempotency.txt b/raft/confchange/testdata/joint_idempotency.txt
deleted file mode 100644
index 6d1346b7895..00000000000
--- a/raft/confchange/testdata/joint_idempotency.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-# Verify that operations upon entering the joint state are idempotent, i.e.
-# removing an absent node is fine, etc.
-
-simple
-v1
-----
-voters=(1)
-1: StateProbe match=0 next=0
-
-enter-joint
-r1 r2 r9 v2 v3 v4 v2 v3 v4 l2 l2 r4 r4 l1 l1
-----
-voters=(3)&&(1) learners=(2) learners_next=(1)
-1: StateProbe match=0 next=0
-2: StateProbe match=0 next=1 learner
-3: StateProbe match=0 next=1
-
-leave-joint
-----
-voters=(3) learners=(1 2)
-1: StateProbe match=0 next=0 learner
-2: StateProbe match=0 next=1 learner
-3: StateProbe match=0 next=1
diff --git a/raft/confchange/testdata/joint_learners_next.txt b/raft/confchange/testdata/joint_learners_next.txt
deleted file mode 100644
index df1da7d0c9f..00000000000
--- a/raft/confchange/testdata/joint_learners_next.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-# Verify that when a voter is demoted in a joint config, it will show up in
-# learners_next until the joint config is left, and only then will the progress
-# turn into that of a learner, without resetting the progress. Note that this
-# last fact is verified by `next`, which can tell us which "round" the progress
-# was originally created in.
-
-simple
-v1
-----
-voters=(1)
-1: StateProbe match=0 next=0
-
-enter-joint
-v2 l1
-----
-voters=(2)&&(1) learners_next=(1)
-1: StateProbe match=0 next=0
-2: StateProbe match=0 next=1
-
-leave-joint
-----
-voters=(2) learners=(1)
-1: StateProbe match=0 next=0 learner
-2: StateProbe match=0 next=1
diff --git a/raft/confchange/testdata/joint_safety.txt b/raft/confchange/testdata/joint_safety.txt
deleted file mode 100644
index 75d11b199e0..00000000000
--- a/raft/confchange/testdata/joint_safety.txt
+++ /dev/null
@@ -1,81 +0,0 @@
-leave-joint
-----
-can't leave a non-joint config
-
-enter-joint
-----
-can't make a zero-voter config joint
-
-enter-joint
-v1
-----
-can't make a zero-voter config joint
-
-simple
-v1
-----
-voters=(1)
-1: StateProbe match=0 next=3
-
-leave-joint
-----
-can't leave a non-joint config
-
-# Can enter into joint config.
-enter-joint
-----
-voters=(1)&&(1)
-1: StateProbe match=0 next=3
-
-enter-joint
-----
-config is already joint
-
-leave-joint
-----
-voters=(1)
-1: StateProbe match=0 next=3
-
-leave-joint
-----
-can't leave a non-joint config
-
-# Can enter again, this time with some ops.
-enter-joint
-r1 v2 v3 l4
-----
-voters=(2 3)&&(1) learners=(4)
-1: StateProbe match=0 next=3
-2: StateProbe match=0 next=9
-3: StateProbe match=0 next=9
-4: StateProbe match=0 next=9 learner
-
-enter-joint
-----
-config is already joint
-
-enter-joint
-v12
-----
-config is already joint
-
-simple
-l15
-----
-can't apply simple config change in joint config
-
-leave-joint
-----
-voters=(2 3) learners=(4)
-2: StateProbe match=0 next=9
-3: StateProbe match=0 next=9
-4: StateProbe match=0 next=9 learner
-
-simple
-l9
-----
-voters=(2 3) learners=(4 9)
-2: StateProbe match=0 next=9
-3: StateProbe match=0 next=9
-4: StateProbe match=0 next=9 learner
-9: StateProbe match=0 next=14 learner
diff --git a/raft/confchange/testdata/simple_idempotency.txt b/raft/confchange/testdata/simple_idempotency.txt
deleted file mode 100644
index 2f7ca2e247b..00000000000
--- a/raft/confchange/testdata/simple_idempotency.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-simple
-v1
-----
-voters=(1)
-1: StateProbe match=0 next=0
-
-simple
-v1
-----
-voters=(1)
-1: StateProbe match=0 next=0
-
-simple
-v2
-----
-voters=(1 2)
-1: StateProbe match=0 next=0
-2: StateProbe match=0 next=2
-
-simple
-l1
-----
-voters=(2) learners=(1)
-1: StateProbe match=0 next=0 learner
-2: StateProbe match=0 next=2
-
-simple
-l1
-----
-voters=(2) learners=(1)
-1: StateProbe match=0 next=0 learner
-2: StateProbe match=0 next=2
-
-simple
-r1
-----
-voters=(2)
-2: StateProbe match=0 next=2
-
-simple
-r1
-----
-voters=(2)
-2: StateProbe match=0 next=2
-
-simple
-v3
-----
-voters=(2 3)
-2: StateProbe match=0 next=2
-3: StateProbe match=0 next=7
-
-simple
-r3
-----
-voters=(2)
-2: StateProbe match=0 next=2
-
-simple
-r3
-----
-voters=(2)
-2: StateProbe match=0 next=2
-
-simple
-r4
-----
-voters=(2)
-2: StateProbe match=0 next=2
diff --git a/raft/confchange/testdata/simple_promote_demote.txt b/raft/confchange/testdata/simple_promote_demote.txt
deleted file mode 100644
index 52369b450e3..00000000000
--- a/raft/confchange/testdata/simple_promote_demote.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-# Set up three voters for this test.
-
-simple
-v1
-----
-voters=(1)
-1: StateProbe match=0 next=0
-
-simple
-v2
-----
-voters=(1 2)
-1: StateProbe match=0 next=0
-2: StateProbe match=0 next=1
-
-simple
-v3
-----
-voters=(1 2 3)
-1: StateProbe match=0 next=0
-2: StateProbe match=0 next=1
-3: StateProbe match=0 next=2
-
-# Can atomically demote and promote without a hitch.
-# This is pointless, but possible.
-simple
-l1 v1
-----
-voters=(1 2 3)
-1: StateProbe match=0 next=0
-2: StateProbe match=0 next=1
-3: StateProbe match=0 next=2
-
-# Can demote a voter.
-simple
-l2
-----
-voters=(1 3) learners=(2)
-1: StateProbe match=0 next=0
-2: StateProbe match=0 next=1 learner
-3: StateProbe match=0 next=2
-
-# Can atomically promote and demote the same voter.
-# This is pointless, but possible.
-simple
-v2 l2
-----
-voters=(1 3) learners=(2)
-1: StateProbe match=0 next=0
-2: StateProbe match=0 next=1 learner
-3: StateProbe match=0 next=2
-
-# Can promote a voter.
-simple
-v2
-----
-voters=(1 2 3)
-1: StateProbe match=0 next=0
-2: StateProbe match=0 next=1
-3: StateProbe match=0 next=2
diff --git a/raft/confchange/testdata/simple_safety.txt b/raft/confchange/testdata/simple_safety.txt
deleted file mode 100644
index 6566c5fccf7..00000000000
--- a/raft/confchange/testdata/simple_safety.txt
+++ /dev/null
@@ -1,64 +0,0 @@
-simple
-l1
-----
-removed all voters
-
-simple
-v1
-----
-voters=(1)
-1: StateProbe match=0 next=1
-
-simple
-v2 l3
-----
-voters=(1 2) learners=(3)
-1: StateProbe match=0 next=1
-2: StateProbe match=0 next=2
-3: StateProbe match=0 next=2 learner
-
-simple
-r1 v5
-----
-more than one voter changed without entering joint config
-
-simple
-r1 r2
-----
-removed all voters
-
-simple
-v3 v4
-----
-more than one voter changed without entering joint config
-
-simple
-l1 v5
-----
-more than one voter changed without entering joint config
-
-simple
-l1 l2
-----
-removed all voters
-
-simple
-l2 l3 l4 l5
-----
-voters=(1) learners=(2 3 4 5)
-1: StateProbe match=0 next=1
-2: StateProbe match=0 next=2 learner
-3: StateProbe match=0 next=2 learner
-4: StateProbe match=0 next=8 learner
-5: StateProbe match=0 next=8 learner
-
-simple
-r1
-----
-removed all voters
-
-simple
-r2 r3 r4 r5
-----
-voters=(1)
-1: StateProbe match=0 next=1
diff --git a/raft/confchange/testdata/update.txt b/raft/confchange/testdata/update.txt
deleted file mode 100644
index 50a703ccf1d..00000000000
--- a/raft/confchange/testdata/update.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-# Nobody cares about ConfChangeUpdateNode, but at least use it once. It is used
-# by etcd as a convenient way to pass a blob through their conf change machinery
-# that updates information tracked outside of raft.
-
-simple
-v1
-----
-voters=(1)
-1: StateProbe match=0 next=0
-
-simple
-v2 u1
-----
-voters=(1 2)
-1: StateProbe match=0 next=0
-2: StateProbe match=0 next=1
-
-simple
-u1 u2 u3 u1 u2 u3
-----
-voters=(1 2)
-1: StateProbe match=0 next=0
-2: StateProbe match=0 next=1
diff --git a/raft/confchange/testdata/zero.txt b/raft/confchange/testdata/zero.txt
deleted file mode 100644
index 5e0d46fe6b6..00000000000
--- a/raft/confchange/testdata/zero.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-# NodeID zero is ignored.
-simple
-v1 r0 v0 l0
-----
-voters=(1)
-1: StateProbe match=0 next=0
diff --git a/raft/design.md b/raft/design.md
deleted file mode 100644
index 7bc0531dce6..00000000000
--- a/raft/design.md
+++ /dev/null
@@ -1,57 +0,0 @@
-## Progress
-
-Progress represents a follower's progress in the view of the leader. The leader maintains the progress of all followers, and sends a `replication message` to each follower based on its progress.
-
-A `replication message` is a `msgApp` with log entries.
-
-A progress has two attributes: `match` and `next`. `match` is the index of the highest known matched entry. If the leader knows nothing about the follower's replication status, `match` is set to zero. `next` is the index of the first entry that will be replicated to the follower. The leader puts entries from `next` up to its latest one in the next `replication message`.
-
-A progress is in one of three states: `probe`, `replicate`, or `snapshot`.
-
-```
- +--------------------------------------------------------+
- | send snapshot |
- | |
- +---------+----------+ +----------v---------+
- +---> probe | | snapshot |
- | | max inflight = 1 <----------------------------------+ max inflight = 0 |
- | +---------+----------+ +--------------------+
- | | 1. snapshot success
- | | (next=snapshot.index + 1)
- | | 2. snapshot failure
- | | (no change)
- | | 3. receives msgAppResp(rej=false&&index>lastsnap.index)
- | | (match=m.index,next=match+1)
-receives msgAppResp(rej=true)
-(next=match+1)| |
- | |
- | |
- | | receives msgAppResp(rej=false&&index>match)
- | | (match=m.index,next=match+1)
- | |
- | |
- | |
- | +---------v----------+
- | | replicate |
- +---+ max inflight = n |
- +--------------------+
-```
-
-When the progress of a follower is in the `probe` state, the leader sends at most one `replication message` per heartbeat interval. The leader sends `replication message` slowly, probing the actual progress of the follower. A `msgHeartbeatResp` or a `msgAppResp` with a rejection might trigger the sending of the next `replication message`.
-
-When the progress of a follower is in the `replicate` state, the leader sends a `replication message`, then optimistically increases `next` to the latest entry sent. This is an optimized state for replicating log entries to the follower quickly.
-
-When the progress of a follower is in the `snapshot` state, the leader stops sending any `replication message`.
-
-A newly elected leader sets the progress of all followers to the `probe` state with `match` = 0 and `next` = last index. The leader slowly (at most once per heartbeat) sends a `replication message` to each follower and probes its progress.
-
-A progress changes to `replicate` when the follower replies with a non-rejection `msgAppResp`, which implies that it has matched the index sent. At this point, the leader starts to stream log entries to the follower quickly. The progress will fall back to `probe` when the follower replies with a rejection `msgAppResp` or the link layer reports that the follower is unreachable. We aggressively reset `next` to `match`+1 since, if we receive any `msgAppResp` soon, both `match` and `next` will increase directly to the `index` in the `msgAppResp`. (We might end up sending some duplicate entries when we aggressively reset `next` too low; see open question.)
-
-A progress changes from `probe` to `snapshot` when the follower falls very far behind and requires a snapshot. After sending a `msgSnap`, the leader waits until the success, failure, or abortion of the previous snapshot it sent. The progress will go back to `probe` after the sending result is applied.
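In the Go code these states surface as progress state constants in the tracker package (only `StateProbe` appears verbatim in the test output elsewhere in this diff; `StateReplicate` and `StateSnapshot` are the assumed companion names). A leader-side sketch, illustrative only and not the actual `sendAppend` logic:

```
// sendToFollower sketches how a leader branches on a follower's progress state.
func sendToFollower(pr *tracker.Progress) {
	switch pr.State {
	case tracker.StateProbe:
		// at most one replication message in flight; wait for a reply before probing again
	case tracker.StateReplicate:
		// stream entries, optimistically advancing pr.Next past what was just sent
	case tracker.StateSnapshot:
		// replication is paused until the result of the pending snapshot is reported
	}
}
```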
-
-### Flow Control
-
-1. Limit the max size of each message sent. The max should be configurable.
-This lowers the cost in the probing state, since we limit the size per message, and lowers the penalty when `next` is aggressively decreased to a value that is too low.
-
-2. Limit the number of in-flight messages to less than N when in the `replicate` state. N should be configurable. Most implementations will have a sending buffer on top of the actual network transport layer (so the raft node is not blocked). We want to make sure raft does not overflow that buffer, which could cause messages to be dropped and trigger a lot of unnecessary resending. Both limits map onto configuration knobs; see the sketch after this list.
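Both limits correspond to knobs on the raft `Config`; a sketch using the field names and values from the package documentation later in this diff:

```
c := &raft.Config{
	ID:              0x01,
	ElectionTick:    10,
	HeartbeatTick:   1,
	Storage:         raft.NewMemoryStorage(),
	MaxSizePerMsg:   4096, // limit 1: caps the byte size of entries per replication message
	MaxInflightMsgs: 256,  // limit 2: caps in-flight replication messages in the replicate state
}
```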
diff --git a/raft/diff_test.go b/raft/diff_test.go
deleted file mode 100644
index 1c4c5272d1a..00000000000
--- a/raft/diff_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "os/exec"
- "strings"
-)
-
-func diffu(a, b string) string {
- if a == b {
- return ""
- }
- aname, bname := mustTemp("base", a), mustTemp("other", b)
- defer os.Remove(aname)
- defer os.Remove(bname)
- cmd := exec.Command("diff", "-u", aname, bname)
- buf, err := cmd.CombinedOutput()
- if err != nil {
- if _, ok := err.(*exec.ExitError); ok {
- // do nothing
- return string(buf)
- }
- panic(err)
- }
- return string(buf)
-}
-
-func mustTemp(pre, body string) string {
- f, err := ioutil.TempFile("", pre)
- if err != nil {
- panic(err)
- }
- _, err = io.Copy(f, strings.NewReader(body))
- if err != nil {
- panic(err)
- }
- f.Close()
- return f.Name()
-}
-
-func ltoa(l *raftLog) string {
- s := fmt.Sprintf("committed: %d\n", l.committed)
- s += fmt.Sprintf("applied: %d\n", l.applied)
- for i, e := range l.allEntries() {
- s += fmt.Sprintf("#%d: %+v\n", i, e)
- }
- return s
-}
diff --git a/raft/doc.go b/raft/doc.go
deleted file mode 100644
index d491352d441..00000000000
--- a/raft/doc.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package raft sends and receives messages in the Protocol Buffer format
-defined in the raftpb package.
-
-Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
-The state machine is kept in sync through the use of a replicated log.
-For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
-(https://raft.github.io/raft.pdf) by Diego Ongaro and John Ousterhout.
-
-A simple example application, _raftexample_, is also available to help illustrate
-how to use this package in practice:
-https://github.com/etcd-io/etcd/tree/main/contrib/raftexample
-
-Usage
-
-The primary object in raft is a Node. You either start a Node from scratch
-using raft.StartNode or start a Node from some initial state using raft.RestartNode.
-
-To start a node from scratch:
-
- storage := raft.NewMemoryStorage()
- c := &Config{
- ID: 0x01,
- ElectionTick: 10,
- HeartbeatTick: 1,
- Storage: storage,
- MaxSizePerMsg: 4096,
- MaxInflightMsgs: 256,
- }
- n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
-
-To restart a node from previous state:
-
- storage := raft.NewMemoryStorage()
-
- // recover the in-memory storage from persistent
- // snapshot, state and entries.
- storage.ApplySnapshot(snapshot)
- storage.SetHardState(state)
- storage.Append(entries)
-
- c := &Config{
- ID: 0x01,
- ElectionTick: 10,
- HeartbeatTick: 1,
- Storage: storage,
- MaxSizePerMsg: 4096,
- MaxInflightMsgs: 256,
- }
-
- // restart raft without peer information.
- // peer information is already included in the storage.
- n := raft.RestartNode(c)
-
-Now that you are holding onto a Node you have a few responsibilities:
-
-First, you must read from the Node.Ready() channel and process the updates
-it contains. These steps may be performed in parallel, except as noted in step
-2.
-
-1. Write HardState, Entries, and Snapshot to persistent storage if they are
-not empty. Note that when writing an Entry with Index i, any
-previously-persisted entries with Index >= i must be discarded.
-
-2. Send all Messages to the nodes named in the To field. It is important that
-no messages be sent until the latest HardState has been persisted to disk,
-and all Entries written by any previous Ready batch (Messages may be sent while
-entries from the same batch are being persisted). To reduce the I/O latency, an
-optimization can be applied to make leader write to disk in parallel with its
-followers (as explained at section 10.2.1 in Raft thesis). If any Message has type
-MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be
-large).
-
-Note: Marshalling messages is not thread-safe; it is important that you
-make sure that no new entries are persisted while marshalling.
-The easiest way to achieve this is to serialize the messages directly inside
-your main raft loop.
-
-3. Apply Snapshot (if any) and CommittedEntries to the state machine.
-If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange()
-to apply it to the node. The configuration change may be cancelled at this point
-by setting the NodeID field to zero before calling ApplyConfChange
-(but ApplyConfChange must be called one way or the other, and the decision to cancel
-must be based solely on the state machine and not external information such as
-the observed health of the node).
-
-4. Call Node.Advance() to signal readiness for the next batch of updates.
-This may be done at any time after step 1, although all updates must be processed
-in the order they were returned by Ready.
-
-Second, all persisted log entries must be made available via an
-implementation of the Storage interface. The provided MemoryStorage
-type can be used for this (if you repopulate its state upon a
-restart), or you can supply your own disk-backed implementation.
-
-Third, when you receive a message from another node, pass it to Node.Step:
-
- func recvRaftRPC(ctx context.Context, m raftpb.Message) {
- n.Step(ctx, m)
- }
-
-Finally, you need to call Node.Tick() at regular intervals (probably
-via a time.Ticker). Raft has two important timeouts: heartbeat and the
-election timeout. However, internally to the raft package time is
-represented by an abstract "tick".
-
-The total state machine handling loop will look something like this:
-
- for {
- select {
- case <-s.Ticker:
- n.Tick()
- case rd := <-s.Node.Ready():
- saveToStorage(rd.State, rd.Entries, rd.Snapshot)
- send(rd.Messages)
- if !raft.IsEmptySnap(rd.Snapshot) {
- processSnapshot(rd.Snapshot)
- }
- for _, entry := range rd.CommittedEntries {
- process(entry)
- if entry.Type == raftpb.EntryConfChange {
- var cc raftpb.ConfChange
- cc.Unmarshal(entry.Data)
- s.Node.ApplyConfChange(cc)
- }
- }
- s.Node.Advance()
- case <-s.done:
- return
- }
- }
-
-To propose changes to the state machine from your node take your application
-data, serialize it into a byte slice and call:
-
- n.Propose(ctx, data)
-
-If the proposal is committed, data will appear in committed entries with type
-raftpb.EntryNormal. There is no guarantee that a proposed command will be
-committed; you may have to re-propose after a timeout.
-
-To add or remove a node in a cluster, build ConfChange struct 'cc' and call:
-
- n.ProposeConfChange(ctx, cc)
-
-After config change is committed, some committed entry with type
-raftpb.EntryConfChange will be returned. You must apply it to node through:
-
- var cc raftpb.ConfChange
- cc.Unmarshal(data)
- n.ApplyConfChange(cc)
-
-Note: An ID represents a unique node in a cluster for all time. A
-given ID MUST be used only once even if the old node has been removed.
-This means that for example IP addresses make poor node IDs since they
-may be reused. Node IDs must be non-zero.
-
-Implementation notes
-
-This implementation is up to date with the final Raft thesis
-(https://github.com/ongardie/dissertation/blob/master/stanford.pdf), although our
-implementation of the membership change protocol differs somewhat from
-that described in chapter 4. The key invariant that membership changes
-happen one node at a time is preserved, but in our implementation the
-membership change takes effect when its entry is applied, not when it
-is added to the log (so the entry is committed under the old
-membership instead of the new). This is equivalent in terms of safety,
-since the old and new configurations are guaranteed to overlap.
-
-To ensure that we do not attempt to commit two membership changes at
-once by matching log positions (which would be unsafe since they
-should have different quorum requirements), we simply disallow any
-proposed membership change while any uncommitted change appears in
-the leader's log.
-
-This approach introduces a problem when you try to remove a member
-from a two-member cluster: If one of the members dies before the
-other one receives the commit of the confchange entry, then the member
-cannot be removed any more since the cluster cannot make progress.
-For this reason it is highly recommended to use three or more nodes in
-every cluster.
-
-MessageType
-
-Package raft sends and receives messages in Protocol Buffer format (defined
-in raftpb package). Each state (follower, candidate, leader) implements its
-own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when
-advancing with the given raftpb.Message. Each step is determined by its
-raftpb.MessageType. Note that every step is checked by one common method
-'Step' that safety-checks the terms of node and incoming message to prevent
-stale log entries:
-
- 'MsgHup' is used for election. If a node is a follower or candidate, the
- 'tick' function in 'raft' struct is set as 'tickElection'. If a follower or
- candidate has not received any heartbeat before the election timeout, it
- passes 'MsgHup' to its Step method and becomes (or remains) a candidate to
- start a new election.
-
- 'MsgBeat' is an internal type that signals the leader to send a heartbeat of
- the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in
- the 'raft' struct is set as 'tickHeartbeat', and triggers the leader to
- send periodic 'MsgHeartbeat' messages to its followers.
-
- 'MsgProp' proposes to append data to its log entries. This is a special
- type to redirect proposals to leader. Therefore, send method overwrites
- raftpb.Message's term with its HardState's term to avoid attaching its
- local term to 'MsgProp'. When 'MsgProp' is passed to the leader's 'Step'
- method, the leader first calls the 'appendEntry' method to append entries
- to its log, and then calls 'bcastAppend' method to send those entries to
- its peers. When passed to candidate, 'MsgProp' is dropped. When passed to
- follower, 'MsgProp' is stored in the follower's mailbox (msgs) by the send
- method. It is stored with sender's ID and later forwarded to leader by
- rafthttp package.
-
- 'MsgApp' contains log entries to replicate. A leader calls bcastAppend,
- which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp'
- type. When 'MsgApp' is passed to candidate's Step method, candidate reverts
- back to follower, because it indicates that there is a valid leader sending
- 'MsgApp' messages. Candidate and follower respond to this message in
- 'MsgAppResp' type.
-
- 'MsgAppResp' is the response to a log replication request ('MsgApp'). When
- 'MsgApp' is passed to candidate or follower's Step method, it responds by
- calling 'handleAppendEntries' method, which sends 'MsgAppResp' to raft
- mailbox.
-
- 'MsgVote' requests votes for election. When a node is a follower or
- candidate and 'MsgHup' is passed to its Step method, then the node calls
- 'campaign' method to campaign itself to become a leader. Once 'campaign'
- method is called, the node becomes candidate and sends 'MsgVote' to peers
- in cluster to request votes. When passed to leader or candidate's Step
- method and the message's Term is lower than leader's or candidate's,
- 'MsgVote' will be rejected ('MsgVoteResp' is returned with Reject true).
- If leader or candidate receives 'MsgVote' with higher term, it will revert
- back to follower. When 'MsgVote' is passed to follower, it votes for the
- sender only when sender's last term is greater than MsgVote's term or
- sender's last term is equal to MsgVote's term but sender's last committed
- index is greater than or equal to follower's.
-
- 'MsgVoteResp' contains responses from voting request. When 'MsgVoteResp' is
- passed to candidate, the candidate calculates how many votes it has won. If
- it's more than majority (quorum), it becomes leader and calls 'bcastAppend'.
- If candidate receives majority of votes of denials, it reverts back to
- follower.
-
- 'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election
- protocol. When Config.PreVote is true, a pre-election is carried out first
- (using the same rules as a regular election), and no node increases its term
- number unless the pre-election indicates that the campaigning node would win.
- This minimizes disruption when a partitioned node rejoins the cluster.
-
- 'MsgSnap' requests to install a snapshot message. When a node has just
- become a leader or the leader receives 'MsgProp' message, it calls
- 'bcastAppend' method, which then calls 'sendAppend' method to each
- follower. In 'sendAppend', if a leader fails to get term or entries,
- the leader requests snapshot by sending 'MsgSnap' type message.
-
- 'MsgSnapStatus' tells the result of a snapshot install message. When a
- follower rejects 'MsgSnap', it indicates that the snapshot request carried
- by 'MsgSnap' failed due to network issues, i.e. the network layer failed
- to send the snapshot to the follower. The leader then considers the
- follower's progress as probe. When 'MsgSnap' is not rejected, it
- indicates that the snapshot succeeded, and the leader sets the follower's
- progress to probe and resumes its log replication.
-
- 'MsgHeartbeat' sends heartbeat from leader. When 'MsgHeartbeat' is passed
- to candidate and message's term is higher than candidate's, the candidate
- reverts back to follower and updates its committed index from the one in
- this heartbeat. And it sends the message to its mailbox. When
- 'MsgHeartbeat' is passed to follower's Step method and message's term is
- higher than follower's, the follower updates its leaderID with the ID
- from the message.
-
- 'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When 'MsgHeartbeatResp'
- is passed to the leader's Step method, the leader knows which follower
- responded. Only when the leader's last committed index is greater than the
- follower's Match index does the leader run the 'sendAppend' method.
-
- 'MsgUnreachable' tells that a request (message) wasn't delivered. When
- 'MsgUnreachable' is passed to leader's Step method, the leader discovers
- that the follower that sent this 'MsgUnreachable' is not reachable, often
- indicating 'MsgApp' is lost. When follower's progress state is replicate,
- the leader sets it back to probe.
-
-*/
-package raft
diff --git a/raft/example_test.go b/raft/example_test.go
deleted file mode 100644
index 51c1689245e..00000000000
--- a/raft/example_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- pb "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-func applyToStore(ents []pb.Entry) {}
-func sendMessages(msgs []pb.Message) {}
-func saveStateToDisk(st pb.HardState) {}
-func saveToDisk(ents []pb.Entry) {}
-
-func ExampleNode() {
- c := &Config{}
- n := StartNode(c, nil)
- defer n.Stop()
-
- // stuff to n happens in other goroutines
-
- // the last known state
- var prev pb.HardState
- for {
- // Ready blocks until there is new state ready.
- rd := <-n.Ready()
- if !isHardStateEqual(prev, rd.HardState) {
- saveStateToDisk(rd.HardState)
- prev = rd.HardState
- }
-
- saveToDisk(rd.Entries)
- go applyToStore(rd.CommittedEntries)
- sendMessages(rd.Messages)
- }
-}
diff --git a/raft/go.mod b/raft/go.mod
deleted file mode 100644
index a559403e740..00000000000
--- a/raft/go.mod
+++ /dev/null
@@ -1,20 +0,0 @@
-module go.etcd.io/etcd/raft/v3
-
-go 1.16
-
-require (
- github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 // indirect
- github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5
- github.com/gogo/protobuf v1.3.2
- github.com/golang/protobuf v1.5.2
- github.com/pkg/errors v0.9.1 // indirect
- go.etcd.io/etcd/client/pkg/v3 v3.5.0-alpha.0
-)
-
-// Bad imports are sometimes causing attempts to pull that code.
-// This makes the error more explicit.
-replace go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY
-
-replace go.etcd.io/etcd/v3 => ./FORBIDDEN_DEPENDENCY
-
-replace go.etcd.io/etcd/client/pkg/v3 => ../client/pkg
diff --git a/raft/go.sum b/raft/go.sum
deleted file mode 100644
index 622e9f3eb18..00000000000
--- a/raft/go.sum
+++ /dev/null
@@ -1,142 +0,0 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
-github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs=
-github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
-github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
-github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19/go.mod h1:aMfIlz3TDBfB0BwTCKFU1XbEmj9zevr5S5LcBr85MXw=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/raft/interaction_test.go b/raft/interaction_test.go
deleted file mode 100644
index 57d0d97d446..00000000000
--- a/raft/interaction_test.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft_test
-
-import (
- "testing"
-
- "github.com/cockroachdb/datadriven"
- "go.etcd.io/etcd/raft/v3/rafttest"
-)
-
-func TestInteraction(t *testing.T) {
- // NB: if this test fails, run `go test ./raft -rewrite` and inspect the
- // diff. Only commit the changes if you understand what caused them and if
- // they are desired.
- datadriven.Walk(t, "testdata", func(t *testing.T, path string) {
- env := rafttest.NewInteractionEnv(nil)
- datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string {
- return env.Handle(t, *d)
- })
- })
-}
diff --git a/raft/log.go b/raft/log.go
deleted file mode 100644
index c94c41f7783..00000000000
--- a/raft/log.go
+++ /dev/null
@@ -1,406 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "fmt"
- "log"
-
- pb "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-type raftLog struct {
- // storage contains all stable entries since the last snapshot.
- storage Storage
-
- // unstable contains all unstable entries and snapshot.
- // they will be saved into storage.
- unstable unstable
-
- // committed is the highest log position that is known to be in
- // stable storage on a quorum of nodes.
- committed uint64
- // applied is the highest log position that the application has
- // been instructed to apply to its state machine.
- // Invariant: applied <= committed
- applied uint64
-
- logger Logger
-
-	// maxNextEntsSize is the maximum aggregate byte size of the messages
-	// returned from calls to nextEnts.
- maxNextEntsSize uint64
-}
-
-// newLog returns a log using the given storage and default options. It
-// recovers the log to the state where it has just committed and applied
-// the latest snapshot.
-func newLog(storage Storage, logger Logger) *raftLog {
- return newLogWithSize(storage, logger, noLimit)
-}
-
-// newLogWithSize returns a log using the given storage and max
-// message size.
-func newLogWithSize(storage Storage, logger Logger, maxNextEntsSize uint64) *raftLog {
- if storage == nil {
- log.Panic("storage must not be nil")
- }
- log := &raftLog{
- storage: storage,
- logger: logger,
- maxNextEntsSize: maxNextEntsSize,
- }
- firstIndex, err := storage.FirstIndex()
- if err != nil {
- panic(err) // TODO(bdarnell)
- }
- lastIndex, err := storage.LastIndex()
- if err != nil {
- panic(err) // TODO(bdarnell)
- }
- log.unstable.offset = lastIndex + 1
- log.unstable.logger = logger
- // Initialize our committed and applied pointers to the time of the last compaction.
- log.committed = firstIndex - 1
- log.applied = firstIndex - 1
-
- return log
-}
-
-func (l *raftLog) String() string {
- return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries))
-}
-
-// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise,
-// it returns (last index of new entries, true).
-func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) {
- if l.matchTerm(index, logTerm) {
- lastnewi = index + uint64(len(ents))
- ci := l.findConflict(ents)
- switch {
- case ci == 0:
- case ci <= l.committed:
- l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed)
- default:
- offset := index + 1
- l.append(ents[ci-offset:]...)
- }
- l.commitTo(min(committed, lastnewi))
- return lastnewi, true
- }
- return 0, false
-}
-
-func (l *raftLog) append(ents ...pb.Entry) uint64 {
- if len(ents) == 0 {
- return l.lastIndex()
- }
- if after := ents[0].Index - 1; after < l.committed {
- l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed)
- }
- l.unstable.truncateAndAppend(ents)
- return l.lastIndex()
-}
-
-// findConflict finds the index of the conflict.
-// It returns the index of the first conflicting entry between the existing
-// entries and the given entries, if there is one.
-// If there are no conflicting entries, and the existing entries contain
-// all the given entries, zero will be returned.
-// If there are no conflicting entries, but the given entries contain new
-// entries, the index of the first new entry will be returned.
-// An entry is considered to be conflicting if it has the same index but
-// a different term.
-// The indexes of the given entries MUST be continuously increasing.
-func (l *raftLog) findConflict(ents []pb.Entry) uint64 {
- for _, ne := range ents {
- if !l.matchTerm(ne.Index, ne.Term) {
- if ne.Index <= l.lastIndex() {
- l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]",
- ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term)
- }
- return ne.Index
- }
- }
- return 0
-}
-
-// findConflictByTerm takes an (index, term) pair (indicating a conflicting log
-// entry on a leader/follower during an append) and finds the largest index in
-// log l with a term <= `term` and an index <= `index`. If no such index exists
-// in the log, the log's first index is returned.
-//
-// The index provided MUST be equal to or less than l.lastIndex(). Invalid
-// inputs log a warning and the input index is returned.
-func (l *raftLog) findConflictByTerm(index uint64, term uint64) uint64 {
- if li := l.lastIndex(); index > li {
-		// NB: such calls should not exist, but since there is a straightforward
- // way to recover, do it.
- //
- // It is tempting to also check something about the first index, but
- // there is odd behavior with peers that have no log, in which case
- // lastIndex will return zero and firstIndex will return one, which
- // leads to calls with an index of zero into this method.
- l.logger.Warningf("index(%d) is out of range [0, lastIndex(%d)] in findConflictByTerm",
- index, li)
- return index
- }
- for {
- logTerm, err := l.term(index)
- if logTerm <= term || err != nil {
- break
- }
- index--
- }
- return index
-}
-
-func (l *raftLog) unstableEntries() []pb.Entry {
- if len(l.unstable.entries) == 0 {
- return nil
- }
- return l.unstable.entries
-}
-
-// nextEnts returns all the available entries for execution.
-// If applied is smaller than the index of snapshot, it returns all committed
-// entries after the index of snapshot.
-func (l *raftLog) nextEnts() (ents []pb.Entry) {
- off := max(l.applied+1, l.firstIndex())
- if l.committed+1 > off {
- ents, err := l.slice(off, l.committed+1, l.maxNextEntsSize)
- if err != nil {
- l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err)
- }
- return ents
- }
- return nil
-}
-
-// hasNextEnts reports whether there are any available entries for execution.
-// This is a fast check that avoids the heavy raftLog.slice() in raftLog.nextEnts().
-func (l *raftLog) hasNextEnts() bool {
- off := max(l.applied+1, l.firstIndex())
- return l.committed+1 > off
-}
-
-// hasPendingSnapshot reports whether there is a pending snapshot waiting to be applied.
-func (l *raftLog) hasPendingSnapshot() bool {
- return l.unstable.snapshot != nil && !IsEmptySnap(*l.unstable.snapshot)
-}
-
-func (l *raftLog) snapshot() (pb.Snapshot, error) {
- if l.unstable.snapshot != nil {
- return *l.unstable.snapshot, nil
- }
- return l.storage.Snapshot()
-}
-
-func (l *raftLog) firstIndex() uint64 {
- if i, ok := l.unstable.maybeFirstIndex(); ok {
- return i
- }
- index, err := l.storage.FirstIndex()
- if err != nil {
- panic(err) // TODO(bdarnell)
- }
- return index
-}
-
-func (l *raftLog) lastIndex() uint64 {
- if i, ok := l.unstable.maybeLastIndex(); ok {
- return i
- }
- i, err := l.storage.LastIndex()
- if err != nil {
- panic(err) // TODO(bdarnell)
- }
- return i
-}
-
-func (l *raftLog) commitTo(tocommit uint64) {
- // never decrease commit
- if l.committed < tocommit {
- if l.lastIndex() < tocommit {
- l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex())
- }
- l.committed = tocommit
- }
-}
-
-func (l *raftLog) appliedTo(i uint64) {
- if i == 0 {
- return
- }
- if l.committed < i || i < l.applied {
- l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed)
- }
- l.applied = i
-}
-
-func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) }
-
-func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) }
-
-func (l *raftLog) lastTerm() uint64 {
- t, err := l.term(l.lastIndex())
- if err != nil {
- l.logger.Panicf("unexpected error when getting the last term (%v)", err)
- }
- return t
-}
-
-func (l *raftLog) term(i uint64) (uint64, error) {
- // the valid term range is [index of dummy entry, last index]
- dummyIndex := l.firstIndex() - 1
- if i < dummyIndex || i > l.lastIndex() {
- // TODO: return an error instead?
- return 0, nil
- }
-
- if t, ok := l.unstable.maybeTerm(i); ok {
- return t, nil
- }
-
- t, err := l.storage.Term(i)
- if err == nil {
- return t, nil
- }
- if err == ErrCompacted || err == ErrUnavailable {
- return 0, err
- }
- panic(err) // TODO(bdarnell)
-}
-
-func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) {
- if i > l.lastIndex() {
- return nil, nil
- }
- return l.slice(i, l.lastIndex()+1, maxsize)
-}
-
-// allEntries returns all entries in the log.
-func (l *raftLog) allEntries() []pb.Entry {
- ents, err := l.entries(l.firstIndex(), noLimit)
- if err == nil {
- return ents
- }
- if err == ErrCompacted { // try again if there was a racing compaction
- return l.allEntries()
- }
- // TODO (xiangli): handle error?
- panic(err)
-}
-
-// isUpToDate determines if the given (lastIndex,term) log is more up-to-date
-// by comparing the index and term of the last entries in the existing logs.
-// If the logs have last entries with different terms, then the log with the
-// later term is more up-to-date. If the logs end with the same term, then
-// whichever log has the larger lastIndex is more up-to-date. If the logs are
-// the same, the given log is up-to-date.
-func (l *raftLog) isUpToDate(lasti, term uint64) bool {
- return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex())
-}
-
-func (l *raftLog) matchTerm(i, term uint64) bool {
- t, err := l.term(i)
- if err != nil {
- return false
- }
- return t == term
-}
-
-func (l *raftLog) maybeCommit(maxIndex, term uint64) bool {
- if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term {
- l.commitTo(maxIndex)
- return true
- }
- return false
-}
-
-func (l *raftLog) restore(s pb.Snapshot) {
- l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term)
- l.committed = s.Metadata.Index
- l.unstable.restore(s)
-}
-
-// slice returns a slice of log entries from lo through hi-1, inclusive.
-func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) {
- err := l.mustCheckOutOfBounds(lo, hi)
- if err != nil {
- return nil, err
- }
- if lo == hi {
- return nil, nil
- }
- var ents []pb.Entry
- if lo < l.unstable.offset {
- storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize)
- if err == ErrCompacted {
- return nil, err
- } else if err == ErrUnavailable {
- l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset))
- } else if err != nil {
- panic(err) // TODO(bdarnell)
- }
-
- // check if ents has reached the size limitation
- if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo {
- return storedEnts, nil
- }
-
- ents = storedEnts
- }
- if hi > l.unstable.offset {
- unstable := l.unstable.slice(max(lo, l.unstable.offset), hi)
- if len(ents) > 0 {
- combined := make([]pb.Entry, len(ents)+len(unstable))
- n := copy(combined, ents)
- copy(combined[n:], unstable)
- ents = combined
- } else {
- ents = unstable
- }
- }
- return limitSize(ents, maxSize), nil
-}
-
-// l.firstIndex <= lo <= hi <= l.firstIndex + len(l.entries)
-func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error {
- if lo > hi {
- l.logger.Panicf("invalid slice %d > %d", lo, hi)
- }
- fi := l.firstIndex()
- if lo < fi {
- return ErrCompacted
- }
-
- length := l.lastIndex() + 1 - fi
- if hi > fi+length {
- l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex())
- }
- return nil
-}
-
-func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 {
- if err == nil {
- return t
- }
- if err == ErrCompacted {
- return 0
- }
- l.logger.Panicf("unexpected error (%v)", err)
- return 0
-}
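To make the findConflict contract above concrete, here is a small self-contained sketch over a plain slice whose first element sits at index 1. The entry type and findConflict helper are hypothetical stand-ins, not the real raftLog; they only reproduce the documented rule.

```go
package main

import "fmt"

type entry struct {
	Index, Term uint64
}

// findConflict mirrors the documented behaviour: an entry conflicts if the
// existing log holds a different term at the same index; with no conflict,
// the index of the first genuinely new entry is returned, or 0 if nothing
// in the incoming slice is new.
func findConflict(existing, incoming []entry) uint64 {
	termAt := func(i uint64) (uint64, bool) {
		if i == 0 || i > uint64(len(existing)) {
			return 0, false
		}
		return existing[i-1].Term, true
	}
	for _, ne := range incoming {
		t, ok := termAt(ne.Index)
		if !ok || t != ne.Term {
			return ne.Index // first conflicting or first new entry
		}
	}
	return 0 // fully contained, nothing new
}

func main() {
	log := []entry{{1, 1}, {2, 2}, {3, 3}}
	fmt.Println(findConflict(log, []entry{{2, 2}, {3, 3}})) // 0: already present
	fmt.Println(findConflict(log, []entry{{3, 3}, {4, 4}})) // 4: first new entry
	fmt.Println(findConflict(log, []entry{{2, 1}, {3, 4}})) // 2: term mismatch at 2
}
```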
diff --git a/raft/log_test.go b/raft/log_test.go
deleted file mode 100644
index 0ad23f93ba1..00000000000
--- a/raft/log_test.go
+++ /dev/null
@@ -1,819 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "reflect"
- "testing"
-
- pb "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-func TestFindConflict(t *testing.T) {
- previousEnts := []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}}
- tests := []struct {
- ents []pb.Entry
- wconflict uint64
- }{
- // no conflict, empty ent
- {[]pb.Entry{}, 0},
- // no conflict
- {[]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}}, 0},
- {[]pb.Entry{{Index: 2, Term: 2}, {Index: 3, Term: 3}}, 0},
- {[]pb.Entry{{Index: 3, Term: 3}}, 0},
- // no conflict, but has new entries
- {[]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 4}}, 4},
- {[]pb.Entry{{Index: 2, Term: 2}, {Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 4}}, 4},
- {[]pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 4}}, 4},
- {[]pb.Entry{{Index: 4, Term: 4}, {Index: 5, Term: 4}}, 4},
- // conflicts with existing entries
- {[]pb.Entry{{Index: 1, Term: 4}, {Index: 2, Term: 4}}, 1},
- {[]pb.Entry{{Index: 2, Term: 1}, {Index: 3, Term: 4}, {Index: 4, Term: 4}}, 2},
- {[]pb.Entry{{Index: 3, Term: 1}, {Index: 4, Term: 2}, {Index: 5, Term: 4}, {Index: 6, Term: 4}}, 3},
- }
-
- for i, tt := range tests {
- raftLog := newLog(NewMemoryStorage(), raftLogger)
- raftLog.append(previousEnts...)
-
- gconflict := raftLog.findConflict(tt.ents)
- if gconflict != tt.wconflict {
- t.Errorf("#%d: conflict = %d, want %d", i, gconflict, tt.wconflict)
- }
- }
-}
-
-func TestIsUpToDate(t *testing.T) {
- previousEnts := []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}}
- raftLog := newLog(NewMemoryStorage(), raftLogger)
- raftLog.append(previousEnts...)
- tests := []struct {
- lastIndex uint64
- term uint64
- wUpToDate bool
- }{
- // greater term, ignore lastIndex
- {raftLog.lastIndex() - 1, 4, true},
- {raftLog.lastIndex(), 4, true},
- {raftLog.lastIndex() + 1, 4, true},
- // smaller term, ignore lastIndex
- {raftLog.lastIndex() - 1, 2, false},
- {raftLog.lastIndex(), 2, false},
- {raftLog.lastIndex() + 1, 2, false},
-		// equal term, equal or larger lastIndex wins
- {raftLog.lastIndex() - 1, 3, false},
- {raftLog.lastIndex(), 3, true},
- {raftLog.lastIndex() + 1, 3, true},
- }
-
- for i, tt := range tests {
- gUpToDate := raftLog.isUpToDate(tt.lastIndex, tt.term)
- if gUpToDate != tt.wUpToDate {
- t.Errorf("#%d: uptodate = %v, want %v", i, gUpToDate, tt.wUpToDate)
- }
- }
-}
-
-func TestAppend(t *testing.T) {
- previousEnts := []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}}
- tests := []struct {
- ents []pb.Entry
- windex uint64
- wents []pb.Entry
- wunstable uint64
- }{
- {
- []pb.Entry{},
- 2,
- []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}},
- 3,
- },
- {
- []pb.Entry{{Index: 3, Term: 2}},
- 3,
- []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 2}},
- 3,
- },
- // conflicts with index 1
- {
- []pb.Entry{{Index: 1, Term: 2}},
- 1,
- []pb.Entry{{Index: 1, Term: 2}},
- 1,
- },
- // conflicts with index 2
- {
- []pb.Entry{{Index: 2, Term: 3}, {Index: 3, Term: 3}},
- 3,
- []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 3}, {Index: 3, Term: 3}},
- 2,
- },
- }
-
- for i, tt := range tests {
- storage := NewMemoryStorage()
- storage.Append(previousEnts)
- raftLog := newLog(storage, raftLogger)
-
- index := raftLog.append(tt.ents...)
- if index != tt.windex {
- t.Errorf("#%d: lastIndex = %d, want %d", i, index, tt.windex)
- }
- g, err := raftLog.entries(1, noLimit)
- if err != nil {
- t.Fatalf("#%d: unexpected error %v", i, err)
- }
- if !reflect.DeepEqual(g, tt.wents) {
- t.Errorf("#%d: logEnts = %+v, want %+v", i, g, tt.wents)
- }
- if goff := raftLog.unstable.offset; goff != tt.wunstable {
- t.Errorf("#%d: unstable = %d, want %d", i, goff, tt.wunstable)
- }
- }
-}
-
-// TestLogMaybeAppend ensures:
-// If the given (index, term) matches the existing log:
-// 1. If an existing entry conflicts with a new one (same index
-// but different terms), delete the existing entry and all that
-// follow it.
-// 2. Append any new entries not already in the log.
-// If the given (index, term) does not match the existing log:
-// return false.
-func TestLogMaybeAppend(t *testing.T) {
- previousEnts := []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}}
- lastindex := uint64(3)
- lastterm := uint64(3)
- commit := uint64(1)
-
- tests := []struct {
- logTerm uint64
- index uint64
- committed uint64
- ents []pb.Entry
-
- wlasti uint64
- wappend bool
- wcommit uint64
- wpanic bool
- }{
- // not match: term is different
- {
- lastterm - 1, lastindex, lastindex, []pb.Entry{{Index: lastindex + 1, Term: 4}},
- 0, false, commit, false,
- },
- // not match: index out of bound
- {
- lastterm, lastindex + 1, lastindex, []pb.Entry{{Index: lastindex + 2, Term: 4}},
- 0, false, commit, false,
- },
- // match with the last existing entry
- {
- lastterm, lastindex, lastindex, nil,
- lastindex, true, lastindex, false,
- },
- {
- lastterm, lastindex, lastindex + 1, nil,
- lastindex, true, lastindex, false, // do not increase commit higher than lastnewi
- },
- {
- lastterm, lastindex, lastindex - 1, nil,
- lastindex, true, lastindex - 1, false, // commit up to the commit in the message
- },
- {
- lastterm, lastindex, 0, nil,
- lastindex, true, commit, false, // commit do not decrease
- },
- {
- 0, 0, lastindex, nil,
- 0, true, commit, false, // commit do not decrease
- },
- {
- lastterm, lastindex, lastindex, []pb.Entry{{Index: lastindex + 1, Term: 4}},
- lastindex + 1, true, lastindex, false,
- },
- {
- lastterm, lastindex, lastindex + 1, []pb.Entry{{Index: lastindex + 1, Term: 4}},
- lastindex + 1, true, lastindex + 1, false,
- },
- {
- lastterm, lastindex, lastindex + 2, []pb.Entry{{Index: lastindex + 1, Term: 4}},
- lastindex + 1, true, lastindex + 1, false, // do not increase commit higher than lastnewi
- },
- {
- lastterm, lastindex, lastindex + 2, []pb.Entry{{Index: lastindex + 1, Term: 4}, {Index: lastindex + 2, Term: 4}},
- lastindex + 2, true, lastindex + 2, false,
- },
-		// match with an entry in the middle
- {
- lastterm - 1, lastindex - 1, lastindex, []pb.Entry{{Index: lastindex, Term: 4}},
- lastindex, true, lastindex, false,
- },
- {
- lastterm - 2, lastindex - 2, lastindex, []pb.Entry{{Index: lastindex - 1, Term: 4}},
- lastindex - 1, true, lastindex - 1, false,
- },
- {
- lastterm - 3, lastindex - 3, lastindex, []pb.Entry{{Index: lastindex - 2, Term: 4}},
- lastindex - 2, true, lastindex - 2, true, // conflict with existing committed entry
- },
- {
- lastterm - 2, lastindex - 2, lastindex, []pb.Entry{{Index: lastindex - 1, Term: 4}, {Index: lastindex, Term: 4}},
- lastindex, true, lastindex, false,
- },
- }
-
- for i, tt := range tests {
- raftLog := newLog(NewMemoryStorage(), raftLogger)
- raftLog.append(previousEnts...)
- raftLog.committed = commit
- func() {
- defer func() {
- if r := recover(); r != nil {
- if !tt.wpanic {
- t.Errorf("%d: panic = %v, want %v", i, true, tt.wpanic)
- }
- }
- }()
- glasti, gappend := raftLog.maybeAppend(tt.index, tt.logTerm, tt.committed, tt.ents...)
- gcommit := raftLog.committed
-
- if glasti != tt.wlasti {
- t.Errorf("#%d: lastindex = %d, want %d", i, glasti, tt.wlasti)
- }
- if gappend != tt.wappend {
- t.Errorf("#%d: append = %v, want %v", i, gappend, tt.wappend)
- }
- if gcommit != tt.wcommit {
- t.Errorf("#%d: committed = %d, want %d", i, gcommit, tt.wcommit)
- }
- if gappend && len(tt.ents) != 0 {
- gents, err := raftLog.slice(raftLog.lastIndex()-uint64(len(tt.ents))+1, raftLog.lastIndex()+1, noLimit)
- if err != nil {
- t.Fatalf("unexpected error %v", err)
- }
- if !reflect.DeepEqual(tt.ents, gents) {
- t.Errorf("#%d: appended entries = %v, want %v", i, gents, tt.ents)
- }
- }
- }()
- }
-}
-
-// TestCompactionSideEffects ensures that all the log related functionality works correctly after
-// a compaction.
-func TestCompactionSideEffects(t *testing.T) {
- var i uint64
- // Populate the log with 1000 entries; 750 in stable storage and 250 in unstable.
- lastIndex := uint64(1000)
- unstableIndex := uint64(750)
- lastTerm := lastIndex
- storage := NewMemoryStorage()
- for i = 1; i <= unstableIndex; i++ {
- storage.Append([]pb.Entry{{Term: i, Index: i}})
- }
- raftLog := newLog(storage, raftLogger)
- for i = unstableIndex; i < lastIndex; i++ {
- raftLog.append(pb.Entry{Term: i + 1, Index: i + 1})
- }
-
- ok := raftLog.maybeCommit(lastIndex, lastTerm)
- if !ok {
- t.Fatalf("maybeCommit returned false")
- }
- raftLog.appliedTo(raftLog.committed)
-
- offset := uint64(500)
- storage.Compact(offset)
-
- if raftLog.lastIndex() != lastIndex {
- t.Errorf("lastIndex = %d, want %d", raftLog.lastIndex(), lastIndex)
- }
-
- for j := offset; j <= raftLog.lastIndex(); j++ {
- if mustTerm(raftLog.term(j)) != j {
- t.Errorf("term(%d) = %d, want %d", j, mustTerm(raftLog.term(j)), j)
- }
- }
-
- for j := offset; j <= raftLog.lastIndex(); j++ {
- if !raftLog.matchTerm(j, j) {
- t.Errorf("matchTerm(%d) = false, want true", j)
- }
- }
-
- unstableEnts := raftLog.unstableEntries()
- if g := len(unstableEnts); g != 250 {
- t.Errorf("len(unstableEntries) = %d, want = %d", g, 250)
- }
- if unstableEnts[0].Index != 751 {
- t.Errorf("Index = %d, want = %d", unstableEnts[0].Index, 751)
- }
-
- prev := raftLog.lastIndex()
- raftLog.append(pb.Entry{Index: raftLog.lastIndex() + 1, Term: raftLog.lastIndex() + 1})
- if raftLog.lastIndex() != prev+1 {
- t.Errorf("lastIndex = %d, want = %d", raftLog.lastIndex(), prev+1)
- }
-
- ents, err := raftLog.entries(raftLog.lastIndex(), noLimit)
- if err != nil {
- t.Fatalf("unexpected error %v", err)
- }
- if len(ents) != 1 {
- t.Errorf("len(entries) = %d, want = %d", len(ents), 1)
- }
-}
-
-func TestHasNextEnts(t *testing.T) {
- snap := pb.Snapshot{
- Metadata: pb.SnapshotMetadata{Term: 1, Index: 3},
- }
- ents := []pb.Entry{
- {Term: 1, Index: 4},
- {Term: 1, Index: 5},
- {Term: 1, Index: 6},
- }
- tests := []struct {
- applied uint64
- hasNext bool
- }{
- {0, true},
- {3, true},
- {4, true},
- {5, false},
- }
- for i, tt := range tests {
- storage := NewMemoryStorage()
- storage.ApplySnapshot(snap)
- raftLog := newLog(storage, raftLogger)
- raftLog.append(ents...)
- raftLog.maybeCommit(5, 1)
- raftLog.appliedTo(tt.applied)
-
- hasNext := raftLog.hasNextEnts()
- if hasNext != tt.hasNext {
- t.Errorf("#%d: hasNext = %v, want %v", i, hasNext, tt.hasNext)
- }
- }
-}
-
-func TestNextEnts(t *testing.T) {
- snap := pb.Snapshot{
- Metadata: pb.SnapshotMetadata{Term: 1, Index: 3},
- }
- ents := []pb.Entry{
- {Term: 1, Index: 4},
- {Term: 1, Index: 5},
- {Term: 1, Index: 6},
- }
- tests := []struct {
- applied uint64
- wents []pb.Entry
- }{
- {0, ents[:2]},
- {3, ents[:2]},
- {4, ents[1:2]},
- {5, nil},
- }
- for i, tt := range tests {
- storage := NewMemoryStorage()
- storage.ApplySnapshot(snap)
- raftLog := newLog(storage, raftLogger)
- raftLog.append(ents...)
- raftLog.maybeCommit(5, 1)
- raftLog.appliedTo(tt.applied)
-
- nents := raftLog.nextEnts()
- if !reflect.DeepEqual(nents, tt.wents) {
- t.Errorf("#%d: nents = %+v, want %+v", i, nents, tt.wents)
- }
- }
-}
-
-// TestUnstableEnts ensures unstableEntries returns the unstable part of the
-// entries correctly.
-func TestUnstableEnts(t *testing.T) {
- previousEnts := []pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}}
- tests := []struct {
- unstable uint64
- wents []pb.Entry
- }{
- {3, nil},
- {1, previousEnts},
- }
-
- for i, tt := range tests {
- // append stable entries to storage
- storage := NewMemoryStorage()
- storage.Append(previousEnts[:tt.unstable-1])
-
- // append unstable entries to raftlog
- raftLog := newLog(storage, raftLogger)
- raftLog.append(previousEnts[tt.unstable-1:]...)
-
- ents := raftLog.unstableEntries()
- if l := len(ents); l > 0 {
- raftLog.stableTo(ents[l-1].Index, ents[l-1].Term)
- }
- if !reflect.DeepEqual(ents, tt.wents) {
- t.Errorf("#%d: unstableEnts = %+v, want %+v", i, ents, tt.wents)
- }
- w := previousEnts[len(previousEnts)-1].Index + 1
- if g := raftLog.unstable.offset; g != w {
- t.Errorf("#%d: unstable = %d, want %d", i, g, w)
- }
- }
-}
-
-func TestCommitTo(t *testing.T) {
- previousEnts := []pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}, {Term: 3, Index: 3}}
- commit := uint64(2)
- tests := []struct {
- commit uint64
- wcommit uint64
- wpanic bool
- }{
- {3, 3, false},
- {1, 2, false}, // never decrease
- {4, 0, true}, // commit out of range -> panic
- }
- for i, tt := range tests {
- func() {
- defer func() {
- if r := recover(); r != nil {
- if !tt.wpanic {
- t.Errorf("%d: panic = %v, want %v", i, true, tt.wpanic)
- }
- }
- }()
- raftLog := newLog(NewMemoryStorage(), raftLogger)
- raftLog.append(previousEnts...)
- raftLog.committed = commit
- raftLog.commitTo(tt.commit)
- if raftLog.committed != tt.wcommit {
- t.Errorf("#%d: committed = %d, want %d", i, raftLog.committed, tt.wcommit)
- }
- }()
- }
-}
-
-func TestStableTo(t *testing.T) {
- tests := []struct {
- stablei uint64
- stablet uint64
- wunstable uint64
- }{
- {1, 1, 2},
- {2, 2, 3},
- {2, 1, 1}, // bad term
- {3, 1, 1}, // bad index
- }
- for i, tt := range tests {
- raftLog := newLog(NewMemoryStorage(), raftLogger)
- raftLog.append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}}...)
- raftLog.stableTo(tt.stablei, tt.stablet)
- if raftLog.unstable.offset != tt.wunstable {
- t.Errorf("#%d: unstable = %d, want %d", i, raftLog.unstable.offset, tt.wunstable)
- }
- }
-}
-
-func TestStableToWithSnap(t *testing.T) {
- snapi, snapt := uint64(5), uint64(2)
- tests := []struct {
- stablei uint64
- stablet uint64
- newEnts []pb.Entry
-
- wunstable uint64
- }{
- {snapi + 1, snapt, nil, snapi + 1},
- {snapi, snapt, nil, snapi + 1},
- {snapi - 1, snapt, nil, snapi + 1},
-
- {snapi + 1, snapt + 1, nil, snapi + 1},
- {snapi, snapt + 1, nil, snapi + 1},
- {snapi - 1, snapt + 1, nil, snapi + 1},
-
- {snapi + 1, snapt, []pb.Entry{{Index: snapi + 1, Term: snapt}}, snapi + 2},
- {snapi, snapt, []pb.Entry{{Index: snapi + 1, Term: snapt}}, snapi + 1},
- {snapi - 1, snapt, []pb.Entry{{Index: snapi + 1, Term: snapt}}, snapi + 1},
-
- {snapi + 1, snapt + 1, []pb.Entry{{Index: snapi + 1, Term: snapt}}, snapi + 1},
- {snapi, snapt + 1, []pb.Entry{{Index: snapi + 1, Term: snapt}}, snapi + 1},
- {snapi - 1, snapt + 1, []pb.Entry{{Index: snapi + 1, Term: snapt}}, snapi + 1},
- }
- for i, tt := range tests {
- s := NewMemoryStorage()
- s.ApplySnapshot(pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: snapi, Term: snapt}})
- raftLog := newLog(s, raftLogger)
- raftLog.append(tt.newEnts...)
- raftLog.stableTo(tt.stablei, tt.stablet)
- if raftLog.unstable.offset != tt.wunstable {
- t.Errorf("#%d: unstable = %d, want %d", i, raftLog.unstable.offset, tt.wunstable)
- }
- }
-}
-
-// TestCompaction ensures that the number of log entries is correct after compactions.
-func TestCompaction(t *testing.T) {
- tests := []struct {
- lastIndex uint64
- compact []uint64
- wleft []int
- wallow bool
- }{
- // out of upper bound
- {1000, []uint64{1001}, []int{-1}, false},
- {1000, []uint64{300, 500, 800, 900}, []int{700, 500, 200, 100}, true},
- // out of lower bound
- {1000, []uint64{300, 299}, []int{700, -1}, false},
- }
-
- for i, tt := range tests {
- func() {
- defer func() {
- if r := recover(); r != nil {
- if tt.wallow {
- t.Errorf("%d: allow = %v, want %v: %v", i, false, true, r)
- }
- }
- }()
-
- storage := NewMemoryStorage()
- for i := uint64(1); i <= tt.lastIndex; i++ {
- storage.Append([]pb.Entry{{Index: i}})
- }
- raftLog := newLog(storage, raftLogger)
- raftLog.maybeCommit(tt.lastIndex, 0)
- raftLog.appliedTo(raftLog.committed)
-
- for j := 0; j < len(tt.compact); j++ {
- err := storage.Compact(tt.compact[j])
- if err != nil {
- if tt.wallow {
- t.Errorf("#%d.%d allow = %t, want %t", i, j, false, tt.wallow)
- }
- continue
- }
- if len(raftLog.allEntries()) != tt.wleft[j] {
- t.Errorf("#%d.%d len = %d, want %d", i, j, len(raftLog.allEntries()), tt.wleft[j])
- }
- }
- }()
- }
-}
-
-func TestLogRestore(t *testing.T) {
- index := uint64(1000)
- term := uint64(1000)
- snap := pb.SnapshotMetadata{Index: index, Term: term}
- storage := NewMemoryStorage()
- storage.ApplySnapshot(pb.Snapshot{Metadata: snap})
- raftLog := newLog(storage, raftLogger)
-
- if len(raftLog.allEntries()) != 0 {
- t.Errorf("len = %d, want 0", len(raftLog.allEntries()))
- }
- if raftLog.firstIndex() != index+1 {
- t.Errorf("firstIndex = %d, want %d", raftLog.firstIndex(), index+1)
- }
- if raftLog.committed != index {
- t.Errorf("committed = %d, want %d", raftLog.committed, index)
- }
- if raftLog.unstable.offset != index+1 {
- t.Errorf("unstable = %d, want %d", raftLog.unstable.offset, index+1)
- }
- if mustTerm(raftLog.term(index)) != term {
- t.Errorf("term = %d, want %d", mustTerm(raftLog.term(index)), term)
- }
-}
-
-func TestIsOutOfBounds(t *testing.T) {
- offset := uint64(100)
- num := uint64(100)
- storage := NewMemoryStorage()
- storage.ApplySnapshot(pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: offset}})
- l := newLog(storage, raftLogger)
- for i := uint64(1); i <= num; i++ {
- l.append(pb.Entry{Index: i + offset})
- }
-
- first := offset + 1
- tests := []struct {
- lo, hi uint64
- wpanic bool
- wErrCompacted bool
- }{
- {
- first - 2, first + 1,
- false,
- true,
- },
- {
- first - 1, first + 1,
- false,
- true,
- },
- {
- first, first,
- false,
- false,
- },
- {
- first + num/2, first + num/2,
- false,
- false,
- },
- {
- first + num - 1, first + num - 1,
- false,
- false,
- },
- {
- first + num, first + num,
- false,
- false,
- },
- {
- first + num, first + num + 1,
- true,
- false,
- },
- {
- first + num + 1, first + num + 1,
- true,
- false,
- },
- }
-
- for i, tt := range tests {
- func() {
- defer func() {
- if r := recover(); r != nil {
- if !tt.wpanic {
- t.Errorf("%d: panic = %v, want %v: %v", i, true, false, r)
- }
- }
- }()
- err := l.mustCheckOutOfBounds(tt.lo, tt.hi)
- if tt.wpanic {
- t.Errorf("#%d: panic = %v, want %v", i, false, true)
- }
- if tt.wErrCompacted && err != ErrCompacted {
- t.Errorf("#%d: err = %v, want %v", i, err, ErrCompacted)
- }
- if !tt.wErrCompacted && err != nil {
- t.Errorf("#%d: unexpected err %v", i, err)
- }
- }()
- }
-}
-
-func TestTerm(t *testing.T) {
- var i uint64
- offset := uint64(100)
- num := uint64(100)
-
- storage := NewMemoryStorage()
- storage.ApplySnapshot(pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: offset, Term: 1}})
- l := newLog(storage, raftLogger)
- for i = 1; i < num; i++ {
- l.append(pb.Entry{Index: offset + i, Term: i})
- }
-
- tests := []struct {
- index uint64
- w uint64
- }{
- {offset - 1, 0},
- {offset, 1},
- {offset + num/2, num / 2},
- {offset + num - 1, num - 1},
- {offset + num, 0},
- }
-
- for j, tt := range tests {
- term := mustTerm(l.term(tt.index))
- if term != tt.w {
- t.Errorf("#%d: at = %d, want %d", j, term, tt.w)
- }
- }
-}
-
-func TestTermWithUnstableSnapshot(t *testing.T) {
- storagesnapi := uint64(100)
- unstablesnapi := storagesnapi + 5
-
- storage := NewMemoryStorage()
- storage.ApplySnapshot(pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: storagesnapi, Term: 1}})
- l := newLog(storage, raftLogger)
- l.restore(pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: unstablesnapi, Term: 1}})
-
- tests := []struct {
- index uint64
- w uint64
- }{
- // cannot get term from storage
- {storagesnapi, 0},
- // cannot get term from the gap between storage ents and unstable snapshot
- {storagesnapi + 1, 0},
- {unstablesnapi - 1, 0},
- // get term from unstable snapshot index
- {unstablesnapi, 1},
- }
-
- for i, tt := range tests {
- term := mustTerm(l.term(tt.index))
- if term != tt.w {
- t.Errorf("#%d: at = %d, want %d", i, term, tt.w)
- }
- }
-}
-
-func TestSlice(t *testing.T) {
- var i uint64
- offset := uint64(100)
- num := uint64(100)
- last := offset + num
- half := offset + num/2
- halfe := pb.Entry{Index: half, Term: half}
-
- storage := NewMemoryStorage()
- storage.ApplySnapshot(pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: offset}})
- for i = 1; i < num/2; i++ {
- storage.Append([]pb.Entry{{Index: offset + i, Term: offset + i}})
- }
- l := newLog(storage, raftLogger)
- for i = num / 2; i < num; i++ {
- l.append(pb.Entry{Index: offset + i, Term: offset + i})
- }
-
- tests := []struct {
- from uint64
- to uint64
- limit uint64
-
- w []pb.Entry
- wpanic bool
- }{
- // test no limit
- {offset - 1, offset + 1, noLimit, nil, false},
- {offset, offset + 1, noLimit, nil, false},
- {half - 1, half + 1, noLimit, []pb.Entry{{Index: half - 1, Term: half - 1}, {Index: half, Term: half}}, false},
- {half, half + 1, noLimit, []pb.Entry{{Index: half, Term: half}}, false},
- {last - 1, last, noLimit, []pb.Entry{{Index: last - 1, Term: last - 1}}, false},
- {last, last + 1, noLimit, nil, true},
-
- // test limit
- {half - 1, half + 1, 0, []pb.Entry{{Index: half - 1, Term: half - 1}}, false},
- {half - 1, half + 1, uint64(halfe.Size() + 1), []pb.Entry{{Index: half - 1, Term: half - 1}}, false},
- {half - 2, half + 1, uint64(halfe.Size() + 1), []pb.Entry{{Index: half - 2, Term: half - 2}}, false},
- {half - 1, half + 1, uint64(halfe.Size() * 2), []pb.Entry{{Index: half - 1, Term: half - 1}, {Index: half, Term: half}}, false},
- {half - 1, half + 2, uint64(halfe.Size() * 3), []pb.Entry{{Index: half - 1, Term: half - 1}, {Index: half, Term: half}, {Index: half + 1, Term: half + 1}}, false},
- {half, half + 2, uint64(halfe.Size()), []pb.Entry{{Index: half, Term: half}}, false},
- {half, half + 2, uint64(halfe.Size() * 2), []pb.Entry{{Index: half, Term: half}, {Index: half + 1, Term: half + 1}}, false},
- }
-
- for j, tt := range tests {
- func() {
- defer func() {
- if r := recover(); r != nil {
- if !tt.wpanic {
- t.Errorf("%d: panic = %v, want %v: %v", j, true, false, r)
- }
- }
- }()
- g, err := l.slice(tt.from, tt.to, tt.limit)
- if tt.from <= offset && err != ErrCompacted {
- t.Fatalf("#%d: err = %v, want %v", j, err, ErrCompacted)
- }
- if tt.from > offset && err != nil {
- t.Fatalf("#%d: unexpected error %v", j, err)
- }
- if !reflect.DeepEqual(g, tt.w) {
- t.Errorf("#%d: from %d to %d = %v, want %v", j, tt.from, tt.to, g, tt.w)
- }
- }()
- }
-}
-
-func mustTerm(term uint64, err error) uint64 {
- if err != nil {
- panic(err)
- }
- return term
-}
diff --git a/raft/log_unstable.go b/raft/log_unstable.go
deleted file mode 100644
index 230fd21f994..00000000000
--- a/raft/log_unstable.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import pb "go.etcd.io/etcd/raft/v3/raftpb"
-
-// unstable.entries[i] has raft log position i+unstable.offset.
-// Note that unstable.offset may be less than the highest log
-// position in storage; this means that the next write to storage
-// might need to truncate the log before persisting unstable.entries.
-type unstable struct {
- // the incoming unstable snapshot, if any.
- snapshot *pb.Snapshot
- // all entries that have not yet been written to storage.
- entries []pb.Entry
- offset uint64
-
- logger Logger
-}
-
-// maybeFirstIndex returns the index of the first possible entry in entries
-// if it has a snapshot.
-func (u *unstable) maybeFirstIndex() (uint64, bool) {
- if u.snapshot != nil {
- return u.snapshot.Metadata.Index + 1, true
- }
- return 0, false
-}
-
-// maybeLastIndex returns the last index if it has at least one
-// unstable entry or snapshot.
-func (u *unstable) maybeLastIndex() (uint64, bool) {
- if l := len(u.entries); l != 0 {
- return u.offset + uint64(l) - 1, true
- }
- if u.snapshot != nil {
- return u.snapshot.Metadata.Index, true
- }
- return 0, false
-}
-
-// maybeTerm returns the term of the entry at index i, if there
-// is any.
-func (u *unstable) maybeTerm(i uint64) (uint64, bool) {
- if i < u.offset {
- if u.snapshot != nil && u.snapshot.Metadata.Index == i {
- return u.snapshot.Metadata.Term, true
- }
- return 0, false
- }
-
- last, ok := u.maybeLastIndex()
- if !ok {
- return 0, false
- }
- if i > last {
- return 0, false
- }
-
- return u.entries[i-u.offset].Term, true
-}
-
-func (u *unstable) stableTo(i, t uint64) {
- gt, ok := u.maybeTerm(i)
- if !ok {
- return
- }
-	// If i < offset, the term is matched against the snapshot.
-	// Only update the unstable entries if the term matches an
-	// unstable entry.
- if gt == t && i >= u.offset {
- u.entries = u.entries[i+1-u.offset:]
- u.offset = i + 1
- u.shrinkEntriesArray()
- }
-}
-
-// shrinkEntriesArray discards the underlying array used by the entries slice
-// if most of it isn't being used. This avoids holding references to a bunch of
-// potentially large entries that aren't needed anymore. Simply clearing the
-// entries wouldn't be safe because clients might still be using them.
-func (u *unstable) shrinkEntriesArray() {
- // We replace the array if we're using less than half of the space in
- // it. This number is fairly arbitrary, chosen as an attempt to balance
- // memory usage vs number of allocations. It could probably be improved
- // with some focused tuning.
- const lenMultiple = 2
- if len(u.entries) == 0 {
- u.entries = nil
- } else if len(u.entries)*lenMultiple < cap(u.entries) {
- newEntries := make([]pb.Entry, len(u.entries))
- copy(newEntries, u.entries)
- u.entries = newEntries
- }
-}
-
-func (u *unstable) stableSnapTo(i uint64) {
- if u.snapshot != nil && u.snapshot.Metadata.Index == i {
- u.snapshot = nil
- }
-}
-
-func (u *unstable) restore(s pb.Snapshot) {
- u.offset = s.Metadata.Index + 1
- u.entries = nil
- u.snapshot = &s
-}
-
-func (u *unstable) truncateAndAppend(ents []pb.Entry) {
- after := ents[0].Index
- switch {
- case after == u.offset+uint64(len(u.entries)):
-		// after is the next index in u.entries,
-		// so append directly.
- u.entries = append(u.entries, ents...)
- case after <= u.offset:
- u.logger.Infof("replace the unstable entries from index %d", after)
- // The log is being truncated to before our current offset
- // portion, so set the offset and replace the entries
- u.offset = after
- u.entries = ents
- default:
- // truncate to after and copy to u.entries
- // then append
- u.logger.Infof("truncate the unstable entries before index %d", after)
- u.entries = append([]pb.Entry{}, u.slice(u.offset, after)...)
- u.entries = append(u.entries, ents...)
- }
-}
-
-func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry {
- u.mustCheckOutOfBounds(lo, hi)
- return u.entries[lo-u.offset : hi-u.offset]
-}
-
-// u.offset <= lo <= hi <= u.offset+len(u.entries)
-func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) {
- if lo > hi {
- u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi)
- }
- upper := u.offset + uint64(len(u.entries))
- if lo < u.offset || hi > upper {
- u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper)
- }
-}
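The three branches of truncateAndAppend above can be illustrated on a stripped-down struct holding only an offset and a slice. miniUnstable below is a hypothetical stand-in, not the real unstable type; it reproduces the same case analysis without logging or bounds checks.

```go
package main

import "fmt"

type entry struct{ Index, Term uint64 }

type miniUnstable struct {
	offset  uint64
	entries []entry
}

func (u *miniUnstable) truncateAndAppend(ents []entry) {
	after := ents[0].Index
	switch {
	case after == u.offset+uint64(len(u.entries)):
		// Contiguous with the tail: plain append.
		u.entries = append(u.entries, ents...)
	case after <= u.offset:
		// Truncation reaches before our window: replace everything.
		u.offset = after
		u.entries = ents
	default:
		// Keep the prefix up to (but excluding) `after`, then append.
		keep := u.entries[:after-u.offset]
		u.entries = append(append([]entry{}, keep...), ents...)
	}
}

func main() {
	u := miniUnstable{offset: 5, entries: []entry{{5, 1}, {6, 1}, {7, 1}}}
	u.truncateAndAppend([]entry{{6, 2}}) // overwrites index 6 and drops 7
	fmt.Println(u.offset, u.entries)     // 5 [{5 1} {6 2}]
}
```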
diff --git a/raft/log_unstable_test.go b/raft/log_unstable_test.go
deleted file mode 100644
index 8d9b4668341..00000000000
--- a/raft/log_unstable_test.go
+++ /dev/null
@@ -1,359 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "reflect"
- "testing"
-
- pb "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-func TestUnstableMaybeFirstIndex(t *testing.T) {
- tests := []struct {
- entries []pb.Entry
- offset uint64
- snap *pb.Snapshot
-
- wok bool
- windex uint64
- }{
- // no snapshot
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, nil,
- false, 0,
- },
- {
- []pb.Entry{}, 0, nil,
- false, 0,
- },
- // has snapshot
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, &pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: 4, Term: 1}},
- true, 5,
- },
- {
- []pb.Entry{}, 5, &pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: 4, Term: 1}},
- true, 5,
- },
- }
-
- for i, tt := range tests {
- u := unstable{
- entries: tt.entries,
- offset: tt.offset,
- snapshot: tt.snap,
- logger: raftLogger,
- }
- index, ok := u.maybeFirstIndex()
- if ok != tt.wok {
- t.Errorf("#%d: ok = %t, want %t", i, ok, tt.wok)
- }
- if index != tt.windex {
- t.Errorf("#%d: index = %d, want %d", i, index, tt.windex)
- }
- }
-}
-
-func TestMaybeLastIndex(t *testing.T) {
- tests := []struct {
- entries []pb.Entry
- offset uint64
- snap *pb.Snapshot
-
- wok bool
- windex uint64
- }{
- // last in entries
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, nil,
- true, 5,
- },
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, &pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: 4, Term: 1}},
- true, 5,
- },
- // last in snapshot
- {
- []pb.Entry{}, 5, &pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: 4, Term: 1}},
- true, 4,
- },
- // empty unstable
- {
- []pb.Entry{}, 0, nil,
- false, 0,
- },
- }
-
- for i, tt := range tests {
- u := unstable{
- entries: tt.entries,
- offset: tt.offset,
- snapshot: tt.snap,
- logger: raftLogger,
- }
- index, ok := u.maybeLastIndex()
- if ok != tt.wok {
- t.Errorf("#%d: ok = %t, want %t", i, ok, tt.wok)
- }
- if index != tt.windex {
- t.Errorf("#%d: index = %d, want %d", i, index, tt.windex)
- }
- }
-}
-
-func TestUnstableMaybeTerm(t *testing.T) {
- tests := []struct {
- entries []pb.Entry
- offset uint64
- snap *pb.Snapshot
- index uint64
-
- wok bool
- wterm uint64
- }{
- // term from entries
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, nil,
- 5,
- true, 1,
- },
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, nil,
- 6,
- false, 0,
- },
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, nil,
- 4,
- false, 0,
- },
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, &pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: 4, Term: 1}},
- 5,
- true, 1,
- },
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, &pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: 4, Term: 1}},
- 6,
- false, 0,
- },
- // term from snapshot
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, &pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: 4, Term: 1}},
- 4,
- true, 1,
- },
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, &pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: 4, Term: 1}},
- 3,
- false, 0,
- },
- {
- []pb.Entry{}, 5, &pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: 4, Term: 1}},
- 5,
- false, 0,
- },
- {
- []pb.Entry{}, 5, &pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: 4, Term: 1}},
- 4,
- true, 1,
- },
- {
- []pb.Entry{}, 0, nil,
- 5,
- false, 0,
- },
- }
-
- for i, tt := range tests {
- u := unstable{
- entries: tt.entries,
- offset: tt.offset,
- snapshot: tt.snap,
- logger: raftLogger,
- }
- term, ok := u.maybeTerm(tt.index)
- if ok != tt.wok {
- t.Errorf("#%d: ok = %t, want %t", i, ok, tt.wok)
- }
- if term != tt.wterm {
- t.Errorf("#%d: term = %d, want %d", i, term, tt.wterm)
- }
- }
-}
-
-func TestUnstableRestore(t *testing.T) {
- u := unstable{
- entries: []pb.Entry{{Index: 5, Term: 1}},
- offset: 5,
- snapshot: &pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: 4, Term: 1}},
- logger: raftLogger,
- }
- s := pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: 6, Term: 2}}
- u.restore(s)
-
- if u.offset != s.Metadata.Index+1 {
- t.Errorf("offset = %d, want %d", u.offset, s.Metadata.Index+1)
- }
- if len(u.entries) != 0 {
- t.Errorf("len = %d, want 0", len(u.entries))
- }
- if !reflect.DeepEqual(u.snapshot, &s) {
- t.Errorf("snap = %v, want %v", u.snapshot, &s)
- }
-}
-
-func TestUnstableStableTo(t *testing.T) {
- tests := []struct {
- entries []pb.Entry
- offset uint64
- snap *pb.Snapshot
- index, term uint64
-
- woffset uint64
- wlen int
- }{
- {
- []pb.Entry{}, 0, nil,
- 5, 1,
- 0, 0,
- },
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, nil,
- 5, 1, // stable to the first entry
- 6, 0,
- },
- {
- []pb.Entry{{Index: 5, Term: 1}, {Index: 6, Term: 1}}, 5, nil,
- 5, 1, // stable to the first entry
- 6, 1,
- },
- {
- []pb.Entry{{Index: 6, Term: 2}}, 6, nil,
- 6, 1, // stable to the first entry and term mismatch
- 6, 1,
- },
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, nil,
- 4, 1, // stable to old entry
- 5, 1,
- },
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, nil,
- 4, 2, // stable to old entry
- 5, 1,
- },
- // with snapshot
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, &pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: 4, Term: 1}},
- 5, 1, // stable to the first entry
- 6, 0,
- },
- {
- []pb.Entry{{Index: 5, Term: 1}, {Index: 6, Term: 1}}, 5, &pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: 4, Term: 1}},
- 5, 1, // stable to the first entry
- 6, 1,
- },
- {
- []pb.Entry{{Index: 6, Term: 2}}, 6, &pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: 5, Term: 1}},
- 6, 1, // stable to the first entry and term mismatch
- 6, 1,
- },
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, &pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: 4, Term: 1}},
- 4, 1, // stable to snapshot
- 5, 1,
- },
- {
- []pb.Entry{{Index: 5, Term: 2}}, 5, &pb.Snapshot{Metadata: pb.SnapshotMetadata{Index: 4, Term: 2}},
- 4, 1, // stable to old entry
- 5, 1,
- },
- }
-
- for i, tt := range tests {
- u := unstable{
- entries: tt.entries,
- offset: tt.offset,
- snapshot: tt.snap,
- logger: raftLogger,
- }
- u.stableTo(tt.index, tt.term)
- if u.offset != tt.woffset {
- t.Errorf("#%d: offset = %d, want %d", i, u.offset, tt.woffset)
- }
- if len(u.entries) != tt.wlen {
- t.Errorf("#%d: len = %d, want %d", i, len(u.entries), tt.wlen)
- }
- }
-}
-
-func TestUnstableTruncateAndAppend(t *testing.T) {
- tests := []struct {
- entries []pb.Entry
- offset uint64
- snap *pb.Snapshot
- toappend []pb.Entry
-
- woffset uint64
- wentries []pb.Entry
- }{
- // append to the end
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, nil,
- []pb.Entry{{Index: 6, Term: 1}, {Index: 7, Term: 1}},
- 5, []pb.Entry{{Index: 5, Term: 1}, {Index: 6, Term: 1}, {Index: 7, Term: 1}},
- },
- // replace the unstable entries
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, nil,
- []pb.Entry{{Index: 5, Term: 2}, {Index: 6, Term: 2}},
- 5, []pb.Entry{{Index: 5, Term: 2}, {Index: 6, Term: 2}},
- },
- {
- []pb.Entry{{Index: 5, Term: 1}}, 5, nil,
- []pb.Entry{{Index: 4, Term: 2}, {Index: 5, Term: 2}, {Index: 6, Term: 2}},
- 4, []pb.Entry{{Index: 4, Term: 2}, {Index: 5, Term: 2}, {Index: 6, Term: 2}},
- },
- // truncate the existing entries and append
- {
- []pb.Entry{{Index: 5, Term: 1}, {Index: 6, Term: 1}, {Index: 7, Term: 1}}, 5, nil,
- []pb.Entry{{Index: 6, Term: 2}},
- 5, []pb.Entry{{Index: 5, Term: 1}, {Index: 6, Term: 2}},
- },
- {
- []pb.Entry{{Index: 5, Term: 1}, {Index: 6, Term: 1}, {Index: 7, Term: 1}}, 5, nil,
- []pb.Entry{{Index: 7, Term: 2}, {Index: 8, Term: 2}},
- 5, []pb.Entry{{Index: 5, Term: 1}, {Index: 6, Term: 1}, {Index: 7, Term: 2}, {Index: 8, Term: 2}},
- },
- }
-
- for i, tt := range tests {
- u := unstable{
- entries: tt.entries,
- offset: tt.offset,
- snapshot: tt.snap,
- logger: raftLogger,
- }
- u.truncateAndAppend(tt.toappend)
- if u.offset != tt.woffset {
- t.Errorf("#%d: offset = %d, want %d", i, u.offset, tt.woffset)
- }
- if !reflect.DeepEqual(u.entries, tt.wentries) {
- t.Errorf("#%d: entries = %v, want %v", i, u.entries, tt.wentries)
- }
- }
-}
diff --git a/raft/logger.go b/raft/logger.go
deleted file mode 100644
index dc73b1f2104..00000000000
--- a/raft/logger.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "sync"
-)
-
-type Logger interface {
- Debug(v ...interface{})
- Debugf(format string, v ...interface{})
-
- Error(v ...interface{})
- Errorf(format string, v ...interface{})
-
- Info(v ...interface{})
- Infof(format string, v ...interface{})
-
- Warning(v ...interface{})
- Warningf(format string, v ...interface{})
-
- Fatal(v ...interface{})
- Fatalf(format string, v ...interface{})
-
- Panic(v ...interface{})
- Panicf(format string, v ...interface{})
-}
-
-func SetLogger(l Logger) {
- raftLoggerMu.Lock()
- raftLogger = l
- raftLoggerMu.Unlock()
-}
-
-func ResetDefaultLogger() {
- SetLogger(defaultLogger)
-}
-
-func getLogger() Logger {
- raftLoggerMu.Lock()
- defer raftLoggerMu.Unlock()
- return raftLogger
-}
-
-var (
- defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)}
- discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)}
- raftLoggerMu sync.Mutex
- raftLogger = Logger(defaultLogger)
-)
-
-const (
- calldepth = 2
-)
-
-// DefaultLogger is a default implementation of the Logger interface.
-type DefaultLogger struct {
- *log.Logger
- debug bool
-}
-
-func (l *DefaultLogger) EnableTimestamps() {
- l.SetFlags(l.Flags() | log.Ldate | log.Ltime)
-}
-
-func (l *DefaultLogger) EnableDebug() {
- l.debug = true
-}
-
-func (l *DefaultLogger) Debug(v ...interface{}) {
- if l.debug {
- l.Output(calldepth, header("DEBUG", fmt.Sprint(v...)))
- }
-}
-
-func (l *DefaultLogger) Debugf(format string, v ...interface{}) {
- if l.debug {
- l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...)))
- }
-}
-
-func (l *DefaultLogger) Info(v ...interface{}) {
- l.Output(calldepth, header("INFO", fmt.Sprint(v...)))
-}
-
-func (l *DefaultLogger) Infof(format string, v ...interface{}) {
- l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...)))
-}
-
-func (l *DefaultLogger) Error(v ...interface{}) {
- l.Output(calldepth, header("ERROR", fmt.Sprint(v...)))
-}
-
-func (l *DefaultLogger) Errorf(format string, v ...interface{}) {
- l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...)))
-}
-
-func (l *DefaultLogger) Warning(v ...interface{}) {
- l.Output(calldepth, header("WARN", fmt.Sprint(v...)))
-}
-
-func (l *DefaultLogger) Warningf(format string, v ...interface{}) {
- l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...)))
-}
-
-func (l *DefaultLogger) Fatal(v ...interface{}) {
- l.Output(calldepth, header("FATAL", fmt.Sprint(v...)))
- os.Exit(1)
-}
-
-func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
- l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...)))
- os.Exit(1)
-}
-
-func (l *DefaultLogger) Panic(v ...interface{}) {
- l.Logger.Panic(v...)
-}
-
-func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
- l.Logger.Panicf(format, v...)
-}
-
-func header(lvl, msg string) string {
- return fmt.Sprintf("%s: %s", lvl, msg)
-}
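
For reference, a hedged usage sketch of the logging hooks deleted above, assuming the pre-move import path go.etcd.io/etcd/raft/v3; the prefix and output destination are arbitrary:

```go
package main

import (
	"log"
	"os"

	"go.etcd.io/etcd/raft/v3"
)

func main() {
	// DefaultLogger embeds *log.Logger, so any standard destination works.
	l := &raft.DefaultLogger{Logger: log.New(os.Stdout, "raft ", 0)}
	l.EnableTimestamps() // adds log.Ldate|log.Ltime to the flags
	l.EnableDebug()      // lets Debug/Debugf messages through

	raft.SetLogger(l) // raft state-machine logging now goes through l
	defer raft.ResetDefaultLogger()
}
```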
diff --git a/raft/node.go b/raft/node.go
deleted file mode 100644
index dca5954f7ae..00000000000
--- a/raft/node.go
+++ /dev/null
@@ -1,590 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "context"
- "errors"
-
- pb "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-type SnapshotStatus int
-
-const (
- SnapshotFinish SnapshotStatus = 1
- SnapshotFailure SnapshotStatus = 2
-)
-
-var (
- emptyState = pb.HardState{}
-
- // ErrStopped is returned by methods on Nodes that have been stopped.
- ErrStopped = errors.New("raft: stopped")
-)
-
-// SoftState provides state that is useful for logging and debugging.
-// The state is volatile and does not need to be persisted to the WAL.
-type SoftState struct {
- Lead uint64 // must use atomic operations to access; keep 64-bit aligned.
- RaftState StateType
-}
-
-func (a *SoftState) equal(b *SoftState) bool {
- return a.Lead == b.Lead && a.RaftState == b.RaftState
-}
-
-// Ready encapsulates the entries and messages that are ready to read,
-// be saved to stable storage, committed or sent to other peers.
-// All fields in Ready are read-only.
-type Ready struct {
- // The current volatile state of a Node.
- // SoftState will be nil if there is no update.
- // It is not required to consume or store SoftState.
- *SoftState
-
- // The current state of a Node to be saved to stable storage BEFORE
- // Messages are sent.
- // HardState will be equal to empty state if there is no update.
- pb.HardState
-
- // ReadStates can be used by the node to serve linearizable read requests locally
- // when its applied index is greater than the index in ReadState.
- // Note that the ReadState will be returned when raft receives MsgReadIndex.
- // The returned ReadState is only valid for the request that requested to read.
- ReadStates []ReadState
-
- // Entries specifies entries to be saved to stable storage BEFORE
- // Messages are sent.
- Entries []pb.Entry
-
- // Snapshot specifies the snapshot to be saved to stable storage.
- Snapshot pb.Snapshot
-
- // CommittedEntries specifies entries to be committed to a
- // store/state-machine. These have previously been committed to stable
- // store.
- CommittedEntries []pb.Entry
-
- // Messages specifies outbound messages to be sent AFTER Entries are
- // committed to stable storage.
- // If it contains a MsgSnap message, the application MUST report back to raft
- // when the snapshot has been received or has failed by calling ReportSnapshot.
- Messages []pb.Message
-
- // MustSync indicates whether the HardState and Entries must be synchronously
- // written to disk or if an asynchronous write is permissible.
- MustSync bool
-}
-
-func isHardStateEqual(a, b pb.HardState) bool {
- return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
-}
-
-// IsEmptyHardState returns true if the given HardState is empty.
-func IsEmptyHardState(st pb.HardState) bool {
- return isHardStateEqual(st, emptyState)
-}
-
-// IsEmptySnap returns true if the given Snapshot is empty.
-func IsEmptySnap(sp pb.Snapshot) bool {
- return sp.Metadata.Index == 0
-}
-
-func (rd Ready) containsUpdates() bool {
- return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) ||
- !IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 ||
- len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0
-}
-
-// appliedCursor extracts from the Ready the highest index the client has
-// applied (once the Ready is confirmed via Advance). If no information is
-// contained in the Ready, returns zero.
-func (rd Ready) appliedCursor() uint64 {
- if n := len(rd.CommittedEntries); n > 0 {
- return rd.CommittedEntries[n-1].Index
- }
- if index := rd.Snapshot.Metadata.Index; index > 0 {
- return index
- }
- return 0
-}
-
-// Node represents a node in a raft cluster.
-type Node interface {
- // Tick increments the internal logical clock for the Node by a single tick. Election
- // timeouts and heartbeat timeouts are in units of ticks.
- Tick()
- // Campaign causes the Node to transition to candidate state and start campaigning to become leader.
- Campaign(ctx context.Context) error
- // Propose proposes that data be appended to the log. Note that proposals can be lost without
- // notice, therefore it is the user's job to ensure proposal retries.
- Propose(ctx context.Context, data []byte) error
- // ProposeConfChange proposes a configuration change. Like any proposal, the
- // configuration change may be dropped with or without an error being
- // returned. In particular, configuration changes are dropped unless the
- // leader has certainty that there is no prior unapplied configuration
- // change in its log.
- //
- // The method accepts either a pb.ConfChange (deprecated) or pb.ConfChangeV2
- // message. The latter allows arbitrary configuration changes via joint
- // consensus, notably including replacing a voter. Passing a ConfChangeV2
- // message is only allowed if all Nodes participating in the cluster run a
- // version of this library aware of the V2 API. See pb.ConfChangeV2 for
- // usage details and semantics.
- ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error
-
- // Step advances the state machine using the given message. ctx.Err() will be returned, if any.
- Step(ctx context.Context, msg pb.Message) error
-
- // Ready returns a channel that returns the current point-in-time state.
- // Users of the Node must call Advance after retrieving the state returned by Ready.
- //
- // NOTE: No committed entries from the next Ready may be applied until all committed entries
- // and snapshots from the previous one have been applied.
- Ready() <-chan Ready
-
- // Advance notifies the Node that the application has saved progress up to the last Ready.
- // It prepares the node to return the next available Ready.
- //
- // The application should generally call Advance after it applies the entries in the last Ready.
- //
- // However, as an optimization, the application may call Advance while it is applying the
- // commands. For example, when the last Ready contains a snapshot, the application might take
- // a long time to apply the snapshot data. To continue receiving Ready without blocking raft
- // progress, it can call Advance before finishing applying the last Ready.
- Advance()
- // ApplyConfChange applies a config change (previously passed to
- // ProposeConfChange) to the node. This must be called whenever a config
- // change is observed in Ready.CommittedEntries, except when the app decides
- // to reject the configuration change (i.e. treats it as a noop instead), in
- // which case it must not be called.
- //
- // Returns an opaque non-nil ConfState protobuf which must be recorded in
- // snapshots.
- ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState
-
- // TransferLeadership attempts to transfer leadership to the given transferee.
- TransferLeadership(ctx context.Context, lead, transferee uint64)
-
- // ReadIndex requests a read state. The read state will be set in the Ready.
- // Read state has a read index. Once the application advances further than the read
- // index, any linearizable read requests issued before the read request can be
- // processed safely. The read state will have the same rctx attached.
- // Note that the request can be lost without notice, therefore it is the user's job
- // to ensure read-index retries.
- ReadIndex(ctx context.Context, rctx []byte) error
-
- // Status returns the current status of the raft state machine.
- Status() Status
- // ReportUnreachable reports the given node is not reachable for the last send.
- ReportUnreachable(id uint64)
- // ReportSnapshot reports the status of the sent snapshot. The id is the raft ID of the follower
- // who is meant to receive the snapshot, and the status is SnapshotFinish or SnapshotFailure.
- // Calling ReportSnapshot with SnapshotFinish is a no-op. However, any failure in applying a
- // snapshot (e.g., while streaming it from leader to follower) should be reported to the
- // leader with SnapshotFailure. When the leader sends a snapshot to a follower, it pauses any raft
- // log probes until the follower can apply the snapshot and advance its state. If the follower
- // can't do that, e.g., due to a crash, it could end up in limbo, never getting any
- // updates from the leader. Therefore, it is crucial that the application ensures that any
- // failure in snapshot sending is caught and reported back to the leader, so that it can resume raft
- // log probing on the follower.
- ReportSnapshot(id uint64, status SnapshotStatus)
- // Stop performs any necessary termination of the Node.
- Stop()
-}
-
-type Peer struct {
- ID uint64
- Context []byte
-}
-
-// StartNode returns a new Node given configuration and a list of raft peers.
-// It appends a ConfChangeAddNode entry for each given peer to the initial log.
-//
-// Peers must not be zero length; call RestartNode in that case.
-func StartNode(c *Config, peers []Peer) Node {
- if len(peers) == 0 {
- panic("no peers given; use RestartNode instead")
- }
- rn, err := NewRawNode(c)
- if err != nil {
- panic(err)
- }
- rn.Bootstrap(peers)
-
- n := newNode(rn)
-
- go n.run()
- return &n
-}
-
-// RestartNode is similar to StartNode but does not take a list of peers.
-// The current membership of the cluster will be restored from the Storage.
-// If the caller has an existing state machine, pass in the last log index that
-// has been applied to it; otherwise use zero.
-func RestartNode(c *Config) Node {
- rn, err := NewRawNode(c)
- if err != nil {
- panic(err)
- }
- n := newNode(rn)
- go n.run()
- return &n
-}
-
-type msgWithResult struct {
- m pb.Message
- result chan error
-}
-
-// node is the canonical implementation of the Node interface
-type node struct {
- propc chan msgWithResult
- recvc chan pb.Message
- confc chan pb.ConfChangeV2
- confstatec chan pb.ConfState
- readyc chan Ready
- advancec chan struct{}
- tickc chan struct{}
- done chan struct{}
- stop chan struct{}
- status chan chan Status
-
- rn *RawNode
-}
-
-func newNode(rn *RawNode) node {
- return node{
- propc: make(chan msgWithResult),
- recvc: make(chan pb.Message),
- confc: make(chan pb.ConfChangeV2),
- confstatec: make(chan pb.ConfState),
- readyc: make(chan Ready),
- advancec: make(chan struct{}),
- // make tickc a buffered chan, so the raft node can buffer some ticks when it
- // is busy processing raft messages. The raft node will resume processing buffered
- // ticks when it becomes idle.
- tickc: make(chan struct{}, 128),
- done: make(chan struct{}),
- stop: make(chan struct{}),
- status: make(chan chan Status),
- rn: rn,
- }
-}
-
-func (n *node) Stop() {
- select {
- case n.stop <- struct{}{}:
- // Not already stopped, so trigger it
- case <-n.done:
- // Node has already been stopped - no need to do anything
- return
- }
- // Block until the stop has been acknowledged by run()
- <-n.done
-}
-
-func (n *node) run() {
- var propc chan msgWithResult
- var readyc chan Ready
- var advancec chan struct{}
- var rd Ready
-
- r := n.rn.raft
-
- lead := None
-
- for {
- if advancec != nil {
- readyc = nil
- } else if n.rn.HasReady() {
- // Populate a Ready. Note that this Ready is not guaranteed to
- // actually be handled. We will arm readyc, but there's no guarantee
- // that we will actually send on it. It's possible that we will
- // service another channel instead, loop around, and then populate
- // the Ready again. We could instead force the previous Ready to be
- // handled first, but it's generally good to emit larger Readys plus
- // it simplifies testing (by emitting less frequently and more
- // predictably).
- rd = n.rn.readyWithoutAccept()
- readyc = n.readyc
- }
-
- if lead != r.lead {
- if r.hasLeader() {
- if lead == None {
- r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
- } else {
- r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
- }
- propc = n.propc
- } else {
- r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
- propc = nil
- }
- lead = r.lead
- }
-
- select {
- // TODO: maybe buffer the config propose if there exists one (the way
- // described in raft dissertation)
- // Currently it is dropped in Step silently.
- case pm := <-propc:
- m := pm.m
- m.From = r.id
- err := r.Step(m)
- if pm.result != nil {
- pm.result <- err
- close(pm.result)
- }
- case m := <-n.recvc:
- // filter out response messages from an unknown From.
- if pr := r.prs.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) {
- r.Step(m)
- }
- case cc := <-n.confc:
- _, okBefore := r.prs.Progress[r.id]
- cs := r.applyConfChange(cc)
- // If the node was removed, block incoming proposals. Note that we
- // only do this if the node was in the config before. Nodes may be
- // a member of the group without knowing this (when they're catching
- // up on the log and don't have the latest config) and we don't want
- // to block the proposal channel in that case.
- //
- // NB: propc is reset when the leader changes, which, if we learn
- // about it, sort of implies that we got readded, maybe? This isn't
- // very sound and likely has bugs.
- if _, okAfter := r.prs.Progress[r.id]; okBefore && !okAfter {
- var found bool
- outer:
- for _, sl := range [][]uint64{cs.Voters, cs.VotersOutgoing} {
- for _, id := range sl {
- if id == r.id {
- found = true
- break outer
- }
- }
- }
- if !found {
- propc = nil
- }
- }
- select {
- case n.confstatec <- cs:
- case <-n.done:
- }
- case <-n.tickc:
- n.rn.Tick()
- case readyc <- rd:
- n.rn.acceptReady(rd)
- advancec = n.advancec
- case <-advancec:
- n.rn.Advance(rd)
- rd = Ready{}
- advancec = nil
- case c := <-n.status:
- c <- getStatus(r)
- case <-n.stop:
- close(n.done)
- return
- }
- }
-}
-
-// Tick increments the internal logical clock for this Node. Election timeouts
-// and heartbeat timeouts are in units of ticks.
-func (n *node) Tick() {
- select {
- case n.tickc <- struct{}{}:
- case <-n.done:
- default:
- n.rn.raft.logger.Warningf("%x A tick missed to fire. Node blocks too long!", n.rn.raft.id)
- }
-}
-
-func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }
-
-func (n *node) Propose(ctx context.Context, data []byte) error {
- return n.stepWait(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
-}
-
-func (n *node) Step(ctx context.Context, m pb.Message) error {
- // ignore unexpected local messages received over the network
- if IsLocalMsg(m.Type) {
- // TODO: return an error?
- return nil
- }
- return n.step(ctx, m)
-}
-
-func confChangeToMsg(c pb.ConfChangeI) (pb.Message, error) {
- typ, data, err := pb.MarshalConfChange(c)
- if err != nil {
- return pb.Message{}, err
- }
- return pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: typ, Data: data}}}, nil
-}
-
-func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error {
- msg, err := confChangeToMsg(cc)
- if err != nil {
- return err
- }
- return n.Step(ctx, msg)
-}
-
-func (n *node) step(ctx context.Context, m pb.Message) error {
- return n.stepWithWaitOption(ctx, m, false)
-}
-
-func (n *node) stepWait(ctx context.Context, m pb.Message) error {
- return n.stepWithWaitOption(ctx, m, true)
-}
-
- // stepWithWaitOption advances the state machine using the given message,
- // optionally waiting for the proposal result. ctx.Err() will be returned, if any.
-func (n *node) stepWithWaitOption(ctx context.Context, m pb.Message, wait bool) error {
- if m.Type != pb.MsgProp {
- select {
- case n.recvc <- m:
- return nil
- case <-ctx.Done():
- return ctx.Err()
- case <-n.done:
- return ErrStopped
- }
- }
- ch := n.propc
- pm := msgWithResult{m: m}
- if wait {
- pm.result = make(chan error, 1)
- }
- select {
- case ch <- pm:
- if !wait {
- return nil
- }
- case <-ctx.Done():
- return ctx.Err()
- case <-n.done:
- return ErrStopped
- }
- select {
- case err := <-pm.result:
- if err != nil {
- return err
- }
- case <-ctx.Done():
- return ctx.Err()
- case <-n.done:
- return ErrStopped
- }
- return nil
-}
-
-func (n *node) Ready() <-chan Ready { return n.readyc }
-
-func (n *node) Advance() {
- select {
- case n.advancec <- struct{}{}:
- case <-n.done:
- }
-}
-
-func (n *node) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState {
- var cs pb.ConfState
- select {
- case n.confc <- cc.AsV2():
- case <-n.done:
- }
- select {
- case cs = <-n.confstatec:
- case <-n.done:
- }
- return &cs
-}
-
-func (n *node) Status() Status {
- c := make(chan Status)
- select {
- case n.status <- c:
- return <-c
- case <-n.done:
- return Status{}
- }
-}
-
-func (n *node) ReportUnreachable(id uint64) {
- select {
- case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
- case <-n.done:
- }
-}
-
-func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) {
- rej := status == SnapshotFailure
-
- select {
- case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
- case <-n.done:
- }
-}
-
-func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) {
- select {
- // manually set 'from' and 'to', so that the leader can voluntarily transfer its leadership
- case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}:
- case <-n.done:
- case <-ctx.Done():
- }
-}
-
-func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
- return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
-}
-
-func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
- rd := Ready{
- Entries: r.raftLog.unstableEntries(),
- CommittedEntries: r.raftLog.nextEnts(),
- Messages: r.msgs,
- }
- if softSt := r.softState(); !softSt.equal(prevSoftSt) {
- rd.SoftState = softSt
- }
- if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) {
- rd.HardState = hardSt
- }
- if r.raftLog.unstable.snapshot != nil {
- rd.Snapshot = *r.raftLog.unstable.snapshot
- }
- if len(r.readStates) != 0 {
- rd.ReadStates = r.readStates
- }
- rd.MustSync = MustSync(r.hardState(), prevHardSt, len(rd.Entries))
- return rd
-}
-
-// MustSync returns true if the hard state and count of Raft entries indicate
-// that a synchronous write to persistent storage is required.
-func MustSync(st, prevst pb.HardState, entsnum int) bool {
- // Persistent state on all servers:
- // (Updated on stable storage before responding to RPCs)
- // currentTerm
- // votedFor
- // log entries[]
- return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
-}
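
For reference, a minimal sketch of the Ready/Advance contract documented on the Node interface above (pre-move import path assumed; storage writes are stubbed with MemoryStorage and the message transport is omitted, so this is illustrative rather than a complete application):

```go
package main

import (
	"time"

	"go.etcd.io/etcd/raft/v3"
	"go.etcd.io/etcd/raft/v3/raftpb"
)

func main() {
	storage := raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              1,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   1 << 20, // illustrative limit
		MaxInflightMsgs: 256,
	}
	n := raft.StartNode(c, []raft.Peer{{ID: 1}})
	defer n.Stop()

	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	for i := 0; i < 50; i++ { // bounded loop so the sketch terminates
		select {
		case <-ticker.C:
			n.Tick()
		case rd := <-n.Ready():
			// 1. Persist HardState, Entries and Snapshot BEFORE sending Messages.
			if !raft.IsEmptyHardState(rd.HardState) {
				storage.SetHardState(rd.HardState)
			}
			storage.Append(rd.Entries)
			if !raft.IsEmptySnap(rd.Snapshot) {
				storage.ApplySnapshot(rd.Snapshot)
			}
			// 2. Send rd.Messages to their destinations (transport omitted).
			// 3. Apply committed entries, feeding conf changes back to the node.
			for _, ent := range rd.CommittedEntries {
				if ent.Type == raftpb.EntryConfChange {
					var cc raftpb.ConfChange
					cc.Unmarshal(ent.Data)
					n.ApplyConfChange(cc)
				}
			}
			// 4. Signal that this Ready has been fully handled.
			n.Advance()
		}
	}
}
```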
diff --git a/raft/node_bench_test.go b/raft/node_bench_test.go
deleted file mode 100644
index fde40feb4dc..00000000000
--- a/raft/node_bench_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "context"
- "testing"
- "time"
-)
-
-func BenchmarkOneNode(b *testing.B) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- s := newTestMemoryStorage(withPeers(1))
- rn := newTestRawNode(1, 10, 1, s)
- n := newNode(rn)
- go n.run()
-
- defer n.Stop()
-
- n.Campaign(ctx)
- go func() {
- for i := 0; i < b.N; i++ {
- n.Propose(ctx, []byte("foo"))
- }
- }()
-
- for {
- rd := <-n.Ready()
- s.Append(rd.Entries)
- // a reasonable disk sync latency
- time.Sleep(1 * time.Millisecond)
- n.Advance()
- if rd.HardState.Commit == uint64(b.N+1) {
- return
- }
- }
-}
diff --git a/raft/node_test.go b/raft/node_test.go
deleted file mode 100644
index e44d073e268..00000000000
--- a/raft/node_test.go
+++ /dev/null
@@ -1,1020 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "bytes"
- "context"
- "fmt"
- "math"
- "reflect"
- "strings"
- "testing"
- "time"
-
- "go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-// readyWithTimeout selects from n.Ready() with a 1-second timeout. It
-// panics on timeout, which is better than the indefinite wait that
-// would occur if this channel were read without being wrapped in a
-// select.
-func readyWithTimeout(n Node) Ready {
- select {
- case rd := <-n.Ready():
- return rd
- case <-time.After(time.Second):
- panic("timed out waiting for ready")
- }
-}
-
-// TestNodeStep ensures that node.Step sends msgProp to propc chan
-// and other kinds of messages to recvc chan.
-func TestNodeStep(t *testing.T) {
- for i, msgn := range raftpb.MessageType_name {
- n := &node{
- propc: make(chan msgWithResult, 1),
- recvc: make(chan raftpb.Message, 1),
- }
- msgt := raftpb.MessageType(i)
- n.Step(context.TODO(), raftpb.Message{Type: msgt})
- // Proposals go to the propc chan. Others go to the recvc chan.
- if msgt == raftpb.MsgProp {
- select {
- case <-n.propc:
- default:
- t.Errorf("%d: cannot receive %s on propc chan", msgt, msgn)
- }
- } else {
- if IsLocalMsg(msgt) {
- select {
- case <-n.recvc:
- t.Errorf("%d: step should ignore %s", msgt, msgn)
- default:
- }
- } else {
- select {
- case <-n.recvc:
- default:
- t.Errorf("%d: cannot receive %s on recvc chan", msgt, msgn)
- }
- }
- }
- }
-}
-
-// Cancel and Stop should unblock Step()
-func TestNodeStepUnblock(t *testing.T) {
- // a node without buffer to block step
- n := &node{
- propc: make(chan msgWithResult),
- done: make(chan struct{}),
- }
-
- ctx, cancel := context.WithCancel(context.Background())
- stopFunc := func() { close(n.done) }
-
- tests := []struct {
- unblock func()
- werr error
- }{
- {stopFunc, ErrStopped},
- {cancel, context.Canceled},
- }
-
- for i, tt := range tests {
- errc := make(chan error, 1)
- go func() {
- err := n.Step(ctx, raftpb.Message{Type: raftpb.MsgProp})
- errc <- err
- }()
- tt.unblock()
- select {
- case err := <-errc:
- if err != tt.werr {
- t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
- }
- // clean up side effect
- if ctx.Err() != nil {
- ctx = context.TODO()
- }
- select {
- case <-n.done:
- n.done = make(chan struct{})
- default:
- }
- case <-time.After(1 * time.Second):
- t.Fatalf("#%d: failed to unblock step", i)
- }
- }
-}
-
-// TestNodePropose ensures that node.Propose sends the given proposal to the underlying raft.
-func TestNodePropose(t *testing.T) {
- msgs := []raftpb.Message{}
- appendStep := func(r *raft, m raftpb.Message) error {
- msgs = append(msgs, m)
- return nil
- }
-
- s := newTestMemoryStorage(withPeers(1))
- rn := newTestRawNode(1, 10, 1, s)
- n := newNode(rn)
- r := rn.raft
- go n.run()
- if err := n.Campaign(context.TODO()); err != nil {
- t.Fatal(err)
- }
- for {
- rd := <-n.Ready()
- s.Append(rd.Entries)
- // change the step function to appendStep once this raft becomes leader
- if rd.SoftState.Lead == r.id {
- r.step = appendStep
- n.Advance()
- break
- }
- n.Advance()
- }
- n.Propose(context.TODO(), []byte("somedata"))
- n.Stop()
-
- if len(msgs) != 1 {
- t.Fatalf("len(msgs) = %d, want %d", len(msgs), 1)
- }
- if msgs[0].Type != raftpb.MsgProp {
- t.Errorf("msg type = %d, want %d", msgs[0].Type, raftpb.MsgProp)
- }
- if !bytes.Equal(msgs[0].Entries[0].Data, []byte("somedata")) {
- t.Errorf("data = %v, want %v", msgs[0].Entries[0].Data, []byte("somedata"))
- }
-}
-
-// TestNodeReadIndex ensures that node.ReadIndex sends the MsgReadIndex message to the underlying raft.
-// It also ensures that ReadState can be read out through ready chan.
-func TestNodeReadIndex(t *testing.T) {
- msgs := []raftpb.Message{}
- appendStep := func(r *raft, m raftpb.Message) error {
- msgs = append(msgs, m)
- return nil
- }
- wrs := []ReadState{{Index: uint64(1), RequestCtx: []byte("somedata")}}
-
- s := newTestMemoryStorage(withPeers(1))
- rn := newTestRawNode(1, 10, 1, s)
- n := newNode(rn)
- r := rn.raft
- r.readStates = wrs
-
- go n.run()
- n.Campaign(context.TODO())
- for {
- rd := <-n.Ready()
- if !reflect.DeepEqual(rd.ReadStates, wrs) {
- t.Errorf("ReadStates = %v, want %v", rd.ReadStates, wrs)
- }
-
- s.Append(rd.Entries)
-
- if rd.SoftState.Lead == r.id {
- n.Advance()
- break
- }
- n.Advance()
- }
-
- r.step = appendStep
- wrequestCtx := []byte("somedata2")
- n.ReadIndex(context.TODO(), wrequestCtx)
- n.Stop()
-
- if len(msgs) != 1 {
- t.Fatalf("len(msgs) = %d, want %d", len(msgs), 1)
- }
- if msgs[0].Type != raftpb.MsgReadIndex {
- t.Errorf("msg type = %d, want %d", msgs[0].Type, raftpb.MsgReadIndex)
- }
- if !bytes.Equal(msgs[0].Entries[0].Data, wrequestCtx) {
- t.Errorf("data = %v, want %v", msgs[0].Entries[0].Data, wrequestCtx)
- }
-}
-
-// TestDisableProposalForwarding ensures that proposals are not forwarded to
-// the leader when DisableProposalForwarding is true.
-func TestDisableProposalForwarding(t *testing.T) {
- r1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- r2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- cfg3 := newTestConfig(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- cfg3.DisableProposalForwarding = true
- r3 := newRaft(cfg3)
- nt := newNetwork(r1, r2, r3)
-
- // elect r1 as leader
- nt.send(raftpb.Message{From: 1, To: 1, Type: raftpb.MsgHup})
-
- var testEntries = []raftpb.Entry{{Data: []byte("testdata")}}
-
- // send proposal to r2(follower) where DisableProposalForwarding is false
- r2.Step(raftpb.Message{From: 2, To: 2, Type: raftpb.MsgProp, Entries: testEntries})
-
- // verify r2(follower) does forward the proposal when DisableProposalForwarding is false
- if len(r2.msgs) != 1 {
- t.Fatalf("len(r2.msgs) expected 1, got %d", len(r2.msgs))
- }
-
- // send proposal to r3(follower) where DisableProposalForwarding is true
- r3.Step(raftpb.Message{From: 3, To: 3, Type: raftpb.MsgProp, Entries: testEntries})
-
- // verify r3(follower) does not forward the proposal when DisableProposalForwarding is true
- if len(r3.msgs) != 0 {
- t.Fatalf("len(r3.msgs) expected 0, got %d", len(r3.msgs))
- }
-}
-
- // TestNodeReadIndexToOldLeader ensures that a raftpb.MsgReadIndex sent to an old leader
- // gets forwarded to the new leader and that the 'send' method does not attach its term.
-func TestNodeReadIndexToOldLeader(t *testing.T) {
- r1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- r2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- r3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
-
- nt := newNetwork(r1, r2, r3)
-
- // elect r1 as leader
- nt.send(raftpb.Message{From: 1, To: 1, Type: raftpb.MsgHup})
-
- var testEntries = []raftpb.Entry{{Data: []byte("testdata")}}
-
- // send readindex request to r2(follower)
- r2.Step(raftpb.Message{From: 2, To: 2, Type: raftpb.MsgReadIndex, Entries: testEntries})
-
- // verify r2(follower) forwards this message to r1(leader) with term not set
- if len(r2.msgs) != 1 {
- t.Fatalf("len(r2.msgs) expected 1, got %d", len(r2.msgs))
- }
- readIndxMsg1 := raftpb.Message{From: 2, To: 1, Type: raftpb.MsgReadIndex, Entries: testEntries}
- if !reflect.DeepEqual(r2.msgs[0], readIndxMsg1) {
- t.Fatalf("r2.msgs[0] expected %+v, got %+v", readIndxMsg1, r2.msgs[0])
- }
-
- // send readindex request to r3(follower)
- r3.Step(raftpb.Message{From: 3, To: 3, Type: raftpb.MsgReadIndex, Entries: testEntries})
-
- // verify r3(follower) forwards this message to r1(leader) with term not set as well.
- if len(r3.msgs) != 1 {
- t.Fatalf("len(r3.msgs) expected 1, got %d", len(r3.msgs))
- }
- readIndxMsg2 := raftpb.Message{From: 3, To: 1, Type: raftpb.MsgReadIndex, Entries: testEntries}
- if !reflect.DeepEqual(r3.msgs[0], readIndxMsg2) {
- t.Fatalf("r3.msgs[0] expected %+v, got %+v", readIndxMsg2, r3.msgs[0])
- }
-
- // now elect r3 as leader
- nt.send(raftpb.Message{From: 3, To: 3, Type: raftpb.MsgHup})
-
- // let r1 step the two messages we previously got from r2 and r3
- r1.Step(readIndxMsg1)
- r1.Step(readIndxMsg2)
-
- // verify r1(follower) forwards these messages again to r3(new leader)
- if len(r1.msgs) != 2 {
- t.Fatalf("len(r1.msgs) expected 1, got %d", len(r1.msgs))
- }
- readIndxMsg3 := raftpb.Message{From: 2, To: 3, Type: raftpb.MsgReadIndex, Entries: testEntries}
- if !reflect.DeepEqual(r1.msgs[0], readIndxMsg3) {
- t.Fatalf("r1.msgs[0] expected %+v, got %+v", readIndxMsg3, r1.msgs[0])
- }
- readIndxMsg3 = raftpb.Message{From: 3, To: 3, Type: raftpb.MsgReadIndex, Entries: testEntries}
- if !reflect.DeepEqual(r1.msgs[1], readIndxMsg3) {
- t.Fatalf("r1.msgs[1] expected %+v, got %+v", readIndxMsg3, r1.msgs[1])
- }
-}
-
-// TestNodeProposeConfig ensures that node.ProposeConfChange sends the given configuration proposal
-// to the underlying raft.
-func TestNodeProposeConfig(t *testing.T) {
- msgs := []raftpb.Message{}
- appendStep := func(r *raft, m raftpb.Message) error {
- msgs = append(msgs, m)
- return nil
- }
-
- s := newTestMemoryStorage(withPeers(1))
- rn := newTestRawNode(1, 10, 1, s)
- n := newNode(rn)
- r := rn.raft
- go n.run()
- n.Campaign(context.TODO())
- for {
- rd := <-n.Ready()
- s.Append(rd.Entries)
- // change the step function to appendStep until this raft becomes leader
- if rd.SoftState.Lead == r.id {
- r.step = appendStep
- n.Advance()
- break
- }
- n.Advance()
- }
- cc := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 1}
- ccdata, err := cc.Marshal()
- if err != nil {
- t.Fatal(err)
- }
- n.ProposeConfChange(context.TODO(), cc)
- n.Stop()
-
- if len(msgs) != 1 {
- t.Fatalf("len(msgs) = %d, want %d", len(msgs), 1)
- }
- if msgs[0].Type != raftpb.MsgProp {
- t.Errorf("msg type = %d, want %d", msgs[0].Type, raftpb.MsgProp)
- }
- if !bytes.Equal(msgs[0].Entries[0].Data, ccdata) {
- t.Errorf("data = %v, want %v", msgs[0].Entries[0].Data, ccdata)
- }
-}
-
- // TestNodeProposeAddDuplicateNode ensures that proposing to add the same node twice
- // does not affect a later proposal to add a new node.
-func TestNodeProposeAddDuplicateNode(t *testing.T) {
- s := newTestMemoryStorage(withPeers(1))
- rn := newTestRawNode(1, 10, 1, s)
- n := newNode(rn)
- go n.run()
- n.Campaign(context.TODO())
- rdyEntries := make([]raftpb.Entry, 0)
- ticker := time.NewTicker(time.Millisecond * 100)
- defer ticker.Stop()
- done := make(chan struct{})
- stop := make(chan struct{})
- applyConfChan := make(chan struct{})
-
- go func() {
- defer close(done)
- for {
- select {
- case <-stop:
- return
- case <-ticker.C:
- n.Tick()
- case rd := <-n.Ready():
- s.Append(rd.Entries)
- applied := false
- for _, e := range rd.Entries {
- rdyEntries = append(rdyEntries, e)
- switch e.Type {
- case raftpb.EntryNormal:
- case raftpb.EntryConfChange:
- var cc raftpb.ConfChange
- cc.Unmarshal(e.Data)
- n.ApplyConfChange(cc)
- applied = true
- }
- }
- n.Advance()
- if applied {
- applyConfChan <- struct{}{}
- }
- }
- }
- }()
-
- cc1 := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 1}
- ccdata1, _ := cc1.Marshal()
- n.ProposeConfChange(context.TODO(), cc1)
- <-applyConfChan
-
- // try add the same node again
- n.ProposeConfChange(context.TODO(), cc1)
- <-applyConfChan
-
- // the new node join should be ok
- cc2 := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 2}
- ccdata2, _ := cc2.Marshal()
- n.ProposeConfChange(context.TODO(), cc2)
- <-applyConfChan
-
- close(stop)
- <-done
-
- if len(rdyEntries) != 4 {
- t.Errorf("len(entry) = %d, want %d, %v\n", len(rdyEntries), 4, rdyEntries)
- }
- if !bytes.Equal(rdyEntries[1].Data, ccdata1) {
- t.Errorf("data = %v, want %v", rdyEntries[1].Data, ccdata1)
- }
- if !bytes.Equal(rdyEntries[3].Data, ccdata2) {
- t.Errorf("data = %v, want %v", rdyEntries[3].Data, ccdata2)
- }
- n.Stop()
-}
-
- // TestBlockProposal ensures that a node blocks proposals when it does not
- // know who the current leader is, and accepts proposals once it does.
-func TestBlockProposal(t *testing.T) {
- rn := newTestRawNode(1, 10, 1, newTestMemoryStorage(withPeers(1)))
- n := newNode(rn)
- go n.run()
- defer n.Stop()
-
- errc := make(chan error, 1)
- go func() {
- errc <- n.Propose(context.TODO(), []byte("somedata"))
- }()
-
- testutil.WaitSchedule()
- select {
- case err := <-errc:
- t.Errorf("err = %v, want blocking", err)
- default:
- }
-
- n.Campaign(context.TODO())
- select {
- case err := <-errc:
- if err != nil {
- t.Errorf("err = %v, want %v", err, nil)
- }
- case <-time.After(10 * time.Second):
- t.Errorf("blocking proposal, want unblocking")
- }
-}
-
-func TestNodeProposeWaitDropped(t *testing.T) {
- msgs := []raftpb.Message{}
- droppingMsg := []byte("test_dropping")
- dropStep := func(r *raft, m raftpb.Message) error {
- if m.Type == raftpb.MsgProp && strings.Contains(m.String(), string(droppingMsg)) {
- t.Logf("dropping message: %v", m.String())
- return ErrProposalDropped
- }
- msgs = append(msgs, m)
- return nil
- }
-
- s := newTestMemoryStorage(withPeers(1))
- rn := newTestRawNode(1, 10, 1, s)
- n := newNode(rn)
- r := rn.raft
- go n.run()
- n.Campaign(context.TODO())
- for {
- rd := <-n.Ready()
- s.Append(rd.Entries)
- // change the step function to dropStep once this raft becomes leader
- if rd.SoftState.Lead == r.id {
- r.step = dropStep
- n.Advance()
- break
- }
- n.Advance()
- }
- proposalTimeout := time.Millisecond * 100
- ctx, cancel := context.WithTimeout(context.Background(), proposalTimeout)
- // propose with cancel should be cancelled early if dropped
- err := n.Propose(ctx, droppingMsg)
- if err != ErrProposalDropped {
- t.Errorf("should drop proposal : %v", err)
- }
- cancel()
-
- n.Stop()
- if len(msgs) != 0 {
- t.Fatalf("len(msgs) = %d, want %d", len(msgs), 1)
- }
-}
-
-// TestNodeTick ensures that node.Tick() will increase the
- // electionElapsed of the underlying raft state machine.
-func TestNodeTick(t *testing.T) {
- s := newTestMemoryStorage(withPeers(1))
- rn := newTestRawNode(1, 10, 1, s)
- n := newNode(rn)
- r := rn.raft
- go n.run()
- elapsed := r.electionElapsed
- n.Tick()
-
- for len(n.tickc) != 0 {
- time.Sleep(100 * time.Millisecond)
- }
-
- n.Stop()
- if r.electionElapsed != elapsed+1 {
- t.Errorf("elapsed = %d, want %d", r.electionElapsed, elapsed+1)
- }
-}
-
-// TestNodeStop ensures that node.Stop() blocks until the node has stopped
-// processing, and that it is idempotent
-func TestNodeStop(t *testing.T) {
- rn := newTestRawNode(1, 10, 1, newTestMemoryStorage(withPeers(1)))
- n := newNode(rn)
- donec := make(chan struct{})
-
- go func() {
- n.run()
- close(donec)
- }()
-
- status := n.Status()
- n.Stop()
-
- select {
- case <-donec:
- case <-time.After(time.Second):
- t.Fatalf("timed out waiting for node to stop!")
- }
-
- emptyStatus := Status{}
-
- if reflect.DeepEqual(status, emptyStatus) {
- t.Errorf("status = %v, want not empty", status)
- }
- // Further Status calls should return an empty Status, since the node is stopped.
- status = n.Status()
- if !reflect.DeepEqual(status, emptyStatus) {
- t.Errorf("status = %v, want empty", status)
- }
- // Subsequent Stops should have no effect.
- n.Stop()
-}
-
-func TestReadyContainUpdates(t *testing.T) {
- tests := []struct {
- rd Ready
- wcontain bool
- }{
- {Ready{}, false},
- {Ready{SoftState: &SoftState{Lead: 1}}, true},
- {Ready{HardState: raftpb.HardState{Vote: 1}}, true},
- {Ready{Entries: make([]raftpb.Entry, 1)}, true},
- {Ready{CommittedEntries: make([]raftpb.Entry, 1)}, true},
- {Ready{Messages: make([]raftpb.Message, 1)}, true},
- {Ready{Snapshot: raftpb.Snapshot{Metadata: raftpb.SnapshotMetadata{Index: 1}}}, true},
- }
-
- for i, tt := range tests {
- if g := tt.rd.containsUpdates(); g != tt.wcontain {
- t.Errorf("#%d: containUpdates = %v, want %v", i, g, tt.wcontain)
- }
- }
-}
-
-// TestNodeStart ensures that a node can be started correctly. The node should
-// start with correct configuration change entries, and can accept and commit
-// proposals.
-func TestNodeStart(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- cc := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 1}
- ccdata, err := cc.Marshal()
- if err != nil {
- t.Fatalf("unexpected marshal error: %v", err)
- }
- wants := []Ready{
- {
- HardState: raftpb.HardState{Term: 1, Commit: 1, Vote: 0},
- Entries: []raftpb.Entry{
- {Type: raftpb.EntryConfChange, Term: 1, Index: 1, Data: ccdata},
- },
- CommittedEntries: []raftpb.Entry{
- {Type: raftpb.EntryConfChange, Term: 1, Index: 1, Data: ccdata},
- },
- MustSync: true,
- },
- {
- HardState: raftpb.HardState{Term: 2, Commit: 3, Vote: 1},
- Entries: []raftpb.Entry{{Term: 2, Index: 3, Data: []byte("foo")}},
- CommittedEntries: []raftpb.Entry{{Term: 2, Index: 3, Data: []byte("foo")}},
- MustSync: true,
- },
- }
- storage := NewMemoryStorage()
- c := &Config{
- ID: 1,
- ElectionTick: 10,
- HeartbeatTick: 1,
- Storage: storage,
- MaxSizePerMsg: noLimit,
- MaxInflightMsgs: 256,
- }
- n := StartNode(c, []Peer{{ID: 1}})
- defer n.Stop()
- g := <-n.Ready()
- if !reflect.DeepEqual(g, wants[0]) {
- t.Fatalf("#%d: g = %+v,\n w %+v", 1, g, wants[0])
- } else {
- storage.Append(g.Entries)
- n.Advance()
- }
-
- if err := n.Campaign(ctx); err != nil {
- t.Fatal(err)
- }
- rd := <-n.Ready()
- storage.Append(rd.Entries)
- n.Advance()
-
- n.Propose(ctx, []byte("foo"))
- if g2 := <-n.Ready(); !reflect.DeepEqual(g2, wants[1]) {
- t.Errorf("#%d: g = %+v,\n w %+v", 2, g2, wants[1])
- } else {
- storage.Append(g2.Entries)
- n.Advance()
- }
-
- select {
- case rd := <-n.Ready():
- t.Errorf("unexpected Ready: %+v", rd)
- case <-time.After(time.Millisecond):
- }
-}
-
-func TestNodeRestart(t *testing.T) {
- entries := []raftpb.Entry{
- {Term: 1, Index: 1},
- {Term: 1, Index: 2, Data: []byte("foo")},
- }
- st := raftpb.HardState{Term: 1, Commit: 1}
-
- want := Ready{
- // No HardState is emitted because there was no change.
- HardState: raftpb.HardState{},
- // commit up to the commit index in st
- CommittedEntries: entries[:st.Commit],
- // MustSync is false because no HardState or new entries are provided.
- MustSync: false,
- }
-
- storage := NewMemoryStorage()
- storage.SetHardState(st)
- storage.Append(entries)
- c := &Config{
- ID: 1,
- ElectionTick: 10,
- HeartbeatTick: 1,
- Storage: storage,
- MaxSizePerMsg: noLimit,
- MaxInflightMsgs: 256,
- }
- n := RestartNode(c)
- defer n.Stop()
- if g := <-n.Ready(); !reflect.DeepEqual(g, want) {
- t.Errorf("g = %+v,\n w %+v", g, want)
- }
- n.Advance()
-
- select {
- case rd := <-n.Ready():
- t.Errorf("unexpected Ready: %+v", rd)
- case <-time.After(time.Millisecond):
- }
-}
-
-func TestNodeRestartFromSnapshot(t *testing.T) {
- snap := raftpb.Snapshot{
- Metadata: raftpb.SnapshotMetadata{
- ConfState: raftpb.ConfState{Voters: []uint64{1, 2}},
- Index: 2,
- Term: 1,
- },
- }
- entries := []raftpb.Entry{
- {Term: 1, Index: 3, Data: []byte("foo")},
- }
- st := raftpb.HardState{Term: 1, Commit: 3}
-
- want := Ready{
- // No HardState is emitted because nothing changed relative to what is
- // already persisted.
- HardState: raftpb.HardState{},
- // commit up to the commit index in st
- CommittedEntries: entries,
- // MustSync is only true when there is a new HardState or new entries;
- // neither is the case here.
- MustSync: false,
- }
-
- s := NewMemoryStorage()
- s.SetHardState(st)
- s.ApplySnapshot(snap)
- s.Append(entries)
- c := &Config{
- ID: 1,
- ElectionTick: 10,
- HeartbeatTick: 1,
- Storage: s,
- MaxSizePerMsg: noLimit,
- MaxInflightMsgs: 256,
- }
- n := RestartNode(c)
- defer n.Stop()
- if g := <-n.Ready(); !reflect.DeepEqual(g, want) {
- t.Errorf("g = %+v,\n w %+v", g, want)
- } else {
- n.Advance()
- }
-
- select {
- case rd := <-n.Ready():
- t.Errorf("unexpected Ready: %+v", rd)
- case <-time.After(time.Millisecond):
- }
-}
-
-func TestNodeAdvance(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- storage := NewMemoryStorage()
- c := &Config{
- ID: 1,
- ElectionTick: 10,
- HeartbeatTick: 1,
- Storage: storage,
- MaxSizePerMsg: noLimit,
- MaxInflightMsgs: 256,
- }
- n := StartNode(c, []Peer{{ID: 1}})
- defer n.Stop()
- rd := <-n.Ready()
- storage.Append(rd.Entries)
- n.Advance()
-
- n.Campaign(ctx)
- <-n.Ready()
-
- n.Propose(ctx, []byte("foo"))
- select {
- case rd = <-n.Ready():
- t.Fatalf("unexpected Ready before Advance: %+v", rd)
- case <-time.After(time.Millisecond):
- }
- storage.Append(rd.Entries)
- n.Advance()
- select {
- case <-n.Ready():
- case <-time.After(100 * time.Millisecond):
- t.Errorf("expect Ready after Advance, but there is no Ready available")
- }
-}
-
-func TestSoftStateEqual(t *testing.T) {
- tests := []struct {
- st *SoftState
- we bool
- }{
- {&SoftState{}, true},
- {&SoftState{Lead: 1}, false},
- {&SoftState{RaftState: StateLeader}, false},
- }
- for i, tt := range tests {
- if g := tt.st.equal(&SoftState{}); g != tt.we {
- t.Errorf("#%d, equal = %v, want %v", i, g, tt.we)
- }
- }
-}
-
-func TestIsHardStateEqual(t *testing.T) {
- tests := []struct {
- st raftpb.HardState
- we bool
- }{
- {emptyState, true},
- {raftpb.HardState{Vote: 1}, false},
- {raftpb.HardState{Commit: 1}, false},
- {raftpb.HardState{Term: 1}, false},
- }
-
- for i, tt := range tests {
- if isHardStateEqual(tt.st, emptyState) != tt.we {
- t.Errorf("#%d, equal = %v, want %v", i, isHardStateEqual(tt.st, emptyState), tt.we)
- }
- }
-}
-
-func TestNodeProposeAddLearnerNode(t *testing.T) {
- ticker := time.NewTicker(time.Millisecond * 100)
- defer ticker.Stop()
- s := newTestMemoryStorage(withPeers(1))
- rn := newTestRawNode(1, 10, 1, s)
- n := newNode(rn)
- go n.run()
- n.Campaign(context.TODO())
- stop := make(chan struct{})
- done := make(chan struct{})
- applyConfChan := make(chan struct{})
- go func() {
- defer close(done)
- for {
- select {
- case <-stop:
- return
- case <-ticker.C:
- n.Tick()
- case rd := <-n.Ready():
- s.Append(rd.Entries)
- t.Logf("raft: %v", rd.Entries)
- for _, ent := range rd.Entries {
- if ent.Type != raftpb.EntryConfChange {
- continue
- }
- var cc raftpb.ConfChange
- cc.Unmarshal(ent.Data)
- state := n.ApplyConfChange(cc)
- if len(state.Learners) == 0 ||
- state.Learners[0] != cc.NodeID ||
- cc.NodeID != 2 {
- t.Errorf("apply conf change should return new added learner: %v", state.String())
- }
-
- if len(state.Voters) != 1 {
- t.Errorf("add learner should not change the nodes: %v", state.String())
- }
- t.Logf("apply raft conf %v changed to: %v", cc, state.String())
- applyConfChan <- struct{}{}
- }
- n.Advance()
- }
- }
- }()
- cc := raftpb.ConfChange{Type: raftpb.ConfChangeAddLearnerNode, NodeID: 2}
- n.ProposeConfChange(context.TODO(), cc)
- <-applyConfChan
- close(stop)
- <-done
-}
-
-func TestAppendPagination(t *testing.T) {
- const maxSizePerMsg = 2048
- n := newNetworkWithConfig(func(c *Config) {
- c.MaxSizePerMsg = maxSizePerMsg
- }, nil, nil, nil)
-
- seenFullMessage := false
- // Inspect all messages to see that we never exceed the limit, but
- // we do see messages larger than half the limit.
- n.msgHook = func(m raftpb.Message) bool {
- if m.Type == raftpb.MsgApp {
- size := 0
- for _, e := range m.Entries {
- size += len(e.Data)
- }
- if size > maxSizePerMsg {
- t.Errorf("sent MsgApp that is too large: %d bytes", size)
- }
- if size > maxSizePerMsg/2 {
- seenFullMessage = true
- }
- }
- return true
- }
-
- n.send(raftpb.Message{From: 1, To: 1, Type: raftpb.MsgHup})
-
- // Partition the network while we make our proposals. This forces
- // the entries to be batched into larger messages.
- n.isolate(1)
- blob := []byte(strings.Repeat("a", 1000))
- for i := 0; i < 5; i++ {
- n.send(raftpb.Message{From: 1, To: 1, Type: raftpb.MsgProp, Entries: []raftpb.Entry{{Data: blob}}})
- }
- n.recover()
-
- // After the partition recovers, tick the clock to wake everything
- // back up and send the messages.
- n.send(raftpb.Message{From: 1, To: 1, Type: raftpb.MsgBeat})
- if !seenFullMessage {
- t.Error("didn't see any messages more than half the max size; something is wrong with this test")
- }
-}
-
-func TestCommitPagination(t *testing.T) {
- s := newTestMemoryStorage(withPeers(1))
- cfg := newTestConfig(1, 10, 1, s)
- cfg.MaxCommittedSizePerReady = 2048
- rn, err := NewRawNode(cfg)
- if err != nil {
- t.Fatal(err)
- }
- n := newNode(rn)
- go n.run()
- n.Campaign(context.TODO())
-
- rd := readyWithTimeout(&n)
- if len(rd.CommittedEntries) != 1 {
- t.Fatalf("expected 1 (empty) entry, got %d", len(rd.CommittedEntries))
- }
- s.Append(rd.Entries)
- n.Advance()
-
- blob := []byte(strings.Repeat("a", 1000))
- for i := 0; i < 3; i++ {
- if err := n.Propose(context.TODO(), blob); err != nil {
- t.Fatal(err)
- }
- }
-
- // The 3 proposals will commit in two batches.
- rd = readyWithTimeout(&n)
- if len(rd.CommittedEntries) != 2 {
- t.Fatalf("expected 2 entries in first batch, got %d", len(rd.CommittedEntries))
- }
- s.Append(rd.Entries)
- n.Advance()
- rd = readyWithTimeout(&n)
- if len(rd.CommittedEntries) != 1 {
- t.Fatalf("expected 1 entry in second batch, got %d", len(rd.CommittedEntries))
- }
- s.Append(rd.Entries)
- n.Advance()
-}
-
-type ignoreSizeHintMemStorage struct {
- *MemoryStorage
-}
-
-func (s *ignoreSizeHintMemStorage) Entries(lo, hi uint64, maxSize uint64) ([]raftpb.Entry, error) {
- return s.MemoryStorage.Entries(lo, hi, math.MaxUint64)
-}
-
-// TestNodeCommitPaginationAfterRestart regression tests a scenario in which the
-// Storage's Entries size limitation is slightly more permissive than Raft's
-// internal one. The original bug was the following:
-//
-// - node learns that index 11 (or 100, doesn't matter) is committed
-// - nextEnts returns index 1..10 in CommittedEntries due to size limiting. However,
-// index 10 already exceeds maxBytes, due to a user-provided impl of Entries.
-// - Commit index gets bumped to 10
-// - the node persists the HardState, but crashes before applying the entries
-// - upon restart, the storage returns the same entries, but `slice` takes a different code path
-// (since it is now called with an upper bound of 10) and removes the last entry.
-// - Raft emits a HardState with a regressing commit index.
-//
-// A simpler version of this test would have the storage return far fewer entries than dictated
-// by maxSize (for example, exactly one entry) after the restart, resulting in a larger regression.
-// This wouldn't need to exploit anything about Raft-internal code paths to fail.
-func TestNodeCommitPaginationAfterRestart(t *testing.T) {
- s := &ignoreSizeHintMemStorage{
- MemoryStorage: newTestMemoryStorage(withPeers(1)),
- }
- persistedHardState := raftpb.HardState{
- Term: 1,
- Vote: 1,
- Commit: 10,
- }
-
- s.hardState = persistedHardState
- s.ents = make([]raftpb.Entry, 10)
- var size uint64
- for i := range s.ents {
- ent := raftpb.Entry{
- Term: 1,
- Index: uint64(i + 1),
- Type: raftpb.EntryNormal,
- Data: []byte("a"),
- }
-
- s.ents[i] = ent
- size += uint64(ent.Size())
- }
-
- cfg := newTestConfig(1, 10, 1, s)
- // Set a MaxSizePerMsg that would suggest to Raft that the last committed entry should
- // not be included in the initial rd.CommittedEntries. However, our storage will ignore
- // this and *will* return it (which is how the Commit index ended up being 10 initially).
- cfg.MaxSizePerMsg = size - uint64(s.ents[len(s.ents)-1].Size()) - 1
-
- rn, err := NewRawNode(cfg)
- if err != nil {
- t.Fatal(err)
- }
- n := newNode(rn)
- go n.run()
- defer n.Stop()
-
- rd := readyWithTimeout(&n)
- if !IsEmptyHardState(rd.HardState) && rd.HardState.Commit < persistedHardState.Commit {
- t.Errorf("HardState regressed: Commit %d -> %d\nCommitting:\n%+v",
- persistedHardState.Commit, rd.HardState.Commit,
- DescribeEntries(rd.CommittedEntries, func(data []byte) string { return fmt.Sprintf("%q", data) }),
- )
- }
-}
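The invariant exercised above also matters to applications consuming Ready: the Commit field of a non-empty HardState must never move backwards between Ready cycles. Below is a minimal, illustrative guard under that assumption, using the public raft/raftpb packages; checkCommitMonotonic is a hypothetical helper, not part of the raft API.

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/v3"
	"go.etcd.io/etcd/raft/v3/raftpb"
)

// checkCommitMonotonic is a hypothetical application-side guard: a non-empty
// HardState handed out by Ready must never carry a Commit index lower than
// the one already persisted.
func checkCommitMonotonic(prev, next raftpb.HardState) error {
	if raft.IsEmptyHardState(next) {
		return nil // nothing to persist in this cycle
	}
	if next.Commit < prev.Commit {
		return fmt.Errorf("HardState regressed: Commit %d -> %d", prev.Commit, next.Commit)
	}
	return nil
}

func main() {
	prev := raftpb.HardState{Term: 1, Vote: 1, Commit: 10}
	next := raftpb.HardState{Term: 1, Vote: 1, Commit: 9}
	fmt.Println(checkCommitMonotonic(prev, next)) // reports the regression
}
```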
diff --git a/raft/quorum/bench_test.go b/raft/quorum/bench_test.go
deleted file mode 100644
index 5c7961ed6cf..00000000000
--- a/raft/quorum/bench_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package quorum
-
-import (
- "fmt"
- "math"
- "math/rand"
- "testing"
-)
-
-func BenchmarkMajorityConfig_CommittedIndex(b *testing.B) {
- // go test -run - -bench . -benchmem ./raft/quorum
- for _, n := range []int{1, 3, 5, 7, 9, 11} {
- b.Run(fmt.Sprintf("voters=%d", n), func(b *testing.B) {
- c := MajorityConfig{}
- l := mapAckIndexer{}
- for i := uint64(0); i < uint64(n); i++ {
- c[i+1] = struct{}{}
- l[i+1] = Index(rand.Int63n(math.MaxInt64))
- }
-
- for i := 0; i < b.N; i++ {
- _ = c.CommittedIndex(l)
- }
- })
- }
-}
diff --git a/raft/quorum/datadriven_test.go b/raft/quorum/datadriven_test.go
deleted file mode 100644
index b40eaa76c69..00000000000
--- a/raft/quorum/datadriven_test.go
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package quorum
-
-import (
- "fmt"
- "strings"
- "testing"
-
- "github.com/cockroachdb/datadriven"
-)
-
-// TestDataDriven parses and executes the test cases in ./testdata/*. An entry
-// in such a file specifies the command, which is either of "committed" to check
-// CommittedIndex or "vote" to verify a VoteResult. The underlying configuration
-// and inputs are specified via the arguments 'cfg' and 'cfgj' (for the majority
-// config and, optionally, majority config joint to the first one) and 'idx'
-// (for CommittedIndex) and 'votes' (for VoteResult).
-//
-// Internally, the harness runs some additional checks on each test case for
-// which it is known that the result shouldn't change. For example,
-// interchanging the majority configurations of a joint quorum must not
-// influence the result; if it does, this is noted in the test's output.
-func TestDataDriven(t *testing.T) {
- datadriven.Walk(t, "testdata", func(t *testing.T, path string) {
- datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string {
- // Two majority configs. The first one is always used (though it may
- // be empty) and the second one is used iff joint is true.
- var joint bool
- var ids, idsj []uint64
- // The committed indexes for the nodes in the config in the order in
- // which they appear in (ids,idsj), without repetition. An underscore
- // denotes an omission (i.e. no information for this voter); this is
- // different from 0. For example,
- //
- // cfg=(1,2) cfgj=(2,3,4) idxs=(_,5,_,7) initializes the idx for voter 2
- // to 5 and that for voter 4 to 7 (and no others).
- //
- // cfgj=zero is specified to instruct the test harness to treat cfgj
- // as zero instead of not specified (i.e. it will trigger a joint
- // quorum test instead of a majority quorum test for cfg only).
- var idxs []Index
- // Votes. These are initialized similar to idxs except the only values
- // used are 1 (voted against) and 2 (voted for). This looks awkward,
- // but is convenient because it allows sharing code between the two.
- var votes []Index
-
- // Parse the args.
- for _, arg := range d.CmdArgs {
- for i := range arg.Vals {
- switch arg.Key {
- case "cfg":
- var n uint64
- arg.Scan(t, i, &n)
- ids = append(ids, n)
- case "cfgj":
- joint = true
- if arg.Vals[i] == "zero" {
- if len(arg.Vals) != 1 {
- t.Fatalf("cannot mix 'zero' into configuration")
- }
- } else {
- var n uint64
- arg.Scan(t, i, &n)
- idsj = append(idsj, n)
- }
- case "idx":
- var n uint64
- // Register placeholders as zeroes.
- if arg.Vals[i] != "_" {
- arg.Scan(t, i, &n)
- if n == 0 {
- // This is a restriction caused by the above
- // special-casing for _.
- t.Fatalf("cannot use 0 as idx")
- }
- }
- idxs = append(idxs, Index(n))
- case "votes":
- var s string
- arg.Scan(t, i, &s)
- switch s {
- case "y":
- votes = append(votes, 2)
- case "n":
- votes = append(votes, 1)
- case "_":
- votes = append(votes, 0)
- default:
- t.Fatalf("unknown vote: %s", s)
- }
- default:
- t.Fatalf("unknown arg %s", arg.Key)
- }
- }
- }
-
- // Build the two majority configs.
- c := MajorityConfig{}
- for _, id := range ids {
- c[id] = struct{}{}
- }
- cj := MajorityConfig{}
- for _, id := range idsj {
- cj[id] = struct{}{}
- }
-
- // Helper that returns an AckedIndexer which has the specified indexes
- // mapped to the right IDs.
- makeLookuper := func(idxs []Index, ids, idsj []uint64) mapAckIndexer {
- l := mapAckIndexer{}
- var p int // next to consume from idxs
- for _, id := range append(append([]uint64(nil), ids...), idsj...) {
- if _, ok := l[id]; ok {
- continue
- }
- if p < len(idxs) {
- // NB: this creates zero entries for placeholders that we remove later.
- // The upshot of doing it that way is to avoid having to specify place-
- // holders multiple times when omitting voters present in both halves of
- // a joint config.
- l[id] = idxs[p]
- p++
- }
- }
-
- for id := range l {
- // Zero entries are created by _ placeholders; we don't want
- // them in the lookuper because "no entry" is different from
- // "zero entry". Note that we prevent tests from specifying
- // zero commit indexes, so that there's no confusion between
- // the two concepts.
- if l[id] == 0 {
- delete(l, id)
- }
- }
- return l
- }
-
- {
- input := idxs
- if d.Cmd == "vote" {
- input = votes
- }
- if voters := JointConfig([2]MajorityConfig{c, cj}).IDs(); len(voters) != len(input) {
- return fmt.Sprintf("error: mismatched input (explicit or _) for voters %v: %v",
- voters, input)
- }
- }
-
- var buf strings.Builder
- switch d.Cmd {
- case "committed":
- l := makeLookuper(idxs, ids, idsj)
-
- // Branch based on whether this is a majority or joint quorum
- // test case.
- if !joint {
- idx := c.CommittedIndex(l)
- fmt.Fprint(&buf, c.Describe(l))
- // These alternative computations should return the same
- // result. If not, print to the output.
- if aIdx := alternativeMajorityCommittedIndex(c, l); aIdx != idx {
- fmt.Fprintf(&buf, "%s <-- via alternative computation\n", aIdx)
- }
- // Joining a majority with the empty majority should give same result.
- if aIdx := JointConfig([2]MajorityConfig{c, {}}).CommittedIndex(l); aIdx != idx {
- fmt.Fprintf(&buf, "%s <-- via zero-joint quorum\n", aIdx)
- }
- // Joining a majority with itself should give same result.
- if aIdx := JointConfig([2]MajorityConfig{c, c}).CommittedIndex(l); aIdx != idx {
- fmt.Fprintf(&buf, "%s <-- via self-joint quorum\n", aIdx)
- }
- overlay := func(c MajorityConfig, l AckedIndexer, id uint64, idx Index) AckedIndexer {
- ll := mapAckIndexer{}
- for iid := range c {
- if iid == id {
- ll[iid] = idx
- } else if idx, ok := l.AckedIndex(iid); ok {
- ll[iid] = idx
- }
- }
- return ll
- }
- for id := range c {
- iidx, _ := l.AckedIndex(id)
- if idx > iidx && iidx > 0 {
- // If the committed index was definitely above the currently
- // inspected idx, the result shouldn't change if we lower it
- // further.
- lo := overlay(c, l, id, iidx-1)
- if aIdx := c.CommittedIndex(lo); aIdx != idx {
- fmt.Fprintf(&buf, "%s <-- overlaying %d->%d", aIdx, id, iidx)
- }
- lo = overlay(c, l, id, 0)
- if aIdx := c.CommittedIndex(lo); aIdx != idx {
- fmt.Fprintf(&buf, "%s <-- overlaying %d->0", aIdx, id)
- }
- }
- }
- fmt.Fprintf(&buf, "%s\n", idx)
- } else {
- cc := JointConfig([2]MajorityConfig{c, cj})
- fmt.Fprint(&buf, cc.Describe(l))
- idx := cc.CommittedIndex(l)
- // Interchanging the majorities shouldn't make a difference. If it does, print.
- if aIdx := JointConfig([2]MajorityConfig{cj, c}).CommittedIndex(l); aIdx != idx {
- fmt.Fprintf(&buf, "%s <-- via symmetry\n", aIdx)
- }
- fmt.Fprintf(&buf, "%s\n", idx)
- }
- case "vote":
- ll := makeLookuper(votes, ids, idsj)
- l := map[uint64]bool{}
- for id, v := range ll {
- l[id] = v != 1 // NB: 1 == false, 2 == true
- }
-
- if !joint {
- // Test a majority quorum.
- r := c.VoteResult(l)
- fmt.Fprintf(&buf, "%v\n", r)
- } else {
- // Run a joint quorum test case.
- r := JointConfig([2]MajorityConfig{c, cj}).VoteResult(l)
- // Interchanging the majorities shouldn't make a difference. If it does, print.
- if ar := JointConfig([2]MajorityConfig{cj, c}).VoteResult(l); ar != r {
- fmt.Fprintf(&buf, "%v <-- via symmetry\n", ar)
- }
- fmt.Fprintf(&buf, "%v\n", r)
- }
- default:
- t.Fatalf("unknown command: %s", d.Cmd)
- }
- return buf.String()
- })
- })
-}
diff --git a/raft/quorum/joint.go b/raft/quorum/joint.go
deleted file mode 100644
index e3741e0b0a9..00000000000
--- a/raft/quorum/joint.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package quorum
-
-// JointConfig is a configuration of two groups of (possibly overlapping)
-// majority configurations. Decisions require the support of both majorities.
-type JointConfig [2]MajorityConfig
-
-func (c JointConfig) String() string {
- if len(c[1]) > 0 {
- return c[0].String() + "&&" + c[1].String()
- }
- return c[0].String()
-}
-
-// IDs returns a newly initialized map representing the set of voters present
-// in the joint configuration.
-func (c JointConfig) IDs() map[uint64]struct{} {
- m := map[uint64]struct{}{}
- for _, cc := range c {
- for id := range cc {
- m[id] = struct{}{}
- }
- }
- return m
-}
-
-// Describe returns a (multi-line) representation of the commit indexes for the
-// given lookuper.
-func (c JointConfig) Describe(l AckedIndexer) string {
- return MajorityConfig(c.IDs()).Describe(l)
-}
-
-// CommittedIndex returns the largest committed index for the given joint
-// quorum. An index is jointly committed if it is committed in both constituent
-// majorities.
-func (c JointConfig) CommittedIndex(l AckedIndexer) Index {
- idx0 := c[0].CommittedIndex(l)
- idx1 := c[1].CommittedIndex(l)
- if idx0 < idx1 {
- return idx0
- }
- return idx1
-}
-
-// VoteResult takes a mapping of voters to yes/no (true/false) votes and returns
-// a result indicating whether the vote is pending, lost, or won. A joint quorum
-// requires both majority quorums to vote in favor.
-func (c JointConfig) VoteResult(votes map[uint64]bool) VoteResult {
- r1 := c[0].VoteResult(votes)
- r2 := c[1].VoteResult(votes)
-
- if r1 == r2 {
- // If they agree, return the agreed state.
- return r1
- }
- if r1 == VoteLost || r2 == VoteLost {
- // If either config has lost, loss is the only possible outcome.
- return VoteLost
- }
- // One side won, the other one is pending, so the whole outcome is.
- return VotePending
-}
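Because a joint decision is just the conjunction of its two halves (the minimum committed index, and a vote that both majorities must win), a short sketch of the exported API above may help. It assumes the quorum package's exported types as defined in this diff; ackMap is a hypothetical stand-in for the package's unexported mapAckIndexer, and the module path follows the imports used elsewhere in this tree.

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/v3/quorum"
)

// ackMap is a tiny AckedIndexer backed by a plain map.
type ackMap map[uint64]quorum.Index

func (m ackMap) AckedIndex(id uint64) (quorum.Index, bool) {
	idx, ok := m[id]
	return idx, ok
}

func main() {
	// Joint config: outgoing majority {1,2,3} and incoming majority {3,4,5}.
	cOld := quorum.MajorityConfig{1: {}, 2: {}, 3: {}}
	cNew := quorum.MajorityConfig{3: {}, 4: {}, 5: {}}
	jc := quorum.JointConfig{cOld, cNew}

	acked := ackMap{1: 100, 2: 100, 3: 90, 4: 80, 5: 80}
	// cOld commits 100, cNew commits 80; the joint index is the smaller one.
	fmt.Println(jc.CommittedIndex(acked)) // 80

	// A joint vote is won only when both majorities have a quorum of "yes".
	votes := map[uint64]bool{1: true, 2: true, 3: true, 4: false}
	fmt.Println(jc.VoteResult(votes)) // VotePending: voter 5 can still decide the new half
}
```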
diff --git a/raft/quorum/majority.go b/raft/quorum/majority.go
deleted file mode 100644
index 8858a36b634..00000000000
--- a/raft/quorum/majority.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package quorum
-
-import (
- "fmt"
- "math"
- "sort"
- "strings"
-)
-
-// MajorityConfig is a set of IDs that uses majority quorums to make decisions.
-type MajorityConfig map[uint64]struct{}
-
-func (c MajorityConfig) String() string {
- sl := make([]uint64, 0, len(c))
- for id := range c {
- sl = append(sl, id)
- }
- sort.Slice(sl, func(i, j int) bool { return sl[i] < sl[j] })
- var buf strings.Builder
- buf.WriteByte('(')
- for i := range sl {
- if i > 0 {
- buf.WriteByte(' ')
- }
- fmt.Fprint(&buf, sl[i])
- }
- buf.WriteByte(')')
- return buf.String()
-}
-
-// Describe returns a (multi-line) representation of the commit indexes for the
-// given lookuper.
-func (c MajorityConfig) Describe(l AckedIndexer) string {
- if len(c) == 0 {
- return ""
- }
- type tup struct {
- id uint64
- idx Index
- ok bool // idx found?
- bar int // length of bar displayed for this tup
- }
-
- // Below, populate .bar so that the i-th largest commit index has bar i (we
- // plot this as sort of a progress bar). The actual code is a bit more
- // complicated and also makes sure that equal index => equal bar.
-
- n := len(c)
- info := make([]tup, 0, n)
- for id := range c {
- idx, ok := l.AckedIndex(id)
- info = append(info, tup{id: id, idx: idx, ok: ok})
- }
-
- // Sort by index
- sort.Slice(info, func(i, j int) bool {
- if info[i].idx == info[j].idx {
- return info[i].id < info[j].id
- }
- return info[i].idx < info[j].idx
- })
-
- // Populate .bar.
- for i := range info {
- if i > 0 && info[i-1].idx < info[i].idx {
- info[i].bar = i
- }
- }
-
- // Sort by ID.
- sort.Slice(info, func(i, j int) bool {
- return info[i].id < info[j].id
- })
-
- var buf strings.Builder
-
- // Print.
- fmt.Fprint(&buf, strings.Repeat(" ", n)+" idx\n")
- for i := range info {
- bar := info[i].bar
- if !info[i].ok {
- fmt.Fprint(&buf, "?"+strings.Repeat(" ", n))
- } else {
- fmt.Fprint(&buf, strings.Repeat("x", bar)+">"+strings.Repeat(" ", n-bar))
- }
- fmt.Fprintf(&buf, " %5d (id=%d)\n", info[i].idx, info[i].id)
- }
- return buf.String()
-}
-
-// Slice returns the MajorityConfig as a sorted slice.
-func (c MajorityConfig) Slice() []uint64 {
- var sl []uint64
- for id := range c {
- sl = append(sl, id)
- }
- sort.Slice(sl, func(i, j int) bool { return sl[i] < sl[j] })
- return sl
-}
-
-func insertionSort(sl []uint64) {
- a, b := 0, len(sl)
- for i := a + 1; i < b; i++ {
- for j := i; j > a && sl[j] < sl[j-1]; j-- {
- sl[j], sl[j-1] = sl[j-1], sl[j]
- }
- }
-}
-
-// CommittedIndex computes the committed index from those supplied via the
-// provided AckedIndexer (for the active config).
-func (c MajorityConfig) CommittedIndex(l AckedIndexer) Index {
- n := len(c)
- if n == 0 {
- // This plays well with joint quorums which, when one half is the zero
- // MajorityConfig, should behave like the other half.
- return math.MaxUint64
- }
-
- // Use an on-stack slice to collect the committed indexes when n <= 7
- // (otherwise we alloc). The alternative is to stash a slice on
- // MajorityConfig, but this impairs usability (as is, MajorityConfig is just
- // a map, and that's nice). The assumption is that running with a
- // replication factor of >7 is rare, and in cases in which it happens
- // performance is a lesser concern (additionally the performance
- // implications of an allocation here are far from drastic).
- var stk [7]uint64
- var srt []uint64
- if len(stk) >= n {
- srt = stk[:n]
- } else {
- srt = make([]uint64, n)
- }
-
- {
- // Fill the slice with the indexes observed. Any unused slots will be
- // left as zero; these correspond to voters that may report in, but
- // haven't yet. We fill from the right (since the zeroes will end up on
- // the left after sorting below anyway).
- i := n - 1
- for id := range c {
- if idx, ok := l.AckedIndex(id); ok {
- srt[i] = uint64(idx)
- i--
- }
- }
- }
-
- // Sort by index. Use a bespoke algorithm (copied from the stdlib's sort
- // package) to keep srt on the stack.
- insertionSort(srt)
-
- // The smallest index into the array for which the value is acked by a
- // quorum. In other words, from the end of the slice, move n/2+1 to the
- // left (accounting for zero-indexing).
- pos := n - (n/2 + 1)
- return Index(srt[pos])
-}
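Put differently, with n voters the committed index is the (n/2+1)-th largest acked index, where voters that have not reported count as zero. A dependency-free sketch of just that rule (illustrative only, not the implementation above):

```go
package main

import (
	"fmt"
	"sort"
)

// committedIndex returns the (n/2+1)-th largest value of acked, treating a
// zero as "this voter hasn't reported yet".
func committedIndex(acked []uint64) uint64 {
	n := len(acked)
	srt := append([]uint64(nil), acked...)
	sort.Slice(srt, func(i, j int) bool { return srt[i] < srt[j] })
	// From the end of the sorted slice, step n/2+1 entries to the left.
	return srt[n-(n/2+1)]
}

func main() {
	// Three voters acked 12 and 5; the third hasn't reported. A majority
	// (two of three) has at least index 5, so 5 is committed.
	fmt.Println(committedIndex([]uint64{12, 5, 0})) // 5
}
```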
-
-// VoteResult takes a mapping of voters to yes/no (true/false) votes and returns
-// a result indicating whether the vote is pending (i.e. neither a quorum of
-// yes/no has been reached), won (a quorum of yes has been reached), or lost (a
-// quorum of no has been reached).
-func (c MajorityConfig) VoteResult(votes map[uint64]bool) VoteResult {
- if len(c) == 0 {
-		// By convention, an election on an empty config wins. This comes in
- // handy with joint quorums because it'll make a half-populated joint
- // quorum behave like a majority quorum.
- return VoteWon
- }
-
- ny := [2]int{} // vote counts for no and yes, respectively
-
- var missing int
- for id := range c {
- v, ok := votes[id]
- if !ok {
- missing++
- continue
- }
- if v {
- ny[1]++
- } else {
- ny[0]++
- }
- }
-
- q := len(c)/2 + 1
- if ny[1] >= q {
- return VoteWon
- }
- if ny[1]+missing >= q {
- return VotePending
- }
- return VoteLost
-}
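The counting above reduces to a simple rule: the vote is won once "yes" reaches a majority, lost once a "yes" majority has become impossible, and pending otherwise. A minimal standalone sketch of that rule (names are illustrative):

```go
package main

import "fmt"

// voteResult mirrors the quorum rule: won once "yes" reaches a majority,
// lost once a "yes" majority is impossible, pending otherwise.
func voteResult(voters, yes, no int) string {
	q := voters/2 + 1
	missing := voters - yes - no
	switch {
	case yes >= q:
		return "VoteWon"
	case yes+missing >= q:
		return "VotePending"
	default:
		return "VoteLost"
	}
}

func main() {
	fmt.Println(voteResult(3, 2, 0)) // VoteWon: 2 of 3 voted yes
	fmt.Println(voteResult(3, 1, 1)) // VotePending: the remaining vote decides
	fmt.Println(voteResult(5, 1, 3)) // VoteLost: "yes" can reach at most 2 of the required 3
}
```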
diff --git a/raft/quorum/quick_test.go b/raft/quorum/quick_test.go
deleted file mode 100644
index d838b54f8c3..00000000000
--- a/raft/quorum/quick_test.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package quorum
-
-import (
- "math"
- "math/rand"
- "reflect"
- "testing"
- "testing/quick"
-)
-
-// TestQuick uses quickcheck to heuristically assert that the main
-// implementation of (MajorityConfig).CommittedIndex agrees with a "dumb"
-// alternative version.
-func TestQuick(t *testing.T) {
- cfg := &quick.Config{
- MaxCount: 50000,
- }
-
- t.Run("majority_commit", func(t *testing.T) {
- fn1 := func(c memberMap, l idxMap) uint64 {
- return uint64(MajorityConfig(c).CommittedIndex(mapAckIndexer(l)))
- }
- fn2 := func(c memberMap, l idxMap) uint64 {
- return uint64(alternativeMajorityCommittedIndex(MajorityConfig(c), mapAckIndexer(l)))
- }
- if err := quick.CheckEqual(fn1, fn2, cfg); err != nil {
- t.Fatal(err)
- }
- })
-}
-
-// smallRandIdxMap returns a reasonably sized map of ids to commit indexes.
-func smallRandIdxMap(rand *rand.Rand, _ int) map[uint64]Index {
- // Hard-code a reasonably small size here (quick will hard-code 50, which
- // is not useful here).
- size := 10
-
- n := rand.Intn(size)
- ids := rand.Perm(2 * n)[:n]
- idxs := make([]int, len(ids))
- for i := range idxs {
- idxs[i] = rand.Intn(n)
- }
-
- m := map[uint64]Index{}
- for i := range ids {
- m[uint64(ids[i])] = Index(idxs[i])
- }
- return m
-}
-
-type idxMap map[uint64]Index
-
-func (idxMap) Generate(rand *rand.Rand, size int) reflect.Value {
- m := smallRandIdxMap(rand, size)
- return reflect.ValueOf(m)
-}
-
-type memberMap map[uint64]struct{}
-
-func (memberMap) Generate(rand *rand.Rand, size int) reflect.Value {
- m := smallRandIdxMap(rand, size)
- mm := map[uint64]struct{}{}
- for id := range m {
- mm[id] = struct{}{}
- }
- return reflect.ValueOf(mm)
-}
-
-// This is an alternative implementation of (MajorityConfig).CommittedIndex(l).
-func alternativeMajorityCommittedIndex(c MajorityConfig, l AckedIndexer) Index {
- if len(c) == 0 {
- return math.MaxUint64
- }
-
- idToIdx := map[uint64]Index{}
- for id := range c {
- if idx, ok := l.AckedIndex(id); ok {
- idToIdx[id] = idx
- }
- }
-
- // Build a map from index to voters who have acked that or any higher index.
- idxToVotes := map[Index]int{}
- for _, idx := range idToIdx {
- idxToVotes[idx] = 0
- }
-
- for _, idx := range idToIdx {
- for idy := range idxToVotes {
- if idy > idx {
- continue
- }
- idxToVotes[idy]++
- }
- }
-
- // Find the maximum index that has achieved quorum.
- q := len(c)/2 + 1
- var maxQuorumIdx Index
- for idx, n := range idxToVotes {
- if n >= q && idx > maxQuorumIdx {
- maxQuorumIdx = idx
- }
- }
-
- return maxQuorumIdx
-}
diff --git a/raft/quorum/quorum.go b/raft/quorum/quorum.go
deleted file mode 100644
index 2899e46c96d..00000000000
--- a/raft/quorum/quorum.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package quorum
-
-import (
- "math"
- "strconv"
-)
-
-// Index is a Raft log position.
-type Index uint64
-
-func (i Index) String() string {
- if i == math.MaxUint64 {
-		return "∞"
- }
- return strconv.FormatUint(uint64(i), 10)
-}
-
-// AckedIndexer allows looking up a commit index for a given ID of a voter
-// from a corresponding MajorityConfig.
-type AckedIndexer interface {
- AckedIndex(voterID uint64) (idx Index, found bool)
-}
-
-type mapAckIndexer map[uint64]Index
-
-func (m mapAckIndexer) AckedIndex(id uint64) (Index, bool) {
- idx, ok := m[id]
- return idx, ok
-}
-
-// VoteResult indicates the outcome of a vote.
-//
-//go:generate stringer -type=VoteResult
-type VoteResult uint8
-
-const (
- // VotePending indicates that the decision of the vote depends on future
-	// votes, i.e. neither "yes" nor "no" has reached quorum yet.
- VotePending VoteResult = 1 + iota
- // VoteLost indicates that the quorum has voted "no".
- VoteLost
- // VoteWon indicates that the quorum has voted "yes".
- VoteWon
-)
diff --git a/raft/quorum/testdata/joint_commit.txt b/raft/quorum/testdata/joint_commit.txt
deleted file mode 100644
index 12f19fb331c..00000000000
--- a/raft/quorum/testdata/joint_commit.txt
+++ /dev/null
@@ -1,481 +0,0 @@
-# No difference between a simple majority quorum and a simple majority quorum
-# joint with an empty majority quorum. (This is asserted for all datadriven tests
-# by the framework, so we don't dwell on it more).
-#
-# Note that by specifying cfgj explicitly we tell the test harness to treat the
-# input as a joint quorum and not a majority quorum. If we didn't specify
-# cfgj=zero the test would pass just the same, but it wouldn't be exercising the
-# joint quorum path.
-committed cfg=(1,2,3) cfgj=zero idx=(100,101,99)
-----
- idx
-x> 100 (id=1)
-xx> 101 (id=2)
-> 99 (id=3)
-100
-
-# Joint nonoverlapping singleton quorums.
-
-committed cfg=(1) cfgj=(2) idx=(_,_)
-----
- idx
-? 0 (id=1)
-? 0 (id=2)
-0
-
-# Voter 1 has 100 committed, 2 nothing. This means we definitely won't commit
-# past 100.
-committed cfg=(1) cfgj=(2) idx=(100,_)
-----
- idx
-x> 100 (id=1)
-? 0 (id=2)
-0
-
-# Committed index collapses once both majorities do, to the lower index.
-committed cfg=(1) cfgj=(2) idx=(13, 100)
-----
- idx
-> 13 (id=1)
-x> 100 (id=2)
-13
-
-# Joint overlapping (i.e. identical) singleton quorum.
-
-committed cfg=(1) cfgj=(1) idx=(_)
-----
- idx
-? 0 (id=1)
-0
-
-committed cfg=(1) cfgj=(1) idx=(100)
-----
- idx
-> 100 (id=1)
-100
-
-
-
-# Two-node config joint with non-overlapping single node config
-committed cfg=(1,3) cfgj=(2) idx=(_,_,_)
-----
- idx
-? 0 (id=1)
-? 0 (id=2)
-? 0 (id=3)
-0
-
-committed cfg=(1,3) cfgj=(2) idx=(100,_,_)
-----
- idx
-xx> 100 (id=1)
-? 0 (id=2)
-? 0 (id=3)
-0
-
-# 1 has 100 committed, 2 has 50 (collapsing half of the joint quorum to 50).
-committed cfg=(1,3) cfgj=(2) idx=(100,_,50)
-----
- idx
-xx> 100 (id=1)
-x> 50 (id=2)
-? 0 (id=3)
-0
-
-# 2 reports 45, collapsing the other half (to 45).
-committed cfg=(1,3) cfgj=(2) idx=(100,45,50)
-----
- idx
-xx> 100 (id=1)
-x> 50 (id=2)
-> 45 (id=3)
-45
-
-# Two-node config with overlapping single-node config.
-
-committed cfg=(1,2) cfgj=(2) idx=(_,_)
-----
- idx
-? 0 (id=1)
-? 0 (id=2)
-0
-
-# 1 reports 100.
-committed cfg=(1,2) cfgj=(2) idx=(100,_)
-----
- idx
-x> 100 (id=1)
-? 0 (id=2)
-0
-
-# 2 reports 100.
-committed cfg=(1,2) cfgj=(2) idx=(_,100)
-----
- idx
-? 0 (id=1)
-x> 100 (id=2)
-0
-
-committed cfg=(1,2) cfgj=(2) idx=(50,100)
-----
- idx
-> 50 (id=1)
-x> 100 (id=2)
-50
-
-committed cfg=(1,2) cfgj=(2) idx=(100,50)
-----
- idx
-x> 100 (id=1)
-> 50 (id=2)
-50
-
-
-
-# Joint non-overlapping two-node configs.
-
-committed cfg=(1,2) cfgj=(3,4) idx=(50,_,_,_)
-----
- idx
-xxx> 50 (id=1)
-? 0 (id=2)
-? 0 (id=3)
-? 0 (id=4)
-0
-
-committed cfg=(1,2) cfgj=(3,4) idx=(50,_,49,_)
-----
- idx
-xxx> 50 (id=1)
-? 0 (id=2)
-xx> 49 (id=3)
-? 0 (id=4)
-0
-
-committed cfg=(1,2) cfgj=(3,4) idx=(50,48,49,_)
-----
- idx
-xxx> 50 (id=1)
-x> 48 (id=2)
-xx> 49 (id=3)
-? 0 (id=4)
-0
-
-committed cfg=(1,2) cfgj=(3,4) idx=(50,48,49,47)
-----
- idx
-xxx> 50 (id=1)
-x> 48 (id=2)
-xx> 49 (id=3)
-> 47 (id=4)
-47
-
-# Joint overlapping two-node configs.
-committed cfg=(1,2) cfgj=(2,3) idx=(_,_,_)
-----
- idx
-? 0 (id=1)
-? 0 (id=2)
-? 0 (id=3)
-0
-
-committed cfg=(1,2) cfgj=(2,3) idx=(100,_,_)
-----
- idx
-xx> 100 (id=1)
-? 0 (id=2)
-? 0 (id=3)
-0
-
-committed cfg=(1,2) cfgj=(2,3) idx=(_,100,_)
-----
- idx
-? 0 (id=1)
-xx> 100 (id=2)
-? 0 (id=3)
-0
-
-committed cfg=(1,2) cfgj=(2,3) idx=(_,100,99)
-----
- idx
-? 0 (id=1)
-xx> 100 (id=2)
-x> 99 (id=3)
-0
-
-committed cfg=(1,2) cfgj=(2,3) idx=(101,100,99)
-----
- idx
-xx> 101 (id=1)
-x> 100 (id=2)
-> 99 (id=3)
-99
-
-# Joint identical two-node configs.
-committed cfg=(1,2) cfgj=(1,2) idx=(_,_)
-----
- idx
-? 0 (id=1)
-? 0 (id=2)
-0
-
-committed cfg=(1,2) cfgj=(1,2) idx=(_,40)
-----
- idx
-? 0 (id=1)
-x> 40 (id=2)
-0
-
-committed cfg=(1,2) cfgj=(1,2) idx=(41,40)
-----
- idx
-x> 41 (id=1)
-> 40 (id=2)
-40
-
-
-
-# Joint disjoint three-node configs.
-
-committed cfg=(1,2,3) cfgj=(4,5,6) idx=(_,_,_,_,_,_)
-----
- idx
-? 0 (id=1)
-? 0 (id=2)
-? 0 (id=3)
-? 0 (id=4)
-? 0 (id=5)
-? 0 (id=6)
-0
-
-committed cfg=(1,2,3) cfgj=(4,5,6) idx=(100,_,_,_,_,_)
-----
- idx
-xxxxx> 100 (id=1)
-? 0 (id=2)
-? 0 (id=3)
-? 0 (id=4)
-? 0 (id=5)
-? 0 (id=6)
-0
-
-committed cfg=(1,2,3) cfgj=(4,5,6) idx=(100,_,_,90,_,_)
-----
- idx
-xxxxx> 100 (id=1)
-? 0 (id=2)
-? 0 (id=3)
-xxxx> 90 (id=4)
-? 0 (id=5)
-? 0 (id=6)
-0
-
-committed cfg=(1,2,3) cfgj=(4,5,6) idx=(100,99,_,_,_,_)
-----
- idx
-xxxxx> 100 (id=1)
-xxxx> 99 (id=2)
-? 0 (id=3)
-? 0 (id=4)
-? 0 (id=5)
-? 0 (id=6)
-0
-
-# First quorum <= 99, second one <= 97. Both quorums guarantee that 90 is
-# committed.
-committed cfg=(1,2,3) cfgj=(4,5,6) idx=(_,99,90,97,95,_)
-----
- idx
-? 0 (id=1)
-xxxxx> 99 (id=2)
-xx> 90 (id=3)
-xxxx> 97 (id=4)
-xxx> 95 (id=5)
-? 0 (id=6)
-90
-
-# First quorum collapsed to 92. Second one already had at least 95 committed,
-# so the result also collapses.
-committed cfg=(1,2,3) cfgj=(4,5,6) idx=(92,99,90,97,95,_)
-----
- idx
-xx> 92 (id=1)
-xxxxx> 99 (id=2)
-x> 90 (id=3)
-xxxx> 97 (id=4)
-xxx> 95 (id=5)
-? 0 (id=6)
-92
-
-# Second quorum collapses, but nothing changes in the output.
-committed cfg=(1,2,3) cfgj=(4,5,6) idx=(92,99,90,97,95,77)
-----
- idx
-xx> 92 (id=1)
-xxxxx> 99 (id=2)
-x> 90 (id=3)
-xxxx> 97 (id=4)
-xxx> 95 (id=5)
-> 77 (id=6)
-92
-
-
-# Joint overlapping three-node configs.
-
-committed cfg=(1,2,3) cfgj=(1,4,5) idx=(_,_,_,_,_)
-----
- idx
-? 0 (id=1)
-? 0 (id=2)
-? 0 (id=3)
-? 0 (id=4)
-? 0 (id=5)
-0
-
-committed cfg=(1,2,3) cfgj=(1,4,5) idx=(100,_,_,_,_)
-----
- idx
-xxxx> 100 (id=1)
-? 0 (id=2)
-? 0 (id=3)
-? 0 (id=4)
-? 0 (id=5)
-0
-
-committed cfg=(1,2,3) cfgj=(1,4,5) idx=(100,101,_,_,_)
-----
- idx
-xxx> 100 (id=1)
-xxxx> 101 (id=2)
-? 0 (id=3)
-? 0 (id=4)
-? 0 (id=5)
-0
-
-committed cfg=(1,2,3) cfgj=(1,4,5) idx=(100,101,100,_,_)
-----
- idx
-xx> 100 (id=1)
-xxxx> 101 (id=2)
-> 100 (id=3)
-? 0 (id=4)
-? 0 (id=5)
-0
-
-# Second quorum could commit either 98 or 99, but first quorum is open.
-committed cfg=(1,2,3) cfgj=(1,4,5) idx=(_,100,_,99,98)
-----
- idx
-? 0 (id=1)
-xxxx> 100 (id=2)
-? 0 (id=3)
-xxx> 99 (id=4)
-xx> 98 (id=5)
-0
-
-# Additionally, first quorum can commit either 100 or 99
-committed cfg=(1,2,3) cfgj=(1,4,5) idx=(_,100,99,99,98)
-----
- idx
-? 0 (id=1)
-xxxx> 100 (id=2)
-xx> 99 (id=3)
-> 99 (id=4)
-x> 98 (id=5)
-98
-
-committed cfg=(1,2,3) cfgj=(1,4,5) idx=(1,100,99,99,98)
-----
- idx
-> 1 (id=1)
-xxxx> 100 (id=2)
-xx> 99 (id=3)
-> 99 (id=4)
-x> 98 (id=5)
-98
-
-committed cfg=(1,2,3) cfgj=(1,4,5) idx=(100,100,99,99,98)
-----
- idx
-xxx> 100 (id=1)
-> 100 (id=2)
-x> 99 (id=3)
-> 99 (id=4)
-> 98 (id=5)
-99
-
-
-# More overlap.
-
-committed cfg=(1,2,3) cfgj=(2,3,4) idx=(_,_,_,_)
-----
- idx
-? 0 (id=1)
-? 0 (id=2)
-? 0 (id=3)
-? 0 (id=4)
-0
-
-committed cfg=(1,2,3) cfgj=(2,3,4) idx=(_,100,99,_)
-----
- idx
-? 0 (id=1)
-xxx> 100 (id=2)
-xx> 99 (id=3)
-? 0 (id=4)
-99
-
-committed cfg=(1,2,3) cfgj=(2,3,4) idx=(98,100,99,_)
-----
- idx
-x> 98 (id=1)
-xxx> 100 (id=2)
-xx> 99 (id=3)
-? 0 (id=4)
-99
-
-committed cfg=(1,2,3) cfgj=(2,3,4) idx=(100,100,99,_)
-----
- idx
-xx> 100 (id=1)
-> 100 (id=2)
-x> 99 (id=3)
-? 0 (id=4)
-99
-
-committed cfg=(1,2,3) cfgj=(2,3,4) idx=(100,100,99,98)
-----
- idx
-xx> 100 (id=1)
-> 100 (id=2)
-x> 99 (id=3)
-> 98 (id=4)
-99
-
-committed cfg=(1,2,3) cfgj=(2,3,4) idx=(100,_,_,101)
-----
- idx
-xx> 100 (id=1)
-? 0 (id=2)
-? 0 (id=3)
-xxx> 101 (id=4)
-0
-
-committed cfg=(1,2,3) cfgj=(2,3,4) idx=(100,99,_,101)
-----
- idx
-xx> 100 (id=1)
-x> 99 (id=2)
-? 0 (id=3)
-xxx> 101 (id=4)
-99
-
-# Identical. This is also exercised in the test harness, so it's listed here
-# only briefly.
-committed cfg=(1,2,3) cfgj=(1,2,3) idx=(50,45,_)
-----
- idx
-xx> 50 (id=1)
-x> 45 (id=2)
-? 0 (id=3)
-45
diff --git a/raft/quorum/testdata/joint_vote.txt b/raft/quorum/testdata/joint_vote.txt
deleted file mode 100644
index 36cd0cabcff..00000000000
--- a/raft/quorum/testdata/joint_vote.txt
+++ /dev/null
@@ -1,165 +0,0 @@
-# Empty joint config wins all votes. This isn't used in production. Note that
-# by specifying cfgj explicitly we tell the test harness to treat the input as
-# a joint quorum and not a majority quorum.
-vote cfgj=zero
-----
-VoteWon
-
-# More examples with close to trivial configs.
-
-vote cfg=(1) cfgj=zero votes=(_)
-----
-VotePending
-
-vote cfg=(1) cfgj=zero votes=(y)
-----
-VoteWon
-
-vote cfg=(1) cfgj=zero votes=(n)
-----
-VoteLost
-
-vote cfg=(1) cfgj=(1) votes=(_)
-----
-VotePending
-
-vote cfg=(1) cfgj=(1) votes=(y)
-----
-VoteWon
-
-vote cfg=(1) cfgj=(1) votes=(n)
-----
-VoteLost
-
-vote cfg=(1) cfgj=(2) votes=(_,_)
-----
-VotePending
-
-vote cfg=(1) cfgj=(2) votes=(y,_)
-----
-VotePending
-
-vote cfg=(1) cfgj=(2) votes=(y,y)
-----
-VoteWon
-
-vote cfg=(1) cfgj=(2) votes=(y,n)
-----
-VoteLost
-
-vote cfg=(1) cfgj=(2) votes=(n,_)
-----
-VoteLost
-
-vote cfg=(1) cfgj=(2) votes=(n,n)
-----
-VoteLost
-
-vote cfg=(1) cfgj=(2) votes=(n,y)
-----
-VoteLost
-
-# Two node configs.
-
-vote cfg=(1,2) cfgj=(3,4) votes=(_,_,_,_)
-----
-VotePending
-
-vote cfg=(1,2) cfgj=(3,4) votes=(y,_,_,_)
-----
-VotePending
-
-vote cfg=(1,2) cfgj=(3,4) votes=(y,y,_,_)
-----
-VotePending
-
-vote cfg=(1,2) cfgj=(3,4) votes=(y,y,n,_)
-----
-VoteLost
-
-vote cfg=(1,2) cfgj=(3,4) votes=(y,y,n,n)
-----
-VoteLost
-
-vote cfg=(1,2) cfgj=(3,4) votes=(y,y,y,n)
-----
-VoteLost
-
-vote cfg=(1,2) cfgj=(3,4) votes=(y,y,y,y)
-----
-VoteWon
-
-vote cfg=(1,2) cfgj=(2,3) votes=(_,_,_)
-----
-VotePending
-
-vote cfg=(1,2) cfgj=(2,3) votes=(_,n,_)
-----
-VoteLost
-
-vote cfg=(1,2) cfgj=(2,3) votes=(y,y,_)
-----
-VotePending
-
-vote cfg=(1,2) cfgj=(2,3) votes=(y,y,n)
-----
-VoteLost
-
-vote cfg=(1,2) cfgj=(2,3) votes=(y,y,y)
-----
-VoteWon
-
-vote cfg=(1,2) cfgj=(1,2) votes=(_,_)
-----
-VotePending
-
-vote cfg=(1,2) cfgj=(1,2) votes=(y,_)
-----
-VotePending
-
-vote cfg=(1,2) cfgj=(1,2) votes=(y,n)
-----
-VoteLost
-
-vote cfg=(1,2) cfgj=(1,2) votes=(n,_)
-----
-VoteLost
-
-vote cfg=(1,2) cfgj=(1,2) votes=(n,n)
-----
-VoteLost
-
-
-# Simple example for overlapping three node configs.
-
-vote cfg=(1,2,3) cfgj=(2,3,4) votes=(_,_,_,_)
-----
-VotePending
-
-vote cfg=(1,2,3) cfgj=(2,3,4) votes=(_,n,_,_)
-----
-VotePending
-
-vote cfg=(1,2,3) cfgj=(2,3,4) votes=(_,n,n,_)
-----
-VoteLost
-
-vote cfg=(1,2,3) cfgj=(2,3,4) votes=(_,y,y,_)
-----
-VoteWon
-
-vote cfg=(1,2,3) cfgj=(2,3,4) votes=(y,y,_,_)
-----
-VotePending
-
-vote cfg=(1,2,3) cfgj=(2,3,4) votes=(y,y,n,_)
-----
-VotePending
-
-vote cfg=(1,2,3) cfgj=(2,3,4) votes=(y,y,n,n)
-----
-VoteLost
-
-vote cfg=(1,2,3) cfgj=(2,3,4) votes=(y,y,n,y)
-----
-VoteWon
diff --git a/raft/quorum/testdata/majority_commit.txt b/raft/quorum/testdata/majority_commit.txt
deleted file mode 100644
index 6ff5d0b89e0..00000000000
--- a/raft/quorum/testdata/majority_commit.txt
+++ /dev/null
@@ -1,153 +0,0 @@
-# The empty quorum commits "everything". This is useful when it appears as one
-# half of a joint quorum.
-committed
-----
-∞
-
-
-
-# A single voter quorum is not final when no index is known.
-committed cfg=(1) idx=(_)
-----
- idx
-? 0 (id=1)
-0
-
-# When an index is known, that's the committed index, and that's final.
-committed cfg=(1) idx=(12)
-----
- idx
-> 12 (id=1)
-12
-
-
-
-
-# With two nodes, start out similarly.
-committed cfg=(1, 2) idx=(_,_)
-----
- idx
-? 0 (id=1)
-? 0 (id=2)
-0
-
-# The first committed index becomes known (for n1). Nothing changes in the
-# output because idx=12 is not known to be on a quorum (which is both nodes).
-committed cfg=(1, 2) idx=(12,_)
-----
- idx
-x> 12 (id=1)
-? 0 (id=2)
-0
-
-# The second index comes in and finalizes the decision. The result will be the
-# smaller of the two indexes.
-committed cfg=(1,2) idx=(12,5)
-----
- idx
-x> 12 (id=1)
-> 5 (id=2)
-5
-
-
-
-
-# No surprises for three nodes.
-committed cfg=(1,2,3) idx=(_,_,_)
-----
- idx
-? 0 (id=1)
-? 0 (id=2)
-? 0 (id=3)
-0
-
-committed cfg=(1,2,3) idx=(12,_,_)
-----
- idx
-xx> 12 (id=1)
-? 0 (id=2)
-? 0 (id=3)
-0
-
-# We see a committed index, but a higher committed index for the last pending
-# vote could change (increment) the outcome, so it is not final yet.
-committed cfg=(1,2,3) idx=(12,5,_)
-----
- idx
-xx> 12 (id=1)
-x> 5 (id=2)
-? 0 (id=3)
-5
-
-# a) the case in which it does:
-committed cfg=(1,2,3) idx=(12,5,6)
-----
- idx
-xx> 12 (id=1)
-> 5 (id=2)
-x> 6 (id=3)
-6
-
-# b) the case in which it does not:
-committed cfg=(1,2,3) idx=(12,5,4)
-----
- idx
-xx> 12 (id=1)
-x> 5 (id=2)
-> 4 (id=3)
-5
-
-# c) a different case in which the last index is pending but it has no chance of
-# swaying the outcome (because nobody in the current quorum agrees on anything
-# higher than the candidate):
-committed cfg=(1,2,3) idx=(5,5,_)
-----
- idx
-x> 5 (id=1)
-> 5 (id=2)
-? 0 (id=3)
-5
-
-# c) continued: Doesn't matter what shows up last. The result is final.
-committed cfg=(1,2,3) idx=(5,5,12)
-----
- idx
-> 5 (id=1)
-> 5 (id=2)
-xx> 12 (id=3)
-5
-
-# With all committed idx known, the result is final.
-committed cfg=(1, 2, 3) idx=(100, 101, 103)
-----
- idx
-> 100 (id=1)
-x> 101 (id=2)
-xx> 103 (id=3)
-101
-
-
-
-# Some more complicated examples. Similar to case c) above. The result is
-# already final because no index higher than 103 is one short of quorum.
-committed cfg=(1, 2, 3, 4, 5) idx=(101, 104, 103, 103,_)
-----
- idx
-x> 101 (id=1)
-xxxx> 104 (id=2)
-xx> 103 (id=3)
-> 103 (id=4)
-? 0 (id=5)
-103
-
-# A similar case which is not final because another vote for >= 103 would change
-# the outcome.
-committed cfg=(1, 2, 3, 4, 5) idx=(101, 102, 103, 103,_)
-----
- idx
-x> 101 (id=1)
-xx> 102 (id=2)
-xxx> 103 (id=3)
-> 103 (id=4)
-? 0 (id=5)
-102
diff --git a/raft/quorum/testdata/majority_vote.txt b/raft/quorum/testdata/majority_vote.txt
deleted file mode 100644
index 5f9564b4f51..00000000000
--- a/raft/quorum/testdata/majority_vote.txt
+++ /dev/null
@@ -1,97 +0,0 @@
-# The empty config always announces a won vote.
-vote
-----
-VoteWon
-
-vote cfg=(1) votes=(_)
-----
-VotePending
-
-vote cfg=(1) votes=(n)
-----
-VoteLost
-
-vote cfg=(123) votes=(y)
-----
-VoteWon
-
-
-
-
-vote cfg=(4,8) votes=(_,_)
-----
-VotePending
-
-# With two voters, a single rejection loses the vote.
-vote cfg=(4,8) votes=(n,_)
-----
-VoteLost
-
-vote cfg=(4,8) votes=(y,_)
-----
-VotePending
-
-vote cfg=(4,8) votes=(n,y)
-----
-VoteLost
-
-vote cfg=(4,8) votes=(y,y)
-----
-VoteWon
-
-
-
-vote cfg=(2,4,7) votes=(_,_,_)
-----
-VotePending
-
-vote cfg=(2,4,7) votes=(n,_,_)
-----
-VotePending
-
-vote cfg=(2,4,7) votes=(y,_,_)
-----
-VotePending
-
-vote cfg=(2,4,7) votes=(n,n,_)
-----
-VoteLost
-
-vote cfg=(2,4,7) votes=(y,n,_)
-----
-VotePending
-
-vote cfg=(2,4,7) votes=(y,y,_)
-----
-VoteWon
-
-vote cfg=(2,4,7) votes=(y,y,n)
-----
-VoteWon
-
-vote cfg=(2,4,7) votes=(n,y,n)
-----
-VoteLost
-
-
-
-# Test some random example with seven nodes (why not).
-vote cfg=(1,2,3,4,5,6,7) votes=(y,y,n,y,_,_,_)
-----
-VotePending
-
-vote cfg=(1,2,3,4,5,6,7) votes=(_,y,y,_,n,y,n)
-----
-VotePending
-
-vote cfg=(1,2,3,4,5,6,7) votes=(y,y,n,y,_,n,y)
-----
-VoteWon
-
-vote cfg=(1,2,3,4,5,6,7) votes=(y,y,_,n,y,n,n)
-----
-VotePending
-
-vote cfg=(1,2,3,4,5,6,7) votes=(y,y,n,y,n,n,n)
-----
-VoteLost
diff --git a/raft/quorum/voteresult_string.go b/raft/quorum/voteresult_string.go
deleted file mode 100644
index 9eca8fd0c96..00000000000
--- a/raft/quorum/voteresult_string.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by "stringer -type=VoteResult"; DO NOT EDIT.
-
-package quorum
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[VotePending-1]
- _ = x[VoteLost-2]
- _ = x[VoteWon-3]
-}
-
-const _VoteResult_name = "VotePendingVoteLostVoteWon"
-
-var _VoteResult_index = [...]uint8{0, 11, 19, 26}
-
-func (i VoteResult) String() string {
- i -= 1
- if i >= VoteResult(len(_VoteResult_index)-1) {
- return "VoteResult(" + strconv.FormatInt(int64(i+1), 10) + ")"
- }
- return _VoteResult_name[_VoteResult_index[i]:_VoteResult_index[i+1]]
-}
diff --git a/raft/raft.go b/raft/raft.go
deleted file mode 100644
index c80262ebaf0..00000000000
--- a/raft/raft.go
+++ /dev/null
@@ -1,1837 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "bytes"
- "errors"
- "fmt"
- "math"
- "math/rand"
- "sort"
- "strings"
- "sync"
- "time"
-
- "go.etcd.io/etcd/raft/v3/confchange"
- "go.etcd.io/etcd/raft/v3/quorum"
- pb "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/raft/v3/tracker"
-)
-
-// None is a placeholder node ID used when there is no leader.
-const None uint64 = 0
-const noLimit = math.MaxUint64
-
-// Possible values for StateType.
-const (
- StateFollower StateType = iota
- StateCandidate
- StateLeader
- StatePreCandidate
- numStates
-)
-
-type ReadOnlyOption int
-
-const (
- // ReadOnlySafe guarantees the linearizability of the read only request by
- // communicating with the quorum. It is the default and suggested option.
- ReadOnlySafe ReadOnlyOption = iota
- // ReadOnlyLeaseBased ensures linearizability of the read only request by
- // relying on the leader lease. It can be affected by clock drift.
- // If the clock drift is unbounded, leader might keep the lease longer than it
- // should (clock can move backward/pause without any bound). ReadIndex is not safe
- // in that case.
- ReadOnlyLeaseBased
-)
-
-// Possible values for CampaignType
-const (
- // campaignPreElection represents the first phase of a normal election when
- // Config.PreVote is true.
- campaignPreElection CampaignType = "CampaignPreElection"
- // campaignElection represents a normal (time-based) election (the second phase
- // of the election when Config.PreVote is true).
- campaignElection CampaignType = "CampaignElection"
- // campaignTransfer represents the type of leader transfer
- campaignTransfer CampaignType = "CampaignTransfer"
-)
-
-// ErrProposalDropped is returned when the proposal is ignored in some cases,
-// so that the proposer can be notified and fail fast.
-var ErrProposalDropped = errors.New("raft proposal dropped")
-
-// lockedRand is a small wrapper around rand.Rand to provide
-// synchronization among multiple raft groups. Only the methods needed
-// by the code are exposed (e.g. Intn).
-type lockedRand struct {
- mu sync.Mutex
- rand *rand.Rand
-}
-
-func (r *lockedRand) Intn(n int) int {
- r.mu.Lock()
- v := r.rand.Intn(n)
- r.mu.Unlock()
- return v
-}
-
-var globalRand = &lockedRand{
- rand: rand.New(rand.NewSource(time.Now().UnixNano())),
-}
-
-// CampaignType represents the type of campaigning.
-// The reason we use a string type instead of uint64
-// is that it is simpler to compare and fill in raft entries.
-type CampaignType string
-
-// StateType represents the role of a node in a cluster.
-type StateType uint64
-
-var stmap = [...]string{
- "StateFollower",
- "StateCandidate",
- "StateLeader",
- "StatePreCandidate",
-}
-
-func (st StateType) String() string {
- return stmap[uint64(st)]
-}
-
-// Config contains the parameters to start a raft.
-type Config struct {
- // ID is the identity of the local raft. ID cannot be 0.
- ID uint64
-
- // ElectionTick is the number of Node.Tick invocations that must pass between
- // elections. That is, if a follower does not receive any message from the
- // leader of current term before ElectionTick has elapsed, it will become
- // candidate and start an election. ElectionTick must be greater than
- // HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
- // unnecessary leader switching.
- ElectionTick int
- // HeartbeatTick is the number of Node.Tick invocations that must pass between
- // heartbeats. That is, a leader sends heartbeat messages to maintain its
- // leadership every HeartbeatTick ticks.
- HeartbeatTick int
-
- // Storage is the storage for raft. raft generates entries and states to be
- // stored in storage. raft reads the persisted entries and states out of
-	// Storage when it needs them. raft reads the previous state and configuration
- // out of storage when restarting.
- Storage Storage
- // Applied is the last applied index. It should only be set when restarting
-	// raft. raft will not return entries to the application smaller than or equal
-	// to Applied. If Applied is unset when restarting, raft might return previously
-	// applied entries. This is a very application-dependent configuration.
- Applied uint64
-
- // MaxSizePerMsg limits the max byte size of each append message. Smaller
-	// value lowers the raft recovery cost (initial probing and message loss
-	// during normal operation). On the other hand, it might affect the
- // throughput during normal replication. Note: math.MaxUint64 for unlimited,
- // 0 for at most one entry per message.
- MaxSizePerMsg uint64
- // MaxCommittedSizePerReady limits the size of the committed entries which
- // can be applied.
- MaxCommittedSizePerReady uint64
- // MaxUncommittedEntriesSize limits the aggregate byte size of the
- // uncommitted entries that may be appended to a leader's log. Once this
- // limit is exceeded, proposals will begin to return ErrProposalDropped
- // errors. Note: 0 for no limit.
- MaxUncommittedEntriesSize uint64
- // MaxInflightMsgs limits the max number of in-flight append messages during
-	// the optimistic replication phase. The application transport layer usually
-	// has its own sending buffer over TCP/UDP. Set MaxInflightMsgs to avoid
- // overflowing that sending buffer. TODO (xiangli): feedback to application to
- // limit the proposal rate?
- MaxInflightMsgs int
-
- // CheckQuorum specifies if the leader should check quorum activity. Leader
- // steps down when quorum is not active for an electionTimeout.
- CheckQuorum bool
-
- // PreVote enables the Pre-Vote algorithm described in raft thesis section
- // 9.6. This prevents disruption when a node that has been partitioned away
- // rejoins the cluster.
- PreVote bool
-
- // ReadOnlyOption specifies how the read only request is processed.
- //
- // ReadOnlySafe guarantees the linearizability of the read only request by
- // communicating with the quorum. It is the default and suggested option.
- //
- // ReadOnlyLeaseBased ensures linearizability of the read only request by
- // relying on the leader lease. It can be affected by clock drift.
- // If the clock drift is unbounded, leader might keep the lease longer than it
- // should (clock can move backward/pause without any bound). ReadIndex is not safe
- // in that case.
- // CheckQuorum MUST be enabled if ReadOnlyOption is ReadOnlyLeaseBased.
- ReadOnlyOption ReadOnlyOption
-
-	// Logger is the logger used for the raft log. For a multi-node setup that can
-	// host multiple raft groups, each raft group can have its own logger.
- Logger Logger
-
- // DisableProposalForwarding set to true means that followers will drop
- // proposals, rather than forwarding them to the leader. One use case for
- // this feature would be in a situation where the Raft leader is used to
- // compute the data of a proposal, for example, adding a timestamp from a
- // hybrid logical clock to data in a monotonically increasing way. Forwarding
- // should be disabled to prevent a follower with an inaccurate hybrid
- // logical clock from assigning the timestamp and then forwarding the data
- // to the leader.
- DisableProposalForwarding bool
-}
-
-func (c *Config) validate() error {
- if c.ID == None {
- return errors.New("cannot use none as id")
- }
-
- if c.HeartbeatTick <= 0 {
- return errors.New("heartbeat tick must be greater than 0")
- }
-
- if c.ElectionTick <= c.HeartbeatTick {
- return errors.New("election tick must be greater than heartbeat tick")
- }
-
- if c.Storage == nil {
- return errors.New("storage cannot be nil")
- }
-
- if c.MaxUncommittedEntriesSize == 0 {
- c.MaxUncommittedEntriesSize = noLimit
- }
-
- // default MaxCommittedSizePerReady to MaxSizePerMsg because they were
- // previously the same parameter.
- if c.MaxCommittedSizePerReady == 0 {
- c.MaxCommittedSizePerReady = c.MaxSizePerMsg
- }
-
- if c.MaxInflightMsgs <= 0 {
- return errors.New("max inflight messages must be greater than 0")
- }
-
- if c.Logger == nil {
- c.Logger = getLogger()
- }
-
- if c.ReadOnlyOption == ReadOnlyLeaseBased && !c.CheckQuorum {
- return errors.New("CheckQuorum must be enabled when ReadOnlyOption is ReadOnlyLeaseBased")
- }
-
- return nil
-}
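Taken together, validate() spells out the minimum a caller must provide: a non-zero ID, ElectionTick greater than HeartbeatTick greater than zero, a Storage, a positive MaxInflightMsgs, and CheckQuorum whenever ReadOnlyLeaseBased is chosen. The sketch below shows a configuration that satisfies those rules, assuming the package's public MemoryStorage and StartNode API (removed from this tree by this change as raft moves to its own module):

```go
package main

import (
	"log"

	"go.etcd.io/etcd/raft/v3"
)

func main() {
	storage := raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              0x01,
		ElectionTick:    10, // suggested: 10 * HeartbeatTick to avoid needless leader churn
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   1 << 20, // cap each append message at ~1 MiB
		MaxInflightMsgs: 256,     // must be > 0
		// ReadOnlyOption: raft.ReadOnlyLeaseBased would also require CheckQuorum: true.
	}
	n := raft.StartNode(c, []raft.Peer{{ID: 0x01}})
	defer n.Stop()
	log.Println("started a single-node raft instance")
}
```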
-
-type raft struct {
- id uint64
-
- Term uint64
- Vote uint64
-
- readStates []ReadState
-
- // the log
- raftLog *raftLog
-
- maxMsgSize uint64
- maxUncommittedSize uint64
- // TODO(tbg): rename to trk.
- prs tracker.ProgressTracker
-
- state StateType
-
- // isLearner is true if the local raft node is a learner.
- isLearner bool
-
- msgs []pb.Message
-
- // the leader id
- lead uint64
-	// leadTransferee is the id of the leader transfer target when its value is not zero.
- // Follow the procedure defined in raft thesis 3.10.
- leadTransferee uint64
- // Only one conf change may be pending (in the log, but not yet
- // applied) at a time. This is enforced via pendingConfIndex, which
- // is set to a value >= the log index of the latest pending
- // configuration change (if any). Config changes are only allowed to
- // be proposed if the leader's applied index is greater than this
- // value.
- pendingConfIndex uint64
- // an estimate of the size of the uncommitted tail of the Raft log. Used to
- // prevent unbounded log growth. Only maintained by the leader. Reset on
- // term changes.
- uncommittedSize uint64
-
- readOnly *readOnly
-
- // number of ticks since it reached last electionTimeout when it is leader
- // or candidate.
- // number of ticks since it reached last electionTimeout or received a
- // valid message from current leader when it is a follower.
- electionElapsed int
-
- // number of ticks since it reached last heartbeatTimeout.
- // only leader keeps heartbeatElapsed.
- heartbeatElapsed int
-
- checkQuorum bool
- preVote bool
-
- heartbeatTimeout int
- electionTimeout int
- // randomizedElectionTimeout is a random number between
- // [electiontimeout, 2 * electiontimeout - 1]. It gets reset
- // when raft changes its state to follower or candidate.
- randomizedElectionTimeout int
- disableProposalForwarding bool
-
- tick func()
- step stepFunc
-
- logger Logger
-
- // pendingReadIndexMessages is used to store messages of type MsgReadIndex
-	// that can't be answered because the new leader hasn't committed any entry in
-	// the current term yet. They will be handled as soon as the first entry is
-	// committed in the current term.
- pendingReadIndexMessages []pb.Message
-}
-
-func newRaft(c *Config) *raft {
- if err := c.validate(); err != nil {
- panic(err.Error())
- }
- raftlog := newLogWithSize(c.Storage, c.Logger, c.MaxCommittedSizePerReady)
- hs, cs, err := c.Storage.InitialState()
- if err != nil {
- panic(err) // TODO(bdarnell)
- }
-
- r := &raft{
- id: c.ID,
- lead: None,
- isLearner: false,
- raftLog: raftlog,
- maxMsgSize: c.MaxSizePerMsg,
- maxUncommittedSize: c.MaxUncommittedEntriesSize,
- prs: tracker.MakeProgressTracker(c.MaxInflightMsgs),
- electionTimeout: c.ElectionTick,
- heartbeatTimeout: c.HeartbeatTick,
- logger: c.Logger,
- checkQuorum: c.CheckQuorum,
- preVote: c.PreVote,
- readOnly: newReadOnly(c.ReadOnlyOption),
- disableProposalForwarding: c.DisableProposalForwarding,
- }
-
- cfg, prs, err := confchange.Restore(confchange.Changer{
- Tracker: r.prs,
- LastIndex: raftlog.lastIndex(),
- }, cs)
- if err != nil {
- panic(err)
- }
- assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, prs))
-
- if !IsEmptyHardState(hs) {
- r.loadState(hs)
- }
- if c.Applied > 0 {
- raftlog.appliedTo(c.Applied)
- }
- r.becomeFollower(r.Term, None)
-
- var nodesStrs []string
- for _, n := range r.prs.VoterNodes() {
- nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n))
- }
-
- r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]",
- r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm())
- return r
-}
-
-func (r *raft) hasLeader() bool { return r.lead != None }
-
-func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} }
-
-func (r *raft) hardState() pb.HardState {
- return pb.HardState{
- Term: r.Term,
- Vote: r.Vote,
- Commit: r.raftLog.committed,
- }
-}
-
-// send schedules persisting state to a stable storage and AFTER that
-// sending the message (as part of next Ready message processing).
-func (r *raft) send(m pb.Message) {
- if m.From == None {
- m.From = r.id
- }
- if m.Type == pb.MsgVote || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVote || m.Type == pb.MsgPreVoteResp {
- if m.Term == 0 {
- // All {pre-,}campaign messages need to have the term set when
- // sending.
- // - MsgVote: m.Term is the term the node is campaigning for,
- // non-zero as we increment the term when campaigning.
- // - MsgVoteResp: m.Term is the new r.Term if the MsgVote was
- // granted, non-zero for the same reason MsgVote is
- // - MsgPreVote: m.Term is the term the node will campaign,
- // non-zero as we use m.Term to indicate the next term we'll be
- // campaigning for
- // - MsgPreVoteResp: m.Term is the term received in the original
- // MsgPreVote if the pre-vote was granted, non-zero for the
- // same reasons MsgPreVote is
- panic(fmt.Sprintf("term should be set when sending %s", m.Type))
- }
- } else {
- if m.Term != 0 {
- panic(fmt.Sprintf("term should not be set when sending %s (was %d)", m.Type, m.Term))
- }
- // do not attach term to MsgProp, MsgReadIndex
- // proposals are a way to forward to the leader and
- // should be treated as local message.
- // MsgReadIndex is also forwarded to leader.
- if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex {
- m.Term = r.Term
- }
- }
- r.msgs = append(r.msgs, m)
-}
-
-// sendAppend sends an append RPC with new entries (if any) and the
-// current commit index to the given peer.
-func (r *raft) sendAppend(to uint64) {
- r.maybeSendAppend(to, true)
-}
-
-// maybeSendAppend sends an append RPC with new entries to the given peer,
-// if necessary. Returns true if a message was sent. The sendIfEmpty
-// argument controls whether messages with no entries will be sent
-// ("empty" messages are useful to convey updated Commit indexes, but
-// are undesirable when we're sending multiple messages in a batch).
-func (r *raft) maybeSendAppend(to uint64, sendIfEmpty bool) bool {
- pr := r.prs.Progress[to]
- if pr.IsPaused() {
- return false
- }
- m := pb.Message{}
- m.To = to
-
- term, errt := r.raftLog.term(pr.Next - 1)
- ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize)
- if len(ents) == 0 && !sendIfEmpty {
- return false
- }
-
- if errt != nil || erre != nil { // send snapshot if we failed to get term or entries
- if !pr.RecentActive {
- r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to)
- return false
- }
-
- m.Type = pb.MsgSnap
- snapshot, err := r.raftLog.snapshot()
- if err != nil {
- if err == ErrSnapshotTemporarilyUnavailable {
- r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to)
- return false
- }
- panic(err) // TODO(bdarnell)
- }
- if IsEmptySnap(snapshot) {
- panic("need non-empty snapshot")
- }
- m.Snapshot = snapshot
- sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term
- r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]",
- r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr)
- pr.BecomeSnapshot(sindex)
- r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr)
- } else {
- m.Type = pb.MsgApp
- m.Index = pr.Next - 1
- m.LogTerm = term
- m.Entries = ents
- m.Commit = r.raftLog.committed
- if n := len(m.Entries); n != 0 {
- switch pr.State {
- // optimistically increase the next when in StateReplicate
- case tracker.StateReplicate:
- last := m.Entries[n-1].Index
- pr.OptimisticUpdate(last)
- pr.Inflights.Add(last)
- case tracker.StateProbe:
- pr.ProbeSent = true
- default:
- r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State)
- }
- }
- }
- r.send(m)
- return true
-}
-
-// sendHeartbeat sends a heartbeat RPC to the given peer.
-func (r *raft) sendHeartbeat(to uint64, ctx []byte) {
- // Attach the commit as min(to.matched, r.committed).
- // When the leader sends out heartbeat message,
- // the receiver(follower) might not be matched with the leader
- // or it might not have all the committed entries.
- // The leader MUST NOT forward the follower's commit to
- // an unmatched index.
- commit := min(r.prs.Progress[to].Match, r.raftLog.committed)
- m := pb.Message{
- To: to,
- Type: pb.MsgHeartbeat,
- Commit: commit,
- Context: ctx,
- }
-
- r.send(m)
-}
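-
-// A minimal standalone sketch (hypothetical helper, not part of the raft
-// package) of the commit clamping performed in sendHeartbeat above: the
-// leader must never advertise a commit index beyond what the follower has
-// matched, or the follower could mark entries it does not have as committed.
-func heartbeatCommit(followerMatch, leaderCommitted uint64) uint64 {
-	if followerMatch < leaderCommitted {
-		return followerMatch
-	}
-	return leaderCommitted
-}
-
-// For example, heartbeatCommit(4, 9) == 4: the follower has only confirmed
-// entries up to index 4, so the heartbeat may only advance its commit to 4.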
-
-// bcastAppend sends append RPCs, with entries, to all peers that are not
-// up-to-date according to the progress recorded in r.prs.
-func (r *raft) bcastAppend() {
- r.prs.Visit(func(id uint64, _ *tracker.Progress) {
- if id == r.id {
- return
- }
- r.sendAppend(id)
- })
-}
-
-// bcastHeartbeat sends heartbeat RPCs, without entries, to all the peers.
-func (r *raft) bcastHeartbeat() {
- lastCtx := r.readOnly.lastPendingRequestCtx()
- if len(lastCtx) == 0 {
- r.bcastHeartbeatWithCtx(nil)
- } else {
- r.bcastHeartbeatWithCtx([]byte(lastCtx))
- }
-}
-
-func (r *raft) bcastHeartbeatWithCtx(ctx []byte) {
- r.prs.Visit(func(id uint64, _ *tracker.Progress) {
- if id == r.id {
- return
- }
- r.sendHeartbeat(id, ctx)
- })
-}
-
-func (r *raft) advance(rd Ready) {
- r.reduceUncommittedSize(rd.CommittedEntries)
-
- // If entries were applied (or a snapshot), update our cursor for
- // the next Ready. Note that if the current HardState contains a
- // new Commit index, this does not mean that we're also applying
- // all of the new entries due to commit pagination by size.
- if newApplied := rd.appliedCursor(); newApplied > 0 {
- oldApplied := r.raftLog.applied
- r.raftLog.appliedTo(newApplied)
-
- if r.prs.Config.AutoLeave && oldApplied <= r.pendingConfIndex && newApplied >= r.pendingConfIndex && r.state == StateLeader {
- // If the current (and most recent, at least for this leader's term)
- // configuration should be auto-left, initiate that now. We use a
- // nil Data which unmarshals into an empty ConfChangeV2 and has the
- // benefit that appendEntry can never refuse it based on its size
- // (which registers as zero).
- ent := pb.Entry{
- Type: pb.EntryConfChangeV2,
- Data: nil,
- }
- // There's no way in which this proposal should be able to be rejected.
- if !r.appendEntry(ent) {
- panic("refused un-refusable auto-leaving ConfChangeV2")
- }
- r.pendingConfIndex = r.raftLog.lastIndex()
- r.logger.Infof("initiating automatic transition out of joint configuration %s", r.prs.Config)
- }
- }
-
- if len(rd.Entries) > 0 {
- e := rd.Entries[len(rd.Entries)-1]
- r.raftLog.stableTo(e.Index, e.Term)
- }
- if !IsEmptySnap(rd.Snapshot) {
- r.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index)
- }
-}
-
-// maybeCommit attempts to advance the commit index. Returns true if
-// the commit index changed (in which case the caller should call
-// r.bcastAppend).
-func (r *raft) maybeCommit() bool {
- mci := r.prs.Committed()
- return r.raftLog.maybeCommit(mci, r.Term)
-}
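-
-// A minimal sketch (hypothetical helper, assuming a plain majority quorum
-// rather than the joint quorums r.prs actually supports) of what
-// r.prs.Committed() conceptually computes: the highest index acknowledged
-// (Match >= index) by a majority of the voters.
-func quorumCommittedSketch(matches []uint64) uint64 {
-	if len(matches) == 0 {
-		return 0
-	}
-	sorted := append([]uint64(nil), matches...)
-	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
-	// With n voters a quorum is n/2+1, so the (n - quorum)-th smallest Match
-	// is the largest index already replicated to a majority.
-	return sorted[len(sorted)-(len(sorted)/2+1)]
-}
-
-// For example, quorumCommittedSketch([]uint64{9, 5, 7}) == 7: two of the
-// three voters have at least index 7, so entries up to 7 may be committed
-// (provided, as maybeCommit checks, they are from the current term).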
-
-func (r *raft) reset(term uint64) {
- if r.Term != term {
- r.Term = term
- r.Vote = None
- }
- r.lead = None
-
- r.electionElapsed = 0
- r.heartbeatElapsed = 0
- r.resetRandomizedElectionTimeout()
-
- r.abortLeaderTransfer()
-
- r.prs.ResetVotes()
- r.prs.Visit(func(id uint64, pr *tracker.Progress) {
- *pr = tracker.Progress{
- Match: 0,
- Next: r.raftLog.lastIndex() + 1,
- Inflights: tracker.NewInflights(r.prs.MaxInflight),
- IsLearner: pr.IsLearner,
- }
- if id == r.id {
- pr.Match = r.raftLog.lastIndex()
- }
- })
-
- r.pendingConfIndex = 0
- r.uncommittedSize = 0
- r.readOnly = newReadOnly(r.readOnly.option)
-}
-
-func (r *raft) appendEntry(es ...pb.Entry) (accepted bool) {
- li := r.raftLog.lastIndex()
- for i := range es {
- es[i].Term = r.Term
- es[i].Index = li + 1 + uint64(i)
- }
- // Track the size of this uncommitted proposal.
- if !r.increaseUncommittedSize(es) {
- r.logger.Debugf(
- "%x appending new entries to log would exceed uncommitted entry size limit; dropping proposal",
- r.id,
- )
- // Drop the proposal.
- return false
- }
- // use latest "last" index after truncate/append
- li = r.raftLog.append(es...)
- r.prs.Progress[r.id].MaybeUpdate(li)
- // Regardless of maybeCommit's return, our caller will call bcastAppend.
- r.maybeCommit()
- return true
-}
-
-// tickElection is run by followers and candidates after r.electionTimeout.
-func (r *raft) tickElection() {
- r.electionElapsed++
-
- if r.promotable() && r.pastElectionTimeout() {
- r.electionElapsed = 0
- r.Step(pb.Message{From: r.id, Type: pb.MsgHup})
- }
-}
-
-// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout.
-func (r *raft) tickHeartbeat() {
- r.heartbeatElapsed++
- r.electionElapsed++
-
- if r.electionElapsed >= r.electionTimeout {
- r.electionElapsed = 0
- if r.checkQuorum {
- r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum})
- }
- // If current leader cannot transfer leadership in electionTimeout, it becomes leader again.
- if r.state == StateLeader && r.leadTransferee != None {
- r.abortLeaderTransfer()
- }
- }
-
- if r.state != StateLeader {
- return
- }
-
- if r.heartbeatElapsed >= r.heartbeatTimeout {
- r.heartbeatElapsed = 0
- r.Step(pb.Message{From: r.id, Type: pb.MsgBeat})
- }
-}
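-
-// A rough sketch of how the tick functions above are driven in practice:
-// the application owns the clock and forwards logical ticks to the node,
-// which dispatches to r.tickElection or r.tickHeartbeat depending on the
-// current state. (Assumes the public raft.Node API and a 100ms tick.)
-//
-//	ticker := time.NewTicker(100 * time.Millisecond)
-//	defer ticker.Stop()
-//	for range ticker.C {
-//		n.Tick()
-//	}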
-
-func (r *raft) becomeFollower(term uint64, lead uint64) {
- r.step = stepFollower
- r.reset(term)
- r.tick = r.tickElection
- r.lead = lead
- r.state = StateFollower
- r.logger.Infof("%x became follower at term %d", r.id, r.Term)
-}
-
-func (r *raft) becomeCandidate() {
- // TODO(xiangli) remove the panic when the raft implementation is stable
- if r.state == StateLeader {
- panic("invalid transition [leader -> candidate]")
- }
- r.step = stepCandidate
- r.reset(r.Term + 1)
- r.tick = r.tickElection
- r.Vote = r.id
- r.state = StateCandidate
- r.logger.Infof("%x became candidate at term %d", r.id, r.Term)
-}
-
-func (r *raft) becomePreCandidate() {
- // TODO(xiangli) remove the panic when the raft implementation is stable
- if r.state == StateLeader {
- panic("invalid transition [leader -> pre-candidate]")
- }
- // Becoming a pre-candidate changes our step functions and state,
- // but doesn't change anything else. In particular it does not increase
- // r.Term or change r.Vote.
- r.step = stepCandidate
- r.prs.ResetVotes()
- r.tick = r.tickElection
- r.lead = None
- r.state = StatePreCandidate
- r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term)
-}
-
-func (r *raft) becomeLeader() {
- // TODO(xiangli) remove the panic when the raft implementation is stable
- if r.state == StateFollower {
- panic("invalid transition [follower -> leader]")
- }
- r.step = stepLeader
- r.reset(r.Term)
- r.tick = r.tickHeartbeat
- r.lead = r.id
- r.state = StateLeader
- // Followers enter replicate mode when they've been successfully probed
- // (perhaps after having received a snapshot as a result). The leader is
- // trivially in this state. Note that r.reset() has initialized this
- // progress with the last index already.
- r.prs.Progress[r.id].BecomeReplicate()
-
- // Conservatively set the pendingConfIndex to the last index in the
- // log. There may or may not be a pending config change, but it's
- // safe to delay any future proposals until we commit all our
- // pending log entries, and scanning the entire tail of the log
- // could be expensive.
- r.pendingConfIndex = r.raftLog.lastIndex()
-
- emptyEnt := pb.Entry{Data: nil}
- if !r.appendEntry(emptyEnt) {
- // This won't happen because we just called reset() above.
- r.logger.Panic("empty entry was dropped")
- }
- // As a special case, don't count the initial empty entry towards the
- // uncommitted log quota. This is because we want to preserve the
- // behavior of allowing one entry larger than quota if the current
- // usage is zero.
- r.reduceUncommittedSize([]pb.Entry{emptyEnt})
- r.logger.Infof("%x became leader at term %d", r.id, r.Term)
-}
-
-func (r *raft) hup(t CampaignType) {
- if r.state == StateLeader {
- r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
- return
- }
-
- if !r.promotable() {
- r.logger.Warningf("%x is unpromotable and can not campaign", r.id)
- return
- }
- ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit)
- if err != nil {
- r.logger.Panicf("unexpected error getting unapplied entries (%v)", err)
- }
- if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied {
- r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n)
- return
- }
-
- r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term)
- r.campaign(t)
-}
-
-// campaign transitions the raft instance to candidate state. This must only be
-// called after verifying that this is a legitimate transition.
-func (r *raft) campaign(t CampaignType) {
- if !r.promotable() {
- // This path should not be hit (callers are supposed to check), but
- // better safe than sorry.
-		r.logger.Warningf("%x is unpromotable; campaign() should not have been called", r.id)
- }
- var term uint64
- var voteMsg pb.MessageType
- if t == campaignPreElection {
- r.becomePreCandidate()
- voteMsg = pb.MsgPreVote
- // PreVote RPCs are sent for the next term before we've incremented r.Term.
- term = r.Term + 1
- } else {
- r.becomeCandidate()
- voteMsg = pb.MsgVote
- term = r.Term
- }
- if _, _, res := r.poll(r.id, voteRespMsgType(voteMsg), true); res == quorum.VoteWon {
- // We won the election after voting for ourselves (which must mean that
- // this is a single-node cluster). Advance to the next state.
- if t == campaignPreElection {
- r.campaign(campaignElection)
- } else {
- r.becomeLeader()
- }
- return
- }
- var ids []uint64
- {
- idMap := r.prs.Voters.IDs()
- ids = make([]uint64, 0, len(idMap))
- for id := range idMap {
- ids = append(ids, id)
- }
- sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
- }
- for _, id := range ids {
- if id == r.id {
- continue
- }
- r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d",
- r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), voteMsg, id, r.Term)
-
- var ctx []byte
- if t == campaignTransfer {
- ctx = []byte(t)
- }
- r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx})
- }
-}
-
-func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int, rejected int, result quorum.VoteResult) {
- if v {
- r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term)
- } else {
- r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term)
- }
- r.prs.RecordVote(id, v)
- return r.prs.TallyVotes()
-}
-
-func (r *raft) Step(m pb.Message) error {
- // Handle the message term, which may result in our stepping down to a follower.
- switch {
- case m.Term == 0:
- // local message
- case m.Term > r.Term:
- if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote {
- force := bytes.Equal(m.Context, []byte(campaignTransfer))
- inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout
- if !force && inLease {
- // If a server receives a RequestVote request within the minimum election timeout
- // of hearing from a current leader, it does not update its term or grant its vote
- r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)",
- r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed)
- return nil
- }
- }
- switch {
- case m.Type == pb.MsgPreVote:
- // Never change our term in response to a PreVote
- case m.Type == pb.MsgPreVoteResp && !m.Reject:
- // We send pre-vote requests with a term in our future. If the
- // pre-vote is granted, we will increment our term when we get a
- // quorum. If it is not, the term comes from the node that
- // rejected our vote so we should become a follower at the new
- // term.
- default:
- r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
- r.id, r.Term, m.Type, m.From, m.Term)
- if m.Type == pb.MsgApp || m.Type == pb.MsgHeartbeat || m.Type == pb.MsgSnap {
- r.becomeFollower(m.Term, m.From)
- } else {
- r.becomeFollower(m.Term, None)
- }
- }
-
- case m.Term < r.Term:
- if (r.checkQuorum || r.preVote) && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) {
- // We have received messages from a leader at a lower term. It is possible
- // that these messages were simply delayed in the network, but this could
- // also mean that this node has advanced its term number during a network
- // partition, and it is now unable to either win an election or to rejoin
- // the majority on the old term. If checkQuorum is false, this will be
- // handled by incrementing term numbers in response to MsgVote with a
- // higher term, but if checkQuorum is true we may not advance the term on
- // MsgVote and must generate other messages to advance the term. The net
- // result of these two features is to minimize the disruption caused by
- // nodes that have been removed from the cluster's configuration: a
- // removed node will send MsgVotes (or MsgPreVotes) which will be ignored,
- // but it will not receive MsgApp or MsgHeartbeat, so it will not create
-			// disruptive term increases, by notifying the leader of this node's activeness.
-			// The above comments also apply to Pre-Vote.
- //
-			// When a follower gets isolated, it soon starts an election ending
-			// up with a higher term than the leader, although it won't receive enough
-			// votes to win the election. When it regains connectivity, this response
-			// with "pb.MsgAppResp" of higher term would force the leader to step down.
-			// However, this disruption is inevitable to free this stuck node with a
-			// fresh election. This can be prevented with the Pre-Vote phase.
- r.send(pb.Message{To: m.From, Type: pb.MsgAppResp})
- } else if m.Type == pb.MsgPreVote {
-			// Before Pre-Vote was enabled, there may have been a candidate with a higher
-			// term but a shorter log. After upgrading to Pre-Vote, the cluster may
-			// deadlock if we drop messages with a lower term.
- r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
- r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
- r.send(pb.Message{To: m.From, Term: r.Term, Type: pb.MsgPreVoteResp, Reject: true})
- } else {
- // ignore other cases
- r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]",
- r.id, r.Term, m.Type, m.From, m.Term)
- }
- return nil
- }
-
- switch m.Type {
- case pb.MsgHup:
- if r.preVote {
- r.hup(campaignPreElection)
- } else {
- r.hup(campaignElection)
- }
-
- case pb.MsgVote, pb.MsgPreVote:
- // We can vote if this is a repeat of a vote we've already cast...
- canVote := r.Vote == m.From ||
- // ...we haven't voted and we don't think there's a leader yet in this term...
- (r.Vote == None && r.lead == None) ||
- // ...or this is a PreVote for a future term...
- (m.Type == pb.MsgPreVote && m.Term > r.Term)
- // ...and we believe the candidate is up to date.
- if canVote && r.raftLog.isUpToDate(m.Index, m.LogTerm) {
-			// Note: it turns out that learners must be allowed to cast votes.
-			// This seems counter-intuitive but is necessary in the situation in which
- // a learner has been promoted (i.e. is now a voter) but has not learned
- // about this yet.
- // For example, consider a group in which id=1 is a learner and id=2 and
- // id=3 are voters. A configuration change promoting 1 can be committed on
- // the quorum `{2,3}` without the config change being appended to the
- // learner's log. If the leader (say 2) fails, there are de facto two
- // voters remaining. Only 3 can win an election (due to its log containing
- // all committed entries), but to do so it will need 1 to vote. But 1
- // considers itself a learner and will continue to do so until 3 has
- // stepped up as leader, replicates the conf change to 1, and 1 applies it.
- // Ultimately, by receiving a request to vote, the learner realizes that
- // the candidate believes it to be a voter, and that it should act
- // accordingly. The candidate's config may be stale, too; but in that case
- // it won't win the election, at least in the absence of the bug discussed
- // in:
- // https://github.com/etcd-io/etcd/issues/7625#issuecomment-488798263.
- r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d",
- r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
- // When responding to Msg{Pre,}Vote messages we include the term
- // from the message, not the local term. To see why, consider the
- // case where a single node was previously partitioned away and
-			// its local term is now out of date. If we include the local term
- // (recall that for pre-votes we don't update the local term), the
- // (pre-)campaigning node on the other end will proceed to ignore
- // the message (it ignores all out of date messages).
- // The term in the original message and current local term are the
- // same in the case of regular votes, but different for pre-votes.
- r.send(pb.Message{To: m.From, Term: m.Term, Type: voteRespMsgType(m.Type)})
- if m.Type == pb.MsgVote {
- // Only record real votes.
- r.electionElapsed = 0
- r.Vote = m.From
- }
- } else {
- r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
- r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
- r.send(pb.Message{To: m.From, Term: r.Term, Type: voteRespMsgType(m.Type), Reject: true})
- }
-
- default:
- err := r.step(r, m)
- if err != nil {
- return err
- }
- }
- return nil
-}
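-
-// A minimal sketch (hypothetical helper, mirroring what r.raftLog.isUpToDate
-// checks when votes are granted above): a candidate's log is at least as
-// up-to-date as ours if its last term is higher, or the terms are equal and
-// its last index is at least ours (Raft paper, section 5.4.1).
-func candidateUpToDate(candLastIndex, candLastTerm, ourLastIndex, ourLastTerm uint64) bool {
-	return candLastTerm > ourLastTerm ||
-		(candLastTerm == ourLastTerm && candLastIndex >= ourLastIndex)
-}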
-
-type stepFunc func(r *raft, m pb.Message) error
-
-func stepLeader(r *raft, m pb.Message) error {
- // These message types do not require any progress for m.From.
- switch m.Type {
- case pb.MsgBeat:
- r.bcastHeartbeat()
- return nil
- case pb.MsgCheckQuorum:
- // The leader should always see itself as active. As a precaution, handle
- // the case in which the leader isn't in the configuration any more (for
- // example if it just removed itself).
- //
- // TODO(tbg): I added a TODO in removeNode, it doesn't seem that the
- // leader steps down when removing itself. I might be missing something.
- if pr := r.prs.Progress[r.id]; pr != nil {
- pr.RecentActive = true
- }
- if !r.prs.QuorumActive() {
- r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id)
- r.becomeFollower(r.Term, None)
- }
- // Mark everyone (but ourselves) as inactive in preparation for the next
- // CheckQuorum.
- r.prs.Visit(func(id uint64, pr *tracker.Progress) {
- if id != r.id {
- pr.RecentActive = false
- }
- })
- return nil
- case pb.MsgProp:
- if len(m.Entries) == 0 {
- r.logger.Panicf("%x stepped empty MsgProp", r.id)
- }
- if r.prs.Progress[r.id] == nil {
-			// If we are not currently a member of the configuration (i.e. this node
-			// was removed from the configuration while serving as leader),
-			// drop any new proposals.
- return ErrProposalDropped
- }
- if r.leadTransferee != None {
- r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee)
- return ErrProposalDropped
- }
-
- for i := range m.Entries {
- e := &m.Entries[i]
- var cc pb.ConfChangeI
- if e.Type == pb.EntryConfChange {
- var ccc pb.ConfChange
- if err := ccc.Unmarshal(e.Data); err != nil {
- panic(err)
- }
- cc = ccc
- } else if e.Type == pb.EntryConfChangeV2 {
- var ccc pb.ConfChangeV2
- if err := ccc.Unmarshal(e.Data); err != nil {
- panic(err)
- }
- cc = ccc
- }
- if cc != nil {
- alreadyPending := r.pendingConfIndex > r.raftLog.applied
- alreadyJoint := len(r.prs.Config.Voters[1]) > 0
- wantsLeaveJoint := len(cc.AsV2().Changes) == 0
-
- var refused string
- if alreadyPending {
- refused = fmt.Sprintf("possible unapplied conf change at index %d (applied to %d)", r.pendingConfIndex, r.raftLog.applied)
- } else if alreadyJoint && !wantsLeaveJoint {
- refused = "must transition out of joint config first"
- } else if !alreadyJoint && wantsLeaveJoint {
- refused = "not in joint state; refusing empty conf change"
- }
-
- if refused != "" {
- r.logger.Infof("%x ignoring conf change %v at config %s: %s", r.id, cc, r.prs.Config, refused)
- m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
- } else {
- r.pendingConfIndex = r.raftLog.lastIndex() + uint64(i) + 1
- }
- }
- }
-
- if !r.appendEntry(m.Entries...) {
- return ErrProposalDropped
- }
- r.bcastAppend()
- return nil
- case pb.MsgReadIndex:
- // only one voting member (the leader) in the cluster
- if r.prs.IsSingleton() {
- if resp := r.responseToReadIndexReq(m, r.raftLog.committed); resp.To != None {
- r.send(resp)
- }
- return nil
- }
-
- // Postpone read only request when this leader has not committed
- // any log entry at its term.
- if !r.committedEntryInCurrentTerm() {
- r.pendingReadIndexMessages = append(r.pendingReadIndexMessages, m)
- return nil
- }
-
- sendMsgReadIndexResponse(r, m)
-
- return nil
- }
-
- // All other message types require a progress for m.From (pr).
- pr := r.prs.Progress[m.From]
- if pr == nil {
- r.logger.Debugf("%x no progress available for %x", r.id, m.From)
- return nil
- }
- switch m.Type {
- case pb.MsgAppResp:
- pr.RecentActive = true
-
- if m.Reject {
- // RejectHint is the suggested next base entry for appending (i.e.
- // we try to append entry RejectHint+1 next), and LogTerm is the
- // term that the follower has at index RejectHint. Older versions
- // of this library did not populate LogTerm for rejections and it
- // is zero for followers with an empty log.
- //
- // Under normal circumstances, the leader's log is longer than the
- // follower's and the follower's log is a prefix of the leader's
- // (i.e. there is no divergent uncommitted suffix of the log on the
- // follower). In that case, the first probe reveals where the
- // follower's log ends (RejectHint=follower's last index) and the
- // subsequent probe succeeds.
- //
- // However, when networks are partitioned or systems overloaded,
-			// large divergent log tails can occur. The naive approach of probing
-			// entry by entry in decreasing order takes time proportional to the
-			// product of the length of the diverging tails and the network
-			// round-trip latency, which can easily result in hours of probing and can
- // even cause outright outages. The probes are thus optimized as
- // described below.
- r.logger.Debugf("%x received MsgAppResp(rejected, hint: (index %d, term %d)) from %x for index %d",
- r.id, m.RejectHint, m.LogTerm, m.From, m.Index)
- nextProbeIdx := m.RejectHint
- if m.LogTerm > 0 {
- // If the follower has an uncommitted log tail, we would end up
- // probing one by one until we hit the common prefix.
- //
- // For example, if the leader has:
- //
- // idx 1 2 3 4 5 6 7 8 9
- // -----------------
- // term (L) 1 3 3 3 5 5 5 5 5
- // term (F) 1 1 1 1 2 2
- //
- // Then, after sending an append anchored at (idx=9,term=5) we
- // would receive a RejectHint of 6 and LogTerm of 2. Without the
- // code below, we would try an append at index 6, which would
- // fail again.
- //
- // However, looking only at what the leader knows about its own
- // log and the rejection hint, it is clear that a probe at index
- // 6, 5, 4, 3, and 2 must fail as well:
- //
- // For all of these indexes, the leader's log term is larger than
- // the rejection's log term. If a probe at one of these indexes
- // succeeded, its log term at that index would match the leader's,
- // i.e. 3 or 5 in this example. But the follower already told the
- // leader that it is still at term 2 at index 9, and since the
- // log term only ever goes up (within a log), this is a contradiction.
- //
- // At index 1, however, the leader can draw no such conclusion,
- // as its term 1 is not larger than the term 2 from the
- // follower's rejection. We thus probe at 1, which will succeed
- // in this example. In general, with this approach we probe at
- // most once per term found in the leader's log.
- //
- // There is a similar mechanism on the follower (implemented in
- // handleAppendEntries via a call to findConflictByTerm) that is
- // useful if the follower has a large divergent uncommitted log
- // tail[1], as in this example:
- //
- // idx 1 2 3 4 5 6 7 8 9
- // -----------------
- // term (L) 1 3 3 3 3 3 3 3 7
- // term (F) 1 3 3 4 4 5 5 5 6
- //
- // Naively, the leader would probe at idx=9, receive a rejection
- // revealing the log term of 6 at the follower. Since the leader's
-			// term at the previous index is already smaller than 6, the
-			// leader-side optimization discussed above is ineffective. The leader thus
- // probes at index 8 and, naively, receives a rejection for the same
- // index and log term 5. Again, the leader optimization does not improve
- // over linear probing as term 5 is above the leader's term 3 for that
- // and many preceding indexes; the leader would have to probe linearly
- // until it would finally hit index 3, where the probe would succeed.
- //
- // Instead, we apply a similar optimization on the follower. When the
- // follower receives the probe at index 8 (log term 3), it concludes
- // that all of the leader's log preceding that index has log terms of
- // 3 or below. The largest index in the follower's log with a log term
- // of 3 or below is index 3. The follower will thus return a rejection
- // for index=3, log term=3 instead. The leader's next probe will then
- // succeed at that index.
- //
- // [1]: more precisely, if the log terms in the large uncommitted
- // tail on the follower are larger than the leader's. At first,
- // it may seem unintuitive that a follower could even have such
- // a large tail, but it can happen:
- //
- // 1. Leader appends (but does not commit) entries 2 and 3, crashes.
- // idx 1 2 3 4 5 6 7 8 9
- // -----------------
- // term (L) 1 2 2 [crashes]
- // term (F) 1
- // term (F) 1
- //
- // 2. a follower becomes leader and appends entries at term 3.
- // -----------------
- // term (x) 1 2 2 [down]
- // term (F) 1 3 3 3 3
- // term (F) 1
- //
- // 3. term 3 leader goes down, term 2 leader returns as term 4
- // leader. It commits the log & entries at term 4.
- //
- // -----------------
- // term (L) 1 2 2 2
- // term (x) 1 3 3 3 3 [down]
- // term (F) 1
- // -----------------
- // term (L) 1 2 2 2 4 4 4
- // term (F) 1 3 3 3 3 [gets probed]
- // term (F) 1 2 2 2 4 4 4
- //
- // 4. the leader will now probe the returning follower at index
- // 7, the rejection points it at the end of the follower's log
- // which is at a higher log term than the actually committed
- // log.
- nextProbeIdx = r.raftLog.findConflictByTerm(m.RejectHint, m.LogTerm)
- }
- if pr.MaybeDecrTo(m.Index, nextProbeIdx) {
- r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr)
- if pr.State == tracker.StateReplicate {
- pr.BecomeProbe()
- }
- r.sendAppend(m.From)
- }
- } else {
- oldPaused := pr.IsPaused()
- if pr.MaybeUpdate(m.Index) {
- switch {
- case pr.State == tracker.StateProbe:
- pr.BecomeReplicate()
- case pr.State == tracker.StateSnapshot && pr.Match >= pr.PendingSnapshot:
- // TODO(tbg): we should also enter this branch if a snapshot is
- // received that is below pr.PendingSnapshot but which makes it
- // possible to use the log again.
- r.logger.Debugf("%x recovered from needing snapshot, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
- // Transition back to replicating state via probing state
- // (which takes the snapshot into account). If we didn't
- // move to replicating state, that would only happen with
- // the next round of appends (but there may not be a next
- // round for a while, exposing an inconsistent RaftStatus).
- pr.BecomeProbe()
- pr.BecomeReplicate()
- case pr.State == tracker.StateReplicate:
- pr.Inflights.FreeLE(m.Index)
- }
-
- if r.maybeCommit() {
- // committed index has progressed for the term, so it is safe
- // to respond to pending read index requests
- releasePendingReadIndexMessages(r)
- r.bcastAppend()
- } else if oldPaused {
- // If we were paused before, this node may be missing the
- // latest commit index, so send it.
- r.sendAppend(m.From)
- }
- // We've updated flow control information above, which may
- // allow us to send multiple (size-limited) in-flight messages
- // at once (such as when transitioning from probe to
- // replicate, or when freeTo() covers multiple messages). If
- // we have more entries to send, send as many messages as we
- // can (without sending empty messages for the commit index)
- for r.maybeSendAppend(m.From, false) {
- }
- // Transfer leadership is in progress.
- if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() {
- r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From)
- r.sendTimeoutNow(m.From)
- }
- }
- }
- case pb.MsgHeartbeatResp:
- pr.RecentActive = true
- pr.ProbeSent = false
-
- // free one slot for the full inflights window to allow progress.
- if pr.State == tracker.StateReplicate && pr.Inflights.Full() {
- pr.Inflights.FreeFirstOne()
- }
- if pr.Match < r.raftLog.lastIndex() {
- r.sendAppend(m.From)
- }
-
- if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 {
- return nil
- }
-
- if r.prs.Voters.VoteResult(r.readOnly.recvAck(m.From, m.Context)) != quorum.VoteWon {
- return nil
- }
-
- rss := r.readOnly.advance(m)
- for _, rs := range rss {
- if resp := r.responseToReadIndexReq(rs.req, rs.index); resp.To != None {
- r.send(resp)
- }
- }
- case pb.MsgSnapStatus:
- if pr.State != tracker.StateSnapshot {
- return nil
- }
- // TODO(tbg): this code is very similar to the snapshot handling in
- // MsgAppResp above. In fact, the code there is more correct than the
- // code here and should likely be updated to match (or even better, the
- // logic pulled into a newly created Progress state machine handler).
- if !m.Reject {
- pr.BecomeProbe()
- r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
- } else {
-			// NB: the order here matters or we'll be probing erroneously from
-			// the snapshot index even though the snapshot was never applied.
- pr.PendingSnapshot = 0
- pr.BecomeProbe()
- r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
- }
-		// If the snapshot finished, wait for the MsgAppResp from the remote node before
-		// sending out the next MsgApp.
-		// If the snapshot failed, wait for a heartbeat interval before the next attempt.
- pr.ProbeSent = true
- case pb.MsgUnreachable:
- // During optimistic replication, if the remote becomes unreachable,
- // there is huge probability that a MsgApp is lost.
- if pr.State == tracker.StateReplicate {
- pr.BecomeProbe()
- }
- r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr)
- case pb.MsgTransferLeader:
- if pr.IsLearner {
-			r.logger.Debugf("%x is a learner. Ignored transferring leadership", m.From)
- return nil
- }
- leadTransferee := m.From
- lastLeadTransferee := r.leadTransferee
- if lastLeadTransferee != None {
- if lastLeadTransferee == leadTransferee {
- r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignores request to same node %x",
- r.id, r.Term, leadTransferee, leadTransferee)
- return nil
- }
- r.abortLeaderTransfer()
- r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee)
- }
- if leadTransferee == r.id {
- r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id)
- return nil
- }
- // Transfer leadership to third party.
- r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee)
- // Transfer leadership should be finished in one electionTimeout, so reset r.electionElapsed.
- r.electionElapsed = 0
- r.leadTransferee = leadTransferee
- if pr.Match == r.raftLog.lastIndex() {
- r.sendTimeoutNow(leadTransferee)
- r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee)
- } else {
- r.sendAppend(leadTransferee)
- }
- }
- return nil
-}
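-
-// An illustrative standalone sketch (hypothetical slice-backed log, not the
-// real raftLog API) of the findConflictByTerm optimization used in the
-// MsgAppResp rejection handling above: walk back from the hinted index
-// until the local term at that index no longer exceeds the hinted term.
-func findConflictByTermSketch(terms []uint64, index, term uint64) uint64 {
-	if last := uint64(len(terms)); index > last {
-		index = last // clamp to the last index this log actually has
-	}
-	for ; index > 0; index-- {
-		if terms[index-1] <= term { // terms[i-1] is the term of the entry at index i
-			return index
-		}
-	}
-	return 0
-}
-
-// With the leader's log terms {1, 3, 3, 3, 5, 5, 5, 5, 5} from the first
-// example above and a rejection hint of (index=6, term=2), this returns 1:
-// every index in 2..6 carries term 3 or 5 (> 2), so only index 1 can match.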
-
-// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is
-// whether they respond to MsgVoteResp or MsgPreVoteResp.
-func stepCandidate(r *raft, m pb.Message) error {
- // Only handle vote responses corresponding to our candidacy (while in
- // StateCandidate, we may get stale MsgPreVoteResp messages in this term from
- // our pre-candidate state).
- var myVoteRespType pb.MessageType
- if r.state == StatePreCandidate {
- myVoteRespType = pb.MsgPreVoteResp
- } else {
- myVoteRespType = pb.MsgVoteResp
- }
- switch m.Type {
- case pb.MsgProp:
- r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
- return ErrProposalDropped
- case pb.MsgApp:
- r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
- r.handleAppendEntries(m)
- case pb.MsgHeartbeat:
- r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
- r.handleHeartbeat(m)
- case pb.MsgSnap:
- r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
- r.handleSnapshot(m)
- case myVoteRespType:
- gr, rj, res := r.poll(m.From, m.Type, !m.Reject)
- r.logger.Infof("%x has received %d %s votes and %d vote rejections", r.id, gr, m.Type, rj)
- switch res {
- case quorum.VoteWon:
- if r.state == StatePreCandidate {
- r.campaign(campaignElection)
- } else {
- r.becomeLeader()
- r.bcastAppend()
- }
- case quorum.VoteLost:
- // pb.MsgPreVoteResp contains future term of pre-candidate
- // m.Term > r.Term; reuse r.Term
- r.becomeFollower(r.Term, None)
- }
- case pb.MsgTimeoutNow:
- r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From)
- }
- return nil
-}
-
-func stepFollower(r *raft, m pb.Message) error {
- switch m.Type {
- case pb.MsgProp:
- if r.lead == None {
- r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
- return ErrProposalDropped
- } else if r.disableProposalForwarding {
- r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term)
- return ErrProposalDropped
- }
- m.To = r.lead
- r.send(m)
- case pb.MsgApp:
- r.electionElapsed = 0
- r.lead = m.From
- r.handleAppendEntries(m)
- case pb.MsgHeartbeat:
- r.electionElapsed = 0
- r.lead = m.From
- r.handleHeartbeat(m)
- case pb.MsgSnap:
- r.electionElapsed = 0
- r.lead = m.From
- r.handleSnapshot(m)
- case pb.MsgTransferLeader:
- if r.lead == None {
- r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term)
- return nil
- }
- m.To = r.lead
- r.send(m)
- case pb.MsgTimeoutNow:
- r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From)
- // Leadership transfers never use pre-vote even if r.preVote is true; we
- // know we are not recovering from a partition so there is no need for the
- // extra round trip.
- r.hup(campaignTransfer)
- case pb.MsgReadIndex:
- if r.lead == None {
- r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term)
- return nil
- }
- m.To = r.lead
- r.send(m)
- case pb.MsgReadIndexResp:
- if len(m.Entries) != 1 {
- r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries))
- return nil
- }
- r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data})
- }
- return nil
-}
-
-func (r *raft) handleAppendEntries(m pb.Message) {
- if m.Index < r.raftLog.committed {
- r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
- return
- }
-
- if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok {
- r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex})
- } else {
- r.logger.Debugf("%x [logterm: %d, index: %d] rejected MsgApp [logterm: %d, index: %d] from %x",
- r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From)
-
- // Return a hint to the leader about the maximum index and term that the
- // two logs could be divergent at. Do this by searching through the
- // follower's log for the maximum (index, term) pair with a term <= the
- // MsgApp's LogTerm and an index <= the MsgApp's Index. This can help
- // skip all indexes in the follower's uncommitted tail with terms
- // greater than the MsgApp's LogTerm.
- //
- // See the other caller for findConflictByTerm (in stepLeader) for a much
- // more detailed explanation of this mechanism.
- hintIndex := min(m.Index, r.raftLog.lastIndex())
- hintIndex = r.raftLog.findConflictByTerm(hintIndex, m.LogTerm)
- hintTerm, err := r.raftLog.term(hintIndex)
- if err != nil {
- panic(fmt.Sprintf("term(%d) must be valid, but got %v", hintIndex, err))
- }
- r.send(pb.Message{
- To: m.From,
- Type: pb.MsgAppResp,
- Index: m.Index,
- Reject: true,
- RejectHint: hintIndex,
- LogTerm: hintTerm,
- })
- }
-}
-
-func (r *raft) handleHeartbeat(m pb.Message) {
- r.raftLog.commitTo(m.Commit)
- r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context})
-}
-
-func (r *raft) handleSnapshot(m pb.Message) {
- sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term
- if r.restore(m.Snapshot) {
- r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]",
- r.id, r.raftLog.committed, sindex, sterm)
- r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()})
- } else {
- r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]",
- r.id, r.raftLog.committed, sindex, sterm)
- r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
- }
-}
-
-// restore recovers the state machine from a snapshot. It restores the log and the
-// configuration of the state machine. If this method returns false, the snapshot was
-// ignored, either because it was obsolete or because of an error.
-func (r *raft) restore(s pb.Snapshot) bool {
- if s.Metadata.Index <= r.raftLog.committed {
- return false
- }
- if r.state != StateFollower {
- // This is defense-in-depth: if the leader somehow ended up applying a
- // snapshot, it could move into a new term without moving into a
- // follower state. This should never fire, but if it did, we'd have
- // prevented damage by returning early, so log only a loud warning.
- //
- // At the time of writing, the instance is guaranteed to be in follower
- // state when this method is called.
- r.logger.Warningf("%x attempted to restore snapshot as leader; should never happen", r.id)
- r.becomeFollower(r.Term+1, None)
- return false
- }
-
- // More defense-in-depth: throw away snapshot if recipient is not in the
- // config. This shouldn't ever happen (at the time of writing) but lots of
- // code here and there assumes that r.id is in the progress tracker.
- found := false
- cs := s.Metadata.ConfState
-
- for _, set := range [][]uint64{
- cs.Voters,
- cs.Learners,
- cs.VotersOutgoing,
- // `LearnersNext` doesn't need to be checked. According to the rules, if a peer in
- // `LearnersNext`, it has to be in `VotersOutgoing`.
- } {
- for _, id := range set {
- if id == r.id {
- found = true
- break
- }
- }
- if found {
- break
- }
- }
- if !found {
- r.logger.Warningf(
- "%x attempted to restore snapshot but it is not in the ConfState %v; should never happen",
- r.id, cs,
- )
- return false
- }
-
- // Now go ahead and actually restore.
-
- if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) {
- r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]",
- r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
- r.raftLog.commitTo(s.Metadata.Index)
- return false
- }
-
- r.raftLog.restore(s)
-
- // Reset the configuration and add the (potentially updated) peers in anew.
- r.prs = tracker.MakeProgressTracker(r.prs.MaxInflight)
- cfg, prs, err := confchange.Restore(confchange.Changer{
- Tracker: r.prs,
- LastIndex: r.raftLog.lastIndex(),
- }, cs)
-
- if err != nil {
- // This should never happen. Either there's a bug in our config change
- // handling or the client corrupted the conf change.
- panic(fmt.Sprintf("unable to restore config %+v: %s", cs, err))
- }
-
- assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, prs))
-
- pr := r.prs.Progress[r.id]
- pr.MaybeUpdate(pr.Next - 1) // TODO(tbg): this is untested and likely unneeded
-
- r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] restored snapshot [index: %d, term: %d]",
- r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
- return true
-}
-
-// promotable indicates whether the state machine can be promoted to leader,
-// which is true when its own id is in the progress list.
-func (r *raft) promotable() bool {
- pr := r.prs.Progress[r.id]
- return pr != nil && !pr.IsLearner && !r.raftLog.hasPendingSnapshot()
-}
-
-func (r *raft) applyConfChange(cc pb.ConfChangeV2) pb.ConfState {
- cfg, prs, err := func() (tracker.Config, tracker.ProgressMap, error) {
- changer := confchange.Changer{
- Tracker: r.prs,
- LastIndex: r.raftLog.lastIndex(),
- }
- if cc.LeaveJoint() {
- return changer.LeaveJoint()
- } else if autoLeave, ok := cc.EnterJoint(); ok {
- return changer.EnterJoint(autoLeave, cc.Changes...)
- }
- return changer.Simple(cc.Changes...)
- }()
-
- if err != nil {
- // TODO(tbg): return the error to the caller.
- panic(err)
- }
-
- return r.switchToConfig(cfg, prs)
-}
-
-// switchToConfig reconfigures this node to use the provided configuration. It
-// updates the in-memory state and, when necessary, carries out additional
-// actions such as reacting to the removal of nodes or changed quorum
-// requirements.
-//
-// The inputs usually result from restoring a ConfState or applying a ConfChange.
-func (r *raft) switchToConfig(cfg tracker.Config, prs tracker.ProgressMap) pb.ConfState {
- r.prs.Config = cfg
- r.prs.Progress = prs
-
- r.logger.Infof("%x switched to configuration %s", r.id, r.prs.Config)
- cs := r.prs.ConfState()
- pr, ok := r.prs.Progress[r.id]
-
- // Update whether the node itself is a learner, resetting to false when the
- // node is removed.
- r.isLearner = ok && pr.IsLearner
-
- if (!ok || r.isLearner) && r.state == StateLeader {
- // This node is leader and was removed or demoted. We prevent demotions
-		// at the time of writing, but hypothetically we handle them the same way as
- // removing the leader: stepping down into the next Term.
- //
- // TODO(tbg): step down (for sanity) and ask follower with largest Match
- // to TimeoutNow (to avoid interruption). This might still drop some
- // proposals but it's better than nothing.
- //
- // TODO(tbg): test this branch. It is untested at the time of writing.
- return cs
- }
-
- // The remaining steps only make sense if this node is the leader and there
- // are other nodes.
- if r.state != StateLeader || len(cs.Voters) == 0 {
- return cs
- }
-
- if r.maybeCommit() {
- // If the configuration change means that more entries are committed now,
- // broadcast/append to everyone in the updated config.
- r.bcastAppend()
- } else {
- // Otherwise, still probe the newly added replicas; there's no reason to
- // let them wait out a heartbeat interval (or the next incoming
- // proposal).
- r.prs.Visit(func(id uint64, pr *tracker.Progress) {
- r.maybeSendAppend(id, false /* sendIfEmpty */)
- })
- }
-	// If the leadTransferee was removed or demoted, abort the leadership transfer.
- if _, tOK := r.prs.Config.Voters.IDs()[r.leadTransferee]; !tOK && r.leadTransferee != 0 {
- r.abortLeaderTransfer()
- }
-
- return cs
-}
-
-func (r *raft) loadState(state pb.HardState) {
- if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() {
- r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex())
- }
- r.raftLog.committed = state.Commit
- r.Term = state.Term
- r.Vote = state.Vote
-}
-
-// pastElectionTimeout returns true iff r.electionElapsed is greater
-// than or equal to the randomized election timeout in
-// [electiontimeout, 2 * electiontimeout - 1].
-func (r *raft) pastElectionTimeout() bool {
- return r.electionElapsed >= r.randomizedElectionTimeout
-}
-
-func (r *raft) resetRandomizedElectionTimeout() {
- r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout)
-}
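-
-// A tiny sketch (hypothetical standalone form, assuming math/rand) of the
-// distribution produced above: rand.Intn(et) is in [0, et-1], so the
-// randomized timeout lands in [et, 2*et-1] as documented.
-func randomizedTimeoutSketch(et int, rng *rand.Rand) int {
-	return et + rng.Intn(et) // in [et, 2*et-1]
-}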
-
-func (r *raft) sendTimeoutNow(to uint64) {
- r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow})
-}
-
-func (r *raft) abortLeaderTransfer() {
- r.leadTransferee = None
-}
-
-// committedEntryInCurrentTerm returns true if the peer has committed an entry in its term.
-func (r *raft) committedEntryInCurrentTerm() bool {
- return r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) == r.Term
-}
-
-// responseToReadIndexReq constructs a response for `req`. If `req` comes from the peer
-// itself, a blank value will be returned.
-func (r *raft) responseToReadIndexReq(req pb.Message, readIndex uint64) pb.Message {
- if req.From == None || req.From == r.id {
- r.readStates = append(r.readStates, ReadState{
- Index: readIndex,
- RequestCtx: req.Entries[0].Data,
- })
- return pb.Message{}
- }
- return pb.Message{
- Type: pb.MsgReadIndexResp,
- To: req.From,
- Index: readIndex,
- Entries: req.Entries,
- }
-}
-
-// increaseUncommittedSize computes the size of the proposed entries and
-// determines whether they would push leader over its maxUncommittedSize limit.
-// If the new entries would exceed the limit, the method returns false. If not,
-// the increase in uncommitted entry size is recorded and the method returns
-// true.
-//
-// Empty payloads are never refused. This is used both for appending an empty
-// entry at a new leader's term, as well as leaving a joint configuration.
-func (r *raft) increaseUncommittedSize(ents []pb.Entry) bool {
- var s uint64
- for _, e := range ents {
- s += uint64(PayloadSize(e))
- }
-
- if r.uncommittedSize > 0 && s > 0 && r.uncommittedSize+s > r.maxUncommittedSize {
- // If the uncommitted tail of the Raft log is empty, allow any size
- // proposal. Otherwise, limit the size of the uncommitted tail of the
- // log and drop any proposal that would push the size over the limit.
- // Note the added requirement s>0 which is used to make sure that
- // appending single empty entries to the log always succeeds, used both
- // for replicating a new leader's initial empty entry, and for
- // auto-leaving joint configurations.
- return false
- }
- r.uncommittedSize += s
- return true
-}
-
-// reduceUncommittedSize accounts for the newly committed entries by decreasing
-// the tracked uncommitted entry size.
-func (r *raft) reduceUncommittedSize(ents []pb.Entry) {
- if r.uncommittedSize == 0 {
- // Fast-path for followers, who do not track or enforce the limit.
- return
- }
-
- var s uint64
- for _, e := range ents {
- s += uint64(PayloadSize(e))
- }
- if s > r.uncommittedSize {
- // uncommittedSize may underestimate the size of the uncommitted Raft
- // log tail but will never overestimate it. Saturate at 0 instead of
- // allowing overflow.
- r.uncommittedSize = 0
- } else {
- r.uncommittedSize -= s
- }
-}
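-
-// A condensed sketch (hypothetical type, not the raft struct's fields) of
-// the flow control implemented by the two methods above: proposals grow an
-// uncommitted byte budget and are dropped once the budget would be exceeded,
-// except that an empty payload (the leader's initial no-op entry, or an
-// auto-leave ConfChangeV2) is always accepted.
-type uncommittedQuota struct{ used, max uint64 }
-
-func (q *uncommittedQuota) tryAdd(payload uint64) bool {
-	if q.used > 0 && payload > 0 && q.used+payload > q.max {
-		return false // would exceed the budget; drop the proposal
-	}
-	q.used += payload
-	return true
-}
-
-func (q *uncommittedQuota) release(payload uint64) {
-	if payload > q.used {
-		q.used = 0 // never underflow; followers keep used at 0
-		return
-	}
-	q.used -= payload
-}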
-
-func numOfPendingConf(ents []pb.Entry) int {
- n := 0
- for i := range ents {
- if ents[i].Type == pb.EntryConfChange || ents[i].Type == pb.EntryConfChangeV2 {
- n++
- }
- }
- return n
-}
-
-func releasePendingReadIndexMessages(r *raft) {
- if !r.committedEntryInCurrentTerm() {
- r.logger.Error("pending MsgReadIndex should be released only after first commit in current term")
- return
- }
-
- msgs := r.pendingReadIndexMessages
- r.pendingReadIndexMessages = nil
-
- for _, m := range msgs {
- sendMsgReadIndexResponse(r, m)
- }
-}
-
-func sendMsgReadIndexResponse(r *raft, m pb.Message) {
- // thinking: use an internally defined context instead of the user given context.
- // We can express this in terms of the term and index instead of a user-supplied value.
- // This would allow multiple reads to piggyback on the same message.
- switch r.readOnly.option {
- // If more than the local vote is needed, go through a full broadcast.
- case ReadOnlySafe:
- r.readOnly.addRequest(r.raftLog.committed, m)
- // The local node automatically acks the request.
- r.readOnly.recvAck(r.id, m.Entries[0].Data)
- r.bcastHeartbeatWithCtx(m.Entries[0].Data)
- case ReadOnlyLeaseBased:
- if resp := r.responseToReadIndexReq(m, r.raftLog.committed); resp.To != None {
- r.send(resp)
- }
- }
-}
diff --git a/raft/raft_flow_control_test.go b/raft/raft_flow_control_test.go
deleted file mode 100644
index 5430568c3e2..00000000000
--- a/raft/raft_flow_control_test.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "testing"
-
- pb "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-// TestMsgAppFlowControlFull ensures:
-// 1. msgApp can fill the sending window until full
-// 2. when the window is full, no more msgApp can be sent.
-
-func TestMsgAppFlowControlFull(t *testing.T) {
- r := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2)))
- r.becomeCandidate()
- r.becomeLeader()
-
- pr2 := r.prs.Progress[2]
- // force the progress to be in replicate state
- pr2.BecomeReplicate()
- // fill in the inflights window
- for i := 0; i < r.prs.MaxInflight; i++ {
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
- ms := r.readMessages()
- if len(ms) != 1 {
- t.Fatalf("#%d: len(ms) = %d, want 1", i, len(ms))
- }
- }
-
- // ensure 1
- if !pr2.Inflights.Full() {
- t.Fatalf("inflights.full = %t, want %t", pr2.Inflights.Full(), true)
- }
-
- // ensure 2
- for i := 0; i < 10; i++ {
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
- ms := r.readMessages()
- if len(ms) != 0 {
- t.Fatalf("#%d: len(ms) = %d, want 0", i, len(ms))
- }
- }
-}
-
-// TestMsgAppFlowControlMoveForward ensures msgAppResp can move
-// forward the sending window correctly:
-// 1. a valid msgAppResp.index moves the window forward past all smaller or equal indexes.
-// 2. an out-of-date msgAppResp has no effect on the sliding window.
-func TestMsgAppFlowControlMoveForward(t *testing.T) {
- r := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2)))
- r.becomeCandidate()
- r.becomeLeader()
-
- pr2 := r.prs.Progress[2]
- // force the progress to be in replicate state
- pr2.BecomeReplicate()
- // fill in the inflights window
- for i := 0; i < r.prs.MaxInflight; i++ {
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
- r.readMessages()
- }
-
- // 1 is noop, 2 is the first proposal we just sent.
- // so we start with 2.
- for tt := 2; tt < r.prs.MaxInflight; tt++ {
- // move forward the window
- r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgAppResp, Index: uint64(tt)})
- r.readMessages()
-
- // fill in the inflights window again
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
- ms := r.readMessages()
- if len(ms) != 1 {
- t.Fatalf("#%d: len(ms) = %d, want 1", tt, len(ms))
- }
-
- // ensure 1
- if !pr2.Inflights.Full() {
- t.Fatalf("inflights.full = %t, want %t", pr2.Inflights.Full(), true)
- }
-
- // ensure 2
- for i := 0; i < tt; i++ {
- r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgAppResp, Index: uint64(i)})
- if !pr2.Inflights.Full() {
- t.Fatalf("#%d: inflights.full = %t, want %t", tt, pr2.Inflights.Full(), true)
- }
- }
- }
-}
-
-// TestMsgAppFlowControlRecvHeartbeat ensures a heartbeat response
-// frees one slot if the window is full.
-func TestMsgAppFlowControlRecvHeartbeat(t *testing.T) {
- r := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2)))
- r.becomeCandidate()
- r.becomeLeader()
-
- pr2 := r.prs.Progress[2]
- // force the progress to be in replicate state
- pr2.BecomeReplicate()
- // fill in the inflights window
- for i := 0; i < r.prs.MaxInflight; i++ {
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
- r.readMessages()
- }
-
- for tt := 1; tt < 5; tt++ {
- if !pr2.Inflights.Full() {
- t.Fatalf("#%d: inflights.full = %t, want %t", tt, pr2.Inflights.Full(), true)
- }
-
- // recv tt msgHeartbeatResp and expect one free slot
- for i := 0; i < tt; i++ {
- r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeatResp})
- r.readMessages()
- if pr2.Inflights.Full() {
- t.Fatalf("#%d.%d: inflights.full = %t, want %t", tt, i, pr2.Inflights.Full(), false)
- }
- }
-
- // one slot
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
- ms := r.readMessages()
- if len(ms) != 1 {
- t.Fatalf("#%d: free slot = 0, want 1", tt)
- }
-
- // and just one slot
- for i := 0; i < 10; i++ {
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
- ms1 := r.readMessages()
- if len(ms1) != 0 {
- t.Fatalf("#%d.%d: len(ms) = %d, want 0", tt, i, len(ms1))
- }
- }
-
- // clear all pending messages.
- r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeatResp})
- r.readMessages()
- }
-}
diff --git a/raft/raft_paper_test.go b/raft/raft_paper_test.go
deleted file mode 100644
index 9c71ebaa24d..00000000000
--- a/raft/raft_paper_test.go
+++ /dev/null
@@ -1,937 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This file contains tests which verify that the scenarios described
-in the raft paper (https://raft.github.io/raft.pdf) are
-handled by the raft implementation correctly. Each test focuses on
-several sentences written in the paper. This could help us to prevent
-most implementation bugs.
-
-Each test is composed of three parts: init, test and check.
-The init part uses a simple and understandable way to simulate the initial state.
-The test part uses the Step function to generate the scenario. The check part
-checks outgoing messages and state.
-*/
-package raft
-
-import (
- "fmt"
- "reflect"
- "sort"
- "testing"
-
- pb "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-func TestFollowerUpdateTermFromMessage(t *testing.T) {
- testUpdateTermFromMessage(t, StateFollower)
-}
-func TestCandidateUpdateTermFromMessage(t *testing.T) {
- testUpdateTermFromMessage(t, StateCandidate)
-}
-func TestLeaderUpdateTermFromMessage(t *testing.T) {
- testUpdateTermFromMessage(t, StateLeader)
-}
-
-// testUpdateTermFromMessage tests that if one server's current term is
-// smaller than the other's, then it updates its current term to the larger
-// value. If a candidate or leader discovers that its term is out of date,
-// it immediately reverts to follower state.
-// Reference: section 5.1
-func testUpdateTermFromMessage(t *testing.T, state StateType) {
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- switch state {
- case StateFollower:
- r.becomeFollower(1, 2)
- case StateCandidate:
- r.becomeCandidate()
- case StateLeader:
- r.becomeCandidate()
- r.becomeLeader()
- }
-
- r.Step(pb.Message{Type: pb.MsgApp, Term: 2})
-
- if r.Term != 2 {
- t.Errorf("term = %d, want %d", r.Term, 2)
- }
- if r.state != StateFollower {
- t.Errorf("state = %v, want %v", r.state, StateFollower)
- }
-}
-
-// TestRejectStaleTermMessage tests that if a server receives a request with
-// a stale term number, it rejects the request.
-// Our implementation ignores the request instead.
-// Reference: section 5.1
-func TestRejectStaleTermMessage(t *testing.T) {
- called := false
- fakeStep := func(r *raft, m pb.Message) error {
- called = true
- return nil
- }
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- r.step = fakeStep
- r.loadState(pb.HardState{Term: 2})
-
- r.Step(pb.Message{Type: pb.MsgApp, Term: r.Term - 1})
-
- if called {
- t.Errorf("stepFunc called = %v, want %v", called, false)
- }
-}
-
-// TestStartAsFollower tests that when servers start up, they begin as followers.
-// Reference: section 5.2
-func TestStartAsFollower(t *testing.T) {
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- if r.state != StateFollower {
- t.Errorf("state = %s, want %s", r.state, StateFollower)
- }
-}
-
-// TestLeaderBcastBeat tests that if the leader receives a heartbeat tick,
-// it will send a MsgHeartbeat with m.Index = 0, m.LogTerm=0 and empty entries
-// as heartbeat to all followers.
-// Reference: section 5.2
-func TestLeaderBcastBeat(t *testing.T) {
- // heartbeat interval
- hi := 1
- r := newTestRaft(1, 10, hi, newTestMemoryStorage(withPeers(1, 2, 3)))
- r.becomeCandidate()
- r.becomeLeader()
- for i := 0; i < 10; i++ {
- mustAppendEntry(r, pb.Entry{Index: uint64(i) + 1})
- }
-
- for i := 0; i < hi; i++ {
- r.tick()
- }
-
- msgs := r.readMessages()
- sort.Sort(messageSlice(msgs))
- wmsgs := []pb.Message{
- {From: 1, To: 2, Term: 1, Type: pb.MsgHeartbeat},
- {From: 1, To: 3, Term: 1, Type: pb.MsgHeartbeat},
- }
- if !reflect.DeepEqual(msgs, wmsgs) {
- t.Errorf("msgs = %v, want %v", msgs, wmsgs)
- }
-}
-
-func TestFollowerStartElection(t *testing.T) {
- testNonleaderStartElection(t, StateFollower)
-}
-func TestCandidateStartNewElection(t *testing.T) {
- testNonleaderStartElection(t, StateCandidate)
-}
-
-// testNonleaderStartElection tests that if a follower receives no communication
-// over election timeout, it begins an election to choose a new leader. It
-// increments its current term and transitions to candidate state. It then
-// votes for itself and issues RequestVote RPCs in parallel to each of the
-// other servers in the cluster.
-// Reference: section 5.2
-// Also if a candidate fails to obtain a majority, it will time out and
-// start a new election by incrementing its term and initiating another
-// round of RequestVote RPCs.
-// Reference: section 5.2
-func testNonleaderStartElection(t *testing.T, state StateType) {
- // election timeout
- et := 10
- r := newTestRaft(1, et, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- switch state {
- case StateFollower:
- r.becomeFollower(1, 2)
- case StateCandidate:
- r.becomeCandidate()
- }
-
- for i := 1; i < 2*et; i++ {
- r.tick()
- }
-
- if r.Term != 2 {
- t.Errorf("term = %d, want 2", r.Term)
- }
- if r.state != StateCandidate {
- t.Errorf("state = %s, want %s", r.state, StateCandidate)
- }
- if !r.prs.Votes[r.id] {
- t.Errorf("vote for self = false, want true")
- }
- msgs := r.readMessages()
- sort.Sort(messageSlice(msgs))
- wmsgs := []pb.Message{
- {From: 1, To: 2, Term: 2, Type: pb.MsgVote},
- {From: 1, To: 3, Term: 2, Type: pb.MsgVote},
- }
- if !reflect.DeepEqual(msgs, wmsgs) {
- t.Errorf("msgs = %v, want %v", msgs, wmsgs)
- }
-}
-
-// TestLeaderElectionInOneRoundRPC tests all cases that may happen in
-// leader election during one round of RequestVote RPC:
-// a) it wins the election
-// b) it loses the election
-// c) it is unclear about the result
-// Reference: section 5.2
-func TestLeaderElectionInOneRoundRPC(t *testing.T) {
- tests := []struct {
- size int
- votes map[uint64]bool
- state StateType
- }{
- // win the election when receiving votes from a majority of the servers
- {1, map[uint64]bool{}, StateLeader},
- {3, map[uint64]bool{2: true, 3: true}, StateLeader},
- {3, map[uint64]bool{2: true}, StateLeader},
- {5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, StateLeader},
- {5, map[uint64]bool{2: true, 3: true, 4: true}, StateLeader},
- {5, map[uint64]bool{2: true, 3: true}, StateLeader},
-
- // return to follower state if it receives vote denial from a majority
- {3, map[uint64]bool{2: false, 3: false}, StateFollower},
- {5, map[uint64]bool{2: false, 3: false, 4: false, 5: false}, StateFollower},
- {5, map[uint64]bool{2: true, 3: false, 4: false, 5: false}, StateFollower},
-
- // stay in candidate if it does not obtain the majority
- {3, map[uint64]bool{}, StateCandidate},
- {5, map[uint64]bool{2: true}, StateCandidate},
- {5, map[uint64]bool{2: false, 3: false}, StateCandidate},
- {5, map[uint64]bool{}, StateCandidate},
- }
- for i, tt := range tests {
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(idsBySize(tt.size)...)))
-
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
- for id, vote := range tt.votes {
- r.Step(pb.Message{From: id, To: 1, Term: r.Term, Type: pb.MsgVoteResp, Reject: !vote})
- }
-
- if r.state != tt.state {
- t.Errorf("#%d: state = %s, want %s", i, r.state, tt.state)
- }
- if g := r.Term; g != 1 {
- t.Errorf("#%d: term = %d, want %d", i, g, 1)
- }
- }
-}
-
-// TestFollowerVote tests that each follower will vote for at most one
-// candidate in a given term, on a first-come-first-served basis.
-// Reference: section 5.2
-func TestFollowerVote(t *testing.T) {
- tests := []struct {
- vote uint64
- nvote uint64
- wreject bool
- }{
- {None, 1, false},
- {None, 2, false},
- {1, 1, false},
- {2, 2, false},
- {1, 2, true},
- {2, 1, true},
- }
- for i, tt := range tests {
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- r.loadState(pb.HardState{Term: 1, Vote: tt.vote})
-
- r.Step(pb.Message{From: tt.nvote, To: 1, Term: 1, Type: pb.MsgVote})
-
- msgs := r.readMessages()
- wmsgs := []pb.Message{
- {From: 1, To: tt.nvote, Term: 1, Type: pb.MsgVoteResp, Reject: tt.wreject},
- }
- if !reflect.DeepEqual(msgs, wmsgs) {
- t.Errorf("#%d: msgs = %v, want %v", i, msgs, wmsgs)
- }
- }
-}
-
-// TestCandidateFallback tests that while waiting for votes,
-// if a candidate receives an AppendEntries RPC from another server claiming
-// to be leader whose term is at least as large as the candidate's current term,
-// it recognizes the leader as legitimate and returns to follower state.
-// Reference: section 5.2
-func TestCandidateFallback(t *testing.T) {
- tests := []pb.Message{
- {From: 2, To: 1, Term: 1, Type: pb.MsgApp},
- {From: 2, To: 1, Term: 2, Type: pb.MsgApp},
- }
- for i, tt := range tests {
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
- if r.state != StateCandidate {
- t.Fatalf("unexpected state = %s, want %s", r.state, StateCandidate)
- }
-
- r.Step(tt)
-
- if g := r.state; g != StateFollower {
- t.Errorf("#%d: state = %s, want %s", i, g, StateFollower)
- }
- if g := r.Term; g != tt.Term {
- t.Errorf("#%d: term = %d, want %d", i, g, tt.Term)
- }
- }
-}
-
-func TestFollowerElectionTimeoutRandomized(t *testing.T) {
- SetLogger(discardLogger)
- defer SetLogger(defaultLogger)
- testNonleaderElectionTimeoutRandomized(t, StateFollower)
-}
-func TestCandidateElectionTimeoutRandomized(t *testing.T) {
- SetLogger(discardLogger)
- defer SetLogger(defaultLogger)
- testNonleaderElectionTimeoutRandomized(t, StateCandidate)
-}
-
-// testNonleaderElectionTimeoutRandomized tests that election timeout for
-// follower or candidate is randomized.
-// Reference: section 5.2
-func testNonleaderElectionTimeoutRandomized(t *testing.T, state StateType) {
- et := 10
- r := newTestRaft(1, et, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- timeouts := make(map[int]bool)
- for round := 0; round < 50*et; round++ {
- switch state {
- case StateFollower:
- r.becomeFollower(r.Term+1, 2)
- case StateCandidate:
- r.becomeCandidate()
- }
-
- time := 0
- for len(r.readMessages()) == 0 {
- r.tick()
- time++
- }
- timeouts[time] = true
- }
-
- for d := et + 1; d < 2*et; d++ {
- if !timeouts[d] {
- t.Errorf("timeout in %d ticks should happen", d)
- }
- }
-}
-
-func TestFollowersElectionTimeoutNonconflict(t *testing.T) {
- SetLogger(discardLogger)
- defer SetLogger(defaultLogger)
- testNonleadersElectionTimeoutNonconflict(t, StateFollower)
-}
-func TestCandidatesElectionTimeoutNonconflict(t *testing.T) {
- SetLogger(discardLogger)
- defer SetLogger(defaultLogger)
- testNonleadersElectionTimeoutNonconflict(t, StateCandidate)
-}
-
-// testNonleadersElectionTimeoutNonconflict tests that in most cases only a
-// single server (follower or candidate) will time out, which reduces the
-// likelihood of split vote in the new election.
-// Reference: section 5.2
-func testNonleadersElectionTimeoutNonconflict(t *testing.T, state StateType) {
- et := 10
- size := 5
- rs := make([]*raft, size)
- ids := idsBySize(size)
- for k := range rs {
- rs[k] = newTestRaft(ids[k], et, 1, newTestMemoryStorage(withPeers(ids...)))
- }
- conflicts := 0
- for round := 0; round < 1000; round++ {
- for _, r := range rs {
- switch state {
- case StateFollower:
- r.becomeFollower(r.Term+1, None)
- case StateCandidate:
- r.becomeCandidate()
- }
- }
-
- timeoutNum := 0
- for timeoutNum == 0 {
- for _, r := range rs {
- r.tick()
- if len(r.readMessages()) > 0 {
- timeoutNum++
- }
- }
- }
- // several rafts time out at the same tick
- if timeoutNum > 1 {
- conflicts++
- }
- }
-
- if g := float64(conflicts) / 1000; g > 0.3 {
- t.Errorf("probability of conflicts = %v, want <= 0.3", g)
- }
-}
-
-// TestLeaderStartReplication tests that when receiving client proposals,
-// the leader appends the proposal to its log as a new entry, then issues
-// AppendEntries RPCs in parallel to each of the other servers to replicate
-// the entry. Also, when sending an AppendEntries RPC, the leader includes
-// the index and term of the entry in its log that immediately precedes
-// the new entries.
-// Also, it writes the new entry into stable storage.
-// Reference: section 5.3
-func TestLeaderStartReplication(t *testing.T) {
- s := newTestMemoryStorage(withPeers(1, 2, 3))
- r := newTestRaft(1, 10, 1, s)
- r.becomeCandidate()
- r.becomeLeader()
- commitNoopEntry(r, s)
- li := r.raftLog.lastIndex()
-
- ents := []pb.Entry{{Data: []byte("some data")}}
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: ents})
-
- if g := r.raftLog.lastIndex(); g != li+1 {
- t.Errorf("lastIndex = %d, want %d", g, li+1)
- }
- if g := r.raftLog.committed; g != li {
- t.Errorf("committed = %d, want %d", g, li)
- }
- msgs := r.readMessages()
- sort.Sort(messageSlice(msgs))
- wents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte("some data")}}
- wmsgs := []pb.Message{
- {From: 1, To: 2, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},
- {From: 1, To: 3, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},
- }
- if !reflect.DeepEqual(msgs, wmsgs) {
- t.Errorf("msgs = %+v, want %+v", msgs, wmsgs)
- }
- if g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, wents) {
- t.Errorf("ents = %+v, want %+v", g, wents)
- }
-}
-
-// TestLeaderCommitEntry tests that when the entry has been safely replicated,
-// the leader gives out the applied entries, which can be applied to its state
-// machine.
-// Also, the leader keeps track of the highest index it knows to be committed,
-// and it includes that index in future AppendEntries RPCs so that the other
-// servers eventually find out.
-// Reference: section 5.3
-func TestLeaderCommitEntry(t *testing.T) {
- s := newTestMemoryStorage(withPeers(1, 2, 3))
- r := newTestRaft(1, 10, 1, s)
- r.becomeCandidate()
- r.becomeLeader()
- commitNoopEntry(r, s)
- li := r.raftLog.lastIndex()
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
-
- for _, m := range r.readMessages() {
- r.Step(acceptAndReply(m))
- }
-
- if g := r.raftLog.committed; g != li+1 {
- t.Errorf("committed = %d, want %d", g, li+1)
- }
- wents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte("some data")}}
- if g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {
- t.Errorf("nextEnts = %+v, want %+v", g, wents)
- }
- msgs := r.readMessages()
- sort.Sort(messageSlice(msgs))
- for i, m := range msgs {
- if w := uint64(i + 2); m.To != w {
- t.Errorf("to = %x, want %x", m.To, w)
- }
- if m.Type != pb.MsgApp {
- t.Errorf("type = %v, want %v", m.Type, pb.MsgApp)
- }
- if m.Commit != li+1 {
- t.Errorf("commit = %d, want %d", m.Commit, li+1)
- }
- }
-}
-
-// TestLeaderAcknowledgeCommit tests that a log entry is committed once the
-// leader that created the entry has replicated it on a majority of the servers.
-// Reference: section 5.3
-func TestLeaderAcknowledgeCommit(t *testing.T) {
- tests := []struct {
- size int
- acceptors map[uint64]bool
- wack bool
- }{
- {1, nil, true},
- {3, nil, false},
- {3, map[uint64]bool{2: true}, true},
- {3, map[uint64]bool{2: true, 3: true}, true},
- {5, nil, false},
- {5, map[uint64]bool{2: true}, false},
- {5, map[uint64]bool{2: true, 3: true}, true},
- {5, map[uint64]bool{2: true, 3: true, 4: true}, true},
- {5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, true},
- }
- for i, tt := range tests {
- s := newTestMemoryStorage(withPeers(idsBySize(tt.size)...))
- r := newTestRaft(1, 10, 1, s)
- r.becomeCandidate()
- r.becomeLeader()
- commitNoopEntry(r, s)
- li := r.raftLog.lastIndex()
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
-
- for _, m := range r.readMessages() {
- if tt.acceptors[m.To] {
- r.Step(acceptAndReply(m))
- }
- }
-
- if g := r.raftLog.committed > li; g != tt.wack {
- t.Errorf("#%d: ack commit = %v, want %v", i, g, tt.wack)
- }
- }
-}
-
-// TestLeaderCommitPrecedingEntries tests that when the leader commits a log entry,
-// it also commits all preceding entries in the leader's log, including
-// entries created by previous leaders.
-// Also, it applies the entry to its local state machine (in log order).
-// Reference: section 5.3
-func TestLeaderCommitPrecedingEntries(t *testing.T) {
- tests := [][]pb.Entry{
- {},
- {{Term: 2, Index: 1}},
- {{Term: 1, Index: 1}, {Term: 2, Index: 2}},
- {{Term: 1, Index: 1}},
- }
- for i, tt := range tests {
- storage := newTestMemoryStorage(withPeers(1, 2, 3))
- storage.Append(tt)
- r := newTestRaft(1, 10, 1, storage)
- r.loadState(pb.HardState{Term: 2})
- r.becomeCandidate()
- r.becomeLeader()
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
-
- for _, m := range r.readMessages() {
- r.Step(acceptAndReply(m))
- }
-
- li := uint64(len(tt))
- wents := append(tt, pb.Entry{Term: 3, Index: li + 1}, pb.Entry{Term: 3, Index: li + 2, Data: []byte("some data")})
- if g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {
- t.Errorf("#%d: ents = %+v, want %+v", i, g, wents)
- }
- }
-}
-
-// TestFollowerCommitEntry tests that once a follower learns that a log entry
-// is committed, it applies the entry to its local state machine (in log order).
-// Reference: section 5.3
-func TestFollowerCommitEntry(t *testing.T) {
- tests := []struct {
- ents []pb.Entry
- commit uint64
- }{
- {
- []pb.Entry{
- {Term: 1, Index: 1, Data: []byte("some data")},
- },
- 1,
- },
- {
- []pb.Entry{
- {Term: 1, Index: 1, Data: []byte("some data")},
- {Term: 1, Index: 2, Data: []byte("some data2")},
- },
- 2,
- },
- {
- []pb.Entry{
- {Term: 1, Index: 1, Data: []byte("some data2")},
- {Term: 1, Index: 2, Data: []byte("some data")},
- },
- 2,
- },
- {
- []pb.Entry{
- {Term: 1, Index: 1, Data: []byte("some data")},
- {Term: 1, Index: 2, Data: []byte("some data2")},
- },
- 1,
- },
- }
- for i, tt := range tests {
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- r.becomeFollower(1, 2)
-
- r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 1, Entries: tt.ents, Commit: tt.commit})
-
- if g := r.raftLog.committed; g != tt.commit {
- t.Errorf("#%d: committed = %d, want %d", i, g, tt.commit)
- }
- wents := tt.ents[:int(tt.commit)]
- if g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {
- t.Errorf("#%d: nextEnts = %v, want %v", i, g, wents)
- }
- }
-}
-
-// TestFollowerCheckMsgApp tests that if the follower does not find an
-// entry in its log with the same index and term as the one in AppendEntries RPC,
-// then it refuses the new entries. Otherwise it replies that it accepts the
-// append entries.
-// Reference: section 5.3
-func TestFollowerCheckMsgApp(t *testing.T) {
- ents := []pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}}
- tests := []struct {
- term uint64
- index uint64
- windex uint64
- wreject bool
- wrejectHint uint64
- wlogterm uint64
- }{
- // match with committed entries
- {0, 0, 1, false, 0, 0},
- {ents[0].Term, ents[0].Index, 1, false, 0, 0},
- // match with uncommitted entries
- {ents[1].Term, ents[1].Index, 2, false, 0, 0},
-
-		// mismatch with an existing entry
- {ents[0].Term, ents[1].Index, ents[1].Index, true, 1, 1},
-		// nonexistent entry
- {ents[1].Term + 1, ents[1].Index + 1, ents[1].Index + 1, true, 2, 2},
- }
- for i, tt := range tests {
- storage := newTestMemoryStorage(withPeers(1, 2, 3))
- storage.Append(ents)
- r := newTestRaft(1, 10, 1, storage)
- r.loadState(pb.HardState{Commit: 1})
- r.becomeFollower(2, 2)
-
- r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index})
-
- msgs := r.readMessages()
- wmsgs := []pb.Message{
- {From: 1, To: 2, Type: pb.MsgAppResp, Term: 2, Index: tt.windex, Reject: tt.wreject, RejectHint: tt.wrejectHint, LogTerm: tt.wlogterm},
- }
- if !reflect.DeepEqual(msgs, wmsgs) {
- t.Errorf("#%d: msgs = %+v, want %+v", i, msgs, wmsgs)
- }
- }
-}
-
-// TestFollowerAppendEntries tests that when AppendEntries RPC is valid,
-// the follower will delete the existing conflict entry and all that follow it,
-// and append any new entries not already in the log.
-// Also, it writes the new entry into stable storage.
-// Reference: section 5.3
-func TestFollowerAppendEntries(t *testing.T) {
- tests := []struct {
- index, term uint64
- ents []pb.Entry
- wents []pb.Entry
- wunstable []pb.Entry
- }{
- {
- 2, 2,
- []pb.Entry{{Term: 3, Index: 3}},
- []pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}, {Term: 3, Index: 3}},
- []pb.Entry{{Term: 3, Index: 3}},
- },
- {
- 1, 1,
- []pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},
- []pb.Entry{{Term: 1, Index: 1}, {Term: 3, Index: 2}, {Term: 4, Index: 3}},
- []pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},
- },
- {
- 0, 0,
- []pb.Entry{{Term: 1, Index: 1}},
- []pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}},
- nil,
- },
- {
- 0, 0,
- []pb.Entry{{Term: 3, Index: 1}},
- []pb.Entry{{Term: 3, Index: 1}},
- []pb.Entry{{Term: 3, Index: 1}},
- },
- }
- for i, tt := range tests {
- storage := newTestMemoryStorage(withPeers(1, 2, 3))
- storage.Append([]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}})
- r := newTestRaft(1, 10, 1, storage)
- r.becomeFollower(2, 2)
-
- r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index, Entries: tt.ents})
-
- if g := r.raftLog.allEntries(); !reflect.DeepEqual(g, tt.wents) {
- t.Errorf("#%d: ents = %+v, want %+v", i, g, tt.wents)
- }
- if g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, tt.wunstable) {
- t.Errorf("#%d: unstableEnts = %+v, want %+v", i, g, tt.wunstable)
- }
- }
-}
-
-// TestLeaderSyncFollowerLog tests that the leader could bring a follower's log
-// into consistency with its own.
-// Reference: section 5.3, figure 7
-func TestLeaderSyncFollowerLog(t *testing.T) {
- ents := []pb.Entry{
- {},
- {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},
- {Term: 4, Index: 4}, {Term: 4, Index: 5},
- {Term: 5, Index: 6}, {Term: 5, Index: 7},
- {Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},
- }
- term := uint64(8)
- tests := [][]pb.Entry{
- {
- {},
- {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},
- {Term: 4, Index: 4}, {Term: 4, Index: 5},
- {Term: 5, Index: 6}, {Term: 5, Index: 7},
- {Term: 6, Index: 8}, {Term: 6, Index: 9},
- },
- {
- {},
- {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},
- {Term: 4, Index: 4},
- },
- {
- {},
- {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},
- {Term: 4, Index: 4}, {Term: 4, Index: 5},
- {Term: 5, Index: 6}, {Term: 5, Index: 7},
- {Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 6, Index: 11},
- },
- {
- {},
- {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},
- {Term: 4, Index: 4}, {Term: 4, Index: 5},
- {Term: 5, Index: 6}, {Term: 5, Index: 7},
- {Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},
- {Term: 7, Index: 11}, {Term: 7, Index: 12},
- },
- {
- {},
- {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},
- {Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 4, Index: 6}, {Term: 4, Index: 7},
- },
- {
- {},
- {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},
- {Term: 2, Index: 4}, {Term: 2, Index: 5}, {Term: 2, Index: 6},
- {Term: 3, Index: 7}, {Term: 3, Index: 8}, {Term: 3, Index: 9}, {Term: 3, Index: 10}, {Term: 3, Index: 11},
- },
- }
- for i, tt := range tests {
- leadStorage := newTestMemoryStorage(withPeers(1, 2, 3))
- leadStorage.Append(ents)
- lead := newTestRaft(1, 10, 1, leadStorage)
- lead.loadState(pb.HardState{Commit: lead.raftLog.lastIndex(), Term: term})
- followerStorage := newTestMemoryStorage(withPeers(1, 2, 3))
- followerStorage.Append(tt)
- follower := newTestRaft(2, 10, 1, followerStorage)
- follower.loadState(pb.HardState{Term: term - 1})
- // It is necessary to have a three-node cluster.
-		// The second node may have a more up-to-date log than the first one, so the
- // first node needs the vote from the third node to become the leader.
- n := newNetwork(lead, follower, nopStepper)
- n.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
- // The election occurs in the term after the one we loaded with
- // lead.loadState above.
- n.send(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp, Term: term + 1})
-
- n.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
-
- if g := diffu(ltoa(lead.raftLog), ltoa(follower.raftLog)); g != "" {
- t.Errorf("#%d: log diff:\n%s", i, g)
- }
- }
-}
-
-// TestVoteRequest tests that the vote request includes information about the candidate's log
-// and is sent to all of the other nodes.
-// Reference: section 5.4.1
-func TestVoteRequest(t *testing.T) {
- tests := []struct {
- ents []pb.Entry
- wterm uint64
- }{
- {[]pb.Entry{{Term: 1, Index: 1}}, 2},
- {[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}}, 3},
- }
- for j, tt := range tests {
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- r.Step(pb.Message{
- From: 2, To: 1, Type: pb.MsgApp, Term: tt.wterm - 1, LogTerm: 0, Index: 0, Entries: tt.ents,
- })
- r.readMessages()
-
- for i := 1; i < r.electionTimeout*2; i++ {
- r.tickElection()
- }
-
- msgs := r.readMessages()
- sort.Sort(messageSlice(msgs))
- if len(msgs) != 2 {
- t.Fatalf("#%d: len(msg) = %d, want %d", j, len(msgs), 2)
- }
- for i, m := range msgs {
- if m.Type != pb.MsgVote {
- t.Errorf("#%d: msgType = %d, want %d", i, m.Type, pb.MsgVote)
- }
- if m.To != uint64(i+2) {
- t.Errorf("#%d: to = %d, want %d", i, m.To, i+2)
- }
- if m.Term != tt.wterm {
- t.Errorf("#%d: term = %d, want %d", i, m.Term, tt.wterm)
- }
- windex, wlogterm := tt.ents[len(tt.ents)-1].Index, tt.ents[len(tt.ents)-1].Term
- if m.Index != windex {
- t.Errorf("#%d: index = %d, want %d", i, m.Index, windex)
- }
- if m.LogTerm != wlogterm {
- t.Errorf("#%d: logterm = %d, want %d", i, m.LogTerm, wlogterm)
- }
- }
- }
-}
-
-// TestVoter tests the voter denies its vote if its own log is more up-to-date
-// than that of the candidate.
-// Reference: section 5.4.1
-func TestVoter(t *testing.T) {
- tests := []struct {
- ents []pb.Entry
- logterm uint64
- index uint64
-
- wreject bool
- }{
- // same logterm
- {[]pb.Entry{{Term: 1, Index: 1}}, 1, 1, false},
- {[]pb.Entry{{Term: 1, Index: 1}}, 1, 2, false},
- {[]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, 1, 1, true},
- // candidate higher logterm
- {[]pb.Entry{{Term: 1, Index: 1}}, 2, 1, false},
- {[]pb.Entry{{Term: 1, Index: 1}}, 2, 2, false},
- {[]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, 2, 1, false},
- // voter higher logterm
- {[]pb.Entry{{Term: 2, Index: 1}}, 1, 1, true},
- {[]pb.Entry{{Term: 2, Index: 1}}, 1, 2, true},
- {[]pb.Entry{{Term: 2, Index: 1}, {Term: 1, Index: 2}}, 1, 1, true},
- }
- for i, tt := range tests {
- storage := newTestMemoryStorage(withPeers(1, 2))
- storage.Append(tt.ents)
- r := newTestRaft(1, 10, 1, storage)
-
- r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgVote, Term: 3, LogTerm: tt.logterm, Index: tt.index})
-
- msgs := r.readMessages()
- if len(msgs) != 1 {
- t.Fatalf("#%d: len(msg) = %d, want %d", i, len(msgs), 1)
- }
- m := msgs[0]
- if m.Type != pb.MsgVoteResp {
- t.Errorf("#%d: msgType = %d, want %d", i, m.Type, pb.MsgVoteResp)
- }
- if m.Reject != tt.wreject {
- t.Errorf("#%d: reject = %t, want %t", i, m.Reject, tt.wreject)
- }
- }
-}
-
-// TestLeaderOnlyCommitsLogFromCurrentTerm tests that only log entries from the leader's
-// current term are committed by counting replicas.
-// Reference: section 5.4.2
-func TestLeaderOnlyCommitsLogFromCurrentTerm(t *testing.T) {
- ents := []pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}}
- tests := []struct {
- index uint64
- wcommit uint64
- }{
- // do not commit log entries in previous terms
- {1, 0},
- {2, 0},
- // commit log in current term
- {3, 3},
- }
- for i, tt := range tests {
- storage := newTestMemoryStorage(withPeers(1, 2))
- storage.Append(ents)
- r := newTestRaft(1, 10, 1, storage)
- r.loadState(pb.HardState{Term: 2})
- // become leader at term 3
- r.becomeCandidate()
- r.becomeLeader()
- r.readMessages()
-		// propose an entry in the current term
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
-
- r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgAppResp, Term: r.Term, Index: tt.index})
- if r.raftLog.committed != tt.wcommit {
- t.Errorf("#%d: commit = %d, want %d", i, r.raftLog.committed, tt.wcommit)
- }
- }
-}
-
-type messageSlice []pb.Message
-
-func (s messageSlice) Len() int { return len(s) }
-func (s messageSlice) Less(i, j int) bool { return fmt.Sprint(s[i]) < fmt.Sprint(s[j]) }
-func (s messageSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-func commitNoopEntry(r *raft, s *MemoryStorage) {
- if r.state != StateLeader {
- panic("it should only be used when it is the leader")
- }
- r.bcastAppend()
- // simulate the response of MsgApp
- msgs := r.readMessages()
- for _, m := range msgs {
- if m.Type != pb.MsgApp || len(m.Entries) != 1 || m.Entries[0].Data != nil {
- panic("not a message to append noop entry")
- }
- r.Step(acceptAndReply(m))
- }
- // ignore further messages to refresh followers' commit index
- r.readMessages()
- s.Append(r.raftLog.unstableEntries())
- r.raftLog.appliedTo(r.raftLog.committed)
- r.raftLog.stableTo(r.raftLog.lastIndex(), r.raftLog.lastTerm())
-}
-
-func acceptAndReply(m pb.Message) pb.Message {
- if m.Type != pb.MsgApp {
- panic("type should be MsgApp")
- }
- return pb.Message{
- From: m.To,
- To: m.From,
- Term: m.Term,
- Type: pb.MsgAppResp,
- Index: m.Index + uint64(len(m.Entries)),
- }
-}
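
The paper-scenario tests deleted above drive the internal raft state machine directly through `Step`, `tick` and `readMessages`. For orientation, a rough equivalent of the single-node election path exercised by those tests, expressed against the public `Node` API, is sketched below. It is not part of this change; the import path and configuration values are assumptions based on `go.etcd.io/etcd/raft/v3` as it existed before this removal.

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/v3"
)

func main() {
	storage := raft.NewMemoryStorage()
	cfg := &raft.Config{
		ID:              1,
		ElectionTick:    10, // same election timeout the deleted tests use
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   1024 * 1024,
		MaxInflightMsgs: 256,
	}
	// A single-voter node can win an election on its own once its
	// election timeout elapses, which mirrors the behavior checked by
	// TestStartAsFollower and testNonleaderStartElection above.
	n := raft.StartNode(cfg, []raft.Peer{{ID: 1}})
	defer n.Stop()

	for i := 0; i < 2*cfg.ElectionTick; i++ {
		n.Tick()
		// Persist whatever the node hands back; the deleted tests simulate
		// this step with MemoryStorage.Append and readMessages.
		select {
		case rd := <-n.Ready():
			if !raft.IsEmptyHardState(rd.HardState) {
				storage.SetHardState(rd.HardState)
			}
			storage.Append(rd.Entries)
			n.Advance()
		default:
		}
	}

	fmt.Println("state:", n.Status().RaftState)
}
```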
diff --git a/raft/raft_snap_test.go b/raft/raft_snap_test.go
deleted file mode 100644
index 6b2afeebd93..00000000000
--- a/raft/raft_snap_test.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "testing"
-
- pb "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-var (
- testingSnap = pb.Snapshot{
- Metadata: pb.SnapshotMetadata{
- Index: 11, // magic number
- Term: 11, // magic number
- ConfState: pb.ConfState{Voters: []uint64{1, 2}},
- },
- }
-)
-
-func TestSendingSnapshotSetPendingSnapshot(t *testing.T) {
- storage := newTestMemoryStorage(withPeers(1))
- sm := newTestRaft(1, 10, 1, storage)
- sm.restore(testingSnap)
-
- sm.becomeCandidate()
- sm.becomeLeader()
-
-	// force-set the Next index of node 2 so that
-	// node 2 needs a snapshot
- sm.prs.Progress[2].Next = sm.raftLog.firstIndex()
-
- sm.Step(pb.Message{From: 2, To: 1, Type: pb.MsgAppResp, Index: sm.prs.Progress[2].Next - 1, Reject: true})
- if sm.prs.Progress[2].PendingSnapshot != 11 {
- t.Fatalf("PendingSnapshot = %d, want 11", sm.prs.Progress[2].PendingSnapshot)
- }
-}
-
-func TestPendingSnapshotPauseReplication(t *testing.T) {
- storage := newTestMemoryStorage(withPeers(1, 2))
- sm := newTestRaft(1, 10, 1, storage)
- sm.restore(testingSnap)
-
- sm.becomeCandidate()
- sm.becomeLeader()
-
- sm.prs.Progress[2].BecomeSnapshot(11)
-
- sm.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
- msgs := sm.readMessages()
- if len(msgs) != 0 {
- t.Fatalf("len(msgs) = %d, want 0", len(msgs))
- }
-}
-
-func TestSnapshotFailure(t *testing.T) {
- storage := newTestMemoryStorage(withPeers(1, 2))
- sm := newTestRaft(1, 10, 1, storage)
- sm.restore(testingSnap)
-
- sm.becomeCandidate()
- sm.becomeLeader()
-
- sm.prs.Progress[2].Next = 1
- sm.prs.Progress[2].BecomeSnapshot(11)
-
- sm.Step(pb.Message{From: 2, To: 1, Type: pb.MsgSnapStatus, Reject: true})
- if sm.prs.Progress[2].PendingSnapshot != 0 {
- t.Fatalf("PendingSnapshot = %d, want 0", sm.prs.Progress[2].PendingSnapshot)
- }
- if sm.prs.Progress[2].Next != 1 {
- t.Fatalf("Next = %d, want 1", sm.prs.Progress[2].Next)
- }
- if !sm.prs.Progress[2].ProbeSent {
- t.Errorf("ProbeSent = %v, want true", sm.prs.Progress[2].ProbeSent)
- }
-}
-
-func TestSnapshotSucceed(t *testing.T) {
- storage := newTestMemoryStorage(withPeers(1, 2))
- sm := newTestRaft(1, 10, 1, storage)
- sm.restore(testingSnap)
-
- sm.becomeCandidate()
- sm.becomeLeader()
-
- sm.prs.Progress[2].Next = 1
- sm.prs.Progress[2].BecomeSnapshot(11)
-
- sm.Step(pb.Message{From: 2, To: 1, Type: pb.MsgSnapStatus, Reject: false})
- if sm.prs.Progress[2].PendingSnapshot != 0 {
- t.Fatalf("PendingSnapshot = %d, want 0", sm.prs.Progress[2].PendingSnapshot)
- }
- if sm.prs.Progress[2].Next != 12 {
- t.Fatalf("Next = %d, want 12", sm.prs.Progress[2].Next)
- }
- if !sm.prs.Progress[2].ProbeSent {
- t.Errorf("ProbeSent = %v, want true", sm.prs.Progress[2].ProbeSent)
- }
-}
-
-func TestSnapshotAbort(t *testing.T) {
- storage := newTestMemoryStorage(withPeers(1, 2))
- sm := newTestRaft(1, 10, 1, storage)
- sm.restore(testingSnap)
-
- sm.becomeCandidate()
- sm.becomeLeader()
-
- sm.prs.Progress[2].Next = 1
- sm.prs.Progress[2].BecomeSnapshot(11)
-
- // A successful msgAppResp that has a higher/equal index than the
- // pending snapshot should abort the pending snapshot.
- sm.Step(pb.Message{From: 2, To: 1, Type: pb.MsgAppResp, Index: 11})
- if sm.prs.Progress[2].PendingSnapshot != 0 {
- t.Fatalf("PendingSnapshot = %d, want 0", sm.prs.Progress[2].PendingSnapshot)
- }
-	// The follower entered StateReplicate and the leader sent an append
- // and optimistically updated the progress (so we see 13 instead of 12).
- // There is something to append because the leader appended an empty entry
- // to the log at index 12 when it assumed leadership.
- if sm.prs.Progress[2].Next != 13 {
- t.Fatalf("Next = %d, want 13", sm.prs.Progress[2].Next)
- }
- if n := sm.prs.Progress[2].Inflights.Count(); n != 1 {
- t.Fatalf("expected an inflight message, got %d", n)
- }
-}
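
The snapshot tests deleted above inject `MsgSnapStatus` directly to verify how the leader reacts when a snapshot does or does not reach a follower. An application embedding the library reports the same outcome through `Node.ReportSnapshot`. The sketch below is illustrative only and not code from this repository; the function name and parameters are assumptions.

```go
package main

import "go.etcd.io/etcd/raft/v3"

// reportSnapshotOutcome is a sketch of how an application tells the leader
// whether a snapshot reached a follower; this corresponds to the
// MsgSnapStatus messages stepped into the deleted tests.
func reportSnapshotOutcome(n raft.Node, followerID uint64, sendErr error) {
	if sendErr != nil {
		// Failure clears the pending snapshot and puts the follower back
		// into probe mode, the behavior asserted by TestSnapshotFailure.
		n.ReportSnapshot(followerID, raft.SnapshotFailure)
		return
	}
	// Success lets replication resume from the snapshot's last index,
	// the behavior asserted by TestSnapshotSucceed.
	n.ReportSnapshot(followerID, raft.SnapshotFinish)
}
```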
diff --git a/raft/raft_test.go b/raft/raft_test.go
deleted file mode 100644
index f21670776b7..00000000000
--- a/raft/raft_test.go
+++ /dev/null
@@ -1,4853 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "bytes"
- "fmt"
- "math"
- "math/rand"
- "reflect"
- "strings"
- "testing"
-
- pb "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/raft/v3/tracker"
-)
-
-// nextEnts returns the entries that are ready to be applied and updates the applied index
-func nextEnts(r *raft, s *MemoryStorage) (ents []pb.Entry) {
- // Transfer all unstable entries to "stable" storage.
- s.Append(r.raftLog.unstableEntries())
- r.raftLog.stableTo(r.raftLog.lastIndex(), r.raftLog.lastTerm())
-
- ents = r.raftLog.nextEnts()
- r.raftLog.appliedTo(r.raftLog.committed)
- return ents
-}
-
-func mustAppendEntry(r *raft, ents ...pb.Entry) {
- if !r.appendEntry(ents...) {
- panic("entry unexpectedly dropped")
- }
-}
-
-type stateMachine interface {
- Step(m pb.Message) error
- readMessages() []pb.Message
-}
-
-func (r *raft) readMessages() []pb.Message {
- msgs := r.msgs
- r.msgs = make([]pb.Message, 0)
-
- return msgs
-}
-
-func TestProgressLeader(t *testing.T) {
- r := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2)))
- r.becomeCandidate()
- r.becomeLeader()
- r.prs.Progress[2].BecomeReplicate()
-
- // Send proposals to r1. The first 5 entries should be appended to the log.
- propMsg := pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("foo")}}}
- for i := 0; i < 5; i++ {
- if pr := r.prs.Progress[r.id]; pr.State != tracker.StateReplicate || pr.Match != uint64(i+1) || pr.Next != pr.Match+1 {
- t.Errorf("unexpected progress %v", pr)
- }
- if err := r.Step(propMsg); err != nil {
- t.Fatalf("proposal resulted in error: %v", err)
- }
- }
-}
-
-// TestProgressResumeByHeartbeatResp ensures that a heartbeat response resets the paused (ProbeSent) state of the follower's progress.
-func TestProgressResumeByHeartbeatResp(t *testing.T) {
- r := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2)))
- r.becomeCandidate()
- r.becomeLeader()
-
- r.prs.Progress[2].ProbeSent = true
-
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})
- if !r.prs.Progress[2].ProbeSent {
- t.Errorf("paused = %v, want true", r.prs.Progress[2].ProbeSent)
- }
-
- r.prs.Progress[2].BecomeReplicate()
- r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeatResp})
- if r.prs.Progress[2].ProbeSent {
- t.Errorf("paused = %v, want false", r.prs.Progress[2].ProbeSent)
- }
-}
-
-func TestProgressPaused(t *testing.T) {
- r := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2)))
- r.becomeCandidate()
- r.becomeLeader()
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
-
- ms := r.readMessages()
- if len(ms) != 1 {
- t.Errorf("len(ms) = %d, want 1", len(ms))
- }
-}
-
-func TestProgressFlowControl(t *testing.T) {
- cfg := newTestConfig(1, 5, 1, newTestMemoryStorage(withPeers(1, 2)))
- cfg.MaxInflightMsgs = 3
- cfg.MaxSizePerMsg = 2048
- r := newRaft(cfg)
- r.becomeCandidate()
- r.becomeLeader()
-
- // Throw away all the messages relating to the initial election.
- r.readMessages()
-
- // While node 2 is in probe state, propose a bunch of entries.
- r.prs.Progress[2].BecomeProbe()
- blob := []byte(strings.Repeat("a", 1000))
- for i := 0; i < 10; i++ {
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: blob}}})
- }
-
- ms := r.readMessages()
- // First append has two entries: the empty entry to confirm the
- // election, and the first proposal (only one proposal gets sent
- // because we're in probe state).
- if len(ms) != 1 || ms[0].Type != pb.MsgApp {
- t.Fatalf("expected 1 MsgApp, got %v", ms)
- }
- if len(ms[0].Entries) != 2 {
- t.Fatalf("expected 2 entries, got %d", len(ms[0].Entries))
- }
- if len(ms[0].Entries[0].Data) != 0 || len(ms[0].Entries[1].Data) != 1000 {
- t.Fatalf("unexpected entry sizes: %v", ms[0].Entries)
- }
-
- // When this append is acked, we change to replicate state and can
- // send multiple messages at once.
- r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgAppResp, Index: ms[0].Entries[1].Index})
- ms = r.readMessages()
- if len(ms) != 3 {
- t.Fatalf("expected 3 messages, got %d", len(ms))
- }
- for i, m := range ms {
- if m.Type != pb.MsgApp {
- t.Errorf("%d: expected MsgApp, got %s", i, m.Type)
- }
- if len(m.Entries) != 2 {
- t.Errorf("%d: expected 2 entries, got %d", i, len(m.Entries))
- }
- }
-
- // Ack all three of those messages together and get the last two
- // messages (containing three entries).
- r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgAppResp, Index: ms[2].Entries[1].Index})
- ms = r.readMessages()
- if len(ms) != 2 {
- t.Fatalf("expected 2 messages, got %d", len(ms))
- }
- for i, m := range ms {
- if m.Type != pb.MsgApp {
- t.Errorf("%d: expected MsgApp, got %s", i, m.Type)
- }
- }
- if len(ms[0].Entries) != 2 {
- t.Errorf("%d: expected 2 entries, got %d", 0, len(ms[0].Entries))
- }
- if len(ms[1].Entries) != 1 {
- t.Errorf("%d: expected 1 entry, got %d", 1, len(ms[1].Entries))
- }
-}
-
-func TestUncommittedEntryLimit(t *testing.T) {
- // Use a relatively large number of entries here to prevent regression of a
- // bug which computed the size before it was fixed. This test would fail
- // with the bug, either because we'd get dropped proposals earlier than we
- // expect them, or because the final tally ends up nonzero. (At the time of
- // writing, the former).
- const maxEntries = 1024
- testEntry := pb.Entry{Data: []byte("testdata")}
- maxEntrySize := maxEntries * PayloadSize(testEntry)
-
- if n := PayloadSize(pb.Entry{Data: nil}); n != 0 {
- t.Fatal("entry with no Data must have zero payload size")
- }
-
- cfg := newTestConfig(1, 5, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- cfg.MaxUncommittedEntriesSize = uint64(maxEntrySize)
- cfg.MaxInflightMsgs = 2 * 1024 // avoid interference
- r := newRaft(cfg)
- r.becomeCandidate()
- r.becomeLeader()
- if n := r.uncommittedSize; n != 0 {
- t.Fatalf("expected zero uncommitted size, got %d bytes", n)
- }
-
- // Set the two followers to the replicate state. Commit to tail of log.
- const numFollowers = 2
- r.prs.Progress[2].BecomeReplicate()
- r.prs.Progress[3].BecomeReplicate()
- r.uncommittedSize = 0
-
-	// Send proposals to r1. All maxEntries proposals should be appended to the log.
- propMsg := pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{testEntry}}
- propEnts := make([]pb.Entry, maxEntries)
- for i := 0; i < maxEntries; i++ {
- if err := r.Step(propMsg); err != nil {
- t.Fatalf("proposal resulted in error: %v", err)
- }
- propEnts[i] = testEntry
- }
-
- // Send one more proposal to r1. It should be rejected.
- if err := r.Step(propMsg); err != ErrProposalDropped {
- t.Fatalf("proposal not dropped: %v", err)
- }
-
- // Read messages and reduce the uncommitted size as if we had committed
- // these entries.
- ms := r.readMessages()
- if e := maxEntries * numFollowers; len(ms) != e {
- t.Fatalf("expected %d messages, got %d", e, len(ms))
- }
- r.reduceUncommittedSize(propEnts)
- if r.uncommittedSize != 0 {
- t.Fatalf("committed everything, but still tracking %d", r.uncommittedSize)
- }
-
- // Send a single large proposal to r1. Should be accepted even though it
- // pushes us above the limit because we were beneath it before the proposal.
- propEnts = make([]pb.Entry, 2*maxEntries)
- for i := range propEnts {
- propEnts[i] = testEntry
- }
- propMsgLarge := pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: propEnts}
- if err := r.Step(propMsgLarge); err != nil {
- t.Fatalf("proposal resulted in error: %v", err)
- }
-
- // Send one more proposal to r1. It should be rejected, again.
- if err := r.Step(propMsg); err != ErrProposalDropped {
- t.Fatalf("proposal not dropped: %v", err)
- }
-
- // But we can always append an entry with no Data. This is used both for the
- // leader's first empty entry and for auto-transitioning out of joint config
- // states.
- if err := r.Step(
- pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}},
- ); err != nil {
- t.Fatal(err)
- }
-
- // Read messages and reduce the uncommitted size as if we had committed
- // these entries.
- ms = r.readMessages()
- if e := 2 * numFollowers; len(ms) != e {
- t.Fatalf("expected %d messages, got %d", e, len(ms))
- }
- r.reduceUncommittedSize(propEnts)
- if n := r.uncommittedSize; n != 0 {
- t.Fatalf("expected zero uncommitted size, got %d", n)
- }
-}
-
-func TestLeaderElection(t *testing.T) {
- testLeaderElection(t, false)
-}
-
-func TestLeaderElectionPreVote(t *testing.T) {
- testLeaderElection(t, true)
-}
-
-func testLeaderElection(t *testing.T, preVote bool) {
- var cfg func(*Config)
- candState := StateCandidate
- candTerm := uint64(1)
- if preVote {
- cfg = preVoteConfig
- // In pre-vote mode, an election that fails to complete
- // leaves the node in pre-candidate state without advancing
- // the term.
- candState = StatePreCandidate
- candTerm = 0
- }
- tests := []struct {
- *network
- state StateType
- expTerm uint64
- }{
- {newNetworkWithConfig(cfg, nil, nil, nil), StateLeader, 1},
- {newNetworkWithConfig(cfg, nil, nil, nopStepper), StateLeader, 1},
- {newNetworkWithConfig(cfg, nil, nopStepper, nopStepper), candState, candTerm},
- {newNetworkWithConfig(cfg, nil, nopStepper, nopStepper, nil), candState, candTerm},
- {newNetworkWithConfig(cfg, nil, nopStepper, nopStepper, nil, nil), StateLeader, 1},
-
- // three logs further along than 0, but in the same term so rejections
- // are returned instead of the votes being ignored.
- {newNetworkWithConfig(cfg,
- nil, entsWithConfig(cfg, 1), entsWithConfig(cfg, 1), entsWithConfig(cfg, 1, 1), nil),
- StateFollower, 1},
- }
-
- for i, tt := range tests {
- tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
- sm := tt.network.peers[1].(*raft)
- if sm.state != tt.state {
- t.Errorf("#%d: state = %s, want %s", i, sm.state, tt.state)
- }
- if g := sm.Term; g != tt.expTerm {
- t.Errorf("#%d: term = %d, want %d", i, g, tt.expTerm)
- }
- }
-}
-
-// TestLearnerElectionTimeout verifies that a learner does not start an election
-// even when its election timeout fires.
-func TestLearnerElectionTimeout(t *testing.T) {
- n1 := newTestLearnerRaft(1, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)))
- n2 := newTestLearnerRaft(2, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)))
-
- n1.becomeFollower(1, None)
- n2.becomeFollower(1, None)
-
- // n2 is learner. Learner should not start election even when times out.
- setRandomizedElectionTimeout(n2, n2.electionTimeout)
- for i := 0; i < n2.electionTimeout; i++ {
- n2.tick()
- }
-
- if n2.state != StateFollower {
- t.Errorf("peer 2 state: %s, want %s", n2.state, StateFollower)
- }
-}
-
-// TestLearnerPromotion verifies that a learner does not start an election until
-// it is promoted to a normal (voting) peer.
-func TestLearnerPromotion(t *testing.T) {
- n1 := newTestLearnerRaft(1, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)))
- n2 := newTestLearnerRaft(2, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)))
-
- n1.becomeFollower(1, None)
- n2.becomeFollower(1, None)
-
- nt := newNetwork(n1, n2)
-
- if n1.state == StateLeader {
- t.Error("peer 1 state is leader, want not", n1.state)
- }
-
- // n1 should become leader
- setRandomizedElectionTimeout(n1, n1.electionTimeout)
- for i := 0; i < n1.electionTimeout; i++ {
- n1.tick()
- }
-
- if n1.state != StateLeader {
- t.Errorf("peer 1 state: %s, want %s", n1.state, StateLeader)
- }
- if n2.state != StateFollower {
- t.Errorf("peer 2 state: %s, want %s", n2.state, StateFollower)
- }
-
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})
-
- n1.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeAddNode}.AsV2())
- n2.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeAddNode}.AsV2())
- if n2.isLearner {
- t.Error("peer 2 is learner, want not")
- }
-
-	// n2 starts an election and should become leader
- setRandomizedElectionTimeout(n2, n2.electionTimeout)
- for i := 0; i < n2.electionTimeout; i++ {
- n2.tick()
- }
-
- nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgBeat})
-
- if n1.state != StateFollower {
- t.Errorf("peer 1 state: %s, want %s", n1.state, StateFollower)
- }
- if n2.state != StateLeader {
- t.Errorf("peer 2 state: %s, want %s", n2.state, StateLeader)
- }
-}
-
-// TestLearnerCanVote checks that a learner can vote when it receives a valid Vote request.
-// See (*raft).Step for why this is necessary and correct behavior.
-func TestLearnerCanVote(t *testing.T) {
- n2 := newTestLearnerRaft(2, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)))
-
- n2.becomeFollower(1, None)
-
- n2.Step(pb.Message{From: 1, To: 2, Term: 2, Type: pb.MsgVote, LogTerm: 11, Index: 11})
-
- if len(n2.msgs) != 1 {
- t.Fatalf("expected exactly one message, not %+v", n2.msgs)
- }
- msg := n2.msgs[0]
-	if msg.Type != pb.MsgVoteResp || msg.Reject {
- t.Fatal("expected learner to not reject vote")
- }
-}
-
-func TestLeaderCycle(t *testing.T) {
- testLeaderCycle(t, false)
-}
-
-func TestLeaderCyclePreVote(t *testing.T) {
- testLeaderCycle(t, true)
-}
-
-// testLeaderCycle verifies that each node in a cluster can campaign
-// and be elected in turn. This ensures that elections (including
-// pre-vote) work when not starting from a clean slate (as they do in
-// TestLeaderElection)
-func testLeaderCycle(t *testing.T, preVote bool) {
- var cfg func(*Config)
- if preVote {
- cfg = preVoteConfig
- }
- n := newNetworkWithConfig(cfg, nil, nil, nil)
- for campaignerID := uint64(1); campaignerID <= 3; campaignerID++ {
- n.send(pb.Message{From: campaignerID, To: campaignerID, Type: pb.MsgHup})
-
- for _, peer := range n.peers {
- sm := peer.(*raft)
- if sm.id == campaignerID && sm.state != StateLeader {
- t.Errorf("preVote=%v: campaigning node %d state = %v, want StateLeader",
- preVote, sm.id, sm.state)
- } else if sm.id != campaignerID && sm.state != StateFollower {
- t.Errorf("preVote=%v: after campaign of node %d, "+
- "node %d had state = %v, want StateFollower",
- preVote, campaignerID, sm.id, sm.state)
- }
- }
- }
-}
-
-// TestLeaderElectionOverwriteNewerLogs tests a scenario in which a
-// newly-elected leader does *not* have the newest (i.e. highest term)
-// log entries, and must overwrite higher-term log entries with
-// lower-term ones.
-func TestLeaderElectionOverwriteNewerLogs(t *testing.T) {
- testLeaderElectionOverwriteNewerLogs(t, false)
-}
-
-func TestLeaderElectionOverwriteNewerLogsPreVote(t *testing.T) {
- testLeaderElectionOverwriteNewerLogs(t, true)
-}
-
-func testLeaderElectionOverwriteNewerLogs(t *testing.T, preVote bool) {
- var cfg func(*Config)
- if preVote {
- cfg = preVoteConfig
- }
- // This network represents the results of the following sequence of
- // events:
- // - Node 1 won the election in term 1.
- // - Node 1 replicated a log entry to node 2 but died before sending
- // it to other nodes.
- // - Node 3 won the second election in term 2.
- // - Node 3 wrote an entry to its logs but died without sending it
- // to any other nodes.
- //
- // At this point, nodes 1, 2, and 3 all have uncommitted entries in
- // their logs and could win an election at term 3. The winner's log
- // entry overwrites the losers'. (TestLeaderSyncFollowerLog tests
- // the case where older log entries are overwritten, so this test
- // focuses on the case where the newer entries are lost).
- n := newNetworkWithConfig(cfg,
- entsWithConfig(cfg, 1), // Node 1: Won first election
- entsWithConfig(cfg, 1), // Node 2: Got logs from node 1
- entsWithConfig(cfg, 2), // Node 3: Won second election
- votedWithConfig(cfg, 3, 2), // Node 4: Voted but didn't get logs
- votedWithConfig(cfg, 3, 2)) // Node 5: Voted but didn't get logs
-
- // Node 1 campaigns. The election fails because a quorum of nodes
- // know about the election that already happened at term 2. Node 1's
- // term is pushed ahead to 2.
- n.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
- sm1 := n.peers[1].(*raft)
- if sm1.state != StateFollower {
- t.Errorf("state = %s, want StateFollower", sm1.state)
- }
- if sm1.Term != 2 {
- t.Errorf("term = %d, want 2", sm1.Term)
- }
-
- // Node 1 campaigns again with a higher term. This time it succeeds.
- n.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
- if sm1.state != StateLeader {
- t.Errorf("state = %s, want StateLeader", sm1.state)
- }
- if sm1.Term != 3 {
- t.Errorf("term = %d, want 3", sm1.Term)
- }
-
- // Now all nodes agree on a log entry with term 1 at index 1 (and
- // term 3 at index 2).
- for i := range n.peers {
- sm := n.peers[i].(*raft)
- entries := sm.raftLog.allEntries()
- if len(entries) != 2 {
- t.Fatalf("node %d: len(entries) == %d, want 2", i, len(entries))
- }
- if entries[0].Term != 1 {
- t.Errorf("node %d: term at index 1 == %d, want 1", i, entries[0].Term)
- }
- if entries[1].Term != 3 {
- t.Errorf("node %d: term at index 2 == %d, want 3", i, entries[1].Term)
- }
- }
-}
-
-func TestVoteFromAnyState(t *testing.T) {
- testVoteFromAnyState(t, pb.MsgVote)
-}
-
-func TestPreVoteFromAnyState(t *testing.T) {
- testVoteFromAnyState(t, pb.MsgPreVote)
-}
-
-func testVoteFromAnyState(t *testing.T, vt pb.MessageType) {
- for st := StateType(0); st < numStates; st++ {
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- r.Term = 1
-
- switch st {
- case StateFollower:
- r.becomeFollower(r.Term, 3)
- case StatePreCandidate:
- r.becomePreCandidate()
- case StateCandidate:
- r.becomeCandidate()
- case StateLeader:
- r.becomeCandidate()
- r.becomeLeader()
- }
-
- // Note that setting our state above may have advanced r.Term
- // past its initial value.
- origTerm := r.Term
- newTerm := r.Term + 1
-
- msg := pb.Message{
- From: 2,
- To: 1,
- Type: vt,
- Term: newTerm,
- LogTerm: newTerm,
- Index: 42,
- }
- if err := r.Step(msg); err != nil {
- t.Errorf("%s,%s: Step failed: %s", vt, st, err)
- }
- if len(r.msgs) != 1 {
- t.Errorf("%s,%s: %d response messages, want 1: %+v", vt, st, len(r.msgs), r.msgs)
- } else {
- resp := r.msgs[0]
- if resp.Type != voteRespMsgType(vt) {
- t.Errorf("%s,%s: response message is %s, want %s",
- vt, st, resp.Type, voteRespMsgType(vt))
- }
- if resp.Reject {
- t.Errorf("%s,%s: unexpected rejection", vt, st)
- }
- }
-
- // If this was a real vote, we reset our state and term.
- if vt == pb.MsgVote {
- if r.state != StateFollower {
- t.Errorf("%s,%s: state %s, want %s", vt, st, r.state, StateFollower)
- }
- if r.Term != newTerm {
- t.Errorf("%s,%s: term %d, want %d", vt, st, r.Term, newTerm)
- }
- if r.Vote != 2 {
- t.Errorf("%s,%s: vote %d, want 2", vt, st, r.Vote)
- }
- } else {
- // In a prevote, nothing changes.
- if r.state != st {
- t.Errorf("%s,%s: state %s, want %s", vt, st, r.state, st)
- }
- if r.Term != origTerm {
- t.Errorf("%s,%s: term %d, want %d", vt, st, r.Term, origTerm)
- }
- // if st == StateFollower or StatePreCandidate, r hasn't voted yet.
- // In StateCandidate or StateLeader, it's voted for itself.
- if r.Vote != None && r.Vote != 1 {
- t.Errorf("%s,%s: vote %d, want %d or 1", vt, st, r.Vote, None)
- }
- }
- }
-}
-
-func TestLogReplication(t *testing.T) {
- tests := []struct {
- *network
- msgs []pb.Message
- wcommitted uint64
- }{
- {
- newNetwork(nil, nil, nil),
- []pb.Message{
- {From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}},
- },
- 2,
- },
- {
- newNetwork(nil, nil, nil),
- []pb.Message{
- {From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}},
- {From: 1, To: 2, Type: pb.MsgHup},
- {From: 1, To: 2, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}},
- },
- 4,
- },
- }
-
- for i, tt := range tests {
- tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- for _, m := range tt.msgs {
- tt.send(m)
- }
-
- for j, x := range tt.network.peers {
- sm := x.(*raft)
-
- if sm.raftLog.committed != tt.wcommitted {
- t.Errorf("#%d.%d: committed = %d, want %d", i, j, sm.raftLog.committed, tt.wcommitted)
- }
-
- ents := []pb.Entry{}
- for _, e := range nextEnts(sm, tt.network.storage[j]) {
- if e.Data != nil {
- ents = append(ents, e)
- }
- }
- props := []pb.Message{}
- for _, m := range tt.msgs {
- if m.Type == pb.MsgProp {
- props = append(props, m)
- }
- }
- for k, m := range props {
- if !bytes.Equal(ents[k].Data, m.Entries[0].Data) {
- t.Errorf("#%d.%d: data = %d, want %d", i, j, ents[k].Data, m.Entries[0].Data)
- }
- }
- }
- }
-}
-
-// TestLearnerLogReplication tests that a learner can receive entries from the leader.
-func TestLearnerLogReplication(t *testing.T) {
- n1 := newTestLearnerRaft(1, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)))
- n2 := newTestLearnerRaft(2, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)))
-
- nt := newNetwork(n1, n2)
-
- n1.becomeFollower(1, None)
- n2.becomeFollower(1, None)
-
- setRandomizedElectionTimeout(n1, n1.electionTimeout)
- for i := 0; i < n1.electionTimeout; i++ {
- n1.tick()
- }
-
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})
-
- // n1 is leader and n2 is learner
- if n1.state != StateLeader {
- t.Errorf("peer 1 state: %s, want %s", n1.state, StateLeader)
- }
- if !n2.isLearner {
- t.Error("peer 2 state: not learner, want yes")
- }
-
- nextCommitted := n1.raftLog.committed + 1
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
- if n1.raftLog.committed != nextCommitted {
- t.Errorf("peer 1 wants committed to %d, but still %d", nextCommitted, n1.raftLog.committed)
- }
-
- if n1.raftLog.committed != n2.raftLog.committed {
- t.Errorf("peer 2 wants committed to %d, but still %d", n1.raftLog.committed, n2.raftLog.committed)
- }
-
- match := n1.prs.Progress[2].Match
- if match != n2.raftLog.committed {
- t.Errorf("progress 2 of leader 1 wants match %d, but got %d", n2.raftLog.committed, match)
- }
-}
-
-func TestSingleNodeCommit(t *testing.T) {
- tt := newNetwork(nil)
- tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
- tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
- tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
-
- sm := tt.peers[1].(*raft)
- if sm.raftLog.committed != 3 {
- t.Errorf("committed = %d, want %d", sm.raftLog.committed, 3)
- }
-}
-
-// TestCannotCommitWithoutNewTermEntry tests that entries cannot be committed
-// when the leader changes, no new proposal comes in, and the ChangeTerm
-// proposal is filtered out.
-func TestCannotCommitWithoutNewTermEntry(t *testing.T) {
- tt := newNetwork(nil, nil, nil, nil, nil)
- tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
-	// 1 cannot reach 3, 4, 5
- tt.cut(1, 3)
- tt.cut(1, 4)
- tt.cut(1, 5)
-
- tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
- tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
-
- sm := tt.peers[1].(*raft)
- if sm.raftLog.committed != 1 {
- t.Errorf("committed = %d, want %d", sm.raftLog.committed, 1)
- }
-
- // network recovery
- tt.recover()
- // avoid committing ChangeTerm proposal
- tt.ignore(pb.MsgApp)
-
- // elect 2 as the new leader with term 2
- tt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
-
- // no log entries from previous term should be committed
- sm = tt.peers[2].(*raft)
- if sm.raftLog.committed != 1 {
- t.Errorf("committed = %d, want %d", sm.raftLog.committed, 1)
- }
-
- tt.recover()
- // send heartbeat; reset wait
- tt.send(pb.Message{From: 2, To: 2, Type: pb.MsgBeat})
- // append an entry at current term
- tt.send(pb.Message{From: 2, To: 2, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
- // expect the committed to be advanced
- if sm.raftLog.committed != 5 {
- t.Errorf("committed = %d, want %d", sm.raftLog.committed, 5)
- }
-}
-
-// TestCommitWithoutNewTermEntry tests that entries can be committed
-// when the leader changes and no new proposal comes in.
-func TestCommitWithoutNewTermEntry(t *testing.T) {
- tt := newNetwork(nil, nil, nil, nil, nil)
- tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- // 1 cannot reach 3,4,5
- tt.cut(1, 3)
- tt.cut(1, 4)
- tt.cut(1, 5)
-
- tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
- tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
-
- sm := tt.peers[1].(*raft)
- if sm.raftLog.committed != 1 {
- t.Errorf("committed = %d, want %d", sm.raftLog.committed, 1)
- }
-
- // network recovery
- tt.recover()
-
- // elect 2 as the new leader with term 2
- // after append a ChangeTerm entry from the current term, all entries
- // should be committed
- tt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
-
- if sm.raftLog.committed != 4 {
- t.Errorf("committed = %d, want %d", sm.raftLog.committed, 4)
- }
-}
-
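-// TestDuelingCandidates verifies that when two nodes campaign across a
-// partition, only the candidate that reaches quorum becomes leader; after
-// the partition heals, the stale candidate's higher-term campaign disrupts
-// the leader but fails because its log is shorter, leaving all three nodes
-// as followers at the new term.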
-func TestDuelingCandidates(t *testing.T) {
- a := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- b := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- c := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
-
- nt := newNetwork(a, b, c)
- nt.cut(1, 3)
-
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
-
- // 1 becomes leader since it receives votes from 1 and 2
- sm := nt.peers[1].(*raft)
- if sm.state != StateLeader {
- t.Errorf("state = %s, want %s", sm.state, StateLeader)
- }
-
- // 3 stays as candidate since it receives a vote from 3 and a rejection from 2
- sm = nt.peers[3].(*raft)
- if sm.state != StateCandidate {
- t.Errorf("state = %s, want %s", sm.state, StateCandidate)
- }
-
- nt.recover()
-
- // candidate 3 now increases its term and tries to vote again.
- // We expect it to disrupt the leader 1 since it has a higher term.
- // 3 will become a follower again since both 1 and 2 reject its vote
- // request, because 3 does not have a long enough log.
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
-
- wlog := &raftLog{
- storage: &MemoryStorage{ents: []pb.Entry{{}, {Data: nil, Term: 1, Index: 1}}},
- committed: 1,
- unstable: unstable{offset: 2},
- }
- tests := []struct {
- sm *raft
- state StateType
- term uint64
- raftLog *raftLog
- }{
- {a, StateFollower, 2, wlog},
- {b, StateFollower, 2, wlog},
- {c, StateFollower, 2, newLog(NewMemoryStorage(), raftLogger)},
- }
-
- for i, tt := range tests {
- if g := tt.sm.state; g != tt.state {
- t.Errorf("#%d: state = %s, want %s", i, g, tt.state)
- }
- if g := tt.sm.Term; g != tt.term {
- t.Errorf("#%d: term = %d, want %d", i, g, tt.term)
- }
- base := ltoa(tt.raftLog)
- if sm, ok := nt.peers[1+uint64(i)].(*raft); ok {
- l := ltoa(sm.raftLog)
- if g := diffu(base, l); g != "" {
- t.Errorf("#%d: diff:\n%s", i, g)
- }
- } else {
- t.Logf("#%d: empty log", i)
- }
- }
-}
-
-func TestDuelingPreCandidates(t *testing.T) {
- cfgA := newTestConfig(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- cfgB := newTestConfig(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- cfgC := newTestConfig(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- cfgA.PreVote = true
- cfgB.PreVote = true
- cfgC.PreVote = true
- a := newRaft(cfgA)
- b := newRaft(cfgB)
- c := newRaft(cfgC)
-
- nt := newNetwork(a, b, c)
- nt.cut(1, 3)
-
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
-
- // 1 becomes leader since it receives votes from 1 and 2
- sm := nt.peers[1].(*raft)
- if sm.state != StateLeader {
- t.Errorf("state = %s, want %s", sm.state, StateLeader)
- }
-
- // 3 campaigns then reverts to follower when its PreVote is rejected
- sm = nt.peers[3].(*raft)
- if sm.state != StateFollower {
- t.Errorf("state = %s, want %s", sm.state, StateFollower)
- }
-
- nt.recover()
-
- // Candidate 3 now increases its term and tries to vote again.
- // With PreVote, it does not disrupt the leader.
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
-
- wlog := &raftLog{
- storage: &MemoryStorage{ents: []pb.Entry{{}, {Data: nil, Term: 1, Index: 1}}},
- committed: 1,
- unstable: unstable{offset: 2},
- }
- tests := []struct {
- sm *raft
- state StateType
- term uint64
- raftLog *raftLog
- }{
- {a, StateLeader, 1, wlog},
- {b, StateFollower, 1, wlog},
- {c, StateFollower, 1, newLog(NewMemoryStorage(), raftLogger)},
- }
-
- for i, tt := range tests {
- if g := tt.sm.state; g != tt.state {
- t.Errorf("#%d: state = %s, want %s", i, g, tt.state)
- }
- if g := tt.sm.Term; g != tt.term {
- t.Errorf("#%d: term = %d, want %d", i, g, tt.term)
- }
- base := ltoa(tt.raftLog)
- if sm, ok := nt.peers[1+uint64(i)].(*raft); ok {
- l := ltoa(sm.raftLog)
- if g := diffu(base, l); g != "" {
- t.Errorf("#%d: diff:\n%s", i, g)
- }
- } else {
- t.Logf("#%d: empty log", i)
- }
- }
-}
-
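-// TestCandidateConcede verifies that a candidate concedes and reverts to
-// follower when it learns of a leader at the same term, and that it then
-// catches up on the leader's log.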
-func TestCandidateConcede(t *testing.T) {
- tt := newNetwork(nil, nil, nil)
- tt.isolate(1)
-
- tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
- tt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
-
- // heal the partition
- tt.recover()
- // send heartbeat; reset wait
- tt.send(pb.Message{From: 3, To: 3, Type: pb.MsgBeat})
-
- data := []byte("force follower")
- // send a proposal to 3 to flush out a MsgApp to 1
- tt.send(pb.Message{From: 3, To: 3, Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
- // send heartbeat; flush out commit
- tt.send(pb.Message{From: 3, To: 3, Type: pb.MsgBeat})
-
- a := tt.peers[1].(*raft)
- if g := a.state; g != StateFollower {
- t.Errorf("state = %s, want %s", g, StateFollower)
- }
- if g := a.Term; g != 1 {
- t.Errorf("term = %d, want %d", g, 1)
- }
- wantLog := ltoa(&raftLog{
- storage: &MemoryStorage{
- ents: []pb.Entry{{}, {Data: nil, Term: 1, Index: 1}, {Term: 1, Index: 2, Data: data}},
- },
- unstable: unstable{offset: 3},
- committed: 2,
- })
- for i, p := range tt.peers {
- if sm, ok := p.(*raft); ok {
- l := ltoa(sm.raftLog)
- if g := diffu(wantLog, l); g != "" {
- t.Errorf("#%d: diff:\n%s", i, g)
- }
- } else {
- t.Logf("#%d: empty log", i)
- }
- }
-}
-
-func TestSingleNodeCandidate(t *testing.T) {
- tt := newNetwork(nil)
- tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- sm := tt.peers[1].(*raft)
- if sm.state != StateLeader {
- t.Errorf("state = %d, want %d", sm.state, StateLeader)
- }
-}
-
-func TestSingleNodePreCandidate(t *testing.T) {
- tt := newNetworkWithConfig(preVoteConfig, nil)
- tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- sm := tt.peers[1].(*raft)
- if sm.state != StateLeader {
- t.Errorf("state = %d, want %d", sm.state, StateLeader)
- }
-}
-
-func TestOldMessages(t *testing.T) {
- tt := newNetwork(nil, nil, nil)
- // make 1 leader @ term 3
- tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
- tt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
- tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
- // pretend we're an old leader trying to make progress; this entry is expected to be ignored.
- tt.send(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 2, Entries: []pb.Entry{{Index: 3, Term: 2}}})
- // commit a new entry
- tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
-
- ilog := &raftLog{
- storage: &MemoryStorage{
- ents: []pb.Entry{
- {}, {Data: nil, Term: 1, Index: 1},
- {Data: nil, Term: 2, Index: 2}, {Data: nil, Term: 3, Index: 3},
- {Data: []byte("somedata"), Term: 3, Index: 4},
- },
- },
- unstable: unstable{offset: 5},
- committed: 4,
- }
- base := ltoa(ilog)
- for i, p := range tt.peers {
- if sm, ok := p.(*raft); ok {
- l := ltoa(sm.raftLog)
- if g := diffu(base, l); g != "" {
- t.Errorf("#%d: diff:\n%s", i, g)
- }
- } else {
- t.Logf("#%d: empty log", i)
- }
- }
-}
-
-// TestOldMessagesReply - optimization - reply with new term.
-
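-// TestProposal verifies that a proposal is committed and replicated only
-// when the leader can reach a quorum; when it cannot, the proposal fails
-// (the test expects and recovers a panic) and the logs remain empty.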
-func TestProposal(t *testing.T) {
- tests := []struct {
- *network
- success bool
- }{
- {newNetwork(nil, nil, nil), true},
- {newNetwork(nil, nil, nopStepper), true},
- {newNetwork(nil, nopStepper, nopStepper), false},
- {newNetwork(nil, nopStepper, nopStepper, nil), false},
- {newNetwork(nil, nopStepper, nopStepper, nil, nil), true},
- }
-
- for j, tt := range tests {
- send := func(m pb.Message) {
- defer func() {
- // only recover if we expect it to panic (success==false)
- if !tt.success {
- e := recover()
- if e != nil {
- t.Logf("#%d: err: %s", j, e)
- }
- }
- }()
- tt.send(m)
- }
-
- data := []byte("somedata")
-
- // promote 1 to become leader
- send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
- send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
-
- wantLog := newLog(NewMemoryStorage(), raftLogger)
- if tt.success {
- wantLog = &raftLog{
- storage: &MemoryStorage{
- ents: []pb.Entry{{}, {Data: nil, Term: 1, Index: 1}, {Term: 1, Index: 2, Data: data}},
- },
- unstable: unstable{offset: 3},
- committed: 2}
- }
- base := ltoa(wantLog)
- for i, p := range tt.peers {
- if sm, ok := p.(*raft); ok {
- l := ltoa(sm.raftLog)
- if g := diffu(base, l); g != "" {
- t.Errorf("#%d: peer %d diff:\n%s", j, i, g)
- }
- } else {
- t.Logf("#%d: peer %d empty log", j, i)
- }
- }
- sm := tt.network.peers[1].(*raft)
- if g := sm.Term; g != 1 {
- t.Errorf("#%d: term = %d, want %d", j, g, 1)
- }
- }
-}
-
-func TestProposalByProxy(t *testing.T) {
- data := []byte("somedata")
- tests := []*network{
- newNetwork(nil, nil, nil),
- newNetwork(nil, nil, nopStepper),
- }
-
- for j, tt := range tests {
- // promote 1 to become leader
- tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- // propose via follower
- tt.send(pb.Message{From: 2, To: 2, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
-
- wantLog := &raftLog{
- storage: &MemoryStorage{
- ents: []pb.Entry{{}, {Data: nil, Term: 1, Index: 1}, {Term: 1, Data: data, Index: 2}},
- },
- unstable: unstable{offset: 3},
- committed: 2}
- base := ltoa(wantLog)
- for i, p := range tt.peers {
- if sm, ok := p.(*raft); ok {
- l := ltoa(sm.raftLog)
- if g := diffu(base, l); g != "" {
- t.Errorf("#%d: peer %d diff:\n%s", j, i, g)
- }
- } else {
- t.Logf("#%d: peer %d empty log", j, i)
- }
- }
- sm := tt.peers[1].(*raft)
- if g := sm.Term; g != 1 {
- t.Errorf("#%d: term = %d, want %d", j, g, 1)
- }
- }
-}
-
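-// TestCommit exercises maybeCommit: an index becomes committed only when a
-// quorum of Match indexes has reached it and the entry at that index was
-// appended in the current term (smTerm).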
-func TestCommit(t *testing.T) {
- tests := []struct {
- matches []uint64
- logs []pb.Entry
- smTerm uint64
- w uint64
- }{
- // single
- {[]uint64{1}, []pb.Entry{{Index: 1, Term: 1}}, 1, 1},
- {[]uint64{1}, []pb.Entry{{Index: 1, Term: 1}}, 2, 0},
- {[]uint64{2}, []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}}, 2, 2},
- {[]uint64{1}, []pb.Entry{{Index: 1, Term: 2}}, 2, 1},
-
- // odd
- {[]uint64{2, 1, 1}, []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}}, 1, 1},
- {[]uint64{2, 1, 1}, []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 1}}, 2, 0},
- {[]uint64{2, 1, 2}, []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}}, 2, 2},
- {[]uint64{2, 1, 2}, []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 1}}, 2, 0},
-
- // even
- {[]uint64{2, 1, 1, 1}, []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}}, 1, 1},
- {[]uint64{2, 1, 1, 1}, []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 1}}, 2, 0},
- {[]uint64{2, 1, 1, 2}, []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}}, 1, 1},
- {[]uint64{2, 1, 1, 2}, []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 1}}, 2, 0},
- {[]uint64{2, 1, 2, 2}, []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}}, 2, 2},
- {[]uint64{2, 1, 2, 2}, []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 1}}, 2, 0},
- }
-
- for i, tt := range tests {
- storage := newTestMemoryStorage(withPeers(1))
- storage.Append(tt.logs)
- storage.hardState = pb.HardState{Term: tt.smTerm}
-
- sm := newTestRaft(1, 10, 2, storage)
- for j := 0; j < len(tt.matches); j++ {
- id := uint64(j) + 1
- if id > 1 {
- sm.applyConfChange(pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: id}.AsV2())
- }
- pr := sm.prs.Progress[id]
- pr.Match, pr.Next = tt.matches[j], tt.matches[j]+1
- }
- sm.maybeCommit()
- if g := sm.raftLog.committed; g != tt.w {
- t.Errorf("#%d: committed = %d, want %d", i, g, tt.w)
- }
- }
-}
-
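-// TestPastElectionTimeout checks that the probability of pastElectionTimeout
-// returning true grows roughly linearly from 0 at electionTimeout elapsed
-// ticks to 1 at 2*electionTimeout, matching the randomized timeout drawn
-// from [electiontimeout, 2*electiontimeout-1].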
-func TestPastElectionTimeout(t *testing.T) {
- tests := []struct {
- elapse int
- wprobability float64
- round bool
- }{
- {5, 0, false},
- {10, 0.1, true},
- {13, 0.4, true},
- {15, 0.6, true},
- {18, 0.9, true},
- {20, 1, false},
- }
-
- for i, tt := range tests {
- sm := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)))
- sm.electionElapsed = tt.elapse
- c := 0
- for j := 0; j < 10000; j++ {
- sm.resetRandomizedElectionTimeout()
- if sm.pastElectionTimeout() {
- c++
- }
- }
- got := float64(c) / 10000.0
- if tt.round {
- got = math.Floor(got*10+0.5) / 10.0
- }
- if got != tt.wprobability {
- t.Errorf("#%d: probability = %v, want %v", i, got, tt.wprobability)
- }
- }
-}
-
-// ensure that the Step function ignores the message from old term and does not pass it to the
-// actual stepX function.
-func TestStepIgnoreOldTermMsg(t *testing.T) {
- called := false
- fakeStep := func(r *raft, m pb.Message) error {
- called = true
- return nil
- }
- sm := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)))
- sm.step = fakeStep
- sm.Term = 2
- sm.Step(pb.Message{Type: pb.MsgApp, Term: sm.Term - 1})
- if called {
- t.Errorf("stepFunc called = %v , want %v", called, false)
- }
-}
-
-// TestHandleMsgApp ensures:
-// 1. Reply false if log doesn't contain an entry at prevLogIndex whose term matches prevLogTerm.
-// 2. If an existing entry conflicts with a new one (same index but different terms),
-// delete the existing entry and all that follow it; append any new entries not already in the log.
-// 3. If leaderCommit > commitIndex, set commitIndex = min(leaderCommit, index of last new entry).
-func TestHandleMsgApp(t *testing.T) {
- tests := []struct {
- m pb.Message
- wIndex uint64
- wCommit uint64
- wReject bool
- }{
- // Ensure 1
- {pb.Message{Type: pb.MsgApp, Term: 2, LogTerm: 3, Index: 2, Commit: 3}, 2, 0, true}, // previous log mismatch
- {pb.Message{Type: pb.MsgApp, Term: 2, LogTerm: 3, Index: 3, Commit: 3}, 2, 0, true}, // previous log non-exist
-
- // Ensure 2
- {pb.Message{Type: pb.MsgApp, Term: 2, LogTerm: 1, Index: 1, Commit: 1}, 2, 1, false},
- {pb.Message{Type: pb.MsgApp, Term: 2, LogTerm: 0, Index: 0, Commit: 1, Entries: []pb.Entry{{Index: 1, Term: 2}}}, 1, 1, false},
- {pb.Message{Type: pb.MsgApp, Term: 2, LogTerm: 2, Index: 2, Commit: 3, Entries: []pb.Entry{{Index: 3, Term: 2}, {Index: 4, Term: 2}}}, 4, 3, false},
- {pb.Message{Type: pb.MsgApp, Term: 2, LogTerm: 2, Index: 2, Commit: 4, Entries: []pb.Entry{{Index: 3, Term: 2}}}, 3, 3, false},
- {pb.Message{Type: pb.MsgApp, Term: 2, LogTerm: 1, Index: 1, Commit: 4, Entries: []pb.Entry{{Index: 2, Term: 2}}}, 2, 2, false},
-
- // Ensure 3
- {pb.Message{Type: pb.MsgApp, Term: 1, LogTerm: 1, Index: 1, Commit: 3}, 2, 1, false}, // match entry 1, commit up to last new entry 1
- {pb.Message{Type: pb.MsgApp, Term: 1, LogTerm: 1, Index: 1, Commit: 3, Entries: []pb.Entry{{Index: 2, Term: 2}}}, 2, 2, false}, // match entry 1, commit up to last new entry 2
- {pb.Message{Type: pb.MsgApp, Term: 2, LogTerm: 2, Index: 2, Commit: 3}, 2, 2, false}, // match entry 2, commit up to last new entry 2
- {pb.Message{Type: pb.MsgApp, Term: 2, LogTerm: 2, Index: 2, Commit: 4}, 2, 2, false}, // commit up to log.last()
- }
-
- for i, tt := range tests {
- storage := newTestMemoryStorage(withPeers(1))
- storage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}})
- sm := newTestRaft(1, 10, 1, storage)
- sm.becomeFollower(2, None)
-
- sm.handleAppendEntries(tt.m)
- if sm.raftLog.lastIndex() != tt.wIndex {
- t.Errorf("#%d: lastIndex = %d, want %d", i, sm.raftLog.lastIndex(), tt.wIndex)
- }
- if sm.raftLog.committed != tt.wCommit {
- t.Errorf("#%d: committed = %d, want %d", i, sm.raftLog.committed, tt.wCommit)
- }
- m := sm.readMessages()
- if len(m) != 1 {
- t.Fatalf("#%d: msg = nil, want 1", i)
- }
- if m[0].Reject != tt.wReject {
- t.Errorf("#%d: reject = %v, want %v", i, m[0].Reject, tt.wReject)
- }
- }
-}
-
-// TestHandleHeartbeat ensures that the follower advances its commit index to
-// the commit index carried in the heartbeat message, without ever decreasing it.
-func TestHandleHeartbeat(t *testing.T) {
- commit := uint64(2)
- tests := []struct {
- m pb.Message
- wCommit uint64
- }{
- {pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit + 1}, commit + 1},
- {pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit - 1}, commit}, // do not decrease commit
- }
-
- for i, tt := range tests {
- storage := newTestMemoryStorage(withPeers(1, 2))
- storage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}})
- sm := newTestRaft(1, 5, 1, storage)
- sm.becomeFollower(2, 2)
- sm.raftLog.commitTo(commit)
- sm.handleHeartbeat(tt.m)
- if sm.raftLog.committed != tt.wCommit {
- t.Errorf("#%d: committed = %d, want %d", i, sm.raftLog.committed, tt.wCommit)
- }
- m := sm.readMessages()
- if len(m) != 1 {
- t.Fatalf("#%d: msg = nil, want 1", i)
- }
- if m[0].Type != pb.MsgHeartbeatResp {
- t.Errorf("#%d: type = %v, want MsgHeartbeatResp", i, m[0].Type)
- }
- }
-}
-
-// TestHandleHeartbeatResp ensures that we re-send log entries when we get a heartbeat response.
-func TestHandleHeartbeatResp(t *testing.T) {
- storage := newTestMemoryStorage(withPeers(1, 2))
- storage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}})
- sm := newTestRaft(1, 5, 1, storage)
- sm.becomeCandidate()
- sm.becomeLeader()
- sm.raftLog.commitTo(sm.raftLog.lastIndex())
-
- // A heartbeat response from a node that is behind; re-send MsgApp
- sm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp})
- msgs := sm.readMessages()
- if len(msgs) != 1 {
- t.Fatalf("len(msgs) = %d, want 1", len(msgs))
- }
- if msgs[0].Type != pb.MsgApp {
- t.Errorf("type = %v, want MsgApp", msgs[0].Type)
- }
-
- // A second heartbeat response generates another MsgApp re-send
- sm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp})
- msgs = sm.readMessages()
- if len(msgs) != 1 {
- t.Fatalf("len(msgs) = %d, want 1", len(msgs))
- }
- if msgs[0].Type != pb.MsgApp {
- t.Errorf("type = %v, want MsgApp", msgs[0].Type)
- }
-
- // Once we have an MsgAppResp, heartbeats no longer send MsgApp.
- sm.Step(pb.Message{
- From: 2,
- Type: pb.MsgAppResp,
- Index: msgs[0].Index + uint64(len(msgs[0].Entries)),
- })
- // Consume the message sent in response to MsgAppResp
- sm.readMessages()
-
- sm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp})
- msgs = sm.readMessages()
- if len(msgs) != 0 {
- t.Fatalf("len(msgs) = %d, want 0: %+v", len(msgs), msgs)
- }
-}
-
-// TestRaftFreesReadOnlyMem ensures raft frees the read request from the
-// readOnly readIndexQueue and the pendingReadIndex map.
-// related issue: https://github.com/etcd-io/etcd/issues/7571
-func TestRaftFreesReadOnlyMem(t *testing.T) {
- sm := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2)))
- sm.becomeCandidate()
- sm.becomeLeader()
- sm.raftLog.commitTo(sm.raftLog.lastIndex())
-
- ctx := []byte("ctx")
-
- // leader starts linearizable read request.
- // more info: raft dissertation 6.4, step 2.
- sm.Step(pb.Message{From: 2, Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: ctx}}})
- msgs := sm.readMessages()
- if len(msgs) != 1 {
- t.Fatalf("len(msgs) = %d, want 1", len(msgs))
- }
- if msgs[0].Type != pb.MsgHeartbeat {
- t.Fatalf("type = %v, want MsgHeartbeat", msgs[0].Type)
- }
- if !bytes.Equal(msgs[0].Context, ctx) {
- t.Fatalf("Context = %v, want %v", msgs[0].Context, ctx)
- }
- if len(sm.readOnly.readIndexQueue) != 1 {
- t.Fatalf("len(readIndexQueue) = %v, want 1", len(sm.readOnly.readIndexQueue))
- }
- if len(sm.readOnly.pendingReadIndex) != 1 {
- t.Fatalf("len(pendingReadIndex) = %v, want 1", len(sm.readOnly.pendingReadIndex))
- }
- if _, ok := sm.readOnly.pendingReadIndex[string(ctx)]; !ok {
- t.Fatalf("can't find context %v in pendingReadIndex ", ctx)
- }
-
- // heartbeat responses from majority of followers (1 in this case)
- // acknowledge the authority of the leader.
- // more info: raft dissertation 6.4, step 3.
- sm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp, Context: ctx})
- if len(sm.readOnly.readIndexQueue) != 0 {
- t.Fatalf("len(readIndexQueue) = %v, want 0", len(sm.readOnly.readIndexQueue))
- }
- if len(sm.readOnly.pendingReadIndex) != 0 {
- t.Fatalf("len(pendingReadIndex) = %v, want 0", len(sm.readOnly.pendingReadIndex))
- }
- if _, ok := sm.readOnly.pendingReadIndex[string(ctx)]; ok {
- t.Fatalf("found context %v in pendingReadIndex, want none", ctx)
- }
-}
-
-// TestMsgAppRespWaitReset verifies the resume behavior of a leader
-// upon receiving a MsgAppResp.
-func TestMsgAppRespWaitReset(t *testing.T) {
- sm := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- sm.becomeCandidate()
- sm.becomeLeader()
-
- // The new leader has just emitted a new entry at its term; consume those
- // messages from the outgoing queue.
- sm.bcastAppend()
- sm.readMessages()
-
- // Node 2 acks the first entry, making it committed.
- sm.Step(pb.Message{
- From: 2,
- Type: pb.MsgAppResp,
- Index: 1,
- })
- if sm.raftLog.committed != 1 {
- t.Fatalf("expected committed to be 1, got %d", sm.raftLog.committed)
- }
- // Also consume the MsgApp messages that update Commit on the followers.
- sm.readMessages()
-
- // A new command is now proposed on node 1.
- sm.Step(pb.Message{
- From: 1,
- Type: pb.MsgProp,
- Entries: []pb.Entry{{}},
- })
-
- // The command is broadcast to all nodes not in the wait state.
- // Node 2 left the wait state due to its MsgAppResp, but node 3 is still waiting.
- msgs := sm.readMessages()
- if len(msgs) != 1 {
- t.Fatalf("expected 1 message, got %d: %+v", len(msgs), msgs)
- }
- if msgs[0].Type != pb.MsgApp || msgs[0].To != 2 {
- t.Errorf("expected MsgApp to node 2, got %v to %d", msgs[0].Type, msgs[0].To)
- }
- if len(msgs[0].Entries) != 1 || msgs[0].Entries[0].Index != 2 {
- t.Errorf("expected to send entry 2, but got %v", msgs[0].Entries)
- }
-
- // Now Node 3 acks the first entry. This releases the wait and entry 2 is sent.
- sm.Step(pb.Message{
- From: 3,
- Type: pb.MsgAppResp,
- Index: 1,
- })
- msgs = sm.readMessages()
- if len(msgs) != 1 {
- t.Fatalf("expected 1 message, got %d: %+v", len(msgs), msgs)
- }
- if msgs[0].Type != pb.MsgApp || msgs[0].To != 3 {
- t.Errorf("expected MsgApp to node 3, got %v to %d", msgs[0].Type, msgs[0].To)
- }
- if len(msgs[0].Entries) != 1 || msgs[0].Entries[0].Index != 2 {
- t.Errorf("expected to send entry 2, but got %v", msgs[0].Entries)
- }
-}
-
-func TestRecvMsgVote(t *testing.T) {
- testRecvMsgVote(t, pb.MsgVote)
-}
-
-func TestRecvMsgPreVote(t *testing.T) {
- testRecvMsgVote(t, pb.MsgPreVote)
-}
-
-func testRecvMsgVote(t *testing.T, msgType pb.MessageType) {
- tests := []struct {
- state StateType
- index, logTerm uint64
- voteFor uint64
- wreject bool
- }{
- {StateFollower, 0, 0, None, true},
- {StateFollower, 0, 1, None, true},
- {StateFollower, 0, 2, None, true},
- {StateFollower, 0, 3, None, false},
-
- {StateFollower, 1, 0, None, true},
- {StateFollower, 1, 1, None, true},
- {StateFollower, 1, 2, None, true},
- {StateFollower, 1, 3, None, false},
-
- {StateFollower, 2, 0, None, true},
- {StateFollower, 2, 1, None, true},
- {StateFollower, 2, 2, None, false},
- {StateFollower, 2, 3, None, false},
-
- {StateFollower, 3, 0, None, true},
- {StateFollower, 3, 1, None, true},
- {StateFollower, 3, 2, None, false},
- {StateFollower, 3, 3, None, false},
-
- {StateFollower, 3, 2, 2, false},
- {StateFollower, 3, 2, 1, true},
-
- {StateLeader, 3, 3, 1, true},
- {StatePreCandidate, 3, 3, 1, true},
- {StateCandidate, 3, 3, 1, true},
- }
-
- max := func(a, b uint64) uint64 {
- if a > b {
- return a
- }
- return b
- }
-
- for i, tt := range tests {
- sm := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)))
- sm.state = tt.state
- switch tt.state {
- case StateFollower:
- sm.step = stepFollower
- case StateCandidate, StatePreCandidate:
- sm.step = stepCandidate
- case StateLeader:
- sm.step = stepLeader
- }
- sm.Vote = tt.voteFor
- sm.raftLog = &raftLog{
- storage: &MemoryStorage{ents: []pb.Entry{{}, {Index: 1, Term: 2}, {Index: 2, Term: 2}}},
- unstable: unstable{offset: 3},
- }
-
- // raft.Term is greater than or equal to raft.raftLog.lastTerm. In this
- // test we're only testing MsgVote responses when the campaigning node
- // has a different raft log compared to the recipient node.
- // Additionally we're verifying behaviour when the recipient node has
- // already given out its vote for its current term. We're not testing
- // what the recipient node does when receiving a message with a
- // different term number, so we simply initialize both term numbers to
- // be the same.
- term := max(sm.raftLog.lastTerm(), tt.logTerm)
- sm.Term = term
- sm.Step(pb.Message{Type: msgType, Term: term, From: 2, Index: tt.index, LogTerm: tt.logTerm})
-
- msgs := sm.readMessages()
- if g := len(msgs); g != 1 {
- t.Fatalf("#%d: len(msgs) = %d, want 1", i, g)
- continue
- }
- if g := msgs[0].Type; g != voteRespMsgType(msgType) {
- t.Errorf("#%d, m.Type = %v, want %v", i, g, voteRespMsgType(msgType))
- }
- if g := msgs[0].Reject; g != tt.wreject {
- t.Errorf("#%d, m.Reject = %v, want %v", i, g, tt.wreject)
- }
- }
-}
-
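-// TestStateTransition enumerates allowed and forbidden state transitions;
-// forbidden ones (e.g. follower directly to leader) are expected to panic.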
-func TestStateTransition(t *testing.T) {
- tests := []struct {
- from StateType
- to StateType
- wallow bool
- wterm uint64
- wlead uint64
- }{
- {StateFollower, StateFollower, true, 1, None},
- {StateFollower, StatePreCandidate, true, 0, None},
- {StateFollower, StateCandidate, true, 1, None},
- {StateFollower, StateLeader, false, 0, None},
-
- {StatePreCandidate, StateFollower, true, 0, None},
- {StatePreCandidate, StatePreCandidate, true, 0, None},
- {StatePreCandidate, StateCandidate, true, 1, None},
- {StatePreCandidate, StateLeader, true, 0, 1},
-
- {StateCandidate, StateFollower, true, 0, None},
- {StateCandidate, StatePreCandidate, true, 0, None},
- {StateCandidate, StateCandidate, true, 1, None},
- {StateCandidate, StateLeader, true, 0, 1},
-
- {StateLeader, StateFollower, true, 1, None},
- {StateLeader, StatePreCandidate, false, 0, None},
- {StateLeader, StateCandidate, false, 1, None},
- {StateLeader, StateLeader, true, 0, 1},
- }
-
- for i, tt := range tests {
- func() {
- defer func() {
- if r := recover(); r != nil {
- if tt.wallow {
- t.Errorf("%d: allow = %v, want %v", i, false, true)
- }
- }
- }()
-
- sm := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)))
- sm.state = tt.from
-
- switch tt.to {
- case StateFollower:
- sm.becomeFollower(tt.wterm, tt.wlead)
- case StatePreCandidate:
- sm.becomePreCandidate()
- case StateCandidate:
- sm.becomeCandidate()
- case StateLeader:
- sm.becomeLeader()
- }
-
- if sm.Term != tt.wterm {
- t.Errorf("%d: term = %d, want %d", i, sm.Term, tt.wterm)
- }
- if sm.lead != tt.wlead {
- t.Errorf("%d: lead = %d, want %d", i, sm.lead, tt.wlead)
- }
- }()
- }
-}
-
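-// TestAllServerStepdown verifies that a node in any state steps down to
-// follower when it receives a MsgVote or MsgApp carrying a higher term.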
-func TestAllServerStepdown(t *testing.T) {
- tests := []struct {
- state StateType
-
- wstate StateType
- wterm uint64
- windex uint64
- }{
- {StateFollower, StateFollower, 3, 0},
- {StatePreCandidate, StateFollower, 3, 0},
- {StateCandidate, StateFollower, 3, 0},
- {StateLeader, StateFollower, 3, 1},
- }
-
- tmsgTypes := [...]pb.MessageType{pb.MsgVote, pb.MsgApp}
- tterm := uint64(3)
-
- for i, tt := range tests {
- sm := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- switch tt.state {
- case StateFollower:
- sm.becomeFollower(1, None)
- case StatePreCandidate:
- sm.becomePreCandidate()
- case StateCandidate:
- sm.becomeCandidate()
- case StateLeader:
- sm.becomeCandidate()
- sm.becomeLeader()
- }
-
- for j, msgType := range tmsgTypes {
- sm.Step(pb.Message{From: 2, Type: msgType, Term: tterm, LogTerm: tterm})
-
- if sm.state != tt.wstate {
- t.Errorf("#%d.%d state = %v , want %v", i, j, sm.state, tt.wstate)
- }
- if sm.Term != tt.wterm {
- t.Errorf("#%d.%d term = %v , want %v", i, j, sm.Term, tt.wterm)
- }
- if sm.raftLog.lastIndex() != tt.windex {
- t.Errorf("#%d.%d index = %v , want %v", i, j, sm.raftLog.lastIndex(), tt.windex)
- }
- if uint64(len(sm.raftLog.allEntries())) != tt.windex {
- t.Errorf("#%d.%d len(ents) = %v , want %v", i, j, len(sm.raftLog.allEntries()), tt.windex)
- }
- wlead := uint64(2)
- if msgType == pb.MsgVote {
- wlead = None
- }
- if sm.lead != wlead {
- t.Errorf("#%d, sm.lead = %d, want %d", i, sm.lead, None)
- }
- }
- }
-}
-
-func TestCandidateResetTermMsgHeartbeat(t *testing.T) {
- testCandidateResetTerm(t, pb.MsgHeartbeat)
-}
-
-func TestCandidateResetTermMsgApp(t *testing.T) {
- testCandidateResetTerm(t, pb.MsgApp)
-}
-
-// testCandidateResetTerm tests that when a candidate receives a
-// MsgHeartbeat or MsgApp from the leader, "Step" resets the term
-// to the leader's and reverts the candidate back to follower.
-func testCandidateResetTerm(t *testing.T, mt pb.MessageType) {
- a := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- b := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- c := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
-
- nt := newNetwork(a, b, c)
-
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
- if a.state != StateLeader {
- t.Errorf("state = %s, want %s", a.state, StateLeader)
- }
- if b.state != StateFollower {
- t.Errorf("state = %s, want %s", b.state, StateFollower)
- }
- if c.state != StateFollower {
- t.Errorf("state = %s, want %s", c.state, StateFollower)
- }
-
- // isolate 3 and increase term in rest
- nt.isolate(3)
-
- nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- if a.state != StateLeader {
- t.Errorf("state = %s, want %s", a.state, StateLeader)
- }
- if b.state != StateFollower {
- t.Errorf("state = %s, want %s", b.state, StateFollower)
- }
-
- // trigger campaign in isolated c
- c.resetRandomizedElectionTimeout()
- for i := 0; i < c.randomizedElectionTimeout; i++ {
- c.tick()
- }
-
- if c.state != StateCandidate {
- t.Errorf("state = %s, want %s", c.state, StateCandidate)
- }
-
- nt.recover()
-
- // leader sends to isolated candidate
- // and expects candidate to revert to follower
- nt.send(pb.Message{From: 1, To: 3, Term: a.Term, Type: mt})
-
- if c.state != StateFollower {
- t.Errorf("state = %s, want %s", c.state, StateFollower)
- }
-
- // follower c term is reset with leader's
- if a.Term != c.Term {
- t.Errorf("follower term expected same term as leader's %d, got %d", a.Term, c.Term)
- }
-}
-
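-// TestLeaderStepdownWhenQuorumActive verifies that, with checkQuorum enabled,
-// a leader that keeps hearing from a quorum within the election timeout
-// remains leader.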
-func TestLeaderStepdownWhenQuorumActive(t *testing.T) {
- sm := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
-
- sm.checkQuorum = true
-
- sm.becomeCandidate()
- sm.becomeLeader()
-
- for i := 0; i < sm.electionTimeout+1; i++ {
- sm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp, Term: sm.Term})
- sm.tick()
- }
-
- if sm.state != StateLeader {
- t.Errorf("state = %v, want %v", sm.state, StateLeader)
- }
-}
-
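-// TestLeaderStepdownWhenQuorumLost verifies that, with checkQuorum enabled,
-// a leader that hears from no quorum for an election timeout steps down to
-// follower.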
-func TestLeaderStepdownWhenQuorumLost(t *testing.T) {
- sm := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
-
- sm.checkQuorum = true
-
- sm.becomeCandidate()
- sm.becomeLeader()
-
- for i := 0; i < sm.electionTimeout+1; i++ {
- sm.tick()
- }
-
- if sm.state != StateFollower {
- t.Errorf("state = %v, want %v", sm.state, StateFollower)
- }
-}
-
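-// TestLeaderSupersedingWithCheckQuorum verifies that, with checkQuorum
-// enabled, a candidate cannot win an election while a follower's election
-// timer has not yet expired, but succeeds once that follower's
-// electionElapsed passes electionTimeout.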
-func TestLeaderSupersedingWithCheckQuorum(t *testing.T) {
- a := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- b := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- c := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
-
- a.checkQuorum = true
- b.checkQuorum = true
- c.checkQuorum = true
-
- nt := newNetwork(a, b, c)
- setRandomizedElectionTimeout(b, b.electionTimeout+1)
-
- for i := 0; i < b.electionTimeout; i++ {
- b.tick()
- }
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- if a.state != StateLeader {
- t.Errorf("state = %s, want %s", a.state, StateLeader)
- }
-
- if c.state != StateFollower {
- t.Errorf("state = %s, want %s", c.state, StateFollower)
- }
-
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
-
- // Peer b rejected c's vote since its electionElapsed had not reached electionTimeout
- if c.state != StateCandidate {
- t.Errorf("state = %s, want %s", c.state, StateCandidate)
- }
-
- // Let b's electionElapsed reach electionTimeout
- for i := 0; i < b.electionTimeout; i++ {
- b.tick()
- }
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
-
- if c.state != StateLeader {
- t.Errorf("state = %s, want %s", c.state, StateLeader)
- }
-}
-
-func TestLeaderElectionWithCheckQuorum(t *testing.T) {
- a := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- b := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- c := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
-
- a.checkQuorum = true
- b.checkQuorum = true
- c.checkQuorum = true
-
- nt := newNetwork(a, b, c)
- setRandomizedElectionTimeout(a, a.electionTimeout+1)
- setRandomizedElectionTimeout(b, b.electionTimeout+2)
-
- // Immediately after creation, votes are cast regardless of the
- // election timeout.
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- if a.state != StateLeader {
- t.Errorf("state = %s, want %s", a.state, StateLeader)
- }
-
- if c.state != StateFollower {
- t.Errorf("state = %s, want %s", c.state, StateFollower)
- }
-
- // Reset randomizedElectionTimeout to be larger than electionTimeout again,
- // because the value might have been reset to electionTimeout by the last state change.
- setRandomizedElectionTimeout(a, a.electionTimeout+1)
- setRandomizedElectionTimeout(b, b.electionTimeout+2)
- for i := 0; i < a.electionTimeout; i++ {
- a.tick()
- }
- for i := 0; i < b.electionTimeout; i++ {
- b.tick()
- }
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
-
- if a.state != StateFollower {
- t.Errorf("state = %s, want %s", a.state, StateFollower)
- }
-
- if c.state != StateLeader {
- t.Errorf("state = %s, want %s", c.state, StateLeader)
- }
-}
-
-// TestFreeStuckCandidateWithCheckQuorum ensures that a candidate with a higher term
-// can disrupt the leader even if the leader still "officially" holds the lease. The
-// leader is expected to step down and adopt the candidate's term.
-func TestFreeStuckCandidateWithCheckQuorum(t *testing.T) {
- a := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- b := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- c := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
-
- a.checkQuorum = true
- b.checkQuorum = true
- c.checkQuorum = true
-
- nt := newNetwork(a, b, c)
- setRandomizedElectionTimeout(b, b.electionTimeout+1)
-
- for i := 0; i < b.electionTimeout; i++ {
- b.tick()
- }
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- nt.isolate(1)
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
-
- if b.state != StateFollower {
- t.Errorf("state = %s, want %s", b.state, StateFollower)
- }
-
- if c.state != StateCandidate {
- t.Errorf("state = %s, want %s", c.state, StateCandidate)
- }
-
- if c.Term != b.Term+1 {
- t.Errorf("term = %d, want %d", c.Term, b.Term+1)
- }
-
- // Vote again for safety
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
-
- if b.state != StateFollower {
- t.Errorf("state = %s, want %s", b.state, StateFollower)
- }
-
- if c.state != StateCandidate {
- t.Errorf("state = %s, want %s", c.state, StateCandidate)
- }
-
- if c.Term != b.Term+2 {
- t.Errorf("term = %d, want %d", c.Term, b.Term+2)
- }
-
- nt.recover()
- nt.send(pb.Message{From: 1, To: 3, Type: pb.MsgHeartbeat, Term: a.Term})
-
- // Disrupt the leader so that the stuck peer is freed
- if a.state != StateFollower {
- t.Errorf("state = %s, want %s", a.state, StateFollower)
- }
-
- if c.Term != a.Term {
- t.Errorf("term = %d, want %d", c.Term, a.Term)
- }
-
- // Vote again, should become leader this time
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
-
- if c.state != StateLeader {
- t.Errorf("peer 3 state: %s, want %s", c.state, StateLeader)
- }
-}
-
-func TestNonPromotableVoterWithCheckQuorum(t *testing.T) {
- a := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
- b := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1)))
-
- a.checkQuorum = true
- b.checkQuorum = true
-
- nt := newNetwork(a, b)
- setRandomizedElectionTimeout(b, b.electionTimeout+1)
- // Need to remove 2 again to make it a non-promotable node since newNetwork overwrote some internal state.
- b.applyConfChange(pb.ConfChange{Type: pb.ConfChangeRemoveNode, NodeID: 2}.AsV2())
-
- if b.promotable() {
- t.Fatalf("promotable = %v, want false", b.promotable())
- }
-
- for i := 0; i < b.electionTimeout; i++ {
- b.tick()
- }
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- if a.state != StateLeader {
- t.Errorf("state = %s, want %s", a.state, StateLeader)
- }
-
- if b.state != StateFollower {
- t.Errorf("state = %s, want %s", b.state, StateFollower)
- }
-
- if b.lead != 1 {
- t.Errorf("lead = %d, want 1", b.lead)
- }
-}
-
-// TestDisruptiveFollower tests a follower that is isolated from the leader
-// by a slow network: its election times out and it becomes a candidate with
-// an increased term. The candidate's response to a late leader heartbeat
-// then forces the leader to step down.
-func TestDisruptiveFollower(t *testing.T) {
- n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
-
- n1.checkQuorum = true
- n2.checkQuorum = true
- n3.checkQuorum = true
-
- n1.becomeFollower(1, None)
- n2.becomeFollower(1, None)
- n3.becomeFollower(1, None)
-
- nt := newNetwork(n1, n2, n3)
-
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- // check state
- // n1.state == StateLeader
- // n2.state == StateFollower
- // n3.state == StateFollower
- if n1.state != StateLeader {
- t.Fatalf("node 1 state: %s, want %s", n1.state, StateLeader)
- }
- if n2.state != StateFollower {
- t.Fatalf("node 2 state: %s, want %s", n2.state, StateFollower)
- }
- if n3.state != StateFollower {
- t.Fatalf("node 3 state: %s, want %s", n3.state, StateFollower)
- }
-
- // etcd server "advanceTicksForElection" on restart;
- // this is to expedite campaign trigger when given larger
- // election timeouts (e.g. multi-datacenter deploy)
- // Or leader messages are being delayed while ticks elapse
- setRandomizedElectionTimeout(n3, n3.electionTimeout+2)
- for i := 0; i < n3.randomizedElectionTimeout-1; i++ {
- n3.tick()
- }
-
- // Ideally, before the last election tick elapses, the follower n3
- // receives "pb.MsgApp" or "pb.MsgHeartbeat" from leader n1 and resets
- // its "electionElapsed". However, the last tick may elapse before any
- // message from the leader arrives, thus triggering a campaign.
- n3.tick()
-
- // n1 is still the leader, while its heartbeat to candidate n3
- // is being delayed
-
- // check state
- // n1.state == StateLeader
- // n2.state == StateFollower
- // n3.state == StateCandidate
- if n1.state != StateLeader {
- t.Fatalf("node 1 state: %s, want %s", n1.state, StateLeader)
- }
- if n2.state != StateFollower {
- t.Fatalf("node 2 state: %s, want %s", n2.state, StateFollower)
- }
- if n3.state != StateCandidate {
- t.Fatalf("node 3 state: %s, want %s", n3.state, StateCandidate)
- }
- // check term
- // n1.Term == 2
- // n2.Term == 2
- // n3.Term == 3
- if n1.Term != 2 {
- t.Fatalf("node 1 term: %d, want %d", n1.Term, 2)
- }
- if n2.Term != 2 {
- t.Fatalf("node 2 term: %d, want %d", n2.Term, 2)
- }
- if n3.Term != 3 {
- t.Fatalf("node 3 term: %d, want %d", n3.Term, 3)
- }
-
- // while outgoing vote requests are still queued in n3,
- // leader heartbeat finally arrives at candidate n3
- // however, due to delayed network from leader, leader
- // heartbeat was sent with lower term than candidate's
- nt.send(pb.Message{From: 1, To: 3, Term: n1.Term, Type: pb.MsgHeartbeat})
-
- // Candidate n3 then responds with a "pb.MsgAppResp" of higher term,
- // and the leader steps down upon seeing the higher term.
- // This disrupts the current leader, so that the candidate with the
- // higher term can be freed by the following election.
-
- // check state
- // n1.state == StateFollower
- // n2.state == StateFollower
- // n3.state == StateCandidate
- if n1.state != StateFollower {
- t.Fatalf("node 1 state: %s, want %s", n1.state, StateFollower)
- }
- if n2.state != StateFollower {
- t.Fatalf("node 2 state: %s, want %s", n2.state, StateFollower)
- }
- if n3.state != StateCandidate {
- t.Fatalf("node 3 state: %s, want %s", n3.state, StateCandidate)
- }
- // check term
- // n1.Term == 3
- // n2.Term == 2
- // n3.Term == 3
- if n1.Term != 3 {
- t.Fatalf("node 1 term: %d, want %d", n1.Term, 3)
- }
- if n2.Term != 2 {
- t.Fatalf("node 2 term: %d, want %d", n2.Term, 2)
- }
- if n3.Term != 3 {
- t.Fatalf("node 3 term: %d, want %d", n3.Term, 3)
- }
-}
-
-// TestDisruptiveFollowerPreVote tests a follower that is isolated from the
-// leader by a slow network: its election times out and it becomes a
-// pre-candidate with a shorter log than the current leader. The pre-vote
-// phase then prevents this isolated node from forcing the current leader
-// to step down, reducing disruption.
-func TestDisruptiveFollowerPreVote(t *testing.T) {
- n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
-
- n1.checkQuorum = true
- n2.checkQuorum = true
- n3.checkQuorum = true
-
- n1.becomeFollower(1, None)
- n2.becomeFollower(1, None)
- n3.becomeFollower(1, None)
-
- nt := newNetwork(n1, n2, n3)
-
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- // check state
- // n1.state == StateLeader
- // n2.state == StateFollower
- // n3.state == StateFollower
- if n1.state != StateLeader {
- t.Fatalf("node 1 state: %s, want %s", n1.state, StateLeader)
- }
- if n2.state != StateFollower {
- t.Fatalf("node 2 state: %s, want %s", n2.state, StateFollower)
- }
- if n3.state != StateFollower {
- t.Fatalf("node 3 state: %s, want %s", n3.state, StateFollower)
- }
-
- nt.isolate(3)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
- n1.preVote = true
- n2.preVote = true
- n3.preVote = true
- nt.recover()
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
-
- // check state
- // n1.state == StateLeader
- // n2.state == StateFollower
- // n3.state == StatePreCandidate
- if n1.state != StateLeader {
- t.Fatalf("node 1 state: %s, want %s", n1.state, StateLeader)
- }
- if n2.state != StateFollower {
- t.Fatalf("node 2 state: %s, want %s", n2.state, StateFollower)
- }
- if n3.state != StatePreCandidate {
- t.Fatalf("node 3 state: %s, want %s", n3.state, StatePreCandidate)
- }
- // check term
- // n1.Term == 2
- // n2.Term == 2
- // n3.Term == 2
- if n1.Term != 2 {
- t.Fatalf("node 1 term: %d, want %d", n1.Term, 2)
- }
- if n2.Term != 2 {
- t.Fatalf("node 2 term: %d, want %d", n2.Term, 2)
- }
- if n3.Term != 2 {
- t.Fatalf("node 2 term: %d, want %d", n3.Term, 2)
- }
-
- // delayed leader heartbeat does not force current leader to step down
- nt.send(pb.Message{From: 1, To: 3, Term: n1.Term, Type: pb.MsgHeartbeat})
- if n1.state != StateLeader {
- t.Fatalf("node 1 state: %s, want %s", n1.state, StateLeader)
- }
-}
-
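-// TestReadOnlyOptionSafe verifies linearizable reads under the default
-// ReadOnlySafe option: each MsgReadIndex request is answered with the
-// commit index at the time of the request along with the request context.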
-func TestReadOnlyOptionSafe(t *testing.T) {
- a := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- b := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- c := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
-
- nt := newNetwork(a, b, c)
- setRandomizedElectionTimeout(b, b.electionTimeout+1)
-
- for i := 0; i < b.electionTimeout; i++ {
- b.tick()
- }
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- if a.state != StateLeader {
- t.Fatalf("state = %s, want %s", a.state, StateLeader)
- }
-
- tests := []struct {
- sm *raft
- proposals int
- wri uint64
- wctx []byte
- }{
- {a, 10, 11, []byte("ctx1")},
- {b, 10, 21, []byte("ctx2")},
- {c, 10, 31, []byte("ctx3")},
- {a, 10, 41, []byte("ctx4")},
- {b, 10, 51, []byte("ctx5")},
- {c, 10, 61, []byte("ctx6")},
- }
-
- for i, tt := range tests {
- for j := 0; j < tt.proposals; j++ {
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
- }
-
- nt.send(pb.Message{From: tt.sm.id, To: tt.sm.id, Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: tt.wctx}}})
-
- r := tt.sm
- if len(r.readStates) == 0 {
- t.Errorf("#%d: len(readStates) = 0, want non-zero", i)
- }
- rs := r.readStates[0]
- if rs.Index != tt.wri {
- t.Errorf("#%d: readIndex = %d, want %d", i, rs.Index, tt.wri)
- }
-
- if !bytes.Equal(rs.RequestCtx, tt.wctx) {
- t.Errorf("#%d: requestCtx = %v, want %v", i, rs.RequestCtx, tt.wctx)
- }
- r.readStates = nil
- }
-}
-
-func TestReadOnlyWithLearner(t *testing.T) {
- a := newTestLearnerRaft(1, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)))
- b := newTestLearnerRaft(2, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)))
-
- nt := newNetwork(a, b)
- setRandomizedElectionTimeout(b, b.electionTimeout+1)
-
- for i := 0; i < b.electionTimeout; i++ {
- b.tick()
- }
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- if a.state != StateLeader {
- t.Fatalf("state = %s, want %s", a.state, StateLeader)
- }
-
- tests := []struct {
- sm *raft
- proposals int
- wri uint64
- wctx []byte
- }{
- {a, 10, 11, []byte("ctx1")},
- {b, 10, 21, []byte("ctx2")},
- {a, 10, 31, []byte("ctx3")},
- {b, 10, 41, []byte("ctx4")},
- }
-
- for i, tt := range tests {
- for j := 0; j < tt.proposals; j++ {
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
- }
-
- nt.send(pb.Message{From: tt.sm.id, To: tt.sm.id, Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: tt.wctx}}})
-
- r := tt.sm
- if len(r.readStates) == 0 {
- t.Fatalf("#%d: len(readStates) = 0, want non-zero", i)
- }
- rs := r.readStates[0]
- if rs.Index != tt.wri {
- t.Errorf("#%d: readIndex = %d, want %d", i, rs.Index, tt.wri)
- }
-
- if !bytes.Equal(rs.RequestCtx, tt.wctx) {
- t.Errorf("#%d: requestCtx = %v, want %v", i, rs.RequestCtx, tt.wctx)
- }
- r.readStates = nil
- }
-}
-
-func TestReadOnlyOptionLease(t *testing.T) {
- a := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- b := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- c := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- a.readOnly.option = ReadOnlyLeaseBased
- b.readOnly.option = ReadOnlyLeaseBased
- c.readOnly.option = ReadOnlyLeaseBased
- a.checkQuorum = true
- b.checkQuorum = true
- c.checkQuorum = true
-
- nt := newNetwork(a, b, c)
- setRandomizedElectionTimeout(b, b.electionTimeout+1)
-
- for i := 0; i < b.electionTimeout; i++ {
- b.tick()
- }
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- if a.state != StateLeader {
- t.Fatalf("state = %s, want %s", a.state, StateLeader)
- }
-
- tests := []struct {
- sm *raft
- proposals int
- wri uint64
- wctx []byte
- }{
- {a, 10, 11, []byte("ctx1")},
- {b, 10, 21, []byte("ctx2")},
- {c, 10, 31, []byte("ctx3")},
- {a, 10, 41, []byte("ctx4")},
- {b, 10, 51, []byte("ctx5")},
- {c, 10, 61, []byte("ctx6")},
- }
-
- for i, tt := range tests {
- for j := 0; j < tt.proposals; j++ {
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
- }
-
- nt.send(pb.Message{From: tt.sm.id, To: tt.sm.id, Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: tt.wctx}}})
-
- r := tt.sm
- rs := r.readStates[0]
- if rs.Index != tt.wri {
- t.Errorf("#%d: readIndex = %d, want %d", i, rs.Index, tt.wri)
- }
-
- if !bytes.Equal(rs.RequestCtx, tt.wctx) {
- t.Errorf("#%d: requestCtx = %v, want %v", i, rs.RequestCtx, tt.wctx)
- }
- r.readStates = nil
- }
-}
-
-// TestReadOnlyForNewLeader ensures that a leader only accepts MsgReadIndex messages
-// after it has committed at least one log entry at its term.
-func TestReadOnlyForNewLeader(t *testing.T) {
- nodeConfigs := []struct {
- id uint64
- committed uint64
- applied uint64
- compactIndex uint64
- }{
- {1, 1, 1, 0},
- {2, 2, 2, 2},
- {3, 2, 2, 2},
- }
- peers := make([]stateMachine, 0)
- for _, c := range nodeConfigs {
- storage := newTestMemoryStorage(withPeers(1, 2, 3))
- storage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 1}})
- storage.SetHardState(pb.HardState{Term: 1, Commit: c.committed})
- if c.compactIndex != 0 {
- storage.Compact(c.compactIndex)
- }
- cfg := newTestConfig(c.id, 10, 1, storage)
- cfg.Applied = c.applied
- raft := newRaft(cfg)
- peers = append(peers, raft)
- }
- nt := newNetwork(peers...)
-
- // Drop MsgApp to prevent peer a from committing any log entry at its term after it becomes leader.
- nt.ignore(pb.MsgApp)
- // Force peer a to become leader.
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- sm := nt.peers[1].(*raft)
- if sm.state != StateLeader {
- t.Fatalf("state = %s, want %s", sm.state, StateLeader)
- }
-
- // Ensure peer a drops read only request.
- var windex uint64 = 4
- wctx := []byte("ctx")
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: wctx}}})
- if len(sm.readStates) != 0 {
- t.Fatalf("len(readStates) = %d, want zero", len(sm.readStates))
- }
-
- nt.recover()
-
- // Force peer a to commit a log entry at its term
- for i := 0; i < sm.heartbeatTimeout; i++ {
- sm.tick()
- }
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
- if sm.raftLog.committed != 4 {
- t.Fatalf("committed = %d, want 4", sm.raftLog.committed)
- }
- lastLogTerm := sm.raftLog.zeroTermOnErrCompacted(sm.raftLog.term(sm.raftLog.committed))
- if lastLogTerm != sm.Term {
- t.Fatalf("last log term = %d, want %d", lastLogTerm, sm.Term)
- }
-
- // Ensure peer a processed postponed read only request after it committed an entry at its term.
- if len(sm.readStates) != 1 {
- t.Fatalf("len(readStates) = %d, want 1", len(sm.readStates))
- }
- rs := sm.readStates[0]
- if rs.Index != windex {
- t.Fatalf("readIndex = %d, want %d", rs.Index, windex)
- }
- if !bytes.Equal(rs.RequestCtx, wctx) {
- t.Fatalf("requestCtx = %v, want %v", rs.RequestCtx, wctx)
- }
-
- // Ensure peer a accepts read only request after it committed an entry at its term.
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: wctx}}})
- if len(sm.readStates) != 2 {
- t.Fatalf("len(readStates) = %d, want 2", len(sm.readStates))
- }
- rs = sm.readStates[1]
- if rs.Index != windex {
- t.Fatalf("readIndex = %d, want %d", rs.Index, windex)
- }
- if !bytes.Equal(rs.RequestCtx, wctx) {
- t.Fatalf("requestCtx = %v, want %v", rs.RequestCtx, wctx)
- }
-}
-
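-// TestLeaderAppResp verifies how the leader reacts to a MsgAppResp: stale
-// responses are ignored, rejections decrement Next and trigger a probe, and
-// acks advance Match/Next and may advance the commit index, which is then
-// broadcast to the followers.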
-func TestLeaderAppResp(t *testing.T) {
- // initial progress: match = 0; next = 3
- tests := []struct {
- index uint64
- reject bool
- // progress
- wmatch uint64
- wnext uint64
- // message
- wmsgNum int
- windex uint64
- wcommitted uint64
- }{
- {3, true, 0, 3, 0, 0, 0}, // stale resp; no replies
- {2, true, 0, 2, 1, 1, 0}, // denied resp; leader does not commit; decrease next and send probing msg
- {2, false, 2, 4, 2, 2, 2}, // accept resp; leader commits; broadcast with commit index
- {0, false, 0, 3, 0, 0, 0}, // ignore heartbeat replies
- }
-
- for i, tt := range tests {
- // sm term is 1 after it becomes the leader.
- // thus the last log term must be 1 to be committed.
- sm := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- sm.raftLog = &raftLog{
- storage: &MemoryStorage{ents: []pb.Entry{{}, {Index: 1, Term: 0}, {Index: 2, Term: 1}}},
- unstable: unstable{offset: 3},
- }
- sm.becomeCandidate()
- sm.becomeLeader()
- sm.readMessages()
- sm.Step(pb.Message{From: 2, Type: pb.MsgAppResp, Index: tt.index, Term: sm.Term, Reject: tt.reject, RejectHint: tt.index})
-
- p := sm.prs.Progress[2]
- if p.Match != tt.wmatch {
- t.Errorf("#%d match = %d, want %d", i, p.Match, tt.wmatch)
- }
- if p.Next != tt.wnext {
- t.Errorf("#%d next = %d, want %d", i, p.Next, tt.wnext)
- }
-
- msgs := sm.readMessages()
-
- if len(msgs) != tt.wmsgNum {
- t.Errorf("#%d msgNum = %d, want %d", i, len(msgs), tt.wmsgNum)
- }
- for j, msg := range msgs {
- if msg.Index != tt.windex {
- t.Errorf("#%d.%d index = %d, want %d", i, j, msg.Index, tt.windex)
- }
- if msg.Commit != tt.wcommitted {
- t.Errorf("#%d.%d commit = %d, want %d", i, j, msg.Commit, tt.wcommitted)
- }
- }
- }
-}
-
-// When the leader receives a heartbeat tick, it should
-// send a MsgHeartbeat with m.Index = 0, m.LogTerm=0 and empty entries.
-func TestBcastBeat(t *testing.T) {
- offset := uint64(1000)
- // make a state machine with log.offset = 1000
- s := pb.Snapshot{
- Metadata: pb.SnapshotMetadata{
- Index: offset,
- Term: 1,
- ConfState: pb.ConfState{Voters: []uint64{1, 2, 3}},
- },
- }
- storage := NewMemoryStorage()
- storage.ApplySnapshot(s)
- sm := newTestRaft(1, 10, 1, storage)
- sm.Term = 1
-
- sm.becomeCandidate()
- sm.becomeLeader()
- for i := 0; i < 10; i++ {
- mustAppendEntry(sm, pb.Entry{Index: uint64(i) + 1})
- }
- // slow follower
- sm.prs.Progress[2].Match, sm.prs.Progress[2].Next = 5, 6
- // normal follower
- sm.prs.Progress[3].Match, sm.prs.Progress[3].Next = sm.raftLog.lastIndex(), sm.raftLog.lastIndex()+1
-
- sm.Step(pb.Message{Type: pb.MsgBeat})
- msgs := sm.readMessages()
- if len(msgs) != 2 {
- t.Fatalf("len(msgs) = %v, want 2", len(msgs))
- }
- wantCommitMap := map[uint64]uint64{
- 2: min(sm.raftLog.committed, sm.prs.Progress[2].Match),
- 3: min(sm.raftLog.committed, sm.prs.Progress[3].Match),
- }
- for i, m := range msgs {
- if m.Type != pb.MsgHeartbeat {
- t.Fatalf("#%d: type = %v, want = %v", i, m.Type, pb.MsgHeartbeat)
- }
- if m.Index != 0 {
- t.Fatalf("#%d: prevIndex = %d, want %d", i, m.Index, 0)
- }
- if m.LogTerm != 0 {
- t.Fatalf("#%d: prevTerm = %d, want %d", i, m.LogTerm, 0)
- }
- if wantCommitMap[m.To] == 0 {
- t.Fatalf("#%d: unexpected to %d", i, m.To)
- } else {
- if m.Commit != wantCommitMap[m.To] {
- t.Fatalf("#%d: commit = %d, want %d", i, m.Commit, wantCommitMap[m.To])
- }
- delete(wantCommitMap, m.To)
- }
- if len(m.Entries) != 0 {
- t.Fatalf("#%d: len(entries) = %d, want 0", i, len(m.Entries))
- }
- }
-}
-
-// tests the output of the state machine when receiving MsgBeat
-func TestRecvMsgBeat(t *testing.T) {
- tests := []struct {
- state StateType
- wMsg int
- }{
- {StateLeader, 2},
- // candidate and follower should ignore MsgBeat
- {StateCandidate, 0},
- {StateFollower, 0},
- }
-
- for i, tt := range tests {
- sm := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- sm.raftLog = &raftLog{storage: &MemoryStorage{ents: []pb.Entry{{}, {Index: 1, Term: 0}, {Index: 2, Term: 1}}}}
- sm.Term = 1
- sm.state = tt.state
- switch tt.state {
- case StateFollower:
- sm.step = stepFollower
- case StateCandidate:
- sm.step = stepCandidate
- case StateLeader:
- sm.step = stepLeader
- }
- sm.Step(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})
-
- msgs := sm.readMessages()
- if len(msgs) != tt.wMsg {
- t.Errorf("%d: len(msgs) = %d, want %d", i, len(msgs), tt.wMsg)
- }
- for _, m := range msgs {
- if m.Type != pb.MsgHeartbeat {
- t.Errorf("%d: msg.type = %v, want %v", i, m.Type, pb.MsgHeartbeat)
- }
- }
- }
-}
-
-func TestLeaderIncreaseNext(t *testing.T) {
- previousEnts := []pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}}
- tests := []struct {
- // progress
- state tracker.StateType
- next uint64
-
- wnext uint64
- }{
- // state replicate, optimistically increase next
- // previous entries + noop entry + propose + 1
- {tracker.StateReplicate, 2, uint64(len(previousEnts) + 1 + 1 + 1)},
- // state probe, not optimistically increase next
- {tracker.StateProbe, 2, 2},
- }
-
- for i, tt := range tests {
- sm := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
- sm.raftLog.append(previousEnts...)
- sm.becomeCandidate()
- sm.becomeLeader()
- sm.prs.Progress[2].State = tt.state
- sm.prs.Progress[2].Next = tt.next
- sm.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
-
- p := sm.prs.Progress[2]
- if p.Next != tt.wnext {
- t.Errorf("#%d next = %d, want %d", i, p.Next, tt.wnext)
- }
- }
-}
-
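-// TestSendAppendForProgressProbe verifies that a follower in StateProbe
-// receives at most one in-flight MsgApp; further appends are suppressed
-// until a heartbeat response arrives.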
-func TestSendAppendForProgressProbe(t *testing.T) {
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
- r.becomeCandidate()
- r.becomeLeader()
- r.readMessages()
- r.prs.Progress[2].BecomeProbe()
-
- // each round is a heartbeat
- for i := 0; i < 3; i++ {
- if i == 0 {
- // we expect that raft will only send out one msgAPP on the first
- // loop. After that, the follower is paused until a heartbeat response is
- // received.
- mustAppendEntry(r, pb.Entry{Data: []byte("somedata")})
- r.sendAppend(2)
- msg := r.readMessages()
- if len(msg) != 1 {
- t.Errorf("len(msg) = %d, want %d", len(msg), 1)
- }
- if msg[0].Index != 0 {
- t.Errorf("index = %d, want %d", msg[0].Index, 0)
- }
- }
-
- if !r.prs.Progress[2].ProbeSent {
- t.Errorf("paused = %v, want true", r.prs.Progress[2].ProbeSent)
- }
- for j := 0; j < 10; j++ {
- mustAppendEntry(r, pb.Entry{Data: []byte("somedata")})
- r.sendAppend(2)
- if l := len(r.readMessages()); l != 0 {
- t.Errorf("len(msg) = %d, want %d", l, 0)
- }
- }
-
- // do a heartbeat
- for j := 0; j < r.heartbeatTimeout; j++ {
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})
- }
- if !r.prs.Progress[2].ProbeSent {
- t.Errorf("paused = %v, want true", r.prs.Progress[2].ProbeSent)
- }
-
- // consume the heartbeat
- msg := r.readMessages()
- if len(msg) != 1 {
- t.Errorf("len(msg) = %d, want %d", len(msg), 1)
- }
- if msg[0].Type != pb.MsgHeartbeat {
- t.Errorf("type = %v, want %v", msg[0].Type, pb.MsgHeartbeat)
- }
- }
-
- // a heartbeat response will allow another message to be sent
- r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeatResp})
- msg := r.readMessages()
- if len(msg) != 1 {
- t.Errorf("len(msg) = %d, want %d", len(msg), 1)
- }
- if msg[0].Index != 0 {
- t.Errorf("index = %d, want %d", msg[0].Index, 0)
- }
- if !r.prs.Progress[2].ProbeSent {
- t.Errorf("paused = %v, want true", r.prs.Progress[2].ProbeSent)
- }
-}
-
-func TestSendAppendForProgressReplicate(t *testing.T) {
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
- r.becomeCandidate()
- r.becomeLeader()
- r.readMessages()
- r.prs.Progress[2].BecomeReplicate()
-
- for i := 0; i < 10; i++ {
- mustAppendEntry(r, pb.Entry{Data: []byte("somedata")})
- r.sendAppend(2)
- msgs := r.readMessages()
- if len(msgs) != 1 {
- t.Errorf("len(msg) = %d, want %d", len(msgs), 1)
- }
- }
-}
-
-func TestSendAppendForProgressSnapshot(t *testing.T) {
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
- r.becomeCandidate()
- r.becomeLeader()
- r.readMessages()
- r.prs.Progress[2].BecomeSnapshot(10)
-
- for i := 0; i < 10; i++ {
- mustAppendEntry(r, pb.Entry{Data: []byte("somedata")})
- r.sendAppend(2)
- msgs := r.readMessages()
- if len(msgs) != 0 {
- t.Errorf("len(msg) = %d, want %d", len(msgs), 0)
- }
- }
-}
-
-func TestRecvMsgUnreachable(t *testing.T) {
- previousEnts := []pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}}
- s := newTestMemoryStorage(withPeers(1, 2))
- s.Append(previousEnts)
- r := newTestRaft(1, 10, 1, s)
- r.becomeCandidate()
- r.becomeLeader()
- r.readMessages()
- // set node 2 to state replicate
- r.prs.Progress[2].Match = 3
- r.prs.Progress[2].BecomeReplicate()
- r.prs.Progress[2].OptimisticUpdate(5)
-
- r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgUnreachable})
-
- if r.prs.Progress[2].State != tracker.StateProbe {
- t.Errorf("state = %s, want %s", r.prs.Progress[2].State, tracker.StateProbe)
- }
- if wnext := r.prs.Progress[2].Match + 1; r.prs.Progress[2].Next != wnext {
- t.Errorf("next = %d, want %d", r.prs.Progress[2].Next, wnext)
- }
-}
-
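-// TestRestore verifies that restoring a snapshot sets the log's last index and term and
-// the voter configuration from the snapshot metadata, that restoring the same snapshot
-// again is rejected, and that the node does not campaign before the snapshot is applied.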
-func TestRestore(t *testing.T) {
- s := pb.Snapshot{
- Metadata: pb.SnapshotMetadata{
- Index: 11, // magic number
- Term: 11, // magic number
- ConfState: pb.ConfState{Voters: []uint64{1, 2, 3}},
- },
- }
-
- storage := newTestMemoryStorage(withPeers(1, 2))
- sm := newTestRaft(1, 10, 1, storage)
- if ok := sm.restore(s); !ok {
- t.Fatal("restore fail, want succeed")
- }
-
- if sm.raftLog.lastIndex() != s.Metadata.Index {
- t.Errorf("log.lastIndex = %d, want %d", sm.raftLog.lastIndex(), s.Metadata.Index)
- }
- if mustTerm(sm.raftLog.term(s.Metadata.Index)) != s.Metadata.Term {
- t.Errorf("log.lastTerm = %d, want %d", mustTerm(sm.raftLog.term(s.Metadata.Index)), s.Metadata.Term)
- }
- sg := sm.prs.VoterNodes()
- if !reflect.DeepEqual(sg, s.Metadata.ConfState.Voters) {
- t.Errorf("sm.Voters = %+v, want %+v", sg, s.Metadata.ConfState.Voters)
- }
-
- if ok := sm.restore(s); ok {
- t.Fatal("restore succeed, want fail")
- }
- // It should not campaign before actually applying data.
- for i := 0; i < sm.randomizedElectionTimeout; i++ {
- sm.tick()
- }
- if sm.state != StateFollower {
- t.Errorf("state = %d, want %d", sm.state, StateFollower)
- }
-}
-
-// TestRestoreWithLearner restores a snapshot which contains learners.
-func TestRestoreWithLearner(t *testing.T) {
- s := pb.Snapshot{
- Metadata: pb.SnapshotMetadata{
- Index: 11, // magic number
- Term: 11, // magic number
- ConfState: pb.ConfState{Voters: []uint64{1, 2}, Learners: []uint64{3}},
- },
- }
-
- storage := newTestMemoryStorage(withPeers(1, 2), withLearners(3))
- sm := newTestLearnerRaft(3, 8, 2, storage)
- if ok := sm.restore(s); !ok {
- t.Error("restore fail, want succeed")
- }
-
- if sm.raftLog.lastIndex() != s.Metadata.Index {
- t.Errorf("log.lastIndex = %d, want %d", sm.raftLog.lastIndex(), s.Metadata.Index)
- }
- if mustTerm(sm.raftLog.term(s.Metadata.Index)) != s.Metadata.Term {
- t.Errorf("log.lastTerm = %d, want %d", mustTerm(sm.raftLog.term(s.Metadata.Index)), s.Metadata.Term)
- }
- sg := sm.prs.VoterNodes()
- if len(sg) != len(s.Metadata.ConfState.Voters) {
- t.Errorf("sm.Voters = %+v, length not equal with %+v", sg, s.Metadata.ConfState.Voters)
- }
- lns := sm.prs.LearnerNodes()
- if len(lns) != len(s.Metadata.ConfState.Learners) {
-  t.Errorf("sm.LearnerNodes = %+v, length not equal with %+v", lns, s.Metadata.ConfState.Learners)
- }
- for _, n := range s.Metadata.ConfState.Voters {
- if sm.prs.Progress[n].IsLearner {
-   t.Errorf("sm.Node %x isLearner = %t, want %t", n, sm.prs.Progress[n].IsLearner, false)
- }
- }
- for _, n := range s.Metadata.ConfState.Learners {
- if !sm.prs.Progress[n].IsLearner {
-   t.Errorf("sm.Node %x isLearner = %t, want %t", n, sm.prs.Progress[n].IsLearner, true)
- }
- }
-
- if ok := sm.restore(s); ok {
- t.Error("restore succeed, want fail")
- }
-}
-
-// TestRestoreWithVotersOutgoing tests if an outgoing voter can receive and apply a snapshot correctly.
-func TestRestoreWithVotersOutgoing(t *testing.T) {
- s := pb.Snapshot{
- Metadata: pb.SnapshotMetadata{
- Index: 11, // magic number
- Term: 11, // magic number
- ConfState: pb.ConfState{Voters: []uint64{2, 3, 4}, VotersOutgoing: []uint64{1, 2, 3}},
- },
- }
-
- storage := newTestMemoryStorage(withPeers(1, 2))
- sm := newTestRaft(1, 10, 1, storage)
- if ok := sm.restore(s); !ok {
- t.Fatal("restore fail, want succeed")
- }
-
- if sm.raftLog.lastIndex() != s.Metadata.Index {
- t.Errorf("log.lastIndex = %d, want %d", sm.raftLog.lastIndex(), s.Metadata.Index)
- }
- if mustTerm(sm.raftLog.term(s.Metadata.Index)) != s.Metadata.Term {
- t.Errorf("log.lastTerm = %d, want %d", mustTerm(sm.raftLog.term(s.Metadata.Index)), s.Metadata.Term)
- }
- sg := sm.prs.VoterNodes()
- if !reflect.DeepEqual(sg, []uint64{1, 2, 3, 4}) {
- t.Errorf("sm.Voters = %+v, want %+v", sg, s.Metadata.ConfState.Voters)
- }
-
- if ok := sm.restore(s); ok {
- t.Fatal("restore succeed, want fail")
- }
- // It should not campaign before actually applying data.
- for i := 0; i < sm.randomizedElectionTimeout; i++ {
- sm.tick()
- }
- if sm.state != StateFollower {
- t.Errorf("state = %d, want %d", sm.state, StateFollower)
- }
-}
-
-// TestRestoreVoterToLearner verifies that a normal peer can be downgraded to a
-// learner through a snapshot. At the time of writing, we don't allow
-// configuration changes to do this directly, but note that the snapshot may
-// compress multiple changes to the configuration into one: the voter could have
-// been removed, then readded as a learner and the snapshot reflects both
-// changes. In that case, a voter receives a snapshot telling it that it is now
-// a learner. In fact, the node has to accept that snapshot, or it is
-// permanently cut off from the Raft log.
-func TestRestoreVoterToLearner(t *testing.T) {
- s := pb.Snapshot{
- Metadata: pb.SnapshotMetadata{
- Index: 11, // magic number
- Term: 11, // magic number
- ConfState: pb.ConfState{Voters: []uint64{1, 2}, Learners: []uint64{3}},
- },
- }
-
- storage := newTestMemoryStorage(withPeers(1, 2, 3))
- sm := newTestRaft(3, 10, 1, storage)
-
- if sm.isLearner {
- t.Errorf("%x is learner, want not", sm.id)
- }
- if ok := sm.restore(s); !ok {
- t.Error("restore failed unexpectedly")
- }
-}
-
-// TestRestoreLearnerPromotion checks that a learner can become a follower after
-// restoring a snapshot.
-func TestRestoreLearnerPromotion(t *testing.T) {
- s := pb.Snapshot{
- Metadata: pb.SnapshotMetadata{
- Index: 11, // magic number
- Term: 11, // magic number
- ConfState: pb.ConfState{Voters: []uint64{1, 2, 3}},
- },
- }
-
- storage := newTestMemoryStorage(withPeers(1, 2), withLearners(3))
- sm := newTestLearnerRaft(3, 10, 1, storage)
-
- if !sm.isLearner {
- t.Errorf("%x is not learner, want yes", sm.id)
- }
-
- if ok := sm.restore(s); !ok {
- t.Error("restore fail, want succeed")
- }
-
- if sm.isLearner {
- t.Errorf("%x is learner, want not", sm.id)
- }
-}
-
-// TestLearnerReceiveSnapshot tests that a learner can receive a snapshot from the leader.
-func TestLearnerReceiveSnapshot(t *testing.T) {
- // restore the state machine from a snapshot so it has a compacted log and a snapshot
- s := pb.Snapshot{
- Metadata: pb.SnapshotMetadata{
- Index: 11, // magic number
- Term: 11, // magic number
- ConfState: pb.ConfState{Voters: []uint64{1}, Learners: []uint64{2}},
- },
- }
-
- store := newTestMemoryStorage(withPeers(1), withLearners(2))
- n1 := newTestLearnerRaft(1, 10, 1, store)
- n2 := newTestLearnerRaft(2, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)))
-
- n1.restore(s)
- ready := newReady(n1, &SoftState{}, pb.HardState{})
- store.ApplySnapshot(ready.Snapshot)
- n1.advance(ready)
-
- // Force-set n1's applied index.
- n1.raftLog.appliedTo(n1.raftLog.committed)
-
- nt := newNetwork(n1, n2)
-
- setRandomizedElectionTimeout(n1, n1.electionTimeout)
- for i := 0; i < n1.electionTimeout; i++ {
- n1.tick()
- }
-
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})
-
- if n2.raftLog.committed != n1.raftLog.committed {
- t.Errorf("peer 2 must commit to %d, but %d", n1.raftLog.committed, n2.raftLog.committed)
- }
-}
-
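-// TestRestoreIgnoreSnapshot verifies that a snapshot that does not extend beyond the
-// current log is ignored, although the commit index may still be fast-forwarded to the
-// snapshot index.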
-func TestRestoreIgnoreSnapshot(t *testing.T) {
- previousEnts := []pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}}
- commit := uint64(1)
- storage := newTestMemoryStorage(withPeers(1, 2))
- sm := newTestRaft(1, 10, 1, storage)
- sm.raftLog.append(previousEnts...)
- sm.raftLog.commitTo(commit)
-
- s := pb.Snapshot{
- Metadata: pb.SnapshotMetadata{
- Index: commit,
- Term: 1,
- ConfState: pb.ConfState{Voters: []uint64{1, 2}},
- },
- }
-
- // ignore snapshot
- if ok := sm.restore(s); ok {
- t.Errorf("restore = %t, want %t", ok, false)
- }
- if sm.raftLog.committed != commit {
- t.Errorf("commit = %d, want %d", sm.raftLog.committed, commit)
- }
-
- // ignore snapshot and fast forward commit
- s.Metadata.Index = commit + 1
- if ok := sm.restore(s); ok {
- t.Errorf("restore = %t, want %t", ok, false)
- }
- if sm.raftLog.committed != commit+1 {
- t.Errorf("commit = %d, want %d", sm.raftLog.committed, commit+1)
- }
-}
-
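-// TestProvideSnap verifies that the leader sends a MsgSnap when the entries a follower
-// needs have already been compacted away.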
-func TestProvideSnap(t *testing.T) {
- // restore the state machine from a snapshot so it has a compacted log and a snapshot
- s := pb.Snapshot{
- Metadata: pb.SnapshotMetadata{
- Index: 11, // magic number
- Term: 11, // magic number
- ConfState: pb.ConfState{Voters: []uint64{1, 2}},
- },
- }
- storage := newTestMemoryStorage(withPeers(1))
- sm := newTestRaft(1, 10, 1, storage)
- sm.restore(s)
-
- sm.becomeCandidate()
- sm.becomeLeader()
-
- // Force-set node 2's Next so that node 2 needs a snapshot.
- sm.prs.Progress[2].Next = sm.raftLog.firstIndex()
- sm.Step(pb.Message{From: 2, To: 1, Type: pb.MsgAppResp, Index: sm.prs.Progress[2].Next - 1, Reject: true})
-
- msgs := sm.readMessages()
- if len(msgs) != 1 {
- t.Fatalf("len(msgs) = %d, want 1", len(msgs))
- }
- m := msgs[0]
- if m.Type != pb.MsgSnap {
- t.Errorf("m.Type = %v, want %v", m.Type, pb.MsgSnap)
- }
-}
-
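-// TestIgnoreProvidingSnap verifies that the leader does not send a snapshot to a
-// follower that has not been recently active.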
-func TestIgnoreProvidingSnap(t *testing.T) {
- // restore the state machine from a snapshot so it has a compacted log and a snapshot
- s := pb.Snapshot{
- Metadata: pb.SnapshotMetadata{
- Index: 11, // magic number
- Term: 11, // magic number
- ConfState: pb.ConfState{Voters: []uint64{1, 2}},
- },
- }
- storage := newTestMemoryStorage(withPeers(1))
- sm := newTestRaft(1, 10, 1, storage)
- sm.restore(s)
-
- sm.becomeCandidate()
- sm.becomeLeader()
-
- // Force-set node 2's Next so that node 2 needs a snapshot, and mark node 2 as
- // inactive; expect node 1 not to send it a snapshot.
- sm.prs.Progress[2].Next = sm.raftLog.firstIndex() - 1
- sm.prs.Progress[2].RecentActive = false
-
- sm.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
-
- msgs := sm.readMessages()
- if len(msgs) != 0 {
- t.Errorf("len(msgs) = %d, want 0", len(msgs))
- }
-}
-
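-// TestRestoreFromSnapMsg verifies that a node that receives a MsgSnap records the
-// sender as its leader.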
-func TestRestoreFromSnapMsg(t *testing.T) {
- s := pb.Snapshot{
- Metadata: pb.SnapshotMetadata{
- Index: 11, // magic number
- Term: 11, // magic number
- ConfState: pb.ConfState{Voters: []uint64{1, 2}},
- },
- }
- m := pb.Message{Type: pb.MsgSnap, From: 1, Term: 2, Snapshot: s}
-
- sm := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
- sm.Step(m)
-
- if sm.lead != uint64(1) {
- t.Errorf("sm.lead = %d, want 1", sm.lead)
- }
-
- // TODO(bdarnell): what should this test?
-}
-
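-// TestSlowNodeRestore verifies that a follower that has fallen behind a compacted
-// leader log is brought back up to the leader's commit index via a snapshot.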
-func TestSlowNodeRestore(t *testing.T) {
- nt := newNetwork(nil, nil, nil)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- nt.isolate(3)
- for j := 0; j <= 100; j++ {
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
- }
- lead := nt.peers[1].(*raft)
- nextEnts(lead, nt.storage[1])
- nt.storage[1].CreateSnapshot(lead.raftLog.applied, &pb.ConfState{Voters: lead.prs.VoterNodes()}, nil)
- nt.storage[1].Compact(lead.raftLog.applied)
-
- nt.recover()
- // Send heartbeats so that the leader can learn everyone is active.
- // Node 3 will only be considered active once node 1 receives a reply from it.
- for {
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})
- if lead.prs.Progress[3].RecentActive {
- break
- }
- }
-
- // trigger a snapshot
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
-
- follower := nt.peers[3].(*raft)
-
- // trigger a commit
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
- if follower.raftLog.committed != lead.raftLog.committed {
- t.Errorf("follower.committed = %d, want %d", follower.raftLog.committed, lead.raftLog.committed)
- }
-}
-
-// TestStepConfig tests that when raft steps a MsgProp containing an EntryConfChange entry,
-// it appends the entry to the log and updates pendingConfIndex.
-func TestStepConfig(t *testing.T) {
- // a raft that cannot make progress
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
- r.becomeCandidate()
- r.becomeLeader()
- index := r.raftLog.lastIndex()
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange}}})
- if g := r.raftLog.lastIndex(); g != index+1 {
- t.Errorf("index = %d, want %d", g, index+1)
- }
- if r.pendingConfIndex != index+1 {
- t.Errorf("pendingConfIndex = %d, want %d", r.pendingConfIndex, index+1)
- }
-}
-
-// TestStepIgnoreConfig tests that if raft steps a second MsgProp with an EntryConfChange
-// entry while the first one is uncommitted, the node replaces the proposal with an empty
-// normal entry and keeps its original pendingConfIndex.
-func TestStepIgnoreConfig(t *testing.T) {
- // a raft that cannot make progress
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
- r.becomeCandidate()
- r.becomeLeader()
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange}}})
- index := r.raftLog.lastIndex()
- pendingConfIndex := r.pendingConfIndex
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange}}})
- wents := []pb.Entry{{Type: pb.EntryNormal, Term: 1, Index: 3, Data: nil}}
- ents, err := r.raftLog.entries(index+1, noLimit)
- if err != nil {
- t.Fatalf("unexpected error %v", err)
- }
- if !reflect.DeepEqual(ents, wents) {
- t.Errorf("ents = %+v, want %+v", ents, wents)
- }
- if r.pendingConfIndex != pendingConfIndex {
- t.Errorf("pendingConfIndex = %d, want %d", r.pendingConfIndex, pendingConfIndex)
- }
-}
-
-// TestNewLeaderPendingConfig tests that a new leader sets its pendingConfIndex
-// based on uncommitted entries.
-func TestNewLeaderPendingConfig(t *testing.T) {
- tests := []struct {
- addEntry bool
- wpendingIndex uint64
- }{
- {false, 0},
- {true, 1},
- }
- for i, tt := range tests {
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
- if tt.addEntry {
- mustAppendEntry(r, pb.Entry{Type: pb.EntryNormal})
- }
- r.becomeCandidate()
- r.becomeLeader()
- if r.pendingConfIndex != tt.wpendingIndex {
- t.Errorf("#%d: pendingConfIndex = %d, want %d",
- i, r.pendingConfIndex, tt.wpendingIndex)
- }
- }
-}
-
-// TestAddNode tests that addNode could update nodes correctly.
-func TestAddNode(t *testing.T) {
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)))
- r.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeAddNode}.AsV2())
- nodes := r.prs.VoterNodes()
- wnodes := []uint64{1, 2}
- if !reflect.DeepEqual(nodes, wnodes) {
- t.Errorf("nodes = %v, want %v", nodes, wnodes)
- }
-}
-
-// TestAddLearner tests that addLearner could update nodes correctly.
-func TestAddLearner(t *testing.T) {
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)))
- // Add new learner peer.
- r.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeAddLearnerNode}.AsV2())
- if r.isLearner {
- t.Fatal("expected 1 to be voter")
- }
- nodes := r.prs.LearnerNodes()
- wnodes := []uint64{2}
- if !reflect.DeepEqual(nodes, wnodes) {
- t.Errorf("nodes = %v, want %v", nodes, wnodes)
- }
- if !r.prs.Progress[2].IsLearner {
- t.Fatal("expected 2 to be learner")
- }
-
- // Promote peer to voter.
- r.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeAddNode}.AsV2())
- if r.prs.Progress[2].IsLearner {
- t.Fatal("expected 2 to be voter")
- }
-
- // Demote r.
- r.applyConfChange(pb.ConfChange{NodeID: 1, Type: pb.ConfChangeAddLearnerNode}.AsV2())
- if !r.prs.Progress[1].IsLearner {
- t.Fatal("expected 1 to be learner")
- }
- if !r.isLearner {
- t.Fatal("expected 1 to be learner")
- }
-
- // Promote r again.
- r.applyConfChange(pb.ConfChange{NodeID: 1, Type: pb.ConfChangeAddNode}.AsV2())
- if r.prs.Progress[1].IsLearner {
- t.Fatal("expected 1 to be voter")
- }
- if r.isLearner {
- t.Fatal("expected 1 to be voter")
- }
-}
-
-// TestAddNodeCheckQuorum tests that addNode does not trigger a leader election
-// immediately when checkQuorum is set.
-func TestAddNodeCheckQuorum(t *testing.T) {
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)))
- r.checkQuorum = true
-
- r.becomeCandidate()
- r.becomeLeader()
-
- for i := 0; i < r.electionTimeout-1; i++ {
- r.tick()
- }
-
- r.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeAddNode}.AsV2())
-
- // This tick will reach electionTimeout, which triggers a quorum check.
- r.tick()
-
- // Node 1 should still be the leader after a single tick.
- if r.state != StateLeader {
- t.Errorf("state = %v, want %v", r.state, StateLeader)
- }
-
- // After another electionTimeout ticks without hearing from node 2,
- // node 1 should step down.
- for i := 0; i < r.electionTimeout; i++ {
- r.tick()
- }
-
- if r.state != StateFollower {
- t.Errorf("state = %v, want %v", r.state, StateFollower)
- }
-}
-
-// TestRemoveNode tests that removeNode updates the voter nodes and the
-// removed list correctly.
-func TestRemoveNode(t *testing.T) {
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
- r.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeRemoveNode}.AsV2())
- w := []uint64{1}
- if g := r.prs.VoterNodes(); !reflect.DeepEqual(g, w) {
- t.Errorf("nodes = %v, want %v", g, w)
- }
-
- // Removing the remaining voter will panic.
- defer func() {
- if r := recover(); r == nil {
- t.Error("did not panic")
- }
- }()
- r.applyConfChange(pb.ConfChange{NodeID: 1, Type: pb.ConfChangeRemoveNode}.AsV2())
-}
-
-// TestRemoveLearner tests that removeNode updates the voter and learner nodes
-// and the removed list correctly.
-func TestRemoveLearner(t *testing.T) {
- r := newTestLearnerRaft(1, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)))
- r.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeRemoveNode}.AsV2())
- w := []uint64{1}
- if g := r.prs.VoterNodes(); !reflect.DeepEqual(g, w) {
- t.Errorf("nodes = %v, want %v", g, w)
- }
-
- w = nil
- if g := r.prs.LearnerNodes(); !reflect.DeepEqual(g, w) {
- t.Errorf("nodes = %v, want %v", g, w)
- }
-
- // Removing the remaining voter will panic.
- defer func() {
- if r := recover(); r == nil {
- t.Error("did not panic")
- }
- }()
- r.applyConfChange(pb.ConfChange{NodeID: 1, Type: pb.ConfChangeRemoveNode}.AsV2())
-}
-
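-// TestPromotable verifies that promotable reports true only when the node itself is
-// among the configured voters.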
-func TestPromotable(t *testing.T) {
- id := uint64(1)
- tests := []struct {
- peers []uint64
- wp bool
- }{
- {[]uint64{1}, true},
- {[]uint64{1, 2, 3}, true},
- {[]uint64{}, false},
- {[]uint64{2, 3}, false},
- }
- for i, tt := range tests {
- r := newTestRaft(id, 5, 1, newTestMemoryStorage(withPeers(tt.peers...)))
- if g := r.promotable(); g != tt.wp {
- t.Errorf("#%d: promotable = %v, want %v", i, g, tt.wp)
- }
- }
-}
-
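-// TestRaftNodes verifies that VoterNodes returns the configured voter IDs in sorted order.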
-func TestRaftNodes(t *testing.T) {
- tests := []struct {
- ids []uint64
- wids []uint64
- }{
- {
- []uint64{1, 2, 3},
- []uint64{1, 2, 3},
- },
- {
- []uint64{3, 2, 1},
- []uint64{1, 2, 3},
- },
- }
- for i, tt := range tests {
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(tt.ids...)))
- if !reflect.DeepEqual(r.prs.VoterNodes(), tt.wids) {
- t.Errorf("#%d: nodes = %+v, want %+v", i, r.prs.VoterNodes(), tt.wids)
- }
- }
-}
-
-func TestCampaignWhileLeader(t *testing.T) {
- testCampaignWhileLeader(t, false)
-}
-
-func TestPreCampaignWhileLeader(t *testing.T) {
- testCampaignWhileLeader(t, true)
-}
-
-func testCampaignWhileLeader(t *testing.T, preVote bool) {
- cfg := newTestConfig(1, 5, 1, newTestMemoryStorage(withPeers(1)))
- cfg.PreVote = preVote
- r := newRaft(cfg)
- if r.state != StateFollower {
- t.Errorf("expected new node to be follower but got %s", r.state)
- }
- // We don't call campaign() directly because it comes after the check
- // for our current state.
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
- if r.state != StateLeader {
- t.Errorf("expected single-node election to become leader but got %s", r.state)
- }
- term := r.Term
- r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
- if r.state != StateLeader {
- t.Errorf("expected to remain leader but got %s", r.state)
- }
- if r.Term != term {
- t.Errorf("expected to remain in term %v but got %v", term, r.Term)
- }
-}
-
-// TestCommitAfterRemoveNode verifies that pending commands can become
-// committed when a config change reduces the quorum requirements.
-func TestCommitAfterRemoveNode(t *testing.T) {
- // Create a cluster with two nodes.
- s := newTestMemoryStorage(withPeers(1, 2))
- r := newTestRaft(1, 5, 1, s)
- r.becomeCandidate()
- r.becomeLeader()
-
- // Begin to remove the second node.
- cc := pb.ConfChange{
- Type: pb.ConfChangeRemoveNode,
- NodeID: 2,
- }
- ccData, err := cc.Marshal()
- if err != nil {
- t.Fatal(err)
- }
- r.Step(pb.Message{
- Type: pb.MsgProp,
- Entries: []pb.Entry{
- {Type: pb.EntryConfChange, Data: ccData},
- },
- })
- // Stabilize the log and make sure nothing is committed yet.
- if ents := nextEnts(r, s); len(ents) > 0 {
- t.Fatalf("unexpected committed entries: %v", ents)
- }
- ccIndex := r.raftLog.lastIndex()
-
- // While the config change is pending, make another proposal.
- r.Step(pb.Message{
- Type: pb.MsgProp,
- Entries: []pb.Entry{
- {Type: pb.EntryNormal, Data: []byte("hello")},
- },
- })
-
- // Node 2 acknowledges the config change, committing it.
- r.Step(pb.Message{
- Type: pb.MsgAppResp,
- From: 2,
- Index: ccIndex,
- })
- ents := nextEnts(r, s)
- if len(ents) != 2 {
- t.Fatalf("expected two committed entries, got %v", ents)
- }
- if ents[0].Type != pb.EntryNormal || ents[0].Data != nil {
- t.Fatalf("expected ents[0] to be empty, but got %v", ents[0])
- }
- if ents[1].Type != pb.EntryConfChange {
- t.Fatalf("expected ents[1] to be EntryConfChange, got %v", ents[1])
- }
-
- // Apply the config change. This reduces quorum requirements so the
- // pending command can now commit.
- r.applyConfChange(cc.AsV2())
- ents = nextEnts(r, s)
- if len(ents) != 1 || ents[0].Type != pb.EntryNormal ||
- string(ents[0].Data) != "hello" {
- t.Fatalf("expected one committed EntryNormal, got %v", ents)
- }
-}
-
-// TestLeaderTransferToUpToDateNode verifies that the leadership transfer should succeed
-// if the transferee has the most up-to-date log entries when the transfer starts.
-func TestLeaderTransferToUpToDateNode(t *testing.T) {
- nt := newNetwork(nil, nil, nil)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- lead := nt.peers[1].(*raft)
-
- if lead.lead != 1 {
- t.Fatalf("after election leader is %x, want 1", lead.lead)
- }
-
- // Transfer leadership to 2.
- nt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})
-
- checkLeaderTransferState(t, lead, StateFollower, 2)
-
- // After some log replication, transfer leadership back to 1.
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
-
- nt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})
-
- checkLeaderTransferState(t, lead, StateLeader, 1)
-}
-
-// TestLeaderTransferToUpToDateNodeFromFollower verifies that the leadership transfer
-// should succeed if the transferee has the most up-to-date log entries when the
-// transfer starts. Unlike TestLeaderTransferToUpToDateNode, where the leader transfer
-// message is sent to the leader, in this test case every leader transfer message is
-// sent to the follower.
-func TestLeaderTransferToUpToDateNodeFromFollower(t *testing.T) {
- nt := newNetwork(nil, nil, nil)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- lead := nt.peers[1].(*raft)
-
- if lead.lead != 1 {
- t.Fatalf("after election leader is %x, want 1", lead.lead)
- }
-
- // Transfer leadership to 2.
- nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgTransferLeader})
-
- checkLeaderTransferState(t, lead, StateFollower, 2)
-
- // After some log replication, transfer leadership back to 1.
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
-
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})
-
- checkLeaderTransferState(t, lead, StateLeader, 1)
-}
-
-// TestLeaderTransferWithCheckQuorum ensures that transferring leadership still works
-// even if the current leader is still under its leader lease.
-func TestLeaderTransferWithCheckQuorum(t *testing.T) {
- nt := newNetwork(nil, nil, nil)
- for i := 1; i < 4; i++ {
- r := nt.peers[uint64(i)].(*raft)
- r.checkQuorum = true
- setRandomizedElectionTimeout(r, r.electionTimeout+i)
- }
-
- // Let peer 2's electionElapsed reach the election timeout so that it can vote for peer 1.
- f := nt.peers[2].(*raft)
- for i := 0; i < f.electionTimeout; i++ {
- f.tick()
- }
-
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- lead := nt.peers[1].(*raft)
-
- if lead.lead != 1 {
- t.Fatalf("after election leader is %x, want 1", lead.lead)
- }
-
- // Transfer leadership to 2.
- nt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})
-
- checkLeaderTransferState(t, lead, StateFollower, 2)
-
- // After some log replication, transfer leadership back to 1.
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
-
- nt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})
-
- checkLeaderTransferState(t, lead, StateLeader, 1)
-}
-
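-// TestLeaderTransferToSlowFollower verifies that the leader first catches a lagging
-// transferee up on its log and then completes the leadership transfer.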
-func TestLeaderTransferToSlowFollower(t *testing.T) {
- defaultLogger.EnableDebug()
- nt := newNetwork(nil, nil, nil)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- nt.isolate(3)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
-
- nt.recover()
- lead := nt.peers[1].(*raft)
- if lead.prs.Progress[3].Match != 1 {
- t.Fatalf("node 1 has match %x for node 3, want %x", lead.prs.Progress[3].Match, 1)
- }
-
- // Transfer leadership to 3 while node 3 lacks log entries.
- nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
-
- checkLeaderTransferState(t, lead, StateFollower, 3)
-}
-
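-// TestLeaderTransferAfterSnapshot verifies that when the transferee needs a snapshot,
-// the leadership transfer completes only after the snapshot has been applied and the
-// follower has reported its progress.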
-func TestLeaderTransferAfterSnapshot(t *testing.T) {
- nt := newNetwork(nil, nil, nil)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- nt.isolate(3)
-
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
- lead := nt.peers[1].(*raft)
- nextEnts(lead, nt.storage[1])
- nt.storage[1].CreateSnapshot(lead.raftLog.applied, &pb.ConfState{Voters: lead.prs.VoterNodes()}, nil)
- nt.storage[1].Compact(lead.raftLog.applied)
-
- nt.recover()
- if lead.prs.Progress[3].Match != 1 {
- t.Fatalf("node 1 has match %x for node 3, want %x", lead.prs.Progress[3].Match, 1)
- }
-
- filtered := pb.Message{}
- // Snapshot needs to be applied before sending MsgAppResp
- nt.msgHook = func(m pb.Message) bool {
- if m.Type != pb.MsgAppResp || m.From != 3 || m.Reject {
- return true
- }
- filtered = m
- return false
- }
- // Transfer leadership to 3 while node 3 has not yet applied the snapshot.
- nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
- if lead.state != StateLeader {
- t.Fatalf("node 1 should still be leader as snapshot is not applied, got %x", lead.state)
- }
- if reflect.DeepEqual(filtered, pb.Message{}) {
- t.Fatalf("Follower should report snapshot progress automatically.")
- }
-
- // Apply snapshot and resume progress
- follower := nt.peers[3].(*raft)
- ready := newReady(follower, &SoftState{}, pb.HardState{})
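-// TestLeaderIncreaseNext verifies that after a proposal the leader optimistically
-// increases a follower's Next in StateReplicate, but leaves it unchanged in StateProbe.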
- nt.storage[3].ApplySnapshot(ready.Snapshot)
- follower.advance(ready)
- nt.msgHook = nil
- nt.send(filtered)
-
- checkLeaderTransferState(t, lead, StateFollower, 3)
-}
-
-func TestLeaderTransferToSelf(t *testing.T) {
- nt := newNetwork(nil, nil, nil)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- lead := nt.peers[1].(*raft)
-
- // Transfer leadership to self; it should be a no-op.
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})
- checkLeaderTransferState(t, lead, StateLeader, 1)
-}
-
-func TestLeaderTransferToNonExistingNode(t *testing.T) {
- nt := newNetwork(nil, nil, nil)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- lead := nt.peers[1].(*raft)
- // Transfer leadership to a non-existing node; it should be a no-op.
- nt.send(pb.Message{From: 4, To: 1, Type: pb.MsgTransferLeader})
- checkLeaderTransferState(t, lead, StateLeader, 1)
-}
-
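-// TestLeaderTransferTimeout verifies that a pending leadership transfer to an
-// unreachable node is aborted after an election timeout and the leader remains leader.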
-func TestLeaderTransferTimeout(t *testing.T) {
- nt := newNetwork(nil, nil, nil)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
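-// TestSendAppendForProgressProbe verifies that while a follower is in StateProbe the
-// leader sends at most one MsgApp per heartbeat interval, and that a heartbeat response
-// unpauses the probe so the next MsgApp can be sent.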
-
- nt.isolate(3)
-
- lead := nt.peers[1].(*raft)
-
- // Transfer leadership to isolated node, wait for timeout.
- nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
- if lead.leadTransferee != 3 {
- t.Fatalf("wait transferring, leadTransferee = %v, want %v", lead.leadTransferee, 3)
- }
-   // We expect that raft will only send out one MsgApp on the first
- lead.tick()
- }
- if lead.leadTransferee != 3 {
- t.Fatalf("wait transferring, leadTransferee = %v, want %v", lead.leadTransferee, 3)
- }
-
- for i := 0; i < lead.electionTimeout-lead.heartbeatTimeout; i++ {
- lead.tick()
- }
-
- checkLeaderTransferState(t, lead, StateLeader, 1)
-}
-
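-// TestLeaderTransferIgnoreProposal verifies that proposals are dropped with
-// ErrProposalDropped while a leadership transfer is in progress.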
-func TestLeaderTransferIgnoreProposal(t *testing.T) {
- nt := newNetwork(nil, nil, nil)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- nt.isolate(3)
-
- lead := nt.peers[1].(*raft)
-
- // Transfer leadership to the isolated node so the transfer stays pending, then send a proposal.
- nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
- if lead.leadTransferee != 3 {
- t.Fatalf("wait transferring, leadTransferee = %v, want %v", lead.leadTransferee, 3)
- }
-
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
- err := lead.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
- if err != ErrProposalDropped {
- t.Fatalf("should return drop proposal error while transferring")
- }
-
- if lead.prs.Progress[1].Match != 1 {
- t.Fatalf("node 1 has match %x, want %x", lead.prs.Progress[1].Match, 1)
- }
-}
-
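-// TestLeaderTransferReceiveHigherTermVote verifies that a leader with a pending
-// transfer steps down when another node wins an election at a higher term.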
-func TestLeaderTransferReceiveHigherTermVote(t *testing.T) {
- nt := newNetwork(nil, nil, nil)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- nt.isolate(3)
-
- lead := nt.peers[1].(*raft)
-
- // Transfer leadership to the isolated node so the transfer stays pending.
- nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
- if lead.leadTransferee != 3 {
- t.Fatalf("wait transferring, leadTransferee = %v, want %v", lead.leadTransferee, 3)
- }
-
- nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup, Index: 1, Term: 2})
-
- checkLeaderTransferState(t, lead, StateFollower, 2)
-}
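-// TestSendAppendForProgressReplicate verifies that the leader keeps sending MsgApp to a
-// follower in StateReplicate without pausing.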
-
-func TestLeaderTransferRemoveNode(t *testing.T) {
- nt := newNetwork(nil, nil, nil)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- nt.ignore(pb.MsgTimeoutNow)
-
- lead := nt.peers[1].(*raft)
-
- // The leadTransferee is removed while the leadership transfer is in progress.
- nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
- if lead.leadTransferee != 3 {
- t.Fatalf("wait transferring, leadTransferee = %v, want %v", lead.leadTransferee, 3)
- }
-
- lead.applyConfChange(pb.ConfChange{NodeID: 3, Type: pb.ConfChangeRemoveNode}.AsV2())
-
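-// TestSendAppendForProgressSnapshot verifies that the leader does not send MsgApp to a
-// follower while it is in StateSnapshot.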
- checkLeaderTransferState(t, lead, StateLeader, 1)
-}
-
-func TestLeaderTransferDemoteNode(t *testing.T) {
- nt := newNetwork(nil, nil, nil)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- nt.ignore(pb.MsgTimeoutNow)
-
- lead := nt.peers[1].(*raft)
-
- // The leadTransferee is demoted while the leadership transfer is in progress.
- nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
- if lead.leadTransferee != 3 {
- t.Fatalf("wait transferring, leadTransferee = %v, want %v", lead.leadTransferee, 3)
- }
-
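-// TestRecvMsgUnreachable verifies that a MsgUnreachable reported for a follower in
-// StateReplicate moves it back to StateProbe with Next reset to Match+1.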
- lead.applyConfChange(pb.ConfChangeV2{
- Changes: []pb.ConfChangeSingle{
- {
- Type: pb.ConfChangeRemoveNode,
- NodeID: 3,
- },
- {
- Type: pb.ConfChangeAddLearnerNode,
- NodeID: 3,
- },
- },
- })
-
- // Leave the joint configuration by applying an empty ConfChangeV2.
- lead.applyConfChange(pb.ConfChangeV2{})
- checkLeaderTransferState(t, lead, StateLeader, 1)
-}
-
-// TestLeaderTransferBack verifies that leadership can be transferred back to self while the previous transfer is pending.
-func TestLeaderTransferBack(t *testing.T) {
- nt := newNetwork(nil, nil, nil)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- nt.isolate(3)
-
- lead := nt.peers[1].(*raft)
-
- nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
- if lead.leadTransferee != 3 {
- t.Fatalf("wait transferring, leadTransferee = %v, want %v", lead.leadTransferee, 3)
- }
-
- // Transfer leadership back to self.
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})
-
- checkLeaderTransferState(t, lead, StateLeader, 1)
-}
-
-// TestLeaderTransferSecondTransferToAnotherNode verifies that the leader can transfer
-// leadership to another node while the previous transfer is pending.
-func TestLeaderTransferSecondTransferToAnotherNode(t *testing.T) {
- nt := newNetwork(nil, nil, nil)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- nt.isolate(3)
-
- lead := nt.peers[1].(*raft)
-
- nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
- if lead.leadTransferee != 3 {
- t.Fatalf("wait transferring, leadTransferee = %v, want %v", lead.leadTransferee, 3)
- }
-
- // Transfer leadership to another node.
- nt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})
-
- checkLeaderTransferState(t, lead, StateFollower, 2)
-}
-
-// TestLeaderTransferSecondTransferToSameNode verifies that a second transfer-leader request
-// to the same node does not extend the timeout while the first one is pending.
-func TestLeaderTransferSecondTransferToSameNode(t *testing.T) {
- nt := newNetwork(nil, nil, nil)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- nt.isolate(3)
-
- lead := nt.peers[1].(*raft)
-
- nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
- if lead.leadTransferee != 3 {
- t.Fatalf("wait transferring, leadTransferee = %v, want %v", lead.leadTransferee, 3)
- }
-
- for i := 0; i < lead.heartbeatTimeout; i++ {
- lead.tick()
- }
- // Second transfer leadership request to the same node.
- nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
-
- for i := 0; i < lead.electionTimeout-lead.heartbeatTimeout; i++ {
- lead.tick()
- }
-
- checkLeaderTransferState(t, lead, StateLeader, 1)
-}
-
-func checkLeaderTransferState(t *testing.T, r *raft, state StateType, lead uint64) {
- if r.state != state || r.lead != lead {
- t.Fatalf("after transferring, node has state %v lead %v, want state %v lead %v", r.state, r.lead, state, lead)
- }
- if r.leadTransferee != None {
- t.Fatalf("after transferring, node has leadTransferee %v, want leadTransferee %v", r.leadTransferee, None)
- }
-}
-
-// TestTransferNonMember verifies that when a MsgTimeoutNow arrives at
-// a node that has been removed from the group, nothing happens.
-// (previously, if the node also got votes, it would panic as it
-// transitioned to StateLeader)
-func TestTransferNonMember(t *testing.T) {
- r := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(2, 3, 4)))
- r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgTimeoutNow})
-
- r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgVoteResp})
- r.Step(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp})
- if r.state != StateFollower {
- t.Fatalf("state is %s, want StateFollower", r.state)
- }
-}
-
-// TestNodeWithSmallerTermCanCompleteElection tests the scenario where a node
-// that has been partitioned away (and fallen behind) rejoins the cluster at
-// about the same time the leader node gets partitioned away.
-// Previously the cluster would come to a standstill when run with PreVote
-// enabled.
-func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) {
- n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
-
- n1.becomeFollower(1, None)
- n2.becomeFollower(1, None)
- n3.becomeFollower(1, None)
-
- n1.preVote = true
- n2.preVote = true
- n3.preVote = true
-
- // cause a network partition to isolate node 3
- nt := newNetwork(n1, n2, n3)
- nt.cut(1, 3)
- nt.cut(2, 3)
-
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- sm := nt.peers[1].(*raft)
- if sm.state != StateLeader {
- t.Errorf("peer 1 state: %s, want %s", sm.state, StateLeader)
- }
-
- sm = nt.peers[2].(*raft)
- if sm.state != StateFollower {
- t.Errorf("peer 2 state: %s, want %s", sm.state, StateFollower)
- }
-
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
- sm = nt.peers[3].(*raft)
- if sm.state != StatePreCandidate {
- t.Errorf("peer 3 state: %s, want %s", sm.state, StatePreCandidate)
- }
-
- nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
-
- // check whether the term values are expected
- // a.Term == 3
- // b.Term == 3
- // c.Term == 1
- sm = nt.peers[1].(*raft)
- if sm.Term != 3 {
- t.Errorf("peer 1 term: %d, want %d", sm.Term, 3)
- }
-
- sm = nt.peers[2].(*raft)
- if sm.Term != 3 {
- t.Errorf("peer 2 term: %d, want %d", sm.Term, 3)
- }
-
- sm = nt.peers[3].(*raft)
- if sm.Term != 1 {
- t.Errorf("peer 3 term: %d, want %d", sm.Term, 1)
- }
-
- // check state
- // a == follower
- // b == leader
- // c == pre-candidate
- sm = nt.peers[1].(*raft)
- if sm.state != StateFollower {
- t.Errorf("peer 1 state: %s, want %s", sm.state, StateFollower)
- }
- sm = nt.peers[2].(*raft)
- if sm.state != StateLeader {
- t.Errorf("peer 2 state: %s, want %s", sm.state, StateLeader)
- }
- sm = nt.peers[3].(*raft)
- if sm.state != StatePreCandidate {
- t.Errorf("peer 3 state: %s, want %s", sm.state, StatePreCandidate)
- }
-
- sm.logger.Infof("going to bring back peer 3 and kill peer 2")
- // recover the network then immediately isolate b which is currently
- // the leader, this is to emulate the crash of b.
- nt.recover()
- nt.cut(2, 1)
- nt.cut(2, 3)
-
- // call for election
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- // do we have a leader?
- sma := nt.peers[1].(*raft)
- smb := nt.peers[3].(*raft)
- if sma.state != StateLeader && smb.state != StateLeader {
- t.Errorf("no leader")
- }
-}
-
-// TestPreVoteWithSplitVote verifies that after a split vote, the cluster can complete
-// the election in the next round.
-func TestPreVoteWithSplitVote(t *testing.T) {
- n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
-
- n1.becomeFollower(1, None)
- n2.becomeFollower(1, None)
- n3.becomeFollower(1, None)
-
- n1.preVote = true
- n2.preVote = true
- n3.preVote = true
-
- nt := newNetwork(n1, n2, n3)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- // simulate leader down. followers start split vote.
- nt.isolate(1)
- nt.send([]pb.Message{
- {From: 2, To: 2, Type: pb.MsgHup},
- {From: 3, To: 3, Type: pb.MsgHup},
- }...)
-
- // check whether the term values are expected
- // n2.Term == 3
- // n3.Term == 3
- sm := nt.peers[2].(*raft)
- if sm.Term != 3 {
- t.Errorf("peer 2 term: %d, want %d", sm.Term, 3)
- }
- sm = nt.peers[3].(*raft)
- if sm.Term != 3 {
- t.Errorf("peer 3 term: %d, want %d", sm.Term, 3)
- }
-
- // check state
- // n2 == candidate
- // n3 == candidate
- sm = nt.peers[2].(*raft)
- if sm.state != StateCandidate {
- t.Errorf("peer 2 state: %s, want %s", sm.state, StateCandidate)
- }
- sm = nt.peers[3].(*raft)
- if sm.state != StateCandidate {
- t.Errorf("peer 3 state: %s, want %s", sm.state, StateCandidate)
- }
-
- // node 2 election timeout first
- nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
-
- // check whether the term values are expected
- // n2.Term == 4
- // n3.Term == 4
- sm = nt.peers[2].(*raft)
- if sm.Term != 4 {
- t.Errorf("peer 2 term: %d, want %d", sm.Term, 4)
- }
- sm = nt.peers[3].(*raft)
- if sm.Term != 4 {
- t.Errorf("peer 3 term: %d, want %d", sm.Term, 4)
- }
-
- // check state
- // n2 == leader
- // n3 == follower
- sm = nt.peers[2].(*raft)
- if sm.state != StateLeader {
- t.Errorf("peer 2 state: %s, want %s", sm.state, StateLeader)
- }
- sm = nt.peers[3].(*raft)
- if sm.state != StateFollower {
- t.Errorf("peer 3 state: %s, want %s", sm.state, StateFollower)
- }
-}
-
-// TestPreVoteWithCheckQuorum ensures that after a node becomes a pre-candidate,
-// checkQuorum still behaves correctly.
-func TestPreVoteWithCheckQuorum(t *testing.T) {
- n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
-
- n1.becomeFollower(1, None)
- n2.becomeFollower(1, None)
- n3.becomeFollower(1, None)
-
- n1.preVote = true
- n2.preVote = true
- n3.preVote = true
-
- n1.checkQuorum = true
- n2.checkQuorum = true
- n3.checkQuorum = true
-
- nt := newNetwork(n1, n2, n3)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- // isolate node 1. node 2 and node 3 have leader info
- nt.isolate(1)
-
- // check state
- sm := nt.peers[1].(*raft)
- if sm.state != StateLeader {
- t.Fatalf("peer 1 state: %s, want %s", sm.state, StateLeader)
- }
- sm = nt.peers[2].(*raft)
- if sm.state != StateFollower {
- t.Fatalf("peer 2 state: %s, want %s", sm.state, StateFollower)
- }
- sm = nt.peers[3].(*raft)
- if sm.state != StateFollower {
- t.Fatalf("peer 3 state: %s, want %s", sm.state, StateFollower)
- }
-
- // node 2 will ignore node 3's PreVote
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
- nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
-
- // Do we have a leader?
- if n2.state != StateLeader && n3.state != StateFollower {
- t.Errorf("no leader")
- }
-}
-
-// TestLearnerCampaign verifies that a learner won't campaign even if it receives
-// a MsgHup or MsgTimeoutNow.
-func TestLearnerCampaign(t *testing.T) {
- n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)))
- n1.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeAddLearnerNode}.AsV2())
- n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1)))
- n2.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeAddLearnerNode}.AsV2())
- nt := newNetwork(n1, n2)
- nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
-
- if !n2.isLearner {
- t.Fatalf("failed to make n2 a learner")
- }
-
- if n2.state != StateFollower {
- t.Fatalf("n2 campaigned despite being learner")
- }
-
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
- if n1.state != StateLeader || n1.lead != 1 {
- t.Fatalf("n1 did not become leader")
- }
-
- // NB: TransferLeader already checks that the recipient is not a learner, but
- // that check could have happened before the recipient became a learner, in
- // which case it will still receive MsgTimeoutNow, as in this test case, and we
- // verify that it is ignored.
- nt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTimeoutNow})
-
- if n2.state != StateFollower {
- t.Fatalf("n2 accepted leadership transfer despite being learner")
- }
-}
-
-// newPreVoteMigrationCluster simulates a rolling update of a cluster to enable Pre-Vote.
-// The cluster has 3 nodes [n1, n2, n3]:
-// n1 is the leader with term 2
-// n2 is a follower with term 2
-// n3 is partitioned, a candidate with term 4 and a shorter log
-func newPreVoteMigrationCluster(t *testing.T) *network {
- n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
-
- n1.becomeFollower(1, None)
- n2.becomeFollower(1, None)
- n3.becomeFollower(1, None)
-
- n1.preVote = true
- n2.preVote = true
- // We intentionally do not enable PreVote for n3, in order to simulate a rolling
- // restart process where it's possible to have a mixed-version cluster with some
- // replicas that have PreVote enabled and some that don't.
-
- nt := newNetwork(n1, n2, n3)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
-
- // Cause a network partition to isolate n3.
- nt.isolate(3)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
-
- // check state
- // n1.state == StateLeader
- // n2.state == StateFollower
- // n3.state == StateCandidate
- if n1.state != StateLeader {
- t.Fatalf("node 1 state: %s, want %s", n1.state, StateLeader)
- }
- if n2.state != StateFollower {
- t.Fatalf("node 2 state: %s, want %s", n2.state, StateFollower)
- }
- if n3.state != StateCandidate {
- t.Fatalf("node 3 state: %s, want %s", n3.state, StateCandidate)
- }
-
- // check term
- // n1.Term == 2
- // n2.Term == 2
- // n3.Term == 4
- if n1.Term != 2 {
- t.Fatalf("node 1 term: %d, want %d", n1.Term, 2)
- }
- if n2.Term != 2 {
- t.Fatalf("node 2 term: %d, want %d", n2.Term, 2)
- }
- if n3.Term != 4 {
- t.Fatalf("node 3 term: %d, want %d", n3.Term, 4)
- }
-
- // Enable prevote on n3, then recover the network
- n3.preVote = true
- nt.recover()
-
- return nt
-}
-
-func TestPreVoteMigrationCanCompleteElection(t *testing.T) {
- nt := newPreVoteMigrationCluster(t)
-
- // n1 is leader with term 2
- // n2 is follower with term 2
- // n3 is a pre-candidate with term 4 and a shorter log
- n2 := nt.peers[2].(*raft)
- n3 := nt.peers[3].(*raft)
-
- // simulate leader down
- nt.isolate(1)
-
- // Call for elections from both n2 and n3.
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
- nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
-
- // check state
- // n2.state == Follower
- // n3.state == PreCandidate
- if n2.state != StateFollower {
- t.Errorf("node 2 state: %s, want %s", n2.state, StateFollower)
- }
- if n3.state != StatePreCandidate {
- t.Errorf("node 3 state: %s, want %s", n3.state, StatePreCandidate)
- }
-
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
- nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
-
- // Do we have a leader?
- if n2.state != StateLeader && n3.state != StateFollower {
- t.Errorf("no leader")
- }
-}
-
-func TestPreVoteMigrationWithFreeStuckPreCandidate(t *testing.T) {
- nt := newPreVoteMigrationCluster(t)
-
- // n1 is leader with term 2
- // n2 is follower with term 2
- // n3 is a pre-candidate with term 4 and a shorter log
- n1 := nt.peers[1].(*raft)
- n2 := nt.peers[2].(*raft)
- n3 := nt.peers[3].(*raft)
-
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
-
- if n1.state != StateLeader {
- t.Errorf("node 1 state: %s, want %s", n1.state, StateLeader)
- }
- if n2.state != StateFollower {
- t.Errorf("node 2 state: %s, want %s", n2.state, StateFollower)
- }
- if n3.state != StatePreCandidate {
- t.Errorf("node 3 state: %s, want %s", n3.state, StatePreCandidate)
- }
-
- // Pre-Vote again for safety
- nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
-
- if n1.state != StateLeader {
- t.Errorf("node 1 state: %s, want %s", n1.state, StateLeader)
- }
- if n2.state != StateFollower {
- t.Errorf("node 2 state: %s, want %s", n2.state, StateFollower)
- }
- if n3.state != StatePreCandidate {
- t.Errorf("node 3 state: %s, want %s", n3.state, StatePreCandidate)
- }
-
- nt.send(pb.Message{From: 1, To: 3, Type: pb.MsgHeartbeat, Term: n1.Term})
-
- // The heartbeat response carries n3's higher term, which forces n1 to step
- // down and frees the stuck pre-candidate.
- if n1.state != StateFollower {
- t.Errorf("state = %s, want %s", n1.state, StateFollower)
- }
- if n3.Term != n1.Term {
- t.Errorf("term = %d, want %d", n3.Term, n1.Term)
- }
-}
-
-func testConfChangeCheckBeforeCampaign(t *testing.T, v2 bool) {
- nt := newNetwork(nil, nil, nil)
- n1 := nt.peers[1].(*raft)
- n2 := nt.peers[2].(*raft)
- nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
- if n1.state != StateLeader {
- t.Errorf("node 1 state: %s, want %s", n1.state, StateLeader)
- }
-
- // Begin to remove the third node.
- cc := pb.ConfChange{
- Type: pb.ConfChangeRemoveNode,
- NodeID: 2,
- }
- var ccData []byte
- var err error
- var ty pb.EntryType
- if v2 {
- ccv2 := cc.AsV2()
- ccData, err = ccv2.Marshal()
- ty = pb.EntryConfChangeV2
- } else {
- ccData, err = cc.Marshal()
- ty = pb.EntryConfChange
- }
- if err != nil {
- t.Fatal(err)
- }
- nt.send(pb.Message{
- From: 1,
- To: 1,
- Type: pb.MsgProp,
- Entries: []pb.Entry{
- {Type: ty, Data: ccData},
- },
- })
-
- // Trigger campaign in node 2
- for i := 0; i < n2.randomizedElectionTimeout; i++ {
- n2.tick()
- }
- // It's still a follower because the committed conf change has not been applied.
- if n2.state != StateFollower {
- t.Errorf("node 2 state: %s, want %s", n2.state, StateFollower)
- }
-
- // Transfer leadership to peer 2.
- nt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})
- if n1.state != StateLeader {
- t.Errorf("node 1 state: %s, want %s", n1.state, StateLeader)
- }
- // It's still a follower because the committed conf change has not been applied.
- if n2.state != StateFollower {
- t.Errorf("node 2 state: %s, want %s", n2.state, StateFollower)
- }
- // Abort the leader transfer.
- for i := 0; i < n1.electionTimeout; i++ {
- n1.tick()
- }
-
- // Advance apply
- nextEnts(n2, nt.storage[2])
-
- // Transfer leadership to peer 2 again.
- nt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})
- if n1.state != StateFollower {
- t.Errorf("node 1 state: %s, want %s", n1.state, StateFollower)
- }
- if n2.state != StateLeader {
- t.Errorf("node 2 state: %s, want %s", n2.state, StateLeader)
- }
-
- nextEnts(n1, nt.storage[1])
- // Trigger campaign in node 1.
- for i := 0; i < n1.randomizedElectionTimeout; i++ {
- n1.tick()
- }
- if n1.state != StateCandidate {
- t.Errorf("node 1 state: %s, want %s", n1.state, StateCandidate)
- }
-}
-
-// TestConfChangeCheckBeforeCampaign tests that an unapplied ConfChange is checked before campaigning.
-func TestConfChangeCheckBeforeCampaign(t *testing.T) {
- testConfChangeCheckBeforeCampaign(t, false)
-}
-
-// TestConfChangeV2CheckBeforeCampaign tests that an unapplied ConfChangeV2 is checked before campaigning.
-func TestConfChangeV2CheckBeforeCampaign(t *testing.T) {
- testConfChangeCheckBeforeCampaign(t, true)
-}
-
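-// TestFastLogRejection verifies the fast log rejection hints: a follower that rejects a
-// MsgApp returns a RejectHint index and LogTerm, which the leader uses to skip past
-// entire conflicting terms when choosing the next index to probe.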
-func TestFastLogRejection(t *testing.T) {
- tests := []struct {
- leaderLog []pb.Entry // Logs on the leader
- followerLog []pb.Entry // Logs on the follower
- rejectHintTerm uint64 // Expected term included in rejected MsgAppResp.
- rejectHintIndex uint64 // Expected index included in rejected MsgAppResp.
- nextAppendTerm uint64 // Expected term when leader appends after rejected.
- nextAppendIndex uint64 // Expected index when leader appends after rejected.
- }{
-  // This case tests that the leader can find the conflict index quickly.
- // Firstly leader appends (type=MsgApp,index=7,logTerm=4, entries=...);
- // After rejected leader appends (type=MsgApp,index=3,logTerm=2).
- {
- leaderLog: []pb.Entry{
- {Term: 1, Index: 1},
- {Term: 2, Index: 2},
- {Term: 2, Index: 3},
- {Term: 4, Index: 4},
- {Term: 4, Index: 5},
- {Term: 4, Index: 6},
- {Term: 4, Index: 7},
- },
- followerLog: []pb.Entry{
- {Term: 1, Index: 1},
- {Term: 2, Index: 2},
- {Term: 2, Index: 3},
- {Term: 3, Index: 4},
- {Term: 3, Index: 5},
- {Term: 3, Index: 6},
- {Term: 3, Index: 7},
- {Term: 3, Index: 8},
- {Term: 3, Index: 9},
- {Term: 3, Index: 10},
- {Term: 3, Index: 11},
- },
- rejectHintTerm: 3,
- rejectHintIndex: 7,
- nextAppendTerm: 2,
- nextAppendIndex: 3,
- },
-  // This case tests that the leader can find the conflict index quickly.
- // Firstly leader appends (type=MsgApp,index=8,logTerm=5, entries=...);
- // After rejected leader appends (type=MsgApp,index=4,logTerm=3).
- {
- leaderLog: []pb.Entry{
- {Term: 1, Index: 1},
- {Term: 2, Index: 2},
- {Term: 2, Index: 3},
- {Term: 3, Index: 4},
- {Term: 4, Index: 5},
- {Term: 4, Index: 6},
- {Term: 4, Index: 7},
- {Term: 5, Index: 8},
- },
- followerLog: []pb.Entry{
- {Term: 1, Index: 1},
- {Term: 2, Index: 2},
- {Term: 2, Index: 3},
- {Term: 3, Index: 4},
- {Term: 3, Index: 5},
- {Term: 3, Index: 6},
- {Term: 3, Index: 7},
- {Term: 3, Index: 8},
- {Term: 3, Index: 9},
- {Term: 3, Index: 10},
- {Term: 3, Index: 11},
- },
- rejectHintTerm: 3,
- rejectHintIndex: 8,
- nextAppendTerm: 3,
- nextAppendIndex: 4,
- },
-  // This case tests that the follower can find the conflict index quickly.
- // Firstly leader appends (type=MsgApp,index=4,logTerm=1, entries=...);
- // After rejected leader appends (type=MsgApp,index=1,logTerm=1).
- {
- leaderLog: []pb.Entry{
- {Term: 1, Index: 1},
- {Term: 1, Index: 2},
- {Term: 1, Index: 3},
- {Term: 1, Index: 4},
- },
- followerLog: []pb.Entry{
- {Term: 1, Index: 1},
- {Term: 2, Index: 2},
- {Term: 2, Index: 3},
- {Term: 4, Index: 4},
- },
- rejectHintTerm: 1,
- rejectHintIndex: 1,
- nextAppendTerm: 1,
- nextAppendIndex: 1,
- },
- // This case is similar to the previous case. However, this time, the
- // leader has a longer uncommitted log tail than the follower.
- // Firstly leader appends (type=MsgApp,index=6,logTerm=1, entries=...);
- // After rejected leader appends (type=MsgApp,index=1,logTerm=1).
- {
- leaderLog: []pb.Entry{
- {Term: 1, Index: 1},
- {Term: 1, Index: 2},
- {Term: 1, Index: 3},
- {Term: 1, Index: 4},
- {Term: 1, Index: 5},
- {Term: 1, Index: 6},
- },
- followerLog: []pb.Entry{
- {Term: 1, Index: 1},
- {Term: 2, Index: 2},
- {Term: 2, Index: 3},
- {Term: 4, Index: 4},
- },
- rejectHintTerm: 1,
- rejectHintIndex: 1,
- nextAppendTerm: 1,
- nextAppendIndex: 1,
- },
- // This case is similar to the previous case. However, this time, the
- // follower has a longer uncommitted log tail than the leader.
- // Firstly leader appends (type=MsgApp,index=4,logTerm=1, entries=...);
- // After rejected leader appends (type=MsgApp,index=1,logTerm=1).
- {
- leaderLog: []pb.Entry{
- {Term: 1, Index: 1},
- {Term: 1, Index: 2},
- {Term: 1, Index: 3},
- {Term: 1, Index: 4},
- },
- followerLog: []pb.Entry{
- {Term: 1, Index: 1},
- {Term: 2, Index: 2},
- {Term: 2, Index: 3},
- {Term: 4, Index: 4},
- {Term: 4, Index: 5},
- {Term: 4, Index: 6},
- },
- rejectHintTerm: 1,
- rejectHintIndex: 1,
- nextAppendTerm: 1,
- nextAppendIndex: 1,
- },
-  // A normal case where there are no log conflicts.
- // Firstly leader appends (type=MsgApp,index=5,logTerm=5, entries=...);
- // After rejected leader appends (type=MsgApp,index=4,logTerm=4).
- {
- leaderLog: []pb.Entry{
- {Term: 1, Index: 1},
- {Term: 1, Index: 2},
- {Term: 1, Index: 3},
- {Term: 4, Index: 4},
- {Term: 5, Index: 5},
- },
- followerLog: []pb.Entry{
- {Term: 1, Index: 1},
- {Term: 1, Index: 2},
- {Term: 1, Index: 3},
- {Term: 4, Index: 4},
- },
- rejectHintTerm: 4,
- rejectHintIndex: 4,
- nextAppendTerm: 4,
- nextAppendIndex: 4,
- },
- // Test case from example comment in stepLeader (on leader).
- {
- leaderLog: []pb.Entry{
- {Term: 2, Index: 1},
- {Term: 5, Index: 2},
- {Term: 5, Index: 3},
- {Term: 5, Index: 4},
- {Term: 5, Index: 5},
- {Term: 5, Index: 6},
- {Term: 5, Index: 7},
- {Term: 5, Index: 8},
- {Term: 5, Index: 9},
- },
- followerLog: []pb.Entry{
- {Term: 2, Index: 1},
- {Term: 4, Index: 2},
- {Term: 4, Index: 3},
- {Term: 4, Index: 4},
- {Term: 4, Index: 5},
- {Term: 4, Index: 6},
- },
- rejectHintTerm: 4,
- rejectHintIndex: 6,
- nextAppendTerm: 2,
- nextAppendIndex: 1,
- },
- // Test case from example comment in handleAppendEntries (on follower).
- {
- leaderLog: []pb.Entry{
- {Term: 2, Index: 1},
- {Term: 2, Index: 2},
- {Term: 2, Index: 3},
- {Term: 2, Index: 4},
- {Term: 2, Index: 5},
- },
- followerLog: []pb.Entry{
- {Term: 2, Index: 1},
- {Term: 4, Index: 2},
- {Term: 4, Index: 3},
- {Term: 4, Index: 4},
- {Term: 4, Index: 5},
- {Term: 4, Index: 6},
- {Term: 4, Index: 7},
- {Term: 4, Index: 8},
- },
- nextAppendTerm: 2,
- nextAppendIndex: 1,
- rejectHintTerm: 2,
- rejectHintIndex: 1,
- },
- }
-
- for i, test := range tests {
- t.Run("", func(t *testing.T) {
- s1 := NewMemoryStorage()
- s1.snapshot.Metadata.ConfState = pb.ConfState{Voters: []uint64{1, 2, 3}}
- s1.Append(test.leaderLog)
- s2 := NewMemoryStorage()
- s2.snapshot.Metadata.ConfState = pb.ConfState{Voters: []uint64{1, 2, 3}}
- s2.Append(test.followerLog)
-
- n1 := newTestRaft(1, 10, 1, s1)
- n2 := newTestRaft(2, 10, 1, s2)
-
- n1.becomeCandidate()
- n1.becomeLeader()
-
- n2.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHeartbeat})
-
- msgs := n2.readMessages()
- if len(msgs) != 1 {
- t.Errorf("can't read 1 message from peer 2")
- }
- if msgs[0].Type != pb.MsgHeartbeatResp {
- t.Errorf("can't read heartbeat response from peer 2")
- }
- if n1.Step(msgs[0]) != nil {
- t.Errorf("peer 1 step heartbeat response fail")
- }
-
- msgs = n1.readMessages()
- if len(msgs) != 1 {
- t.Errorf("can't read 1 message from peer 1")
- }
- if msgs[0].Type != pb.MsgApp {
- t.Errorf("can't read append from peer 1")
- }
-
- if n2.Step(msgs[0]) != nil {
- t.Errorf("peer 2 step append fail")
- }
- msgs = n2.readMessages()
- if len(msgs) != 1 {
- t.Errorf("can't read 1 message from peer 2")
- }
- if msgs[0].Type != pb.MsgAppResp {
- t.Errorf("can't read append response from peer 2")
- }
- if !msgs[0].Reject {
- t.Errorf("expected rejected append response from peer 2")
- }
- if msgs[0].LogTerm != test.rejectHintTerm {
- t.Fatalf("#%d expected hint log term = %d, but got %d", i, test.rejectHintTerm, msgs[0].LogTerm)
- }
- if msgs[0].RejectHint != test.rejectHintIndex {
- t.Fatalf("#%d expected hint index = %d, but got %d", i, test.rejectHintIndex, msgs[0].RejectHint)
- }
-
- if n1.Step(msgs[0]) != nil {
- t.Errorf("peer 1 step append fail")
- }
- msgs = n1.readMessages()
- if msgs[0].LogTerm != test.nextAppendTerm {
- t.Fatalf("#%d expected log term = %d, but got %d", i, test.nextAppendTerm, msgs[0].LogTerm)
- }
- if msgs[0].Index != test.nextAppendIndex {
- t.Fatalf("#%d expected index = %d, but got %d", i, test.nextAppendIndex, msgs[0].Index)
- }
- })
- }
-}
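
The cases above exercise the fast log rejection path: a rejected MsgApp carries back (RejectHint, LogTerm), and both sides use that pair to skip over whole terms instead of probing one index at a time. Below is a minimal sketch of that hint lookup, assuming entries are contiguous and 1-indexed as in the test logs; the name and shape are illustrative (the real lookup lives on raftLog and also handles compacted or missing entries).

```go
// findConflictByTermSketch returns the largest index not above hintIndex whose
// term does not exceed hintTerm. The leader feeds the follower's rejection hint
// through this to pick the next probe index; the follower uses the same idea to
// compute the hint in the first place.
func findConflictByTermSketch(entries []pb.Entry, hintIndex, hintTerm uint64) uint64 {
	if last := uint64(len(entries)); hintIndex > last {
		hintIndex = last // never look past the local log
	}
	for ; hintIndex > 0; hintIndex-- {
		// Entry at log index i sits at slice position i-1 in these test logs.
		if entries[hintIndex-1].Term <= hintTerm {
			return hintIndex
		}
	}
	return 0
}
```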
-
-func entsWithConfig(configFunc func(*Config), terms ...uint64) *raft {
- storage := NewMemoryStorage()
- for i, term := range terms {
- storage.Append([]pb.Entry{{Index: uint64(i + 1), Term: term}})
- }
- cfg := newTestConfig(1, 5, 1, storage)
- if configFunc != nil {
- configFunc(cfg)
- }
- sm := newRaft(cfg)
- sm.reset(terms[len(terms)-1])
- return sm
-}
-
-// votedWithConfig creates a raft state machine with Vote and Term set
-// to the given values but no log entries (indicating that it voted in
-// the given term but has not received any logs).
-func votedWithConfig(configFunc func(*Config), vote, term uint64) *raft {
- storage := NewMemoryStorage()
- storage.SetHardState(pb.HardState{Vote: vote, Term: term})
- cfg := newTestConfig(1, 5, 1, storage)
- if configFunc != nil {
- configFunc(cfg)
- }
- sm := newRaft(cfg)
- sm.reset(term)
- return sm
-}
-
-type network struct {
- peers map[uint64]stateMachine
- storage map[uint64]*MemoryStorage
- dropm map[connem]float64
- ignorem map[pb.MessageType]bool
-
- // msgHook is called for each message sent. It may inspect the
- // message and return true to send it or false to drop it.
- msgHook func(pb.Message) bool
-}
-
-// newNetwork initializes a network from peers.
-// A nil node will be replaced with a new *stateMachine.
-// A *raft passed in will get its id set from its position (k) in the peer list.
-// When using stateMachine, the address list is always [1, n].
-func newNetwork(peers ...stateMachine) *network {
- return newNetworkWithConfig(nil, peers...)
-}
-
-// newNetworkWithConfig is like newNetwork but calls the given func to
-// modify the configuration of any state machines it creates.
-func newNetworkWithConfig(configFunc func(*Config), peers ...stateMachine) *network {
- size := len(peers)
- peerAddrs := idsBySize(size)
-
- npeers := make(map[uint64]stateMachine, size)
- nstorage := make(map[uint64]*MemoryStorage, size)
-
- for j, p := range peers {
- id := peerAddrs[j]
- switch v := p.(type) {
- case nil:
- nstorage[id] = newTestMemoryStorage(withPeers(peerAddrs...))
- cfg := newTestConfig(id, 10, 1, nstorage[id])
- if configFunc != nil {
- configFunc(cfg)
- }
- sm := newRaft(cfg)
- npeers[id] = sm
- case *raft:
- // TODO(tbg): this is all pretty confused. Clean this up.
- learners := make(map[uint64]bool, len(v.prs.Learners))
- for i := range v.prs.Learners {
- learners[i] = true
- }
- v.id = id
- v.prs = tracker.MakeProgressTracker(v.prs.MaxInflight)
- if len(learners) > 0 {
- v.prs.Learners = map[uint64]struct{}{}
- }
- for i := 0; i < size; i++ {
- pr := &tracker.Progress{}
- if _, ok := learners[peerAddrs[i]]; ok {
- pr.IsLearner = true
- v.prs.Learners[peerAddrs[i]] = struct{}{}
- } else {
- v.prs.Voters[0][peerAddrs[i]] = struct{}{}
- }
- v.prs.Progress[peerAddrs[i]] = pr
- }
- v.reset(v.Term)
- npeers[id] = v
- case *blackHole:
- npeers[id] = v
- default:
- panic(fmt.Sprintf("unexpected state machine type: %T", p))
- }
- }
- return &network{
- peers: npeers,
- storage: nstorage,
- dropm: make(map[connem]float64),
- ignorem: make(map[pb.MessageType]bool),
- }
-}
-
-func preVoteConfig(c *Config) {
- c.PreVote = true
-}
-
-func (nw *network) send(msgs ...pb.Message) {
- for len(msgs) > 0 {
- m := msgs[0]
- p := nw.peers[m.To]
- p.Step(m)
- msgs = append(msgs[1:], nw.filter(p.readMessages())...)
- }
-}
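
For orientation, the tests combine these helpers roughly as follows: build a cluster of fresh state machines, kick off an election, and let send drain all resulting traffic until the network is quiet. This is a hedged sketch of the usual pattern rather than a copy of any single test.

```go
func exampleNetwork(t *testing.T) {
	// Three fresh nodes; nil entries are replaced by newly constructed *raft peers.
	nt := newNetwork(nil, nil, nil)

	// MsgHup makes node 1 campaign; send keeps forwarding the generated
	// messages (minus anything dropped or ignored) until nothing is left.
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})

	// With node 1 elected, a proposal routed to it is replicated to 2 and 3.
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})

	if sm := nt.peers[1].(*raft); sm.state != StateLeader {
		t.Errorf("node 1 should be leader, got %v", sm.state)
	}
}
```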
-
-func (nw *network) drop(from, to uint64, perc float64) {
- nw.dropm[connem{from, to}] = perc
-}
-
-func (nw *network) cut(one, other uint64) {
- nw.drop(one, other, 2.0) // always drop
- nw.drop(other, one, 2.0) // always drop
-}
-
-func (nw *network) isolate(id uint64) {
- for i := 0; i < len(nw.peers); i++ {
- nid := uint64(i) + 1
- if nid != id {
- nw.drop(id, nid, 1.0) // always drop
- nw.drop(nid, id, 1.0) // always drop
- }
- }
-}
-
-func (nw *network) ignore(t pb.MessageType) {
- nw.ignorem[t] = true
-}
-
-func (nw *network) recover() {
- nw.dropm = make(map[connem]float64)
- nw.ignorem = make(map[pb.MessageType]bool)
-}
-
-func (nw *network) filter(msgs []pb.Message) []pb.Message {
- mm := []pb.Message{}
- for _, m := range msgs {
- if nw.ignorem[m.Type] {
- continue
- }
- switch m.Type {
- case pb.MsgHup:
- // hups never go over the network, so don't drop them but panic
- panic("unexpected msgHup")
- default:
- perc := nw.dropm[connem{m.From, m.To}]
- if n := rand.Float64(); n < perc {
- continue
- }
- }
- if nw.msgHook != nil {
- if !nw.msgHook(m) {
- continue
- }
- }
- mm = append(mm, m)
- }
- return mm
-}
-
-type connem struct {
- from, to uint64
-}
-
-type blackHole struct{}
-
-func (blackHole) Step(pb.Message) error { return nil }
-func (blackHole) readMessages() []pb.Message { return nil }
-
-var nopStepper = &blackHole{}
-
-func idsBySize(size int) []uint64 {
- ids := make([]uint64, size)
- for i := 0; i < size; i++ {
- ids[i] = 1 + uint64(i)
- }
- return ids
-}
-
-// setRandomizedElectionTimeout lets the caller set the randomized election
-// timeout explicitly instead of having it chosen at random; some test
-// scenarios need a deterministic value to ensure certainty.
-func setRandomizedElectionTimeout(r *raft, v int) {
- r.randomizedElectionTimeout = v
-}
-
-func newTestConfig(id uint64, election, heartbeat int, storage Storage) *Config {
- return &Config{
- ID: id,
- ElectionTick: election,
- HeartbeatTick: heartbeat,
- Storage: storage,
- MaxSizePerMsg: noLimit,
- MaxInflightMsgs: 256,
- }
-}
-
-type testMemoryStorageOptions func(*MemoryStorage)
-
-func withPeers(peers ...uint64) testMemoryStorageOptions {
- return func(ms *MemoryStorage) {
- ms.snapshot.Metadata.ConfState.Voters = peers
- }
-}
-
-func withLearners(learners ...uint64) testMemoryStorageOptions {
- return func(ms *MemoryStorage) {
- ms.snapshot.Metadata.ConfState.Learners = learners
- }
-}
-
-func newTestMemoryStorage(opts ...testMemoryStorageOptions) *MemoryStorage {
- ms := NewMemoryStorage()
- for _, o := range opts {
- o(ms)
- }
- return ms
-}
-
-func newTestRaft(id uint64, election, heartbeat int, storage Storage) *raft {
- return newRaft(newTestConfig(id, election, heartbeat, storage))
-}
-
-func newTestLearnerRaft(id uint64, election, heartbeat int, storage Storage) *raft {
- cfg := newTestConfig(id, election, heartbeat, storage)
- return newRaft(cfg)
-}
-
-// newTestRawNode sets up a RawNode with the given parameters. Note that the
-// peer configuration will not be reflected in the Storage.
-func newTestRawNode(id uint64, election, heartbeat int, storage Storage) *RawNode {
- cfg := newTestConfig(id, election, heartbeat, storage)
- rn, err := NewRawNode(cfg)
- if err != nil {
- panic(err)
- }
- return rn
-}
diff --git a/raft/raftpb/confchange.go b/raft/raftpb/confchange.go
deleted file mode 100644
index 47fae65dfe1..00000000000
--- a/raft/raftpb/confchange.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raftpb
-
-import (
- "fmt"
- "strconv"
- "strings"
-
- "github.com/gogo/protobuf/proto"
-)
-
-// ConfChangeI abstracts over ConfChangeV2 and (legacy) ConfChange to allow
-// treating them in a unified manner.
-type ConfChangeI interface {
- AsV2() ConfChangeV2
- AsV1() (ConfChange, bool)
-}
-
-// MarshalConfChange calls Marshal on the underlying ConfChange or ConfChangeV2
-// and returns the result along with the corresponding EntryType.
-func MarshalConfChange(c ConfChangeI) (EntryType, []byte, error) {
- var typ EntryType
- var ccdata []byte
- var err error
- if ccv1, ok := c.AsV1(); ok {
- typ = EntryConfChange
- ccdata, err = ccv1.Marshal()
- } else {
- ccv2 := c.AsV2()
- typ = EntryConfChangeV2
- ccdata, err = ccv2.Marshal()
- }
- return typ, ccdata, err
-}
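
A caller proposing a configuration change typically turns this result straight into a log entry. The helper below is a small illustrative sketch of that wiring; the function name is not part of the package.

```go
// confChangeEntry wraps any ConfChangeI into the log entry that would be
// proposed: the returned EntryType distinguishes legacy ConfChange payloads
// from ConfChangeV2 ones, and the marshalled bytes become Entry.Data.
func confChangeEntry(cc ConfChangeI) (Entry, error) {
	typ, data, err := MarshalConfChange(cc)
	if err != nil {
		return Entry{}, err
	}
	return Entry{Type: typ, Data: data}, nil
}
```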
-
-// AsV2 returns a V2 configuration change carrying out the same operation.
-func (c ConfChange) AsV2() ConfChangeV2 {
- return ConfChangeV2{
- Changes: []ConfChangeSingle{{
- Type: c.Type,
- NodeID: c.NodeID,
- }},
- Context: c.Context,
- }
-}
-
-// AsV1 returns the ConfChange and true.
-func (c ConfChange) AsV1() (ConfChange, bool) {
- return c, true
-}
-
-// AsV2 is the identity.
-func (c ConfChangeV2) AsV2() ConfChangeV2 { return c }
-
-// AsV1 returns ConfChange{} and false.
-func (c ConfChangeV2) AsV1() (ConfChange, bool) { return ConfChange{}, false }
-
-// EnterJoint returns two bools. The second bool is true if and only if this
-// config change will use Joint Consensus, which is the case if it contains more
-// than one change or if the use of Joint Consensus was requested explicitly.
-// The first bool can only be true if the second one is, and indicates whether
-// the Joint State will be left automatically.
-func (c ConfChangeV2) EnterJoint() (autoLeave bool, ok bool) {
- // NB: in theory, more config changes could qualify for the "simple"
- // protocol but it depends on the config on top of which the changes apply.
- // For example, adding two learners is not OK if both nodes are part of the
- // base config (i.e. two voters are turned into learners in the process of
- // applying the conf change). In practice, these distinctions should not
- // matter, so we keep it simple and use Joint Consensus liberally.
- if c.Transition != ConfChangeTransitionAuto || len(c.Changes) > 1 {
- // Use Joint Consensus.
- var autoLeave bool
- switch c.Transition {
- case ConfChangeTransitionAuto:
- autoLeave = true
- case ConfChangeTransitionJointImplicit:
- autoLeave = true
- case ConfChangeTransitionJointExplicit:
- default:
- panic(fmt.Sprintf("unknown transition: %+v", c))
- }
- return autoLeave, true
- }
- return false, false
-}
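
Concretely, under the rules above a single change with the default (auto) transition stays on the simple protocol, while anything larger or explicitly joint enters a joint configuration. A quick illustration (the function is an example, not part of the package):

```go
func exampleEnterJoint() {
	// One change, auto transition: simple protocol, no joint state.
	simple := ConfChangeV2{Changes: []ConfChangeSingle{{Type: ConfChangeAddNode, NodeID: 2}}}
	if _, joint := simple.EnterJoint(); joint {
		panic("single auto change unexpectedly uses joint consensus")
	}

	// Two changes in one proposal: joint consensus, left automatically.
	swap := ConfChangeV2{Changes: []ConfChangeSingle{
		{Type: ConfChangeAddNode, NodeID: 4},
		{Type: ConfChangeRemoveNode, NodeID: 3},
	}}
	if autoLeave, joint := swap.EnterJoint(); !joint || !autoLeave {
		panic("expected an automatically-left joint configuration")
	}
}
```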
-
-// LeaveJoint is true if the configuration change leaves a joint configuration.
-// This is the case if the ConfChangeV2 is zero, with the possible exception of
-// the Context field.
-func (c ConfChangeV2) LeaveJoint() bool {
- // NB: c is already a copy.
- c.Context = nil
- return proto.Equal(&c, &ConfChangeV2{})
-}
-
-// ConfChangesFromString parses a space-delimited sequence of operations into a
-// slice of ConfChangeSingle. The supported operations are:
-// - vn: make n a voter,
-// - ln: make n a learner,
-// - rn: remove n, and
-// - un: update n.
-func ConfChangesFromString(s string) ([]ConfChangeSingle, error) {
- var ccs []ConfChangeSingle
- toks := strings.Split(strings.TrimSpace(s), " ")
- if toks[0] == "" {
- toks = nil
- }
- for _, tok := range toks {
- if len(tok) < 2 {
- return nil, fmt.Errorf("unknown token %s", tok)
- }
- var cc ConfChangeSingle
- switch tok[0] {
- case 'v':
- cc.Type = ConfChangeAddNode
- case 'l':
- cc.Type = ConfChangeAddLearnerNode
- case 'r':
- cc.Type = ConfChangeRemoveNode
- case 'u':
- cc.Type = ConfChangeUpdateNode
- default:
- return nil, fmt.Errorf("unknown input: %s", tok)
- }
- id, err := strconv.ParseUint(tok[1:], 10, 64)
- if err != nil {
- return nil, err
- }
- cc.NodeID = id
- ccs = append(ccs, cc)
- }
- return ccs, nil
-}
-
-// ConfChangesToString is the inverse to ConfChangesFromString.
-func ConfChangesToString(ccs []ConfChangeSingle) string {
- var buf strings.Builder
- for i, cc := range ccs {
- if i > 0 {
- buf.WriteByte(' ')
- }
- switch cc.Type {
- case ConfChangeAddNode:
- buf.WriteByte('v')
- case ConfChangeAddLearnerNode:
- buf.WriteByte('l')
- case ConfChangeRemoveNode:
- buf.WriteByte('r')
- case ConfChangeUpdateNode:
- buf.WriteByte('u')
- default:
- buf.WriteString("unknown")
- }
- fmt.Fprintf(&buf, "%d", cc.NodeID)
- }
- return buf.String()
-}
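
For example, with the shorthand described above, "v1 v2 l3 r4" parses into add-voter 1 and 2, add-learner 3, and remove 4, and ConfChangesToString reproduces the same string. The wrapper function below is only an illustration.

```go
func exampleConfChangeShorthand() {
	ccs, err := ConfChangesFromString("v1 v2 l3 r4")
	if err != nil {
		panic(err)
	}
	// ccs now holds, in order:
	//   {Type: ConfChangeAddNode,        NodeID: 1}
	//   {Type: ConfChangeAddNode,        NodeID: 2}
	//   {Type: ConfChangeAddLearnerNode, NodeID: 3}
	//   {Type: ConfChangeRemoveNode,     NodeID: 4}
	fmt.Println(ConfChangesToString(ccs)) // prints "v1 v2 l3 r4"
}
```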
diff --git a/raft/raftpb/confstate.go b/raft/raftpb/confstate.go
deleted file mode 100644
index 39b9dd70004..00000000000
--- a/raft/raftpb/confstate.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raftpb
-
-import (
- "fmt"
- "reflect"
- "sort"
-)
-
-// Equivalent returns a nil error if the inputs describe the same configuration.
-// On mismatch, returns a descriptive error showing the differences.
-func (cs ConfState) Equivalent(cs2 ConfState) error {
- cs1 := cs
- orig1, orig2 := cs1, cs2
- s := func(sl *[]uint64) {
- *sl = append([]uint64(nil), *sl...)
- sort.Slice(*sl, func(i, j int) bool { return (*sl)[i] < (*sl)[j] })
- }
-
- for _, cs := range []*ConfState{&cs1, &cs2} {
- s(&cs.Voters)
- s(&cs.Learners)
- s(&cs.VotersOutgoing)
- s(&cs.LearnersNext)
- }
-
- if !reflect.DeepEqual(cs1, cs2) {
- return fmt.Errorf("ConfStates not equivalent after sorting:\n%+#v\n%+#v\nInputs were:\n%+#v\n%+#v", cs1, cs2, orig1, orig2)
- }
- return nil
-}
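
A quick illustration of what Equivalent tolerates (ordering, nil vs. empty slices) and what it flags, in the spirit of the tests in confstate_test.go; the function is an example, not part of the package.

```go
func exampleEquivalent() {
	a := ConfState{Voters: []uint64{1, 2, 3}, Learners: []uint64{5, 4}}
	b := ConfState{Voters: []uint64{3, 2, 1}, Learners: []uint64{4, 5}}
	if err := a.Equivalent(b); err != nil {
		panic(err) // ordering differences are ignored
	}

	c := ConfState{Voters: []uint64{1, 2, 3}, Learners: []uint64{4, 5}, AutoLeave: true}
	if err := a.Equivalent(c); err == nil {
		panic("expected a mismatch: AutoLeave differs")
	}
}
```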
diff --git a/raft/raftpb/confstate_test.go b/raft/raftpb/confstate_test.go
deleted file mode 100644
index 712d7158317..00000000000
--- a/raft/raftpb/confstate_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raftpb
-
-import (
- "testing"
-)
-
-func TestConfState_Equivalent(t *testing.T) {
- type testCase struct {
- cs, cs2 ConfState
- ok bool
- }
-
- testCases := []testCase{
- // Reordered voters and learners.
- {ConfState{
- Voters: []uint64{1, 2, 3},
- Learners: []uint64{5, 4, 6},
- VotersOutgoing: []uint64{9, 8, 7},
- LearnersNext: []uint64{10, 20, 15},
- }, ConfState{
- Voters: []uint64{1, 2, 3},
- Learners: []uint64{4, 5, 6},
- VotersOutgoing: []uint64{7, 9, 8},
- LearnersNext: []uint64{20, 10, 15},
- }, true},
- // Not sensitive to nil vs empty slice.
- {ConfState{Voters: []uint64{}}, ConfState{Voters: []uint64(nil)}, true},
- // Non-equivalent voters.
- {ConfState{Voters: []uint64{1, 2, 3, 4}}, ConfState{Voters: []uint64{2, 1, 3}}, false},
- {ConfState{Voters: []uint64{1, 4, 3}}, ConfState{Voters: []uint64{2, 1, 3}}, false},
- // Non-equivalent learners.
- {ConfState{Learners: []uint64{1, 2, 3, 4}}, ConfState{Learners: []uint64{2, 1, 3}}, false},
- // Sensitive to AutoLeave flag.
- {ConfState{AutoLeave: true}, ConfState{}, false},
- }
-
- for _, tc := range testCases {
- t.Run("", func(t *testing.T) {
- if err := tc.cs.Equivalent(tc.cs2); (err == nil) != tc.ok {
- t.Fatalf("wanted error: %t, got:\n%s", tc.ok, err)
- }
- })
- }
-}
diff --git a/raft/raftpb/raft.pb.go b/raft/raftpb/raft.pb.go
deleted file mode 100644
index 1ee77a9a457..00000000000
--- a/raft/raftpb/raft.pb.go
+++ /dev/null
@@ -1,2988 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: raft.proto
-
-package raftpb
-
-import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type EntryType int32
-
-const (
- EntryNormal EntryType = 0
- EntryConfChange EntryType = 1
- EntryConfChangeV2 EntryType = 2
-)
-
-var EntryType_name = map[int32]string{
- 0: "EntryNormal",
- 1: "EntryConfChange",
- 2: "EntryConfChangeV2",
-}
-
-var EntryType_value = map[string]int32{
- "EntryNormal": 0,
- "EntryConfChange": 1,
- "EntryConfChangeV2": 2,
-}
-
-func (x EntryType) Enum() *EntryType {
- p := new(EntryType)
- *p = x
- return p
-}
-
-func (x EntryType) String() string {
- return proto.EnumName(EntryType_name, int32(x))
-}
-
-func (x *EntryType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType")
- if err != nil {
- return err
- }
- *x = EntryType(value)
- return nil
-}
-
-func (EntryType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{0}
-}
-
-// For description of different message types, see:
-// https://pkg.go.dev/go.etcd.io/etcd/raft/v3#hdr-MessageType
-type MessageType int32
-
-const (
- MsgHup MessageType = 0
- MsgBeat MessageType = 1
- MsgProp MessageType = 2
- MsgApp MessageType = 3
- MsgAppResp MessageType = 4
- MsgVote MessageType = 5
- MsgVoteResp MessageType = 6
- MsgSnap MessageType = 7
- MsgHeartbeat MessageType = 8
- MsgHeartbeatResp MessageType = 9
- MsgUnreachable MessageType = 10
- MsgSnapStatus MessageType = 11
- MsgCheckQuorum MessageType = 12
- MsgTransferLeader MessageType = 13
- MsgTimeoutNow MessageType = 14
- MsgReadIndex MessageType = 15
- MsgReadIndexResp MessageType = 16
- MsgPreVote MessageType = 17
- MsgPreVoteResp MessageType = 18
-)
-
-var MessageType_name = map[int32]string{
- 0: "MsgHup",
- 1: "MsgBeat",
- 2: "MsgProp",
- 3: "MsgApp",
- 4: "MsgAppResp",
- 5: "MsgVote",
- 6: "MsgVoteResp",
- 7: "MsgSnap",
- 8: "MsgHeartbeat",
- 9: "MsgHeartbeatResp",
- 10: "MsgUnreachable",
- 11: "MsgSnapStatus",
- 12: "MsgCheckQuorum",
- 13: "MsgTransferLeader",
- 14: "MsgTimeoutNow",
- 15: "MsgReadIndex",
- 16: "MsgReadIndexResp",
- 17: "MsgPreVote",
- 18: "MsgPreVoteResp",
-}
-
-var MessageType_value = map[string]int32{
- "MsgHup": 0,
- "MsgBeat": 1,
- "MsgProp": 2,
- "MsgApp": 3,
- "MsgAppResp": 4,
- "MsgVote": 5,
- "MsgVoteResp": 6,
- "MsgSnap": 7,
- "MsgHeartbeat": 8,
- "MsgHeartbeatResp": 9,
- "MsgUnreachable": 10,
- "MsgSnapStatus": 11,
- "MsgCheckQuorum": 12,
- "MsgTransferLeader": 13,
- "MsgTimeoutNow": 14,
- "MsgReadIndex": 15,
- "MsgReadIndexResp": 16,
- "MsgPreVote": 17,
- "MsgPreVoteResp": 18,
-}
-
-func (x MessageType) Enum() *MessageType {
- p := new(MessageType)
- *p = x
- return p
-}
-
-func (x MessageType) String() string {
- return proto.EnumName(MessageType_name, int32(x))
-}
-
-func (x *MessageType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType")
- if err != nil {
- return err
- }
- *x = MessageType(value)
- return nil
-}
-
-func (MessageType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{1}
-}
-
-// ConfChangeTransition specifies the behavior of a configuration change with
-// respect to joint consensus.
-type ConfChangeTransition int32
-
-const (
- // Automatically use the simple protocol if possible, otherwise fall back
- // to ConfChangeTransitionJointImplicit. Most applications will want to use this.
- ConfChangeTransitionAuto ConfChangeTransition = 0
- // Use joint consensus unconditionally, and transition out of them
- // automatically (by proposing a zero configuration change).
- //
- // This option is suitable for applications that want to minimize the time
- // spent in the joint configuration and do not store the joint configuration
- // in the state machine (outside of InitialState).
- ConfChangeTransitionJointImplicit ConfChangeTransition = 1
- // Use joint consensus and remain in the joint configuration until the
- // application proposes a no-op configuration change. This is suitable for
- // applications that want to explicitly control the transitions, for example
- // to use a custom payload (via the Context field).
- ConfChangeTransitionJointExplicit ConfChangeTransition = 2
-)
-
-var ConfChangeTransition_name = map[int32]string{
- 0: "ConfChangeTransitionAuto",
- 1: "ConfChangeTransitionJointImplicit",
- 2: "ConfChangeTransitionJointExplicit",
-}
-
-var ConfChangeTransition_value = map[string]int32{
- "ConfChangeTransitionAuto": 0,
- "ConfChangeTransitionJointImplicit": 1,
- "ConfChangeTransitionJointExplicit": 2,
-}
-
-func (x ConfChangeTransition) Enum() *ConfChangeTransition {
- p := new(ConfChangeTransition)
- *p = x
- return p
-}
-
-func (x ConfChangeTransition) String() string {
- return proto.EnumName(ConfChangeTransition_name, int32(x))
-}
-
-func (x *ConfChangeTransition) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(ConfChangeTransition_value, data, "ConfChangeTransition")
- if err != nil {
- return err
- }
- *x = ConfChangeTransition(value)
- return nil
-}
-
-func (ConfChangeTransition) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{2}
-}
-
-type ConfChangeType int32
-
-const (
- ConfChangeAddNode ConfChangeType = 0
- ConfChangeRemoveNode ConfChangeType = 1
- ConfChangeUpdateNode ConfChangeType = 2
- ConfChangeAddLearnerNode ConfChangeType = 3
-)
-
-var ConfChangeType_name = map[int32]string{
- 0: "ConfChangeAddNode",
- 1: "ConfChangeRemoveNode",
- 2: "ConfChangeUpdateNode",
- 3: "ConfChangeAddLearnerNode",
-}
-
-var ConfChangeType_value = map[string]int32{
- "ConfChangeAddNode": 0,
- "ConfChangeRemoveNode": 1,
- "ConfChangeUpdateNode": 2,
- "ConfChangeAddLearnerNode": 3,
-}
-
-func (x ConfChangeType) Enum() *ConfChangeType {
- p := new(ConfChangeType)
- *p = x
- return p
-}
-
-func (x ConfChangeType) String() string {
- return proto.EnumName(ConfChangeType_name, int32(x))
-}
-
-func (x *ConfChangeType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType")
- if err != nil {
- return err
- }
- *x = ConfChangeType(value)
- return nil
-}
-
-func (ConfChangeType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{3}
-}
-
-type Entry struct {
- Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"`
- Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"`
- Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"`
- Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"`
-}
-
-func (m *Entry) Reset() { *m = Entry{} }
-func (m *Entry) String() string { return proto.CompactTextString(m) }
-func (*Entry) ProtoMessage() {}
-func (*Entry) Descriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{0}
-}
-func (m *Entry) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Entry.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Entry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Entry.Merge(m, src)
-}
-func (m *Entry) XXX_Size() int {
- return m.Size()
-}
-func (m *Entry) XXX_DiscardUnknown() {
- xxx_messageInfo_Entry.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Entry proto.InternalMessageInfo
-
-type SnapshotMetadata struct {
- ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"`
- Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"`
- Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"`
-}
-
-func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} }
-func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) }
-func (*SnapshotMetadata) ProtoMessage() {}
-func (*SnapshotMetadata) Descriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{1}
-}
-func (m *SnapshotMetadata) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SnapshotMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_SnapshotMetadata.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *SnapshotMetadata) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SnapshotMetadata.Merge(m, src)
-}
-func (m *SnapshotMetadata) XXX_Size() int {
- return m.Size()
-}
-func (m *SnapshotMetadata) XXX_DiscardUnknown() {
- xxx_messageInfo_SnapshotMetadata.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SnapshotMetadata proto.InternalMessageInfo
-
-type Snapshot struct {
- Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
- Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"`
-}
-
-func (m *Snapshot) Reset() { *m = Snapshot{} }
-func (m *Snapshot) String() string { return proto.CompactTextString(m) }
-func (*Snapshot) ProtoMessage() {}
-func (*Snapshot) Descriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{2}
-}
-func (m *Snapshot) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Snapshot) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Snapshot.Merge(m, src)
-}
-func (m *Snapshot) XXX_Size() int {
- return m.Size()
-}
-func (m *Snapshot) XXX_DiscardUnknown() {
- xxx_messageInfo_Snapshot.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Snapshot proto.InternalMessageInfo
-
-type Message struct {
- Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"`
- To uint64 `protobuf:"varint,2,opt,name=to" json:"to"`
- From uint64 `protobuf:"varint,3,opt,name=from" json:"from"`
- Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"`
- // logTerm is generally used for appending Raft logs to followers. For example,
- // (type=MsgApp,index=100,logTerm=5) means leader appends entries starting at
- // index=101, and the term of entry at index 100 is 5.
- // (type=MsgAppResp,reject=true,index=100,logTerm=5) means follower rejects some
- // entries from its leader as it already has an entry with term 5 at index 100.
- LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"`
- Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"`
- Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"`
- Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"`
- Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"`
- Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"`
- RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"`
- Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"`
-}
-
-func (m *Message) Reset() { *m = Message{} }
-func (m *Message) String() string { return proto.CompactTextString(m) }
-func (*Message) ProtoMessage() {}
-func (*Message) Descriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{3}
-}
-func (m *Message) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Message.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Message) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Message.Merge(m, src)
-}
-func (m *Message) XXX_Size() int {
- return m.Size()
-}
-func (m *Message) XXX_DiscardUnknown() {
- xxx_messageInfo_Message.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Message proto.InternalMessageInfo
-
-type HardState struct {
- Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"`
- Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"`
- Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"`
-}
-
-func (m *HardState) Reset() { *m = HardState{} }
-func (m *HardState) String() string { return proto.CompactTextString(m) }
-func (*HardState) ProtoMessage() {}
-func (*HardState) Descriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{4}
-}
-func (m *HardState) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HardState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_HardState.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *HardState) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HardState.Merge(m, src)
-}
-func (m *HardState) XXX_Size() int {
- return m.Size()
-}
-func (m *HardState) XXX_DiscardUnknown() {
- xxx_messageInfo_HardState.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HardState proto.InternalMessageInfo
-
-type ConfState struct {
- // The voters in the incoming config. (If the configuration is not joint,
- // then the outgoing config is empty).
- Voters []uint64 `protobuf:"varint,1,rep,name=voters" json:"voters,omitempty"`
- // The learners in the incoming config.
- Learners []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"`
- // The voters in the outgoing config.
- VotersOutgoing []uint64 `protobuf:"varint,3,rep,name=voters_outgoing,json=votersOutgoing" json:"voters_outgoing,omitempty"`
- // The nodes that will become learners when the outgoing config is removed.
- // These nodes are necessarily currently in nodes_joint (or they would have
- // been added to the incoming config right away).
- LearnersNext []uint64 `protobuf:"varint,4,rep,name=learners_next,json=learnersNext" json:"learners_next,omitempty"`
- // If set, the config is joint and Raft will automatically transition into
- // the final config (i.e. remove the outgoing config) when this is safe.
- AutoLeave bool `protobuf:"varint,5,opt,name=auto_leave,json=autoLeave" json:"auto_leave"`
-}
-
-func (m *ConfState) Reset() { *m = ConfState{} }
-func (m *ConfState) String() string { return proto.CompactTextString(m) }
-func (*ConfState) ProtoMessage() {}
-func (*ConfState) Descriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{5}
-}
-func (m *ConfState) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ConfState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ConfState.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ConfState) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ConfState.Merge(m, src)
-}
-func (m *ConfState) XXX_Size() int {
- return m.Size()
-}
-func (m *ConfState) XXX_DiscardUnknown() {
- xxx_messageInfo_ConfState.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConfState proto.InternalMessageInfo
-
-type ConfChange struct {
- Type ConfChangeType `protobuf:"varint,2,opt,name=type,enum=raftpb.ConfChangeType" json:"type"`
- NodeID uint64 `protobuf:"varint,3,opt,name=node_id,json=nodeId" json:"node_id"`
- Context []byte `protobuf:"bytes,4,opt,name=context" json:"context,omitempty"`
- // NB: this is used only by etcd to thread through a unique identifier.
- // Ideally it should really use the Context instead. No counterpart to
- // this field exists in ConfChangeV2.
- ID uint64 `protobuf:"varint,1,opt,name=id" json:"id"`
-}
-
-func (m *ConfChange) Reset() { *m = ConfChange{} }
-func (m *ConfChange) String() string { return proto.CompactTextString(m) }
-func (*ConfChange) ProtoMessage() {}
-func (*ConfChange) Descriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{6}
-}
-func (m *ConfChange) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ConfChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ConfChange.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ConfChange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ConfChange.Merge(m, src)
-}
-func (m *ConfChange) XXX_Size() int {
- return m.Size()
-}
-func (m *ConfChange) XXX_DiscardUnknown() {
- xxx_messageInfo_ConfChange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConfChange proto.InternalMessageInfo
-
-// ConfChangeSingle is an individual configuration change operation. Multiple
-// such operations can be carried out atomically via a ConfChangeV2.
-type ConfChangeSingle struct {
- Type ConfChangeType `protobuf:"varint,1,opt,name=type,enum=raftpb.ConfChangeType" json:"type"`
- NodeID uint64 `protobuf:"varint,2,opt,name=node_id,json=nodeId" json:"node_id"`
-}
-
-func (m *ConfChangeSingle) Reset() { *m = ConfChangeSingle{} }
-func (m *ConfChangeSingle) String() string { return proto.CompactTextString(m) }
-func (*ConfChangeSingle) ProtoMessage() {}
-func (*ConfChangeSingle) Descriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{7}
-}
-func (m *ConfChangeSingle) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ConfChangeSingle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ConfChangeSingle.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ConfChangeSingle) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ConfChangeSingle.Merge(m, src)
-}
-func (m *ConfChangeSingle) XXX_Size() int {
- return m.Size()
-}
-func (m *ConfChangeSingle) XXX_DiscardUnknown() {
- xxx_messageInfo_ConfChangeSingle.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConfChangeSingle proto.InternalMessageInfo
-
-// ConfChangeV2 messages initiate configuration changes. They support both the
-// simple "one at a time" membership change protocol and full Joint Consensus
-// allowing for arbitrary changes in membership.
-//
-// The supplied context is treated as an opaque payload and can be used to
-// attach an action on the state machine to the application of the config change
-// proposal. Note that contrary to Joint Consensus as outlined in the Raft
-// paper[1], configuration changes become active when they are *applied* to the
-// state machine (not when they are appended to the log).
-//
-// The simple protocol can be used whenever only a single change is made.
-//
-// Non-simple changes require the use of Joint Consensus, for which two
-// configuration changes are run. The first configuration change specifies the
-// desired changes and transitions the Raft group into the joint configuration,
-// in which quorum requires a majority of both the pre-changes and post-changes
-// configuration. Joint Consensus avoids entering fragile intermediate
-// configurations that could compromise survivability. For example, without the
-// use of Joint Consensus and running across three availability zones with a
-// replication factor of three, it is not possible to replace a voter without
-// entering an intermediate configuration that does not survive the outage of
-// one availability zone.
-//
-// The provided ConfChangeTransition specifies how (and whether) Joint Consensus
-// is used, and assigns the task of leaving the joint configuration either to
-// Raft or the application. Leaving the joint configuration is accomplished by
-// proposing an otherwise empty ConfChangeV2, in which only the Context field
-// may optionally be populated.
-//
-// For details on Raft membership changes, see:
-//
-// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf
-type ConfChangeV2 struct {
- Transition ConfChangeTransition `protobuf:"varint,1,opt,name=transition,enum=raftpb.ConfChangeTransition" json:"transition"`
- Changes []ConfChangeSingle `protobuf:"bytes,2,rep,name=changes" json:"changes"`
- Context []byte `protobuf:"bytes,3,opt,name=context" json:"context,omitempty"`
-}
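
As a concrete reading of the comment above, the following hypothetical value atomically replaces voter 3 with voter 4; because it carries two changes it must go through joint consensus, and the implicit transition asks Raft to leave the joint configuration automatically.

```go
var replaceVoter = ConfChangeV2{
	Transition: ConfChangeTransitionJointImplicit,
	Changes: []ConfChangeSingle{
		{Type: ConfChangeAddNode, NodeID: 4},
		{Type: ConfChangeRemoveNode, NodeID: 3},
	},
	// Context is an opaque application payload; raft itself never interprets it.
}
```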
-
-func (m *ConfChangeV2) Reset() { *m = ConfChangeV2{} }
-func (m *ConfChangeV2) String() string { return proto.CompactTextString(m) }
-func (*ConfChangeV2) ProtoMessage() {}
-func (*ConfChangeV2) Descriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{8}
-}
-func (m *ConfChangeV2) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ConfChangeV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ConfChangeV2.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ConfChangeV2) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ConfChangeV2.Merge(m, src)
-}
-func (m *ConfChangeV2) XXX_Size() int {
- return m.Size()
-}
-func (m *ConfChangeV2) XXX_DiscardUnknown() {
- xxx_messageInfo_ConfChangeV2.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConfChangeV2 proto.InternalMessageInfo
-
-func init() {
- proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value)
- proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value)
- proto.RegisterEnum("raftpb.ConfChangeTransition", ConfChangeTransition_name, ConfChangeTransition_value)
- proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value)
- proto.RegisterType((*Entry)(nil), "raftpb.Entry")
- proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata")
- proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot")
- proto.RegisterType((*Message)(nil), "raftpb.Message")
- proto.RegisterType((*HardState)(nil), "raftpb.HardState")
- proto.RegisterType((*ConfState)(nil), "raftpb.ConfState")
- proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange")
- proto.RegisterType((*ConfChangeSingle)(nil), "raftpb.ConfChangeSingle")
- proto.RegisterType((*ConfChangeV2)(nil), "raftpb.ConfChangeV2")
-}
-
-func init() { proto.RegisterFile("raft.proto", fileDescriptor_b042552c306ae59b) }
-
-var fileDescriptor_b042552c306ae59b = []byte{
- // 1026 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcd, 0x6e, 0xdb, 0x46,
- 0x17, 0x25, 0x29, 0x5a, 0x3f, 0x57, 0xb2, 0x3c, 0xbe, 0xf1, 0x17, 0x10, 0x86, 0xc1, 0xe8, 0x53,
- 0x52, 0x44, 0x70, 0x11, 0xb7, 0xd0, 0xa2, 0x28, 0xba, 0xf3, 0x4f, 0x00, 0xab, 0xb0, 0xdc, 0x54,
- 0x76, 0xbc, 0x28, 0x50, 0x08, 0x63, 0x71, 0x44, 0xb3, 0x15, 0x39, 0x04, 0x39, 0x72, 0xed, 0x4d,
- 0x51, 0xf4, 0x09, 0xba, 0xec, 0x26, 0xdb, 0x3e, 0x40, 0x9f, 0xc2, 0x4b, 0x03, 0xdd, 0x74, 0x15,
- 0x34, 0xf6, 0x8b, 0x14, 0x33, 0x1c, 0x4a, 0x94, 0x6c, 0x64, 0xd1, 0xdd, 0xcc, 0xb9, 0x67, 0xee,
- 0x9c, 0x73, 0xef, 0xe5, 0x10, 0x20, 0xa1, 0x63, 0xb1, 0x13, 0x27, 0x5c, 0x70, 0x2c, 0xcb, 0x75,
- 0x7c, 0xbe, 0xb9, 0xe1, 0x73, 0x9f, 0x2b, 0xe8, 0x33, 0xb9, 0xca, 0xa2, 0xed, 0x9f, 0x61, 0xe5,
- 0x75, 0x24, 0x92, 0x6b, 0x74, 0xc0, 0x3e, 0x65, 0x49, 0xe8, 0x58, 0x2d, 0xb3, 0x63, 0xef, 0xd9,
- 0x37, 0xef, 0x9f, 0x19, 0x03, 0x85, 0xe0, 0x26, 0xac, 0xf4, 0x22, 0x8f, 0x5d, 0x39, 0xa5, 0x42,
- 0x28, 0x83, 0xf0, 0x53, 0xb0, 0x4f, 0xaf, 0x63, 0xe6, 0x98, 0x2d, 0xb3, 0xd3, 0xec, 0xae, 0xef,
- 0x64, 0x77, 0xed, 0xa8, 0x94, 0x32, 0x30, 0x4b, 0x74, 0x1d, 0x33, 0x44, 0xb0, 0x0f, 0xa8, 0xa0,
- 0x8e, 0xdd, 0x32, 0x3b, 0x8d, 0x81, 0x5a, 0xb7, 0x7f, 0x31, 0x81, 0x9c, 0x44, 0x34, 0x4e, 0x2f,
- 0xb8, 0xe8, 0x33, 0x41, 0x3d, 0x2a, 0x28, 0x7e, 0x01, 0x30, 0xe2, 0xd1, 0x78, 0x98, 0x0a, 0x2a,
- 0xb2, 0xdc, 0xf5, 0x79, 0xee, 0x7d, 0x1e, 0x8d, 0x4f, 0x64, 0x40, 0xe7, 0xae, 0x8d, 0x72, 0x40,
- 0x2a, 0x0d, 0x94, 0xd2, 0xa2, 0x89, 0x0c, 0x92, 0xfe, 0x84, 0xf4, 0x57, 0x34, 0xa1, 0x90, 0xf6,
- 0x77, 0x50, 0xcd, 0x15, 0x48, 0x89, 0x52, 0x81, 0xba, 0xb3, 0x31, 0x50, 0x6b, 0xfc, 0x0a, 0xaa,
- 0xa1, 0x56, 0xa6, 0x12, 0xd7, 0xbb, 0x4e, 0xae, 0x65, 0x59, 0xb9, 0xce, 0x3b, 0xe3, 0xb7, 0xdf,
- 0x95, 0xa0, 0xd2, 0x67, 0x69, 0x4a, 0x7d, 0x86, 0xaf, 0xc0, 0x16, 0xf3, 0x5a, 0x3d, 0xc9, 0x73,
- 0xe8, 0x70, 0xb1, 0x5a, 0x92, 0x86, 0x1b, 0x60, 0x09, 0xbe, 0xe0, 0xc4, 0x12, 0x5c, 0xda, 0x18,
- 0x27, 0x7c, 0xc9, 0x86, 0x44, 0x66, 0x06, 0xed, 0x65, 0x83, 0xe8, 0x42, 0x65, 0xc2, 0x7d, 0xd5,
- 0xdd, 0x95, 0x42, 0x30, 0x07, 0xe7, 0x65, 0x2b, 0x3f, 0x2c, 0xdb, 0x2b, 0xa8, 0xb0, 0x48, 0x24,
- 0x01, 0x4b, 0x9d, 0x4a, 0xab, 0xd4, 0xa9, 0x77, 0x57, 0x17, 0x7a, 0x9c, 0xa7, 0xd2, 0x1c, 0xdc,
- 0x82, 0xf2, 0x88, 0x87, 0x61, 0x20, 0x9c, 0x6a, 0x21, 0x97, 0xc6, 0xb0, 0x0b, 0xd5, 0x54, 0x57,
- 0xcc, 0xa9, 0xa9, 0x4a, 0x92, 0xe5, 0x4a, 0xe6, 0x15, 0xcc, 0x79, 0x32, 0x63, 0xc2, 0x7e, 0x60,
- 0x23, 0xe1, 0x40, 0xcb, 0xec, 0x54, 0xf3, 0x8c, 0x19, 0x86, 0x2f, 0x00, 0xb2, 0xd5, 0x61, 0x10,
- 0x09, 0xa7, 0x5e, 0xb8, 0xb3, 0x80, 0xa3, 0x03, 0x95, 0x11, 0x8f, 0x04, 0xbb, 0x12, 0x4e, 0x43,
- 0x35, 0x36, 0xdf, 0xb6, 0xbf, 0x87, 0xda, 0x21, 0x4d, 0xbc, 0x6c, 0x7c, 0xf2, 0x0a, 0x9a, 0x0f,
- 0x2a, 0xe8, 0x80, 0x7d, 0xc9, 0x05, 0x5b, 0xfc, 0x38, 0x24, 0x52, 0x30, 0x5c, 0x7a, 0x68, 0xb8,
- 0xfd, 0xa7, 0x09, 0xb5, 0xd9, 0xbc, 0xe2, 0x53, 0x28, 0xcb, 0x33, 0x49, 0xea, 0x98, 0xad, 0x52,
- 0xc7, 0x1e, 0xe8, 0x1d, 0x6e, 0x42, 0x75, 0xc2, 0x68, 0x12, 0xc9, 0x88, 0xa5, 0x22, 0xb3, 0x3d,
- 0xbe, 0x84, 0xb5, 0x8c, 0x35, 0xe4, 0x53, 0xe1, 0xf3, 0x20, 0xf2, 0x9d, 0x92, 0xa2, 0x34, 0x33,
- 0xf8, 0x1b, 0x8d, 0xe2, 0x73, 0x58, 0xcd, 0x0f, 0x0d, 0x23, 0xe9, 0xd4, 0x56, 0xb4, 0x46, 0x0e,
- 0x1e, 0xb3, 0x2b, 0x81, 0xcf, 0x01, 0xe8, 0x54, 0xf0, 0xe1, 0x84, 0xd1, 0x4b, 0xa6, 0x86, 0x21,
- 0x2f, 0x68, 0x4d, 0xe2, 0x47, 0x12, 0x6e, 0xbf, 0x33, 0x01, 0xa4, 0xe8, 0xfd, 0x0b, 0x1a, 0xf9,
- 0x0c, 0x3f, 0xd7, 0x63, 0x6b, 0xa9, 0xb1, 0x7d, 0x5a, 0xfc, 0x0c, 0x33, 0xc6, 0x83, 0xc9, 0x7d,
- 0x09, 0x95, 0x88, 0x7b, 0x6c, 0x18, 0x78, 0xba, 0x28, 0x4d, 0x19, 0xbc, 0x7b, 0xff, 0xac, 0x7c,
- 0xcc, 0x3d, 0xd6, 0x3b, 0x18, 0x94, 0x65, 0xb8, 0xe7, 0x15, 0xfb, 0x62, 0x2f, 0xf4, 0x05, 0x37,
- 0xc1, 0x0a, 0x3c, 0xdd, 0x08, 0xd0, 0xa7, 0xad, 0xde, 0xc1, 0xc0, 0x0a, 0xbc, 0x76, 0x08, 0x64,
- 0x7e, 0xf9, 0x49, 0x10, 0xf9, 0x93, 0xb9, 0x48, 0xf3, 0xbf, 0x88, 0xb4, 0x3e, 0x26, 0xb2, 0xfd,
- 0x87, 0x09, 0x8d, 0x79, 0x9e, 0xb3, 0x2e, 0xee, 0x01, 0x88, 0x84, 0x46, 0x69, 0x20, 0x02, 0x1e,
- 0xe9, 0x1b, 0xb7, 0x1e, 0xb9, 0x71, 0xc6, 0xc9, 0x27, 0x72, 0x7e, 0x0a, 0xbf, 0x84, 0xca, 0x48,
- 0xb1, 0xb2, 0x8e, 0x17, 0x9e, 0x94, 0x65, 0x6b, 0xf9, 0x17, 0xa6, 0xe9, 0xc5, 0x9a, 0x95, 0x16,
- 0x6a, 0xb6, 0x7d, 0x08, 0xb5, 0xd9, 0xbb, 0x8b, 0x6b, 0x50, 0x57, 0x9b, 0x63, 0x9e, 0x84, 0x74,
- 0x42, 0x0c, 0x7c, 0x02, 0x6b, 0x0a, 0x98, 0xe7, 0x27, 0x26, 0xfe, 0x0f, 0xd6, 0x97, 0xc0, 0xb3,
- 0x2e, 0xb1, 0xb6, 0xff, 0xb2, 0xa0, 0x5e, 0x78, 0x96, 0x10, 0xa0, 0xdc, 0x4f, 0xfd, 0xc3, 0x69,
- 0x4c, 0x0c, 0xac, 0x43, 0xa5, 0x9f, 0xfa, 0x7b, 0x8c, 0x0a, 0x62, 0xea, 0xcd, 0x9b, 0x84, 0xc7,
- 0xc4, 0xd2, 0xac, 0xdd, 0x38, 0x26, 0x25, 0x6c, 0x02, 0x64, 0xeb, 0x01, 0x4b, 0x63, 0x62, 0x6b,
- 0xe2, 0x19, 0x17, 0x8c, 0xac, 0x48, 0x6d, 0x7a, 0xa3, 0xa2, 0x65, 0x1d, 0x95, 0x4f, 0x00, 0xa9,
- 0x20, 0x81, 0x86, 0xbc, 0x8c, 0xd1, 0x44, 0x9c, 0xcb, 0x5b, 0xaa, 0xb8, 0x01, 0xa4, 0x88, 0xa8,
- 0x43, 0x35, 0x44, 0x68, 0xf6, 0x53, 0xff, 0x6d, 0x94, 0x30, 0x3a, 0xba, 0xa0, 0xe7, 0x13, 0x46,
- 0x00, 0xd7, 0x61, 0x55, 0x27, 0x92, 0x5f, 0xdc, 0x34, 0x25, 0x75, 0x4d, 0xdb, 0xbf, 0x60, 0xa3,
- 0x1f, 0xbf, 0x9d, 0xf2, 0x64, 0x1a, 0x92, 0x86, 0xb4, 0xdd, 0x4f, 0x7d, 0xd5, 0xa0, 0x31, 0x4b,
- 0x8e, 0x18, 0xf5, 0x58, 0x42, 0x56, 0xf5, 0xe9, 0xd3, 0x20, 0x64, 0x7c, 0x2a, 0x8e, 0xf9, 0x4f,
- 0xa4, 0xa9, 0xc5, 0x0c, 0x18, 0xf5, 0xd4, 0xff, 0x8e, 0xac, 0x69, 0x31, 0x33, 0x44, 0x89, 0x21,
- 0xda, 0xef, 0x9b, 0x84, 0x29, 0x8b, 0xeb, 0xfa, 0x56, 0xbd, 0x57, 0x1c, 0xdc, 0xfe, 0xd5, 0x84,
- 0x8d, 0xc7, 0xc6, 0x03, 0xb7, 0xc0, 0x79, 0x0c, 0xdf, 0x9d, 0x0a, 0x4e, 0x0c, 0xfc, 0x04, 0xfe,
- 0xff, 0x58, 0xf4, 0x6b, 0x1e, 0x44, 0xa2, 0x17, 0xc6, 0x93, 0x60, 0x14, 0xc8, 0x56, 0x7c, 0x8c,
- 0xf6, 0xfa, 0x4a, 0xd3, 0xac, 0xed, 0x6b, 0x68, 0x2e, 0x7e, 0x14, 0xb2, 0x18, 0x73, 0x64, 0xd7,
- 0xf3, 0xe4, 0xf8, 0x13, 0x03, 0x9d, 0xa2, 0xd8, 0x01, 0x0b, 0xf9, 0x25, 0x53, 0x11, 0x73, 0x31,
- 0xf2, 0x36, 0xf6, 0xa8, 0xc8, 0x22, 0xd6, 0xa2, 0x91, 0x5d, 0xcf, 0x3b, 0xca, 0xde, 0x1e, 0x15,
- 0x2d, 0xed, 0xbd, 0xb8, 0xf9, 0xe0, 0x1a, 0xb7, 0x1f, 0x5c, 0xe3, 0xe6, 0xce, 0x35, 0x6f, 0xef,
- 0x5c, 0xf3, 0x9f, 0x3b, 0xd7, 0xfc, 0xed, 0xde, 0x35, 0x7e, 0xbf, 0x77, 0x8d, 0xdb, 0x7b, 0xd7,
- 0xf8, 0xfb, 0xde, 0x35, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xee, 0xe3, 0x39, 0x8b, 0xbb, 0x08,
- 0x00, 0x00,
-}
-
-func (m *Entry) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Entry) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Data != nil {
- i -= len(m.Data)
- copy(dAtA[i:], m.Data)
- i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
- i--
- dAtA[i] = 0x22
- }
- i = encodeVarintRaft(dAtA, i, uint64(m.Index))
- i--
- dAtA[i] = 0x18
- i = encodeVarintRaft(dAtA, i, uint64(m.Term))
- i--
- dAtA[i] = 0x10
- i = encodeVarintRaft(dAtA, i, uint64(m.Type))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
-
-func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SnapshotMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- i = encodeVarintRaft(dAtA, i, uint64(m.Term))
- i--
- dAtA[i] = 0x18
- i = encodeVarintRaft(dAtA, i, uint64(m.Index))
- i--
- dAtA[i] = 0x10
- {
- size, err := m.ConfState.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaft(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *Snapshot) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaft(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- if m.Data != nil {
- i -= len(m.Data)
- copy(dAtA[i:], m.Data)
- i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Message) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Message) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Context != nil {
- i -= len(m.Context)
- copy(dAtA[i:], m.Context)
- i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
- i--
- dAtA[i] = 0x62
- }
- i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint))
- i--
- dAtA[i] = 0x58
- i--
- if m.Reject {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x50
- {
- size, err := m.Snapshot.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaft(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4a
- i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
- i--
- dAtA[i] = 0x40
- if len(m.Entries) > 0 {
- for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaft(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- }
- i = encodeVarintRaft(dAtA, i, uint64(m.Index))
- i--
- dAtA[i] = 0x30
- i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm))
- i--
- dAtA[i] = 0x28
- i = encodeVarintRaft(dAtA, i, uint64(m.Term))
- i--
- dAtA[i] = 0x20
- i = encodeVarintRaft(dAtA, i, uint64(m.From))
- i--
- dAtA[i] = 0x18
- i = encodeVarintRaft(dAtA, i, uint64(m.To))
- i--
- dAtA[i] = 0x10
- i = encodeVarintRaft(dAtA, i, uint64(m.Type))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
-
-func (m *HardState) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *HardState) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HardState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
- i--
- dAtA[i] = 0x18
- i = encodeVarintRaft(dAtA, i, uint64(m.Vote))
- i--
- dAtA[i] = 0x10
- i = encodeVarintRaft(dAtA, i, uint64(m.Term))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
-
-func (m *ConfState) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ConfState) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ConfState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- i--
- if m.AutoLeave {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x28
- if len(m.LearnersNext) > 0 {
- for iNdEx := len(m.LearnersNext) - 1; iNdEx >= 0; iNdEx-- {
- i = encodeVarintRaft(dAtA, i, uint64(m.LearnersNext[iNdEx]))
- i--
- dAtA[i] = 0x20
- }
- }
- if len(m.VotersOutgoing) > 0 {
- for iNdEx := len(m.VotersOutgoing) - 1; iNdEx >= 0; iNdEx-- {
- i = encodeVarintRaft(dAtA, i, uint64(m.VotersOutgoing[iNdEx]))
- i--
- dAtA[i] = 0x18
- }
- }
- if len(m.Learners) > 0 {
- for iNdEx := len(m.Learners) - 1; iNdEx >= 0; iNdEx-- {
- i = encodeVarintRaft(dAtA, i, uint64(m.Learners[iNdEx]))
- i--
- dAtA[i] = 0x10
- }
- }
- if len(m.Voters) > 0 {
- for iNdEx := len(m.Voters) - 1; iNdEx >= 0; iNdEx-- {
- i = encodeVarintRaft(dAtA, i, uint64(m.Voters[iNdEx]))
- i--
- dAtA[i] = 0x8
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ConfChange) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ConfChange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Context != nil {
- i -= len(m.Context)
- copy(dAtA[i:], m.Context)
- i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
- i--
- dAtA[i] = 0x22
- }
- i = encodeVarintRaft(dAtA, i, uint64(m.NodeID))
- i--
- dAtA[i] = 0x18
- i = encodeVarintRaft(dAtA, i, uint64(m.Type))
- i--
- dAtA[i] = 0x10
- i = encodeVarintRaft(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
-
-func (m *ConfChangeSingle) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ConfChangeSingle) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ConfChangeSingle) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- i = encodeVarintRaft(dAtA, i, uint64(m.NodeID))
- i--
- dAtA[i] = 0x10
- i = encodeVarintRaft(dAtA, i, uint64(m.Type))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
-
-func (m *ConfChangeV2) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ConfChangeV2) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ConfChangeV2) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Context != nil {
- i -= len(m.Context)
- copy(dAtA[i:], m.Context)
- i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Changes) > 0 {
- for iNdEx := len(m.Changes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Changes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaft(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- i = encodeVarintRaft(dAtA, i, uint64(m.Transition))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
-
-func encodeVarintRaft(dAtA []byte, offset int, v uint64) int {
- offset -= sovRaft(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *Entry) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRaft(uint64(m.Type))
- n += 1 + sovRaft(uint64(m.Term))
- n += 1 + sovRaft(uint64(m.Index))
- if m.Data != nil {
- l = len(m.Data)
- n += 1 + l + sovRaft(uint64(l))
- }
- return n
-}
-
-func (m *SnapshotMetadata) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ConfState.Size()
- n += 1 + l + sovRaft(uint64(l))
- n += 1 + sovRaft(uint64(m.Index))
- n += 1 + sovRaft(uint64(m.Term))
- return n
-}
-
-func (m *Snapshot) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Data != nil {
- l = len(m.Data)
- n += 1 + l + sovRaft(uint64(l))
- }
- l = m.Metadata.Size()
- n += 1 + l + sovRaft(uint64(l))
- return n
-}
-
-func (m *Message) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRaft(uint64(m.Type))
- n += 1 + sovRaft(uint64(m.To))
- n += 1 + sovRaft(uint64(m.From))
- n += 1 + sovRaft(uint64(m.Term))
- n += 1 + sovRaft(uint64(m.LogTerm))
- n += 1 + sovRaft(uint64(m.Index))
- if len(m.Entries) > 0 {
- for _, e := range m.Entries {
- l = e.Size()
- n += 1 + l + sovRaft(uint64(l))
- }
- }
- n += 1 + sovRaft(uint64(m.Commit))
- l = m.Snapshot.Size()
- n += 1 + l + sovRaft(uint64(l))
- n += 2
- n += 1 + sovRaft(uint64(m.RejectHint))
- if m.Context != nil {
- l = len(m.Context)
- n += 1 + l + sovRaft(uint64(l))
- }
- return n
-}
-
-func (m *HardState) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRaft(uint64(m.Term))
- n += 1 + sovRaft(uint64(m.Vote))
- n += 1 + sovRaft(uint64(m.Commit))
- return n
-}
-
-func (m *ConfState) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Voters) > 0 {
- for _, e := range m.Voters {
- n += 1 + sovRaft(uint64(e))
- }
- }
- if len(m.Learners) > 0 {
- for _, e := range m.Learners {
- n += 1 + sovRaft(uint64(e))
- }
- }
- if len(m.VotersOutgoing) > 0 {
- for _, e := range m.VotersOutgoing {
- n += 1 + sovRaft(uint64(e))
- }
- }
- if len(m.LearnersNext) > 0 {
- for _, e := range m.LearnersNext {
- n += 1 + sovRaft(uint64(e))
- }
- }
- n += 2
- return n
-}
-
-func (m *ConfChange) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRaft(uint64(m.ID))
- n += 1 + sovRaft(uint64(m.Type))
- n += 1 + sovRaft(uint64(m.NodeID))
- if m.Context != nil {
- l = len(m.Context)
- n += 1 + l + sovRaft(uint64(l))
- }
- return n
-}
-
-func (m *ConfChangeSingle) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRaft(uint64(m.Type))
- n += 1 + sovRaft(uint64(m.NodeID))
- return n
-}
-
-func (m *ConfChangeV2) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRaft(uint64(m.Transition))
- if len(m.Changes) > 0 {
- for _, e := range m.Changes {
- l = e.Size()
- n += 1 + l + sovRaft(uint64(l))
- }
- }
- if m.Context != nil {
- l = len(m.Context)
- n += 1 + l + sovRaft(uint64(l))
- }
- return n
-}
-
-func sovRaft(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozRaft(x uint64) (n int) {
- return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
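sovRaft above returns how many bytes a value occupies as a protobuf varint, and sozRaft zigzag-encodes first so small negative values stay short. A self-contained sketch (helper names mirror the generated ones but live in their own package) that cross-checks the arithmetic against encoding/binary from the standard library:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// sovRaft mirrors the generated size helper: a 64-bit value needs
// ceil(bitlen/7) varint bytes, with x|1 guarding the x == 0 case.
func sovRaft(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// sozRaft mirrors the signed variant: zigzag-encode, then size as unsigned.
func sozRaft(x uint64) int {
	return sovRaft((x << 1) ^ uint64(int64(x)>>63))
}

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	for _, v := range []uint64{0, 1, 127, 128, 300, 1 << 32} {
		got := sovRaft(v)
		want := binary.PutUvarint(buf, v) // standard-library varint length
		fmt.Printf("v=%d sovRaft=%d PutUvarint=%d\n", v, got, want)
	}
	// Zigzag maps -1 to 1, so it still encodes in a single byte.
	fmt.Println("sozRaft(-1 as uint64):", sozRaft(^uint64(0)))
}
```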
-func (m *Entry) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Entry: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- m.Type = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Type |= EntryType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
- }
- m.Term = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Term |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
- }
- m.Index = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Index |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
- if m.Data == nil {
- m.Data = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRaft(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
- }
- m.Index = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Index |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
- }
- m.Term = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Term |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRaft(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Snapshot) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
- if m.Data == nil {
- m.Data = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRaft(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Message) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Message: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- m.Type = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Type |= MessageType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
- }
- m.To = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.To |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
- }
- m.From = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.From |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
- }
- m.Term = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Term |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType)
- }
- m.LogTerm = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.LogTerm |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
- }
- m.Index = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Index |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Entries = append(m.Entries, Entry{})
- if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
- }
- m.Commit = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Commit |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 9:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Reject = bool(v != 0)
- case 11:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType)
- }
- m.RejectHint = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RejectHint |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 12:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
- if m.Context == nil {
- m.Context = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRaft(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *HardState) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: HardState: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
- }
- m.Term = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Term |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType)
- }
- m.Vote = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Vote |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
- }
- m.Commit = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Commit |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRaft(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ConfState) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ConfState: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType == 0 {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Voters = append(m.Voters, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.Voters) == 0 {
- m.Voters = make([]uint64, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Voters = append(m.Voters, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field Voters", wireType)
- }
- case 2:
- if wireType == 0 {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Learners = append(m.Learners, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.Learners) == 0 {
- m.Learners = make([]uint64, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Learners = append(m.Learners, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType)
- }
- case 3:
- if wireType == 0 {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.VotersOutgoing = append(m.VotersOutgoing, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.VotersOutgoing) == 0 {
- m.VotersOutgoing = make([]uint64, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.VotersOutgoing = append(m.VotersOutgoing, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field VotersOutgoing", wireType)
- }
- case 4:
- if wireType == 0 {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.LearnersNext = append(m.LearnersNext, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.LearnersNext) == 0 {
- m.LearnersNext = make([]uint64, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.LearnersNext = append(m.LearnersNext, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field LearnersNext", wireType)
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AutoLeave", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.AutoLeave = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipRaft(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ConfChange) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ConfChange: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- m.Type = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Type |= ConfChangeType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
- }
- m.NodeID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.NodeID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
- if m.Context == nil {
- m.Context = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRaft(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ConfChangeSingle) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ConfChangeSingle: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ConfChangeSingle: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- m.Type = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Type |= ConfChangeType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
- }
- m.NodeID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.NodeID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRaft(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ConfChangeV2) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ConfChangeV2: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ConfChangeV2: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Transition", wireType)
- }
- m.Transition = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Transition |= ConfChangeTransition(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Changes = append(m.Changes, ConfChangeSingle{})
- if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
- if m.Context == nil {
- m.Context = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRaft(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipRaft(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthRaft
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupRaft
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthRaft
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupRaft = fmt.Errorf("proto: unexpected end of group")
-)
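The generated Marshal/Unmarshal pairs deleted above were typically exercised as a simple round trip. A hedged sketch using the pre-removal import path `go.etcd.io/etcd/raft/v3/raftpb` (shown in the rafttest files below); after this change the package lives in the standalone raft module, so the path would differ:

```go
package main

import (
	"fmt"
	"log"

	pb "go.etcd.io/etcd/raft/v3/raftpb"
)

func main() {
	cc := pb.ConfChange{
		ID:     1,
		Type:   pb.ConfChangeAddNode,
		NodeID: 2,
	}
	// Marshal uses the generated sizer and marshaler removed above.
	b, err := cc.Marshal()
	if err != nil {
		log.Fatal(err)
	}

	var out pb.ConfChange
	if err := out.Unmarshal(b); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("round trip: %+v (wire size %d bytes)\n", out, len(b))
}
```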
diff --git a/raft/raftpb/raft.proto b/raft/raftpb/raft.proto
deleted file mode 100644
index f46a54c948b..00000000000
--- a/raft/raftpb/raft.proto
+++ /dev/null
@@ -1,187 +0,0 @@
-syntax = "proto2";
-package raftpb;
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-option (gogoproto.goproto_enum_prefix_all) = false;
-option (gogoproto.goproto_unkeyed_all) = false;
-option (gogoproto.goproto_unrecognized_all) = false;
-option (gogoproto.goproto_sizecache_all) = false;
-
-enum EntryType {
- EntryNormal = 0;
- EntryConfChange = 1; // corresponds to pb.ConfChange
- EntryConfChangeV2 = 2; // corresponds to pb.ConfChangeV2
-}
-
-message Entry {
- optional uint64 Term = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
- optional uint64 Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
- optional EntryType Type = 1 [(gogoproto.nullable) = false];
- optional bytes Data = 4;
-}
-
-message SnapshotMetadata {
- optional ConfState conf_state = 1 [(gogoproto.nullable) = false];
- optional uint64 index = 2 [(gogoproto.nullable) = false];
- optional uint64 term = 3 [(gogoproto.nullable) = false];
-}
-
-message Snapshot {
- optional bytes data = 1;
- optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false];
-}
-
-// For description of different message types, see:
-// https://pkg.go.dev/go.etcd.io/etcd/raft/v3#hdr-MessageType
-enum MessageType {
- MsgHup = 0;
- MsgBeat = 1;
- MsgProp = 2;
- MsgApp = 3;
- MsgAppResp = 4;
- MsgVote = 5;
- MsgVoteResp = 6;
- MsgSnap = 7;
- MsgHeartbeat = 8;
- MsgHeartbeatResp = 9;
- MsgUnreachable = 10;
- MsgSnapStatus = 11;
- MsgCheckQuorum = 12;
- MsgTransferLeader = 13;
- MsgTimeoutNow = 14;
- MsgReadIndex = 15;
- MsgReadIndexResp = 16;
- MsgPreVote = 17;
- MsgPreVoteResp = 18;
-}
-
-message Message {
- optional MessageType type = 1 [(gogoproto.nullable) = false];
- optional uint64 to = 2 [(gogoproto.nullable) = false];
- optional uint64 from = 3 [(gogoproto.nullable) = false];
- optional uint64 term = 4 [(gogoproto.nullable) = false];
- // logTerm is generally used for appending Raft logs to followers. For example,
- // (type=MsgApp,index=100,logTerm=5) means leader appends entries starting at
- // index=101, and the term of entry at index 100 is 5.
- // (type=MsgAppResp,reject=true,index=100,logTerm=5) means follower rejects some
- // entries from its leader as it already has an entry with term 5 at index 100.
- optional uint64 logTerm = 5 [(gogoproto.nullable) = false];
- optional uint64 index = 6 [(gogoproto.nullable) = false];
- repeated Entry entries = 7 [(gogoproto.nullable) = false];
- optional uint64 commit = 8 [(gogoproto.nullable) = false];
- optional Snapshot snapshot = 9 [(gogoproto.nullable) = false];
- optional bool reject = 10 [(gogoproto.nullable) = false];
- optional uint64 rejectHint = 11 [(gogoproto.nullable) = false];
- optional bytes context = 12;
-}
-
-message HardState {
- optional uint64 term = 1 [(gogoproto.nullable) = false];
- optional uint64 vote = 2 [(gogoproto.nullable) = false];
- optional uint64 commit = 3 [(gogoproto.nullable) = false];
-}
-
-// ConfChangeTransition specifies the behavior of a configuration change with
-// respect to joint consensus.
-enum ConfChangeTransition {
- // Automatically use the simple protocol if possible, otherwise fall back
- // to ConfChangeTransitionJointImplicit. Most applications will want to use this.
- ConfChangeTransitionAuto = 0;
- // Use joint consensus unconditionally, and transition out of it
- // automatically (by proposing a zero configuration change).
- //
- // This option is suitable for applications that want to minimize the time
- // spent in the joint configuration and do not store the joint configuration
- // in the state machine (outside of InitialState).
- ConfChangeTransitionJointImplicit = 1;
- // Use joint consensus and remain in the joint configuration until the
- // application proposes a no-op configuration change. This is suitable for
- // applications that want to explicitly control the transitions, for example
- // to use a custom payload (via the Context field).
- ConfChangeTransitionJointExplicit = 2;
-}
-
-message ConfState {
- // The voters in the incoming config. (If the configuration is not joint,
- // then the outgoing config is empty).
- repeated uint64 voters = 1;
- // The learners in the incoming config.
- repeated uint64 learners = 2;
- // The voters in the outgoing config.
- repeated uint64 voters_outgoing = 3;
- // The nodes that will become learners when the outgoing config is removed.
- // These nodes are necessarily currently in voters_outgoing (or they would have
- // been added to the incoming config right away).
- repeated uint64 learners_next = 4;
- // If set, the config is joint and Raft will automatically transition into
- // the final config (i.e. remove the outgoing config) when this is safe.
- optional bool auto_leave = 5 [(gogoproto.nullable) = false];
-}
-
-enum ConfChangeType {
- ConfChangeAddNode = 0;
- ConfChangeRemoveNode = 1;
- ConfChangeUpdateNode = 2;
- ConfChangeAddLearnerNode = 3;
-}
-
-message ConfChange {
- optional ConfChangeType type = 2 [(gogoproto.nullable) = false];
- optional uint64 node_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID" ];
- optional bytes context = 4;
-
- // NB: this is used only by etcd to thread through a unique identifier.
- // Ideally it should really use the Context instead. No counterpart to
- // this field exists in ConfChangeV2.
- optional uint64 id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID" ];
-}
-
-// ConfChangeSingle is an individual configuration change operation. Multiple
-// such operations can be carried out atomically via a ConfChangeV2.
-message ConfChangeSingle {
- optional ConfChangeType type = 1 [(gogoproto.nullable) = false];
- optional uint64 node_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID"];
-}
-
-// ConfChangeV2 messages initiate configuration changes. They support both the
-// simple "one at a time" membership change protocol and full Joint Consensus
-// allowing for arbitrary changes in membership.
-//
-// The supplied context is treated as an opaque payload and can be used to
-// attach an action on the state machine to the application of the config change
-// proposal. Note that contrary to Joint Consensus as outlined in the Raft
-// paper[1], configuration changes become active when they are *applied* to the
-// state machine (not when they are appended to the log).
-//
-// The simple protocol can be used whenever only a single change is made.
-//
-// Non-simple changes require the use of Joint Consensus, for which two
-// configuration changes are run. The first configuration change specifies the
-// desired changes and transitions the Raft group into the joint configuration,
-// in which quorum requires a majority of both the pre-changes and post-changes
-// configuration. Joint Consensus avoids entering fragile intermediate
-// configurations that could compromise survivability. For example, without the
-// use of Joint Consensus and running across three availability zones with a
-// replication factor of three, it is not possible to replace a voter without
-// entering an intermediate configuration that does not survive the outage of
-// one availability zone.
-//
-// The provided ConfChangeTransition specifies how (and whether) Joint Consensus
-// is used, and assigns the task of leaving the joint configuration either to
-// Raft or the application. Leaving the joint configuration is accomplished by
-// proposing a ConfChangeV2 with only and optionally the Context field
-// populated.
-//
-// For details on Raft membership changes, see:
-//
-// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf
-message ConfChangeV2 {
- optional ConfChangeTransition transition = 1 [(gogoproto.nullable) = false];
- repeated ConfChangeSingle changes = 2 [(gogoproto.nullable) = false];
- optional bytes context = 3;
-}
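The ConfChangeV2 comment above describes entering a joint configuration and later leaving it by proposing an empty ConfChangeV2. A hedged Go sketch of those two proposals using the pre-removal raftpb types (how the proposals are fed to a node is application-specific and omitted here):

```go
package main

import (
	"fmt"

	pb "go.etcd.io/etcd/raft/v3/raftpb"
)

func main() {
	// First proposal: atomically add voter 4 and remove voter 1, staying in
	// the joint configuration until the application leaves it explicitly.
	enter := pb.ConfChangeV2{
		Transition: pb.ConfChangeTransitionJointExplicit,
		Changes: []pb.ConfChangeSingle{
			{Type: pb.ConfChangeAddNode, NodeID: 4},
			{Type: pb.ConfChangeRemoveNode, NodeID: 1},
		},
	}

	// Second proposal: the empty ConfChangeV2 that leaves the joint
	// configuration, as the comment above describes (only Context may
	// optionally be set).
	leave := pb.ConfChangeV2{}

	fmt.Printf("enter joint: %+v\n", enter)
	fmt.Printf("leave joint: %+v\n", leave)
}
```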
diff --git a/raft/raftpb/raft_test.go b/raft/raftpb/raft_test.go
deleted file mode 100644
index ff6b6d86a3e..00000000000
--- a/raft/raftpb/raft_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raftpb
-
-import (
- "math/bits"
- "testing"
- "unsafe"
-)
-
-func TestProtoMemorySizes(t *testing.T) {
- assert := func(size, exp uintptr, name string) {
- t.Helper()
- if size != exp {
- t.Errorf("expected size of %s proto to be %d bytes, found %d bytes", name, exp, size)
- }
- }
-
- if64Bit := func(yes, no uintptr) uintptr {
- if bits.UintSize == 64 {
- return yes
- }
- return no
- }
-
- var e Entry
- assert(unsafe.Sizeof(e), if64Bit(48, 32), "Entry")
-
- var sm SnapshotMetadata
- assert(unsafe.Sizeof(sm), if64Bit(120, 68), "SnapshotMetadata")
-
- var s Snapshot
- assert(unsafe.Sizeof(s), if64Bit(144, 80), "Snapshot")
-
- var m Message
- assert(unsafe.Sizeof(m), if64Bit(264, 168), "Message")
-
- var hs HardState
- assert(unsafe.Sizeof(hs), 24, "HardState")
-
- var cs ConfState
- assert(unsafe.Sizeof(cs), if64Bit(104, 52), "ConfState")
-
- var cc ConfChange
- assert(unsafe.Sizeof(cc), if64Bit(48, 32), "ConfChange")
-
- var ccs ConfChangeSingle
- assert(unsafe.Sizeof(ccs), if64Bit(16, 12), "ConfChangeSingle")
-
- var ccv2 ConfChangeV2
- assert(unsafe.Sizeof(ccv2), if64Bit(56, 28), "ConfChangeV2")
-}
diff --git a/raft/rafttest/doc.go b/raft/rafttest/doc.go
deleted file mode 100644
index bba9a1a3868..00000000000
--- a/raft/rafttest/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package rafttest provides functional tests for etcd's raft implementation.
-package rafttest
diff --git a/raft/rafttest/interaction_env.go b/raft/rafttest/interaction_env.go
deleted file mode 100644
index 4a6adc5a547..00000000000
--- a/raft/rafttest/interaction_env.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "bufio"
- "fmt"
- "math"
- "strings"
-
- "go.etcd.io/etcd/raft/v3"
- pb "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-// InteractionOpts groups the options for an InteractionEnv.
-type InteractionOpts struct {
- OnConfig func(*raft.Config)
-}
-
-// Node is a member of a raft group tested via an InteractionEnv.
-type Node struct {
- *raft.RawNode
- Storage
-
- Config *raft.Config
- History []pb.Snapshot
-}
-
-// InteractionEnv facilitates testing of complex interactions between the
-// members of a raft group.
-type InteractionEnv struct {
- Options *InteractionOpts
- Nodes []Node
- Messages []pb.Message // in-flight messages
-
- Output *RedirectLogger
-}
-
-// NewInteractionEnv initializes an InteractionEnv. opts may be nil.
-func NewInteractionEnv(opts *InteractionOpts) *InteractionEnv {
- if opts == nil {
- opts = &InteractionOpts{}
- }
- return &InteractionEnv{
- Options: opts,
- Output: &RedirectLogger{
- Builder: &strings.Builder{},
- },
- }
-}
-
-func (env *InteractionEnv) withIndent(f func()) {
- orig := env.Output.Builder
- env.Output.Builder = &strings.Builder{}
- f()
-
- scanner := bufio.NewScanner(strings.NewReader(env.Output.Builder.String()))
- for scanner.Scan() {
- orig.WriteString(" " + scanner.Text() + "\n")
- }
- env.Output.Builder = orig
-}
-
-// Storage is the interface used by InteractionEnv. It is composed of raft's
-// Storage interface plus access to operations that maintain the log and drive
-// the Ready handling loop.
-type Storage interface {
- raft.Storage
- SetHardState(state pb.HardState) error
- ApplySnapshot(pb.Snapshot) error
- Compact(newFirstIndex uint64) error
- Append([]pb.Entry) error
-}
-
-// defaultRaftConfig sets up a *raft.Config with reasonable testing defaults.
-// In particular, no limits are set.
-func defaultRaftConfig(id uint64, applied uint64, s raft.Storage) *raft.Config {
- return &raft.Config{
- ID: id,
- Applied: applied,
- ElectionTick: 3,
- HeartbeatTick: 1,
- Storage: s,
- MaxSizePerMsg: math.MaxUint64,
- MaxInflightMsgs: math.MaxInt32,
- }
-}
-
-func defaultEntryFormatter(b []byte) string {
- return fmt.Sprintf("%q", b)
-}
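For reference, an InteractionEnv was constructed directly, with OnConfig used to tweak the defaults produced by defaultRaftConfig above. A small hedged sketch (pre-removal import paths; the PreVote tweak is just an example of a permitted change):

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/v3"
	"go.etcd.io/etcd/raft/v3/rafttest"
)

func main() {
	// OnConfig may adjust the defaults from defaultRaftConfig, but AddNodes
	// rejects changes to Config.ID and any Logger set here.
	env := rafttest.NewInteractionEnv(&rafttest.InteractionOpts{
		OnConfig: func(cfg *raft.Config) {
			cfg.PreVote = true
		},
	})
	fmt.Println("nodes:", len(env.Nodes)) // 0 until nodes are added
}
```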
diff --git a/raft/rafttest/interaction_env_handler.go b/raft/rafttest/interaction_env_handler.go
deleted file mode 100644
index 8c8724cf18b..00000000000
--- a/raft/rafttest/interaction_env_handler.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "fmt"
- "strconv"
- "testing"
-
- "github.com/cockroachdb/datadriven"
-)
-
-// Handle is the entrypoint for data-driven interaction testing. Commands and
-// parameters are parsed from the supplied TestData. Errors during data parsing
-// are reported via the supplied *testing.T; errors from the raft nodes and the
-// storage engine are reported to the output buffer.
-func (env *InteractionEnv) Handle(t *testing.T, d datadriven.TestData) string {
- env.Output.Reset()
- var err error
- switch d.Cmd {
- case "_breakpoint":
- // This is a helper case to attach a debugger to when a problem needs
- // to be investigated in a longer test file. In such a case, add the
- // following stanza immediately before the interesting behavior starts:
- //
- // _breakpoint:
- // ----
- // ok
- //
- // and set a breakpoint on the `case` above.
- case "add-nodes":
- // Example:
- //
- // add-nodes voters=(1 2 3) learners=(4 5) index=2 content=foo
- err = env.handleAddNodes(t, d)
- case "campaign":
- // Example:
- //
- // campaign
- err = env.handleCampaign(t, d)
- case "compact":
- // Example:
- //
- // compact
- err = env.handleCompact(t, d)
- case "deliver-msgs":
- // Deliver the messages for a given recipient.
- //
- // Example:
- //
- // deliver-msgs
- err = env.handleDeliverMsgs(t, d)
- case "process-ready":
- // Example:
- //
- // process-ready 3
- err = env.handleProcessReady(t, d)
- case "log-level":
- // Set the log level. NONE disables all output, including from the test
- // harness (except errors).
- //
- // Example:
- //
- // log-level WARN
- err = env.handleLogLevel(t, d)
- case "raft-log":
- // Print the Raft log.
- //
- // Example:
- //
- // raft-log 3
- err = env.handleRaftLog(t, d)
- case "stabilize":
- // Deliver messages to and run process-ready on the set of IDs until
- // no more work is to be done.
- //
- // Example:
- //
- // stabilize 1 4
- err = env.handleStabilize(t, d)
- case "status":
- // Print Raft status.
- //
- // Example:
- //
- // status 5
- err = env.handleStatus(t, d)
- case "tick-heartbeat":
- // Tick a heartbeat interval.
- //
- // Example:
- //
- // tick-heartbeat 3
- err = env.handleTickHeartbeat(t, d)
- case "propose":
- // Propose an entry.
- //
- // Example:
- //
- // propose 1 foo
- err = env.handlePropose(t, d)
- case "propose-conf-change":
- // Propose a configuration change.
- //
- // Example:
- //
- // propose-conf-change transition=explicit
- // v1 v3 l4 r5
- //
- // Example:
- //
- // propose-conf-change v1=true
- // v5
- err = env.handleProposeConfChange(t, d)
- default:
- err = fmt.Errorf("unknown command")
- }
- if err != nil {
- env.Output.WriteString(err.Error())
- }
- // NB: the highest log level suppresses all output, including that of the
- // handlers. This comes in useful during setup which can be chatty.
- // However, errors are always logged.
- if env.Output.Len() == 0 {
- return "ok"
- }
- if env.Output.Lvl == len(lvlNames)-1 {
- if err != nil {
- return err.Error()
- }
- return "ok (quiet)"
- }
- return env.Output.String()
-}
-
-func firstAsInt(t *testing.T, d datadriven.TestData) int {
- t.Helper()
- n, err := strconv.Atoi(d.CmdArgs[0].Key)
- if err != nil {
- t.Fatal(err)
- }
- return n
-}
-
-func firstAsNodeIdx(t *testing.T, d datadriven.TestData) int {
- t.Helper()
- n := firstAsInt(t, d)
- return n - 1
-}
-
-func nodeIdxs(t *testing.T, d datadriven.TestData) []int {
- var ints []int
- for i := 0; i < len(d.CmdArgs); i++ {
- if len(d.CmdArgs[i].Vals) != 0 {
- continue
- }
- n, err := strconv.Atoi(d.CmdArgs[i].Key)
- if err != nil {
- t.Fatal(err)
- }
- ints = append(ints, n-1)
- }
- return ints
-}
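Handle above is designed to be driven by cockroachdb/datadriven test files. A hedged sketch of the usual wiring — the datadriven entry points shown here match the version vendored at the time, and the exact signatures may differ in other releases — with an example script listed in the comment:

```go
package rafttest_test

import (
	"testing"

	"github.com/cockroachdb/datadriven"
	"go.etcd.io/etcd/raft/v3/rafttest"
)

// TestInteraction walks testdata/ and feeds each command to env.Handle.
//
// A test file might contain, per the commands documented above:
//
//	add-nodes 3 voters=(1 2 3) index=2
//	----
//	ok
//
//	campaign 1
//	----
//	ok
//
//	stabilize
//	----
//	...
func TestInteraction(t *testing.T) {
	datadriven.Walk(t, "testdata", func(t *testing.T, path string) {
		env := rafttest.NewInteractionEnv(nil)
		datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string {
			return env.Handle(t, *d)
		})
	})
}
```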
diff --git a/raft/rafttest/interaction_env_handler_add_nodes.go b/raft/rafttest/interaction_env_handler_add_nodes.go
deleted file mode 100644
index 517477ef4bd..00000000000
--- a/raft/rafttest/interaction_env_handler_add_nodes.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "errors"
- "fmt"
- "reflect"
- "testing"
-
- "github.com/cockroachdb/datadriven"
- "go.etcd.io/etcd/raft/v3"
- pb "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-func (env *InteractionEnv) handleAddNodes(t *testing.T, d datadriven.TestData) error {
- n := firstAsInt(t, d)
- var snap pb.Snapshot
- for _, arg := range d.CmdArgs[1:] {
- for i := range arg.Vals {
- switch arg.Key {
- case "voters":
- var id uint64
- arg.Scan(t, i, &id)
- snap.Metadata.ConfState.Voters = append(snap.Metadata.ConfState.Voters, id)
- case "learners":
- var id uint64
- arg.Scan(t, i, &id)
- snap.Metadata.ConfState.Learners = append(snap.Metadata.ConfState.Learners, id)
- case "index":
- arg.Scan(t, i, &snap.Metadata.Index)
- case "content":
- arg.Scan(t, i, &snap.Data)
- }
- }
- }
- return env.AddNodes(n, snap)
-}
-
-type snapOverrideStorage struct {
- Storage
- snapshotOverride func() (pb.Snapshot, error)
-}
-
-func (s snapOverrideStorage) Snapshot() (pb.Snapshot, error) {
- if s.snapshotOverride != nil {
- return s.snapshotOverride()
- }
- return s.Storage.Snapshot()
-}
-
-var _ raft.Storage = snapOverrideStorage{}
-
-// AddNodes adds n new nodes initialized from the given snapshot (which may be
-// empty). They will be assigned consecutive IDs.
-func (env *InteractionEnv) AddNodes(n int, snap pb.Snapshot) error {
- bootstrap := !reflect.DeepEqual(snap, pb.Snapshot{})
- for i := 0; i < n; i++ {
- id := uint64(1 + len(env.Nodes))
- s := snapOverrideStorage{
- Storage: raft.NewMemoryStorage(),
- // When you ask for a snapshot, you get the most recent snapshot.
- //
- // TODO(tbg): this is sort of clunky, but MemoryStorage itself will
- // give you some fixed snapshot and also the snapshot changes
- // whenever you compact the logs and vice versa, so it's all a bit
- // awkward to use.
- snapshotOverride: func() (pb.Snapshot, error) {
- snaps := env.Nodes[int(id-1)].History
- return snaps[len(snaps)-1], nil
- },
- }
- if bootstrap {
- // NB: we could make this work with 1, but MemoryStorage just
- // doesn't play well with that and it's not a loss of generality.
- if snap.Metadata.Index <= 1 {
- return errors.New("index must be specified as > 1 due to bootstrap")
- }
- snap.Metadata.Term = 1
- if err := s.ApplySnapshot(snap); err != nil {
- return err
- }
- fi, err := s.FirstIndex()
- if err != nil {
- return err
- }
-			// At the time of writing, and for *MemoryStorage, applying a
-			// snapshot also truncates the log appropriately, but this could
-			// differ for other storage engines.
- if exp := snap.Metadata.Index + 1; fi != exp {
- return fmt.Errorf("failed to establish first index %d; got %d", exp, fi)
- }
- }
- cfg := defaultRaftConfig(id, snap.Metadata.Index, s)
- if env.Options.OnConfig != nil {
- env.Options.OnConfig(cfg)
- if cfg.ID != id {
- // This could be supported but then we need to do more work
- // translating back and forth -- not worth it.
- return errors.New("OnConfig must not change the ID")
- }
- }
- if cfg.Logger != nil {
- return errors.New("OnConfig must not set Logger")
- }
- cfg.Logger = env.Output
-
- rn, err := raft.NewRawNode(cfg)
- if err != nil {
- return err
- }
-
- node := Node{
- RawNode: rn,
- // TODO(tbg): allow a more general Storage, as long as it also allows
- // us to apply snapshots, append entries, and update the HardState.
- Storage: s,
- Config: cfg,
- History: []pb.Snapshot{snap},
- }
- env.Nodes = append(env.Nodes, node)
- }
- return nil
-}
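For reference, a short sketch of what the handler above assembles when a test bootstraps a cluster. The directive syntax shown, the helper name bootstrapThreeVoters, and the pre-removal import path go.etcd.io/etcd/raft/v3/rafttest are illustrative assumptions:

```go
// Sketch: bootstrap an existing InteractionEnv with three voters, roughly what
// a directive like "add-nodes 3 voters=(1,2,3) index=2" builds above.
package rafttestexample

import (
	pb "go.etcd.io/etcd/raft/v3/raftpb"
	"go.etcd.io/etcd/raft/v3/rafttest"
)

func bootstrapThreeVoters(env *rafttest.InteractionEnv) error {
	var snap pb.Snapshot
	snap.Metadata.ConfState.Voters = []uint64{1, 2, 3}
	snap.Metadata.Index = 2 // the bootstrap path above requires an index > 1
	return env.AddNodes(3, snap)
}
```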
diff --git a/raft/rafttest/interaction_env_handler_campaign.go b/raft/rafttest/interaction_env_handler_campaign.go
deleted file mode 100644
index bde5cc42e1f..00000000000
--- a/raft/rafttest/interaction_env_handler_campaign.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "testing"
-
- "github.com/cockroachdb/datadriven"
-)
-
-func (env *InteractionEnv) handleCampaign(t *testing.T, d datadriven.TestData) error {
- idx := firstAsNodeIdx(t, d)
- return env.Campaign(t, idx)
-}
-
-// Campaign the node at the given index.
-func (env *InteractionEnv) Campaign(t *testing.T, idx int) error {
- return env.Nodes[idx].Campaign()
-}
diff --git a/raft/rafttest/interaction_env_handler_compact.go b/raft/rafttest/interaction_env_handler_compact.go
deleted file mode 100644
index 25fa1d22c91..00000000000
--- a/raft/rafttest/interaction_env_handler_compact.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "strconv"
- "testing"
-
- "github.com/cockroachdb/datadriven"
-)
-
-func (env *InteractionEnv) handleCompact(t *testing.T, d datadriven.TestData) error {
- idx := firstAsNodeIdx(t, d)
- newFirstIndex, err := strconv.ParseUint(d.CmdArgs[1].Key, 10, 64)
- if err != nil {
- return err
- }
- return env.Compact(idx, newFirstIndex)
-}
-
-// Compact truncates the log on the node at index idx so that it starts at the
-// supplied new first index.
-func (env *InteractionEnv) Compact(idx int, newFirstIndex uint64) error {
- if err := env.Nodes[idx].Compact(newFirstIndex); err != nil {
- return err
- }
- return env.RaftLog(idx)
-}
diff --git a/raft/rafttest/interaction_env_handler_deliver_msgs.go b/raft/rafttest/interaction_env_handler_deliver_msgs.go
deleted file mode 100644
index 8072e876ffd..00000000000
--- a/raft/rafttest/interaction_env_handler_deliver_msgs.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "fmt"
- "strconv"
- "testing"
-
- "github.com/cockroachdb/datadriven"
- "go.etcd.io/etcd/raft/v3"
- "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-func (env *InteractionEnv) handleDeliverMsgs(t *testing.T, d datadriven.TestData) error {
- var rs []Recipient
- for _, arg := range d.CmdArgs {
- if len(arg.Vals) == 0 {
- id, err := strconv.ParseUint(arg.Key, 10, 64)
- if err != nil {
- t.Fatal(err)
- }
- rs = append(rs, Recipient{ID: id})
- }
- for i := range arg.Vals {
- switch arg.Key {
- case "drop":
- var id uint64
- arg.Scan(t, i, &id)
- var found bool
- for _, r := range rs {
- if r.ID == id {
- found = true
- }
- }
- if found {
- t.Fatalf("can't both deliver and drop msgs to %d", id)
- }
- rs = append(rs, Recipient{ID: id, Drop: true})
- }
- }
- }
-
- if n := env.DeliverMsgs(rs...); n == 0 {
- env.Output.WriteString("no messages\n")
- }
- return nil
-}
-
-type Recipient struct {
- ID uint64
- Drop bool
-}
-
-// DeliverMsgs goes through env.Messages and, depending on the Drop flag,
-// delivers or drops messages to the specified Recipients. Returns the
-// number of messages handled (i.e. delivered or dropped). A handled message
-// is removed from env.Messages.
-func (env *InteractionEnv) DeliverMsgs(rs ...Recipient) int {
- var n int
- for _, r := range rs {
- var msgs []raftpb.Message
- msgs, env.Messages = splitMsgs(env.Messages, r.ID)
- n += len(msgs)
- for _, msg := range msgs {
- if r.Drop {
- fmt.Fprint(env.Output, "dropped: ")
- }
- fmt.Fprintln(env.Output, raft.DescribeMessage(msg, defaultEntryFormatter))
- if r.Drop {
-				// NB: it's fine to drop messages to nodes that haven't been instantiated yet,
-				// since we haven't used msg.To at this point.
- continue
- }
- toIdx := int(msg.To - 1)
- if err := env.Nodes[toIdx].Step(msg); err != nil {
- fmt.Fprintln(env.Output, err)
- }
- }
- }
- return n
-}
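A small sketch of driving DeliverMsgs directly; the drop=(…) directive form, the helper name, and the import path are illustrative, while the Recipient fields are the ones defined above:

```go
// Sketch: deliver everything queued for node 1, and drop (but log) everything
// queued for node 3 — what "deliver-msgs 1 drop=(3)" would do above.
package rafttestexample

import "go.etcd.io/etcd/raft/v3/rafttest"

func deliverAndDrop(env *rafttest.InteractionEnv) int {
	return env.DeliverMsgs(
		rafttest.Recipient{ID: 1},
		rafttest.Recipient{ID: 3, Drop: true},
	)
}
```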
diff --git a/raft/rafttest/interaction_env_handler_log_level.go b/raft/rafttest/interaction_env_handler_log_level.go
deleted file mode 100644
index 2194c9ee1a1..00000000000
--- a/raft/rafttest/interaction_env_handler_log_level.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "fmt"
- "strings"
- "testing"
-
- "github.com/cockroachdb/datadriven"
-)
-
-func (env *InteractionEnv) handleLogLevel(t *testing.T, d datadriven.TestData) error {
- return env.LogLevel(d.CmdArgs[0].Key)
-}
-
-func (env *InteractionEnv) LogLevel(name string) error {
- for i, s := range lvlNames {
- if strings.EqualFold(s, name) {
- env.Output.Lvl = i
- return nil
- }
- }
-	return fmt.Errorf("log level must be one of %v", lvlNames)
-}
diff --git a/raft/rafttest/interaction_env_handler_process_ready.go b/raft/rafttest/interaction_env_handler_process_ready.go
deleted file mode 100644
index d94ac60334f..00000000000
--- a/raft/rafttest/interaction_env_handler_process_ready.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "fmt"
- "testing"
-
- "github.com/cockroachdb/datadriven"
- "go.etcd.io/etcd/raft/v3"
- "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-func (env *InteractionEnv) handleProcessReady(t *testing.T, d datadriven.TestData) error {
- idxs := nodeIdxs(t, d)
- for _, idx := range idxs {
- var err error
- if len(idxs) > 1 {
- fmt.Fprintf(env.Output, "> %d handling Ready\n", idx+1)
- env.withIndent(func() { err = env.ProcessReady(idx) })
- } else {
- err = env.ProcessReady(idx)
- }
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// ProcessReady runs Ready handling on the node with the given index.
-func (env *InteractionEnv) ProcessReady(idx int) error {
- // TODO(tbg): Allow simulating crashes here.
- rn, s := env.Nodes[idx].RawNode, env.Nodes[idx].Storage
- rd := rn.Ready()
- env.Output.WriteString(raft.DescribeReady(rd, defaultEntryFormatter))
- // TODO(tbg): the order of operations here is not necessarily safe. See:
- // https://github.com/etcd-io/etcd/pull/10861
- if !raft.IsEmptyHardState(rd.HardState) {
- if err := s.SetHardState(rd.HardState); err != nil {
- return err
- }
- }
- if err := s.Append(rd.Entries); err != nil {
- return err
- }
- if !raft.IsEmptySnap(rd.Snapshot) {
- if err := s.ApplySnapshot(rd.Snapshot); err != nil {
- return err
- }
- }
- for _, ent := range rd.CommittedEntries {
- var update []byte
- var cs *raftpb.ConfState
- switch ent.Type {
- case raftpb.EntryConfChange:
- var cc raftpb.ConfChange
- if err := cc.Unmarshal(ent.Data); err != nil {
- return err
- }
- update = cc.Context
- cs = rn.ApplyConfChange(cc)
- case raftpb.EntryConfChangeV2:
- var cc raftpb.ConfChangeV2
- if err := cc.Unmarshal(ent.Data); err != nil {
- return err
- }
- cs = rn.ApplyConfChange(cc)
- update = cc.Context
- default:
- update = ent.Data
- }
-
- // Record the new state by starting with the current state and applying
- // the command.
- lastSnap := env.Nodes[idx].History[len(env.Nodes[idx].History)-1]
- var snap raftpb.Snapshot
- snap.Data = append(snap.Data, lastSnap.Data...)
- // NB: this hard-codes an "appender" state machine.
- snap.Data = append(snap.Data, update...)
- snap.Metadata.Index = ent.Index
- snap.Metadata.Term = ent.Term
- if cs == nil {
- sl := env.Nodes[idx].History
- cs = &sl[len(sl)-1].Metadata.ConfState
- }
- snap.Metadata.ConfState = *cs
- env.Nodes[idx].History = append(env.Nodes[idx].History, snap)
- }
-
- env.Messages = append(env.Messages, rd.Messages...)
-
- rn.Advance(rd)
- return nil
-}
diff --git a/raft/rafttest/interaction_env_handler_propose.go b/raft/rafttest/interaction_env_handler_propose.go
deleted file mode 100644
index 7e8832340cf..00000000000
--- a/raft/rafttest/interaction_env_handler_propose.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "testing"
-
- "github.com/cockroachdb/datadriven"
-)
-
-func (env *InteractionEnv) handlePropose(t *testing.T, d datadriven.TestData) error {
- idx := firstAsNodeIdx(t, d)
- if len(d.CmdArgs) != 2 || len(d.CmdArgs[1].Vals) > 0 {
- t.Fatalf("expected exactly one key with no vals: %+v", d.CmdArgs[1:])
- }
- return env.Propose(idx, []byte(d.CmdArgs[1].Key))
-}
-
-// Propose a regular entry.
-func (env *InteractionEnv) Propose(idx int, data []byte) error {
- return env.Nodes[idx].Propose(data)
-}
diff --git a/raft/rafttest/interaction_env_handler_propose_conf_change.go b/raft/rafttest/interaction_env_handler_propose_conf_change.go
deleted file mode 100644
index ddc8ffcbd08..00000000000
--- a/raft/rafttest/interaction_env_handler_propose_conf_change.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "fmt"
- "strconv"
- "testing"
-
- "github.com/cockroachdb/datadriven"
- "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-func (env *InteractionEnv) handleProposeConfChange(t *testing.T, d datadriven.TestData) error {
- idx := firstAsNodeIdx(t, d)
- var v1 bool
- transition := raftpb.ConfChangeTransitionAuto
- for _, arg := range d.CmdArgs[1:] {
- for _, val := range arg.Vals {
- switch arg.Key {
- case "v1":
- var err error
- v1, err = strconv.ParseBool(val)
- if err != nil {
- return err
- }
- case "transition":
- switch val {
- case "auto":
- transition = raftpb.ConfChangeTransitionAuto
- case "implicit":
- transition = raftpb.ConfChangeTransitionJointImplicit
- case "explicit":
- transition = raftpb.ConfChangeTransitionJointExplicit
- default:
- return fmt.Errorf("unknown transition %s", val)
- }
- default:
- return fmt.Errorf("unknown command %s", arg.Key)
- }
- }
- }
-
- ccs, err := raftpb.ConfChangesFromString(d.Input)
- if err != nil {
- return err
- }
-
- var c raftpb.ConfChangeI
- if v1 {
- if len(ccs) > 1 || transition != raftpb.ConfChangeTransitionAuto {
- return fmt.Errorf("v1 conf change can only have one operation and no transition")
- }
- c = raftpb.ConfChange{
- Type: ccs[0].Type,
- NodeID: ccs[0].NodeID,
- }
- } else {
- c = raftpb.ConfChangeV2{
- Transition: transition,
- Changes: ccs,
- }
- }
- return env.ProposeConfChange(idx, c)
-}
-
-// ProposeConfChange proposes a configuration change on the node with the given index.
-func (env *InteractionEnv) ProposeConfChange(idx int, c raftpb.ConfChangeI) error {
- return env.Nodes[idx].ProposeConfChange(c)
-}
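A sketch of the ConfChangeV2 the handler above builds for an explicit joint transition; the helper name, package name, and the choice of node IDs are illustrative:

```go
// Sketch: propose, on node 1 (index 0), an explicit joint change that adds
// node 2 as a voter — equivalent to transition=explicit plus one AddNode change.
package rafttestexample

import (
	pb "go.etcd.io/etcd/raft/v3/raftpb"
	"go.etcd.io/etcd/raft/v3/rafttest"
)

func proposeJointAdd(env *rafttest.InteractionEnv) error {
	cc := pb.ConfChangeV2{
		Transition: pb.ConfChangeTransitionJointExplicit,
		Changes: []pb.ConfChangeSingle{
			{Type: pb.ConfChangeAddNode, NodeID: 2},
		},
	}
	return env.ProposeConfChange(0, cc)
}
```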
diff --git a/raft/rafttest/interaction_env_handler_raft_log.go b/raft/rafttest/interaction_env_handler_raft_log.go
deleted file mode 100644
index 5a99e3e180d..00000000000
--- a/raft/rafttest/interaction_env_handler_raft_log.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "fmt"
- "math"
- "testing"
-
- "github.com/cockroachdb/datadriven"
- "go.etcd.io/etcd/raft/v3"
-)
-
-func (env *InteractionEnv) handleRaftLog(t *testing.T, d datadriven.TestData) error {
- idx := firstAsNodeIdx(t, d)
- return env.RaftLog(idx)
-}
-
-// RaftLog pretty-prints the raft log to the output buffer.
-func (env *InteractionEnv) RaftLog(idx int) error {
- s := env.Nodes[idx].Storage
- fi, err := s.FirstIndex()
- if err != nil {
- return err
- }
- li, err := s.LastIndex()
- if err != nil {
- return err
- }
- if li < fi {
- // TODO(tbg): this is what MemoryStorage returns, but unclear if it's
- // the "correct" thing to do.
- fmt.Fprintf(env.Output, "log is empty: first index=%d, last index=%d", fi, li)
- return nil
- }
- ents, err := s.Entries(fi, li+1, math.MaxUint64)
- if err != nil {
- return err
- }
- env.Output.WriteString(raft.DescribeEntries(ents, defaultEntryFormatter))
- return err
-}
diff --git a/raft/rafttest/interaction_env_handler_stabilize.go b/raft/rafttest/interaction_env_handler_stabilize.go
deleted file mode 100644
index 573e215f4c3..00000000000
--- a/raft/rafttest/interaction_env_handler_stabilize.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "fmt"
- "testing"
-
- "github.com/cockroachdb/datadriven"
- "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-func (env *InteractionEnv) handleStabilize(t *testing.T, d datadriven.TestData) error {
- idxs := nodeIdxs(t, d)
- return env.Stabilize(idxs...)
-}
-
-// Stabilize repeatedly runs Ready handling on, and message delivery to, the
-// set of nodes specified via the idxs slice until a fixed point is reached.
-func (env *InteractionEnv) Stabilize(idxs ...int) error {
- var nodes []Node
- for _, idx := range idxs {
- nodes = append(nodes, env.Nodes[idx])
- }
- if len(nodes) == 0 {
- nodes = env.Nodes
- }
-
- for {
- done := true
- for _, rn := range nodes {
- if rn.HasReady() {
- done = false
- idx := int(rn.Status().ID - 1)
- fmt.Fprintf(env.Output, "> %d handling Ready\n", idx+1)
- env.withIndent(func() { env.ProcessReady(idx) })
- }
- }
- for _, rn := range nodes {
- id := rn.Status().ID
- // NB: we grab the messages just to see whether to print the header.
- // DeliverMsgs will do it again.
- if msgs, _ := splitMsgs(env.Messages, id); len(msgs) > 0 {
- fmt.Fprintf(env.Output, "> %d receiving messages\n", id)
- env.withIndent(func() { env.DeliverMsgs(Recipient{ID: id}) })
- done = false
- }
- }
- if done {
- return nil
- }
- }
-}
-
-func splitMsgs(msgs []raftpb.Message, to uint64) (toMsgs []raftpb.Message, rmdr []raftpb.Message) {
- // NB: this method does not reorder messages.
- for _, msg := range msgs {
- if msg.To == to {
- toMsgs = append(toMsgs, msg)
- } else {
- rmdr = append(rmdr, msg)
- }
- }
- return toMsgs, rmdr
-}
diff --git a/raft/rafttest/interaction_env_handler_status.go b/raft/rafttest/interaction_env_handler_status.go
deleted file mode 100644
index bf5973a3b56..00000000000
--- a/raft/rafttest/interaction_env_handler_status.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "fmt"
- "testing"
-
- "github.com/cockroachdb/datadriven"
- "go.etcd.io/etcd/raft/v3/tracker"
-)
-
-func (env *InteractionEnv) handleStatus(t *testing.T, d datadriven.TestData) error {
- idx := firstAsNodeIdx(t, d)
- return env.Status(idx)
-}
-
-// Status pretty-prints the raft status for the node at the given index to the output
-// buffer.
-func (env *InteractionEnv) Status(idx int) error {
- // TODO(tbg): actually print the full status.
- st := env.Nodes[idx].Status()
- m := tracker.ProgressMap{}
- for id, pr := range st.Progress {
- pr := pr // loop-local copy
- m[id] = &pr
- }
- fmt.Fprint(env.Output, m)
- return nil
-}
diff --git a/raft/rafttest/interaction_env_handler_tick_heartbeat.go b/raft/rafttest/interaction_env_handler_tick_heartbeat.go
deleted file mode 100644
index 349ca78efad..00000000000
--- a/raft/rafttest/interaction_env_handler_tick_heartbeat.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "testing"
-
- "github.com/cockroachdb/datadriven"
-)
-
-func (env *InteractionEnv) handleTickHeartbeat(t *testing.T, d datadriven.TestData) error {
- idx := firstAsNodeIdx(t, d)
- return env.Tick(idx, env.Nodes[idx].Config.HeartbeatTick)
-}
-
-// Tick the node at the given index the given number of times.
-func (env *InteractionEnv) Tick(idx int, num int) error {
- for i := 0; i < num; i++ {
- env.Nodes[idx].Tick()
- }
- return nil
-}
diff --git a/raft/rafttest/interaction_env_logger.go b/raft/rafttest/interaction_env_logger.go
deleted file mode 100644
index 1b883d559df..00000000000
--- a/raft/rafttest/interaction_env_logger.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "fmt"
- "strings"
-
- "go.etcd.io/etcd/raft/v3"
-)
-
-type logLevels [6]string
-
-var lvlNames logLevels = [...]string{"DEBUG", "INFO", "WARN", "ERROR", "FATAL", "NONE"}
-
-type RedirectLogger struct {
- *strings.Builder
- Lvl int // 0 = DEBUG, 1 = INFO, 2 = WARNING, 3 = ERROR, 4 = FATAL, 5 = NONE
-}
-
-var _ raft.Logger = (*RedirectLogger)(nil)
-
-func (l *RedirectLogger) printf(lvl int, format string, args ...interface{}) {
- if l.Lvl <= lvl {
- fmt.Fprint(l, lvlNames[lvl], " ")
- fmt.Fprintf(l, format, args...)
- if n := len(format); n > 0 && format[n-1] != '\n' {
- l.WriteByte('\n')
- }
- }
-}
-func (l *RedirectLogger) print(lvl int, args ...interface{}) {
- if l.Lvl <= lvl {
- fmt.Fprint(l, lvlNames[lvl], " ")
- fmt.Fprintln(l, args...)
- }
-}
-
-func (l *RedirectLogger) Debug(v ...interface{}) {
- l.print(0, v...)
-}
-
-func (l *RedirectLogger) Debugf(format string, v ...interface{}) {
- l.printf(0, format, v...)
-}
-
-func (l *RedirectLogger) Info(v ...interface{}) {
- l.print(1, v...)
-}
-
-func (l *RedirectLogger) Infof(format string, v ...interface{}) {
- l.printf(1, format, v...)
-}
-
-func (l *RedirectLogger) Warning(v ...interface{}) {
- l.print(2, v...)
-}
-
-func (l *RedirectLogger) Warningf(format string, v ...interface{}) {
- l.printf(2, format, v...)
-}
-
-func (l *RedirectLogger) Error(v ...interface{}) {
- l.print(3, v...)
-}
-
-func (l *RedirectLogger) Errorf(format string, v ...interface{}) {
- l.printf(3, format, v...)
-}
-
-func (l *RedirectLogger) Fatal(v ...interface{}) {
- l.print(4, v...)
-}
-
-func (l *RedirectLogger) Fatalf(format string, v ...interface{}) {
-
- l.printf(4, format, v...)
-}
-
-func (l *RedirectLogger) Panic(v ...interface{}) {
- l.print(4, v...)
-}
-
-func (l *RedirectLogger) Panicf(format string, v ...interface{}) {
- l.printf(4, format, v...)
-}
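Since RedirectLogger embeds a strings.Builder and implements raft.Logger, it can be handed to any raft.Config to capture log output in tests. A minimal sketch, assuming the pre-removal import paths and illustrative helper names:

```go
// Sketch: capture raft logging at WARN and above into an in-memory buffer.
package rafttestexample

import (
	"strings"

	"go.etcd.io/etcd/raft/v3"
	"go.etcd.io/etcd/raft/v3/rafttest"
)

func newBufferedLogger() *rafttest.RedirectLogger {
	return &rafttest.RedirectLogger{
		Builder: &strings.Builder{}, // embedded *strings.Builder collects output
		Lvl:     2,                  // WARN; see lvlNames above
	}
}

func attachLogger(cfg *raft.Config) *rafttest.RedirectLogger {
	l := newBufferedLogger()
	cfg.Logger = l // RedirectLogger satisfies raft.Logger
	return l
}
```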
diff --git a/raft/rafttest/network.go b/raft/rafttest/network.go
deleted file mode 100644
index 0e86bf3a548..00000000000
--- a/raft/rafttest/network.go
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "math/rand"
- "sync"
- "time"
-
- "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-// a network interface
-type iface interface {
- send(m raftpb.Message)
- recv() chan raftpb.Message
- disconnect()
- connect()
-}
-
-type raftNetwork struct {
- rand *rand.Rand
- mu sync.Mutex
- disconnected map[uint64]bool
- dropmap map[conn]float64
- delaymap map[conn]delay
- recvQueues map[uint64]chan raftpb.Message
-}
-
-type conn struct {
- from, to uint64
-}
-
-type delay struct {
- d time.Duration
- rate float64
-}
-
-func newRaftNetwork(nodes ...uint64) *raftNetwork {
- pn := &raftNetwork{
- rand: rand.New(rand.NewSource(1)),
- recvQueues: make(map[uint64]chan raftpb.Message),
- dropmap: make(map[conn]float64),
- delaymap: make(map[conn]delay),
- disconnected: make(map[uint64]bool),
- }
-
- for _, n := range nodes {
- pn.recvQueues[n] = make(chan raftpb.Message, 1024)
- }
- return pn
-}
-
-func (rn *raftNetwork) nodeNetwork(id uint64) iface {
- return &nodeNetwork{id: id, raftNetwork: rn}
-}
-
-func (rn *raftNetwork) send(m raftpb.Message) {
- rn.mu.Lock()
- to := rn.recvQueues[m.To]
- if rn.disconnected[m.To] {
- to = nil
- }
- drop := rn.dropmap[conn{m.From, m.To}]
- dl := rn.delaymap[conn{m.From, m.To}]
- rn.mu.Unlock()
-
- if to == nil {
- return
- }
- if drop != 0 && rn.rand.Float64() < drop {
- return
- }
-	// TODO: shall we apply the delay without blocking the send call?
- if dl.d != 0 && rn.rand.Float64() < dl.rate {
- rd := rn.rand.Int63n(int64(dl.d))
- time.Sleep(time.Duration(rd))
- }
-
-	// use marshal/unmarshal to copy the message to avoid data races.
- b, err := m.Marshal()
- if err != nil {
- panic(err)
- }
-
- var cm raftpb.Message
- err = cm.Unmarshal(b)
- if err != nil {
- panic(err)
- }
-
- select {
- case to <- cm:
- default:
- // drop messages when the receiver queue is full.
- }
-}
-
-func (rn *raftNetwork) recvFrom(from uint64) chan raftpb.Message {
- rn.mu.Lock()
- fromc := rn.recvQueues[from]
- if rn.disconnected[from] {
- fromc = nil
- }
- rn.mu.Unlock()
-
- return fromc
-}
-
-func (rn *raftNetwork) drop(from, to uint64, rate float64) {
- rn.mu.Lock()
- defer rn.mu.Unlock()
- rn.dropmap[conn{from, to}] = rate
-}
-
-func (rn *raftNetwork) delay(from, to uint64, d time.Duration, rate float64) {
- rn.mu.Lock()
- defer rn.mu.Unlock()
- rn.delaymap[conn{from, to}] = delay{d, rate}
-}
-
-func (rn *raftNetwork) disconnect(id uint64) {
- rn.mu.Lock()
- defer rn.mu.Unlock()
- rn.disconnected[id] = true
-}
-
-func (rn *raftNetwork) connect(id uint64) {
- rn.mu.Lock()
- defer rn.mu.Unlock()
- rn.disconnected[id] = false
-}
-
-type nodeNetwork struct {
- id uint64
- *raftNetwork
-}
-
-func (nt *nodeNetwork) connect() {
- nt.raftNetwork.connect(nt.id)
-}
-
-func (nt *nodeNetwork) disconnect() {
- nt.raftNetwork.disconnect(nt.id)
-}
-
-func (nt *nodeNetwork) send(m raftpb.Message) {
- nt.raftNetwork.send(m)
-}
-
-func (nt *nodeNetwork) recv() chan raftpb.Message {
- return nt.recvFrom(nt.id)
-}
diff --git a/raft/rafttest/network_test.go b/raft/rafttest/network_test.go
deleted file mode 100644
index 39447476e4d..00000000000
--- a/raft/rafttest/network_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "testing"
- "time"
-
- "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-func TestNetworkDrop(t *testing.T) {
-	// drop around 10% of the messages
- sent := 1000
- droprate := 0.1
- nt := newRaftNetwork(1, 2)
- nt.drop(1, 2, droprate)
- for i := 0; i < sent; i++ {
- nt.send(raftpb.Message{From: 1, To: 2})
- }
-
- c := nt.recvFrom(2)
-
- received := 0
- done := false
- for !done {
- select {
- case <-c:
- received++
- default:
- done = true
- }
- }
-
- drop := sent - received
- if drop > int((droprate+0.1)*float64(sent)) || drop < int((droprate-0.1)*float64(sent)) {
- t.Errorf("drop = %d, want around %.2f", drop, droprate*float64(sent))
- }
-}
-
-func TestNetworkDelay(t *testing.T) {
- sent := 1000
- delay := time.Millisecond
- delayrate := 0.1
- nt := newRaftNetwork(1, 2)
-
- nt.delay(1, 2, delay, delayrate)
- var total time.Duration
- for i := 0; i < sent; i++ {
- s := time.Now()
- nt.send(raftpb.Message{From: 1, To: 2})
- total += time.Since(s)
- }
-
- w := time.Duration(float64(sent)*delayrate/2) * delay
- // there is some overhead in the send call since it generates random numbers.
- if total < w {
- t.Errorf("total = %v, want > %v", total, w)
- }
-}
diff --git a/raft/rafttest/node.go b/raft/rafttest/node.go
deleted file mode 100644
index cb1a1241fcd..00000000000
--- a/raft/rafttest/node.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "context"
- "log"
- "math/rand"
- "sync"
- "time"
-
- "go.etcd.io/etcd/raft/v3"
- "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-type node struct {
- raft.Node
- id uint64
- iface iface
- stopc chan struct{}
- pausec chan bool
-
- // stable
- storage *raft.MemoryStorage
-
- mu sync.Mutex // guards state
- state raftpb.HardState
-}
-
-func startNode(id uint64, peers []raft.Peer, iface iface) *node {
- st := raft.NewMemoryStorage()
- c := &raft.Config{
- ID: id,
- ElectionTick: 10,
- HeartbeatTick: 1,
- Storage: st,
- MaxSizePerMsg: 1024 * 1024,
- MaxInflightMsgs: 256,
- MaxUncommittedEntriesSize: 1 << 30,
- }
- rn := raft.StartNode(c, peers)
- n := &node{
- Node: rn,
- id: id,
- storage: st,
- iface: iface,
- pausec: make(chan bool),
- }
- n.start()
- return n
-}
-
-func (n *node) start() {
- n.stopc = make(chan struct{})
- ticker := time.NewTicker(5 * time.Millisecond).C
-
- go func() {
- for {
- select {
- case <-ticker:
- n.Tick()
- case rd := <-n.Ready():
- if !raft.IsEmptyHardState(rd.HardState) {
- n.mu.Lock()
- n.state = rd.HardState
- n.mu.Unlock()
- n.storage.SetHardState(n.state)
- }
- n.storage.Append(rd.Entries)
- time.Sleep(time.Millisecond)
-
- // simulate async send, more like real world...
- for _, m := range rd.Messages {
- mlocal := m
- go func() {
- time.Sleep(time.Duration(rand.Int63n(10)) * time.Millisecond)
- n.iface.send(mlocal)
- }()
- }
- n.Advance()
- case m := <-n.iface.recv():
- go n.Step(context.TODO(), m)
- case <-n.stopc:
- n.Stop()
- log.Printf("raft.%d: stop", n.id)
- n.Node = nil
- close(n.stopc)
- return
- case p := <-n.pausec:
- recvms := make([]raftpb.Message, 0)
- for p {
- select {
- case m := <-n.iface.recv():
- recvms = append(recvms, m)
- case p = <-n.pausec:
- }
- }
- // step all pending messages
- for _, m := range recvms {
- n.Step(context.TODO(), m)
- }
- }
- }
- }()
-}
-
-// stop stops the node. Stopping a stopped node might panic.
-// All in-memory state of the node is discarded.
-// All stable state MUST be left unchanged.
-func (n *node) stop() {
- n.iface.disconnect()
- n.stopc <- struct{}{}
- // wait for the shutdown
- <-n.stopc
-}
-
-// restart restarts the node. Restarting a node that is already started
-// blocks and might affect a future stop operation.
-func (n *node) restart() {
- // wait for the shutdown
- <-n.stopc
- c := &raft.Config{
- ID: n.id,
- ElectionTick: 10,
- HeartbeatTick: 1,
- Storage: n.storage,
- MaxSizePerMsg: 1024 * 1024,
- MaxInflightMsgs: 256,
- MaxUncommittedEntriesSize: 1 << 30,
- }
- n.Node = raft.RestartNode(c)
- n.start()
- n.iface.connect()
-}
-
-// pause pauses the node.
-// The paused node buffers received messages and replays
-// all of them when it resumes.
-func (n *node) pause() {
- n.pausec <- true
-}
-
-// resume resumes the paused node.
-func (n *node) resume() {
- n.pausec <- false
-}
diff --git a/raft/rafttest/node_bench_test.go b/raft/rafttest/node_bench_test.go
deleted file mode 100644
index 6d69003a629..00000000000
--- a/raft/rafttest/node_bench_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "context"
- "testing"
- "time"
-
- "go.etcd.io/etcd/raft/v3"
-)
-
-func BenchmarkProposal3Nodes(b *testing.B) {
- peers := []raft.Peer{{ID: 1, Context: nil}, {ID: 2, Context: nil}, {ID: 3, Context: nil}}
- nt := newRaftNetwork(1, 2, 3)
-
- nodes := make([]*node, 0)
-
- for i := 1; i <= 3; i++ {
- n := startNode(uint64(i), peers, nt.nodeNetwork(uint64(i)))
- nodes = append(nodes, n)
- }
- // get ready and warm up
- time.Sleep(50 * time.Millisecond)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- nodes[0].Propose(context.TODO(), []byte("somedata"))
- }
-
- for _, n := range nodes {
- if n.state.Commit != uint64(b.N+4) {
- continue
- }
- }
- b.StopTimer()
-
- for _, n := range nodes {
- n.stop()
- }
-}
diff --git a/raft/rafttest/node_test.go b/raft/rafttest/node_test.go
deleted file mode 100644
index caa0b570b1c..00000000000
--- a/raft/rafttest/node_test.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafttest
-
-import (
- "context"
- "testing"
- "time"
-
- "go.etcd.io/etcd/raft/v3"
-)
-
-func TestBasicProgress(t *testing.T) {
- peers := []raft.Peer{{ID: 1, Context: nil}, {ID: 2, Context: nil}, {ID: 3, Context: nil}, {ID: 4, Context: nil}, {ID: 5, Context: nil}}
- nt := newRaftNetwork(1, 2, 3, 4, 5)
-
- nodes := make([]*node, 0)
-
- for i := 1; i <= 5; i++ {
- n := startNode(uint64(i), peers, nt.nodeNetwork(uint64(i)))
- nodes = append(nodes, n)
- }
-
- waitLeader(nodes)
-
- for i := 0; i < 100; i++ {
- nodes[0].Propose(context.TODO(), []byte("somedata"))
- }
-
- if !waitCommitConverge(nodes, 100) {
- t.Errorf("commits failed to converge!")
- }
-
- for _, n := range nodes {
- n.stop()
- }
-}
-
-func TestRestart(t *testing.T) {
- peers := []raft.Peer{{ID: 1, Context: nil}, {ID: 2, Context: nil}, {ID: 3, Context: nil}, {ID: 4, Context: nil}, {ID: 5, Context: nil}}
- nt := newRaftNetwork(1, 2, 3, 4, 5)
-
- nodes := make([]*node, 0)
-
- for i := 1; i <= 5; i++ {
- n := startNode(uint64(i), peers, nt.nodeNetwork(uint64(i)))
- nodes = append(nodes, n)
- }
-
- l := waitLeader(nodes)
- k1, k2 := (l+1)%5, (l+2)%5
-
- for i := 0; i < 30; i++ {
- nodes[l].Propose(context.TODO(), []byte("somedata"))
- }
- nodes[k1].stop()
- for i := 0; i < 30; i++ {
- nodes[(l+3)%5].Propose(context.TODO(), []byte("somedata"))
- }
- nodes[k2].stop()
- for i := 0; i < 30; i++ {
- nodes[(l+4)%5].Propose(context.TODO(), []byte("somedata"))
- }
- nodes[k2].restart()
- for i := 0; i < 30; i++ {
- nodes[l].Propose(context.TODO(), []byte("somedata"))
- }
- nodes[k1].restart()
-
- if !waitCommitConverge(nodes, 120) {
- t.Errorf("commits failed to converge!")
- }
-
- for _, n := range nodes {
- n.stop()
- }
-}
-
-func TestPause(t *testing.T) {
- peers := []raft.Peer{{ID: 1, Context: nil}, {ID: 2, Context: nil}, {ID: 3, Context: nil}, {ID: 4, Context: nil}, {ID: 5, Context: nil}}
- nt := newRaftNetwork(1, 2, 3, 4, 5)
-
- nodes := make([]*node, 0)
-
- for i := 1; i <= 5; i++ {
- n := startNode(uint64(i), peers, nt.nodeNetwork(uint64(i)))
- nodes = append(nodes, n)
- }
-
- waitLeader(nodes)
-
- for i := 0; i < 30; i++ {
- nodes[0].Propose(context.TODO(), []byte("somedata"))
- }
- nodes[1].pause()
- for i := 0; i < 30; i++ {
- nodes[0].Propose(context.TODO(), []byte("somedata"))
- }
- nodes[2].pause()
- for i := 0; i < 30; i++ {
- nodes[0].Propose(context.TODO(), []byte("somedata"))
- }
- nodes[2].resume()
- for i := 0; i < 30; i++ {
- nodes[0].Propose(context.TODO(), []byte("somedata"))
- }
- nodes[1].resume()
-
- if !waitCommitConverge(nodes, 120) {
- t.Errorf("commits failed to converge!")
- }
-
- for _, n := range nodes {
- n.stop()
- }
-}
-
-func waitLeader(ns []*node) int {
- var l map[uint64]struct{}
- var lindex int
-
- for {
- l = make(map[uint64]struct{})
-
- for i, n := range ns {
- lead := n.Status().SoftState.Lead
- if lead != 0 {
- l[lead] = struct{}{}
- if n.id == lead {
- lindex = i
- }
- }
- }
-
- if len(l) == 1 {
- return lindex
- }
- }
-}
-
-func waitCommitConverge(ns []*node, target uint64) bool {
- var c map[uint64]struct{}
-
- for i := 0; i < 50; i++ {
- c = make(map[uint64]struct{})
- var good int
-
- for _, n := range ns {
- commit := n.Node.Status().HardState.Commit
- c[commit] = struct{}{}
- if commit > target {
- good++
- }
- }
-
- if len(c) == 1 && good == len(ns) {
- return true
- }
- time.Sleep(100 * time.Millisecond)
- }
-
- return false
-}
diff --git a/raft/rawnode.go b/raft/rawnode.go
deleted file mode 100644
index 4111d029dd6..00000000000
--- a/raft/rawnode.go
+++ /dev/null
@@ -1,241 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "errors"
-
- pb "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/raft/v3/tracker"
-)
-
-// ErrStepLocalMsg is returned when trying to step a local raft message.
-var ErrStepLocalMsg = errors.New("raft: cannot step raft local message")
-
-// ErrStepPeerNotFound is returned when trying to step a response message
-// but no peer can be found in raft.prs for that node.
-var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found")
-
-// RawNode is a thread-unsafe Node.
-// The methods of this struct correspond to the methods of Node and are described
-// more fully there.
-type RawNode struct {
- raft *raft
- prevSoftSt *SoftState
- prevHardSt pb.HardState
-}
-
-// NewRawNode instantiates a RawNode from the given configuration.
-//
-// See Bootstrap() for bootstrapping an initial state; this replaces the former
-// 'peers' argument to this method (with identical behavior). However, it is
-// recommended that, instead of calling Bootstrap, applications bootstrap their
-// state manually by setting up a Storage that has a first index > 1 and which
-// stores the desired ConfState as its InitialState.
-func NewRawNode(config *Config) (*RawNode, error) {
- r := newRaft(config)
- rn := &RawNode{
- raft: r,
- }
- rn.prevSoftSt = r.softState()
- rn.prevHardSt = r.hardState()
- return rn, nil
-}
-
-// Tick advances the internal logical clock by a single tick.
-func (rn *RawNode) Tick() {
- rn.raft.tick()
-}
-
-// TickQuiesced advances the internal logical clock by a single tick without
-// performing any other state machine processing. It allows the caller to avoid
-// periodic heartbeats and elections when all of the peers in a Raft group are
-// known to be at the same state. Expected usage is to periodically invoke Tick
-// or TickQuiesced depending on whether the group is "active" or "quiesced".
-//
-// WARNING: Be very careful about using this method as it subverts the Raft
-// state machine. You should probably be using Tick instead.
-func (rn *RawNode) TickQuiesced() {
- rn.raft.electionElapsed++
-}
-
-// Campaign causes this RawNode to transition to candidate state.
-func (rn *RawNode) Campaign() error {
- return rn.raft.Step(pb.Message{
- Type: pb.MsgHup,
- })
-}
-
-// Propose proposes data be appended to the raft log.
-func (rn *RawNode) Propose(data []byte) error {
- return rn.raft.Step(pb.Message{
- Type: pb.MsgProp,
- From: rn.raft.id,
- Entries: []pb.Entry{
- {Data: data},
- }})
-}
-
-// ProposeConfChange proposes a config change. See (Node).ProposeConfChange for
-// details.
-func (rn *RawNode) ProposeConfChange(cc pb.ConfChangeI) error {
- m, err := confChangeToMsg(cc)
- if err != nil {
- return err
- }
- return rn.raft.Step(m)
-}
-
-// ApplyConfChange applies a config change to the local node. The app must call
-// this when it applies a configuration change, except when it decides to reject
-// the configuration change, in which case no call must take place.
-func (rn *RawNode) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState {
- cs := rn.raft.applyConfChange(cc.AsV2())
- return &cs
-}
-
-// Step advances the state machine using the given message.
-func (rn *RawNode) Step(m pb.Message) error {
-	// ignore unexpected local messages received over the network
- if IsLocalMsg(m.Type) {
- return ErrStepLocalMsg
- }
- if pr := rn.raft.prs.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) {
- return rn.raft.Step(m)
- }
- return ErrStepPeerNotFound
-}
-
-// Ready returns the outstanding work that the application needs to handle. This
-// includes appending and applying entries or a snapshot, updating the HardState,
-// and sending messages. The returned Ready() *must* be handled and subsequently
-// passed back via Advance().
-func (rn *RawNode) Ready() Ready {
- rd := rn.readyWithoutAccept()
- rn.acceptReady(rd)
- return rd
-}
-
-// readyWithoutAccept returns a Ready. This is a read-only operation, i.e. there
-// is no obligation that the Ready must be handled.
-func (rn *RawNode) readyWithoutAccept() Ready {
- return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt)
-}
-
-// acceptReady is called when the consumer of the RawNode has decided to go
-// ahead and handle a Ready. Nothing must alter the state of the RawNode between
-// this call and the prior call to Ready().
-func (rn *RawNode) acceptReady(rd Ready) {
- if rd.SoftState != nil {
- rn.prevSoftSt = rd.SoftState
- }
- if len(rd.ReadStates) != 0 {
- rn.raft.readStates = nil
- }
- rn.raft.msgs = nil
-}
-
-// HasReady is called when the RawNode user needs to check whether any Ready is pending.
-// The checking logic in this method should be consistent with Ready.containsUpdates().
-func (rn *RawNode) HasReady() bool {
- r := rn.raft
- if !r.softState().equal(rn.prevSoftSt) {
- return true
- }
- if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) {
- return true
- }
- if r.raftLog.hasPendingSnapshot() {
- return true
- }
- if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() {
- return true
- }
- if len(r.readStates) != 0 {
- return true
- }
- return false
-}
-
-// Advance notifies the RawNode that the application has applied and saved progress in the
-// last Ready results.
-func (rn *RawNode) Advance(rd Ready) {
- if !IsEmptyHardState(rd.HardState) {
- rn.prevHardSt = rd.HardState
- }
- rn.raft.advance(rd)
-}
-
-// Status returns the current status of the given group. This allocates, see
-// BasicStatus and WithProgress for allocation-friendlier choices.
-func (rn *RawNode) Status() Status {
- status := getStatus(rn.raft)
- return status
-}
-
-// BasicStatus returns a BasicStatus. Notably this does not contain the
-// Progress map; see WithProgress for an allocation-free way to inspect it.
-func (rn *RawNode) BasicStatus() BasicStatus {
- return getBasicStatus(rn.raft)
-}
-
-// ProgressType indicates the type of replica a Progress corresponds to.
-type ProgressType byte
-
-const (
- // ProgressTypePeer accompanies a Progress for a regular peer replica.
- ProgressTypePeer ProgressType = iota
- // ProgressTypeLearner accompanies a Progress for a learner replica.
- ProgressTypeLearner
-)
-
-// WithProgress is a helper to introspect the Progress for this node and its
-// peers.
-func (rn *RawNode) WithProgress(visitor func(id uint64, typ ProgressType, pr tracker.Progress)) {
- rn.raft.prs.Visit(func(id uint64, pr *tracker.Progress) {
- typ := ProgressTypePeer
- if pr.IsLearner {
- typ = ProgressTypeLearner
- }
- p := *pr
- p.Inflights = nil
- visitor(id, typ, p)
- })
-}
-
-// ReportUnreachable reports that the given node is not reachable for the last send.
-func (rn *RawNode) ReportUnreachable(id uint64) {
- _ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id})
-}
-
-// ReportSnapshot reports the status of the sent snapshot.
-func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) {
- rej := status == SnapshotFailure
-
- _ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej})
-}
-
-// TransferLeader tries to transfer leadership to the given transferee.
-func (rn *RawNode) TransferLeader(transferee uint64) {
- _ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee})
-}
-
-// ReadIndex requests a read state. The read state will be set in ready.
-// Read State has a read index. Once the application advances further than the read
-// index, any linearizable read requests issued before the read request can be
-// processed safely. The read state will have the same rctx attached.
-func (rn *RawNode) ReadIndex(rctx []byte) {
- _ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
-}
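The comments above spell out the Ready/Advance contract, but rawnode.go itself contains no end-to-end usage. Below is a minimal, hedged sketch of driving a single-voter RawNode: it bootstraps storage the way the rafttest harness does, campaigns, proposes one entry, and persists state in the same order as the ProcessReady handler earlier in this diff. The package and function names are illustrative, and the import paths assume the pre-removal layout.

```go
// Sketch: drive a single-voter RawNode through one Campaign/Propose cycle.
package rawnodeexample

import (
	"log"

	"go.etcd.io/etcd/raft/v3"
	pb "go.etcd.io/etcd/raft/v3/raftpb"
)

func runOnce() error {
	s := raft.NewMemoryStorage()
	// Manual bootstrap: a snapshot whose ConfState names the single voter.
	if err := s.ApplySnapshot(pb.Snapshot{Metadata: pb.SnapshotMetadata{
		ConfState: pb.ConfState{Voters: []uint64{1}},
		Index:     1,
		Term:      1,
	}}); err != nil {
		return err
	}
	cfg := &raft.Config{
		ID:              1,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   1024 * 1024,
		MaxInflightMsgs: 256,
	}
	rn, err := raft.NewRawNode(cfg)
	if err != nil {
		return err
	}
	if err := rn.Campaign(); err != nil { // a single voter becomes leader immediately
		return err
	}
	if err := rn.Propose([]byte("somedata")); err != nil {
		return err
	}
	for rn.HasReady() {
		rd := rn.Ready()
		// Persist HardState and entries before advancing.
		if !raft.IsEmptyHardState(rd.HardState) {
			if err := s.SetHardState(rd.HardState); err != nil {
				return err
			}
		}
		if err := s.Append(rd.Entries); err != nil {
			return err
		}
		if !raft.IsEmptySnap(rd.Snapshot) {
			if err := s.ApplySnapshot(rd.Snapshot); err != nil {
				return err
			}
		}
		for _, ent := range rd.CommittedEntries {
			log.Printf("committed index=%d data=%q", ent.Index, ent.Data)
		}
		// A multi-node group would also send rd.Messages to peers here.
		rn.Advance(rd)
	}
	return nil
}
```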
diff --git a/raft/rawnode_test.go b/raft/rawnode_test.go
deleted file mode 100644
index 898b0f12c3e..00000000000
--- a/raft/rawnode_test.go
+++ /dev/null
@@ -1,1107 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "bytes"
- "context"
- "fmt"
- "math"
- "reflect"
- "testing"
-
- "go.etcd.io/etcd/raft/v3/quorum"
- pb "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/raft/v3/tracker"
-)
-
-// rawNodeAdapter is essentially a lint that makes sure that RawNode implements
-// "most of" Node. The exceptions (some of which are easy to fix) are listed
-// below.
-type rawNodeAdapter struct {
- *RawNode
-}
-
-var _ Node = (*rawNodeAdapter)(nil)
-
-// Node specifies lead, which is pointless and can just be filled in.
-func (a *rawNodeAdapter) TransferLeadership(ctx context.Context, lead, transferee uint64) {
- a.RawNode.TransferLeader(transferee)
-}
-
-// Node has a goroutine, RawNode doesn't need this.
-func (a *rawNodeAdapter) Stop() {}
-
-// RawNode returns a *Status.
-func (a *rawNodeAdapter) Status() Status { return a.RawNode.Status() }
-
-// RawNode takes a Ready. It doesn't really have to do that I think? It can hold on
-// to it internally. But maybe that approach is frail.
-func (a *rawNodeAdapter) Advance() { a.RawNode.Advance(Ready{}) }
-
-// RawNode returns a Ready, not a chan of one.
-func (a *rawNodeAdapter) Ready() <-chan Ready { return nil }
-
-// Node takes more contexts. Easy enough to fix.
-
-func (a *rawNodeAdapter) Campaign(context.Context) error { return a.RawNode.Campaign() }
-func (a *rawNodeAdapter) ReadIndex(_ context.Context, rctx []byte) error {
- a.RawNode.ReadIndex(rctx)
- // RawNode swallowed the error in ReadIndex, it probably should not do that.
- return nil
-}
-func (a *rawNodeAdapter) Step(_ context.Context, m pb.Message) error { return a.RawNode.Step(m) }
-func (a *rawNodeAdapter) Propose(_ context.Context, data []byte) error {
- return a.RawNode.Propose(data)
-}
-func (a *rawNodeAdapter) ProposeConfChange(_ context.Context, cc pb.ConfChangeI) error {
- return a.RawNode.ProposeConfChange(cc)
-}
-
-// TestRawNodeStep ensures that RawNode.Step ignores local messages.
-func TestRawNodeStep(t *testing.T) {
- for i, msgn := range pb.MessageType_name {
- t.Run(msgn, func(t *testing.T) {
- s := NewMemoryStorage()
- s.SetHardState(pb.HardState{Term: 1, Commit: 1})
- s.Append([]pb.Entry{{Term: 1, Index: 1}})
- if err := s.ApplySnapshot(pb.Snapshot{Metadata: pb.SnapshotMetadata{
- ConfState: pb.ConfState{
- Voters: []uint64{1},
- },
- Index: 1,
- Term: 1,
- }}); err != nil {
- t.Fatal(err)
- }
- // Append an empty entry to make sure the non-local messages (like
- // vote requests) are ignored and don't trigger assertions.
- rawNode, err := NewRawNode(newTestConfig(1, 10, 1, s))
- if err != nil {
- t.Fatal(err)
- }
- msgt := pb.MessageType(i)
- err = rawNode.Step(pb.Message{Type: msgt})
- // LocalMsg should be ignored.
- if IsLocalMsg(msgt) {
- if err != ErrStepLocalMsg {
- t.Errorf("%d: step should ignore %s", msgt, msgn)
- }
- }
- })
- }
-}
-
-// TestNodeStepUnblock from node_test.go has no equivalent in rawNode because there is
-// no goroutine in RawNode.
-
-// TestRawNodeProposeAndConfChange tests the configuration change mechanism. Each
-// test case sends a configuration change which is either simple or joint, verifies
-// that it applies and that the resulting ConfState matches expectations, and for
-// joint configurations makes sure that they are exited successfully.
-func TestRawNodeProposeAndConfChange(t *testing.T) {
- testCases := []struct {
- cc pb.ConfChangeI
- exp pb.ConfState
- exp2 *pb.ConfState
- }{
- // V1 config change.
- {
- pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: 2},
- pb.ConfState{Voters: []uint64{1, 2}},
- nil,
- },
- // Proposing the same as a V2 change works just the same, without entering
- // a joint config.
- {
- pb.ConfChangeV2{Changes: []pb.ConfChangeSingle{
- {Type: pb.ConfChangeAddNode, NodeID: 2},
- },
- },
- pb.ConfState{Voters: []uint64{1, 2}},
- nil,
- },
- // Ditto if we add it as a learner instead.
- {
- pb.ConfChangeV2{Changes: []pb.ConfChangeSingle{
- {Type: pb.ConfChangeAddLearnerNode, NodeID: 2},
- },
- },
- pb.ConfState{Voters: []uint64{1}, Learners: []uint64{2}},
- nil,
- },
- // We can ask explicitly for joint consensus if we want it.
- {
- pb.ConfChangeV2{Changes: []pb.ConfChangeSingle{
- {Type: pb.ConfChangeAddLearnerNode, NodeID: 2},
- },
- Transition: pb.ConfChangeTransitionJointExplicit,
- },
- pb.ConfState{Voters: []uint64{1}, VotersOutgoing: []uint64{1}, Learners: []uint64{2}},
- &pb.ConfState{Voters: []uint64{1}, Learners: []uint64{2}},
- },
- // Ditto, but with implicit transition (the harness checks this).
- {
- pb.ConfChangeV2{Changes: []pb.ConfChangeSingle{
- {Type: pb.ConfChangeAddLearnerNode, NodeID: 2},
- },
- Transition: pb.ConfChangeTransitionJointImplicit,
- },
- pb.ConfState{
- Voters: []uint64{1}, VotersOutgoing: []uint64{1}, Learners: []uint64{2},
- AutoLeave: true,
- },
- &pb.ConfState{Voters: []uint64{1}, Learners: []uint64{2}},
- },
- // Add a new node and demote n1. This exercises the interesting case in
- // which we really need joint config changes and also need LearnersNext.
- {
- pb.ConfChangeV2{Changes: []pb.ConfChangeSingle{
- {NodeID: 2, Type: pb.ConfChangeAddNode},
- {NodeID: 1, Type: pb.ConfChangeAddLearnerNode},
- {NodeID: 3, Type: pb.ConfChangeAddLearnerNode},
- },
- },
- pb.ConfState{
- Voters: []uint64{2},
- VotersOutgoing: []uint64{1},
- Learners: []uint64{3},
- LearnersNext: []uint64{1},
- AutoLeave: true,
- },
- &pb.ConfState{Voters: []uint64{2}, Learners: []uint64{1, 3}},
- },
- // Ditto explicit.
- {
- pb.ConfChangeV2{Changes: []pb.ConfChangeSingle{
- {NodeID: 2, Type: pb.ConfChangeAddNode},
- {NodeID: 1, Type: pb.ConfChangeAddLearnerNode},
- {NodeID: 3, Type: pb.ConfChangeAddLearnerNode},
- },
- Transition: pb.ConfChangeTransitionJointExplicit,
- },
- pb.ConfState{
- Voters: []uint64{2},
- VotersOutgoing: []uint64{1},
- Learners: []uint64{3},
- LearnersNext: []uint64{1},
- },
- &pb.ConfState{Voters: []uint64{2}, Learners: []uint64{1, 3}},
- },
- // Ditto implicit.
- {
- pb.ConfChangeV2{
- Changes: []pb.ConfChangeSingle{
- {NodeID: 2, Type: pb.ConfChangeAddNode},
- {NodeID: 1, Type: pb.ConfChangeAddLearnerNode},
- {NodeID: 3, Type: pb.ConfChangeAddLearnerNode},
- },
- Transition: pb.ConfChangeTransitionJointImplicit,
- },
- pb.ConfState{
- Voters: []uint64{2},
- VotersOutgoing: []uint64{1},
- Learners: []uint64{3},
- LearnersNext: []uint64{1},
- AutoLeave: true,
- },
- &pb.ConfState{Voters: []uint64{2}, Learners: []uint64{1, 3}},
- },
- }
-
- for _, tc := range testCases {
- t.Run("", func(t *testing.T) {
- s := newTestMemoryStorage(withPeers(1))
- rawNode, err := NewRawNode(newTestConfig(1, 10, 1, s))
- if err != nil {
- t.Fatal(err)
- }
-
- rawNode.Campaign()
- proposed := false
- var (
- lastIndex uint64
- ccdata []byte
- )
- // Propose the ConfChange, wait until it applies, save the resulting
- // ConfState.
- var cs *pb.ConfState
- for cs == nil {
- rd := rawNode.Ready()
- s.Append(rd.Entries)
- for _, ent := range rd.CommittedEntries {
- var cc pb.ConfChangeI
- if ent.Type == pb.EntryConfChange {
- var ccc pb.ConfChange
- if err = ccc.Unmarshal(ent.Data); err != nil {
- t.Fatal(err)
- }
- cc = ccc
- } else if ent.Type == pb.EntryConfChangeV2 {
- var ccc pb.ConfChangeV2
- if err = ccc.Unmarshal(ent.Data); err != nil {
- t.Fatal(err)
- }
- cc = ccc
- }
- if cc != nil {
- cs = rawNode.ApplyConfChange(cc)
- }
- }
- rawNode.Advance(rd)
- // Once we are the leader, propose a command and a ConfChange.
- if !proposed && rd.SoftState.Lead == rawNode.raft.id {
- if err = rawNode.Propose([]byte("somedata")); err != nil {
- t.Fatal(err)
- }
- if ccv1, ok := tc.cc.AsV1(); ok {
- ccdata, err = ccv1.Marshal()
- if err != nil {
- t.Fatal(err)
- }
- rawNode.ProposeConfChange(ccv1)
- } else {
- ccv2 := tc.cc.AsV2()
- ccdata, err = ccv2.Marshal()
- if err != nil {
- t.Fatal(err)
- }
- rawNode.ProposeConfChange(ccv2)
- }
- proposed = true
- }
- }
-
- // Check that the last index is exactly the conf change we put in,
- // down to the bits. Note that this comes from the Storage, which
- // will not reflect any unstable entries that we'll only be presented
- // with in the next Ready.
- lastIndex, err = s.LastIndex()
- if err != nil {
- t.Fatal(err)
- }
-
- entries, err := s.Entries(lastIndex-1, lastIndex+1, noLimit)
- if err != nil {
- t.Fatal(err)
- }
- if len(entries) != 2 {
- t.Fatalf("len(entries) = %d, want %d", len(entries), 2)
- }
- if !bytes.Equal(entries[0].Data, []byte("somedata")) {
- t.Errorf("entries[0].Data = %v, want %v", entries[0].Data, []byte("somedata"))
- }
- typ := pb.EntryConfChange
- if _, ok := tc.cc.AsV1(); !ok {
- typ = pb.EntryConfChangeV2
- }
- if entries[1].Type != typ {
- t.Fatalf("type = %v, want %v", entries[1].Type, typ)
- }
- if !bytes.Equal(entries[1].Data, ccdata) {
- t.Errorf("data = %v, want %v", entries[1].Data, ccdata)
- }
-
- if exp := &tc.exp; !reflect.DeepEqual(exp, cs) {
- t.Fatalf("exp:\n%+v\nact:\n%+v", exp, cs)
- }
-
- var maybePlusOne uint64
- if autoLeave, ok := tc.cc.AsV2().EnterJoint(); ok && autoLeave {
- // If this is an auto-leaving joint conf change, it will have
- // appended the entry that auto-leaves, so add one to the last
- // index that forms the basis of our expectations on
- // pendingConfIndex. (Recall that lastIndex was taken from stable
- // storage, but this auto-leaving entry isn't on stable storage
- // yet).
- maybePlusOne = 1
- }
- if exp, act := lastIndex+maybePlusOne, rawNode.raft.pendingConfIndex; exp != act {
- t.Fatalf("pendingConfIndex: expected %d, got %d", exp, act)
- }
-
- // Move the RawNode along. If the ConfChange was simple, nothing else
- // should happen. Otherwise, we're in a joint state, which is either
- // left automatically or not. If not, we add the proposal that leaves
- // it manually.
- rd := rawNode.Ready()
- var context []byte
- if !tc.exp.AutoLeave {
- if len(rd.Entries) > 0 {
- t.Fatal("expected no more entries")
- }
- if tc.exp2 == nil {
- return
- }
- context = []byte("manual")
- t.Log("leaving joint state manually")
- if err := rawNode.ProposeConfChange(pb.ConfChangeV2{Context: context}); err != nil {
- t.Fatal(err)
- }
- rd = rawNode.Ready()
- }
-
- // Check that the right ConfChange comes out.
- if len(rd.Entries) != 1 || rd.Entries[0].Type != pb.EntryConfChangeV2 {
- t.Fatalf("expected exactly one more entry, got %+v", rd)
- }
- var cc pb.ConfChangeV2
- if err := cc.Unmarshal(rd.Entries[0].Data); err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(cc, pb.ConfChangeV2{Context: context}) {
- t.Fatalf("expected zero ConfChangeV2, got %+v", cc)
- }
- // Lie and pretend the ConfChange applied. It won't do so because now
- // we require the joint quorum and we're only running one node.
- cs = rawNode.ApplyConfChange(cc)
- if exp := tc.exp2; !reflect.DeepEqual(exp, cs) {
- t.Fatalf("exp:\n%+v\nact:\n%+v", exp, cs)
- }
- })
- }
-}
-
-// TestRawNodeJointAutoLeave tests that a joint configuration change auto-leaves
-// even if the leader loses leadership in the meantime.
-func TestRawNodeJointAutoLeave(t *testing.T) {
- testCc := pb.ConfChangeV2{Changes: []pb.ConfChangeSingle{
- {Type: pb.ConfChangeAddLearnerNode, NodeID: 2},
- },
- Transition: pb.ConfChangeTransitionJointImplicit,
- }
- expCs := pb.ConfState{
- Voters: []uint64{1}, VotersOutgoing: []uint64{1}, Learners: []uint64{2},
- AutoLeave: true,
- }
- exp2Cs := pb.ConfState{Voters: []uint64{1}, Learners: []uint64{2}}
-
- t.Run("", func(t *testing.T) {
- s := newTestMemoryStorage(withPeers(1))
- rawNode, err := NewRawNode(newTestConfig(1, 10, 1, s))
- if err != nil {
- t.Fatal(err)
- }
-
- rawNode.Campaign()
- proposed := false
- var (
- lastIndex uint64
- ccdata []byte
- )
- // Propose the ConfChange, wait until it applies, save the resulting
- // ConfState.
- var cs *pb.ConfState
- for cs == nil {
- rd := rawNode.Ready()
- s.Append(rd.Entries)
- for _, ent := range rd.CommittedEntries {
- var cc pb.ConfChangeI
- if ent.Type == pb.EntryConfChangeV2 {
- var ccc pb.ConfChangeV2
- if err = ccc.Unmarshal(ent.Data); err != nil {
- t.Fatal(err)
- }
- cc = &ccc
- }
- if cc != nil {
-					// Force it to step down.
- rawNode.Step(pb.Message{Type: pb.MsgHeartbeatResp, From: 1, Term: rawNode.raft.Term + 1})
- cs = rawNode.ApplyConfChange(cc)
- }
- }
- rawNode.Advance(rd)
- // Once we are the leader, propose a command and a ConfChange.
- if !proposed && rd.SoftState.Lead == rawNode.raft.id {
- if err = rawNode.Propose([]byte("somedata")); err != nil {
- t.Fatal(err)
- }
- ccdata, err = testCc.Marshal()
- if err != nil {
- t.Fatal(err)
- }
- rawNode.ProposeConfChange(testCc)
- proposed = true
- }
- }
-
- // Check that the last index is exactly the conf change we put in,
- // down to the bits. Note that this comes from the Storage, which
- // will not reflect any unstable entries that we'll only be presented
- // with in the next Ready.
- lastIndex, err = s.LastIndex()
- if err != nil {
- t.Fatal(err)
- }
-
- entries, err := s.Entries(lastIndex-1, lastIndex+1, noLimit)
- if err != nil {
- t.Fatal(err)
- }
- if len(entries) != 2 {
- t.Fatalf("len(entries) = %d, want %d", len(entries), 2)
- }
- if !bytes.Equal(entries[0].Data, []byte("somedata")) {
- t.Errorf("entries[0].Data = %v, want %v", entries[0].Data, []byte("somedata"))
- }
- if entries[1].Type != pb.EntryConfChangeV2 {
- t.Fatalf("type = %v, want %v", entries[1].Type, pb.EntryConfChangeV2)
- }
- if !bytes.Equal(entries[1].Data, ccdata) {
- t.Errorf("data = %v, want %v", entries[1].Data, ccdata)
- }
-
- if !reflect.DeepEqual(&expCs, cs) {
- t.Fatalf("exp:\n%+v\nact:\n%+v", expCs, cs)
- }
-
- if rawNode.raft.pendingConfIndex != 0 {
- t.Fatalf("pendingConfIndex: expected %d, got %d", 0, rawNode.raft.pendingConfIndex)
- }
-
-		// Move the RawNode along. It should not leave joint because it's a follower.
- rd := rawNode.readyWithoutAccept()
- // Check that the right ConfChange comes out.
- if len(rd.Entries) != 0 {
- t.Fatalf("expected zero entry, got %+v", rd)
- }
-
- // Make it leader again. It should leave joint automatically after moving apply index.
- rawNode.Campaign()
- rd = rawNode.Ready()
- s.Append(rd.Entries)
- rawNode.Advance(rd)
- rd = rawNode.Ready()
- s.Append(rd.Entries)
-
- // Check that the right ConfChange comes out.
- if len(rd.Entries) != 1 || rd.Entries[0].Type != pb.EntryConfChangeV2 {
- t.Fatalf("expected exactly one more entry, got %+v", rd)
- }
- var cc pb.ConfChangeV2
- if err := cc.Unmarshal(rd.Entries[0].Data); err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(cc, pb.ConfChangeV2{Context: nil}) {
- t.Fatalf("expected zero ConfChangeV2, got %+v", cc)
- }
- // Lie and pretend the ConfChange applied. It won't do so because now
- // we require the joint quorum and we're only running one node.
- cs = rawNode.ApplyConfChange(cc)
- if exp := exp2Cs; !reflect.DeepEqual(&exp, cs) {
- t.Fatalf("exp:\n%+v\nact:\n%+v", exp, cs)
- }
- })
-}
-
-// TestRawNodeProposeAddDuplicateNode ensures that proposing to add the same node twice
-// does not affect a later proposal to add a new node.
-func TestRawNodeProposeAddDuplicateNode(t *testing.T) {
- s := newTestMemoryStorage(withPeers(1))
- rawNode, err := NewRawNode(newTestConfig(1, 10, 1, s))
- if err != nil {
- t.Fatal(err)
- }
- rd := rawNode.Ready()
- s.Append(rd.Entries)
- rawNode.Advance(rd)
-
- rawNode.Campaign()
- for {
- rd = rawNode.Ready()
- s.Append(rd.Entries)
- if rd.SoftState.Lead == rawNode.raft.id {
- rawNode.Advance(rd)
- break
- }
- rawNode.Advance(rd)
- }
-
- proposeConfChangeAndApply := func(cc pb.ConfChange) {
- rawNode.ProposeConfChange(cc)
- rd = rawNode.Ready()
- s.Append(rd.Entries)
- for _, entry := range rd.CommittedEntries {
- if entry.Type == pb.EntryConfChange {
- var cc pb.ConfChange
- cc.Unmarshal(entry.Data)
- rawNode.ApplyConfChange(cc)
- }
- }
- rawNode.Advance(rd)
- }
-
- cc1 := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: 1}
- ccdata1, err := cc1.Marshal()
- if err != nil {
- t.Fatal(err)
- }
- proposeConfChangeAndApply(cc1)
-
- // try to add the same node again
- proposeConfChangeAndApply(cc1)
-
- // the new node join should be ok
- cc2 := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: 2}
- ccdata2, err := cc2.Marshal()
- if err != nil {
- t.Fatal(err)
- }
- proposeConfChangeAndApply(cc2)
-
- lastIndex, err := s.LastIndex()
- if err != nil {
- t.Fatal(err)
- }
-
- // the last three entries should be: ConfChange cc1, cc1, cc2
- entries, err := s.Entries(lastIndex-2, lastIndex+1, noLimit)
- if err != nil {
- t.Fatal(err)
- }
- if len(entries) != 3 {
- t.Fatalf("len(entries) = %d, want %d", len(entries), 3)
- }
- if !bytes.Equal(entries[0].Data, ccdata1) {
- t.Errorf("entries[0].Data = %v, want %v", entries[0].Data, ccdata1)
- }
- if !bytes.Equal(entries[2].Data, ccdata2) {
- t.Errorf("entries[2].Data = %v, want %v", entries[2].Data, ccdata2)
- }
-}
-
-// TestRawNodeReadIndex ensures that RawNode.ReadIndex sends the MsgReadIndex message
-// to the underlying raft. It also ensures that ReadState can be read out.
-func TestRawNodeReadIndex(t *testing.T) {
- msgs := []pb.Message{}
- appendStep := func(r *raft, m pb.Message) error {
- msgs = append(msgs, m)
- return nil
- }
- wrs := []ReadState{{Index: uint64(1), RequestCtx: []byte("somedata")}}
-
- s := newTestMemoryStorage(withPeers(1))
- c := newTestConfig(1, 10, 1, s)
- rawNode, err := NewRawNode(c)
- if err != nil {
- t.Fatal(err)
- }
- rawNode.raft.readStates = wrs
- // ensure the ReadStates can be read out
- hasReady := rawNode.HasReady()
- if !hasReady {
- t.Errorf("HasReady() returns %t, want %t", hasReady, true)
- }
- rd := rawNode.Ready()
- if !reflect.DeepEqual(rd.ReadStates, wrs) {
- t.Errorf("ReadStates = %d, want %d", rd.ReadStates, wrs)
- }
- s.Append(rd.Entries)
- rawNode.Advance(rd)
- // ensure raft.readStates is reset after advance
- if rawNode.raft.readStates != nil {
- t.Errorf("readStates = %v, want %v", rawNode.raft.readStates, nil)
- }
-
- wrequestCtx := []byte("somedata2")
- rawNode.Campaign()
- for {
- rd = rawNode.Ready()
- s.Append(rd.Entries)
-
- if rd.SoftState.Lead == rawNode.raft.id {
- rawNode.Advance(rd)
-
- // Once we are the leader, issue a ReadIndex request
- rawNode.raft.step = appendStep
- rawNode.ReadIndex(wrequestCtx)
- break
- }
- rawNode.Advance(rd)
- }
- // ensure that MsgReadIndex message is sent to the underlying raft
- if len(msgs) != 1 {
- t.Fatalf("len(msgs) = %d, want %d", len(msgs), 1)
- }
- if msgs[0].Type != pb.MsgReadIndex {
- t.Errorf("msg type = %d, want %d", msgs[0].Type, pb.MsgReadIndex)
- }
- if !bytes.Equal(msgs[0].Entries[0].Data, wrequestCtx) {
- t.Errorf("data = %v, want %v", msgs[0].Entries[0].Data, wrequestCtx)
- }
-}
-
-// TestBlockProposal from node_test.go has no equivalent in rawNode because there is
-// no leader check in RawNode.
-
-// TestNodeTick from node_test.go has no equivalent in rawNode because
-// it reaches into the raft object which is not exposed.
-
-// TestNodeStop from node_test.go has no equivalent in rawNode because there is
-// no goroutine in RawNode.
-
-// TestRawNodeStart ensures that a node can be started correctly. Note that RawNode
-// requires the application to bootstrap the state, i.e. it does not accept peers
-// and will not create faux configuration change entries.
-func TestRawNodeStart(t *testing.T) {
- want := Ready{
- SoftState: &SoftState{Lead: 1, RaftState: StateLeader},
- HardState: pb.HardState{Term: 1, Commit: 3, Vote: 1},
- Entries: []pb.Entry{
- {Term: 1, Index: 2, Data: nil}, // empty entry
-			{Term: 1, Index: 3, Data: []byte("foo")}, // proposed entry
- },
- CommittedEntries: []pb.Entry{
- {Term: 1, Index: 2, Data: nil}, // empty entry
-			{Term: 1, Index: 3, Data: []byte("foo")}, // proposed entry
- },
- MustSync: true,
- }
-
- storage := NewMemoryStorage()
- storage.ents[0].Index = 1
-
- // TODO(tbg): this is a first prototype of what bootstrapping could look
- // like (without the annoying faux ConfChanges). We want to persist a
- // ConfState at some index and make sure that this index can't be reached
- // from log position 1, so that followers are forced to pick up the
- // ConfState in order to move away from log position 1 (unless they got
- // bootstrapped in the same way already). Failing to do so would mean that
- // followers diverge from the bootstrapped nodes and don't learn about the
- // initial config.
- //
- // NB: this is exactly what CockroachDB does. The Raft log really begins at
- // index 10, so empty followers (at index 1) always need a snapshot first.
- type appenderStorage interface {
- Storage
- ApplySnapshot(pb.Snapshot) error
- }
- bootstrap := func(storage appenderStorage, cs pb.ConfState) error {
- if len(cs.Voters) == 0 {
- return fmt.Errorf("no voters specified")
- }
- fi, err := storage.FirstIndex()
- if err != nil {
- return err
- }
- if fi < 2 {
- return fmt.Errorf("FirstIndex >= 2 is prerequisite for bootstrap")
- }
- if _, err = storage.Entries(fi, fi, math.MaxUint64); err == nil {
- // TODO(tbg): match exact error
- return fmt.Errorf("should not have been able to load first index")
- }
- li, err := storage.LastIndex()
- if err != nil {
- return err
- }
- if _, err = storage.Entries(li, li, math.MaxUint64); err == nil {
- return fmt.Errorf("should not have been able to load last index")
- }
- hs, ics, err := storage.InitialState()
- if err != nil {
- return err
- }
- if !IsEmptyHardState(hs) {
- return fmt.Errorf("HardState not empty")
- }
- if len(ics.Voters) != 0 {
- return fmt.Errorf("ConfState not empty")
- }
-
- meta := pb.SnapshotMetadata{
- Index: 1,
- Term: 0,
- ConfState: cs,
- }
- snap := pb.Snapshot{Metadata: meta}
- return storage.ApplySnapshot(snap)
- }
-
- if err := bootstrap(storage, pb.ConfState{Voters: []uint64{1}}); err != nil {
- t.Fatal(err)
- }
-
- rawNode, err := NewRawNode(newTestConfig(1, 10, 1, storage))
- if err != nil {
- t.Fatal(err)
- }
- if rawNode.HasReady() {
- t.Fatalf("unexpected ready: %+v", rawNode.Ready())
- }
- rawNode.Campaign()
- rawNode.Propose([]byte("foo"))
- if !rawNode.HasReady() {
- t.Fatal("expected a Ready")
- }
- rd := rawNode.Ready()
- storage.Append(rd.Entries)
- rawNode.Advance(rd)
-
- rd.SoftState, want.SoftState = nil, nil
-
- if !reflect.DeepEqual(rd, want) {
- t.Fatalf("unexpected Ready:\n%+v\nvs\n%+v", rd, want)
- }
-
- if rawNode.HasReady() {
- t.Errorf("unexpected Ready: %+v", rawNode.Ready())
- }
-}
-
-func TestRawNodeRestart(t *testing.T) {
- entries := []pb.Entry{
- {Term: 1, Index: 1},
- {Term: 1, Index: 2, Data: []byte("foo")},
- }
- st := pb.HardState{Term: 1, Commit: 1}
-
- want := Ready{
- HardState: emptyState,
- // commit up to commit index in st
- CommittedEntries: entries[:st.Commit],
- MustSync: false,
- }
-
- storage := newTestMemoryStorage(withPeers(1))
- storage.SetHardState(st)
- storage.Append(entries)
- rawNode, err := NewRawNode(newTestConfig(1, 10, 1, storage))
- if err != nil {
- t.Fatal(err)
- }
- rd := rawNode.Ready()
- if !reflect.DeepEqual(rd, want) {
- t.Errorf("g = %+v,\n w %+v", rd, want)
- }
- rawNode.Advance(rd)
- if rawNode.HasReady() {
- t.Errorf("unexpected Ready: %+v", rawNode.Ready())
- }
-}
-
-func TestRawNodeRestartFromSnapshot(t *testing.T) {
- snap := pb.Snapshot{
- Metadata: pb.SnapshotMetadata{
- ConfState: pb.ConfState{Voters: []uint64{1, 2}},
- Index: 2,
- Term: 1,
- },
- }
- entries := []pb.Entry{
- {Term: 1, Index: 3, Data: []byte("foo")},
- }
- st := pb.HardState{Term: 1, Commit: 3}
-
- want := Ready{
- HardState: emptyState,
- // commit up to commit index in st
- CommittedEntries: entries,
- MustSync: false,
- }
-
- s := NewMemoryStorage()
- s.SetHardState(st)
- s.ApplySnapshot(snap)
- s.Append(entries)
- rawNode, err := NewRawNode(newTestConfig(1, 10, 1, s))
- if err != nil {
- t.Fatal(err)
- }
- if rd := rawNode.Ready(); !reflect.DeepEqual(rd, want) {
- t.Errorf("g = %+v,\n w %+v", rd, want)
- } else {
- rawNode.Advance(rd)
- }
- if rawNode.HasReady() {
-		t.Errorf("unexpected Ready: %+v", rawNode.Ready())
- }
-}
-
-// TestNodeAdvance from node_test.go has no equivalent in rawNode because there is
-// no dependency check between Ready() and Advance()
-
-func TestRawNodeStatus(t *testing.T) {
- s := newTestMemoryStorage(withPeers(1))
- rn, err := NewRawNode(newTestConfig(1, 10, 1, s))
- if err != nil {
- t.Fatal(err)
- }
- if status := rn.Status(); status.Progress != nil {
- t.Fatalf("expected no Progress because not leader: %+v", status.Progress)
- }
- if err := rn.Campaign(); err != nil {
- t.Fatal(err)
- }
- status := rn.Status()
- if status.Lead != 1 {
- t.Fatal("not lead")
- }
- if status.RaftState != StateLeader {
- t.Fatal("not leader")
- }
- if exp, act := *rn.raft.prs.Progress[1], status.Progress[1]; !reflect.DeepEqual(exp, act) {
- t.Fatalf("want: %+v\ngot: %+v", exp, act)
- }
- expCfg := tracker.Config{Voters: quorum.JointConfig{
- quorum.MajorityConfig{1: {}},
- nil,
- }}
- if !reflect.DeepEqual(expCfg, status.Config) {
- t.Fatalf("want: %+v\ngot: %+v", expCfg, status.Config)
- }
-}
-
-// TestRawNodeCommitPaginationAfterRestart is the RawNode version of
-// TestNodeCommitPaginationAfterRestart. The anomaly here was even worse as the
-// Raft group would forget to apply entries:
-//
-// - node learns that index 11 is committed
-// - nextEnts returns index 1..10 in CommittedEntries (but index 10 already
-// exceeds maxBytes), which isn't noticed internally by Raft
-// - Commit index gets bumped to 10
-// - the node persists the HardState, but crashes before applying the entries
-// - upon restart, the storage returns the same entries, but `slice` takes a
-// different code path and removes the last entry.
-// - Raft does not emit a HardState, but when the app calls Advance(), it bumps
-// its internal applied index cursor to 10 (when it should be 9)
-// - the next Ready asks the app to apply index 11 (omitting index 10), losing a
-// write.
-func TestRawNodeCommitPaginationAfterRestart(t *testing.T) {
- s := &ignoreSizeHintMemStorage{
- MemoryStorage: newTestMemoryStorage(withPeers(1)),
- }
- persistedHardState := pb.HardState{
- Term: 1,
- Vote: 1,
- Commit: 10,
- }
-
- s.hardState = persistedHardState
- s.ents = make([]pb.Entry, 10)
- var size uint64
- for i := range s.ents {
- ent := pb.Entry{
- Term: 1,
- Index: uint64(i + 1),
- Type: pb.EntryNormal,
- Data: []byte("a"),
- }
-
- s.ents[i] = ent
- size += uint64(ent.Size())
- }
-
- cfg := newTestConfig(1, 10, 1, s)
- // Set a MaxSizePerMsg that would suggest to Raft that the last committed entry should
- // not be included in the initial rd.CommittedEntries. However, our storage will ignore
- // this and *will* return it (which is how the Commit index ended up being 10 initially).
- cfg.MaxSizePerMsg = size - uint64(s.ents[len(s.ents)-1].Size()) - 1
-
- s.ents = append(s.ents, pb.Entry{
- Term: 1,
- Index: uint64(11),
- Type: pb.EntryNormal,
- Data: []byte("boom"),
- })
-
- rawNode, err := NewRawNode(cfg)
- if err != nil {
- t.Fatal(err)
- }
-
- for highestApplied := uint64(0); highestApplied != 11; {
- rd := rawNode.Ready()
- n := len(rd.CommittedEntries)
- if n == 0 {
- t.Fatalf("stopped applying entries at index %d", highestApplied)
- }
- if next := rd.CommittedEntries[0].Index; highestApplied != 0 && highestApplied+1 != next {
- t.Fatalf("attempting to apply index %d after index %d, leaving a gap", next, highestApplied)
- }
- highestApplied = rd.CommittedEntries[n-1].Index
- rawNode.Advance(rd)
- rawNode.Step(pb.Message{
- Type: pb.MsgHeartbeat,
- To: 1,
- From: 1, // illegal, but we get away with it
- Term: 1,
- Commit: 11,
- })
- }
-}
-
-// TestRawNodeBoundedLogGrowthWithPartition tests a scenario where a leader is
-// partitioned from a quorum of nodes. It verifies that the leader's log is
-// protected from unbounded growth even as new entries continue to be proposed.
-// This protection is provided by the MaxUncommittedEntriesSize configuration.
-func TestRawNodeBoundedLogGrowthWithPartition(t *testing.T) {
- const maxEntries = 16
- data := []byte("testdata")
- testEntry := pb.Entry{Data: data}
- maxEntrySize := uint64(maxEntries * PayloadSize(testEntry))
-
- s := newTestMemoryStorage(withPeers(1))
- cfg := newTestConfig(1, 10, 1, s)
- cfg.MaxUncommittedEntriesSize = maxEntrySize
- rawNode, err := NewRawNode(cfg)
- if err != nil {
- t.Fatal(err)
- }
- rd := rawNode.Ready()
- s.Append(rd.Entries)
- rawNode.Advance(rd)
-
- // Become the leader.
- rawNode.Campaign()
- for {
- rd = rawNode.Ready()
- s.Append(rd.Entries)
- if rd.SoftState.Lead == rawNode.raft.id {
- rawNode.Advance(rd)
- break
- }
- rawNode.Advance(rd)
- }
-
- // Simulate a network partition while we make our proposals by never
- // committing anything. These proposals should not cause the leader's
- // log to grow indefinitely.
- for i := 0; i < 1024; i++ {
- rawNode.Propose(data)
- }
-
- // Check the size of leader's uncommitted log tail. It should not exceed the
- // MaxUncommittedEntriesSize limit.
- checkUncommitted := func(exp uint64) {
- t.Helper()
- if a := rawNode.raft.uncommittedSize; exp != a {
- t.Fatalf("expected %d uncommitted entry bytes, found %d", exp, a)
- }
- }
- checkUncommitted(maxEntrySize)
-
- // Recover from the partition. The uncommitted tail of the Raft log should
- // disappear as entries are committed.
- rd = rawNode.Ready()
- if len(rd.CommittedEntries) != maxEntries {
- t.Fatalf("expected %d entries, got %d", maxEntries, len(rd.CommittedEntries))
- }
- s.Append(rd.Entries)
- rawNode.Advance(rd)
- checkUncommitted(0)
-}
-
-func BenchmarkStatus(b *testing.B) {
- setup := func(members int) *RawNode {
- peers := make([]uint64, members)
- for i := range peers {
- peers[i] = uint64(i + 1)
- }
- cfg := newTestConfig(1, 3, 1, newTestMemoryStorage(withPeers(peers...)))
- cfg.Logger = discardLogger
- r := newRaft(cfg)
- r.becomeFollower(1, 1)
- r.becomeCandidate()
- r.becomeLeader()
- return &RawNode{raft: r}
- }
-
- for _, members := range []int{1, 3, 5, 100} {
- b.Run(fmt.Sprintf("members=%d", members), func(b *testing.B) {
- rn := setup(members)
-
- b.Run("Status", func(b *testing.B) {
- b.ReportAllocs()
- for i := 0; i < b.N; i++ {
- _ = rn.Status()
- }
- })
-
- b.Run("Status-example", func(b *testing.B) {
- b.ReportAllocs()
- for i := 0; i < b.N; i++ {
- s := rn.Status()
- var n uint64
- for _, pr := range s.Progress {
- n += pr.Match
- }
- _ = n
- }
- })
-
- b.Run("BasicStatus", func(b *testing.B) {
- b.ReportAllocs()
- for i := 0; i < b.N; i++ {
- _ = rn.BasicStatus()
- }
- })
-
- b.Run("WithProgress", func(b *testing.B) {
- b.ReportAllocs()
- visit := func(uint64, ProgressType, tracker.Progress) {}
-
- for i := 0; i < b.N; i++ {
- rn.WithProgress(visit)
- }
- })
- b.Run("WithProgress-example", func(b *testing.B) {
- b.ReportAllocs()
- for i := 0; i < b.N; i++ {
- var n uint64
- visit := func(_ uint64, _ ProgressType, pr tracker.Progress) {
- n += pr.Match
- }
- rn.WithProgress(visit)
- _ = n
- }
- })
- })
- }
-}
-
-func TestRawNodeConsumeReady(t *testing.T) {
- // Check that readyWithoutAccept() does not call acceptReady (which resets
- // the messages) but Ready() does.
- s := newTestMemoryStorage(withPeers(1))
- rn := newTestRawNode(1, 3, 1, s)
- m1 := pb.Message{Context: []byte("foo")}
- m2 := pb.Message{Context: []byte("bar")}
-
- // Inject first message, make sure it's visible via readyWithoutAccept.
- rn.raft.msgs = append(rn.raft.msgs, m1)
- rd := rn.readyWithoutAccept()
- if len(rd.Messages) != 1 || !reflect.DeepEqual(rd.Messages[0], m1) {
- t.Fatalf("expected only m1 sent, got %+v", rd.Messages)
- }
- if len(rn.raft.msgs) != 1 || !reflect.DeepEqual(rn.raft.msgs[0], m1) {
- t.Fatalf("expected only m1 in raft.msgs, got %+v", rn.raft.msgs)
- }
- // Now call Ready() which should move the message into the Ready (as opposed
- // to leaving it in both places).
- rd = rn.Ready()
- if len(rn.raft.msgs) > 0 {
- t.Fatalf("messages not reset: %+v", rn.raft.msgs)
- }
- if len(rd.Messages) != 1 || !reflect.DeepEqual(rd.Messages[0], m1) {
- t.Fatalf("expected only m1 sent, got %+v", rd.Messages)
- }
- // Add a message to raft to make sure that Advance() doesn't drop it.
- rn.raft.msgs = append(rn.raft.msgs, m2)
- rn.Advance(rd)
- if len(rn.raft.msgs) != 1 || !reflect.DeepEqual(rn.raft.msgs[0], m2) {
- t.Fatalf("expected only m2 in raft.msgs, got %+v", rn.raft.msgs)
- }
-}
diff --git a/raft/read_only.go b/raft/read_only.go
deleted file mode 100644
index ad0688522d6..00000000000
--- a/raft/read_only.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import pb "go.etcd.io/etcd/raft/v3/raftpb"
-
-// ReadState provides state for a read-only query.
-// It is the caller's responsibility to call ReadIndex first before getting
-// this state from Ready. It is also the caller's duty to check, via
-// RequestCtx, whether this state matches its own request, e.g. by using a
-// unique id as the RequestCtx.
-type ReadState struct {
- Index uint64
- RequestCtx []byte
-}
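-
-// For example, an application might consume ReadState along these lines (a
-// minimal sketch; reqID is a hypothetical unique request id chosen by the
-// application, and rn is its RawNode):
-//
-//	reqID := []byte("unique-request-id")
-//	rn.ReadIndex(reqID)
-//	// Once the leader has heard back from a quorum, a later Ready carries the state:
-//	rd := rn.Ready()
-//	for _, rs := range rd.ReadStates {
-//		if bytes.Equal(rs.RequestCtx, reqID) {
-//			// Serve the read once the applied index has reached rs.Index.
-//		}
-//	}
-//	rn.Advance(rd)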
-
-type readIndexStatus struct {
- req pb.Message
- index uint64
- // NB: this never records 'false', but it's more convenient to use this
- // instead of a map[uint64]struct{} due to the API of quorum.VoteResult. If
- // this becomes performance sensitive enough (doubtful), quorum.VoteResult
- // can change to an API that is closer to that of CommittedIndex.
- acks map[uint64]bool
-}
-
-type readOnly struct {
- option ReadOnlyOption
- pendingReadIndex map[string]*readIndexStatus
- readIndexQueue []string
-}
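-
-// A leader is expected to drive a read-only request through this struct
-// roughly as follows (a sketch based on the method contracts below;
-// committedIndex, readIndexMsg, from and heartbeatRespMsg are placeholders):
-//
-//	ro.addRequest(committedIndex, readIndexMsg)         // on receiving MsgReadIndex
-//	acks := ro.recvAck(from, heartbeatRespMsg.Context)  // on each MsgHeartbeatResp carrying the context
-//	// Once acks amount to a quorum:
-//	rss := ro.advance(heartbeatRespMsg)                 // dequeue requests up to and including this context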
-
-func newReadOnly(option ReadOnlyOption) *readOnly {
- return &readOnly{
- option: option,
- pendingReadIndex: make(map[string]*readIndexStatus),
- }
-}
-
-// addRequest adds a read-only request to the readonly struct.
-// `index` is the commit index of the raft state machine when it received
-// the read only request.
-// `m` is the original read only request message from the local or remote node.
-func (ro *readOnly) addRequest(index uint64, m pb.Message) {
- s := string(m.Entries[0].Data)
- if _, ok := ro.pendingReadIndex[s]; ok {
- return
- }
- ro.pendingReadIndex[s] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]bool)}
- ro.readIndexQueue = append(ro.readIndexQueue, s)
-}
-
-// recvAck notifies the readonly struct that the raft state machine received
-// an acknowledgment of the heartbeat that carries the read-only request
-// context.
-func (ro *readOnly) recvAck(id uint64, context []byte) map[uint64]bool {
- rs, ok := ro.pendingReadIndex[string(context)]
- if !ok {
- return nil
- }
-
- rs.acks[id] = true
- return rs.acks
-}
-
-// advance advances the read only request queue kept by the readonly struct.
-// It dequeues the requests until it finds the read only request that has
-// the same context as the given `m`.
-func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
- var (
- i int
- found bool
- )
-
- ctx := string(m.Context)
- rss := []*readIndexStatus{}
-
- for _, okctx := range ro.readIndexQueue {
- i++
- rs, ok := ro.pendingReadIndex[okctx]
- if !ok {
- panic("cannot find corresponding read state from pending map")
- }
- rss = append(rss, rs)
- if okctx == ctx {
- found = true
- break
- }
- }
-
- if found {
- ro.readIndexQueue = ro.readIndexQueue[i:]
- for _, rs := range rss {
- delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data))
- }
- return rss
- }
-
- return nil
-}
-
-// lastPendingRequestCtx returns the context of the last pending read only
-// request in the readonly struct.
-func (ro *readOnly) lastPendingRequestCtx() string {
- if len(ro.readIndexQueue) == 0 {
- return ""
- }
- return ro.readIndexQueue[len(ro.readIndexQueue)-1]
-}
diff --git a/raft/status.go b/raft/status.go
deleted file mode 100644
index acfb56c3915..00000000000
--- a/raft/status.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "fmt"
-
- pb "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/raft/v3/tracker"
-)
-
-// Status contains information about this Raft peer and its view of the system.
-// The Progress is only populated on the leader.
-type Status struct {
- BasicStatus
- Config tracker.Config
- Progress map[uint64]tracker.Progress
-}
-
-// BasicStatus contains basic information about the Raft peer. It does not allocate.
-type BasicStatus struct {
- ID uint64
-
- pb.HardState
- SoftState
-
- Applied uint64
-
- LeadTransferee uint64
-}
-
-func getProgressCopy(r *raft) map[uint64]tracker.Progress {
- m := make(map[uint64]tracker.Progress)
- r.prs.Visit(func(id uint64, pr *tracker.Progress) {
- p := *pr
- p.Inflights = pr.Inflights.Clone()
-		pr = nil // nil out pr to protect against accidental use of the shared Progress
-
- m[id] = p
- })
- return m
-}
-
-func getBasicStatus(r *raft) BasicStatus {
- s := BasicStatus{
- ID: r.id,
- LeadTransferee: r.leadTransferee,
- }
- s.HardState = r.hardState()
- s.SoftState = *r.softState()
- s.Applied = r.raftLog.applied
- return s
-}
-
-// getStatus gets a copy of the current raft status.
-func getStatus(r *raft) Status {
- var s Status
- s.BasicStatus = getBasicStatus(r)
- if s.RaftState == StateLeader {
- s.Progress = getProgressCopy(r)
- }
- s.Config = r.prs.Config.Clone()
- return s
-}
-
-// MarshalJSON translates the raft status into JSON.
-// TODO: try to simplify this by introducing ID type into raft
-func (s Status) MarshalJSON() ([]byte, error) {
- j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`,
- s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState, s.Applied)
-
- if len(s.Progress) == 0 {
- j += "},"
- } else {
- for k, v := range s.Progress {
- subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State)
- j += subj
- }
- // remove the trailing ","
- j = j[:len(j)-1] + "},"
- }
-
- j += fmt.Sprintf(`"leadtransferee":"%x"}`, s.LeadTransferee)
- return []byte(j), nil
-}
-
-func (s Status) String() string {
- b, err := s.MarshalJSON()
- if err != nil {
- getLogger().Panicf("unexpected error: %v", err)
- }
- return string(b)
-}
diff --git a/raft/storage.go b/raft/storage.go
deleted file mode 100644
index 8b16d4fa24f..00000000000
--- a/raft/storage.go
+++ /dev/null
@@ -1,273 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "errors"
- "sync"
-
- pb "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-// ErrCompacted is returned by Storage.Entries/Compact when a requested
-// index is unavailable because it predates the last snapshot.
-var ErrCompacted = errors.New("requested index is unavailable due to compaction")
-
-// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested
-// index is older than the existing snapshot.
-var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot")
-
-// ErrUnavailable is returned by Storage interface when the requested log entries
-// are unavailable.
-var ErrUnavailable = errors.New("requested entry at index is unavailable")
-
-// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required
-// snapshot is temporarily unavailable.
-var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable")
-
-// Storage is an interface that may be implemented by the application
-// to retrieve log entries from storage.
-//
-// If any Storage method returns an error, the raft instance will
-// become inoperable and refuse to participate in elections; the
-// application is responsible for cleanup and recovery in this case.
-type Storage interface {
- // TODO(tbg): split this into two interfaces, LogStorage and StateStorage.
-
- // InitialState returns the saved HardState and ConfState information.
- InitialState() (pb.HardState, pb.ConfState, error)
- // Entries returns a slice of log entries in the range [lo,hi).
- // MaxSize limits the total size of the log entries returned, but
- // Entries returns at least one entry if any.
- Entries(lo, hi, maxSize uint64) ([]pb.Entry, error)
- // Term returns the term of entry i, which must be in the range
- // [FirstIndex()-1, LastIndex()]. The term of the entry before
- // FirstIndex is retained for matching purposes even though the
- // rest of that entry may not be available.
- Term(i uint64) (uint64, error)
- // LastIndex returns the index of the last entry in the log.
- LastIndex() (uint64, error)
- // FirstIndex returns the index of the first log entry that is
- // possibly available via Entries (older entries have been incorporated
- // into the latest Snapshot; if storage only contains the dummy entry the
- // first log entry is not available).
- FirstIndex() (uint64, error)
- // Snapshot returns the most recent snapshot.
-	// If the snapshot is temporarily unavailable, it should return ErrSnapshotTemporarilyUnavailable,
-	// so the raft state machine knows that Storage needs some time to prepare the
-	// snapshot and will call Snapshot again later.
- Snapshot() (pb.Snapshot, error)
-}
-
-// MemoryStorage implements the Storage interface backed by an
-// in-memory array.
-type MemoryStorage struct {
- // Protects access to all fields. Most methods of MemoryStorage are
- // run on the raft goroutine, but Append() is run on an application
- // goroutine.
- sync.Mutex
-
- hardState pb.HardState
- snapshot pb.Snapshot
- // ents[i] has raft log position i+snapshot.Metadata.Index
- ents []pb.Entry
-}
-
-// NewMemoryStorage creates an empty MemoryStorage.
-func NewMemoryStorage() *MemoryStorage {
- return &MemoryStorage{
- // When starting from scratch populate the list with a dummy entry at term zero.
- ents: make([]pb.Entry, 1),
- }
-}
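-
-// A minimal sketch of how an application typically restores and feeds a
-// MemoryStorage (mirroring the usage in the tests; hardState, snapshot,
-// walEntries and rawNode stand in for whatever the application has persisted
-// and constructed):
-//
-//	storage := NewMemoryStorage()
-//	storage.SetHardState(hardState)  // restore the persisted HardState
-//	storage.ApplySnapshot(snapshot)  // restore the latest snapshot, if any
-//	storage.Append(walEntries)       // restore entries persisted after the snapshot
-//
-//	// Later, inside the Ready loop:
-//	rd := rawNode.Ready()
-//	storage.Append(rd.Entries)       // persist new entries before acknowledging the Ready
-//	rawNode.Advance(rd)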
-
-// InitialState implements the Storage interface.
-func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) {
- return ms.hardState, ms.snapshot.Metadata.ConfState, nil
-}
-
-// SetHardState saves the current HardState.
-func (ms *MemoryStorage) SetHardState(st pb.HardState) error {
- ms.Lock()
- defer ms.Unlock()
- ms.hardState = st
- return nil
-}
-
-// Entries implements the Storage interface.
-func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) {
- ms.Lock()
- defer ms.Unlock()
- offset := ms.ents[0].Index
- if lo <= offset {
- return nil, ErrCompacted
- }
- if hi > ms.lastIndex()+1 {
- getLogger().Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex())
- }
- // only contains dummy entries.
- if len(ms.ents) == 1 {
- return nil, ErrUnavailable
- }
-
- ents := ms.ents[lo-offset : hi-offset]
- return limitSize(ents, maxSize), nil
-}
-
-// Term implements the Storage interface.
-func (ms *MemoryStorage) Term(i uint64) (uint64, error) {
- ms.Lock()
- defer ms.Unlock()
- offset := ms.ents[0].Index
- if i < offset {
- return 0, ErrCompacted
- }
- if int(i-offset) >= len(ms.ents) {
- return 0, ErrUnavailable
- }
- return ms.ents[i-offset].Term, nil
-}
-
-// LastIndex implements the Storage interface.
-func (ms *MemoryStorage) LastIndex() (uint64, error) {
- ms.Lock()
- defer ms.Unlock()
- return ms.lastIndex(), nil
-}
-
-func (ms *MemoryStorage) lastIndex() uint64 {
- return ms.ents[0].Index + uint64(len(ms.ents)) - 1
-}
-
-// FirstIndex implements the Storage interface.
-func (ms *MemoryStorage) FirstIndex() (uint64, error) {
- ms.Lock()
- defer ms.Unlock()
- return ms.firstIndex(), nil
-}
-
-func (ms *MemoryStorage) firstIndex() uint64 {
- return ms.ents[0].Index + 1
-}
-
-// Snapshot implements the Storage interface.
-func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) {
- ms.Lock()
- defer ms.Unlock()
- return ms.snapshot, nil
-}
-
-// ApplySnapshot overwrites the contents of this Storage object with
-// those of the given snapshot.
-func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error {
- ms.Lock()
- defer ms.Unlock()
-
-	// Reject the snapshot if it is older than the current one.
- msIndex := ms.snapshot.Metadata.Index
- snapIndex := snap.Metadata.Index
- if msIndex >= snapIndex {
- return ErrSnapOutOfDate
- }
-
- ms.snapshot = snap
- ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}}
- return nil
-}
-
-// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and
-// can be used to reconstruct the state at that point.
-// If any configuration changes have been made since the last compaction,
-// the result of the last ApplyConfChange must be passed in.
-func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) {
- ms.Lock()
- defer ms.Unlock()
- if i <= ms.snapshot.Metadata.Index {
- return pb.Snapshot{}, ErrSnapOutOfDate
- }
-
- offset := ms.ents[0].Index
- if i > ms.lastIndex() {
- getLogger().Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex())
- }
-
- ms.snapshot.Metadata.Index = i
- ms.snapshot.Metadata.Term = ms.ents[i-offset].Term
- if cs != nil {
- ms.snapshot.Metadata.ConfState = *cs
- }
- ms.snapshot.Data = data
- return ms.snapshot, nil
-}
-
-// Compact discards all log entries prior to compactIndex.
-// It is the application's responsibility to not attempt to compact an index
-// greater than raftLog.applied.
-func (ms *MemoryStorage) Compact(compactIndex uint64) error {
- ms.Lock()
- defer ms.Unlock()
- offset := ms.ents[0].Index
- if compactIndex <= offset {
- return ErrCompacted
- }
- if compactIndex > ms.lastIndex() {
- getLogger().Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex())
- }
-
- i := compactIndex - offset
- ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i)
- ents[0].Index = ms.ents[i].Index
- ents[0].Term = ms.ents[i].Term
- ents = append(ents, ms.ents[i+1:]...)
- ms.ents = ents
- return nil
-}
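-
-// A minimal sketch of the usual snapshot-then-compact flow, assuming the
-// application has applied entries up to appliedIndex, kept the ConfState
-// returned by its last ApplyConfChange in confState, and serialized its state
-// machine into stateMachineBytes:
-//
-//	snap, err := ms.CreateSnapshot(appliedIndex, &confState, stateMachineBytes)
-//	if err != nil {
-//		// e.g. ErrSnapOutOfDate if appliedIndex is not newer than the current snapshot
-//	}
-//	if err := ms.Compact(appliedIndex); err != nil && err != ErrCompacted {
-//		// handle the error
-//	}
-//	_ = snap // typically persisted or sent to lagging followers by the application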
-
-// Append the new entries to storage.
-// TODO (xiangli): ensure the entries are continuous and
-// entries[0].Index > ms.entries[0].Index
-func (ms *MemoryStorage) Append(entries []pb.Entry) error {
- if len(entries) == 0 {
- return nil
- }
-
- ms.Lock()
- defer ms.Unlock()
-
- first := ms.firstIndex()
- last := entries[0].Index + uint64(len(entries)) - 1
-
- // shortcut if there is no new entry.
- if last < first {
- return nil
- }
- // truncate compacted entries
- if first > entries[0].Index {
- entries = entries[first-entries[0].Index:]
- }
-
- offset := entries[0].Index - ms.ents[0].Index
- switch {
- case uint64(len(ms.ents)) > offset:
- ms.ents = append([]pb.Entry{}, ms.ents[:offset]...)
- ms.ents = append(ms.ents, entries...)
- case uint64(len(ms.ents)) == offset:
- ms.ents = append(ms.ents, entries...)
- default:
- getLogger().Panicf("missing log entry [last: %d, append at: %d]",
- ms.lastIndex(), entries[0].Index)
- }
- return nil
-}
diff --git a/raft/storage_test.go b/raft/storage_test.go
deleted file mode 100644
index 04eb85bdf15..00000000000
--- a/raft/storage_test.go
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "math"
- "reflect"
- "testing"
-
- pb "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-func TestStorageTerm(t *testing.T) {
- ents := []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 5}}
- tests := []struct {
- i uint64
-
- werr error
- wterm uint64
- wpanic bool
- }{
- {2, ErrCompacted, 0, false},
- {3, nil, 3, false},
- {4, nil, 4, false},
- {5, nil, 5, false},
- {6, ErrUnavailable, 0, false},
- }
-
- for i, tt := range tests {
- s := &MemoryStorage{ents: ents}
-
- func() {
- defer func() {
- if r := recover(); r != nil {
- if !tt.wpanic {
- t.Errorf("%d: panic = %v, want %v", i, true, tt.wpanic)
- }
- }
- }()
-
- term, err := s.Term(tt.i)
- if err != tt.werr {
- t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
- }
- if term != tt.wterm {
- t.Errorf("#%d: term = %d, want %d", i, term, tt.wterm)
- }
- }()
- }
-}
-
-func TestStorageEntries(t *testing.T) {
- ents := []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 5}, {Index: 6, Term: 6}}
- tests := []struct {
- lo, hi, maxsize uint64
-
- werr error
- wentries []pb.Entry
- }{
- {2, 6, math.MaxUint64, ErrCompacted, nil},
- {3, 4, math.MaxUint64, ErrCompacted, nil},
- {4, 5, math.MaxUint64, nil, []pb.Entry{{Index: 4, Term: 4}}},
- {4, 6, math.MaxUint64, nil, []pb.Entry{{Index: 4, Term: 4}, {Index: 5, Term: 5}}},
- {4, 7, math.MaxUint64, nil, []pb.Entry{{Index: 4, Term: 4}, {Index: 5, Term: 5}, {Index: 6, Term: 6}}},
- // even if maxsize is zero, the first entry should be returned
- {4, 7, 0, nil, []pb.Entry{{Index: 4, Term: 4}}},
- // limit to 2
- {4, 7, uint64(ents[1].Size() + ents[2].Size()), nil, []pb.Entry{{Index: 4, Term: 4}, {Index: 5, Term: 5}}},
- // limit to 2
- {4, 7, uint64(ents[1].Size() + ents[2].Size() + ents[3].Size()/2), nil, []pb.Entry{{Index: 4, Term: 4}, {Index: 5, Term: 5}}},
- {4, 7, uint64(ents[1].Size() + ents[2].Size() + ents[3].Size() - 1), nil, []pb.Entry{{Index: 4, Term: 4}, {Index: 5, Term: 5}}},
- // all
- {4, 7, uint64(ents[1].Size() + ents[2].Size() + ents[3].Size()), nil, []pb.Entry{{Index: 4, Term: 4}, {Index: 5, Term: 5}, {Index: 6, Term: 6}}},
- }
-
- for i, tt := range tests {
- s := &MemoryStorage{ents: ents}
- entries, err := s.Entries(tt.lo, tt.hi, tt.maxsize)
- if err != tt.werr {
- t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
- }
- if !reflect.DeepEqual(entries, tt.wentries) {
- t.Errorf("#%d: entries = %v, want %v", i, entries, tt.wentries)
- }
- }
-}
-
-func TestStorageLastIndex(t *testing.T) {
- ents := []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 5}}
- s := &MemoryStorage{ents: ents}
-
- last, err := s.LastIndex()
- if err != nil {
- t.Errorf("err = %v, want nil", err)
- }
- if last != 5 {
- t.Errorf("last = %d, want %d", last, 5)
- }
-
- s.Append([]pb.Entry{{Index: 6, Term: 5}})
- last, err = s.LastIndex()
- if err != nil {
- t.Errorf("err = %v, want nil", err)
- }
- if last != 6 {
- t.Errorf("last = %d, want %d", last, 6)
- }
-}
-
-func TestStorageFirstIndex(t *testing.T) {
- ents := []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 5}}
- s := &MemoryStorage{ents: ents}
-
- first, err := s.FirstIndex()
- if err != nil {
- t.Errorf("err = %v, want nil", err)
- }
- if first != 4 {
- t.Errorf("first = %d, want %d", first, 4)
- }
-
- s.Compact(4)
- first, err = s.FirstIndex()
- if err != nil {
- t.Errorf("err = %v, want nil", err)
- }
- if first != 5 {
- t.Errorf("first = %d, want %d", first, 5)
- }
-}
-
-func TestStorageCompact(t *testing.T) {
- ents := []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 5}}
- tests := []struct {
- i uint64
-
- werr error
- windex uint64
- wterm uint64
- wlen int
- }{
- {2, ErrCompacted, 3, 3, 3},
- {3, ErrCompacted, 3, 3, 3},
- {4, nil, 4, 4, 2},
- {5, nil, 5, 5, 1},
- }
-
- for i, tt := range tests {
- s := &MemoryStorage{ents: ents}
- err := s.Compact(tt.i)
- if err != tt.werr {
- t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
- }
- if s.ents[0].Index != tt.windex {
- t.Errorf("#%d: index = %d, want %d", i, s.ents[0].Index, tt.windex)
- }
- if s.ents[0].Term != tt.wterm {
- t.Errorf("#%d: term = %d, want %d", i, s.ents[0].Term, tt.wterm)
- }
- if len(s.ents) != tt.wlen {
- t.Errorf("#%d: len = %d, want %d", i, len(s.ents), tt.wlen)
- }
- }
-}
-
-func TestStorageCreateSnapshot(t *testing.T) {
- ents := []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 5}}
- cs := &pb.ConfState{Voters: []uint64{1, 2, 3}}
- data := []byte("data")
-
- tests := []struct {
- i uint64
-
- werr error
- wsnap pb.Snapshot
- }{
- {4, nil, pb.Snapshot{Data: data, Metadata: pb.SnapshotMetadata{Index: 4, Term: 4, ConfState: *cs}}},
- {5, nil, pb.Snapshot{Data: data, Metadata: pb.SnapshotMetadata{Index: 5, Term: 5, ConfState: *cs}}},
- }
-
- for i, tt := range tests {
- s := &MemoryStorage{ents: ents}
- snap, err := s.CreateSnapshot(tt.i, cs, data)
- if err != tt.werr {
- t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
- }
- if !reflect.DeepEqual(snap, tt.wsnap) {
- t.Errorf("#%d: snap = %+v, want %+v", i, snap, tt.wsnap)
- }
- }
-}
-
-func TestStorageAppend(t *testing.T) {
- ents := []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 5}}
- tests := []struct {
- entries []pb.Entry
-
- werr error
- wentries []pb.Entry
- }{
- {
- []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}},
- nil,
- []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 5}},
- },
- {
- []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 5}},
- nil,
- []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 5}},
- },
- {
- []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 6}, {Index: 5, Term: 6}},
- nil,
- []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 6}, {Index: 5, Term: 6}},
- },
- {
- []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 5}, {Index: 6, Term: 5}},
- nil,
- []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 5}, {Index: 6, Term: 5}},
- },
- // truncate incoming entries, truncate the existing entries and append
- {
- []pb.Entry{{Index: 2, Term: 3}, {Index: 3, Term: 3}, {Index: 4, Term: 5}},
- nil,
- []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 5}},
- },
- // truncate the existing entries and append
- {
- []pb.Entry{{Index: 4, Term: 5}},
- nil,
- []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 5}},
- },
- // direct append
- {
- []pb.Entry{{Index: 6, Term: 5}},
- nil,
- []pb.Entry{{Index: 3, Term: 3}, {Index: 4, Term: 4}, {Index: 5, Term: 5}, {Index: 6, Term: 5}},
- },
- }
-
- for i, tt := range tests {
- s := &MemoryStorage{ents: ents}
- err := s.Append(tt.entries)
- if err != tt.werr {
- t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
- }
- if !reflect.DeepEqual(s.ents, tt.wentries) {
- t.Errorf("#%d: entries = %v, want %v", i, s.ents, tt.wentries)
- }
- }
-}
-
-func TestStorageApplySnapshot(t *testing.T) {
- cs := &pb.ConfState{Voters: []uint64{1, 2, 3}}
- data := []byte("data")
-
-	tests := []pb.Snapshot{
-		{Data: data, Metadata: pb.SnapshotMetadata{Index: 4, Term: 4, ConfState: *cs}},
-		{Data: data, Metadata: pb.SnapshotMetadata{Index: 3, Term: 3, ConfState: *cs}},
-	}
-
- s := NewMemoryStorage()
-
-	// Applying the first snapshot succeeds.
- i := 0
- tt := tests[i]
- err := s.ApplySnapshot(tt)
- if err != nil {
- t.Errorf("#%d: err = %v, want %v", i, err, nil)
- }
-
-	// Applying an older snapshot fails with ErrSnapOutOfDate.
- i = 1
- tt = tests[i]
- err = s.ApplySnapshot(tt)
- if err != ErrSnapOutOfDate {
- t.Errorf("#%d: err = %v, want %v", i, err, ErrSnapOutOfDate)
- }
-}
diff --git a/raft/testdata/campaign.txt b/raft/testdata/campaign.txt
deleted file mode 100644
index c5deb2dc182..00000000000
--- a/raft/testdata/campaign.txt
+++ /dev/null
@@ -1,117 +0,0 @@
-log-level info
-----
-ok
-
-add-nodes 3 voters=(1,2,3) index=2
-----
-INFO 1 switched to configuration voters=(1 2 3)
-INFO 1 became follower at term 0
-INFO newRaft 1 [peers: [1,2,3], term: 0, commit: 2, applied: 2, lastindex: 2, lastterm: 1]
-INFO 2 switched to configuration voters=(1 2 3)
-INFO 2 became follower at term 0
-INFO newRaft 2 [peers: [1,2,3], term: 0, commit: 2, applied: 2, lastindex: 2, lastterm: 1]
-INFO 3 switched to configuration voters=(1 2 3)
-INFO 3 became follower at term 0
-INFO newRaft 3 [peers: [1,2,3], term: 0, commit: 2, applied: 2, lastindex: 2, lastterm: 1]
-
-campaign 1
-----
-INFO 1 is starting a new election at term 0
-INFO 1 became candidate at term 1
-INFO 1 received MsgVoteResp from 1 at term 1
-INFO 1 [logterm: 1, index: 2] sent MsgVote request to 2 at term 1
-INFO 1 [logterm: 1, index: 2] sent MsgVote request to 3 at term 1
-
-stabilize
-----
-> 1 handling Ready
- Ready MustSync=true:
- Lead:0 State:StateCandidate
- HardState Term:1 Vote:1 Commit:2
- Messages:
- 1->2 MsgVote Term:1 Log:1/2
- 1->3 MsgVote Term:1 Log:1/2
-> 2 receiving messages
- 1->2 MsgVote Term:1 Log:1/2
- INFO 2 [term: 0] received a MsgVote message with higher term from 1 [term: 1]
- INFO 2 became follower at term 1
- INFO 2 [logterm: 1, index: 2, vote: 0] cast MsgVote for 1 [logterm: 1, index: 2] at term 1
-> 3 receiving messages
- 1->3 MsgVote Term:1 Log:1/2
- INFO 3 [term: 0] received a MsgVote message with higher term from 1 [term: 1]
- INFO 3 became follower at term 1
- INFO 3 [logterm: 1, index: 2, vote: 0] cast MsgVote for 1 [logterm: 1, index: 2] at term 1
-> 2 handling Ready
- Ready MustSync=true:
- HardState Term:1 Vote:1 Commit:2
- Messages:
- 2->1 MsgVoteResp Term:1 Log:0/0
-> 3 handling Ready
- Ready MustSync=true:
- HardState Term:1 Vote:1 Commit:2
- Messages:
- 3->1 MsgVoteResp Term:1 Log:0/0
-> 1 receiving messages
- 2->1 MsgVoteResp Term:1 Log:0/0
- INFO 1 received MsgVoteResp from 2 at term 1
- INFO 1 has received 2 MsgVoteResp votes and 0 vote rejections
- INFO 1 became leader at term 1
- 3->1 MsgVoteResp Term:1 Log:0/0
-> 1 handling Ready
- Ready MustSync=true:
- Lead:1 State:StateLeader
- Entries:
- 1/3 EntryNormal ""
- Messages:
- 1->2 MsgApp Term:1 Log:1/2 Commit:2 Entries:[1/3 EntryNormal ""]
- 1->3 MsgApp Term:1 Log:1/2 Commit:2 Entries:[1/3 EntryNormal ""]
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/2 Commit:2 Entries:[1/3 EntryNormal ""]
-> 3 receiving messages
- 1->3 MsgApp Term:1 Log:1/2 Commit:2 Entries:[1/3 EntryNormal ""]
-> 2 handling Ready
- Ready MustSync=true:
- Lead:1 State:StateFollower
- Entries:
- 1/3 EntryNormal ""
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/3
-> 3 handling Ready
- Ready MustSync=true:
- Lead:1 State:StateFollower
- Entries:
- 1/3 EntryNormal ""
- Messages:
- 3->1 MsgAppResp Term:1 Log:0/3
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/3
- 3->1 MsgAppResp Term:1 Log:0/3
-> 1 handling Ready
- Ready MustSync=false:
- HardState Term:1 Vote:1 Commit:3
- CommittedEntries:
- 1/3 EntryNormal ""
- Messages:
- 1->2 MsgApp Term:1 Log:1/3 Commit:3
- 1->3 MsgApp Term:1 Log:1/3 Commit:3
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/3 Commit:3
-> 3 receiving messages
- 1->3 MsgApp Term:1 Log:1/3 Commit:3
-> 2 handling Ready
- Ready MustSync=false:
- HardState Term:1 Vote:1 Commit:3
- CommittedEntries:
- 1/3 EntryNormal ""
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/3
-> 3 handling Ready
- Ready MustSync=false:
- HardState Term:1 Vote:1 Commit:3
- CommittedEntries:
- 1/3 EntryNormal ""
- Messages:
- 3->1 MsgAppResp Term:1 Log:0/3
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/3
- 3->1 MsgAppResp Term:1 Log:0/3
diff --git a/raft/testdata/campaign_learner_must_vote.txt b/raft/testdata/campaign_learner_must_vote.txt
deleted file mode 100644
index 55d42aa436e..00000000000
--- a/raft/testdata/campaign_learner_must_vote.txt
+++ /dev/null
@@ -1,152 +0,0 @@
-# Regression test that verifies that learners can vote. This matters when a
-# candidate asks a learner to vote because the candidate's config (which may be
-# more recent than the learner's) already lists that learner as a voter. If
-# learners that are actually voters but don't know it yet refused to vote in
-# that situation, the raft group could end up unavailable despite a quorum of
-# voters (as of the latest config) being available.
-#
-# See:
-# https://github.com/etcd-io/etcd/pull/10998
-
-# Turn output off during boilerplate.
-log-level none
-----
-ok
-
-# Bootstrap three nodes.
-add-nodes 3 voters=(1,2) learners=(3) index=2
-----
-ok
-
-# n1 gets to be leader.
-campaign 1
-----
-ok
-
-stabilize
-----
-ok (quiet)
-
-# Propose a conf change on n1 that promotes n3 to voter.
-propose-conf-change 1
-v3
-----
-ok
-
-# Commit and fully apply said conf change. n1 and n2 now consider n3 a voter.
-stabilize 1 2
-----
-ok (quiet)
-
-# Drop all inflight messages to 3. We don't want it to be caught up when it is
-# asked to vote.
-deliver-msgs drop=(3)
-----
-ok (quiet)
-
-# We now pretend that n1 is dead, and n2 is trying to become leader.
-
-log-level debug
-----
-ok
-
-campaign 2
-----
-INFO 2 is starting a new election at term 1
-INFO 2 became candidate at term 2
-INFO 2 received MsgVoteResp from 2 at term 2
-INFO 2 [logterm: 1, index: 4] sent MsgVote request to 1 at term 2
-INFO 2 [logterm: 1, index: 4] sent MsgVote request to 3 at term 2
-
-# Send out the MsgVote requests.
-process-ready 2
-----
-Ready MustSync=true:
-Lead:0 State:StateCandidate
-HardState Term:2 Vote:2 Commit:4
-Messages:
-2->1 MsgVote Term:2 Log:1/4
-2->3 MsgVote Term:2 Log:1/4
-
-# n2 is now campaigning while n1 is down (does not respond). The latest config
-# has n3 as a voter, but n3 doesn't even have the corresponding conf change in
-# its log. Still, it casts a vote for n2, which can in turn become leader and
-# catch up n3.
-stabilize 3
-----
-> 3 receiving messages
- 2->3 MsgVote Term:2 Log:1/4
- INFO 3 [term: 1] received a MsgVote message with higher term from 2 [term: 2]
- INFO 3 became follower at term 2
- INFO 3 [logterm: 1, index: 3, vote: 0] cast MsgVote for 2 [logterm: 1, index: 4] at term 2
-> 3 handling Ready
- Ready MustSync=true:
- Lead:0 State:StateFollower
- HardState Term:2 Vote:2 Commit:3
- Messages:
- 3->2 MsgVoteResp Term:2 Log:0/0
-
-stabilize 2 3
-----
-> 2 receiving messages
- 3->2 MsgVoteResp Term:2 Log:0/0
- INFO 2 received MsgVoteResp from 3 at term 2
- INFO 2 has received 2 MsgVoteResp votes and 0 vote rejections
- INFO 2 became leader at term 2
-> 2 handling Ready
- Ready MustSync=true:
- Lead:2 State:StateLeader
- Entries:
- 2/5 EntryNormal ""
- Messages:
- 2->1 MsgApp Term:2 Log:1/4 Commit:4 Entries:[2/5 EntryNormal ""]
- 2->3 MsgApp Term:2 Log:1/4 Commit:4 Entries:[2/5 EntryNormal ""]
-> 3 receiving messages
- 2->3 MsgApp Term:2 Log:1/4 Commit:4 Entries:[2/5 EntryNormal ""]
- DEBUG 3 [logterm: 0, index: 4] rejected MsgApp [logterm: 1, index: 4] from 2
-> 3 handling Ready
- Ready MustSync=false:
- Lead:2 State:StateFollower
- Messages:
- 3->2 MsgAppResp Term:2 Log:1/4 Rejected (Hint: 3)
-> 2 receiving messages
- 3->2 MsgAppResp Term:2 Log:1/4 Rejected (Hint: 3)
- DEBUG 2 received MsgAppResp(rejected, hint: (index 3, term 1)) from 3 for index 4
- DEBUG 2 decreased progress of 3 to [StateProbe match=0 next=4]
-> 2 handling Ready
- Ready MustSync=false:
- Messages:
- 2->3 MsgApp Term:2 Log:1/3 Commit:4 Entries:[1/4 EntryConfChangeV2 v3, 2/5 EntryNormal ""]
-> 3 receiving messages
- 2->3 MsgApp Term:2 Log:1/3 Commit:4 Entries:[1/4 EntryConfChangeV2 v3, 2/5 EntryNormal ""]
-> 3 handling Ready
- Ready MustSync=true:
- HardState Term:2 Vote:2 Commit:4
- Entries:
- 1/4 EntryConfChangeV2 v3
- 2/5 EntryNormal ""
- CommittedEntries:
- 1/4 EntryConfChangeV2 v3
- Messages:
- 3->2 MsgAppResp Term:2 Log:0/5
- INFO 3 switched to configuration voters=(1 2 3)
-> 2 receiving messages
- 3->2 MsgAppResp Term:2 Log:0/5
-> 2 handling Ready
- Ready MustSync=false:
- HardState Term:2 Vote:2 Commit:5
- CommittedEntries:
- 2/5 EntryNormal ""
- Messages:
- 2->3 MsgApp Term:2 Log:2/5 Commit:5
-> 3 receiving messages
- 2->3 MsgApp Term:2 Log:2/5 Commit:5
-> 3 handling Ready
- Ready MustSync=false:
- HardState Term:2 Vote:2 Commit:5
- CommittedEntries:
- 2/5 EntryNormal ""
- Messages:
- 3->2 MsgAppResp Term:2 Log:0/5
-> 2 receiving messages
- 3->2 MsgAppResp Term:2 Log:0/5
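For application-side context: the promotion proposed at index 4 above (the `EntryConfChangeV2 v3` entry) is an ordinary ConfChangeV2 carrying a single ConfChangeAddNode, which promotes an existing learner to voter. A minimal sketch of driving the same change through the public API, assuming the pre-split import path go.etcd.io/etcd/raft/v3 and an already-running raft.Node; the package and function names are illustrative only, not part of the library:

```go
package confchangesketch

import (
	"context"

	"go.etcd.io/etcd/raft/v3"
	"go.etcd.io/etcd/raft/v3/raftpb"
)

// promoteLearner proposes adding `id` as a voter. If `id` is currently a
// learner (as n3 is above), the applied change promotes it in place.
func promoteLearner(ctx context.Context, n raft.Node, id uint64) error {
	cc := raftpb.ConfChangeV2{
		Changes: []raftpb.ConfChangeSingle{
			{Type: raftpb.ConfChangeAddNode, NodeID: id},
		},
	}
	return n.ProposeConfChange(ctx, cc)
}
```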
diff --git a/raft/testdata/confchange_v1_add_single.txt b/raft/testdata/confchange_v1_add_single.txt
deleted file mode 100644
index d9cc1a7b1c6..00000000000
--- a/raft/testdata/confchange_v1_add_single.txt
+++ /dev/null
@@ -1,97 +0,0 @@
-# Run a V1 membership change that adds a single voter.
-
-# Bootstrap n1.
-add-nodes 1 voters=(1) index=2
-----
-INFO 1 switched to configuration voters=(1)
-INFO 1 became follower at term 0
-INFO newRaft 1 [peers: [1], term: 0, commit: 2, applied: 2, lastindex: 2, lastterm: 1]
-
-campaign 1
-----
-INFO 1 is starting a new election at term 0
-INFO 1 became candidate at term 1
-INFO 1 received MsgVoteResp from 1 at term 1
-INFO 1 became leader at term 1
-
-# Add v2 (with an auto transition).
-propose-conf-change 1 v1=true
-v2
-----
-ok
-
-# Pull n2 out of thin air.
-add-nodes 1
-----
-INFO 2 switched to configuration voters=()
-INFO 2 became follower at term 0
-INFO newRaft 2 [peers: [], term: 0, commit: 0, applied: 0, lastindex: 0, lastterm: 0]
-
-# n1 commits the conf change using itself as commit quorum, immediately transitions into
-# the final config, and catches up n2. Note that it's using an EntryConfChange, not an
-# EntryConfChangeV2, so this is compatible with nodes that don't know about V2 conf changes.
-stabilize
-----
-> 1 handling Ready
- Ready MustSync=true:
- Lead:1 State:StateLeader
- HardState Term:1 Vote:1 Commit:4
- Entries:
- 1/3 EntryNormal ""
- 1/4 EntryConfChange v2
- CommittedEntries:
- 1/3 EntryNormal ""
- 1/4 EntryConfChange v2
- INFO 1 switched to configuration voters=(1 2)
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->2 MsgApp Term:1 Log:1/3 Commit:4 Entries:[1/4 EntryConfChange v2]
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/3 Commit:4 Entries:[1/4 EntryConfChange v2]
- INFO 2 [term: 0] received a MsgApp message with higher term from 1 [term: 1]
- INFO 2 became follower at term 1
- DEBUG 2 [logterm: 0, index: 3] rejected MsgApp [logterm: 1, index: 3] from 1
-> 2 handling Ready
- Ready MustSync=true:
- Lead:1 State:StateFollower
- HardState Term:1 Commit:0
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/3 Rejected (Hint: 0)
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/3 Rejected (Hint: 0)
- DEBUG 1 received MsgAppResp(rejected, hint: (index 0, term 0)) from 2 for index 3
- DEBUG 1 decreased progress of 2 to [StateProbe match=0 next=1]
- DEBUG 1 [firstindex: 3, commit: 4] sent snapshot[index: 4, term: 1] to 2 [StateProbe match=0 next=1]
- DEBUG 1 paused sending replication messages to 2 [StateSnapshot match=0 next=1 paused pendingSnap=4]
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->2 MsgSnap Term:1 Log:0/0 Snapshot: Index:4 Term:1 ConfState:Voters:[1 2] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false
-> 2 receiving messages
- 1->2 MsgSnap Term:1 Log:0/0 Snapshot: Index:4 Term:1 ConfState:Voters:[1 2] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false
- INFO log [committed=0, applied=0, unstable.offset=1, len(unstable.Entries)=0] starts to restore snapshot [index: 4, term: 1]
- INFO 2 switched to configuration voters=(1 2)
- INFO 2 [commit: 4, lastindex: 4, lastterm: 1] restored snapshot [index: 4, term: 1]
- INFO 2 [commit: 4] restored snapshot [index: 4, term: 1]
-> 2 handling Ready
- Ready MustSync=false:
- HardState Term:1 Commit:4
- Snapshot Index:4 Term:1 ConfState:Voters:[1 2] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/4
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/4
- DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 2 [StateSnapshot match=4 next=5 paused pendingSnap=4]
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->2 MsgApp Term:1 Log:1/4 Commit:4
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/4 Commit:4
-> 2 handling Ready
- Ready MustSync=false:
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/4
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/4
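The transcript above exercises the legacy V1 proposal path (note the EntryConfChange entries rather than EntryConfChangeV2). A rough equivalent through the public API is sketched below, under the same go.etcd.io/etcd/raft/v3 import-path assumption; applyCommitted is a hypothetical helper that just shows where Node.ApplyConfChange sits relative to the "switched to configuration" log lines:

```go
package confchangesketch

import (
	"context"

	"go.etcd.io/etcd/raft/v3"
	"go.etcd.io/etcd/raft/v3/raftpb"
)

// addVoterV1 proposes a legacy (V1) conf change, encoded as EntryConfChange,
// which remains understandable by nodes that predate V2 conf changes.
func addVoterV1(ctx context.Context, n raft.Node, id uint64) error {
	cc := raftpb.ConfChange{
		Type:   raftpb.ConfChangeAddNode,
		NodeID: id,
	}
	return n.ProposeConfChange(ctx, cc)
}

// applyCommitted is called by the application when it encounters the conf
// change among a Ready's CommittedEntries; the returned ConfState is what
// the application persists alongside its snapshots.
func applyCommitted(n raft.Node, ent raftpb.Entry) (*raftpb.ConfState, error) {
	var cc raftpb.ConfChange
	if err := cc.Unmarshal(ent.Data); err != nil {
		return nil, err
	}
	return n.ApplyConfChange(cc), nil
}
```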
diff --git a/raft/testdata/confchange_v1_remove_leader.txt b/raft/testdata/confchange_v1_remove_leader.txt
deleted file mode 100644
index 8afe8cdafb2..00000000000
--- a/raft/testdata/confchange_v1_remove_leader.txt
+++ /dev/null
@@ -1,224 +0,0 @@
-# We'll turn this back on after the boilerplate.
-log-level none
-----
-ok
-
-# Run a V1 membership change that removes the leader.
-# Bootstrap n1, n2, n3.
-add-nodes 3 voters=(1,2,3) index=2
-----
-ok
-
-campaign 1
-----
-ok
-
-stabilize
-----
-ok (quiet)
-
-log-level debug
-----
-ok
-
-# Start removing n1.
-propose-conf-change 1 v1=true
-r1
-----
-ok
-
-# Propose an extra entry which will be sent out together with the conf change.
-propose 1 foo
-----
-ok
-
-# Send out the corresponding appends.
-process-ready 1
-----
-Ready MustSync=true:
-Entries:
-1/4 EntryConfChange r1
-1/5 EntryNormal "foo"
-Messages:
-1->2 MsgApp Term:1 Log:1/3 Commit:3 Entries:[1/4 EntryConfChange r1]
-1->3 MsgApp Term:1 Log:1/3 Commit:3 Entries:[1/4 EntryConfChange r1]
-1->2 MsgApp Term:1 Log:1/4 Commit:3 Entries:[1/5 EntryNormal "foo"]
-1->3 MsgApp Term:1 Log:1/4 Commit:3 Entries:[1/5 EntryNormal "foo"]
-
-# Send response from n2 (which is enough to commit the entries so far next time
-# n1 runs).
-stabilize 2
-----
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/3 Commit:3 Entries:[1/4 EntryConfChange r1]
- 1->2 MsgApp Term:1 Log:1/4 Commit:3 Entries:[1/5 EntryNormal "foo"]
-> 2 handling Ready
- Ready MustSync=true:
- Entries:
- 1/4 EntryConfChange r1
- 1/5 EntryNormal "foo"
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/4
- 2->1 MsgAppResp Term:1 Log:0/5
-
-# Put another entry in n1's log.
-propose 1 bar
-----
-ok
-
-# n1 applies the conf change, so it has now removed itself. But it still has
-# an uncommitted entry in the log. If the leader unconditionally counted itself
-# as part of the commit quorum, we'd be in trouble. In the block below, we see
-# it send out appends to the other nodes for the 'bar' entry.
-stabilize 1
-----
-> 1 handling Ready
- Ready MustSync=true:
- Entries:
- 1/6 EntryNormal "bar"
- Messages:
- 1->2 MsgApp Term:1 Log:1/5 Commit:3 Entries:[1/6 EntryNormal "bar"]
- 1->3 MsgApp Term:1 Log:1/5 Commit:3 Entries:[1/6 EntryNormal "bar"]
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/4
- 2->1 MsgAppResp Term:1 Log:0/5
-> 1 handling Ready
- Ready MustSync=false:
- HardState Term:1 Vote:1 Commit:5
- CommittedEntries:
- 1/4 EntryConfChange r1
- 1/5 EntryNormal "foo"
- Messages:
- 1->2 MsgApp Term:1 Log:1/6 Commit:4
- 1->3 MsgApp Term:1 Log:1/6 Commit:4
- 1->2 MsgApp Term:1 Log:1/6 Commit:5
- 1->3 MsgApp Term:1 Log:1/6 Commit:5
- INFO 1 switched to configuration voters=(2 3)
-
-# n2 responds, n3 doesn't yet. Quorum for 'bar' should not be reached...
-stabilize 2
-----
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/5 Commit:3 Entries:[1/6 EntryNormal "bar"]
- 1->2 MsgApp Term:1 Log:1/6 Commit:4
- 1->2 MsgApp Term:1 Log:1/6 Commit:5
-> 2 handling Ready
- Ready MustSync=true:
- HardState Term:1 Vote:1 Commit:5
- Entries:
- 1/6 EntryNormal "bar"
- CommittedEntries:
- 1/4 EntryConfChange r1
- 1/5 EntryNormal "foo"
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/6
- 2->1 MsgAppResp Term:1 Log:0/6
- 2->1 MsgAppResp Term:1 Log:0/6
- INFO 2 switched to configuration voters=(2 3)
-
-# ... which thankfully is what we see on the leader.
-stabilize 1
-----
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/6
- 2->1 MsgAppResp Term:1 Log:0/6
- 2->1 MsgAppResp Term:1 Log:0/6
-
-# When n3 responds, quorum is reached and everything falls into place.
-stabilize
-----
-> 3 receiving messages
- 1->3 MsgApp Term:1 Log:1/3 Commit:3 Entries:[1/4 EntryConfChange r1]
- 1->3 MsgApp Term:1 Log:1/4 Commit:3 Entries:[1/5 EntryNormal "foo"]
- 1->3 MsgApp Term:1 Log:1/5 Commit:3 Entries:[1/6 EntryNormal "bar"]
- 1->3 MsgApp Term:1 Log:1/6 Commit:4
- 1->3 MsgApp Term:1 Log:1/6 Commit:5
-> 3 handling Ready
- Ready MustSync=true:
- HardState Term:1 Vote:1 Commit:5
- Entries:
- 1/4 EntryConfChange r1
- 1/5 EntryNormal "foo"
- 1/6 EntryNormal "bar"
- CommittedEntries:
- 1/4 EntryConfChange r1
- 1/5 EntryNormal "foo"
- Messages:
- 3->1 MsgAppResp Term:1 Log:0/4
- 3->1 MsgAppResp Term:1 Log:0/5
- 3->1 MsgAppResp Term:1 Log:0/6
- 3->1 MsgAppResp Term:1 Log:0/6
- 3->1 MsgAppResp Term:1 Log:0/6
- INFO 3 switched to configuration voters=(2 3)
-> 1 receiving messages
- 3->1 MsgAppResp Term:1 Log:0/4
- 3->1 MsgAppResp Term:1 Log:0/5
- 3->1 MsgAppResp Term:1 Log:0/6
- 3->1 MsgAppResp Term:1 Log:0/6
- 3->1 MsgAppResp Term:1 Log:0/6
-> 1 handling Ready
- Ready MustSync=false:
- HardState Term:1 Vote:1 Commit:6
- CommittedEntries:
- 1/6 EntryNormal "bar"
- Messages:
- 1->2 MsgApp Term:1 Log:1/6 Commit:6
- 1->3 MsgApp Term:1 Log:1/6 Commit:6
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/6 Commit:6
-> 3 receiving messages
- 1->3 MsgApp Term:1 Log:1/6 Commit:6
-> 2 handling Ready
- Ready MustSync=false:
- HardState Term:1 Vote:1 Commit:6
- CommittedEntries:
- 1/6 EntryNormal "bar"
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/6
-> 3 handling Ready
- Ready MustSync=false:
- HardState Term:1 Vote:1 Commit:6
- CommittedEntries:
- 1/6 EntryNormal "bar"
- Messages:
- 3->1 MsgAppResp Term:1 Log:0/6
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/6
- 3->1 MsgAppResp Term:1 Log:0/6
-
-# However, not all is well. n1 is still leader but unconditionally drops all
-# proposals on the floor, so we're effectively stuck if it still heartbeats
-# its followers...
-propose 1 baz
-----
-raft proposal dropped
-
-tick-heartbeat 1
-----
-ok
-
-# ... which, uh oh, it does.
-# TODO(tbg): change behavior so that a leader that is removed immediately steps
-# down, and initiates an optimistic handover.
-stabilize
-----
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:6
- 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:6
-> 2 receiving messages
- 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:6
-> 3 receiving messages
- 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:6
-> 2 handling Ready
- Ready MustSync=false:
- Messages:
- 2->1 MsgHeartbeatResp Term:1 Log:0/0
-> 3 handling Ready
- Ready MustSync=false:
- Messages:
- 3->1 MsgHeartbeatResp Term:1 Log:0/0
-> 1 receiving messages
- 2->1 MsgHeartbeatResp Term:1 Log:0/0
- 3->1 MsgHeartbeatResp Term:1 Log:0/0
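The removed leader lingering above is ultimately the application's problem to notice: raft only reports the new membership via the ConfState returned from Node.ApplyConfChange. Below is a hypothetical helper (not part of the raft package, same import-path assumption as earlier) that an application could use to detect its own removal after applying the r1 entry:

```go
package confchangesketch

import "go.etcd.io/etcd/raft/v3/raftpb"

// stillMember reports whether `self` appears in the configuration returned by
// Node.ApplyConfChange. After n1 applies the r1 entry above it would see false
// and could begin an orderly shutdown (or hand off leadership) rather than
// lingering as a removed leader that drops every proposal.
func stillMember(cs *raftpb.ConfState, self uint64) bool {
	for _, id := range cs.Voters {
		if id == self {
			return true
		}
	}
	for _, id := range cs.Learners {
		if id == self {
			return true
		}
	}
	return false
}
```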
diff --git a/raft/testdata/confchange_v2_add_double_auto.txt b/raft/testdata/confchange_v2_add_double_auto.txt
deleted file mode 100644
index 0a5e205bf0d..00000000000
--- a/raft/testdata/confchange_v2_add_double_auto.txt
+++ /dev/null
@@ -1,408 +0,0 @@
-# Run a V2 membership change that adds two voters at once and auto-leaves the
-# joint configuration. (Because more than one change is being made atomically,
-# joint consensus is used, just as if a joint transition had been requested
-# explicitly; the only difference is that the joint config is left automatically.)
-
-# Bootstrap n1.
-add-nodes 1 voters=(1) index=2
-----
-INFO 1 switched to configuration voters=(1)
-INFO 1 became follower at term 0
-INFO newRaft 1 [peers: [1], term: 0, commit: 2, applied: 2, lastindex: 2, lastterm: 1]
-
-campaign 1
-----
-INFO 1 is starting a new election at term 0
-INFO 1 became candidate at term 1
-INFO 1 received MsgVoteResp from 1 at term 1
-INFO 1 became leader at term 1
-
-propose-conf-change 1 transition=auto
-v2 v3
-----
-ok
-
-# Add two "empty" nodes to the cluster, n2 and n3.
-add-nodes 2
-----
-INFO 2 switched to configuration voters=()
-INFO 2 became follower at term 0
-INFO newRaft 2 [peers: [], term: 0, commit: 0, applied: 0, lastindex: 0, lastterm: 0]
-INFO 3 switched to configuration voters=()
-INFO 3 became follower at term 0
-INFO newRaft 3 [peers: [], term: 0, commit: 0, applied: 0, lastindex: 0, lastterm: 0]
-
-# n1 immediately gets to commit & apply the conf change using only itself. We see that
-# it starts transitioning out of that joint configuration (though we will only see that
-# proposal in the next ready handling loop, when it is emitted). We also see that this
-# is using joint consensus, which it has to since we're carrying out two additions at
-# once.
-process-ready 1
-----
-Ready MustSync=true:
-Lead:1 State:StateLeader
-HardState Term:1 Vote:1 Commit:4
-Entries:
-1/3 EntryNormal ""
-1/4 EntryConfChangeV2 v2 v3
-CommittedEntries:
-1/3 EntryNormal ""
-1/4 EntryConfChangeV2 v2 v3
-INFO 1 switched to configuration voters=(1 2 3)&&(1) autoleave
-INFO initiating automatic transition out of joint configuration voters=(1 2 3)&&(1) autoleave
-
-# n1 immediately probes n2 and n3.
-stabilize 1
-----
-> 1 handling Ready
- Ready MustSync=true:
- Entries:
- 1/5 EntryConfChangeV2
- Messages:
- 1->2 MsgApp Term:1 Log:1/3 Commit:4 Entries:[1/4 EntryConfChangeV2 v2 v3]
- 1->3 MsgApp Term:1 Log:1/3 Commit:4 Entries:[1/4 EntryConfChangeV2 v2 v3]
-
-# First, play out the whole interaction between n1 and n2. We see n1's probe to
-# n2 get rejected (since n2 needs a snapshot); the snapshot is delivered at which
-# point n2 switches to the correct config, and n1 catches it up. This notably
-# includes the empty conf change which gets committed and applied by both and
-# which transitions them out of their joint configuration into the final one (1 2 3).
-stabilize 1 2
-----
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/3 Commit:4 Entries:[1/4 EntryConfChangeV2 v2 v3]
- INFO 2 [term: 0] received a MsgApp message with higher term from 1 [term: 1]
- INFO 2 became follower at term 1
- DEBUG 2 [logterm: 0, index: 3] rejected MsgApp [logterm: 1, index: 3] from 1
-> 2 handling Ready
- Ready MustSync=true:
- Lead:1 State:StateFollower
- HardState Term:1 Commit:0
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/3 Rejected (Hint: 0)
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/3 Rejected (Hint: 0)
- DEBUG 1 received MsgAppResp(rejected, hint: (index 0, term 0)) from 2 for index 3
- DEBUG 1 decreased progress of 2 to [StateProbe match=0 next=1]
- DEBUG 1 [firstindex: 3, commit: 4] sent snapshot[index: 4, term: 1] to 2 [StateProbe match=0 next=1]
- DEBUG 1 paused sending replication messages to 2 [StateSnapshot match=0 next=1 paused pendingSnap=4]
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->2 MsgSnap Term:1 Log:0/0 Snapshot: Index:4 Term:1 ConfState:Voters:[1 2 3] VotersOutgoing:[1] Learners:[] LearnersNext:[] AutoLeave:true
-> 2 receiving messages
- 1->2 MsgSnap Term:1 Log:0/0 Snapshot: Index:4 Term:1 ConfState:Voters:[1 2 3] VotersOutgoing:[1] Learners:[] LearnersNext:[] AutoLeave:true
- INFO log [committed=0, applied=0, unstable.offset=1, len(unstable.Entries)=0] starts to restore snapshot [index: 4, term: 1]
- INFO 2 switched to configuration voters=(1 2 3)&&(1) autoleave
- INFO 2 [commit: 4, lastindex: 4, lastterm: 1] restored snapshot [index: 4, term: 1]
- INFO 2 [commit: 4] restored snapshot [index: 4, term: 1]
-> 2 handling Ready
- Ready MustSync=false:
- HardState Term:1 Commit:4
- Snapshot Index:4 Term:1 ConfState:Voters:[1 2 3] VotersOutgoing:[1] Learners:[] LearnersNext:[] AutoLeave:true
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/4
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/4
- DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 2 [StateSnapshot match=4 next=5 paused pendingSnap=4]
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->2 MsgApp Term:1 Log:1/4 Commit:4 Entries:[1/5 EntryConfChangeV2]
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/4 Commit:4 Entries:[1/5 EntryConfChangeV2]
-> 2 handling Ready
- Ready MustSync=true:
- Entries:
- 1/5 EntryConfChangeV2
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/5
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/5
-> 1 handling Ready
- Ready MustSync=false:
- HardState Term:1 Vote:1 Commit:5
- CommittedEntries:
- 1/5 EntryConfChangeV2
- Messages:
- 1->2 MsgApp Term:1 Log:1/5 Commit:5
- INFO 1 switched to configuration voters=(1 2 3)
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/5 Commit:5
-> 2 handling Ready
- Ready MustSync=false:
- HardState Term:1 Commit:5
- CommittedEntries:
- 1/5 EntryConfChangeV2
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/5
- INFO 2 switched to configuration voters=(1 2 3)
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/5
-
-# n3 immediately receives a snapshot in the final configuration.
-stabilize 1 3
-----
-> 3 receiving messages
- 1->3 MsgApp Term:1 Log:1/3 Commit:4 Entries:[1/4 EntryConfChangeV2 v2 v3]
- INFO 3 [term: 0] received a MsgApp message with higher term from 1 [term: 1]
- INFO 3 became follower at term 1
- DEBUG 3 [logterm: 0, index: 3] rejected MsgApp [logterm: 1, index: 3] from 1
-> 3 handling Ready
- Ready MustSync=true:
- Lead:1 State:StateFollower
- HardState Term:1 Commit:0
- Messages:
- 3->1 MsgAppResp Term:1 Log:0/3 Rejected (Hint: 0)
-> 1 receiving messages
- 3->1 MsgAppResp Term:1 Log:0/3 Rejected (Hint: 0)
- DEBUG 1 received MsgAppResp(rejected, hint: (index 0, term 0)) from 3 for index 3
- DEBUG 1 decreased progress of 3 to [StateProbe match=0 next=1]
- DEBUG 1 [firstindex: 3, commit: 5] sent snapshot[index: 5, term: 1] to 3 [StateProbe match=0 next=1]
- DEBUG 1 paused sending replication messages to 3 [StateSnapshot match=0 next=1 paused pendingSnap=5]
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->3 MsgSnap Term:1 Log:0/0 Snapshot: Index:5 Term:1 ConfState:Voters:[1 2 3] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false
-> 3 receiving messages
- 1->3 MsgSnap Term:1 Log:0/0 Snapshot: Index:5 Term:1 ConfState:Voters:[1 2 3] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false
- INFO log [committed=0, applied=0, unstable.offset=1, len(unstable.Entries)=0] starts to restore snapshot [index: 5, term: 1]
- INFO 3 switched to configuration voters=(1 2 3)
- INFO 3 [commit: 5, lastindex: 5, lastterm: 1] restored snapshot [index: 5, term: 1]
- INFO 3 [commit: 5] restored snapshot [index: 5, term: 1]
-> 3 handling Ready
- Ready MustSync=false:
- HardState Term:1 Commit:5
- Snapshot Index:5 Term:1 ConfState:Voters:[1 2 3] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false
- Messages:
- 3->1 MsgAppResp Term:1 Log:0/5
-> 1 receiving messages
- 3->1 MsgAppResp Term:1 Log:0/5
- DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 3 [StateSnapshot match=5 next=6 paused pendingSnap=5]
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->3 MsgApp Term:1 Log:1/5 Commit:5
-> 3 receiving messages
- 1->3 MsgApp Term:1 Log:1/5 Commit:5
-> 3 handling Ready
- Ready MustSync=false:
- Messages:
- 3->1 MsgAppResp Term:1 Log:0/5
-> 1 receiving messages
- 3->1 MsgAppResp Term:1 Log:0/5
-
-# Nothing else happens.
-stabilize
-----
-ok
-
-# Now remove two nodes. What's new here is that the leader will actually have
-# to go to a quorum to commit the transition into the joint config.
-
-propose-conf-change 1
-r2 r3
-----
-ok
-
-# n1 sends out MsgApps.
-stabilize 1
-----
-> 1 handling Ready
- Ready MustSync=true:
- Entries:
- 1/6 EntryConfChangeV2 r2 r3
- Messages:
- 1->2 MsgApp Term:1 Log:1/5 Commit:5 Entries:[1/6 EntryConfChangeV2 r2 r3]
- 1->3 MsgApp Term:1 Log:1/5 Commit:5 Entries:[1/6 EntryConfChangeV2 r2 r3]
-
-# n2, n3 ack them.
-stabilize 2 3
-----
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/5 Commit:5 Entries:[1/6 EntryConfChangeV2 r2 r3]
-> 3 receiving messages
- 1->3 MsgApp Term:1 Log:1/5 Commit:5 Entries:[1/6 EntryConfChangeV2 r2 r3]
-> 2 handling Ready
- Ready MustSync=true:
- Entries:
- 1/6 EntryConfChangeV2 r2 r3
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/6
-> 3 handling Ready
- Ready MustSync=true:
- Entries:
- 1/6 EntryConfChangeV2 r2 r3
- Messages:
- 3->1 MsgAppResp Term:1 Log:0/6
-
-# n1 gets some more proposals. This is part of a regression test: There used to
-# be a bug in which these proposals would prompt the leader to transition out of
-# the same joint state multiple times, which would cause a panic.
-propose 1 foo
-----
-ok
-
-propose 1 bar
-----
-ok
-
-# n1 switches to the joint config, then initiates a transition into the final
-# config.
-stabilize 1
-----
-> 1 handling Ready
- Ready MustSync=true:
- Entries:
- 1/7 EntryNormal "foo"
- 1/8 EntryNormal "bar"
- Messages:
- 1->2 MsgApp Term:1 Log:1/6 Commit:5 Entries:[1/7 EntryNormal "foo"]
- 1->3 MsgApp Term:1 Log:1/6 Commit:5 Entries:[1/7 EntryNormal "foo"]
- 1->2 MsgApp Term:1 Log:1/7 Commit:5 Entries:[1/8 EntryNormal "bar"]
- 1->3 MsgApp Term:1 Log:1/7 Commit:5 Entries:[1/8 EntryNormal "bar"]
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/6
- 3->1 MsgAppResp Term:1 Log:0/6
-> 1 handling Ready
- Ready MustSync=false:
- HardState Term:1 Vote:1 Commit:6
- CommittedEntries:
- 1/6 EntryConfChangeV2 r2 r3
- Messages:
- 1->2 MsgApp Term:1 Log:1/8 Commit:6
- 1->3 MsgApp Term:1 Log:1/8 Commit:6
- INFO 1 switched to configuration voters=(1)&&(1 2 3) autoleave
- INFO initiating automatic transition out of joint configuration voters=(1)&&(1 2 3) autoleave
-> 1 handling Ready
- Ready MustSync=true:
- Entries:
- 1/9 EntryConfChangeV2
-
-# n2 and n3 also switch to the joint config, and ack the transition out of it.
-stabilize 2 3
-----
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/6 Commit:5 Entries:[1/7 EntryNormal "foo"]
- 1->2 MsgApp Term:1 Log:1/7 Commit:5 Entries:[1/8 EntryNormal "bar"]
- 1->2 MsgApp Term:1 Log:1/8 Commit:6
-> 3 receiving messages
- 1->3 MsgApp Term:1 Log:1/6 Commit:5 Entries:[1/7 EntryNormal "foo"]
- 1->3 MsgApp Term:1 Log:1/7 Commit:5 Entries:[1/8 EntryNormal "bar"]
- 1->3 MsgApp Term:1 Log:1/8 Commit:6
-> 2 handling Ready
- Ready MustSync=true:
- HardState Term:1 Commit:6
- Entries:
- 1/7 EntryNormal "foo"
- 1/8 EntryNormal "bar"
- CommittedEntries:
- 1/6 EntryConfChangeV2 r2 r3
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/7
- 2->1 MsgAppResp Term:1 Log:0/8
- 2->1 MsgAppResp Term:1 Log:0/8
- INFO 2 switched to configuration voters=(1)&&(1 2 3) autoleave
-> 3 handling Ready
- Ready MustSync=true:
- HardState Term:1 Commit:6
- Entries:
- 1/7 EntryNormal "foo"
- 1/8 EntryNormal "bar"
- CommittedEntries:
- 1/6 EntryConfChangeV2 r2 r3
- Messages:
- 3->1 MsgAppResp Term:1 Log:0/7
- 3->1 MsgAppResp Term:1 Log:0/8
- 3->1 MsgAppResp Term:1 Log:0/8
- INFO 3 switched to configuration voters=(1)&&(1 2 3) autoleave
-
-# n2 and n3 also leave the joint config and the dust settles. We see at the very
-# end that n1 receives some messages from them that it refuses because it does
-# not have them in its config any more.
-stabilize
-----
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/7
- 2->1 MsgAppResp Term:1 Log:0/8
- 2->1 MsgAppResp Term:1 Log:0/8
- 3->1 MsgAppResp Term:1 Log:0/7
- 3->1 MsgAppResp Term:1 Log:0/8
- 3->1 MsgAppResp Term:1 Log:0/8
-> 1 handling Ready
- Ready MustSync=false:
- HardState Term:1 Vote:1 Commit:8
- CommittedEntries:
- 1/7 EntryNormal "foo"
- 1/8 EntryNormal "bar"
- Messages:
- 1->2 MsgApp Term:1 Log:1/8 Commit:7 Entries:[1/9 EntryConfChangeV2]
- 1->3 MsgApp Term:1 Log:1/8 Commit:7 Entries:[1/9 EntryConfChangeV2]
- 1->2 MsgApp Term:1 Log:1/9 Commit:8
- 1->3 MsgApp Term:1 Log:1/9 Commit:8
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/8 Commit:7 Entries:[1/9 EntryConfChangeV2]
- 1->2 MsgApp Term:1 Log:1/9 Commit:8
-> 3 receiving messages
- 1->3 MsgApp Term:1 Log:1/8 Commit:7 Entries:[1/9 EntryConfChangeV2]
- 1->3 MsgApp Term:1 Log:1/9 Commit:8
-> 2 handling Ready
- Ready MustSync=true:
- HardState Term:1 Commit:8
- Entries:
- 1/9 EntryConfChangeV2
- CommittedEntries:
- 1/7 EntryNormal "foo"
- 1/8 EntryNormal "bar"
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/9
- 2->1 MsgAppResp Term:1 Log:0/9
-> 3 handling Ready
- Ready MustSync=true:
- HardState Term:1 Commit:8
- Entries:
- 1/9 EntryConfChangeV2
- CommittedEntries:
- 1/7 EntryNormal "foo"
- 1/8 EntryNormal "bar"
- Messages:
- 3->1 MsgAppResp Term:1 Log:0/9
- 3->1 MsgAppResp Term:1 Log:0/9
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/9
- 2->1 MsgAppResp Term:1 Log:0/9
- 3->1 MsgAppResp Term:1 Log:0/9
- 3->1 MsgAppResp Term:1 Log:0/9
-> 1 handling Ready
- Ready MustSync=false:
- HardState Term:1 Vote:1 Commit:9
- CommittedEntries:
- 1/9 EntryConfChangeV2
- Messages:
- 1->2 MsgApp Term:1 Log:1/9 Commit:9
- 1->3 MsgApp Term:1 Log:1/9 Commit:9
- INFO 1 switched to configuration voters=(1)
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/9 Commit:9
-> 3 receiving messages
- 1->3 MsgApp Term:1 Log:1/9 Commit:9
-> 2 handling Ready
- Ready MustSync=false:
- HardState Term:1 Commit:9
- CommittedEntries:
- 1/9 EntryConfChangeV2
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/9
- INFO 2 switched to configuration voters=(1)
-> 3 handling Ready
- Ready MustSync=false:
- HardState Term:1 Commit:9
- CommittedEntries:
- 1/9 EntryConfChangeV2
- Messages:
- 3->1 MsgAppResp Term:1 Log:0/9
- INFO 3 switched to configuration voters=(1)
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/9
- raft: cannot step as peer not found
- 3->1 MsgAppResp Term:1 Log:0/9
- raft: cannot step as peer not found
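For reference, the double addition above is what a ConfChangeV2 with more than one ConfChangeSingle looks like from the application side: raft must go through a joint configuration, and because the transition is auto it appends the empty EntryConfChangeV2 that leaves it (indexes 5 and 9 above) on its own once the joint entry is applied. A sketch under the same assumptions as the earlier ones (illustrative names, pre-split import path):

```go
package confchangesketch

import (
	"context"

	"go.etcd.io/etcd/raft/v3"
	"go.etcd.io/etcd/raft/v3/raftpb"
)

// addTwoVotersAuto proposes two additions atomically. ConfChangeTransitionAuto
// (the zero value, spelled out here for clarity) lets raft pick the mechanism;
// with more than one change that means joint consensus with auto-leave.
func addTwoVotersAuto(ctx context.Context, n raft.Node, a, b uint64) error {
	cc := raftpb.ConfChangeV2{
		Transition: raftpb.ConfChangeTransitionAuto,
		Changes: []raftpb.ConfChangeSingle{
			{Type: raftpb.ConfChangeAddNode, NodeID: a},
			{Type: raftpb.ConfChangeAddNode, NodeID: b},
		},
	}
	return n.ProposeConfChange(ctx, cc)
}
```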
diff --git a/raft/testdata/confchange_v2_add_double_implicit.txt b/raft/testdata/confchange_v2_add_double_implicit.txt
deleted file mode 100644
index a93eb81cb52..00000000000
--- a/raft/testdata/confchange_v2_add_double_implicit.txt
+++ /dev/null
@@ -1,125 +0,0 @@
-# Run a V2 membership change that adds a single voter but explicitly asks for the
-# use of joint consensus (with auto-leaving).
-
-# TODO(tbg): also verify that if the leader changes while in the joint state, the
-# new leader will auto-transition out of the joint state just the same.
-
-# Bootstrap n1.
-add-nodes 1 voters=(1) index=2
-----
-INFO 1 switched to configuration voters=(1)
-INFO 1 became follower at term 0
-INFO newRaft 1 [peers: [1], term: 0, commit: 2, applied: 2, lastindex: 2, lastterm: 1]
-
-campaign 1
-----
-INFO 1 is starting a new election at term 0
-INFO 1 became candidate at term 1
-INFO 1 received MsgVoteResp from 1 at term 1
-INFO 1 became leader at term 1
-
-propose-conf-change 1 transition=implicit
-v2
-----
-ok
-
-# Add n2.
-add-nodes 1
-----
-INFO 2 switched to configuration voters=()
-INFO 2 became follower at term 0
-INFO newRaft 2 [peers: [], term: 0, commit: 0, applied: 0, lastindex: 0, lastterm: 0]
-
-# n1 commits the conf change using itself as commit quorum, then starts catching up n2.
-# When that's done, it starts auto-transitioning out. Note that the snapshots propagating
-# the joint config have the AutoLeave flag set in their config.
-stabilize 1 2
-----
-> 1 handling Ready
- Ready MustSync=true:
- Lead:1 State:StateLeader
- HardState Term:1 Vote:1 Commit:4
- Entries:
- 1/3 EntryNormal ""
- 1/4 EntryConfChangeV2 v2
- CommittedEntries:
- 1/3 EntryNormal ""
- 1/4 EntryConfChangeV2 v2
- INFO 1 switched to configuration voters=(1 2)&&(1) autoleave
- INFO initiating automatic transition out of joint configuration voters=(1 2)&&(1) autoleave
-> 1 handling Ready
- Ready MustSync=true:
- Entries:
- 1/5 EntryConfChangeV2
- Messages:
- 1->2 MsgApp Term:1 Log:1/3 Commit:4 Entries:[1/4 EntryConfChangeV2 v2]
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/3 Commit:4 Entries:[1/4 EntryConfChangeV2 v2]
- INFO 2 [term: 0] received a MsgApp message with higher term from 1 [term: 1]
- INFO 2 became follower at term 1
- DEBUG 2 [logterm: 0, index: 3] rejected MsgApp [logterm: 1, index: 3] from 1
-> 2 handling Ready
- Ready MustSync=true:
- Lead:1 State:StateFollower
- HardState Term:1 Commit:0
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/3 Rejected (Hint: 0)
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/3 Rejected (Hint: 0)
- DEBUG 1 received MsgAppResp(rejected, hint: (index 0, term 0)) from 2 for index 3
- DEBUG 1 decreased progress of 2 to [StateProbe match=0 next=1]
- DEBUG 1 [firstindex: 3, commit: 4] sent snapshot[index: 4, term: 1] to 2 [StateProbe match=0 next=1]
- DEBUG 1 paused sending replication messages to 2 [StateSnapshot match=0 next=1 paused pendingSnap=4]
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->2 MsgSnap Term:1 Log:0/0 Snapshot: Index:4 Term:1 ConfState:Voters:[1 2] VotersOutgoing:[1] Learners:[] LearnersNext:[] AutoLeave:true
-> 2 receiving messages
- 1->2 MsgSnap Term:1 Log:0/0 Snapshot: Index:4 Term:1 ConfState:Voters:[1 2] VotersOutgoing:[1] Learners:[] LearnersNext:[] AutoLeave:true
- INFO log [committed=0, applied=0, unstable.offset=1, len(unstable.Entries)=0] starts to restore snapshot [index: 4, term: 1]
- INFO 2 switched to configuration voters=(1 2)&&(1) autoleave
- INFO 2 [commit: 4, lastindex: 4, lastterm: 1] restored snapshot [index: 4, term: 1]
- INFO 2 [commit: 4] restored snapshot [index: 4, term: 1]
-> 2 handling Ready
- Ready MustSync=false:
- HardState Term:1 Commit:4
- Snapshot Index:4 Term:1 ConfState:Voters:[1 2] VotersOutgoing:[1] Learners:[] LearnersNext:[] AutoLeave:true
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/4
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/4
- DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 2 [StateSnapshot match=4 next=5 paused pendingSnap=4]
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->2 MsgApp Term:1 Log:1/4 Commit:4 Entries:[1/5 EntryConfChangeV2]
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/4 Commit:4 Entries:[1/5 EntryConfChangeV2]
-> 2 handling Ready
- Ready MustSync=true:
- Entries:
- 1/5 EntryConfChangeV2
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/5
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/5
-> 1 handling Ready
- Ready MustSync=false:
- HardState Term:1 Vote:1 Commit:5
- CommittedEntries:
- 1/5 EntryConfChangeV2
- Messages:
- 1->2 MsgApp Term:1 Log:1/5 Commit:5
- INFO 1 switched to configuration voters=(1 2)
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/5 Commit:5
-> 2 handling Ready
- Ready MustSync=false:
- HardState Term:1 Commit:5
- CommittedEntries:
- 1/5 EntryConfChangeV2
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/5
- INFO 2 switched to configuration voters=(1 2)
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/5
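The only API-visible difference between this test and the auto variant is the Transition field: here a joint configuration is requested even for a single change, while auto-leave still applies (hence the AutoLeave flag in the snapshots above). A sketch, with the same caveats and assumptions as before:

```go
package confchangesketch

import (
	"context"

	"go.etcd.io/etcd/raft/v3"
	"go.etcd.io/etcd/raft/v3/raftpb"
)

// addVoterJointImplicit forces a joint configuration for a single addition.
// The joint config carries AutoLeave, so the leader proposes the empty
// transition out of it (index 5 above) without further application input.
func addVoterJointImplicit(ctx context.Context, n raft.Node, id uint64) error {
	cc := raftpb.ConfChangeV2{
		Transition: raftpb.ConfChangeTransitionJointImplicit,
		Changes: []raftpb.ConfChangeSingle{
			{Type: raftpb.ConfChangeAddNode, NodeID: id},
		},
	}
	return n.ProposeConfChange(ctx, cc)
}
```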
diff --git a/raft/testdata/confchange_v2_add_single_auto.txt b/raft/testdata/confchange_v2_add_single_auto.txt
deleted file mode 100644
index 47c7f10b8e8..00000000000
--- a/raft/testdata/confchange_v2_add_single_auto.txt
+++ /dev/null
@@ -1,98 +0,0 @@
-# Run a V2 membership change that adds a single voter in auto mode, which means
-# that joint consensus is not used but a direct transition into the new config
-# takes place.
-
-# Bootstrap n1.
-add-nodes 1 voters=(1) index=2
-----
-INFO 1 switched to configuration voters=(1)
-INFO 1 became follower at term 0
-INFO newRaft 1 [peers: [1], term: 0, commit: 2, applied: 2, lastindex: 2, lastterm: 1]
-
-campaign 1
-----
-INFO 1 is starting a new election at term 0
-INFO 1 became candidate at term 1
-INFO 1 received MsgVoteResp from 1 at term 1
-INFO 1 became leader at term 1
-
-# Add v2 (with an auto transition).
-propose-conf-change 1
-v2
-----
-ok
-
-# Pull n2 out of thin air.
-add-nodes 1
-----
-INFO 2 switched to configuration voters=()
-INFO 2 became follower at term 0
-INFO newRaft 2 [peers: [], term: 0, commit: 0, applied: 0, lastindex: 0, lastterm: 0]
-
-# n1 commits the conf change using itself as commit quorum, immediately transitions into
-# the final config, and catches up n2.
-stabilize
-----
-> 1 handling Ready
- Ready MustSync=true:
- Lead:1 State:StateLeader
- HardState Term:1 Vote:1 Commit:4
- Entries:
- 1/3 EntryNormal ""
- 1/4 EntryConfChangeV2 v2
- CommittedEntries:
- 1/3 EntryNormal ""
- 1/4 EntryConfChangeV2 v2
- INFO 1 switched to configuration voters=(1 2)
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->2 MsgApp Term:1 Log:1/3 Commit:4 Entries:[1/4 EntryConfChangeV2 v2]
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/3 Commit:4 Entries:[1/4 EntryConfChangeV2 v2]
- INFO 2 [term: 0] received a MsgApp message with higher term from 1 [term: 1]
- INFO 2 became follower at term 1
- DEBUG 2 [logterm: 0, index: 3] rejected MsgApp [logterm: 1, index: 3] from 1
-> 2 handling Ready
- Ready MustSync=true:
- Lead:1 State:StateFollower
- HardState Term:1 Commit:0
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/3 Rejected (Hint: 0)
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/3 Rejected (Hint: 0)
- DEBUG 1 received MsgAppResp(rejected, hint: (index 0, term 0)) from 2 for index 3
- DEBUG 1 decreased progress of 2 to [StateProbe match=0 next=1]
- DEBUG 1 [firstindex: 3, commit: 4] sent snapshot[index: 4, term: 1] to 2 [StateProbe match=0 next=1]
- DEBUG 1 paused sending replication messages to 2 [StateSnapshot match=0 next=1 paused pendingSnap=4]
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->2 MsgSnap Term:1 Log:0/0 Snapshot: Index:4 Term:1 ConfState:Voters:[1 2] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false
-> 2 receiving messages
- 1->2 MsgSnap Term:1 Log:0/0 Snapshot: Index:4 Term:1 ConfState:Voters:[1 2] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false
- INFO log [committed=0, applied=0, unstable.offset=1, len(unstable.Entries)=0] starts to restore snapshot [index: 4, term: 1]
- INFO 2 switched to configuration voters=(1 2)
- INFO 2 [commit: 4, lastindex: 4, lastterm: 1] restored snapshot [index: 4, term: 1]
- INFO 2 [commit: 4] restored snapshot [index: 4, term: 1]
-> 2 handling Ready
- Ready MustSync=false:
- HardState Term:1 Commit:4
- Snapshot Index:4 Term:1 ConfState:Voters:[1 2] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/4
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/4
- DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 2 [StateSnapshot match=4 next=5 paused pendingSnap=4]
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->2 MsgApp Term:1 Log:1/4 Commit:4
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/4 Commit:4
-> 2 handling Ready
- Ready MustSync=false:
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/4
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/4
diff --git a/raft/testdata/confchange_v2_add_single_explicit.txt b/raft/testdata/confchange_v2_add_single_explicit.txt
deleted file mode 100644
index dd4a4f65467..00000000000
--- a/raft/testdata/confchange_v2_add_single_explicit.txt
+++ /dev/null
@@ -1,206 +0,0 @@
-# Run a V2 membership change that adds a single voter but explicitly asks for the
-# use of joint consensus, including wanting to transition out of the joint config
-# manually.
-
-# Bootstrap n1.
-add-nodes 1 voters=(1) index=2
-----
-INFO 1 switched to configuration voters=(1)
-INFO 1 became follower at term 0
-INFO newRaft 1 [peers: [1], term: 0, commit: 2, applied: 2, lastindex: 2, lastterm: 1]
-
-campaign 1
-----
-INFO 1 is starting a new election at term 0
-INFO 1 became candidate at term 1
-INFO 1 received MsgVoteResp from 1 at term 1
-INFO 1 became leader at term 1
-
-# Add v2 with an explicit transition.
-propose-conf-change 1 transition=explicit
-v2
-----
-ok
-
-# Pull n2 out of thin air.
-add-nodes 1
-----
-INFO 2 switched to configuration voters=()
-INFO 2 became follower at term 0
-INFO newRaft 2 [peers: [], term: 0, commit: 0, applied: 0, lastindex: 0, lastterm: 0]
-
-# n1 commits the conf change using itself as commit quorum, then starts catching up n2.
-# Everyone remains in the joint config. Note that the snapshot below has AutoLeave unset.
-stabilize 1 2
-----
-> 1 handling Ready
- Ready MustSync=true:
- Lead:1 State:StateLeader
- HardState Term:1 Vote:1 Commit:4
- Entries:
- 1/3 EntryNormal ""
- 1/4 EntryConfChangeV2 v2
- CommittedEntries:
- 1/3 EntryNormal ""
- 1/4 EntryConfChangeV2 v2
- INFO 1 switched to configuration voters=(1 2)&&(1)
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->2 MsgApp Term:1 Log:1/3 Commit:4 Entries:[1/4 EntryConfChangeV2 v2]
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/3 Commit:4 Entries:[1/4 EntryConfChangeV2 v2]
- INFO 2 [term: 0] received a MsgApp message with higher term from 1 [term: 1]
- INFO 2 became follower at term 1
- DEBUG 2 [logterm: 0, index: 3] rejected MsgApp [logterm: 1, index: 3] from 1
-> 2 handling Ready
- Ready MustSync=true:
- Lead:1 State:StateFollower
- HardState Term:1 Commit:0
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/3 Rejected (Hint: 0)
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/3 Rejected (Hint: 0)
- DEBUG 1 received MsgAppResp(rejected, hint: (index 0, term 0)) from 2 for index 3
- DEBUG 1 decreased progress of 2 to [StateProbe match=0 next=1]
- DEBUG 1 [firstindex: 3, commit: 4] sent snapshot[index: 4, term: 1] to 2 [StateProbe match=0 next=1]
- DEBUG 1 paused sending replication messages to 2 [StateSnapshot match=0 next=1 paused pendingSnap=4]
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->2 MsgSnap Term:1 Log:0/0 Snapshot: Index:4 Term:1 ConfState:Voters:[1 2] VotersOutgoing:[1] Learners:[] LearnersNext:[] AutoLeave:false
-> 2 receiving messages
- 1->2 MsgSnap Term:1 Log:0/0 Snapshot: Index:4 Term:1 ConfState:Voters:[1 2] VotersOutgoing:[1] Learners:[] LearnersNext:[] AutoLeave:false
- INFO log [committed=0, applied=0, unstable.offset=1, len(unstable.Entries)=0] starts to restore snapshot [index: 4, term: 1]
- INFO 2 switched to configuration voters=(1 2)&&(1)
- INFO 2 [commit: 4, lastindex: 4, lastterm: 1] restored snapshot [index: 4, term: 1]
- INFO 2 [commit: 4] restored snapshot [index: 4, term: 1]
-> 2 handling Ready
- Ready MustSync=false:
- HardState Term:1 Commit:4
- Snapshot Index:4 Term:1 ConfState:Voters:[1 2] VotersOutgoing:[1] Learners:[] LearnersNext:[] AutoLeave:false
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/4
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/4
- DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 2 [StateSnapshot match=4 next=5 paused pendingSnap=4]
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->2 MsgApp Term:1 Log:1/4 Commit:4
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/4 Commit:4
-> 2 handling Ready
- Ready MustSync=false:
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/4
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/4
-
-# Check that we're not allowed to change membership again while in the joint state.
-# This leads to an empty entry being proposed instead (index 5 in the stabilize block
-# below).
-propose-conf-change 1
-v3 v4 v5
-----
-INFO 1 ignoring conf change {ConfChangeTransitionAuto [{ConfChangeAddNode 3} {ConfChangeAddNode 4} {ConfChangeAddNode 5}] []} at config voters=(1 2)&&(1): must transition out of joint config first
-
-# Propose a transition out of the joint config. We'll see this at index 6 below.
-propose-conf-change 1
-----
-ok
-
-# The group commits the command and everyone switches to the final config.
-stabilize
-----
-> 1 handling Ready
- Ready MustSync=true:
- Entries:
- 1/5 EntryNormal ""
- 1/6 EntryConfChangeV2
- Messages:
- 1->2 MsgApp Term:1 Log:1/4 Commit:4 Entries:[1/5 EntryNormal ""]
- 1->2 MsgApp Term:1 Log:1/5 Commit:4 Entries:[1/6 EntryConfChangeV2]
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/4 Commit:4 Entries:[1/5 EntryNormal ""]
- 1->2 MsgApp Term:1 Log:1/5 Commit:4 Entries:[1/6 EntryConfChangeV2]
-> 2 handling Ready
- Ready MustSync=true:
- Entries:
- 1/5 EntryNormal ""
- 1/6 EntryConfChangeV2
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/5
- 2->1 MsgAppResp Term:1 Log:0/6
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/5
- 2->1 MsgAppResp Term:1 Log:0/6
-> 1 handling Ready
- Ready MustSync=false:
- HardState Term:1 Vote:1 Commit:6
- CommittedEntries:
- 1/5 EntryNormal ""
- 1/6 EntryConfChangeV2
- Messages:
- 1->2 MsgApp Term:1 Log:1/6 Commit:5
- 1->2 MsgApp Term:1 Log:1/6 Commit:6
- INFO 1 switched to configuration voters=(1 2)
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/6 Commit:5
- 1->2 MsgApp Term:1 Log:1/6 Commit:6
-> 2 handling Ready
- Ready MustSync=false:
- HardState Term:1 Commit:6
- CommittedEntries:
- 1/5 EntryNormal ""
- 1/6 EntryConfChangeV2
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/6
- 2->1 MsgAppResp Term:1 Log:0/6
- INFO 2 switched to configuration voters=(1 2)
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/6
- 2->1 MsgAppResp Term:1 Log:0/6
-
-# Check that trying to transition out again won't do anything.
-propose-conf-change 1
-----
-INFO 1 ignoring conf change {ConfChangeTransitionAuto [] []} at config voters=(1 2): not in joint state; refusing empty conf change
-
-# Finish the work for the empty entry we just proposed.
-stabilize
-----
-> 1 handling Ready
- Ready MustSync=true:
- Entries:
- 1/7 EntryNormal ""
- Messages:
- 1->2 MsgApp Term:1 Log:1/6 Commit:6 Entries:[1/7 EntryNormal ""]
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/6 Commit:6 Entries:[1/7 EntryNormal ""]
-> 2 handling Ready
- Ready MustSync=true:
- Entries:
- 1/7 EntryNormal ""
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/7
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/7
-> 1 handling Ready
- Ready MustSync=false:
- HardState Term:1 Vote:1 Commit:7
- CommittedEntries:
- 1/7 EntryNormal ""
- Messages:
- 1->2 MsgApp Term:1 Log:1/7 Commit:7
-> 2 receiving messages
- 1->2 MsgApp Term:1 Log:1/7 Commit:7
-> 2 handling Ready
- Ready MustSync=false:
- HardState Term:1 Commit:7
- CommittedEntries:
- 1/7 EntryNormal ""
- Messages:
- 2->1 MsgAppResp Term:1 Log:0/7
-> 1 receiving messages
- 2->1 MsgAppResp Term:1 Log:0/7
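With an explicit transition, leaving the joint configuration is the application's responsibility: it proposes a ConfChangeV2 with no changes, which is exactly what the bare "propose-conf-change 1" at index 6 above does. A sketch of both halves, same assumptions and illustrative names as in the earlier snippets:

```go
package confchangesketch

import (
	"context"

	"go.etcd.io/etcd/raft/v3"
	"go.etcd.io/etcd/raft/v3/raftpb"
)

// addVoterJointExplicit enters a joint configuration that raft will not leave
// on its own; further membership changes are refused until it is left.
func addVoterJointExplicit(ctx context.Context, n raft.Node, id uint64) error {
	cc := raftpb.ConfChangeV2{
		Transition: raftpb.ConfChangeTransitionJointExplicit,
		Changes: []raftpb.ConfChangeSingle{
			{Type: raftpb.ConfChangeAddNode, NodeID: id},
		},
	}
	return n.ProposeConfChange(ctx, cc)
}

// leaveJoint proposes the empty ConfChangeV2 that transitions out of the joint
// configuration once the application decides the change is complete.
func leaveJoint(ctx context.Context, n raft.Node) error {
	return n.ProposeConfChange(ctx, raftpb.ConfChangeV2{})
}
```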
diff --git a/raft/testdata/probe_and_replicate.txt b/raft/testdata/probe_and_replicate.txt
deleted file mode 100644
index bebae6ef9c8..00000000000
--- a/raft/testdata/probe_and_replicate.txt
+++ /dev/null
@@ -1,767 +0,0 @@
-# This test creates a complete Raft log configuration and demonstrates how a
-# leader probes and replicates to each of its followers. The log configuration
-# constructed is almost[*] identical to the one present in Figure 7 of the raft
-# paper (https://raft.github.io/raft.pdf), which looks like:
-#
-# 1 2 3 4 5 6 7 8 9 10 11 12
-# n1: [1][1][1][4][4][5][5][6][6][6]
-# n2: [1][1][1][4][4][5][5][6][6]
-# n3: [1][1][1][4]
-# n4: [1][1][1][4][4][5][5][6][6][6][6]
-# n5: [1][1][1][4][4][5][5][6][7][7][7][7]
-# n6: [1][1][1][4][4][4][4]
-# n7: [1][1][1][2][2][2][3][3][3][3][3]
-#
-# Once in this state, we then elect node 1 as the leader and stabilize the
-# entire raft group. This demonstrates how a newly elected leader probes for
-# matching indexes, overwrites conflicting entries, and catches up all
-# followers.
-#
-# [*] the only differences are:
-# 1. n5 is given a larger uncommitted log tail, which is used to demonstrate a
-# follower-side probing optimization.
-# 2. the log indexes are shifted by 10 in this test because add-nodes wants to
-# start with an index > 1.
-#
-
-
-# Set up the log configuration. This is mostly uninteresting, but the order of
-# the leadership changes and the set of nodes allowed to hear about each one
-# are very important. Most readers of this test can skip this section.
-log-level none
-----
-ok
-
-## Start with seven nodes.
-add-nodes 7 voters=(1,2,3,4,5,6,7) index=10
-----
-ok
-
-## Create term 1 entries.
-campaign 1
-----
-ok
-
-stabilize
-----
-ok (quiet)
-
-propose 1 prop_1_12
-----
-ok
-
-propose 1 prop_1_13
-----
-ok
-
-stabilize
-----
-ok (quiet)
-
-## Create term 2 entries.
-campaign 2
-----
-ok
-
-stabilize 2
-----
-ok (quiet)
-
-stabilize 6
-----
-ok (quiet)
-
-stabilize 2 5 7
-----
-ok (quiet)
-
-propose 2 prop_2_15
-----
-ok
-
-propose 2 prop_2_16
-----
-ok
-
-stabilize 2 7
-----
-ok (quiet)
-
-deliver-msgs drop=(1,2,3,4,5,6,7)
-----
-ok (quiet)
-
-## Create term 3 entries.
-campaign 7
-----
-ok
-
-stabilize 7
-----
-ok (quiet)
-
-stabilize 1 2 3 4 5 6
-----
-ok (quiet)
-
-stabilize 7
-----
-ok (quiet)
-
-propose 7 prop_3_18
-----
-ok
-
-propose 7 prop_3_19
-----
-ok
-
-propose 7 prop_3_20
-----
-ok
-
-propose 7 prop_3_21
-----
-ok
-
-stabilize 7
-----
-ok (quiet)
-
-deliver-msgs drop=(1,2,3,4,5,6,7)
-----
-ok (quiet)
-
-## Create term 4 entries.
-campaign 6
-----
-ok
-
-stabilize 1 2 3 4 5 6
-----
-ok (quiet)
-
-propose 6 prop_4_15
-----
-ok
-
-stabilize 1 2 4 5 6
-----
-ok (quiet)
-
-propose 6 prop_4_16
-----
-ok
-
-propose 6 prop_4_17
-----
-ok
-
-stabilize 6
-----
-ok (quiet)
-
-deliver-msgs drop=(1,2,3,4,5,6,7)
-----
-ok (quiet)
-
-## Create term 5 entries.
-campaign 5
-----
-ok
-
-stabilize 1 2 4 5
-----
-ok (quiet)
-
-propose 5 prop_5_17
-----
-ok
-
-stabilize 1 2 4 5
-----
-ok (quiet)
-
-deliver-msgs drop=(1,2,3,4,5,6,7)
-----
-ok (quiet)
-
-## Create term 6 entries.
-campaign 4
-----
-ok
-
-stabilize 1 2 4 5
-----
-ok (quiet)
-
-propose 4 prop_6_19
-----
-ok
-
-stabilize 1 2 4
-----
-ok (quiet)
-
-propose 4 prop_6_20
-----
-ok
-
-stabilize 1 4
-----
-ok (quiet)
-
-propose 4 prop_6_21
-----
-ok
-
-stabilize 4
-----
-ok (quiet)
-
-deliver-msgs drop=(1,2,3,4,5,6,7)
-----
-ok (quiet)
-
-## Create term 7 entries.
-campaign 5
-----
-ok
-
-stabilize 5
-----
-ok (quiet)
-
-stabilize 1 3 6 7
-----
-ok (quiet)
-
-stabilize 5
-----
-ok (quiet)
-
-propose 5 prop_7_20
-----
-ok
-
-propose 5 prop_7_21
-----
-ok
-
-propose 5 prop_7_22
-----
-ok
-
-stabilize 5
-----
-ok (quiet)
-
-deliver-msgs drop=(1,2,3,4,5,6,7)
-----
-ok (quiet)
-
-
-# Show the Raft log from each node.
-log-level info
-----
-ok
-
-raft-log 1
-----
-1/11 EntryNormal ""
-1/12 EntryNormal "prop_1_12"
-1/13 EntryNormal "prop_1_13"
-4/14 EntryNormal ""
-4/15 EntryNormal "prop_4_15"
-5/16 EntryNormal ""
-5/17 EntryNormal "prop_5_17"
-6/18 EntryNormal ""
-6/19 EntryNormal "prop_6_19"
-6/20 EntryNormal "prop_6_20"
-
-raft-log 2
-----
-1/11 EntryNormal ""
-1/12 EntryNormal "prop_1_12"
-1/13 EntryNormal "prop_1_13"
-4/14 EntryNormal ""
-4/15 EntryNormal "prop_4_15"
-5/16 EntryNormal ""
-5/17 EntryNormal "prop_5_17"
-6/18 EntryNormal ""
-6/19 EntryNormal "prop_6_19"
-
-raft-log 3
-----
-1/11 EntryNormal ""
-1/12 EntryNormal "prop_1_12"
-1/13 EntryNormal "prop_1_13"
-4/14 EntryNormal ""
-
-raft-log 4
-----
-1/11 EntryNormal ""
-1/12 EntryNormal "prop_1_12"
-1/13 EntryNormal "prop_1_13"
-4/14 EntryNormal ""
-4/15 EntryNormal "prop_4_15"
-5/16 EntryNormal ""
-5/17 EntryNormal "prop_5_17"
-6/18 EntryNormal ""
-6/19 EntryNormal "prop_6_19"
-6/20 EntryNormal "prop_6_20"
-6/21 EntryNormal "prop_6_21"
-
-raft-log 5
-----
-1/11 EntryNormal ""
-1/12 EntryNormal "prop_1_12"
-1/13 EntryNormal "prop_1_13"
-4/14 EntryNormal ""
-4/15 EntryNormal "prop_4_15"
-5/16 EntryNormal ""
-5/17 EntryNormal "prop_5_17"
-6/18 EntryNormal ""
-7/19 EntryNormal ""
-7/20 EntryNormal "prop_7_20"
-7/21 EntryNormal "prop_7_21"
-7/22 EntryNormal "prop_7_22"
-
-raft-log 6
-----
-1/11 EntryNormal ""
-1/12 EntryNormal "prop_1_12"
-1/13 EntryNormal "prop_1_13"
-4/14 EntryNormal ""
-4/15 EntryNormal "prop_4_15"
-4/16 EntryNormal "prop_4_16"
-4/17 EntryNormal "prop_4_17"
-
-raft-log 7
-----
-1/11 EntryNormal ""
-1/12 EntryNormal "prop_1_12"
-1/13 EntryNormal "prop_1_13"
-2/14 EntryNormal ""
-2/15 EntryNormal "prop_2_15"
-2/16 EntryNormal "prop_2_16"
-3/17 EntryNormal ""
-3/18 EntryNormal "prop_3_18"
-3/19 EntryNormal "prop_3_19"
-3/20 EntryNormal "prop_3_20"
-3/21 EntryNormal "prop_3_21"
-
-
-# Elect node 1 as leader and stabilize.
-campaign 1
-----
-INFO 1 is starting a new election at term 7
-INFO 1 became candidate at term 8
-INFO 1 received MsgVoteResp from 1 at term 8
-INFO 1 [logterm: 6, index: 20] sent MsgVote request to 2 at term 8
-INFO 1 [logterm: 6, index: 20] sent MsgVote request to 3 at term 8
-INFO 1 [logterm: 6, index: 20] sent MsgVote request to 4 at term 8
-INFO 1 [logterm: 6, index: 20] sent MsgVote request to 5 at term 8
-INFO 1 [logterm: 6, index: 20] sent MsgVote request to 6 at term 8
-INFO 1 [logterm: 6, index: 20] sent MsgVote request to 7 at term 8
-
-## Get elected.
-stabilize 1
-----
-> 1 handling Ready
- Ready MustSync=true:
- Lead:0 State:StateCandidate
- HardState Term:8 Vote:1 Commit:18
- Messages:
- 1->2 MsgVote Term:8 Log:6/20
- 1->3 MsgVote Term:8 Log:6/20
- 1->4 MsgVote Term:8 Log:6/20
- 1->5 MsgVote Term:8 Log:6/20
- 1->6 MsgVote Term:8 Log:6/20
- 1->7 MsgVote Term:8 Log:6/20
-
-stabilize 2 3 4 5 6 7
-----
-> 2 receiving messages
- 1->2 MsgVote Term:8 Log:6/20
- INFO 2 [term: 6] received a MsgVote message with higher term from 1 [term: 8]
- INFO 2 became follower at term 8
- INFO 2 [logterm: 6, index: 19, vote: 0] cast MsgVote for 1 [logterm: 6, index: 20] at term 8
-> 3 receiving messages
- 1->3 MsgVote Term:8 Log:6/20
- INFO 3 [term: 7] received a MsgVote message with higher term from 1 [term: 8]
- INFO 3 became follower at term 8
- INFO 3 [logterm: 4, index: 14, vote: 0] cast MsgVote for 1 [logterm: 6, index: 20] at term 8
-> 4 receiving messages
- 1->4 MsgVote Term:8 Log:6/20
- INFO 4 [term: 6] received a MsgVote message with higher term from 1 [term: 8]
- INFO 4 became follower at term 8
- INFO 4 [logterm: 6, index: 21, vote: 0] rejected MsgVote from 1 [logterm: 6, index: 20] at term 8
-> 5 receiving messages
- 1->5 MsgVote Term:8 Log:6/20
- INFO 5 [term: 7] received a MsgVote message with higher term from 1 [term: 8]
- INFO 5 became follower at term 8
- INFO 5 [logterm: 7, index: 22, vote: 0] rejected MsgVote from 1 [logterm: 6, index: 20] at term 8
-> 6 receiving messages
- 1->6 MsgVote Term:8 Log:6/20
- INFO 6 [term: 7] received a MsgVote message with higher term from 1 [term: 8]
- INFO 6 became follower at term 8
- INFO 6 [logterm: 4, index: 17, vote: 0] cast MsgVote for 1 [logterm: 6, index: 20] at term 8
-> 7 receiving messages
- 1->7 MsgVote Term:8 Log:6/20
- INFO 7 [term: 7] received a MsgVote message with higher term from 1 [term: 8]
- INFO 7 became follower at term 8
- INFO 7 [logterm: 3, index: 21, vote: 0] cast MsgVote for 1 [logterm: 6, index: 20] at term 8
-> 2 handling Ready
- Ready MustSync=true:
- Lead:0 State:StateFollower
- HardState Term:8 Vote:1 Commit:18
- Messages:
- 2->1 MsgVoteResp Term:8 Log:0/0
-> 3 handling Ready
- Ready MustSync=true:
- HardState Term:8 Vote:1 Commit:14
- Messages:
- 3->1 MsgVoteResp Term:8 Log:0/0
-> 4 handling Ready
- Ready MustSync=true:
- Lead:0 State:StateFollower
- HardState Term:8 Commit:18
- Messages:
- 4->1 MsgVoteResp Term:8 Log:0/0 Rejected (Hint: 0)
-> 5 handling Ready
- Ready MustSync=true:
- Lead:0 State:StateFollower
- HardState Term:8 Commit:18
- Messages:
- 5->1 MsgVoteResp Term:8 Log:0/0 Rejected (Hint: 0)
-> 6 handling Ready
- Ready MustSync=true:
- HardState Term:8 Vote:1 Commit:15
- Messages:
- 6->1 MsgVoteResp Term:8 Log:0/0
-> 7 handling Ready
- Ready MustSync=true:
- HardState Term:8 Vote:1 Commit:13
- Messages:
- 7->1 MsgVoteResp Term:8 Log:0/0
-
-stabilize 1
-----
-> 1 receiving messages
- 2->1 MsgVoteResp Term:8 Log:0/0
- INFO 1 received MsgVoteResp from 2 at term 8
- INFO 1 has received 2 MsgVoteResp votes and 0 vote rejections
- 3->1 MsgVoteResp Term:8 Log:0/0
- INFO 1 received MsgVoteResp from 3 at term 8
- INFO 1 has received 3 MsgVoteResp votes and 0 vote rejections
- 4->1 MsgVoteResp Term:8 Log:0/0 Rejected (Hint: 0)
- INFO 1 received MsgVoteResp rejection from 4 at term 8
- INFO 1 has received 3 MsgVoteResp votes and 1 vote rejections
- 5->1 MsgVoteResp Term:8 Log:0/0 Rejected (Hint: 0)
- INFO 1 received MsgVoteResp rejection from 5 at term 8
- INFO 1 has received 3 MsgVoteResp votes and 2 vote rejections
- 6->1 MsgVoteResp Term:8 Log:0/0
- INFO 1 received MsgVoteResp from 6 at term 8
- INFO 1 has received 4 MsgVoteResp votes and 2 vote rejections
- INFO 1 became leader at term 8
- 7->1 MsgVoteResp Term:8 Log:0/0
-> 1 handling Ready
- Ready MustSync=true:
- Lead:1 State:StateLeader
- Entries:
- 8/21 EntryNormal ""
- Messages:
- 1->2 MsgApp Term:8 Log:6/20 Commit:18 Entries:[8/21 EntryNormal ""]
- 1->3 MsgApp Term:8 Log:6/20 Commit:18 Entries:[8/21 EntryNormal ""]
- 1->4 MsgApp Term:8 Log:6/20 Commit:18 Entries:[8/21 EntryNormal ""]
- 1->5 MsgApp Term:8 Log:6/20 Commit:18 Entries:[8/21 EntryNormal ""]
- 1->6 MsgApp Term:8 Log:6/20 Commit:18 Entries:[8/21 EntryNormal ""]
- 1->7 MsgApp Term:8 Log:6/20 Commit:18 Entries:[8/21 EntryNormal ""]
-
-## Recover each follower, one by one.
-stabilize 1 2
-----
-> 2 receiving messages
- 1->2 MsgApp Term:8 Log:6/20 Commit:18 Entries:[8/21 EntryNormal ""]
-> 2 handling Ready
- Ready MustSync=false:
- Lead:1 State:StateFollower
- Messages:
- 2->1 MsgAppResp Term:8 Log:6/20 Rejected (Hint: 19)
-> 1 receiving messages
- 2->1 MsgAppResp Term:8 Log:6/20 Rejected (Hint: 19)
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->2 MsgApp Term:8 Log:6/19 Commit:18 Entries:[6/20 EntryNormal "prop_6_20", 8/21 EntryNormal ""]
-> 2 receiving messages
- 1->2 MsgApp Term:8 Log:6/19 Commit:18 Entries:[6/20 EntryNormal "prop_6_20", 8/21 EntryNormal ""]
-> 2 handling Ready
- Ready MustSync=true:
- Entries:
- 6/20 EntryNormal "prop_6_20"
- 8/21 EntryNormal ""
- Messages:
- 2->1 MsgAppResp Term:8 Log:0/21
-> 1 receiving messages
- 2->1 MsgAppResp Term:8 Log:0/21
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->2 MsgApp Term:8 Log:8/21 Commit:18
-> 2 receiving messages
- 1->2 MsgApp Term:8 Log:8/21 Commit:18
-> 2 handling Ready
- Ready MustSync=false:
- Messages:
- 2->1 MsgAppResp Term:8 Log:0/21
-> 1 receiving messages
- 2->1 MsgAppResp Term:8 Log:0/21
-
-stabilize 1 3
-----
-> 3 receiving messages
- 1->3 MsgApp Term:8 Log:6/20 Commit:18 Entries:[8/21 EntryNormal ""]
-> 3 handling Ready
- Ready MustSync=false:
- Lead:1 State:StateFollower
- Messages:
- 3->1 MsgAppResp Term:8 Log:4/20 Rejected (Hint: 14)
-> 1 receiving messages
- 3->1 MsgAppResp Term:8 Log:4/20 Rejected (Hint: 14)
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->3 MsgApp Term:8 Log:4/14 Commit:18 Entries:[4/15 EntryNormal "prop_4_15", 5/16 EntryNormal "", 5/17 EntryNormal "prop_5_17", 6/18 EntryNormal "", 6/19 EntryNormal "prop_6_19", 6/20 EntryNormal "prop_6_20", 8/21 EntryNormal ""]
-> 3 receiving messages
- 1->3 MsgApp Term:8 Log:4/14 Commit:18 Entries:[4/15 EntryNormal "prop_4_15", 5/16 EntryNormal "", 5/17 EntryNormal "prop_5_17", 6/18 EntryNormal "", 6/19 EntryNormal "prop_6_19", 6/20 EntryNormal "prop_6_20", 8/21 EntryNormal ""]
-> 3 handling Ready
- Ready MustSync=true:
- HardState Term:8 Vote:1 Commit:18
- Entries:
- 4/15 EntryNormal "prop_4_15"
- 5/16 EntryNormal ""
- 5/17 EntryNormal "prop_5_17"
- 6/18 EntryNormal ""
- 6/19 EntryNormal "prop_6_19"
- 6/20 EntryNormal "prop_6_20"
- 8/21 EntryNormal ""
- CommittedEntries:
- 4/15 EntryNormal "prop_4_15"
- 5/16 EntryNormal ""
- 5/17 EntryNormal "prop_5_17"
- 6/18 EntryNormal ""
- Messages:
- 3->1 MsgAppResp Term:8 Log:0/21
-> 1 receiving messages
- 3->1 MsgAppResp Term:8 Log:0/21
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->3 MsgApp Term:8 Log:8/21 Commit:18
-> 3 receiving messages
- 1->3 MsgApp Term:8 Log:8/21 Commit:18
-> 3 handling Ready
- Ready MustSync=false:
- Messages:
- 3->1 MsgAppResp Term:8 Log:0/21
-> 1 receiving messages
- 3->1 MsgAppResp Term:8 Log:0/21
-
-stabilize 1 4
-----
-> 4 receiving messages
- 1->4 MsgApp Term:8 Log:6/20 Commit:18 Entries:[8/21 EntryNormal ""]
- INFO found conflict at index 21 [existing term: 6, conflicting term: 8]
- INFO replace the unstable entries from index 21
-> 4 handling Ready
- Ready MustSync=true:
- Lead:1 State:StateFollower
- Entries:
- 8/21 EntryNormal ""
- Messages:
- 4->1 MsgAppResp Term:8 Log:0/21
-> 1 receiving messages
- 4->1 MsgAppResp Term:8 Log:0/21
-> 1 handling Ready
- Ready MustSync=false:
- HardState Term:8 Vote:1 Commit:21
- CommittedEntries:
- 6/19 EntryNormal "prop_6_19"
- 6/20 EntryNormal "prop_6_20"
- 8/21 EntryNormal ""
- Messages:
- 1->2 MsgApp Term:8 Log:8/21 Commit:21
- 1->3 MsgApp Term:8 Log:8/21 Commit:21
- 1->4 MsgApp Term:8 Log:8/21 Commit:21
-> 4 receiving messages
- 1->4 MsgApp Term:8 Log:8/21 Commit:21
-> 4 handling Ready
- Ready MustSync=false:
- HardState Term:8 Commit:21
- CommittedEntries:
- 6/19 EntryNormal "prop_6_19"
- 6/20 EntryNormal "prop_6_20"
- 8/21 EntryNormal ""
- Messages:
- 4->1 MsgAppResp Term:8 Log:0/21
-> 1 receiving messages
- 4->1 MsgAppResp Term:8 Log:0/21
-
-stabilize 1 5
-----
-> 5 receiving messages
- 1->5 MsgApp Term:8 Log:6/20 Commit:18 Entries:[8/21 EntryNormal ""]
-> 5 handling Ready
- Ready MustSync=false:
- Lead:1 State:StateFollower
- Messages:
- 5->1 MsgAppResp Term:8 Log:6/20 Rejected (Hint: 18)
-> 1 receiving messages
- 5->1 MsgAppResp Term:8 Log:6/20 Rejected (Hint: 18)
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->5 MsgApp Term:8 Log:6/18 Commit:21 Entries:[6/19 EntryNormal "prop_6_19", 6/20 EntryNormal "prop_6_20", 8/21 EntryNormal ""]
-> 5 receiving messages
- 1->5 MsgApp Term:8 Log:6/18 Commit:21 Entries:[6/19 EntryNormal "prop_6_19", 6/20 EntryNormal "prop_6_20", 8/21 EntryNormal ""]
- INFO found conflict at index 19 [existing term: 7, conflicting term: 6]
- INFO replace the unstable entries from index 19
-> 5 handling Ready
- Ready MustSync=true:
- HardState Term:8 Commit:21
- Entries:
- 6/19 EntryNormal "prop_6_19"
- 6/20 EntryNormal "prop_6_20"
- 8/21 EntryNormal ""
- CommittedEntries:
- 6/19 EntryNormal "prop_6_19"
- 6/20 EntryNormal "prop_6_20"
- 8/21 EntryNormal ""
- Messages:
- 5->1 MsgAppResp Term:8 Log:0/21
-> 1 receiving messages
- 5->1 MsgAppResp Term:8 Log:0/21
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->5 MsgApp Term:8 Log:8/21 Commit:21
-> 5 receiving messages
- 1->5 MsgApp Term:8 Log:8/21 Commit:21
-> 5 handling Ready
- Ready MustSync=false:
- Messages:
- 5->1 MsgAppResp Term:8 Log:0/21
-> 1 receiving messages
- 5->1 MsgAppResp Term:8 Log:0/21
-
-stabilize 1 6
-----
-> 6 receiving messages
- 1->6 MsgApp Term:8 Log:6/20 Commit:18 Entries:[8/21 EntryNormal ""]
-> 6 handling Ready
- Ready MustSync=false:
- Lead:1 State:StateFollower
- Messages:
- 6->1 MsgAppResp Term:8 Log:4/20 Rejected (Hint: 17)
-> 1 receiving messages
- 6->1 MsgAppResp Term:8 Log:4/20 Rejected (Hint: 17)
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->6 MsgApp Term:8 Log:4/15 Commit:21 Entries:[5/16 EntryNormal "", 5/17 EntryNormal "prop_5_17", 6/18 EntryNormal "", 6/19 EntryNormal "prop_6_19", 6/20 EntryNormal "prop_6_20", 8/21 EntryNormal ""]
-> 6 receiving messages
- 1->6 MsgApp Term:8 Log:4/15 Commit:21 Entries:[5/16 EntryNormal "", 5/17 EntryNormal "prop_5_17", 6/18 EntryNormal "", 6/19 EntryNormal "prop_6_19", 6/20 EntryNormal "prop_6_20", 8/21 EntryNormal ""]
- INFO found conflict at index 16 [existing term: 4, conflicting term: 5]
- INFO replace the unstable entries from index 16
-> 6 handling Ready
- Ready MustSync=true:
- HardState Term:8 Vote:1 Commit:21
- Entries:
- 5/16 EntryNormal ""
- 5/17 EntryNormal "prop_5_17"
- 6/18 EntryNormal ""
- 6/19 EntryNormal "prop_6_19"
- 6/20 EntryNormal "prop_6_20"
- 8/21 EntryNormal ""
- CommittedEntries:
- 5/16 EntryNormal ""
- 5/17 EntryNormal "prop_5_17"
- 6/18 EntryNormal ""
- 6/19 EntryNormal "prop_6_19"
- 6/20 EntryNormal "prop_6_20"
- 8/21 EntryNormal ""
- Messages:
- 6->1 MsgAppResp Term:8 Log:0/21
-> 1 receiving messages
- 6->1 MsgAppResp Term:8 Log:0/21
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->6 MsgApp Term:8 Log:8/21 Commit:21
-> 6 receiving messages
- 1->6 MsgApp Term:8 Log:8/21 Commit:21
-> 6 handling Ready
- Ready MustSync=false:
- Messages:
- 6->1 MsgAppResp Term:8 Log:0/21
-> 1 receiving messages
- 6->1 MsgAppResp Term:8 Log:0/21
-
-stabilize 1 7
-----
-> 7 receiving messages
- 1->7 MsgApp Term:8 Log:6/20 Commit:18 Entries:[8/21 EntryNormal ""]
-> 7 handling Ready
- Ready MustSync=false:
- Lead:1 State:StateFollower
- Messages:
- 7->1 MsgAppResp Term:8 Log:3/20 Rejected (Hint: 20)
-> 1 receiving messages
- 7->1 MsgAppResp Term:8 Log:3/20 Rejected (Hint: 20)
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->7 MsgApp Term:8 Log:1/13 Commit:21 Entries:[4/14 EntryNormal "", 4/15 EntryNormal "prop_4_15", 5/16 EntryNormal "", 5/17 EntryNormal "prop_5_17", 6/18 EntryNormal "", 6/19 EntryNormal "prop_6_19", 6/20 EntryNormal "prop_6_20", 8/21 EntryNormal ""]
-> 7 receiving messages
- 1->7 MsgApp Term:8 Log:1/13 Commit:21 Entries:[4/14 EntryNormal "", 4/15 EntryNormal "prop_4_15", 5/16 EntryNormal "", 5/17 EntryNormal "prop_5_17", 6/18 EntryNormal "", 6/19 EntryNormal "prop_6_19", 6/20 EntryNormal "prop_6_20", 8/21 EntryNormal ""]
- INFO found conflict at index 14 [existing term: 2, conflicting term: 4]
- INFO replace the unstable entries from index 14
-> 7 handling Ready
- Ready MustSync=true:
- HardState Term:8 Vote:1 Commit:21
- Entries:
- 4/14 EntryNormal ""
- 4/15 EntryNormal "prop_4_15"
- 5/16 EntryNormal ""
- 5/17 EntryNormal "prop_5_17"
- 6/18 EntryNormal ""
- 6/19 EntryNormal "prop_6_19"
- 6/20 EntryNormal "prop_6_20"
- 8/21 EntryNormal ""
- CommittedEntries:
- 4/14 EntryNormal ""
- 4/15 EntryNormal "prop_4_15"
- 5/16 EntryNormal ""
- 5/17 EntryNormal "prop_5_17"
- 6/18 EntryNormal ""
- 6/19 EntryNormal "prop_6_19"
- 6/20 EntryNormal "prop_6_20"
- 8/21 EntryNormal ""
- Messages:
- 7->1 MsgAppResp Term:8 Log:0/21
-> 1 receiving messages
- 7->1 MsgAppResp Term:8 Log:0/21
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->7 MsgApp Term:8 Log:8/21 Commit:21
-> 7 receiving messages
- 1->7 MsgApp Term:8 Log:8/21 Commit:21
-> 7 handling Ready
- Ready MustSync=false:
- Messages:
- 7->1 MsgAppResp Term:8 Log:0/21
-> 1 receiving messages
- 7->1 MsgAppResp Term:8 Log:0/21
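The conflict lines in the traces above ("found conflict at index N [existing term: X, conflicting term: Y]" followed by "replace the unstable entries from index N") come from the follower-side log reconciliation rule: scan the appended entries, find the first index whose term disagrees with what is already stored, truncate there, and append the leader's entries. The sketch below is a self-contained paraphrase of that rule for illustration only; the `entry` type and `findConflict` helper are stand-ins, not the etcd raft implementation.

```go
package main

import "fmt"

// entry is a minimal stand-in for raftpb.Entry, used only for this sketch.
type entry struct {
	Term, Index uint64
}

// findConflict returns the index of the first incoming entry that is either
// missing from the follower's log or stored under a different term; 0 means
// the incoming entries already match.
func findConflict(existing map[uint64]uint64, ents []entry) uint64 {
	for _, e := range ents {
		if term, ok := existing[e.Index]; !ok || term != e.Term {
			return e.Index
		}
	}
	return 0
}

func main() {
	// Follower 4's tail before the append: indexes 20 and 21 at term 6.
	existing := map[uint64]uint64{20: 6, 21: 6}
	// The leader appends its term-8 empty entry at index 21.
	incoming := []entry{{Term: 8, Index: 21}}

	if idx := findConflict(existing, incoming); idx != 0 {
		// Mirrors "found conflict at index 21 [existing term: 6,
		// conflicting term: 8]": truncate from idx, then append.
		fmt.Println("conflict at index", idx)
	}
}
```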
diff --git a/raft/testdata/snapshot_succeed_via_app_resp.txt b/raft/testdata/snapshot_succeed_via_app_resp.txt
deleted file mode 100644
index 96ded532cd7..00000000000
--- a/raft/testdata/snapshot_succeed_via_app_resp.txt
+++ /dev/null
@@ -1,156 +0,0 @@
-# TestSnapshotSucceedViaAppResp regression tests the situation in which a snap-
-# shot is sent to a follower at the most recent index (i.e. the snapshot index
-# is the leader's last index and also the committed index). In that situation, a bug
-# in the past left the follower in probing status until the next log entry was
-# committed.
-#
-# See https://github.com/etcd-io/etcd/pull/10308 for additional background.
-
-# Turn off output during the setup of the test.
-log-level none
-----
-ok
-
-# Start with two nodes, but the config already has a third.
-add-nodes 2 voters=(1,2,3) index=10
-----
-ok
-
-campaign 1
-----
-ok
-
-# Fully replicate everything, including the leader's empty index.
-stabilize
-----
-ok (quiet)
-
-compact 1 11
-----
-ok (quiet)
-
-# Drop inflight messages to n3.
-deliver-msgs drop=(3)
-----
-ok (quiet)
-
-# Show the Raft log messages from now on.
-log-level debug
-----
-ok
-
-status 1
-----
-1: StateReplicate match=11 next=12 inactive
-2: StateReplicate match=11 next=12
-3: StateProbe match=0 next=11 paused inactive
-
-# Add the node that will receive a snapshot (it has no state at all, does not
-# even have a config).
-add-nodes 1
-----
-INFO 3 switched to configuration voters=()
-INFO 3 became follower at term 0
-INFO newRaft 3 [peers: [], term: 0, commit: 0, applied: 0, lastindex: 0, lastterm: 0]
-
-# Time passes on the leader so that it will try the previously missing follower
-# again.
-tick-heartbeat 1
-----
-ok
-
-process-ready 1
-----
-Ready MustSync=false:
-Messages:
-1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11
-1->3 MsgHeartbeat Term:1 Log:0/0
-
-# Iterate until no more work is done by the new peer. It receives the heartbeat
-# and responds.
-stabilize 3
-----
-> 3 receiving messages
- 1->3 MsgHeartbeat Term:1 Log:0/0
- INFO 3 [term: 0] received a MsgHeartbeat message with higher term from 1 [term: 1]
- INFO 3 became follower at term 1
-> 3 handling Ready
- Ready MustSync=true:
- Lead:1 State:StateFollower
- HardState Term:1 Commit:0
- Messages:
- 3->1 MsgHeartbeatResp Term:1 Log:0/0
-
-# The leader in turn will realize that n3 needs a snapshot, which it initiates.
-stabilize 1
-----
-> 1 receiving messages
- 3->1 MsgHeartbeatResp Term:1 Log:0/0
- DEBUG 1 [firstindex: 12, commit: 11] sent snapshot[index: 11, term: 1] to 3 [StateProbe match=0 next=11]
- DEBUG 1 paused sending replication messages to 3 [StateSnapshot match=0 next=11 paused pendingSnap=11]
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->3 MsgSnap Term:1 Log:0/0 Snapshot: Index:11 Term:1 ConfState:Voters:[1 2 3] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false
-
-status 1
-----
-1: StateReplicate match=11 next=12 inactive
-2: StateReplicate match=11 next=12
-3: StateSnapshot match=0 next=11 paused pendingSnap=11
-
-# Follower applies the snapshot. Note how it reacts with a MsgAppResp upon completion.
-# The snapshot fully catches the follower up (i.e. there are no more log entries it
-# needs to apply after). The bug was that the leader failed to realize that the follower
-# was now fully caught up.
-stabilize 3
-----
-> 3 receiving messages
- 1->3 MsgSnap Term:1 Log:0/0 Snapshot: Index:11 Term:1 ConfState:Voters:[1 2 3] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false
- INFO log [committed=0, applied=0, unstable.offset=1, len(unstable.Entries)=0] starts to restore snapshot [index: 11, term: 1]
- INFO 3 switched to configuration voters=(1 2 3)
- INFO 3 [commit: 11, lastindex: 11, lastterm: 1] restored snapshot [index: 11, term: 1]
- INFO 3 [commit: 11] restored snapshot [index: 11, term: 1]
-> 3 handling Ready
- Ready MustSync=false:
- HardState Term:1 Commit:11
- Snapshot Index:11 Term:1 ConfState:Voters:[1 2 3] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false
- Messages:
- 3->1 MsgAppResp Term:1 Log:0/11
-
-# The MsgAppResp lets the leader move the follower back to replicating state.
-# Leader sends another MsgApp to communicate the updated commit index.
-stabilize 1
-----
-> 1 receiving messages
- 3->1 MsgAppResp Term:1 Log:0/11
- DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 3 [StateSnapshot match=11 next=12 paused pendingSnap=11]
-> 1 handling Ready
- Ready MustSync=false:
- Messages:
- 1->3 MsgApp Term:1 Log:1/11 Commit:11
-
-status 1
-----
-1: StateReplicate match=11 next=12 inactive
-2: StateReplicate match=11 next=12
-3: StateReplicate match=11 next=12
-
-# Let things settle.
-stabilize
-----
-> 2 receiving messages
- 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11
-> 3 receiving messages
- 1->3 MsgApp Term:1 Log:1/11 Commit:11
-> 2 handling Ready
- Ready MustSync=false:
- Messages:
- 2->1 MsgHeartbeatResp Term:1 Log:0/0
-> 3 handling Ready
- Ready MustSync=false:
- Messages:
- 3->1 MsgAppResp Term:1 Log:0/11
-> 1 receiving messages
- 2->1 MsgHeartbeatResp Term:1 Log:0/0
- 3->1 MsgAppResp Term:1 Log:0/11
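The scenario deleted above boils down to the leader's Progress bookkeeping for node 3: it enters StateSnapshot at index 11 and must return to StateReplicate as soon as the follower's MsgAppResp acknowledges that index. Below is a hedged sketch of that transition using the tracker package from this same module; the `Match >= PendingSnapshot` condition paraphrases the leader-side check referenced in https://github.com/etcd-io/etcd/pull/10308, and the concrete numbers mirror the trace above rather than coming from real node state.

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/v3/tracker"
)

func main() {
	// Leader's view of follower 3 right before sending the snapshot
	// (cf. "3: StateProbe match=0 next=11 paused inactive" above).
	pr := &tracker.Progress{
		State:     tracker.StateProbe,
		Match:     0,
		Next:      11,
		Inflights: tracker.NewInflights(256),
	}
	pr.BecomeSnapshot(11)
	fmt.Println(pr) // StateSnapshot match=0 next=11 paused pendingSnap=11 ...

	// The follower's MsgAppResp at index 11 acknowledges everything the
	// snapshot contained, so replication can resume immediately.
	if pr.MaybeUpdate(11) && pr.Match >= pr.PendingSnapshot {
		pr.BecomeProbe()
		pr.BecomeReplicate()
	}
	fmt.Println(pr) // StateReplicate match=11 next=12 ...
}
```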
diff --git a/raft/tracker/inflights.go b/raft/tracker/inflights.go
deleted file mode 100644
index 1a056341ab5..00000000000
--- a/raft/tracker/inflights.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tracker
-
-// Inflights limits the number of MsgApp (represented by the largest index
-// contained within) sent to followers but not yet acknowledged by them. Callers
-// use Full() to check whether more messages can be sent, call Add() whenever
-// they are sending a new append, and release "quota" via FreeLE() whenever an
-// ack is received.
-type Inflights struct {
- // the starting index in the buffer
- start int
- // number of inflights in the buffer
- count int
-
- // the size of the buffer
- size int
-
- // buffer contains the index of the last entry
- // inside one message.
- buffer []uint64
-}
-
-// NewInflights sets up an Inflights that allows up to 'size' inflight messages.
-func NewInflights(size int) *Inflights {
- return &Inflights{
- size: size,
- }
-}
-
-// Clone returns an *Inflights that is identical to but shares no memory with
-// the receiver.
-func (in *Inflights) Clone() *Inflights {
- ins := *in
- ins.buffer = append([]uint64(nil), in.buffer...)
- return &ins
-}
-
-// Add notifies the Inflights that a new message with the given index is being
-// dispatched. Full() must be called prior to Add() to verify that there is room
-// for one more message, and consecutive calls to Add() must provide a
-// monotonic sequence of indexes.
-func (in *Inflights) Add(inflight uint64) {
- if in.Full() {
- panic("cannot add into a Full inflights")
- }
- next := in.start + in.count
- size := in.size
- if next >= size {
- next -= size
- }
- if next >= len(in.buffer) {
- in.grow()
- }
- in.buffer[next] = inflight
- in.count++
-}
-
-// grow the inflight buffer by doubling up to inflights.size. We grow on demand
-// instead of preallocating to inflights.size to handle systems which have
-// thousands of Raft groups per process.
-func (in *Inflights) grow() {
- newSize := len(in.buffer) * 2
- if newSize == 0 {
- newSize = 1
- } else if newSize > in.size {
- newSize = in.size
- }
- newBuffer := make([]uint64, newSize)
- copy(newBuffer, in.buffer)
- in.buffer = newBuffer
-}
-
-// FreeLE frees the inflights smaller than or equal to the given `to` flight.
-func (in *Inflights) FreeLE(to uint64) {
- if in.count == 0 || to < in.buffer[in.start] {
- // out of the left side of the window
- return
- }
-
- idx := in.start
- var i int
- for i = 0; i < in.count; i++ {
- if to < in.buffer[idx] { // found the first large inflight
- break
- }
-
- // increase index and maybe rotate
- size := in.size
- if idx++; idx >= size {
- idx -= size
- }
- }
- // free i inflights and set new start index
- in.count -= i
- in.start = idx
- if in.count == 0 {
- // inflights is empty, reset the start index so that we don't grow the
- // buffer unnecessarily.
- in.start = 0
- }
-}
-
-// FreeFirstOne releases the first inflight. This is a no-op if nothing is
-// inflight.
-func (in *Inflights) FreeFirstOne() { in.FreeLE(in.buffer[in.start]) }
-
-// Full returns true if no more messages can be sent at the moment.
-func (in *Inflights) Full() bool {
- return in.count == in.size
-}
-
-// Count returns the number of inflight messages.
-func (in *Inflights) Count() int { return in.count }
-
-// reset frees all inflights.
-func (in *Inflights) reset() {
- in.count = 0
- in.start = 0
-}
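Since this whole package is being removed here, a quick hedged sketch of how a leader-side caller drives the Inflights window may help reviewers: check Full() before dispatching, Add() the last index carried by each append, and FreeLE() on acknowledgement, exactly as the doc comment above describes. The import path matches the module path used elsewhere in this tree; the indexes are made up.

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/v3/tracker"
)

func main() {
	// Allow at most three unacknowledged MsgApp messages.
	in := tracker.NewInflights(3)

	// Sender side: record the last index carried by each append.
	for _, lastIndex := range []uint64{5, 6, 7} {
		if in.Full() {
			break // throttled: wait for acks before sending more
		}
		in.Add(lastIndex)
	}
	fmt.Println(in.Full(), in.Count()) // true 3

	// An ack for everything up to index 6 frees two slots.
	in.FreeLE(6)
	fmt.Println(in.Full(), in.Count()) // false 1
}
```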
diff --git a/raft/tracker/inflights_test.go b/raft/tracker/inflights_test.go
deleted file mode 100644
index 582a373ba56..00000000000
--- a/raft/tracker/inflights_test.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tracker
-
-import (
- "reflect"
- "testing"
-)
-
-func TestInflightsAdd(t *testing.T) {
- // no rotating case
- in := &Inflights{
- size: 10,
- buffer: make([]uint64, 10),
- }
-
- for i := 0; i < 5; i++ {
- in.Add(uint64(i))
- }
-
- wantIn := &Inflights{
- start: 0,
- count: 5,
- size: 10,
- // ↑------------
- buffer: []uint64{0, 1, 2, 3, 4, 0, 0, 0, 0, 0},
- }
-
- if !reflect.DeepEqual(in, wantIn) {
- t.Fatalf("in = %+v, want %+v", in, wantIn)
- }
-
- for i := 5; i < 10; i++ {
- in.Add(uint64(i))
- }
-
- wantIn2 := &Inflights{
- start: 0,
- count: 10,
- size: 10,
- // ↑---------------------------
- buffer: []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
- }
-
- if !reflect.DeepEqual(in, wantIn2) {
- t.Fatalf("in = %+v, want %+v", in, wantIn2)
- }
-
- // rotating case
- in2 := &Inflights{
- start: 5,
- size: 10,
- buffer: make([]uint64, 10),
- }
-
- for i := 0; i < 5; i++ {
- in2.Add(uint64(i))
- }
-
- wantIn21 := &Inflights{
- start: 5,
- count: 5,
- size: 10,
- // ↑------------
- buffer: []uint64{0, 0, 0, 0, 0, 0, 1, 2, 3, 4},
- }
-
- if !reflect.DeepEqual(in2, wantIn21) {
- t.Fatalf("in = %+v, want %+v", in2, wantIn21)
- }
-
- for i := 5; i < 10; i++ {
- in2.Add(uint64(i))
- }
-
- wantIn22 := &Inflights{
- start: 5,
- count: 10,
- size: 10,
- // -------------- ↑------------
- buffer: []uint64{5, 6, 7, 8, 9, 0, 1, 2, 3, 4},
- }
-
- if !reflect.DeepEqual(in2, wantIn22) {
- t.Fatalf("in = %+v, want %+v", in2, wantIn22)
- }
-}
-
-func TestInflightFreeTo(t *testing.T) {
- // no rotating case
- in := NewInflights(10)
- for i := 0; i < 10; i++ {
- in.Add(uint64(i))
- }
-
- in.FreeLE(4)
-
- wantIn := &Inflights{
- start: 5,
- count: 5,
- size: 10,
- // ↑------------
- buffer: []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
- }
-
- if !reflect.DeepEqual(in, wantIn) {
- t.Fatalf("in = %+v, want %+v", in, wantIn)
- }
-
- in.FreeLE(8)
-
- wantIn2 := &Inflights{
- start: 9,
- count: 1,
- size: 10,
- // ↑
- buffer: []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
- }
-
- if !reflect.DeepEqual(in, wantIn2) {
- t.Fatalf("in = %+v, want %+v", in, wantIn2)
- }
-
- // rotating case
- for i := 10; i < 15; i++ {
- in.Add(uint64(i))
- }
-
- in.FreeLE(12)
-
- wantIn3 := &Inflights{
- start: 3,
- count: 2,
- size: 10,
- // ↑-----
- buffer: []uint64{10, 11, 12, 13, 14, 5, 6, 7, 8, 9},
- }
-
- if !reflect.DeepEqual(in, wantIn3) {
- t.Fatalf("in = %+v, want %+v", in, wantIn3)
- }
-
- in.FreeLE(14)
-
- wantIn4 := &Inflights{
- start: 0,
- count: 0,
- size: 10,
- // ↑
- buffer: []uint64{10, 11, 12, 13, 14, 5, 6, 7, 8, 9},
- }
-
- if !reflect.DeepEqual(in, wantIn4) {
- t.Fatalf("in = %+v, want %+v", in, wantIn4)
- }
-}
-
-func TestInflightFreeFirstOne(t *testing.T) {
- in := NewInflights(10)
- for i := 0; i < 10; i++ {
- in.Add(uint64(i))
- }
-
- in.FreeFirstOne()
-
- wantIn := &Inflights{
- start: 1,
- count: 9,
- size: 10,
- // ↑------------------------
- buffer: []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
- }
-
- if !reflect.DeepEqual(in, wantIn) {
- t.Fatalf("in = %+v, want %+v", in, wantIn)
- }
-}
diff --git a/raft/tracker/progress.go b/raft/tracker/progress.go
deleted file mode 100644
index a36e5261ac7..00000000000
--- a/raft/tracker/progress.go
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tracker
-
-import (
- "fmt"
- "sort"
- "strings"
-)
-
-// Progress represents a follower's progress in the view of the leader. Leader
-// maintains progresses of all followers, and sends entries to the follower
-// based on its progress.
-//
-// NB(tbg): Progress is basically a state machine whose transitions are mostly
-// strewn around `*raft.raft`. Additionally, some fields are only used when in a
-// certain State. All of this isn't ideal.
-type Progress struct {
- Match, Next uint64
- // State defines how the leader should interact with the follower.
- //
- // When in StateProbe, leader sends at most one replication message
- // per heartbeat interval. It also probes actual progress of the follower.
- //
- // When in StateReplicate, leader optimistically increases next
- // to the latest entry sent after sending replication message. This is
- // an optimized state for fast replicating log entries to the follower.
- //
- // When in StateSnapshot, leader should have sent out snapshot
- // before and stops sending any replication message.
- State StateType
-
- // PendingSnapshot is used in StateSnapshot.
- // If there is a pending snapshot, the pendingSnapshot will be set to the
- // index of the snapshot. If pendingSnapshot is set, the replication process of
- // this Progress will be paused. raft will not resend snapshot until the pending one
- // is reported to be failed.
- PendingSnapshot uint64
-
- // RecentActive is true if the progress is recently active. Receiving any messages
- // from the corresponding follower indicates the progress is active.
- // RecentActive can be reset to false after an election timeout.
- //
- // TODO(tbg): the leader should always have this set to true.
- RecentActive bool
-
- // ProbeSent is used while this follower is in StateProbe. When ProbeSent is
- // true, raft should pause sending replication message to this peer until
- // ProbeSent is reset. See ProbeAcked() and IsPaused().
- ProbeSent bool
-
- // Inflights is a sliding window for the inflight messages.
- // Each inflight message contains one or more log entries.
- // The max number of entries per message is defined in raft config as MaxSizePerMsg.
- // Thus inflight effectively limits both the number of inflight messages
- // and the bandwidth each Progress can use.
- // When inflights is Full, no more message should be sent.
- // When a leader sends out a message, the index of the last
- // entry should be added to inflights. The index MUST be added
- // into inflights in order.
- // When a leader receives a reply, the previous inflights should
- // be freed by calling inflights.FreeLE with the index of the last
- // received entry.
- Inflights *Inflights
-
- // IsLearner is true if this progress is tracked for a learner.
- IsLearner bool
-}
-
-// ResetState moves the Progress into the specified State, resetting ProbeSent,
-// PendingSnapshot, and Inflights.
-func (pr *Progress) ResetState(state StateType) {
- pr.ProbeSent = false
- pr.PendingSnapshot = 0
- pr.State = state
- pr.Inflights.reset()
-}
-
-func max(a, b uint64) uint64 {
- if a > b {
- return a
- }
- return b
-}
-
-func min(a, b uint64) uint64 {
- if a > b {
- return b
- }
- return a
-}
-
-// ProbeAcked is called when this peer has accepted an append. It resets
-// ProbeSent to signal that additional append messages should be sent without
-// further delay.
-func (pr *Progress) ProbeAcked() {
- pr.ProbeSent = false
-}
-
-// BecomeProbe transitions into StateProbe. Next is reset to Match+1 or,
-// optionally and if larger, the index of the pending snapshot.
-func (pr *Progress) BecomeProbe() {
- // If the original state is StateSnapshot, progress knows that
- // the pending snapshot has been sent to this peer successfully, then
- // probes from pendingSnapshot + 1.
- if pr.State == StateSnapshot {
- pendingSnapshot := pr.PendingSnapshot
- pr.ResetState(StateProbe)
- pr.Next = max(pr.Match+1, pendingSnapshot+1)
- } else {
- pr.ResetState(StateProbe)
- pr.Next = pr.Match + 1
- }
-}
-
-// BecomeReplicate transitions into StateReplicate, resetting Next to Match+1.
-func (pr *Progress) BecomeReplicate() {
- pr.ResetState(StateReplicate)
- pr.Next = pr.Match + 1
-}
-
-// BecomeSnapshot moves the Progress to StateSnapshot with the specified pending
-// snapshot index.
-func (pr *Progress) BecomeSnapshot(snapshoti uint64) {
- pr.ResetState(StateSnapshot)
- pr.PendingSnapshot = snapshoti
-}
-
-// MaybeUpdate is called when an MsgAppResp arrives from the follower, with the
-// index acked by it. The method returns false if the given n index comes from
-// an outdated message. Otherwise it updates the progress and returns true.
-func (pr *Progress) MaybeUpdate(n uint64) bool {
- var updated bool
- if pr.Match < n {
- pr.Match = n
- updated = true
- pr.ProbeAcked()
- }
- pr.Next = max(pr.Next, n+1)
- return updated
-}
-
-// OptimisticUpdate signals that appends all the way up to and including index n
-// are in-flight. As a result, Next is increased to n+1.
-func (pr *Progress) OptimisticUpdate(n uint64) { pr.Next = n + 1 }
-
-// MaybeDecrTo adjusts the Progress to the receipt of a MsgApp rejection. The
-// arguments are the index of the append message rejected by the follower, and
-// the hint that we want to decrease to.
-//
-// Rejections can happen spuriously as messages are sent out of order or
-// duplicated. In such cases, the rejection pertains to an index that the
-// Progress already knows was previously acknowledged, and false is returned
-// without changing the Progress.
-//
-// If the rejection is genuine, Next is lowered sensibly, and the Progress is
-// cleared for sending log entries.
-func (pr *Progress) MaybeDecrTo(rejected, matchHint uint64) bool {
- if pr.State == StateReplicate {
- // The rejection must be stale if the progress has matched and "rejected"
- // is smaller than "match".
- if rejected <= pr.Match {
- return false
- }
- // Directly decrease next to match + 1.
- //
- // TODO(tbg): why not use matchHint if it's larger?
- pr.Next = pr.Match + 1
- return true
- }
-
- // The rejection must be stale if "rejected" does not match next - 1. This
- // is because non-replicating followers are probed one entry at a time.
- if pr.Next-1 != rejected {
- return false
- }
-
- pr.Next = max(min(rejected, matchHint+1), 1)
- pr.ProbeSent = false
- return true
-}
-
-// IsPaused returns whether sending log entries to this node has been throttled.
-// This is done when a node has rejected recent MsgApps, is currently waiting
-// for a snapshot, or has reached the MaxInflightMsgs limit. In normal
-// operation, this is false. A throttled node will be contacted less frequently
-// until it has reached a state in which it's able to accept a steady stream of
-// log entries again.
-func (pr *Progress) IsPaused() bool {
- switch pr.State {
- case StateProbe:
- return pr.ProbeSent
- case StateReplicate:
- return pr.Inflights.Full()
- case StateSnapshot:
- return true
- default:
- panic("unexpected state")
- }
-}
-
-func (pr *Progress) String() string {
- var buf strings.Builder
- fmt.Fprintf(&buf, "%s match=%d next=%d", pr.State, pr.Match, pr.Next)
- if pr.IsLearner {
- fmt.Fprint(&buf, " learner")
- }
- if pr.IsPaused() {
- fmt.Fprint(&buf, " paused")
- }
- if pr.PendingSnapshot > 0 {
- fmt.Fprintf(&buf, " pendingSnap=%d", pr.PendingSnapshot)
- }
- if !pr.RecentActive {
- fmt.Fprintf(&buf, " inactive")
- }
- if n := pr.Inflights.Count(); n > 0 {
- fmt.Fprintf(&buf, " inflight=%d", n)
- if pr.Inflights.Full() {
- fmt.Fprint(&buf, "[full]")
- }
- }
- return buf.String()
-}
-
-// ProgressMap is a map of *Progress.
-type ProgressMap map[uint64]*Progress
-
-// String prints the ProgressMap in sorted key order, one Progress per line.
-func (m ProgressMap) String() string {
- ids := make([]uint64, 0, len(m))
- for k := range m {
- ids = append(ids, k)
- }
- sort.Slice(ids, func(i, j int) bool {
- return ids[i] < ids[j]
- })
- var buf strings.Builder
- for _, id := range ids {
- fmt.Fprintf(&buf, "%d: %s\n", id, m[id])
- }
- return buf.String()
-}
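The Rejected (Hint: N) exchanges in the testdata earlier in this diff are resolved through MaybeDecrTo/MaybeUpdate above. A small hedged sketch of that round trip follows; the indexes are made up (the exact values in the traces depend on per-node log state).

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/v3/tracker"
)

func main() {
	// Leader's view of a follower acknowledged up to index 14 but
	// optimistically sent entries through index 21.
	pr := &tracker.Progress{
		State:     tracker.StateReplicate,
		Match:     14,
		Next:      22,
		Inflights: tracker.NewInflights(256),
	}

	// The follower rejects the append at index 21 with a hint of 14
	// (cf. the "Rejected (Hint: ...)" responses above): Next falls back
	// to Match+1 and the leader resends from there.
	if pr.MaybeDecrTo(21, 14) {
		fmt.Println(pr.Next) // 15
	}

	// Once the resent append is accepted, Match/Next advance again.
	pr.MaybeUpdate(21)
	fmt.Println(pr.Match, pr.Next) // 21 22
}
```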
diff --git a/raft/tracker/progress_test.go b/raft/tracker/progress_test.go
deleted file mode 100644
index 6eb582f049c..00000000000
--- a/raft/tracker/progress_test.go
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tracker
-
-import (
- "testing"
-)
-
-func TestProgressString(t *testing.T) {
- ins := NewInflights(1)
- ins.Add(123)
- pr := &Progress{
- Match: 1,
- Next: 2,
- State: StateSnapshot,
- PendingSnapshot: 123,
- RecentActive: false,
- ProbeSent: true,
- IsLearner: true,
- Inflights: ins,
- }
- const exp = `StateSnapshot match=1 next=2 learner paused pendingSnap=123 inactive inflight=1[full]`
- if act := pr.String(); act != exp {
- t.Errorf("exp: %s\nact: %s", exp, act)
- }
-}
-
-func TestProgressIsPaused(t *testing.T) {
- tests := []struct {
- state StateType
- paused bool
-
- w bool
- }{
- {StateProbe, false, false},
- {StateProbe, true, true},
- {StateReplicate, false, false},
- {StateReplicate, true, false},
- {StateSnapshot, false, true},
- {StateSnapshot, true, true},
- }
- for i, tt := range tests {
- p := &Progress{
- State: tt.state,
- ProbeSent: tt.paused,
- Inflights: NewInflights(256),
- }
- if g := p.IsPaused(); g != tt.w {
- t.Errorf("#%d: paused= %t, want %t", i, g, tt.w)
- }
- }
-}
-
-// TestProgressResume ensures that MaybeUpdate and MaybeDecrTo will reset
-// ProbeSent.
-func TestProgressResume(t *testing.T) {
- p := &Progress{
- Next: 2,
- ProbeSent: true,
- }
- p.MaybeDecrTo(1, 1)
- if p.ProbeSent {
- t.Errorf("paused= %v, want false", p.ProbeSent)
- }
- p.ProbeSent = true
- p.MaybeUpdate(2)
- if p.ProbeSent {
- t.Errorf("paused= %v, want false", p.ProbeSent)
- }
-}
-
-func TestProgressBecomeProbe(t *testing.T) {
- match := uint64(1)
- tests := []struct {
- p *Progress
- wnext uint64
- }{
- {
- &Progress{State: StateReplicate, Match: match, Next: 5, Inflights: NewInflights(256)},
- 2,
- },
- {
- // snapshot finish
- &Progress{State: StateSnapshot, Match: match, Next: 5, PendingSnapshot: 10, Inflights: NewInflights(256)},
- 11,
- },
- {
- // snapshot failure
- &Progress{State: StateSnapshot, Match: match, Next: 5, PendingSnapshot: 0, Inflights: NewInflights(256)},
- 2,
- },
- }
- for i, tt := range tests {
- tt.p.BecomeProbe()
- if tt.p.State != StateProbe {
- t.Errorf("#%d: state = %s, want %s", i, tt.p.State, StateProbe)
- }
- if tt.p.Match != match {
- t.Errorf("#%d: match = %d, want %d", i, tt.p.Match, match)
- }
- if tt.p.Next != tt.wnext {
- t.Errorf("#%d: next = %d, want %d", i, tt.p.Next, tt.wnext)
- }
- }
-}
-
-func TestProgressBecomeReplicate(t *testing.T) {
- p := &Progress{State: StateProbe, Match: 1, Next: 5, Inflights: NewInflights(256)}
- p.BecomeReplicate()
-
- if p.State != StateReplicate {
- t.Errorf("state = %s, want %s", p.State, StateReplicate)
- }
- if p.Match != 1 {
- t.Errorf("match = %d, want 1", p.Match)
- }
- if w := p.Match + 1; p.Next != w {
- t.Errorf("next = %d, want %d", p.Next, w)
- }
-}
-
-func TestProgressBecomeSnapshot(t *testing.T) {
- p := &Progress{State: StateProbe, Match: 1, Next: 5, Inflights: NewInflights(256)}
- p.BecomeSnapshot(10)
-
- if p.State != StateSnapshot {
- t.Errorf("state = %s, want %s", p.State, StateSnapshot)
- }
- if p.Match != 1 {
- t.Errorf("match = %d, want 1", p.Match)
- }
- if p.PendingSnapshot != 10 {
- t.Errorf("pendingSnapshot = %d, want 10", p.PendingSnapshot)
- }
-}
-
-func TestProgressUpdate(t *testing.T) {
- prevM, prevN := uint64(3), uint64(5)
- tests := []struct {
- update uint64
-
- wm uint64
- wn uint64
- wok bool
- }{
- {prevM - 1, prevM, prevN, false}, // do not decrease match, next
- {prevM, prevM, prevN, false}, // do not decrease next
- {prevM + 1, prevM + 1, prevN, true}, // increase match, do not decrease next
- {prevM + 2, prevM + 2, prevN + 1, true}, // increase match, next
- }
- for i, tt := range tests {
- p := &Progress{
- Match: prevM,
- Next: prevN,
- }
- ok := p.MaybeUpdate(tt.update)
- if ok != tt.wok {
- t.Errorf("#%d: ok= %v, want %v", i, ok, tt.wok)
- }
- if p.Match != tt.wm {
- t.Errorf("#%d: match= %d, want %d", i, p.Match, tt.wm)
- }
- if p.Next != tt.wn {
- t.Errorf("#%d: next= %d, want %d", i, p.Next, tt.wn)
- }
- }
-}
-
-func TestProgressMaybeDecr(t *testing.T) {
- tests := []struct {
- state StateType
- m uint64
- n uint64
- rejected uint64
- last uint64
-
- w bool
- wn uint64
- }{
- {
- // state replicate and rejected is not greater than match
- StateReplicate, 5, 10, 5, 5, false, 10,
- },
- {
- // state replicate and rejected is not greater than match
- StateReplicate, 5, 10, 4, 4, false, 10,
- },
- {
- // state replicate and rejected is greater than match
- // directly decrease to match+1
- StateReplicate, 5, 10, 9, 9, true, 6,
- },
- {
- // next-1 != rejected is always false
- StateProbe, 0, 0, 0, 0, false, 0,
- },
- {
- // next-1 != rejected is always false
- StateProbe, 0, 10, 5, 5, false, 10,
- },
- {
- // next>1 = decremented by 1
- StateProbe, 0, 10, 9, 9, true, 9,
- },
- {
- // next>1 = decremented by 1
- StateProbe, 0, 2, 1, 1, true, 1,
- },
- {
- // next<=1 = reset to 1
- StateProbe, 0, 1, 0, 0, true, 1,
- },
- {
- // decrease to min(rejected, last+1)
- StateProbe, 0, 10, 9, 2, true, 3,
- },
- {
- // rejected < 1, reset to 1
- StateProbe, 0, 10, 9, 0, true, 1,
- },
- }
- for i, tt := range tests {
- p := &Progress{
- State: tt.state,
- Match: tt.m,
- Next: tt.n,
- }
- if g := p.MaybeDecrTo(tt.rejected, tt.last); g != tt.w {
- t.Errorf("#%d: maybeDecrTo= %t, want %t", i, g, tt.w)
- }
- if gm := p.Match; gm != tt.m {
- t.Errorf("#%d: match= %d, want %d", i, gm, tt.m)
- }
- if gn := p.Next; gn != tt.wn {
- t.Errorf("#%d: next= %d, want %d", i, gn, tt.wn)
- }
- }
-}
diff --git a/raft/tracker/state.go b/raft/tracker/state.go
deleted file mode 100644
index 285b4b8f580..00000000000
--- a/raft/tracker/state.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tracker
-
-// StateType is the state of a tracked follower.
-type StateType uint64
-
-const (
- // StateProbe indicates a follower whose last index isn't known. Such a
- // follower is "probed" (i.e. an append sent periodically) to narrow down
- // its last index. In the ideal (and common) case, only one round of probing
- // is necessary as the follower will react with a hint. Followers that are
- // probed over extended periods of time are often offline.
- StateProbe StateType = iota
- // StateReplicate is the steady state in which a follower eagerly receives
- // log entries to append to its log.
- StateReplicate
- // StateSnapshot indicates a follower that needs log entries not available
- // from the leader's Raft log. Such a follower needs a full snapshot to
- // return to StateReplicate.
- StateSnapshot
-)
-
-var prstmap = [...]string{
- "StateProbe",
- "StateReplicate",
- "StateSnapshot",
-}
-
-func (st StateType) String() string { return prstmap[uint64(st)] }
diff --git a/raft/tracker/tracker.go b/raft/tracker/tracker.go
deleted file mode 100644
index 72dcc73b866..00000000000
--- a/raft/tracker/tracker.go
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tracker
-
-import (
- "fmt"
- "sort"
- "strings"
-
- "go.etcd.io/etcd/raft/v3/quorum"
- pb "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-// Config reflects the configuration tracked in a ProgressTracker.
-type Config struct {
- Voters quorum.JointConfig
- // AutoLeave is true if the configuration is joint and a transition to the
- // incoming configuration should be carried out automatically by Raft when
- // this is possible. If false, the configuration will be joint until the
- // application initiates the transition manually.
- AutoLeave bool
- // Learners is a set of IDs corresponding to the learners active in the
- // current configuration.
- //
- // Invariant: Learners and Voters do not intersect, i.e. if a peer is in
- // either half of the joint config, it can't be a learner; if it is a
- // learner it can't be in either half of the joint config. This invariant
- // simplifies the implementation since it allows peers to have clarity about
- // their current role without taking into account joint consensus.
- Learners map[uint64]struct{}
- // When we turn a voter into a learner during a joint consensus transition,
- // we cannot add the learner directly when entering the joint state. This is
- // because this would violate the invariant that the intersection of
- // voters and learners is empty. For example, assume a Voter is removed and
- // immediately re-added as a learner (or in other words, it is demoted):
- //
- // Initially, the configuration will be
- //
- // voters: {1 2 3}
- // learners: {}
- //
- // and we want to demote 3. Entering the joint configuration, we naively get
- //
- // voters: {1 2} & {1 2 3}
- // learners: {3}
- //
- // but this violates the invariant (3 is both voter and learner). Instead,
- // we get
- //
- // voters: {1 2} & {1 2 3}
- // learners: {}
- // next_learners: {3}
- //
- // Where 3 is now still purely a voter, but we are remembering the intention
- // to make it a learner upon transitioning into the final configuration:
- //
- // voters: {1 2}
- // learners: {3}
- // next_learners: {}
- //
- // Note that next_learners is not used while adding a learner that is not
- // also a voter in the joint config. In this case, the learner is added
- // right away when entering the joint configuration, so that it is caught up
- // as soon as possible.
- LearnersNext map[uint64]struct{}
-}
-
-func (c Config) String() string {
- var buf strings.Builder
- fmt.Fprintf(&buf, "voters=%s", c.Voters)
- if c.Learners != nil {
- fmt.Fprintf(&buf, " learners=%s", quorum.MajorityConfig(c.Learners).String())
- }
- if c.LearnersNext != nil {
- fmt.Fprintf(&buf, " learners_next=%s", quorum.MajorityConfig(c.LearnersNext).String())
- }
- if c.AutoLeave {
- fmt.Fprintf(&buf, " autoleave")
- }
- return buf.String()
-}
-
-// Clone returns a copy of the Config that shares no memory with the original.
-func (c *Config) Clone() Config {
- clone := func(m map[uint64]struct{}) map[uint64]struct{} {
- if m == nil {
- return nil
- }
- mm := make(map[uint64]struct{}, len(m))
- for k := range m {
- mm[k] = struct{}{}
- }
- return mm
- }
- return Config{
- Voters: quorum.JointConfig{clone(c.Voters[0]), clone(c.Voters[1])},
- Learners: clone(c.Learners),
- LearnersNext: clone(c.LearnersNext),
- }
-}
-
-// ProgressTracker tracks the currently active configuration and the information
-// known about the nodes and learners in it. In particular, it tracks the match
-// index for each peer which in turn allows reasoning about the committed index.
-type ProgressTracker struct {
- Config
-
- Progress ProgressMap
-
- Votes map[uint64]bool
-
- MaxInflight int
-}
-
-// MakeProgressTracker initializes a ProgressTracker.
-func MakeProgressTracker(maxInflight int) ProgressTracker {
- p := ProgressTracker{
- MaxInflight: maxInflight,
- Config: Config{
- Voters: quorum.JointConfig{
- quorum.MajorityConfig{},
- nil, // only populated when used
- },
- Learners: nil, // only populated when used
- LearnersNext: nil, // only populated when used
- },
- Votes: map[uint64]bool{},
- Progress: map[uint64]*Progress{},
- }
- return p
-}
-
-// ConfState returns a ConfState representing the active configuration.
-func (p *ProgressTracker) ConfState() pb.ConfState {
- return pb.ConfState{
- Voters: p.Voters[0].Slice(),
- VotersOutgoing: p.Voters[1].Slice(),
- Learners: quorum.MajorityConfig(p.Learners).Slice(),
- LearnersNext: quorum.MajorityConfig(p.LearnersNext).Slice(),
- AutoLeave: p.AutoLeave,
- }
-}
-
-// IsSingleton returns true if (and only if) there is only one voting member
-// (i.e. the leader) in the current configuration.
-func (p *ProgressTracker) IsSingleton() bool {
- return len(p.Voters[0]) == 1 && len(p.Voters[1]) == 0
-}
-
-type matchAckIndexer map[uint64]*Progress
-
-var _ quorum.AckedIndexer = matchAckIndexer(nil)
-
-// AckedIndex implements quorum.AckedIndexer.
-func (l matchAckIndexer) AckedIndex(id uint64) (quorum.Index, bool) {
- pr, ok := l[id]
- if !ok {
- return 0, false
- }
- return quorum.Index(pr.Match), true
-}
-
-// Committed returns the largest log index known to be committed based on what
-// the voting members of the group have acknowledged.
-func (p *ProgressTracker) Committed() uint64 {
- return uint64(p.Voters.CommittedIndex(matchAckIndexer(p.Progress)))
-}
-
-func insertionSort(sl []uint64) {
- a, b := 0, len(sl)
- for i := a + 1; i < b; i++ {
- for j := i; j > a && sl[j] < sl[j-1]; j-- {
- sl[j], sl[j-1] = sl[j-1], sl[j]
- }
- }
-}
-
-// Visit invokes the supplied closure for all tracked progresses in stable order.
-func (p *ProgressTracker) Visit(f func(id uint64, pr *Progress)) {
- n := len(p.Progress)
- // We need to sort the IDs and don't want to allocate since this is hot code.
- // The optimization here mirrors that in `(MajorityConfig).CommittedIndex`,
- // see there for details.
- var sl [7]uint64
- var ids []uint64
- if len(sl) >= n {
- ids = sl[:n]
- } else {
- ids = make([]uint64, n)
- }
- for id := range p.Progress {
- n--
- ids[n] = id
- }
- insertionSort(ids)
- for _, id := range ids {
- f(id, p.Progress[id])
- }
-}
-
-// QuorumActive returns true if the quorum is active from the view of the local
-// raft state machine. Otherwise, it returns false.
-func (p *ProgressTracker) QuorumActive() bool {
- votes := map[uint64]bool{}
- p.Visit(func(id uint64, pr *Progress) {
- if pr.IsLearner {
- return
- }
- votes[id] = pr.RecentActive
- })
-
- return p.Voters.VoteResult(votes) == quorum.VoteWon
-}
-
-// VoterNodes returns a sorted slice of voters.
-func (p *ProgressTracker) VoterNodes() []uint64 {
- m := p.Voters.IDs()
- nodes := make([]uint64, 0, len(m))
- for id := range m {
- nodes = append(nodes, id)
- }
- sort.Slice(nodes, func(i, j int) bool { return nodes[i] < nodes[j] })
- return nodes
-}
-
-// LearnerNodes returns a sorted slice of learners.
-func (p *ProgressTracker) LearnerNodes() []uint64 {
- if len(p.Learners) == 0 {
- return nil
- }
- nodes := make([]uint64, 0, len(p.Learners))
- for id := range p.Learners {
- nodes = append(nodes, id)
- }
- sort.Slice(nodes, func(i, j int) bool { return nodes[i] < nodes[j] })
- return nodes
-}
-
-// ResetVotes prepares for a new round of vote counting via recordVote.
-func (p *ProgressTracker) ResetVotes() {
- p.Votes = map[uint64]bool{}
-}
-
-// RecordVote records that the node with the given id voted for this Raft
-// instance if v == true (and declined it otherwise).
-func (p *ProgressTracker) RecordVote(id uint64, v bool) {
- _, ok := p.Votes[id]
- if !ok {
- p.Votes[id] = v
- }
-}
-
-// TallyVotes returns the number of granted and rejected Votes, and whether the
-// election outcome is known.
-func (p *ProgressTracker) TallyVotes() (granted int, rejected int, _ quorum.VoteResult) {
- // Make sure to populate granted/rejected correctly even if the Votes map
- // contains members no longer part of the configuration. This doesn't really
- // matter in the way the numbers are used (they're informational), but might
- // as well get it right.
- for id, pr := range p.Progress {
- if pr.IsLearner {
- continue
- }
- v, voted := p.Votes[id]
- if !voted {
- continue
- }
- if v {
- granted++
- } else {
- rejected++
- }
- }
- result := p.Voters.VoteResult(p.Votes)
- return granted, rejected, result
-}
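A hedged sketch of the two jobs ProgressTracker performs for the leader, commit-index calculation over the voter quorum and vote tallying; the peer IDs, Match values, and votes below are invented for illustration.

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/v3/tracker"
)

func main() {
	p := tracker.MakeProgressTracker(256)

	// Three voters with different acknowledged (Match) indexes.
	for id, match := range map[uint64]uint64{1: 20, 2: 18, 3: 15} {
		p.Voters[0][id] = struct{}{}
		p.Progress[id] = &tracker.Progress{
			Match:     match,
			Next:      match + 1,
			Inflights: tracker.NewInflights(256),
		}
	}

	// Quorum-acknowledged index: the middle value, 18.
	fmt.Println(p.Committed())

	// Vote counting during an election.
	p.ResetVotes()
	p.RecordVote(1, true)
	p.RecordVote(2, true)
	p.RecordVote(3, false)
	granted, rejected, result := p.TallyVotes()
	fmt.Println(granted, rejected, result) // e.g. 2 1 VoteWon
}
```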
diff --git a/raft/util.go b/raft/util.go
deleted file mode 100644
index 94ab368f608..00000000000
--- a/raft/util.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "bytes"
- "fmt"
- "strings"
-
- pb "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-func (st StateType) MarshalJSON() ([]byte, error) {
- return []byte(fmt.Sprintf("%q", st.String())), nil
-}
-
-func min(a, b uint64) uint64 {
- if a > b {
- return b
- }
- return a
-}
-
-func max(a, b uint64) uint64 {
- if a > b {
- return a
- }
- return b
-}
-
-func IsLocalMsg(msgt pb.MessageType) bool {
- return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable ||
- msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum
-}
-
-func IsResponseMsg(msgt pb.MessageType) bool {
- return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp
-}
-
-// voteRespMsgType maps vote and prevote message types to their corresponding responses.
-func voteRespMsgType(msgt pb.MessageType) pb.MessageType {
- switch msgt {
- case pb.MsgVote:
- return pb.MsgVoteResp
- case pb.MsgPreVote:
- return pb.MsgPreVoteResp
- default:
- panic(fmt.Sprintf("not a vote message: %s", msgt))
- }
-}
-
-func DescribeHardState(hs pb.HardState) string {
- var buf strings.Builder
- fmt.Fprintf(&buf, "Term:%d", hs.Term)
- if hs.Vote != 0 {
- fmt.Fprintf(&buf, " Vote:%d", hs.Vote)
- }
- fmt.Fprintf(&buf, " Commit:%d", hs.Commit)
- return buf.String()
-}
-
-func DescribeSoftState(ss SoftState) string {
- return fmt.Sprintf("Lead:%d State:%s", ss.Lead, ss.RaftState)
-}
-
-func DescribeConfState(state pb.ConfState) string {
- return fmt.Sprintf(
- "Voters:%v VotersOutgoing:%v Learners:%v LearnersNext:%v AutoLeave:%v",
- state.Voters, state.VotersOutgoing, state.Learners, state.LearnersNext, state.AutoLeave,
- )
-}
-
-func DescribeSnapshot(snap pb.Snapshot) string {
- m := snap.Metadata
- return fmt.Sprintf("Index:%d Term:%d ConfState:%s", m.Index, m.Term, DescribeConfState(m.ConfState))
-}
-
-func DescribeReady(rd Ready, f EntryFormatter) string {
- var buf strings.Builder
- if rd.SoftState != nil {
- fmt.Fprint(&buf, DescribeSoftState(*rd.SoftState))
- buf.WriteByte('\n')
- }
- if !IsEmptyHardState(rd.HardState) {
- fmt.Fprintf(&buf, "HardState %s", DescribeHardState(rd.HardState))
- buf.WriteByte('\n')
- }
- if len(rd.ReadStates) > 0 {
- fmt.Fprintf(&buf, "ReadStates %v\n", rd.ReadStates)
- }
- if len(rd.Entries) > 0 {
- buf.WriteString("Entries:\n")
- fmt.Fprint(&buf, DescribeEntries(rd.Entries, f))
- }
- if !IsEmptySnap(rd.Snapshot) {
- fmt.Fprintf(&buf, "Snapshot %s\n", DescribeSnapshot(rd.Snapshot))
- }
- if len(rd.CommittedEntries) > 0 {
- buf.WriteString("CommittedEntries:\n")
- fmt.Fprint(&buf, DescribeEntries(rd.CommittedEntries, f))
- }
- if len(rd.Messages) > 0 {
- buf.WriteString("Messages:\n")
- for _, msg := range rd.Messages {
- fmt.Fprint(&buf, DescribeMessage(msg, f))
- buf.WriteByte('\n')
- }
- }
- if buf.Len() > 0 {
- return fmt.Sprintf("Ready MustSync=%t:\n%s", rd.MustSync, buf.String())
- }
- return ""
-}
-
-// EntryFormatter can be implemented by the application to provide human-readable formatting
-// of entry data. Nil is a valid EntryFormatter and will use a default format.
-type EntryFormatter func([]byte) string
-
-// DescribeMessage returns a concise human-readable description of a
-// Message for debugging.
-func DescribeMessage(m pb.Message, f EntryFormatter) string {
- var buf bytes.Buffer
- fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index)
- if m.Reject {
- fmt.Fprintf(&buf, " Rejected (Hint: %d)", m.RejectHint)
- }
- if m.Commit != 0 {
- fmt.Fprintf(&buf, " Commit:%d", m.Commit)
- }
- if len(m.Entries) > 0 {
- fmt.Fprintf(&buf, " Entries:[")
- for i, e := range m.Entries {
- if i != 0 {
- buf.WriteString(", ")
- }
- buf.WriteString(DescribeEntry(e, f))
- }
- fmt.Fprintf(&buf, "]")
- }
- if !IsEmptySnap(m.Snapshot) {
- fmt.Fprintf(&buf, " Snapshot: %s", DescribeSnapshot(m.Snapshot))
- }
- return buf.String()
-}
-
-// PayloadSize is the size of the payload of this Entry. Notably, it does not
-// depend on its Index or Term.
-func PayloadSize(e pb.Entry) int {
- return len(e.Data)
-}
-
-// DescribeEntry returns a concise human-readable description of an
-// Entry for debugging.
-func DescribeEntry(e pb.Entry, f EntryFormatter) string {
- if f == nil {
- f = func(data []byte) string { return fmt.Sprintf("%q", data) }
- }
-
- formatConfChange := func(cc pb.ConfChangeI) string {
- // TODO(tbg): give the EntryFormatter a type argument so that it gets
- // a chance to expose the Context.
- return pb.ConfChangesToString(cc.AsV2().Changes)
- }
-
- var formatted string
- switch e.Type {
- case pb.EntryNormal:
- formatted = f(e.Data)
- case pb.EntryConfChange:
- var cc pb.ConfChange
- if err := cc.Unmarshal(e.Data); err != nil {
- formatted = err.Error()
- } else {
- formatted = formatConfChange(cc)
- }
- case pb.EntryConfChangeV2:
- var cc pb.ConfChangeV2
- if err := cc.Unmarshal(e.Data); err != nil {
- formatted = err.Error()
- } else {
- formatted = formatConfChange(cc)
- }
- }
- if formatted != "" {
- formatted = " " + formatted
- }
- return fmt.Sprintf("%d/%d %s%s", e.Term, e.Index, e.Type, formatted)
-}
-
-// DescribeEntries calls DescribeEntry for each Entry, adding a newline to
-// each.
-func DescribeEntries(ents []pb.Entry, f EntryFormatter) string {
- var buf bytes.Buffer
- for _, e := range ents {
- _, _ = buf.WriteString(DescribeEntry(e, f) + "\n")
- }
- return buf.String()
-}
-
-func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry {
- if len(ents) == 0 {
- return ents
- }
- size := ents[0].Size()
- var limit int
- for limit = 1; limit < len(ents); limit++ {
- size += ents[limit].Size()
- if uint64(size) > maxSize {
- break
- }
- }
- return ents[:limit]
-}
-
-func assertConfStatesEquivalent(l Logger, cs1, cs2 pb.ConfState) {
- err := cs1.Equivalent(cs2)
- if err == nil {
- return
- }
- l.Panic(err)
-}
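The Describe* helpers above produce the exact line format seen in the raft testdata files deleted earlier in this diff (for example "1->3 MsgApp Term:8 Log:6/20 Commit:18 Entries:[...]"). Below is a short hedged example of calling them from application code; the custom formatter and the sample entry/message values are made up.

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/v3"
	pb "go.etcd.io/etcd/raft/v3/raftpb"
)

func main() {
	// A formatter renders application payloads; nil falls back to %q.
	var custom raft.EntryFormatter = func(data []byte) string {
		return fmt.Sprintf("payload(%d bytes)", len(data))
	}

	e := pb.Entry{Term: 8, Index: 21, Type: pb.EntryNormal, Data: []byte("prop")}
	fmt.Println(raft.DescribeEntry(e, nil))    // 8/21 EntryNormal "prop"
	fmt.Println(raft.DescribeEntry(e, custom)) // 8/21 EntryNormal payload(4 bytes)

	m := pb.Message{
		From: 1, To: 3, Type: pb.MsgApp,
		Term: 8, LogTerm: 6, Index: 20, Commit: 18,
		Entries: []pb.Entry{e},
	}
	fmt.Println(raft.DescribeMessage(m, nil))
}
```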
diff --git a/raft/util_test.go b/raft/util_test.go
deleted file mode 100644
index 65bc95501bf..00000000000
--- a/raft/util_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "math"
- "reflect"
- "strings"
- "testing"
-
- pb "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-var testFormatter EntryFormatter = func(data []byte) string {
- return strings.ToUpper(string(data))
-}
-
-func TestDescribeEntry(t *testing.T) {
- entry := pb.Entry{
- Term: 1,
- Index: 2,
- Type: pb.EntryNormal,
- Data: []byte("hello\x00world"),
- }
-
- defaultFormatted := DescribeEntry(entry, nil)
- if defaultFormatted != "1/2 EntryNormal \"hello\\x00world\"" {
- t.Errorf("unexpected default output: %s", defaultFormatted)
- }
-
- customFormatted := DescribeEntry(entry, testFormatter)
- if customFormatted != "1/2 EntryNormal HELLO\x00WORLD" {
- t.Errorf("unexpected custom output: %s", customFormatted)
- }
-}
-
-func TestLimitSize(t *testing.T) {
- ents := []pb.Entry{{Index: 4, Term: 4}, {Index: 5, Term: 5}, {Index: 6, Term: 6}}
- tests := []struct {
- maxsize uint64
- wentries []pb.Entry
- }{
- {math.MaxUint64, []pb.Entry{{Index: 4, Term: 4}, {Index: 5, Term: 5}, {Index: 6, Term: 6}}},
- // even if maxsize is zero, the first entry should be returned
- {0, []pb.Entry{{Index: 4, Term: 4}}},
- // limit to 2
- {uint64(ents[0].Size() + ents[1].Size()), []pb.Entry{{Index: 4, Term: 4}, {Index: 5, Term: 5}}},
- // limit to 2
- {uint64(ents[0].Size() + ents[1].Size() + ents[2].Size()/2), []pb.Entry{{Index: 4, Term: 4}, {Index: 5, Term: 5}}},
- {uint64(ents[0].Size() + ents[1].Size() + ents[2].Size() - 1), []pb.Entry{{Index: 4, Term: 4}, {Index: 5, Term: 5}}},
- // all
- {uint64(ents[0].Size() + ents[1].Size() + ents[2].Size()), []pb.Entry{{Index: 4, Term: 4}, {Index: 5, Term: 5}, {Index: 6, Term: 6}}},
- }
-
- for i, tt := range tests {
- if !reflect.DeepEqual(limitSize(ents, tt.maxsize), tt.wentries) {
- t.Errorf("#%d: entries = %v, want %v", i, limitSize(ents, tt.maxsize), tt.wentries)
- }
- }
-}
-
-func TestIsLocalMsg(t *testing.T) {
- tests := []struct {
- msgt pb.MessageType
- isLocal bool
- }{
- {pb.MsgHup, true},
- {pb.MsgBeat, true},
- {pb.MsgUnreachable, true},
- {pb.MsgSnapStatus, true},
- {pb.MsgCheckQuorum, true},
- {pb.MsgTransferLeader, false},
- {pb.MsgProp, false},
- {pb.MsgApp, false},
- {pb.MsgAppResp, false},
- {pb.MsgVote, false},
- {pb.MsgVoteResp, false},
- {pb.MsgSnap, false},
- {pb.MsgHeartbeat, false},
- {pb.MsgHeartbeatResp, false},
- {pb.MsgTimeoutNow, false},
- {pb.MsgReadIndex, false},
- {pb.MsgReadIndexResp, false},
- {pb.MsgPreVote, false},
- {pb.MsgPreVoteResp, false},
- }
-
- for i, tt := range tests {
- got := IsLocalMsg(tt.msgt)
- if got != tt.isLocal {
- t.Errorf("#%d: got %v, want %v", i, got, tt.isLocal)
- }
- }
-}
diff --git a/scripts/OWNERS b/scripts/OWNERS
new file mode 100644
index 00000000000..efdfed26735
--- /dev/null
+++ b/scripts/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+ - ivanvc # Ivan Valdes
diff --git a/scripts/build-binary b/scripts/build-binary
deleted file mode 100755
index 6186424aa9e..00000000000
--- a/scripts/build-binary
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-source ./scripts/test_lib.sh
-
-VER=$1
-REPOSITORY="${REPOSITORY:-git@github.com:etcd-io/etcd.git}"
-
-
-if [ -z "$1" ]; then
- echo "Usage: ${0} VERSION" >> /dev/stderr
- exit 255
-fi
-
-set -u
-
-function setup_env {
- local ver=${1}
- local proj=${2}
-
- if [ ! -d "${proj}" ]; then
- run git clone "${REPOSITORY}"
- fi
-
- pushd "${proj}" >/dev/null
- run git fetch --all
- git_assert_branch_in_sync || exit 2
- run git checkout "${ver}"
- git_assert_branch_in_sync || exit 2
- popd >/dev/null
-}
-
-
-function package {
- local target=${1}
- local srcdir="${2}/bin"
-
- local ccdir="${srcdir}/${GOOS}_${GOARCH}"
- if [ -d "${ccdir}" ]; then
- srcdir="${ccdir}"
- fi
- local ext=""
- if [ "${GOOS}" == "windows" ]; then
- ext=".exe"
- fi
- for bin in etcd etcdctl etcdutl; do
- cp "${srcdir}/${bin}" "${target}/${bin}${ext}"
- done
-
- cp etcd/README.md "${target}"/README.md
- cp etcd/etcdctl/README.md "${target}"/README-etcdctl.md
- cp etcd/etcdctl/READMEv2.md "${target}"/READMEv2-etcdctl.md
- cp etcd/etcdutl/README.md "${target}"/README-etcdutl.md
-
- cp -R etcd/Documentation "${target}"/Documentation
-}
-
-function main {
- local proj="etcd"
-
- mkdir -p release
- cd release
- setup_env "${VER}" "${proj}"
-
- tarcmd=tar
- if [[ $(go env GOOS) == "darwin" ]]; then
- echo "Please use linux machine for release builds."
- exit 1
- fi
-
- for os in darwin windows linux; do
- export GOOS=${os}
- TARGET_ARCHS=("amd64")
-
- if [ ${GOOS} == "linux" ]; then
- TARGET_ARCHS+=("arm64")
- TARGET_ARCHS+=("ppc64le")
- TARGET_ARCHS+=("s390x")
- fi
-
- for TARGET_ARCH in "${TARGET_ARCHS[@]}"; do
- export GOARCH=${TARGET_ARCH}
-
- pushd etcd >/dev/null
- GO_LDFLAGS="-s" ./build.sh
- popd >/dev/null
-
- TARGET="etcd-${VER}-${GOOS}-${GOARCH}"
- mkdir "${TARGET}"
- package "${TARGET}" "${proj}"
-
- if [ ${GOOS} == "linux" ]; then
- ${tarcmd} cfz "${TARGET}.tar.gz" "${TARGET}"
- echo "Wrote release/${TARGET}.tar.gz"
- else
- zip -qr "${TARGET}.zip" "${TARGET}"
- echo "Wrote release/${TARGET}.zip"
- fi
- done
- done
-}
-
-main
diff --git a/scripts/build-binary.sh b/scripts/build-binary.sh
new file mode 100755
index 00000000000..174c72db467
--- /dev/null
+++ b/scripts/build-binary.sh
@@ -0,0 +1,103 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+source ./scripts/test_lib.sh
+
+VER=${1:-}
+REPOSITORY="${REPOSITORY:-git@github.com:etcd-io/etcd.git}"
+
+if [ -z "$VER" ]; then
+ echo "Usage: ${0} VERSION" >> /dev/stderr
+ exit 255
+fi
+
+function setup_env {
+ local ver=${1}
+ local proj=${2}
+
+ if [ ! -d "${proj}" ]; then
+ run git clone "${REPOSITORY}"
+ fi
+
+ pushd "${proj}" >/dev/null
+ run git fetch --all
+ run git checkout "${ver}"
+ popd >/dev/null
+}
+
+
+function package {
+ local target=${1}
+ local srcdir="${2}/bin"
+
+ local ccdir="${srcdir}/${GOOS}_${GOARCH}"
+ if [ -d "${ccdir}" ]; then
+ srcdir="${ccdir}"
+ fi
+ local ext=""
+ if [ "${GOOS}" == "windows" ]; then
+ ext=".exe"
+ fi
+ for bin in etcd etcdctl etcdutl; do
+ cp "${srcdir}/${bin}" "${target}/${bin}${ext}"
+ done
+
+ cp etcd/README.md "${target}"/README.md
+ cp etcd/etcdctl/README.md "${target}"/README-etcdctl.md
+ cp etcd/etcdctl/READMEv2.md "${target}"/READMEv2-etcdctl.md
+ cp etcd/etcdutl/README.md "${target}"/README-etcdutl.md
+
+ cp -R etcd/Documentation "${target}"/Documentation
+}
+
+function main {
+ local proj="etcd"
+
+ mkdir -p release
+ cd release
+ setup_env "${VER}" "${proj}"
+
+ local tarcmd=tar
+ if [[ $(go env GOOS) == "darwin" ]]; then
+ echo "Please use linux machine for release builds."
+ exit 1
+ fi
+
+ for os in darwin windows linux; do
+ export GOOS=${os}
+ TARGET_ARCHS=("amd64")
+
+ if [ ${GOOS} == "linux" ]; then
+ TARGET_ARCHS+=("arm64")
+ TARGET_ARCHS+=("ppc64le")
+ TARGET_ARCHS+=("s390x")
+ fi
+
+ if [ ${GOOS} == "darwin" ]; then
+ TARGET_ARCHS+=("arm64")
+ fi
+
+ for TARGET_ARCH in "${TARGET_ARCHS[@]}"; do
+ export GOARCH=${TARGET_ARCH}
+
+ pushd etcd >/dev/null
+ GO_LDFLAGS="-s -w" ./scripts/build.sh
+ popd >/dev/null
+
+ TARGET="etcd-${VER}-${GOOS}-${GOARCH}"
+ mkdir "${TARGET}"
+ package "${TARGET}" "${proj}"
+
+ if [ ${GOOS} == "linux" ]; then
+ ${tarcmd} cfz "${TARGET}.tar.gz" "${TARGET}"
+ echo "Wrote release/${TARGET}.tar.gz"
+ else
+ zip -qr "${TARGET}.zip" "${TARGET}"
+ echo "Wrote release/${TARGET}.zip"
+ fi
+ done
+ done
+}
+
+main
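For reference, a minimal invocation sketch for the renamed script, run from the repository root; the tag below is purely hypothetical and must exist in ${REPOSITORY} for the checkout to succeed:
  $ ./scripts/build-binary.sh v3.6.0
  # clones ${REPOSITORY} under ./release, checks out the tag, cross-compiles
  # etcd/etcdctl/etcdutl, and writes etcd-v3.6.0-<os>-<arch>.tar.gz (linux) or .zip archives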
diff --git a/scripts/build-docker b/scripts/build-docker
deleted file mode 100755
index bc5fd7a10c7..00000000000
--- a/scripts/build-docker
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-if [ "$#" -ne 1 ]; then
- echo "Usage: $0 VERSION" >&2
- exit 1
-fi
-
-ARCH=$(go env GOARCH)
-VERSION="${1}-${ARCH}"
-DOCKERFILE="Dockerfile-release.${ARCH}"
-
-if [ -z "${BINARYDIR}" ]; then
- RELEASE="etcd-${1}"-$(go env GOOS)-$(go env GOARCH)
- BINARYDIR="${RELEASE}"
- TARFILE="${RELEASE}.tar.gz"
- TARURL="https://github.com/etcd-io/etcd/releases/download/${1}/${TARFILE}"
- if ! curl -f -L -o "${TARFILE}" "${TARURL}" ; then
- echo "Failed to download ${TARURL}."
- exit 1
- fi
- tar -zvxf "${TARFILE}"
-fi
-
-BINARYDIR=${BINARYDIR:-.}
-BUILDDIR=${BUILDDIR:-.}
-
-IMAGEDIR=${BUILDDIR}/image-docker
-
-mkdir -p "${IMAGEDIR}"/var/etcd
-mkdir -p "${IMAGEDIR}"/var/lib/etcd
-cp "${BINARYDIR}"/etcd "${BINARYDIR}"/etcdctl "${IMAGEDIR}"
-
-cat ./"${DOCKERFILE}" > "${IMAGEDIR}"/Dockerfile
-
-if [ -z "$TAG" ]; then
- docker build -t "gcr.io/etcd-development/etcd:${VERSION}" "${IMAGEDIR}"
- docker build -t "quay.io/coreos/etcd:${VERSION}" "${IMAGEDIR}"
-else
- docker build -t "${TAG}:${VERSION}" "${IMAGEDIR}"
-fi
diff --git a/scripts/build-docker.sh b/scripts/build-docker.sh
new file mode 100755
index 00000000000..6f86fb46a75
--- /dev/null
+++ b/scripts/build-docker.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+if [ "$#" -ne 1 ]; then
+ echo "Usage: $0 VERSION" >&2
+ exit 1
+fi
+
+VERSION=${1}
+if [ -z "$VERSION" ]; then
+ echo "Usage: ${0} VERSION" >&2
+ exit 1
+fi
+
+ARCH=$(go env GOARCH)
+VERSION="${VERSION}-${ARCH}"
+DOCKERFILE="Dockerfile"
+
+if [ -z "${BINARYDIR:-}" ]; then
+ RELEASE="etcd-${1}"-$(go env GOOS)-${ARCH}
+ BINARYDIR="${RELEASE}"
+ TARFILE="${RELEASE}.tar.gz"
+ TARURL="https://github.com/etcd-io/etcd/releases/download/${1}/${TARFILE}"
+ if ! curl -f -L -o "${TARFILE}" "${TARURL}" ; then
+ echo "Failed to download ${TARURL}."
+ exit 1
+ fi
+ tar -zvxf "${TARFILE}"
+fi
+
+BINARYDIR=${BINARYDIR:-.}
+BUILDDIR=${BUILDDIR:-.}
+
+IMAGEDIR=${BUILDDIR}/image-docker
+
+mkdir -p "${IMAGEDIR}"/var/etcd
+mkdir -p "${IMAGEDIR}"/var/lib/etcd
+cp "${BINARYDIR}"/etcd "${BINARYDIR}"/etcdctl "${BINARYDIR}"/etcdutl "${IMAGEDIR}"
+
+cat ./"${DOCKERFILE}" > "${IMAGEDIR}"/Dockerfile
+
+if [ -z "${TAG:-}" ]; then
+ # Fix incorrect image "Architecture" using buildkit
+ # From https://stackoverflow.com/q/72144329/
+ DOCKER_BUILDKIT=1 docker build --build-arg="ARCH=${ARCH}" -t "gcr.io/etcd-development/etcd:${VERSION}" "${IMAGEDIR}"
+ DOCKER_BUILDKIT=1 docker build --build-arg="ARCH=${ARCH}" -t "quay.io/coreos/etcd:${VERSION}" "${IMAGEDIR}"
+else
+ docker build -t "${TAG}:${VERSION}" "${IMAGEDIR}"
+fi
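A hedged usage sketch for the new script (the version tag and image name below are illustrative only). With BINARYDIR unset the script downloads the published release tarball; with BINARYDIR set it packages locally built binaries, and TAG overrides the default gcr.io/quay.io image names:
  $ ./scripts/build-docker.sh v3.6.0
  $ BINARYDIR=release/etcd-v3.6.0-linux-amd64 TAG=example.com/etcd ./scripts/build-docker.sh v3.6.0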
diff --git a/scripts/build-release.sh b/scripts/build-release.sh
index 149be75f10e..4cf1116ee00 100755
--- a/scripts/build-release.sh
+++ b/scripts/build-release.sh
@@ -3,11 +3,11 @@
# Build all release binaries and images to directory ./release.
# Run from repository root.
#
-set -e
+set -euo pipefail
source ./scripts/test_lib.sh
-VERSION=$1
+VERSION=${1:-}
if [ -z "${VERSION}" ]; then
echo "Usage: ${0} VERSION" >> /dev/stderr
exit 255
@@ -22,10 +22,10 @@ ETCD_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
pushd "${ETCD_ROOT}" >/dev/null
log_callout "Building etcd binary..."
- ./scripts/build-binary "${VERSION}"
+ ./scripts/build-binary.sh "${VERSION}"
for TARGET_ARCH in "amd64" "arm64" "ppc64le" "s390x"; do
log_callout "Building ${TARGET_ARCH} docker image..."
- GOOS=linux GOARCH=${TARGET_ARCH} BINARYDIR=release/etcd-${VERSION}-linux-${TARGET_ARCH} BUILDDIR=release ./scripts/build-docker "${VERSION}"
+ GOOS=linux GOARCH=${TARGET_ARCH} BINARYDIR=release/etcd-${VERSION}-linux-${TARGET_ARCH} BUILDDIR=release ./scripts/build-docker.sh "${VERSION}"
done
popd >/dev/null
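Assuming the renamed helper scripts above, the whole pipeline is still driven by a single call such as (hypothetical tag):
  $ ./scripts/build-release.sh v3.6.0
  # builds the binaries via build-binary.sh, then the amd64/arm64/ppc64le/s390x images via build-docker.sh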
diff --git a/scripts/build.sh b/scripts/build.sh
new file mode 100755
index 00000000000..15720193a1d
--- /dev/null
+++ b/scripts/build.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+# This script builds the etcd binaries.
+# To build the tools, run `build_tools.sh`
+
+set -euo pipefail
+
+source ./scripts/test_lib.sh
+source ./scripts/build_lib.sh
+
+# only build when called directly, not sourced
+if echo "$0" | grep -E "build(.sh)?$" >/dev/null; then
+ run_build etcd_build
+fi
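A small sketch of the two intended entry points; because of the guard on $0, sourcing the file only loads etcd_build/tools_build/run_build without triggering a build:
  $ ./scripts/build.sh        # builds etcd, etcdctl and etcdutl into ./bin
  $ source ./scripts/build.sh # load the build functions only, e.g. from another script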
diff --git a/scripts/build_lib.sh b/scripts/build_lib.sh
new file mode 100755
index 00000000000..bd6279c8ae6
--- /dev/null
+++ b/scripts/build_lib.sh
@@ -0,0 +1,101 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+source ./scripts/test_lib.sh
+
+GIT_SHA=$(git rev-parse --short HEAD || echo "GitNotFound")
+VERSION_SYMBOL="${ROOT_MODULE}/api/v3/version.GitSHA"
+
+# Use go env values if not set.
+GOOS=${GOOS:-$(go env GOOS)}
+GOARCH=${GOARCH:-$(go env GOARCH)}
+
+GO_BUILD_FLAGS=${GO_BUILD_FLAGS:-}
+
+CGO_ENABLED="${CGO_ENABLED:-0}"
+
+# Set GO_LDFLAGS="-s" to build without debug symbols.
+# shellcheck disable=SC2206
+GO_LDFLAGS=(${GO_LDFLAGS:-} "-X=${VERSION_SYMBOL}=${GIT_SHA}")
+GO_BUILD_ENV=("CGO_ENABLED=${CGO_ENABLED}" "GO_BUILD_FLAGS=${GO_BUILD_FLAGS}" "GOOS=${GOOS}" "GOARCH=${GOARCH}")
+
+etcd_build() {
+ out="bin"
+ if [[ -n "${BINDIR:-}" ]]; then out="${BINDIR}"; fi
+
+ run rm -f "${out}/etcd"
+ (
+ cd ./server
+ # Static compilation is useful when etcd is run in a container. $GO_BUILD_FLAGS is OK
+ # shellcheck disable=SC2086
+ run env "${GO_BUILD_ENV[@]}" go build $GO_BUILD_FLAGS \
+ -trimpath \
+ -installsuffix=cgo \
+ "-ldflags=${GO_LDFLAGS[*]}" \
+ -o="../${out}/etcd" . || return 2
+ ) || return 2
+
+ run rm -f "${out}/etcdutl"
+ # shellcheck disable=SC2086
+ (
+ cd ./etcdutl
+ run env GO_BUILD_FLAGS="${GO_BUILD_FLAGS}" "${GO_BUILD_ENV[@]}" go build $GO_BUILD_FLAGS \
+ -trimpath \
+ -installsuffix=cgo \
+ "-ldflags=${GO_LDFLAGS[*]}" \
+ -o="../${out}/etcdutl" . || return 2
+ ) || return 2
+
+ run rm -f "${out}/etcdctl"
+ # shellcheck disable=SC2086
+ (
+ cd ./etcdctl
+ run env GO_BUILD_FLAGS="${GO_BUILD_FLAGS}" "${GO_BUILD_ENV[@]}" go build $GO_BUILD_FLAGS \
+ -trimpath \
+ -installsuffix=cgo \
+ "-ldflags=${GO_LDFLAGS[*]}" \
+ -o="../${out}/etcdctl" . || return 2
+ ) || return 2
+ # Verify whether symbol we overwrote exists
+ # For cross-compiling we cannot run: ${out}/etcd --version | grep -q "Git SHA: ${GIT_SHA}"
+
+ # We need symbols to do this check:
+ if [[ "${GO_LDFLAGS[*]}" != *"-s"* ]]; then
+ go tool nm "${out}/etcd" | grep "${VERSION_SYMBOL}" > /dev/null
+ if [[ "${PIPESTATUS[*]}" != "0 0" ]]; then
+ log_error "FAIL: Symbol ${VERSION_SYMBOL} not found in binary: ${out}/etcd"
+ return 2
+ fi
+ fi
+}
+
+tools_build() {
+ out="bin"
+ if [[ -n "${BINDIR:-}" ]]; then out="${BINDIR}"; fi
+ tools_path="tools/benchmark
+ tools/etcd-dump-db
+ tools/etcd-dump-logs
+ tools/local-tester/bridge"
+ for tool in ${tools_path}
+ do
+ echo "Building" "'${tool}'"...
+ run rm -f "${out}/${tool}"
+ # shellcheck disable=SC2086
+ run env GO_BUILD_FLAGS="${GO_BUILD_FLAGS}" CGO_ENABLED=${CGO_ENABLED} go build ${GO_BUILD_FLAGS} \
+ -trimpath \
+ -installsuffix=cgo \
+ "-ldflags=${GO_LDFLAGS[*]}" \
+ -o="${out}/${tool}" "./${tool}" || return 2
+ done
+}
+
+run_build() {
+ echo Running "$1"
+ if $1; then
+ log_success "SUCCESS: $1 (GOARCH=${GOARCH})"
+ else
+ log_error "FAIL: $1 (GOARCH=${GOARCH})"
+ exit 2
+ fi
+}
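The library honors a few environment overrides; an illustrative cross-compile invocation (the values are examples, not defaults):
  $ GOOS=linux GOARCH=arm64 BINDIR=out ./scripts/build.sh   # binaries land in ./out
  $ GO_LDFLAGS='-s -w' ./scripts/build.sh                   # stripped build; the nm symbol check is skipped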
diff --git a/scripts/build_tools.sh b/scripts/build_tools.sh
new file mode 100755
index 00000000000..7145126d890
--- /dev/null
+++ b/scripts/build_tools.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+source ./scripts/test_lib.sh
+source ./scripts/build_lib.sh
+
+run_build tools_build
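Expected usage, assuming it is run from the repository root like the other scripts:
  $ ./scripts/build_tools.sh  # builds benchmark, etcd-dump-db, etcd-dump-logs and local-tester/bridge under ./bin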
diff --git a/scripts/codecov_upload.sh b/scripts/codecov_upload.sh
index f75ad60e032..b516b3bf1cb 100755
--- a/scripts/codecov_upload.sh
+++ b/scripts/codecov_upload.sh
@@ -8,7 +8,7 @@ set -o pipefail
LOG_FILE=${1:-test-coverage.log}
# We collect the coverage
-COVERDIR=covdir PASSES='build build_cov cov' ./test.sh 2>&1 | tee "${LOG_FILE}"
+COVERDIR=covdir PASSES='build cov' ./scripts/test.sh 2>&1 | tee "${LOG_FILE}"
test_success="$?"
# We try to upload whatever we have:
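With the updated test entry point, the upload helper is still invoked the same way (the log path below is a hypothetical example):
  $ ./scripts/codecov_upload.sh                # writes test-coverage.log
  $ ./scripts/codecov_upload.sh /tmp/cov.log   # custom log file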
diff --git a/scripts/etcd_version_annotations.txt b/scripts/etcd_version_annotations.txt
new file mode 100644
index 00000000000..3f0a0a676d3
--- /dev/null
+++ b/scripts/etcd_version_annotations.txt
@@ -0,0 +1,480 @@
+authpb.Permission: ""
+authpb.Permission.READ: ""
+authpb.Permission.READWRITE: ""
+authpb.Permission.Type: ""
+authpb.Permission.WRITE: ""
+authpb.Permission.key: ""
+authpb.Permission.permType: ""
+authpb.Permission.range_end: ""
+authpb.Role: ""
+authpb.Role.keyPermission: ""
+authpb.Role.name: ""
+authpb.User: ""
+authpb.User.name: ""
+authpb.User.options: ""
+authpb.User.password: ""
+authpb.User.roles: ""
+authpb.UserAddOptions: ""
+authpb.UserAddOptions.no_password: ""
+etcdserverpb.AlarmMember: "3.0"
+etcdserverpb.AlarmMember.alarm: ""
+etcdserverpb.AlarmMember.memberID: ""
+etcdserverpb.AlarmRequest: "3.0"
+etcdserverpb.AlarmRequest.ACTIVATE: ""
+etcdserverpb.AlarmRequest.AlarmAction: "3.0"
+etcdserverpb.AlarmRequest.DEACTIVATE: ""
+etcdserverpb.AlarmRequest.GET: ""
+etcdserverpb.AlarmRequest.action: ""
+etcdserverpb.AlarmRequest.alarm: ""
+etcdserverpb.AlarmRequest.memberID: ""
+etcdserverpb.AlarmResponse: "3.0"
+etcdserverpb.AlarmResponse.alarms: ""
+etcdserverpb.AlarmResponse.header: ""
+etcdserverpb.AlarmType: "3.0"
+etcdserverpb.AuthDisableRequest: "3.0"
+etcdserverpb.AuthDisableResponse: "3.0"
+etcdserverpb.AuthDisableResponse.header: ""
+etcdserverpb.AuthEnableRequest: "3.0"
+etcdserverpb.AuthEnableResponse: "3.0"
+etcdserverpb.AuthEnableResponse.header: ""
+etcdserverpb.AuthRoleAddRequest: "3.0"
+etcdserverpb.AuthRoleAddRequest.name: ""
+etcdserverpb.AuthRoleAddResponse: "3.0"
+etcdserverpb.AuthRoleAddResponse.header: ""
+etcdserverpb.AuthRoleDeleteRequest: "3.0"
+etcdserverpb.AuthRoleDeleteRequest.role: ""
+etcdserverpb.AuthRoleDeleteResponse: "3.0"
+etcdserverpb.AuthRoleDeleteResponse.header: ""
+etcdserverpb.AuthRoleGetRequest: "3.0"
+etcdserverpb.AuthRoleGetRequest.role: ""
+etcdserverpb.AuthRoleGetResponse: ""
+etcdserverpb.AuthRoleGetResponse.header: "3.0"
+etcdserverpb.AuthRoleGetResponse.perm: "3.0"
+etcdserverpb.AuthRoleGrantPermissionRequest: "3.0"
+etcdserverpb.AuthRoleGrantPermissionRequest.name: ""
+etcdserverpb.AuthRoleGrantPermissionRequest.perm: ""
+etcdserverpb.AuthRoleGrantPermissionResponse: "3.0"
+etcdserverpb.AuthRoleGrantPermissionResponse.header: ""
+etcdserverpb.AuthRoleListRequest: "3.0"
+etcdserverpb.AuthRoleListResponse: "3.0"
+etcdserverpb.AuthRoleListResponse.header: ""
+etcdserverpb.AuthRoleListResponse.roles: ""
+etcdserverpb.AuthRoleRevokePermissionRequest: "3.0"
+etcdserverpb.AuthRoleRevokePermissionRequest.key: ""
+etcdserverpb.AuthRoleRevokePermissionRequest.range_end: ""
+etcdserverpb.AuthRoleRevokePermissionRequest.role: ""
+etcdserverpb.AuthRoleRevokePermissionResponse: "3.0"
+etcdserverpb.AuthRoleRevokePermissionResponse.header: ""
+etcdserverpb.AuthStatusRequest: "3.5"
+etcdserverpb.AuthStatusResponse: "3.5"
+etcdserverpb.AuthStatusResponse.authRevision: ""
+etcdserverpb.AuthStatusResponse.enabled: ""
+etcdserverpb.AuthStatusResponse.header: ""
+etcdserverpb.AuthUserAddRequest: "3.0"
+etcdserverpb.AuthUserAddRequest.hashedPassword: "3.5"
+etcdserverpb.AuthUserAddRequest.name: ""
+etcdserverpb.AuthUserAddRequest.options: "3.4"
+etcdserverpb.AuthUserAddRequest.password: ""
+etcdserverpb.AuthUserAddResponse: "3.0"
+etcdserverpb.AuthUserAddResponse.header: ""
+etcdserverpb.AuthUserChangePasswordRequest: "3.0"
+etcdserverpb.AuthUserChangePasswordRequest.hashedPassword: "3.5"
+etcdserverpb.AuthUserChangePasswordRequest.name: ""
+etcdserverpb.AuthUserChangePasswordRequest.password: ""
+etcdserverpb.AuthUserChangePasswordResponse: "3.0"
+etcdserverpb.AuthUserChangePasswordResponse.header: ""
+etcdserverpb.AuthUserDeleteRequest: "3.0"
+etcdserverpb.AuthUserDeleteRequest.name: ""
+etcdserverpb.AuthUserDeleteResponse: "3.0"
+etcdserverpb.AuthUserDeleteResponse.header: ""
+etcdserverpb.AuthUserGetRequest: "3.0"
+etcdserverpb.AuthUserGetRequest.name: ""
+etcdserverpb.AuthUserGetResponse: "3.0"
+etcdserverpb.AuthUserGetResponse.header: ""
+etcdserverpb.AuthUserGetResponse.roles: ""
+etcdserverpb.AuthUserGrantRoleRequest: "3.0"
+etcdserverpb.AuthUserGrantRoleRequest.role: ""
+etcdserverpb.AuthUserGrantRoleRequest.user: ""
+etcdserverpb.AuthUserGrantRoleResponse: "3.0"
+etcdserverpb.AuthUserGrantRoleResponse.header: ""
+etcdserverpb.AuthUserListRequest: "3.0"
+etcdserverpb.AuthUserListResponse: "3.0"
+etcdserverpb.AuthUserListResponse.header: ""
+etcdserverpb.AuthUserListResponse.users: ""
+etcdserverpb.AuthUserRevokeRoleRequest: "3.0"
+etcdserverpb.AuthUserRevokeRoleRequest.name: ""
+etcdserverpb.AuthUserRevokeRoleRequest.role: ""
+etcdserverpb.AuthUserRevokeRoleResponse: "3.0"
+etcdserverpb.AuthUserRevokeRoleResponse.header: ""
+etcdserverpb.AuthenticateRequest: "3.0"
+etcdserverpb.AuthenticateRequest.name: ""
+etcdserverpb.AuthenticateRequest.password: ""
+etcdserverpb.AuthenticateResponse: "3.0"
+etcdserverpb.AuthenticateResponse.header: ""
+etcdserverpb.AuthenticateResponse.token: ""
+etcdserverpb.CORRUPT: "3.3"
+etcdserverpb.CompactionRequest: "3.0"
+etcdserverpb.CompactionRequest.physical: ""
+etcdserverpb.CompactionRequest.revision: ""
+etcdserverpb.CompactionResponse: "3.0"
+etcdserverpb.CompactionResponse.header: ""
+etcdserverpb.Compare: "3.0"
+etcdserverpb.Compare.CREATE: ""
+etcdserverpb.Compare.CompareResult: "3.0"
+etcdserverpb.Compare.CompareTarget: "3.0"
+etcdserverpb.Compare.EQUAL: ""
+etcdserverpb.Compare.GREATER: ""
+etcdserverpb.Compare.LEASE: "3.3"
+etcdserverpb.Compare.LESS: ""
+etcdserverpb.Compare.MOD: ""
+etcdserverpb.Compare.NOT_EQUAL: "3.1"
+etcdserverpb.Compare.VALUE: ""
+etcdserverpb.Compare.VERSION: ""
+etcdserverpb.Compare.create_revision: ""
+etcdserverpb.Compare.key: ""
+etcdserverpb.Compare.lease: "3.3"
+etcdserverpb.Compare.mod_revision: ""
+etcdserverpb.Compare.range_end: "3.3"
+etcdserverpb.Compare.result: ""
+etcdserverpb.Compare.target: ""
+etcdserverpb.Compare.value: ""
+etcdserverpb.Compare.version: ""
+etcdserverpb.DefragmentRequest: "3.0"
+etcdserverpb.DefragmentResponse: "3.0"
+etcdserverpb.DefragmentResponse.header: ""
+etcdserverpb.DeleteRangeRequest: "3.0"
+etcdserverpb.DeleteRangeRequest.key: ""
+etcdserverpb.DeleteRangeRequest.prev_kv: "3.1"
+etcdserverpb.DeleteRangeRequest.range_end: ""
+etcdserverpb.DeleteRangeResponse: "3.0"
+etcdserverpb.DeleteRangeResponse.deleted: ""
+etcdserverpb.DeleteRangeResponse.header: ""
+etcdserverpb.DeleteRangeResponse.prev_kvs: "3.1"
+etcdserverpb.DowngradeRequest: "3.5"
+etcdserverpb.DowngradeRequest.CANCEL: ""
+etcdserverpb.DowngradeRequest.DowngradeAction: "3.5"
+etcdserverpb.DowngradeRequest.ENABLE: ""
+etcdserverpb.DowngradeRequest.VALIDATE: ""
+etcdserverpb.DowngradeRequest.action: ""
+etcdserverpb.DowngradeRequest.version: ""
+etcdserverpb.DowngradeResponse: "3.5"
+etcdserverpb.DowngradeResponse.header: ""
+etcdserverpb.DowngradeResponse.version: ""
+etcdserverpb.EmptyResponse: ""
+etcdserverpb.HashKVRequest: "3.3"
+etcdserverpb.HashKVRequest.revision: ""
+etcdserverpb.HashKVResponse: "3.3"
+etcdserverpb.HashKVResponse.compact_revision: ""
+etcdserverpb.HashKVResponse.hash: ""
+etcdserverpb.HashKVResponse.hash_revision: "3.6"
+etcdserverpb.HashKVResponse.header: ""
+etcdserverpb.HashRequest: "3.0"
+etcdserverpb.HashResponse: "3.0"
+etcdserverpb.HashResponse.hash: ""
+etcdserverpb.HashResponse.header: ""
+etcdserverpb.InternalAuthenticateRequest: "3.0"
+etcdserverpb.InternalAuthenticateRequest.name: ""
+etcdserverpb.InternalAuthenticateRequest.password: ""
+etcdserverpb.InternalAuthenticateRequest.simple_token: ""
+etcdserverpb.InternalRaftRequest: "3.0"
+etcdserverpb.InternalRaftRequest.ID: ""
+etcdserverpb.InternalRaftRequest.alarm: ""
+etcdserverpb.InternalRaftRequest.auth_disable: ""
+etcdserverpb.InternalRaftRequest.auth_enable: ""
+etcdserverpb.InternalRaftRequest.auth_role_add: ""
+etcdserverpb.InternalRaftRequest.auth_role_delete: ""
+etcdserverpb.InternalRaftRequest.auth_role_get: ""
+etcdserverpb.InternalRaftRequest.auth_role_grant_permission: ""
+etcdserverpb.InternalRaftRequest.auth_role_list: ""
+etcdserverpb.InternalRaftRequest.auth_role_revoke_permission: ""
+etcdserverpb.InternalRaftRequest.auth_status: "3.5"
+etcdserverpb.InternalRaftRequest.auth_user_add: ""
+etcdserverpb.InternalRaftRequest.auth_user_change_password: ""
+etcdserverpb.InternalRaftRequest.auth_user_delete: ""
+etcdserverpb.InternalRaftRequest.auth_user_get: ""
+etcdserverpb.InternalRaftRequest.auth_user_grant_role: ""
+etcdserverpb.InternalRaftRequest.auth_user_list: ""
+etcdserverpb.InternalRaftRequest.auth_user_revoke_role: ""
+etcdserverpb.InternalRaftRequest.authenticate: ""
+etcdserverpb.InternalRaftRequest.cluster_member_attr_set: "3.5"
+etcdserverpb.InternalRaftRequest.cluster_version_set: "3.5"
+etcdserverpb.InternalRaftRequest.compaction: ""
+etcdserverpb.InternalRaftRequest.delete_range: ""
+etcdserverpb.InternalRaftRequest.downgrade_info_set: "3.5"
+etcdserverpb.InternalRaftRequest.header: ""
+etcdserverpb.InternalRaftRequest.lease_checkpoint: "3.4"
+etcdserverpb.InternalRaftRequest.lease_grant: ""
+etcdserverpb.InternalRaftRequest.lease_revoke: ""
+etcdserverpb.InternalRaftRequest.put: ""
+etcdserverpb.InternalRaftRequest.range: ""
+etcdserverpb.InternalRaftRequest.txn: ""
+etcdserverpb.InternalRaftRequest.v2: ""
+etcdserverpb.LeaseCheckpoint: "3.4"
+etcdserverpb.LeaseCheckpoint.ID: ""
+etcdserverpb.LeaseCheckpoint.remaining_TTL: ""
+etcdserverpb.LeaseCheckpointRequest: "3.4"
+etcdserverpb.LeaseCheckpointRequest.checkpoints: ""
+etcdserverpb.LeaseCheckpointResponse: "3.4"
+etcdserverpb.LeaseCheckpointResponse.header: ""
+etcdserverpb.LeaseGrantRequest: "3.0"
+etcdserverpb.LeaseGrantRequest.ID: ""
+etcdserverpb.LeaseGrantRequest.TTL: ""
+etcdserverpb.LeaseGrantResponse: "3.0"
+etcdserverpb.LeaseGrantResponse.ID: ""
+etcdserverpb.LeaseGrantResponse.TTL: ""
+etcdserverpb.LeaseGrantResponse.error: ""
+etcdserverpb.LeaseGrantResponse.header: ""
+etcdserverpb.LeaseKeepAliveRequest: "3.0"
+etcdserverpb.LeaseKeepAliveRequest.ID: ""
+etcdserverpb.LeaseKeepAliveResponse: "3.0"
+etcdserverpb.LeaseKeepAliveResponse.ID: ""
+etcdserverpb.LeaseKeepAliveResponse.TTL: ""
+etcdserverpb.LeaseKeepAliveResponse.header: ""
+etcdserverpb.LeaseLeasesRequest: "3.3"
+etcdserverpb.LeaseLeasesResponse: "3.3"
+etcdserverpb.LeaseLeasesResponse.header: ""
+etcdserverpb.LeaseLeasesResponse.leases: ""
+etcdserverpb.LeaseRevokeRequest: "3.0"
+etcdserverpb.LeaseRevokeRequest.ID: ""
+etcdserverpb.LeaseRevokeResponse: "3.0"
+etcdserverpb.LeaseRevokeResponse.header: ""
+etcdserverpb.LeaseStatus: "3.3"
+etcdserverpb.LeaseStatus.ID: ""
+etcdserverpb.LeaseTimeToLiveRequest: "3.1"
+etcdserverpb.LeaseTimeToLiveRequest.ID: ""
+etcdserverpb.LeaseTimeToLiveRequest.keys: ""
+etcdserverpb.LeaseTimeToLiveResponse: "3.1"
+etcdserverpb.LeaseTimeToLiveResponse.ID: ""
+etcdserverpb.LeaseTimeToLiveResponse.TTL: ""
+etcdserverpb.LeaseTimeToLiveResponse.grantedTTL: ""
+etcdserverpb.LeaseTimeToLiveResponse.header: ""
+etcdserverpb.LeaseTimeToLiveResponse.keys: ""
+etcdserverpb.Member: "3.0"
+etcdserverpb.Member.ID: ""
+etcdserverpb.Member.clientURLs: ""
+etcdserverpb.Member.isLearner: "3.4"
+etcdserverpb.Member.name: ""
+etcdserverpb.Member.peerURLs: ""
+etcdserverpb.MemberAddRequest: "3.0"
+etcdserverpb.MemberAddRequest.isLearner: "3.4"
+etcdserverpb.MemberAddRequest.peerURLs: ""
+etcdserverpb.MemberAddResponse: "3.0"
+etcdserverpb.MemberAddResponse.header: ""
+etcdserverpb.MemberAddResponse.member: ""
+etcdserverpb.MemberAddResponse.members: ""
+etcdserverpb.MemberListRequest: "3.0"
+etcdserverpb.MemberListRequest.linearizable: "3.5"
+etcdserverpb.MemberListResponse: "3.0"
+etcdserverpb.MemberListResponse.header: ""
+etcdserverpb.MemberListResponse.members: ""
+etcdserverpb.MemberPromoteRequest: "3.4"
+etcdserverpb.MemberPromoteRequest.ID: ""
+etcdserverpb.MemberPromoteResponse: "3.4"
+etcdserverpb.MemberPromoteResponse.header: ""
+etcdserverpb.MemberPromoteResponse.members: ""
+etcdserverpb.MemberRemoveRequest: "3.0"
+etcdserverpb.MemberRemoveRequest.ID: ""
+etcdserverpb.MemberRemoveResponse: "3.0"
+etcdserverpb.MemberRemoveResponse.header: ""
+etcdserverpb.MemberRemoveResponse.members: ""
+etcdserverpb.MemberUpdateRequest: "3.0"
+etcdserverpb.MemberUpdateRequest.ID: ""
+etcdserverpb.MemberUpdateRequest.peerURLs: ""
+etcdserverpb.MemberUpdateResponse: "3.0"
+etcdserverpb.MemberUpdateResponse.header: ""
+etcdserverpb.MemberUpdateResponse.members: "3.1"
+etcdserverpb.Metadata: ""
+etcdserverpb.Metadata.ClusterID: ""
+etcdserverpb.Metadata.NodeID: ""
+etcdserverpb.MoveLeaderRequest: "3.3"
+etcdserverpb.MoveLeaderRequest.targetID: ""
+etcdserverpb.MoveLeaderResponse: "3.3"
+etcdserverpb.MoveLeaderResponse.header: ""
+etcdserverpb.NONE: ""
+etcdserverpb.NOSPACE: ""
+etcdserverpb.PutRequest: "3.0"
+etcdserverpb.PutRequest.ignore_lease: "3.2"
+etcdserverpb.PutRequest.ignore_value: "3.2"
+etcdserverpb.PutRequest.key: ""
+etcdserverpb.PutRequest.lease: ""
+etcdserverpb.PutRequest.prev_kv: "3.1"
+etcdserverpb.PutRequest.value: ""
+etcdserverpb.PutResponse: "3.0"
+etcdserverpb.PutResponse.header: ""
+etcdserverpb.PutResponse.prev_kv: "3.1"
+etcdserverpb.RangeRequest: "3.0"
+etcdserverpb.RangeRequest.ASCEND: ""
+etcdserverpb.RangeRequest.CREATE: ""
+etcdserverpb.RangeRequest.DESCEND: ""
+etcdserverpb.RangeRequest.KEY: ""
+etcdserverpb.RangeRequest.MOD: ""
+etcdserverpb.RangeRequest.NONE: ""
+etcdserverpb.RangeRequest.SortOrder: "3.0"
+etcdserverpb.RangeRequest.SortTarget: "3.0"
+etcdserverpb.RangeRequest.VALUE: ""
+etcdserverpb.RangeRequest.VERSION: ""
+etcdserverpb.RangeRequest.count_only: ""
+etcdserverpb.RangeRequest.key: ""
+etcdserverpb.RangeRequest.keys_only: ""
+etcdserverpb.RangeRequest.limit: ""
+etcdserverpb.RangeRequest.max_create_revision: "3.1"
+etcdserverpb.RangeRequest.max_mod_revision: "3.1"
+etcdserverpb.RangeRequest.min_create_revision: "3.1"
+etcdserverpb.RangeRequest.min_mod_revision: "3.1"
+etcdserverpb.RangeRequest.range_end: ""
+etcdserverpb.RangeRequest.revision: ""
+etcdserverpb.RangeRequest.serializable: ""
+etcdserverpb.RangeRequest.sort_order: ""
+etcdserverpb.RangeRequest.sort_target: ""
+etcdserverpb.RangeResponse: "3.0"
+etcdserverpb.RangeResponse.count: ""
+etcdserverpb.RangeResponse.header: ""
+etcdserverpb.RangeResponse.kvs: ""
+etcdserverpb.RangeResponse.more: ""
+etcdserverpb.Request: ""
+etcdserverpb.Request.Dir: ""
+etcdserverpb.Request.Expiration: ""
+etcdserverpb.Request.ID: ""
+etcdserverpb.Request.Method: ""
+etcdserverpb.Request.Path: ""
+etcdserverpb.Request.PrevExist: ""
+etcdserverpb.Request.PrevIndex: ""
+etcdserverpb.Request.PrevValue: ""
+etcdserverpb.Request.Quorum: ""
+etcdserverpb.Request.Recursive: ""
+etcdserverpb.Request.Refresh: ""
+etcdserverpb.Request.Since: ""
+etcdserverpb.Request.Sorted: ""
+etcdserverpb.Request.Stream: ""
+etcdserverpb.Request.Time: ""
+etcdserverpb.Request.Val: ""
+etcdserverpb.Request.Wait: ""
+etcdserverpb.RequestHeader: "3.0"
+etcdserverpb.RequestHeader.ID: ""
+etcdserverpb.RequestHeader.auth_revision: "3.1"
+etcdserverpb.RequestHeader.username: ""
+etcdserverpb.RequestOp: "3.0"
+etcdserverpb.RequestOp.request_delete_range: ""
+etcdserverpb.RequestOp.request_put: ""
+etcdserverpb.RequestOp.request_range: ""
+etcdserverpb.RequestOp.request_txn: "3.3"
+etcdserverpb.ResponseHeader: "3.0"
+etcdserverpb.ResponseHeader.cluster_id: ""
+etcdserverpb.ResponseHeader.member_id: ""
+etcdserverpb.ResponseHeader.raft_term: ""
+etcdserverpb.ResponseHeader.revision: ""
+etcdserverpb.ResponseOp: "3.0"
+etcdserverpb.ResponseOp.response_delete_range: ""
+etcdserverpb.ResponseOp.response_put: ""
+etcdserverpb.ResponseOp.response_range: ""
+etcdserverpb.ResponseOp.response_txn: "3.3"
+etcdserverpb.SnapshotRequest: "3.3"
+etcdserverpb.SnapshotResponse: "3.3"
+etcdserverpb.SnapshotResponse.blob: ""
+etcdserverpb.SnapshotResponse.header: ""
+etcdserverpb.SnapshotResponse.remaining_bytes: ""
+etcdserverpb.SnapshotResponse.version: "3.6"
+etcdserverpb.StatusRequest: "3.0"
+etcdserverpb.StatusResponse: "3.0"
+etcdserverpb.StatusResponse.dbSize: ""
+etcdserverpb.StatusResponse.dbSizeInUse: "3.4"
+etcdserverpb.StatusResponse.dbSizeQuota: "3.6"
+etcdserverpb.StatusResponse.errors: "3.4"
+etcdserverpb.StatusResponse.header: ""
+etcdserverpb.StatusResponse.isLearner: "3.4"
+etcdserverpb.StatusResponse.leader: ""
+etcdserverpb.StatusResponse.raftAppliedIndex: "3.4"
+etcdserverpb.StatusResponse.raftIndex: ""
+etcdserverpb.StatusResponse.raftTerm: ""
+etcdserverpb.StatusResponse.storageVersion: "3.6"
+etcdserverpb.StatusResponse.version: ""
+etcdserverpb.TxnRequest: "3.0"
+etcdserverpb.TxnRequest.compare: ""
+etcdserverpb.TxnRequest.failure: ""
+etcdserverpb.TxnRequest.success: ""
+etcdserverpb.TxnResponse: "3.0"
+etcdserverpb.TxnResponse.header: ""
+etcdserverpb.TxnResponse.responses: ""
+etcdserverpb.TxnResponse.succeeded: ""
+etcdserverpb.WatchCancelRequest: "3.1"
+etcdserverpb.WatchCancelRequest.watch_id: "3.1"
+etcdserverpb.WatchCreateRequest: "3.0"
+etcdserverpb.WatchCreateRequest.FilterType: "3.1"
+etcdserverpb.WatchCreateRequest.NODELETE: ""
+etcdserverpb.WatchCreateRequest.NOPUT: ""
+etcdserverpb.WatchCreateRequest.filters: "3.1"
+etcdserverpb.WatchCreateRequest.fragment: "3.4"
+etcdserverpb.WatchCreateRequest.key: ""
+etcdserverpb.WatchCreateRequest.prev_kv: "3.1"
+etcdserverpb.WatchCreateRequest.progress_notify: ""
+etcdserverpb.WatchCreateRequest.range_end: ""
+etcdserverpb.WatchCreateRequest.start_revision: ""
+etcdserverpb.WatchCreateRequest.watch_id: "3.4"
+etcdserverpb.WatchProgressRequest: "3.4"
+etcdserverpb.WatchRequest: "3.0"
+etcdserverpb.WatchRequest.cancel_request: ""
+etcdserverpb.WatchRequest.create_request: ""
+etcdserverpb.WatchRequest.progress_request: "3.4"
+etcdserverpb.WatchResponse: "3.0"
+etcdserverpb.WatchResponse.cancel_reason: "3.4"
+etcdserverpb.WatchResponse.canceled: ""
+etcdserverpb.WatchResponse.compact_revision: ""
+etcdserverpb.WatchResponse.created: ""
+etcdserverpb.WatchResponse.events: ""
+etcdserverpb.WatchResponse.fragment: "3.4"
+etcdserverpb.WatchResponse.header: ""
+etcdserverpb.WatchResponse.watch_id: ""
+membershippb.Attributes: "3.5"
+membershippb.Attributes.client_urls: ""
+membershippb.Attributes.name: ""
+membershippb.ClusterMemberAttrSetRequest: "3.5"
+membershippb.ClusterMemberAttrSetRequest.member_ID: ""
+membershippb.ClusterMemberAttrSetRequest.member_attributes: ""
+membershippb.ClusterVersionSetRequest: "3.5"
+membershippb.ClusterVersionSetRequest.ver: ""
+membershippb.DowngradeInfoSetRequest: "3.5"
+membershippb.DowngradeInfoSetRequest.enabled: ""
+membershippb.DowngradeInfoSetRequest.ver: ""
+membershippb.Member: "3.5"
+membershippb.Member.ID: ""
+membershippb.Member.member_attributes: ""
+membershippb.Member.raft_attributes: ""
+membershippb.RaftAttributes: "3.5"
+membershippb.RaftAttributes.is_learner: ""
+membershippb.RaftAttributes.peer_urls: ""
+mvccpb.Event: ""
+mvccpb.Event.DELETE: ""
+mvccpb.Event.EventType: ""
+mvccpb.Event.PUT: ""
+mvccpb.Event.kv: ""
+mvccpb.Event.prev_kv: ""
+mvccpb.Event.type: ""
+mvccpb.KeyValue: ""
+mvccpb.KeyValue.create_revision: ""
+mvccpb.KeyValue.key: ""
+mvccpb.KeyValue.lease: ""
+mvccpb.KeyValue.mod_revision: ""
+mvccpb.KeyValue.value: ""
+mvccpb.KeyValue.version: ""
+pb.GoFeatures: ""
+pb.GoFeatures.APILevel: ""
+pb.GoFeatures.API_HYBRID: ""
+pb.GoFeatures.API_LEVEL_UNSPECIFIED: ""
+pb.GoFeatures.API_OPAQUE: ""
+pb.GoFeatures.API_OPEN: ""
+pb.GoFeatures.STRIP_ENUM_PREFIX_GENERATE_BOTH: ""
+pb.GoFeatures.STRIP_ENUM_PREFIX_KEEP: ""
+pb.GoFeatures.STRIP_ENUM_PREFIX_STRIP: ""
+pb.GoFeatures.STRIP_ENUM_PREFIX_UNSPECIFIED: ""
+pb.GoFeatures.StripEnumPrefix: ""
+pb.GoFeatures.api_level: ""
+pb.GoFeatures.legacy_unmarshal_json_enum: ""
+pb.GoFeatures.strip_enum_prefix: ""
+walpb.Record: ""
+walpb.Record.crc: ""
+walpb.Record.data: ""
+walpb.Record.type: ""
+walpb.Snapshot: ""
+walpb.Snapshot.conf_state: ""
+walpb.Snapshot.index: ""
+walpb.Snapshot.term: ""
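Each entry appears to map a protobuf message, field or enum value to the etcd minor version recorded in its version annotation, with an empty string where no explicit annotation is expected. For example, reading two of the entries above (the trailing comments are editorial, not part of the file format):
  etcdserverpb.AuthStatusRequest: "3.5"       # AuthStatus RPC introduced in etcd 3.5
  etcdserverpb.AuthStatusResponse.header: ""  # field carries no explicit version annotation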
diff --git a/scripts/fix.sh b/scripts/fix.sh
index 1925f87bc3f..91e3018bc15 100755
--- a/scripts/fix.sh
+++ b/scripts/fix.sh
@@ -1,6 +1,6 @@
#!/usr/bin/env bash
-set -e
+set -euo pipefail
# Top level problems with modules can lead to test_lib being not functional
go mod tidy
@@ -29,11 +29,10 @@ function bash_ws_fix {
find ./ -name '*.sh.bak' -print0 | xargs -0 rm
}
-log_callout -e "\\nFixing etcd code for you...\\n"
+log_callout -e "\\nFixing etcd code for you...\n"
run_for_modules mod_tidy_fix || exit 2
run_for_modules run ${GO_CMD} fmt || exit 2
-run_for_module tests bom_fix || exit 2
bash_ws_fix || exit 2
log_success -e "\\nSUCCESS: etcd code is fixed :)"
diff --git a/scripts/fuzzing.sh b/scripts/fuzzing.sh
new file mode 100755
index 00000000000..65ffaa92ab6
--- /dev/null
+++ b/scripts/fuzzing.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+source ./scripts/test_lib.sh
+
+GO_CMD="go"
+fuzz_time=${FUZZ_TIME:-"300s"}
+target_path=${TARGET_PATH:-"./server/etcdserver/api/v3rpc"}
+TARGETS="FuzzTxnRangeRequest FuzzTxnPutRequest FuzzTxnDeleteRangeRequest"
+
+
+for target in ${TARGETS}; do
+ log_callout -e "\\nExecuting fuzzing with target ${target} in $target_path with a timeout of $fuzz_time\\n"
+ run pushd "${target_path}"
+ $GO_CMD test -fuzz "${target}" -fuzztime "${fuzz_time}"
+ run popd
+  log_success -e "\\nCOMPLETED: fuzzing with target $target in $target_path\\n"
+done
+
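The fuzz time and target package can be overridden through the environment variables the script already reads (the values below are examples):
  $ ./scripts/fuzzing.sh                # 300s per target in ./server/etcdserver/api/v3rpc
  $ FUZZ_TIME=60s ./scripts/fuzzing.sh  # shorter local run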
diff --git a/scripts/genproto.sh b/scripts/genproto.sh
index 8b09b6dab0c..52e94a1bab5 100755
--- a/scripts/genproto.sh
+++ b/scripts/genproto.sh
@@ -3,51 +3,105 @@
# Generate all etcd protobuf bindings.
# Run from repository root directory named etcd.
#
-set -e
+set -euo pipefail
+
+shopt -s globstar
if ! [[ "$0" =~ scripts/genproto.sh ]]; then
echo "must be run from repository root"
exit 255
fi
+# Set SED variable
+if LANG=C sed --help 2>&1 | grep -q GNU; then
+ SED="sed"
+elif command -v gsed &>/dev/null; then
+ SED="gsed"
+else
+ echo "Failed to find GNU sed as sed or gsed. If you are on Mac: brew install gnu-sed." >&2
+ exit 1
+fi
+
source ./scripts/test_lib.sh
-if [[ $(protoc --version | cut -f2 -d' ') != "3.14.0" ]]; then
- echo "could not find protoc 3.14.0, is it installed + in PATH?"
- exit 255
+if [[ $(protoc --version | cut -f2 -d' ') != "3.20.3" ]]; then
+ echo "Could not find protoc 3.20.3, installing now..."
+
+ arch=$(go env GOARCH)
+
+ case ${arch} in
+ "amd64") file="x86_64" ;;
+ "arm64") file="aarch_64" ;;
+ *)
+ echo "Unsupported architecture: ${arch}"
+ exit 255
+ ;;
+ esac
+
+ download_url="https://github.com/protocolbuffers/protobuf/releases/download/v3.20.3/protoc-3.20.3-linux-${file}.zip"
+ echo "Running on ${arch}."
+ mkdir -p bin
+ wget ${download_url} && unzip -p protoc-3.20.3-linux-${file}.zip bin/protoc > tmpFile && mv tmpFile bin/protoc
+ rm protoc-3.20.3-linux-${file}.zip
+ chmod +x bin/protoc
+ PATH=$PATH:$(pwd)/bin
+ export PATH
+ echo "Now running: $(protoc --version)"
+
fi
GOFAST_BIN=$(tool_get_bin github.com/gogo/protobuf/protoc-gen-gofast)
-GRPC_GATEWAY_BIN=$(tool_get_bin github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway)
-SWAGGER_BIN=$(tool_get_bin github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger)
+GRPC_GATEWAY_BIN=$(tool_get_bin github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway)
+OPENAPIV2_BIN=$(tool_get_bin github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2)
GOGOPROTO_ROOT="$(tool_pkg_dir github.com/gogo/protobuf/proto)/.."
-GRPC_GATEWAY_ROOT="$(tool_pkg_dir github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway)/.."
+GRPC_GATEWAY_ROOT="$(tool_pkg_dir github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway)/.."
+RAFT_ROOT="$(tool_pkg_dir go.etcd.io/raft/v3/raftpb)/.."
+GOOGLEAPI_ROOT=$(mktemp -d -t 'googleapi.XXXXX')
+
+readonly googleapi_commit=0adf469dcd7822bf5bc058a7b0217f5558a75643
+
+function cleanup_googleapi() {
+ rm -rf "${GOOGLEAPI_ROOT}"
+}
+
+trap cleanup_googleapi EXIT
+
+# TODO(ahrtr): use buf (https://github.com/bufbuild/buf) to manage the protobuf dependencies?
+function download_googleapi() {
+ run pushd "${GOOGLEAPI_ROOT}"
+ run git init
+ run git remote add upstream https://github.com/googleapis/googleapis.git
+ run git fetch upstream "${googleapi_commit}"
+ run git reset --hard FETCH_HEAD
+ run popd
+}
+
+download_googleapi
echo
echo "Resolved binary and packages versions:"
echo " - protoc-gen-gofast: ${GOFAST_BIN}"
echo " - protoc-gen-grpc-gateway: ${GRPC_GATEWAY_BIN}"
-echo " - swagger: ${SWAGGER_BIN}"
+echo " - openapiv2: ${OPENAPIV2_BIN}"
echo " - gogoproto-root: ${GOGOPROTO_ROOT}"
echo " - grpc-gateway-root: ${GRPC_GATEWAY_ROOT}"
+echo " - raft-root: ${RAFT_ROOT}"
GOGOPROTO_PATH="${GOGOPROTO_ROOT}:${GOGOPROTO_ROOT}/protobuf"
# directories containing protos to be built
-DIRS="./server/wal/walpb ./api/etcdserverpb ./server/etcdserver/api/snap/snappb ./raft/raftpb ./api/mvccpb ./server/lease/leasepb ./api/authpb ./server/etcdserver/api/v3lock/v3lockpb ./server/etcdserver/api/v3election/v3electionpb ./api/membershippb"
+DIRS="./server/storage/wal/walpb ./api/etcdserverpb ./server/etcdserver/api/snap/snappb ./api/mvccpb ./server/lease/leasepb ./api/authpb ./server/etcdserver/api/v3lock/v3lockpb ./server/etcdserver/api/v3election/v3electionpb ./api/membershippb ./api/versionpb"
log_callout -e "\\nRunning gofast (gogo) proto generation..."
for dir in ${DIRS}; do
run pushd "${dir}"
- run protoc --gofast_out=plugins=grpc:. -I=".:${GOGOPROTO_PATH}:${ETCD_ROOT_DIR}/..:${ETCD_ROOT_DIR}:${GRPC_GATEWAY_ROOT}/third_party/googleapis" \
- --plugin="${GOFAST_BIN}" ./*.proto
+ run protoc --gofast_out=plugins=grpc:. -I=".:${GOGOPROTO_PATH}:${ETCD_ROOT_DIR}/..:${RAFT_ROOT}:${ETCD_ROOT_DIR}:${GOOGLEAPI_ROOT}" \
+ --gofast_opt=paths=source_relative,Mraftpb/raft.proto=go.etcd.io/raft/v3/raftpb,Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor \
+ -I"${GRPC_GATEWAY_ROOT}" \
+ --plugin="${GOFAST_BIN}" ./**/*.proto
- run sed -i.bak -E 's|"etcd/api/|"go.etcd.io/etcd/api/v3/|g' ./*.pb.go
- run sed -i.bak -E 's|"raft/raftpb"|"go.etcd.io/etcd/raft/v3/raftpb"|g' ./*.pb.go
-
- rm -f ./*.bak
- run gofmt -s -w ./*.pb.go
- run goimports -w ./*.pb.go
+ run gofmt -s -w ./**/*.pb.go
+ run_go_tool "golang.org/x/tools/cmd/goimports" -w ./**/*.pb.go
run popd
done
@@ -58,26 +112,31 @@ rm -rf Documentation/dev-guide/apispec/swagger/*json
for pb in api/etcdserverpb/rpc server/etcdserver/api/v3lock/v3lockpb/v3lock server/etcdserver/api/v3election/v3electionpb/v3election; do
log_callout "grpc & swagger for: ${pb}.proto"
run protoc -I. \
- -I"${GRPC_GATEWAY_ROOT}"/third_party/googleapis \
+ -I"${GOOGLEAPI_ROOT}" \
+ -I"${GRPC_GATEWAY_ROOT}" \
-I"${GOGOPROTO_PATH}" \
-I"${ETCD_ROOT_DIR}/.." \
+ -I"${RAFT_ROOT}" \
--grpc-gateway_out=logtostderr=true,paths=source_relative:. \
- --swagger_out=logtostderr=true:./Documentation/dev-guide/apispec/swagger/. \
- --plugin="${SWAGGER_BIN}" --plugin="${GRPC_GATEWAY_BIN}" \
+ --grpc-gateway_opt=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor,Mgoogle/protobuf/struct.proto=github.com/gogo/protobuf/types \
+ --openapiv2_out=json_names_for_fields=false,logtostderr=true:./Documentation/dev-guide/apispec/swagger/. \
+ --openapiv2_opt=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor,Mgoogle/protobuf/struct.proto=github.com/gogo/protobuf/types:. \
+ --plugin="${OPENAPIV2_BIN}" \
+ --plugin="${GRPC_GATEWAY_BIN}" \
${pb}.proto
# hack to move gw files around so client won't include them
pkgpath=$(dirname "${pb}")
pkg=$(basename "${pkgpath}")
gwfile="${pb}.pb.gw.go"
- run sed -i -E "s#package $pkg#package gw#g" "${gwfile}"
- run sed -i -E "s#import \\(#import \\(\"go.etcd.io/etcd/${pkgpath}\"#g" "${gwfile}"
- run sed -i -E "s#([ (])([a-zA-Z0-9_]*(Client|Server|Request)([^(]|$))#\\1${pkg}.\\2#g" "${gwfile}"
- run sed -i -E "s# (New[a-zA-Z0-9_]*Client\\()# ${pkg}.\\1#g" "${gwfile}"
- run sed -i -E "s|go.etcd.io/etcd|go.etcd.io/etcd/v3|g" "${gwfile}"
- run sed -i -E "s|go.etcd.io/etcd/v3/api|go.etcd.io/etcd/api/v3|g" "${gwfile}"
- run sed -i -E "s|go.etcd.io/etcd/v3/server|go.etcd.io/etcd/server/v3|g" "${gwfile}"
-
+ run ${SED?} -i -E "s#package $pkg#package gw#g" "${gwfile}"
+ run ${SED?} -i -E "s#import \\(#import \\(\"go.etcd.io/etcd/${pkgpath}\"#g" "${gwfile}"
+ run ${SED?} -i -E "s#([ (])([a-zA-Z0-9_]*(Client|Server|Request)([^(]|$))#\\1${pkg}.\\2#g" "${gwfile}"
+ run ${SED?} -i -E "s# (New[a-zA-Z0-9_]*Client\\()# ${pkg}.\\1#g" "${gwfile}"
+ run ${SED?} -i -E "s|go.etcd.io/etcd|go.etcd.io/etcd/v3|g" "${gwfile}"
+ run ${SED?} -i -E "s|go.etcd.io/etcd/v3/api|go.etcd.io/etcd/api/v3|g" "${gwfile}"
+ run ${SED?} -i -E "s|go.etcd.io/etcd/v3/server|go.etcd.io/etcd/server/v3|g" "${gwfile}"
+
run go fmt "${gwfile}"
gwdir="${pkgpath}/gw/"
@@ -89,26 +148,80 @@ for pb in api/etcdserverpb/rpc server/etcdserver/api/v3lock/v3lockpb/v3lock serv
Documentation/dev-guide/apispec/swagger/"${swaggerName}".swagger.json
done
-log_callout -e "\\nRunning swagger ..."
-run_go_tool github.com/hexfusion/schwag -input=Documentation/dev-guide/apispec/swagger/rpc.swagger.json
+# We only upgraded grpc-gateway from v1 to v2, but keep gogo/protobuf as it is for now.
+# So we have to convert v1 messages to v2 messages. Once we get rid of gogo/protobuf and
+# start to depend on protobuf v2, we can remove this patch.
+#
+# TODO(https://github.com/etcd-io/etcd/issues/14533): Remove the patch below after removal of gogo/protobuf
+for pb in api/etcdserverpb/rpc server/etcdserver/api/v3lock/v3lockpb/v3lock server/etcdserver/api/v3election/v3electionpb/v3election; do
+ gwfile="$(dirname ${pb})/gw/$(basename ${pb}).pb.gw.go"
+
+ # Changes something like below,
+ # import (
+ # + protov1 "github.com/golang/protobuf/proto"
+ # +
+ run ${SED?} -i -E "s|import \(|import \(\n\tprotov1 \"github.com/golang/protobuf/proto\"\n|g" "${gwfile}"
+
+ # Changes something like below,
+ # - return msg, metadata, err
+ # + return protov1.MessageV2(msg), metadata, err
+ run ${SED?} -i -E "s|return msg, metadata, err|return protov1.MessageV2\(msg\), metadata, err|g" "${gwfile}"
+
+ # Changes something like below,
+ # - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ # + if err := marshaler.NewDecoder(newReader()).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF {
+ run ${SED?} -i -E "s|Decode\(\&protoReq\)|Decode\(protov1\.MessageV2\(\&protoReq\)\)|g" "${gwfile}"
+
+ # Changes something like below,
+ # - forward_Lease_LeaseKeepAlive_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
+ # + forward_Lease_LeaseKeepAlive_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) {
+ # + m1, err := resp.Recv()
+ # + return protov1.MessageV2(m1), err
+ # + }, mux.GetForwardResponseOptions()...)
+ run ${SED?} -i -E "s|return resp.Recv\(\)|\n\t\t\tm1, err := resp.Recv\(\)\n\t\t\treturn protov1.MessageV2\(m1\), err\n\t\t|g" "${gwfile}"
-if [ "$1" != "--skip-protodoc" ]; then
+ run go fmt "${gwfile}"
+done
+
+if [ "${1:-}" != "--skip-protodoc" ]; then
log_callout "protodoc is auto-generating grpc API reference documentation..."
- run rm -rf Documentation/dev-guide/api_reference_v3.md
+ # API reference
+ API_REFERENCE_FILE="Documentation/dev-guide/api_reference_v3.md"
+ run rm -rf ${API_REFERENCE_FILE}
run_go_tool go.etcd.io/protodoc --directories="api/etcdserverpb=service_message,api/mvccpb=service_message,server/lease/leasepb=service_message,api/authpb=service_message" \
- --title="etcd API Reference" \
- --output="Documentation/dev-guide/api_reference_v3.md" \
+ --output="${API_REFERENCE_FILE}" \
--message-only-from-this-file="api/etcdserverpb/rpc.proto" \
- --disclaimer="This is a generated documentation. Please read the proto files for more." || exit 2
+ --disclaimer="---
+title: API reference
+---
+
+This API reference is autogenerated from the named \`.proto\` files." || exit 2
+
+ # remove the first 3 lines of the doc as an empty --title adds '### ' to the top of the file.
+ run ${SED?} -i -e 1,3d ${API_REFERENCE_FILE}
- run rm -rf Documentation/dev-guide/api_concurrency_reference_v3.md
+ # API reference: concurrency
+ API_REFERENCE_CONCURRENCY_FILE="Documentation/dev-guide/api_concurrency_reference_v3.md"
+ run rm -rf ${API_REFERENCE_CONCURRENCY_FILE}
run_go_tool go.etcd.io/protodoc --directories="server/etcdserver/api/v3lock/v3lockpb=service_message,server/etcdserver/api/v3election/v3electionpb=service_message,api/mvccpb=service_message" \
- --title="etcd concurrency API Reference" \
- --output="Documentation/dev-guide/api_concurrency_reference_v3.md" \
- --disclaimer="This is a generated documentation. Please read the proto files for more." || exit 2
+ --output="${API_REFERENCE_CONCURRENCY_FILE}" \
+ --disclaimer="---
+title: \"API reference: concurrency\"
+---
+
+This API reference is autogenerated from the named \`.proto\` files." || exit 2
+
+ # remove the first 3 lines of the doc as an empty --title adds '### ' to the top of the file.
+ run ${SED?} -i -e 1,3d ${API_REFERENCE_CONCURRENCY_FILE}
log_success "protodoc is finished."
+ log_warning -e "\\nThe API references have NOT been automatically published on the website."
+ log_success -e "\\nTo publish the API references, copy the following files"
+ log_success " - ${API_REFERENCE_FILE}"
+ log_success " - ${API_REFERENCE_CONCURRENCY_FILE}"
+ log_success "to the etcd-io/website repo under the /content/en/docs/next/dev-guide/ folder."
+ log_success "(https://github.com/etcd-io/website/tree/main/content/en/docs/next/dev-guide)"
else
log_warning "skipping grpc API reference document auto-generation..."
fi
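With the new optional flag handling, both invocations below are valid (run from the repository root):
  $ ./scripts/genproto.sh                  # regenerate bindings plus the API reference docs
  $ ./scripts/genproto.sh --skip-protodoc  # regenerate bindings only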
diff --git a/scripts/install-marker.sh b/scripts/install-marker.sh
deleted file mode 100755
index 467492666d1..00000000000
--- a/scripts/install-marker.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-ARCH=$1
-
-if [ -z "$1" ]; then
- echo "Usage: ${0} [amd64 or darwin], defaulting to 'amd64'" >> /dev/stderr
- ARCH=amd64
-fi
-
-MARKER_URL=https://storage.googleapis.com/etcd/test-binaries/marker-v0.4.0-x86_64-unknown-linux-gnu
-if [ ${ARCH} == "darwin" ]; then
- MARKER_URL=https://storage.googleapis.com/etcd/test-binaries/marker-v0.4.0-x86_64-apple-darwin
-fi
-
-echo "Installing marker"
-curl -L "${MARKER_URL}" -o "${GOPATH}"/bin/marker
-chmod 755 "${GOPATH}"/bin/marker
-
-"${GOPATH}"/bin/marker --version
diff --git a/scripts/measure-testgrid-flakiness.sh b/scripts/measure-testgrid-flakiness.sh
new file mode 100755
index 00000000000..6cde5418727
--- /dev/null
+++ b/scripts/measure-testgrid-flakiness.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# Measures test flakiness and creates issues for flaky tests.
+
+set -euo pipefail
+
+if [[ -z ${GITHUB_TOKEN:-} ]]
+then
+ echo "Please set the \$GITHUB_TOKEN environment variable for the script to work"
+ exit 1
+fi
+
+pushd ./tools/testgrid-analysis
+# ci-etcd-e2e-amd64 and ci-etcd-unit-test-amd64 run 6 times a day; keep a rolling window of 14 days.
+go run main.go flaky --create-issue --dashboard=sig-etcd-periodics --tab=ci-etcd-e2e-amd64 --max-days=14
+go run main.go flaky --create-issue --dashboard=sig-etcd-periodics --tab=ci-etcd-unit-test-amd64 --max-days=14
+
+# do not create issues for presubmit tests
+go run main.go flaky --dashboard=sig-etcd-presubmits --tab=pull-etcd-e2e-amd64
+go run main.go flaky --dashboard=sig-etcd-presubmits --tab=pull-etcd-unit-test
+
+popd
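A sketch of how the new script is expected to be run; GITHUB_TOKEN must be set for issue creation, and the token value below is a placeholder:
  $ GITHUB_TOKEN=<token> ./scripts/measure-testgrid-flakiness.sh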
diff --git a/scripts/release b/scripts/release
deleted file mode 100755
index ec40bb8b577..00000000000
--- a/scripts/release
+++ /dev/null
@@ -1,322 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-source ./scripts/test_lib.sh
-source ./scripts/release_mod.sh
-
-DRY_RUN=${DRY_RUN:-true}
-
-# Following preparation steps help with the release process:
-
-# If you use password-protected gpg key, make sure the password is managed
-# by agent:
-#
-# % gpg-connect-agent reloadagent /bye
-# % gpg -s --default-key [git-email]@google.com -o /dev/null -s /dev/null
-#
-# Refresh your google credentials:
-# % gcloud auth login
-# or
-# % gcloud auth activate-service-account --key-file=gcp-key-etcd-development.json
-#
-# Make sure gcloud-docker plugin is configured:
-# % gcloud auth configure-docker
-
-
-help() {
- echo "$(basename "$0") [version]"
- echo "Release etcd using the same approach as the etcd-release-runbook (https://goo.gl/Gxwysq)"
- echo ""
- echo "WARNING: This does not perform the 'Add API capabilities', 'Performance testing' "
- echo " or 'Documentation' steps. These steps must be performed manually BEFORE running this tool."
- echo ""
- echo "WARNING: This script does not sign releases, publish releases to github or sent announcement"
- echo " emails. These steps must be performed manually AFTER running this tool."
- echo ""
- echo " args:"
- echo " version: version of etcd to release, e.g. 'v3.2.18'"
- echo " flags:"
- echo " --no-upload: skip gs://etcd binary artifact uploads."
- echo " --no-docker-push: skip docker image pushes."
- echo ""
- echo "One can perform a (dry-run) test release from any (uncommitted) branch using:"
- echo " DRY_RUN=true REPOSITORY=\`pwd\` BRANCH='local-branch-name' ./scripts/release 3.5.0-foobar.2"
-}
-
-main() {
- VERSION=$1
- if [[ ! "${VERSION}" =~ [0-9]+.[0-9]+.[0-9]+ ]]; then
- log_error "Expected 'version' param of the form '..' but got '${VERSION}'"
- exit 1
- fi
- RELEASE_VERSION="v${VERSION}"
- MINOR_VERSION=$(echo "${VERSION}" | cut -d. -f 1-2)
- BRANCH=${BRANCH:-"release-${MINOR_VERSION}"}
- REPOSITORY=${REPOSITORY:-"git@github.com:etcd-io/etcd.git"}
-
- log_warning "DRY_RUN=${DRY_RUN}"
- log_callout "RELEASE_VERSION=${RELEASE_VERSION}"
- log_callout "MINOR_VERSION=${MINOR_VERSION}"
- log_callout "BRANCH=${BRANCH}"
- log_callout "REPOSITORY=${REPOSITORY}"
- log_callout ""
-
- # Required to enable 'docker manifest ...'
- export DOCKER_CLI_EXPERIMENTAL=enabled
-
- if ! command -v docker >/dev/null; then
- log_error "cannot find docker"
- exit 1
- fi
-
- # Expected umask for etcd release artifacts
- umask 022
-
- # Set up release directory.
- local reldir="/tmp/etcd-release-${VERSION}"
- log_callout "Preparing temporary directory: ${reldir}"
- if [ ! -d "${reldir}/etcd" ]; then
- mkdir -p "${reldir}"
- cd "${reldir}"
- run git clone "${REPOSITORY}" --branch "${BRANCH}"
- fi
- run cd "${reldir}/etcd" || exit 2
- # mark local directory as root for test_lib scripts executions
- set_root_dir
-
- run git checkout "${BRANCH}" || exit 2
- run git pull origin
- git_assert_branch_in_sync || exit 2
-
- # If a release version tag already exists, use it.
- local remote_tag_exists
- remote_tag_exists=$(run git ls-remote origin "refs/tags/${RELEASE_VERSION}" | grep -c "${RELEASE_VERSION}" || true)
-
- if [ "${remote_tag_exists}" -gt 0 ]; then
- log_callout "Release version tag exists on remote. Checking out refs/tags/${RELEASE_VERSION}"
- git checkout -q "tags/${RELEASE_VERSION}"
- fi
-
- # Check go version.
- local go_version current_go_version
- go_version="go$(run_go_tool "github.com/mikefarah/yq/v3" read .travis.yml "go[0]")"
- current_go_version=$(go version | awk '{ print $3 }')
- if [[ "${current_go_version}" != "${go_version}" ]]; then
- log_error "Current go version is ${current_go_version}, but etcd ${RELEASE_VERSION} requires ${go_version} (see .travis.yml)."
- exit 1
- fi
-
- # If the release tag does not already exist remotely, create it.
- if [ "${remote_tag_exists}" -eq 0 ]; then
- # Bump version/version.go to release version.
- local source_version
- source_version=$(grep -E "\s+Version\s*=" api/version/version.go | sed -e "s/.*\"\(.*\)\".*/\1/g")
- if [[ "${source_version}" != "${VERSION}" ]]; then
- source_minor_version=$(echo "${source_version}" | cut -d. -f 1-2)
- if [[ "${source_minor_version}" != "${MINOR_VERSION}" ]]; then
- log_error "Wrong etcd minor version in api/version/version.go. Expected ${MINOR_VERSION} but got ${source_minor_version}. Aborting."
- exit 1
- fi
- log_callout "Updating modules definitions"
- TARGET_VERSION="v${VERSION}" update_versions_cmd
-
- log_callout "Updating version from ${source_version} to ${VERSION} in api/version/version.go"
- sed -i "s/${source_version}/${VERSION}/g" api/version/version.go
- fi
-
-
- log_callout "Building etcd and checking --version output"
- run ./build.sh
- local etcd_version
- etcd_version=$(bin/etcd --version | grep "etcd Version" | awk '{ print $3 }')
- if [[ "${etcd_version}" != "${VERSION}" ]]; then
- log_error "Wrong etcd version in version/version.go. Expected ${etcd_version} but got ${VERSION}. Aborting."
- exit 1
- fi
-
- if [[ -n $(git status -s) ]]; then
- log_callout "Committing mods & api/version/version.go update."
- run git add api/version/version.go
- run git add $(find -name go.mod ! -path './release/*'| xargs)
- run git diff --staged | cat
- run git commit -m "version: bump up to ${VERSION}"
- run git diff --staged | cat
- fi
-
- # Push the version change if it's not already been pushed.
- if [ "$(git rev-list --count "origin/${BRANCH}..${BRANCH}")" -gt 0 ]; then
- read -p "Push version bump up to ${VERSION} to '$(git remote get-url origin)' [y/N]? " -r confirm
- [[ "${confirm,,}" == "y" ]] || exit 1
- maybe_run git push
- fi
-
- # Tag release.
- if [ "$(git tag --list | grep -c "${RELEASE_VERSION}")" -gt 0 ]; then
- log_callout "Skipping tag step. git tag ${RELEASE_VERSION} already exists."
- else
- log_callout "Tagging release..."
- REMOTE_REPO="origin" push_mod_tags_cmd
- fi
-
- # Verify the latest commit has the version tag
- local tag="$(git describe --exact-match HEAD)"
- if [ "${tag}" != "${RELEASE_VERSION}" ]; then
- log_error "Error: Expected HEAD to be tagged with ${RELEASE_VERSION}, but 'git describe --exact-match HEAD' reported: ${tag}"
- exit 1
- fi
-
- # Verify the version tag is on the right branch
- local branch=$(git for-each-ref --contains "${RELEASE_VERSION}" --format="%(refname)" 'refs/heads' | cut -d '/' -f 3)
- if [ "${branch}" != "${BRANCH}" ]; then
- log_error "Error: Git tag ${RELEASE_VERSION} should be on branch '${BRANCH}' but is on '${branch}'"
- exit 1
- fi
- fi
-
- # Build release.
- # TODO: check the release directory for all required build artifacts.
- if [ -d release ]; then
- log_warning "Skipping release build step. /release directory already exists."
- else
- log_callout "Building release..."
- if ${DRY_RUN}; then
- log_warning "In DRY_RUN mode we clone the current release directory (as there was no push)"
- REPOSITORY=$(pwd) ./scripts/build-release.sh "${RELEASE_VERSION}"
- else
- REPOSITORY=${REPOSITORY} ./scripts/build-release.sh "${RELEASE_VERSION}"
- fi
- fi
-
- # Sanity checks.
- "./release/etcd-${RELEASE_VERSION}-$(go env GOOS)-amd64/etcd" --version | grep -q "etcd Version: ${VERSION}" || true
- "./release/etcd-${RELEASE_VERSION}-$(go env GOOS)-amd64/etcdctl" version | grep -q "etcdctl version: ${VERSION}" || true
-
- # Generate SHA256SUMS
- log_callout "Generating sha256sums of release artifacts."
- pushd ./release
- ls . | grep -E '\.tar.gz$|\.zip$' | xargs shasum -a 256 > ./SHA256SUMS
- popd
- if [ -s ./release/SHA256SUMS ]; then
- cat ./release/SHA256SUMS
- else
- log_error "sha256sums is not valid. Aborting."
- exit 1
- fi
-
- # Upload artifacts.
- if [ "${NO_UPLOAD}" == 1 ]; then
- log_callout "Skipping artifact upload to gs://etcd. --no-upload flat is set."
- else
- read -p "Upload etcd ${RELEASE_VERSION} release artifacts to gs://etcd [y/N]? " -r confirm
- [[ "${confirm,,}" == "y" ]] || exit 1
- maybe_run gsutil -m cp ./release/SHA256SUMS "gs://etcd/${RELEASE_VERSION}/"
- maybe_run gsutil -m cp ./release/*.zip "gs://etcd/${RELEASE_VERSION}/"
- maybe_run gsutil -m cp ./release/*.tar.gz "gs://etcd/${RELEASE_VERSION}/"
- maybe_run gsutil -m acl ch -u allUsers:R -r "gs://etcd/${RELEASE_VERSION}/"
- fi
-
- # Push images.
- if [ "${NO_DOCKER_PUSH}" == 1 ]; then
- log_callout "Skipping docker push. --no-docker-push flat is set."
- else
- read -p "Publish etcd ${RELEASE_VERSION} docker images to quay.io [y/N]? " -r confirm
- [[ "${confirm,,}" == "y" ]] || exit 1
- # shellcheck disable=SC2034
- for i in {1..5}; do
- docker login quay.io && break
- log_warning "login failed, retrying"
- done
-
- for TARGET_ARCH in "amd64" "arm64" "ppc64le" "s390x"; do
- log_callout "Pushing container images to quay.io ${RELEASE_VERSION}-${TARGET_ARCH}"
- maybe_run docker push "quay.io/coreos/etcd:${RELEASE_VERSION}-${TARGET_ARCH}"
- log_callout "Pushing container images to gcr.io ${RELEASE_VERSION}-${TARGET_ARCH}"
- maybe_run docker push "gcr.io/etcd-development/etcd:${RELEASE_VERSION}-${TARGET_ARCH}"
- done
-
- log_callout "Creating manifest-list (multi-image)..."
-
- for TARGET_ARCH in "amd64" "arm64" "ppc64le" "s390x"; do
- maybe_run docker manifest create --amend "quay.io/coreos/etcd:${RELEASE_VERSION}" "quay.io/coreos/etcd:${RELEASE_VERSION}-${TARGET_ARCH}"
- maybe_run docker manifest annotate "quay.io/coreos/etcd:${RELEASE_VERSION}" "quay.io/coreos/etcd:${RELEASE_VERSION}-${TARGET_ARCH}" --arch "${TARGET_ARCH}"
-
- maybe_run docker manifest create --amend "gcr.io/etcd-development/etcd:${RELEASE_VERSION}" "gcr.io/etcd-development/etcd:${RELEASE_VERSION}-${TARGET_ARCH}"
- maybe_run docker manifest annotate "gcr.io/etcd-development/etcd:${RELEASE_VERSION}" "gcr.io/etcd-development/etcd:${RELEASE_VERSION}-${TARGET_ARCH}" --arch "${TARGET_ARCH}"
- done
-
- log_callout "Pushing container manifest list to quay.io ${RELEASE_VERSION}"
- maybe_run docker manifest push "quay.io/coreos/etcd:${RELEASE_VERSION}"
-
- log_callout "Pushing container manifest list to gcr.io ${RELEASE_VERSION}"
- maybe_run docker manifest push "gcr.io/etcd-development/etcd:${RELEASE_VERSION}"
-
- log_callout "Setting permissions using gsutil..."
- maybe_run gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
- fi
-
- ### Release validation
- mkdir -p downloads
-
- # Check image versions
- for IMAGE in "quay.io/coreos/etcd:${RELEASE_VERSION}" "gcr.io/etcd-development/etcd:${RELEASE_VERSION}"; do
- local image_version=$(dry_run docker run --rm "${IMAGE}" etcd --version | grep "etcd Version" | awk -F: '{print $2}' | tr -d '[:space:]')
- if [ "${image_version}" != "${VERSION}" ]; then
- log_error "Check failed: etcd --version output for ${IMAGE} is incorrect: ${image_version}"
- exit 1
- fi
- done
-
- # Check gsutil binary versions
- local BINARY_TGZ="etcd-${RELEASE_VERSION}-$(go env GOOS)-amd64.tar.gz"
- gsutil cp "gs://etcd/${RELEASE_VERSION}/${BINARY_TGZ}" downloads
- tar -zx -C downloads -f "downloads/${BINARY_TGZ}"
- local binary_version=$("./downloads/etcd-${RELEASE_VERSION}-$(go env GOOS)-amd64/etcd" --version | grep "etcd Version" | awk -F: '{print $2}' | tr -d '[:space:]')
- if [ "${binary_version}" != "${VERSION}" ]; then
- log_error "Check failed: etcd --version output for ${BINARY_TGZ} from gs://etcd/${RELEASE_VERSION} is incorrect: ${binary_version}"
- exit 1
- fi
-
- # TODO: signing process
- log_warning ""
- log_warning "WARNING: The release has not been signed and published to github. This must be done manually."
- log_warning ""
- log_success "Success."
- exit 0
-}
-
-POSITIONAL=()
-NO_UPLOAD=0
-NO_DOCKER_PUSH=0
-
-while test $# -gt 0; do
- case "$1" in
- -h|--help)
- shift
- help
- exit 0
- ;;
- --no-upload)
- NO_UPLOAD=1
- shift
- ;;
- --no-docker-push)
- NO_DOCKER_PUSH=1
- shift
- ;;
- *)
- POSITIONAL+=("$1") # save it in an array for later
- shift # past argument
- ;;
- esac
-done
-set -- "${POSITIONAL[@]}" # restore positional parameters
-
-if [[ ! $# -eq 1 ]]; then
- help
- exit 1
-fi
-
-main "$1"
diff --git a/scripts/release.sh b/scripts/release.sh
new file mode 100755
index 00000000000..aaa1e612c43
--- /dev/null
+++ b/scripts/release.sh
@@ -0,0 +1,452 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source ./scripts/test_lib.sh
+source ./scripts/release_mod.sh
+
+DRY_RUN=${DRY_RUN:-true}
+
+# Following preparation steps help with the release process:
+
+# If you use a password-protected gpg key, make sure the password is managed
+# by the agent:
+#
+# % gpg-connect-agent reloadagent /bye
+# % gpg -s --default-key [git-email]@google.com -o /dev/null -s /dev/null
+#
+# Refresh your google credentials:
+# % gcloud auth login
+# or
+# % gcloud auth activate-service-account --key-file=gcp-key-etcd-development.json
+#
+# Make sure the gcloud Docker credential helper is configured:
+# % gcloud auth configure-docker
+
+
+help() {
+ echo "$(basename "$0") [version]"
+ echo "Release etcd using the same approach as the etcd-release-runbook (https://goo.gl/Gxwysq)"
+ echo ""
+ echo "WARNING: This does not perform the 'Add API capabilities', 'Performance testing' "
+ echo " or 'Documentation' steps. These steps must be performed manually BEFORE running this tool."
+ echo ""
+ echo "WARNING: This script does not send announcement emails. This step must be performed manually AFTER running this tool."
+ echo ""
+ echo " args:"
+ echo " version: version of etcd to release, e.g. 'v3.2.18'"
+ echo " flags:"
+ echo " --in-place: build binaries using current branch."
+ echo " --no-docker-push: skip docker image pushes."
+ echo " --no-gh-release: skip creating the GitHub release using gh."
+ echo " --no-upload: skip gs://etcd binary artifact uploads."
+ echo ""
+ echo "One can perform a (dry-run) test release from any (uncommitted) branch using:"
+ echo " DRY_RUN=true REPOSITORY=\`pwd\` BRANCH='local-branch-name' ./scripts/release.sh 3.5.0-foobar.2"
+}
+
+main() {
+ # Allow receiving the version with the "v" prefix, e.g. v3.6.0.
+ VERSION=${1#v}
+ if [[ ! "${VERSION}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
+ log_error "Expected 'version' param of the form '<major>.<minor>.<patch>' but got '${VERSION}'"
+ exit 1
+ fi
+ RELEASE_VERSION="v${VERSION}"
+ MINOR_VERSION=$(echo "${VERSION}" | cut -d. -f 1-2)
+
+ if [ "${IN_PLACE}" == 1 ]; then
+ # Trigger release in current branch
+ REPOSITORY=$(pwd)
+ BRANCH=$(git rev-parse --abbrev-ref HEAD)
+ else
+ REPOSITORY=${REPOSITORY:-"git@github.com:etcd-io/etcd.git"}
+ BRANCH=${BRANCH:-"release-${MINOR_VERSION}"}
+ fi
+
+ log_warning "DRY_RUN=${DRY_RUN}"
+ log_callout "RELEASE_VERSION=${RELEASE_VERSION}"
+ log_callout "MINOR_VERSION=${MINOR_VERSION}"
+ log_callout "BRANCH=${BRANCH}"
+ log_callout "REPOSITORY=${REPOSITORY}"
+ log_callout ""
+
+ # Required to enable 'docker manifest ...'
+ export DOCKER_CLI_EXPERIMENTAL=enabled
+
+ if ! command -v docker >/dev/null; then
+ log_error "cannot find docker"
+ exit 1
+ fi
+
+ # Expected umask for etcd release artifacts
+ umask 022
+
+ # Set up release directory.
+ local reldir="/tmp/etcd-release-${VERSION}"
+ log_callout "Preparing temporary directory: ${reldir}"
+ if [ "${IN_PLACE}" == 0 ]; then
+ if [ ! -d "${reldir}/etcd" ]; then
+ mkdir -p "${reldir}"
+ cd "${reldir}"
+ run git clone "${REPOSITORY}" --branch "${BRANCH}" --depth 1
+ fi
+ run cd "${reldir}/etcd" || exit 2
+ run git checkout "${BRANCH}" || exit 2
+ run git pull origin
+
+ git_assert_branch_in_sync || exit 2
+ fi
+
+ # mark local directory as root for test_lib scripts executions
+ set_root_dir
+
+ # If a release version tag already exists, use it.
+ local remote_tag_exists
+ remote_tag_exists=$(run git ls-remote origin "refs/tags/${RELEASE_VERSION}" | grep -c "${RELEASE_VERSION}" || true)
+
+ if [ "${remote_tag_exists}" -gt 0 ]; then
+ log_callout "Release version tag exists on remote. Checking out refs/tags/${RELEASE_VERSION}"
+ git checkout -q "tags/${RELEASE_VERSION}"
+ fi
+
+ # Check go version.
+ log_callout "Check go version"
+ local go_version current_go_version
+ go_version="go$(cat .go-version)"
+ current_go_version=$(go version | awk '{ print $3 }')
+ if [[ "${current_go_version}" != "${go_version}" ]]; then
+ log_error "Current go version is ${current_go_version}, but etcd ${RELEASE_VERSION} requires ${go_version} (see .go-version)."
+ exit 1
+ fi
+
+ if [ "${NO_GH_RELEASE}" == 1 ]; then
+ log_callout "Skipping gh verification, --no-gh-release is set"
+ else
+ # Check that gh is installed and logged in.
+ log_callout "Check gh installation"
+ if ! command -v gh >/dev/null; then
+ log_error "Cannot find gh. Please follow the installation instructions at https://github.com/cli/cli#installation"
+ exit 1
+ fi
+ if ! gh auth status &>/dev/null; then
+ log_error "GitHub authentication failed for gh. Please run gh auth login."
+ exit 1
+ fi
+ fi
+
+ # If the release tag does not already exist remotely, create it.
+ log_callout "Create tag if not present"
+ if [ "${remote_tag_exists}" -eq 0 ]; then
+ # Bump version/version.go to release version.
+ local source_version
+ source_version=$(grep -E "\s+Version\s*=" api/version/version.go | sed -e "s/.*\"\(.*\)\".*/\1/g")
+ if [[ "${source_version}" != "${VERSION}" ]]; then
+ source_minor_version=$(echo "${source_version}" | cut -d. -f 1-2)
+ if [[ "${source_minor_version}" != "${MINOR_VERSION}" ]]; then
+ log_error "Wrong etcd minor version in api/version/version.go. Expected ${MINOR_VERSION} but got ${source_minor_version}. Aborting."
+ exit 1
+ fi
+ log_callout "Updating modules definitions"
+ TARGET_VERSION="v${VERSION}" update_versions_cmd
+
+ log_callout "Updating version from ${source_version} to ${VERSION} in api/version/version.go"
+ sed -i "s/${source_version}/${VERSION}/g" api/version/version.go
+ fi
+
+
+ log_callout "Building etcd and checking --version output"
+ run ./scripts/build.sh
+ local etcd_version
+ etcd_version=$(bin/etcd --version | grep "etcd Version" | awk '{ print $3 }')
+ if [[ "${etcd_version}" != "${VERSION}" ]]; then
+ log_error "Wrong etcd version in api/version/version.go. Expected ${VERSION} but got ${etcd_version}. Aborting."
+ exit 1
+ fi
+
+ if [[ -n $(git status -s) ]]; then
+ log_callout "Committing mods & api/version/version.go update."
+ run git add api/version/version.go
+ # shellcheck disable=SC2038,SC2046
+ run git add $(find . -name go.mod ! -path './release/*'| xargs)
+ run git diff --staged | cat
+ run git commit --signoff --message "version: bump up to ${VERSION}"
+ run git diff --staged | cat
+ fi
+
+ # Push the version change if it hasn't already been pushed.
+ if [ "${DRY_RUN}" != "true" ] && [ "$(git rev-list --count "origin/${BRANCH}..${BRANCH}")" -gt 0 ]; then
+ read -p "Push version bump up to ${VERSION} to '$(git remote get-url origin)' [y/N]? " -r confirm
+ [[ "${confirm,,}" == "y" ]] || exit 1
+ maybe_run git push
+ fi
+
+ # Tag release.
+ if [ "$(git tag --list | grep -c "${RELEASE_VERSION}")" -gt 0 ]; then
+ log_callout "Skipping tag step. git tag ${RELEASE_VERSION} already exists."
+ else
+ log_callout "Tagging release..."
+ REMOTE_REPO="origin" push_mod_tags_cmd
+ fi
+
+ if [ "${IN_PLACE}" == 0 ]; then
+ # Tried with `local branch=$(git branch -a --contains tags/"${RELEASE_VERSION}")`
+ # so as to work with both current branch and main/release-3.X.
+ # But got error below on current branch mode,
+ # Error: Git tag v3.6.99 should be on branch '* (HEAD detached at pull/14860/merge)' but is on '* (HEAD detached from pull/14860/merge)'
+ #
+ # Verify the version tag is on the right branch
+ # shellcheck disable=SC2155
+ local branch=$(git for-each-ref --contains "${RELEASE_VERSION}" --format="%(refname)" 'refs/heads' | cut -d '/' -f 3)
+ if [ "${branch}" != "${BRANCH}" ]; then
+ log_error "Error: Git tag ${RELEASE_VERSION} should be on branch '${BRANCH}' but is on '${branch}'"
+ exit 1
+ fi
+ fi
+ fi
+
+ log_callout "Verify the latest commit has the version tag"
+ # Verify the latest commit has the version tag
+ # shellcheck disable=SC2155
+ local tag="$(git describe --exact-match HEAD)"
+ if [ "${tag}" != "${RELEASE_VERSION}" ]; then
+ log_error "Error: Expected HEAD to be tagged with ${RELEASE_VERSION}, but 'git describe --exact-match HEAD' reported: ${tag}"
+ exit 1
+ fi
+
+ log_callout "Verify the work space is clean"
+ # Verify the clean working tree
+ # shellcheck disable=SC2155
+ local diff="$(git diff HEAD --stat)"
+ if [[ "${diff}" != '' ]]; then
+ log_error "Error: Expected clean working tree, but 'git diff --stat' reported: ${diff}"
+ exit 1
+ fi
+
+ # Build release.
+ # TODO: check the release directory for all required build artifacts.
+ if [ -d release ]; then
+ log_warning "Skipping release build step. /release directory already exists."
+ else
+ log_callout "Building release..."
+ REPOSITORY=$(pwd) ./scripts/build-release.sh "${RELEASE_VERSION}"
+ fi
+
+ # Sanity checks.
+ "./release/etcd-${RELEASE_VERSION}-$(go env GOOS)-amd64/etcd" --version | grep -q "etcd Version: ${VERSION}" || true
+ "./release/etcd-${RELEASE_VERSION}-$(go env GOOS)-amd64/etcdctl" version | grep -q "etcdctl version: ${VERSION}" || true
+ "./release/etcd-${RELEASE_VERSION}-$(go env GOOS)-amd64/etcdutl" version | grep -q "etcdutl version: ${VERSION}" || true
+
+ # Generate SHA256SUMS
+ log_callout "Generating sha256sums of release artifacts."
+ pushd ./release
+ # shellcheck disable=SC2010
+ ls . | grep -E '\.tar.gz$|\.zip$' | xargs shasum -a 256 > ./SHA256SUMS
+ popd
+ if [ -s ./release/SHA256SUMS ]; then
+ cat ./release/SHA256SUMS
+ else
+ log_error "sha256sums is not valid. Aborting."
+ exit 1
+ fi
+
+ # Upload artifacts.
+ if [ "${DRY_RUN}" == "true" ] || [ "${NO_UPLOAD}" == 1 ]; then
+ log_callout "Skipping artifact upload to gs://etcd. DRY_RUN is enabled or the --no-upload flag is set."
+ else
+ read -p "Upload etcd ${RELEASE_VERSION} release artifacts to gs://etcd [y/N]? " -r confirm
+ [[ "${confirm,,}" == "y" ]] || exit 1
+ maybe_run gsutil -m cp ./release/SHA256SUMS "gs://etcd/${RELEASE_VERSION}/"
+ maybe_run gsutil -m cp ./release/*.zip "gs://etcd/${RELEASE_VERSION}/"
+ maybe_run gsutil -m cp ./release/*.tar.gz "gs://etcd/${RELEASE_VERSION}/"
+ maybe_run gsutil -m acl ch -u allUsers:R -r "gs://etcd/${RELEASE_VERSION}/"
+ fi
+
+ # Push images.
+ if [ "${DRY_RUN}" == "true" ] || [ "${NO_DOCKER_PUSH}" == 1 ]; then
+ log_callout "Skipping docker push. DRY_RUN is enabled or the --no-docker-push flag is set."
+ else
+ read -p "Publish etcd ${RELEASE_VERSION} docker images to quay.io [y/N]? " -r confirm
+ [[ "${confirm,,}" == "y" ]] || exit 1
+ # shellcheck disable=SC2034
+ for i in {1..5}; do
+ docker login quay.io && break
+ log_warning "login failed, retrying"
+ done
+
+ for TARGET_ARCH in "amd64" "arm64" "ppc64le" "s390x"; do
+ log_callout "Pushing container images to quay.io ${RELEASE_VERSION}-${TARGET_ARCH}"
+ maybe_run docker push "quay.io/coreos/etcd:${RELEASE_VERSION}-${TARGET_ARCH}"
+ log_callout "Pushing container images to gcr.io ${RELEASE_VERSION}-${TARGET_ARCH}"
+ maybe_run docker push "gcr.io/etcd-development/etcd:${RELEASE_VERSION}-${TARGET_ARCH}"
+ done
+
+ log_callout "Creating manifest-list (multi-image)..."
+
+ for TARGET_ARCH in "amd64" "arm64" "ppc64le" "s390x"; do
+ maybe_run docker manifest create --amend "quay.io/coreos/etcd:${RELEASE_VERSION}" "quay.io/coreos/etcd:${RELEASE_VERSION}-${TARGET_ARCH}"
+ maybe_run docker manifest annotate "quay.io/coreos/etcd:${RELEASE_VERSION}" "quay.io/coreos/etcd:${RELEASE_VERSION}-${TARGET_ARCH}" --arch "${TARGET_ARCH}"
+
+ maybe_run docker manifest create --amend "gcr.io/etcd-development/etcd:${RELEASE_VERSION}" "gcr.io/etcd-development/etcd:${RELEASE_VERSION}-${TARGET_ARCH}"
+ maybe_run docker manifest annotate "gcr.io/etcd-development/etcd:${RELEASE_VERSION}" "gcr.io/etcd-development/etcd:${RELEASE_VERSION}-${TARGET_ARCH}" --arch "${TARGET_ARCH}"
+ done
+
+ log_callout "Pushing container manifest list to quay.io ${RELEASE_VERSION}"
+ maybe_run docker manifest push "quay.io/coreos/etcd:${RELEASE_VERSION}"
+
+ log_callout "Pushing container manifest list to gcr.io ${RELEASE_VERSION}"
+ maybe_run docker manifest push "gcr.io/etcd-development/etcd:${RELEASE_VERSION}"
+ fi
+
+ ### Release validation
+ mkdir -p downloads
+
+ # Check image versions
+ for IMAGE in "quay.io/coreos/etcd:${RELEASE_VERSION}" "gcr.io/etcd-development/etcd:${RELEASE_VERSION}"; do
+ if [ "${DRY_RUN}" == "true" ] || [ "${NO_DOCKER_PUSH}" == 1 ]; then
+ IMAGE="${IMAGE}-amd64"
+ fi
+ # shellcheck disable=SC2155
+ local image_version=$(docker run --rm "${IMAGE}" etcd --version | grep "etcd Version" | awk -F: '{print $2}' | tr -d '[:space:]')
+ if [ "${image_version}" != "${VERSION}" ]; then
+ log_error "Check failed: etcd --version output for ${IMAGE} is incorrect: ${image_version}"
+ exit 1
+ fi
+ done
+
+ # Check gsutil binary versions
+ # shellcheck disable=SC2155
+ local BINARY_TGZ="etcd-${RELEASE_VERSION}-$(go env GOOS)-amd64.tar.gz"
+ if [ "${DRY_RUN}" == "true" ] || [ "${NO_UPLOAD}" == 1 ]; then
+ cp "./release/${BINARY_TGZ}" downloads
+ else
+ gsutil cp "gs://etcd/${RELEASE_VERSION}/${BINARY_TGZ}" downloads
+ fi
+ tar -zx -C downloads -f "downloads/${BINARY_TGZ}"
+ # shellcheck disable=SC2155
+ local binary_version=$("./downloads/etcd-${RELEASE_VERSION}-$(go env GOOS)-amd64/etcd" --version | grep "etcd Version" | awk -F: '{print $2}' | tr -d '[:space:]')
+ if [ "${binary_version}" != "${VERSION}" ]; then
+ log_error "Check failed: etcd --version output for ${BINARY_TGZ} from gs://etcd/${RELEASE_VERSION} is incorrect: ${binary_version}"
+ exit 1
+ fi
+
+ if [ "${DRY_RUN}" == "true" ] || [ "${NO_GH_RELEASE}" == 1 ]; then
+ log_warning ""
+ log_warning "WARNING: Skipping GitHub release creation. DRY_RUN is enabled or --no-gh-release is set."
+ log_warning "WARNING: If not running in DRY_RUN mode, please create the GitHub release manually."
+ log_warning ""
+ else
+ local gh_repo
+ local release_notes_temp_file
+ local release_url
+ local gh_release_args=()
+
+ # For the main branch (v3.6), we should mark the release as a prerelease.
+ # The release-3.5 (v3.5) branch should be marked as latest, and release-3.4 (v3.4)
+ # should be left without any additional mark (therefore, it doesn't need a special argument).
+ if [ "${BRANCH}" = "main" ]; then
+ gh_release_args=(--prerelease)
+ elif [ "${BRANCH}" = "release-3.5" ]; then
+ gh_release_args=(--latest)
+ fi
+
+ if [ "${REPOSITORY}" = "$(pwd)" ]; then
+ gh_repo=$(git remote get-url origin)
+ else
+ gh_repo="${REPOSITORY}"
+ fi
+
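+ # Normalize the remote URL to the "github.com/org/repo" form accepted by gh,
+ # e.g. git@github.com:etcd-io/etcd.git -> github.com/etcd-io/etcd.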
+ gh_repo=$(echo "${gh_repo}" | sed 's/^[^@]\+@//' | sed 's/https\?:\/\///' | sed 's/\.git$//' | tr ':' '/')
+ log_callout "Creating GitHub release for ${RELEASE_VERSION} on ${gh_repo}"
+
+ release_notes_temp_file=$(mktemp)
+
+ local release_version=${RELEASE_VERSION#v} # Remove the v prefix from the release version (i.e., v3.6.1 -> 3.6.1)
+ local release_version_major_minor=${release_version%.*} # Remove the patch from the version (i.e., 3.6)
+ local release_version_major=${release_version_major_minor%.*} # Extract the major (i.e., 3)
+ local release_version_minor=${release_version_major_minor/*./} # Extract the minor (i.e., 6)
+
+ # Disable shellcheck SC2016; the single-quoted syntax for sed is intentional.
+ # shellcheck disable=SC2016
+ sed 's/${RELEASE_VERSION}/'"${RELEASE_VERSION}"'/g' ./scripts/release_notes.tpl.txt |
+ sed 's/${RELEASE_VERSION_MAJOR_MINOR}/'"${release_version_major_minor}"'/g' |
+ sed 's/${RELEASE_VERSION_MAJOR}/'"${release_version_major}"'/g' |
+ sed 's/${RELEASE_VERSION_MINOR}/'"${release_version_minor}"'/g' > "${release_notes_temp_file}"
+
+ if ! gh --repo "${gh_repo}" release view "${RELEASE_VERSION}" &>/dev/null; then
+ maybe_run gh release create "${RELEASE_VERSION}" \
+ --repo "${gh_repo}" \
+ --draft \
+ --title "${RELEASE_VERSION}" \
+ --notes-file "${release_notes_temp_file}" \
+ "${gh_release_args[@]}"
+ fi
+
+ # Upload files one by one, as gh doesn't support passing globs as input.
+ maybe_run find ./release '(' -name '*.tar.gz' -o -name '*.zip' ')' -exec \
+ gh --repo "${gh_repo}" release upload "${RELEASE_VERSION}" {} --clobber \;
+ maybe_run gh --repo "${gh_repo}" release upload "${RELEASE_VERSION}" ./release/SHA256SUMS --clobber
+
+ release_url=$(gh --repo "${gh_repo}" release view "${RELEASE_VERSION}" --json url --jq '.url')
+
+ log_warning ""
+ log_warning "WARNING: The GitHub release for ${RELEASE_VERSION} has been created as a draft, please go to ${release_url} and release it."
+ log_warning ""
+ fi
+
+ log_success "Success."
+ exit 0
+}
+
+POSITIONAL=()
+NO_UPLOAD=0
+NO_DOCKER_PUSH=0
+IN_PLACE=0
+NO_GH_RELEASE=0
+
+while test $# -gt 0; do
+ case "$1" in
+ -h|--help)
+ shift
+ help
+ exit 0
+ ;;
+ --in-place)
+ IN_PLACE=1
+ shift
+ ;;
+ --no-upload)
+ NO_UPLOAD=1
+ shift
+ ;;
+ --no-docker-push)
+ NO_DOCKER_PUSH=1
+ shift
+ ;;
+ --no-gh-release)
+ NO_GH_RELEASE=1
+ shift
+ ;;
+ *)
+ POSITIONAL+=("$1") # save it in an array for later
+ shift # past argument
+ ;;
+ esac
+done
+set -- "${POSITIONAL[@]}" # restore positional parameters
+
+if [[ ! $# -eq 1 ]]; then
+ help
+ exit 1
+fi
+
+# Note that we shouldn't upload artifacts in --in-place mode, so it
+# must be called with DRY_RUN=true
+if [ "${DRY_RUN}" != "true" ] && [ "${IN_PLACE}" == 1 ]; then
+ log_error "--in-place should only be called with DRY_RUN=true"
+ exit 1
+fi
+
+main "$1"
diff --git a/scripts/release_mod.sh b/scripts/release_mod.sh
index b0e9d253efc..da6e63cc2e2 100755
--- a/scripts/release_mod.sh
+++ b/scripts/release_mod.sh
@@ -10,7 +10,7 @@
#
# % DRY_RUN=false REMOTE_REPO="origin" ./scripts/release_mod.sh push_mod_tags
-set -e
+set -euo pipefail
source ./scripts/test_lib.sh
@@ -55,7 +55,7 @@ function mod_tidy_fix {
function update_versions_cmd() {
assert_no_git_modifications || return 2
- if [ -z "${TARGET_VERSION}" ]; then
+ if [ -z "${TARGET_VERSION:-}" ]; then
log_error "TARGET_VERSION environment variable not set. Set it to e.g. v3.5.10-alpha.0"
return 2
fi
@@ -89,7 +89,7 @@ function get_gpg_key {
function push_mod_tags_cmd {
assert_no_git_modifications || return 2
- if [ -z "${REMOTE_REPO}" ]; then
+ if [ -z "${REMOTE_REPO:-}" ]; then
log_error "REMOTE_REPO environment variable not set"
return 2
fi
diff --git a/scripts/release_notes.tpl.txt b/scripts/release_notes.tpl.txt
new file mode 100644
index 00000000000..1038a504d6f
--- /dev/null
+++ b/scripts/release_notes.tpl.txt
@@ -0,0 +1,91 @@
+Please check out [CHANGELOG](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-${RELEASE_VERSION_MAJOR_MINOR}.md) for a full list of changes. And make sure to read [upgrade guide](https://etcd.io/docs/v${RELEASE_VERSION_MAJOR_MINOR}/upgrades/upgrade_${RELEASE_VERSION_MAJOR}_${RELEASE_VERSION_MINOR}/) before upgrading etcd (there may be breaking changes).
+
+For installation guides, please check out [play.etcd.io](http://play.etcd.io) and [operating etcd](https://etcd.io/docs/v${RELEASE_VERSION_MAJOR_MINOR}/op-guide/). Latest support status for common architectures and operating systems can be found at [supported platforms](https://etcd.io/docs/v${RELEASE_VERSION_MAJOR_MINOR}/op-guide/supported-platform/).
+
+###### Linux
+
+```sh
+ETCD_VER=${RELEASE_VERSION}
+
+# choose either URL
+GOOGLE_URL=https://storage.googleapis.com/etcd
+GITHUB_URL=https://github.com/etcd-io/etcd/releases/download
+DOWNLOAD_URL=${GOOGLE_URL}
+
+rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
+rm -rf /tmp/etcd-download-test && mkdir -p /tmp/etcd-download-test
+
+curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
+tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download-test --strip-components=1
+rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
+
+/tmp/etcd-download-test/etcd --version
+/tmp/etcd-download-test/etcdctl version
+/tmp/etcd-download-test/etcdutl version
+
+# start a local etcd server
+/tmp/etcd-download-test/etcd
+
+# write,read to etcd
+/tmp/etcd-download-test/etcdctl --endpoints=localhost:2379 put foo bar
+/tmp/etcd-download-test/etcdctl --endpoints=localhost:2379 get foo
+```
+
+###### macOS (Darwin)
+
+```sh
+ETCD_VER=${RELEASE_VERSION}
+
+# choose either URL
+GOOGLE_URL=https://storage.googleapis.com/etcd
+GITHUB_URL=https://github.com/etcd-io/etcd/releases/download
+DOWNLOAD_URL=${GOOGLE_URL}
+
+rm -f /tmp/etcd-${ETCD_VER}-darwin-amd64.zip
+rm -rf /tmp/etcd-download-test && mkdir -p /tmp/etcd-download-test
+
+curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-darwin-amd64.zip -o /tmp/etcd-${ETCD_VER}-darwin-amd64.zip
+unzip /tmp/etcd-${ETCD_VER}-darwin-amd64.zip -d /tmp && rm -f /tmp/etcd-${ETCD_VER}-darwin-amd64.zip
+mv /tmp/etcd-${ETCD_VER}-darwin-amd64/* /tmp/etcd-download-test && rm -rf /tmp/etcd-${ETCD_VER}-darwin-amd64
+
+/tmp/etcd-download-test/etcd --version
+/tmp/etcd-download-test/etcdctl version
+/tmp/etcd-download-test/etcdutl version
+```
+
+###### Docker
+
+etcd uses [`gcr.io/etcd-development/etcd`](https://gcr.io/etcd-development/etcd) as its primary container registry, and [`quay.io/coreos/etcd`](https://quay.io/coreos/etcd) as a secondary one.
+
+```sh
+ETCD_VER=${RELEASE_VERSION}
+
+rm -rf /tmp/etcd-data.tmp && mkdir -p /tmp/etcd-data.tmp && \
+ docker rmi gcr.io/etcd-development/etcd:${ETCD_VER} || true && \
+ docker run \
+ -p 2379:2379 \
+ -p 2380:2380 \
+ --mount type=bind,source=/tmp/etcd-data.tmp,destination=/etcd-data \
+ --name etcd-gcr-${ETCD_VER} \
+ gcr.io/etcd-development/etcd:${ETCD_VER} \
+ /usr/local/bin/etcd \
+ --name s1 \
+ --data-dir /etcd-data \
+ --listen-client-urls http://0.0.0.0:2379 \
+ --advertise-client-urls http://0.0.0.0:2379 \
+ --listen-peer-urls http://0.0.0.0:2380 \
+ --initial-advertise-peer-urls http://0.0.0.0:2380 \
+ --initial-cluster s1=http://0.0.0.0:2380 \
+ --initial-cluster-token tkn \
+ --initial-cluster-state new \
+ --log-level info \
+ --logger zap \
+ --log-outputs stderr
+
+docker exec etcd-gcr-${ETCD_VER} /usr/local/bin/etcd --version
+docker exec etcd-gcr-${ETCD_VER} /usr/local/bin/etcdctl version
+docker exec etcd-gcr-${ETCD_VER} /usr/local/bin/etcdutl version
+docker exec etcd-gcr-${ETCD_VER} /usr/local/bin/etcdctl endpoint health
+docker exec etcd-gcr-${ETCD_VER} /usr/local/bin/etcdctl put foo bar
+docker exec etcd-gcr-${ETCD_VER} /usr/local/bin/etcdctl get foo
+```
diff --git a/scripts/sync_go_toolchain_directive.sh b/scripts/sync_go_toolchain_directive.sh
new file mode 100755
index 00000000000..643138e1d0a
--- /dev/null
+++ b/scripts/sync_go_toolchain_directive.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+# This script looks at the version present in the .go-version file and treats
+# that as the value of the toolchain directive that go should use. It then
+# updates the toolchain directives of all go.mod files to reflect this version.
+#
+# We do this to ensure that .go-version acts as the source of truth for go versions.
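+#
+# For example, if .go-version contains 1.23.4, every go.mod ends up with a
+# "toolchain go1.23.4" directive after running this script.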
+
+set -euo pipefail
+
+source ./scripts/test_lib.sh
+
+TARGET_GO_VERSION="${TARGET_GO_VERSION:-"$(cat "${ETCD_ROOT_DIR}/.go-version")"}"
+find . -name 'go.mod' -exec go mod edit -toolchain=go"${TARGET_GO_VERSION}" {} \;
diff --git a/scripts/test.sh b/scripts/test.sh
new file mode 100755
index 00000000000..75447c80b00
--- /dev/null
+++ b/scripts/test.sh
@@ -0,0 +1,647 @@
+#!/usr/bin/env bash
+#
+# Run all etcd tests
+# ./scripts/test.sh
+# ./scripts/test.sh -v
+#
+#
+# Run specified test pass
+#
+# $ PASSES=unit ./scripts/test.sh
+# $ PASSES=integration ./scripts/test.sh
+#
+#
+# Run tests for one package
+# Each pass has a different default timeout; if you run tests in just one package or a single
+# test case, you can set the TIMEOUT flag to match your expectation.
+#
+# $ PASSES=unit PKG=./wal TIMEOUT=1m ./scripts/test.sh
+# $ PASSES=integration PKG=./clientv3 TIMEOUT=1m ./scripts/test.sh
+#
+# Run specified unit tests in one package
+# To run all the tests with the prefix "TestNew", set "TESTCASE=TestNew";
+# to run only "TestNew", set TESTCASE="\bTestNew\b"
+#
+# $ PASSES=unit PKG=./wal TESTCASE=TestNew TIMEOUT=1m ./scripts/test.sh
+# $ PASSES=unit PKG=./wal TESTCASE="\bTestNew\b" TIMEOUT=1m ./scripts/test.sh
+# $ PASSES=integration PKG=./client/integration TESTCASE="\bTestV2NoRetryEOF\b" TIMEOUT=1m ./scripts/test.sh
+#
+# KEEP_GOING_SUITE must be set to true to continue with the next suite listed in PASSES
+# when a particular suite fails.
+# KEEP_GOING_MODULE must be set to true to continue execution when there is a failure in any module.
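+# For example (illustrative), to keep going across suite failures:
+# $ KEEP_GOING_SUITE=true PASSES="unit integration" ./scripts/test.sh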
+#
+# Run code coverage
+# COVERDIR must either be an absolute path or a relative path to the etcd root
+# $ COVERDIR=coverage PASSES="build cov" ./scripts/test.sh
+# $ go tool cover -html ./coverage/cover.out
+set -e
+
+# Consider command as failed when any component of the pipe fails:
+# https://stackoverflow.com/questions/1221833/pipe-output-and-capture-exit-status-in-bash
+set -o pipefail
+set -o nounset
+
+# The test script is not supposed to make any changes to the files
+# e.g. add/update missing dependencies. Such divergences should be
+# detected and trigger a failure that needs explicit developer's action.
+export GOFLAGS=-mod=readonly
+export ETCD_VERIFY=all
+
+source ./scripts/test_lib.sh
+source ./scripts/build_lib.sh
+
+OUTPUT_FILE=${OUTPUT_FILE:-""}
+
+if [ -n "${OUTPUT_FILE}" ]; then
+ log_callout "Dumping output to: ${OUTPUT_FILE}"
+ exec > >(tee -a "${OUTPUT_FILE}") 2>&1
+fi
+
+PASSES=${PASSES:-"gofmt bom dep build unit"}
+KEEP_GOING_SUITE=${KEEP_GOING_SUITE:-false}
+PKG=${PKG:-}
+SHELLCHECK_VERSION=${SHELLCHECK_VERSION:-"v0.10.0"}
+MARKDOWN_MARKER_VERSION=${MARKDOWN_MARKER_VERSION:="v0.10.0"}
+
+if [ -z "${GOARCH:-}" ]; then
+ GOARCH=$(go env GOARCH);
+fi
+
+# determine whether target supports race detection
+if [ -z "${RACE:-}" ] ; then
+ if [ "$GOARCH" == "amd64" ] || [ "$GOARCH" == "arm64" ]; then
+ RACE="--race"
+ else
+ RACE="--race=false"
+ fi
+else
+ RACE="--race=${RACE:-true}"
+fi
+
+# These options make sense for cases where the SUT (System Under Test) is compiled by the test.
+COMMON_TEST_FLAGS=("${RACE}")
+if [[ -n "${CPU:-}" ]]; then
+ COMMON_TEST_FLAGS+=("--cpu=${CPU}")
+fi
+
+log_callout "Running with ${COMMON_TEST_FLAGS[*]}"
+
+RUN_ARG=()
+if [ -n "${TESTCASE:-}" ]; then
+ RUN_ARG=("-run=${TESTCASE}")
+fi
+
+function build_pass {
+ log_callout "Building etcd"
+ run_for_modules run go build "${@}" || return 2
+ GO_BUILD_FLAGS="-v" etcd_build "${@}"
+ GO_BUILD_FLAGS="-v" tools_build "${@}"
+}
+
+################# REGULAR TESTS ################################################
+
+# run_unit_tests [pkgs] runs unit tests for the current module and the given set of [pkgs]
+function run_unit_tests {
+ local pkgs="${1:-./...}"
+ shift 1
+ # shellcheck disable=SC2068 #For context see - https://github.com/etcd-io/etcd/pull/16433#issuecomment-1684312755
+ GOLANG_TEST_SHORT=true go_test "${pkgs}" "parallel" : -short -timeout="${TIMEOUT:-3m}" ${COMMON_TEST_FLAGS[@]:-} ${RUN_ARG[@]:-} "$@"
+}
+
+function unit_pass {
+ run_for_modules run_unit_tests "$@"
+}
+
+function integration_extra {
+ if [ -z "${PKG}" ] ; then
+ # shellcheck disable=SC2068
+ run_for_module "tests" go_test "./integration/v2store/..." "keep_going" : -timeout="${TIMEOUT:-5m}" ${COMMON_TEST_FLAGS[@]:-} ${RUN_ARG[@]:-} "$@" || return $?
+ else
+ log_warning "integration_extra ignored when PKG is specified"
+ fi
+}
+
+function integration_pass {
+ # shellcheck disable=SC2068
+ run_for_module "tests" go_test "./integration/..." "parallel" : -timeout="${TIMEOUT:-15m}" ${COMMON_TEST_FLAGS[@]:-} ${RUN_ARG[@]:-} -p=2 "$@" || return $?
+ # shellcheck disable=SC2068
+ run_for_module "tests" go_test "./common/..." "parallel" : --tags=integration -timeout="${TIMEOUT:-15m}" ${COMMON_TEST_FLAGS[@]:-} ${RUN_ARG[@]:-} -p=2 "$@" || return $?
+ integration_extra "$@"
+}
+
+function e2e_pass {
+ # e2e tests run a pre-built binary. Settings like --race, -cover, -cpu do not have any impact.
+ # shellcheck disable=SC2068
+ run_for_module "tests" go_test "./e2e/..." "keep_going" : -timeout="${TIMEOUT:-30m}" ${RUN_ARG[@]:-} "$@" || return $?
+ # shellcheck disable=SC2068
+ run_for_module "tests" go_test "./common/..." "keep_going" : --tags=e2e -timeout="${TIMEOUT:-30m}" ${RUN_ARG[@]:-} "$@"
+}
+
+function robustness_pass {
+ # e2e tests run a pre-built binary. Settings like --race, -cover, -cpu do not have any impact.
+ # shellcheck disable=SC2068
+ run_for_module "tests" go_test "./robustness" "keep_going" : -timeout="${TIMEOUT:-30m}" ${RUN_ARG[@]:-} "$@"
+}
+
+function integration_e2e_pass {
+ run_pass "integration" "${@}"
+ run_pass "e2e" "${@}"
+}
+
+# generic_checker [cmd...]
+# executes the given command in the current module, and fails clearly if the command
+# failed or printed output.
+function generic_checker {
+ local cmd=("$@")
+ if ! output=$("${cmd[@]}"); then
+ echo "${output}"
+ log_error -e "FAIL: '${cmd[*]}' checking failed (!=0 return code)"
+ return 255
+ fi
+ if [ -n "${output}" ]; then
+ echo "${output}"
+ log_error -e "FAIL: '${cmd[*]}' checking failed (printed output)"
+ return 255
+ fi
+}
+
+function grpcproxy_pass {
+ run_pass "grpcproxy_integration" "${@}"
+ run_pass "grpcproxy_e2e" "${@}"
+}
+
+function grpcproxy_integration_pass {
+ # shellcheck disable=SC2068
+ run_for_module "tests" go_test "./integration/..." "fail_fast" : -timeout=30m -tags cluster_proxy ${COMMON_TEST_FLAGS[@]:-} "$@"
+}
+
+function grpcproxy_e2e_pass {
+ # shellcheck disable=SC2068
+ run_for_module "tests" go_test "./e2e" "fail_fast" : -timeout=30m -tags cluster_proxy ${COMMON_TEST_FLAGS[@]:-} "$@"
+}
+
+################# COVERAGE #####################################################
+
+# pkg_to_coverprofileflag [prefix] [pkgs]
+# produces the name of the .coverprofile file to be used for tests of this package
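+# e.g. pkg_to_coverprofileflag "unit_api" "./..." prints
+# "-coverprofile=${coverdir}/unit_api/all.coverprofile"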
+function pkg_to_coverprofileflag {
+ local prefix="${1}"
+ local pkgs="${2}"
+ local pkgs_normalized
+ prefix_normalized=$(echo "${prefix}" | tr "./ " "__+")
+ if [ "${pkgs}" == "./..." ]; then
+ pkgs_normalized="all"
+ else
+ pkgs_normalized=$(echo "${pkgs}" | tr "./ " "__+")
+ fi
+ mkdir -p "${coverdir}/${prefix_normalized}"
+ echo -n "-coverprofile=${coverdir}/${prefix_normalized}/${pkgs_normalized}.coverprofile"
+}
+
+function not_test_packages {
+ for m in $(modules); do
+ if [[ $m =~ .*/etcd/tests/v3 ]]; then continue; fi
+ if [[ $m =~ .*/etcd/v3 ]]; then continue; fi
+ echo "${m}/..."
+ done
+}
+
+# split_dir [dir] [num]
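+# distributes the files of [dir] round-robin into [dir]_0 .. [dir]_<num-1>,
+# e.g. split_dir ./covdir/integration 4 fills ./covdir/integration_0 .. ./covdir/integration_3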
+function split_dir {
+ local d="${1}"
+ local num="${2}"
+ local i=0
+ for f in "${d}/"*; do
+ local g=$(( i % num ))
+ mkdir -p "${d}_${g}"
+ mv "${f}" "${d}_${g}/"
+ (( i++ ))
+ done
+}
+
+function split_dir_pass {
+ split_dir ./covdir/integration 4
+}
+
+
+# merge_cov_files [coverdir] [outfile]
+# merges all coverprofile files into a single file in the given directory.
+function merge_cov_files {
+ local coverdir="${1}"
+ local cover_out_file="${2}"
+ log_callout "Merging coverage results in: ${coverdir}"
+ # gocovmerge requires a non-empty coverage file to start with:
+ echo "mode: set" > "${cover_out_file}"
+
+ local i=0
+ local count
+ count=$(find "${coverdir}"/*.coverprofile | wc -l)
+ for f in "${coverdir}"/*.coverprofile; do
+ # print once per 20 files
+ if ! (( "${i}" % 20 )); then
+ log_callout "${i} of ${count}: Merging file: ${f}"
+ fi
+ run_go_tool "github.com/alexfalkowski/gocovmerge" "${f}" "${cover_out_file}" > "${coverdir}/cover.tmp" 2>/dev/null
+ if [ -s "${coverdir}"/cover.tmp ]; then
+ mv "${coverdir}/cover.tmp" "${cover_out_file}"
+ fi
+ (( i++ ))
+ done
+}
+
+# merge_cov [coverdir]
+function merge_cov {
+ log_callout "[$(date)] Merging coverage files ..."
+ coverdir="${1}"
+ for d in "${coverdir}"/*/; do
+ d=${d%*/} # remove the trailing "/"
+ merge_cov_files "${d}" "${d}.coverprofile" &
+ done
+ wait
+ merge_cov_files "${coverdir}" "${coverdir}/all.coverprofile"
+}
+
+# https://docs.codecov.com/docs/unexpected-coverage-changes#reasons-for-indirect-changes
+function cov_pass {
+ # shellcheck disable=SC2153
+ if [ -z "${COVERDIR:-}" ]; then
+ log_error "COVERDIR undeclared"
+ return 255
+ fi
+
+ local coverdir
+ coverdir=$(readlink -f "${COVERDIR}")
+ mkdir -p "${coverdir}"
+ find "${coverdir}" -name '*.coverprofile' -print0 | xargs -0 --no-run-if-empty rm
+
+ local covpkgs
+ covpkgs=$(not_test_packages)
+ local coverpkg_comma
+ coverpkg_comma=$(echo "${covpkgs[@]}" | xargs | tr ' ' ',')
+ local gocov_build_flags=("-covermode=set" "-coverpkg=$coverpkg_comma")
+
+ local failed=""
+
+ log_callout "[$(date)] Collecting coverage from unit tests ..."
+ for m in $(module_dirs); do
+ GOLANG_TEST_SHORT=true run_for_module "${m}" go_test "./..." "parallel" "pkg_to_coverprofileflag unit_${m}" -short -timeout=30m \
+ "${gocov_build_flags[@]}" "$@" || failed="$failed unit"
+ done
+
+ log_callout "[$(date)] Collecting coverage from integration tests ..."
+ run_for_module "tests" go_test "./integration/..." "parallel" "pkg_to_coverprofileflag integration" \
+ -timeout=30m "${gocov_build_flags[@]}" "$@" || failed="$failed integration"
+ # integration-store-v2
+ run_for_module "tests" go_test "./integration/v2store/..." "keep_going" "pkg_to_coverprofileflag store_v2" \
+ -timeout=5m "${gocov_build_flags[@]}" "$@" || failed="$failed integration_v2"
+ # integration_cluster_proxy
+ run_for_module "tests" go_test "./integration/..." "parallel" "pkg_to_coverprofileflag integration_cluster_proxy" \
+ -tags cluster_proxy -timeout=30m "${gocov_build_flags[@]}" || failed="$failed integration_cluster_proxy"
+
+ local cover_out_file="${coverdir}/all.coverprofile"
+ merge_cov "${coverdir}"
+
+ # strip out generated files (using GNU-style sed)
+ sed --in-place -E "/[.]pb[.](gw[.])?go/d" "${cover_out_file}" || true
+
+ sed --in-place -E "s|go.etcd.io/etcd/api/v3/|api/|g" "${cover_out_file}" || true
+ sed --in-place -E "s|go.etcd.io/etcd/client/v3/|client/v3/|g" "${cover_out_file}" || true
+ sed --in-place -E "s|go.etcd.io/etcd/client/pkg/v3|client/pkg/v3/|g" "${cover_out_file}" || true
+ sed --in-place -E "s|go.etcd.io/etcd/etcdctl/v3/|etcdctl/|g" "${cover_out_file}" || true
+ sed --in-place -E "s|go.etcd.io/etcd/etcdutl/v3/|etcdutl/|g" "${cover_out_file}" || true
+ sed --in-place -E "s|go.etcd.io/etcd/pkg/v3/|pkg/|g" "${cover_out_file}" || true
+ sed --in-place -E "s|go.etcd.io/etcd/server/v3/|server/|g" "${cover_out_file}" || true
+
+ # failures were held back so the full coverage file could be generated; fail now
+ if [ -n "$failed" ]; then
+ for f in $failed; do
+ log_error "--- FAIL:" "$f"
+ done
+ log_warning "Despite failures, you can see partial report:"
+ log_warning " go tool cover -html ${cover_out_file}"
+ return 255
+ fi
+
+ log_success "done :) [see report: go tool cover -html ${cover_out_file}]"
+}
+
+######### Code formatting checkers #############################################
+
+function shellcheck_pass {
+ SHELLCHECK=shellcheck
+ if ! tool_exists "shellcheck" "https://github.com/koalaman/shellcheck#installing"; then
+ log_callout "Installing shellcheck $SHELLCHECK_VERSION"
+ wget -qO- "https://github.com/koalaman/shellcheck/releases/download/${SHELLCHECK_VERSION}/shellcheck-${SHELLCHECK_VERSION}.linux.x86_64.tar.xz" | tar -xJv -C /tmp/ --strip-components=1
+ mkdir -p ./bin
+ mv /tmp/shellcheck ./bin/
+ SHELLCHECK=./bin/shellcheck
+ fi
+ generic_checker run ${SHELLCHECK} -fgcc scripts/*.sh
+}
+
+function shellws_pass {
+ log_callout "Ensuring no tab-based indentation in shell scripts"
+ local files
+ if files=$(find . -name '*.sh' -print0 | xargs -0 grep -E -n $'^\s*\t'); then
+ log_error "FAIL: found tab-based indentation in the following bash scripts. Use '  ' (double space) instead:"
+ log_error "${files}"
+ log_warning "Suggestion: run \"make fix\" to address the issue."
+ return 255
+ fi
+ log_success "SUCCESS: no tabulators found."
+}
+
+function markdown_marker_pass {
+ local marker="marker"
+ # TODO: check other markdown files when marker handles headers with '[]'
+ if ! tool_exists "$marker" "https://crates.io/crates/marker"; then
+ log_callout "Installing markdown marker $MARKDOWN_MARKER_VERSION"
+ wget -qO- "https://github.com/crawford/marker/releases/download/${MARKDOWN_MARKER_VERSION}/marker-${MARKDOWN_MARKER_VERSION}-x86_64-unknown-linux-musl.tar.gz" | tar -xzv -C /tmp/ --strip-components=1 >/dev/null
+ mkdir -p ./bin
+ mv /tmp/marker ./bin/
+ marker=./bin/marker
+ fi
+
+ generic_checker run "${marker}" --skip-http --allow-absolute-paths --root "${ETCD_ROOT_DIR}" -e ./CHANGELOG -e ./etcdctl -e etcdutl -e ./tools 2>&1
+}
+
+function govuln_pass {
+ run_for_modules run govulncheck -show verbose
+}
+
+function govet_pass {
+ run_for_modules generic_checker run go vet
+}
+
+function govet_shadow_per_package {
+ local shadow
+ shadow=$1
+
+ # skip grpc_gateway packages because
+ #
+ # stderr: etcdserverpb/gw/rpc.pb.gw.go:2100:3: declaration of "ctx" shadows declaration at line 2005
+ local skip_pkgs=(
+ "go.etcd.io/etcd/api/v3/etcdserverpb/gw"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/gw"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/gw"
+ )
+
+ local pkgs=()
+ while IFS= read -r line; do
+ local in_skip_pkgs="false"
+
+ for pkg in "${skip_pkgs[@]}"; do
+ if [ "${pkg}" == "${line}" ]; then
+ in_skip_pkgs="true"
+ break
+ fi
+ done
+
+ if [ "${in_skip_pkgs}" == "true" ]; then
+ continue
+ fi
+
+ pkgs+=("${line}")
+ done < <(go list ./...)
+
+ run go vet -all -vettool="${shadow}" "${pkgs[@]}"
+}
+
+function govet_shadow_pass {
+ local shadow
+ shadow=$(tool_get_bin "golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow")
+
+ run_for_modules generic_checker govet_shadow_per_package "${shadow}"
+}
+
+function lint_pass {
+ run_for_modules generic_checker run golangci-lint run --config "${ETCD_ROOT_DIR}/tools/.golangci.yaml"
+}
+
+function lint_fix_pass {
+ run_for_modules generic_checker run golangci-lint run --config "${ETCD_ROOT_DIR}/tools/.golangci.yaml" --fix
+}
+
+function license_header_per_module {
+ # bash 3.x compatible replacement of: mapfile -t gofiles < <(go_srcs_in_module)
+ local gofiles=()
+ while IFS= read -r line; do gofiles+=("$line"); done < <(go_srcs_in_module)
+ run_go_tool "github.com/google/addlicense" --check "${gofiles[@]}"
+}
+
+function license_header_pass {
+ run_for_modules generic_checker license_header_per_module
+}
+
+# goword_for_package package
+# checks spelling and comments in the 'package' in the current module
+#
+function goword_for_package {
+ # bash 3.x compatible replacement of: mapfile -t gofiles < <(go_srcs_in_module)
+ local gofiles=()
+ while IFS= read -r line; do gofiles+=("$line"); done < <(go_srcs_in_module)
+
+ local gowordRes
+
+ # spellchecking can be enabled with GOBINARGS="--tags=spell"
+ # but it requires heavy dependencies installation, like:
+ # apt-get install libaspell-dev libhunspell-dev hunspell-en-us aspell-en
+
+ # only check for broken exported godocs
+ if gowordRes=$(run_go_tool "github.com/chzchzchz/goword" -use-spell=false "${gofiles[@]}" | grep godoc-export | sort); then
+ log_error -e "goword checking failed:\\n${gowordRes}"
+ return 255
+ fi
+ if [ -n "$gowordRes" ]; then
+ log_error -e "goword checking returned output:\\n${gowordRes}"
+ return 255
+ fi
+}
+
+
+function goword_pass {
+ run_for_modules goword_for_package || return 255
+}
+
+function go_fmt_for_package {
+ # We utilize 'go fmt' to find all files suitable for formatting,
+ # but reuse the full power of gofmt to perform a read-only (diff-only) check.
+ go fmt -n "$1" | sed 's| -w | -d |g' | sh
+}
+
+function gofmt_pass {
+ run_for_modules generic_checker go_fmt_for_package
+}
+
+function bom_pass {
+ log_callout "Checking bill of materials..."
+ # https://github.com/golang/go/commit/7c388cc89c76bc7167287fb488afcaf5a4aa12bf
+ # shellcheck disable=SC2207
+ modules=($(modules_for_bom))
+
+ # Internally license-bill-of-materials tends to modify go.sum
+ run cp go.sum go.sum.tmp || return 2
+ run cp go.mod go.mod.tmp || return 2
+
+ output=$(GOFLAGS=-mod=mod run_go_tool github.com/appscodelabs/license-bill-of-materials \
+ --override-file ./bill-of-materials.override.json \
+ "${modules[@]}")
+ code="$?"
+
+ run cp go.sum.tmp go.sum || return 2
+ run cp go.mod.tmp go.mod || return 2
+
+ if [ "${code}" -ne 0 ] ; then
+ log_error -e "license-bill-of-materials (code: ${code}) failed with:\\n${output}"
+ return 255
+ else
+ echo "${output}" > "bom-now.json.tmp"
+ fi
+ if ! diff ./bill-of-materials.json bom-now.json.tmp; then
+ log_error "modularized licenses do not match given bill of materials"
+ return 255
+ fi
+ rm bom-now.json.tmp
+}
+
+######## VARIOUS CHECKERS ######################################################
+
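+# dump_deps_of_module
+# prints one line per dependency of the current module in the form:
+#   <path>,<version>,<" (indirect)" or empty>,<module>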
+function dump_deps_of_module() {
+ local module
+ if ! module=$(run go list -m); then
+ return 255
+ fi
+ run go mod edit -json | jq -r '.Require[] | .Path+","+.Version+","+if .Indirect then " (indirect)" else "" end+",'"${module}"'"'
+}
+
+# Checks whether dependencies are consistent across modules
+function dep_pass {
+ local all_dependencies
+ local tools_mod_dependencies
+ all_dependencies=$(run_for_modules dump_deps_of_module | sort) || return 2
+ # tools/mod is a special case. It is a module that is not included in the
+ # module list from test_lib.sh. However, we need to ensure that the
+ # dependency versions match the rest of the project. Therefore, explicitly
+ # execute the command for tools/mod, and append its dependencies to the list.
+ tools_mod_dependencies=$(run_for_module "tools/mod" dump_deps_of_module "./...") || return 2
+ all_dependencies="${all_dependencies}"$'\n'"${tools_mod_dependencies}"
+
+ local duplicates
+ duplicates=$(echo "${all_dependencies}" | cut -d ',' -f 1,2 | sort | uniq | cut -d ',' -f 1 | sort | uniq -d) || return 2
+
+ for dup in ${duplicates}; do
+ log_error "FAIL: inconsistent versions for dependency: ${dup}"
+ echo "${all_dependencies}" | grep "${dup}," | sed 's|\([^,]*\),\([^,]*\),\([^,]*\),\([^,]*\)| - \1@\2\3 from: \4|g'
+ done
+ if [[ -n "${duplicates}" ]]; then
+ log_error "FAIL: inconsistent dependencies"
+ return 2
+ else
+ log_success "SUCCESS: dependencies are consistent across modules"
+ fi
+}
+
+function release_pass {
+ rm -f ./bin/etcd-last-release
+
+ # Work out the previous release based on the version reported by the etcd binary
+ binary_version=$(./bin/etcd --version | grep --only-matching --perl-regexp '(?<=etcd Version: )\d+\.\d+')
+ binary_major=$(echo "${binary_version}" | cut -d '.' -f 1)
+ binary_minor=$(echo "${binary_version}" | cut -d '.' -f 2)
+ previous_minor=$((binary_minor - 1))
+
+ # Handle the edge case where we go to a new major version.
+ # When this happens we obtain the latest minor release of the previous major.
+ if [ "${binary_minor}" -eq 0 ]; then
+ binary_major=$((binary_major - 1))
+ previous_minor=$(git ls-remote --tags https://github.com/etcd-io/etcd.git \
+ | grep --only-matching --perl-regexp "(?<=v)${binary_major}.\d.[\d]+?(?=[\^])" \
+ | sort --numeric-sort --key 1.3 | tail -1 | cut -d '.' -f 2)
+ fi
+
+ # This gets a list of all remote tags for the release branch matching the regex.
+ # The sort key is used to sort numerically by patch version.
+ # The latest version is then stored for use below.
+ UPGRADE_VER=$(git ls-remote --tags https://github.com/etcd-io/etcd.git \
+ | grep --only-matching --perl-regexp "(?<=v)${binary_major}.${previous_minor}.[\d]+?(?=[\^])" \
+ | sort --numeric-sort --key 1.5 | tail -1 | sed 's/^/v/')
+ log_callout "Found latest release: ${UPGRADE_VER}."
+
+ if [ -n "${MANUAL_VER:-}" ]; then
+ # in case, we need to test against different version
+ UPGRADE_VER=$MANUAL_VER
+ fi
+ if [[ -z ${UPGRADE_VER} ]]; then
+ UPGRADE_VER="v3.5.0"
+ log_warning "fallback to" ${UPGRADE_VER}
+ fi
+
+ local file="etcd-$UPGRADE_VER-linux-$GOARCH.tar.gz"
+ log_callout "Downloading $file"
+
+ set +e
+ curl --fail -L "https://github.com/etcd-io/etcd/releases/download/$UPGRADE_VER/$file" -o "/tmp/$file"
+ local result=$?
+ set -e
+ case $result in
+ 0) ;;
+ *) log_error "--- FAIL:" ${result}
+ return $result
+ ;;
+ esac
+
+ tar xzvf "/tmp/$file" -C /tmp/ --strip-components=1
+ mkdir -p ./bin
+ mv /tmp/etcd ./bin/etcd-last-release
+}
+
+function mod_tidy_for_module {
+ run go mod tidy -diff
+}
+
+function mod_tidy_pass {
+ run_for_modules generic_checker mod_tidy_for_module
+}
+
+function proto_annotations_pass {
+ "${ETCD_ROOT_DIR}/scripts/verify_proto_annotations.sh"
+}
+
+function genproto_pass {
+ "${ETCD_ROOT_DIR}/scripts/verify_genproto.sh"
+}
+
+########### MAIN ###############################################################
+
+function run_pass {
+ local pass="${1}"
+ shift 1
+ log_callout -e "\\n'${pass}' started at $(date)"
+ if "${pass}_pass" "$@" ; then
+ log_success "'${pass}' PASSED and completed at $(date)"
+ return 0
+ else
+ log_error "FAIL: '${pass}' FAILED at $(date)"
+ if [ "$KEEP_GOING_SUITE" = true ]; then
+ return 2
+ else
+ exit 255
+ fi
+ fi
+}
+
+log_callout "Starting at: $(date)"
+fail_flag=false
+for pass in $PASSES; do
+ if run_pass "${pass}" "$@"; then
+ continue
+ else
+ fail_flag=true
+ fi
+done
+if [ "$fail_flag" = true ]; then
+ log_error "There was a FAILURE in the test suites that ran. See the log details above."
+ exit 255
+fi
+
+log_success "SUCCESS"
diff --git a/scripts/test_images.sh b/scripts/test_images.sh
new file mode 100755
index 00000000000..3cdb606b259
--- /dev/null
+++ b/scripts/test_images.sh
@@ -0,0 +1,94 @@
+#!/usr/bin/env bash
+
+# http://redsymbol.net/articles/unofficial-bash-strict-mode/
+set -euo pipefail
+IFS=$'\n\t'
+
+source ./scripts/test_lib.sh
+source ./scripts/build_lib.sh
+
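+# Expects the release image for VERSION to be present locally, e.g. (illustrative):
+#   VERSION=3.6.99 ./scripts/test_images.sh
+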
+function startContainer {
+ # run docker in the background
+ docker run -d --rm --name "${RUN_NAME}" "${IMAGE}"
+
+ # wait for etcd daemon to bootstrap
+ sleep 5
+}
+
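+# runVersionCheck [binary] [args...]
+# runs the given binary inside the image and asserts that the reported version matches ${VERSION}.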
+function runVersionCheck {
+ Out=$(docker run --rm "${IMAGE}" "${@}")
+ foundVersion=$(echo "$Out" | head -1 | rev | cut -d" " -f 1 | rev )
+ if [[ "${foundVersion}" != "${VERSION}" ]]; then
+ echo "error: Invalid Version. Got $foundVersion, expected $VERSION. Error: $Out"
+ exit 1
+ fi
+}
+
+# Can't proceed without docker
+if ! command -v docker >/dev/null; then
+ log_error "cannot find docker"
+ exit 1
+fi
+
+# You can't run darwin binaries in linux containers
+if [[ $(go env GOOS) == "darwin" ]]; then
+ echo "Please use a linux machine for release builds."
+ exit 1
+fi
+
+# Pick defaults based on release workflow
+ARCH=$(go env GOARCH)
+REPOSITARY=${REPOSITARY:-"gcr.io/etcd-development/etcd"}
+if [ -n "${VERSION:-}" ]; then
+ # Expected Format: v3.6.99-amd64
+ TAG=v"${VERSION}"-"${ARCH}"
+else
+ echo "Terminating test, VERSION not supplied"
+ exit 1
+fi
+IMAGE=${IMAGE:-"${REPOSITARY}:${TAG}"}
+
+# ETCD related values
+RUN_NAME="test_etcd"
+KEY="foo"
+VALUE="bar"
+
+if [[ "$(docker images -q "${IMAGE}" 2> /dev/null)" == "" ]]; then
+ echo "${IMAGE} not present locally"
+ exit 1
+fi
+
+# Version check
+runVersionCheck "/usr/local/bin/etcd" "--version"
+runVersionCheck "/usr/local/bin/etcdctl" "version"
+runVersionCheck "/usr/local/bin/etcdutl" "version"
+
+startContainer
+# stop container
+trap 'docker stop "${RUN_NAME}"' EXIT
+
+
+# Put/Get check
+PUT=$(docker exec "${RUN_NAME}" /usr/local/bin/etcdctl put "${KEY}" "${VALUE}")
+if [ "${PUT}" != "OK" ]; then
+ echo "Problem with Putting in etcd"
+ exit 1
+fi
+
+GET=$(docker exec "${RUN_NAME}" /usr/local/bin/etcdctl get "$KEY" --print-value-only)
+if [ "${GET}" != "${VALUE}" ]; then
+ echo "Problem with getting foo bar in etcd. Got ${GET}"
+ exit 1
+fi
+
+echo "Successfully tested etcd local image ${TAG}"
+
+for TARGET_ARCH in "amd64" "arm64" "ppc64le" "s390x"; do
+ ARCH_TAG=v"${VERSION}"-"${TARGET_ARCH}"
+ IMG_ARCH=$(docker inspect --format '{{.Architecture}}' "${REPOSITARY}:${ARCH_TAG}")
+ if [ "${IMG_ARCH}" != "$TARGET_ARCH" ];then
+ echo "Incorrect docker image architecture"
+ exit 1
+ fi
+ echo "Correct Architecture ${ARCH_TAG}"
+done
diff --git a/scripts/test_lib.sh b/scripts/test_lib.sh
index 9053f9ce8c7..1691ecb56b7 100644
--- a/scripts/test_lib.sh
+++ b/scripts/test_lib.sh
@@ -1,5 +1,7 @@
#!/usr/bin/env bash
+set -euo pipefail
+
ROOT_MODULE="go.etcd.io/etcd"
if [[ "$(go list)" != "${ROOT_MODULE}/v3" ]]; then
@@ -71,7 +73,7 @@ function relativePath {
local commonPart=$source
local result=""
- while [[ "${target#$commonPart}" == "${target}" ]]; do
+ while [[ "${target#"$commonPart"}" == "${target}" ]]; do
# no match, means that candidate common part is not correct
# go up one level (reduce common part)
commonPart="$(dirname "$commonPart")"
@@ -90,7 +92,7 @@ function relativePath {
# since we now have identified the common part,
# compute the non-common part
- local forwardPart="${target#$commonPart}"
+ local forwardPart="${target#"$commonPart"}"
# and now stick all parts together
if [[ -n $result ]] && [[ -n $forwardPart ]]; then
@@ -105,10 +107,10 @@ function relativePath {
#### Discovery of files/packages within a go module #####
-# go_srcs_in_module [package]
+# go_srcs_in_module
# returns list of all not-generated go sources in the current (dir) module.
function go_srcs_in_module {
- go fmt -n "$1" | grep -Eo "([^ ]*)$" | grep -vE "(\\_test.go|\\.pb\\.go|\\.pb\\.gw.go)"
+ go list -f "{{with \$c:=.}}{{range \$f:=\$c.GoFiles }}{{\$c.Dir}}/{{\$f}}{{\"\n\"}}{{end}}{{range \$f:=\$c.TestGoFiles }}{{\$c.Dir}}/{{\$f}}{{\"\n\"}}{{end}}{{range \$f:=\$c.XTestGoFiles }}{{\$c.Dir}}/{{\$f}}{{\"\n\"}}{{end}}{{end}}" ./... | grep -vE "(\\.pb\\.go|\\.pb\\.gw.go)"
}
# pkgs_in_module [optional:package_pattern]
@@ -164,7 +166,7 @@ function run_for_module {
}
function module_dirs() {
- echo "api pkg raft client/pkg client/v2 client/v3 server etcdutl etcdctl tests ."
+ echo "api pkg client/pkg client/internal/v2 client/v3 server etcdutl etcdctl tests tools/mod tools/rw-heatmaps tools/testgrid-analysis ."
}
# maybe_run [cmd...] runs given command depending on the DRY_RUN flag.
@@ -176,11 +178,13 @@ function maybe_run() {
fi
}
+# modules
+# returns the list of all modules in the project, not including the tools,
+# as they are not considered to be added to the bill of materials.
function modules() {
modules=(
"${ROOT_MODULE}/api/v3"
"${ROOT_MODULE}/pkg/v3"
- "${ROOT_MODULE}/raft/v3"
"${ROOT_MODULE}/client/pkg/v3"
"${ROOT_MODULE}/client/v2"
"${ROOT_MODULE}/client/v3"
@@ -192,7 +196,7 @@ function modules() {
echo "${modules[@]}"
}
-function modules_exp() {
+function modules_for_bom() {
for m in $(modules); do
echo -n "${m}/... "
done
@@ -202,16 +206,58 @@ function modules_exp() {
# run given command across all modules and packages
# (unless the set is limited using ${PKG} or / ${USERMOD})
function run_for_modules {
+ KEEP_GOING_MODULE=${KEEP_GOING_MODULE:-false}
local pkg="${PKG:-./...}"
+ local fail_mod=false
if [ -z "${USERMOD:-}" ]; then
for m in $(module_dirs); do
- run_for_module "${m}" "$@" "${pkg}" || return "$?"
+ if run_for_module "${m}" "$@" "${pkg}"; then
+ continue
+ else
+ if [ "$KEEP_GOING_MODULE" = false ]; then
+ log_error "There was a failure in module ${m}, aborting..."
+ return 1
+ fi
+ log_error "There was a failure in module ${m}, continuing..."
+ fail_mod=true
+ fi
done
+ if [ "$fail_mod" = true ]; then
+ return 1
+ fi
else
run_for_module "${USERMOD}" "$@" "${pkg}" || return "$?"
fi
}
+junitFilenamePrefix() {
+ if [[ -z "${JUNIT_REPORT_DIR:-}" ]]; then
+ echo ""
+ return
+ fi
+ mkdir -p "${JUNIT_REPORT_DIR}"
+ DATE=$( date +%s | base64 | head -c 15 )
+ echo "${JUNIT_REPORT_DIR}/junit_$DATE"
+}
+
+function produce_junit_xmlreport {
+ local -r junit_filename_prefix=${1:-}
+ if [[ -z "${junit_filename_prefix}" ]]; then
+ return
+ fi
+
+ local junit_xml_filename
+ junit_xml_filename="${junit_filename_prefix}.xml"
+
+ # Ensure that gotestsum is run without cross-compiling
+ run_go_tool gotest.tools/gotestsum --junitfile "${junit_xml_filename}" --raw-command cat "${junit_filename_prefix}"*.stdout || exit 1
+ if [ "${VERBOSE:-}" != "1" ]; then
+ rm "${junit_filename_prefix}"*.stdout
+ fi
+
+ log_callout "Saved JUnit XML test report to ${junit_xml_filename}"
+}
+
#### Running go test ########
@@ -236,13 +282,36 @@ function go_test {
local packages="${1}"
local mode="${2}"
local flags_for_package_func="${3}"
+ local junit_filename_prefix
shift 3
local goTestFlags=""
local goTestEnv=""
- if [ "${VERBOSE}" == "1" ]; then
- goTestFlags="-v"
+
+ ##### Create a junit-style XML test report in this directory if set. #####
+ JUNIT_REPORT_DIR=${JUNIT_REPORT_DIR:-}
+
+ # If JUNIT_REPORT_DIR is unset, and ARTIFACTS is set, then have them match.
+ if [[ -z "${JUNIT_REPORT_DIR:-}" && -n "${ARTIFACTS:-}" ]]; then
+ export JUNIT_REPORT_DIR="${ARTIFACTS}"
+ fi
+
+ # Used to filter verbose test output.
+ go_test_grep_pattern=".*"
+
+ if [[ -n "${JUNIT_REPORT_DIR}" ]] ; then
+ goTestFlags+="-v "
+ goTestFlags+="-json "
+ # Show only summary lines by matching lines like "status package/test"
+ go_test_grep_pattern="^[^[:space:]]\+[[:space:]]\+[^[:space:]]\+/[^[[:space:]]\+"
+ fi
+
+ junit_filename_prefix=$(junitFilenamePrefix)
+
+ if [ "${VERBOSE:-}" == "1" ]; then
+ goTestFlags="-v "
+ goTestFlags+="-json "
fi
# Expanding patterns (like ./...) into list of packages
@@ -257,6 +326,10 @@ function go_test {
fi
fi
+ if [ "${mode}" == "fail_fast" ]; then
+ goTestFlags+="-failfast "
+ fi
+
local failures=""
# execution of tests against packages:
@@ -266,16 +339,18 @@ function go_test {
additional_flags=$(${flags_for_package_func} ${pkg})
# shellcheck disable=SC2206
- local cmd=( go test ${goTestFlags} ${additional_flags} "$@" ${pkg} )
+ local cmd=( go test ${goTestFlags} ${additional_flags} ${pkg} "$@" )
# shellcheck disable=SC2086
- if ! run env ${goTestEnv} "${cmd[@]}" ; then
+ if ! run env ${goTestEnv} ETCD_VERIFY="${ETCD_VERIFY}" "${cmd[@]}" | tee ${junit_filename_prefix:+"${junit_filename_prefix}.stdout"} | grep --binary-files=text "${go_test_grep_pattern}" ; then
if [ "${mode}" != "keep_going" ]; then
+ produce_junit_xmlreport "${junit_filename_prefix}"
return 2
else
failures=("${failures[@]}" "${pkg}")
fi
fi
+ produce_junit_xmlreport "${junit_filename_prefix}"
done
if [ -n "${failures[*]}" ] ; then
@@ -301,23 +376,27 @@ function tool_exists {
fi
}
-# Ensure gobin is available, as it runs majority of the tools
-if ! command -v "gobin" >/dev/null; then
- run env GO111MODULE=off go get github.com/myitcv/gobin || exit 1
-fi
-
# tool_get_bin [tool] - returns absolute path to a tool binary (or returns error)
function tool_get_bin {
- tool_exists "gobin" "GO111MODULE=off go get github.com/myitcv/gobin" || return 2
-
local tool="$1"
+ local pkg_part="$1"
if [[ "$tool" == *"@"* ]]; then
+ pkg_part=$(echo "${tool}" | cut -d'@' -f1)
# shellcheck disable=SC2086
- run gobin ${GOBINARGS:-} -p "${tool}" || return 2
+ run go install ${GOBINARGS:-} "${tool}" || return 2
else
# shellcheck disable=SC2086
- run_for_module ./tools/mod run gobin ${GOBINARGS:-} -p -m --mod=readonly "${tool}" || return 2
+ run_for_module ./tools/mod run go install ${GOBINARGS:-} "${tool}" || return 2
+ fi
+
+ # remove the version suffix, such as removing "/v3" from "go.etcd.io/etcd/v3".
+ local cmd_base_name
+ cmd_base_name=$(basename "${pkg_part}")
+ if [[ ${cmd_base_name} =~ ^v[0-9]*$ ]]; then
+ pkg_part=$(dirname "${pkg_part}")
fi
+
+ run_for_module ./tools/mod go list -f '{{.Target}}' "${pkg_part}"
}
# tool_pkg_dir [pkg] - returns absolute path to a directory that stores given pkg.
@@ -329,22 +408,23 @@ function tool_pkg_dir {
# tool_get_bin [tool]
function run_go_tool {
local cmdbin
- if ! cmdbin=$(tool_get_bin "${1}"); then
+ if ! cmdbin=$(GOARCH="" tool_get_bin "${1}"); then
+ log_warning "Failed to install tool '${1}'"
return 2
fi
shift 1
- run "${cmdbin}" "$@" || return 2
+ GOARCH="" run "${cmdbin}" "$@" || return 2
}
-# assert_no_git_modifications fails if there are any uncommited changes.
+# assert_no_git_modifications fails if there are any uncommitted changes.
function assert_no_git_modifications {
log_callout "Making sure everything is committed."
if ! git diff --cached --exit-code; then
- log_error "Found staged by uncommited changes. Do commit/stash your changes first."
+ log_error "Found staged but uncommitted changes. Do commit/stash your changes first."
return 2
fi
if ! git diff --exit-code; then
- log_error "Found unstaged and uncommited changes. Do commit/stash your changes first."
+ log_error "Found unstaged and uncommitted changes. Do commit/stash your changes first."
return 2
fi
}
@@ -354,14 +434,16 @@ function assert_no_git_modifications {
# - no differencing commits in relation to the origin/$branch
function git_assert_branch_in_sync {
local branch
- branch=$(run git rev-parse --abbrev-ref HEAD)
# TODO: When git 2.22 popular, change to:
# branch=$(git branch --show-current)
+ branch=$(run git rev-parse --abbrev-ref HEAD)
+ log_callout "Verify the current branch '${branch}' is clean"
if [[ $(run git status --porcelain --untracked-files=no) ]]; then
log_error "The workspace in '$(pwd)' for branch: ${branch} has uncommitted changes"
log_error "Consider cleaning up / renaming this directory or (cd $(pwd) && git reset --hard)"
return 2
fi
+ log_callout "Verify the current branch '${branch}' is in sync with the 'origin/${branch}'"
if [ -n "${branch}" ]; then
ref_local=$(run git rev-parse "${branch}")
ref_origin=$(run git rev-parse "origin/${branch}")
@@ -374,3 +456,27 @@ function git_assert_branch_in_sync {
log_warning "Cannot verify consistency with the origin, as git is on detached branch."
fi
}
+
+# The version present in the .go-version file is the default version that test and build scripts will use.
+# However, it is possible to control the version that should be used with the help of env vars:
+# - FORCE_HOST_GO: if set to a non-empty value, use the version of go installed in system's $PATH.
+# - GO_VERSION: desired version of go to be used, might differ from what is present in .go-version.
+# If empty, the value defaults to the version in .go-version.
+function determine_go_version {
+ # Borrowing from how Kubernetes does this:
+ # https://github.com/kubernetes/kubernetes/blob/17854f0e0a153b06f9d0db096e2cd8ab2fa89c11/hack/lib/golang.sh#L510-L520
+ #
+ # default GO_VERSION to content of .go-version
+ GO_VERSION="${GO_VERSION:-"$(cat "${ETCD_ROOT_DIR}/.go-version")"}"
+ if [ "${GOTOOLCHAIN:-auto}" != 'auto' ]; then
+ # no-op, just respect GOTOOLCHAIN
+ :
+ elif [ -n "${FORCE_HOST_GO:-}" ]; then
+ export GOTOOLCHAIN='local'
+ else
+ GOTOOLCHAIN="go${GO_VERSION}"
+ export GOTOOLCHAIN
+ fi
+}
+
+determine_go_version
diff --git a/scripts/update_dep.sh b/scripts/update_dep.sh
index e0c79b4a3de..e76ecaff5c5 100755
--- a/scripts/update_dep.sh
+++ b/scripts/update_dep.sh
@@ -9,6 +9,8 @@
#
# Updates version of given dependency in all the modules that depend on the mod.
+set -euo pipefail
+
source ./scripts/test_lib.sh
mod="$1"
diff --git a/scripts/update_proto_annotations.sh b/scripts/update_proto_annotations.sh
new file mode 100755
index 00000000000..75089e77222
--- /dev/null
+++ b/scripts/update_proto_annotations.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+# Updates etcd_version_annotations.txt based on state of annotations in proto files.
+# Developers can run this script to avoid manually updating etcd_version_annotations.txt.
+# Before running this script, please ensure that the fields/messages you added are annotated with the next etcd version.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+tmpfile=$(mktemp)
+go run ./tools/proto-annotations/main.go --annotation etcd_version > "${tmpfile}"
+mv "${tmpfile}" ./scripts/etcd_version_annotations.txt
diff --git a/scripts/updatebom.sh b/scripts/updatebom.sh
index e879aace934..2c6bf8720b0 100755
--- a/scripts/updatebom.sh
+++ b/scripts/updatebom.sh
@@ -1,6 +1,7 @@
#!/usr/bin/env bash
-set -e
+set -euo pipefail
+
source ./scripts/test_lib.sh
function bom_fixlet {
@@ -11,9 +12,9 @@ function bom_fixlet {
local modules
# shellcheck disable=SC2207
- modules=($(modules_exp))
+ modules=($(modules_for_bom))
- if GOFLAGS=-mod=mod run_go_tool "github.com/coreos/license-bill-of-materials" \
+ if GOFLAGS=-mod=mod run_go_tool "github.com/appscodelabs/license-bill-of-materials" \
--override-file ./bill-of-materials.override.json \
"${modules[@]}" > ./bill-of-materials.json.tmp; then
cp ./bill-of-materials.json.tmp ./bill-of-materials.json
diff --git a/scripts/verify_genproto.sh b/scripts/verify_genproto.sh
new file mode 100755
index 00000000000..a66875657cf
--- /dev/null
+++ b/scripts/verify_genproto.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+# This script is automatically run by CI to catch pull requests that forgot to run genproto.sh
+# after changing *.proto files.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+tmpWorkDir=$(mktemp -d -t 'twd.XXXXXX')
+mkdir "$tmpWorkDir/etcd"
+tmpWorkDir="$tmpWorkDir/etcd"
+cp -r . "$tmpWorkDir"
+pushd "$tmpWorkDir"
+git add -A
+git commit -m init || true # may fail because there is nothing to commit
+./scripts/genproto.sh
+diff=$(git diff --numstat | awk '{print $3}')
+popd
+if [ -z "$diff" ]; then
+ echo "PASSED genproto-verification!"
+ exit 0
+fi
+echo "Failed genproto-verification!" >&2
+printf "* Found changed files:\n%s\n" "$diff" >&2
+echo "* Please rerun genproto.sh after changing *.proto file" >&2
+echo "* Run ./scripts/genproto.sh" >&2
+exit 1
diff --git a/scripts/verify_go_versions.sh b/scripts/verify_go_versions.sh
new file mode 100755
index 00000000000..11d34f92a51
--- /dev/null
+++ b/scripts/verify_go_versions.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+
+# This script verifies that the value of the toolchain directive in the
+# go.mod files always matches that of the .go-version file, to ensure that
+# we don't accidentally test and release with differing versions of Go.
+
+set -euo pipefail
+
+source ./scripts/test_lib.sh
+
+target_go_version="${target_go_version:-"$(cat "${ETCD_ROOT_DIR}/.go-version")"}"
+log_info "expected go toolchain directive: go${target_go_version}"
+log_info
+
+toolchain_out_of_sync="false"
+go_line_violation="false"
+
+# verify_go_versions takes a go.mod filepath as an argument
+# and checks if:
+# (1) go directive <= version in .go-version
+# (2) toolchain directive == version in .go-version
+function verify_go_versions() {
+ # shellcheck disable=SC2086
+ toolchain_version="$(go mod edit -json $1 | jq -r .Toolchain)"
+ # shellcheck disable=SC2086
+ go_line_version="$(go mod edit -json $1 | jq -r .Go)"
+ if [[ "go${target_go_version}" != "${toolchain_version}" ]]; then
+ log_error "go toolchain directive out of sync for $1, got: ${toolchain_version}"
+ toolchain_out_of_sync="true"
+ fi
+ if ! printf '%s\n' "${go_line_version}" "${target_go_version}" | sort --check=silent --version-sort; then
+ log_error "go directive in $1 is greater than maximum allowed: go${target_go_version}"
+ go_line_violation="true"
+ fi
+}
+
+while read -r mod; do
+ verify_go_versions "${mod}";
+done < <(find . -name 'go.mod')
+
+if [[ "${toolchain_out_of_sync}" == "true" ]]; then
+ log_error
+ log_error "Please run scripts/sync_go_toolchain_directive.sh or update .go-version to rectify this error"
+fi
+
+if [[ "${go_line_violation}" == "true" ]]; then
+ log_error
+ log_error "Please update .go-version to rectify this error, any go directive should be <= .go-version"
+fi
+
+if [[ "${go_line_violation}" == "true" ]] || [[ "${toolchain_out_of_sync}" == "true" ]]; then
+ exit 1
+fi
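
For reference, the two checks the script performs can also be expressed directly in Go. This is only a sketch, not part of the patch: it assumes golang.org/x/mod (a version that exposes the toolchain directive) is available, and the .go-version and go.mod paths are illustrative.

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"golang.org/x/mod/modfile"
	"golang.org/x/mod/semver"
)

func main() {
	// Target version comes from .go-version, e.g. "1.23.4".
	raw, err := os.ReadFile(".go-version")
	if err != nil {
		panic(err)
	}
	target := strings.TrimSpace(string(raw))

	data, err := os.ReadFile("go.mod")
	if err != nil {
		panic(err)
	}
	f, err := modfile.Parse("go.mod", data, nil)
	if err != nil {
		panic(err)
	}

	// Check (2): the toolchain directive must equal "go<target>".
	if f.Toolchain == nil || f.Toolchain.Name != "go"+target {
		fmt.Println("toolchain directive out of sync with .go-version")
	}
	// Check (1): the go directive must be <= the target version.
	if f.Go != nil && semver.Compare("v"+f.Go.Version, "v"+target) > 0 {
		fmt.Println("go directive is greater than .go-version")
	}
}
```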
diff --git a/scripts/verify_proto_annotations.sh b/scripts/verify_proto_annotations.sh
new file mode 100755
index 00000000000..17da593baab
--- /dev/null
+++ b/scripts/verify_proto_annotations.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# Verifies proto annotations by comparing them against the etcd_version_annotations.txt file, to ensure all new proto fields and messages are annotated.
+# This script is automatically run by CI to catch pull requests that are missing a proto annotation.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+tmpfile=$(mktemp)
+go run ./tools/proto-annotations/main.go --annotation=etcd_version > "${tmpfile}"
+if diff -u ./scripts/etcd_version_annotations.txt "${tmpfile}"; then
+ echo "PASSED proto-annotations verification!"
+ exit 0
+fi
+echo "Failed proto-annotations-verification!" >&2
+echo "If you are adding new proto fields/messages that will be included in raft log:" >&2
+echo "* Please add etcd_version annotation in *.proto file with next etcd version" >&2
+echo "* Run ./scripts/genproto.sh" >&2
+echo "* Run ./scripts/update_proto_annotations.sh" >&2
+exit 1
diff --git a/security/FUZZING_AUDIT_2022.PDF b/security/FUZZING_AUDIT_2022.PDF
new file mode 100644
index 00000000000..695ce764ee0
Binary files /dev/null and b/security/FUZZING_AUDIT_2022.PDF differ
diff --git a/security/OWNERS b/security/OWNERS
new file mode 100644
index 00000000000..95c0636d30d
--- /dev/null
+++ b/security/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+ - area/security
diff --git a/security/README.md b/security/README.md
index 07306956989..3129778cad0 100644
--- a/security/README.md
+++ b/security/README.md
@@ -1,12 +1,12 @@
## Security Announcements
-Join the [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev) group for emails about security and major announcements.
+Join the [etcd-dev](https://groups.google.com/g/etcd-dev) group for emails about security and major announcements.
## Report a Vulnerability
We're extremely grateful for security researchers and users that report vulnerabilities to the etcd Open Source Community. All reports are thoroughly investigated by a dedicated committee of community volunteers called [Product Security Committee](security-release-process.md#product-security-committee).
-To make a report, please email the private [security@etcd.io](mailto:security@etcd.io) list with the security details and the details expected for [all etcd bug reports](https://etcd.io/docs/latest/reporting_bugs/).
+To make a report, please email the private [etcd maintainers](mailto:etcd-maintainers@googlegroups.com) list with the security details and the details expected for [all etcd bug reports](https://github.com/etcd-io/etcd/blob/main/Documentation/contributor-guide/reporting_bugs.md).
### When Should I Report a Vulnerability?
@@ -35,6 +35,7 @@ A public disclosure date is negotiated by the etcd Product Security Committee an
## Security Audit
A third party security audit was performed by Trail of Bits, find the full report [here](SECURITY_AUDIT.pdf).
+A third party fuzzing audit was performed by Ada Logics, find the full report [here](FUZZING_AUDIT_2022.PDF).
## Private Distributor List
@@ -42,6 +43,6 @@ This list provides actionable information regarding etcd security to multiple di
### Request to Join
-New membership requests are sent to security@etcd.io.
+New membership requests are sent to [etcd maintainers](mailto:etcd-maintainers@googlegroups.com).
File an issue [here](https://github.com/etcd-io/etcd/issues/new?template=distributors-application.md), filling in the criteria template.
diff --git a/security/email-templates.md b/security/email-templates.md
index b2854b763be..8d93f76d5bf 100644
--- a/security/email-templates.md
+++ b/security/email-templates.md
@@ -7,7 +7,7 @@ This is a collection of email templates to handle various situations the securit
```
Subject: Upcoming security release of etcd $VERSION
To: etcd-dev@googlegroups.com
-Cc: security@etcd-io
+Cc: etcd-maintainers@googlegroups.com
Hello etcd Community,
@@ -34,7 +34,7 @@ $PERSON on behalf of the etcd Product Security Committee and maintainers
```
Subject: Security release of etcd $VERSION is now available
To: etcd-dev@googlegroups.com
-Cc: security@etcd-io
+Cc: etcd-maintainers@googlegroups.com
Hello etcd Community,
diff --git a/security/security-release-process.md b/security/security-release-process.md
index d029a8d8d18..78fb55dd967 100644
--- a/security/security-release-process.md
+++ b/security/security-release-process.md
@@ -8,14 +8,10 @@ Security vulnerabilities should be handled quickly and sometimes privately. The
The PSC is responsible for organizing the entire response including internal communication and external disclosure but will need help from relevant developers and release leads to successfully run this process.
-The initial PSC will consist of volunteers who have been involved in the initial discussion:
+The PSC consists of the following:
-- Brandon Philips (**[@philips](https://github.com/philips)**) [4096R/154343260542DF34]
-- Gyuho Lee (**[@gyuho](https://github.com/gyuho)**)
-- Joe Betz (**[@jpbetz](https://github.com/jpbetz)**)
-- Sahdev Zala (**[@spzala](https://github.com/spzala)**)
-- Sam Batschelet (**[@hexfusion](https://github.com/hexfusion)**)
-- Xiang Li (**[@xiang90](https://github.com/xiang90)**)
+- Maintainers
+- Volunteer members as described in the [Product Security Committee Membership](#Product-Security-Committee-Membership)
The PSC members will share various tasks as listed below:
@@ -26,13 +22,13 @@ The PSC members will share various tasks as listed below:
### Contacting the Product Security Committee
-Contact the team by sending email to [security@etcd.io](mailto:security@etcd.io)
+Contact the team by sending email to [etcd maintainers](mailto:etcd-maintainers@googlegroups.com).
### Product Security Committee Membership
#### Joining
-The PSC should be consist of 2-4 members. New potential members to the PSC can express their interest to the PSC members. These individuals can be nominated by PSC members or etcd maintainers.
+New potential members to the PSC can express their interest to the PSC members. These individuals can be nominated by PSC members or etcd maintainers.
If representation changes due to job shifts then PSC members are encouraged to grow the team or replace themselves through mentoring new members.
@@ -59,7 +55,7 @@ The etcd Community asks that all suspected vulnerabilities be privately and resp
### Public Disclosure Processes
-If anyone knows of a publicly disclosed security vulnerability please IMMEDIATELY email [security@etcd.io](mailto:security@etcd.io) to inform the PSC about the vulnerability so they may start the patch, release, and communication process.
+If anyone knows of a publicly disclosed security vulnerability please IMMEDIATELY email [etcd maintainers](mailto:etcd-maintainers@googlegroups.com) to inform the PSC about the vulnerability so they may start the patch, release, and communication process.
If possible the PSC will ask the person making the public report if the issue can be handled via a private disclosure process. If the reporter denies the PSC will move swiftly with the fix and release process. In extreme cases GitHub can be asked to delete the issue but this generally isn't necessary and is unlikely to make a public disclosure less damaging.
@@ -94,7 +90,7 @@ If the CVSS score is under ~4.0
Note: CVSS is convenient but imperfect. Ultimately, the PSC has discretion on classifying the severity of a vulnerability.
-The severity of the bug and related handling decisions must be discussed on the security@etcd.io mailing list.
+The severity of the bug and related handling decisions must be discussed on the [etcd maintainers](mailto:etcd-maintainers@googlegroups.com) mailing list.
### Fix Disclosure Process
@@ -108,7 +104,7 @@ With the Fix Development underway, the PSC needs to come up with an overall comm
- The PSC will announce the new releases, the CVE number, severity, and impact, and the location of the binaries to get wide distribution and user action. As much as possible this announcement should be actionable, and include any mitigating steps users can take prior to upgrading to a fixed version. The recommended target time is 4pm UTC on a non-Friday weekday. This means the announcement will be seen morning Pacific, early evening Europe, and late evening Asia. The announcement will be sent via the following channels:
- etcd-dev@googlegroups.com
- [Kubernetes announcement slack channel](https://kubernetes.slack.com/messages/C9T0QMNG4)
- - [etcd slack channel](https://kubernetes.slack.com/messages/C3HD8ARJ5)
+ - [sig-etcd slack channel](https://kubernetes.slack.com/archives/C3HD8ARJ5)
## Retrospective
diff --git a/server/auth/jwt.go b/server/auth/jwt.go
index d286f92c2ec..4984dca62ee 100644
--- a/server/auth/jwt.go
+++ b/server/auth/jwt.go
@@ -17,18 +17,19 @@ package auth
import (
"context"
"crypto/ecdsa"
+ "crypto/ed25519"
"crypto/rsa"
"errors"
"time"
- jwt "github.com/form3tech-oss/jwt-go"
+ jwt "github.com/golang-jwt/jwt/v4"
"go.uber.org/zap"
)
type tokenJWT struct {
lg *zap.Logger
signMethod jwt.SigningMethod
- key interface{}
+ key any
ttl time.Duration
verifyOnly bool
}
@@ -42,10 +43,10 @@ func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInf
// rev isn't used in JWT, it is only used in simple token
var (
username string
- revision uint64
+ revision float64
)
- parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {
+ parsed, err := jwt.Parse(token, func(token *jwt.Token) (any, error) {
if token.Method.Alg() != t.signMethod.Alg() {
return nil, errors.New("invalid signing method")
}
@@ -54,15 +55,15 @@ func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInf
return &k.PublicKey, nil
case *ecdsa.PrivateKey:
return &k.PublicKey, nil
+ case ed25519.PrivateKey:
+ return k.Public(), nil
default:
return t.key, nil
}
})
-
if err != nil {
t.lg.Warn(
"failed to parse a JWT token",
- zap.String("token", token),
zap.Error(err),
)
return nil, false
@@ -70,14 +71,23 @@ func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInf
claims, ok := parsed.Claims.(jwt.MapClaims)
if !parsed.Valid || !ok {
- t.lg.Warn("invalid JWT token", zap.String("token", token))
+ t.lg.Warn("failed to obtain claims from a JWT token")
+ return nil, false
+ }
+
+ username, ok = claims["username"].(string)
+ if !ok {
+ t.lg.Warn("failed to obtain user claims from jwt token")
return nil, false
}
- username = claims["username"].(string)
- revision = uint64(claims["revision"].(float64))
+ revision, ok = claims["revision"].(float64)
+ if !ok {
+ t.lg.Warn("failed to obtain revision claims from jwt token")
+ return nil, false
+ }
- return &AuthInfo{Username: username, Revision: revision}, true
+ return &AuthInfo{Username: username, Revision: uint64(revision)}, true
}
func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) {
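
The switch of `revision` from uint64 to float64 above follows from how `jwt.MapClaims` is populated: claims are decoded with encoding/json into a `map[string]any`, where JSON numbers always come out as float64. A tiny standalone sketch (not part of the patch) showing why the comma-ok assertion has to target float64:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// encoding/json decodes JSON numbers into float64 when the destination
	// is map[string]any, which is what jwt.MapClaims is under the hood.
	var claims map[string]any
	_ = json.Unmarshal([]byte(`{"username":"hello","revision":100}`), &claims)

	if _, ok := claims["revision"].(uint64); !ok {
		fmt.Println("a uint64 assertion fails") // always printed
	}
	rev, ok := claims["revision"].(float64)
	fmt.Println(ok, uint64(rev)) // true 100
}
```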
@@ -126,7 +136,7 @@ func newTokenProviderJWT(lg *zap.Logger, optMap map[string]string) (*tokenJWT, e
return nil, ErrInvalidAuthOpts
}
- var keys = make([]string, 0, len(optMap))
+ keys := make([]string, 0, len(optMap))
for k := range optMap {
if !knownOptions[k] {
keys = append(keys, k)
@@ -153,6 +163,10 @@ func newTokenProviderJWT(lg *zap.Logger, optMap map[string]string) (*tokenJWT, e
if _, ok := t.key.(*ecdsa.PublicKey); ok {
t.verifyOnly = true
}
+ case *jwt.SigningMethodEd25519:
+ if _, ok := t.key.(ed25519.PublicKey); ok {
+ t.verifyOnly = true
+ }
case *jwt.SigningMethodRSA, *jwt.SigningMethodRSAPSS:
if _, ok := t.key.(*rsa.PublicKey); ok {
t.verifyOnly = true
diff --git a/server/auth/jwt_test.go b/server/auth/jwt_test.go
index a3983cc5a56..ff0e4c41989 100644
--- a/server/auth/jwt_test.go
+++ b/server/auth/jwt_test.go
@@ -16,9 +16,13 @@ package auth
import (
"context"
+ "errors"
"fmt"
"testing"
+ "time"
+ "github.com/golang-jwt/jwt/v4"
+ "github.com/stretchr/testify/require"
"go.uber.org/zap"
)
@@ -28,6 +32,9 @@ const (
jwtECPubKey = "../../tests/fixtures/server-ecdsa.crt"
jwtECPrivKey = "../../tests/fixtures/server-ecdsa.key.insecure"
+
+ jwtEdPubKey = "../../tests/fixtures/ed25519-public-key.pem"
+ jwtEdPrivKey = "../../tests/fixtures/ed25519-private-key.pem"
)
func TestJWTInfo(t *testing.T) {
@@ -60,6 +67,15 @@ func TestJWTInfo(t *testing.T) {
"priv-key": jwtECPrivKey,
"sign-method": "ES256",
},
+ "Ed25519-priv": {
+ "priv-key": jwtEdPrivKey,
+ "sign-method": "EdDSA",
+ },
+ "Ed25519": {
+ "pub-key": jwtEdPubKey,
+ "priv-key": jwtEdPrivKey,
+ "sign-method": "EdDSA",
+ },
"HMAC": {
"priv-key": jwtECPrivKey, // any file, raw bytes used as shared secret
"sign-method": "HS256",
@@ -124,17 +140,87 @@ func testJWTInfo(t *testing.T, opts map[string]string) {
}
_, aerr := verify.assign(ctx, "abc", 123)
- if aerr != ErrVerifyOnly {
+ if !errors.Is(aerr, ErrVerifyOnly) {
t.Fatalf("unexpected error when attempting to sign with public key: %v", aerr)
}
+ })
+ }
+}
+
+func TestJWTTokenWithMissingFields(t *testing.T) {
+ testCases := []struct {
+ name string
+ username string // An empty string means not present
+ revision uint64 // 0 means not present
+ expectValid bool
+ }{
+ {
+ name: "valid token",
+ username: "hello",
+ revision: 100,
+ expectValid: true,
+ },
+ {
+ name: "no username",
+ username: "",
+ revision: 100,
+ expectValid: false,
+ },
+ {
+ name: "no revision",
+ username: "hello",
+ revision: 0,
+ expectValid: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ optsMap := map[string]string{
+ "priv-key": jwtRSAPrivKey,
+ "sign-method": "RS256",
+ "ttl": "1h",
+ }
+
+ t.Run(tc.name, func(t *testing.T) {
+ // prepare claims
+ claims := jwt.MapClaims{
+ "exp": time.Now().Add(time.Hour).Unix(),
+ }
+ if tc.username != "" {
+ claims["username"] = tc.username
+ }
+ if tc.revision != 0 {
+ claims["revision"] = tc.revision
+ }
+
+ // generate a JWT token with the given claims
+ var opts jwtOptions
+ err := opts.ParseWithDefaults(optsMap)
+ require.NoError(t, err)
+ key, err := opts.Key()
+ require.NoError(t, err)
+
+ tk := jwt.NewWithClaims(opts.SignMethod, claims)
+ token, err := tk.SignedString(key)
+ require.NoError(t, err)
+ // verify the token
+ jwtProvider, err := newTokenProviderJWT(zap.NewNop(), optsMap)
+ require.NoError(t, err)
+ ai, ok := jwtProvider.info(context.TODO(), token, 123)
+
+ require.Equal(t, tc.expectValid, ok)
+ if ok {
+ require.Equal(t, tc.username, ai.Username)
+ require.Equal(t, tc.revision, ai.Revision)
+ }
})
}
}
func TestJWTBad(t *testing.T) {
-
- var badCases = map[string]map[string]string{
+ badCases := map[string]map[string]string{
"no options": {},
"invalid method": {
"sign-method": "invalid",
diff --git a/server/auth/nop.go b/server/auth/nop.go
index d4378747bd8..8ba3f8c893c 100644
--- a/server/auth/nop.go
+++ b/server/auth/nop.go
@@ -27,9 +27,11 @@ func (t *tokenNop) genTokenPrefix() (string, error) { return "", nil }
func (t *tokenNop) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) {
return nil, false
}
+
func (t *tokenNop) assign(ctx context.Context, username string, revision uint64) (string, error) {
return "", ErrAuthFailed
}
+
func newTokenProviderNop() (*tokenNop, error) {
return &tokenNop{}, nil
}
diff --git a/server/auth/options.go b/server/auth/options.go
index c0b039f759c..6d554fbc45e 100644
--- a/server/auth/options.go
+++ b/server/auth/options.go
@@ -15,13 +15,15 @@
package auth
import (
+ "crypto"
"crypto/ecdsa"
+ "crypto/ed25519"
"crypto/rsa"
"fmt"
- "io/ioutil"
+ "os"
"time"
- jwt "github.com/form3tech-oss/jwt-go"
+ jwt "github.com/golang-jwt/jwt/v4"
)
const (
@@ -38,10 +40,8 @@ var knownOptions = map[string]bool{
optTTL: true,
}
-var (
- // DefaultTTL will be used when a 'ttl' is not specified
- DefaultTTL = 5 * time.Minute
-)
+// DefaultTTL will be used when a 'ttl' is not specified
+var DefaultTTL = 5 * time.Minute
type jwtOptions struct {
SignMethod jwt.SigningMethod
@@ -70,14 +70,14 @@ func (opts *jwtOptions) Parse(optMap map[string]string) error {
}
if file := optMap[optPublicKey]; file != "" {
- opts.PublicKey, err = ioutil.ReadFile(file)
+ opts.PublicKey, err = os.ReadFile(file)
if err != nil {
return err
}
}
if file := optMap[optPrivateKey]; file != "" {
- opts.PrivateKey, err = ioutil.ReadFile(file)
+ opts.PrivateKey, err = os.ReadFile(file)
if err != nil {
return err
}
@@ -94,12 +94,14 @@ func (opts *jwtOptions) Parse(optMap map[string]string) error {
}
// Key will parse and return the appropriately typed key for the selected signature method
-func (opts *jwtOptions) Key() (interface{}, error) {
+func (opts *jwtOptions) Key() (any, error) {
switch opts.SignMethod.(type) {
case *jwt.SigningMethodRSA, *jwt.SigningMethodRSAPSS:
return opts.rsaKey()
case *jwt.SigningMethodECDSA:
return opts.ecKey()
+ case *jwt.SigningMethodEd25519:
+ return opts.edKey()
case *jwt.SigningMethodHMAC:
return opts.hmacKey()
default:
@@ -107,14 +109,14 @@ func (opts *jwtOptions) Key() (interface{}, error) {
}
}
-func (opts *jwtOptions) hmacKey() (interface{}, error) {
+func (opts *jwtOptions) hmacKey() (any, error) {
if len(opts.PrivateKey) == 0 {
return nil, ErrMissingKey
}
return opts.PrivateKey, nil
}
-func (opts *jwtOptions) rsaKey() (interface{}, error) {
+func (opts *jwtOptions) rsaKey() (any, error) {
var (
priv *rsa.PrivateKey
pub *rsa.PublicKey
@@ -145,14 +147,14 @@ func (opts *jwtOptions) rsaKey() (interface{}, error) {
}
// both keys provided, make sure they match
- if pub != nil && pub.E != priv.E && pub.N.Cmp(priv.N) != 0 {
+ if pub != nil && !pub.Equal(priv.Public()) {
return nil, ErrKeyMismatch
}
return priv, nil
}
-func (opts *jwtOptions) ecKey() (interface{}, error) {
+func (opts *jwtOptions) ecKey() (any, error) {
var (
priv *ecdsa.PrivateKey
pub *ecdsa.PublicKey
@@ -183,8 +185,49 @@ func (opts *jwtOptions) ecKey() (interface{}, error) {
}
// both keys provided, make sure they match
- if pub != nil && pub.Curve != priv.Curve &&
- pub.X.Cmp(priv.X) != 0 && pub.Y.Cmp(priv.Y) != 0 {
+ if pub != nil && !pub.Equal(priv.Public()) {
+ return nil, ErrKeyMismatch
+ }
+
+ return priv, nil
+}
+
+func (opts *jwtOptions) edKey() (any, error) {
+ var (
+ priv ed25519.PrivateKey
+ pub ed25519.PublicKey
+ err error
+ )
+
+ if len(opts.PrivateKey) > 0 {
+ var privKey crypto.PrivateKey
+ privKey, err = jwt.ParseEdPrivateKeyFromPEM(opts.PrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ priv = privKey.(ed25519.PrivateKey)
+ }
+
+ if len(opts.PublicKey) > 0 {
+ var pubKey crypto.PublicKey
+ pubKey, err = jwt.ParseEdPublicKeyFromPEM(opts.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+ pub = pubKey.(ed25519.PublicKey)
+ }
+
+ if priv == nil {
+ if pub == nil {
+ // Neither key given
+ return nil, ErrMissingKey
+ }
+ // Public key only, can verify tokens
+ return pub, nil
+ }
+
+ // both keys provided, make sure they match
+ if pub != nil && !pub.Equal(priv.Public()) {
return nil, ErrKeyMismatch
}
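
A minimal sketch of the EdDSA flow the new edKey helper enables, using the same github.com/golang-jwt/jwt/v4 API. The in-memory key pair here is purely illustrative; in etcd the keys come from the PEM files configured through the token provider's pub-key/priv-key options with sign-method=EdDSA.

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	// Illustrative key pair; etcd parses its keys from PEM via jwt.ParseEd*KeyFromPEM.
	pub, priv, _ := ed25519.GenerateKey(rand.Reader)

	tk := jwt.NewWithClaims(jwt.SigningMethodEdDSA, jwt.MapClaims{
		"username": "root",
		"revision": 1,
	})
	signed, err := tk.SignedString(priv) // EdDSA signs with an ed25519.PrivateKey
	if err != nil {
		panic(err)
	}

	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (any, error) {
		return pub, nil // verification only needs the ed25519.PublicKey
	})
	fmt.Println(err == nil && parsed.Valid) // true
}
```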
diff --git a/server/auth/range_perm_cache.go b/server/auth/range_perm_cache.go
index 7d77b16ea1a..539ed290e68 100644
--- a/server/auth/range_perm_cache.go
+++ b/server/auth/range_perm_cache.go
@@ -15,15 +15,14 @@
package auth
import (
+ "go.uber.org/zap"
+
"go.etcd.io/etcd/api/v3/authpb"
"go.etcd.io/etcd/pkg/v3/adt"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
-
- "go.uber.org/zap"
)
-func getMergedPerms(lg *zap.Logger, tx backend.BatchTx, userName string) *unifiedRangePermissions {
- user := getUser(lg, tx, userName)
+func getMergedPerms(tx UnsafeAuthReader, userName string) *unifiedRangePermissions {
+ user := tx.UnsafeGetUser(userName)
if user == nil {
return nil
}
@@ -32,7 +31,7 @@ func getMergedPerms(lg *zap.Logger, tx backend.BatchTx, userName string) *unifie
writePerms := adt.NewIntervalTree()
for _, roleName := range user.Roles {
- role := getRole(lg, tx, roleName)
+ role := tx.UnsafeGetRole(roleName)
if role == nil {
continue
}
@@ -75,9 +74,12 @@ func checkKeyInterval(
lg *zap.Logger,
cachedPerms *unifiedRangePermissions,
key, rangeEnd []byte,
- permtyp authpb.Permission_Type) bool {
- if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
+ permtyp authpb.Permission_Type,
+) bool {
+ if isOpenEnded(rangeEnd) {
rangeEnd = nil
+ // nil rangeEnd will be converted to []byte{}, the largest element of BytesAffineComparable,
+ // in NewBytesAffineInterval().
}
ivl := adt.NewBytesAffineInterval(key, rangeEnd)
@@ -105,37 +107,98 @@ func checkKeyPoint(lg *zap.Logger, cachedPerms *unifiedRangePermissions, key []b
return false
}
-func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
+func (as *authStore) isRangeOpPermitted(userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
// assumption: tx is Lock()ed
- _, ok := as.rangePermCache[userName]
+ as.rangePermCacheMu.RLock()
+ defer as.rangePermCacheMu.RUnlock()
+
+ rangePerm, ok := as.rangePermCache[userName]
if !ok {
- perms := getMergedPerms(as.lg, tx, userName)
- if perms == nil {
- as.lg.Error(
- "failed to create a merged permission",
- zap.String("user-name", userName),
- )
- return false
- }
- as.rangePermCache[userName] = perms
+ as.lg.Error(
+ "user doesn't exist",
+ zap.String("user-name", userName),
+ )
+ return false
}
if len(rangeEnd) == 0 {
- return checkKeyPoint(as.lg, as.rangePermCache[userName], key, permtyp)
+ return checkKeyPoint(as.lg, rangePerm, key, permtyp)
}
- return checkKeyInterval(as.lg, as.rangePermCache[userName], key, rangeEnd, permtyp)
+ return checkKeyInterval(as.lg, rangePerm, key, rangeEnd, permtyp)
}
-func (as *authStore) clearCachedPerm() {
+func (as *authStore) refreshRangePermCache(tx UnsafeAuthReader) {
+ // Note that every authentication configuration update calls this method, which invalidates the entire
+ // rangePermCache and reconstructs it from the users and roles stored in the backend.
+ // This can be a costly operation.
+ as.rangePermCacheMu.Lock()
+ defer as.rangePermCacheMu.Unlock()
+
+ as.lg.Debug("Refreshing rangePermCache")
+
as.rangePermCache = make(map[string]*unifiedRangePermissions)
-}
-func (as *authStore) invalidateCachedPerm(userName string) {
- delete(as.rangePermCache, userName)
+ users := tx.UnsafeGetAllUsers()
+ for _, user := range users {
+ userName := string(user.Name)
+ perms := getMergedPerms(tx, userName)
+ if perms == nil {
+ as.lg.Error(
+ "failed to create a merged permission",
+ zap.String("user-name", userName),
+ )
+ continue
+ }
+ as.rangePermCache[userName] = perms
+ }
}
type unifiedRangePermissions struct {
readPerms adt.IntervalTree
writePerms adt.IntervalTree
}
+
+// Constraints related to key range
+// Assumptions:
+// a1. key must be non-nil
+// a2. []byte{} (in the case of string, "") is not a valid key of etcd
+// For representing an open-ended range, BytesAffineComparable uses []byte{} as the largest element.
+// a3. []byte{0x00} is the minimum valid etcd key
+//
+// Based on the above assumptions, key and rangeEnd must follow the rules below:
+// b1. for representing a single key point, rangeEnd should be nil or a zero-length byte array (in the case of string, "")
+// Rule a2 guarantees that (X, []byte{}) for any X is not a valid range, so such ranges can be used for representing
+// a single key permission.
+//
+// b2. a key range with an upper limit, like (X, Y), covers keys larger than or equal to X and smaller than Y
+//
+// b3. an open-ended key range, like (X, ), is represented as (X, []byte{0x00})
+// Because of rule a3, a literal range (X, []byte{0x00}) would always be empty, so granting such a permission makes no sense;
+// the encoding is therefore reused for representing an open-ended permission.
+// Note that a rangeEnd of []byte{0x00} will be converted into []byte{} before being inserted into the interval tree
+// (rule a2 ensures that this is the largest element).
+// The special range key = []byte{0x00}, rangeEnd = []byte{0x00} is treated as a range that matches all keys.
+//
+// Treating a range whose rangeEnd is []byte{0x00} as open-ended follows the conventions of the Range() and Watch() APIs.
+
+func isOpenEnded(rangeEnd []byte) bool { // check rule b3
+ return len(rangeEnd) == 1 && rangeEnd[0] == 0
+}
+
+func isValidPermissionRange(key, rangeEnd []byte) bool {
+ if len(key) == 0 {
+ return false
+ }
+ if len(rangeEnd) == 0 { // ensure rule b1
+ return true
+ }
+
+ begin := adt.BytesAffineComparable(key)
+ end := adt.BytesAffineComparable(rangeEnd)
+ if begin.Compare(end) == -1 { // rule b2
+ return true
+ }
+
+ return isOpenEnded(rangeEnd)
+}
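
A few worked examples of rules b1-b3, written as a test sketch that would have to live in package auth because the helpers are unexported; the full table-driven tests appear in range_perm_cache_test.go below.

```go
package auth

import "testing"

// Sketch only: concrete inputs for the unexported helpers defined above.
func TestPermissionRangeRulesSketch(t *testing.T) {
	cases := []struct {
		key, rangeEnd []byte
		want          bool
	}{
		{[]byte("a"), nil, true},          // b1: single key permission
		{[]byte("a"), []byte("b"), true},  // b2: bounded range, "a" <= k < "b"
		{[]byte("a"), []byte{0x00}, true}, // b3: open-ended, every key >= "a"
		{[]byte{}, []byte("b"), false},    // a2: "" is never a valid key
		{[]byte("b"), []byte("a"), false}, // empty interval is rejected
	}
	for _, c := range cases {
		if got := isValidPermissionRange(c.key, c.rangeEnd); got != c.want {
			t.Errorf("isValidPermissionRange(%q, %q) = %v, want %v", c.key, c.rangeEnd, got, c.want)
		}
	}
}
```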
diff --git a/server/auth/range_perm_cache_test.go b/server/auth/range_perm_cache_test.go
index 2f325159309..4b39acbcb2f 100644
--- a/server/auth/range_perm_cache_test.go
+++ b/server/auth/range_perm_cache_test.go
@@ -17,10 +17,10 @@ package auth
import (
"testing"
+ "go.uber.org/zap/zaptest"
+
"go.etcd.io/etcd/api/v3/authpb"
"go.etcd.io/etcd/pkg/v3/adt"
-
- "go.uber.org/zap"
)
func TestRangePermission(t *testing.T) {
@@ -45,6 +45,30 @@ func TestRangePermission(t *testing.T) {
[]byte("a"), []byte("f"),
true,
},
+ {
+ []adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte("d")), adt.NewBytesAffineInterval([]byte("a"), []byte("b")), adt.NewBytesAffineInterval([]byte("c"), []byte("f"))},
+ []byte("a"),
+ []byte{},
+ false,
+ },
+ {
+ []adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte{})},
+ []byte("a"),
+ []byte{},
+ true,
+ },
+ {
+ []adt.Interval{adt.NewBytesAffineInterval([]byte{0x00}, []byte{})},
+ []byte("a"),
+ []byte{},
+ true,
+ },
+ {
+ []adt.Interval{adt.NewBytesAffineInterval([]byte{0x00}, []byte{})},
+ []byte{0x00},
+ []byte{},
+ true,
+ },
}
for i, tt := range tests {
@@ -53,7 +77,7 @@ func TestRangePermission(t *testing.T) {
readPerms.Insert(p, struct{}{})
}
- result := checkKeyInterval(zap.NewExample(), &unifiedRangePermissions{readPerms: readPerms}, tt.begin, tt.end, authpb.READ)
+ result := checkKeyInterval(zaptest.NewLogger(t), &unifiedRangePermissions{readPerms: readPerms}, tt.begin, tt.end, authpb.READ)
if result != tt.want {
t.Errorf("#%d: result=%t, want=%t", i, result, tt.want)
}
@@ -86,6 +110,16 @@ func TestKeyPermission(t *testing.T) {
[]byte("f"),
false,
},
+ {
+ []adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte("d")), adt.NewBytesAffineInterval([]byte("a"), []byte("b")), adt.NewBytesAffineInterval([]byte("c"), []byte{})},
+ []byte("f"),
+ true,
+ },
+ {
+ []adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte("d")), adt.NewBytesAffineInterval([]byte("a"), []byte("b")), adt.NewBytesAffineInterval([]byte{0x00}, []byte{})},
+ []byte("f"),
+ true,
+ },
}
for i, tt := range tests {
@@ -94,9 +128,94 @@ func TestKeyPermission(t *testing.T) {
readPerms.Insert(p, struct{}{})
}
- result := checkKeyPoint(zap.NewExample(), &unifiedRangePermissions{readPerms: readPerms}, tt.key, authpb.READ)
+ result := checkKeyPoint(zaptest.NewLogger(t), &unifiedRangePermissions{readPerms: readPerms}, tt.key, authpb.READ)
if result != tt.want {
t.Errorf("#%d: result=%t, want=%t", i, result, tt.want)
}
}
}
+
+func TestRangeCheck(t *testing.T) {
+ tests := []struct {
+ name string
+ key []byte
+ rangeEnd []byte
+ want bool
+ }{
+ {
+ name: "valid single key",
+ key: []byte("a"),
+ rangeEnd: []byte(""),
+ want: true,
+ },
+ {
+ name: "valid single key",
+ key: []byte("a"),
+ rangeEnd: nil,
+ want: true,
+ },
+ {
+ name: "valid key range, key < rangeEnd",
+ key: []byte("a"),
+ rangeEnd: []byte("b"),
+ want: true,
+ },
+ {
+ name: "invalid empty key range, key == rangeEnd",
+ key: []byte("a"),
+ rangeEnd: []byte("a"),
+ want: false,
+ },
+ {
+ name: "invalid empty key range, key > rangeEnd",
+ key: []byte("b"),
+ rangeEnd: []byte("a"),
+ want: false,
+ },
+ {
+ name: "invalid key, key must not be \"\"",
+ key: []byte(""),
+ rangeEnd: []byte("a"),
+ want: false,
+ },
+ {
+ name: "invalid key range, key must not be \"\"",
+ key: []byte(""),
+ rangeEnd: []byte(""),
+ want: false,
+ },
+ {
+ name: "invalid key range, key must not be \"\"",
+ key: []byte(""),
+ rangeEnd: []byte("\x00"),
+ want: false,
+ },
+ {
+ name: "valid single key (not useful in practice)",
+ key: []byte("\x00"),
+ rangeEnd: []byte(""),
+ want: true,
+ },
+ {
+ name: "valid key range, larger or equals to \"a\"",
+ key: []byte("a"),
+ rangeEnd: []byte("\x00"),
+ want: true,
+ },
+ {
+ name: "valid key range, which includes all keys",
+ key: []byte("\x00"),
+ rangeEnd: []byte("\x00"),
+ want: true,
+ },
+ }
+
+ for i, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := isValidPermissionRange(tt.key, tt.rangeEnd)
+ if result != tt.want {
+ t.Errorf("#%d: result=%t, want=%t", i, result, tt.want)
+ }
+ })
+ }
+}
diff --git a/server/auth/simple_token.go b/server/auth/simple_token.go
index 7b1b094ae14..f8272b185d9 100644
--- a/server/auth/simple_token.go
+++ b/server/auth/simple_token.go
@@ -20,6 +20,7 @@ package auth
import (
"context"
"crypto/rand"
+ "errors"
"fmt"
"math/big"
"strconv"
@@ -156,13 +157,18 @@ func (t *tokenSimple) invalidateUser(username string) {
}
func (t *tokenSimple) enable() {
+ t.simpleTokensMu.Lock()
+ defer t.simpleTokensMu.Unlock()
+ if t.simpleTokenKeeper != nil { // already enabled
+ return
+ }
if t.simpleTokenTTL <= 0 {
t.simpleTokenTTL = simpleTokenTTLDefault
}
delf := func(tk string) {
if username, ok := t.simpleTokens[tk]; ok {
- t.lg.Info(
+ t.lg.Debug(
"deleted a simple token",
zap.String("user-name", username),
zap.String("token", tk),
@@ -207,7 +213,11 @@ func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) (
func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) {
// rev isn't used in simple token, it is only used in JWT
- index := ctx.Value(AuthenticateParamIndex{}).(uint64)
+ var index uint64
+ var ok bool
+ if index, ok = ctx.Value(AuthenticateParamIndex{}).(uint64); !ok {
+ return "", errors.New("failed to assign")
+ }
simpleTokenPrefix := ctx.Value(AuthenticateParamSimpleTokenPrefix{}).(string)
token := fmt.Sprintf("%s.%d", simpleTokenPrefix, index)
t.assignSimpleTokenToUser(username, token)
@@ -226,7 +236,7 @@ func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool
}
select {
- case <-t.indexWaiter(uint64(index)):
+ case <-t.indexWaiter(index):
return true
case <-ctx.Done():
}
diff --git a/server/auth/simple_token_test.go b/server/auth/simple_token_test.go
index 1bea5696113..13db76efe4a 100644
--- a/server/auth/simple_token_test.go
+++ b/server/auth/simple_token_test.go
@@ -18,15 +18,15 @@ import (
"context"
"testing"
- "go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
)
// TestSimpleTokenDisabled ensures that TokenProviderSimple behaves correctly when
// disabled.
func TestSimpleTokenDisabled(t *testing.T) {
- initialState := newTokenProviderSimple(zap.NewExample(), dummyIndexWaiter, simpleTokenTTLDefault)
+ initialState := newTokenProviderSimple(zaptest.NewLogger(t), dummyIndexWaiter, simpleTokenTTLDefault)
- explicitlyDisabled := newTokenProviderSimple(zap.NewExample(), dummyIndexWaiter, simpleTokenTTLDefault)
+ explicitlyDisabled := newTokenProviderSimple(zaptest.NewLogger(t), dummyIndexWaiter, simpleTokenTTLDefault)
explicitlyDisabled.enable()
explicitlyDisabled.disable()
@@ -48,7 +48,7 @@ func TestSimpleTokenDisabled(t *testing.T) {
// TestSimpleTokenAssign ensures that TokenProviderSimple can correctly assign a
// token, look it up with info, and invalidate it by user.
func TestSimpleTokenAssign(t *testing.T) {
- tp := newTokenProviderSimple(zap.NewExample(), dummyIndexWaiter, simpleTokenTTLDefault)
+ tp := newTokenProviderSimple(zaptest.NewLogger(t), dummyIndexWaiter, simpleTokenTTLDefault)
tp.enable()
defer tp.disable()
ctx := context.WithValue(context.WithValue(context.TODO(), AuthenticateParamIndex{}, uint64(1)), AuthenticateParamSimpleTokenPrefix{}, "dummy")
diff --git a/server/auth/store.go b/server/auth/store.go
index 19dd7e738ad..ed302638378 100644
--- a/server/auth/store.go
+++ b/server/auth/store.go
@@ -18,7 +18,6 @@ import (
"bytes"
"context"
"encoding/base64"
- "encoding/binary"
"errors"
"sort"
"strings"
@@ -26,28 +25,19 @@ import (
"sync/atomic"
"time"
- "go.etcd.io/etcd/api/v3/authpb"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
-
"go.uber.org/zap"
"golang.org/x/crypto/bcrypt"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
+
+ "go.etcd.io/etcd/api/v3/authpb"
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
)
var (
- enableFlagKey = []byte("authEnabled")
- authEnabled = []byte{1}
- authDisabled = []byte{0}
-
- revisionKey = []byte("authRevision")
-
- authBucketName = []byte("auth")
- authUsersBucketName = []byte("authUsers")
- authRolesBucketName = []byte("authRoles")
+ rootPerm = authpb.Permission{PermType: authpb.READWRITE, Key: []byte{}, RangeEnd: []byte{0}}
ErrRootUserNotExist = errors.New("auth: root user does not exist")
ErrRootRoleNotExist = errors.New("auth: root user does not have root role")
@@ -57,6 +47,7 @@ var (
ErrRoleAlreadyExist = errors.New("auth: role already exists")
ErrRoleNotFound = errors.New("auth: role not found")
ErrRoleEmpty = errors.New("auth: role name is empty")
+ ErrPermissionNotGiven = errors.New("auth: permission not given")
ErrAuthFailed = errors.New("auth: authentication failed, invalid user ID or password")
ErrNoPasswordUser = errors.New("auth: authentication failed, password was given for no password user")
ErrPermissionDenied = errors.New("auth: permission denied")
@@ -79,8 +70,6 @@ const (
tokenTypeSimple = "simple"
tokenTypeJWT = "jwt"
-
- revBytesLen = 8
)
type AuthInfo struct {
@@ -109,7 +98,7 @@ type AuthStore interface {
Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error)
// Recover recovers the state of auth store from the given backend
- Recover(b backend.Backend)
+ Recover(be AuthBackend)
// UserAdd adds a new user
UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
@@ -201,16 +190,70 @@ type TokenProvider interface {
genTokenPrefix() (string, error)
}
+type AuthBackend interface {
+ CreateAuthBuckets()
+ ForceCommit()
+ ReadTx() AuthReadTx
+ BatchTx() AuthBatchTx
+
+ GetUser(string) *authpb.User
+ GetAllUsers() []*authpb.User
+ GetRole(string) *authpb.Role
+ GetAllRoles() []*authpb.Role
+}
+
+type AuthReadTx interface {
+ RLock()
+ RUnlock()
+ UnsafeAuthReader
+}
+
+type UnsafeAuthReader interface {
+ UnsafeReadAuthEnabled() bool
+ UnsafeReadAuthRevision() uint64
+ UnsafeGetUser(string) *authpb.User
+ UnsafeGetRole(string) *authpb.Role
+ UnsafeGetAllUsers() []*authpb.User
+ UnsafeGetAllRoles() []*authpb.Role
+}
+
+type AuthBatchTx interface {
+ Lock()
+ Unlock()
+ UnsafeAuthReadWriter
+}
+
+type UnsafeAuthReadWriter interface {
+ UnsafeAuthReader
+ UnsafeAuthWriter
+}
+
+type UnsafeAuthWriter interface {
+ UnsafeSaveAuthEnabled(enabled bool)
+ UnsafeSaveAuthRevision(rev uint64)
+ UnsafePutUser(*authpb.User)
+ UnsafeDeleteUser(string)
+ UnsafePutRole(*authpb.Role)
+ UnsafeDeleteRole(string)
+}
+
type authStore struct {
// atomic operations; need 64-bit align, or 32-bit tests will crash
revision uint64
lg *zap.Logger
- be backend.Backend
+ be AuthBackend
enabled bool
enabledMu sync.RWMutex
- rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
+ // rangePermCache needs to be protected by rangePermCacheMu.
+ // rangePermCacheMu needs to be write-locked only during initialization or configuration changes;
+ // hot paths like Range() only need to acquire the read lock, which improves performance.
+ //
+ // Note that BatchTx and ReadTx cannot serve as the lock for rangePermCache because they are independent resources,
+ // see also: https://github.com/etcd-io/etcd/pull/13920#discussion_r849114855
+ rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
+ rangePermCacheMu sync.RWMutex
tokenProvider TokenProvider
bcryptCost int // the algorithm cost / strength for hashing auth passwords
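
The locking discipline described in the comment above boils down to a classic read-mostly cache. A stripped-down, illustrative sketch of that pattern follows; the names are not etcd's.

```go
package main

import (
	"fmt"
	"sync"
)

// permCache mirrors the pattern: lookups take only the read lock on the hot
// path, while configuration changes rebuild the whole map under the write lock.
type permCache struct {
	mu    sync.RWMutex
	perms map[string]string // username -> merged permissions (placeholder type)
}

func (c *permCache) lookup(user string) (string, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	p, ok := c.perms[user]
	return p, ok
}

func (c *permCache) refresh(users map[string]string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.perms = make(map[string]string, len(users))
	for u, p := range users {
		c.perms[u] = p
	}
}

func main() {
	c := &permCache{}
	c.refresh(map[string]string{"alice": "read: [a, b)"})
	fmt.Println(c.lookup("alice"))
}
```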
@@ -223,15 +266,14 @@ func (as *authStore) AuthEnable() error {
as.lg.Info("authentication is already enabled; ignored auth enable request")
return nil
}
- b := as.be
- tx := b.BatchTx()
+ tx := as.be.BatchTx()
tx.Lock()
defer func() {
tx.Unlock()
- b.ForceCommit()
+ as.be.ForceCommit()
}()
- u := getUser(as.lg, tx, rootUser)
+ u := tx.UnsafeGetUser(rootUser)
if u == nil {
return ErrRootUserNotExist
}
@@ -240,14 +282,13 @@ func (as *authStore) AuthEnable() error {
return ErrRootRoleNotExist
}
- tx.UnsafePut(authBucketName, enableFlagKey, authEnabled)
-
+ tx.UnsafeSaveAuthEnabled(true)
as.enabled = true
as.tokenProvider.enable()
- as.rangePermCache = make(map[string]*unifiedRangePermissions)
+ as.refreshRangePermCache(tx)
- as.setRevision(getRevision(tx))
+ as.setRevision(tx.UnsafeReadAuthRevision())
as.lg.Info("enabled authentication")
return nil
@@ -260,11 +301,13 @@ func (as *authStore) AuthDisable() {
return
}
b := as.be
+
tx := b.BatchTx()
tx.Lock()
- tx.UnsafePut(authBucketName, enableFlagKey, authDisabled)
+ tx.UnsafeSaveAuthEnabled(false)
as.commitRevision(tx)
tx.Unlock()
+
b.ForceCommit()
as.enabled = false
@@ -287,12 +330,7 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string
if !as.IsAuthEnabled() {
return nil, ErrAuthNotEnabled
}
-
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- user := getUser(as.lg, tx, username)
+ user := as.be.GetUser(username)
if user == nil {
return nil, ErrAuthFailed
}
@@ -326,11 +364,11 @@ func (as *authStore) CheckPassword(username, password string) (uint64, error) {
// CompareHashAndPassword is very expensive, so we use closures
// to avoid putting it in the critical section of the tx lock.
revision, err := func() (uint64, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
+ tx := as.be.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
- user = getUser(as.lg, tx, username)
+ user = tx.UnsafeGetUser(username)
if user == nil {
return 0, ErrAuthFailed
}
@@ -339,7 +377,7 @@ func (as *authStore) CheckPassword(username, password string) (uint64, error) {
return 0, ErrNoPasswordUser
}
- return getRevision(tx), nil
+ return tx.UnsafeReadAuthRevision(), nil
}()
if err != nil {
return 0, err
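
The closure above exists so that bcrypt.CompareHashAndPassword, which is deliberately slow, never runs while the read transaction is locked. A generic sketch of the same "copy under the lock, compare outside it" pattern, with illustrative names:

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/crypto/bcrypt"
)

type userStore struct {
	mu     sync.RWMutex
	hashes map[string][]byte // username -> bcrypt hash
}

func (s *userStore) checkPassword(name, password string) bool {
	var hash []byte
	func() { // keep the critical section as small as possible
		s.mu.RLock()
		defer s.mu.RUnlock()
		hash = append([]byte(nil), s.hashes[name]...)
	}()
	if len(hash) == 0 {
		return false
	}
	// The expensive comparison happens with no lock held.
	return bcrypt.CompareHashAndPassword(hash, []byte(password)) == nil
}

func main() {
	h, _ := bcrypt.GenerateFromPassword([]byte("secret"), bcrypt.DefaultCost)
	s := &userStore{hashes: map[string][]byte{"root": h}}
	fmt.Println(s.checkPassword("root", "secret")) // true
}
```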
@@ -352,24 +390,22 @@ func (as *authStore) CheckPassword(username, password string) (uint64, error) {
return revision, nil
}
-func (as *authStore) Recover(be backend.Backend) {
- enabled := false
+func (as *authStore) Recover(be AuthBackend) {
as.be = be
- tx := be.BatchTx()
- tx.Lock()
- _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0)
- if len(vs) == 1 {
- if bytes.Equal(vs[0], authEnabled) {
- enabled = true
- }
- }
+ tx := be.ReadTx()
+ tx.RLock()
- as.setRevision(getRevision(tx))
+ enabled := tx.UnsafeReadAuthEnabled()
+ as.setRevision(tx.UnsafeReadAuthRevision())
+ as.refreshRangePermCache(tx)
- tx.Unlock()
+ tx.RUnlock()
as.enabledMu.Lock()
as.enabled = enabled
+ if enabled {
+ as.tokenProvider.enable()
+ }
as.enabledMu.Unlock()
}
@@ -390,7 +426,7 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse,
tx.Lock()
defer tx.Unlock()
- user := getUser(as.lg, tx, r.Name)
+ user := tx.UnsafeGetUser(r.Name)
if user != nil {
return nil, ErrUserAlreadyExist
}
@@ -417,10 +453,10 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse,
Password: password,
Options: options,
}
-
- putUser(as.lg, tx, newUser)
+ tx.UnsafePutUser(newUser)
as.commitRevision(tx)
+ as.refreshRangePermCache(tx)
as.lg.Info("added a user", zap.String("user-name", r.Name))
return &pb.AuthUserAddResponse{}, nil
@@ -436,16 +472,15 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete
tx.Lock()
defer tx.Unlock()
- user := getUser(as.lg, tx, r.Name)
+ user := tx.UnsafeGetUser(r.Name)
if user == nil {
return nil, ErrUserNotFound
}
-
- delUser(tx, r.Name)
+ tx.UnsafeDeleteUser(r.Name)
as.commitRevision(tx)
+ as.refreshRangePermCache(tx)
- as.invalidateCachedPerm(r.Name)
as.tokenProvider.invalidateUser(r.Name)
as.lg.Info(
@@ -461,7 +496,7 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p
tx.Lock()
defer tx.Unlock()
- user := getUser(as.lg, tx, r.Name)
+ user := tx.UnsafeGetUser(r.Name)
if user == nil {
return nil, ErrUserNotFound
}
@@ -469,7 +504,8 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p
var password []byte
var err error
- if !user.Options.NoPassword {
+ // For backward compatibility with older versions of etcd, user.Options may be nil
+ if user.Options == nil || !user.Options.NoPassword {
password, err = as.selectPassword(r.Password, r.HashedPassword)
if err != nil {
return nil, ErrNoPasswordUser
@@ -482,12 +518,11 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p
Password: password,
Options: user.Options,
}
-
- putUser(as.lg, tx, updatedUser)
+ tx.UnsafePutUser(updatedUser)
as.commitRevision(tx)
+ as.refreshRangePermCache(tx)
- as.invalidateCachedPerm(r.Name)
as.tokenProvider.invalidateUser(r.Name)
as.lg.Info(
@@ -503,13 +538,13 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser
tx.Lock()
defer tx.Unlock()
- user := getUser(as.lg, tx, r.User)
+ user := tx.UnsafeGetUser(r.User)
if user == nil {
return nil, ErrUserNotFound
}
if r.Role != rootRole {
- role := getRole(as.lg, tx, r.Role)
+ role := tx.UnsafeGetRole(r.Role)
if role == nil {
return nil, ErrRoleNotFound
}
@@ -529,11 +564,10 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser
user.Roles = append(user.Roles, r.Role)
sort.Strings(user.Roles)
- putUser(as.lg, tx, user)
-
- as.invalidateCachedPerm(r.User)
+ tx.UnsafePutUser(user)
as.commitRevision(tx)
+ as.refreshRangePermCache(tx)
as.lg.Info(
"granted a role to a user",
@@ -545,10 +579,7 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser
}
func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- user := getUser(as.lg, tx, r.Name)
- tx.Unlock()
+ user := as.be.GetUser(r.Name)
if user == nil {
return nil, ErrUserNotFound
@@ -560,10 +591,7 @@ func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse,
}
func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- users := getAllUsers(as.lg, tx)
- tx.Unlock()
+ users := as.be.GetAllUsers()
resp := &pb.AuthUserListResponse{Users: make([]string, len(users))}
for i := range users {
@@ -586,7 +614,7 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs
tx.Lock()
defer tx.Unlock()
- user := getUser(as.lg, tx, r.Name)
+ user := tx.UnsafeGetUser(r.Name)
if user == nil {
return nil, ErrUserNotFound
}
@@ -607,11 +635,10 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs
return nil, ErrRoleNotGranted
}
- putUser(as.lg, tx, updatedUser)
-
- as.invalidateCachedPerm(r.Name)
+ tx.UnsafePutUser(updatedUser)
as.commitRevision(tx)
+ as.refreshRangePermCache(tx)
as.lg.Info(
"revoked a role from a user",
@@ -624,25 +651,22 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs
}
func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
var resp pb.AuthRoleGetResponse
- role := getRole(as.lg, tx, r.Role)
+ role := as.be.GetRole(r.Role)
if role == nil {
return nil, ErrRoleNotFound
}
- resp.Perm = append(resp.Perm, role.KeyPermission...)
+ if rootRole == string(role.Name) {
+ resp.Perm = append(resp.Perm, &rootPerm)
+ } else {
+ resp.Perm = append(resp.Perm, role.KeyPermission...)
+ }
return &resp, nil
}
func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- roles := getAllRoles(as.lg, tx)
- tx.Unlock()
+ roles := as.be.GetAllRoles()
resp := &pb.AuthRoleListResponse{Roles: make([]string, len(roles))}
for i := range roles {
@@ -656,7 +680,7 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest)
tx.Lock()
defer tx.Unlock()
- role := getRole(as.lg, tx, r.Role)
+ role := tx.UnsafeGetRole(r.Role)
if role == nil {
return nil, ErrRoleNotFound
}
@@ -675,13 +699,10 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest)
return nil, ErrPermissionNotGranted
}
- putRole(as.lg, tx, updatedRole)
-
- // TODO(mitake): currently single role update invalidates every cache
- // It should be optimized.
- as.clearCachedPerm()
+ tx.UnsafePutRole(updatedRole)
as.commitRevision(tx)
+ as.refreshRangePermCache(tx)
as.lg.Info(
"revoked a permission on range",
@@ -702,14 +723,14 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete
tx.Lock()
defer tx.Unlock()
- role := getRole(as.lg, tx, r.Role)
+ role := tx.UnsafeGetRole(r.Role)
if role == nil {
return nil, ErrRoleNotFound
}
- delRole(tx, r.Role)
+ tx.UnsafeDeleteRole(r.Role)
- users := getAllUsers(as.lg, tx)
+ users := tx.UnsafeGetAllUsers()
for _, user := range users {
updatedUser := &authpb.User{
Name: user.Name,
@@ -727,12 +748,11 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete
continue
}
- putUser(as.lg, tx, updatedUser)
-
- as.invalidateCachedPerm(string(user.Name))
+ tx.UnsafePutUser(updatedUser)
}
as.commitRevision(tx)
+ as.refreshRangePermCache(tx)
as.lg.Info("deleted a role", zap.String("role-name", r.Role))
return &pb.AuthRoleDeleteResponse{}, nil
@@ -747,7 +767,7 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse,
tx.Lock()
defer tx.Unlock()
- role := getRole(as.lg, tx, r.Name)
+ role := tx.UnsafeGetRole(r.Name)
if role != nil {
return nil, ErrRoleAlreadyExist
}
@@ -756,7 +776,7 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse,
Name: []byte(r.Name),
}
- putRole(as.lg, tx, newRole)
+ tx.UnsafePutRole(newRole)
as.commitRevision(tx)
@@ -783,11 +803,18 @@ func (perms permSlice) Swap(i, j int) {
}
func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+ if r.Perm == nil {
+ return nil, ErrPermissionNotGiven
+ }
+ if !isValidPermissionRange(r.Perm.Key, r.Perm.RangeEnd) {
+ return nil, ErrInvalidAuthMgmt
+ }
+
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
- role := getRole(as.lg, tx, r.Name)
+ role := tx.UnsafeGetRole(r.Name)
if role == nil {
return nil, ErrRoleNotFound
}
@@ -811,18 +838,17 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (
sort.Sort(permSlice(role.KeyPermission))
}
- putRole(as.lg, tx, role)
-
- // TODO(mitake): currently single role update invalidates every cache
- // It should be optimized.
- as.clearCachedPerm()
+ tx.UnsafePutRole(role)
as.commitRevision(tx)
+ as.refreshRangePermCache(tx)
as.lg.Info(
"granted/updated a permission to a user",
zap.String("user-name", r.Name),
zap.String("permission-name", authpb.Permission_Type_name[int32(r.Perm.PermType)]),
+ zap.ByteString("key", r.Perm.Key),
+ zap.ByteString("range-end", r.Perm.RangeEnd),
)
return &pb.AuthRoleGrantPermissionResponse{}, nil
}
@@ -847,11 +873,11 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE
return ErrAuthOldRevision
}
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
+ tx := as.be.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
- user := getUser(as.lg, tx, userName)
+ user := tx.UnsafeGetUser(userName)
if user == nil {
as.lg.Error("cannot find a user for permission check", zap.String("user-name", userName))
return ErrPermissionDenied
@@ -862,7 +888,7 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE
return nil
}
- if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) {
+ if as.isRangeOpPermitted(userName, key, rangeEnd, permTyp) {
return nil
}
@@ -889,10 +915,10 @@ func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {
return ErrUserEmpty
}
- tx := as.be.BatchTx()
- tx.Lock()
- u := getUser(as.lg, tx, authInfo.Username)
- tx.Unlock()
+ tx := as.be.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ u := tx.UnsafeGetUser(authInfo.Username)
if u == nil {
return ErrUserNotFound
@@ -905,103 +931,6 @@ func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {
return nil
}
-func getUser(lg *zap.Logger, tx backend.BatchTx, username string) *authpb.User {
- _, vs := tx.UnsafeRange(authUsersBucketName, []byte(username), nil, 0)
- if len(vs) == 0 {
- return nil
- }
-
- user := &authpb.User{}
- err := user.Unmarshal(vs[0])
- if err != nil {
- lg.Panic(
- "failed to unmarshal 'authpb.User'",
- zap.String("user-name", username),
- zap.Error(err),
- )
- }
- return user
-}
-
-func getAllUsers(lg *zap.Logger, tx backend.BatchTx) []*authpb.User {
- _, vs := tx.UnsafeRange(authUsersBucketName, []byte{0}, []byte{0xff}, -1)
- if len(vs) == 0 {
- return nil
- }
-
- users := make([]*authpb.User, len(vs))
- for i := range vs {
- user := &authpb.User{}
- err := user.Unmarshal(vs[i])
- if err != nil {
- lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err))
- }
- users[i] = user
- }
- return users
-}
-
-func putUser(lg *zap.Logger, tx backend.BatchTx, user *authpb.User) {
- b, err := user.Marshal()
- if err != nil {
- lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err))
- }
- tx.UnsafePut(authUsersBucketName, user.Name, b)
-}
-
-func delUser(tx backend.BatchTx, username string) {
- tx.UnsafeDelete(authUsersBucketName, []byte(username))
-}
-
-func getRole(lg *zap.Logger, tx backend.BatchTx, rolename string) *authpb.Role {
- _, vs := tx.UnsafeRange(authRolesBucketName, []byte(rolename), nil, 0)
- if len(vs) == 0 {
- return nil
- }
-
- role := &authpb.Role{}
- err := role.Unmarshal(vs[0])
- if err != nil {
- lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err))
- }
- return role
-}
-
-func getAllRoles(lg *zap.Logger, tx backend.BatchTx) []*authpb.Role {
- _, vs := tx.UnsafeRange(authRolesBucketName, []byte{0}, []byte{0xff}, -1)
- if len(vs) == 0 {
- return nil
- }
-
- roles := make([]*authpb.Role, len(vs))
- for i := range vs {
- role := &authpb.Role{}
- err := role.Unmarshal(vs[i])
- if err != nil {
- lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err))
- }
- roles[i] = role
- }
- return roles
-}
-
-func putRole(lg *zap.Logger, tx backend.BatchTx, role *authpb.Role) {
- b, err := role.Marshal()
- if err != nil {
- lg.Panic(
- "failed to marshal 'authpb.Role'",
- zap.String("role-name", string(role.Name)),
- zap.Error(err),
- )
- }
-
- tx.UnsafePut(authRolesBucketName, role.Name, b)
-}
-
-func delRole(tx backend.BatchTx, rolename string) {
- tx.UnsafeDelete(authRolesBucketName, []byte(rolename))
-}
-
func (as *authStore) IsAuthEnabled() bool {
as.enabledMu.RLock()
defer as.enabledMu.RUnlock()
@@ -1009,7 +938,7 @@ func (as *authStore) IsAuthEnabled() bool {
}
// NewAuthStore creates a new AuthStore.
-func NewAuthStore(lg *zap.Logger, be backend.Backend, tp TokenProvider, bcryptCost int) *authStore {
+func NewAuthStore(lg *zap.Logger, be AuthBackend, tp TokenProvider, bcryptCost int) *authStore {
if lg == nil {
lg = zap.NewNop()
}
@@ -1025,23 +954,14 @@ func NewAuthStore(lg *zap.Logger, be backend.Backend, tp TokenProvider, bcryptCo
bcryptCost = bcrypt.DefaultCost
}
+ be.CreateAuthBuckets()
tx := be.BatchTx()
+ // We should call LockOutsideApply here, but the txPostLockHooks isn't set
+ // on EtcdServer yet, so it's OK.
tx.Lock()
-
- tx.UnsafeCreateBucket(authBucketName)
- tx.UnsafeCreateBucket(authUsersBucketName)
- tx.UnsafeCreateBucket(authRolesBucketName)
-
- enabled := false
- _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0)
- if len(vs) == 1 {
- if bytes.Equal(vs[0], authEnabled) {
- enabled = true
- }
- }
-
+ enabled := tx.UnsafeReadAuthEnabled()
as := &authStore{
- revision: getRevision(tx),
+ revision: tx.UnsafeReadAuthRevision(),
lg: lg,
be: be,
enabled: enabled,
@@ -1060,6 +980,8 @@ func NewAuthStore(lg *zap.Logger, be backend.Backend, tp TokenProvider, bcryptCo
as.setupMetricsReporter()
+ as.refreshRangePermCache(tx)
+
tx.Unlock()
be.ForceCommit()
@@ -1072,20 +994,9 @@ func hasRootRole(u *authpb.User) bool {
return idx != len(u.Roles) && u.Roles[idx] == rootRole
}
-func (as *authStore) commitRevision(tx backend.BatchTx) {
+func (as *authStore) commitRevision(tx UnsafeAuthWriter) {
atomic.AddUint64(&as.revision, 1)
- revBytes := make([]byte, revBytesLen)
- binary.BigEndian.PutUint64(revBytes, as.Revision())
- tx.UnsafePut(authBucketName, revisionKey, revBytes)
-}
-
-func getRevision(tx backend.BatchTx) uint64 {
- _, vs := tx.UnsafeRange(authBucketName, revisionKey, nil, 0)
- if len(vs) != 1 {
- // this can happen in the initialization phase
- return 0
- }
- return binary.BigEndian.Uint64(vs[0])
+ tx.UnsafeSaveAuthRevision(as.Revision())
}
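
commitRevision no longer serializes the counter itself; it bumps the atomic value and hands persistence to UnsafeSaveAuthRevision on the new writer interface. Judging from the deleted getRevision/commitRevision code, the on-disk format is presumably unchanged: an 8-byte big-endian uint64 stored under the revision key. A tiny round-trip check of that encoding (the length constant is taken from the removed code; everything else is illustrative):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const revBytesLen = 8 // matches the removed revBytesLen usage

	rev := uint64(42)
	revBytes := make([]byte, revBytesLen)
	binary.BigEndian.PutUint64(revBytes, rev) // what commitRevision used to write under revisionKey

	fmt.Println(binary.BigEndian.Uint64(revBytes) == rev) // true: round-trips, as getRevision read it back
}
```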
func (as *authStore) setRevision(rev uint64) {
@@ -1140,12 +1051,16 @@ func (as *authStore) AuthInfoFromTLS(ctx context.Context) (ai *AuthInfo) {
}
func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
+ if !as.IsAuthEnabled() {
+ return nil, nil
+ }
+
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return nil, nil
}
- //TODO(mitake|hexfusion) review unifying key names
+ // TODO(mitake|hexfusion) review unifying key names
ts, ok := md[rpctypes.TokenFieldNameGRPC]
if !ok {
ts, ok = md[rpctypes.TokenFieldNameSwagger]
@@ -1198,7 +1113,6 @@ func decomposeOpts(lg *zap.Logger, optstr string) (string, map[string]string, er
}
return tokenType, typeSpecificOpts, nil
-
}
// NewTokenProvider creates a new token provider.
@@ -1206,7 +1120,8 @@ func NewTokenProvider(
lg *zap.Logger,
tokenOpts string,
indexWaiter func(uint64) <-chan struct{},
- TokenTTL time.Duration) (TokenProvider, error) {
+ TokenTTL time.Duration,
+) (TokenProvider, error) {
tokenType, typeSpecificOpts, err := decomposeOpts(lg, tokenOpts)
if err != nil {
return nil, ErrInvalidAuthOpts
@@ -1280,7 +1195,7 @@ func (as *authStore) WithRoot(ctx context.Context) context.Context {
func (as *authStore) HasRole(user, role string) bool {
tx := as.be.BatchTx()
tx.Lock()
- u := getUser(as.lg, tx, user)
+ u := tx.UnsafeGetUser(user)
tx.Unlock()
if u == nil {
diff --git a/server/auth/store_mock_test.go b/server/auth/store_mock_test.go
new file mode 100644
index 00000000000..764100fa3cb
--- /dev/null
+++ b/server/auth/store_mock_test.go
@@ -0,0 +1,137 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import "go.etcd.io/etcd/api/v3/authpb"
+
+type backendMock struct {
+ users map[string]*authpb.User
+ roles map[string]*authpb.Role
+ enabled bool
+ revision uint64
+}
+
+func newBackendMock() *backendMock {
+ return &backendMock{
+ users: make(map[string]*authpb.User),
+ roles: make(map[string]*authpb.Role),
+ }
+}
+
+func (b *backendMock) CreateAuthBuckets() {
+}
+
+func (b *backendMock) ForceCommit() {
+}
+
+func (b *backendMock) ReadTx() AuthReadTx {
+ return &txMock{be: b}
+}
+
+func (b *backendMock) BatchTx() AuthBatchTx {
+ return &txMock{be: b}
+}
+
+func (b *backendMock) GetUser(s string) *authpb.User {
+ return b.users[s]
+}
+
+func (b *backendMock) GetAllUsers() []*authpb.User {
+ return b.BatchTx().UnsafeGetAllUsers()
+}
+
+func (b *backendMock) GetRole(s string) *authpb.Role {
+ return b.roles[s]
+}
+
+func (b *backendMock) GetAllRoles() []*authpb.Role {
+ return b.BatchTx().UnsafeGetAllRoles()
+}
+
+var _ AuthBackend = (*backendMock)(nil)
+
+type txMock struct {
+ be *backendMock
+}
+
+var _ AuthBatchTx = (*txMock)(nil)
+
+func (t txMock) UnsafeReadAuthEnabled() bool {
+ return t.be.enabled
+}
+
+func (t txMock) UnsafeReadAuthRevision() uint64 {
+ return t.be.revision
+}
+
+func (t txMock) UnsafeGetUser(s string) *authpb.User {
+ return t.be.users[s]
+}
+
+func (t txMock) UnsafeGetRole(s string) *authpb.Role {
+ return t.be.roles[s]
+}
+
+func (t txMock) UnsafeGetAllUsers() []*authpb.User {
+ var users []*authpb.User
+ for _, u := range t.be.users {
+ users = append(users, u)
+ }
+ return users
+}
+
+func (t txMock) UnsafeGetAllRoles() []*authpb.Role {
+ var roles []*authpb.Role
+ for _, r := range t.be.roles {
+ roles = append(roles, r)
+ }
+ return roles
+}
+
+func (t txMock) Lock() {
+}
+
+func (t txMock) Unlock() {
+}
+
+func (t txMock) RLock() {
+}
+
+func (t txMock) RUnlock() {
+}
+
+func (t txMock) UnsafeSaveAuthEnabled(enabled bool) {
+ t.be.enabled = enabled
+}
+
+func (t txMock) UnsafeSaveAuthRevision(rev uint64) {
+ t.be.revision = rev
+}
+
+func (t txMock) UnsafePutUser(user *authpb.User) {
+ t.be.users[string(user.Name)] = user
+}
+
+func (t txMock) UnsafeDeleteUser(s string) {
+ delete(t.be.users, s)
+}
+
+func (t txMock) UnsafePutRole(role *authpb.Role) {
+ t.be.roles[string(role.Name)] = role
+}
+
+func (t txMock) UnsafeDeleteRole(s string) {
+ delete(t.be.roles, s)
+}
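
Read together, backendMock and txMock imply the shape of the new auth storage interfaces. The sketch below is inferred purely from the mock's method sets (plus commitRevision's new UnsafeAuthWriter parameter, which hints that the write-side methods are also grouped into a narrower interface); the authoritative definitions live elsewhere in this change and may be decomposed differently:

```go
// Hypothetical reconstruction; method sets come from backendMock/txMock above,
// while the package name and interface embedding are assumptions.
package auth

import "go.etcd.io/etcd/api/v3/authpb"

type AuthReadTx interface {
	RLock()
	RUnlock()
	UnsafeReadAuthEnabled() bool
	UnsafeReadAuthRevision() uint64
	UnsafeGetUser(string) *authpb.User
	UnsafeGetRole(string) *authpb.Role
	UnsafeGetAllUsers() []*authpb.User
	UnsafeGetAllRoles() []*authpb.Role
}

type AuthBatchTx interface {
	AuthReadTx // txMock backs both ReadTx and BatchTx, so assume the read methods carry over

	Lock()
	Unlock()
	UnsafeSaveAuthEnabled(enabled bool)
	UnsafeSaveAuthRevision(rev uint64)
	UnsafePutUser(*authpb.User)
	UnsafeDeleteUser(string)
	UnsafePutRole(*authpb.Role)
	UnsafeDeleteRole(string)
}

type AuthBackend interface {
	CreateAuthBuckets()
	ForceCommit()
	ReadTx() AuthReadTx
	BatchTx() AuthBatchTx

	GetUser(string) *authpb.User
	GetAllUsers() []*authpb.User
	GetRole(string) *authpb.Role
	GetAllRoles() []*authpb.Role
}
```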
diff --git a/server/auth/store_test.go b/server/auth/store_test.go
index c530ffe8861..c8cd5cad7cc 100644
--- a/server/auth/store_test.go
+++ b/server/auth/store_test.go
@@ -17,26 +17,26 @@ package auth
import (
"context"
"encoding/base64"
+ "errors"
"fmt"
- "reflect"
"strings"
"sync"
"testing"
"time"
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/zap/zaptest"
+ "golang.org/x/crypto/bcrypt"
+ "google.golang.org/grpc/metadata"
+
"go.etcd.io/etcd/api/v3/authpb"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
- betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
-
- "go.uber.org/zap"
- "golang.org/x/crypto/bcrypt"
- "google.golang.org/grpc/metadata"
+ "go.etcd.io/etcd/pkg/v3/adt"
)
func dummyIndexWaiter(index uint64) <-chan struct{} {
- ch := make(chan struct{})
+ ch := make(chan struct{}, 1)
go func() {
ch <- struct{}{}
}()
@@ -46,25 +46,21 @@ func dummyIndexWaiter(index uint64) <-chan struct{} {
// TestNewAuthStoreRevision ensures that a newly created auth store
// keeps the old revision when there are no changes.
func TestNewAuthStoreRevision(t *testing.T) {
- b, tPath := betesting.NewDefaultTmpBackend(t)
-
- tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
+ tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
if err != nil {
t.Fatal(err)
}
- as := NewAuthStore(zap.NewExample(), b, tp, bcrypt.MinCost)
+ be := newBackendMock()
+ as := NewAuthStore(zaptest.NewLogger(t), be, tp, bcrypt.MinCost)
err = enableAuthAndCreateRoot(as)
if err != nil {
t.Fatal(err)
}
old := as.Revision()
as.Close()
- b.Close()
// no changes to commit
- b2 := backend.NewDefaultBackend(tPath)
- defer b2.Close()
- as = NewAuthStore(zap.NewExample(), b2, tp, bcrypt.MinCost)
+ as = NewAuthStore(zaptest.NewLogger(t), be, tp, bcrypt.MinCost)
defer as.Close()
new := as.Revision()
@@ -73,19 +69,16 @@ func TestNewAuthStoreRevision(t *testing.T) {
}
}
-// TestNewAuthStoreBryptCost ensures that NewAuthStore uses default when given bcrypt-cost is invalid
+// TestNewAuthStoreBcryptCost ensures that NewAuthStore falls back to the default cost when the given bcrypt cost is invalid
func TestNewAuthStoreBcryptCost(t *testing.T) {
- b, _ := betesting.NewDefaultTmpBackend(t)
- defer betesting.Close(t, b)
-
- tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
+ tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
if err != nil {
t.Fatal(err)
}
invalidCosts := [2]int{bcrypt.MinCost - 1, bcrypt.MaxCost + 1}
for _, invalidCost := range invalidCosts {
- as := NewAuthStore(zap.NewExample(), b, tp, invalidCost)
+ as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, invalidCost)
defer as.Close()
if as.BcryptCost() != bcrypt.DefaultCost {
t.Fatalf("expected DefaultCost when bcryptcost is invalid")
@@ -95,17 +88,15 @@ func TestNewAuthStoreBcryptCost(t *testing.T) {
func encodePassword(s string) string {
hashedPassword, _ := bcrypt.GenerateFromPassword([]byte(s), bcrypt.MinCost)
- return base64.StdEncoding.EncodeToString([]byte(hashedPassword))
+ return base64.StdEncoding.EncodeToString(hashedPassword)
}
func setupAuthStore(t *testing.T) (store *authStore, teardownfunc func(t *testing.T)) {
- b, _ := betesting.NewDefaultTmpBackend(t)
-
- tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
+ tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
if err != nil {
t.Fatal(err)
}
- as := NewAuthStore(zap.NewExample(), b, tp, bcrypt.MinCost)
+ as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, bcrypt.MinCost)
err = enableAuthAndCreateRoot(as)
if err != nil {
t.Fatal(err)
@@ -123,13 +114,28 @@ func setupAuthStore(t *testing.T) (store *authStore, teardownfunc func(t *testin
t.Fatal(err)
}
+ // The UserAdd function cannot generate user data in the old etcd format (where the user's Options field is nil),
+ // so such special users are added through the underlying interface.
+ addUserWithNoOption(as)
+
tearDown := func(_ *testing.T) {
- b.Close()
as.Close()
}
return as, tearDown
}
+func addUserWithNoOption(as *authStore) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+ tx.UnsafePutUser(&authpb.User{
+ Name: []byte("foo-no-user-options"),
+ Password: []byte("bar"),
+ })
+ as.commitRevision(tx)
+ as.refreshRangePermCache(tx)
+}
+
func enableAuthAndCreateRoot(as *authStore) error {
_, err := as.UserAdd(&pb.AuthUserAddRequest{Name: "root", HashedPassword: encodePassword("root"), Options: &authpb.UserAddOptions{NoPassword: false}})
if err != nil {
@@ -153,20 +159,25 @@ func TestUserAdd(t *testing.T) {
as, tearDown := setupAuthStore(t)
defer tearDown(t)
- ua := &pb.AuthUserAddRequest{Name: "foo", Options: &authpb.UserAddOptions{NoPassword: false}}
+ const userName = "foo"
+ ua := &pb.AuthUserAddRequest{Name: userName, Options: &authpb.UserAddOptions{NoPassword: false}}
_, err := as.UserAdd(ua) // add an existing user
if err == nil {
t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err)
}
- if err != ErrUserAlreadyExist {
+ if !errors.Is(err, ErrUserAlreadyExist) {
t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err)
}
ua = &pb.AuthUserAddRequest{Name: "", Options: &authpb.UserAddOptions{NoPassword: false}}
_, err = as.UserAdd(ua) // add a user with empty name
- if err != ErrUserEmpty {
+ if !errors.Is(err, ErrUserEmpty) {
t.Fatal(err)
}
+
+ if _, ok := as.rangePermCache[userName]; !ok {
+ t.Fatalf("user %s should be added but it doesn't exist in rangePermCache", userName)
+ }
}
func TestRecover(t *testing.T) {
@@ -182,6 +193,30 @@ func TestRecover(t *testing.T) {
}
}
+func TestRecoverWithEmptyRangePermCache(t *testing.T) {
+ as, tearDown := setupAuthStore(t)
+ defer as.Close()
+ defer tearDown(t)
+
+ as.enabled = false
+ as.rangePermCache = map[string]*unifiedRangePermissions{}
+ as.Recover(as.be)
+
+ if !as.IsAuthEnabled() {
+ t.Fatalf("expected auth enabled got disabled")
+ }
+
+ if len(as.rangePermCache) != 3 {
+ t.Fatalf("rangePermCache should have permission information for 3 users (\"root\" and \"foo\",\"foo-no-user-options\"), but has %d information", len(as.rangePermCache))
+ }
+ if _, ok := as.rangePermCache["root"]; !ok {
+ t.Fatal("user \"root\" should be created by setupAuthStore() but doesn't exist in rangePermCache")
+ }
+ if _, ok := as.rangePermCache["foo"]; !ok {
+ t.Fatal("user \"foo\" should be created by setupAuthStore() but doesn't exist in rangePermCache")
+ }
+}
+
func TestCheckPassword(t *testing.T) {
as, tearDown := setupAuthStore(t)
defer tearDown(t)
@@ -191,7 +226,7 @@ func TestCheckPassword(t *testing.T) {
if err == nil {
t.Fatalf("expected %v, got %v", ErrAuthFailed, err)
}
- if err != ErrAuthFailed {
+ if !errors.Is(err, ErrAuthFailed) {
t.Fatalf("expected %v, got %v", ErrAuthFailed, err)
}
@@ -206,7 +241,7 @@ func TestCheckPassword(t *testing.T) {
if err == nil {
t.Fatalf("expected %v, got %v", ErrAuthFailed, err)
}
- if err != ErrAuthFailed {
+ if !errors.Is(err, ErrAuthFailed) {
t.Fatalf("expected %v, got %v", ErrAuthFailed, err)
}
}
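
Most of the assertion churn in this test file is the move from comparing sentinel errors with `==`/`!=` to `errors.Is`. The difference only shows up once an error gets wrapped somewhere between producer and test; a minimal, self-contained illustration (the sentinel and the wrapping call are hypothetical, not taken from etcd):

```go
package main

import (
	"errors"
	"fmt"
)

// errUserAlreadyExist is a hypothetical sentinel standing in for the auth package's errors.
var errUserAlreadyExist = errors.New("user already exists")

func main() {
	// Imagine an apply layer that wraps the sentinel with %w to add context.
	err := fmt.Errorf("apply auth request: %w", errUserAlreadyExist)

	fmt.Println(err == errUserAlreadyExist)          // false: identity comparison breaks on wrapped errors
	fmt.Println(errors.Is(err, errUserAlreadyExist)) // true: errors.Is walks the wrap chain
}
```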
@@ -216,7 +251,8 @@ func TestUserDelete(t *testing.T) {
defer tearDown(t)
// delete an existing user
- ud := &pb.AuthUserDeleteRequest{Name: "foo"}
+ const userName = "foo"
+ ud := &pb.AuthUserDeleteRequest{Name: userName}
_, err := as.UserDelete(ud)
if err != nil {
t.Fatal(err)
@@ -227,9 +263,48 @@ func TestUserDelete(t *testing.T) {
if err == nil {
t.Fatalf("expected %v, got %v", ErrUserNotFound, err)
}
- if err != ErrUserNotFound {
+ if !errors.Is(err, ErrUserNotFound) {
t.Fatalf("expected %v, got %v", ErrUserNotFound, err)
}
+
+ if _, ok := as.rangePermCache[userName]; ok {
+ t.Fatalf("user %s should be deleted but it exists in rangePermCache", userName)
+ }
+}
+
+func TestUserDeleteAndPermCache(t *testing.T) {
+ as, tearDown := setupAuthStore(t)
+ defer tearDown(t)
+
+ // delete an existing user
+ const deletedUserName = "foo"
+ ud := &pb.AuthUserDeleteRequest{Name: deletedUserName}
+ _, err := as.UserDelete(ud)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // delete a non-existing user
+ _, err = as.UserDelete(ud)
+ if !errors.Is(err, ErrUserNotFound) {
+ t.Fatalf("expected %v, got %v", ErrUserNotFound, err)
+ }
+
+ if _, ok := as.rangePermCache[deletedUserName]; ok {
+ t.Fatalf("user %s should be deleted but it exists in rangePermCache", deletedUserName)
+ }
+
+ // add a new user
+ const newUser = "bar"
+ ua := &pb.AuthUserAddRequest{Name: newUser, HashedPassword: encodePassword("pwd1"), Options: &authpb.UserAddOptions{NoPassword: false}}
+ _, err = as.UserAdd(ua)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, ok := as.rangePermCache[newUser]; !ok {
+ t.Fatalf("user %s should exist but it doesn't exist in rangePermCache", deletedUserName)
+ }
}
func TestUserChangePassword(t *testing.T) {
@@ -258,9 +333,15 @@ func TestUserChangePassword(t *testing.T) {
if err == nil {
t.Fatalf("expected %v, got %v", ErrUserNotFound, err)
}
- if err != ErrUserNotFound {
+ if !errors.Is(err, ErrUserNotFound) {
t.Fatalf("expected %v, got %v", ErrUserNotFound, err)
}
+
+ // change the password of a user whose Options field is nil
+ _, err = as.UserChangePassword(&pb.AuthUserChangePasswordRequest{Name: "foo-no-user-options", HashedPassword: encodePassword("bar")})
+ if err != nil {
+ t.Fatal(err)
+ }
}
func TestRoleAdd(t *testing.T) {
@@ -275,7 +356,7 @@ func TestRoleAdd(t *testing.T) {
// add a role with empty name
_, err = as.RoleAdd(&pb.AuthRoleAddRequest{Name: ""})
- if err != ErrRoleEmpty {
+ if !errors.Is(err, ErrRoleEmpty) {
t.Fatal(err)
}
}
@@ -295,7 +376,7 @@ func TestUserGrant(t *testing.T) {
if err == nil {
t.Errorf("expected %v, got %v", ErrUserNotFound, err)
}
- if err != ErrUserNotFound {
+ if !errors.Is(err, ErrUserNotFound) {
t.Errorf("expected %v, got %v", ErrUserNotFound, err)
}
}
@@ -365,6 +446,15 @@ func TestIsOpPermitted(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+
+ // Drop the user's permission from cache and expect a permission denied
+ // error.
+ as.rangePermCacheMu.Lock()
+ delete(as.rangePermCache, "foo")
+ as.rangePermCacheMu.Unlock()
+ if err := as.isOpPermitted("foo", as.Revision(), perm.Key, perm.RangeEnd, perm.PermType); !errors.Is(err, ErrPermissionDenied) {
+ t.Fatal(err)
+ }
}
func TestGetUser(t *testing.T) {
@@ -384,9 +474,8 @@ func TestGetUser(t *testing.T) {
t.Fatal("expect user not nil, got nil")
}
expected := []string{"role-test"}
- if !reflect.DeepEqual(expected, u.Roles) {
- t.Errorf("expected %v, got %v", expected, u.Roles)
- }
+
+ assert.Equal(t, expected, u.Roles)
// check non existent user
_, err = as.UserGet(&pb.AuthUserGetRequest{Name: "nouser"})
@@ -435,7 +524,6 @@ func TestRoleGrantPermission(t *testing.T) {
Name: "role-test-1",
Perm: perm,
})
-
if err != nil {
t.Error(err)
}
@@ -445,9 +533,193 @@ func TestRoleGrantPermission(t *testing.T) {
t.Fatal(err)
}
- if !reflect.DeepEqual(perm, r.Perm[0]) {
- t.Errorf("expected %v, got %v", perm, r.Perm[0])
+ assert.Equal(t, perm, r.Perm[0])
+
+ // trying to grant nil permissions returns an error (and doesn't change the actual permissions!)
+ _, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{
+ Name: "role-test-1",
+ })
+
+ if !errors.Is(err, ErrPermissionNotGiven) {
+ t.Error(err)
+ }
+
+ r, err = as.RoleGet(&pb.AuthRoleGetRequest{Role: "role-test-1"})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ assert.Equal(t, perm, r.Perm[0])
+}
+
+func TestRoleGrantInvalidPermission(t *testing.T) {
+ as, tearDown := setupAuthStore(t)
+ defer tearDown(t)
+
+ _, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test-1"})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tests := []struct {
+ name string
+ perm *authpb.Permission
+ want error
+ }{
+ {
+ name: "valid range",
+ perm: &authpb.Permission{
+ PermType: authpb.WRITE,
+ Key: []byte("Keys"),
+ RangeEnd: []byte("RangeEnd"),
+ },
+ want: nil,
+ },
+ {
+ name: "invalid range: nil key",
+ perm: &authpb.Permission{
+ PermType: authpb.WRITE,
+ Key: nil,
+ RangeEnd: []byte("RangeEnd"),
+ },
+ want: ErrInvalidAuthMgmt,
+ },
+ {
+ name: "valid range: single key",
+ perm: &authpb.Permission{
+ PermType: authpb.WRITE,
+ Key: []byte("Keys"),
+ RangeEnd: nil,
+ },
+ want: nil,
+ },
+ {
+ name: "valid range: single key",
+ perm: &authpb.Permission{
+ PermType: authpb.WRITE,
+ Key: []byte("Keys"),
+ RangeEnd: []byte{},
+ },
+ want: nil,
+ },
+ {
+ name: "invalid range: empty (Key == RangeEnd)",
+ perm: &authpb.Permission{
+ PermType: authpb.WRITE,
+ Key: []byte("a"),
+ RangeEnd: []byte("a"),
+ },
+ want: ErrInvalidAuthMgmt,
+ },
+ {
+ name: "invalid range: empty (Key > RangeEnd)",
+ perm: &authpb.Permission{
+ PermType: authpb.WRITE,
+ Key: []byte("b"),
+ RangeEnd: []byte("a"),
+ },
+ want: ErrInvalidAuthMgmt,
+ },
+ {
+ name: "invalid range: length of key is 0",
+ perm: &authpb.Permission{
+ PermType: authpb.WRITE,
+ Key: []byte(""),
+ RangeEnd: []byte("a"),
+ },
+ want: ErrInvalidAuthMgmt,
+ },
+ {
+ name: "invalid range: length of key is 0",
+ perm: &authpb.Permission{
+ PermType: authpb.WRITE,
+ Key: []byte(""),
+ RangeEnd: []byte(""),
+ },
+ want: ErrInvalidAuthMgmt,
+ },
+ {
+ name: "invalid range: length of key is 0",
+ perm: &authpb.Permission{
+ PermType: authpb.WRITE,
+ Key: []byte(""),
+ RangeEnd: []byte{0x00},
+ },
+ want: ErrInvalidAuthMgmt,
+ },
+ {
+ name: "valid range: single key permission for []byte{0x00}",
+ perm: &authpb.Permission{
+ PermType: authpb.WRITE,
+ Key: []byte{0x00},
+ RangeEnd: []byte(""),
+ },
+ want: nil,
+ },
+ {
+ name: "valid range: \"a\" or larger keys",
+ perm: &authpb.Permission{
+ PermType: authpb.WRITE,
+ Key: []byte("a"),
+ RangeEnd: []byte{0x00},
+ },
+ want: nil,
+ },
+ {
+ name: "valid range: the entire keys",
+ perm: &authpb.Permission{
+ PermType: authpb.WRITE,
+ Key: []byte{0x00},
+ RangeEnd: []byte{0x00},
+ },
+ want: nil,
+ },
+ }
+
+ for i, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ _, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{
+ Name: "role-test-1",
+ Perm: tt.perm,
+ })
+
+ if !errors.Is(err, tt.want) {
+ t.Errorf("#%d: result=%t, want=%t", i, err, tt.want)
+ }
+ })
+ }
+}
+
+func TestRootRoleGrantPermission(t *testing.T) {
+ as, tearDown := setupAuthStore(t)
+ defer tearDown(t)
+
+ perm := &authpb.Permission{
+ PermType: authpb.WRITE,
+ Key: []byte("Keys"),
+ RangeEnd: []byte("RangeEnd"),
+ }
+ _, err := as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{
+ Name: "root",
+ Perm: perm,
+ })
+ if err != nil {
+ t.Error(err)
+ }
+
+ r, err := as.RoleGet(&pb.AuthRoleGetRequest{Role: "root"})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Whatever permission is granted to root, RoleGet always returns the full root permission.
+ expectPerm := &authpb.Permission{
+ PermType: authpb.READWRITE,
+ Key: []byte{},
+ RangeEnd: []byte{0},
}
+
+ assert.Equal(t, expectPerm, r.Perm[0])
}
func TestRoleRevokePermission(t *testing.T) {
@@ -468,7 +740,6 @@ func TestRoleRevokePermission(t *testing.T) {
Name: "role-test-1",
Perm: perm,
})
-
if err != nil {
t.Fatal(err)
}
@@ -506,40 +777,65 @@ func TestUserRevokePermission(t *testing.T) {
t.Fatal(err)
}
- _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "foo", Role: "role-test"})
+ const userName = "foo"
+ _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: userName, Role: "role-test"})
if err != nil {
t.Fatal(err)
}
- _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "foo", Role: "role-test-1"})
+ _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: userName, Role: "role-test-1"})
if err != nil {
t.Fatal(err)
}
- u, err := as.UserGet(&pb.AuthUserGetRequest{Name: "foo"})
+ perm := &authpb.Permission{
+ PermType: authpb.WRITE,
+ Key: []byte("WriteKeyBegin"),
+ RangeEnd: []byte("WriteKeyEnd"),
+ }
+ _, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{
+ Name: "role-test-1",
+ Perm: perm,
+ })
if err != nil {
t.Fatal(err)
}
- expected := []string{"role-test", "role-test-1"}
- if !reflect.DeepEqual(expected, u.Roles) {
- t.Fatalf("expected %v, got %v", expected, u.Roles)
+ if _, ok := as.rangePermCache[userName]; !ok {
+ t.Fatalf("User %s should have its entry in rangePermCache", userName)
+ }
+ unifiedPerm := as.rangePermCache[userName]
+ pt1 := adt.NewBytesAffinePoint([]byte("WriteKeyBegin"))
+ if !unifiedPerm.writePerms.Contains(pt1) {
+ t.Fatal("rangePermCache should contain WriteKeyBegin")
+ }
+ pt2 := adt.NewBytesAffinePoint([]byte("OutOfRange"))
+ if unifiedPerm.writePerms.Contains(pt2) {
+ t.Fatal("rangePermCache should not contain OutOfRange")
}
- _, err = as.UserRevokeRole(&pb.AuthUserRevokeRoleRequest{Name: "foo", Role: "role-test-1"})
+ u, err := as.UserGet(&pb.AuthUserGetRequest{Name: userName})
if err != nil {
t.Fatal(err)
}
- u, err = as.UserGet(&pb.AuthUserGetRequest{Name: "foo"})
+ expected := []string{"role-test", "role-test-1"}
+
+ assert.Equal(t, expected, u.Roles)
+
+ _, err = as.UserRevokeRole(&pb.AuthUserRevokeRoleRequest{Name: userName, Role: "role-test-1"})
if err != nil {
t.Fatal(err)
}
- expected = []string{"role-test"}
- if !reflect.DeepEqual(expected, u.Roles) {
- t.Errorf("expected %v, got %v", expected, u.Roles)
+ u, err = as.UserGet(&pb.AuthUserGetRequest{Name: userName})
+ if err != nil {
+ t.Fatal(err)
}
+
+ expected = []string{"role-test"}
+
+ assert.Equal(t, expected, u.Roles)
}
func TestRoleDelete(t *testing.T) {
@@ -555,9 +851,8 @@ func TestRoleDelete(t *testing.T) {
t.Fatal(err)
}
expected := []string{"root"}
- if !reflect.DeepEqual(expected, rl.Roles) {
- t.Errorf("expected %v, got %v", expected, rl.Roles)
- }
+
+ assert.Equal(t, expected, rl.Roles)
}
func TestAuthInfoFromCtx(t *testing.T) {
@@ -585,13 +880,13 @@ func TestAuthInfoFromCtx(t *testing.T) {
ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{rpctypes.TokenFieldNameGRPC: "Invalid Token"}))
_, err = as.AuthInfoFromCtx(ctx)
- if err != ErrInvalidAuthToken {
+ if !errors.Is(err, ErrInvalidAuthToken) {
t.Errorf("expected %v, got %v", ErrInvalidAuthToken, err)
}
ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{rpctypes.TokenFieldNameGRPC: "Invalid.Token"}))
_, err = as.AuthInfoFromCtx(ctx)
- if err != ErrInvalidAuthToken {
+ if !errors.Is(err, ErrInvalidAuthToken) {
t.Errorf("expected %v, got %v", ErrInvalidAuthToken, err)
}
@@ -612,14 +907,14 @@ func TestAuthDisable(t *testing.T) {
as.AuthDisable()
ctx := context.WithValue(context.WithValue(context.TODO(), AuthenticateParamIndex{}, uint64(2)), AuthenticateParamSimpleTokenPrefix{}, "dummy")
_, err := as.Authenticate(ctx, "foo", "bar")
- if err != ErrAuthNotEnabled {
+ if !errors.Is(err, ErrAuthNotEnabled) {
t.Errorf("expected %v, got %v", ErrAuthNotEnabled, err)
}
// Disabling disabled auth to make sure it can return safely if store is already disabled.
as.AuthDisable()
_, err = as.Authenticate(ctx, "foo", "bar")
- if err != ErrAuthNotEnabled {
+ if !errors.Is(err, ErrAuthNotEnabled) {
t.Errorf("expected %v, got %v", ErrAuthNotEnabled, err)
}
}
@@ -648,16 +943,13 @@ func TestIsAuthEnabled(t *testing.T) {
}
}
-// TestAuthRevisionRace ensures that access to authStore.revision is thread-safe.
+// TestAuthInfoFromCtxRace ensures that access to authStore.revision is thread-safe.
func TestAuthInfoFromCtxRace(t *testing.T) {
- b, _ := betesting.NewDefaultTmpBackend(t)
- defer betesting.Close(t, b)
-
- tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
+ tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
if err != nil {
t.Fatal(err)
}
- as := NewAuthStore(zap.NewExample(), b, tp, bcrypt.MinCost)
+ as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, bcrypt.MinCost)
defer as.Close()
donec := make(chan struct{})
@@ -681,19 +973,19 @@ func TestIsAdminPermitted(t *testing.T) {
// invalid user
err = as.IsAdminPermitted(&AuthInfo{Username: "rooti", Revision: 1})
- if err != ErrUserNotFound {
+ if !errors.Is(err, ErrUserNotFound) {
t.Errorf("expected %v, got %v", ErrUserNotFound, err)
}
// empty user
err = as.IsAdminPermitted(&AuthInfo{Username: "", Revision: 1})
- if err != ErrUserEmpty {
+ if !errors.Is(err, ErrUserEmpty) {
t.Errorf("expected %v, got %v", ErrUserEmpty, err)
}
// non-admin user
err = as.IsAdminPermitted(&AuthInfo{Username: "foo", Revision: 1})
- if err != ErrPermissionDenied {
+ if !errors.Is(err, ErrPermissionDenied) {
t.Errorf("expected %v, got %v", ErrPermissionDenied, err)
}
@@ -714,23 +1006,23 @@ func TestRecoverFromSnapshot(t *testing.T) {
if err == nil {
t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err)
}
- if err != ErrUserAlreadyExist {
+ if !errors.Is(err, ErrUserAlreadyExist) {
t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err)
}
ua = &pb.AuthUserAddRequest{Name: "", Options: &authpb.UserAddOptions{NoPassword: false}}
_, err = as.UserAdd(ua) // add a user with empty name
- if err != ErrUserEmpty {
+ if !errors.Is(err, ErrUserEmpty) {
t.Fatal(err)
}
as.Close()
- tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
+ tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
if err != nil {
t.Fatal(err)
}
- as2 := NewAuthStore(zap.NewExample(), as.be, tp, bcrypt.MinCost)
+ as2 := NewAuthStore(zaptest.NewLogger(t), as.be, tp, bcrypt.MinCost)
defer as2.Close()
if !as2.IsAuthEnabled() {
@@ -803,15 +1095,12 @@ func TestHammerSimpleAuthenticate(t *testing.T) {
// TestRolesOrder tests authpb.User.Roles is sorted
func TestRolesOrder(t *testing.T) {
- b, _ := betesting.NewDefaultTmpBackend(t)
- defer betesting.Close(t, b)
-
- tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
+ tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
defer tp.disable()
if err != nil {
t.Fatal(err)
}
- as := NewAuthStore(zap.NewExample(), b, tp, bcrypt.MinCost)
+ as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, bcrypt.MinCost)
defer as.Close()
err = enableAuthAndCreateRoot(as)
if err != nil {
@@ -860,14 +1149,11 @@ func TestAuthInfoFromCtxWithRootJWT(t *testing.T) {
// testAuthInfoFromCtxWithRoot ensures "WithRoot" properly embeds token in the context.
func testAuthInfoFromCtxWithRoot(t *testing.T, opts string) {
- b, _ := betesting.NewDefaultTmpBackend(t)
- defer betesting.Close(t, b)
-
- tp, err := NewTokenProvider(zap.NewExample(), opts, dummyIndexWaiter, simpleTokenTTLDefault)
+ tp, err := NewTokenProvider(zaptest.NewLogger(t), opts, dummyIndexWaiter, simpleTokenTTLDefault)
if err != nil {
t.Fatal(err)
}
- as := NewAuthStore(zap.NewExample(), b, tp, bcrypt.MinCost)
+ as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, bcrypt.MinCost)
defer as.Close()
if err = enableAuthAndCreateRoot(as); err != nil {
@@ -879,10 +1165,10 @@ func testAuthInfoFromCtxWithRoot(t *testing.T, opts string) {
ai, aerr := as.AuthInfoFromCtx(ctx)
if aerr != nil {
- t.Error(err)
+ t.Fatal(err)
}
if ai == nil {
- t.Error("expected non-nil *AuthInfo")
+ t.Fatal("expected non-nil *AuthInfo")
}
if ai.Username != "root" {
t.Errorf("expected user name 'root', got %+v", ai)
@@ -902,7 +1188,7 @@ func TestUserNoPasswordAdd(t *testing.T) {
ctx := context.WithValue(context.WithValue(context.TODO(), AuthenticateParamIndex{}, uint64(1)), AuthenticateParamSimpleTokenPrefix{}, "dummy")
_, err = as.Authenticate(ctx, username, "")
- if err != ErrAuthFailed {
+ if !errors.Is(err, ErrAuthFailed) {
t.Fatalf("expected %v, got %v", ErrAuthFailed, err)
}
}
@@ -944,7 +1230,7 @@ func TestUserChangePasswordWithOldLog(t *testing.T) {
if err == nil {
t.Fatalf("expected %v, got %v", ErrUserNotFound, err)
}
- if err != ErrUserNotFound {
+ if !errors.Is(err, ErrUserNotFound) {
t.Fatalf("expected %v, got %v", ErrUserNotFound, err)
}
}
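
TestUserRevokePermission now also inspects rangePermCache directly, asserting with interval-tree queries from pkg/adt that the granted write range is present and an unrelated key is not. A compact, runnable sketch of that query pattern (NewBytesAffinePoint and Contains appear in the test above; NewIntervalTree and NewBytesAffineInterval are long-standing parts of the adt package; the cache layout itself is simplified away):

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/v3/adt"
)

func main() {
	// Build a write-permission interval tree for one user and query it,
	// mirroring what the assertions in TestUserRevokePermission rely on.
	writePerms := adt.NewIntervalTree()
	writePerms.Insert(adt.NewBytesAffineInterval([]byte("WriteKeyBegin"), []byte("WriteKeyEnd")), struct{}{})

	fmt.Println(writePerms.Contains(adt.NewBytesAffinePoint([]byte("WriteKeyBegin")))) // true: inside the granted range
	fmt.Println(writePerms.Contains(adt.NewBytesAffinePoint([]byte("OutOfRange"))))    // false: not granted
}
```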
diff --git a/server/config/config.go b/server/config/config.go
index b6e2109c228..dee41b86de5 100644
--- a/server/config/config.go
+++ b/server/config/config.go
@@ -22,24 +22,33 @@ import (
"strings"
"time"
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+ "go.uber.org/zap"
+
+ bolt "go.etcd.io/bbolt"
"go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/pkg/v3/featuregate"
"go.etcd.io/etcd/pkg/v3/netutil"
- "go.etcd.io/etcd/server/v3/datadir"
- "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery"
+ "go.etcd.io/etcd/server/v3/storage/datadir"
+)
- bolt "go.etcd.io/bbolt"
- "go.uber.org/zap"
+const (
+ grpcOverheadBytes = 512 * 1024
)
// ServerConfig holds the configuration of etcd as taken from the command line or discovery.
type ServerConfig struct {
- Name string
+ Name string
+
DiscoveryURL string
DiscoveryProxy string
- ClientURLs types.URLs
- PeerURLs types.URLs
- DataDir string
+ DiscoveryCfg v3discovery.DiscoveryConfig
+
+ ClientURLs types.URLs
+ PeerURLs types.URLs
+ DataDir string
// DedicatedWALDir config will make the etcd to write the WAL to the WALDir
// rather than the dataDir/member/wal.
DedicatedWALDir string
@@ -51,7 +60,6 @@ type ServerConfig struct {
// We expect the follower has a millisecond level latency with the leader.
// The max throughput is around 10K. Keep a 5K entries is enough for helping
// follower to catch up.
- // WARNING: only change this for tests. Always use "DefaultSnapshotCatchUpEntries"
SnapshotCatchUpEntries uint64
MaxSnapFiles uint
@@ -114,13 +122,19 @@ type ServerConfig struct {
AutoCompactionRetention time.Duration
AutoCompactionMode string
CompactionBatchLimit int
+ CompactionSleepInterval time.Duration
QuotaBackendBytes int64
MaxTxnOps uint
// MaxRequestBytes is the maximum request size to send over raft.
MaxRequestBytes uint
- WarningApplyDuration time.Duration
+ // MaxConcurrentStreams specifies the maximum number of concurrent
+ // streams that each client can open at a time.
+ MaxConcurrentStreams uint32
+
+ WarningApplyDuration time.Duration
+ WarningUnaryRequestDuration time.Duration
StrictReconfigCheck bool
@@ -133,8 +147,9 @@ type ServerConfig struct {
// InitialCorruptCheck is true to check data corruption on boot
// before serving any peer/client traffic.
- InitialCorruptCheck bool
- CorruptCheckTime time.Duration
+ InitialCorruptCheck bool
+ CorruptCheckTime time.Duration
+ CompactHashCheckTime time.Duration
// PreVote is true to enable Raft Pre-Vote.
PreVote bool
@@ -147,10 +162,12 @@ type ServerConfig struct {
ForceNewCluster bool
- // EnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases.
+ // EnableLeaseCheckpoint enables the leader to send regular checkpoints to other members to prevent the remaining TTL from being reset on leader change.
EnableLeaseCheckpoint bool
// LeaseCheckpointInterval time.Duration is the wait duration between lease checkpoints.
LeaseCheckpointInterval time.Duration
+ // LeaseCheckpointPersist enables persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled.
+ LeaseCheckpointPersist bool
EnableGRPCGateway bool
@@ -183,8 +200,17 @@ type ServerConfig struct {
// consider running defrag during bootstrap. Needs to be set to non-zero value to take effect.
ExperimentalBootstrapDefragThresholdMegabytes uint `json:"experimental-bootstrap-defrag-threshold-megabytes"`
+ // ExperimentalMaxLearners sets a limit to the number of learner members that can exist in the cluster membership.
+ ExperimentalMaxLearners int `json:"experimental-max-learners"`
+
// V2Deprecation defines a phase of v2store deprecation process.
V2Deprecation V2DeprecationEnum `json:"v2-deprecation"`
+
+ // ExperimentalLocalAddress is the local IP address to use when communicating with a peer.
+ ExperimentalLocalAddress string `json:"experimental-local-address"`
+
+ // ServerFeatureGate is a server level feature gate
+ ServerFeatureGate featuregate.FeatureGate
}
// VerifyBootstrap sanity-checks the initial config for bootstrap case
@@ -250,7 +276,7 @@ func (c *ServerConfig) advertiseMatchesCluster() error {
initMap[url.String()] = struct{}{}
}
- missing := []string{}
+ var missing []string
for url := range initMap {
if _, ok := apMap[url]; !ok {
missing = append(missing, url)
@@ -262,7 +288,7 @@ func (c *ServerConfig) advertiseMatchesCluster() error {
}
mstr := strings.Join(missing, ",")
apStr := strings.Join(apurls, ",")
- return fmt.Errorf("--initial-cluster has %s but missing from --initial-advertise-peer-urls=%s (%v)", mstr, apStr, err)
+ return fmt.Errorf("--initial-cluster has %s but missing from --initial-advertise-peer-urls=%s (%w)", mstr, apStr, err)
}
for url := range apMap {
@@ -279,7 +305,7 @@ func (c *ServerConfig) advertiseMatchesCluster() error {
// resolved URLs from "--initial-advertise-peer-urls" and "--initial-cluster" did not match or failed
apStr := strings.Join(apurls, ",")
umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs})
- return fmt.Errorf("failed to resolve %s to match --initial-cluster=%s (%v)", apStr, umap.String(), err)
+ return fmt.Errorf("failed to resolve %s to match --initial-cluster=%s (%w)", apStr, umap.String(), err)
}
func (c *ServerConfig) MemberDir() string { return datadir.ToMemberDir(c.DataDir) }
@@ -288,12 +314,14 @@ func (c *ServerConfig) WALDir() string {
if c.DedicatedWALDir != "" {
return c.DedicatedWALDir
}
- return datadir.ToWalDir(c.DataDir)
+ return datadir.ToWALDir(c.DataDir)
}
func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") }
-func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" }
+func (c *ServerConfig) ShouldDiscover() bool {
+ return c.DiscoveryURL != "" || len(c.DiscoveryCfg.Endpoints) > 0
+}
// ReqTimeout returns timeout for request to finish.
func (c *ServerConfig) ReqTimeout() time.Duration {
@@ -333,3 +361,7 @@ func (c *ServerConfig) BootstrapTimeoutEffective() time.Duration {
}
func (c *ServerConfig) BackendPath() string { return datadir.ToBackendFileName(c.DataDir) }
+
+func (c *ServerConfig) MaxRequestBytesWithOverhead() uint {
+ return c.MaxRequestBytes + grpcOverheadBytes
+}
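
config.go gains a fixed grpcOverheadBytes constant and a MaxRequestBytesWithOverhead helper that pads the configured request limit by it. With the embed default of 1.5 MiB (DefaultMaxRequestBytes in the embed/config.go hunk below), the effective gRPC cap works out to exactly 2 MiB; a quick arithmetic check under those assumptions:

```go
package main

import "fmt"

const (
	grpcOverheadBytes      = 512 * 1024        // from server/config/config.go above
	defaultMaxRequestBytes = 1.5 * 1024 * 1024 // embed.DefaultMaxRequestBytes, see embed/config.go below
)

func main() {
	effective := uint(defaultMaxRequestBytes) + grpcOverheadBytes
	fmt.Println(effective)          // 2097152
	fmt.Println(effective == 2<<20) // true: exactly 2 MiB
}
```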
diff --git a/server/config/config_test.go b/server/config/config_test.go
index 24ae5eb55a1..069dc9e1315 100644
--- a/server/config/config_test.go
+++ b/server/config/config_test.go
@@ -18,9 +18,9 @@ import (
"net/url"
"testing"
- "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.uber.org/zap/zaptest"
- "go.uber.org/zap"
+ "go.etcd.io/etcd/client/pkg/v3/types"
)
func mustNewURLs(t *testing.T, urls []string) []url.URL {
@@ -39,7 +39,7 @@ func TestConfigVerifyBootstrapWithoutClusterAndDiscoveryURLFail(t *testing.T) {
Name: "node1",
DiscoveryURL: "",
InitialPeerURLsMap: types.URLsMap{},
- Logger: zap.NewExample(),
+ Logger: zaptest.NewLogger(t),
}
if err := c.VerifyBootstrap(); err == nil {
t.Errorf("err = nil, want not nil")
@@ -57,7 +57,7 @@ func TestConfigVerifyExistingWithDiscoveryURLFail(t *testing.T) {
PeerURLs: mustNewURLs(t, []string{"http://127.0.0.1:2380"}),
InitialPeerURLsMap: cluster,
NewCluster: false,
- Logger: zap.NewExample(),
+ Logger: zaptest.NewLogger(t),
}
if err := c.VerifyJoinExisting(); err == nil {
t.Errorf("err = nil, want not nil")
@@ -145,7 +145,7 @@ func TestConfigVerifyLocalMember(t *testing.T) {
cfg := ServerConfig{
Name: "node1",
InitialPeerURLsMap: cluster,
- Logger: zap.NewExample(),
+ Logger: zaptest.NewLogger(t),
}
if tt.apurls != nil {
cfg.PeerURLs = mustNewURLs(t, tt.apurls)
@@ -170,7 +170,7 @@ func TestSnapDir(t *testing.T) {
for dd, w := range tests {
cfg := ServerConfig{
DataDir: dd,
- Logger: zap.NewExample(),
+ Logger: zaptest.NewLogger(t),
}
if g := cfg.SnapDir(); g != w {
t.Errorf("DataDir=%q: SnapDir()=%q, want=%q", dd, g, w)
@@ -186,7 +186,7 @@ func TestWALDir(t *testing.T) {
for dd, w := range tests {
cfg := ServerConfig{
DataDir: dd,
- Logger: zap.NewExample(),
+ Logger: zaptest.NewLogger(t),
}
if g := cfg.WALDir(); g != w {
t.Errorf("DataDir=%q: WALDir()=%q, want=%q", dd, g, w)
@@ -203,7 +203,7 @@ func TestShouldDiscover(t *testing.T) {
for durl, w := range tests {
cfg := ServerConfig{
DiscoveryURL: durl,
- Logger: zap.NewExample(),
+ Logger: zaptest.NewLogger(t),
}
if g := cfg.ShouldDiscover(); g != w {
t.Errorf("durl=%q: ShouldDiscover()=%t, want=%t", durl, g, w)
diff --git a/server/config/v2_deprecation.go b/server/config/v2_deprecation.go
index 828bd9a8f43..c50401cc937 100644
--- a/server/config/v2_deprecation.go
+++ b/server/config/v2_deprecation.go
@@ -17,18 +17,52 @@ package config
type V2DeprecationEnum string
const (
- // Default in v3.5. Issues a warning if v2store have meaningful content.
- V2_DEPR_0_NOT_YET = V2DeprecationEnum("not-yet")
+ // V2Depr0NotYet means v2store isn't deprecated yet.
+ // Default in v3.5, and no longer supported in v3.6.
+ V2Depr0NotYet = V2DeprecationEnum("not-yet")
+
+ // Deprecated: to be decommissioned in 3.7. Please use V2Depr0NotYet.
+ // TODO: remove in 3.7
+ //revive:disable-next-line:var-naming
+ V2_DEPR_0_NOT_YET = V2Depr0NotYet
+
+ // V2Depr1WriteOnly means only writing v2store is allowed.
// Default in v3.6. Meaningful v2 state is not allowed.
// The V2 files are maintained for v3.5 rollback.
- V2_DEPR_1_WRITE_ONLY = V2DeprecationEnum("write-only")
- // V2store is WIPED if found !!!
- V2_DEPR_1_WRITE_ONLY_DROP = V2DeprecationEnum("write-only-drop-data")
- // V2store is neither written nor read. Usage of this configuration is blocking
+ V2Depr1WriteOnly = V2DeprecationEnum("write-only")
+
+ // Deprecated: to be decommissioned in 3.7. Please use V2Depr1WriteOnly.
+ // TODO: remove in 3.7
+ //revive:disable-next-line:var-naming
+ V2_DEPR_1_WRITE_ONLY = V2Depr1WriteOnly
+
+ // V2Depr1WriteOnlyDrop means v2store is WIPED if found !!!
+ // Will be default in 3.7.
+ V2Depr1WriteOnlyDrop = V2DeprecationEnum("write-only-drop-data")
+
+ // Deprecated: to be decommissioned in 3.7. Please use V2Depr1WriteOnlyDrop.
+ // TODO: remove in 3.7
+ //revive:disable-next-line:var-naming
+ V2_DEPR_1_WRITE_ONLY_DROP = V2Depr1WriteOnlyDrop
+
+ // V2Depr2Gone means v2store is completely gone. The v2store is
+ // neither written nor read. Anything related to v2store will be
+ // cleaned up in v3.8. Usage of this configuration is blocking
// ability to rollback to etcd v3.5.
- V2_DEPR_2_GONE = V2DeprecationEnum("gone")
+ V2Depr2Gone = V2DeprecationEnum("gone")
+
+ // Deprecated: to be decommissioned in 3.7. Please use V2Depr2Gone.
+ // TODO: remove in 3.7
+ //revive:disable-next-line:var-naming
+ V2_DEPR_2_GONE = V2Depr2Gone
+
+ // V2DeprDefault is the default deprecation level.
+ V2DeprDefault = V2Depr1WriteOnly
- V2_DEPR_DEFAULT = V2_DEPR_0_NOT_YET
+ // Deprecated: to be decommissioned in 3.7. Please use V2DeprDefault.
+ // TODO: remove in 3.7
+ //revive:disable-next-line:var-naming
+ V2_DEPR_DEFAULT = V2DeprDefault
)
func (e V2DeprecationEnum) IsAtLeast(v2d V2DeprecationEnum) bool {
@@ -37,13 +71,13 @@ func (e V2DeprecationEnum) IsAtLeast(v2d V2DeprecationEnum) bool {
func (e V2DeprecationEnum) level() int {
switch e {
- case V2_DEPR_0_NOT_YET:
+ case V2Depr0NotYet:
return 0
- case V2_DEPR_1_WRITE_ONLY:
+ case V2Depr1WriteOnly:
return 1
- case V2_DEPR_1_WRITE_ONLY_DROP:
+ case V2Depr1WriteOnlyDrop:
return 2
- case V2_DEPR_2_GONE:
+ case V2Depr2Gone:
return 3
}
panic("Unknown V2DeprecationEnum: " + e)
diff --git a/server/config/v2_deprecation_test.go b/server/config/v2_deprecation_test.go
index c8d911d6076..76d2fb62c31 100644
--- a/server/config/v2_deprecation_test.go
+++ b/server/config/v2_deprecation_test.go
@@ -22,14 +22,14 @@ func TestV2DeprecationEnum_IsAtLeast(t *testing.T) {
v2d V2DeprecationEnum
want bool
}{
- {V2_DEPR_0_NOT_YET, V2_DEPR_0_NOT_YET, true},
- {V2_DEPR_0_NOT_YET, V2_DEPR_1_WRITE_ONLY_DROP, false},
- {V2_DEPR_0_NOT_YET, V2_DEPR_2_GONE, false},
- {V2_DEPR_2_GONE, V2_DEPR_1_WRITE_ONLY_DROP, true},
- {V2_DEPR_2_GONE, V2_DEPR_0_NOT_YET, true},
- {V2_DEPR_2_GONE, V2_DEPR_2_GONE, true},
- {V2_DEPR_1_WRITE_ONLY, V2_DEPR_1_WRITE_ONLY_DROP, false},
- {V2_DEPR_1_WRITE_ONLY_DROP, V2_DEPR_1_WRITE_ONLY, true},
+ {V2Depr0NotYet, V2Depr0NotYet, true},
+ {V2Depr0NotYet, V2Depr1WriteOnlyDrop, false},
+ {V2Depr0NotYet, V2Depr2Gone, false},
+ {V2Depr2Gone, V2Depr1WriteOnlyDrop, true},
+ {V2Depr2Gone, V2Depr0NotYet, true},
+ {V2Depr2Gone, V2Depr2Gone, true},
+ {V2Depr1WriteOnly, V2Depr1WriteOnlyDrop, false},
+ {V2Depr1WriteOnlyDrop, V2Depr1WriteOnly, true},
}
for _, tt := range tests {
t.Run(string(tt.e)+" >= "+string(tt.v2d), func(t *testing.T) {
diff --git a/server/datadir/datadir_test.go b/server/datadir/datadir_test.go
deleted file mode 100644
index f6fe19b1c01..00000000000
--- a/server/datadir/datadir_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package datadir_test
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
- "go.etcd.io/etcd/server/v3/datadir"
-)
-
-func TestToBackendFileName(t *testing.T) {
- result := datadir.ToBackendFileName("/dir/data-dir")
- assert.Equal(t, "/dir/data-dir/member/snap/db", result)
-}
-
-func TestToMemberDir(t *testing.T) {
- result := datadir.ToMemberDir("/dir/data-dir")
- assert.Equal(t, "/dir/data-dir/member", result)
-}
-
-func TestToSnapDir(t *testing.T) {
- result := datadir.ToSnapDir("/dir/data-dir")
- assert.Equal(t, "/dir/data-dir/member/snap", result)
-}
-
-func TestToWalDir(t *testing.T) {
- result := datadir.ToWalDir("/dir/data-dir")
- assert.Equal(t, "/dir/data-dir/member/wal", result)
-}
-
-func TestToWalDirSlash(t *testing.T) {
- result := datadir.ToWalDir("/dir/data-dir/")
- assert.Equal(t, "/dir/data-dir/member/wal", result)
-}
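
These tests are removed here because the datadir package now lives under server/v3/storage/datadir (see the import change in server/config/config.go above) and ToWalDir is renamed to ToWALDir. A minimal sketch of the equivalent assertions against the relocated package, assuming the remaining helper names carried over unchanged apart from that rename:

```go
package datadir_test

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"go.etcd.io/etcd/server/v3/storage/datadir"
)

// TestRelocatedDatadirHelpers mirrors the deleted assertions above against the new package path.
func TestRelocatedDatadirHelpers(t *testing.T) {
	assert.Equal(t, "/dir/data-dir/member", datadir.ToMemberDir("/dir/data-dir"))
	assert.Equal(t, "/dir/data-dir/member/snap/db", datadir.ToBackendFileName("/dir/data-dir"))
	assert.Equal(t, "/dir/data-dir/member/wal", datadir.ToWALDir("/dir/data-dir"))
}
```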
diff --git a/server/embed/auth_test.go b/server/embed/auth_test.go
index f2fb76ef14f..a09e618f66c 100644
--- a/server/embed/auth_test.go
+++ b/server/embed/auth_test.go
@@ -16,19 +16,13 @@ package embed
import (
"context"
- "io/ioutil"
- "os"
"testing"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3client"
)
func TestEnableAuth(t *testing.T) {
- tdir, err := ioutil.TempDir(os.TempDir(), "auth-test")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(tdir)
+ tdir := t.TempDir()
cfg := NewConfig()
cfg.Dir = tdir
e, err := StartEtcd(cfg)
diff --git a/server/embed/config.go b/server/embed/config.go
index 380c0c3aaa8..3a382056834 100644
--- a/server/embed/config.go
+++ b/server/embed/config.go
@@ -15,10 +15,14 @@
package embed
import (
+ "crypto/tls"
+ "errors"
+ "flag"
"fmt"
- "io/ioutil"
+ "math"
"net"
"net/http"
+ "net/netip"
"net/url"
"os"
"path/filepath"
@@ -26,39 +30,54 @@ import (
"sync"
"time"
+ "go.uber.org/zap"
+ "golang.org/x/crypto/bcrypt"
+ "google.golang.org/grpc"
+ "sigs.k8s.io/yaml"
+
+ bolt "go.etcd.io/bbolt"
"go.etcd.io/etcd/client/pkg/v3/logutil"
"go.etcd.io/etcd/client/pkg/v3/srv"
"go.etcd.io/etcd/client/pkg/v3/tlsutil"
"go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/client/pkg/v3/types"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/featuregate"
"go.etcd.io/etcd/pkg/v3/flags"
"go.etcd.io/etcd/pkg/v3/netutil"
"go.etcd.io/etcd/server/v3/config"
"go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor"
-
- bolt "go.etcd.io/bbolt"
- "go.uber.org/multierr"
- "go.uber.org/zap"
- "golang.org/x/crypto/bcrypt"
- "google.golang.org/grpc"
- "sigs.k8s.io/yaml"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery"
+ "go.etcd.io/etcd/server/v3/features"
)
const (
ClusterStateFlagNew = "new"
ClusterStateFlagExisting = "existing"
- DefaultName = "default"
- DefaultMaxSnapshots = 5
- DefaultMaxWALs = 5
- DefaultMaxTxnOps = uint(128)
- DefaultWarningApplyDuration = 100 * time.Millisecond
- DefaultMaxRequestBytes = 1.5 * 1024 * 1024
- DefaultGRPCKeepAliveMinTime = 5 * time.Second
- DefaultGRPCKeepAliveInterval = 2 * time.Hour
- DefaultGRPCKeepAliveTimeout = 20 * time.Second
- DefaultDowngradeCheckTime = 5 * time.Second
+ DefaultName = "default"
+ DefaultMaxSnapshots = 5
+ DefaultMaxWALs = 5
+ DefaultMaxTxnOps = uint(128)
+ DefaultWarningApplyDuration = 100 * time.Millisecond
+ DefaultWarningUnaryRequestDuration = 300 * time.Millisecond
+ DefaultMaxRequestBytes = 1.5 * 1024 * 1024
+ DefaultMaxConcurrentStreams = math.MaxUint32
+ DefaultGRPCKeepAliveMinTime = 5 * time.Second
+ DefaultGRPCKeepAliveInterval = 2 * time.Hour
+ DefaultGRPCKeepAliveTimeout = 20 * time.Second
+ DefaultDowngradeCheckTime = 5 * time.Second
+ DefaultAutoCompactionMode = "periodic"
+ DefaultAuthToken = "simple"
+ DefaultCompactHashCheckTime = time.Minute
+
+ DefaultDiscoveryDialTimeout = 2 * time.Second
+ DefaultDiscoveryRequestTimeOut = 5 * time.Second
+ DefaultDiscoveryKeepAliveTime = 2 * time.Second
+ DefaultDiscoveryKeepAliveTimeOut = 6 * time.Second
DefaultListenPeerURLs = "http://localhost:2380"
DefaultListenClientURLs = "http://localhost:2379"
@@ -85,20 +104,19 @@ const (
// DefaultStrictReconfigCheck is the default value for "--strict-reconfig-check" flag.
// It's enabled by default.
DefaultStrictReconfigCheck = true
- // DefaultEnableV2 is the default value for "--enable-v2" flag.
- // v2 API is disabled by default.
- DefaultEnableV2 = false
// maxElectionMs specifies the maximum value of election timeout.
- // More details are listed in ../Documentation/tuning.md#time-parameters.
+ // More details are listed on etcd.io/docs > version > tuning/#time-parameters
maxElectionMs = 50000
// backend freelist map type
freelistArrayType = "array"
+
+ ServerFeatureGateFlagName = "feature-gates"
)
var (
ErrConflictBootstrapFlags = fmt.Errorf("multiple discovery or bootstrap flags are set. " +
- "Choose one of \"initial-cluster\", \"discovery\" or \"discovery-srv\"")
+ "Choose one of \"initial-cluster\", \"discovery\", \"discovery-endpoints\" or \"discovery-srv\"")
ErrUnsetAdvertiseClientURLsFlag = fmt.Errorf("--advertise-client-urls is required when --listen-client-urls is set explicitly")
ErrLogRotationInvalidLogOutput = fmt.Errorf("--log-outputs requires a single file path when --log-rotate-config-json is defined")
@@ -110,6 +128,13 @@ var (
// indirection for testing
getCluster = srv.GetCluster
+
+ // In 3.6, we are migrating all the --experimental flags to feature gates and flags without the prefix.
+ // This is the mapping from the non-boolean `experimental-` flags to the new flags.
+ // TODO: delete in v3.7
+ experimentalNonBoolFlagMigrationMap = map[string]string{
+ "experimental-compact-hash-check-time": "compact-hash-check-time",
+ }
)
var (
@@ -135,10 +160,13 @@ func init() {
// Config holds the arguments for configuring an etcd server.
type Config struct {
- Name string `json:"name"`
- Dir string `json:"data-dir"`
+ Name string `json:"name"`
+ Dir string `json:"data-dir"`
+ //revive:disable-next-line:var-naming
WalDir string `json:"wal-dir"`
+ // SnapshotCount is deprecated in v3.6 and will be decommissioned in v3.7.
+ // TODO: remove it in 3.7.
SnapshotCount uint64 `json:"snapshot-count"`
// SnapshotCatchUpEntries is the number of entries for a slow follower
@@ -146,12 +174,13 @@ type Config struct {
// We expect the follower has a millisecond level latency with the leader.
// The max throughput is around 10K. Keep a 5K entries is enough for helping
// follower to catch up.
- // WARNING: only change this for tests.
- // Always use "DefaultSnapshotCatchUpEntries"
- SnapshotCatchUpEntries uint64
+ SnapshotCatchUpEntries uint64 `json:"experimental-snapshot-catch-up-entries"`
+ // MaxSnapFiles is deprecated in v3.6 and will be decommissioned in v3.7.
+ // TODO: remove it in 3.7.
MaxSnapFiles uint `json:"max-snapshots"`
- MaxWalFiles uint `json:"max-wals"`
+ //revive:disable-next-line:var-naming
+ MaxWalFiles uint `json:"max-wals"`
// TickMs is the number of milliseconds between heartbeat ticks.
// TODO: decouple tickMs and heartbeat tick (current heartbeat tick = 1).
@@ -198,35 +227,54 @@ type Config struct {
MaxTxnOps uint `json:"max-txn-ops"`
MaxRequestBytes uint `json:"max-request-bytes"`
- LPUrls, LCUrls []url.URL
- APUrls, ACUrls []url.URL
- ClientTLSInfo transport.TLSInfo
- ClientAutoTLS bool
- PeerTLSInfo transport.TLSInfo
- PeerAutoTLS bool
+ // MaxConcurrentStreams specifies the maximum number of concurrent
+ // streams that each client can open at a time.
+ MaxConcurrentStreams uint32 `json:"max-concurrent-streams"`
+
+ //revive:disable:var-naming
+ ListenPeerUrls, ListenClientUrls, ListenClientHttpUrls []url.URL
+ AdvertisePeerUrls, AdvertiseClientUrls []url.URL
+ //revive:enable:var-naming
+
+ ClientTLSInfo transport.TLSInfo
+ ClientAutoTLS bool
+ PeerTLSInfo transport.TLSInfo
+ PeerAutoTLS bool
+
+ // ExperimentalSetMemberLocalAddr enables using the first specified and
+ // non-loopback local address from initial-advertise-peer-urls as the local
+ // address when communicating with a peer.
+ ExperimentalSetMemberLocalAddr bool `json:"experimental-set-member-localaddr"`
+
// SelfSignedCertValidity specifies the validity period of the client and peer certificates
// that are automatically generated by etcd when you specify ClientAutoTLS and PeerAutoTLS,
// the unit is year, and the default is 1
- SelfSignedCertValidity uint
+ SelfSignedCertValidity uint `json:"self-signed-cert-validity"`
// CipherSuites is a list of supported TLS cipher suites between
// client/server and peers. If empty, Go auto-populates the list.
// Note that cipher suites are prioritized in the given order.
CipherSuites []string `json:"cipher-suites"`
+ // TlsMinVersion is the minimum accepted TLS version between client/server and peers.
+ //revive:disable-next-line:var-naming
+ TlsMinVersion string `json:"tls-min-version"`
+
+ // TlsMaxVersion is the maximum accepted TLS version between client/server and peers.
+ //revive:disable-next-line:var-naming
+ TlsMaxVersion string `json:"tls-max-version"`
+
ClusterState string `json:"initial-cluster-state"`
DNSCluster string `json:"discovery-srv"`
DNSClusterServiceName string `json:"discovery-srv-name"`
Dproxy string `json:"discovery-proxy"`
- Durl string `json:"discovery"`
- InitialCluster string `json:"initial-cluster"`
- InitialClusterToken string `json:"initial-cluster-token"`
- StrictReconfigCheck bool `json:"strict-reconfig-check"`
- // EnableV2 exposes the deprecated V2 API surface.
- // TODO: Delete in 3.6 (https://github.com/etcd-io/etcd/issues/12913)
- // Deprecated in 3.5.
- EnableV2 bool `json:"enable-v2"`
+ Durl string `json:"discovery"`
+ DiscoveryCfg v3discovery.DiscoveryConfig `json:"discovery-config"`
+
+ InitialCluster string `json:"initial-cluster"`
+ InitialClusterToken string `json:"initial-cluster-token"`
+ StrictReconfigCheck bool `json:"strict-reconfig-check"`
// AutoCompactionMode is either 'periodic' or 'revision'.
AutoCompactionMode string `json:"auto-compaction-mode"`
@@ -251,8 +299,15 @@ type Config struct {
// before closing a non-responsive connection. 0 to disable.
GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"`
+ // GRPCAdditionalServerOptions is the additional server option hook
+ // for changing the default internal gRPC configuration. Note these
+ // additional configurations take precedence over the existing individual
+ // configurations if present. Please refer to
+ // https://github.com/etcd-io/etcd/pull/14066#issuecomment-1248682996
+ GRPCAdditionalServerOptions []grpc.ServerOption `json:"grpc-additional-server-options"`
+
// SocketOpts are socket options passed to listener config.
- SocketOpts transport.SocketOpts
+ SocketOpts transport.SocketOpts `json:"socket-options"`
// PreVote is true to enable Raft Pre-Vote.
// If enabled, Raft runs an additional election phase
@@ -295,7 +350,7 @@ type Config struct {
UserHandlers map[string]http.Handler `json:"-"`
// ServiceRegister is for registering users' gRPC services. A simple usage example:
// cfg := embed.NewConfig()
- // cfg.ServerRegister = func(s *grpc.Server) {
+ // cfg.ServiceRegister = func(s *grpc.Server) {
// pb.RegisterFooServer(s, &fooServer{})
// pb.RegisterBarServer(s, &barServer{})
// }
@@ -305,18 +360,31 @@ type Config struct {
AuthToken string `json:"auth-token"`
BcryptCost uint `json:"bcrypt-cost"`
- //The AuthTokenTTL in seconds of the simple token
+ // AuthTokenTTL in seconds of the simple token
AuthTokenTTL uint `json:"auth-token-ttl"`
ExperimentalInitialCorruptCheck bool `json:"experimental-initial-corrupt-check"`
ExperimentalCorruptCheckTime time.Duration `json:"experimental-corrupt-check-time"`
- // ExperimentalEnableV2V3 configures URLs that expose deprecated V2 API working on V3 store.
- // Deprecated in v3.5.
- // TODO: Delete in v3.6 (https://github.com/etcd-io/etcd/issues/12913)
- ExperimentalEnableV2V3 string `json:"experimental-enable-v2v3"`
- // ExperimentalEnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases.
- ExperimentalEnableLeaseCheckpoint bool `json:"experimental-enable-lease-checkpoint"`
- ExperimentalCompactionBatchLimit int `json:"experimental-compaction-batch-limit"`
+ // ExperimentalCompactHashCheckEnabled enables leader to periodically check followers compaction hashes.
+ // Deprecated in v3.6 and will be decommissioned in v3.7.
+ // TODO: delete in v3.7
+ ExperimentalCompactHashCheckEnabled bool `json:"experimental-compact-hash-check-enabled"`
+ // ExperimentalCompactHashCheckTime is the duration of time between leader checks followers compaction hashes.
+ // Deprecated in v3.6 and will be decommissioned in v3.7.
+ // TODO: delete in v3.7
+ ExperimentalCompactHashCheckTime time.Duration `json:"experimental-compact-hash-check-time"`
+ CompactHashCheckTime time.Duration `json:"compact-hash-check-time"`
+
+ // ExperimentalEnableLeaseCheckpoint enables the leader to send regular checkpoints to other members to prevent the remaining TTL from being reset on leader change.
+ ExperimentalEnableLeaseCheckpoint bool `json:"experimental-enable-lease-checkpoint"`
+ // ExperimentalEnableLeaseCheckpointPersist enables persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled.
+ // Requires experimental-enable-lease-checkpoint to be enabled.
+ // Deprecated in v3.6.
+ // TODO: Delete in v3.7
+ ExperimentalEnableLeaseCheckpointPersist bool `json:"experimental-enable-lease-checkpoint-persist"`
+ ExperimentalCompactionBatchLimit int `json:"experimental-compaction-batch-limit"`
+ // ExperimentalCompactionSleepInterval is the sleep interval between every etcd compaction loop.
+ ExperimentalCompactionSleepInterval time.Duration `json:"experimental-compaction-sleep-interval"`
ExperimentalWatchProgressNotifyInterval time.Duration `json:"experimental-watch-progress-notify-interval"`
// ExperimentalWarningApplyDuration is the time duration after which a warning is generated if applying request
// takes more time than this value.
@@ -324,6 +392,13 @@ type Config struct {
// ExperimentalBootstrapDefragThresholdMegabytes is the minimum number of megabytes needed to be freed for etcd server to
// consider running defrag during bootstrap. Needs to be set to non-zero value to take effect.
ExperimentalBootstrapDefragThresholdMegabytes uint `json:"experimental-bootstrap-defrag-threshold-megabytes"`
+ // WarningUnaryRequestDuration is the time duration after which a warning is generated if applying
+ // a unary request takes more time than this value.
+ WarningUnaryRequestDuration time.Duration `json:"warning-unary-request-duration"`
+ // ExperimentalWarningUnaryRequestDuration is deprecated, please use WarningUnaryRequestDuration instead.
+ ExperimentalWarningUnaryRequestDuration time.Duration `json:"experimental-warning-unary-request-duration"`
+ // ExperimentalMaxLearners sets a limit to the number of learner members that can exist in the cluster membership.
+ ExperimentalMaxLearners int `json:"experimental-max-learners"`
// ForceNewCluster starts a new cluster even if previously started; unsafe.
ForceNewCluster bool `json:"force-new-cluster"`
@@ -346,12 +421,17 @@ type Config struct {
// that exist at the same time.
// Can only be used if ExperimentalEnableDistributedTracing is true.
ExperimentalDistributedTracingServiceInstanceID string `json:"experimental-distributed-tracing-instance-id"`
+ // ExperimentalDistributedTracingSamplingRatePerMillion is the number of samples to collect per million spans.
+ // Defaults to 0.
+ ExperimentalDistributedTracingSamplingRatePerMillion int `json:"experimental-distributed-tracing-sampling-rate"`
// Logger is logger options: currently only supports "zap".
// "capnslog" is removed in v3.5.
Logger string `json:"logger"`
// LogLevel configures log level. Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
LogLevel string `json:"log-level"`
+ // LogFormat sets the log encoding. Only supports json or console. Default is 'json'.
+ LogFormat string `json:"log-format"`
// LogOutputs is either:
// - "default" as os.Stderr,
// - "stderr" as os.Stderr,
@@ -392,8 +472,18 @@ type Config struct {
// ExperimentalTxnModeWriteWithSharedBuffer enables write transaction to use a shared buffer in its readonly check operations.
ExperimentalTxnModeWriteWithSharedBuffer bool `json:"experimental-txn-mode-write-with-shared-buffer"`
- // V2Deprecation describes phase of API & Storage V2 support
+ // ExperimentalStopGRPCServiceOnDefrag enables etcd gRPC service to stop serving client requests on defragmentation.
+ ExperimentalStopGRPCServiceOnDefrag bool `json:"experimental-stop-grpc-service-on-defrag"`
+
+ // V2Deprecation describes phase of API & Storage V2 support.
+ // Deprecated and scheduled for removal in v3.8.
+ // Do not set this field for embedded use cases, as it has no effect. However, setting it will not cause any harm.
V2Deprecation config.V2DeprecationEnum `json:"v2-deprecation"`
+
+ // ServerFeatureGate is a server level feature gate
+ ServerFeatureGate featuregate.FeatureGate
+ // FlagsExplicitlySet stores if a flag is explicitly set from the cmd line or config file.
+ FlagsExplicitlySet map[string]bool
}
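For embedded users, the new fields above are plain struct members and can be set before the server is started. A minimal sketch (not part of this patch), assuming the usual embed.StartEtcd entry point; the data directory and option values are illustrative only:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"

	"go.etcd.io/etcd/server/v3/embed"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Dir = "default.etcd" // illustrative data directory

	// New knobs introduced in this change (values are examples only).
	cfg.GRPCAdditionalServerOptions = []grpc.ServerOption{grpc.MaxRecvMsgSize(8 * 1024 * 1024)}
	cfg.SocketOpts.ReuseAddress = true
	cfg.WarningUnaryRequestDuration = 300 * time.Millisecond

	e, err := embed.StartEtcd(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()

	<-e.Server.ReadyNotify()
	log.Println("embedded etcd is ready")
}
```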
// configYAML holds the config suitable for yaml parsing
@@ -404,26 +494,31 @@ type configYAML struct {
// configJSON has file options that are translated into Config options
type configJSON struct {
- LPUrlsJSON string `json:"listen-peer-urls"`
- LCUrlsJSON string `json:"listen-client-urls"`
- APUrlsJSON string `json:"initial-advertise-peer-urls"`
- ACUrlsJSON string `json:"advertise-client-urls"`
+ ListenPeerURLs string `json:"listen-peer-urls"`
+ ListenClientURLs string `json:"listen-client-urls"`
+ ListenClientHTTPURLs string `json:"listen-client-http-urls"`
+ AdvertisePeerURLs string `json:"initial-advertise-peer-urls"`
+ AdvertiseClientURLs string `json:"advertise-client-urls"`
CORSJSON string `json:"cors"`
HostWhitelistJSON string `json:"host-whitelist"`
ClientSecurityJSON securityConfig `json:"client-transport-security"`
PeerSecurityJSON securityConfig `json:"peer-transport-security"`
+
+ ServerFeatureGatesJSON string `json:"feature-gates"`
}
type securityConfig struct {
- CertFile string `json:"cert-file"`
- KeyFile string `json:"key-file"`
- ClientCertFile string `json:"client-cert-file"`
- ClientKeyFile string `json:"client-key-file"`
- CertAuth bool `json:"client-cert-auth"`
- TrustedCAFile string `json:"trusted-ca-file"`
- AutoTLS bool `json:"auto-tls"`
+ CertFile string `json:"cert-file"`
+ KeyFile string `json:"key-file"`
+ ClientCertFile string `json:"client-cert-file"`
+ ClientKeyFile string `json:"client-key-file"`
+ CertAuth bool `json:"client-cert-auth"`
+ TrustedCAFile string `json:"trusted-ca-file"`
+ AutoTLS bool `json:"auto-tls"`
+ AllowedCNs []string `json:"allowed-cn"`
+ AllowedHostnames []string `json:"allowed-hostname"`
}
// NewConfig creates a new Config populated with default values.
@@ -443,34 +538,37 @@ func NewConfig() *Config {
MaxTxnOps: DefaultMaxTxnOps,
MaxRequestBytes: DefaultMaxRequestBytes,
+ MaxConcurrentStreams: DefaultMaxConcurrentStreams,
ExperimentalWarningApplyDuration: DefaultWarningApplyDuration,
GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime,
GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval,
GRPCKeepAliveTimeout: DefaultGRPCKeepAliveTimeout,
- SocketOpts: transport.SocketOpts{},
+ SocketOpts: transport.SocketOpts{
+ ReusePort: false,
+ ReuseAddress: false,
+ },
TickMs: 100,
ElectionMs: 1000,
InitialElectionTickAdvance: true,
- LPUrls: []url.URL{*lpurl},
- LCUrls: []url.URL{*lcurl},
- APUrls: []url.URL{*apurl},
- ACUrls: []url.URL{*acurl},
+ ListenPeerUrls: []url.URL{*lpurl},
+ ListenClientUrls: []url.URL{*lcurl},
+ AdvertisePeerUrls: []url.URL{*apurl},
+ AdvertiseClientUrls: []url.URL{*acurl},
ClusterState: ClusterStateFlagNew,
InitialClusterToken: "etcd-cluster",
StrictReconfigCheck: DefaultStrictReconfigCheck,
Metrics: "basic",
- EnableV2: DefaultEnableV2,
CORS: map[string]struct{}{"*": {}},
HostWhitelist: map[string]struct{}{"*": {}},
- AuthToken: "simple",
+ AuthToken: DefaultAuthToken,
BcryptCost: uint(bcrypt.DefaultCost),
AuthTokenTTL: 300,
@@ -485,16 +583,226 @@ func NewConfig() *Config {
LogRotationConfigJSON: DefaultLogRotationConfig,
EnableGRPCGateway: true,
- ExperimentalDowngradeCheckTime: DefaultDowngradeCheckTime,
- ExperimentalMemoryMlock: false,
- ExperimentalTxnModeWriteWithSharedBuffer: true,
+ ExperimentalDowngradeCheckTime: DefaultDowngradeCheckTime,
+ ExperimentalMemoryMlock: false,
+ ExperimentalStopGRPCServiceOnDefrag: false,
+ ExperimentalMaxLearners: membership.DefaultMaxLearners,
- V2Deprecation: config.V2_DEPR_DEFAULT,
+ CompactHashCheckTime: DefaultCompactHashCheckTime,
+ // TODO: delete in v3.7
+ ExperimentalCompactHashCheckTime: DefaultCompactHashCheckTime,
+
+ V2Deprecation: config.V2DeprDefault,
+
+ DiscoveryCfg: v3discovery.DiscoveryConfig{
+ ConfigSpec: clientv3.ConfigSpec{
+ DialTimeout: DefaultDiscoveryDialTimeout,
+ RequestTimeout: DefaultDiscoveryRequestTimeOut,
+ KeepAliveTime: DefaultDiscoveryKeepAliveTime,
+ KeepAliveTimeout: DefaultDiscoveryKeepAliveTimeOut,
+
+ Secure: &clientv3.SecureConfig{},
+ Auth: &clientv3.AuthConfig{},
+ },
+ },
+
+ AutoCompactionMode: DefaultAutoCompactionMode,
+ ServerFeatureGate: features.NewDefaultServerFeatureGate(DefaultName, nil),
+ FlagsExplicitlySet: map[string]bool{},
}
cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
return cfg
}
+func (cfg *Config) AddFlags(fs *flag.FlagSet) {
+ // member
+ fs.StringVar(&cfg.Dir, "data-dir", cfg.Dir, "Path to the data directory.")
+ fs.StringVar(&cfg.WalDir, "wal-dir", cfg.WalDir, "Path to the dedicated wal directory.")
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions(DefaultListenPeerURLs, ""),
+ "listen-peer-urls",
+ "List of URLs to listen on for peer traffic.",
+ )
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions(DefaultListenClientURLs, ""), "listen-client-urls",
+ "List of URLs to listen on for client grpc traffic and http as long as --listen-client-http-urls is not specified.",
+ )
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions("", ""), "listen-client-http-urls",
+ "List of URLs to listen on for http only client traffic. Enabling this flag removes http services from --listen-client-urls.",
+ )
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions("", ""),
+ "listen-metrics-urls",
+ "List of URLs to listen on for the metrics and health endpoints.",
+ )
+ fs.UintVar(&cfg.MaxSnapFiles, "max-snapshots", cfg.MaxSnapFiles, "Maximum number of snapshot files to retain (0 is unlimited). Deprecated in v3.6 and will be decommissioned in v3.7.")
+ fs.UintVar(&cfg.MaxWalFiles, "max-wals", cfg.MaxWalFiles, "Maximum number of wal files to retain (0 is unlimited).")
+ fs.StringVar(&cfg.Name, "name", cfg.Name, "Human-readable name for this member.")
+ fs.Uint64Var(&cfg.SnapshotCount, "snapshot-count", cfg.SnapshotCount, "Number of committed transactions to trigger a snapshot to disk. Deprecated in v3.6 and will be decommissioned in v3.7.")
+ fs.UintVar(&cfg.TickMs, "heartbeat-interval", cfg.TickMs, "Time (in milliseconds) of a heartbeat interval.")
+ fs.UintVar(&cfg.ElectionMs, "election-timeout", cfg.ElectionMs, "Time (in milliseconds) for an election to timeout.")
+ fs.BoolVar(&cfg.InitialElectionTickAdvance, "initial-election-tick-advance", cfg.InitialElectionTickAdvance, "Whether to fast-forward initial election ticks on boot for faster election.")
+ fs.Int64Var(&cfg.QuotaBackendBytes, "quota-backend-bytes", cfg.QuotaBackendBytes, "Raise alarms when backend size exceeds the given quota. 0 means use the default quota.")
+ fs.StringVar(&cfg.BackendFreelistType, "backend-bbolt-freelist-type", cfg.BackendFreelistType, "BackendFreelistType specifies the type of freelist that the boltdb backend uses (array and map are supported types).")
+ fs.DurationVar(&cfg.BackendBatchInterval, "backend-batch-interval", cfg.BackendBatchInterval, "BackendBatchInterval is the maximum time before committing the backend transaction.")
+ fs.IntVar(&cfg.BackendBatchLimit, "backend-batch-limit", cfg.BackendBatchLimit, "BackendBatchLimit is the maximum number of operations before committing the backend transaction.")
+ fs.UintVar(&cfg.MaxTxnOps, "max-txn-ops", cfg.MaxTxnOps, "Maximum number of operations permitted in a transaction.")
+ fs.UintVar(&cfg.MaxRequestBytes, "max-request-bytes", cfg.MaxRequestBytes, "Maximum client request size in bytes the server will accept.")
+ fs.DurationVar(&cfg.GRPCKeepAliveMinTime, "grpc-keepalive-min-time", cfg.GRPCKeepAliveMinTime, "Minimum interval duration that a client should wait before pinging server.")
+ fs.DurationVar(&cfg.GRPCKeepAliveInterval, "grpc-keepalive-interval", cfg.GRPCKeepAliveInterval, "Frequency duration of server-to-client ping to check if a connection is alive (0 to disable).")
+ fs.DurationVar(&cfg.GRPCKeepAliveTimeout, "grpc-keepalive-timeout", cfg.GRPCKeepAliveTimeout, "Additional duration of wait before closing a non-responsive connection (0 to disable).")
+ fs.BoolVar(&cfg.SocketOpts.ReusePort, "socket-reuse-port", cfg.SocketOpts.ReusePort, "Enable to set socket option SO_REUSEPORT on listeners allowing rebinding of a port already in use.")
+ fs.BoolVar(&cfg.SocketOpts.ReuseAddress, "socket-reuse-address", cfg.SocketOpts.ReuseAddress, "Enable to set socket option SO_REUSEADDR on listeners allowing binding to an address in `TIME_WAIT` state.")
+
+ fs.Var(flags.NewUint32Value(cfg.MaxConcurrentStreams), "max-concurrent-streams", "Maximum concurrent streams that each client can open at a time.")
+
+ // raft connection timeouts
+ fs.DurationVar(&rafthttp.ConnReadTimeout, "raft-read-timeout", rafthttp.DefaultConnReadTimeout, "Read timeout set on each rafthttp connection")
+ fs.DurationVar(&rafthttp.ConnWriteTimeout, "raft-write-timeout", rafthttp.DefaultConnWriteTimeout, "Write timeout set on each rafthttp connection")
+
+ // clustering
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions(DefaultInitialAdvertisePeerURLs, ""),
+ "initial-advertise-peer-urls",
+ "List of this member's peer URLs to advertise to the rest of the cluster.",
+ )
+ fs.BoolVar(&cfg.ExperimentalSetMemberLocalAddr, "experimental-set-member-localaddr", false, "Enable to have etcd use the first specified and non-loopback host from initial-advertise-peer-urls as the local address when communicating with a peer.")
+
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions(DefaultAdvertiseClientURLs, ""),
+ "advertise-client-urls",
+ "List of this member's client URLs to advertise to the public.",
+ )
+
+ fs.StringVar(&cfg.Durl, "discovery", cfg.Durl, "Discovery URL used to bootstrap the cluster for v2 discovery. Will be deprecated in v3.7, and be decommissioned in v3.8.")
+
+ fs.Var(
+ flags.NewUniqueStringsValue(""),
+ "discovery-endpoints",
+ "V3 discovery: List of gRPC endpoints of the discovery service.",
+ )
+ fs.StringVar(&cfg.DiscoveryCfg.Token, "discovery-token", "", "V3 discovery: discovery token for the etcd cluster to be bootstrapped.")
+ fs.DurationVar(&cfg.DiscoveryCfg.DialTimeout, "discovery-dial-timeout", cfg.DiscoveryCfg.DialTimeout, "V3 discovery: dial timeout for client connections.")
+ fs.DurationVar(&cfg.DiscoveryCfg.RequestTimeout, "discovery-request-timeout", cfg.DiscoveryCfg.RequestTimeout, "V3 discovery: timeout for discovery requests (excluding dial timeout).")
+ fs.DurationVar(&cfg.DiscoveryCfg.KeepAliveTime, "discovery-keepalive-time", cfg.DiscoveryCfg.KeepAliveTime, "V3 discovery: keepalive time for client connections.")
+ fs.DurationVar(&cfg.DiscoveryCfg.KeepAliveTimeout, "discovery-keepalive-timeout", cfg.DiscoveryCfg.KeepAliveTimeout, "V3 discovery: keepalive timeout for client connections.")
+ fs.BoolVar(&cfg.DiscoveryCfg.Secure.InsecureTransport, "discovery-insecure-transport", true, "V3 discovery: disable transport security for client connections.")
+ fs.BoolVar(&cfg.DiscoveryCfg.Secure.InsecureSkipVerify, "discovery-insecure-skip-tls-verify", false, "V3 discovery: skip server certificate verification (CAUTION: this option should be enabled only for testing purposes).")
+ fs.StringVar(&cfg.DiscoveryCfg.Secure.Cert, "discovery-cert", "", "V3 discovery: identify secure client using this TLS certificate file.")
+ fs.StringVar(&cfg.DiscoveryCfg.Secure.Key, "discovery-key", "", "V3 discovery: identify secure client using this TLS key file.")
+ fs.StringVar(&cfg.DiscoveryCfg.Secure.Cacert, "discovery-cacert", "", "V3 discovery: verify certificates of TLS-enabled secure servers using this CA bundle.")
+ fs.StringVar(&cfg.DiscoveryCfg.Auth.Username, "discovery-user", "", "V3 discovery: username[:password] for authentication (prompt if password is not supplied).")
+ fs.StringVar(&cfg.DiscoveryCfg.Auth.Password, "discovery-password", "", "V3 discovery: password for authentication (if this option is used, --user option shouldn't include password).")
+
+ fs.StringVar(&cfg.Dproxy, "discovery-proxy", cfg.Dproxy, "HTTP proxy to use for traffic to discovery service. Will be deprecated in v3.7, and be decommissioned in v3.8.")
+ fs.StringVar(&cfg.DNSCluster, "discovery-srv", cfg.DNSCluster, "DNS domain used to bootstrap initial cluster.")
+ fs.StringVar(&cfg.DNSClusterServiceName, "discovery-srv-name", cfg.DNSClusterServiceName, "Service name to query when using DNS discovery.")
+ fs.StringVar(&cfg.InitialCluster, "initial-cluster", cfg.InitialCluster, "Initial cluster configuration for bootstrapping.")
+ fs.StringVar(&cfg.InitialClusterToken, "initial-cluster-token", cfg.InitialClusterToken, "Initial cluster token for the etcd cluster during bootstrap.")
+ fs.BoolVar(&cfg.StrictReconfigCheck, "strict-reconfig-check", cfg.StrictReconfigCheck, "Reject reconfiguration requests that would cause quorum loss.")
+
+ fs.BoolVar(&cfg.PreVote, "pre-vote", cfg.PreVote, "Enable the raft Pre-Vote algorithm to prevent disruption when a node that has been partitioned away rejoins the cluster.")
+
+ // security
+ fs.StringVar(&cfg.ClientTLSInfo.CertFile, "cert-file", "", "Path to the client server TLS cert file.")
+ fs.StringVar(&cfg.ClientTLSInfo.KeyFile, "key-file", "", "Path to the client server TLS key file.")
+ fs.StringVar(&cfg.ClientTLSInfo.ClientCertFile, "client-cert-file", "", "Path to an explicit peer client TLS cert file otherwise cert file will be used when client auth is required.")
+ fs.StringVar(&cfg.ClientTLSInfo.ClientKeyFile, "client-key-file", "", "Path to an explicit peer client TLS key file otherwise key file will be used when client auth is required.")
+ fs.BoolVar(&cfg.ClientTLSInfo.ClientCertAuth, "client-cert-auth", false, "Enable client cert authentication.")
+ fs.StringVar(&cfg.ClientTLSInfo.CRLFile, "client-crl-file", "", "Path to the client certificate revocation list file.")
+ fs.Var(flags.NewStringsValue(""), "client-cert-allowed-hostname", "Comma-separated list of allowed SAN hostnames for client cert authentication.")
+ fs.StringVar(&cfg.ClientTLSInfo.TrustedCAFile, "trusted-ca-file", "", "Path to the client server TLS trusted CA cert file.")
+ fs.BoolVar(&cfg.ClientAutoTLS, "auto-tls", false, "Client TLS using generated certificates")
+ fs.StringVar(&cfg.PeerTLSInfo.CertFile, "peer-cert-file", "", "Path to the peer server TLS cert file.")
+ fs.StringVar(&cfg.PeerTLSInfo.KeyFile, "peer-key-file", "", "Path to the peer server TLS key file.")
+ fs.StringVar(&cfg.PeerTLSInfo.ClientCertFile, "peer-client-cert-file", "", "Path to an explicit peer client TLS cert file otherwise peer cert file will be used when client auth is required.")
+ fs.StringVar(&cfg.PeerTLSInfo.ClientKeyFile, "peer-client-key-file", "", "Path to an explicit peer client TLS key file otherwise peer key file will be used when client auth is required.")
+ fs.BoolVar(&cfg.PeerTLSInfo.ClientCertAuth, "peer-client-cert-auth", false, "Enable peer client cert authentication.")
+ fs.StringVar(&cfg.PeerTLSInfo.TrustedCAFile, "peer-trusted-ca-file", "", "Path to the peer server TLS trusted CA file.")
+ fs.BoolVar(&cfg.PeerAutoTLS, "peer-auto-tls", false, "Peer TLS using generated certificates")
+ fs.UintVar(&cfg.SelfSignedCertValidity, "self-signed-cert-validity", 1, "The validity period of the client and peer certificates, unit is year")
+ fs.StringVar(&cfg.PeerTLSInfo.CRLFile, "peer-crl-file", "", "Path to the peer certificate revocation list file.")
+ fs.Var(flags.NewStringsValue(""), "peer-cert-allowed-cn", "Comma-separated list of allowed CNs for inter-peer TLS authentication.")
+ fs.Var(flags.NewStringsValue(""), "peer-cert-allowed-hostname", "Comma-separated list of allowed SAN hostnames for inter-peer TLS authentication.")
+ fs.Var(flags.NewStringsValue(""), "cipher-suites", "Comma-separated list of supported TLS cipher suites between client/server and peers (empty will be auto-populated by Go).")
+ fs.BoolVar(&cfg.PeerTLSInfo.SkipClientSANVerify, "experimental-peer-skip-client-san-verification", false, "Skip verification of SAN field in client certificate for peer connections.")
+ fs.StringVar(&cfg.TlsMinVersion, "tls-min-version", string(tlsutil.TLSVersion12), "Minimum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3.")
+ fs.StringVar(&cfg.TlsMaxVersion, "tls-max-version", string(tlsutil.TLSVersionDefault), "Maximum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3 (empty defers to Go).")
+
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions("*", "*"),
+ "cors",
+ "Comma-separated white list of origins for CORS, or cross-origin resource sharing, (empty or * means allow all)",
+ )
+ fs.Var(flags.NewUniqueStringsValue("*"), "host-whitelist", "Comma-separated acceptable hostnames from HTTP client requests, if server is not secure (empty means allow all).")
+
+ // logging
+ fs.StringVar(&cfg.Logger, "logger", "zap", "Currently only supports 'zap' for structured logging.")
+ fs.Var(flags.NewUniqueStringsValue(DefaultLogOutput), "log-outputs", "Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd, or list of comma separated output targets.")
+ fs.StringVar(&cfg.LogLevel, "log-level", logutil.DefaultLogLevel, "Configures log level. Only supports debug, info, warn, error, panic, or fatal. Default 'info'.")
+ fs.StringVar(&cfg.LogFormat, "log-format", logutil.DefaultLogFormat, "Configures log format. Only supports json, console. Default is 'json'.")
+ fs.BoolVar(&cfg.EnableLogRotation, "enable-log-rotation", false, "Enable log rotation of a single log-outputs file target.")
+ fs.StringVar(&cfg.LogRotationConfigJSON, "log-rotation-config-json", DefaultLogRotationConfig, "Configures log rotation if enabled with a JSON logger config. Default: MaxSize=100(MB), MaxAge=0(days,no limit), MaxBackups=0(no limit), LocalTime=false(UTC), Compress=false(gzip)")
+
+ fs.StringVar(&cfg.AutoCompactionRetention, "auto-compaction-retention", "0", "Auto compaction retention for mvcc key value store. 0 means disable auto compaction.")
+ fs.StringVar(&cfg.AutoCompactionMode, "auto-compaction-mode", "periodic", "Interpret 'auto-compaction-retention' as one of: periodic|revision. 'periodic' for duration-based retention, defaulting to hours if no time unit is provided (e.g. '5m'). 'revision' for revision-number-based retention.")
+
+ // pprof profiler via HTTP
+ fs.BoolVar(&cfg.EnablePprof, "enable-pprof", false, "Enable runtime profiling data via HTTP server. Address is at client URL + \"/debug/pprof/\"")
+
+ // additional metrics
+ fs.StringVar(&cfg.Metrics, "metrics", cfg.Metrics, "Set level of detail for exported metrics, specify 'extensive' to include server side grpc histogram metrics")
+
+ // experimental distributed tracing
+ fs.BoolVar(&cfg.ExperimentalEnableDistributedTracing, "experimental-enable-distributed-tracing", false, "Enable experimental distributed tracing using OpenTelemetry Tracing.")
+ fs.StringVar(&cfg.ExperimentalDistributedTracingAddress, "experimental-distributed-tracing-address", ExperimentalDistributedTracingAddress, "Address for distributed tracing used for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag).")
+ fs.StringVar(&cfg.ExperimentalDistributedTracingServiceName, "experimental-distributed-tracing-service-name", ExperimentalDistributedTracingServiceName, "Configures service name for distributed tracing to be used to define service name for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). 'etcd' is the default service name. Use the same service name for all instances of etcd.")
+ fs.StringVar(&cfg.ExperimentalDistributedTracingServiceInstanceID, "experimental-distributed-tracing-instance-id", "", "Configures service instance ID for distributed tracing to be used to define service instance ID key for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). There is no default value set. This ID must be unique per etcd instance.")
+ fs.IntVar(&cfg.ExperimentalDistributedTracingSamplingRatePerMillion, "experimental-distributed-tracing-sampling-rate", 0, "Number of samples to collect per million spans for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag).")
+
+ // auth
+ fs.StringVar(&cfg.AuthToken, "auth-token", cfg.AuthToken, "Specify auth token specific options.")
+ fs.UintVar(&cfg.BcryptCost, "bcrypt-cost", cfg.BcryptCost, "Specify bcrypt algorithm cost factor for auth password hashing.")
+ fs.UintVar(&cfg.AuthTokenTTL, "auth-token-ttl", cfg.AuthTokenTTL, "The lifetime in seconds of the auth token.")
+
+ // gateway
+ fs.BoolVar(&cfg.EnableGRPCGateway, "enable-grpc-gateway", cfg.EnableGRPCGateway, "Enable GRPC gateway.")
+
+ // experimental
+ fs.BoolVar(&cfg.ExperimentalInitialCorruptCheck, "experimental-initial-corrupt-check", cfg.ExperimentalInitialCorruptCheck, "Enable to check data corruption before serving any client/peer traffic.")
+ fs.DurationVar(&cfg.ExperimentalCorruptCheckTime, "experimental-corrupt-check-time", cfg.ExperimentalCorruptCheckTime, "Duration of time between cluster corruption check passes.")
+ // TODO: delete in v3.7
+ fs.BoolVar(&cfg.ExperimentalCompactHashCheckEnabled, "experimental-compact-hash-check-enabled", cfg.ExperimentalCompactHashCheckEnabled, "Enable the leader to periodically check followers' compaction hashes. Deprecated in v3.6 and will be decommissioned in v3.7. Use '--feature-gates=CompactHashCheck=true' instead.")
+ fs.DurationVar(&cfg.ExperimentalCompactHashCheckTime, "experimental-compact-hash-check-time", cfg.ExperimentalCompactHashCheckTime, "Interval between the leader's checks of followers' compaction hashes. Deprecated in v3.6 and will be decommissioned in v3.7. Use --compact-hash-check-time instead.")
+
+ fs.DurationVar(&cfg.CompactHashCheckTime, "compact-hash-check-time", cfg.CompactHashCheckTime, "Interval between the leader's checks of followers' compaction hashes.")
+
+ fs.BoolVar(&cfg.ExperimentalEnableLeaseCheckpoint, "experimental-enable-lease-checkpoint", false, "Enable leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change.")
+ // TODO: delete in v3.7
+ fs.BoolVar(&cfg.ExperimentalEnableLeaseCheckpointPersist, "experimental-enable-lease-checkpoint-persist", false, "Enable persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled. Requires experimental-enable-lease-checkpoint to be enabled.")
+ fs.IntVar(&cfg.ExperimentalCompactionBatchLimit, "experimental-compaction-batch-limit", cfg.ExperimentalCompactionBatchLimit, "Sets the maximum revisions deleted in each compaction batch.")
+ fs.DurationVar(&cfg.ExperimentalCompactionSleepInterval, "experimental-compaction-sleep-interval", cfg.ExperimentalCompactionSleepInterval, "Sets the sleep interval between each compaction batch.")
+ fs.DurationVar(&cfg.ExperimentalWatchProgressNotifyInterval, "experimental-watch-progress-notify-interval", cfg.ExperimentalWatchProgressNotifyInterval, "Duration of periodic watch progress notifications.")
+ fs.DurationVar(&cfg.ExperimentalDowngradeCheckTime, "experimental-downgrade-check-time", cfg.ExperimentalDowngradeCheckTime, "Duration of time between two downgrade status checks.")
+ fs.DurationVar(&cfg.ExperimentalWarningApplyDuration, "experimental-warning-apply-duration", cfg.ExperimentalWarningApplyDuration, "Time duration after which a warning is generated if request takes more time.")
+ fs.DurationVar(&cfg.WarningUnaryRequestDuration, "warning-unary-request-duration", cfg.WarningUnaryRequestDuration, "Time duration after which a warning is generated if a unary request takes more time.")
+ fs.DurationVar(&cfg.ExperimentalWarningUnaryRequestDuration, "experimental-warning-unary-request-duration", cfg.ExperimentalWarningUnaryRequestDuration, "Time duration after which a warning is generated if a unary request takes more time. It's deprecated, and will be decommissioned in v3.7. Use --warning-unary-request-duration instead.")
+ fs.BoolVar(&cfg.ExperimentalMemoryMlock, "experimental-memory-mlock", cfg.ExperimentalMemoryMlock, "Enable to enforce etcd pages (in particular bbolt) to stay in RAM.")
+ fs.BoolVar(&cfg.ExperimentalTxnModeWriteWithSharedBuffer, "experimental-txn-mode-write-with-shared-buffer", true, "Enable the write transaction to use a shared buffer in its readonly check operations.")
+ fs.BoolVar(&cfg.ExperimentalStopGRPCServiceOnDefrag, "experimental-stop-grpc-service-on-defrag", cfg.ExperimentalStopGRPCServiceOnDefrag, "Enable etcd gRPC service to stop serving client requests on defragmentation.")
+ fs.UintVar(&cfg.ExperimentalBootstrapDefragThresholdMegabytes, "experimental-bootstrap-defrag-threshold-megabytes", 0, "Enable the defrag during etcd server bootstrap on condition that it will free at least the provided threshold of disk space. Needs to be set to non-zero value to take effect.")
+ fs.IntVar(&cfg.ExperimentalMaxLearners, "experimental-max-learners", membership.DefaultMaxLearners, "Sets the maximum number of learners that can be available in the cluster membership.")
+ fs.Uint64Var(&cfg.SnapshotCatchUpEntries, "experimental-snapshot-catchup-entries", cfg.SnapshotCatchUpEntries, "Number of entries for a slow follower to catch up after compacting the raft storage entries.")
+
+ // unsafe
+ fs.BoolVar(&cfg.UnsafeNoFsync, "unsafe-no-fsync", false, "Disables fsync, unsafe, will cause data loss.")
+ fs.BoolVar(&cfg.ForceNewCluster, "force-new-cluster", false, "Force to create a new one member cluster.")
+
+ // featuregate
+ cfg.ServerFeatureGate.(featuregate.MutableFeatureGate).AddFlag(fs, ServerFeatureGateFlagName)
+}
+
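AddFlags registers every option above on a caller-supplied flag.FlagSet, so programs embedding etcd can expose the same command-line surface. A hedged sketch of the wiring (note that the URL-valued options above are registered via fs.Var and read back separately, so only scalar fields bound directly to cfg are printed here):

```go
package main

import (
	"flag"
	"fmt"
	"os"

	"go.etcd.io/etcd/server/v3/embed"
)

func main() {
	cfg := embed.NewConfig()
	fs := flag.NewFlagSet("etcd", flag.ExitOnError)
	cfg.AddFlags(fs)

	// e.g. ./etcd --name infra0 --compact-hash-check-time 2m --socket-reuse-address
	if err := fs.Parse(os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	fmt.Println("name:", cfg.Name)
	fmt.Println("compact hash check every:", cfg.CompactHashCheckTime)
	fmt.Println("SO_REUSEADDR:", cfg.SocketOpts.ReuseAddress)
}
```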
func ConfigFromFile(path string) (*Config, error) {
cfg := &configYAML{Config: *NewConfig()}
if err := cfg.configFromFile(path); err != nil {
@@ -504,7 +812,7 @@ func ConfigFromFile(path string) (*Config, error) {
}
func (cfg *configYAML) configFromFile(path string) error {
- b, err := ioutil.ReadFile(path)
+ b, err := os.ReadFile(path)
if err != nil {
return err
}
@@ -516,40 +824,80 @@ func (cfg *configYAML) configFromFile(path string) error {
return err
}
- if cfg.LPUrlsJSON != "" {
- u, err := types.NewURLs(strings.Split(cfg.LPUrlsJSON, ","))
+ if cfg.configJSON.ServerFeatureGatesJSON != "" {
+ err = cfg.Config.ServerFeatureGate.(featuregate.MutableFeatureGate).Set(cfg.configJSON.ServerFeatureGatesJSON)
+ if err != nil {
+ return err
+ }
+ }
+
+ // parse the yaml bytes into a raw map first, so that getBoolFlagVal can read the top-level bool flag values.
+ var cfgMap map[string]interface{}
+ err = yaml.Unmarshal(b, &cfgMap)
+ if err != nil {
+ return err
+ }
+
+ for flg := range cfgMap {
+ cfg.FlagsExplicitlySet[flg] = true
+ }
+
+ getBoolFlagVal := func(flagName string) *bool {
+ flagVal, ok := cfgMap[flagName]
+ if !ok {
+ return nil
+ }
+ boolVal := flagVal.(bool)
+ return &boolVal
+ }
+ err = SetFeatureGatesFromExperimentalFlags(cfg.ServerFeatureGate, getBoolFlagVal, cfg.configJSON.ServerFeatureGatesJSON)
+ if err != nil {
+ return err
+ }
+
+ if cfg.configJSON.ListenPeerURLs != "" {
+ u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenPeerURLs, ","))
if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up listen-peer-urls: %v\n", err)
os.Exit(1)
}
- cfg.LPUrls = []url.URL(u)
+ cfg.Config.ListenPeerUrls = u
}
- if cfg.LCUrlsJSON != "" {
- u, err := types.NewURLs(strings.Split(cfg.LCUrlsJSON, ","))
+ if cfg.configJSON.ListenClientURLs != "" {
+ u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenClientURLs, ","))
if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-urls: %v\n", err)
os.Exit(1)
}
- cfg.LCUrls = []url.URL(u)
+ cfg.Config.ListenClientUrls = u
+ }
+
+ if cfg.configJSON.ListenClientHTTPURLs != "" {
+ u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenClientHTTPURLs, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-http-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.Config.ListenClientHttpUrls = u
}
- if cfg.APUrlsJSON != "" {
- u, err := types.NewURLs(strings.Split(cfg.APUrlsJSON, ","))
+ if cfg.configJSON.AdvertisePeerURLs != "" {
+ u, err := types.NewURLs(strings.Split(cfg.configJSON.AdvertisePeerURLs, ","))
if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up initial-advertise-peer-urls: %v\n", err)
os.Exit(1)
}
- cfg.APUrls = []url.URL(u)
+ cfg.Config.AdvertisePeerUrls = u
}
- if cfg.ACUrlsJSON != "" {
- u, err := types.NewURLs(strings.Split(cfg.ACUrlsJSON, ","))
+ if cfg.configJSON.AdvertiseClientURLs != "" {
+ u, err := types.NewURLs(strings.Split(cfg.configJSON.AdvertiseClientURLs, ","))
if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up advertise-peer-urls: %v\n", err)
os.Exit(1)
}
- cfg.ACUrls = []url.URL(u)
+ cfg.Config.AdvertiseClientUrls = u
}
if cfg.ListenMetricsUrlsJSON != "" {
@@ -558,7 +906,7 @@ func (cfg *configYAML) configFromFile(path string) error {
fmt.Fprintf(os.Stderr, "unexpected error setting up listen-metrics-urls: %v\n", err)
os.Exit(1)
}
- cfg.ListenMetricsUrls = []url.URL(u)
+ cfg.ListenMetricsUrls = u
}
if cfg.CORSJSON != "" {
@@ -571,8 +919,8 @@ func (cfg *configYAML) configFromFile(path string) error {
cfg.HostWhitelist = uv.Values
}
- // If a discovery flag is set, clear default initial cluster set by InitialClusterFromName
- if (cfg.Durl != "" || cfg.DNSCluster != "") && cfg.InitialCluster == defaultInitialCluster {
+ // If a discovery or discovery-endpoints flag is set, clear default initial cluster set by InitialClusterFromName
+ if (cfg.Durl != "" || cfg.DNSCluster != "" || len(cfg.DiscoveryCfg.Endpoints) > 0) && cfg.InitialCluster == defaultInitialCluster {
cfg.InitialCluster = ""
}
if cfg.ClusterState == "" {
@@ -586,58 +934,113 @@ func (cfg *configYAML) configFromFile(path string) error {
tls.ClientKeyFile = ysc.ClientKeyFile
tls.ClientCertAuth = ysc.CertAuth
tls.TrustedCAFile = ysc.TrustedCAFile
+ tls.AllowedCNs = ysc.AllowedCNs
+ tls.AllowedHostnames = ysc.AllowedHostnames
}
copySecurityDetails(&cfg.ClientTLSInfo, &cfg.ClientSecurityJSON)
copySecurityDetails(&cfg.PeerTLSInfo, &cfg.PeerSecurityJSON)
cfg.ClientAutoTLS = cfg.ClientSecurityJSON.AutoTLS
cfg.PeerAutoTLS = cfg.PeerSecurityJSON.AutoTLS
-
+ if cfg.SelfSignedCertValidity == 0 {
+ cfg.SelfSignedCertValidity = 1
+ }
return cfg.Validate()
}
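The keys handled above map directly onto the json tags in configJSON and securityConfig, including the new listen-client-http-urls, feature-gates, allowed-cn and allowed-hostname fields. A hedged end-to-end sketch (file location, certificate paths and values are illustrative; CompactHashCheck is the gate name cited in the flag help earlier in this file, and a bad file simply surfaces as an error because configFromFile ends by calling Validate):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"go.etcd.io/etcd/server/v3/embed"
)

const exampleYAML = `
name: infra0
listen-client-urls: http://127.0.0.1:2379
advertise-client-urls: http://127.0.0.1:2379
listen-client-http-urls: http://127.0.0.1:2381
feature-gates: CompactHashCheck=true
peer-transport-security:
  cert-file: /tmp/peer.crt
  key-file: /tmp/peer.key
  trusted-ca-file: /tmp/ca.crt
  allowed-cn:
    - etcd-peer
`

func main() {
	path := filepath.Join(os.TempDir(), "etcd-example.yaml")
	if err := os.WriteFile(path, []byte(exampleYAML), 0o600); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}

	cfg, err := embed.ConfigFromFile(path)
	if err != nil {
		// Validate() runs as part of loading, so a rejected file shows up here.
		fmt.Fprintln(os.Stderr, "config rejected:", err)
		return
	}
	fmt.Println(cfg.Name, cfg.ListenClientHttpUrls, cfg.PeerTLSInfo.AllowedCNs)
}
```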
+// SetFeatureGatesFromExperimentalFlags sets the value of a feature gate when the gate itself is not explicitly set
+// but its corresponding experimental flag is, for all the features in ExperimentalFlagToFeatureMap.
+// TODO: remove after all experimental flags are deprecated.
+func SetFeatureGatesFromExperimentalFlags(fg featuregate.FeatureGate, getExperimentalFlagVal func(string) *bool, featureGatesVal string) error {
+ m := make(map[featuregate.Feature]bool)
+ // verify that the feature gate and its experimental flag are not both set at the same time.
+ for expFlagName, featureName := range features.ExperimentalFlagToFeatureMap {
+ flagVal := getExperimentalFlagVal(expFlagName)
+ if flagVal == nil {
+ continue
+ }
+ if strings.Contains(featureGatesVal, string(featureName)) {
+ return fmt.Errorf("cannot specify both flags: --%s=%v and --%s=%s=%v at the same time, please just use --%s=%s=%v",
+ expFlagName, *flagVal, ServerFeatureGateFlagName, featureName, fg.Enabled(featureName), ServerFeatureGateFlagName, featureName, fg.Enabled(featureName))
+ }
+ m[featureName] = *flagVal
+ }
+
+ // filter out unknown features for fg, because we could use SetFeatureGatesFromExperimentalFlags both for
+ // server and cluster level feature gates.
+ allFeatures := fg.(featuregate.MutableFeatureGate).GetAll()
+ mFiltered := make(map[string]bool)
+ for k, v := range m {
+ if _, ok := allFeatures[k]; ok {
+ mFiltered[string(k)] = v
+ }
+ }
+ return fg.(featuregate.MutableFeatureGate).SetFromMap(mFiltered)
+}
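This helper is what bridges deprecated --experimental-* booleans and the new feature gates during the migration window: an explicitly-set experimental flag is translated into the matching gate, unless the gate was also named in --feature-gates, which is rejected as a conflict. A sketch of how a caller might drive it; the flag name and the lookup map are illustrative stand-ins for "was this flag set on the command line":

```go
package main

import (
	"log"

	"go.etcd.io/etcd/server/v3/embed"
)

func main() {
	cfg := embed.NewConfig()

	// Pretend the operator explicitly passed one deprecated experimental bool flag.
	// (Flag name is illustrative; only entries present in
	// features.ExperimentalFlagToFeatureMap have any effect.)
	explicitlySet := map[string]bool{
		"experimental-stop-grpc-service-on-defrag": true,
	}
	getFlag := func(name string) *bool {
		v, ok := explicitlySet[name]
		if !ok {
			return nil
		}
		return &v
	}

	// An empty feature-gates value means --feature-gates was not used,
	// so the both-set conflict check above cannot trigger.
	if err := embed.SetFeatureGatesFromExperimentalFlags(cfg.ServerFeatureGate, getFlag, ""); err != nil {
		log.Fatal(err)
	}
	log.Println("feature gates updated from experimental flags")
}
```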
+
func updateCipherSuites(tls *transport.TLSInfo, ss []string) error {
if len(tls.CipherSuites) > 0 && len(ss) > 0 {
return fmt.Errorf("TLSInfo.CipherSuites is already specified (given %v)", ss)
}
if len(ss) > 0 {
- cs := make([]uint16, len(ss))
- for i, s := range ss {
- var ok bool
- cs[i], ok = tlsutil.GetCipherSuite(s)
- if !ok {
- return fmt.Errorf("unexpected TLS cipher suite %q", s)
- }
+ cs, err := tlsutil.GetCipherSuites(ss)
+ if err != nil {
+ return err
}
tls.CipherSuites = cs
}
return nil
}
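updateCipherSuites now delegates the name-to-ID mapping to a single tlsutil helper instead of a hand-rolled loop. A small usage sketch, assuming the helper lives at the client/pkg/v3 tlsutil import path used by this package and accepts Go's standard cipher suite names:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/client/pkg/v3/tlsutil"
)

func main() {
	// Map standard Go cipher suite names to their uint16 IDs; an unknown name returns an error.
	suites, err := tlsutil.GetCipherSuites([]string{
		"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
		"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
	})
	if err != nil {
		fmt.Println("unsupported cipher suite:", err)
		return
	}
	fmt.Printf("%#x\n", suites)
}
```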
+func updateMinMaxVersions(info *transport.TLSInfo, min, max string) {
+ // Validate() has been called to check the user input, so it should never fail.
+ var err error
+ if info.MinVersion, err = tlsutil.GetTLSVersion(min); err != nil {
+ panic(err)
+ }
+ if info.MaxVersion, err = tlsutil.GetTLSVersion(max); err != nil {
+ panic(err)
+ }
+}
+
// Validate ensures that '*embed.Config' fields are properly configured.
func (cfg *Config) Validate() error {
+ // make sure there is no conflict between the flag pairs listed in experimentalNonBoolFlagMigrationMap
+ // TODO: delete in v3.7
+ for oldFlag, newFlag := range experimentalNonBoolFlagMigrationMap {
+ if cfg.FlagsExplicitlySet[oldFlag] && cfg.FlagsExplicitlySet[newFlag] {
+ return fmt.Errorf("cannot set --%s and --%s at the same time, please use --%s only", oldFlag, newFlag, newFlag)
+ }
+ }
+
if err := cfg.setupLogging(); err != nil {
return err
}
- if err := checkBindURLs(cfg.LPUrls); err != nil {
+ if err := checkBindURLs(cfg.ListenPeerUrls); err != nil {
+ return err
+ }
+ if err := checkBindURLs(cfg.ListenClientUrls); err != nil {
return err
}
- if err := checkBindURLs(cfg.LCUrls); err != nil {
+ if err := checkBindURLs(cfg.ListenClientHttpUrls); err != nil {
return err
}
+ if len(cfg.ListenClientHttpUrls) == 0 {
+ cfg.logger.Warn("Running http and grpc server on single port. This is not recommended for production.")
+ }
if err := checkBindURLs(cfg.ListenMetricsUrls); err != nil {
return err
}
- if err := checkHostURLs(cfg.APUrls); err != nil {
- addrs := cfg.getAPURLs()
- return fmt.Errorf(`--initial-advertise-peer-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err)
+ if err := checkHostURLs(cfg.AdvertisePeerUrls); err != nil {
+ addrs := cfg.getAdvertisePeerURLs()
+ return fmt.Errorf(`--initial-advertise-peer-urls %q must be "host:port" (%w)`, strings.Join(addrs, ","), err)
}
- if err := checkHostURLs(cfg.ACUrls); err != nil {
- addrs := cfg.getACURLs()
- return fmt.Errorf(`--advertise-client-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err)
+ if err := checkHostURLs(cfg.AdvertiseClientUrls); err != nil {
+ addrs := cfg.getAdvertiseClientURLs()
+ return fmt.Errorf(`--advertise-client-urls %q must be "host:port" (%w)`, strings.Join(addrs, ","), err)
}
// Check if conflicting flags are passed.
nSet := 0
- for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != ""} {
+ for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != "", len(cfg.DiscoveryCfg.Endpoints) > 0} {
if v {
nSet++
}
@@ -651,6 +1054,28 @@ func (cfg *Config) Validate() error {
return ErrConflictBootstrapFlags
}
+ // Check if both v2 discovery and v3 discovery flags are passed.
+ v2discoveryFlagsExist := cfg.Dproxy != ""
+ v3discoveryFlagsExist := len(cfg.DiscoveryCfg.Endpoints) > 0 ||
+ cfg.DiscoveryCfg.Token != "" ||
+ cfg.DiscoveryCfg.Secure.Cert != "" ||
+ cfg.DiscoveryCfg.Secure.Key != "" ||
+ cfg.DiscoveryCfg.Secure.Cacert != "" ||
+ cfg.DiscoveryCfg.Auth.Username != "" ||
+ cfg.DiscoveryCfg.Auth.Password != ""
+
+ if v2discoveryFlagsExist && v3discoveryFlagsExist {
+ return errors.New("both v2 discovery settings (discovery, discovery-proxy) " +
+ "and v3 discovery settings (discovery-token, discovery-endpoints, discovery-cert, " +
+ "discovery-key, discovery-cacert, discovery-user, discovery-password) are set")
+ }
+
+ // If one of `discovery-token` and `discovery-endpoints` is provided,
+ // then the other one must be provided as well.
+ if (cfg.DiscoveryCfg.Token != "") != (len(cfg.DiscoveryCfg.Endpoints) > 0) {
+ return errors.New("both --discovery-token and --discovery-endpoints must be set")
+ }
+
if cfg.TickMs == 0 {
return fmt.Errorf("--heartbeat-interval must be >0 (set to %dms)", cfg.TickMs)
}
@@ -665,17 +1090,71 @@ func (cfg *Config) Validate() error {
}
// check this last since proxying in etcdmain may make this OK
- if cfg.LCUrls != nil && cfg.ACUrls == nil {
+ if cfg.ListenClientUrls != nil && cfg.AdvertiseClientUrls == nil {
return ErrUnsetAdvertiseClientURLsFlag
}
switch cfg.AutoCompactionMode {
- case "":
case CompactorModeRevision, CompactorModePeriodic:
+ case "":
+ return errors.New("undefined auto-compaction-mode")
default:
return fmt.Errorf("unknown auto-compaction-mode %q", cfg.AutoCompactionMode)
}
+ // Validate distributed tracing configuration but only if enabled.
+ if cfg.ExperimentalEnableDistributedTracing {
+ if err := validateTracingConfig(cfg.ExperimentalDistributedTracingSamplingRatePerMillion); err != nil {
+ return fmt.Errorf("distributed tracing configurition is not valid: (%w)", err)
+ }
+ }
+
+ if !cfg.ExperimentalEnableLeaseCheckpointPersist && cfg.ExperimentalEnableLeaseCheckpoint {
+ cfg.logger.Warn("Detected that checkpointing is enabled without persistence. Consider enabling experimental-enable-lease-checkpoint-persist")
+ }
+
+ if cfg.ExperimentalEnableLeaseCheckpointPersist && !cfg.ExperimentalEnableLeaseCheckpoint {
+ return fmt.Errorf("setting experimental-enable-lease-checkpoint-persist requires experimental-enable-lease-checkpoint")
+ }
+ // TODO: delete in v3.7
+ if cfg.ExperimentalCompactHashCheckTime <= 0 {
+ return fmt.Errorf("--experimental-compact-hash-check-time must be >0 (set to %v)", cfg.ExperimentalCompactHashCheckTime)
+ }
+ if cfg.CompactHashCheckTime <= 0 {
+ return fmt.Errorf("--compact-hash-check-time must be >0 (set to %v)", cfg.CompactHashCheckTime)
+ }
+
+ // If `--name` isn't configured, then multiple members may have the same "default" name.
+ // When adding a new member that also uses the "default" name, etcd may regard its peerURL
+ // as an additional peerURL of the existing member with the same "default" name,
+ // because each member can have multiple client or peer URLs.
+ // Please refer to https://github.com/etcd-io/etcd/issues/13757
+ if cfg.Name == DefaultName {
+ cfg.logger.Warn(
+ "it isn't recommended to use default name, please set a value for --name. "+
+ "Note that etcd might run into issue when multiple members have the same default name",
+ zap.String("name", cfg.Name))
+ }
+
+ minVersion, err := tlsutil.GetTLSVersion(cfg.TlsMinVersion)
+ if err != nil {
+ return err
+ }
+ maxVersion, err := tlsutil.GetTLSVersion(cfg.TlsMaxVersion)
+ if err != nil {
+ return err
+ }
+
+ // maxVersion == 0 means that Go selects the highest available version.
+ if maxVersion != 0 && minVersion > maxVersion {
+ return fmt.Errorf("min version (%s) is greater than max version (%s)", cfg.TlsMinVersion, cfg.TlsMaxVersion)
+ }
+
+ // Check if user attempted to configure ciphers for TLS1.3 only: Go does not support that currently.
+ if minVersion == tls.VersionTLS13 && len(cfg.CipherSuites) > 0 {
+ return fmt.Errorf("cipher suites cannot be configured when only TLS1.3 is enabled")
+ }
+
return nil
}
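Validate now rejects inconsistent TLS version bounds (and the other misconfigurations above) before the server ever starts. A quick illustrative check; the versions are chosen purely to trip the min/max rule:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/server/v3/embed"
)

func main() {
	cfg := embed.NewConfig()
	cfg.TlsMinVersion = "TLS1.3"
	cfg.TlsMaxVersion = "TLS1.2" // deliberately lower than the minimum

	if err := cfg.Validate(); err != nil {
		fmt.Println("expected rejection:", err)
		return
	}
	fmt.Println("unexpectedly valid")
}
```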
@@ -685,11 +1164,18 @@ func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, tok
switch {
case cfg.Durl != "":
urlsmap = types.URLsMap{}
- // If using discovery, generate a temporary cluster based on
+ // If using v2 discovery, generate a temporary cluster based on
// self's advertised peer URLs
- urlsmap[cfg.Name] = cfg.APUrls
+ urlsmap[cfg.Name] = cfg.AdvertisePeerUrls
token = cfg.Durl
+ case len(cfg.DiscoveryCfg.Endpoints) > 0:
+ urlsmap = types.URLsMap{}
+ // If using v3 discovery, generate a temporary cluster based on
+ // self's advertised peer URLs
+ urlsmap[cfg.Name] = cfg.AdvertisePeerUrls
+ token = cfg.DiscoveryCfg.Token
+
case cfg.DNSCluster != "":
clusterStrs, cerr := cfg.GetDNSClusterNames()
lg := cfg.logger
@@ -741,7 +1227,7 @@ func (cfg *Config) GetDNSClusterNames() ([]string, error) {
// Use both etcd-server-ssl and etcd-server for discovery.
// Combine the results if both are available.
- clusterStrs, cerr = getCluster("https", "etcd-server-ssl"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls)
+ clusterStrs, cerr = getCluster("https", "etcd-server-ssl"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.AdvertisePeerUrls)
if cerr != nil {
clusterStrs = make([]string, 0)
}
@@ -751,12 +1237,12 @@ func (cfg *Config) GetDNSClusterNames() ([]string, error) {
zap.String("service-name", "etcd-server-ssl"+serviceNameSuffix),
zap.String("server-name", cfg.Name),
zap.String("discovery-srv", cfg.DNSCluster),
- zap.Strings("advertise-peer-urls", cfg.getAPURLs()),
+ zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerURLs()),
zap.Strings("found-cluster", clusterStrs),
zap.Error(cerr),
)
- defaultHTTPClusterStrs, httpCerr := getCluster("http", "etcd-server"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls)
+ defaultHTTPClusterStrs, httpCerr := getCluster("http", "etcd-server"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.AdvertisePeerUrls)
if httpCerr == nil {
clusterStrs = append(clusterStrs, defaultHTTPClusterStrs...)
}
@@ -766,44 +1252,78 @@ func (cfg *Config) GetDNSClusterNames() ([]string, error) {
zap.String("service-name", "etcd-server"+serviceNameSuffix),
zap.String("server-name", cfg.Name),
zap.String("discovery-srv", cfg.DNSCluster),
- zap.Strings("advertise-peer-urls", cfg.getAPURLs()),
+ zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerURLs()),
zap.Strings("found-cluster", clusterStrs),
zap.Error(httpCerr),
)
- return clusterStrs, multierr.Combine(cerr, httpCerr)
+ return clusterStrs, errors.Join(cerr, httpCerr)
}
-func (cfg Config) InitialClusterFromName(name string) (ret string) {
- if len(cfg.APUrls) == 0 {
+func (cfg *Config) InitialClusterFromName(name string) (ret string) {
+ if len(cfg.AdvertisePeerUrls) == 0 {
return ""
}
n := name
if name == "" {
n = DefaultName
}
- for i := range cfg.APUrls {
- ret = ret + "," + n + "=" + cfg.APUrls[i].String()
+ for i := range cfg.AdvertisePeerUrls {
+ ret = ret + "," + n + "=" + cfg.AdvertisePeerUrls[i].String()
}
return ret[1:]
}
-func (cfg Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateFlagNew }
-func (cfg Config) ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) }
+// InferLocalAddr tries to determine the LocalAddr used when communicating with
+// an etcd peer. If ExperimentalSetMemberLocalAddr is true, it tries to get the host
+// from AdvertisePeerUrls by searching for the first URL with a specified
+// non-loopback address. Otherwise, it returns an empty string and the
+// LocalAddr used will be the Go HTTP client's default.
+func (cfg *Config) InferLocalAddr() string {
+ if !cfg.ExperimentalSetMemberLocalAddr {
+ return ""
+ }
+
+ lg := cfg.GetLogger()
+ lg.Info(
+ "searching for a suitable member local address in AdvertisePeerURLs",
+ zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerURLs()),
+ )
+ for _, peerURL := range cfg.AdvertisePeerUrls {
+ if addr, err := netip.ParseAddr(peerURL.Hostname()); err == nil {
+ if addr.IsLoopback() || addr.IsUnspecified() {
+ continue
+ }
+ lg.Info(
+ "setting member local address",
+ zap.String("LocalAddr", addr.String()),
+ )
+ return addr.String()
+ }
+ }
+ lg.Warn(
+ "unable to set a member local address due to lack of suitable local addresses",
+ zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerURLs()),
+ )
+ return ""
+}
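The selection rule above is simply "the first advertised peer host that parses as a non-loopback, non-unspecified IP literal". A standalone sketch of the same rule using only the standard library; pickLocalAddr is a made-up name for illustration:

```go
package main

import (
	"fmt"
	"net/netip"
	"net/url"
)

// pickLocalAddr mirrors the selection loop in (*Config).InferLocalAddr above:
// return the first advertised peer host that is a literal, non-loopback,
// non-unspecified IP address, or "" if none qualifies.
func pickLocalAddr(peerURLs []url.URL) string {
	for _, u := range peerURLs {
		if addr, err := netip.ParseAddr(u.Hostname()); err == nil {
			if addr.IsLoopback() || addr.IsUnspecified() {
				continue
			}
			return addr.String()
		}
	}
	return ""
}

func main() {
	u1, _ := url.Parse("https://127.0.0.1:2380")
	u2, _ := url.Parse("https://10.0.1.20:2380")
	fmt.Println(pickLocalAddr([]url.URL{*u1, *u2})) // 10.0.1.20
}
```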
+
+func (cfg *Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateFlagNew }
+func (cfg *Config) ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) }
-func (cfg Config) V2DeprecationEffective() config.V2DeprecationEnum {
+func (cfg *Config) V2DeprecationEffective() config.V2DeprecationEnum {
if cfg.V2Deprecation == "" {
- return config.V2_DEPR_DEFAULT
+ return config.V2DeprDefault
}
return cfg.V2Deprecation
}
-func (cfg Config) defaultPeerHost() bool {
- return len(cfg.APUrls) == 1 && cfg.APUrls[0].String() == DefaultInitialAdvertisePeerURLs
+func (cfg *Config) defaultPeerHost() bool {
+ return len(cfg.AdvertisePeerUrls) == 1 && cfg.AdvertisePeerUrls[0].String() == DefaultInitialAdvertisePeerURLs
}
-func (cfg Config) defaultClientHost() bool {
- return len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs
+func (cfg *Config) defaultClientHost() bool {
+ return len(cfg.AdvertiseClientUrls) == 1 && cfg.AdvertiseClientUrls[0].String() == DefaultAdvertiseClientURLs
}
func (cfg *Config) ClientSelfCert() (err error) {
@@ -814,9 +1334,12 @@ func (cfg *Config) ClientSelfCert() (err error) {
cfg.logger.Warn("ignoring client auto TLS since certs given")
return nil
}
- chosts := make([]string, len(cfg.LCUrls))
- for i, u := range cfg.LCUrls {
- chosts[i] = u.Host
+ chosts := make([]string, 0, len(cfg.ListenClientUrls)+len(cfg.ListenClientHttpUrls))
+ for _, u := range cfg.ListenClientUrls {
+ chosts = append(chosts, u.Host)
+ }
+ for _, u := range cfg.ListenClientHttpUrls {
+ chosts = append(chosts, u.Host)
}
cfg.ClientTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "client"), chosts, cfg.SelfSignedCertValidity)
if err != nil {
@@ -833,8 +1356,8 @@ func (cfg *Config) PeerSelfCert() (err error) {
cfg.logger.Warn("ignoring peer auto TLS since certs given")
return nil
}
- phosts := make([]string, len(cfg.LPUrls))
- for i, u := range cfg.LPUrls {
+ phosts := make([]string, len(cfg.ListenPeerUrls))
+ for i, u := range cfg.ListenPeerUrls {
phosts[i] = u.Host
}
cfg.PeerTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "peer"), phosts, cfg.SelfSignedCertValidity)
@@ -862,9 +1385,9 @@ func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (s
}
used := false
- pip, pport := cfg.LPUrls[0].Hostname(), cfg.LPUrls[0].Port()
+ pip, pport := cfg.ListenPeerUrls[0].Hostname(), cfg.ListenPeerUrls[0].Port()
if cfg.defaultPeerHost() && pip == "0.0.0.0" {
- cfg.APUrls[0] = url.URL{Scheme: cfg.APUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)}
+ cfg.AdvertisePeerUrls[0] = url.URL{Scheme: cfg.AdvertisePeerUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)}
used = true
}
// update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
@@ -872,9 +1395,9 @@ func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (s
cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
}
- cip, cport := cfg.LCUrls[0].Hostname(), cfg.LCUrls[0].Port()
+ cip, cport := cfg.ListenClientUrls[0].Hostname(), cfg.ListenClientUrls[0].Port()
if cfg.defaultClientHost() && cip == "0.0.0.0" {
- cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)}
+ cfg.AdvertiseClientUrls[0] = url.URL{Scheme: cfg.AdvertiseClientUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)}
used = true
}
dhost := defaultHostname
@@ -919,34 +1442,34 @@ func checkHostURLs(urls []url.URL) error {
return nil
}
-func (cfg *Config) getAPURLs() (ss []string) {
- ss = make([]string, len(cfg.APUrls))
- for i := range cfg.APUrls {
- ss[i] = cfg.APUrls[i].String()
+func (cfg *Config) getAdvertisePeerURLs() (ss []string) {
+ ss = make([]string, len(cfg.AdvertisePeerUrls))
+ for i := range cfg.AdvertisePeerUrls {
+ ss[i] = cfg.AdvertisePeerUrls[i].String()
}
return ss
}
-func (cfg *Config) getLPURLs() (ss []string) {
- ss = make([]string, len(cfg.LPUrls))
- for i := range cfg.LPUrls {
- ss[i] = cfg.LPUrls[i].String()
+func (cfg *Config) getListenPeerURLs() (ss []string) {
+ ss = make([]string, len(cfg.ListenPeerUrls))
+ for i := range cfg.ListenPeerUrls {
+ ss[i] = cfg.ListenPeerUrls[i].String()
}
return ss
}
-func (cfg *Config) getACURLs() (ss []string) {
- ss = make([]string, len(cfg.ACUrls))
- for i := range cfg.ACUrls {
- ss[i] = cfg.ACUrls[i].String()
+func (cfg *Config) getAdvertiseClientURLs() (ss []string) {
+ ss = make([]string, len(cfg.AdvertiseClientUrls))
+ for i := range cfg.AdvertiseClientUrls {
+ ss[i] = cfg.AdvertiseClientUrls[i].String()
}
return ss
}
-func (cfg *Config) getLCURLs() (ss []string) {
- ss = make([]string, len(cfg.LCUrls))
- for i := range cfg.LCUrls {
- ss[i] = cfg.LCUrls[i].String()
+func (cfg *Config) getListenClientURLs() (ss []string) {
+ ss = make([]string, len(cfg.ListenClientUrls))
+ for i := range cfg.ListenClientUrls {
+ ss[i] = cfg.ListenClientUrls[i].String()
}
return ss
}
diff --git a/server/embed/config_logging.go b/server/embed/config_logging.go
index 645985f0f1d..ddf19cdbb52 100644
--- a/server/embed/config_logging.go
+++ b/server/embed/config_logging.go
@@ -19,21 +19,22 @@ import (
"encoding/json"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"net/url"
"os"
- "go.etcd.io/etcd/client/pkg/v3/logutil"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"go.uber.org/zap/zapgrpc"
"google.golang.org/grpc"
"google.golang.org/grpc/grpclog"
"gopkg.in/natefinch/lumberjack.v2"
+
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
)
// GetLogger returns the logger.
-func (cfg Config) GetLogger() *zap.Logger {
+func (cfg *Config) GetLogger() *zap.Logger {
cfg.loggerMu.RLock()
l := cfg.logger
cfg.loggerMu.RUnlock()
@@ -87,7 +88,11 @@ func (cfg *Config) setupLogging() error {
var path string
if cfg.EnableLogRotation {
// append rotate scheme to logs managed by lumberjack log rotation
- path = fmt.Sprintf("rotate:%s", v)
+ if v[0:1] == "/" {
+ path = fmt.Sprintf("rotate:/%%2F%s", v[1:])
+ } else {
+ path = fmt.Sprintf("rotate:/%s", v)
+ }
} else {
path = v
}
@@ -102,6 +107,11 @@ func (cfg *Config) setupLogging() error {
copied.ErrorOutputPaths = errOutputPaths
copied = logutil.MergeOutputPaths(copied)
copied.Level = zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel))
+ encoding, err := logutil.ConvertToZapFormat(cfg.LogFormat)
+ if err != nil {
+ return err
+ }
+ copied.Encoding = encoding
if cfg.ZapLoggerBuilder == nil {
lg, err := copied.Build()
if err != nil {
@@ -126,10 +136,22 @@ func (cfg *Config) setupLogging() error {
lvl := zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel))
+ var encoder zapcore.Encoder
+ encoding, err := logutil.ConvertToZapFormat(cfg.LogFormat)
+ if err != nil {
+ return err
+ }
+
+ if encoding == logutil.ConsoleLogFormat {
+ encoder = zapcore.NewConsoleEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig)
+ } else {
+ encoder = zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig)
+ }
+
// WARN: do not change field names in encoder config
// journald logging writer assumes field names of "level" and "caller"
cr := zapcore.NewCore(
- zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig),
+ encoder,
syncer,
lvl,
)
@@ -143,35 +165,38 @@ func (cfg *Config) setupLogging() error {
return err
}
- logTLSHandshakeFailure := func(conn *tls.Conn, err error) {
- state := conn.ConnectionState()
- remoteAddr := conn.RemoteAddr().String()
- serverName := state.ServerName
- if len(state.PeerCertificates) > 0 {
- cert := state.PeerCertificates[0]
- ips := make([]string, len(cert.IPAddresses))
- for i := range cert.IPAddresses {
- ips[i] = cert.IPAddresses[i].String()
+ logTLSHandshakeFailureFunc := func(msg string) func(conn *tls.Conn, err error) {
+ return func(conn *tls.Conn, err error) {
+ state := conn.ConnectionState()
+ remoteAddr := conn.RemoteAddr().String()
+ serverName := state.ServerName
+ if len(state.PeerCertificates) > 0 {
+ cert := state.PeerCertificates[0]
+ ips := make([]string, len(cert.IPAddresses))
+ for i := range cert.IPAddresses {
+ ips[i] = cert.IPAddresses[i].String()
+ }
+ cfg.logger.Warn(
+ msg,
+ zap.String("remote-addr", remoteAddr),
+ zap.String("server-name", serverName),
+ zap.Strings("ip-addresses", ips),
+ zap.Strings("dns-names", cert.DNSNames),
+ zap.Error(err),
+ )
+ } else {
+ cfg.logger.Warn(
+ msg,
+ zap.String("remote-addr", remoteAddr),
+ zap.String("server-name", serverName),
+ zap.Error(err),
+ )
}
- cfg.logger.Warn(
- "rejected connection",
- zap.String("remote-addr", remoteAddr),
- zap.String("server-name", serverName),
- zap.Strings("ip-addresses", ips),
- zap.Strings("dns-names", cert.DNSNames),
- zap.Error(err),
- )
- } else {
- cfg.logger.Warn(
- "rejected connection",
- zap.String("remote-addr", remoteAddr),
- zap.String("server-name", serverName),
- zap.Error(err),
- )
}
}
- cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailure
- cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailure
+
+ cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailureFunc("rejected connection on client endpoint")
+ cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailureFunc("rejected connection on peer endpoint")
default:
return fmt.Errorf("unknown logger option %q", cfg.Logger)
@@ -180,7 +205,7 @@ func (cfg *Config) setupLogging() error {
return nil
}
-// NewZapLoggerBuilder generates a zap logger builder that sets given loger
+// NewZapLoggerBuilder generates a zap logger builder that sets given logger
// for embedded etcd.
func NewZapLoggerBuilder(lg *zap.Logger) func(*Config) error {
return func(cfg *Config) error {
@@ -209,7 +234,7 @@ func (cfg *Config) SetupGlobalLoggers() {
grpc.EnableTracing = true
grpclog.SetLoggerV2(zapgrpc.NewLogger(lg))
} else {
- grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
+ grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, os.Stderr, os.Stderr))
}
zap.ReplaceGlobals(lg)
}
@@ -224,7 +249,7 @@ func (logRotationConfig) Sync() error { return nil }
// setupLogRotation initializes log rotation for a single file path target.
func setupLogRotation(logOutputs []string, logRotateConfigJSON string) error {
- var logRotationConfig logRotationConfig
+ var logRotationCfg logRotationConfig
outputFilePaths := 0
for _, v := range logOutputs {
switch v {
@@ -243,7 +268,7 @@ func setupLogRotation(logOutputs []string, logRotateConfigJSON string) error {
return ErrLogRotationInvalidLogOutput
}
- if err := json.Unmarshal([]byte(logRotateConfigJSON), &logRotationConfig); err != nil {
+ if err := json.Unmarshal([]byte(logRotateConfigJSON), &logRotationCfg); err != nil {
var unmarshalTypeError *json.UnmarshalTypeError
var syntaxError *json.SyntaxError
switch {
@@ -251,11 +276,13 @@ func setupLogRotation(logOutputs []string, logRotateConfigJSON string) error {
return fmt.Errorf("improperly formatted log rotation config: %w", err)
case errors.As(err, &unmarshalTypeError):
return fmt.Errorf("invalid log rotation config: %w", err)
+ default:
+ return fmt.Errorf("fail to unmarshal log rotation config: %w", err)
}
}
zap.RegisterSink("rotate", func(u *url.URL) (zap.Sink, error) {
- logRotationConfig.Filename = u.Path
- return &logRotationConfig, nil
+ logRotationCfg.Filename = u.Path[1:]
+ return &logRotationCfg, nil
})
return nil
}
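For context, the "rotate" sink registered above adapts a rotating file writer to zap's Sink interface. A minimal sketch of the mechanism, assuming lumberjack as the rotating writer and an illustrative URL layout rather than etcd's exact one:

```go
package main

import (
	"net/url"

	"go.uber.org/zap"
	"gopkg.in/natefinch/lumberjack.v2"
)

// rotatingSink adds the Sync method zap expects on top of lumberjack's
// Write/Close, the same idea as logRotationConfig above.
type rotatingSink struct {
	lumberjack.Logger
}

func (rotatingSink) Sync() error { return nil }

func main() {
	// Register a "rotate" URL scheme; the path portion of the URL selects
	// the target file.
	if err := zap.RegisterSink("rotate", func(u *url.URL) (zap.Sink, error) {
		return &rotatingSink{lumberjack.Logger{Filename: u.Path, MaxSize: 1 /* MB */}}, nil
	}); err != nil {
		panic(err)
	}

	cfg := zap.NewProductionConfig()
	cfg.OutputPaths = []string{"rotate:///tmp/example.log"}
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()
	logger.Info("writing through the rotating sink")
}
```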
diff --git a/server/embed/config_logging_journal_unix.go b/server/embed/config_logging_journal_unix.go
index 76bb73265fb..478dc65d60d 100644
--- a/server/embed/config_logging_journal_unix.go
+++ b/server/embed/config_logging_journal_unix.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build !windows
-// +build !windows
package embed
@@ -21,16 +20,16 @@ import (
"fmt"
"os"
- "go.etcd.io/etcd/client/pkg/v3/logutil"
-
"go.uber.org/zap/zapcore"
+
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
)
// use stderr as fallback
func getJournalWriteSyncer() (zapcore.WriteSyncer, error) {
jw, err := logutil.NewJournalWriter(os.Stderr)
if err != nil {
- return nil, fmt.Errorf("can't find journal (%v)", err)
+ return nil, fmt.Errorf("can't find journal (%w)", err)
}
return zapcore.AddSync(jw), nil
}
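The switch from %v to %w above is not cosmetic: wrapping keeps the error chain inspectable with errors.Is and errors.As. A small illustration:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, err := os.Open("/nonexistent")
	wrapped := fmt.Errorf("can't find journal (%w)", err)

	// %w keeps the chain intact, so callers can still match the root cause.
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true

	// With %v the original error would be flattened into a plain string
	// and errors.Is/errors.As could no longer see it.
}
```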
diff --git a/server/embed/config_logging_journal_windows.go b/server/embed/config_logging_journal_windows.go
index 58ed08631bb..90dfad944e4 100644
--- a/server/embed/config_logging_journal_windows.go
+++ b/server/embed/config_logging_journal_windows.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build windows
-// +build windows
package embed
diff --git a/server/embed/config_test.go b/server/embed/config_test.go
index b86d0d6c246..9a24e5eb903 100644
--- a/server/embed/config_test.go
+++ b/server/embed/config_test.go
@@ -15,20 +15,25 @@
package embed
import (
+ "crypto/tls"
"errors"
"fmt"
- "io/ioutil"
"net"
"net/url"
"os"
+ "strconv"
"testing"
"time"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "sigs.k8s.io/yaml"
+
"go.etcd.io/etcd/client/pkg/v3/srv"
"go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/client/pkg/v3/types"
-
- "sigs.k8s.io/yaml"
+ "go.etcd.io/etcd/pkg/v3/featuregate"
+ "go.etcd.io/etcd/server/v3/features"
)
func notFoundErr(service, domain string) error {
@@ -38,14 +43,16 @@ func notFoundErr(service, domain string) error {
func TestConfigFileOtherFields(t *testing.T) {
ctls := securityConfig{TrustedCAFile: "cca", CertFile: "ccert", KeyFile: "ckey"}
- ptls := securityConfig{TrustedCAFile: "pca", CertFile: "pcert", KeyFile: "pkey"}
+ // Note AllowedCN and AllowedHostname are mutually exclusive; this test only verifies that the fields can be correctly marshalled & unmarshalled.
+ ptls := securityConfig{TrustedCAFile: "pca", CertFile: "pcert", KeyFile: "pkey", AllowedCNs: []string{"etcd"}, AllowedHostnames: []string{"whatever.example.com"}}
yc := struct {
- ClientSecurityCfgFile securityConfig `json:"client-transport-security"`
- PeerSecurityCfgFile securityConfig `json:"peer-transport-security"`
- ForceNewCluster bool `json:"force-new-cluster"`
- Logger string `json:"logger"`
- LogOutputs []string `json:"log-outputs"`
- Debug bool `json:"debug"`
+ ClientSecurityCfgFile securityConfig `json:"client-transport-security"`
+ PeerSecurityCfgFile securityConfig `json:"peer-transport-security"`
+ ForceNewCluster bool `json:"force-new-cluster"`
+ Logger string `json:"logger"`
+ LogOutputs []string `json:"log-outputs"`
+ Debug bool `json:"debug"`
+ SocketOpts transport.SocketOpts `json:"socket-options"`
}{
ctls,
ptls,
@@ -53,6 +60,9 @@ func TestConfigFileOtherFields(t *testing.T) {
"zap",
[]string{"/dev/null"},
false,
+ transport.SocketOpts{
+ ReusePort: true,
+ },
}
b, err := yaml.Marshal(&yc)
@@ -68,28 +78,302 @@ func TestConfigFileOtherFields(t *testing.T) {
t.Fatal(err)
}
- if !cfg.ForceNewCluster {
- t.Errorf("ForceNewCluster = %v, want %v", cfg.ForceNewCluster, true)
- }
-
if !ctls.equals(&cfg.ClientTLSInfo) {
t.Errorf("ClientTLS = %v, want %v", cfg.ClientTLSInfo, ctls)
}
if !ptls.equals(&cfg.PeerTLSInfo) {
t.Errorf("PeerTLS = %v, want %v", cfg.PeerTLSInfo, ptls)
}
+
+ assert.Truef(t, cfg.ForceNewCluster, "ForceNewCluster does not match")
+
+ assert.Truef(t, cfg.SocketOpts.ReusePort, "ReusePort does not match")
+
+ assert.Falsef(t, cfg.SocketOpts.ReuseAddress, "ReuseAddress does not match")
+}
+
+func TestConfigFileFeatureGates(t *testing.T) {
+ testCases := []struct {
+ name string
+ serverFeatureGatesJSON string
+ experimentalStopGRPCServiceOnDefrag string
+ experimentalInitialCorruptCheck string
+ experimentalCompactHashCheckEnabled string
+ experimentalTxnModeWriteWithSharedBuffer string
+ expectErr bool
+ expectedFeatures map[featuregate.Feature]bool
+ }{
+ {
+ name: "default",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.DistributedTracing: false,
+ features.StopGRPCServiceOnDefrag: false,
+ features.InitialCorruptCheck: false,
+ features.TxnModeWriteWithSharedBuffer: true,
+ },
+ },
+ {
+ name: "cannot set both experimental flag and feature gate flag for StopGRPCServiceOnDefrag",
+ serverFeatureGatesJSON: "StopGRPCServiceOnDefrag=true",
+ experimentalStopGRPCServiceOnDefrag: "false",
+ expectErr: true,
+ },
+ {
+ name: "cannot set both experimental flag and feature gate flag for InitialCorruptCheck",
+ serverFeatureGatesJSON: "InitialCorruptCheck=true",
+ experimentalInitialCorruptCheck: "false",
+ expectErr: true,
+ },
+ {
+ name: "cannot set both experimental flag and feature gate flag for TxnModeWriteWithSharedBuffer",
+ serverFeatureGatesJSON: "TxnModeWriteWithSharedBuffer=true",
+ experimentalTxnModeWriteWithSharedBuffer: "false",
+ expectErr: true,
+ },
+ {
+ name: "ok to set different experimental flag and feature gate flag",
+ serverFeatureGatesJSON: "DistributedTracing=true",
+ experimentalStopGRPCServiceOnDefrag: "true",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.DistributedTracing: true,
+ features.StopGRPCServiceOnDefrag: true,
+ features.InitialCorruptCheck: false,
+ },
+ },
+ {
+ name: "ok to set different multiple experimental flags and feature gate flags",
+ serverFeatureGatesJSON: "StopGRPCServiceOnDefrag=true,TxnModeWriteWithSharedBuffer=true",
+ experimentalCompactHashCheckEnabled: "true",
+ experimentalInitialCorruptCheck: "true",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: true,
+ features.CompactHashCheck: true,
+ features.InitialCorruptCheck: true,
+ features.TxnModeWriteWithSharedBuffer: true,
+ },
+ },
+ {
+ name: "can set feature gate StopGRPCServiceOnDefrag to true from experimental flag",
+ experimentalStopGRPCServiceOnDefrag: "true",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: true,
+ features.DistributedTracing: false,
+ features.InitialCorruptCheck: false,
+ features.TxnModeWriteWithSharedBuffer: true,
+ },
+ },
+ {
+ name: "can set feature gate StopGRPCServiceOnDefrag to false from experimental flag",
+ experimentalStopGRPCServiceOnDefrag: "false",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: false,
+ features.DistributedTracing: false,
+ features.InitialCorruptCheck: false,
+ features.TxnModeWriteWithSharedBuffer: true,
+ },
+ },
+ {
+ name: "can set feature gate experimentalInitialCorruptCheck to true from experimental flag",
+ experimentalInitialCorruptCheck: "true",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: false,
+ features.DistributedTracing: false,
+ features.InitialCorruptCheck: true,
+ },
+ },
+ {
+ name: "can set feature gate experimentalInitialCorruptCheck to false from experimental flag",
+ experimentalInitialCorruptCheck: "false",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: false,
+ features.DistributedTracing: false,
+ features.InitialCorruptCheck: false,
+ },
+ },
+ {
+ name: "can set feature gate TxnModeWriteWithSharedBuffer to true from experimental flag",
+ experimentalTxnModeWriteWithSharedBuffer: "true",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: false,
+ features.DistributedTracing: false,
+ features.InitialCorruptCheck: false,
+ features.CompactHashCheck: false,
+ features.TxnModeWriteWithSharedBuffer: true,
+ },
+ },
+ {
+ name: "can set feature gate TxnModeWriteWithSharedBuffer to false from experimental flag",
+ experimentalTxnModeWriteWithSharedBuffer: "false",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: false,
+ features.DistributedTracing: false,
+ features.InitialCorruptCheck: false,
+ features.CompactHashCheck: false,
+ features.TxnModeWriteWithSharedBuffer: false,
+ },
+ },
+ {
+ name: "can set feature gate StopGRPCServiceOnDefrag to true from feature gate flag",
+ serverFeatureGatesJSON: "StopGRPCServiceOnDefrag=true",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: true,
+ features.DistributedTracing: false,
+ features.InitialCorruptCheck: false,
+ },
+ },
+ {
+ name: "can set feature gate InitialCorruptCheck to true from feature gate flag",
+ serverFeatureGatesJSON: "InitialCorruptCheck=true",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: false,
+ features.DistributedTracing: false,
+ features.InitialCorruptCheck: true,
+ },
+ },
+ {
+ name: "can set feature gate StopGRPCServiceOnDefrag to false from feature gate flag",
+ serverFeatureGatesJSON: "StopGRPCServiceOnDefrag=false",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: false,
+ features.DistributedTracing: false,
+ features.InitialCorruptCheck: false,
+ },
+ },
+ {
+ name: "can set feature gate TxnModeWriteWithSharedBuffer to true from feature gate flag",
+ serverFeatureGatesJSON: "TxnModeWriteWithSharedBuffer=true",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: false,
+ features.DistributedTracing: false,
+ features.InitialCorruptCheck: false,
+ features.TxnModeWriteWithSharedBuffer: true,
+ },
+ },
+ {
+ name: "can set feature gate TxnModeWriteWithSharedBuffer to false from feature gate flag",
+ serverFeatureGatesJSON: "TxnModeWriteWithSharedBuffer=false",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: false,
+ features.DistributedTracing: false,
+ features.InitialCorruptCheck: false,
+ features.TxnModeWriteWithSharedBuffer: false,
+ },
+ },
+ {
+ name: "cannot set both experimental flag and feature gate flag for ExperimentalCompactHashCheckEnabled",
+ serverFeatureGatesJSON: "CompactHashCheck=true",
+ experimentalCompactHashCheckEnabled: "false",
+ expectErr: true,
+ },
+ {
+ name: "can set feature gate experimentalCompactHashCheckEnabled to true from experimental flag",
+ experimentalCompactHashCheckEnabled: "true",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: false,
+ features.DistributedTracing: false,
+ features.CompactHashCheck: true,
+ },
+ },
+ {
+ name: "can set feature gate experimentalCompactHashCheckEnabled to false from experimental flag",
+ experimentalCompactHashCheckEnabled: "false",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: false,
+ features.DistributedTracing: false,
+ features.CompactHashCheck: false,
+ },
+ },
+ {
+ name: "can set feature gate CompactHashCheck to true from feature gate flag",
+ serverFeatureGatesJSON: "CompactHashCheck=true",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: false,
+ features.DistributedTracing: false,
+ features.CompactHashCheck: true,
+ },
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ yc := struct {
+ ExperimentalStopGRPCServiceOnDefrag *bool `json:"experimental-stop-grpc-service-on-defrag,omitempty"`
+ ExperimentalInitialCorruptCheck *bool `json:"experimental-initial-corrupt-check,omitempty"`
+ ExperimentalCompactHashCheckEnabled *bool `json:"experimental-compact-hash-check-enabled,omitempty"`
+ ExperimentalTxnModeWriteWithSharedBuffer *bool `json:"experimental-txn-mode-write-with-shared-buffer,omitempty"`
+ ServerFeatureGatesJSON string `json:"feature-gates"`
+ }{
+ ServerFeatureGatesJSON: tc.serverFeatureGatesJSON,
+ }
+
+ if tc.experimentalInitialCorruptCheck != "" {
+ experimentalInitialCorruptCheck, err := strconv.ParseBool(tc.experimentalInitialCorruptCheck)
+ if err != nil {
+ t.Fatal(err)
+ }
+ yc.ExperimentalInitialCorruptCheck = &experimentalInitialCorruptCheck
+ }
+
+ if tc.experimentalTxnModeWriteWithSharedBuffer != "" {
+ experimentalTxnModeWriteWithSharedBuffer, err := strconv.ParseBool(tc.experimentalTxnModeWriteWithSharedBuffer)
+ if err != nil {
+ t.Fatal(err)
+ }
+ yc.ExperimentalTxnModeWriteWithSharedBuffer = &experimentalTxnModeWriteWithSharedBuffer
+ }
+
+ if tc.experimentalStopGRPCServiceOnDefrag != "" {
+ experimentalStopGRPCServiceOnDefrag, err := strconv.ParseBool(tc.experimentalStopGRPCServiceOnDefrag)
+ if err != nil {
+ t.Fatal(err)
+ }
+ yc.ExperimentalStopGRPCServiceOnDefrag = &experimentalStopGRPCServiceOnDefrag
+ }
+
+ if tc.experimentalCompactHashCheckEnabled != "" {
+ experimentalCompactHashCheckEnabled, err := strconv.ParseBool(tc.experimentalCompactHashCheckEnabled)
+ if err != nil {
+ t.Fatal(err)
+ }
+ yc.ExperimentalCompactHashCheckEnabled = &experimentalCompactHashCheckEnabled
+ }
+
+ b, err := yaml.Marshal(&yc)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tmpfile := mustCreateCfgFile(t, b)
+ defer os.Remove(tmpfile.Name())
+
+ cfg, err := ConfigFromFile(tmpfile.Name())
+ if tc.expectErr {
+ if err == nil {
+ t.Fatal("expect parse error")
+ }
+ return
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ for k, v := range tc.expectedFeatures {
+ if cfg.ServerFeatureGate.Enabled(k) != v {
+ t.Errorf("expected feature gate %s=%v, got %v", k, v, cfg.ServerFeatureGate.Enabled(k))
+ }
+ }
+ })
+ }
}
// TestUpdateDefaultClusterFromName ensures that etcd can start with 'etcd --name=abc'.
func TestUpdateDefaultClusterFromName(t *testing.T) {
cfg := NewConfig()
defaultInitialCluster := cfg.InitialCluster
- oldscheme := cfg.APUrls[0].Scheme
- origpeer := cfg.APUrls[0].String()
- origadvc := cfg.ACUrls[0].String()
+ oldscheme := cfg.AdvertisePeerUrls[0].Scheme
+ origpeer := cfg.AdvertisePeerUrls[0].String()
+ origadvc := cfg.AdvertiseClientUrls[0].String()
cfg.Name = "abc"
- lpport := cfg.LPUrls[0].Port()
+ lpport := cfg.ListenPeerUrls[0].Port()
// in case of 'etcd --name=abc'
exp := fmt.Sprintf("%s=%s://localhost:%s", cfg.Name, oldscheme, lpport)
@@ -98,12 +382,12 @@ func TestUpdateDefaultClusterFromName(t *testing.T) {
t.Fatalf("initial-cluster expected %q, got %q", exp, cfg.InitialCluster)
}
// advertise peer URL should not be affected
- if origpeer != cfg.APUrls[0].String() {
- t.Fatalf("advertise peer url expected %q, got %q", origadvc, cfg.APUrls[0].String())
+ if origpeer != cfg.AdvertisePeerUrls[0].String() {
+ t.Fatalf("advertise peer url expected %q, got %q", origadvc, cfg.AdvertisePeerUrls[0].String())
}
// advertise client URL should not be affected
- if origadvc != cfg.ACUrls[0].String() {
- t.Fatalf("advertise client url expected %q, got %q", origadvc, cfg.ACUrls[0].String())
+ if origadvc != cfg.AdvertiseClientUrls[0].String() {
+ t.Fatalf("advertise client url expected %q, got %q", origadvc, cfg.AdvertiseClientUrls[0].String())
}
}
@@ -116,17 +400,17 @@ func TestUpdateDefaultClusterFromNameOverwrite(t *testing.T) {
cfg := NewConfig()
defaultInitialCluster := cfg.InitialCluster
- oldscheme := cfg.APUrls[0].Scheme
- origadvc := cfg.ACUrls[0].String()
+ oldscheme := cfg.AdvertisePeerUrls[0].Scheme
+ origadvc := cfg.AdvertiseClientUrls[0].String()
cfg.Name = "abc"
- lpport := cfg.LPUrls[0].Port()
- cfg.LPUrls[0] = url.URL{Scheme: cfg.LPUrls[0].Scheme, Host: fmt.Sprintf("0.0.0.0:%s", lpport)}
+ lpport := cfg.ListenPeerUrls[0].Port()
+ cfg.ListenPeerUrls[0] = url.URL{Scheme: cfg.ListenPeerUrls[0].Scheme, Host: fmt.Sprintf("0.0.0.0:%s", lpport)}
dhost, _ := cfg.UpdateDefaultClusterFromName(defaultInitialCluster)
if dhost != defaultHostname {
t.Fatalf("expected default host %q, got %q", defaultHostname, dhost)
}
- aphost, apport := cfg.APUrls[0].Hostname(), cfg.APUrls[0].Port()
+ aphost, apport := cfg.AdvertisePeerUrls[0].Hostname(), cfg.AdvertisePeerUrls[0].Port()
if apport != lpport {
t.Fatalf("advertise peer url got different port %s, expected %s", apport, lpport)
}
@@ -139,19 +423,167 @@ func TestUpdateDefaultClusterFromNameOverwrite(t *testing.T) {
}
// advertise client URL should not be affected
- if origadvc != cfg.ACUrls[0].String() {
- t.Fatalf("advertise-client-url expected %q, got %q", origadvc, cfg.ACUrls[0].String())
+ if origadvc != cfg.AdvertiseClientUrls[0].String() {
+ t.Fatalf("advertise-client-url expected %q, got %q", origadvc, cfg.AdvertiseClientUrls[0].String())
+ }
+}
+
+func TestInferLocalAddr(t *testing.T) {
+ tests := []struct {
+ name string
+ advertisePeerURLs []string
+ setMemberLocalAddr bool
+ expectedLocalAddr string
+ }{
+ {
+ "defaults, ExperimentalSetMemberLocalAddr=false ",
+ []string{DefaultInitialAdvertisePeerURLs},
+ false,
+ "",
+ },
+ {
+ "IPv4 address, ExperimentalSetMemberLocalAddr=false ",
+ []string{"https://192.168.100.110:2380"},
+ false,
+ "",
+ },
+ {
+ "defaults, ExperimentalSetMemberLocalAddr=true",
+ []string{DefaultInitialAdvertisePeerURLs},
+ true,
+ "",
+ },
+ {
+ "IPv4 unspecified address, ExperimentalSetMemberLocalAddr=true",
+ []string{"https://0.0.0.0:2380"},
+ true,
+ "",
+ },
+ {
+ "IPv6 unspecified address, ExperimentalSetMemberLocalAddr=true",
+ []string{"https://[::]:2380"},
+ true,
+ "",
+ },
+ {
+ "IPv4 loopback address, ExperimentalSetMemberLocalAddr=true",
+ []string{"https://127.0.0.1:2380"},
+ true,
+ "",
+ },
+ {
+ "IPv6 loopback address, ExperimentalSetMemberLocalAddr=true",
+ []string{"https://[::1]:2380"},
+ true,
+ "",
+ },
+ {
+ "IPv4 address, ExperimentalSetMemberLocalAddr=true",
+ []string{"https://192.168.100.110:2380"},
+ true,
+ "192.168.100.110",
+ },
+ {
+ "Hostname only, ExperimentalSetMemberLocalAddr=true",
+ []string{"https://123-host-3.corp.internal:2380"},
+ true,
+ "",
+ },
+ {
+ "Hostname and IPv4 address, ExperimentalSetMemberLocalAddr=true",
+ []string{"https://123-host-3.corp.internal:2380", "https://192.168.100.110:2380"},
+ true,
+ "192.168.100.110",
+ },
+ {
+ "IPv4 address and Hostname, ExperimentalSetMemberLocalAddr=true",
+ []string{"https://192.168.100.110:2380", "https://123-host-3.corp.internal:2380"},
+ true,
+ "192.168.100.110",
+ },
+ {
+ "IPv4 and IPv6 addresses, ExperimentalSetMemberLocalAddr=true",
+ []string{"https://192.168.100.110:2380", "https://[2001:db8:85a3::8a2e:370:7334]:2380"},
+ true,
+ "192.168.100.110",
+ },
+ {
+ "IPv6 and IPv4 addresses, ExperimentalSetMemberLocalAddr=true",
+ // IPv4 addresses will always sort before IPv6 ones anyway
+ []string{"https://[2001:db8:85a3::8a2e:370:7334]:2380", "https://192.168.100.110:2380"},
+ true,
+ "192.168.100.110",
+ },
+ {
+ "Hostname, IPv4 and IPv6 addresses, ExperimentalSetMemberLocalAddr=true",
+ []string{"https://123-host-3.corp.internal:2380", "https://192.168.100.110:2380", "https://[2001:db8:85a3::8a2e:370:7334]:2380"},
+ true,
+ "192.168.100.110",
+ },
+ {
+ "Hostname, IPv6 and IPv4 addresses, ExperimentalSetMemberLocalAddr=true",
+ // IPv4 addresses will always sort before IPv6 ones anyway
+ []string{"https://123-host-3.corp.internal:2380", "https://[2001:db8:85a3::8a2e:370:7334]:2380", "https://192.168.100.110:2380"},
+ true,
+ "192.168.100.110",
+ },
+ {
+ "IPv6 address, ExperimentalSetMemberLocalAddr=true",
+ []string{"https://[2001:db8:85a3::8a2e:370:7334]:2380"},
+ true,
+ "2001:db8:85a3::8a2e:370:7334",
+ },
+ {
+ "Hostname and IPv6 address, ExperimentalSetMemberLocalAddr=true",
+ []string{"https://123-host-3.corp.internal:2380", "https://[2001:db8:85a3::8a2e:370:7334]:2380"},
+ true,
+ "2001:db8:85a3::8a2e:370:7334",
+ },
+ {
+ "IPv6 address and Hostname, ExperimentalSetMemberLocalAddr=true",
+ []string{"https://[2001:db8:85a3::8a2e:370:7334]:2380", "https://123-host-3.corp.internal:2380"},
+ true,
+ "2001:db8:85a3::8a2e:370:7334",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cfg := NewConfig()
+ cfg.AdvertisePeerUrls = types.MustNewURLs(tt.advertisePeerURLs)
+ cfg.ExperimentalSetMemberLocalAddr = tt.setMemberLocalAddr
+
+ require.NoError(t, cfg.Validate())
+ require.Equal(t, tt.expectedLocalAddr, cfg.InferLocalAddr())
+ })
}
}
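The table above implies the selection rules: hostnames, loopback, and unspecified addresses are ignored, and IPv4 is preferred over IPv6. A hypothetical helper consistent with those rules, not etcd's actual InferLocalAddr implementation:

```go
package main

import (
	"fmt"
	"net"
	"net/url"
	"sort"
)

// pickLocalAddr sketches the selection rules the test table above implies.
func pickLocalAddr(advertisePeerURLs []string) string {
	var ips []net.IP
	for _, raw := range advertisePeerURLs {
		u, err := url.Parse(raw)
		if err != nil {
			continue
		}
		ip := net.ParseIP(u.Hostname())
		if ip == nil || ip.IsLoopback() || ip.IsUnspecified() {
			continue
		}
		ips = append(ips, ip)
	}
	// Prefer IPv4 over IPv6, as the test comments note.
	sort.Slice(ips, func(i, j int) bool {
		return ips[i].To4() != nil && ips[j].To4() == nil
	})
	if len(ips) == 0 {
		return ""
	}
	return ips[0].String()
}

func main() {
	fmt.Println(pickLocalAddr([]string{
		"https://123-host-3.corp.internal:2380",
		"https://[2001:db8:85a3::8a2e:370:7334]:2380",
		"https://192.168.100.110:2380",
	})) // 192.168.100.110
}
```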
func (s *securityConfig) equals(t *transport.TLSInfo) bool {
return s.CertFile == t.CertFile &&
s.CertAuth == t.ClientCertAuth &&
- s.TrustedCAFile == t.TrustedCAFile
+ s.TrustedCAFile == t.TrustedCAFile &&
+ s.ClientCertFile == t.ClientCertFile &&
+ s.ClientKeyFile == t.ClientKeyFile &&
+ s.KeyFile == t.KeyFile &&
+ compareSlices(s.AllowedCNs, t.AllowedCNs) &&
+ compareSlices(s.AllowedHostnames, t.AllowedHostnames)
+}
+
+func compareSlices(slice1, slice2 []string) bool {
+ if len(slice1) != len(slice2) {
+ return false
+ }
+ for i, v := range slice1 {
+ if v != slice2[i] {
+ return false
+ }
+ }
+ return true
}
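Incidentally, on Go 1.21 or newer the helper above has the same semantics as the standard library's slices.Equal; a possible simplification, not something this change makes:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	a := []string{"etcd", "whatever.example.com"}
	b := []string{"etcd", "whatever.example.com"}
	fmt.Println(slices.Equal(a, b)) // true, same result as compareSlices above
}
```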
func mustCreateCfgFile(t *testing.T, b []byte) *os.File {
- tmpfile, err := ioutil.TempFile("", "servercfg")
+ tmpfile, err := os.CreateTemp("", "servercfg")
if err != nil {
t.Fatal(err)
}
@@ -194,6 +626,11 @@ func TestAutoCompactionModeParse(t *testing.T) {
// err mode
{"errmode", "1", false, 0},
{"errmode", "1h", false, time.Hour},
+ // empty mode
+ {"", "1", true, 0},
+ {"", "1h", false, time.Hour},
+ {"", "a", true, 0},
+ {"", "-1", true, 0},
}
hasErr := func(err error) bool {
@@ -274,7 +711,7 @@ func TestPeerURLsMapAndTokenFromSRV(t *testing.T) {
cfg.InitialCluster = ""
cfg.InitialClusterToken = ""
cfg.DNSCluster = "example.com"
- cfg.APUrls = types.MustNewURLs(tt.apurls)
+ cfg.AdvertisePeerUrls = types.MustNewURLs(tt.apurls)
if err := cfg.Validate(); err != nil {
t.Errorf("#%d: failed to validate test Config: %v", i, err)
@@ -291,6 +728,56 @@ func TestPeerURLsMapAndTokenFromSRV(t *testing.T) {
}
}
+func TestLeaseCheckpointValidate(t *testing.T) {
+ tcs := []struct {
+ name string
+ configFunc func() Config
+ expectError bool
+ }{
+ {
+ name: "Default config should pass",
+ configFunc: func() Config {
+ return *NewConfig()
+ },
+ },
+ {
+ name: "Enabling checkpoint leases should pass",
+ configFunc: func() Config {
+ cfg := *NewConfig()
+ cfg.ExperimentalEnableLeaseCheckpoint = true
+ return cfg
+ },
+ },
+ {
+ name: "Enabling checkpoint leases and persist should pass",
+ configFunc: func() Config {
+ cfg := *NewConfig()
+ cfg.ExperimentalEnableLeaseCheckpoint = true
+ cfg.ExperimentalEnableLeaseCheckpointPersist = true
+ return cfg
+ },
+ },
+ {
+ name: "Enabling checkpoint leases persist without checkpointing itself should fail",
+ configFunc: func() Config {
+ cfg := *NewConfig()
+ cfg.ExperimentalEnableLeaseCheckpointPersist = true
+ return cfg
+ },
+ expectError: true,
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ cfg := tc.configFunc()
+ err := cfg.Validate()
+ if (err != nil) != tc.expectError {
+ t.Errorf("config.Validate() = %q, expected error: %v", err, tc.expectError)
+ }
+ })
+ }
+}
+
func TestLogRotation(t *testing.T) {
tests := []struct {
name string
@@ -304,6 +791,11 @@ func TestLogRotation(t *testing.T) {
logOutputs: []string{"stderr", "/tmp/path"},
logRotationConfig: `{"maxsize": 1}`,
},
+ {
+ name: "log output relative path",
+ logOutputs: []string{"stderr", "tmp/path"},
+ logRotationConfig: `{"maxsize": 1}`,
+ },
{
name: "no file targets",
logOutputs: []string{"stderr"},
@@ -361,6 +853,191 @@ func TestLogRotation(t *testing.T) {
if err == nil && tt.wantErr {
t.Errorf("test %q, expected error, got nil", tt.name)
}
+ if err == nil {
+ cfg.GetLogger().Info("test log")
+ }
+ })
+ }
+}
+
+func TestTLSVersionMinMax(t *testing.T) {
+ tests := []struct {
+ name string
+ givenTLSMinVersion string
+ givenTLSMaxVersion string
+ givenCipherSuites []string
+ expectError bool
+ expectedMinTLSVersion uint16
+ expectedMaxTLSVersion uint16
+ }{
+ {
+ name: "Minimum TLS version is set",
+ givenTLSMinVersion: "TLS1.3",
+ expectedMinTLSVersion: tls.VersionTLS13,
+ expectedMaxTLSVersion: 0,
+ },
+ {
+ name: "Maximum TLS version is set",
+ givenTLSMaxVersion: "TLS1.2",
+ expectedMinTLSVersion: 0,
+ expectedMaxTLSVersion: tls.VersionTLS12,
+ },
+ {
+ name: "Minimum and Maximum TLS versions are set",
+ givenTLSMinVersion: "TLS1.3",
+ givenTLSMaxVersion: "TLS1.3",
+ expectedMinTLSVersion: tls.VersionTLS13,
+ expectedMaxTLSVersion: tls.VersionTLS13,
+ },
+ {
+ name: "Minimum and Maximum TLS versions are set in reverse order",
+ givenTLSMinVersion: "TLS1.3",
+ givenTLSMaxVersion: "TLS1.2",
+ expectError: true,
+ },
+ {
+ name: "Invalid minimum TLS version",
+ givenTLSMinVersion: "invalid version",
+ expectError: true,
+ },
+ {
+ name: "Invalid maximum TLS version",
+ givenTLSMaxVersion: "invalid version",
+ expectError: true,
+ },
+ {
+ name: "Cipher suites configured for TLS 1.3",
+ givenTLSMinVersion: "TLS1.3",
+ givenCipherSuites: []string{"TLS_AES_128_GCM_SHA256"},
+ expectError: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cfg := NewConfig()
+ cfg.TlsMinVersion = tt.givenTLSMinVersion
+ cfg.TlsMaxVersion = tt.givenTLSMaxVersion
+ cfg.CipherSuites = tt.givenCipherSuites
+
+ err := cfg.Validate()
+ if err != nil {
+ assert.Truef(t, tt.expectError, "Validate() returned error while expecting success: %v", err)
+ return
+ }
+
+ updateMinMaxVersions(&cfg.PeerTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion)
+ updateMinMaxVersions(&cfg.ClientTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion)
+
+ assert.Equal(t, tt.expectedMinTLSVersion, cfg.PeerTLSInfo.MinVersion)
+ assert.Equal(t, tt.expectedMaxTLSVersion, cfg.PeerTLSInfo.MaxVersion)
+ assert.Equal(t, tt.expectedMinTLSVersion, cfg.ClientTLSInfo.MinVersion)
+ assert.Equal(t, tt.expectedMaxTLSVersion, cfg.ClientTLSInfo.MaxVersion)
+ })
+ }
+}
+
+func TestUndefinedAutoCompactionModeValidate(t *testing.T) {
+ cfg := *NewConfig()
+ cfg.AutoCompactionMode = ""
+ err := cfg.Validate()
+ require.Error(t, err)
+}
+
+func TestSetFeatureGatesFromExperimentalFlags(t *testing.T) {
+ testCases := []struct {
+ name string
+ featureGatesFlag string
+ experimentalStopGRPCServiceOnDefrag string
+ expectErr bool
+ expectedFeatures map[featuregate.Feature]bool
+ }{
+ {
+ name: "default",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: false,
+ "TestAlpha": false,
+ "TestBeta": true,
+ },
+ },
+ {
+ name: "cannot set experimental flag and feature gate to true at the same time",
+ featureGatesFlag: "StopGRPCServiceOnDefrag=true",
+ experimentalStopGRPCServiceOnDefrag: "true",
+ expectErr: true,
+ },
+ {
+ name: "cannot set experimental flag and feature gate to false at the same time",
+ featureGatesFlag: "StopGRPCServiceOnDefrag=false",
+ experimentalStopGRPCServiceOnDefrag: "false",
+ expectErr: true,
+ },
+ {
+ name: "cannot set experimental flag and feature gate to different values at the same time",
+ featureGatesFlag: "StopGRPCServiceOnDefrag=true",
+ experimentalStopGRPCServiceOnDefrag: "false",
+ expectErr: true,
+ },
+ {
+ name: "can set experimental flag and other feature gates",
+ featureGatesFlag: "TestAlpha=true,TestBeta=false",
+ experimentalStopGRPCServiceOnDefrag: "true",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: true,
+ "TestAlpha": true,
+ "TestBeta": false,
+ },
+ },
+ {
+ name: "can set feature gate when its experimental flag is not explicitly set",
+ featureGatesFlag: "TestAlpha=true,StopGRPCServiceOnDefrag=true",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: true,
+ "TestAlpha": true,
+ "TestBeta": true,
+ },
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ fg := features.NewDefaultServerFeatureGate("test", nil)
+ err := fg.(featuregate.MutableFeatureGate).Add(
+ map[featuregate.Feature]featuregate.FeatureSpec{
+ "TestAlpha": {Default: false, PreRelease: featuregate.Alpha},
+ "TestBeta": {Default: true, PreRelease: featuregate.Beta},
+ })
+ require.NoError(t, err)
+
+ fg.(featuregate.MutableFeatureGate).Set(tc.featureGatesFlag)
+ var getExperimentalFlagVal func(flagName string) *bool
+ if tc.experimentalStopGRPCServiceOnDefrag == "" {
+ // experimental flag is not explicitly set
+ getExperimentalFlagVal = func(flagName string) *bool {
+ return nil
+ }
+ } else {
+ // experimental flag is explicitly set
+ getExperimentalFlagVal = func(flagName string) *bool {
+ // only the experimental-stop-grpc-service-on-defrag flag can be set in this test.
+ if flagName != "experimental-stop-grpc-service-on-defrag" {
+ return nil
+ }
+ flagVal, parseErr := strconv.ParseBool(tc.experimentalStopGRPCServiceOnDefrag)
+ require.NoError(t, parseErr)
+ return &flagVal
+ }
+ }
+ err = SetFeatureGatesFromExperimentalFlags(fg, getExperimentalFlagVal, tc.featureGatesFlag)
+ if tc.expectErr {
+ require.Error(t, err)
+ return
+ }
+ require.NoError(t, err)
+ for k, v := range tc.expectedFeatures {
+ if fg.Enabled(k) != v {
+ t.Errorf("expected feature gate %s=%v, got %v", k, v, fg.Enabled(k))
+ }
+ }
})
}
}
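The tests above exercise the feature-gate flag format, a comma-separated list of Feature=bool pairs. A short sketch using the same packages the tests import (error handling on Set elided, as in the test):

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/v3/featuregate"
	"go.etcd.io/etcd/server/v3/features"
)

func main() {
	fg := features.NewDefaultServerFeatureGate("example", nil)
	fg.(featuregate.MutableFeatureGate).Set("StopGRPCServiceOnDefrag=true,CompactHashCheck=true")

	fmt.Println(fg.Enabled(features.StopGRPCServiceOnDefrag)) // true
	fmt.Println(fg.Enabled(features.CompactHashCheck))        // true
	fmt.Println(fg.Enabled(features.InitialCorruptCheck))     // false (default)
}
```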
diff --git a/server/embed/config_tracing.go b/server/embed/config_tracing.go
new file mode 100644
index 00000000000..7fd86e8610f
--- /dev/null
+++ b/server/embed/config_tracing.go
@@ -0,0 +1,138 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ tracesdk "go.opentelemetry.io/otel/sdk/trace"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.uber.org/zap"
+)
+
+const maxSamplingRatePerMillion = 1000000
+
+func validateTracingConfig(samplingRate int) error {
+ if samplingRate < 0 {
+ return fmt.Errorf("tracing sampling rate must be positive")
+ }
+ if samplingRate > maxSamplingRatePerMillion {
+ return fmt.Errorf("tracing sampling rate must be less than %d", maxSamplingRatePerMillion)
+ }
+
+ return nil
+}
+
+type tracingExporter struct {
+ exporter tracesdk.SpanExporter
+ opts []otelgrpc.Option
+ provider *tracesdk.TracerProvider
+}
+
+func newTracingExporter(ctx context.Context, cfg *Config) (*tracingExporter, error) {
+ exporter, err := otlptracegrpc.New(ctx,
+ otlptracegrpc.WithInsecure(),
+ otlptracegrpc.WithEndpoint(cfg.ExperimentalDistributedTracingAddress),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ res, err := resource.New(ctx,
+ resource.WithAttributes(
+ semconv.ServiceNameKey.String(cfg.ExperimentalDistributedTracingServiceName),
+ ),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ if resWithIDKey := determineResourceWithIDKey(cfg.ExperimentalDistributedTracingServiceInstanceID); resWithIDKey != nil {
+ // Merge resources into a new
+ // resource in case of duplicates.
+ res, err = resource.Merge(res, resWithIDKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ traceProvider := tracesdk.NewTracerProvider(
+ tracesdk.WithBatcher(exporter),
+ tracesdk.WithResource(res),
+ tracesdk.WithSampler(
+ tracesdk.ParentBased(determineSampler(cfg.ExperimentalDistributedTracingSamplingRatePerMillion)),
+ ),
+ )
+
+ options := []otelgrpc.Option{
+ otelgrpc.WithPropagators(
+ propagation.NewCompositeTextMapPropagator(
+ propagation.TraceContext{},
+ propagation.Baggage{},
+ ),
+ ),
+ otelgrpc.WithTracerProvider(
+ traceProvider,
+ ),
+ }
+
+ cfg.logger.Debug(
+ "distributed tracing enabled",
+ zap.String("address", cfg.ExperimentalDistributedTracingAddress),
+ zap.String("service-name", cfg.ExperimentalDistributedTracingServiceName),
+ zap.String("service-instance-id", cfg.ExperimentalDistributedTracingServiceInstanceID),
+ zap.Int("sampling-rate", cfg.ExperimentalDistributedTracingSamplingRatePerMillion),
+ )
+
+ return &tracingExporter{
+ exporter: exporter,
+ opts: options,
+ provider: traceProvider,
+ }, nil
+}
+
+func (te *tracingExporter) Close(ctx context.Context) {
+ if te.provider != nil {
+ te.provider.Shutdown(ctx)
+ }
+ if te.exporter != nil {
+ te.exporter.Shutdown(ctx)
+ }
+}
+
+func determineSampler(samplingRate int) tracesdk.Sampler {
+ sampler := tracesdk.NeverSample()
+ if samplingRate == 0 {
+ return sampler
+ }
+ return tracesdk.TraceIDRatioBased(float64(samplingRate) / float64(maxSamplingRatePerMillion))
+}
+
+// As the tracing service instance ID must be unique, it should
+// never use the empty default string value; it is only set
+// when it is a non-empty string.
+func determineResourceWithIDKey(serviceInstanceID string) *resource.Resource {
+ if serviceInstanceID != "" {
+ return resource.NewSchemaless(
+ (semconv.ServiceInstanceIDKey.String(serviceInstanceID)),
+ )
+ }
+ return nil
+}
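The sampler above maps a per-million rate onto OpenTelemetry's trace-ID ratio sampler; a quick sketch of the arithmetic, with the expected description taken from the test file that follows:

```go
package main

import (
	"fmt"

	tracesdk "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// A per-million sampling rate of 100 becomes a 0.0001 trace-ID ratio,
	// matching determineSampler above (1_000_000 == maxSamplingRatePerMillion).
	const samplingRatePerMillion = 100
	sampler := tracesdk.TraceIDRatioBased(float64(samplingRatePerMillion) / 1_000_000)
	fmt.Println(sampler.Description()) // TraceIDRatioBased{0.0001}
}
```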
diff --git a/server/embed/config_tracing_test.go b/server/embed/config_tracing_test.go
new file mode 100644
index 00000000000..0abbe4d1d42
--- /dev/null
+++ b/server/embed/config_tracing_test.go
@@ -0,0 +1,83 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "testing"
+)
+
+const neverSampleDescription = "AlwaysOffSampler"
+
+func TestDetermineSampler(t *testing.T) {
+ tests := []struct {
+ name string
+ sampleRate int
+ wantSamplerDescription string
+ }{
+ {
+ name: "sample rate is disabled",
+ sampleRate: 0,
+ wantSamplerDescription: neverSampleDescription,
+ },
+ {
+ name: "sample rate is 100",
+ sampleRate: 100,
+ wantSamplerDescription: "TraceIDRatioBased{0.0001}",
+ },
+ }
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ sampler := determineSampler(tc.sampleRate)
+ if tc.wantSamplerDescription != sampler.Description() {
+ t.Errorf("tracing sampler was not as expected; expected sampler: %#+v, got sampler: %#+v", tc.wantSamplerDescription, sampler.Description())
+ }
+ })
+ }
+}
+
+func TestTracingConfig(t *testing.T) {
+ tests := []struct {
+ name string
+ sampleRate int
+ wantErr bool
+ }{
+ {
+ name: "invalid - sample rate is less than 0",
+ sampleRate: -1,
+ wantErr: true,
+ },
+ {
+ name: "invalid - sample rate is more than allowed value",
+ sampleRate: maxSamplingRatePerMillion + 1,
+ wantErr: true,
+ },
+ {
+ name: "valid - sample rate is 100",
+ sampleRate: 100,
+ wantErr: false,
+ },
+ }
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ err := validateTracingConfig(tc.sampleRate)
+ if err == nil && tc.wantErr {
+ t.Errorf("expected error got (%v) error", err)
+ }
+ if err != nil && !tc.wantErr {
+ t.Errorf("expected no errors, got error: (%v)", err)
+ }
+ })
+ }
+}
diff --git a/server/embed/etcd.go b/server/embed/etcd.go
index 001302f991b..a40ef662499 100644
--- a/server/embed/etcd.go
+++ b/server/embed/etcd.go
@@ -16,46 +16,41 @@ package embed
import (
"context"
- "crypto/tls"
+ "errors"
"fmt"
- "io/ioutil"
+ "io"
defaultLog "log"
+ "math"
"net"
"net/http"
"net/url"
"runtime"
"sort"
"strconv"
+ "strings"
"sync"
"time"
+ grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
+ "github.com/soheilhy/cmux"
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/keepalive"
+
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/client/v3/credentials"
"go.etcd.io/etcd/pkg/v3/debugutil"
runtimeutil "go.etcd.io/etcd/pkg/v3/runtime"
"go.etcd.io/etcd/server/v3/config"
"go.etcd.io/etcd/server/v3/etcdserver"
"go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp"
"go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2http"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2v3"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v3client"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
+ "go.etcd.io/etcd/server/v3/features"
+ "go.etcd.io/etcd/server/v3/storage"
"go.etcd.io/etcd/server/v3/verify"
-
- grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
- "github.com/soheilhy/cmux"
- "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
- "go.opentelemetry.io/otel/exporters/otlp"
- "go.opentelemetry.io/otel/exporters/otlp/otlpgrpc"
- "go.opentelemetry.io/otel/propagation"
- "go.opentelemetry.io/otel/sdk/resource"
- tracesdk "go.opentelemetry.io/otel/sdk/trace"
- "go.opentelemetry.io/otel/semconv"
- "go.uber.org/zap"
- "google.golang.org/grpc"
- "google.golang.org/grpc/keepalive"
)
const (
@@ -130,7 +125,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
}
e.cfg.logger.Info(
"configuring peer listeners",
- zap.Strings("listen-peer-urls", e.cfg.getLPURLs()),
+ zap.Strings("listen-peer-urls", e.cfg.getListenPeerURLs()),
)
if e.Peers, err = configurePeerListeners(cfg); err != nil {
return e, err
@@ -138,7 +133,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
e.cfg.logger.Info(
"configuring client listeners",
- zap.Strings("listen-client-urls", e.cfg.getLCURLs()),
+ zap.Strings("listen-client-urls", e.cfg.getListenClientURLs()),
)
if e.sctxs, err = configureClientListeners(cfg); err != nil {
return e, err
@@ -157,7 +152,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
memberInitialized = false
urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd")
if err != nil {
- return e, fmt.Errorf("error setting up initial cluster: %v", err)
+ return e, fmt.Errorf("error setting up initial cluster: %w", err)
}
}
@@ -173,72 +168,84 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
backendFreelistType := parseBackendFreelistType(cfg.BackendFreelistType)
srvcfg := config.ServerConfig{
- Name: cfg.Name,
- ClientURLs: cfg.ACUrls,
- PeerURLs: cfg.APUrls,
- DataDir: cfg.Dir,
- DedicatedWALDir: cfg.WalDir,
- SnapshotCount: cfg.SnapshotCount,
- SnapshotCatchUpEntries: cfg.SnapshotCatchUpEntries,
- MaxSnapFiles: cfg.MaxSnapFiles,
- MaxWALFiles: cfg.MaxWalFiles,
- InitialPeerURLsMap: urlsmap,
- InitialClusterToken: token,
- DiscoveryURL: cfg.Durl,
- DiscoveryProxy: cfg.Dproxy,
- NewCluster: cfg.IsNewCluster(),
- PeerTLSInfo: cfg.PeerTLSInfo,
- TickMs: cfg.TickMs,
- ElectionTicks: cfg.ElectionTicks(),
- InitialElectionTickAdvance: cfg.InitialElectionTickAdvance,
- AutoCompactionRetention: autoCompactionRetention,
- AutoCompactionMode: cfg.AutoCompactionMode,
- QuotaBackendBytes: cfg.QuotaBackendBytes,
- BackendBatchLimit: cfg.BackendBatchLimit,
- BackendFreelistType: backendFreelistType,
- BackendBatchInterval: cfg.BackendBatchInterval,
- MaxTxnOps: cfg.MaxTxnOps,
- MaxRequestBytes: cfg.MaxRequestBytes,
- SocketOpts: cfg.SocketOpts,
- StrictReconfigCheck: cfg.StrictReconfigCheck,
- ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth,
- AuthToken: cfg.AuthToken,
- BcryptCost: cfg.BcryptCost,
- TokenTTL: cfg.AuthTokenTTL,
- CORS: cfg.CORS,
- HostWhitelist: cfg.HostWhitelist,
- InitialCorruptCheck: cfg.ExperimentalInitialCorruptCheck,
- CorruptCheckTime: cfg.ExperimentalCorruptCheckTime,
- PreVote: cfg.PreVote,
- Logger: cfg.logger,
- ForceNewCluster: cfg.ForceNewCluster,
- EnableGRPCGateway: cfg.EnableGRPCGateway,
- ExperimentalEnableDistributedTracing: cfg.ExperimentalEnableDistributedTracing,
- UnsafeNoFsync: cfg.UnsafeNoFsync,
- EnableLeaseCheckpoint: cfg.ExperimentalEnableLeaseCheckpoint,
- CompactionBatchLimit: cfg.ExperimentalCompactionBatchLimit,
- WatchProgressNotifyInterval: cfg.ExperimentalWatchProgressNotifyInterval,
- DowngradeCheckTime: cfg.ExperimentalDowngradeCheckTime,
- WarningApplyDuration: cfg.ExperimentalWarningApplyDuration,
- ExperimentalMemoryMlock: cfg.ExperimentalMemoryMlock,
- ExperimentalTxnModeWriteWithSharedBuffer: cfg.ExperimentalTxnModeWriteWithSharedBuffer,
+ Name: cfg.Name,
+ ClientURLs: cfg.AdvertiseClientUrls,
+ PeerURLs: cfg.AdvertisePeerUrls,
+ DataDir: cfg.Dir,
+ DedicatedWALDir: cfg.WalDir,
+ SnapshotCount: cfg.SnapshotCount,
+ SnapshotCatchUpEntries: cfg.SnapshotCatchUpEntries,
+ MaxSnapFiles: cfg.MaxSnapFiles,
+ MaxWALFiles: cfg.MaxWalFiles,
+ InitialPeerURLsMap: urlsmap,
+ InitialClusterToken: token,
+ DiscoveryURL: cfg.Durl,
+ DiscoveryProxy: cfg.Dproxy,
+ DiscoveryCfg: cfg.DiscoveryCfg,
+ NewCluster: cfg.IsNewCluster(),
+ PeerTLSInfo: cfg.PeerTLSInfo,
+ TickMs: cfg.TickMs,
+ ElectionTicks: cfg.ElectionTicks(),
+ InitialElectionTickAdvance: cfg.InitialElectionTickAdvance,
+ AutoCompactionRetention: autoCompactionRetention,
+ AutoCompactionMode: cfg.AutoCompactionMode,
+ QuotaBackendBytes: cfg.QuotaBackendBytes,
+ BackendBatchLimit: cfg.BackendBatchLimit,
+ BackendFreelistType: backendFreelistType,
+ BackendBatchInterval: cfg.BackendBatchInterval,
+ MaxTxnOps: cfg.MaxTxnOps,
+ MaxRequestBytes: cfg.MaxRequestBytes,
+ MaxConcurrentStreams: cfg.MaxConcurrentStreams,
+ SocketOpts: cfg.SocketOpts,
+ StrictReconfigCheck: cfg.StrictReconfigCheck,
+ ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth,
+ AuthToken: cfg.AuthToken,
+ BcryptCost: cfg.BcryptCost,
+ TokenTTL: cfg.AuthTokenTTL,
+ CORS: cfg.CORS,
+ HostWhitelist: cfg.HostWhitelist,
+ CorruptCheckTime: cfg.ExperimentalCorruptCheckTime,
+ CompactHashCheckTime: cfg.CompactHashCheckTime,
+ PreVote: cfg.PreVote,
+ Logger: cfg.logger,
+ ForceNewCluster: cfg.ForceNewCluster,
+ EnableGRPCGateway: cfg.EnableGRPCGateway,
+ ExperimentalEnableDistributedTracing: cfg.ExperimentalEnableDistributedTracing,
+ UnsafeNoFsync: cfg.UnsafeNoFsync,
+ EnableLeaseCheckpoint: cfg.ExperimentalEnableLeaseCheckpoint,
+ LeaseCheckpointPersist: cfg.ExperimentalEnableLeaseCheckpointPersist,
+ CompactionBatchLimit: cfg.ExperimentalCompactionBatchLimit,
+ CompactionSleepInterval: cfg.ExperimentalCompactionSleepInterval,
+ WatchProgressNotifyInterval: cfg.ExperimentalWatchProgressNotifyInterval,
+ DowngradeCheckTime: cfg.ExperimentalDowngradeCheckTime,
+ WarningApplyDuration: cfg.ExperimentalWarningApplyDuration,
+ WarningUnaryRequestDuration: cfg.WarningUnaryRequestDuration,
+ ExperimentalMemoryMlock: cfg.ExperimentalMemoryMlock,
ExperimentalBootstrapDefragThresholdMegabytes: cfg.ExperimentalBootstrapDefragThresholdMegabytes,
- V2Deprecation: cfg.V2DeprecationEffective(),
+ ExperimentalMaxLearners: cfg.ExperimentalMaxLearners,
+ V2Deprecation: cfg.V2DeprecationEffective(),
+ ExperimentalLocalAddress: cfg.InferLocalAddr(),
+ ServerFeatureGate: cfg.ServerFeatureGate,
}
if srvcfg.ExperimentalEnableDistributedTracing {
tctx := context.Background()
- tracingExporter, opts, err := e.setupTracing(tctx)
- if err != nil {
- return e, err
+ tracingExporter, terr := newTracingExporter(tctx, cfg)
+ if terr != nil {
+ return e, terr
}
- if tracingExporter == nil || len(opts) == 0 {
- return e, fmt.Errorf("error setting up distributed tracing")
+ e.tracingExporterShutdown = func() {
+ tracingExporter.Close(tctx)
}
- e.tracingExporterShutdown = func() { tracingExporter.Shutdown(tctx) }
- srvcfg.ExperimentalTracerOptions = opts
+ srvcfg.ExperimentalTracerOptions = tracingExporter.opts
+
+ e.cfg.logger.Info(
+ "distributed tracing setup enabled",
+ )
}
+ srvcfg.PeerTLSInfo.LocalAddr = srvcfg.ExperimentalLocalAddress
+
print(e.cfg.logger, *cfg, srvcfg, memberInitialized)
if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {
@@ -250,8 +257,8 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
// newly started member ("memberInitialized==false")
// does not need corruption check
- if memberInitialized {
- if err = e.Server.CheckInitialHashKV(); err != nil {
+ if memberInitialized && srvcfg.ServerFeatureGate.Enabled(features.InitialCorruptCheck) {
+ if err = e.Server.CorruptionChecker().InitialCheck(); err != nil {
// set "EtcdServer" to nil, so that it does not block on "EtcdServer.Close()"
// (nothing to close since rafthttp transports have not been started)
@@ -263,23 +270,21 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
}
e.Server.Start()
- if err = e.servePeers(); err != nil {
- return e, err
- }
- if err = e.serveClients(); err != nil {
- return e, err
- }
+ e.servePeers()
+
+ e.serveClients()
+
if err = e.serveMetrics(); err != nil {
return e, err
}
e.cfg.logger.Info(
"now serving peer/client/metrics",
- zap.String("local-member-id", e.Server.ID().String()),
- zap.Strings("initial-advertise-peer-urls", e.cfg.getAPURLs()),
- zap.Strings("listen-peer-urls", e.cfg.getLPURLs()),
- zap.Strings("advertise-client-urls", e.cfg.getACURLs()),
- zap.Strings("listen-client-urls", e.cfg.getLCURLs()),
+ zap.String("local-member-id", e.Server.MemberID().String()),
+ zap.Strings("initial-advertise-peer-urls", e.cfg.getAdvertisePeerURLs()),
+ zap.Strings("listen-peer-urls", e.cfg.getListenPeerURLs()),
+ zap.Strings("advertise-client-urls", e.cfg.getAdvertiseClientURLs()),
+ zap.Strings("listen-client-urls", e.cfg.getListenClientURLs()),
zap.Strings("listen-metrics-urls", e.cfg.getMetricsURLs()),
)
serving = true
@@ -301,7 +306,7 @@ func print(lg *zap.Logger, ec Config, sc config.ServerConfig, memberInitialized
quota := ec.QuotaBackendBytes
if quota == 0 {
- quota = etcdserver.DefaultQuotaBytes
+ quota = storage.DefaultQuotaBytes
}
lg.Info(
@@ -324,27 +329,53 @@ func print(lg *zap.Logger, ec Config, sc config.ServerConfig, memberInitialized
zap.String("election-timeout", fmt.Sprintf("%v", time.Duration(sc.ElectionTicks*int(sc.TickMs))*time.Millisecond)),
zap.Bool("initial-election-tick-advance", sc.InitialElectionTickAdvance),
zap.Uint64("snapshot-count", sc.SnapshotCount),
+ zap.Uint("max-wals", sc.MaxWALFiles),
+ zap.Uint("max-snapshots", sc.MaxSnapFiles),
zap.Uint64("snapshot-catchup-entries", sc.SnapshotCatchUpEntries),
- zap.Strings("initial-advertise-peer-urls", ec.getAPURLs()),
- zap.Strings("listen-peer-urls", ec.getLPURLs()),
- zap.Strings("advertise-client-urls", ec.getACURLs()),
- zap.Strings("listen-client-urls", ec.getLCURLs()),
+ zap.Strings("initial-advertise-peer-urls", ec.getAdvertisePeerURLs()),
+ zap.Strings("listen-peer-urls", ec.getListenPeerURLs()),
+ zap.Strings("advertise-client-urls", ec.getAdvertiseClientURLs()),
+ zap.Strings("listen-client-urls", ec.getListenClientURLs()),
zap.Strings("listen-metrics-urls", ec.getMetricsURLs()),
+ zap.Bool("experimental-set-member-localaddr", ec.ExperimentalSetMemberLocalAddr),
+ zap.String("experimental-local-address", sc.ExperimentalLocalAddress),
zap.Strings("cors", cors),
zap.Strings("host-whitelist", hss),
zap.String("initial-cluster", sc.InitialPeerURLsMap.String()),
zap.String("initial-cluster-state", ec.ClusterState),
zap.String("initial-cluster-token", sc.InitialClusterToken),
- zap.Int64("quota-size-bytes", quota),
+ zap.Int64("quota-backend-bytes", quota),
+ zap.Uint("max-request-bytes", sc.MaxRequestBytes),
+ zap.Uint32("max-concurrent-streams", sc.MaxConcurrentStreams),
+
zap.Bool("pre-vote", sc.PreVote),
+ zap.String(ServerFeatureGateFlagName, sc.ServerFeatureGate.String()),
zap.Bool("initial-corrupt-check", sc.InitialCorruptCheck),
zap.String("corrupt-check-time-interval", sc.CorruptCheckTime.String()),
+ zap.Duration("compact-check-time-interval", sc.CompactHashCheckTime),
zap.String("auto-compaction-mode", sc.AutoCompactionMode),
zap.Duration("auto-compaction-retention", sc.AutoCompactionRetention),
zap.String("auto-compaction-interval", sc.AutoCompactionRetention.String()),
zap.String("discovery-url", sc.DiscoveryURL),
zap.String("discovery-proxy", sc.DiscoveryProxy),
+
+ zap.String("discovery-token", sc.DiscoveryCfg.Token),
+ zap.String("discovery-endpoints", strings.Join(sc.DiscoveryCfg.Endpoints, ",")),
+ zap.String("discovery-dial-timeout", sc.DiscoveryCfg.DialTimeout.String()),
+ zap.String("discovery-request-timeout", sc.DiscoveryCfg.RequestTimeout.String()),
+ zap.String("discovery-keepalive-time", sc.DiscoveryCfg.KeepAliveTime.String()),
+ zap.String("discovery-keepalive-timeout", sc.DiscoveryCfg.KeepAliveTimeout.String()),
+ zap.Bool("discovery-insecure-transport", sc.DiscoveryCfg.Secure.InsecureTransport),
+ zap.Bool("discovery-insecure-skip-tls-verify", sc.DiscoveryCfg.Secure.InsecureSkipVerify),
+ zap.String("discovery-cert", sc.DiscoveryCfg.Secure.Cert),
+ zap.String("discovery-key", sc.DiscoveryCfg.Secure.Key),
+ zap.String("discovery-cacert", sc.DiscoveryCfg.Secure.Cacert),
+ zap.String("discovery-user", sc.DiscoveryCfg.Auth.Username),
+
zap.String("downgrade-check-interval", sc.DowngradeCheckTime.String()),
+ zap.Int("max-learners", sc.ExperimentalMaxLearners),
+
+ zap.String("v2-deprecation", string(ec.V2Deprecation)),
)
}
@@ -360,8 +391,8 @@ func (e *Etcd) Close() {
fields := []zap.Field{
zap.String("name", e.cfg.Name),
zap.String("data-dir", e.cfg.Dir),
- zap.Strings("advertise-peer-urls", e.cfg.getAPURLs()),
- zap.Strings("advertise-client-urls", e.cfg.getACURLs()),
+ zap.Strings("advertise-peer-urls", e.cfg.getAdvertisePeerURLs()),
+ zap.Strings("advertise-client-urls", e.cfg.getAdvertiseClientURLs()),
}
lg := e.GetLogger()
lg.Info("closing etcd server", fields...)
@@ -431,11 +462,16 @@ func (e *Etcd) Close() {
func stopServers(ctx context.Context, ss *servers) {
// first, close the http.Server
- ss.http.Shutdown(ctx)
- // do not grpc.Server.GracefulStop with TLS enabled etcd server
+ if ss.http != nil {
+ ss.http.Shutdown(ctx)
+ }
+ if ss.grpc == nil {
+ return
+ }
+ // do not grpc.Server.GracefulStop when grpc runs under http server
// See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531
// and https://github.com/etcd-io/etcd/issues/8916
- if ss.secure {
+ if ss.secure && ss.http != nil {
ss.grpc.Stop()
return
}
@@ -474,6 +510,7 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
if err = cfg.PeerSelfCert(); err != nil {
cfg.logger.Fatal("failed to get peer self-signed certs", zap.Error(err))
}
+ updateMinMaxVersions(&cfg.PeerTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion)
if !cfg.PeerTLSInfo.Empty() {
cfg.logger.Info(
"starting with peer TLS",
@@ -482,7 +519,7 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
)
}
- peers = make([]*peerListener, len(cfg.LPUrls))
+ peers = make([]*peerListener, len(cfg.ListenPeerUrls))
defer func() {
if err == nil {
return
@@ -491,7 +528,7 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
if peers[i] != nil && peers[i].close != nil {
cfg.logger.Warn(
"closing peer listener",
- zap.String("address", cfg.LPUrls[i].String()),
+ zap.String("address", cfg.ListenPeerUrls[i].String()),
zap.Error(err),
)
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
@@ -501,7 +538,7 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
}
}()
- for i, u := range cfg.LPUrls {
+ for i, u := range cfg.ListenPeerUrls {
if u.Scheme == "http" {
if !cfg.PeerTLSInfo.Empty() {
cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("peer-url", u.String()))
@@ -517,6 +554,7 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
transport.WithTimeout(rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout),
)
if err != nil {
+ cfg.logger.Error("creating peer listener failed", zap.Error(err))
return nil, err
}
// once serve, overwrite with 'http.Server.Shutdown'
@@ -528,24 +566,16 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
}
// configure peer handlers after rafthttp.Transport started
-func (e *Etcd) servePeers() (err error) {
+func (e *Etcd) servePeers() {
ph := etcdhttp.NewPeerHandler(e.GetLogger(), e.Server)
- var peerTLScfg *tls.Config
- if !e.cfg.PeerTLSInfo.Empty() {
- if peerTLScfg, err = e.cfg.PeerTLSInfo.ServerConfig(); err != nil {
- return err
- }
- }
for _, p := range e.Peers {
u := p.Listener.Addr().String()
- gs := v3rpc.Server(e.Server, peerTLScfg)
m := cmux.New(p.Listener)
- go gs.Serve(m.Match(cmux.HTTP2()))
srv := &http.Server{
- Handler: grpcHandlerFunc(gs, ph),
+ Handler: ph,
ReadTimeout: 5 * time.Minute,
- ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error
+ ErrorLog: defaultLog.New(io.Discard, "", 0), // do not log user error
}
go srv.Serve(m.Match(cmux.Any()))
p.serve = func() error {
@@ -563,7 +593,7 @@ func (e *Etcd) servePeers() (err error) {
"stopping serving peer traffic",
zap.String("address", u),
)
- stopServers(ctx, &servers{secure: peerTLScfg != nil, grpc: gs, http: srv})
+ srv.Shutdown(ctx)
e.cfg.logger.Info(
"stopped serving peer traffic",
zap.String("address", u),
@@ -584,7 +614,6 @@ func (e *Etcd) servePeers() (err error) {
e.errHandler(l.serve())
}(pl)
}
- return nil
}
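servePeers above multiplexes the peer listener with cmux and now routes everything to the HTTP handler. A minimal standalone sketch of that pattern; the address and handler are illustrative:

```go
package main

import (
	"net"
	"net/http"

	"github.com/soheilhy/cmux"
)

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}

	// cmux lets one listener serve several protocols; anything not matched
	// earlier falls through to the HTTP server, as in servePeers above.
	m := cmux.New(l)
	httpL := m.Match(cmux.Any())

	srv := &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("peer handler"))
	})}
	go srv.Serve(httpL)

	if err := m.Serve(); err != nil {
		panic(err)
	}
}
```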
func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
@@ -594,42 +623,59 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
if err = cfg.ClientSelfCert(); err != nil {
cfg.logger.Fatal("failed to get client self-signed certs", zap.Error(err))
}
+ updateMinMaxVersions(&cfg.ClientTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion)
if cfg.EnablePprof {
cfg.logger.Info("pprof is enabled", zap.String("path", debugutil.HTTPPrefixPProf))
}
sctxs = make(map[string]*serveCtx)
- for _, u := range cfg.LCUrls {
- sctx := newServeCtx(cfg.logger)
+ for _, u := range append(cfg.ListenClientUrls, cfg.ListenClientHttpUrls...) {
if u.Scheme == "http" || u.Scheme == "unix" {
if !cfg.ClientTLSInfo.Empty() {
- cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("client-url", u.String()))
+ cfg.logger.Warn("scheme is http or unix while key and cert files are present; ignoring key and cert files", zap.String("client-url", u.String()))
}
if cfg.ClientTLSInfo.ClientCertAuth {
- cfg.logger.Warn("scheme is HTTP while --client-cert-auth is enabled; ignoring client cert auth for this URL", zap.String("client-url", u.String()))
+ cfg.logger.Warn("scheme is http or unix while --client-cert-auth is enabled; ignoring client cert auth for this URL", zap.String("client-url", u.String()))
}
}
if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() {
return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPS scheme", u.String())
}
+ }
- network := "tcp"
- addr := u.Host
- if u.Scheme == "unix" || u.Scheme == "unixs" {
- network = "unix"
- addr = u.Host + u.Path
+ for _, u := range cfg.ListenClientUrls {
+ addr, secure, network := resolveURL(u)
+ sctx := sctxs[addr]
+ if sctx == nil {
+ sctx = newServeCtx(cfg.logger)
+ sctxs[addr] = sctx
}
+ sctx.secure = sctx.secure || secure
+ sctx.insecure = sctx.insecure || !secure
+ sctx.scheme = u.Scheme
+ sctx.addr = addr
sctx.network = network
+ }
+ for _, u := range cfg.ListenClientHttpUrls {
+ addr, secure, network := resolveURL(u)
- sctx.secure = u.Scheme == "https" || u.Scheme == "unixs"
- sctx.insecure = !sctx.secure
- if oldctx := sctxs[addr]; oldctx != nil {
- oldctx.secure = oldctx.secure || sctx.secure
- oldctx.insecure = oldctx.insecure || sctx.insecure
- continue
+ sctx := sctxs[addr]
+ if sctx == nil {
+ sctx = newServeCtx(cfg.logger)
+ sctxs[addr] = sctx
+ } else if !sctx.httpOnly {
+ return nil, fmt.Errorf("cannot bind both --listen-client-urls and --listen-client-http-urls on the same url %s", u.String())
}
+ sctx.secure = sctx.secure || secure
+ sctx.insecure = sctx.insecure || !secure
+ sctx.scheme = u.Scheme
+ sctx.addr = addr
+ sctx.network = network
+ sctx.httpOnly = true
+ }
- if sctx.l, err = transport.NewListenerWithOpts(addr, u.Scheme,
+ for _, sctx := range sctxs {
+ if sctx.l, err = transport.NewListenerWithOpts(sctx.addr, sctx.scheme,
transport.WithSocketOpts(&cfg.SocketOpts),
transport.WithSkipTLSInfoCheck(true),
); err != nil {
@@ -637,7 +683,6 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
}
// net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking
// hosts that disable ipv6. So, use the address given by the user.
- sctx.addr = addr
if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil {
if fdLimit <= reservedInternalFDNum {
@@ -650,23 +695,17 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum))
}
- if network == "tcp" {
- if sctx.l, err = transport.NewKeepAliveListener(sctx.l, network, nil); err != nil {
- return nil, err
- }
- }
-
- defer func(u url.URL) {
- if err == nil {
+ defer func(sctx *serveCtx) {
+ if err == nil || sctx.l == nil {
return
}
sctx.l.Close()
cfg.logger.Warn(
"closing peer listener",
- zap.String("address", u.Host),
+ zap.String("address", sctx.addr),
zap.Error(err),
)
- }(u)
+ }(sctx)
for k := range cfg.UserHandlers {
sctx.userHandlers[k] = cfg.UserHandlers[k]
}
@@ -677,12 +716,22 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
if cfg.LogLevel == "debug" {
sctx.registerTrace()
}
- sctxs[addr] = sctx
}
return sctxs, nil
}
-func (e *Etcd) serveClients() (err error) {
+func resolveURL(u url.URL) (addr string, secure bool, network string) {
+ addr = u.Host
+ network = "tcp"
+ if u.Scheme == "unix" || u.Scheme == "unixs" {
+ addr = u.Host + u.Path
+ network = "unix"
+ }
+ secure = u.Scheme == "https" || u.Scheme == "unixs"
+ return addr, secure, network
+}
+
+func (e *Etcd) serveClients() {
if !e.cfg.ClientTLSInfo.Empty() {
e.cfg.logger.Info(
"starting with client TLS",
@@ -692,27 +741,13 @@ func (e *Etcd) serveClients() (err error) {
}
// Start a client server goroutine for each listen address
- var h http.Handler
- if e.Config().EnableV2 {
- if e.Config().V2DeprecationEffective().IsAtLeast(config.V2_DEPR_1_WRITE_ONLY) {
- return fmt.Errorf("--enable-v2 and --v2-deprecation=%s are mutually exclusive", e.Config().V2DeprecationEffective())
- }
- e.cfg.logger.Warn("Flag `enable-v2` is deprecated and will get removed in etcd 3.6.")
- if len(e.Config().ExperimentalEnableV2V3) > 0 {
- e.cfg.logger.Warn("Flag `experimental-enable-v2v3` is deprecated and will get removed in etcd 3.6.")
- srv := v2v3.NewServer(e.cfg.logger, v3client.New(e.Server), e.cfg.ExperimentalEnableV2V3)
- h = v2http.NewClientHandler(e.GetLogger(), srv, e.Server.Cfg.ReqTimeout())
- } else {
- h = v2http.NewClientHandler(e.GetLogger(), e.Server, e.Server.Cfg.ReqTimeout())
- }
- } else {
- mux := http.NewServeMux()
- etcdhttp.HandleBasic(e.cfg.logger, mux, e.Server)
- etcdhttp.HandleMetricsHealthForV3(e.cfg.logger, mux, e.Server)
- h = mux
- }
+ mux := http.NewServeMux()
+ etcdhttp.HandleDebug(mux)
+ etcdhttp.HandleVersion(mux, e.Server)
+ etcdhttp.HandleMetrics(mux)
+ etcdhttp.HandleHealth(e.cfg.logger, mux, e.Server)
- gopts := []grpc.ServerOption{}
+ var gopts []grpc.ServerOption
if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) {
gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
MinTime: e.cfg.GRPCKeepAliveMinTime,
@@ -726,14 +761,84 @@ func (e *Etcd) serveClients() (err error) {
Timeout: e.cfg.GRPCKeepAliveTimeout,
}))
}
+ gopts = append(gopts, e.cfg.GRPCAdditionalServerOptions...)
+
+ splitHTTP := false
+ for _, sctx := range e.sctxs {
+ if sctx.httpOnly {
+ splitHTTP = true
+ }
+ }
// start client servers in each goroutine
for _, sctx := range e.sctxs {
go func(s *serveCtx) {
- e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, gopts...))
+ e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, mux, e.errHandler, e.grpcGatewayDial(splitHTTP), splitHTTP, gopts...))
}(sctx)
}
- return nil
+}
+
+func (e *Etcd) grpcGatewayDial(splitHTTP bool) (grpcDial func(ctx context.Context) (*grpc.ClientConn, error)) {
+ if !e.cfg.EnableGRPCGateway {
+ return nil
+ }
+ sctx := e.pickGRPCGatewayServeContext(splitHTTP)
+ addr := sctx.addr
+ if network := sctx.network; network == "unix" {
+ // explicitly define unix network for gRPC socket support
+ addr = fmt.Sprintf("%s:%s", network, addr)
+ }
+ opts := []grpc.DialOption{grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32))}
+ if sctx.secure {
+ tlscfg, tlsErr := e.cfg.ClientTLSInfo.ServerConfig()
+ if tlsErr != nil {
+ return func(ctx context.Context) (*grpc.ClientConn, error) {
+ return nil, tlsErr
+ }
+ }
+ dtls := tlscfg.Clone()
+ // trust local server
+ dtls.InsecureSkipVerify = true
+ opts = append(opts, grpc.WithTransportCredentials(credentials.NewTransportCredential(dtls)))
+ } else {
+ opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ }
+
+ return func(ctx context.Context) (*grpc.ClientConn, error) {
+ conn, err := grpc.DialContext(ctx, addr, opts...)
+ if err != nil {
+ sctx.lg.Error("grpc gateway failed to dial", zap.String("addr", addr), zap.Error(err))
+ return nil, err
+ }
+ return conn, err
+ }
+}
+
+func (e *Etcd) pickGRPCGatewayServeContext(splitHTTP bool) *serveCtx {
+ for _, sctx := range e.sctxs {
+ if !splitHTTP || !sctx.httpOnly {
+ return sctx
+ }
+ }
+ panic("Expect at least one context able to serve grpc")
+}
+
+var ErrMissingClientTLSInfoForMetricsURL = errors.New("client TLS key/cert (--cert-file, --key-file) must be provided for metrics secure url")
+
+func (e *Etcd) createMetricsListener(murl url.URL) (net.Listener, error) {
+ tlsInfo := &e.cfg.ClientTLSInfo
+ switch murl.Scheme {
+ case "http":
+ tlsInfo = nil
+ case "https", "unixs":
+ if e.cfg.ClientTLSInfo.Empty() {
+ return nil, ErrMissingClientTLSInfoForMetricsURL
+ }
+ }
+ return transport.NewListenerWithOpts(murl.Host, murl.Scheme,
+ transport.WithTLSInfo(tlsInfo),
+ transport.WithSocketOpts(&e.cfg.SocketOpts),
+ )
}
func (e *Etcd) serveMetrics() (err error) {
@@ -743,17 +848,11 @@ func (e *Etcd) serveMetrics() (err error) {
if len(e.cfg.ListenMetricsUrls) > 0 {
metricsMux := http.NewServeMux()
- etcdhttp.HandleMetricsHealthForV3(e.cfg.logger, metricsMux, e.Server)
+ etcdhttp.HandleMetrics(metricsMux)
+ etcdhttp.HandleHealth(e.cfg.logger, metricsMux, e.Server)
for _, murl := range e.cfg.ListenMetricsUrls {
- tlsInfo := &e.cfg.ClientTLSInfo
- if murl.Scheme == "http" {
- tlsInfo = nil
- }
- ml, err := transport.NewListenerWithOpts(murl.Host, murl.Scheme,
- transport.WithTLSInfo(tlsInfo),
- transport.WithSocketOpts(&e.cfg.SocketOpts),
- )
+ ml, err := e.createMetricsListener(murl)
if err != nil {
return err
}
@@ -771,6 +870,9 @@ func (e *Etcd) serveMetrics() (err error) {
}
func (e *Etcd) errHandler(err error) {
+ if err != nil {
+ e.GetLogger().Error("setting up serving from embedded etcd failed.", zap.Error(err))
+ }
select {
case <-e.stopc:
return
@@ -798,62 +900,15 @@ func parseCompactionRetention(mode, retention string) (ret time.Duration, err er
ret = time.Duration(int64(h))
case CompactorModePeriodic:
ret = time.Duration(int64(h)) * time.Hour
+ case "":
+ return 0, errors.New("--auto-compaction-mode is undefined")
}
} else {
// periodic compaction
ret, err = time.ParseDuration(retention)
if err != nil {
- return 0, fmt.Errorf("error parsing CompactionRetention: %v", err)
+ return 0, fmt.Errorf("error parsing CompactionRetention: %w", err)
}
}
return ret, nil
}
-
-func (e *Etcd) setupTracing(ctx context.Context) (exporter tracesdk.SpanExporter, options []otelgrpc.Option, err error) {
- exporter, err = otlp.NewExporter(ctx,
- otlpgrpc.NewDriver(
- otlpgrpc.WithEndpoint(e.cfg.ExperimentalDistributedTracingAddress),
- otlpgrpc.WithInsecure(),
- ))
- if err != nil {
- return nil, nil, err
- }
- res := resource.NewWithAttributes(
- semconv.ServiceNameKey.String(e.cfg.ExperimentalDistributedTracingServiceName),
- )
- // As Tracing service Instance ID must be unique, it should
- // never use the empty default string value, so we only set it
- // if it's a non empty string.
- if e.cfg.ExperimentalDistributedTracingServiceInstanceID != "" {
- resWithIDKey := resource.NewWithAttributes(
- (semconv.ServiceInstanceIDKey.String(e.cfg.ExperimentalDistributedTracingServiceInstanceID)),
- )
- // Merge resources to combine into a new
- // resource in case of duplicates.
- res = resource.Merge(res, resWithIDKey)
- }
-
- options = append(options,
- otelgrpc.WithPropagators(
- propagation.NewCompositeTextMapPropagator(
- propagation.TraceContext{},
- propagation.Baggage{},
- ),
- ),
- otelgrpc.WithTracerProvider(
- tracesdk.NewTracerProvider(
- tracesdk.WithBatcher(exporter),
- tracesdk.WithResource(res),
- ),
- ),
- )
-
- e.cfg.logger.Info(
- "distributed tracing enabled",
- zap.String("distributed-tracing-address", e.cfg.ExperimentalDistributedTracingAddress),
- zap.String("distributed-tracing-service-name", e.cfg.ExperimentalDistributedTracingServiceName),
- zap.String("distributed-tracing-service-instance-id", e.cfg.ExperimentalDistributedTracingServiceInstanceID),
- )
-
- return exporter, options, err
-}
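
The embed/etcd.go changes above are the heart of the split-listener feature: `ListenClientUrls` can now carry gRPC (or mixed) client traffic while `ListenClientHttpUrls` carries HTTP-only traffic, with `resolveURL` normalizing each URL into an address, scheme security, and network. Below is a rough sketch, not part of this patch, of how an embedding application might use the new field. `embed.NewConfig`, `embed.StartEtcd`, `Err`, and `ReadyNotify` are the existing embed API; the ports and data directory are arbitrary placeholders.

```go
package main

import (
	"log"
	"net/url"

	"go.etcd.io/etcd/server/v3/embed"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Dir = "default.etcd" // placeholder data directory

	// One listener dedicated to gRPC, one dedicated to HTTP
	// (grpc-gateway, metrics, health). Ports are illustrative.
	grpcURL := url.URL{Scheme: "http", Host: "localhost:2379"}
	httpURL := url.URL{Scheme: "http", Host: "localhost:2382"}

	cfg.ListenClientUrls = []url.URL{grpcURL}
	cfg.ListenClientHttpUrls = []url.URL{httpURL}
	cfg.AdvertiseClientUrls = []url.URL{grpcURL}

	e, err := embed.StartEtcd(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()

	<-e.Server.ReadyNotify()
	log.Println("embedded etcd is ready")
	<-e.Err() // block until the server reports an error
}
```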
diff --git a/server/embed/etcd_test.go b/server/embed/etcd_test.go
new file mode 100644
index 00000000000..206cbb737b4
--- /dev/null
+++ b/server/embed/etcd_test.go
@@ -0,0 +1,39 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "errors"
+ "net/url"
+ "testing"
+
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+)
+
+func TestEmptyClientTLSInfo_createMetricsListener(t *testing.T) {
+ e := &Etcd{
+ cfg: Config{
+ ClientTLSInfo: transport.TLSInfo{},
+ },
+ }
+
+ murl := url.URL{
+ Scheme: "https",
+ Host: "localhost:8080",
+ }
+ if _, err := e.createMetricsListener(murl); !errors.Is(err, ErrMissingClientTLSInfoForMetricsURL) {
+ t.Fatalf("expected error %v, got %v", ErrMissingClientTLSInfoForMetricsURL, err)
+ }
+}
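
Since createMetricsListener treats "unixs" the same as "https", a companion test along the following lines could cover that branch as well. This is a suggestion, not part of the patch, and assumes it lives in the same file with the same imports.

```go
// Hypothetical companion test: the "unixs" scheme takes the same branch as
// "https" in createMetricsListener, so an empty ClientTLSInfo should yield
// the same sentinel error.
func TestEmptyClientTLSInfo_createMetricsListenerUnixs(t *testing.T) {
	e := &Etcd{
		cfg: Config{
			ClientTLSInfo: transport.TLSInfo{},
		},
	}

	murl := url.URL{
		Scheme: "unixs",
		Host:   "localhost:8080",
	}
	if _, err := e.createMetricsListener(murl); !errors.Is(err, ErrMissingClientTLSInfoForMetricsURL) {
		t.Fatalf("expected error %v, got %v", ErrMissingClientTLSInfoForMetricsURL, err)
	}
}
```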
diff --git a/server/embed/serve.go b/server/embed/serve.go
index 23e115d6a5e..a73ba58b1b5 100644
--- a/server/embed/serve.go
+++ b/server/embed/serve.go
@@ -16,18 +16,28 @@ package embed
import (
"context"
+ "errors"
"fmt"
- "io/ioutil"
+ "io"
defaultLog "log"
"net"
"net/http"
"strings"
+ gw "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+ "github.com/soheilhy/cmux"
+ "github.com/tmc/grpc-websocket-proxy/wsproxy"
+ "go.uber.org/zap"
+ "golang.org/x/net/http2"
+ "golang.org/x/net/trace"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/encoding/protojson"
+
etcdservergw "go.etcd.io/etcd/api/v3/etcdserverpb/gw"
"go.etcd.io/etcd/client/pkg/v3/transport"
- "go.etcd.io/etcd/client/v3/credentials"
"go.etcd.io/etcd/pkg/v3/debugutil"
"go.etcd.io/etcd/pkg/v3/httputil"
+ "go.etcd.io/etcd/server/v3/config"
"go.etcd.io/etcd/server/v3/etcdserver"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3client"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3election"
@@ -37,22 +47,18 @@ import (
"go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
v3lockgw "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/gw"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
-
- gw "github.com/grpc-ecosystem/grpc-gateway/runtime"
- "github.com/soheilhy/cmux"
- "github.com/tmc/grpc-websocket-proxy/wsproxy"
- "go.uber.org/zap"
- "golang.org/x/net/trace"
- "google.golang.org/grpc"
)
type serveCtx struct {
- lg *zap.Logger
- l net.Listener
+ lg *zap.Logger
+ l net.Listener
+
+ scheme string
addr string
network string
secure bool
insecure bool
+ httpOnly bool
ctx context.Context
cancel context.CancelFunc
@@ -90,108 +96,180 @@ func (sctx *serveCtx) serve(
tlsinfo *transport.TLSInfo,
handler http.Handler,
errHandler func(error),
- gopts ...grpc.ServerOption) (err error) {
- logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0)
- <-s.ReadyNotify()
+ grpcDialForRestGatewayBackends func(ctx context.Context) (*grpc.ClientConn, error),
+ splitHTTP bool,
+ gopts ...grpc.ServerOption,
+) (err error) {
+ logger := defaultLog.New(io.Discard, "etcdhttp", 0)
+
+ select {
+ case <-s.StoppingNotify():
+ return errors.New("server is stopping")
+ case <-s.ReadyNotify():
+ }
sctx.lg.Info("ready to serve client requests")
m := cmux.New(sctx.l)
+ var server func() error
+ onlyGRPC := splitHTTP && !sctx.httpOnly
+ onlyHTTP := splitHTTP && sctx.httpOnly
+ grpcEnabled := !onlyHTTP
+ httpEnabled := !onlyGRPC
+
v3c := v3client.New(s)
servElection := v3election.NewElectionServer(v3c)
servLock := v3lock.NewLockServer(v3c)
- var gs *grpc.Server
- defer func() {
- if err != nil && gs != nil {
- gs.Stop()
+ // Make sure serversC is closed even if we prematurely exit the function.
+ defer close(sctx.serversC)
+ var gwmux *gw.ServeMux
+ if s.Cfg.EnableGRPCGateway {
+ // The gRPC gateway connects to the gRPC server via the connection provided by the gRPC dial function.
+ gwmux, err = sctx.registerGateway(grpcDialForRestGatewayBackends)
+ if err != nil {
+ sctx.lg.Error("registerGateway failed", zap.Error(err))
+ return err
}
- }()
+ }
+ var traffic string
+ switch {
+ case onlyGRPC:
+ traffic = "grpc"
+ case onlyHTTP:
+ traffic = "http"
+ default:
+ traffic = "grpc+http"
+ }
if sctx.insecure {
- gs = v3rpc.Server(s, nil, gopts...)
- v3electionpb.RegisterElectionServer(gs, servElection)
- v3lockpb.RegisterLockServer(gs, servLock)
- if sctx.serviceRegister != nil {
- sctx.serviceRegister(gs)
- }
- grpcl := m.Match(cmux.HTTP2())
- go func() { errHandler(gs.Serve(grpcl)) }()
-
- var gwmux *gw.ServeMux
- if s.Cfg.EnableGRPCGateway {
- gwmux, err = sctx.registerGateway([]grpc.DialOption{grpc.WithInsecure()})
- if err != nil {
+ var gs *grpc.Server
+ var srv *http.Server
+ if httpEnabled {
+ httpmux := sctx.createMux(gwmux, handler)
+ srv = &http.Server{
+ Handler: createAccessController(sctx.lg, s, httpmux),
+ ErrorLog: logger, // do not log user error
+ }
+ if err = configureHTTPServer(srv, s.Cfg); err != nil {
+ sctx.lg.Error("Configure http server failed", zap.Error(err))
return err
}
}
-
- httpmux := sctx.createMux(gwmux, handler)
-
- srvhttp := &http.Server{
- Handler: createAccessController(sctx.lg, s, httpmux),
- ErrorLog: logger, // do not log user error
+ if grpcEnabled {
+ gs = v3rpc.Server(s, nil, nil, gopts...)
+ v3electionpb.RegisterElectionServer(gs, servElection)
+ v3lockpb.RegisterLockServer(gs, servLock)
+ if sctx.serviceRegister != nil {
+ sctx.serviceRegister(gs)
+ }
+ defer func(gs *grpc.Server) {
+ if err != nil {
+ sctx.lg.Warn("stopping insecure grpc server due to error", zap.Error(err))
+ gs.Stop()
+ sctx.lg.Warn("stopped insecure grpc server due to error", zap.Error(err))
+ }
+ }(gs)
+ }
+ if onlyGRPC {
+ server = func() error {
+ return gs.Serve(sctx.l)
+ }
+ } else {
+ server = m.Serve
+
+ httpl := m.Match(cmux.HTTP1())
+ go func(srvhttp *http.Server, tlsLis net.Listener) {
+ errHandler(srvhttp.Serve(tlsLis))
+ }(srv, httpl)
+
+ if grpcEnabled {
+ grpcl := m.Match(cmux.HTTP2())
+ go func(gs *grpc.Server, l net.Listener) {
+ errHandler(gs.Serve(l))
+ }(gs, grpcl)
+ }
}
- httpl := m.Match(cmux.HTTP1())
- go func() { errHandler(srvhttp.Serve(httpl)) }()
- sctx.serversC <- &servers{grpc: gs, http: srvhttp}
+ sctx.serversC <- &servers{grpc: gs, http: srv}
sctx.lg.Info(
"serving client traffic insecurely; this is strongly discouraged!",
+ zap.String("traffic", traffic),
zap.String("address", sctx.l.Addr().String()),
)
}
if sctx.secure {
+ var gs *grpc.Server
+ var srv *http.Server
+
tlscfg, tlsErr := tlsinfo.ServerConfig()
if tlsErr != nil {
return tlsErr
}
- gs = v3rpc.Server(s, tlscfg, gopts...)
- v3electionpb.RegisterElectionServer(gs, servElection)
- v3lockpb.RegisterLockServer(gs, servLock)
- if sctx.serviceRegister != nil {
- sctx.serviceRegister(gs)
+
+ if grpcEnabled {
+ gs = v3rpc.Server(s, tlscfg, nil, gopts...)
+ v3electionpb.RegisterElectionServer(gs, servElection)
+ v3lockpb.RegisterLockServer(gs, servLock)
+ if sctx.serviceRegister != nil {
+ sctx.serviceRegister(gs)
+ }
+ defer func(gs *grpc.Server) {
+ if err != nil {
+ sctx.lg.Warn("stopping secure grpc server due to error", zap.Error(err))
+ gs.Stop()
+ sctx.lg.Warn("stopped secure grpc server due to error", zap.Error(err))
+ }
+ }(gs)
}
- handler = grpcHandlerFunc(gs, handler)
-
- var gwmux *gw.ServeMux
- if s.Cfg.EnableGRPCGateway {
- dtls := tlscfg.Clone()
- // trust local server
- dtls.InsecureSkipVerify = true
- bundle := credentials.NewBundle(credentials.Config{TLSConfig: dtls})
- opts := []grpc.DialOption{grpc.WithTransportCredentials(bundle.TransportCredentials())}
- gwmux, err = sctx.registerGateway(opts)
- if err != nil {
+ if httpEnabled {
+ if grpcEnabled {
+ handler = grpcHandlerFunc(gs, handler)
+ }
+ httpmux := sctx.createMux(gwmux, handler)
+
+ srv = &http.Server{
+ Handler: createAccessController(sctx.lg, s, httpmux),
+ TLSConfig: tlscfg,
+ ErrorLog: logger, // do not log user error
+ }
+ if err := configureHTTPServer(srv, s.Cfg); err != nil {
+ sctx.lg.Error("Configure https server failed", zap.Error(err))
return err
}
}
- var tlsl net.Listener
- tlsl, err = transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo)
- if err != nil {
- return err
- }
- // TODO: add debug flag; enable logging when debug flag is set
- httpmux := sctx.createMux(gwmux, handler)
+ if onlyGRPC {
+ server = func() error { return gs.Serve(sctx.l) }
+ } else {
+ server = m.Serve
- srv := &http.Server{
- Handler: createAccessController(sctx.lg, s, httpmux),
- TLSConfig: tlscfg,
- ErrorLog: logger, // do not log user error
+ tlsl, err := transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo)
+ if err != nil {
+ return err
+ }
+ go func(srvhttp *http.Server, tlsl net.Listener) {
+ errHandler(srvhttp.Serve(tlsl))
+ }(srv, tlsl)
}
- go func() { errHandler(srv.Serve(tlsl)) }()
sctx.serversC <- &servers{secure: true, grpc: gs, http: srv}
sctx.lg.Info(
"serving client traffic securely",
+ zap.String("traffic", traffic),
zap.String("address", sctx.l.Addr().String()),
)
}
- close(sctx.serversC)
- return m.Serve()
+ return server()
+}
+
+func configureHTTPServer(srv *http.Server, cfg config.ServerConfig) error {
+ // TODO(ahrtr): should we support configuring other parameters in the future as well?
+ return http2.ConfigureServer(srv, &http2.Server{
+ MaxConcurrentStreams: cfg.MaxConcurrentStreams,
+ })
}
// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC
@@ -213,20 +291,30 @@ func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Ha
type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error
-func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) {
+func (sctx *serveCtx) registerGateway(dial func(ctx context.Context) (*grpc.ClientConn, error)) (*gw.ServeMux, error) {
ctx := sctx.ctx
- addr := sctx.addr
- if network := sctx.network; network == "unix" {
- // explicitly define unix network for gRPC socket support
- addr = fmt.Sprintf("%s://%s", network, addr)
- }
-
- conn, err := grpc.DialContext(ctx, addr, opts...)
+ conn, err := dial(ctx)
if err != nil {
return nil, err
}
- gwmux := gw.NewServeMux()
+
+ // Refer to https://grpc-ecosystem.github.io/grpc-gateway/docs/mapping/customizing_your_gateway/
+ gwmux := gw.NewServeMux(
+ gw.WithMarshalerOption(gw.MIMEWildcard,
+ &gw.HTTPBodyMarshaler{
+ Marshaler: &gw.JSONPb{
+ MarshalOptions: protojson.MarshalOptions{
+ UseProtoNames: true,
+ EmitUnpopulated: false,
+ },
+ UnmarshalOptions: protojson.UnmarshalOptions{
+ DiscardUnknown: true,
+ },
+ },
+ },
+ ),
+ )
handlers := []registerHandlerFunc{
etcdservergw.RegisterKVHandler,
@@ -257,6 +345,18 @@ func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, err
return gwmux, nil
}
+type wsProxyZapLogger struct {
+ *zap.Logger
+}
+
+func (w wsProxyZapLogger) Warnln(i ...any) {
+ w.Warn(fmt.Sprint(i...))
+}
+
+func (w wsProxyZapLogger) Debugln(i ...any) {
+ w.Debug(fmt.Sprint(i...))
+}
+
func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux {
httpmux := http.NewServeMux()
for path, h := range sctx.userHandlers {
@@ -276,6 +376,7 @@ func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.
},
),
wsproxy.WithMaxRespBodyBufferSize(0x7fffffff),
+ wsproxy.WithLogger(wsProxyZapLogger{sctx.lg}),
),
)
}
@@ -342,7 +443,7 @@ func (ac *accessController) ServeHTTP(rw http.ResponseWriter, req *http.Request)
addCORSHeader(rw, origin)
}
- if req.Method == "OPTIONS" {
+ if req.Method == http.MethodOptions {
rw.WriteHeader(http.StatusOK)
return
}
@@ -392,7 +493,7 @@ func (ch *corsHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
addCORSHeader(rw, origin)
}
- if req.Method == "OPTIONS" {
+ if req.Method == http.MethodOptions {
rw.WriteHeader(http.StatusOK)
return
}
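
For reference, the rewritten serve() above still multiplexes gRPC and HTTP traffic on a single listener with cmux whenever the listener is not dedicated via --listen-client-http-urls. The following is a minimal standalone sketch of that cmux pattern; all names are illustrative and this is not etcd code.

```go
package main

import (
	"log"
	"net"
	"net/http"

	"github.com/soheilhy/cmux"
	"google.golang.org/grpc"
)

func main() {
	l, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	m := cmux.New(l)

	// Matchers are tried in registration order; HTTP/1 requests and the
	// HTTP/2 connection preface are disjoint, so both land where expected.
	httpL := m.Match(cmux.HTTP1()) // REST gateway, metrics, health, etc.
	grpcL := m.Match(cmux.HTTP2()) // gRPC rides on HTTP/2

	gs := grpc.NewServer()
	hs := &http.Server{Handler: http.NewServeMux()}

	go func() { _ = hs.Serve(httpL) }()
	go func() { _ = gs.Serve(grpcL) }()

	// m.Serve blocks, feeding accepted connections to the matched listeners.
	if err := m.Serve(); err != nil {
		log.Fatal(err)
	}
}
```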
diff --git a/server/embed/serve_test.go b/server/embed/serve_test.go
index aada585f07e..6150beeecf1 100644
--- a/server/embed/serve_test.go
+++ b/server/embed/serve_test.go
@@ -15,8 +15,8 @@
package embed
import (
+ "errors"
"fmt"
- "io/ioutil"
"net/url"
"os"
"testing"
@@ -26,11 +26,7 @@ import (
// TestStartEtcdWrongToken ensures that StartEtcd with wrong configs returns with error.
func TestStartEtcdWrongToken(t *testing.T) {
- tdir, err := ioutil.TempDir(t.TempDir(), "token-test")
-
- if err != nil {
- t.Fatal(err)
- }
+ tdir := t.TempDir()
cfg := NewConfig()
@@ -38,8 +34,8 @@ func TestStartEtcdWrongToken(t *testing.T) {
urls := newEmbedURLs(2)
curls := []url.URL{urls[0]}
purls := []url.URL{urls[1]}
- cfg.LCUrls, cfg.ACUrls = curls, curls
- cfg.LPUrls, cfg.APUrls = purls, purls
+ cfg.ListenClientUrls, cfg.AdvertiseClientUrls = curls, curls
+ cfg.ListenPeerUrls, cfg.AdvertisePeerUrls = purls, purls
cfg.InitialCluster = ""
for i := range purls {
cfg.InitialCluster += ",default=" + purls[i].String()
@@ -48,7 +44,7 @@ func TestStartEtcdWrongToken(t *testing.T) {
cfg.Dir = tdir
cfg.AuthToken = "wrong-token"
- if _, err = StartEtcd(cfg); err != auth.ErrInvalidAuthOpts {
+ if _, err := StartEtcd(cfg); !errors.Is(err, auth.ErrInvalidAuthOpts) {
t.Fatalf("expected %v, got %v", auth.ErrInvalidAuthOpts, err)
}
}
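
A recurring change in these tests (and in config.go's switch from %v to %w) is replacing direct error comparison with errors.Is. A tiny illustration, not taken from this patch, of why that matters once errors are wrapped:

```go
package main

import (
	"errors"
	"fmt"
)

var errSentinel = errors.New("sentinel")

func main() {
	// Wrapping with %w preserves the sentinel in the error chain.
	wrapped := fmt.Errorf("starting etcd failed: %w", errSentinel)

	fmt.Println(wrapped == errSentinel)          // false: distinct error values
	fmt.Println(errors.Is(wrapped, errSentinel)) // true: unwraps the chain
}
```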
diff --git a/server/embed/util.go b/server/embed/util.go
index ad461534551..32efbe67a46 100644
--- a/server/embed/util.go
+++ b/server/embed/util.go
@@ -17,13 +17,13 @@ package embed
import (
"path/filepath"
- "go.etcd.io/etcd/server/v3/wal"
+ "go.etcd.io/etcd/server/v3/storage/wal"
)
func isMemberInitialized(cfg *Config) bool {
- waldir := cfg.WalDir
- if waldir == "" {
- waldir = filepath.Join(cfg.Dir, "member", "wal")
+ walDir := cfg.WalDir
+ if walDir == "" {
+ walDir = filepath.Join(cfg.Dir, "member", "wal")
}
- return wal.Exist(waldir)
+ return wal.Exist(walDir)
}
diff --git a/server/etcdmain/config.go b/server/etcdmain/config.go
index f9c91d9f9c5..ca651e1669c 100644
--- a/server/etcdmain/config.go
+++ b/server/etcdmain/config.go
@@ -17,12 +17,14 @@
package etcdmain
import (
+ "errors"
"flag"
"fmt"
- "io/ioutil"
- "log"
"os"
"runtime"
+ "time"
+
+ "go.uber.org/zap"
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/client/pkg/v3/logutil"
@@ -30,16 +32,9 @@ import (
cconfig "go.etcd.io/etcd/server/v3/config"
"go.etcd.io/etcd/server/v3/embed"
"go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
-
- "go.uber.org/zap"
- "sigs.k8s.io/yaml"
)
var (
- proxyFlagOff = "off"
- proxyFlagReadonly = "readonly"
- proxyFlagOn = "on"
-
fallbackFlagExit = "exit"
fallbackFlagProxy = "proxy"
@@ -61,24 +56,21 @@ var (
"test.coverprofile",
"test.outputdir",
}
-)
-type configProxy struct {
- ProxyFailureWaitMs uint `json:"proxy-failure-wait"`
- ProxyRefreshIntervalMs uint `json:"proxy-refresh-interval"`
- ProxyDialTimeoutMs uint `json:"proxy-dial-timeout"`
- ProxyWriteTimeoutMs uint `json:"proxy-write-timeout"`
- ProxyReadTimeoutMs uint `json:"proxy-read-timeout"`
- Fallback string
- Proxy string
- ProxyJSON string `json:"proxy"`
- FallbackJSON string `json:"discovery-fallback"`
-}
+ deprecatedFlags = map[string]string{
+ // TODO: remove in 3.7.
+ "snapshot-count": "--snapshot-count is deprecated in 3.6 and will be decommissioned in 3.7.",
+ "max-snapshots": "--max-snapshots is deprecated in 3.6 and will be decommissioned in 3.7.",
+ "v2-deprecation": "--v2-deprecation is deprecated and scheduled for removal in v3.8. The default value is enforced, ignoring user input.",
+ "experimental-compact-hash-check-enabled": "--experimental-compact-hash-check-enabled is deprecated in 3.6 and will be decommissioned in 3.7. Use '--feature-gates=CompactHashCheck=true' instead.",
+ "experimental-compact-hash-check-time": "--experimental-compact-hash-check-time is deprecated in 3.6 and will be decommissioned in 3.7. Use '--compact-hash-check-time' instead.",
+ "experimental-txn-mode-write-with-shared-buffer": "--experimental-txn-mode-write-with-shared-buffer is deprecated in v3.6 and will be decommissioned in v3.7. Use '--feature-gates=TxnModeWriteWithSharedBuffer=true' instead.",
+ }
+)
// config holds the config for a command line invocation of etcd
type config struct {
ec embed.Config
- cp configProxy
cf configFlags
configFile string
printVersion bool
@@ -87,23 +79,17 @@ type config struct {
// configFlags has the set of flags used for command line parsing a Config
type configFlags struct {
- flagSet *flag.FlagSet
- clusterState *flags.SelectiveStringValue
- fallback *flags.SelectiveStringValue
- proxy *flags.SelectiveStringValue
+ flagSet *flag.FlagSet
+ clusterState *flags.SelectiveStringValue
+ fallback *flags.SelectiveStringValue
+ // Deprecated and scheduled for removal in v3.8. The default value is enforced, ignoring user input.
+ // TODO: remove in v3.8.
v2deprecation *flags.SelectiveStringsValue
}
func newConfig() *config {
cfg := &config{
- ec: *embed.NewConfig(),
- cp: configProxy{
- Proxy: proxyFlagOff,
- ProxyFailureWaitMs: 5000,
- ProxyRefreshIntervalMs: 30000,
- ProxyDialTimeoutMs: 1000,
- ProxyWriteTimeoutMs: 5000,
- },
+ ec: *embed.NewConfig(),
ignored: ignored,
}
cfg.cf = configFlags{
@@ -113,186 +99,25 @@ func newConfig() *config {
embed.ClusterStateFlagExisting,
),
fallback: flags.NewSelectiveStringValue(
- fallbackFlagProxy,
fallbackFlagExit,
- ),
- proxy: flags.NewSelectiveStringValue(
- proxyFlagOff,
- proxyFlagReadonly,
- proxyFlagOn,
+ fallbackFlagProxy,
),
v2deprecation: flags.NewSelectiveStringsValue(
- string(cconfig.V2_DEPR_0_NOT_YET),
- string(cconfig.V2_DEPR_1_WRITE_ONLY),
- string(cconfig.V2_DEPR_1_WRITE_ONLY_DROP),
- string(cconfig.V2_DEPR_2_GONE)),
+ string(cconfig.V2Depr1WriteOnly),
+ string(cconfig.V2Depr1WriteOnlyDrop),
+ string(cconfig.V2Depr2Gone)),
}
-
fs := cfg.cf.flagSet
fs.Usage = func() {
fmt.Fprintln(os.Stderr, usageline)
}
-
+ cfg.ec.AddFlags(fs)
fs.StringVar(&cfg.configFile, "config-file", "", "Path to the server configuration file. Note that if a configuration file is provided, other command line flags and environment variables will be ignored.")
-
- // member
- fs.StringVar(&cfg.ec.Dir, "data-dir", cfg.ec.Dir, "Path to the data directory.")
- fs.StringVar(&cfg.ec.WalDir, "wal-dir", cfg.ec.WalDir, "Path to the dedicated wal directory.")
- fs.Var(
- flags.NewUniqueURLsWithExceptions(embed.DefaultListenPeerURLs, ""),
- "listen-peer-urls",
- "List of URLs to listen on for peer traffic.",
- )
- fs.Var(
- flags.NewUniqueURLsWithExceptions(embed.DefaultListenClientURLs, ""), "listen-client-urls",
- "List of URLs to listen on for client traffic.",
- )
- fs.Var(
- flags.NewUniqueURLsWithExceptions("", ""),
- "listen-metrics-urls",
- "List of URLs to listen on for the metrics and health endpoints.",
- )
- fs.UintVar(&cfg.ec.MaxSnapFiles, "max-snapshots", cfg.ec.MaxSnapFiles, "Maximum number of snapshot files to retain (0 is unlimited).")
- fs.UintVar(&cfg.ec.MaxWalFiles, "max-wals", cfg.ec.MaxWalFiles, "Maximum number of wal files to retain (0 is unlimited).")
- fs.StringVar(&cfg.ec.Name, "name", cfg.ec.Name, "Human-readable name for this member.")
- fs.Uint64Var(&cfg.ec.SnapshotCount, "snapshot-count", cfg.ec.SnapshotCount, "Number of committed transactions to trigger a snapshot to disk.")
- fs.UintVar(&cfg.ec.TickMs, "heartbeat-interval", cfg.ec.TickMs, "Time (in milliseconds) of a heartbeat interval.")
- fs.UintVar(&cfg.ec.ElectionMs, "election-timeout", cfg.ec.ElectionMs, "Time (in milliseconds) for an election to timeout.")
- fs.BoolVar(&cfg.ec.InitialElectionTickAdvance, "initial-election-tick-advance", cfg.ec.InitialElectionTickAdvance, "Whether to fast-forward initial election ticks on boot for faster election.")
- fs.Int64Var(&cfg.ec.QuotaBackendBytes, "quota-backend-bytes", cfg.ec.QuotaBackendBytes, "Raise alarms when backend size exceeds the given quota. 0 means use the default quota.")
- fs.StringVar(&cfg.ec.BackendFreelistType, "backend-bbolt-freelist-type", cfg.ec.BackendFreelistType, "BackendFreelistType specifies the type of freelist that boltdb backend uses(array and map are supported types)")
- fs.DurationVar(&cfg.ec.BackendBatchInterval, "backend-batch-interval", cfg.ec.BackendBatchInterval, "BackendBatchInterval is the maximum time before commit the backend transaction.")
- fs.IntVar(&cfg.ec.BackendBatchLimit, "backend-batch-limit", cfg.ec.BackendBatchLimit, "BackendBatchLimit is the maximum operations before commit the backend transaction.")
- fs.UintVar(&cfg.ec.MaxTxnOps, "max-txn-ops", cfg.ec.MaxTxnOps, "Maximum number of operations permitted in a transaction.")
- fs.UintVar(&cfg.ec.MaxRequestBytes, "max-request-bytes", cfg.ec.MaxRequestBytes, "Maximum client request size in bytes the server will accept.")
- fs.DurationVar(&cfg.ec.GRPCKeepAliveMinTime, "grpc-keepalive-min-time", cfg.ec.GRPCKeepAliveMinTime, "Minimum interval duration that a client should wait before pinging server.")
- fs.DurationVar(&cfg.ec.GRPCKeepAliveInterval, "grpc-keepalive-interval", cfg.ec.GRPCKeepAliveInterval, "Frequency duration of server-to-client ping to check if a connection is alive (0 to disable).")
- fs.DurationVar(&cfg.ec.GRPCKeepAliveTimeout, "grpc-keepalive-timeout", cfg.ec.GRPCKeepAliveTimeout, "Additional duration of wait before closing a non-responsive connection (0 to disable).")
- fs.BoolVar(&cfg.ec.SocketOpts.ReusePort, "socket-reuse-port", cfg.ec.SocketOpts.ReusePort, "Enable to set socket option SO_REUSEPORT on listeners allowing rebinding of a port already in use.")
- fs.BoolVar(&cfg.ec.SocketOpts.ReuseAddress, "socket-reuse-address", cfg.ec.SocketOpts.ReuseAddress, "Enable to set socket option SO_REUSEADDR on listeners allowing binding to an address in `TIME_WAIT` state.")
-
- // raft connection timeouts
- fs.DurationVar(&rafthttp.ConnReadTimeout, "raft-read-timeout", rafthttp.DefaultConnReadTimeout, "Read timeout set on each rafthttp connection")
- fs.DurationVar(&rafthttp.ConnWriteTimeout, "raft-write-timeout", rafthttp.DefaultConnWriteTimeout, "Write timeout set on each rafthttp connection")
-
- // clustering
- fs.Var(
- flags.NewUniqueURLsWithExceptions(embed.DefaultInitialAdvertisePeerURLs, ""),
- "initial-advertise-peer-urls",
- "List of this member's peer URLs to advertise to the rest of the cluster.",
- )
- fs.Var(
- flags.NewUniqueURLsWithExceptions(embed.DefaultAdvertiseClientURLs, ""),
- "advertise-client-urls",
- "List of this member's client URLs to advertise to the public.",
- )
- fs.StringVar(&cfg.ec.Durl, "discovery", cfg.ec.Durl, "Discovery URL used to bootstrap the cluster.")
fs.Var(cfg.cf.fallback, "discovery-fallback", fmt.Sprintf("Valid values include %q", cfg.cf.fallback.Valids()))
+ fs.Var(cfg.cf.clusterState, "initial-cluster-state", "Initial cluster state ('new' when bootstrapping a new cluster or 'existing' when adding new members to an existing cluster). After successful initialization (bootstrapping or adding), flag is ignored on restarts.")
+ fs.Var(cfg.cf.v2deprecation, "v2-deprecation", fmt.Sprintf("v2store deprecation stage: %q. Deprecated and scheduled for removal in v3.8. The default value is enforced, ignoring user input.", cfg.cf.v2deprecation.Valids()))
- fs.StringVar(&cfg.ec.Dproxy, "discovery-proxy", cfg.ec.Dproxy, "HTTP proxy to use for traffic to discovery service.")
- fs.StringVar(&cfg.ec.DNSCluster, "discovery-srv", cfg.ec.DNSCluster, "DNS domain used to bootstrap initial cluster.")
- fs.StringVar(&cfg.ec.DNSClusterServiceName, "discovery-srv-name", cfg.ec.DNSClusterServiceName, "Service name to query when using DNS discovery.")
- fs.StringVar(&cfg.ec.InitialCluster, "initial-cluster", cfg.ec.InitialCluster, "Initial cluster configuration for bootstrapping.")
- fs.StringVar(&cfg.ec.InitialClusterToken, "initial-cluster-token", cfg.ec.InitialClusterToken, "Initial cluster token for the etcd cluster during bootstrap.")
- fs.Var(cfg.cf.clusterState, "initial-cluster-state", "Initial cluster state ('new' or 'existing').")
-
- fs.BoolVar(&cfg.ec.StrictReconfigCheck, "strict-reconfig-check", cfg.ec.StrictReconfigCheck, "Reject reconfiguration requests that would cause quorum loss.")
-
- fs.BoolVar(&cfg.ec.PreVote, "pre-vote", cfg.ec.PreVote, "Enable to run an additional Raft election phase.")
-
- fs.BoolVar(&cfg.ec.EnableV2, "enable-v2", cfg.ec.EnableV2, "Accept etcd V2 client requests. Deprecated in v3.5. Will be decommission in v3.6.")
- fs.StringVar(&cfg.ec.ExperimentalEnableV2V3, "experimental-enable-v2v3", cfg.ec.ExperimentalEnableV2V3, "v3 prefix for serving emulated v2 state. Deprecated in 3.5. Will be decomissioned in 3.6.")
- fs.Var(cfg.cf.v2deprecation, "v2-deprecation", fmt.Sprintf("v2store deprecation stage: %q. ", cfg.cf.proxy.Valids()))
-
- // proxy
- fs.Var(cfg.cf.proxy, "proxy", fmt.Sprintf("Valid values include %q", cfg.cf.proxy.Valids()))
- fs.UintVar(&cfg.cp.ProxyFailureWaitMs, "proxy-failure-wait", cfg.cp.ProxyFailureWaitMs, "Time (in milliseconds) an endpoint will be held in a failed state.")
- fs.UintVar(&cfg.cp.ProxyRefreshIntervalMs, "proxy-refresh-interval", cfg.cp.ProxyRefreshIntervalMs, "Time (in milliseconds) of the endpoints refresh interval.")
- fs.UintVar(&cfg.cp.ProxyDialTimeoutMs, "proxy-dial-timeout", cfg.cp.ProxyDialTimeoutMs, "Time (in milliseconds) for a dial to timeout.")
- fs.UintVar(&cfg.cp.ProxyWriteTimeoutMs, "proxy-write-timeout", cfg.cp.ProxyWriteTimeoutMs, "Time (in milliseconds) for a write to timeout.")
- fs.UintVar(&cfg.cp.ProxyReadTimeoutMs, "proxy-read-timeout", cfg.cp.ProxyReadTimeoutMs, "Time (in milliseconds) for a read to timeout.")
-
- // security
- fs.StringVar(&cfg.ec.ClientTLSInfo.CertFile, "cert-file", "", "Path to the client server TLS cert file.")
- fs.StringVar(&cfg.ec.ClientTLSInfo.KeyFile, "key-file", "", "Path to the client server TLS key file.")
- fs.StringVar(&cfg.ec.ClientTLSInfo.ClientCertFile, "client-cert-file", "", "Path to an explicit peer client TLS cert file otherwise cert file will be used when client auth is required.")
- fs.StringVar(&cfg.ec.ClientTLSInfo.ClientKeyFile, "client-key-file", "", "Path to an explicit peer client TLS key file otherwise key file will be used when client auth is required.")
- fs.BoolVar(&cfg.ec.ClientTLSInfo.ClientCertAuth, "client-cert-auth", false, "Enable client cert authentication.")
- fs.StringVar(&cfg.ec.ClientTLSInfo.CRLFile, "client-crl-file", "", "Path to the client certificate revocation list file.")
- fs.StringVar(&cfg.ec.ClientTLSInfo.AllowedHostname, "client-cert-allowed-hostname", "", "Allowed TLS hostname for client cert authentication.")
- fs.StringVar(&cfg.ec.ClientTLSInfo.TrustedCAFile, "trusted-ca-file", "", "Path to the client server TLS trusted CA cert file.")
- fs.BoolVar(&cfg.ec.ClientAutoTLS, "auto-tls", false, "Client TLS using generated certificates")
- fs.StringVar(&cfg.ec.PeerTLSInfo.CertFile, "peer-cert-file", "", "Path to the peer server TLS cert file.")
- fs.StringVar(&cfg.ec.PeerTLSInfo.KeyFile, "peer-key-file", "", "Path to the peer server TLS key file.")
- fs.StringVar(&cfg.ec.PeerTLSInfo.ClientCertFile, "peer-client-cert-file", "", "Path to an explicit peer client TLS cert file otherwise peer cert file will be used when client auth is required.")
- fs.StringVar(&cfg.ec.PeerTLSInfo.ClientKeyFile, "peer-client-key-file", "", "Path to an explicit peer client TLS key file otherwise peer key file will be used when client auth is required.")
- fs.BoolVar(&cfg.ec.PeerTLSInfo.ClientCertAuth, "peer-client-cert-auth", false, "Enable peer client cert authentication.")
- fs.StringVar(&cfg.ec.PeerTLSInfo.TrustedCAFile, "peer-trusted-ca-file", "", "Path to the peer server TLS trusted CA file.")
- fs.BoolVar(&cfg.ec.PeerAutoTLS, "peer-auto-tls", false, "Peer TLS using generated certificates")
- fs.UintVar(&cfg.ec.SelfSignedCertValidity, "self-signed-cert-validity", 1, "The validity period of the client and peer certificates, unit is year")
- fs.StringVar(&cfg.ec.PeerTLSInfo.CRLFile, "peer-crl-file", "", "Path to the peer certificate revocation list file.")
- fs.StringVar(&cfg.ec.PeerTLSInfo.AllowedCN, "peer-cert-allowed-cn", "", "Allowed CN for inter peer authentication.")
- fs.StringVar(&cfg.ec.PeerTLSInfo.AllowedHostname, "peer-cert-allowed-hostname", "", "Allowed TLS hostname for inter peer authentication.")
- fs.Var(flags.NewStringsValue(""), "cipher-suites", "Comma-separated list of supported TLS cipher suites between client/server and peers (empty will be auto-populated by Go).")
- fs.BoolVar(&cfg.ec.PeerTLSInfo.SkipClientSANVerify, "experimental-peer-skip-client-san-verification", false, "Skip verification of SAN field in client certificate for peer connections.")
-
- fs.Var(
- flags.NewUniqueURLsWithExceptions("*", "*"),
- "cors",
- "Comma-separated white list of origins for CORS, or cross-origin resource sharing, (empty or * means allow all)",
- )
- fs.Var(flags.NewUniqueStringsValue("*"), "host-whitelist", "Comma-separated acceptable hostnames from HTTP client requests, if server is not secure (empty means allow all).")
-
- // logging
- fs.StringVar(&cfg.ec.Logger, "logger", "zap", "Currently only supports 'zap' for structured logging.")
- fs.Var(flags.NewUniqueStringsValue(embed.DefaultLogOutput), "log-outputs", "Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd, or list of comma separated output targets.")
- fs.StringVar(&cfg.ec.LogLevel, "log-level", logutil.DefaultLogLevel, "Configures log level. Only supports debug, info, warn, error, panic, or fatal. Default 'info'.")
- fs.BoolVar(&cfg.ec.EnableLogRotation, "enable-log-rotation", false, "Enable log rotation of a single log-outputs file target.")
- fs.StringVar(&cfg.ec.LogRotationConfigJSON, "log-rotation-config-json", embed.DefaultLogRotationConfig, "Configures log rotation if enabled with a JSON logger config. Default: MaxSize=100(MB), MaxAge=0(days,no limit), MaxBackups=0(no limit), LocalTime=false(UTC), Compress=false(gzip)")
-
- // version
fs.BoolVar(&cfg.printVersion, "version", false, "Print the version and exit.")
-
- fs.StringVar(&cfg.ec.AutoCompactionRetention, "auto-compaction-retention", "0", "Auto compaction retention for mvcc key value store. 0 means disable auto compaction.")
- fs.StringVar(&cfg.ec.AutoCompactionMode, "auto-compaction-mode", "periodic", "interpret 'auto-compaction-retention' one of: periodic|revision. 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. '5m'). 'revision' for revision number based retention.")
-
- // pprof profiler via HTTP
- fs.BoolVar(&cfg.ec.EnablePprof, "enable-pprof", false, "Enable runtime profiling data via HTTP server. Address is at client URL + \"/debug/pprof/\"")
-
- // additional metrics
- fs.StringVar(&cfg.ec.Metrics, "metrics", cfg.ec.Metrics, "Set level of detail for exported metrics, specify 'extensive' to include server side grpc histogram metrics")
-
- // experimental distributed tracing
- fs.BoolVar(&cfg.ec.ExperimentalEnableDistributedTracing, "experimental-enable-distributed-tracing", false, "Enable experimental distributed tracing using OpenTelemetry Tracing.")
- fs.StringVar(&cfg.ec.ExperimentalDistributedTracingAddress, "experimental-distributed-tracing-address", embed.ExperimentalDistributedTracingAddress, "Address for distributed tracing used for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag).")
- fs.StringVar(&cfg.ec.ExperimentalDistributedTracingServiceName, "experimental-distributed-tracing-service-name", embed.ExperimentalDistributedTracingServiceName, "Configures service name for distributed tracing to be used to define service name for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). 'etcd' is the default service name. Use the same service name for all instances of etcd.")
- fs.StringVar(&cfg.ec.ExperimentalDistributedTracingServiceInstanceID, "experimental-distributed-tracing-instance-id", "", "Configures service instance ID for distributed tracing to be used to define service instance ID key for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). There is no default value set. This ID must be unique per etcd instance.")
-
- // auth
- fs.StringVar(&cfg.ec.AuthToken, "auth-token", cfg.ec.AuthToken, "Specify auth token specific options.")
- fs.UintVar(&cfg.ec.BcryptCost, "bcrypt-cost", cfg.ec.BcryptCost, "Specify bcrypt algorithm cost factor for auth password hashing.")
- fs.UintVar(&cfg.ec.AuthTokenTTL, "auth-token-ttl", cfg.ec.AuthTokenTTL, "The lifetime in seconds of the auth token.")
-
- // gateway
- fs.BoolVar(&cfg.ec.EnableGRPCGateway, "enable-grpc-gateway", cfg.ec.EnableGRPCGateway, "Enable GRPC gateway.")
-
- // experimental
- fs.BoolVar(&cfg.ec.ExperimentalInitialCorruptCheck, "experimental-initial-corrupt-check", cfg.ec.ExperimentalInitialCorruptCheck, "Enable to check data corruption before serving any client/peer traffic.")
- fs.DurationVar(&cfg.ec.ExperimentalCorruptCheckTime, "experimental-corrupt-check-time", cfg.ec.ExperimentalCorruptCheckTime, "Duration of time between cluster corruption check passes.")
-
- fs.BoolVar(&cfg.ec.ExperimentalEnableLeaseCheckpoint, "experimental-enable-lease-checkpoint", false, "Enable to persist lease remaining TTL to prevent indefinite auto-renewal of long lived leases.")
- fs.IntVar(&cfg.ec.ExperimentalCompactionBatchLimit, "experimental-compaction-batch-limit", cfg.ec.ExperimentalCompactionBatchLimit, "Sets the maximum revisions deleted in each compaction batch.")
- fs.DurationVar(&cfg.ec.ExperimentalWatchProgressNotifyInterval, "experimental-watch-progress-notify-interval", cfg.ec.ExperimentalWatchProgressNotifyInterval, "Duration of periodic watch progress notifications.")
- fs.DurationVar(&cfg.ec.ExperimentalDowngradeCheckTime, "experimental-downgrade-check-time", cfg.ec.ExperimentalDowngradeCheckTime, "Duration of time between two downgrade status check.")
- fs.DurationVar(&cfg.ec.ExperimentalWarningApplyDuration, "experimental-warning-apply-duration", cfg.ec.ExperimentalWarningApplyDuration, "Time duration after which a warning is generated if request takes more time.")
- fs.BoolVar(&cfg.ec.ExperimentalMemoryMlock, "experimental-memory-mlock", cfg.ec.ExperimentalMemoryMlock, "Enable to enforce etcd pages (in particular bbolt) to stay in RAM.")
- fs.BoolVar(&cfg.ec.ExperimentalTxnModeWriteWithSharedBuffer, "experimental-txn-mode-write-with-shared-buffer", true, "Enable the write transaction to use a shared buffer in its readonly check operations.")
- fs.UintVar(&cfg.ec.ExperimentalBootstrapDefragThresholdMegabytes, "experimental-bootstrap-defrag-threshold-megabytes", 0, "Enable the defrag during etcd server bootstrap on condition that it will free at least the provided threshold of disk space. Needs to be set to non-zero value to take effect.")
-
- // unsafe
- fs.BoolVar(&cfg.ec.UnsafeNoFsync, "unsafe-no-fsync", false, "Disables fsync, unsafe, will cause data loss.")
- fs.BoolVar(&cfg.ec.ForceNewCluster, "force-new-cluster", false, "Force to create a new one member cluster.")
-
// ignored
for _, f := range cfg.ignored {
fs.Var(&flags.IgnoredFlag{Name: f}, f, "")
@@ -302,16 +127,16 @@ func newConfig() *config {
func (cfg *config) parse(arguments []string) error {
perr := cfg.cf.flagSet.Parse(arguments)
- switch perr {
- case nil:
- case flag.ErrHelp:
+ switch {
+ case perr == nil:
+ case errors.Is(perr, flag.ErrHelp):
fmt.Println(flagsline)
os.Exit(0)
default:
os.Exit(2)
}
if len(cfg.cf.flagSet.Args()) != 0 {
- return fmt.Errorf("'%s' is not a valid flag", cfg.cf.flagSet.Arg(0))
+ return fmt.Errorf("%q is not a valid flag", cfg.cf.flagSet.Arg(0))
}
if cfg.printVersion {
@@ -343,8 +168,32 @@ func (cfg *config) parse(arguments []string) error {
err = cfg.configFromCmdLine()
}
- if cfg.ec.V2Deprecation == "" {
- cfg.ec.V2Deprecation = cconfig.V2_DEPR_DEFAULT
+ // params related to experimental flag deprecation
+ // TODO: delete in v3.7
+ if cfg.ec.FlagsExplicitlySet["experimental-compact-hash-check-time"] {
+ cfg.ec.CompactHashCheckTime = cfg.ec.ExperimentalCompactHashCheckTime
+ }
+
+ // `V2Deprecation` (--v2-deprecation) is deprecated and scheduled for removal in v3.8. The default value is enforced, ignoring user input.
+ cfg.ec.V2Deprecation = cconfig.V2DeprDefault
+
+ cfg.ec.WarningUnaryRequestDuration, perr = cfg.parseWarningUnaryRequestDuration()
+ if perr != nil {
+ return perr
+ }
+
+ var warningsForDeprecatedFlags []string
+ cfg.cf.flagSet.Visit(func(f *flag.Flag) {
+ if msg, ok := deprecatedFlags[f.Name]; ok {
+ warningsForDeprecatedFlags = append(warningsForDeprecatedFlags, msg)
+ }
+ })
+ if len(warningsForDeprecatedFlags) > 0 {
+ if lg := cfg.ec.GetLogger(); lg != nil {
+ for _, msg := range warningsForDeprecatedFlags {
+ lg.Warn(msg)
+ }
+ }
}
// now logger is set up
@@ -353,11 +202,10 @@ func (cfg *config) parse(arguments []string) error {
func (cfg *config) configFromCmdLine() error {
// user-specified logger is not setup yet, use this logger during flag parsing
- lg, err := zap.NewProduction()
+ lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel)
if err != nil {
return err
}
-
verKey := "ETCD_VERSION"
if verVal := os.Getenv(verKey); verVal != "" {
// unset to avoid any possible side-effect.
@@ -384,36 +232,62 @@ func (cfg *config) configFromCmdLine() error {
lg.Info(fmt.Sprintf("raft-write-timeout increased to minimum value: %v", rafthttp.DefaultConnWriteTimeout))
}
- cfg.ec.LPUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-peer-urls")
- cfg.ec.APUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "initial-advertise-peer-urls")
- cfg.ec.LCUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-client-urls")
- cfg.ec.ACUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "advertise-client-urls")
+ cfg.ec.ListenPeerUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-peer-urls")
+ cfg.ec.AdvertisePeerUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "initial-advertise-peer-urls")
+ cfg.ec.ListenClientUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-client-urls")
+ cfg.ec.ListenClientHttpUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-client-http-urls")
+ cfg.ec.AdvertiseClientUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "advertise-client-urls")
cfg.ec.ListenMetricsUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-metrics-urls")
+ cfg.ec.DiscoveryCfg.Endpoints = flags.UniqueStringsFromFlag(cfg.cf.flagSet, "discovery-endpoints")
+
cfg.ec.CORS = flags.UniqueURLsMapFromFlag(cfg.cf.flagSet, "cors")
cfg.ec.HostWhitelist = flags.UniqueStringsMapFromFlag(cfg.cf.flagSet, "host-whitelist")
+ cfg.ec.ClientTLSInfo.AllowedHostnames = flags.StringsFromFlag(cfg.cf.flagSet, "client-cert-allowed-hostname")
+ cfg.ec.PeerTLSInfo.AllowedCNs = flags.StringsFromFlag(cfg.cf.flagSet, "peer-cert-allowed-cn")
+ cfg.ec.PeerTLSInfo.AllowedHostnames = flags.StringsFromFlag(cfg.cf.flagSet, "peer-cert-allowed-hostname")
+
cfg.ec.CipherSuites = flags.StringsFromFlag(cfg.cf.flagSet, "cipher-suites")
+ cfg.ec.MaxConcurrentStreams = flags.Uint32FromFlag(cfg.cf.flagSet, "max-concurrent-streams")
+
cfg.ec.LogOutputs = flags.UniqueStringsFromFlag(cfg.cf.flagSet, "log-outputs")
cfg.ec.ClusterState = cfg.cf.clusterState.String()
- cfg.cp.Fallback = cfg.cf.fallback.String()
- cfg.cp.Proxy = cfg.cf.proxy.String()
cfg.ec.V2Deprecation = cconfig.V2DeprecationEnum(cfg.cf.v2deprecation.String())
// disable default advertise-client-urls if lcurls is set
missingAC := flags.IsSet(cfg.cf.flagSet, "listen-client-urls") && !flags.IsSet(cfg.cf.flagSet, "advertise-client-urls")
- if !cfg.mayBeProxy() && missingAC {
- cfg.ec.ACUrls = nil
+ if missingAC {
+ cfg.ec.AdvertiseClientUrls = nil
}
// disable default initial-cluster if discovery is set
- if (cfg.ec.Durl != "" || cfg.ec.DNSCluster != "" || cfg.ec.DNSClusterServiceName != "") && !flags.IsSet(cfg.cf.flagSet, "initial-cluster") {
+ if (cfg.ec.Durl != "" || cfg.ec.DNSCluster != "" || cfg.ec.DNSClusterServiceName != "" || len(cfg.ec.DiscoveryCfg.Endpoints) > 0) && !flags.IsSet(cfg.cf.flagSet, "initial-cluster") {
cfg.ec.InitialCluster = ""
}
+ cfg.cf.flagSet.Visit(func(f *flag.Flag) {
+ cfg.ec.FlagsExplicitlySet[f.Name] = true
+ })
+
+ getBoolFlagVal := func(flagName string) *bool {
+ boolVal, parseErr := flags.GetBoolFlagVal(cfg.cf.flagSet, flagName)
+ if parseErr != nil {
+ panic(parseErr)
+ }
+ return boolVal
+ }
+
+ // SetFeatureGatesFromExperimentalFlags validates that command line flags for experimental features and their corresponding feature gates are not explicitly set at the same time,
+ // and passes the values of the experimental feature flags to the server feature gate.
+ err = embed.SetFeatureGatesFromExperimentalFlags(cfg.ec.ServerFeatureGate, getBoolFlagVal, cfg.cf.flagSet.Lookup(embed.ServerFeatureGateFlagName).Value.String())
+ if err != nil {
+ return err
+ }
+
return cfg.validate()
}
@@ -424,45 +298,33 @@ func (cfg *config) configFromFile(path string) error {
}
cfg.ec = *eCfg
- // load extra config information
- b, rerr := ioutil.ReadFile(path)
- if rerr != nil {
- return rerr
- }
- if yerr := yaml.Unmarshal(b, &cfg.cp); yerr != nil {
- return yerr
- }
+ return nil
+}
- if cfg.cp.FallbackJSON != "" {
- if err := cfg.cf.fallback.Set(cfg.cp.FallbackJSON); err != nil {
- log.Fatalf("unexpected error setting up discovery-fallback flag: %v", err)
- }
- cfg.cp.Fallback = cfg.cf.fallback.String()
+func (cfg *config) validate() error {
+ if cfg.cf.fallback.String() == fallbackFlagProxy {
+ return fmt.Errorf("v2 proxy is deprecated, and --discovery-fallback can't be configured as %q", fallbackFlagProxy)
}
+ return cfg.ec.Validate()
+}
- if cfg.cp.ProxyJSON != "" {
- if err := cfg.cf.proxy.Set(cfg.cp.ProxyJSON); err != nil {
- log.Fatalf("unexpected error setting up proxyFlag: %v", err)
- }
- cfg.cp.Proxy = cfg.cf.proxy.String()
+func (cfg *config) parseWarningUnaryRequestDuration() (time.Duration, error) {
+ if cfg.ec.ExperimentalWarningUnaryRequestDuration != 0 && cfg.ec.WarningUnaryRequestDuration != 0 {
+ return 0, errors.New(
+ "both --experimental-warning-unary-request-duration and --warning-unary-request-duration flags are set. " +
+ "Use only --warning-unary-request-duration")
}
- return nil
-}
-func (cfg *config) mayBeProxy() bool {
- mayFallbackToProxy := cfg.ec.Durl != "" && cfg.cp.Fallback == fallbackFlagProxy
- return cfg.cp.Proxy != proxyFlagOff || mayFallbackToProxy
-}
+ if cfg.ec.WarningUnaryRequestDuration != 0 {
+ return cfg.ec.WarningUnaryRequestDuration, nil
+ }
-func (cfg *config) validate() error {
- err := cfg.ec.Validate()
- // TODO(yichengq): check this for joining through discovery service case
- if err == embed.ErrUnsetAdvertiseClientURLsFlag && cfg.mayBeProxy() {
- return nil
+ if cfg.ec.ExperimentalWarningUnaryRequestDuration != 0 {
+ cfg.ec.GetLogger().Warn(
+ "--experimental-warning-unary-request-duration is deprecated, and will be decommissioned in v3.7. " +
+ "Use --warning-unary-request-duration instead.")
+ return cfg.ec.ExperimentalWarningUnaryRequestDuration, nil
}
- return err
-}
-func (cfg config) isProxy() bool { return cfg.cf.proxy.String() != proxyFlagOff }
-func (cfg config) isReadonlyProxy() bool { return cfg.cf.proxy.String() == proxyFlagReadonly }
-func (cfg config) shouldFallbackToProxy() bool { return cfg.cf.fallback.String() == fallbackFlagProxy }
+ return embed.DefaultWarningUnaryRequestDuration, nil
+}
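
The deprecation warnings added above hinge on flag.FlagSet.Visit, which walks only the flags the user explicitly set. A standalone sketch of that pattern, using a made-up warning message rather than etcd's real ones:

```go
package main

import (
	"flag"
	"log"
)

func main() {
	// Hypothetical deprecation table; messages here are placeholders.
	deprecated := map[string]string{
		"snapshot-count": "--snapshot-count is deprecated and will be removed in a future release.",
	}

	fs := flag.NewFlagSet("etcd-sketch", flag.ContinueOnError)
	fs.Uint64("snapshot-count", 100000, "Number of committed transactions to trigger a snapshot to disk.")
	fs.String("name", "default", "Human-readable name for this member.")

	if err := fs.Parse([]string{"--snapshot-count=5000"}); err != nil {
		log.Fatal(err)
	}

	// Visit only sees flags that were actually set on the command line,
	// so the warning fires for --snapshot-count and stays silent for --name.
	fs.Visit(func(f *flag.Flag) {
		if msg, ok := deprecated[f.Name]; ok {
			log.Println(msg)
		}
	})
}
```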
diff --git a/server/etcdmain/config_test.go b/server/etcdmain/config_test.go
index 0dd4db97ec2..b834f3d33ea 100644
--- a/server/etcdmain/config_test.go
+++ b/server/etcdmain/config_test.go
@@ -15,16 +15,22 @@
package etcdmain
import (
+ "errors"
+ "flag"
"fmt"
- "io/ioutil"
"net/url"
"os"
"reflect"
"strings"
"testing"
+ "time"
- "go.etcd.io/etcd/server/v3/embed"
"sigs.k8s.io/yaml"
+
+ "go.etcd.io/etcd/pkg/v3/featuregate"
+ "go.etcd.io/etcd/pkg/v3/flags"
+ "go.etcd.io/etcd/server/v3/embed"
+ "go.etcd.io/etcd/server/v3/features"
)
func TestConfigParsingMemberFlags(t *testing.T) {
@@ -34,8 +40,10 @@ func TestConfigParsingMemberFlags(t *testing.T) {
"-max-wals=10",
"-max-snapshots=10",
"-snapshot-count=10",
+ "-experimental-snapshot-catchup-entries=1000",
"-listen-peer-urls=http://localhost:8000,https://localhost:8001",
"-listen-client-urls=http://localhost:7000,https://localhost:7001",
+ "-listen-client-http-urls=http://localhost:7002,https://localhost:7003",
// it should be set if -listen-client-urls is set
"-advertise-client-urls=http://localhost:7000,https://localhost:7001",
}
@@ -51,22 +59,26 @@ func TestConfigParsingMemberFlags(t *testing.T) {
func TestConfigFileMemberFields(t *testing.T) {
yc := struct {
- Dir string `json:"data-dir"`
- MaxSnapFiles uint `json:"max-snapshots"`
- MaxWalFiles uint `json:"max-wals"`
- Name string `json:"name"`
- SnapshotCount uint64 `json:"snapshot-count"`
- LPUrls string `json:"listen-peer-urls"`
- LCUrls string `json:"listen-client-urls"`
- AcurlsCfgFile string `json:"advertise-client-urls"`
+ Dir string `json:"data-dir"`
+ MaxSnapFiles uint `json:"max-snapshots"`
+ MaxWALFiles uint `json:"max-wals"`
+ Name string `json:"name"`
+ SnapshotCount uint64 `json:"snapshot-count"`
+ SnapshotCatchUpEntries uint64 `json:"experimental-snapshot-catch-up-entries"`
+ ListenPeerURLs string `json:"listen-peer-urls"`
+ ListenClientURLs string `json:"listen-client-urls"`
+ ListenClientHTTPURLs string `json:"listen-client-http-urls"`
+ AdvertiseClientURLs string `json:"advertise-client-urls"`
}{
"testdir",
10,
10,
"testname",
10,
+ 1000,
"http://localhost:8000,https://localhost:8001",
"http://localhost:7000,https://localhost:7001",
+ "http://localhost:7002,https://localhost:7003",
"http://localhost:7000,https://localhost:7001",
}
@@ -95,7 +107,6 @@ func TestConfigParsingClusteringFlags(t *testing.T) {
"-initial-cluster-token=etcdtest",
"-initial-advertise-peer-urls=http://localhost:8000,https://localhost:8001",
"-advertise-client-urls=http://localhost:7000,https://localhost:7001",
- "-discovery-fallback=exit",
}
cfg := newConfig()
@@ -111,16 +122,14 @@ func TestConfigFileClusteringFields(t *testing.T) {
InitialCluster string `json:"initial-cluster"`
ClusterState string `json:"initial-cluster-state"`
InitialClusterToken string `json:"initial-cluster-token"`
- Apurls string `json:"initial-advertise-peer-urls"`
- Acurls string `json:"advertise-client-urls"`
- Fallback string `json:"discovery-fallback"`
+ AdvertisePeerUrls string `json:"initial-advertise-peer-urls"`
+ AdvertiseClientUrls string `json:"advertise-client-urls"`
}{
"0=http://localhost:8000",
"existing",
"etcdtest",
"http://localhost:8000,https://localhost:8001",
"http://localhost:7000,https://localhost:7001",
- "exit",
}
b, err := yaml.Marshal(&yc)
@@ -194,44 +203,6 @@ func TestConfigFileClusteringFlags(t *testing.T) {
}
}
-func TestConfigParsingOtherFlags(t *testing.T) {
- args := []string{"-proxy=readonly"}
-
- cfg := newConfig()
- err := cfg.parse(args)
- if err != nil {
- t.Fatal(err)
- }
-
- validateOtherFlags(t, cfg)
-}
-
-func TestConfigFileOtherFields(t *testing.T) {
- yc := struct {
- ProxyCfgFile string `json:"proxy"`
- }{
- "readonly",
- }
-
- b, err := yaml.Marshal(&yc)
- if err != nil {
- t.Fatal(err)
- }
-
- tmpfile := mustCreateCfgFile(t, b)
- defer os.Remove(tmpfile.Name())
-
- args := []string{fmt.Sprintf("--config-file=%s", tmpfile.Name())}
-
- cfg := newConfig()
- err = cfg.parse(args)
- if err != nil {
- t.Fatal(err)
- }
-
- validateOtherFlags(t, cfg)
-}
-
func TestConfigParsingConflictClusteringFlags(t *testing.T) {
conflictArgs := [][]string{
{
@@ -255,7 +226,7 @@ func TestConfigParsingConflictClusteringFlags(t *testing.T) {
for i, tt := range conflictArgs {
cfg := newConfig()
- if err := cfg.parse(tt); err != embed.ErrConflictBootstrapFlags {
+ if err := cfg.parse(tt); !errors.Is(err, embed.ErrConflictBootstrapFlags) {
t.Errorf("%d: err = %v, want %v", i, err, embed.ErrConflictBootstrapFlags)
}
}
@@ -298,7 +269,7 @@ func TestConfigFileConflictClusteringFlags(t *testing.T) {
args := []string{fmt.Sprintf("--config-file=%s", tmpfile.Name())}
cfg := newConfig()
- if err := cfg.parse(args); err != embed.ErrConflictBootstrapFlags {
+ if err := cfg.parse(args); !errors.Is(err, embed.ErrConflictBootstrapFlags) {
t.Errorf("%d: err = %v, want %v", i, err, embed.ErrConflictBootstrapFlags)
}
}
@@ -337,32 +308,11 @@ func TestConfigParsingMissedAdvertiseClientURLsFlag(t *testing.T) {
},
embed.ErrUnsetAdvertiseClientURLsFlag,
},
- {
- []string{
- "-discovery=http://example.com/abc",
- "-listen-client-urls=http://127.0.0.1:2379",
- },
- nil,
- },
- {
- []string{
- "-proxy=on",
- "-listen-client-urls=http://127.0.0.1:2379",
- },
- nil,
- },
- {
- []string{
- "-proxy=readonly",
- "-listen-client-urls=http://127.0.0.1:2379",
- },
- nil,
- },
}
for i, tt := range tests {
cfg := newConfig()
- if err := cfg.parse(tt.args); err != tt.werr {
+ if err := cfg.parse(tt.args); !errors.Is(err, tt.werr) {
t.Errorf("%d: err = %v, want %v", i, err, tt.werr)
}
}
@@ -388,65 +338,6 @@ func TestConfigIsNewCluster(t *testing.T) {
}
}
-func TestConfigIsProxy(t *testing.T) {
- tests := []struct {
- proxy string
- wIsProxy bool
- }{
- {proxyFlagOff, false},
- {proxyFlagReadonly, true},
- {proxyFlagOn, true},
- }
- for i, tt := range tests {
- cfg := newConfig()
- if err := cfg.cf.proxy.Set(tt.proxy); err != nil {
- t.Fatalf("#%d: unexpected proxy.Set error: %v", i, err)
- }
- if g := cfg.isProxy(); g != tt.wIsProxy {
- t.Errorf("#%d: isProxy = %v, want %v", i, g, tt.wIsProxy)
- }
- }
-}
-
-func TestConfigIsReadonlyProxy(t *testing.T) {
- tests := []struct {
- proxy string
- wIsReadonly bool
- }{
- {proxyFlagOff, false},
- {proxyFlagReadonly, true},
- {proxyFlagOn, false},
- }
- for i, tt := range tests {
- cfg := newConfig()
- if err := cfg.cf.proxy.Set(tt.proxy); err != nil {
- t.Fatalf("#%d: unexpected proxy.Set error: %v", i, err)
- }
- if g := cfg.isReadonlyProxy(); g != tt.wIsReadonly {
- t.Errorf("#%d: isReadonlyProxy = %v, want %v", i, g, tt.wIsReadonly)
- }
- }
-}
-
-func TestConfigShouldFallbackToProxy(t *testing.T) {
- tests := []struct {
- fallback string
- wFallback bool
- }{
- {fallbackFlagProxy, true},
- {fallbackFlagExit, false},
- }
- for i, tt := range tests {
- cfg := newConfig()
- if err := cfg.cf.fallback.Set(tt.fallback); err != nil {
- t.Fatalf("#%d: unexpected fallback.Set error: %v", i, err)
- }
- if g := cfg.shouldFallbackToProxy(); g != tt.wFallback {
- t.Errorf("#%d: shouldFallbackToProxy = %v, want %v", i, g, tt.wFallback)
- }
- }
-}
-
func TestConfigFileElectionTimeout(t *testing.T) {
tests := []struct {
TickMs uint `json:"heartbeat-interval"`
@@ -493,8 +384,193 @@ func TestConfigFileElectionTimeout(t *testing.T) {
}
}
+func TestFlagsPresentInHelp(t *testing.T) {
+ cfg := newConfig()
+ cfg.cf.flagSet.VisitAll(func(f *flag.Flag) {
+ if _, ok := f.Value.(*flags.IgnoredFlag); ok {
+ // Ignored flags do not need to be in the help
+ return
+ }
+
+ flagText := fmt.Sprintf("--%s", f.Name)
+ if !strings.Contains(flagsline, flagText) && !strings.Contains(usageline, flagText) {
+ t.Errorf("Neither flagsline nor usageline in help.go contains flag named %s", flagText)
+ }
+ })
+}
+
+func TestParseFeatureGateFlags(t *testing.T) {
+ testCases := []struct {
+ name string
+ args []string
+ expectErr bool
+ expectedFeatures map[featuregate.Feature]bool
+ }{
+ {
+ name: "default",
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: false,
+ features.DistributedTracing: false,
+ },
+ },
+ {
+ name: "cannot set both experimental flag and feature gate flag",
+ args: []string{
+ "--experimental-stop-grpc-service-on-defrag=false",
+ "--feature-gates=StopGRPCServiceOnDefrag=true",
+ },
+ expectErr: true,
+ },
+ {
+ name: "ok to set different experimental flag and feature gate flag",
+ args: []string{
+ "--experimental-stop-grpc-service-on-defrag=true",
+ "--feature-gates=DistributedTracing=true",
+ },
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: true,
+ features.DistributedTracing: true,
+ },
+ },
+ {
+ name: "can set feature gate from experimental flag",
+ args: []string{
+ "--experimental-stop-grpc-service-on-defrag=true",
+ },
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: true,
+ features.DistributedTracing: false,
+ },
+ },
+ {
+ name: "can set feature gate from feature gate flag",
+ args: []string{
+ "--feature-gates=StopGRPCServiceOnDefrag=true,DistributedTracing=true",
+ },
+ expectedFeatures: map[featuregate.Feature]bool{
+ features.StopGRPCServiceOnDefrag: true,
+ features.DistributedTracing: true,
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ cfg := newConfig()
+ err := cfg.parse(tc.args)
+ if tc.expectErr {
+ if err == nil {
+ t.Fatal("expect parse error")
+ }
+ return
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ for k, v := range tc.expectedFeatures {
+ if cfg.ec.ServerFeatureGate.Enabled(k) != v {
+ t.Errorf("expected feature gate %s=%v, got %v", k, v, cfg.ec.ServerFeatureGate.Enabled(k))
+ }
+ }
+ })
+ }
+}
+
+// TestCompactHashCheckTimeFlagMigration tests the migration from
+// --experimental-compact-hash-check-time to --compact-hash-check-time
+// TODO: delete in v3.7
+func TestCompactHashCheckTimeFlagMigration(t *testing.T) {
+ testCases := []struct {
+ name string
+ compactHashCheckTime string
+ experimentalCompactHashCheckTime string
+ useConfigFile bool
+ expectErr bool
+ expectedCompactHashCheckTime time.Duration
+ }{
+ {
+ name: "default",
+ expectedCompactHashCheckTime: time.Minute,
+ },
+ {
+ name: "cannot set both experimental flag and non experimental flag",
+ compactHashCheckTime: "2m",
+ experimentalCompactHashCheckTime: "3m",
+ expectErr: true,
+ },
+ {
+ name: "can set experimental flag",
+ experimentalCompactHashCheckTime: "3m",
+ expectedCompactHashCheckTime: 3 * time.Minute,
+ },
+ {
+ name: "can set non experimental flag",
+ compactHashCheckTime: "2m",
+ expectedCompactHashCheckTime: 2 * time.Minute,
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ cmdLineArgs := []string{}
+ yc := struct {
+ ExperimentalCompactHashCheckTime time.Duration `json:"experimental-compact-hash-check-time,omitempty"`
+ CompactHashCheckTime time.Duration `json:"compact-hash-check-time,omitempty"`
+ }{}
+
+ if tc.compactHashCheckTime != "" {
+ cmdLineArgs = append(cmdLineArgs, fmt.Sprintf("--compact-hash-check-time=%s", tc.compactHashCheckTime))
+ compactHashCheckTime, err := time.ParseDuration(tc.compactHashCheckTime)
+ if err != nil {
+ t.Fatal(err)
+ }
+ yc.CompactHashCheckTime = compactHashCheckTime
+ }
+
+ if tc.experimentalCompactHashCheckTime != "" {
+ cmdLineArgs = append(cmdLineArgs, fmt.Sprintf("--experimental-compact-hash-check-time=%s", tc.experimentalCompactHashCheckTime))
+ experimentalCompactHashCheckTime, err := time.ParseDuration(tc.experimentalCompactHashCheckTime)
+ if err != nil {
+ t.Fatal(err)
+ }
+ yc.ExperimentalCompactHashCheckTime = experimentalCompactHashCheckTime
+ }
+
+ b, err := yaml.Marshal(&yc)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tmpfile := mustCreateCfgFile(t, b)
+ defer os.Remove(tmpfile.Name())
+
+ cfgFromCmdLine := newConfig()
+ errFromCmdLine := cfgFromCmdLine.parse(cmdLineArgs)
+
+ cfgFromFile := newConfig()
+ errFromFile := cfgFromFile.parse([]string{fmt.Sprintf("--config-file=%s", tmpfile.Name())})
+
+ if tc.expectErr {
+ if errFromCmdLine == nil || errFromFile == nil {
+ t.Fatal("expect parse error")
+ }
+ return
+ }
+ if errFromCmdLine != nil || errFromFile != nil {
+ t.Fatalf("unexpected parse error: errFromCmdLine=%v, errFromFile=%v", errFromCmdLine, errFromFile)
+ }
+
+ if cfgFromCmdLine.ec.CompactHashCheckTime != tc.expectedCompactHashCheckTime {
+ t.Errorf("expected CompactHashCheckTime=%v, got %v", tc.expectedCompactHashCheckTime, cfgFromCmdLine.ec.CompactHashCheckTime)
+ }
+ if cfgFromFile.ec.CompactHashCheckTime != tc.expectedCompactHashCheckTime {
+ t.Errorf("expected CompactHashCheckTime=%v, got %v", tc.expectedCompactHashCheckTime, cfgFromFile.ec.CompactHashCheckTime)
+ }
+ })
+ }
+}
+
func mustCreateCfgFile(t *testing.T, b []byte) *os.File {
- tmpfile, err := ioutil.TempFile("", "servercfg")
+ tmpfile, err := os.CreateTemp("", "servercfg")
if err != nil {
t.Fatal(err)
}
@@ -513,13 +589,15 @@ func mustCreateCfgFile(t *testing.T, b []byte) *os.File {
func validateMemberFlags(t *testing.T, cfg *config) {
wcfg := &embed.Config{
- Dir: "testdir",
- LPUrls: []url.URL{{Scheme: "http", Host: "localhost:8000"}, {Scheme: "https", Host: "localhost:8001"}},
- LCUrls: []url.URL{{Scheme: "http", Host: "localhost:7000"}, {Scheme: "https", Host: "localhost:7001"}},
- MaxSnapFiles: 10,
- MaxWalFiles: 10,
- Name: "testname",
- SnapshotCount: 10,
+ Dir: "testdir",
+ ListenPeerUrls: []url.URL{{Scheme: "http", Host: "localhost:8000"}, {Scheme: "https", Host: "localhost:8001"}},
+ ListenClientUrls: []url.URL{{Scheme: "http", Host: "localhost:7000"}, {Scheme: "https", Host: "localhost:7001"}},
+ ListenClientHttpUrls: []url.URL{{Scheme: "http", Host: "localhost:7002"}, {Scheme: "https", Host: "localhost:7003"}},
+ MaxSnapFiles: 10,
+ MaxWalFiles: 10,
+ Name: "testname",
+ SnapshotCount: 10,
+ SnapshotCatchUpEntries: 1000,
}
if cfg.ec.Dir != wcfg.Dir {
@@ -537,47 +615,41 @@ func validateMemberFlags(t *testing.T, cfg *config) {
if cfg.ec.SnapshotCount != wcfg.SnapshotCount {
t.Errorf("snapcount = %v, want %v", cfg.ec.SnapshotCount, wcfg.SnapshotCount)
}
- if !reflect.DeepEqual(cfg.ec.LPUrls, wcfg.LPUrls) {
- t.Errorf("listen-peer-urls = %v, want %v", cfg.ec.LPUrls, wcfg.LPUrls)
+ if cfg.ec.SnapshotCatchUpEntries != wcfg.SnapshotCatchUpEntries {
+ t.Errorf("snapshot catch up entries = %v, want %v", cfg.ec.SnapshotCatchUpEntries, wcfg.SnapshotCatchUpEntries)
+ }
+ if !reflect.DeepEqual(cfg.ec.ListenPeerUrls, wcfg.ListenPeerUrls) {
+ t.Errorf("listen-peer-urls = %v, want %v", cfg.ec.ListenPeerUrls, wcfg.ListenPeerUrls)
}
- if !reflect.DeepEqual(cfg.ec.LCUrls, wcfg.LCUrls) {
- t.Errorf("listen-client-urls = %v, want %v", cfg.ec.LCUrls, wcfg.LCUrls)
+ if !reflect.DeepEqual(cfg.ec.ListenClientUrls, wcfg.ListenClientUrls) {
+ t.Errorf("listen-client-urls = %v, want %v", cfg.ec.ListenClientUrls, wcfg.ListenClientUrls)
+ }
+ if !reflect.DeepEqual(cfg.ec.ListenClientHttpUrls, wcfg.ListenClientHttpUrls) {
+ t.Errorf("listen-client-http-urls = %v, want %v", cfg.ec.ListenClientHttpUrls, wcfg.ListenClientHttpUrls)
}
}
func validateClusteringFlags(t *testing.T, cfg *config) {
wcfg := newConfig()
- wcfg.ec.APUrls = []url.URL{{Scheme: "http", Host: "localhost:8000"}, {Scheme: "https", Host: "localhost:8001"}}
- wcfg.ec.ACUrls = []url.URL{{Scheme: "http", Host: "localhost:7000"}, {Scheme: "https", Host: "localhost:7001"}}
+ wcfg.ec.AdvertisePeerUrls = []url.URL{{Scheme: "http", Host: "localhost:8000"}, {Scheme: "https", Host: "localhost:8001"}}
+ wcfg.ec.AdvertiseClientUrls = []url.URL{{Scheme: "http", Host: "localhost:7000"}, {Scheme: "https", Host: "localhost:7001"}}
wcfg.ec.ClusterState = embed.ClusterStateFlagExisting
- wcfg.cf.fallback.Set(fallbackFlagExit)
wcfg.ec.InitialCluster = "0=http://localhost:8000"
wcfg.ec.InitialClusterToken = "etcdtest"
if cfg.ec.ClusterState != wcfg.ec.ClusterState {
t.Errorf("clusterState = %v, want %v", cfg.ec.ClusterState, wcfg.ec.ClusterState)
}
- if cfg.cf.fallback.String() != wcfg.cf.fallback.String() {
- t.Errorf("fallback = %v, want %v", cfg.cf.fallback, wcfg.cf.fallback)
- }
if cfg.ec.InitialCluster != wcfg.ec.InitialCluster {
t.Errorf("initialCluster = %v, want %v", cfg.ec.InitialCluster, wcfg.ec.InitialCluster)
}
if cfg.ec.InitialClusterToken != wcfg.ec.InitialClusterToken {
t.Errorf("initialClusterToken = %v, want %v", cfg.ec.InitialClusterToken, wcfg.ec.InitialClusterToken)
}
- if !reflect.DeepEqual(cfg.ec.APUrls, wcfg.ec.APUrls) {
- t.Errorf("initial-advertise-peer-urls = %v, want %v", cfg.ec.APUrls, wcfg.ec.APUrls)
+ if !reflect.DeepEqual(cfg.ec.AdvertisePeerUrls, wcfg.ec.AdvertisePeerUrls) {
+ t.Errorf("initial-advertise-peer-urls = %v, want %v", cfg.ec.AdvertisePeerUrls, wcfg.ec.AdvertisePeerUrls)
}
- if !reflect.DeepEqual(cfg.ec.ACUrls, wcfg.ec.ACUrls) {
- t.Errorf("advertise-client-urls = %v, want %v", cfg.ec.ACUrls, wcfg.ec.ACUrls)
- }
-}
-
-func validateOtherFlags(t *testing.T, cfg *config) {
- wcfg := newConfig()
- wcfg.cf.proxy.Set(proxyFlagReadonly)
- if cfg.cf.proxy.String() != wcfg.cf.proxy.String() {
- t.Errorf("proxy = %v, want %v", cfg.cf.proxy, wcfg.cf.proxy)
+ if !reflect.DeepEqual(cfg.ec.AdvertiseClientUrls, wcfg.ec.AdvertiseClientUrls) {
+ t.Errorf("advertise-client-urls = %v, want %v", cfg.ec.AdvertiseClientUrls, wcfg.ec.AdvertiseClientUrls)
}
}
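
The TestParseFeatureGateFlags cases above pin down the --feature-gates contract: the value is a comma-separated list of Name=bool pairs, and setting a gate both there and through its matching --experimental-* flag is rejected. A minimal, self-contained sketch of that parsing rule, using a hypothetical parseFeatureGates helper (the real logic lives in the featuregate and embed packages):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseFeatureGates is a hypothetical helper that mirrors the accepted
// "--feature-gates=StopGRPCServiceOnDefrag=true,DistributedTracing=true"
// syntax; it does not validate gate names against the known feature list.
func parseFeatureGates(s string) (map[string]bool, error) {
	gates := map[string]bool{}
	if s == "" {
		return gates, nil
	}
	for _, pair := range strings.Split(s, ",") {
		kv := strings.SplitN(pair, "=", 2)
		if len(kv) != 2 {
			return nil, fmt.Errorf("missing '=' in %q", pair)
		}
		enabled, err := strconv.ParseBool(strings.TrimSpace(kv[1]))
		if err != nil {
			return nil, fmt.Errorf("invalid value %q for gate %q: %w", kv[1], kv[0], err)
		}
		gates[strings.TrimSpace(kv[0])] = enabled
	}
	return gates, nil
}

func main() {
	gates, err := parseFeatureGates("StopGRPCServiceOnDefrag=true,DistributedTracing=true")
	if err != nil {
		panic(err)
	}
	// A gate that was also set via its --experimental-* flag would be rejected
	// before reaching this point, matching the "cannot set both" test case.
	fmt.Println(gates)
}
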
diff --git a/server/etcdmain/etcd.go b/server/etcdmain/etcd.go
index eb2585699dc..16bba3736fd 100644
--- a/server/etcdmain/etcd.go
+++ b/server/etcdmain/etcd.go
@@ -15,30 +15,22 @@
package etcdmain
import (
- "encoding/json"
+ errorspkg "errors"
"fmt"
- "io/ioutil"
- "net/http"
"os"
- "path/filepath"
- "reflect"
"runtime"
"strings"
- "time"
+
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
"go.etcd.io/etcd/client/pkg/v3/fileutil"
- "go.etcd.io/etcd/client/pkg/v3/transport"
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
"go.etcd.io/etcd/client/pkg/v3/types"
- pkgioutil "go.etcd.io/etcd/pkg/v3/ioutil"
"go.etcd.io/etcd/pkg/v3/osutil"
"go.etcd.io/etcd/server/v3/embed"
- "go.etcd.io/etcd/server/v3/etcdserver"
- "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery"
- "go.etcd.io/etcd/server/v3/proxy/httpproxy"
-
- "go.uber.org/zap"
- "google.golang.org/grpc"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
)
type dirType string
@@ -63,7 +55,7 @@ func startEtcdOrProxyV2(args []string) {
if lg == nil {
var zapError error
// use this logger
- lg, zapError = zap.NewProduction()
+ lg, zapError = logutil.CreateDefaultZapLogger(zap.InfoLevel)
if zapError != nil {
fmt.Printf("error creating zap logger %v", zapError)
os.Exit(1)
@@ -72,8 +64,8 @@ func startEtcdOrProxyV2(args []string) {
lg.Info("Running: ", zap.Strings("args", args))
if err != nil {
lg.Warn("failed to verify flags", zap.Error(err))
- switch err {
- case embed.ErrUnsetAdvertiseClientURLsFlag:
+ switch {
+ case errorspkg.Is(err, embed.ErrUnsetAdvertiseClientURLsFlag):
lg.Warn("advertise client URLs are not set", zap.Error(err))
}
os.Exit(1)
@@ -113,7 +105,7 @@ func startEtcdOrProxyV2(args []string) {
which := identifyDataDirOrDie(cfg.ec.GetLogger(), cfg.ec.Dir)
if which != dirEmpty {
lg.Info(
- "server has been already initialized",
+ "server has already been initialized",
zap.String("data-dir", cfg.ec.Dir),
zap.String("dir-type", string(which)),
)
@@ -121,7 +113,7 @@ func startEtcdOrProxyV2(args []string) {
case dirMember:
stopped, errc, err = startEtcd(&cfg.ec)
case dirProxy:
- err = startProxy(cfg)
+ lg.Panic("v2 http proxy has already been deprecated in 3.6", zap.String("dir-type", string(which)))
default:
lg.Panic(
"unknown directory type",
@@ -129,31 +121,19 @@ func startEtcdOrProxyV2(args []string) {
)
}
} else {
- shouldProxy := cfg.isProxy()
- if !shouldProxy {
- stopped, errc, err = startEtcd(&cfg.ec)
- if derr, ok := err.(*etcdserver.DiscoveryError); ok && derr.Err == v2discovery.ErrFullCluster {
- if cfg.shouldFallbackToProxy() {
- lg.Warn(
- "discovery cluster is full, falling back to proxy",
- zap.String("fallback-proxy", fallbackFlagProxy),
- zap.Error(err),
- )
- shouldProxy = true
- }
- } else if err != nil {
- lg.Warn("failed to start etcd", zap.Error(err))
- }
- }
- if shouldProxy {
- err = startProxy(cfg)
- }
+ lg.Info(
+ "Initialize and start etcd server",
+ zap.String("data-dir", cfg.ec.Dir),
+ zap.String("dir-type", string(which)),
+ )
+ stopped, errc, err = startEtcd(&cfg.ec)
}
if err != nil {
- if derr, ok := err.(*etcdserver.DiscoveryError); ok {
- switch derr.Err {
- case v2discovery.ErrDuplicateID:
+ var derr *errors.DiscoveryError
+ if errorspkg.As(err, &derr) {
+ switch {
+ case errorspkg.Is(derr.Err, v2discovery.ErrDuplicateID):
lg.Warn(
"member has been registered with discovery service",
zap.String("name", cfg.ec.Name),
@@ -167,7 +147,7 @@ func startEtcdOrProxyV2(args []string) {
lg.Warn("check data dir if previous bootstrap succeeded")
lg.Warn("or use a new discovery token if previous bootstrap failed")
- case v2discovery.ErrDuplicateName:
+ case errorspkg.Is(derr.Err, v2discovery.ErrDuplicateName):
lg.Warn(
"member with duplicated name has already been registered",
zap.String("discovery-token", cfg.ec.Durl),
@@ -192,11 +172,11 @@ func startEtcdOrProxyV2(args []string) {
if cfg.ec.InitialCluster == cfg.ec.InitialClusterFromName(cfg.ec.Name) {
lg.Warn("forgot to set --initial-cluster?")
}
- if types.URLs(cfg.ec.APUrls).String() == embed.DefaultInitialAdvertisePeerURLs {
+ if types.URLs(cfg.ec.AdvertisePeerUrls).String() == embed.DefaultInitialAdvertisePeerURLs {
lg.Warn("forgot to set --initial-advertise-peer-urls?")
}
- if cfg.ec.InitialCluster == cfg.ec.InitialClusterFromName(cfg.ec.Name) && len(cfg.ec.Durl) == 0 {
- lg.Warn("--discovery flag is not set")
+ if cfg.ec.InitialCluster == cfg.ec.InitialClusterFromName(cfg.ec.Name) && len(cfg.ec.Durl) == 0 && len(cfg.ec.DiscoveryCfg.Endpoints) == 0 {
+ lg.Warn("V2 discovery settings (i.e., --discovery) or v3 discovery settings (i.e., --discovery-token, --discovery-endpoints) are not set")
}
os.Exit(1)
}
@@ -236,193 +216,6 @@ func startEtcd(cfg *embed.Config) (<-chan struct{}, <-chan error, error) {
return e.Server.StopNotify(), e.Err(), nil
}
-// startProxy launches an HTTP proxy for client communication which proxies to other etcd nodes.
-func startProxy(cfg *config) error {
- lg := cfg.ec.GetLogger()
- lg.Info("v2 API proxy starting")
-
- clientTLSInfo := cfg.ec.ClientTLSInfo
- if clientTLSInfo.Empty() {
- // Support old proxy behavior of defaulting to PeerTLSInfo
- // for both client and peer connections.
- clientTLSInfo = cfg.ec.PeerTLSInfo
- }
- clientTLSInfo.InsecureSkipVerify = cfg.ec.ClientAutoTLS
- cfg.ec.PeerTLSInfo.InsecureSkipVerify = cfg.ec.PeerAutoTLS
-
- pt, err := transport.NewTimeoutTransport(
- clientTLSInfo,
- time.Duration(cfg.cp.ProxyDialTimeoutMs)*time.Millisecond,
- time.Duration(cfg.cp.ProxyReadTimeoutMs)*time.Millisecond,
- time.Duration(cfg.cp.ProxyWriteTimeoutMs)*time.Millisecond,
- )
- if err != nil {
- return err
- }
- pt.MaxIdleConnsPerHost = httpproxy.DefaultMaxIdleConnsPerHost
-
- if err = cfg.ec.PeerSelfCert(); err != nil {
- lg.Fatal("failed to get self-signed certs for peer", zap.Error(err))
- }
- tr, err := transport.NewTimeoutTransport(
- cfg.ec.PeerTLSInfo,
- time.Duration(cfg.cp.ProxyDialTimeoutMs)*time.Millisecond,
- time.Duration(cfg.cp.ProxyReadTimeoutMs)*time.Millisecond,
- time.Duration(cfg.cp.ProxyWriteTimeoutMs)*time.Millisecond,
- )
- if err != nil {
- return err
- }
-
- cfg.ec.Dir = filepath.Join(cfg.ec.Dir, "proxy")
- err = fileutil.TouchDirAll(cfg.ec.Dir)
- if err != nil {
- return err
- }
-
- var peerURLs []string
- clusterfile := filepath.Join(cfg.ec.Dir, "cluster")
-
- b, err := ioutil.ReadFile(clusterfile)
- switch {
- case err == nil:
- if cfg.ec.Durl != "" {
- lg.Warn(
- "discovery token ignored since the proxy has already been initialized; valid cluster file found",
- zap.String("cluster-file", clusterfile),
- )
- }
- if cfg.ec.DNSCluster != "" {
- lg.Warn(
- "DNS SRV discovery ignored since the proxy has already been initialized; valid cluster file found",
- zap.String("cluster-file", clusterfile),
- )
- }
- urls := struct{ PeerURLs []string }{}
- err = json.Unmarshal(b, &urls)
- if err != nil {
- return err
- }
- peerURLs = urls.PeerURLs
- lg.Info(
- "proxy using peer URLS from cluster file",
- zap.Strings("peer-urls", peerURLs),
- zap.String("cluster-file", clusterfile),
- )
-
- case os.IsNotExist(err):
- var urlsmap types.URLsMap
- urlsmap, _, err = cfg.ec.PeerURLsMapAndToken("proxy")
- if err != nil {
- return fmt.Errorf("error setting up initial cluster: %v", err)
- }
-
- if cfg.ec.Durl != "" {
- var s string
- s, err = v2discovery.GetCluster(lg, cfg.ec.Durl, cfg.ec.Dproxy)
- if err != nil {
- return err
- }
- if urlsmap, err = types.NewURLsMap(s); err != nil {
- return err
- }
- }
- peerURLs = urlsmap.URLs()
- lg.Info("proxy using peer URLS", zap.Strings("peer-urls", peerURLs))
-
- default:
- return err
- }
-
- clientURLs := []string{}
- uf := func() []string {
- gcls, gerr := etcdserver.GetClusterFromRemotePeers(lg, peerURLs, tr)
- if gerr != nil {
- lg.Warn(
- "failed to get cluster from remote peers",
- zap.Strings("peer-urls", peerURLs),
- zap.Error(gerr),
- )
- return []string{}
- }
-
- clientURLs = gcls.ClientURLs()
- urls := struct{ PeerURLs []string }{gcls.PeerURLs()}
- b, jerr := json.Marshal(urls)
- if jerr != nil {
- lg.Warn("proxy failed to marshal peer URLs", zap.Error(jerr))
- return clientURLs
- }
-
- err = pkgioutil.WriteAndSyncFile(clusterfile+".bak", b, 0600)
- if err != nil {
- lg.Warn("proxy failed to write cluster file", zap.Error(err))
- return clientURLs
- }
- err = os.Rename(clusterfile+".bak", clusterfile)
- if err != nil {
- lg.Warn(
- "proxy failed to rename cluster file",
- zap.String("path", clusterfile),
- zap.Error(err),
- )
- return clientURLs
- }
- if !reflect.DeepEqual(gcls.PeerURLs(), peerURLs) {
- lg.Info(
- "proxy updated peer URLs",
- zap.Strings("from", peerURLs),
- zap.Strings("to", gcls.PeerURLs()),
- )
- }
- peerURLs = gcls.PeerURLs()
-
- return clientURLs
- }
- ph := httpproxy.NewHandler(lg, pt, uf, time.Duration(cfg.cp.ProxyFailureWaitMs)*time.Millisecond, time.Duration(cfg.cp.ProxyRefreshIntervalMs)*time.Millisecond)
- ph = embed.WrapCORS(cfg.ec.CORS, ph)
-
- if cfg.isReadonlyProxy() {
- ph = httpproxy.NewReadonlyHandler(ph)
- }
-
- // setup self signed certs when serving https
- cHosts, cTLS := []string{}, false
- for _, u := range cfg.ec.LCUrls {
- cHosts = append(cHosts, u.Host)
- cTLS = cTLS || u.Scheme == "https"
- }
- for _, u := range cfg.ec.ACUrls {
- cHosts = append(cHosts, u.Host)
- cTLS = cTLS || u.Scheme == "https"
- }
- listenerTLS := cfg.ec.ClientTLSInfo
- if cfg.ec.ClientAutoTLS && cTLS {
- listenerTLS, err = transport.SelfCert(cfg.ec.GetLogger(), filepath.Join(cfg.ec.Dir, "clientCerts"), cHosts, cfg.ec.SelfSignedCertValidity)
- if err != nil {
- lg.Fatal("failed to initialize self-signed client cert", zap.Error(err))
- }
- }
-
- // Start a proxy server goroutine for each listen address
- for _, u := range cfg.ec.LCUrls {
- l, err := transport.NewListener(u.Host, u.Scheme, &listenerTLS)
- if err != nil {
- return err
- }
-
- host := u.String()
- go func() {
- lg.Info("v2 proxy started listening on client requests", zap.String("host", host))
- mux := http.NewServeMux()
- etcdhttp.HandlePrometheus(mux) // v2 proxy just uses the same port
- mux.Handle("/", ph)
- lg.Fatal("done serving", zap.Error(http.Serve(l, mux)))
- }()
- }
- return nil
-}
-
// identifyDataDirOrDie returns the type of the data dir.
// Dies if the datadir is invalid.
func identifyDataDirOrDie(lg *zap.Logger, dir string) dirType {
@@ -463,21 +256,24 @@ func identifyDataDirOrDie(lg *zap.Logger, dir string) dirType {
}
func checkSupportArch() {
- // to add a new platform, check https://github.com/etcd-io/website/blob/main/content/en/docs/next/op-guide/supported-platform.md
- if runtime.GOARCH == "amd64" ||
- runtime.GOARCH == "arm64" ||
- runtime.GOARCH == "ppc64le" ||
- runtime.GOARCH == "s390x" {
+ lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel)
+ if err != nil {
+ panic(err)
+ }
+ // To add a new platform, check https://github.com/etcd-io/website/blob/main/content/en/docs/${VERSION}/op-guide/supported-platform.md.
+ // The ${VERSION} is the etcd version, e.g. v3.5, v3.6 etc.
+ switch runtime.GOARCH {
+ case "amd64", "arm64", "ppc64le", "s390x":
return
}
// unsupported arch only configured via environment variable
// so unset here to not parse through flag
defer os.Unsetenv("ETCD_UNSUPPORTED_ARCH")
if env, ok := os.LookupEnv("ETCD_UNSUPPORTED_ARCH"); ok && env == runtime.GOARCH {
- fmt.Printf("running etcd on unsupported architecture %q since ETCD_UNSUPPORTED_ARCH is set\n", env)
+ lg.Info("running etcd on unsupported architecture since ETCD_UNSUPPORTED_ARCH is set", zap.String("arch", env))
return
}
- fmt.Printf("etcd on unsupported platform without ETCD_UNSUPPORTED_ARCH=%s set\n", runtime.GOARCH)
+ lg.Error("Refusing to run etcd on unsupported architecture since ETCD_UNSUPPORTED_ARCH is not set", zap.String("arch", runtime.GOARCH))
os.Exit(1)
}
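
Throughout the tests and startEtcdOrProxyV2 above, direct comparisons (err != sentinel, type switches on derr.Err) are replaced by errors.Is and errors.As so that wrapped errors still match. A small standalone sketch of both patterns; the sentinel and wrapper types below are illustrative stand-ins, not the etcd ones:

package main

import (
	"errors"
	"fmt"
)

var errConflict = errors.New("conflict") // stand-in for a sentinel such as embed.ErrConflictBootstrapFlags

type discoveryError struct{ err error } // stand-in for a typed error such as *errors.DiscoveryError

func (d *discoveryError) Error() string { return "discovery: " + d.err.Error() }
func (d *discoveryError) Unwrap() error { return d.err }

func main() {
	err := fmt.Errorf("parse config: %w", errConflict)

	// err != errConflict would report false here because the sentinel is wrapped;
	// errors.Is walks the wrap chain and still matches.
	fmt.Println(errors.Is(err, errConflict)) // true

	// errors.As extracts a typed error from anywhere in the chain.
	wrapped := fmt.Errorf("bootstrap: %w", &discoveryError{err: errConflict})
	var derr *discoveryError
	if errors.As(wrapped, &derr) {
		fmt.Println("discovery error:", derr.err)
	}
}
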
diff --git a/server/etcdmain/gateway.go b/server/etcdmain/gateway.go
index 3e4d0620c66..cda7fd7ee3e 100644
--- a/server/etcdmain/gateway.go
+++ b/server/etcdmain/gateway.go
@@ -21,10 +21,11 @@ import (
"os"
"time"
- "go.etcd.io/etcd/server/v3/proxy/tcpproxy"
-
"github.com/spf13/cobra"
"go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
+ "go.etcd.io/etcd/server/v3/proxy/tcpproxy"
)
var (
@@ -37,13 +38,11 @@ var (
gatewayCA string
)
-var (
- rootCmd = &cobra.Command{
- Use: "etcd",
- Short: "etcd server",
- SuggestFor: []string{"etcd"},
- }
-)
+var rootCmd = &cobra.Command{
+ Use: "etcd",
+ Short: "etcd server",
+ SuggestFor: []string{"etcd"},
+}
func init() {
rootCmd.AddCommand(newGatewayCommand())
@@ -92,8 +91,7 @@ func stripSchema(eps []string) []string {
}
func startGateway(cmd *cobra.Command, args []string) {
- var lg *zap.Logger
- lg, err := zap.NewProduction()
+ lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
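
The gateway above and the grpc proxy below both drop ad-hoc zap configuration in favour of logutil.CreateDefaultZapLogger, and the proxy additionally routes grpc-go's internal logging through the same logger via zapgrpc. A rough usage sketch, relying only on the calls that appear in this patch:

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapgrpc"
	"google.golang.org/grpc/grpclog"

	"go.etcd.io/etcd/client/pkg/v3/logutil"
)

func main() {
	// One logger with etcd's default encoder settings at the requested level.
	lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel)
	if err != nil {
		panic(err)
	}
	defer lg.Sync()

	// Reuse the same zap logger for grpc-go's internal logging
	// instead of building a separate logging config.
	grpclog.SetLoggerV2(zapgrpc.NewLogger(lg))

	lg.Info("logger initialized")
}
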
diff --git a/server/etcdmain/grpc_proxy.go b/server/etcdmain/grpc_proxy.go
index e251e1825a8..a0df3f99ae4 100644
--- a/server/etcdmain/grpc_proxy.go
+++ b/server/etcdmain/grpc_proxy.go
@@ -19,7 +19,7 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
- "io/ioutil"
+ "io"
"log"
"math"
"net"
@@ -29,8 +29,21 @@ import (
"path/filepath"
"time"
+ grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
+ grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags"
+ grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
+ "github.com/soheilhy/cmux"
+ "github.com/spf13/cobra"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapgrpc"
+ "golang.org/x/net/http2"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/keepalive"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/pkg/v3/logutil"
+ "go.etcd.io/etcd/client/pkg/v3/tlsutil"
"go.etcd.io/etcd/client/pkg/v3/transport"
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/leasing"
@@ -41,26 +54,22 @@ import (
"go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
"go.etcd.io/etcd/server/v3/proxy/grpcproxy"
-
- grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
- "github.com/soheilhy/cmux"
- "github.com/spf13/cobra"
- "go.uber.org/zap"
- "google.golang.org/grpc"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/keepalive"
)
var (
- grpcProxyListenAddr string
- grpcProxyMetricsListenAddr string
- grpcProxyEndpoints []string
- grpcProxyDNSCluster string
- grpcProxyDNSClusterServiceName string
- grpcProxyInsecureDiscovery bool
- grpcProxyDataDir string
- grpcMaxCallSendMsgSize int
- grpcMaxCallRecvMsgSize int
+ grpcProxyListenAddr string
+ grpcProxyMetricsListenAddr string
+ grpcProxyEndpoints []string
+ grpcProxyEndpointsAutoSyncInterval time.Duration
+ grpcProxyDialKeepAliveTime time.Duration
+ grpcProxyDialKeepAliveTimeout time.Duration
+ grpcProxyPermitWithoutStream bool
+ grpcProxyDNSCluster string
+ grpcProxyDNSClusterServiceName string
+ grpcProxyInsecureDiscovery bool
+ grpcProxyDataDir string
+ grpcMaxCallSendMsgSize int
+ grpcMaxCallRecvMsgSize int
// tls for connecting to etcd
@@ -71,11 +80,15 @@ var (
// tls for clients connecting to proxy
- grpcProxyListenCA string
- grpcProxyListenCert string
- grpcProxyListenKey string
- grpcProxyListenAutoTLS bool
- grpcProxyListenCRL string
+ grpcProxyListenCA string
+ grpcProxyListenCert string
+ grpcProxyListenKey string
+ grpcProxyListenCipherSuites []string
+ grpcProxyListenAutoTLS bool
+ grpcProxyListenCRL string
+ grpcProxyListenTLSMinVersion string
+ grpcProxyListenTLSMaxVersion string
+
selfSignedCertValidity uint
grpcProxyAdvertiseClientURL string
@@ -87,6 +100,7 @@ var (
grpcProxyEnablePprof bool
grpcProxyEnableOrdering bool
+ grpcProxyEnableLogging bool
grpcProxyDebug bool
@@ -94,6 +108,8 @@ var (
grpcKeepAliveMinTime time.Duration
grpcKeepAliveTimeout time.Duration
grpcKeepAliveInterval time.Duration
+
+ maxConcurrentStreams uint32
)
const defaultGRPCMaxCallSendMsgSize = 1.5 * 1024 * 1024
@@ -126,6 +142,10 @@ func newGRPCProxyStartCommand() *cobra.Command {
cmd.Flags().StringVar(&grpcProxyMetricsListenAddr, "metrics-addr", "", "listen for endpoint /metrics requests on an additional interface")
cmd.Flags().BoolVar(&grpcProxyInsecureDiscovery, "insecure-discovery", false, "accept insecure SRV records")
cmd.Flags().StringSliceVar(&grpcProxyEndpoints, "endpoints", []string{"127.0.0.1:2379"}, "comma separated etcd cluster endpoints")
+ cmd.Flags().DurationVar(&grpcProxyEndpointsAutoSyncInterval, "endpoints-auto-sync-interval", 0, "etcd endpoints auto sync interval (disabled by default)")
+ cmd.Flags().DurationVar(&grpcProxyDialKeepAliveTime, "dial-keepalive-time", 0, "keepalive time for client(grpc-proxy) connections (default 0, disable).")
+ cmd.Flags().DurationVar(&grpcProxyDialKeepAliveTimeout, "dial-keepalive-timeout", embed.DefaultGRPCKeepAliveTimeout, "keepalive timeout for client(grpc-proxy) connections (default 20s).")
+ cmd.Flags().BoolVar(&grpcProxyPermitWithoutStream, "permit-without-stream", false, "Enable client(grpc-proxy) to send keepalive pings even with no active RPCs.")
cmd.Flags().StringVar(&grpcProxyAdvertiseClientURL, "advertise-client-url", "127.0.0.1:23790", "advertise address to register (must be reachable by client)")
cmd.Flags().StringVar(&grpcProxyResolverPrefix, "resolver-prefix", "", "prefix to use for registering proxy (must be shared with other grpc-proxy members)")
cmd.Flags().IntVar(&grpcProxyResolverTTL, "resolver-ttl", 0, "specify TTL, in seconds, when registering proxy endpoints")
@@ -148,59 +168,80 @@ func newGRPCProxyStartCommand() *cobra.Command {
cmd.Flags().StringVar(&grpcProxyListenCert, "cert-file", "", "identify secure connections to the proxy using this TLS certificate file")
cmd.Flags().StringVar(&grpcProxyListenKey, "key-file", "", "identify secure connections to the proxy using this TLS key file")
cmd.Flags().StringVar(&grpcProxyListenCA, "trusted-ca-file", "", "verify certificates of TLS-enabled secure proxy using this CA bundle")
+ cmd.Flags().StringSliceVar(&grpcProxyListenCipherSuites, "listen-cipher-suites", grpcProxyListenCipherSuites, "Comma-separated list of supported TLS cipher suites between client/proxy (empty will be auto-populated by Go).")
cmd.Flags().BoolVar(&grpcProxyListenAutoTLS, "auto-tls", false, "proxy TLS using generated certificates")
cmd.Flags().StringVar(&grpcProxyListenCRL, "client-crl-file", "", "proxy client certificate revocation list file.")
cmd.Flags().UintVar(&selfSignedCertValidity, "self-signed-cert-validity", 1, "The validity period of the proxy certificates, unit is year")
+ cmd.Flags().StringVar(&grpcProxyListenTLSMinVersion, "tls-min-version", string(tlsutil.TLSVersion12), "Minimum TLS version supported by grpc proxy. Possible values: TLS1.2, TLS1.3.")
+ cmd.Flags().StringVar(&grpcProxyListenTLSMaxVersion, "tls-max-version", string(tlsutil.TLSVersionDefault), "Maximum TLS version supported by grpc proxy. Possible values: TLS1.2, TLS1.3 (empty defers to Go).")
// experimental flags
cmd.Flags().BoolVar(&grpcProxyEnableOrdering, "experimental-serializable-ordering", false, "Ensure serializable reads have monotonically increasing store revisions across endpoints.")
cmd.Flags().StringVar(&grpcProxyLeasing, "experimental-leasing-prefix", "", "leasing metadata prefix for disconnected linearized reads.")
+ cmd.Flags().BoolVar(&grpcProxyEnableLogging, "experimental-enable-grpc-logging", false, "logging all grpc requests and responses")
cmd.Flags().BoolVar(&grpcProxyDebug, "debug", false, "Enable debug-level logging for grpc-proxy.")
+ cmd.Flags().Uint32Var(&maxConcurrentStreams, "max-concurrent-streams", math.MaxUint32, "Maximum concurrent streams that each client can open at a time.")
+
return &cmd
}
func startGRPCProxy(cmd *cobra.Command, args []string) {
checkArgs()
-
- lcfg := logutil.DefaultZapLoggerConfig
+ lvl := zap.InfoLevel
if grpcProxyDebug {
- lcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
+ lvl = zap.DebugLevel
grpc.EnableTracing = true
}
-
- lg, err := lcfg.Build()
+ lg, err := logutil.CreateDefaultZapLogger(lvl)
if err != nil {
- log.Fatal(err)
+ panic(err)
}
defer lg.Sync()
- var gl grpclog.LoggerV2
- gl, err = logutil.NewGRPCLoggerV2(lcfg)
- if err != nil {
- log.Fatal(err)
- }
- grpclog.SetLoggerV2(gl)
+ grpclog.SetLoggerV2(zapgrpc.NewLogger(lg))
// The proxy itself (ListenCert) can have not-empty CN.
// The empty CN is required for grpcProxyCert.
// Please see https://github.com/etcd-io/etcd/issues/11970#issuecomment-687875315 for more context.
- tlsinfo := newTLS(grpcProxyListenCA, grpcProxyListenCert, grpcProxyListenKey, false)
-
- if tlsinfo == nil && grpcProxyListenAutoTLS {
+ tlsInfo := newTLS(grpcProxyListenCA, grpcProxyListenCert, grpcProxyListenKey, false)
+ if tlsInfo == nil && grpcProxyListenAutoTLS {
host := []string{"https://" + grpcProxyListenAddr}
dir := filepath.Join(grpcProxyDataDir, "fixtures", "proxy")
autoTLS, err := transport.SelfCert(lg, dir, host, selfSignedCertValidity)
if err != nil {
log.Fatal(err)
}
- tlsinfo = &autoTLS
+ tlsInfo = &autoTLS
}
- if tlsinfo != nil {
- lg.Info("gRPC proxy server TLS", zap.String("tls-info", fmt.Sprintf("%+v", tlsinfo)))
+ if tlsInfo != nil {
+ if len(grpcProxyListenCipherSuites) > 0 {
+ cs, err := tlsutil.GetCipherSuites(grpcProxyListenCipherSuites)
+ if err != nil {
+ log.Fatal(err)
+ }
+ tlsInfo.CipherSuites = cs
+ }
+ if grpcProxyListenTLSMinVersion != "" {
+ version, err := tlsutil.GetTLSVersion(grpcProxyListenTLSMinVersion)
+ if err != nil {
+ log.Fatal(err)
+ }
+ tlsInfo.MinVersion = version
+ }
+ if grpcProxyListenTLSMaxVersion != "" {
+ version, err := tlsutil.GetTLSVersion(grpcProxyListenTLSMaxVersion)
+ if err != nil {
+ log.Fatal(err)
+ }
+ tlsInfo.MaxVersion = version
+ }
+
+ lg.Info("gRPC proxy server TLS", zap.String("tls-info", fmt.Sprintf("%+v", tlsInfo)))
}
- m := mustListenCMux(lg, tlsinfo)
+
+ m := mustListenCMux(lg, tlsInfo)
grpcl := m.Match(cmux.HTTP2())
defer func() {
grpcl.Close()
@@ -213,17 +254,24 @@ func startGRPCProxy(cmd *cobra.Command, args []string) {
// TODO: The mechanism should be refactored to use internal connection.
var proxyClient *clientv3.Client
if grpcProxyAdvertiseClientURL != "" {
- proxyClient = mustNewProxyClient(lg, tlsinfo)
+ proxyClient = mustNewProxyClient(lg, tlsInfo)
+ }
+ httpClient := mustNewHTTPClient()
+
+ srvhttp, httpl := mustHTTPListener(lg, m, tlsInfo, client, proxyClient)
+
+ if err := http2.ConfigureServer(srvhttp, &http2.Server{
+ MaxConcurrentStreams: maxConcurrentStreams,
+ }); err != nil {
+ lg.Fatal("Failed to configure the http server", zap.Error(err))
}
- httpClient := mustNewHTTPClient(lg)
- srvhttp, httpl := mustHTTPListener(lg, m, tlsinfo, client, proxyClient)
errc := make(chan error, 3)
go func() { errc <- newGRPCProxyServer(lg, client).Serve(grpcl) }()
go func() { errc <- srvhttp.Serve(httpl) }()
go func() { errc <- m.Serve() }()
if len(grpcProxyMetricsListenAddr) > 0 {
- mhttpl := mustMetricsListener(lg, tlsinfo)
+ mhttpl := mustMetricsListener(lg, tlsInfo)
go func() {
mux := http.NewServeMux()
grpcproxy.HandleMetrics(mux, httpClient, client.Endpoints())
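
The hunk above wires the new --max-concurrent-streams flag into the proxy's HTTP server through http2.ConfigureServer. A minimal sketch of that call in isolation; the address and the limit of 1024 are placeholder values:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: "127.0.0.1:0", Handler: http.NotFoundHandler()}
	// Cap the number of concurrent HTTP/2 streams a single client may open,
	// mirroring the new --max-concurrent-streams proxy flag.
	if err := http2.ConfigureServer(srv, &http2.Server{MaxConcurrentStreams: 1024}); err != nil {
		log.Fatalf("failed to configure the http server: %v", err)
	}
	log.Println("HTTP/2 stream limit configured")
}
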
@@ -266,6 +314,29 @@ func checkArgs() {
fmt.Fprintln(os.Stderr, fmt.Errorf("selfSignedCertValidity is invalid,it should be greater than 0"))
os.Exit(1)
}
+
+ minVersion, err := tlsutil.GetTLSVersion(grpcProxyListenTLSMinVersion)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, fmt.Errorf("tls-min-version is invalid: %w", err))
+ os.Exit(1)
+ }
+ maxVersion, err := tlsutil.GetTLSVersion(grpcProxyListenTLSMaxVersion)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, fmt.Errorf("tls-max-version is invalid: %w", err))
+ os.Exit(1)
+ }
+
+ // maxVersion == 0 means that Go selects the highest available version.
+ if maxVersion != 0 && minVersion > maxVersion {
+ fmt.Fprintln(os.Stderr, fmt.Errorf("min version (%s) is greater than max version (%s)", grpcProxyListenTLSMinVersion, grpcProxyListenTLSMaxVersion))
+ os.Exit(1)
+ }
+
+ // Check if user attempted to configure ciphers for TLS1.3 only: Go does not support that currently.
+ if minVersion == tls.VersionTLS13 && len(grpcProxyListenCipherSuites) > 0 {
+ fmt.Fprintln(os.Stderr, fmt.Errorf("cipher suites cannot be configured when only TLS1.3 is enabled"))
+ os.Exit(1)
+ }
}
func mustNewClient(lg *zap.Logger) *clientv3.Client {
@@ -327,8 +398,9 @@ func newProxyClientCfg(lg *zap.Logger, eps []string, tls *transport.TLSInfo) (*c
func newClientCfg(lg *zap.Logger, eps []string) (*clientv3.Config, error) {
// set tls if any one tls option set
cfg := clientv3.Config{
- Endpoints: eps,
- DialTimeout: 5 * time.Second,
+ Endpoints: eps,
+ AutoSyncInterval: grpcProxyEndpointsAutoSyncInterval,
+ DialTimeout: 5 * time.Second,
}
if grpcMaxCallSendMsgSize > 0 {
@@ -337,6 +409,13 @@ func newClientCfg(lg *zap.Logger, eps []string) (*clientv3.Config, error) {
if grpcMaxCallRecvMsgSize > 0 {
cfg.MaxCallRecvMsgSize = grpcMaxCallRecvMsgSize
}
+ if grpcProxyDialKeepAliveTime > 0 {
+ cfg.DialKeepAliveTime = grpcProxyDialKeepAliveTime
+ }
+ if grpcProxyDialKeepAliveTimeout > 0 {
+ cfg.DialKeepAliveTimeout = grpcProxyDialKeepAliveTimeout
+ }
+ cfg.PermitWithoutStream = grpcProxyPermitWithoutStream
tls := newTLS(grpcProxyCA, grpcProxyCert, grpcProxyKey, true)
if tls == nil && grpcProxyInsecureSkipTLSVerify {
@@ -424,9 +503,28 @@ func newGRPCProxyServer(lg *zap.Logger, client *clientv3.Client) *grpc.Server {
electionp := grpcproxy.NewElectionProxy(client)
lockp := grpcproxy.NewLockProxy(client)
+ alwaysLoggingDeciderServer := func(ctx context.Context, fullMethodName string, servingObject any) bool { return true }
+
+ grpcChainStreamList := []grpc.StreamServerInterceptor{
+ grpc_prometheus.StreamServerInterceptor,
+ }
+ grpcChainUnaryList := []grpc.UnaryServerInterceptor{
+ grpc_prometheus.UnaryServerInterceptor,
+ }
+ if grpcProxyEnableLogging {
+ grpcChainStreamList = append(grpcChainStreamList,
+ grpc_ctxtags.StreamServerInterceptor(),
+ grpc_zap.PayloadStreamServerInterceptor(lg, alwaysLoggingDeciderServer),
+ )
+ grpcChainUnaryList = append(grpcChainUnaryList,
+ grpc_ctxtags.UnaryServerInterceptor(),
+ grpc_zap.PayloadUnaryServerInterceptor(lg, alwaysLoggingDeciderServer),
+ )
+ }
+
gopts := []grpc.ServerOption{
- grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
- grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
+ grpc.ChainStreamInterceptor(grpcChainStreamList...),
+ grpc.ChainUnaryInterceptor(grpcChainUnaryList...),
grpc.MaxConcurrentStreams(math.MaxUint32),
}
if grpcKeepAliveMinTime > time.Duration(0) {
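
The server options above switch from grpc.StreamInterceptor/UnaryInterceptor to the chained variants so the Prometheus interceptors can be combined with the optional grpc_ctxtags and grpc_zap payload-logging ones when --experimental-enable-grpc-logging is set. A minimal sketch of the chaining call with a placeholder interceptor (only the grpc-go APIs are real here):

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// loggingUnary is an illustrative interceptor; in the proxy the chain holds
// grpc_prometheus.UnaryServerInterceptor plus, optionally, the grpc_ctxtags
// and grpc_zap payload interceptors.
func loggingUnary(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	log.Printf("unary call: %s", info.FullMethod)
	return handler(ctx, req)
}

func main() {
	chain := []grpc.UnaryServerInterceptor{loggingUnary}
	// Further interceptors can be appended conditionally, exactly like
	// grpcChainUnaryList above, before the server is constructed.
	srv := grpc.NewServer(grpc.ChainUnaryInterceptor(chain...))
	defer srv.Stop()
	log.Printf("configured server with %d chained unary interceptor(s)", len(chain))
}
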
@@ -458,7 +556,7 @@ func newGRPCProxyServer(lg *zap.Logger, client *clientv3.Client) *grpc.Server {
}
func mustHTTPListener(lg *zap.Logger, m cmux.CMux, tlsinfo *transport.TLSInfo, c *clientv3.Client, proxy *clientv3.Client) (*http.Server, net.Listener) {
- httpClient := mustNewHTTPClient(lg)
+ httpClient := mustNewHTTPClient()
httpmux := http.NewServeMux()
httpmux.HandleFunc("/", http.NotFound)
grpcproxy.HandleMetrics(httpmux, httpClient, c.Endpoints())
@@ -473,7 +571,7 @@ func mustHTTPListener(lg *zap.Logger, m cmux.CMux, tlsinfo *transport.TLSInfo, c
}
srvhttp := &http.Server{
Handler: httpmux,
- ErrorLog: log.New(ioutil.Discard, "net/http", 0),
+ ErrorLog: log.New(io.Discard, "net/http", 0),
}
if tlsinfo == nil {
@@ -488,7 +586,7 @@ func mustHTTPListener(lg *zap.Logger, m cmux.CMux, tlsinfo *transport.TLSInfo, c
return srvhttp, m.Match(cmux.Any())
}
-func mustNewHTTPClient(lg *zap.Logger) *http.Client {
+func mustNewHTTPClient() *http.Client {
transport, err := newHTTPTransport(grpcProxyCA, grpcProxyCert, grpcProxyKey)
if err != nil {
fmt.Fprintln(os.Stderr, err)
@@ -501,7 +599,7 @@ func newHTTPTransport(ca, cert, key string) (*http.Transport, error) {
tr := &http.Transport{}
if ca != "" && cert != "" && key != "" {
- caCert, err := ioutil.ReadFile(ca)
+ caCert, err := os.ReadFile(ca)
if err != nil {
return nil, err
}
@@ -516,7 +614,6 @@ func newHTTPTransport(ca, cert, key string) (*http.Transport, error) {
Certificates: []tls.Certificate{keyPair},
RootCAs: caPool,
}
- tlsConfig.BuildNameToCertificate()
tr.TLSClientConfig = tlsConfig
} else if grpcProxyInsecureSkipTLSVerify {
tlsConfig := &tls.Config{InsecureSkipVerify: grpcProxyInsecureSkipTLSVerify}
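
The proxy's new --tls-min-version/--tls-max-version flags are validated in checkArgs above: both values must parse, min must not exceed max (0 meaning Go picks the highest version), and cipher suites cannot be forced when only TLS 1.3 is allowed. A condensed sketch of the same checks using only crypto/tls, with a hypothetical parseTLSVersion helper standing in for tlsutil.GetTLSVersion:

package main

import (
	"crypto/tls"
	"fmt"
)

// parseTLSVersion is a hypothetical stand-in for tlsutil.GetTLSVersion:
// an empty string means "defer to Go" and is reported as 0.
func parseTLSVersion(s string) (uint16, error) {
	switch s {
	case "":
		return 0, nil
	case "TLS1.2":
		return tls.VersionTLS12, nil
	case "TLS1.3":
		return tls.VersionTLS13, nil
	}
	return 0, fmt.Errorf("unsupported TLS version %q", s)
}

func validateTLSFlags(minStr, maxStr string, cipherSuites []string) error {
	minV, err := parseTLSVersion(minStr)
	if err != nil {
		return fmt.Errorf("tls-min-version is invalid: %w", err)
	}
	maxV, err := parseTLSVersion(maxStr)
	if err != nil {
		return fmt.Errorf("tls-max-version is invalid: %w", err)
	}
	// maxV == 0 means Go selects the highest available version.
	if maxV != 0 && minV > maxV {
		return fmt.Errorf("min version (%s) is greater than max version (%s)", minStr, maxStr)
	}
	// Go does not support configuring cipher suites when only TLS 1.3 is enabled.
	if minV == tls.VersionTLS13 && len(cipherSuites) > 0 {
		return fmt.Errorf("cipher suites cannot be configured when only TLS1.3 is enabled")
	}
	return nil
}

func main() {
	fmt.Println(validateTLSFlags("TLS1.3", "TLS1.2", nil)) // min > max: rejected
}
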
diff --git a/server/etcdmain/help.go b/server/etcdmain/help.go
index dc5b55fae7e..aed619c548f 100644
--- a/server/etcdmain/help.go
+++ b/server/etcdmain/help.go
@@ -1,4 +1,5 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -17,10 +18,14 @@ package etcdmain
import (
"fmt"
"strconv"
+ "strings"
+
+ "golang.org/x/crypto/bcrypt"
cconfig "go.etcd.io/etcd/server/v3/config"
"go.etcd.io/etcd/server/v3/embed"
- "golang.org/x/crypto/bcrypt"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
+ "go.etcd.io/etcd/server/v3/features"
)
var (
@@ -52,8 +57,8 @@ Member:
Path to the data directory.
--wal-dir ''
Path to the dedicated wal directory.
- --snapshot-count '100000'
- Number of committed transactions to trigger a snapshot to disk.
+ --snapshot-count '10000'
+ Number of committed transactions to trigger a snapshot to disk. Deprecated in v3.6 and will be decommissioned in v3.7.
--heartbeat-interval '100'
Time (in milliseconds) of a heartbeat interval.
--election-timeout '1000'
@@ -63,9 +68,11 @@ Member:
--listen-peer-urls 'http://localhost:2380'
List of URLs to listen on for peer traffic.
--listen-client-urls 'http://localhost:2379'
- List of URLs to listen on for client traffic.
+ List of URLs to listen on for client grpc traffic, and http traffic as long as --listen-client-http-urls is not specified.
+ --listen-client-http-urls ''
+ List of URLs to listen on for http-only client traffic. Enabling this flag removes http services from --listen-client-urls.
--max-snapshots '` + strconv.Itoa(embed.DefaultMaxSnapshots) + `'
- Maximum number of snapshot files to retain (0 is unlimited).
+ Maximum number of snapshot files to retain (0 is unlimited). Deprecated in v3.6 and will be decommissioned in v3.7.
--max-wals '` + strconv.Itoa(embed.DefaultMaxWALs) + `'
Maximum number of wal files to retain (0 is unlimited).
--quota-backend-bytes '0'
@@ -80,6 +87,8 @@ Member:
Maximum number of operations permitted in a transaction.
--max-request-bytes '1572864'
Maximum client request size in bytes the server will accept.
+ --max-concurrent-streams 'math.MaxUint32'
+ Maximum concurrent streams that each client can open at a time.
--grpc-keepalive-min-time '5s'
Minimum duration interval that a client should wait before pinging server.
--grpc-keepalive-interval '2h'
@@ -89,15 +98,26 @@ Member:
--socket-reuse-port 'false'
Enable to set socket option SO_REUSEPORT on listeners allowing rebinding of a port already in use.
--socket-reuse-address 'false'
- Enable to set socket option SO_REUSEADDR on listeners allowing binding to an address in TIME_WAIT state.
+ Enable to set socket option SO_REUSEADDR on listeners allowing binding to an address in TIME_WAIT state.
+ --enable-grpc-gateway
+ Enable GRPC gateway.
+ --raft-read-timeout '` + rafthttp.DefaultConnReadTimeout.String() + `'
+ Read timeout set on each rafthttp connection
+ --raft-write-timeout '` + rafthttp.DefaultConnWriteTimeout.String() + `'
+ Write timeout set on each rafthttp connection
+ --feature-gates ''
+ A set of key=value pairs that describe server level feature gates for alpha/experimental features. Options are:` + "\n " + strings.Join(features.NewDefaultServerFeatureGate("", nil).KnownFeatures(), "\n ") + `
Clustering:
--initial-advertise-peer-urls 'http://localhost:2380'
List of this member's peer URLs to advertise to the rest of the cluster.
+ --experimental-set-member-localaddr 'false'
+ Enable using the first specified and non-loopback local address from initial-advertise-peer-urls as the local address when communicating with a peer.
--initial-cluster 'default=http://localhost:2380'
Initial cluster configuration for bootstrapping.
--initial-cluster-state 'new'
- Initial cluster state ('new' or 'existing').
+ Initial cluster state ('new' when bootstrapping a new cluster or 'existing' when adding new members to an existing cluster).
+ After successful initialization (bootstrapping or adding), the flag is ignored on restarts.
--initial-cluster-token 'etcd-cluster'
Initial cluster token for the etcd cluster during bootstrap.
Specifying this can protect you from unintended cross-cluster interaction when running multiple clusters.
@@ -105,12 +125,37 @@ Clustering:
List of this member's client URLs to advertise to the public.
The client URLs advertised should be accessible to machines that talk to etcd cluster. etcd client libraries parse these URLs to connect to the cluster.
--discovery ''
- Discovery URL used to bootstrap the cluster.
- --discovery-fallback 'proxy'
- Expected behavior ('exit' or 'proxy') when discovery services fails.
- "proxy" supports v2 API only.
+ Discovery URL used to bootstrap the cluster for v2 discovery. Will be deprecated in v3.7, and be decommissioned in v3.8.
+ --discovery-token ''
+ V3 discovery: discovery token for the etcd cluster to be bootstrapped.
+ --discovery-endpoints ''
+ V3 discovery: List of gRPC endpoints of the discovery service.
+ --discovery-dial-timeout '2s'
+ V3 discovery: dial timeout for client connections.
+ --discovery-request-timeout '5s'
+ V3 discovery: timeout for discovery requests (excluding dial timeout).
+ --discovery-keepalive-time '2s'
+ V3 discovery: keepalive time for client connections.
+ --discovery-keepalive-timeout '6s'
+ V3 discovery: keepalive timeout for client connections.
+ --discovery-insecure-transport 'true'
+ V3 discovery: disable transport security for client connections.
+ --discovery-insecure-skip-tls-verify 'false'
+ V3 discovery: skip server certificate verification (CAUTION: this option should be enabled only for testing purposes).
+ --discovery-cert ''
+ V3 discovery: identify secure client using this TLS certificate file.
+ --discovery-key ''
+ V3 discovery: identify secure client using this TLS key file.
+ --discovery-cacert ''
+ V3 discovery: verify certificates of TLS-enabled secure servers using this CA bundle.
+ --discovery-user ''
+ V3 discovery: username[:password] for authentication (prompt if password is not supplied).
+ --discovery-password ''
+ V3 discovery: password for authentication (if this option is used, --user option shouldn't include password).
+ --discovery-fallback 'exit'
+ Expected behavior ('exit') when the discovery service fails. Note that the v2 proxy is removed.
--discovery-proxy ''
- HTTP proxy to use for traffic to discovery service.
+ HTTP proxy to use for traffic to discovery service. Will be deprecated in v3.7, and be decommissioned in v3.8.
--discovery-srv ''
DNS srv domain used to bootstrap the cluster.
--discovery-srv-name ''
@@ -118,20 +163,18 @@ Clustering:
--strict-reconfig-check '` + strconv.FormatBool(embed.DefaultStrictReconfigCheck) + `'
Reject reconfiguration requests that would cause quorum loss.
--pre-vote 'true'
- Enable to run an additional Raft election phase.
+ Enable the raft Pre-Vote algorithm to prevent disruption when a node that has been partitioned away rejoins the cluster.
--auto-compaction-retention '0'
Auto compaction retention length. 0 means disable auto compaction.
--auto-compaction-mode 'periodic'
Interpret 'auto-compaction-retention' one of: periodic|revision. 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. '5m'). 'revision' for revision number based retention.
- --enable-v2 '` + strconv.FormatBool(embed.DefaultEnableV2) + `'
- Accept etcd V2 client requests. Deprecated and to be decommissioned in v3.6.
- --v2-deprecation '` + string(cconfig.V2_DEPR_DEFAULT) + `'
- Phase of v2store deprecation. Allows to opt-in for higher compatibility mode.
+ --v2-deprecation '` + string(cconfig.V2DeprDefault) + `'
+ Phase of v2store deprecation. Deprecated and scheduled for removal in v3.8. The default value is enforced, ignoring user input.
Supported values:
'not-yet' // Issues a warning if v2store have meaningful content (default in v3.5)
- 'write-only' // Custom v2 state is not allowed (planned default in v3.6)
- 'write-only-drop-data' // Custom v2 state will get DELETED !
- 'gone' // v2store is not maintained any longer. (planned default in v3.7)
+ 'write-only' // Custom v2 state is not allowed (default in v3.6)
+ 'write-only-drop-data' // Custom v2 state will get DELETED ! (planned default in v3.7)
+ 'gone' // v2store is not maintained any longer. (planned to cleanup anything related to v2store in v3.8)
Security:
--cert-file ''
@@ -140,10 +183,14 @@ Security:
Path to the client server TLS key file.
--client-cert-auth 'false'
Enable client cert authentication.
+ --client-cert-file ''
+ Path to an explicit peer client TLS cert file otherwise cert file will be used when client auth is required.
+ --client-key-file ''
+ Path to an explicit peer client TLS key file otherwise key file will be used when client auth is required.
--client-crl-file ''
Path to the client certificate revocation list file.
--client-cert-allowed-hostname ''
- Allowed TLS hostname for client cert authentication.
+ Comma-separated list of SAN hostnames for client cert authentication.
--trusted-ca-file ''
Path to the client server TLS trusted CA cert file.
--auto-tls 'false'
@@ -154,12 +201,16 @@ Security:
Path to the peer server TLS key file.
--peer-client-cert-auth 'false'
Enable peer client cert authentication.
+ --peer-client-cert-file ''
+ Path to an explicit peer client TLS cert file otherwise peer cert file will be used when client auth is required.
+ --peer-client-key-file ''
+ Path to an explicit peer client TLS key file otherwise peer key file will be used when client auth is required.
--peer-trusted-ca-file ''
Path to the peer server TLS trusted CA file.
--peer-cert-allowed-cn ''
- Required CN for client certs connecting to the peer endpoint.
+ Comma-separated list of allowed CNs for inter-peer TLS authentication.
--peer-cert-allowed-hostname ''
- Allowed TLS hostname for inter peer authentication.
+ Comma-separated list of allowed SAN hostnames for inter-peer TLS authentication.
--peer-auto-tls 'false'
Peer TLS using self-generated certificates if --peer-key-file and --peer-cert-file are not provided.
--self-signed-cert-validity '1'
@@ -172,6 +223,10 @@ Security:
Comma-separated whitelist of origins for CORS, or cross-origin resource sharing, (empty or * means allow all).
--host-whitelist '*'
Acceptable hostnames from HTTP client requests, if server is not secure (empty or * means allow all).
+ --tls-min-version 'TLS1.2'
+ Minimum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3.
+ --tls-max-version ''
+ Maximum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3 (empty will be auto-populated by Go).
Auth:
--auth-token 'simple'
@@ -187,7 +242,7 @@ Profiling and Monitoring:
--metrics 'basic'
Set level of detail for exported metrics, specify 'extensive' to include server side grpc histogram metrics.
--listen-metrics-urls ''
- List of URLs to listen on for the metrics and health endpoints.
+ List of URLs to listen on for the /metrics and /health endpoints. For https, the client URL TLS info is used.
Logging:
--logger 'zap'
@@ -196,10 +251,14 @@ Logging:
Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd, or list of comma separated output targets.
--log-level 'info'
Configures log level. Only supports debug, info, warn, error, panic, or fatal.
+ --log-format 'json'
+ Configures log format. Only supports json, console.
--enable-log-rotation 'false'
Enable log rotation of a single log-outputs file target.
--log-rotation-config-json '{"maxsize": 100, "maxage": 0, "maxbackups": 0, "localtime": false, "compress": false}'
- Configures log rotation if enabled with a JSON logger config. MaxSize(MB), MaxAge(days,0=no limit), MaxBackups(0=no limit), LocalTime(use computers local time), Compress(gzip)".
+ Configures log rotation if enabled with a JSON logger config. MaxSize(MB), MaxAge(days,0=no limit), MaxBackups(0=no limit), LocalTime(use computers local time), Compress(gzip)".
+ --warning-unary-request-duration '300ms'
+ Set time duration after which a warning is logged if a unary request takes more than this duration.
Experimental distributed tracing:
--experimental-enable-distributed-tracing 'false'
@@ -210,28 +269,20 @@ Experimental distributed tracing:
Distributed tracing service name, must be same across all etcd instances.
--experimental-distributed-tracing-instance-id ''
Distributed tracing instance ID, must be unique per each etcd instance.
-
-v2 Proxy (to be deprecated in v3.6):
- --proxy 'off'
- Proxy mode setting ('off', 'readonly' or 'on').
- --proxy-failure-wait 5000
- Time (in milliseconds) an endpoint will be held in a failed state.
- --proxy-refresh-interval 30000
- Time (in milliseconds) of the endpoints refresh interval.
- --proxy-dial-timeout 1000
- Time (in milliseconds) for a dial to timeout.
- --proxy-write-timeout 5000
- Time (in milliseconds) for a write to timeout.
- --proxy-read-timeout 0
- Time (in milliseconds) for a read to timeout.
+ --experimental-distributed-tracing-sampling-rate '0'
+ Number of samples to collect per million spans for distributed tracing. Disabled by default.
Experimental feature:
- --experimental-initial-corrupt-check 'false'
+ --experimental-initial-corrupt-check 'false'. It's deprecated, and will be decommissioned in v3.7. Use '--feature-gates=InitialCorruptCheck=true' instead.
Enable to check data corruption before serving any client/peer traffic.
--experimental-corrupt-check-time '0s'
Duration of time between cluster corruption check passes.
- --experimental-enable-v2v3 ''
- Serve v2 requests through the v3 backend under a given prefix. Deprecated and to be decommissioned in v3.6.
+ --experimental-compact-hash-check-enabled 'false'. Deprecated in v3.6 and will be decommissioned in v3.7. Use '--feature-gates=CompactHashCheck=true' instead.
+ Enable leader to periodically check followers' compaction hashes.
+ --experimental-compact-hash-check-time '1m'
+ Duration of time between leader checks of followers' compaction hashes. Deprecated in v3.6 and will be decommissioned in v3.7. Use '--compact-hash-check-time' instead.
+ --compact-hash-check-time '1m'
+ Duration of time between leader checks of followers' compaction hashes.
--experimental-enable-lease-checkpoint 'false'
ExperimentalEnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases.
--experimental-compaction-batch-limit 1000
@@ -241,11 +292,29 @@ Experimental feature:
--experimental-watch-progress-notify-interval '10m'
Duration of periodical watch progress notification.
--experimental-warning-apply-duration '100ms'
- Warning is generated if requests take more than this duration.
- --experimental-txn-mode-write-with-shared-buffer 'true'
+ Warning is generated if requests take more than this duration.
+ --experimental-txn-mode-write-with-shared-buffer 'true'. Deprecated in v3.6 and will be decommissioned in v3.7. Use '--feature-gates=TxnModeWriteWithSharedBuffer=true' instead.
Enable the write transaction to use a shared buffer in its readonly check operations.
--experimental-bootstrap-defrag-threshold-megabytes
Enable the defrag during etcd server bootstrap on condition that it will free at least the provided threshold of disk space. Needs to be set to non-zero value to take effect.
+ --experimental-warning-unary-request-duration '300ms'
+ Set time duration after which a warning is generated if a unary request takes more than this duration. It's deprecated, and will be decommissioned in v3.7. Use --warning-unary-request-duration instead.
+ --experimental-max-learners '1'
+ Set the max number of learner members allowed in the cluster membership.
+ --experimental-snapshot-catch-up-entries '5000'
+ Number of entries for a slow follower to catch up after compacting the raft storage entries.
+ --experimental-compaction-sleep-interval
+ Sets the sleep interval between each compaction batch.
+ --experimental-downgrade-check-time
+ Duration of time between two downgrade status checks.
+ --experimental-enable-lease-checkpoint-persist 'false'
+ Enable persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled. Requires experimental-enable-lease-checkpoint to be enabled.
+ --experimental-memory-mlock
+ Enable to enforce etcd pages (in particular bbolt) to stay in RAM.
+ --experimental-snapshot-catchup-entries
+ Number of entries for a slow follower to catch up after compacting the raft storage entries.
+ --experimental-stop-grpc-service-on-defrag
+ Enable etcd gRPC service to stop serving client requests on defragmentation. It's deprecated, and will be decommissioned in v3.7. Use '--feature-gates=StopGRPCServiceOnDefrag=true' instead.
Unsafe feature:
--force-new-cluster 'false'
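Several of the deprecated experimental flags above point at '--feature-gates' replacements. As a rough illustration only (assuming the usual comma-separated key=value syntax for feature gates; not taken from the etcd docs), a member that previously relied on the experimental corrupt-check flags might instead be started as:

```console
$ etcd --feature-gates=InitialCorruptCheck=true,CompactHashCheck=true \
    --compact-hash-check-time=2m
```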
diff --git a/server/etcdmain/main.go b/server/etcdmain/main.go
index 2e67a137cc3..e28e7da928d 100644
--- a/server/etcdmain/main.go
+++ b/server/etcdmain/main.go
@@ -41,9 +41,6 @@ func Main(args []string) {
}
func notifySystemd(lg *zap.Logger) {
- if lg == nil {
- lg = zap.NewExample()
- }
lg.Info("notifying init daemon")
_, err := daemon.SdNotify(false, daemon.SdNotifyReady)
if err != nil {
diff --git a/server/etcdmain/util.go b/server/etcdmain/util.go
index 0bd23e9e591..984592a2c49 100644
--- a/server/etcdmain/util.go
+++ b/server/etcdmain/util.go
@@ -18,10 +18,10 @@ import (
"fmt"
"os"
+ "go.uber.org/zap"
+
"go.etcd.io/etcd/client/pkg/v3/srv"
"go.etcd.io/etcd/client/pkg/v3/transport"
-
- "go.uber.org/zap"
)
func discoverEndpoints(lg *zap.Logger, dns string, ca string, insecure bool, serviceName string) (s srv.SRVClients) {
@@ -35,13 +35,11 @@ func discoverEndpoints(lg *zap.Logger, dns string, ca string, insecure bool, ser
}
endpoints := srvs.Endpoints
- if lg != nil {
- lg.Info(
- "discovered cluster from SRV",
- zap.String("srv-server", dns),
- zap.Strings("endpoints", endpoints),
- )
- }
+ lg.Info(
+ "discovered cluster from SRV",
+ zap.String("srv-server", dns),
+ zap.Strings("endpoints", endpoints),
+ )
if insecure {
return *srvs
@@ -52,32 +50,26 @@ func discoverEndpoints(lg *zap.Logger, dns string, ca string, insecure bool, ser
ServerName: dns,
}
- if lg != nil {
- lg.Info(
- "validating discovered SRV endpoints",
- zap.String("srv-server", dns),
- zap.Strings("endpoints", endpoints),
- )
- }
+ lg.Info(
+ "validating discovered SRV endpoints",
+ zap.String("srv-server", dns),
+ zap.Strings("endpoints", endpoints),
+ )
endpoints, err = transport.ValidateSecureEndpoints(tlsInfo, endpoints)
if err != nil {
- if lg != nil {
- lg.Warn(
- "failed to validate discovered endpoints",
- zap.String("srv-server", dns),
- zap.Strings("endpoints", endpoints),
- zap.Error(err),
- )
- }
+ lg.Warn(
+ "failed to validate discovered endpoints",
+ zap.String("srv-server", dns),
+ zap.Strings("endpoints", endpoints),
+ zap.Error(err),
+ )
} else {
- if lg != nil {
- lg.Info(
- "using validated discovered SRV endpoints",
- zap.String("srv-server", dns),
- zap.Strings("endpoints", endpoints),
- )
- }
+ lg.Info(
+ "using validated discovered SRV endpoints",
+ zap.String("srv-server", dns),
+ zap.Strings("endpoints", endpoints),
+ )
}
// map endpoints back to SRVClients struct with SRV data
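The nil-logger guards removed here reflect a broader convention in this change: the logger argument is treated as never nil, and callers that want silence pass a no-op logger instead. A minimal standalone sketch of that convention (names are illustrative, not etcd code):

```go
package main

import "go.uber.org/zap"

// With the `if lg != nil` guards gone, the contract is simply "lg is never nil".
func do(lg *zap.Logger) {
	lg.Info("doing work")
}

func main() {
	do(zap.NewNop())     // discards all output
	do(zap.NewExample()) // writes development-friendly JSON lines to stdout
}
```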
diff --git a/server/etcdserver/adapters.go b/server/etcdserver/adapters.go
new file mode 100644
index 00000000000..35660a27bd0
--- /dev/null
+++ b/server/etcdserver/adapters.go
@@ -0,0 +1,89 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "context"
+
+ "github.com/coreos/go-semver/semver"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/membershippb"
+ "go.etcd.io/etcd/api/v3/version"
+ serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+// serverVersionAdapter implements the interface Server defined in package
+// go.etcd.io/etcd/server/v3/etcdserver/version, and it's needed by Monitor
+// in the same package.
+type serverVersionAdapter struct {
+ *EtcdServer
+}
+
+func NewServerVersionAdapter(s *EtcdServer) *serverVersionAdapter {
+ return &serverVersionAdapter{
+ EtcdServer: s,
+ }
+}
+
+var _ serverversion.Server = (*serverVersionAdapter)(nil)
+
+func (s *serverVersionAdapter) UpdateClusterVersion(version string) {
+ s.GoAttach(func() { s.updateClusterVersionV3(version) })
+}
+
+func (s *serverVersionAdapter) LinearizableReadNotify(ctx context.Context) error {
+ return s.linearizableReadNotify(ctx)
+}
+
+func (s *serverVersionAdapter) DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error {
+ raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: true, Ver: targetVersion.String()}
+ _, err := s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest})
+ return err
+}
+
+func (s *serverVersionAdapter) DowngradeCancel(ctx context.Context) error {
+ raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: false}
+ _, err := s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest})
+ return err
+}
+
+func (s *serverVersionAdapter) GetClusterVersion() *semver.Version {
+ return s.cluster.Version()
+}
+
+func (s *serverVersionAdapter) GetDowngradeInfo() *serverversion.DowngradeInfo {
+ return s.cluster.DowngradeInfo()
+}
+
+func (s *serverVersionAdapter) GetMembersVersions() map[string]*version.Versions {
+ return getMembersVersions(s.lg, s.cluster, s.MemberID(), s.peerRt, s.Cfg.ReqTimeout())
+}
+
+func (s *serverVersionAdapter) GetStorageVersion() *semver.Version {
+ return s.StorageVersion()
+}
+
+func (s *serverVersionAdapter) UpdateStorageVersion(target semver.Version) error {
+ // `applySnapshot` sets a new backend instance, so we need to acquire the bemu lock.
+ s.bemu.RLock()
+ defer s.bemu.RUnlock()
+
+ tx := s.be.BatchTx()
+ tx.LockOutsideApply()
+ defer tx.Unlock()
+ return schema.UnsafeMigrate(s.lg, tx, s.r.storage, target)
+}
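The `var _ serverversion.Server = (*serverVersionAdapter)(nil)` line above is a compile-time assertion rather than a runtime value. A tiny self-contained illustration of the same idiom (the types here are made up for the example):

```go
package main

import "fmt"

type versionServer interface {
	GetClusterVersion() string
}

type adapter struct{}

func (a *adapter) GetClusterVersion() string { return "3.6.0" }

// Fails to compile if *adapter ever stops satisfying versionServer,
// surfacing interface drift at build time instead of at a call site.
var _ versionServer = (*adapter)(nil)

func main() {
	var v versionServer = &adapter{}
	fmt.Println(v.GetClusterVersion())
}
```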
diff --git a/server/etcdserver/api/capability.go b/server/etcdserver/api/capability.go
index ea2f0e97e4b..cf535ec4efa 100644
--- a/server/etcdserver/api/capability.go
+++ b/server/etcdserver/api/capability.go
@@ -17,11 +17,11 @@ package api
import (
"sync"
- "go.etcd.io/etcd/api/v3/version"
- "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "github.com/coreos/go-semver/semver"
"go.uber.org/zap"
- "github.com/coreos/go-semver/semver"
+ "go.etcd.io/etcd/api/v3/version"
+ serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
)
type Capability string
@@ -40,6 +40,7 @@ var (
"3.3.0": {AuthCapability: true, V3rpcCapability: true},
"3.4.0": {AuthCapability: true, V3rpcCapability: true},
"3.5.0": {AuthCapability: true, V3rpcCapability: true},
+ "3.6.0": {AuthCapability: true, V3rpcCapability: true},
}
enableMapMu sync.RWMutex
@@ -63,7 +64,7 @@ func UpdateCapability(lg *zap.Logger, v *semver.Version) {
return
}
enableMapMu.Lock()
- if curVersion != nil && !membership.IsValidVersionChange(v, curVersion) {
+ if curVersion != nil && !serverversion.IsValidClusterVersionChange(curVersion, v) {
enableMapMu.Unlock()
return
}
diff --git a/server/etcdserver/api/etcdhttp/base.go b/server/etcdserver/api/etcdhttp/base.go
deleted file mode 100644
index dcfa3f06959..00000000000
--- a/server/etcdserver/api/etcdhttp/base.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdhttp
-
-import (
- "encoding/json"
- "expvar"
- "fmt"
- "net/http"
-
- "go.etcd.io/etcd/api/v3/version"
- "go.etcd.io/etcd/server/v3/etcdserver"
- "go.etcd.io/etcd/server/v3/etcdserver/api"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes"
- "go.uber.org/zap"
-)
-
-const (
- configPath = "/config"
- varsPath = "/debug/vars"
- versionPath = "/version"
-)
-
-// HandleBasic adds handlers to a mux for serving JSON etcd client requests
-// that do not access the v2 store.
-func HandleBasic(lg *zap.Logger, mux *http.ServeMux, server etcdserver.ServerPeer) {
- mux.HandleFunc(varsPath, serveVars)
- mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion))
-}
-
-func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
- v := c.Version()
- if v != nil {
- fn(w, r, v.String())
- } else {
- fn(w, r, "not_decided")
- }
- }
-}
-
-func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) {
- if !allowMethod(w, r, "GET") {
- return
- }
- vs := version.Versions{
- Server: version.Version,
- Cluster: clusterV,
- }
-
- w.Header().Set("Content-Type", "application/json")
- b, err := json.Marshal(&vs)
- if err != nil {
- panic(fmt.Sprintf("cannot marshal versions to json (%v)", err))
- }
- w.Write(b)
-}
-
-func serveVars(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r, "GET") {
- return
- }
-
- w.Header().Set("Content-Type", "application/json; charset=utf-8")
- fmt.Fprintf(w, "{\n")
- first := true
- expvar.Do(func(kv expvar.KeyValue) {
- if !first {
- fmt.Fprintf(w, ",\n")
- }
- first = false
- fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
- })
- fmt.Fprintf(w, "\n}\n")
-}
-
-func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool {
- if m == r.Method {
- return true
- }
- w.Header().Set("Allow", m)
- http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
- return false
-}
-
-// WriteError logs and writes the given Error to the ResponseWriter
-// If Error is an etcdErr, it is rendered to the ResponseWriter
-// Otherwise, it is assumed to be a StatusInternalServerError
-func WriteError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) {
- if err == nil {
- return
- }
- switch e := err.(type) {
- case *v2error.Error:
- e.WriteTo(w)
-
- case *httptypes.HTTPError:
- if et := e.WriteTo(w); et != nil {
- if lg != nil {
- lg.Debug(
- "failed to write v2 HTTP error",
- zap.String("remote-addr", r.RemoteAddr),
- zap.String("internal-server-error", e.Error()),
- zap.Error(et),
- )
- }
- }
-
- default:
- switch err {
- case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers,
- etcdserver.ErrUnhealthy:
- if lg != nil {
- lg.Warn(
- "v2 response error",
- zap.String("remote-addr", r.RemoteAddr),
- zap.String("internal-server-error", err.Error()),
- )
- }
-
- default:
- if lg != nil {
- lg.Warn(
- "unexpected v2 response error",
- zap.String("remote-addr", r.RemoteAddr),
- zap.String("internal-server-error", err.Error()),
- )
- }
- }
-
- herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error")
- if et := herr.WriteTo(w); et != nil {
- if lg != nil {
- lg.Debug(
- "failed to write v2 HTTP error",
- zap.String("remote-addr", r.RemoteAddr),
- zap.String("internal-server-error", err.Error()),
- zap.Error(et),
- )
- }
- }
- }
-}
diff --git a/server/etcdserver/api/etcdhttp/debug.go b/server/etcdserver/api/etcdhttp/debug.go
new file mode 100644
index 00000000000..ab7feee97f6
--- /dev/null
+++ b/server/etcdserver/api/etcdhttp/debug.go
@@ -0,0 +1,47 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdhttp
+
+import (
+ "expvar"
+ "fmt"
+ "net/http"
+)
+
+const (
+ varsPath = "/debug/vars"
+)
+
+func HandleDebug(mux *http.ServeMux) {
+ mux.HandleFunc(varsPath, serveVars)
+}
+
+func serveVars(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r, "GET") {
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json; charset=utf-8")
+ fmt.Fprint(w, "{\n")
+ first := true
+ expvar.Do(func(kv expvar.KeyValue) {
+ if !first {
+ fmt.Fprint(w, ",\n")
+ }
+ first = false
+ fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
+ })
+ fmt.Fprint(w, "\n}\n")
+}
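serveVars renders the registered expvar variables as a single JSON object, much like the handler the standard library installs itself. A standalone sketch of the expvar side, with an illustrative counter (not etcd code):

```go
package main

import (
	"expvar"
	"log"
	"net/http"
)

// Published variables appear in the /debug/vars JSON alongside the
// built-in cmdline and memstats entries.
var requestsServed = expvar.NewInt("requests_served")

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		requestsServed.Add(1)
		w.Write([]byte("ok"))
	})
	// Importing expvar registers its own handler on http.DefaultServeMux at /debug/vars.
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
```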
diff --git a/server/etcdserver/api/etcdhttp/health.go b/server/etcdserver/api/etcdhttp/health.go
new file mode 100644
index 00000000000..ccc6d5b7208
--- /dev/null
+++ b/server/etcdserver/api/etcdhttp/health.go
@@ -0,0 +1,433 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines the http endpoints for etcd health checks.
+// The endpoints include /livez, /readyz and /health.
+
+package etcdhttp
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "path"
+ "strings"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/raft/v3"
+)
+
+const (
+ PathHealth = "/health"
+ PathProxyHealth = "/proxy/health"
+ HealthStatusSuccess string = "success"
+ HealthStatusError string = "error"
+ checkTypeLivez = "livez"
+ checkTypeReadyz = "readyz"
+ checkTypeHealth = "health"
+)
+
+type ServerHealth interface {
+ Alarms() []*pb.AlarmMember
+ Leader() types.ID
+ Range(context.Context, *pb.RangeRequest) (*pb.RangeResponse, error)
+ Config() config.ServerConfig
+ AuthStore() auth.AuthStore
+}
+
+// HandleHealth registers health handlers. It checks health by using a v3 range request
+// and its corresponding timeout.
+func HandleHealth(lg *zap.Logger, mux *http.ServeMux, srv ServerHealth) {
+ mux.Handle(PathHealth, NewHealthHandler(lg, func(ctx context.Context, excludedAlarms StringSet, serializable bool) Health {
+ if h := checkAlarms(lg, srv, excludedAlarms); h.Health != "true" {
+ return h
+ }
+ if h := checkLeader(lg, srv, serializable); h.Health != "true" {
+ return h
+ }
+ return checkAPI(ctx, lg, srv, serializable)
+ }))
+
+ installLivezEndpoints(lg, mux, srv)
+ installReadyzEndpoints(lg, mux, srv)
+}
+
+// NewHealthHandler handles '/health' requests.
+func NewHealthHandler(lg *zap.Logger, hfunc func(ctx context.Context, excludedAlarms StringSet, Serializable bool) Health) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodGet {
+ w.Header().Set("Allow", http.MethodGet)
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ lg.Warn("/health error", zap.Int("status-code", http.StatusMethodNotAllowed))
+ return
+ }
+ excludedAlarms := getQuerySet(r, "exclude")
+ // Passing the query parameter "serializable=true" ensures that the
+ // health of the local etcd is checked vs the health of the cluster.
+ // This is useful for probes attempting to validate the liveness of
+ // the etcd process vs readiness of the cluster to serve requests.
+ serializableFlag := getSerializableFlag(r)
+ h := hfunc(r.Context(), excludedAlarms, serializableFlag)
+ defer func() {
+ if h.Health == "true" {
+ healthSuccess.Inc()
+ } else {
+ healthFailed.Inc()
+ }
+ }()
+ d, _ := json.Marshal(h)
+ if h.Health != "true" {
+ http.Error(w, string(d), http.StatusServiceUnavailable)
+ lg.Warn("/health error", zap.String("output", string(d)), zap.Int("status-code", http.StatusServiceUnavailable))
+ return
+ }
+ w.WriteHeader(http.StatusOK)
+ w.Write(d)
+ lg.Debug("/health OK", zap.Int("status-code", http.StatusOK))
+ }
+}
+
+var (
+ healthSuccess = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "health_success",
+ Help: "The total number of successful health checks",
+ })
+ healthFailed = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "health_failures",
+ Help: "The total number of failed health checks",
+ })
+ healthCheckGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "healthcheck",
+ Help: "The result of each kind of healthcheck.",
+ },
+ []string{"type", "name"},
+ )
+ healthCheckCounter = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "healthchecks_total",
+ Help: "The total number of each kind of healthcheck.",
+ },
+ []string{"type", "name", "status"},
+ )
+)
+
+func init() {
+ prometheus.MustRegister(healthSuccess)
+ prometheus.MustRegister(healthFailed)
+ prometheus.MustRegister(healthCheckGauge)
+ prometheus.MustRegister(healthCheckCounter)
+}
+
+// Health defines etcd server health status.
+// TODO: remove manual parsing in etcdctl cluster-health
+type Health struct {
+ Health string `json:"health"`
+ Reason string `json:"reason"`
+}
+
+// HealthStatus is used in new /readyz or /livez health checks instead of the Health struct.
+type HealthStatus struct {
+ Reason string `json:"reason"`
+ Status string `json:"status"`
+}
+
+func getQuerySet(r *http.Request, query string) StringSet {
+ querySet := make(map[string]struct{})
+ qs, found := r.URL.Query()[query]
+ if found {
+ for _, q := range qs {
+ if len(q) == 0 {
+ continue
+ }
+ querySet[q] = struct{}{}
+ }
+ }
+ return querySet
+}
+
+func getSerializableFlag(r *http.Request) bool {
+ return r.URL.Query().Get("serializable") == "true"
+}
+
+// TODO: etcdserver.ErrNoLeader in health API
+
+func checkAlarms(lg *zap.Logger, srv ServerHealth, excludedAlarms StringSet) Health {
+ h := Health{Health: "true"}
+
+ for _, v := range srv.Alarms() {
+ alarmName := v.Alarm.String()
+ if _, found := excludedAlarms[alarmName]; found {
+ lg.Debug("/health excluded alarm", zap.String("alarm", v.String()))
+ continue
+ }
+
+ h.Health = "false"
+ switch v.Alarm {
+ case pb.AlarmType_NOSPACE:
+ h.Reason = "ALARM NOSPACE"
+ case pb.AlarmType_CORRUPT:
+ h.Reason = "ALARM CORRUPT"
+ default:
+ h.Reason = "ALARM UNKNOWN"
+ }
+ lg.Warn("serving /health false due to an alarm", zap.String("alarm", v.String()))
+ return h
+ }
+
+ return h
+}
+
+func checkLeader(lg *zap.Logger, srv ServerHealth, serializable bool) Health {
+ h := Health{Health: "true"}
+ if !serializable && (uint64(srv.Leader()) == raft.None) {
+ h.Health = "false"
+ h.Reason = "RAFT NO LEADER"
+ lg.Warn("serving /health false; no leader")
+ }
+ return h
+}
+
+func checkAPI(ctx context.Context, lg *zap.Logger, srv ServerHealth, serializable bool) Health {
+ h := Health{Health: "true"}
+ cfg := srv.Config()
+ ctx = srv.AuthStore().WithRoot(ctx)
+ cctx, cancel := context.WithTimeout(ctx, cfg.ReqTimeout())
+ _, err := srv.Range(cctx, &pb.RangeRequest{KeysOnly: true, Limit: 1, Serializable: serializable})
+ cancel()
+ if err != nil {
+ h.Health = "false"
+ h.Reason = fmt.Sprintf("RANGE ERROR:%s", err)
+ lg.Warn("serving /health false; Range fails", zap.Error(err))
+ return h
+ }
+ lg.Debug("serving /health true")
+ return h
+}
+
+type HealthCheck func(ctx context.Context) error
+
+type CheckRegistry struct {
+ checkType string
+ checks map[string]HealthCheck
+}
+
+func installLivezEndpoints(lg *zap.Logger, mux *http.ServeMux, server ServerHealth) {
+ reg := CheckRegistry{checkType: checkTypeLivez, checks: make(map[string]HealthCheck)}
+ reg.Register("serializable_read", readCheck(server, true /* serializable */))
+ reg.InstallHTTPEndpoints(lg, mux)
+}
+
+func installReadyzEndpoints(lg *zap.Logger, mux *http.ServeMux, server ServerHealth) {
+ reg := CheckRegistry{checkType: checkTypeReadyz, checks: make(map[string]HealthCheck)}
+ reg.Register("data_corruption", activeAlarmCheck(server, pb.AlarmType_CORRUPT))
+ // serializable_read checks if local read is ok.
+ // linearizable_read checks if there is consensus in the cluster.
+ // Having both serializable_read and linearizable_read helps isolate the cause of problems if there is a read failure.
+ reg.Register("serializable_read", readCheck(server, true))
+ // linearizable_read check would be replaced by read_index check in 3.6
+ reg.Register("linearizable_read", readCheck(server, false))
+ reg.InstallHTTPEndpoints(lg, mux)
+}
+
+func (reg *CheckRegistry) Register(name string, check HealthCheck) {
+ reg.checks[name] = check
+}
+
+func (reg *CheckRegistry) RootPath() string {
+ return "/" + reg.checkType
+}
+
+// InstallHttpEndpoints installs the http handlers for the health checks.
+//
+// Deprecated: Please use (*CheckRegistry) InstallHTTPEndpoints instead.
+//
+//revive:disable-next-line:var-naming
+func (reg *CheckRegistry) InstallHttpEndpoints(lg *zap.Logger, mux *http.ServeMux) {
+ reg.InstallHTTPEndpoints(lg, mux)
+}
+
+func (reg *CheckRegistry) InstallHTTPEndpoints(lg *zap.Logger, mux *http.ServeMux) {
+ checkNames := make([]string, 0, len(reg.checks))
+ for k := range reg.checks {
+ checkNames = append(checkNames, k)
+ }
+
+ // installs the http handler for the root path.
+ reg.installRootHTTPEndpoint(lg, mux, checkNames...)
+ for _, checkName := range checkNames {
+ // installs the http handler for the individual check sub path.
+ subpath := path.Join(reg.RootPath(), checkName)
+ check := checkName
+ mux.Handle(subpath, newHealthHandler(subpath, lg, func(r *http.Request) HealthStatus {
+ return reg.runHealthChecks(r.Context(), check)
+ }))
+ }
+}
+
+func (reg *CheckRegistry) runHealthChecks(ctx context.Context, checkNames ...string) HealthStatus {
+ h := HealthStatus{Status: HealthStatusSuccess}
+ var individualCheckOutput bytes.Buffer
+ for _, checkName := range checkNames {
+ check, found := reg.checks[checkName]
+ if !found {
+ panic(fmt.Errorf("Health check: %s not registered", checkName))
+ }
+ if err := check(ctx); err != nil {
+ fmt.Fprintf(&individualCheckOutput, "[-]%s failed: %v\n", checkName, err)
+ h.Status = HealthStatusError
+ recordMetrics(reg.checkType, checkName, HealthStatusError)
+ } else {
+ fmt.Fprintf(&individualCheckOutput, "[+]%s ok\n", checkName)
+ recordMetrics(reg.checkType, checkName, HealthStatusSuccess)
+ }
+ }
+ h.Reason = individualCheckOutput.String()
+ return h
+}
+
+// installRootHTTPEndpoint installs the http handler for the root path.
+func (reg *CheckRegistry) installRootHTTPEndpoint(lg *zap.Logger, mux *http.ServeMux, checks ...string) {
+ hfunc := func(r *http.Request) HealthStatus {
+ // extracts the health check names to be excluded from the query param
+ excluded := getQuerySet(r, "exclude")
+
+ filteredCheckNames := filterCheckList(lg, listToStringSet(checks), excluded)
+ h := reg.runHealthChecks(r.Context(), filteredCheckNames...)
+ return h
+ }
+ mux.Handle(reg.RootPath(), newHealthHandler(reg.RootPath(), lg, hfunc))
+}
+
+// newHealthHandler generates a http HandlerFunc for a health check function hfunc.
+func newHealthHandler(path string, lg *zap.Logger, hfunc func(*http.Request) HealthStatus) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodGet {
+ w.Header().Set("Allow", http.MethodGet)
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ lg.Warn("Health request error", zap.String("path", path), zap.Int("status-code", http.StatusMethodNotAllowed))
+ return
+ }
+ h := hfunc(r)
+ // Always returns detailed reason for failed checks.
+ if h.Status == HealthStatusError {
+ http.Error(w, h.Reason, http.StatusServiceUnavailable)
+ lg.Error("Health check error", zap.String("path", path), zap.String("reason", h.Reason), zap.Int("status-code", http.StatusServiceUnavailable))
+ return
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.Header().Set("X-Content-Type-Options", "nosniff")
+ // Only writes detailed reason for verbose requests.
+ if _, found := r.URL.Query()["verbose"]; found {
+ fmt.Fprint(w, h.Reason)
+ }
+ fmt.Fprint(w, "ok\n")
+ lg.Debug("Health check OK", zap.String("path", path), zap.String("reason", h.Reason), zap.Int("status-code", http.StatusOK))
+ }
+}
+
+func filterCheckList(lg *zap.Logger, checks StringSet, excluded StringSet) []string {
+ filteredList := []string{}
+ for chk := range checks {
+ if _, found := excluded[chk]; found {
+ delete(excluded, chk)
+ continue
+ }
+ filteredList = append(filteredList, chk)
+ }
+ if len(excluded) > 0 {
+ // For version compatibility, excluding nonexistent checks does not fail the request.
+ lg.Warn("some health checks cannot be excluded", zap.String("missing-health-checks", formatQuoted(excluded.List()...)))
+ }
+ return filteredList
+}
+
+// formatQuoted returns a formatted string of the health check names,
+// preserving the order passed in.
+func formatQuoted(names ...string) string {
+ quoted := make([]string, 0, len(names))
+ for _, name := range names {
+ quoted = append(quoted, fmt.Sprintf("%q", name))
+ }
+ return strings.Join(quoted, ",")
+}
+
+type StringSet map[string]struct{}
+
+func (s StringSet) List() []string {
+ keys := make([]string, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+func listToStringSet(list []string) StringSet {
+ set := make(map[string]struct{})
+ for _, s := range list {
+ set[s] = struct{}{}
+ }
+ return set
+}
+
+func recordMetrics(checkType, name string, status string) {
+ val := 0.0
+ if status == HealthStatusSuccess {
+ val = 1.0
+ }
+ healthCheckGauge.With(prometheus.Labels{
+ "type": checkType,
+ "name": name,
+ }).Set(val)
+ healthCheckCounter.With(prometheus.Labels{
+ "type": checkType,
+ "name": name,
+ "status": status,
+ }).Inc()
+}
+
+// activeAlarmCheck checks if a specific alarm type is active in the server.
+func activeAlarmCheck(srv ServerHealth, at pb.AlarmType) func(context.Context) error {
+ return func(ctx context.Context) error {
+ as := srv.Alarms()
+ for _, v := range as {
+ if v.Alarm == at {
+ return fmt.Errorf("alarm activated: %s", at.String())
+ }
+ }
+ return nil
+ }
+}
+
+func readCheck(srv ServerHealth, serializable bool) func(ctx context.Context) error {
+ return func(ctx context.Context) error {
+ ctx = srv.AuthStore().WithRoot(ctx)
+ _, err := srv.Range(ctx, &pb.RangeRequest{KeysOnly: true, Limit: 1, Serializable: serializable})
+ return err
+ }
+}
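A minimal client-side sketch of how the new endpoints behave, assuming a local member serving plain HTTP client traffic on 127.0.0.1:2379 (adjust scheme, port and TLS to match a real deployment):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// `verbose` asks newHealthHandler to include the per-check report.
	resp, err := http.Get("http://127.0.0.1:2379/readyz?verbose")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	// 200 means every registered check passed; a 503 body carries the
	// "[-]<name> failed: ..." lines produced by runHealthChecks.
	fmt.Println(resp.Status)
	fmt.Print(string(body))
}
```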
diff --git a/server/etcdserver/api/etcdhttp/health_test.go b/server/etcdserver/api/etcdhttp/health_test.go
new file mode 100644
index 00000000000..a5f64061996
--- /dev/null
+++ b/server/etcdserver/api/etcdhttp/health_test.go
@@ -0,0 +1,424 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdhttp
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/raft/v3"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/config"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+type fakeHealthServer struct {
+ fakeServer
+ serializableReadError error
+ linearizableReadError error
+ missingLeader bool
+ authStore auth.AuthStore
+}
+
+func (s *fakeHealthServer) Range(_ context.Context, req *pb.RangeRequest) (*pb.RangeResponse, error) {
+ if req.Serializable {
+ return nil, s.serializableReadError
+ }
+ return nil, s.linearizableReadError
+}
+
+func (s *fakeHealthServer) Config() config.ServerConfig {
+ return config.ServerConfig{}
+}
+
+func (s *fakeHealthServer) Leader() types.ID {
+ if !s.missingLeader {
+ return 1
+ }
+ return types.ID(raft.None)
+}
+
+func (s *fakeHealthServer) AuthStore() auth.AuthStore { return s.authStore }
+
+func (s *fakeHealthServer) ClientCertAuthEnabled() bool { return false }
+
+type healthTestCase struct {
+ name string
+ healthCheckURL string
+ expectStatusCode int
+ inResult []string
+ notInResult []string
+
+ alarms []*pb.AlarmMember
+ apiError error
+ missingLeader bool
+}
+
+func TestHealthHandler(t *testing.T) {
+ // define the input and expected output
+ // input: alarms, and healthCheckURL
+ tests := []healthTestCase{
+ {
+ name: "Healthy if no alarm",
+ alarms: []*pb.AlarmMember{},
+ healthCheckURL: "/health",
+ expectStatusCode: http.StatusOK,
+ },
+ {
+ name: "Unhealthy if NOSPACE alarm is on",
+ alarms: []*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}},
+ healthCheckURL: "/health",
+ expectStatusCode: http.StatusServiceUnavailable,
+ },
+ {
+ name: "Healthy if NOSPACE alarm is on and excluded",
+ alarms: []*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}},
+ healthCheckURL: "/health?exclude=NOSPACE",
+ expectStatusCode: http.StatusOK,
+ },
+ {
+ name: "Healthy if NOSPACE alarm is excluded",
+ alarms: []*pb.AlarmMember{},
+ healthCheckURL: "/health?exclude=NOSPACE",
+ expectStatusCode: http.StatusOK,
+ },
+ {
+ name: "Healthy if multiple NOSPACE alarms are on and excluded",
+ alarms: []*pb.AlarmMember{{MemberID: uint64(1), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(2), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(3), Alarm: pb.AlarmType_NOSPACE}},
+ healthCheckURL: "/health?exclude=NOSPACE",
+ expectStatusCode: http.StatusOK,
+ },
+ {
+ name: "Unhealthy if NOSPACE alarms is excluded and CORRUPT is on",
+ alarms: []*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(1), Alarm: pb.AlarmType_CORRUPT}},
+ healthCheckURL: "/health?exclude=NOSPACE",
+ expectStatusCode: http.StatusServiceUnavailable,
+ },
+ {
+ name: "Unhealthy if both NOSPACE and CORRUPT are on and excluded",
+ alarms: []*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(1), Alarm: pb.AlarmType_CORRUPT}},
+ healthCheckURL: "/health?exclude=NOSPACE&exclude=CORRUPT",
+ expectStatusCode: http.StatusOK,
+ },
+ {
+ name: "Unhealthy if api is not available",
+ healthCheckURL: "/health",
+ apiError: fmt.Errorf("Unexpected error"),
+ expectStatusCode: http.StatusServiceUnavailable,
+ },
+ {
+ name: "Unhealthy if no leader",
+ healthCheckURL: "/health",
+ expectStatusCode: http.StatusServiceUnavailable,
+ missingLeader: true,
+ },
+ {
+ name: "Healthy if no leader and serializable=true",
+ healthCheckURL: "/health?serializable=true",
+ expectStatusCode: http.StatusOK,
+ missingLeader: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mux := http.NewServeMux()
+ lg := zaptest.NewLogger(t)
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
+ HandleHealth(zaptest.NewLogger(t), mux, &fakeHealthServer{
+ fakeServer: fakeServer{alarms: tt.alarms},
+ serializableReadError: tt.apiError,
+ linearizableReadError: tt.apiError,
+ missingLeader: tt.missingLeader,
+ authStore: auth.NewAuthStore(lg, schema.NewAuthBackend(lg, be), nil, 0),
+ })
+ ts := httptest.NewServer(mux)
+ defer ts.Close()
+ checkHTTPResponse(t, ts, tt.healthCheckURL, tt.expectStatusCode, nil, nil)
+ })
+ }
+}
+
+func TestHTTPSubPath(t *testing.T) {
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
+ tests := []healthTestCase{
+ {
+ name: "/readyz/data_corruption ok",
+ healthCheckURL: "/readyz/data_corruption",
+ expectStatusCode: http.StatusOK,
+ },
+ {
+ name: "/readyz/serializable_read not ok with error",
+ apiError: fmt.Errorf("Unexpected error"),
+ healthCheckURL: "/readyz/serializable_read",
+ expectStatusCode: http.StatusServiceUnavailable,
+ notInResult: []string{"data_corruption"},
+ },
+ {
+ name: "/readyz/non_exist 404",
+ healthCheckURL: "/readyz/non_exist",
+ expectStatusCode: http.StatusNotFound,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mux := http.NewServeMux()
+ logger := zaptest.NewLogger(t)
+ s := &fakeHealthServer{
+ serializableReadError: tt.apiError,
+ authStore: auth.NewAuthStore(logger, schema.NewAuthBackend(logger, be), nil, 0),
+ }
+ HandleHealth(logger, mux, s)
+ ts := httptest.NewServer(mux)
+ defer ts.Close()
+ checkHTTPResponse(t, ts, tt.healthCheckURL, tt.expectStatusCode, tt.inResult, tt.notInResult)
+ checkMetrics(t, tt.healthCheckURL, "", tt.expectStatusCode)
+ })
+ }
+}
+
+func TestDataCorruptionCheck(t *testing.T) {
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
+ tests := []healthTestCase{
+ {
+ name: "Live if CORRUPT alarm is on",
+ alarms: []*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_CORRUPT}},
+ healthCheckURL: "/livez",
+ expectStatusCode: http.StatusOK,
+ notInResult: []string{"data_corruption"},
+ },
+ {
+ name: "Not ready if CORRUPT alarm is on",
+ alarms: []*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_CORRUPT}},
+ healthCheckURL: "/readyz",
+ expectStatusCode: http.StatusServiceUnavailable,
+ inResult: []string{"[-]data_corruption failed: alarm activated: CORRUPT"},
+ },
+ {
+ name: "ready if CORRUPT alarm is not on",
+ alarms: []*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}},
+ healthCheckURL: "/readyz",
+ expectStatusCode: http.StatusOK,
+ },
+ {
+ name: "ready if CORRUPT alarm is excluded",
+ alarms: []*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_CORRUPT}, {MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}},
+ healthCheckURL: "/readyz?exclude=data_corruption",
+ expectStatusCode: http.StatusOK,
+ },
+ {
+ name: "Not ready if CORRUPT alarm is on",
+ alarms: []*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_CORRUPT}},
+ healthCheckURL: "/readyz?exclude=non_exist",
+ expectStatusCode: http.StatusServiceUnavailable,
+ inResult: []string{"[-]data_corruption failed: alarm activated: CORRUPT"},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mux := http.NewServeMux()
+ logger := zaptest.NewLogger(t)
+ s := &fakeHealthServer{
+ authStore: auth.NewAuthStore(logger, schema.NewAuthBackend(logger, be), nil, 0),
+ }
+ HandleHealth(logger, mux, s)
+ ts := httptest.NewServer(mux)
+ defer ts.Close()
+ // OK before alarms are activated.
+ checkHTTPResponse(t, ts, tt.healthCheckURL, http.StatusOK, nil, nil)
+ // Activate the alarms.
+ s.alarms = tt.alarms
+ checkHTTPResponse(t, ts, tt.healthCheckURL, tt.expectStatusCode, tt.inResult, tt.notInResult)
+ })
+ }
+}
+
+func TestSerializableReadCheck(t *testing.T) {
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
+ tests := []healthTestCase{
+ {
+ name: "Alive normal",
+ healthCheckURL: "/livez?verbose",
+ expectStatusCode: http.StatusOK,
+ inResult: []string{"[+]serializable_read ok"},
+ },
+ {
+ name: "Not alive if range api is not available",
+ healthCheckURL: "/livez",
+ apiError: fmt.Errorf("Unexpected error"),
+ expectStatusCode: http.StatusServiceUnavailable,
+ inResult: []string{"[-]serializable_read failed: Unexpected error"},
+ },
+ {
+ name: "Not ready if range api is not available",
+ healthCheckURL: "/readyz",
+ apiError: fmt.Errorf("Unexpected error"),
+ expectStatusCode: http.StatusServiceUnavailable,
+ inResult: []string{"[-]serializable_read failed: Unexpected error"},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mux := http.NewServeMux()
+ logger := zaptest.NewLogger(t)
+ s := &fakeHealthServer{
+ serializableReadError: tt.apiError,
+ authStore: auth.NewAuthStore(logger, schema.NewAuthBackend(logger, be), nil, 0),
+ }
+ HandleHealth(logger, mux, s)
+ ts := httptest.NewServer(mux)
+ defer ts.Close()
+ checkHTTPResponse(t, ts, tt.healthCheckURL, tt.expectStatusCode, tt.inResult, tt.notInResult)
+ checkMetrics(t, tt.healthCheckURL, "serializable_read", tt.expectStatusCode)
+ })
+ }
+}
+
+func TestLinearizableReadCheck(t *testing.T) {
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
+ tests := []healthTestCase{
+ {
+ name: "Alive normal",
+ healthCheckURL: "/livez?verbose",
+ expectStatusCode: http.StatusOK,
+ inResult: []string{"[+]serializable_read ok"},
+ },
+ {
+ name: "Alive if lineariable range api is not available",
+ healthCheckURL: "/livez",
+ apiError: fmt.Errorf("Unexpected error"),
+ expectStatusCode: http.StatusOK,
+ },
+ {
+ name: "Not ready if range api is not available",
+ healthCheckURL: "/readyz",
+ apiError: fmt.Errorf("Unexpected error"),
+ expectStatusCode: http.StatusServiceUnavailable,
+ inResult: []string{"[+]serializable_read ok", "[-]linearizable_read failed: Unexpected error"},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mux := http.NewServeMux()
+ logger := zaptest.NewLogger(t)
+ s := &fakeHealthServer{
+ linearizableReadError: tt.apiError,
+ authStore: auth.NewAuthStore(logger, schema.NewAuthBackend(logger, be), nil, 0),
+ }
+ HandleHealth(logger, mux, s)
+ ts := httptest.NewServer(mux)
+ defer ts.Close()
+ checkHTTPResponse(t, ts, tt.healthCheckURL, tt.expectStatusCode, tt.inResult, tt.notInResult)
+ checkMetrics(t, tt.healthCheckURL, "linearizable_read", tt.expectStatusCode)
+ })
+ }
+}
+
+func checkHTTPResponse(t *testing.T, ts *httptest.Server, url string, expectStatusCode int, inResult []string, notInResult []string) {
+ res, err := ts.Client().Do(&http.Request{Method: http.MethodGet, URL: testutil.MustNewURL(t, ts.URL+url)})
+ if err != nil {
+ t.Fatalf("fail serve http request %s %v", url, err)
+ }
+ if res.StatusCode != expectStatusCode {
+ t.Errorf("want statusCode %d but got %d", expectStatusCode, res.StatusCode)
+ }
+ defer res.Body.Close()
+ b, err := io.ReadAll(res.Body)
+ if err != nil {
+ t.Fatalf("Failed to read response for %s", url)
+ }
+ result := string(b)
+ for _, substr := range inResult {
+ if !strings.Contains(result, substr) {
+ t.Errorf("Could not find substring : %s, in response: %s", substr, result)
+ return
+ }
+ }
+ for _, substr := range notInResult {
+ if strings.Contains(result, substr) {
+ t.Errorf("Do not expect substring : %s, in response: %s", substr, result)
+ return
+ }
+ }
+}
+
+func checkMetrics(t *testing.T, url, checkName string, expectStatusCode int) {
+ defer healthCheckGauge.Reset()
+ defer healthCheckCounter.Reset()
+
+ typeName := strings.TrimPrefix(strings.Split(url, "?")[0], "/")
+ if len(checkName) == 0 {
+ checkName = strings.Split(typeName, "/")[1]
+ typeName = strings.Split(typeName, "/")[0]
+ }
+
+ expectedSuccessCount := 1
+ expectedErrorCount := 0
+ if expectStatusCode != http.StatusOK {
+ expectedSuccessCount = 0
+ expectedErrorCount = 1
+ }
+
+ gather, _ := prometheus.DefaultGatherer.Gather()
+ for _, mf := range gather {
+ name := *mf.Name
+ val := 0
+ switch name {
+ case "etcd_server_healthcheck":
+ val = int(mf.GetMetric()[0].GetGauge().GetValue())
+ case "etcd_server_healthcheck_total":
+ val = int(mf.GetMetric()[0].GetCounter().GetValue())
+ default:
+ continue
+ }
+ labelMap := make(map[string]string)
+ for _, label := range mf.GetMetric()[0].Label {
+ labelMap[label.GetName()] = label.GetValue()
+ }
+ if typeName != labelMap["type"] {
+ continue
+ }
+ if labelMap["name"] != checkName {
+ continue
+ }
+ if statusLabel, found := labelMap["status"]; found && statusLabel == HealthStatusError {
+ if val != expectedErrorCount {
+ t.Fatalf("%s got errorCount %d, wanted %d\n", name, val, expectedErrorCount)
+ }
+ } else {
+ if val != expectedSuccessCount {
+ t.Fatalf("%s got expectedSuccessCount %d, wanted %d\n", name, val, expectedSuccessCount)
+ }
+ }
+ }
+}
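checkMetrics walks prometheus.DefaultGatherer by hand; for single collectors, client_golang's testutil package offers a shorter route, shown here on a throwaway counter (illustrative only, not a suggested change to the test):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	c := prometheus.NewCounter(prometheus.CounterOpts{Name: "demo_total"})
	c.Add(3)
	// ToFloat64 collects the single metric and returns its value.
	fmt.Println(testutil.ToFloat64(c)) // 3
}
```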
diff --git a/server/etcdserver/api/etcdhttp/metrics.go b/server/etcdserver/api/etcdhttp/metrics.go
index b14a13c9c5e..bf7d4a4a445 100644
--- a/server/etcdserver/api/etcdhttp/metrics.go
+++ b/server/etcdserver/api/etcdhttp/metrics.go
@@ -15,190 +15,17 @@
package etcdhttp
import (
- "context"
- "encoding/json"
- "fmt"
"net/http"
- "time"
- "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
- "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/raft/v3"
- "go.etcd.io/etcd/server/v3/etcdserver"
- "go.uber.org/zap"
)
const (
PathMetrics = "/metrics"
- PathHealth = "/health"
PathProxyMetrics = "/proxy/metrics"
- PathProxyHealth = "/proxy/health"
)
-// HandleMetricsHealth registers metrics and health handlers.
-func HandleMetricsHealth(lg *zap.Logger, mux *http.ServeMux, srv etcdserver.ServerV2) {
+// HandleMetrics registers prometheus handler on '/metrics'.
+func HandleMetrics(mux *http.ServeMux) {
mux.Handle(PathMetrics, promhttp.Handler())
- mux.Handle(PathHealth, NewHealthHandler(lg, func(excludedAlarms AlarmSet) Health { return checkV2Health(lg, srv, excludedAlarms) }))
-}
-
-// HandleMetricsHealthForV3 registers metrics and health handlers. it checks health by using v3 range request
-// and its corresponding timeout.
-func HandleMetricsHealthForV3(lg *zap.Logger, mux *http.ServeMux, srv *etcdserver.EtcdServer) {
- mux.Handle(PathMetrics, promhttp.Handler())
- mux.Handle(PathHealth, NewHealthHandler(lg, func(excludedAlarms AlarmSet) Health { return checkV3Health(lg, srv, excludedAlarms) }))
-}
-
-// HandlePrometheus registers prometheus handler on '/metrics'.
-func HandlePrometheus(mux *http.ServeMux) {
- mux.Handle(PathMetrics, promhttp.Handler())
-}
-
-// NewHealthHandler handles '/health' requests.
-func NewHealthHandler(lg *zap.Logger, hfunc func(excludedAlarms AlarmSet) Health) http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
- if r.Method != http.MethodGet {
- w.Header().Set("Allow", http.MethodGet)
- http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
- lg.Warn("/health error", zap.Int("status-code", http.StatusMethodNotAllowed))
- return
- }
- excludedAlarms := getExcludedAlarms(r)
- h := hfunc(excludedAlarms)
- defer func() {
- if h.Health == "true" {
- healthSuccess.Inc()
- } else {
- healthFailed.Inc()
- }
- }()
- d, _ := json.Marshal(h)
- if h.Health != "true" {
- http.Error(w, string(d), http.StatusServiceUnavailable)
- lg.Warn("/health error", zap.String("output", string(d)), zap.Int("status-code", http.StatusServiceUnavailable))
- return
- }
- w.WriteHeader(http.StatusOK)
- w.Write(d)
- lg.Debug("/health OK", zap.Int("status-code", http.StatusOK))
- }
-}
-
-var (
- healthSuccess = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "health_success",
- Help: "The total number of successful health checks",
- })
- healthFailed = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "health_failures",
- Help: "The total number of failed health checks",
- })
-)
-
-func init() {
- prometheus.MustRegister(healthSuccess)
- prometheus.MustRegister(healthFailed)
-}
-
-// Health defines etcd server health status.
-// TODO: remove manual parsing in etcdctl cluster-health
-type Health struct {
- Health string `json:"health"`
- Reason string `json:"reason"`
-}
-
-type AlarmSet map[string]struct{}
-
-func getExcludedAlarms(r *http.Request) (alarms AlarmSet) {
- alarms = make(map[string]struct{}, 2)
- alms, found := r.URL.Query()["exclude"]
- if found {
- for _, alm := range alms {
- if len(alms) == 0 {
- continue
- }
- alarms[alm] = struct{}{}
- }
- }
- return alarms
-}
-
-// TODO: etcdserver.ErrNoLeader in health API
-
-func checkHealth(lg *zap.Logger, srv etcdserver.ServerV2, excludedAlarms AlarmSet) Health {
- h := Health{}
- h.Health = "true"
- as := srv.Alarms()
- if len(as) > 0 {
- for _, v := range as {
- alarmName := v.Alarm.String()
- if _, found := excludedAlarms[alarmName]; found {
- lg.Debug("/health excluded alarm", zap.String("alarm", alarmName))
- delete(excludedAlarms, alarmName)
- continue
- }
-
- h.Health = "false"
- switch v.Alarm {
- case etcdserverpb.AlarmType_NOSPACE:
- h.Reason = "ALARM NOSPACE"
- case etcdserverpb.AlarmType_CORRUPT:
- h.Reason = "ALARM CORRUPT"
- default:
- h.Reason = "ALARM UNKNOWN"
- }
- lg.Warn("serving /health false due to an alarm", zap.String("alarm", v.String()))
- return h
- }
- }
-
- if len(excludedAlarms) > 0 {
- lg.Warn("fail exclude alarms from health check", zap.String("exclude alarms", fmt.Sprintf("%+v", excludedAlarms)))
- }
-
- if uint64(srv.Leader()) == raft.None {
- h.Health = "false"
- h.Reason = "RAFT NO LEADER"
- lg.Warn("serving /health false; no leader")
- return h
- }
- return h
-}
-
-func checkV2Health(lg *zap.Logger, srv etcdserver.ServerV2, excludedAlarms AlarmSet) (h Health) {
- if h = checkHealth(lg, srv, excludedAlarms); h.Health != "true" {
- return
- }
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- _, err := srv.Do(ctx, etcdserverpb.Request{Method: "QGET"})
- cancel()
- if err != nil {
- h.Health = "false"
- h.Reason = fmt.Sprintf("QGET ERROR:%s", err)
- lg.Warn("serving /health false; QGET fails", zap.Error(err))
- return
- }
- lg.Debug("serving /health true")
- return
-}
-
-func checkV3Health(lg *zap.Logger, srv *etcdserver.EtcdServer, excludedAlarms AlarmSet) (h Health) {
- if h = checkHealth(lg, srv, excludedAlarms); h.Health != "true" {
- return
- }
- ctx, cancel := context.WithTimeout(context.Background(), srv.Cfg.ReqTimeout())
- _, err := srv.Range(ctx, &etcdserverpb.RangeRequest{KeysOnly: true, Limit: 1})
- cancel()
- if err != nil {
- h.Health = "false"
- h.Reason = fmt.Sprintf("RANGE ERROR:%s", err)
- lg.Warn("serving /health false; Range fails", zap.Error(err))
- return
- }
- lg.Debug("serving /health true")
- return
}
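With the health logic split out, metrics.go is reduced to mounting the promhttp handler. A rough wiring sketch of the split-out handlers on one mux (not etcd's actual startup code; HandleHealth is omitted because it additionally needs a ServerHealth implementation, and the port is chosen arbitrarily):

```go
package main

import (
	"log"
	"net/http"

	"go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp"
)

func main() {
	mux := http.NewServeMux()
	etcdhttp.HandleMetrics(mux) // Prometheus metrics on /metrics
	etcdhttp.HandleDebug(mux)   // expvar JSON on /debug/vars
	log.Fatal(http.ListenAndServe("127.0.0.1:2381", mux))
}
```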
diff --git a/server/etcdserver/api/etcdhttp/metrics_test.go b/server/etcdserver/api/etcdhttp/metrics_test.go
deleted file mode 100644
index c8839d7ad35..00000000000
--- a/server/etcdserver/api/etcdhttp/metrics_test.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package etcdhttp
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "testing"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/raft/v3"
- "go.etcd.io/etcd/server/v3/etcdserver"
- stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
- "go.uber.org/zap"
-)
-
-type fakeStats struct{}
-
-func (s *fakeStats) SelfStats() []byte { return nil }
-func (s *fakeStats) LeaderStats() []byte { return nil }
-func (s *fakeStats) StoreStats() []byte { return nil }
-
-type fakeServerV2 struct {
- fakeServer
- stats.Stats
- health string
-}
-
-func (s *fakeServerV2) Leader() types.ID {
- if s.health == "true" {
- return 1
- }
- return types.ID(raft.None)
-}
-func (s *fakeServerV2) Do(ctx context.Context, r pb.Request) (etcdserver.Response, error) {
- if s.health == "true" {
- return etcdserver.Response{}, nil
- }
- return etcdserver.Response{}, fmt.Errorf("fail health check")
-}
-func (s *fakeServerV2) ClientCertAuthEnabled() bool { return false }
-
-func TestHealthHandler(t *testing.T) {
- // define the input and expected output
- // input: alarms, and healthCheckURL
- tests := []struct {
- alarms []*pb.AlarmMember
- healthCheckURL string
- statusCode int
- health string
- }{
- {
- []*pb.AlarmMember{},
- "/health",
- http.StatusOK,
- "true",
- },
- {
- []*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}},
- "/health",
- http.StatusServiceUnavailable,
- "false",
- },
- {
- []*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}},
- "/health?exclude=NOSPACE",
- http.StatusOK,
- "true",
- },
- {
- []*pb.AlarmMember{},
- "/health?exclude=NOSPACE",
- http.StatusOK,
- "true",
- },
- {
- []*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(1), Alarm: pb.AlarmType_CORRUPT}},
- "/health?exclude=NOSPACE",
- http.StatusServiceUnavailable,
- "false",
- },
- {
- []*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(1), Alarm: pb.AlarmType_CORRUPT}},
- "/health?exclude=NOSPACE&exclude=CORRUPT",
- http.StatusOK,
- "true",
- },
- }
-
- for i, tt := range tests {
- func() {
- mux := http.NewServeMux()
- HandleMetricsHealth(zap.NewExample(), mux, &fakeServerV2{
- fakeServer: fakeServer{alarms: tt.alarms},
- Stats: &fakeStats{},
- health: tt.health,
- })
- ts := httptest.NewServer(mux)
- defer ts.Close()
-
- res, err := ts.Client().Do(&http.Request{Method: http.MethodGet, URL: testutil.MustNewURL(t, ts.URL+tt.healthCheckURL)})
- if err != nil {
- t.Errorf("fail serve http request %s %v in test case #%d", tt.healthCheckURL, err, i+1)
- }
- if res == nil {
- t.Errorf("got nil http response with http request %s in test case #%d", tt.healthCheckURL, i+1)
- return
- }
- if res.StatusCode != tt.statusCode {
- t.Errorf("want statusCode %d but got %d in test case #%d", tt.statusCode, res.StatusCode, i+1)
- }
- health, err := parseHealthOutput(res.Body)
- if err != nil {
- t.Errorf("fail parse health check output %v", err)
- }
- if health.Health != tt.health {
- t.Errorf("want health %s but got %s", tt.health, health.Health)
- }
- }()
- }
-}
-
-func parseHealthOutput(body io.Reader) (Health, error) {
- obj := Health{}
- d, derr := ioutil.ReadAll(body)
- if derr != nil {
- return obj, derr
- }
- if err := json.Unmarshal(d, &obj); err != nil {
- return obj, err
- }
- return obj, nil
-}
diff --git a/server/etcdserver/api/etcdhttp/peer.go b/server/etcdserver/api/etcdhttp/peer.go
index badc98634b1..de5948d30f5 100644
--- a/server/etcdserver/api/etcdhttp/peer.go
+++ b/server/etcdserver/api/etcdhttp/peer.go
@@ -16,19 +16,21 @@ package etcdhttp
import (
"encoding/json"
+ errorspkg "errors"
"fmt"
"net/http"
"strconv"
"strings"
+ "go.uber.org/zap"
+
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/server/v3/etcdserver"
"go.etcd.io/etcd/server/v3/etcdserver/api"
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
"go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
"go.etcd.io/etcd/server/v3/lease/leasehttp"
-
- "go.uber.org/zap"
)
const (
@@ -71,7 +73,7 @@ func newPeerHandler(
if hashKVHandler != nil {
mux.Handle(etcdserver.PeerHashKVPath, hashKVHandler)
}
- mux.HandleFunc(versionPath, versionHandler(s.Cluster(), serveVersion))
+ mux.HandleFunc(versionPath, versionHandler(s, serveVersion))
return mux
}
@@ -137,15 +139,15 @@ func (h *peerMemberPromoteHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
resp, err := h.server.PromoteMember(r.Context(), id)
if err != nil {
- switch err {
- case membership.ErrIDNotFound:
+ switch {
+ case errorspkg.Is(err, membership.ErrIDNotFound):
http.Error(w, err.Error(), http.StatusNotFound)
- case membership.ErrMemberNotLearner:
+ case errorspkg.Is(err, membership.ErrMemberNotLearner):
http.Error(w, err.Error(), http.StatusPreconditionFailed)
- case etcdserver.ErrLearnerNotReady:
+ case errorspkg.Is(err, errors.ErrLearnerNotReady):
http.Error(w, err.Error(), http.StatusPreconditionFailed)
default:
- WriteError(h.lg, w, r, err)
+ writeError(h.lg, w, r, err)
}
h.lg.Warn(
"failed to promote a member",
diff --git a/server/etcdserver/api/etcdhttp/peer_test.go b/server/etcdserver/api/etcdhttp/peer_test.go
index 5a1ea753797..2ef66ad0d77 100644
--- a/server/etcdserver/api/etcdhttp/peer_test.go
+++ b/server/etcdserver/api/etcdhttp/peer_test.go
@@ -18,7 +18,7 @@ import (
"context"
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/http/httptest"
"path"
@@ -26,9 +26,9 @@ import (
"strings"
"testing"
- "go.uber.org/zap"
-
"github.com/coreos/go-semver/semver"
+ "go.uber.org/zap/zaptest"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/pkg/v3/testutil"
"go.etcd.io/etcd/client/pkg/v3/types"
@@ -46,12 +46,12 @@ type fakeCluster struct {
func (c *fakeCluster) ID() types.ID { return types.ID(c.id) }
func (c *fakeCluster) ClientURLs() []string { return c.clientURLs }
func (c *fakeCluster) Members() []*membership.Member {
- var ms membership.MembersByID
+ ms := make(membership.MembersByID, 0, len(c.members))
for _, m := range c.members {
ms = append(ms, m)
}
sort.Sort(ms)
- return []*membership.Member(ms)
+ return ms
}
func (c *fakeCluster) Member(id types.ID) *membership.Member { return c.members[uint64(id)] }
func (c *fakeCluster) Version() *semver.Version { return nil }
@@ -64,16 +64,20 @@ type fakeServer struct {
func (s *fakeServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
return nil, fmt.Errorf("AddMember not implemented in fakeServer")
}
+
func (s *fakeServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
return nil, fmt.Errorf("RemoveMember not implemented in fakeServer")
}
+
func (s *fakeServer) UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error) {
return nil, fmt.Errorf("UpdateMember not implemented in fakeServer")
}
+
func (s *fakeServer) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
return nil, fmt.Errorf("PromoteMember not implemented in fakeServer")
}
func (s *fakeServer) ClusterVersion() *semver.Version { return nil }
+func (s *fakeServer) StorageVersion() *semver.Version { return nil }
func (s *fakeServer) Cluster() api.Cluster { return s.cluster }
func (s *fakeServer) Alarms() []*pb.AlarmMember { return s.alarms }
func (s *fakeServer) LeaderChangedNotify() <-chan struct{} { return nil }
@@ -85,7 +89,7 @@ var fakeRaftHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Reque
// TestNewPeerHandlerOnRaftPrefix tests that NewPeerHandler returns a handler that
// handles raft-prefix requests well.
func TestNewPeerHandlerOnRaftPrefix(t *testing.T) {
- ph := newPeerHandler(zap.NewExample(), &fakeServer{cluster: &fakeCluster{}}, fakeRaftHandler, nil, nil, nil)
+ ph := newPeerHandler(zaptest.NewLogger(t), &fakeServer{cluster: &fakeCluster{}}, fakeRaftHandler, nil, nil, nil)
srv := httptest.NewServer(ph)
defer srv.Close()
@@ -98,10 +102,11 @@ func TestNewPeerHandlerOnRaftPrefix(t *testing.T) {
if err != nil {
t.Fatalf("unexpected http.Get error: %v", err)
}
- body, err := ioutil.ReadAll(resp.Body)
+ body, err := io.ReadAll(resp.Body)
if err != nil {
- t.Fatalf("unexpected ioutil.ReadAll error: %v", err)
+ t.Fatalf("unexpected io.ReadAll error: %v", err)
}
+ resp.Body.Close()
if w := "test data"; string(body) != w {
t.Errorf("#%d: body = %s, want %s", i, body, w)
}
@@ -170,7 +175,7 @@ func TestServeMembersGet(t *testing.T) {
}
for i, tt := range tests {
- req, err := http.NewRequest("GET", testutil.MustNewURL(t, tt.path).String(), nil)
+ req, err := http.NewRequest(http.MethodGet, testutil.MustNewURL(t, tt.path).String(), nil)
if err != nil {
t.Fatal(err)
}
@@ -233,7 +238,7 @@ func TestServeMemberPromoteFails(t *testing.T) {
// TestNewPeerHandlerOnMembersPromotePrefix verifies the request with members promote prefix is routed correctly
func TestNewPeerHandlerOnMembersPromotePrefix(t *testing.T) {
- ph := newPeerHandler(zap.NewExample(), &fakeServer{cluster: &fakeCluster{}}, fakeRaftHandler, nil, nil, nil)
+ ph := newPeerHandler(zaptest.NewLogger(t), &fakeServer{cluster: &fakeCluster{}}, fakeRaftHandler, nil, nil, nil)
srv := httptest.NewServer(ph)
defer srv.Close()
@@ -259,7 +264,7 @@ func TestNewPeerHandlerOnMembersPromotePrefix(t *testing.T) {
},
}
for i, tt := range tests {
- req, err := http.NewRequest("POST", srv.URL+tt.path, nil)
+ req, err := http.NewRequest(http.MethodPost, srv.URL+tt.path, nil)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
@@ -267,16 +272,16 @@ func TestNewPeerHandlerOnMembersPromotePrefix(t *testing.T) {
if err != nil {
t.Fatalf("failed to get http response: %v", err)
}
- body, err := ioutil.ReadAll(resp.Body)
+ body, err := io.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
- t.Fatalf("unexpected ioutil.ReadAll error: %v", err)
+ t.Fatalf("unexpected io.ReadAll error: %v", err)
}
if resp.StatusCode != tt.wcode {
t.Fatalf("#%d: code = %d, want %d", i, resp.StatusCode, tt.wcode)
}
if tt.checkBody && strings.Contains(string(body), tt.wKeyWords) {
- t.Errorf("#%d: body: %s, want body to contain keywords: %s", i, string(body), tt.wKeyWords)
+ t.Errorf("#%d: body: %s, want body to contain keywords: %s", i, body, tt.wKeyWords)
}
}
}
diff --git a/server/etcdserver/api/v2http/httptypes/errors.go b/server/etcdserver/api/etcdhttp/types/errors.go
similarity index 100%
rename from server/etcdserver/api/v2http/httptypes/errors.go
rename to server/etcdserver/api/etcdhttp/types/errors.go
diff --git a/server/etcdserver/api/v2http/httptypes/errors_test.go b/server/etcdserver/api/etcdhttp/types/errors_test.go
similarity index 100%
rename from server/etcdserver/api/v2http/httptypes/errors_test.go
rename to server/etcdserver/api/etcdhttp/types/errors_test.go
diff --git a/server/etcdserver/api/etcdhttp/utils.go b/server/etcdserver/api/etcdhttp/utils.go
new file mode 100644
index 00000000000..082fa5a9b2d
--- /dev/null
+++ b/server/etcdserver/api/etcdhttp/utils.go
@@ -0,0 +1,99 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdhttp
+
+import (
+ errorspkg "errors"
+ "net/http"
+
+ "go.uber.org/zap"
+
+ httptypes "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/types"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+)
+
+func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool {
+ if m == r.Method {
+ return true
+ }
+ w.Header().Set("Allow", m)
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return false
+}
+
+// writeError logs and writes the given error to the ResponseWriter.
+// If the error is a *v2error.Error or an *httptypes.HTTPError, it is rendered directly;
+// otherwise it is reported as a StatusInternalServerError.
+func writeError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) {
+ if err == nil {
+ return
+ }
+ var v2Err *v2error.Error
+ var httpErr *httptypes.HTTPError
+ switch {
+ case errorspkg.As(err, &v2Err):
+ v2Err.WriteTo(w)
+
+ case errorspkg.As(err, &httpErr):
+ if et := httpErr.WriteTo(w); et != nil {
+ if lg != nil {
+ lg.Debug(
+ "failed to write v2 HTTP error",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.String("internal-server-error", httpErr.Error()),
+ zap.Error(et),
+ )
+ }
+ }
+
+ default:
+ switch {
+ case
+ errorspkg.Is(err, errors.ErrTimeoutDueToLeaderFail),
+ errorspkg.Is(err, errors.ErrTimeoutDueToConnectionLost),
+ errorspkg.Is(err, errors.ErrNotEnoughStartedMembers),
+ errorspkg.Is(err, errors.ErrUnhealthy):
+ if lg != nil {
+ lg.Warn(
+ "v2 response error",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.String("internal-server-error", err.Error()),
+ )
+ }
+
+ default:
+ if lg != nil {
+ lg.Warn(
+ "unexpected v2 response error",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.String("internal-server-error", err.Error()),
+ )
+ }
+ }
+
+ herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error")
+ if et := herr.WriteTo(w); et != nil {
+ if lg != nil {
+ lg.Debug(
+ "failed to write v2 HTTP error",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.String("internal-server-error", err.Error()),
+ zap.Error(et),
+ )
+ }
+ }
+ }
+}
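
For reference, a hypothetical test sketch (not part of this patch) of how the dispatch in writeError plays out. It assumes it lives in package etcdhttp next to utils.go; the request path and error values are illustrative, and the 404 assertion assumes v2error maps EcodeKeyNotFound to http.StatusNotFound, as the v2 error table has historically done.

```go
package etcdhttp

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"

	"go.uber.org/zap/zaptest"

	"go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
)

// TestWriteErrorSketch is illustrative only: a *v2error.Error is rendered with its
// own status code, while an unrecognized error falls through to a generic 500.
func TestWriteErrorSketch(t *testing.T) {
	req := httptest.NewRequest(http.MethodGet, "/v2/keys/foo", nil)

	rec := httptest.NewRecorder()
	writeError(zaptest.NewLogger(t), rec, req, v2error.NewError(v2error.EcodeKeyNotFound, "/foo", 0))
	if rec.Code != http.StatusNotFound {
		t.Fatalf("code = %d, want %d", rec.Code, http.StatusNotFound)
	}

	rec = httptest.NewRecorder()
	writeError(zaptest.NewLogger(t), rec, req, fmt.Errorf("boom"))
	if rec.Code != http.StatusInternalServerError {
		t.Fatalf("code = %d, want %d", rec.Code, http.StatusInternalServerError)
	}
}
```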
diff --git a/server/etcdserver/api/etcdhttp/version.go b/server/etcdserver/api/etcdhttp/version.go
new file mode 100644
index 00000000000..8090703a0ed
--- /dev/null
+++ b/server/etcdserver/api/etcdhttp/version.go
@@ -0,0 +1,65 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdhttp
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+)
+
+const (
+ versionPath = "/version"
+)
+
+func HandleVersion(mux *http.ServeMux, server etcdserver.Server) {
+ mux.HandleFunc(versionPath, versionHandler(server, serveVersion))
+}
+
+func versionHandler(server etcdserver.Server, fn func(http.ResponseWriter, *http.Request, string, string)) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ clusterVersion := server.ClusterVersion()
+ storageVersion := server.StorageVersion()
+ clusterVersionStr, storageVersionStr := "not_decided", "unknown"
+ if clusterVersion != nil {
+ clusterVersionStr = clusterVersion.String()
+ }
+ if storageVersion != nil {
+ storageVersionStr = storageVersion.String()
+ }
+ fn(w, r, clusterVersionStr, storageVersionStr)
+ }
+}
+
+func serveVersion(w http.ResponseWriter, r *http.Request, clusterV, storageV string) {
+ if !allowMethod(w, r, "GET") {
+ return
+ }
+ vs := version.Versions{
+ Server: version.Version,
+ Cluster: clusterV,
+ Storage: storageV,
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ b, err := json.Marshal(&vs)
+ if err != nil {
+ panic(fmt.Sprintf("cannot marshal versions to json (%v)", err))
+ }
+ w.Write(b)
+}
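
Assuming an etcd member is already serving the handler above on its client URL, here is a minimal client-side sketch (not part of this patch) that decodes the /version payload, which now carries the storage version alongside the server and cluster versions. The client URL is an assumption.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"

	"go.etcd.io/etcd/api/v3/version"
)

func main() {
	// Assumes an etcd member listening on the default client URL.
	resp, err := http.Get("http://127.0.0.1:2379/version")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var vs version.Versions
	if err := json.NewDecoder(resp.Body).Decode(&vs); err != nil {
		panic(err)
	}
	// Per versionHandler above, Cluster is "not_decided" and Storage is "unknown"
	// until the corresponding versions have been decided.
	fmt.Printf("server=%s cluster=%s storage=%s\n", vs.Server, vs.Cluster, vs.Storage)
}
```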
diff --git a/server/etcdserver/api/etcdhttp/version_test.go b/server/etcdserver/api/etcdhttp/version_test.go
index 37a14dd1d5a..3da19bb791d 100644
--- a/server/etcdserver/api/etcdhttp/version_test.go
+++ b/server/etcdserver/api/etcdhttp/version_test.go
@@ -24,18 +24,19 @@ import (
)
func TestServeVersion(t *testing.T) {
- req, err := http.NewRequest("GET", "", nil)
+ req, err := http.NewRequest(http.MethodGet, "", nil)
if err != nil {
t.Fatalf("error creating request: %v", err)
}
rw := httptest.NewRecorder()
- serveVersion(rw, req, "2.1.0")
+ serveVersion(rw, req, "3.6.0", "3.5.2")
if rw.Code != http.StatusOK {
t.Errorf("code=%d, want %d", rw.Code, http.StatusOK)
}
vs := version.Versions{
Server: version.Version,
- Cluster: "2.1.0",
+ Cluster: "3.6.0",
+ Storage: "3.5.2",
}
w, err := json.Marshal(&vs)
if err != nil {
@@ -53,14 +54,16 @@ func TestServeVersionFails(t *testing.T) {
for _, m := range []string{
"CONNECT", "TRACE", "PUT", "POST", "HEAD",
} {
- req, err := http.NewRequest(m, "", nil)
- if err != nil {
- t.Fatalf("error creating request: %v", err)
- }
- rw := httptest.NewRecorder()
- serveVersion(rw, req, "2.1.0")
- if rw.Code != http.StatusMethodNotAllowed {
- t.Errorf("method %s: code=%d, want %d", m, rw.Code, http.StatusMethodNotAllowed)
- }
+ t.Run(m, func(t *testing.T) {
+ req, err := http.NewRequest(m, "", nil)
+ if err != nil {
+ t.Fatalf("error creating request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ serveVersion(rw, req, "3.6.0", "3.5.2")
+ if rw.Code != http.StatusMethodNotAllowed {
+ t.Errorf("method %s: code=%d, want %d", m, rw.Code, http.StatusMethodNotAllowed)
+ }
+ })
}
}
diff --git a/server/etcdserver/api/membership/cluster.go b/server/etcdserver/api/membership/cluster.go
index 3187d12f7d1..6539b977d23 100644
--- a/server/etcdserver/api/membership/cluster.go
+++ b/server/etcdserver/api/membership/cluster.go
@@ -15,33 +15,30 @@
package membership
import (
- "bytes"
"context"
"crypto/sha1"
"encoding/binary"
"encoding/json"
"fmt"
- "path"
"sort"
"strings"
"sync"
"time"
+ "github.com/coreos/go-semver/semver"
+ "github.com/prometheus/client_golang/prometheus"
+ "go.uber.org/zap"
+
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/pkg/v3/netutil"
- "go.etcd.io/etcd/raft/v3"
- "go.etcd.io/etcd/raft/v3/raftpb"
+ "go.etcd.io/etcd/pkg/v3/notify"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
-
- "github.com/coreos/go-semver/semver"
- "github.com/prometheus/client_golang/prometheus"
- "go.uber.org/zap"
+ serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
)
-const maxLearners = 1
-
// RaftCluster is a list of Members that belong to the same raft cluster
type RaftCluster struct {
lg *zap.Logger
@@ -50,7 +47,7 @@ type RaftCluster struct {
cid types.ID
v2store v2store.Store
- be backend.Backend
+ be MembershipBackend
sync.Mutex // guards the fields below
version *semver.Version
@@ -59,7 +56,9 @@ type RaftCluster struct {
// removed id cannot be reused.
removed map[types.ID]bool
- downgradeInfo *DowngradeInfo
+ downgradeInfo *serverversion.DowngradeInfo
+ maxLearners int
+ versionChanged *notify.Notifier
}
// ConfigChangeContext represents a context for confChange.
@@ -80,8 +79,8 @@ const (
// NewClusterFromURLsMap creates a new raft cluster using provided urls map. Currently, it does not support creating
// cluster with raft learner member.
-func NewClusterFromURLsMap(lg *zap.Logger, token string, urlsmap types.URLsMap) (*RaftCluster, error) {
- c := NewCluster(lg)
+func NewClusterFromURLsMap(lg *zap.Logger, token string, urlsmap types.URLsMap, opts ...ClusterOption) (*RaftCluster, error) {
+ c := NewCluster(lg, opts...)
for name, urls := range urlsmap {
m := NewMember(name, urls, token, nil)
if _, ok := c.members[m.ID]; ok {
@@ -96,8 +95,8 @@ func NewClusterFromURLsMap(lg *zap.Logger, token string, urlsmap types.URLsMap)
return c, nil
}
-func NewClusterFromMembers(lg *zap.Logger, id types.ID, membs []*Member) *RaftCluster {
- c := NewCluster(lg)
+func NewClusterFromMembers(lg *zap.Logger, id types.ID, membs []*Member, opts ...ClusterOption) *RaftCluster {
+ c := NewCluster(lg, opts...)
c.cid = id
for _, m := range membs {
c.members[m.ID] = m
@@ -105,15 +104,18 @@ func NewClusterFromMembers(lg *zap.Logger, id types.ID, membs []*Member) *RaftCl
return c
}
-func NewCluster(lg *zap.Logger) *RaftCluster {
+func NewCluster(lg *zap.Logger, opts ...ClusterOption) *RaftCluster {
if lg == nil {
lg = zap.NewNop()
}
+ clOpts := newClusterOpts(opts...)
+
return &RaftCluster{
lg: lg,
members: make(map[types.ID]*Member),
removed: make(map[types.ID]bool),
- downgradeInfo: &DowngradeInfo{Enabled: false},
+ downgradeInfo: &serverversion.DowngradeInfo{Enabled: false},
+ maxLearners: clOpts.maxLearners,
}
}
@@ -127,7 +129,7 @@ func (c *RaftCluster) Members() []*Member {
ms = append(ms, m.Clone())
}
sort.Sort(ms)
- return []*Member(ms)
+ return ms
}
func (c *RaftCluster) Member(id types.ID) *Member {
@@ -146,7 +148,7 @@ func (c *RaftCluster) VotingMembers() []*Member {
}
}
sort.Sort(ms)
- return []*Member(ms)
+ return ms
}
// MemberByName returns a Member with the given name if exists.
@@ -212,7 +214,7 @@ func (c *RaftCluster) ClientURLs() []string {
func (c *RaftCluster) String() string {
c.Lock()
defer c.Unlock()
- b := &bytes.Buffer{}
+ b := &strings.Builder{}
fmt.Fprintf(b, "{ClusterID:%s ", c.cid)
var ms []string
for _, m := range c.members {
@@ -240,13 +242,18 @@ func (c *RaftCluster) genID() {
func (c *RaftCluster) SetID(localID, cid types.ID) {
c.localID = localID
c.cid = cid
+ c.buildMembershipMetric()
}
func (c *RaftCluster) SetStore(st v2store.Store) { c.v2store = st }
-func (c *RaftCluster) SetBackend(be backend.Backend) {
+func (c *RaftCluster) SetBackend(be MembershipBackend) {
c.be = be
- mustCreateBackendBuckets(c.be)
+ c.be.MustCreateBackendBuckets()
+}
+
+func (c *RaftCluster) SetVersionChangedNotifier(n *notify.Notifier) {
+ c.versionChanged = n
}
func (c *RaftCluster) Recover(onSet func(*zap.Logger, *semver.Version)) {
@@ -254,30 +261,40 @@ func (c *RaftCluster) Recover(onSet func(*zap.Logger, *semver.Version)) {
defer c.Unlock()
if c.be != nil {
- c.version = clusterVersionFromBackend(c.lg, c.be)
- c.members, c.removed = membersFromBackend(c.lg, c.be)
+ c.version = c.be.ClusterVersionFromBackend()
+ c.members, c.removed = c.be.MustReadMembersFromBackend()
} else {
c.version = clusterVersionFromStore(c.lg, c.v2store)
c.members, c.removed = membersFromStore(c.lg, c.v2store)
}
+ c.buildMembershipMetric()
if c.be != nil {
- c.downgradeInfo = downgradeInfoFromBackend(c.lg, c.be)
+ c.downgradeInfo = c.be.DowngradeInfoFromBackend()
}
- d := &DowngradeInfo{Enabled: false}
- if c.downgradeInfo != nil {
- d = &DowngradeInfo{Enabled: c.downgradeInfo.Enabled, TargetVersion: c.downgradeInfo.TargetVersion}
+ sv := semver.Must(semver.NewVersion(version.Version))
+ if c.downgradeInfo != nil && c.downgradeInfo.Enabled {
+ c.lg.Info(
+ "cluster is downgrading to target version",
+ zap.String("target-cluster-version", c.downgradeInfo.TargetVersion),
+ zap.String("current-server-version", sv.String()),
+ )
}
- mustDetectDowngrade(c.lg, c.version, d)
+ serverversion.MustDetectDowngrade(c.lg, sv, c.version)
onSet(c.lg, c.version)
for _, m := range c.members {
+ if c.localID == m.ID {
+ setIsLearnerMetric(m)
+ }
+
c.lg.Info(
"recovered/added member from store",
zap.String("cluster-id", c.cid.String()),
zap.String("local-member-id", c.localID.String()),
zap.String("recovered-remote-peer-id", m.ID.String()),
zap.Strings("recovered-remote-peer-urls", m.PeerURLs),
+ zap.Bool("recovered-remote-peer-is-learner", m.IsLearner),
)
}
if c.version != nil {
@@ -292,9 +309,9 @@ func (c *RaftCluster) Recover(onSet func(*zap.Logger, *semver.Version)) {
// ensures that it is still valid.
func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error {
// TODO: this must be switched to backend as well.
- members, removed := membersFromStore(c.lg, c.v2store)
+ membersMap, removedMap := membersFromStore(c.lg, c.v2store)
id := types.ID(cc.NodeID)
- if removed[id] {
+ if removedMap[id] {
return ErrIDRemoved
}
switch cc.Type {
@@ -305,19 +322,21 @@ func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error {
}
if confChangeContext.IsPromote { // promoting a learner member to voting member
- if members[id] == nil {
+ if membersMap[id] == nil {
return ErrIDNotFound
}
- if !members[id].IsLearner {
+ if !membersMap[id].IsLearner {
return ErrMemberNotLearner
}
} else { // adding a new member
- if members[id] != nil {
+ if membersMap[id] != nil {
return ErrIDExists
}
+ var members []*Member
urls := make(map[string]bool)
- for _, m := range members {
+ for _, m := range membersMap {
+ members = append(members, m)
for _, u := range m.PeerURLs {
urls[u] = true
}
@@ -328,29 +347,24 @@ func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error {
}
}
- if confChangeContext.Member.IsLearner { // the new member is a learner
- numLearners := 0
- for _, m := range members {
- if m.IsLearner {
- numLearners++
- }
- }
- if numLearners+1 > maxLearners {
- return ErrTooManyLearners
+ if confChangeContext.Member.RaftAttributes.IsLearner && cc.Type == raftpb.ConfChangeAddLearnerNode { // the new member is a learner
+ scaleUpLearners := true
+ if err := ValidateMaxLearnerConfig(c.maxLearners, members, scaleUpLearners); err != nil {
+ return err
}
}
}
case raftpb.ConfChangeRemoveNode:
- if members[id] == nil {
+ if membersMap[id] == nil {
return ErrIDNotFound
}
case raftpb.ConfChangeUpdateNode:
- if members[id] == nil {
+ if membersMap[id] == nil {
return ErrIDNotFound
}
urls := make(map[string]bool)
- for _, m := range members {
+ for _, m := range membersMap {
if m.ID == id {
continue
}
@@ -383,19 +397,34 @@ func (c *RaftCluster) AddMember(m *Member, shouldApplyV3 ShouldApplyV3) {
if c.v2store != nil {
mustSaveMemberToStore(c.lg, c.v2store, m)
}
- if c.be != nil && shouldApplyV3 {
- mustSaveMemberToBackend(c.lg, c.be, m)
+
+ if m.ID == c.localID {
+ setIsLearnerMetric(m)
}
- c.members[m.ID] = m
+ if c.be != nil && shouldApplyV3 {
+ c.be.MustSaveMemberToBackend(m)
- c.lg.Info(
- "added member",
- zap.String("cluster-id", c.cid.String()),
- zap.String("local-member-id", c.localID.String()),
- zap.String("added-peer-id", m.ID.String()),
- zap.Strings("added-peer-peer-urls", m.PeerURLs),
- )
+ c.members[m.ID] = m
+ c.updateMembershipMetric(m.ID, true)
+
+ c.lg.Info(
+ "added member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("added-peer-id", m.ID.String()),
+ zap.Strings("added-peer-peer-urls", m.PeerURLs),
+ zap.Bool("added-peer-is-learner", m.IsLearner),
+ )
+ } else {
+ c.lg.Info(
+ "ignore already added member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("added-peer-id", m.ID.String()),
+ zap.Strings("added-peer-peer-urls", m.PeerURLs),
+ zap.Bool("added-peer-is-learner", m.IsLearner))
+ }
}
// RemoveMember removes a member from the store.
@@ -407,24 +436,33 @@ func (c *RaftCluster) RemoveMember(id types.ID, shouldApplyV3 ShouldApplyV3) {
mustDeleteMemberFromStore(c.lg, c.v2store, id)
}
if c.be != nil && shouldApplyV3 {
- mustDeleteMemberFromBackend(c.be, id)
- }
-
- m, ok := c.members[id]
- delete(c.members, id)
- c.removed[id] = true
-
- if ok {
- c.lg.Info(
- "removed member",
- zap.String("cluster-id", c.cid.String()),
- zap.String("local-member-id", c.localID.String()),
- zap.String("removed-remote-peer-id", id.String()),
- zap.Strings("removed-remote-peer-urls", m.PeerURLs),
- )
+ c.be.MustDeleteMemberFromBackend(id)
+
+ m, ok := c.members[id]
+ delete(c.members, id)
+ c.removed[id] = true
+ c.updateMembershipMetric(id, false)
+
+ if ok {
+ c.lg.Info(
+ "removed member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("removed-remote-peer-id", id.String()),
+ zap.Strings("removed-remote-peer-urls", m.PeerURLs),
+ zap.Bool("removed-remote-peer-is-learner", m.IsLearner),
+ )
+ } else {
+ c.lg.Warn(
+ "skipped removing already removed member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("removed-remote-peer-id", id.String()),
+ )
+ }
} else {
- c.lg.Warn(
- "skipped removing already removed member",
+ c.lg.Info(
+ "ignore already removed member",
zap.String("cluster-id", c.cid.String()),
zap.String("local-member-id", c.localID.String()),
zap.String("removed-remote-peer-id", id.String()),
@@ -442,7 +480,7 @@ func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes, shouldApply
mustUpdateMemberAttrInStore(c.lg, c.v2store, m)
}
if c.be != nil && shouldApplyV3 {
- mustSaveMemberToBackend(c.lg, c.be, m)
+ c.be.MustSaveMemberToBackend(m)
}
return
}
@@ -470,40 +508,66 @@ func (c *RaftCluster) PromoteMember(id types.ID, shouldApplyV3 ShouldApplyV3) {
c.Lock()
defer c.Unlock()
- c.members[id].RaftAttributes.IsLearner = false
if c.v2store != nil {
- mustUpdateMemberInStore(c.lg, c.v2store, c.members[id])
+ m := *(c.members[id])
+ m.RaftAttributes.IsLearner = false
+ mustUpdateMemberInStore(c.lg, c.v2store, &m)
}
- if c.be != nil && shouldApplyV3 {
- mustSaveMemberToBackend(c.lg, c.be, c.members[id])
+
+ if id == c.localID {
+ isLearner.Set(0)
}
- c.lg.Info(
- "promote member",
- zap.String("cluster-id", c.cid.String()),
- zap.String("local-member-id", c.localID.String()),
- )
+ if c.be != nil && shouldApplyV3 {
+ c.members[id].RaftAttributes.IsLearner = false
+ c.updateMembershipMetric(id, true)
+ c.be.MustSaveMemberToBackend(c.members[id])
+
+ c.lg.Info(
+ "promote member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ )
+ } else {
+ c.lg.Info(
+ "ignore already promoted member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ )
+ }
}
func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes, shouldApplyV3 ShouldApplyV3) {
c.Lock()
defer c.Unlock()
- c.members[id].RaftAttributes = raftAttr
if c.v2store != nil {
- mustUpdateMemberInStore(c.lg, c.v2store, c.members[id])
+ m := *(c.members[id])
+ m.RaftAttributes = raftAttr
+ mustUpdateMemberInStore(c.lg, c.v2store, &m)
}
if c.be != nil && shouldApplyV3 {
- mustSaveMemberToBackend(c.lg, c.be, c.members[id])
- }
+ c.members[id].RaftAttributes = raftAttr
+ c.be.MustSaveMemberToBackend(c.members[id])
- c.lg.Info(
- "updated member",
- zap.String("cluster-id", c.cid.String()),
- zap.String("local-member-id", c.localID.String()),
- zap.String("updated-remote-peer-id", id.String()),
- zap.Strings("updated-remote-peer-urls", raftAttr.PeerURLs),
- )
+ c.lg.Info(
+ "updated member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("updated-remote-peer-id", id.String()),
+ zap.Strings("updated-remote-peer-urls", raftAttr.PeerURLs),
+ zap.Bool("updated-remote-peer-is-learner", raftAttr.IsLearner),
+ )
+ } else {
+ c.lg.Info(
+ "ignored already updated member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("updated-remote-peer-id", id.String()),
+ zap.Strings("updated-remote-peer-urls", raftAttr.PeerURLs),
+ zap.Bool("updated-remote-peer-is-learner", raftAttr.IsLearner),
+ )
+ }
}
func (c *RaftCluster) Version() *semver.Version {
@@ -536,17 +600,21 @@ func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*zap.Logger, *s
}
oldVer := c.version
c.version = ver
- mustDetectDowngrade(c.lg, c.version, c.downgradeInfo)
+ sv := semver.Must(semver.NewVersion(version.Version))
+ serverversion.MustDetectDowngrade(c.lg, sv, c.version)
if c.v2store != nil {
mustSaveClusterVersionToStore(c.lg, c.v2store, ver)
}
if c.be != nil && shouldApplyV3 {
- mustSaveClusterVersionToBackend(c.be, ver)
+ c.be.MustSaveClusterVersionToBackend(ver)
}
if oldVer != nil {
ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(oldVer.String())}).Set(0)
}
ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(ver.String())}).Set(1)
+ if c.versionChanged != nil {
+ c.versionChanged.Notify()
+ }
onSet(c.lg, ver)
}
@@ -675,78 +743,6 @@ func membersFromStore(lg *zap.Logger, st v2store.Store) (map[types.ID]*Member, m
return members, removed
}
-func membersFromBackend(lg *zap.Logger, be backend.Backend) (map[types.ID]*Member, map[types.ID]bool) {
- return mustReadMembersFromBackend(lg, be)
-}
-
-func clusterVersionFromStore(lg *zap.Logger, st v2store.Store) *semver.Version {
- e, err := st.Get(path.Join(storePrefix, "version"), false, false)
- if err != nil {
- if isKeyNotFound(err) {
- return nil
- }
- lg.Panic(
- "failed to get cluster version from store",
- zap.String("path", path.Join(storePrefix, "version")),
- zap.Error(err),
- )
- }
- return semver.Must(semver.NewVersion(*e.Node.Value))
-}
-
-// The field is populated since etcd v3.5.
-func clusterVersionFromBackend(lg *zap.Logger, be backend.Backend) *semver.Version {
- ckey := backendClusterVersionKey()
- tx := be.ReadTx()
- tx.RLock()
- defer tx.RUnlock()
- keys, vals := tx.UnsafeRange(clusterBucketName, ckey, nil, 0)
- if len(keys) == 0 {
- return nil
- }
- if len(keys) != 1 {
- lg.Panic(
- "unexpected number of keys when getting cluster version from backend",
- zap.Int("number-of-key", len(keys)),
- )
- }
- return semver.Must(semver.NewVersion(string(vals[0])))
-}
-
-// The field is populated since etcd v3.5.
-func downgradeInfoFromBackend(lg *zap.Logger, be backend.Backend) *DowngradeInfo {
- dkey := backendDowngradeKey()
- tx := be.ReadTx()
- tx.Lock()
- defer tx.Unlock()
- keys, vals := tx.UnsafeRange(clusterBucketName, dkey, nil, 0)
- if len(keys) == 0 {
- return nil
- }
-
- if len(keys) != 1 {
- lg.Panic(
- "unexpected number of keys when getting cluster version from backend",
- zap.Int("number-of-key", len(keys)),
- )
- }
- var d DowngradeInfo
- if err := json.Unmarshal(vals[0], &d); err != nil {
- lg.Panic("failed to unmarshal downgrade information", zap.Error(err))
- }
-
- // verify the downgrade info from backend
- if d.Enabled {
- if _, err := semver.NewVersion(d.TargetVersion); err != nil {
- lg.Panic(
- "unexpected version format of the downgrade target version from backend",
- zap.String("target-version", d.TargetVersion),
- )
- }
- }
- return &d
-}
-
// ValidateClusterAndAssignIDs validates the local cluster by matching the PeerURLs
// with the existing cluster. If the validation succeeds, it assigns the IDs
// from the existing cluster to the local cluster.
@@ -770,32 +766,17 @@ func ValidateClusterAndAssignIDs(lg *zap.Logger, local *RaftCluster, existing *R
}
}
if !ok {
- return fmt.Errorf("PeerURLs: no match found for existing member (%v, %v), last resolver error (%v)", ems[i].ID, ems[i].PeerURLs, err)
+ return fmt.Errorf("PeerURLs: no match found for existing member (%v, %v), last resolver error (%w)", ems[i].ID, ems[i].PeerURLs, err)
}
}
local.members = make(map[types.ID]*Member)
for _, m := range lms {
local.members[m.ID] = m
}
+ local.buildMembershipMetric()
return nil
}
-// IsValidVersionChange checks the two scenario when version is valid to change:
-// 1. Downgrade: cluster version is 1 minor version higher than local version,
-// cluster version should change.
-// 2. Cluster start: when not all members version are available, cluster version
-// is set to MinVersion(3.0), when all members are at higher version, cluster version
-// is lower than local version, cluster version should change
-func IsValidVersionChange(cv *semver.Version, lv *semver.Version) bool {
- cv = &semver.Version{Major: cv.Major, Minor: cv.Minor}
- lv = &semver.Version{Major: lv.Major, Minor: lv.Minor}
-
- if isValidDowngrade(cv, lv) || (cv.Major == lv.Major && cv.LessThan(*lv)) {
- return true
- }
- return false
-}
-
// IsLocalMemberLearner returns if the local member is raft learner
func (c *RaftCluster) IsLocalMemberLearner() bool {
c.Lock()
@@ -812,40 +793,34 @@ func (c *RaftCluster) IsLocalMemberLearner() bool {
}
// DowngradeInfo returns the downgrade status of the cluster
-func (c *RaftCluster) DowngradeInfo() *DowngradeInfo {
+func (c *RaftCluster) DowngradeInfo() *serverversion.DowngradeInfo {
c.Lock()
defer c.Unlock()
if c.downgradeInfo == nil {
- return &DowngradeInfo{Enabled: false}
+ return &serverversion.DowngradeInfo{Enabled: false}
}
- d := &DowngradeInfo{Enabled: c.downgradeInfo.Enabled, TargetVersion: c.downgradeInfo.TargetVersion}
+ d := &serverversion.DowngradeInfo{Enabled: c.downgradeInfo.Enabled, TargetVersion: c.downgradeInfo.TargetVersion}
return d
}
-func (c *RaftCluster) SetDowngradeInfo(d *DowngradeInfo, shouldApplyV3 ShouldApplyV3) {
+func (c *RaftCluster) SetDowngradeInfo(d *serverversion.DowngradeInfo, shouldApplyV3 ShouldApplyV3) {
c.Lock()
defer c.Unlock()
if c.be != nil && shouldApplyV3 {
- mustSaveDowngradeToBackend(c.lg, c.be, d)
+ c.be.MustSaveDowngradeToBackend(d)
}
c.downgradeInfo = d
-
- if d.Enabled {
- c.lg.Info(
- "The server is ready to downgrade",
- zap.String("target-version", d.TargetVersion),
- zap.String("server-version", version.Version),
- )
- }
}
// IsMemberExist returns if the member with the given id exists in cluster.
func (c *RaftCluster) IsMemberExist(id types.ID) bool {
c.Lock()
- defer c.Unlock()
_, ok := c.members[id]
+ c.Unlock()
+
+ // gofail: var afterIsMemberExist struct{}
return ok
}
@@ -863,19 +838,79 @@ func (c *RaftCluster) VotingMemberIDs() []types.ID {
return ids
}
-// PushMembershipToStorage is overriding storage information about cluster's
-// members, such that they fully reflect internal RaftCluster's storage.
-func (c *RaftCluster) PushMembershipToStorage() {
- if c.be != nil {
- TrimMembershipFromBackend(c.lg, c.be)
- for _, m := range c.members {
- mustSaveMemberToBackend(c.lg, c.be, m)
+// buildMembershipMetric sets the knownPeers metric based on the current
+// members of the cluster.
+func (c *RaftCluster) buildMembershipMetric() {
+ if c.localID == 0 {
+ // We don't know our own id yet.
+ return
+ }
+ for p := range c.members {
+ knownPeers.WithLabelValues(c.localID.String(), p.String()).Set(1)
+ }
+ for p := range c.removed {
+ knownPeers.WithLabelValues(c.localID.String(), p.String()).Set(0)
+ }
+}
+
+// updateMembershipMetric updates the knownPeers metric to indicate that
+// the given peer is now (un)known.
+func (c *RaftCluster) updateMembershipMetric(peer types.ID, known bool) {
+ if c.localID == 0 {
+ // We don't know our own id yet.
+ return
+ }
+ v := float64(0)
+ if known {
+ v = 1
+ }
+ knownPeers.WithLabelValues(c.localID.String(), peer.String()).Set(v)
+}
+
+// ValidateMaxLearnerConfig verifies that the number of learner members in the given
+// membership, plus an optional N+1 learner scale-up, does not exceed maxLearners.
+func ValidateMaxLearnerConfig(maxLearners int, members []*Member, scaleUpLearners bool) error {
+ numLearners := 0
+ for _, m := range members {
+ if m.IsLearner {
+ numLearners++
}
}
- if c.v2store != nil {
- TrimMembershipFromV2Store(c.lg, c.v2store)
- for _, m := range c.members {
- mustSaveMemberToStore(c.lg, c.v2store, m)
+ // Validate config can accommodate scale up.
+ if scaleUpLearners {
+ numLearners++
+ }
+
+ if numLearners > maxLearners {
+ return ErrTooManyLearners
+ }
+
+ return nil
+}
+
+func (c *RaftCluster) Store(store v2store.Store) {
+ c.Lock()
+ defer c.Unlock()
+
+ verifyNoMembersInStore(c.lg, store)
+
+ for _, m := range c.members {
+ mustSaveMemberToStore(c.lg, store, m)
+ if m.ClientURLs != nil {
+ mustUpdateMemberAttrInStore(c.lg, store, m)
}
+ c.lg.Debug(
+ "snapshot storing member",
+ zap.String("id", m.ID.String()),
+ zap.Strings("peer-urls", m.PeerURLs),
+ zap.Bool("is-learner", m.IsLearner),
+ )
+ }
+ for id := range c.removed {
+ // We do not need to delete the member since the store is empty.
+ mustAddToRemovedMembersInStore(c.lg, store, id)
+ }
+ if c.version != nil {
+ mustSaveClusterVersionToStore(c.lg, store, c.version)
}
}
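
Since ValidateMaxLearnerConfig is now exported, a short standalone sketch (not part of this patch) of the check it performs; the member literals are illustrative, and maxLearners is set to 1 to match the previously hard-coded limit.

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
)

func main() {
	// One existing learner plus one voting member.
	members := []*membership.Member{
		{RaftAttributes: membership.RaftAttributes{IsLearner: true}},
		{RaftAttributes: membership.RaftAttributes{}},
	}

	// Asking whether one more learner would fit (scaleUpLearners=true) pushes the
	// learner count to 2 > 1, so this returns ErrTooManyLearners.
	if err := membership.ValidateMaxLearnerConfig(1, members, true); err != nil {
		fmt.Println(err)
	}
}
```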
diff --git a/server/etcdserver/api/membership/cluster_opts.go b/server/etcdserver/api/membership/cluster_opts.go
new file mode 100644
index 00000000000..204fbf04d2c
--- /dev/null
+++ b/server/etcdserver/api/membership/cluster_opts.go
@@ -0,0 +1,43 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+const DefaultMaxLearners = 1
+
+type ClusterOptions struct {
+ maxLearners int
+}
+
+// ClusterOption is an option that can be applied to the raft cluster.
+type ClusterOption func(*ClusterOptions)
+
+func newClusterOpts(opts ...ClusterOption) *ClusterOptions {
+ clOpts := &ClusterOptions{}
+ clOpts.applyOpts(opts)
+ return clOpts
+}
+
+func (co *ClusterOptions) applyOpts(opts []ClusterOption) {
+ for _, opt := range opts {
+ opt(co)
+ }
+}
+
+// WithMaxLearners sets the maximum number of learners that can exist in the cluster membership.
+func WithMaxLearners(max int) ClusterOption {
+ return func(co *ClusterOptions) {
+ co.maxLearners = max
+ }
+}
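
A minimal construction sketch (not part of this patch) showing the option being threaded into NewCluster; the logger and learner count are arbitrary. Note that newClusterOpts applies no default itself, so callers are expected to pass WithMaxLearners explicitly (e.g. with DefaultMaxLearners).

```go
package main

import (
	"go.uber.org/zap"

	"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
)

func main() {
	lg := zap.NewExample()

	// Allow up to three learners; the limit is enforced later by
	// ValidateConfigurationChange via ValidateMaxLearnerConfig.
	cl := membership.NewCluster(lg, membership.WithMaxLearners(3))
	_ = cl
}
```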
diff --git a/server/etcdserver/api/membership/cluster_test.go b/server/etcdserver/api/membership/cluster_test.go
index 23d81fec1ca..c07093878c9 100644
--- a/server/etcdserver/api/membership/cluster_test.go
+++ b/server/etcdserver/api/membership/cluster_test.go
@@ -16,21 +16,20 @@ package membership
import (
"encoding/json"
+ "errors"
"fmt"
"path"
"reflect"
"testing"
- "github.com/coreos/go-semver/semver"
+ "github.com/stretchr/testify/assert"
"go.uber.org/zap/zaptest"
"go.etcd.io/etcd/client/pkg/v3/testutil"
"go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
"go.etcd.io/etcd/server/v3/mock/mockstore"
-
- "go.uber.org/zap"
+ "go.etcd.io/raft/v3/raftpb"
)
func TestClusterMember(t *testing.T) {
@@ -242,7 +241,7 @@ func TestClusterValidateAndAssignIDsBad(t *testing.T) {
for i, tt := range tests {
ecl := newTestCluster(t, tt.clmembs)
lcl := newTestCluster(t, tt.membs)
- if err := ValidateClusterAndAssignIDs(zap.NewExample(), lcl, ecl); err == nil {
+ if err := ValidateClusterAndAssignIDs(zaptest.NewLogger(t), lcl, ecl); err == nil {
t.Errorf("#%d: unexpected update success", i)
}
}
@@ -269,7 +268,7 @@ func TestClusterValidateAndAssignIDs(t *testing.T) {
for i, tt := range tests {
lcl := newTestCluster(t, tt.clmembs)
ecl := newTestCluster(t, tt.membs)
- if err := ValidateClusterAndAssignIDs(zap.NewExample(), lcl, ecl); err != nil {
+ if err := ValidateClusterAndAssignIDs(zaptest.NewLogger(t), lcl, ecl); err != nil {
t.Errorf("#%d: unexpect update error: %v", i, err)
}
if !reflect.DeepEqual(lcl.MemberIDs(), tt.wids) {
@@ -278,11 +277,17 @@ func TestClusterValidateAndAssignIDs(t *testing.T) {
}
}
-func TestClusterValidateConfigurationChange(t *testing.T) {
- cl := NewCluster(zaptest.NewLogger(t))
+func TestClusterValidateConfigurationChangeV2(t *testing.T) {
+ cl := NewCluster(zaptest.NewLogger(t), WithMaxLearners(1))
+ be := newMembershipBackend()
+ cl.SetBackend(be)
cl.SetStore(v2store.New())
for i := 1; i <= 4; i++ {
- attr := RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", i)}}
+ var isLearner bool
+ if i == 1 {
+ isLearner = true
+ }
+ attr := RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", i)}, IsLearner: isLearner}
cl.AddMember(&Member{ID: types.ID(i), RaftAttributes: attr}, true)
}
cl.RemoveMember(4, true)
@@ -327,6 +332,17 @@ func TestClusterValidateConfigurationChange(t *testing.T) {
t.Fatal(err)
}
+ attr = RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", 7)}, IsLearner: true}
+ ctx7, err := json.Marshal(&ConfigChangeContext{Member: Member{ID: types.ID(7), RaftAttributes: attr}})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ attr = RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", 1)}, IsLearner: true}
+ ctx8, err := json.Marshal(&ConfigChangeContext{Member: Member{ID: types.ID(1), RaftAttributes: attr}, IsPromote: true})
+ if err != nil {
+ t.Fatal(err)
+ }
tests := []struct {
cc raftpb.ConfChange
werr error
@@ -424,10 +440,26 @@ func TestClusterValidateConfigurationChange(t *testing.T) {
},
ErrIDNotFound,
},
+ {
+ raftpb.ConfChange{
+ Type: raftpb.ConfChangeAddLearnerNode,
+ NodeID: 7,
+ Context: ctx7,
+ },
+ ErrTooManyLearners,
+ },
+ {
+ raftpb.ConfChange{
+ Type: raftpb.ConfChangeAddNode,
+ NodeID: 1,
+ Context: ctx8,
+ },
+ nil,
+ },
}
for i, tt := range tests {
err := cl.ValidateConfigurationChange(tt.cc)
- if err != tt.werr {
+ if !errors.Is(err, tt.werr) {
t.Errorf("#%d: validateConfigurationChange error = %v, want %v", i, err, tt.werr)
}
}
@@ -439,6 +471,9 @@ func TestClusterGenID(t *testing.T) {
newTestMember(2, nil, "", nil),
})
+ be := newMembershipBackend()
+ cs.SetBackend(be)
+
cs.genID()
if cs.ID() == 0 {
t.Fatalf("cluster.ID = %v, want not 0", cs.ID())
@@ -479,7 +514,7 @@ func TestNodeToMemberBad(t *testing.T) {
}},
}
for i, tt := range tests {
- if _, err := nodeToMember(zap.NewExample(), tt); err == nil {
+ if _, err := nodeToMember(zaptest.NewLogger(t), tt); err == nil {
t.Errorf("#%d: unexpected nil error", i)
}
}
@@ -494,7 +529,7 @@ func TestClusterAddMember(t *testing.T) {
wactions := []testutil.Action{
{
Name: "Create",
- Params: []interface{}{
+ Params: []any{
path.Join(StoreMembersPrefix, "1", "raftAttributes"),
false,
`{"peerURLs":null}`,
@@ -512,15 +547,15 @@ func TestClusterAddMemberAsLearner(t *testing.T) {
st := mockstore.NewRecorder()
c := newTestCluster(t, nil)
c.SetStore(st)
- c.AddMember(newTestMemberAsLearner(1, nil, "node1", nil), true)
+ c.AddMember(newTestMemberAsLearner(1, []string{}, "node1", []string{"http://node1"}), true)
wactions := []testutil.Action{
{
Name: "Create",
- Params: []interface{}{
+ Params: []any{
path.Join(StoreMembersPrefix, "1", "raftAttributes"),
false,
- `{"peerURLs":null,"isLearner":true}`,
+ `{"peerURLs":[],"isLearner":true}`,
false,
v2store.TTLOptionSet{ExpireTime: v2store.Permanent},
},
@@ -558,8 +593,8 @@ func TestClusterRemoveMember(t *testing.T) {
c.RemoveMember(1, true)
wactions := []testutil.Action{
- {Name: "Delete", Params: []interface{}{MemberStoreKey(1), true, true}},
- {Name: "Create", Params: []interface{}{RemovedMemberStoreKey(1), false, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}}},
+ {Name: "Delete", Params: []any{MemberStoreKey(1), true, true}},
+ {Name: "Create", Params: []any{RemovedMemberStoreKey(1), false, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}}},
}
if !reflect.DeepEqual(st.Action(), wactions) {
t.Errorf("actions = %v, want %v", st.Action(), wactions)
@@ -608,7 +643,7 @@ func TestNodeToMember(t *testing.T) {
{Key: "/1234/raftAttributes", Value: stringp(`{"peerURLs":null}`)},
}}
wm := &Member{ID: 0x1234, RaftAttributes: RaftAttributes{}, Attributes: Attributes{Name: "node1"}}
- m, err := nodeToMember(zap.NewExample(), n)
+ m, err := nodeToMember(zaptest.NewLogger(t), n)
if err != nil {
t.Fatalf("unexpected nodeToMember error: %v", err)
}
@@ -948,74 +983,68 @@ func TestIsReadyToPromoteMember(t *testing.T) {
}
}
-func TestIsVersionChangable(t *testing.T) {
- v0 := semver.Must(semver.NewVersion("2.4.0"))
- v1 := semver.Must(semver.NewVersion("3.4.0"))
- v2 := semver.Must(semver.NewVersion("3.5.0"))
- v3 := semver.Must(semver.NewVersion("3.5.1"))
- v4 := semver.Must(semver.NewVersion("3.6.0"))
+func TestClusterStore(t *testing.T) {
+ name := "etcd"
+ clientURLs := []string{"http://127.0.0.1:4001"}
tests := []struct {
- name string
- currentVersion *semver.Version
- localVersion *semver.Version
- expectedResult bool
+ name string
+ mems []*Member
+ removed map[types.ID]bool
}{
{
- name: "When local version is one minor lower than cluster version",
- currentVersion: v2,
- localVersion: v1,
- expectedResult: true,
- },
- {
- name: "When local version is one minor and one patch lower than cluster version",
- currentVersion: v3,
- localVersion: v1,
- expectedResult: true,
- },
- {
- name: "When local version is one minor higher than cluster version",
- currentVersion: v1,
- localVersion: v2,
- expectedResult: true,
- },
- {
- name: "When local version is two minor higher than cluster version",
- currentVersion: v1,
- localVersion: v4,
- expectedResult: true,
- },
- {
- name: "When local version is one major higher than cluster version",
- currentVersion: v0,
- localVersion: v1,
- expectedResult: false,
+ name: "Single member, no removed members",
+ mems: []*Member{
+ newTestMember(1, nil, name, clientURLs),
+ },
+ removed: map[types.ID]bool{},
},
{
- name: "When local version is equal to cluster version",
- currentVersion: v1,
- localVersion: v1,
- expectedResult: false,
+ name: "Multiple members, no removed members",
+ mems: []*Member{
+ newTestMember(1, nil, name, clientURLs),
+ newTestMember(2, nil, name, clientURLs),
+ newTestMember(3, nil, name, clientURLs),
+ },
+ removed: map[types.ID]bool{},
},
{
- name: "When local version is one patch higher than cluster version",
- currentVersion: v2,
- localVersion: v3,
- expectedResult: false,
+ name: "Single member, one removed member",
+ mems: []*Member{
+ newTestMember(1, nil, name, clientURLs),
+ },
+ removed: map[types.ID]bool{types.ID(2): true},
},
{
- name: "When local version is two minor lower than cluster version",
- currentVersion: v4,
- localVersion: v1,
- expectedResult: false,
+ name: "Multiple members, some removed members",
+ mems: []*Member{
+ newTestMember(1, nil, name, clientURLs),
+ newTestMember(2, nil, name, clientURLs),
+ newTestMember(3, nil, name, clientURLs),
+ },
+ removed: map[types.ID]bool{
+ types.ID(4): true,
+ types.ID(5): true,
+ },
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- if ret := IsValidVersionChange(tt.currentVersion, tt.localVersion); ret != tt.expectedResult {
- t.Errorf("Expected %v; Got %v", tt.expectedResult, ret)
+ c := newTestCluster(t, tt.mems)
+ c.removed = tt.removed
+
+ st := v2store.New("/0", "/1")
+ c.Store(st)
+
+ // Verify that the members are properly stored
+ mst, rst := membersFromStore(c.lg, st)
+ for _, mem := range tt.mems {
+ assert.Equal(t, mem, mst[mem.ID])
}
+
+ // Verify that removed members are correctly stored
+ assert.Equal(t, tt.removed, rst)
})
}
}
diff --git a/server/etcdserver/api/membership/confstate.go b/server/etcdserver/api/membership/confstate.go
deleted file mode 100644
index 9bfc71b379c..00000000000
--- a/server/etcdserver/api/membership/confstate.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package membership
-
-import (
- "encoding/json"
- "log"
-
- "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/server/v3/mvcc"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
- "go.uber.org/zap"
-)
-
-var (
- confStateKey = []byte("confState")
-)
-
-// MustUnsafeSaveConfStateToBackend persists confState using given transaction (tx).
-// confState in backend is persisted since etcd v3.5.
-func MustUnsafeSaveConfStateToBackend(lg *zap.Logger, tx backend.BatchTx, confState *raftpb.ConfState) {
- confStateBytes, err := json.Marshal(confState)
- if err != nil {
- lg.Panic("Cannot marshal raftpb.ConfState", zap.Stringer("conf-state", confState), zap.Error(err))
- }
-
- tx.UnsafePut(mvcc.MetaBucketName, confStateKey, confStateBytes)
-}
-
-// UnsafeConfStateFromBackend retrieves ConfState from the backend.
-// Returns nil if confState in backend is not persisted (e.g. backend written by etcd versions older than v3.5).
-	if len(r.FormValue("ttl")) > 0 {
- i, err := getUint64(r.Form, "ttl")
- if err != nil {
- return emptyReq, false, v2error.NewRequestError(
- v2error.EcodeTTLNaN,
- `invalid value for "ttl"`,
- )
- }
- ttl = &i
- }
-
- // prevExist is nullable, so leave it null if not specified
- var pe *bool
- if _, ok := r.Form["prevExist"]; ok {
- bv, err := getBool(r.Form, "prevExist")
- if err != nil {
- return emptyReq, false, v2error.NewRequestError(
- v2error.EcodeInvalidField,
- "invalid value for prevExist",
- )
- }
- pe = &bv
- }
-
- // refresh is nullable, so leave it null if not specified
- var refresh *bool
- if _, ok := r.Form["refresh"]; ok {
- bv, err := getBool(r.Form, "refresh")
- if err != nil {
- return emptyReq, false, v2error.NewRequestError(
- v2error.EcodeInvalidField,
- "invalid value for refresh",
- )
- }
- refresh = &bv
- if refresh != nil && *refresh {
- val := r.FormValue("value")
- if _, ok := r.Form["value"]; ok && val != "" {
- return emptyReq, false, v2error.NewRequestError(
- v2error.EcodeRefreshValue,
- `A value was provided on a refresh`,
- )
- }
- if ttl == nil {
- return emptyReq, false, v2error.NewRequestError(
- v2error.EcodeRefreshTTLRequired,
- `No TTL value set`,
- )
- }
- }
- }
-
- rr := etcdserverpb.Request{
- Method: r.Method,
- Path: p,
- Val: r.FormValue("value"),
- Dir: dir,
- PrevValue: pV,
- PrevIndex: pIdx,
- PrevExist: pe,
- Wait: wait,
- Since: wIdx,
- Recursive: rec,
- Sorted: sort,
- Quorum: quorum,
- Stream: stream,
- }
-
- if pe != nil {
- rr.PrevExist = pe
- }
-
- if refresh != nil {
- rr.Refresh = refresh
- }
-
- // Null TTL is equivalent to unset Expiration
- if ttl != nil {
- expr := time.Duration(*ttl) * time.Second
- rr.Expiration = clock.Now().Add(expr).UnixNano()
- }
-
- return rr, noValueOnSuccess, nil
-}
-
-// writeKeyEvent trims the prefix of key path in a single Event under
-// StoreKeysPrefix, serializes it and writes the resulting JSON to the given
-// ResponseWriter, along with the appropriate headers.
-func writeKeyEvent(w http.ResponseWriter, resp etcdserver.Response, noValueOnSuccess bool) error {
- ev := resp.Event
- if ev == nil {
- return errors.New("cannot write empty Event")
- }
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("X-Etcd-Index", fmt.Sprint(ev.EtcdIndex))
- w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index))
- w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term))
-
- if ev.IsCreated() {
- w.WriteHeader(http.StatusCreated)
- }
-
- ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
- if noValueOnSuccess &&
- (ev.Action == v2store.Set || ev.Action == v2store.CompareAndSwap ||
- ev.Action == v2store.Create || ev.Action == v2store.Update) {
- ev.Node = nil
- ev.PrevNode = nil
- }
- return json.NewEncoder(w).Encode(ev)
-}
-
-func writeKeyNoAuth(w http.ResponseWriter) {
- e := v2error.NewError(v2error.EcodeUnauthorized, "Insufficient credentials", 0)
- e.WriteTo(w)
-}
-
-// writeKeyError logs and writes the given Error to the ResponseWriter.
-// If Error is not an etcdErr, the error will be converted to an etcd error.
-func writeKeyError(lg *zap.Logger, w http.ResponseWriter, err error) {
- if err == nil {
- return
- }
- switch e := err.(type) {
- case *v2error.Error:
- e.WriteTo(w)
- default:
- switch err {
- case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost:
- if lg != nil {
- lg.Warn(
- "v2 response error",
- zap.String("internal-server-error", err.Error()),
- )
- }
- default:
- if lg != nil {
- lg.Warn(
- "unexpected v2 response error",
- zap.String("internal-server-error", err.Error()),
- )
- }
- }
- ee := v2error.NewError(v2error.EcodeRaftInternal, err.Error(), 0)
- ee.WriteTo(w)
- }
-}
-
-func handleKeyWatch(ctx context.Context, lg *zap.Logger, w http.ResponseWriter, resp etcdserver.Response, stream bool) {
- wa := resp.Watcher
- defer wa.Remove()
- ech := wa.EventChan()
- var nch <-chan bool
- if x, ok := w.(http.CloseNotifier); ok {
- nch = x.CloseNotify()
- }
-
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("X-Etcd-Index", fmt.Sprint(wa.StartIndex()))
- w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index))
- w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term))
- w.WriteHeader(http.StatusOK)
-
- // Ensure headers are flushed early, in case of long polling
- w.(http.Flusher).Flush()
-
- for {
- select {
- case <-nch:
- // Client closed connection. Nothing to do.
- return
- case <-ctx.Done():
- // Timed out. net/http will close the connection for us, so nothing to do.
- return
- case ev, ok := <-ech:
- if !ok {
-			// If the channel is closed, it may indicate that notifications
-			// arrive faster than we are able to send them to the client
-			// in time, so we simply end streaming.
- return
- }
- ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
- if err := json.NewEncoder(w).Encode(ev); err != nil {
- // Should never be reached
- lg.Warn("failed to encode event", zap.Error(err))
- return
- }
- if !stream {
- return
- }
- w.(http.Flusher).Flush()
- }
- }
-}
-
-func trimEventPrefix(ev *v2store.Event, prefix string) *v2store.Event {
- if ev == nil {
- return nil
- }
-	// Since the *Event may reference one in the store history,
-	// we must copy it before modifying it.
- e := ev.Clone()
- trimNodeExternPrefix(e.Node, prefix)
- trimNodeExternPrefix(e.PrevNode, prefix)
- return e
-}
-
-func trimNodeExternPrefix(n *v2store.NodeExtern, prefix string) {
- if n == nil {
- return
- }
- n.Key = strings.TrimPrefix(n.Key, prefix)
- for _, nn := range n.Nodes {
- trimNodeExternPrefix(nn, prefix)
- }
-}
-
-func trimErrorPrefix(err error, prefix string) error {
- if e, ok := err.(*v2error.Error); ok {
- e.Cause = strings.TrimPrefix(e.Cause, prefix)
- }
- return err
-}
-
-func unmarshalRequest(lg *zap.Logger, r *http.Request, req json.Unmarshaler, w http.ResponseWriter) bool {
- ctype := r.Header.Get("Content-Type")
- semicolonPosition := strings.Index(ctype, ";")
- if semicolonPosition != -1 {
- ctype = strings.TrimSpace(strings.ToLower(ctype[0:semicolonPosition]))
- }
- if ctype != "application/json" {
- writeError(lg, w, r, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype)))
- return false
- }
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- writeError(lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error()))
- return false
- }
- if err := req.UnmarshalJSON(b); err != nil {
- writeError(lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error()))
- return false
- }
- return true
-}
-
-func getID(lg *zap.Logger, p string, w http.ResponseWriter) (types.ID, bool) {
- idStr := trimPrefix(p, membersPrefix)
- if idStr == "" {
- http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
- return 0, false
- }
- id, err := types.IDFromString(idStr)
- if err != nil {
- writeError(lg, w, nil, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", idStr)))
- return 0, false
- }
- return id, true
-}
-
-// getUint64 extracts a uint64 by the given key from a Form. If the key does
-// not exist in the form, 0 is returned. If the key exists but the value is
-// badly formed, an error is returned. If multiple values are present only the
-// first is considered.
-func getUint64(form url.Values, key string) (i uint64, err error) {
- if vals, ok := form[key]; ok {
- i, err = strconv.ParseUint(vals[0], 10, 64)
- }
- return
-}
-
-// getBool extracts a bool by the given key from a Form. If the key does not
-// exist in the form, false is returned. If the key exists but the value is
-// badly formed, an error is returned. If multiple values are present only the
-// first is considered.
-func getBool(form url.Values, key string) (b bool, err error) {
- if vals, ok := form[key]; ok {
- b, err = strconv.ParseBool(vals[0])
- }
- return
-}
-
-// trimPrefix removes a given prefix and any slash following the prefix
-// e.g.: trimPrefix("foo", "foo") == trimPrefix("foo/", "foo") == ""
-func trimPrefix(p, prefix string) (s string) {
- s = strings.TrimPrefix(p, prefix)
- s = strings.TrimPrefix(s, "/")
- return
-}
-
-func newMemberCollection(ms []*membership.Member) *httptypes.MemberCollection {
- c := httptypes.MemberCollection(make([]httptypes.Member, len(ms)))
-
- for i, m := range ms {
- c[i] = newMember(m)
- }
-
- return &c
-}
-
-func newMember(m *membership.Member) httptypes.Member {
- tm := httptypes.Member{
- ID: m.ID.String(),
- Name: m.Name,
- PeerURLs: make([]string, len(m.PeerURLs)),
- ClientURLs: make([]string, len(m.ClientURLs)),
- }
-
- copy(tm.PeerURLs, m.PeerURLs)
- copy(tm.ClientURLs, m.ClientURLs)
-
- return tm
-}
diff --git a/server/etcdserver/api/v2http/client_auth.go b/server/etcdserver/api/v2http/client_auth.go
deleted file mode 100644
index 2c6e7744ed7..00000000000
--- a/server/etcdserver/api/v2http/client_auth.go
+++ /dev/null
@@ -1,604 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2http
-
-import (
- "encoding/json"
- "net/http"
- "path"
- "strings"
-
- "go.etcd.io/etcd/server/v3/etcdserver/api"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2auth"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes"
-
- "go.uber.org/zap"
-)
-
-type authHandler struct {
- lg *zap.Logger
- sec v2auth.Store
- cluster api.Cluster
- clientCertAuthEnabled bool
-}
-
-func hasWriteRootAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, clientCertAuthEnabled bool) bool {
- if r.Method == "GET" || r.Method == "HEAD" {
- return true
- }
- return hasRootAccess(lg, sec, r, clientCertAuthEnabled)
-}
-
-func userFromBasicAuth(lg *zap.Logger, sec v2auth.Store, r *http.Request) *v2auth.User {
- username, password, ok := r.BasicAuth()
- if !ok {
- lg.Warn("malformed basic auth encoding")
- return nil
- }
- user, err := sec.GetUser(username)
- if err != nil {
- return nil
- }
-
- ok = sec.CheckPassword(user, password)
- if !ok {
- lg.Warn("incorrect password", zap.String("user-name", username))
- return nil
- }
- return &user
-}
-
-func userFromClientCertificate(lg *zap.Logger, sec v2auth.Store, r *http.Request) *v2auth.User {
- if r.TLS == nil {
- return nil
- }
-
- for _, chains := range r.TLS.VerifiedChains {
- for _, chain := range chains {
- lg.Debug("found common name", zap.String("common-name", chain.Subject.CommonName))
- user, err := sec.GetUser(chain.Subject.CommonName)
- if err == nil {
- lg.Debug(
- "authenticated a user via common name",
- zap.String("user-name", user.User),
- zap.String("common-name", chain.Subject.CommonName),
- )
- return &user
- }
- }
- }
- return nil
-}
-
-func hasRootAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, clientCertAuthEnabled bool) bool {
- if sec == nil {
- // No store means no auth available, eg, tests.
- return true
- }
- if !sec.AuthEnabled() {
- return true
- }
-
- var rootUser *v2auth.User
- if r.Header.Get("Authorization") == "" && clientCertAuthEnabled {
- rootUser = userFromClientCertificate(lg, sec, r)
- if rootUser == nil {
- return false
- }
- } else {
- rootUser = userFromBasicAuth(lg, sec, r)
- if rootUser == nil {
- return false
- }
- }
-
- for _, role := range rootUser.Roles {
- if role == v2auth.RootRoleName {
- return true
- }
- }
-
- lg.Warn(
- "a user does not have root role for resource",
- zap.String("root-user", rootUser.User),
- zap.String("root-role-name", v2auth.RootRoleName),
- zap.String("resource-path", r.URL.Path),
- )
- return false
-}
-
-func hasKeyPrefixAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, key string, recursive, clientCertAuthEnabled bool) bool {
- if sec == nil {
- // No store means no auth available, eg, tests.
- return true
- }
- if !sec.AuthEnabled() {
- return true
- }
-
- var user *v2auth.User
- if r.Header.Get("Authorization") == "" {
- if clientCertAuthEnabled {
- user = userFromClientCertificate(lg, sec, r)
- }
- if user == nil {
- return hasGuestAccess(lg, sec, r, key)
- }
- } else {
- user = userFromBasicAuth(lg, sec, r)
- if user == nil {
- return false
- }
- }
-
- writeAccess := r.Method != "GET" && r.Method != "HEAD"
- for _, roleName := range user.Roles {
- role, err := sec.GetRole(roleName)
- if err != nil {
- continue
- }
- if recursive {
- if role.HasRecursiveAccess(key, writeAccess) {
- return true
- }
- } else if role.HasKeyAccess(key, writeAccess) {
- return true
- }
- }
-
- lg.Warn(
- "invalid access for user on key",
- zap.String("user-name", user.User),
- zap.String("key", key),
- )
- return false
-}
-
-func hasGuestAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, key string) bool {
- writeAccess := r.Method != "GET" && r.Method != "HEAD"
- role, err := sec.GetRole(v2auth.GuestRoleName)
- if err != nil {
- return false
- }
- if role.HasKeyAccess(key, writeAccess) {
- return true
- }
-
- lg.Warn(
- "invalid access for a guest role on key",
- zap.String("role-name", v2auth.GuestRoleName),
- zap.String("key", key),
- )
- return false
-}
-
-func writeNoAuth(lg *zap.Logger, w http.ResponseWriter, r *http.Request) {
- herr := httptypes.NewHTTPError(http.StatusUnauthorized, "Insufficient credentials")
- if err := herr.WriteTo(w); err != nil {
- lg.Debug(
- "failed to write v2 HTTP error",
- zap.String("remote-addr", r.RemoteAddr),
- zap.Error(err),
- )
- }
-}
-
-func handleAuth(mux *http.ServeMux, sh *authHandler) {
- mux.HandleFunc(authPrefix+"/roles", authCapabilityHandler(sh.baseRoles))
- mux.HandleFunc(authPrefix+"/roles/", authCapabilityHandler(sh.handleRoles))
- mux.HandleFunc(authPrefix+"/users", authCapabilityHandler(sh.baseUsers))
- mux.HandleFunc(authPrefix+"/users/", authCapabilityHandler(sh.handleUsers))
- mux.HandleFunc(authPrefix+"/enable", authCapabilityHandler(sh.enableDisable))
-}
-
-func (sh *authHandler) baseRoles(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET") {
- return
- }
- if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) {
- writeNoAuth(sh.lg, w, r)
- return
- }
-
- w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
- w.Header().Set("Content-Type", "application/json")
-
- roles, err := sh.sec.AllRoles()
- if err != nil {
- writeError(sh.lg, w, r, err)
- return
- }
- if roles == nil {
- roles = make([]string, 0)
- }
-
- err = r.ParseForm()
- if err != nil {
- writeError(sh.lg, w, r, err)
- return
- }
-
- var rolesCollections struct {
- Roles []v2auth.Role `json:"roles"`
- }
- for _, roleName := range roles {
- var role v2auth.Role
- role, err = sh.sec.GetRole(roleName)
- if err != nil {
- writeError(sh.lg, w, r, err)
- return
- }
- rolesCollections.Roles = append(rolesCollections.Roles, role)
- }
- err = json.NewEncoder(w).Encode(rolesCollections)
-
- if err != nil {
- sh.lg.Warn(
- "failed to encode base roles",
- zap.String("url", r.URL.String()),
- zap.Error(err),
- )
- writeError(sh.lg, w, r, err)
- return
- }
-}
-
-func (sh *authHandler) handleRoles(w http.ResponseWriter, r *http.Request) {
- subpath := path.Clean(r.URL.Path[len(authPrefix):])
- // Split "/roles/rolename/command".
- // First item is an empty string, second is "roles"
- pieces := strings.Split(subpath, "/")
- if len(pieces) == 2 {
- sh.baseRoles(w, r)
- return
- }
- if len(pieces) != 3 {
- writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path"))
- return
- }
- sh.forRole(w, r, pieces[2])
-}
-
-func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role string) {
- if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
- return
- }
- if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) {
- writeNoAuth(sh.lg, w, r)
- return
- }
- w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
- w.Header().Set("Content-Type", "application/json")
-
- switch r.Method {
- case "GET":
- data, err := sh.sec.GetRole(role)
- if err != nil {
- writeError(sh.lg, w, r, err)
- return
- }
- err = json.NewEncoder(w).Encode(data)
- if err != nil {
- sh.lg.Warn(
- "failed to encode a role",
- zap.String("url", r.URL.String()),
- zap.Error(err),
- )
- return
- }
- return
-
- case "PUT":
- var in v2auth.Role
- err := json.NewDecoder(r.Body).Decode(&in)
- if err != nil {
- writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body."))
- return
- }
- if in.Role != role {
- writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON name does not match the name in the URL"))
- return
- }
-
- var out v2auth.Role
-
- // create
- if in.Grant.IsEmpty() && in.Revoke.IsEmpty() {
- err = sh.sec.CreateRole(in)
- if err != nil {
- writeError(sh.lg, w, r, err)
- return
- }
- w.WriteHeader(http.StatusCreated)
- out = in
- } else {
- if !in.Permissions.IsEmpty() {
- writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON contains both permissions and grant/revoke"))
- return
- }
- out, err = sh.sec.UpdateRole(in)
- if err != nil {
- writeError(sh.lg, w, r, err)
- return
- }
- w.WriteHeader(http.StatusOK)
- }
-
- err = json.NewEncoder(w).Encode(out)
- if err != nil {
- sh.lg.Warn(
- "failed to encode a role",
- zap.String("url", r.URL.String()),
- zap.Error(err),
- )
- return
- }
- return
-
- case "DELETE":
- err := sh.sec.DeleteRole(role)
- if err != nil {
- writeError(sh.lg, w, r, err)
- return
- }
- }
-}
-
-type userWithRoles struct {
- User string `json:"user"`
- Roles []v2auth.Role `json:"roles,omitempty"`
-}
-
-type usersCollections struct {
- Users []userWithRoles `json:"users"`
-}
-
-func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET") {
- return
- }
- if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) {
- writeNoAuth(sh.lg, w, r)
- return
- }
- w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
- w.Header().Set("Content-Type", "application/json")
-
- users, err := sh.sec.AllUsers()
- if err != nil {
- writeError(sh.lg, w, r, err)
- return
- }
- if users == nil {
- users = make([]string, 0)
- }
-
- err = r.ParseForm()
- if err != nil {
- writeError(sh.lg, w, r, err)
- return
- }
-
- ucs := usersCollections{}
- for _, userName := range users {
- var user v2auth.User
- user, err = sh.sec.GetUser(userName)
- if err != nil {
- writeError(sh.lg, w, r, err)
- return
- }
-
- uwr := userWithRoles{User: user.User}
- for _, roleName := range user.Roles {
- var role v2auth.Role
- role, err = sh.sec.GetRole(roleName)
- if err != nil {
- continue
- }
- uwr.Roles = append(uwr.Roles, role)
- }
-
- ucs.Users = append(ucs.Users, uwr)
- }
- err = json.NewEncoder(w).Encode(ucs)
-
- if err != nil {
- sh.lg.Warn(
- "failed to encode users",
- zap.String("url", r.URL.String()),
- zap.Error(err),
- )
- writeError(sh.lg, w, r, err)
- return
- }
-}
-
-func (sh *authHandler) handleUsers(w http.ResponseWriter, r *http.Request) {
- subpath := path.Clean(r.URL.Path[len(authPrefix):])
- // Split "/users/username".
- // First item is an empty string, second is "users"
- pieces := strings.Split(subpath, "/")
- if len(pieces) == 2 {
- sh.baseUsers(w, r)
- return
- }
- if len(pieces) != 3 {
- writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path"))
- return
- }
- sh.forUser(w, r, pieces[2])
-}
-
-func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user string) {
- if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
- return
- }
- if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) {
- writeNoAuth(sh.lg, w, r)
- return
- }
- w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
- w.Header().Set("Content-Type", "application/json")
-
- switch r.Method {
- case "GET":
- u, err := sh.sec.GetUser(user)
- if err != nil {
- writeError(sh.lg, w, r, err)
- return
- }
-
- err = r.ParseForm()
- if err != nil {
- writeError(sh.lg, w, r, err)
- return
- }
-
- uwr := userWithRoles{User: u.User}
- for _, roleName := range u.Roles {
- var role v2auth.Role
- role, err = sh.sec.GetRole(roleName)
- if err != nil {
- writeError(sh.lg, w, r, err)
- return
- }
- uwr.Roles = append(uwr.Roles, role)
- }
- err = json.NewEncoder(w).Encode(uwr)
-
- if err != nil {
- sh.lg.Warn(
- "failed to encode roles",
- zap.String("url", r.URL.String()),
- zap.Error(err),
- )
- return
- }
- return
-
- case "PUT":
- var u v2auth.User
- err := json.NewDecoder(r.Body).Decode(&u)
- if err != nil {
- writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body."))
- return
- }
- if u.User != user {
- writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON name does not match the name in the URL"))
- return
- }
-
- var (
- out v2auth.User
- created bool
- )
-
- if len(u.Grant) == 0 && len(u.Revoke) == 0 {
- // create or update
- if len(u.Roles) != 0 {
- out, err = sh.sec.CreateUser(u)
- } else {
- // if user passes in both password and roles, we are unsure about his/her
- // intention.
- out, created, err = sh.sec.CreateOrUpdateUser(u)
- }
-
- if err != nil {
- writeError(sh.lg, w, r, err)
- return
- }
- } else {
- // update case
- if len(u.Roles) != 0 {
- writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON contains both roles and grant/revoke"))
- return
- }
- out, err = sh.sec.UpdateUser(u)
- if err != nil {
- writeError(sh.lg, w, r, err)
- return
- }
- }
-
- if created {
- w.WriteHeader(http.StatusCreated)
- } else {
- w.WriteHeader(http.StatusOK)
- }
-
- out.Password = ""
-
- err = json.NewEncoder(w).Encode(out)
- if err != nil {
- sh.lg.Warn(
- "failed to encode a user",
- zap.String("url", r.URL.String()),
- zap.Error(err),
- )
- return
- }
- return
-
- case "DELETE":
- err := sh.sec.DeleteUser(user)
- if err != nil {
- writeError(sh.lg, w, r, err)
- return
- }
- }
-}
-
-type enabled struct {
- Enabled bool `json:"enabled"`
-}
-
-func (sh *authHandler) enableDisable(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
- return
- }
- if !hasWriteRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) {
- writeNoAuth(sh.lg, w, r)
- return
- }
- w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
- w.Header().Set("Content-Type", "application/json")
- isEnabled := sh.sec.AuthEnabled()
- switch r.Method {
- case "GET":
- jsonDict := enabled{isEnabled}
- err := json.NewEncoder(w).Encode(jsonDict)
- if err != nil {
- sh.lg.Warn(
- "failed to encode a auth state",
- zap.String("url", r.URL.String()),
- zap.Error(err),
- )
- }
-
- case "PUT":
- err := sh.sec.EnableAuth()
- if err != nil {
- writeError(sh.lg, w, r, err)
- return
- }
-
- case "DELETE":
- err := sh.sec.DisableAuth()
- if err != nil {
- writeError(sh.lg, w, r, err)
- return
- }
- }
-}
diff --git a/server/etcdserver/api/v2http/client_auth_test.go b/server/etcdserver/api/v2http/client_auth_test.go
deleted file mode 100644
index 157003f72a5..00000000000
--- a/server/etcdserver/api/v2http/client_auth_test.go
+++ /dev/null
@@ -1,913 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2http
-
-import (
- "crypto/tls"
- "crypto/x509"
- "encoding/json"
- "encoding/pem"
- "errors"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "net/url"
- "path"
- "sort"
- "strings"
- "testing"
-
- "go.etcd.io/etcd/server/v3/etcdserver/api"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2auth"
-
- "go.uber.org/zap"
-)
-
-const goodPassword = "good"
-
-func mustJSONRequest(t *testing.T, method string, p string, body string) *http.Request {
- req, err := http.NewRequest(method, path.Join(authPrefix, p), strings.NewReader(body))
- if err != nil {
- t.Fatalf("Error making JSON request: %s %s %s\n", method, p, body)
- }
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-type mockAuthStore struct {
- users map[string]*v2auth.User
- roles map[string]*v2auth.Role
- err error
- enabled bool
-}
-
-func (s *mockAuthStore) AllUsers() ([]string, error) {
- var us []string
- for u := range s.users {
- us = append(us, u)
- }
- sort.Strings(us)
- return us, s.err
-}
-func (s *mockAuthStore) GetUser(name string) (v2auth.User, error) {
- u, ok := s.users[name]
- if !ok {
- return v2auth.User{}, s.err
- }
- return *u, s.err
-}
-func (s *mockAuthStore) CreateOrUpdateUser(user v2auth.User) (out v2auth.User, created bool, err error) {
- if s.users == nil {
- out, err = s.CreateUser(user)
- return out, true, err
- }
- out, err = s.UpdateUser(user)
- return out, false, err
-}
-func (s *mockAuthStore) CreateUser(user v2auth.User) (v2auth.User, error) { return user, s.err }
-func (s *mockAuthStore) DeleteUser(name string) error { return s.err }
-func (s *mockAuthStore) UpdateUser(user v2auth.User) (v2auth.User, error) {
- return *s.users[user.User], s.err
-}
-func (s *mockAuthStore) AllRoles() ([]string, error) {
- return []string{"awesome", "guest", "root"}, s.err
-}
-func (s *mockAuthStore) GetRole(name string) (v2auth.Role, error) {
- r, ok := s.roles[name]
- if ok {
- return *r, s.err
- }
- return v2auth.Role{}, fmt.Errorf("%q does not exist (%v)", name, s.err)
-}
-func (s *mockAuthStore) CreateRole(role v2auth.Role) error { return s.err }
-func (s *mockAuthStore) DeleteRole(name string) error { return s.err }
-func (s *mockAuthStore) UpdateRole(role v2auth.Role) (v2auth.Role, error) {
- return *s.roles[role.Role], s.err
-}
-func (s *mockAuthStore) AuthEnabled() bool { return s.enabled }
-func (s *mockAuthStore) EnableAuth() error { return s.err }
-func (s *mockAuthStore) DisableAuth() error { return s.err }
-
-func (s *mockAuthStore) CheckPassword(user v2auth.User, password string) bool {
- return user.Password == password
-}
-
-func (s *mockAuthStore) HashPassword(password string) (string, error) {
- return password, nil
-}
-
-func TestAuthFlow(t *testing.T) {
- api.EnableCapability(api.AuthCapability)
- var testCases = []struct {
- req *http.Request
- store mockAuthStore
-
- wcode int
- wbody string
- }{
- {
- req: mustJSONRequest(t, "PUT", "users/alice", `{{{{{{{`),
- store: mockAuthStore{},
- wcode: http.StatusBadRequest,
- wbody: `{"message":"Invalid JSON in request body."}`,
- },
- {
- req: mustJSONRequest(t, "PUT", "users/alice", `{"user": "alice", "password": "goodpassword"}`),
- store: mockAuthStore{enabled: true},
- wcode: http.StatusUnauthorized,
- wbody: `{"message":"Insufficient credentials"}`,
- },
- // Users
- {
- req: mustJSONRequest(t, "GET", "users", ""),
- store: mockAuthStore{
- users: map[string]*v2auth.User{
- "alice": {
- User: "alice",
- Roles: []string{"alicerole", "guest"},
- Password: "wheeee",
- },
- "bob": {
- User: "bob",
- Roles: []string{"guest"},
- Password: "wheeee",
- },
- "root": {
- User: "root",
- Roles: []string{"root"},
- Password: "wheeee",
- },
- },
- roles: map[string]*v2auth.Role{
- "alicerole": {
- Role: "alicerole",
- },
- "guest": {
- Role: "guest",
- },
- "root": {
- Role: "root",
- },
- },
- },
- wcode: http.StatusOK,
- wbody: `{"users":[` +
- `{"user":"alice","roles":[` +
- `{"role":"alicerole","permissions":{"kv":{"read":null,"write":null}}},` +
- `{"role":"guest","permissions":{"kv":{"read":null,"write":null}}}` +
- `]},` +
- `{"user":"bob","roles":[{"role":"guest","permissions":{"kv":{"read":null,"write":null}}}]},` +
- `{"user":"root","roles":[{"role":"root","permissions":{"kv":{"read":null,"write":null}}}]}]}`,
- },
- {
- req: mustJSONRequest(t, "GET", "users/alice", ""),
- store: mockAuthStore{
- users: map[string]*v2auth.User{
- "alice": {
- User: "alice",
- Roles: []string{"alicerole"},
- Password: "wheeee",
- },
- },
- roles: map[string]*v2auth.Role{
- "alicerole": {
- Role: "alicerole",
- },
- },
- },
- wcode: http.StatusOK,
- wbody: `{"user":"alice","roles":[{"role":"alicerole","permissions":{"kv":{"read":null,"write":null}}}]}`,
- },
- {
- req: mustJSONRequest(t, "PUT", "users/alice", `{"user": "alice", "password": "goodpassword"}`),
- store: mockAuthStore{},
- wcode: http.StatusCreated,
- wbody: `{"user":"alice","roles":null}`,
- },
- {
- req: mustJSONRequest(t, "DELETE", "users/alice", ``),
- store: mockAuthStore{},
- wcode: http.StatusOK,
- wbody: ``,
- },
- {
- req: mustJSONRequest(t, "PUT", "users/alice", `{"user": "alice", "password": "goodpassword"}`),
- store: mockAuthStore{
- users: map[string]*v2auth.User{
- "alice": {
- User: "alice",
- Roles: []string{"alicerole", "guest"},
- Password: "wheeee",
- },
- },
- },
- wcode: http.StatusOK,
- wbody: `{"user":"alice","roles":["alicerole","guest"]}`,
- },
- {
- req: mustJSONRequest(t, "PUT", "users/alice", `{"user": "alice", "grant": ["alicerole"]}`),
- store: mockAuthStore{
- users: map[string]*v2auth.User{
- "alice": {
- User: "alice",
- Roles: []string{"alicerole", "guest"},
- Password: "wheeee",
- },
- },
- },
- wcode: http.StatusOK,
- wbody: `{"user":"alice","roles":["alicerole","guest"]}`,
- },
- {
- req: mustJSONRequest(t, "GET", "users/alice", ``),
- store: mockAuthStore{
- users: map[string]*v2auth.User{},
- err: v2auth.Error{Status: http.StatusNotFound, Errmsg: "auth: User alice doesn't exist."},
- },
- wcode: http.StatusNotFound,
- wbody: `{"message":"auth: User alice doesn't exist."}`,
- },
- {
- req: mustJSONRequest(t, "GET", "roles/manager", ""),
- store: mockAuthStore{
- roles: map[string]*v2auth.Role{
- "manager": {
- Role: "manager",
- },
- },
- },
- wcode: http.StatusOK,
- wbody: `{"role":"manager","permissions":{"kv":{"read":null,"write":null}}}`,
- },
- {
- req: mustJSONRequest(t, "DELETE", "roles/manager", ``),
- store: mockAuthStore{},
- wcode: http.StatusOK,
- wbody: ``,
- },
- {
- req: mustJSONRequest(t, "PUT", "roles/manager", `{"role":"manager","permissions":{"kv":{"read":[],"write":[]}}}`),
- store: mockAuthStore{},
- wcode: http.StatusCreated,
- wbody: `{"role":"manager","permissions":{"kv":{"read":[],"write":[]}}}`,
- },
- {
- req: mustJSONRequest(t, "PUT", "roles/manager", `{"role":"manager","revoke":{"kv":{"read":["foo"],"write":[]}}}`),
- store: mockAuthStore{
- roles: map[string]*v2auth.Role{
- "manager": {
- Role: "manager",
- },
- },
- },
- wcode: http.StatusOK,
- wbody: `{"role":"manager","permissions":{"kv":{"read":null,"write":null}}}`,
- },
- {
- req: mustJSONRequest(t, "GET", "roles", ""),
- store: mockAuthStore{
- roles: map[string]*v2auth.Role{
- "awesome": {
- Role: "awesome",
- },
- "guest": {
- Role: "guest",
- },
- "root": {
- Role: "root",
- },
- },
- },
- wcode: http.StatusOK,
- wbody: `{"roles":[{"role":"awesome","permissions":{"kv":{"read":null,"write":null}}},` +
- `{"role":"guest","permissions":{"kv":{"read":null,"write":null}}},` +
- `{"role":"root","permissions":{"kv":{"read":null,"write":null}}}]}`,
- },
- {
- req: mustJSONRequest(t, "GET", "enable", ""),
- store: mockAuthStore{
- enabled: true,
- },
- wcode: http.StatusOK,
- wbody: `{"enabled":true}`,
- },
- {
- req: mustJSONRequest(t, "PUT", "enable", ""),
- store: mockAuthStore{
- enabled: false,
- },
- wcode: http.StatusOK,
- wbody: ``,
- },
- {
- req: (func() *http.Request {
- req := mustJSONRequest(t, "DELETE", "enable", "")
- req.SetBasicAuth("root", "good")
- return req
- })(),
- store: mockAuthStore{
- enabled: true,
- users: map[string]*v2auth.User{
- "root": {
- User: "root",
- Password: goodPassword,
- Roles: []string{"root"},
- },
- },
- roles: map[string]*v2auth.Role{
- "root": {
- Role: "root",
- },
- },
- },
- wcode: http.StatusOK,
- wbody: ``,
- },
- {
- req: (func() *http.Request {
- req := mustJSONRequest(t, "DELETE", "enable", "")
- req.SetBasicAuth("root", "bad")
- return req
- })(),
- store: mockAuthStore{
- enabled: true,
- users: map[string]*v2auth.User{
- "root": {
- User: "root",
- Password: goodPassword,
- Roles: []string{"root"},
- },
- },
- roles: map[string]*v2auth.Role{
- "root": {
- Role: "guest",
- },
- },
- },
- wcode: http.StatusUnauthorized,
- wbody: `{"message":"Insufficient credentials"}`,
- },
- }
-
- for i, tt := range testCases {
- mux := http.NewServeMux()
- h := &authHandler{
- lg: zap.NewExample(),
- sec: &tt.store,
- cluster: &fakeCluster{id: 1},
- }
- handleAuth(mux, h)
- rw := httptest.NewRecorder()
- mux.ServeHTTP(rw, tt.req)
- if rw.Code != tt.wcode {
- t.Errorf("#%d: got code=%d, want %d", i, rw.Code, tt.wcode)
- }
- g := rw.Body.String()
- g = strings.TrimSpace(g)
- if g != tt.wbody {
- t.Errorf("#%d: got body=%s, want %s", i, g, tt.wbody)
- }
- }
-}
-
-func TestGetUserGrantedWithNonexistingRole(t *testing.T) {
- sh := &authHandler{
- sec: &mockAuthStore{
- users: map[string]*v2auth.User{
- "root": {
- User: "root",
- Roles: []string{"root", "foo"},
- },
- },
- roles: map[string]*v2auth.Role{
- "root": {
- Role: "root",
- },
- },
- },
- cluster: &fakeCluster{id: 1},
- }
- srv := httptest.NewServer(http.HandlerFunc(sh.baseUsers))
- defer srv.Close()
-
- req, err := http.NewRequest("GET", "", nil)
- if err != nil {
- t.Fatal(err)
- }
- req.URL, err = url.Parse(srv.URL)
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Set("Content-Type", "application/json")
-
- cli := http.DefaultClient
- resp, err := cli.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
-
- var uc usersCollections
- if err := json.NewDecoder(resp.Body).Decode(&uc); err != nil {
- t.Fatal(err)
- }
- if len(uc.Users) != 1 {
- t.Fatalf("expected 1 user, got %+v", uc.Users)
- }
- if uc.Users[0].User != "root" {
- t.Fatalf("expected 'root', got %q", uc.Users[0].User)
- }
- if len(uc.Users[0].Roles) != 1 {
- t.Fatalf("expected 1 role, got %+v", uc.Users[0].Roles)
- }
- if uc.Users[0].Roles[0].Role != "root" {
- t.Fatalf("expected 'root', got %q", uc.Users[0].Roles[0].Role)
- }
-}
-
-func mustAuthRequest(username, password string) *http.Request {
- req, err := http.NewRequest(http.MethodGet, "path", strings.NewReader(""))
- if err != nil {
- panic("Cannot make auth request: " + err.Error())
- }
- req.SetBasicAuth(username, password)
- return req
-}
-
-func unauthedRequest() *http.Request {
- req, err := http.NewRequest(http.MethodGet, "path", strings.NewReader(""))
- if err != nil {
- panic("Cannot make request: " + err.Error())
- }
- return req
-}
-
-func tlsAuthedRequest(req *http.Request, certname string) *http.Request {
- bytes, err := ioutil.ReadFile(fmt.Sprintf("testdata/%s.pem", certname))
- if err != nil {
- panic(err)
- }
-
- block, _ := pem.Decode(bytes)
- cert, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- panic(err)
- }
-
- req.TLS = &tls.ConnectionState{
- VerifiedChains: [][]*x509.Certificate{{cert}},
- }
- return req
-}
-
-func TestPrefixAccess(t *testing.T) {
- var table = []struct {
- key string
- req *http.Request
- store *mockAuthStore
- hasRoot bool
- hasKeyPrefixAccess bool
- hasRecursiveAccess bool
- }{
- {
- key: "/foo",
- req: mustAuthRequest("root", "good"),
- store: &mockAuthStore{
- users: map[string]*v2auth.User{
- "root": {
- User: "root",
- Password: goodPassword,
- Roles: []string{"root"},
- },
- },
- roles: map[string]*v2auth.Role{
- "root": {
- Role: "root",
- },
- },
- enabled: true,
- },
- hasRoot: true,
- hasKeyPrefixAccess: true,
- hasRecursiveAccess: true,
- },
- {
- key: "/foo",
- req: mustAuthRequest("user", "good"),
- store: &mockAuthStore{
- users: map[string]*v2auth.User{
- "user": {
- User: "user",
- Password: goodPassword,
- Roles: []string{"foorole"},
- },
- },
- roles: map[string]*v2auth.Role{
- "foorole": {
- Role: "foorole",
- Permissions: v2auth.Permissions{
- KV: v2auth.RWPermission{
- Read: []string{"/foo"},
- Write: []string{"/foo"},
- },
- },
- },
- },
- enabled: true,
- },
- hasRoot: false,
- hasKeyPrefixAccess: true,
- hasRecursiveAccess: false,
- },
- {
- key: "/foo",
- req: mustAuthRequest("user", "good"),
- store: &mockAuthStore{
- users: map[string]*v2auth.User{
- "user": {
- User: "user",
- Password: goodPassword,
- Roles: []string{"foorole"},
- },
- },
- roles: map[string]*v2auth.Role{
- "foorole": {
- Role: "foorole",
- Permissions: v2auth.Permissions{
- KV: v2auth.RWPermission{
- Read: []string{"/foo*"},
- Write: []string{"/foo*"},
- },
- },
- },
- },
- enabled: true,
- },
- hasRoot: false,
- hasKeyPrefixAccess: true,
- hasRecursiveAccess: true,
- },
- {
- key: "/foo",
- req: mustAuthRequest("user", "bad"),
- store: &mockAuthStore{
- users: map[string]*v2auth.User{
- "user": {
- User: "user",
- Password: goodPassword,
- Roles: []string{"foorole"},
- },
- },
- roles: map[string]*v2auth.Role{
- "foorole": {
- Role: "foorole",
- Permissions: v2auth.Permissions{
- KV: v2auth.RWPermission{
- Read: []string{"/foo*"},
- Write: []string{"/foo*"},
- },
- },
- },
- },
- enabled: true,
- },
- hasRoot: false,
- hasKeyPrefixAccess: false,
- hasRecursiveAccess: false,
- },
- {
- key: "/foo",
- req: mustAuthRequest("user", "good"),
- store: &mockAuthStore{
- users: map[string]*v2auth.User{},
- err: errors.New("Not the user"),
- enabled: true,
- },
- hasRoot: false,
- hasKeyPrefixAccess: false,
- hasRecursiveAccess: false,
- },
- {
- key: "/foo",
- req: mustJSONRequest(t, "GET", "somepath", ""),
- store: &mockAuthStore{
- users: map[string]*v2auth.User{
- "user": {
- User: "user",
- Password: goodPassword,
- Roles: []string{"foorole"},
- },
- },
- roles: map[string]*v2auth.Role{
- "guest": {
- Role: "guest",
- Permissions: v2auth.Permissions{
- KV: v2auth.RWPermission{
- Read: []string{"/foo*"},
- Write: []string{"/foo*"},
- },
- },
- },
- },
- enabled: true,
- },
- hasRoot: false,
- hasKeyPrefixAccess: true,
- hasRecursiveAccess: true,
- },
- {
- key: "/bar",
- req: mustJSONRequest(t, "GET", "somepath", ""),
- store: &mockAuthStore{
- users: map[string]*v2auth.User{
- "user": {
- User: "user",
- Password: goodPassword,
- Roles: []string{"foorole"},
- },
- },
- roles: map[string]*v2auth.Role{
- "guest": {
- Role: "guest",
- Permissions: v2auth.Permissions{
- KV: v2auth.RWPermission{
- Read: []string{"/foo*"},
- Write: []string{"/foo*"},
- },
- },
- },
- },
- enabled: true,
- },
- hasRoot: false,
- hasKeyPrefixAccess: false,
- hasRecursiveAccess: false,
- },
- // check access for multiple roles
- {
- key: "/foo",
- req: mustAuthRequest("user", "good"),
- store: &mockAuthStore{
- users: map[string]*v2auth.User{
- "user": {
- User: "user",
- Password: goodPassword,
- Roles: []string{"role1", "role2"},
- },
- },
- roles: map[string]*v2auth.Role{
- "role1": {
- Role: "role1",
- },
- "role2": {
- Role: "role2",
- Permissions: v2auth.Permissions{
- KV: v2auth.RWPermission{
- Read: []string{"/foo"},
- Write: []string{"/foo"},
- },
- },
- },
- },
- enabled: true,
- },
- hasRoot: false,
- hasKeyPrefixAccess: true,
- hasRecursiveAccess: false,
- },
- {
- key: "/foo",
- req: (func() *http.Request {
- req := mustJSONRequest(t, "GET", "somepath", "")
- req.Header.Set("Authorization", "malformedencoding")
- return req
- })(),
- store: &mockAuthStore{
- enabled: true,
- users: map[string]*v2auth.User{
- "root": {
- User: "root",
- Password: goodPassword,
- Roles: []string{"root"},
- },
- },
- roles: map[string]*v2auth.Role{
- "guest": {
- Role: "guest",
- Permissions: v2auth.Permissions{
- KV: v2auth.RWPermission{
- Read: []string{"/foo*"},
- Write: []string{"/foo*"},
- },
- },
- },
- },
- },
- hasRoot: false,
- hasKeyPrefixAccess: false,
- hasRecursiveAccess: false,
- },
- { // guest access in non-TLS mode
- key: "/foo",
- req: (func() *http.Request {
- return mustJSONRequest(t, "GET", "somepath", "")
- })(),
- store: &mockAuthStore{
- enabled: true,
- users: map[string]*v2auth.User{
- "root": {
- User: "root",
- Password: goodPassword,
- Roles: []string{"root"},
- },
- },
- roles: map[string]*v2auth.Role{
- "guest": {
- Role: "guest",
- Permissions: v2auth.Permissions{
- KV: v2auth.RWPermission{
- Read: []string{"/foo*"},
- Write: []string{"/foo*"},
- },
- },
- },
- },
- },
- hasRoot: false,
- hasKeyPrefixAccess: true,
- hasRecursiveAccess: true,
- },
- }
-
- for i, tt := range table {
- if tt.hasRoot != hasRootAccess(zap.NewExample(), tt.store, tt.req, true) {
- t.Errorf("#%d: hasRoot doesn't match (expected %v)", i, tt.hasRoot)
- }
- if tt.hasKeyPrefixAccess != hasKeyPrefixAccess(zap.NewExample(), tt.store, tt.req, tt.key, false, true) {
- t.Errorf("#%d: hasKeyPrefixAccess doesn't match (expected %v)", i, tt.hasRoot)
- }
- if tt.hasRecursiveAccess != hasKeyPrefixAccess(zap.NewExample(), tt.store, tt.req, tt.key, true, true) {
- t.Errorf("#%d: hasRecursiveAccess doesn't match (expected %v)", i, tt.hasRoot)
- }
- }
-}
-
-func TestUserFromClientCertificate(t *testing.T) {
- witherror := &mockAuthStore{
- users: map[string]*v2auth.User{
- "user": {
- User: "user",
- Roles: []string{"root"},
- Password: "password",
- },
- "basicauth": {
- User: "basicauth",
- Roles: []string{"root"},
- Password: "password",
- },
- },
- roles: map[string]*v2auth.Role{
- "root": {
- Role: "root",
- },
- },
- err: errors.New(""),
- }
-
- noerror := &mockAuthStore{
- users: map[string]*v2auth.User{
- "user": {
- User: "user",
- Roles: []string{"root"},
- Password: "password",
- },
- "basicauth": {
- User: "basicauth",
- Roles: []string{"root"},
- Password: "password",
- },
- },
- roles: map[string]*v2auth.Role{
- "root": {
- Role: "root",
- },
- },
- }
-
- var table = []struct {
- req *http.Request
- userExists bool
- store v2auth.Store
- username string
- }{
- {
- // non tls request
- req: unauthedRequest(),
- userExists: false,
- store: witherror,
- },
- {
- // cert with cn of existing user
- req: tlsAuthedRequest(unauthedRequest(), "user"),
- userExists: true,
- username: "user",
- store: noerror,
- },
- {
- // cert with cn of non-existing user
- req: tlsAuthedRequest(unauthedRequest(), "otheruser"),
- userExists: false,
- store: witherror,
- },
- }
-
- for i, tt := range table {
- user := userFromClientCertificate(zap.NewExample(), tt.store, tt.req)
- userExists := user != nil
-
- if tt.userExists != userExists {
- t.Errorf("#%d: userFromClientCertificate doesn't match (expected %v)", i, tt.userExists)
- }
- if user != nil && (tt.username != user.User) {
- t.Errorf("#%d: userFromClientCertificate username doesn't match (expected %s, got %s)", i, tt.username, user.User)
- }
- }
-}
-
-func TestUserFromBasicAuth(t *testing.T) {
- sec := &mockAuthStore{
- users: map[string]*v2auth.User{
- "user": {
- User: "user",
- Roles: []string{"root"},
- Password: "password",
- },
- },
- roles: map[string]*v2auth.Role{
- "root": {
- Role: "root",
- },
- },
- }
-
- var table = []struct {
- username string
- req *http.Request
- userExists bool
- }{
- {
- // valid user, valid pass
- username: "user",
- req: mustAuthRequest("user", "password"),
- userExists: true,
- },
- {
- // valid user, bad pass
- username: "user",
- req: mustAuthRequest("user", "badpass"),
- userExists: false,
- },
- {
- // valid user, no pass
- username: "user",
- req: mustAuthRequest("user", ""),
- userExists: false,
- },
- {
- // missing user
- username: "missing",
- req: mustAuthRequest("missing", "badpass"),
- userExists: false,
- },
- {
- // no basic auth
- req: unauthedRequest(),
- userExists: false,
- },
- }
-
- for i, tt := range table {
- user := userFromBasicAuth(zap.NewExample(), sec, tt.req)
- userExists := user != nil
-
- if tt.userExists != userExists {
- t.Errorf("#%d: userFromBasicAuth doesn't match (expected %v)", i, tt.userExists)
- }
- if user != nil && (tt.username != user.User) {
- t.Errorf("#%d: userFromBasicAuth username doesn't match (expected %s, got %s)", i, tt.username, user.User)
- }
- }
-}
diff --git a/server/etcdserver/api/v2http/client_test.go b/server/etcdserver/api/v2http/client_test.go
deleted file mode 100644
index 49d5763b1d7..00000000000
--- a/server/etcdserver/api/v2http/client_test.go
+++ /dev/null
@@ -1,2080 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2http
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "net/url"
- "path"
- "reflect"
- "strings"
- "testing"
- "time"
-
- "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/server/v3/etcdserver"
- "go.etcd.io/etcd/server/v3/etcdserver/api"
- "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
-
- "github.com/coreos/go-semver/semver"
- "github.com/jonboulle/clockwork"
- "go.uber.org/zap"
-)
-
-func mustMarshalEvent(t *testing.T, ev *v2store.Event) string {
- b := new(bytes.Buffer)
- if err := json.NewEncoder(b).Encode(ev); err != nil {
- t.Fatalf("error marshalling event %#v: %v", ev, err)
- }
- return b.String()
-}
-
-// mustNewForm takes a set of Values and constructs a PUT *http.Request,
-// with a URL constructed from appending the given path to the standard keysPrefix
-func mustNewForm(t *testing.T, p string, vals url.Values) *http.Request {
- u := testutil.MustNewURL(t, path.Join(keysPrefix, p))
- req, err := http.NewRequest("PUT", u.String(), strings.NewReader(vals.Encode()))
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
- if err != nil {
- t.Fatalf("error creating new request: %v", err)
- }
- return req
-}
-
-// mustNewPostForm takes a set of Values and constructs a POST *http.Request,
-// with a URL constructed from appending the given path to the standard keysPrefix
-func mustNewPostForm(t *testing.T, p string, vals url.Values) *http.Request {
- u := testutil.MustNewURL(t, path.Join(keysPrefix, p))
- req, err := http.NewRequest("POST", u.String(), strings.NewReader(vals.Encode()))
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
- if err != nil {
- t.Fatalf("error creating new request: %v", err)
- }
- return req
-}
-
-// mustNewRequest takes a path, appends it to the standard keysPrefix, and constructs
-// a GET *http.Request referencing the resulting URL
-func mustNewRequest(t *testing.T, p string) *http.Request {
- return mustNewMethodRequest(t, "GET", p)
-}
-
-func mustNewMethodRequest(t *testing.T, m, p string) *http.Request {
- return &http.Request{
- Method: m,
- URL: testutil.MustNewURL(t, path.Join(keysPrefix, p)),
- }
-}
-
-type fakeServer struct {
- dummyRaftTimer
- dummyStats
-}
-
-func (s *fakeServer) Leader() types.ID { return types.ID(1) }
-func (s *fakeServer) Alarms() []*etcdserverpb.AlarmMember { return nil }
-func (s *fakeServer) LeaderChangedNotify() <-chan struct{} { return nil }
-func (s *fakeServer) Cluster() api.Cluster { return nil }
-func (s *fakeServer) ClusterVersion() *semver.Version { return nil }
-func (s *fakeServer) RaftHandler() http.Handler { return nil }
-func (s *fakeServer) Do(ctx context.Context, r etcdserverpb.Request) (rr etcdserver.Response, err error) {
- return
-}
-func (s *fakeServer) ClientCertAuthEnabled() bool { return false }
-
-type serverRecorder struct {
- fakeServer
- actions []action
-}
-
-func (s *serverRecorder) Do(_ context.Context, r etcdserverpb.Request) (etcdserver.Response, error) {
- s.actions = append(s.actions, action{name: "Do", params: []interface{}{r}})
- return etcdserver.Response{}, nil
-}
-func (s *serverRecorder) Process(_ context.Context, m raftpb.Message) error {
- s.actions = append(s.actions, action{name: "Process", params: []interface{}{m}})
- return nil
-}
-func (s *serverRecorder) AddMember(_ context.Context, m membership.Member) ([]*membership.Member, error) {
- s.actions = append(s.actions, action{name: "AddMember", params: []interface{}{m}})
- return nil, nil
-}
-func (s *serverRecorder) RemoveMember(_ context.Context, id uint64) ([]*membership.Member, error) {
- s.actions = append(s.actions, action{name: "RemoveMember", params: []interface{}{id}})
- return nil, nil
-}
-
-func (s *serverRecorder) UpdateMember(_ context.Context, m membership.Member) ([]*membership.Member, error) {
- s.actions = append(s.actions, action{name: "UpdateMember", params: []interface{}{m}})
- return nil, nil
-}
-
-func (s *serverRecorder) PromoteMember(_ context.Context, id uint64) ([]*membership.Member, error) {
- s.actions = append(s.actions, action{name: "PromoteMember", params: []interface{}{id}})
- return nil, nil
-}
-
-type action struct {
- name string
- params []interface{}
-}
-
-// flushingRecorder provides a channel to allow users to block until the Recorder is Flushed()
-type flushingRecorder struct {
- *httptest.ResponseRecorder
- ch chan struct{}
-}
-
-func (fr *flushingRecorder) Flush() {
- fr.ResponseRecorder.Flush()
- fr.ch <- struct{}{}
-}
-
-// resServer implements the etcd.Server interface for testing.
-// It returns the given response from any Do calls, and nil error
-type resServer struct {
- fakeServer
- res etcdserver.Response
-}
-
-func (rs *resServer) Do(_ context.Context, _ etcdserverpb.Request) (etcdserver.Response, error) {
- return rs.res, nil
-}
-func (rs *resServer) Process(_ context.Context, _ raftpb.Message) error { return nil }
-func (rs *resServer) AddMember(_ context.Context, _ membership.Member) ([]*membership.Member, error) {
- return nil, nil
-}
-func (rs *resServer) RemoveMember(_ context.Context, _ uint64) ([]*membership.Member, error) {
- return nil, nil
-}
-func (rs *resServer) UpdateMember(_ context.Context, _ membership.Member) ([]*membership.Member, error) {
- return nil, nil
-}
-func (rs *resServer) PromoteMember(_ context.Context, _ uint64) ([]*membership.Member, error) {
- return nil, nil
-}
-
-func boolp(b bool) *bool { return &b }
-
-type dummyRaftTimer struct{}
-
-func (drt dummyRaftTimer) Index() uint64 { return uint64(100) }
-func (drt dummyRaftTimer) Term() uint64 { return uint64(5) }
-
-type dummyWatcher struct {
- echan chan *v2store.Event
- sidx uint64
-}
-
-func (w *dummyWatcher) EventChan() chan *v2store.Event {
- return w.echan
-}
-func (w *dummyWatcher) StartIndex() uint64 { return w.sidx }
-func (w *dummyWatcher) Remove() {}
-
-func TestBadRefreshRequest(t *testing.T) {
- tests := []struct {
- in *http.Request
- wcode int
- }{
- {
- mustNewRequest(t, "foo?refresh=true&value=test"),
- v2error.EcodeRefreshValue,
- },
- {
- mustNewRequest(t, "foo?refresh=true&value=10"),
- v2error.EcodeRefreshValue,
- },
- {
- mustNewRequest(t, "foo?refresh=true"),
- v2error.EcodeRefreshTTLRequired,
- },
- {
- mustNewRequest(t, "foo?refresh=true&ttl="),
- v2error.EcodeRefreshTTLRequired,
- },
- }
- for i, tt := range tests {
- got, _, err := parseKeyRequest(tt.in, clockwork.NewFakeClock())
- if err == nil {
- t.Errorf("#%d: unexpected nil error!", i)
- continue
- }
- ee, ok := err.(*v2error.Error)
- if !ok {
- t.Errorf("#%d: err is not etcd.Error!", i)
- continue
- }
- if ee.ErrorCode != tt.wcode {
- t.Errorf("#%d: code=%d, want %v", i, ee.ErrorCode, tt.wcode)
- t.Logf("cause: %#v", ee.Cause)
- }
- if !reflect.DeepEqual(got, etcdserverpb.Request{}) {
- t.Errorf("#%d: unexpected non-empty Request: %#v", i, got)
- }
- }
-}
-
-func TestBadParseRequest(t *testing.T) {
- tests := []struct {
- in *http.Request
- wcode int
- }{
- {
- // parseForm failure
- &http.Request{
- Body: nil,
- Method: "PUT",
- },
- v2error.EcodeInvalidForm,
- },
- {
- // bad key prefix
- &http.Request{
- URL: testutil.MustNewURL(t, "/badprefix/"),
- },
- v2error.EcodeInvalidForm,
- },
- // bad values for prevIndex, waitIndex, ttl
- {
- mustNewForm(t, "foo", url.Values{"prevIndex": []string{"garbage"}}),
- v2error.EcodeIndexNaN,
- },
- {
- mustNewForm(t, "foo", url.Values{"prevIndex": []string{"1.5"}}),
- v2error.EcodeIndexNaN,
- },
- {
- mustNewForm(t, "foo", url.Values{"prevIndex": []string{"-1"}}),
- v2error.EcodeIndexNaN,
- },
- {
- mustNewForm(t, "foo", url.Values{"waitIndex": []string{"garbage"}}),
- v2error.EcodeIndexNaN,
- },
- {
- mustNewForm(t, "foo", url.Values{"waitIndex": []string{"??"}}),
- v2error.EcodeIndexNaN,
- },
- {
- mustNewForm(t, "foo", url.Values{"ttl": []string{"-1"}}),
- v2error.EcodeTTLNaN,
- },
- // bad values for recursive, sorted, wait, prevExist, dir, stream
- {
- mustNewForm(t, "foo", url.Values{"recursive": []string{"hahaha"}}),
- v2error.EcodeInvalidField,
- },
- {
- mustNewForm(t, "foo", url.Values{"recursive": []string{"1234"}}),
- v2error.EcodeInvalidField,
- },
- {
- mustNewForm(t, "foo", url.Values{"recursive": []string{"?"}}),
- v2error.EcodeInvalidField,
- },
- {
- mustNewForm(t, "foo", url.Values{"sorted": []string{"?"}}),
- v2error.EcodeInvalidField,
- },
- {
- mustNewForm(t, "foo", url.Values{"sorted": []string{"x"}}),
- v2error.EcodeInvalidField,
- },
- {
- mustNewForm(t, "foo", url.Values{"wait": []string{"?!"}}),
- v2error.EcodeInvalidField,
- },
- {
- mustNewForm(t, "foo", url.Values{"wait": []string{"yes"}}),
- v2error.EcodeInvalidField,
- },
- {
- mustNewForm(t, "foo", url.Values{"prevExist": []string{"yes"}}),
- v2error.EcodeInvalidField,
- },
- {
- mustNewForm(t, "foo", url.Values{"prevExist": []string{"#2"}}),
- v2error.EcodeInvalidField,
- },
- {
- mustNewForm(t, "foo", url.Values{"dir": []string{"no"}}),
- v2error.EcodeInvalidField,
- },
- {
- mustNewForm(t, "foo", url.Values{"dir": []string{"file"}}),
- v2error.EcodeInvalidField,
- },
- {
- mustNewForm(t, "foo", url.Values{"quorum": []string{"no"}}),
- v2error.EcodeInvalidField,
- },
- {
- mustNewForm(t, "foo", url.Values{"quorum": []string{"file"}}),
- v2error.EcodeInvalidField,
- },
- {
- mustNewForm(t, "foo", url.Values{"stream": []string{"zzz"}}),
- v2error.EcodeInvalidField,
- },
- {
- mustNewForm(t, "foo", url.Values{"stream": []string{"something"}}),
- v2error.EcodeInvalidField,
- },
- // prevValue cannot be empty
- {
- mustNewForm(t, "foo", url.Values{"prevValue": []string{""}}),
- v2error.EcodePrevValueRequired,
- },
- // wait is only valid with GET requests
- {
- mustNewMethodRequest(t, "HEAD", "foo?wait=true"),
- v2error.EcodeInvalidField,
- },
- // query values are considered
- {
- mustNewRequest(t, "foo?prevExist=wrong"),
- v2error.EcodeInvalidField,
- },
- {
- mustNewRequest(t, "foo?ttl=wrong"),
- v2error.EcodeTTLNaN,
- },
- // but body takes precedence if both are specified
- {
- mustNewForm(
- t,
- "foo?ttl=12",
- url.Values{"ttl": []string{"garbage"}},
- ),
- v2error.EcodeTTLNaN,
- },
- {
- mustNewForm(
- t,
- "foo?prevExist=false",
- url.Values{"prevExist": []string{"yes"}},
- ),
- v2error.EcodeInvalidField,
- },
- }
- for i, tt := range tests {
- got, _, err := parseKeyRequest(tt.in, clockwork.NewFakeClock())
- if err == nil {
- t.Errorf("#%d: unexpected nil error!", i)
- continue
- }
- ee, ok := err.(*v2error.Error)
- if !ok {
- t.Errorf("#%d: err is not etcd.Error!", i)
- continue
- }
- if ee.ErrorCode != tt.wcode {
- t.Errorf("#%d: code=%d, want %v", i, ee.ErrorCode, tt.wcode)
- t.Logf("cause: %#v", ee.Cause)
- }
- if !reflect.DeepEqual(got, etcdserverpb.Request{}) {
- t.Errorf("#%d: unexpected non-empty Request: %#v", i, got)
- }
- }
-}
-
-func TestGoodParseRequest(t *testing.T) {
- fc := clockwork.NewFakeClock()
- fc.Advance(1111)
- tests := []struct {
- in *http.Request
- w etcdserverpb.Request
- noValue bool
- }{
- {
- // good prefix, all other values default
- mustNewRequest(t, "foo"),
- etcdserverpb.Request{
- Method: "GET",
- Path: path.Join(etcdserver.StoreKeysPrefix, "/foo"),
- },
- false,
- },
- {
- // value specified
- mustNewForm(
- t,
- "foo",
- url.Values{"value": []string{"some_value"}},
- ),
- etcdserverpb.Request{
- Method: "PUT",
- Val: "some_value",
- Path: path.Join(etcdserver.StoreKeysPrefix, "/foo"),
- },
- false,
- },
- {
- // prevIndex specified
- mustNewForm(
- t,
- "foo",
- url.Values{"prevIndex": []string{"98765"}},
- ),
- etcdserverpb.Request{
- Method: "PUT",
- PrevIndex: 98765,
- Path: path.Join(etcdserver.StoreKeysPrefix, "/foo"),
- },
- false,
- },
- {
- // recursive specified
- mustNewForm(
- t,
- "foo",
- url.Values{"recursive": []string{"true"}},
- ),
- etcdserverpb.Request{
- Method: "PUT",
- Recursive: true,
- Path: path.Join(etcdserver.StoreKeysPrefix, "/foo"),
- },
- false,
- },
- {
- // sorted specified
- mustNewForm(
- t,
- "foo",
- url.Values{"sorted": []string{"true"}},
- ),
- etcdserverpb.Request{
- Method: "PUT",
- Sorted: true,
- Path: path.Join(etcdserver.StoreKeysPrefix, "/foo"),
- },
- false,
- },
- {
- // quorum specified
- mustNewForm(
- t,
- "foo",
- url.Values{"quorum": []string{"true"}},
- ),
- etcdserverpb.Request{
- Method: "PUT",
- Quorum: true,
- Path: path.Join(etcdserver.StoreKeysPrefix, "/foo"),
- },
- false,
- },
- {
- // wait specified
- mustNewRequest(t, "foo?wait=true"),
- etcdserverpb.Request{
- Method: "GET",
- Wait: true,
- Path: path.Join(etcdserver.StoreKeysPrefix, "/foo"),
- },
- false,
- },
- {
- // empty TTL specified
- mustNewRequest(t, "foo?ttl="),
- etcdserverpb.Request{
- Method: "GET",
- Path: path.Join(etcdserver.StoreKeysPrefix, "/foo"),
- Expiration: 0,
- },
- false,
- },
- {
- // non-empty TTL specified
- mustNewRequest(t, "foo?ttl=5678"),
- etcdserverpb.Request{
- Method: "GET",
- Path: path.Join(etcdserver.StoreKeysPrefix, "/foo"),
- Expiration: fc.Now().Add(5678 * time.Second).UnixNano(),
- },
- false,
- },
- {
- // zero TTL specified
- mustNewRequest(t, "foo?ttl=0"),
- etcdserverpb.Request{
- Method: "GET",
- Path: path.Join(etcdserver.StoreKeysPrefix, "/foo"),
- Expiration: fc.Now().UnixNano(),
- },
- false,
- },
- {
- // dir specified
- mustNewRequest(t, "foo?dir=true"),
- etcdserverpb.Request{
- Method: "GET",
- Dir: true,
- Path: path.Join(etcdserver.StoreKeysPrefix, "/foo"),
- },
- false,
- },
- {
- // dir specified negatively
- mustNewRequest(t, "foo?dir=false"),
- etcdserverpb.Request{
- Method: "GET",
- Dir: false,
- Path: path.Join(etcdserver.StoreKeysPrefix, "/foo"),
- },
- false,
- },
- {
- // prevExist should be non-null if specified
- mustNewForm(
- t,
- "foo",
- url.Values{"prevExist": []string{"true"}},
- ),
- etcdserverpb.Request{
- Method: "PUT",
- PrevExist: boolp(true),
- Path: path.Join(etcdserver.StoreKeysPrefix, "/foo"),
- },
- false,
- },
- {
- // prevExist should be non-null if specified
- mustNewForm(
- t,
- "foo",
- url.Values{"prevExist": []string{"false"}},
- ),
- etcdserverpb.Request{
- Method: "PUT",
- PrevExist: boolp(false),
- Path: path.Join(etcdserver.StoreKeysPrefix, "/foo"),
- },
- false,
- },
- // mix various fields
- {
- mustNewForm(
- t,
- "foo",
- url.Values{
- "value": []string{"some value"},
- "prevExist": []string{"true"},
- "prevValue": []string{"previous value"},
- },
- ),
- etcdserverpb.Request{
- Method: "PUT",
- PrevExist: boolp(true),
- PrevValue: "previous value",
- Val: "some value",
- Path: path.Join(etcdserver.StoreKeysPrefix, "/foo"),
- },
- false,
- },
- // query parameters should be used if given
- {
- mustNewForm(
- t,
- "foo?prevValue=woof",
- url.Values{},
- ),
- etcdserverpb.Request{
- Method: "PUT",
- PrevValue: "woof",
- Path: path.Join(etcdserver.StoreKeysPrefix, "/foo"),
- },
- false,
- },
- // but form values should take precedence over query parameters
- {
- mustNewForm(
- t,
- "foo?prevValue=woof",
- url.Values{
- "prevValue": []string{"miaow"},
- },
- ),
- etcdserverpb.Request{
- Method: "PUT",
- PrevValue: "miaow",
- Path: path.Join(etcdserver.StoreKeysPrefix, "/foo"),
- },
- false,
- },
- {
- // noValueOnSuccess specified
- mustNewForm(
- t,
- "foo",
- url.Values{"noValueOnSuccess": []string{"true"}},
- ),
- etcdserverpb.Request{
- Method: "PUT",
- Path: path.Join(etcdserver.StoreKeysPrefix, "/foo"),
- },
- true,
- },
- }
-
- for i, tt := range tests {
- got, noValueOnSuccess, err := parseKeyRequest(tt.in, fc)
- if err != nil {
- t.Errorf("#%d: err = %v, want %v", i, err, nil)
- }
-
- if noValueOnSuccess != tt.noValue {
- t.Errorf("#%d: noValue=%t, want %t", i, noValueOnSuccess, tt.noValue)
- }
-
- if !reflect.DeepEqual(got, tt.w) {
- t.Errorf("#%d: request=%#v, want %#v", i, got, tt.w)
- }
- }
-}
-
-func TestServeMembers(t *testing.T) {
- memb1 := membership.Member{ID: 12, Attributes: membership.Attributes{ClientURLs: []string{"http://localhost:8080"}}}
- memb2 := membership.Member{ID: 13, Attributes: membership.Attributes{ClientURLs: []string{"http://localhost:8081"}}}
- cluster := &fakeCluster{
- id: 1,
- members: map[uint64]*membership.Member{1: &memb1, 2: &memb2},
- }
- h := &membersHandler{
- lg: zap.NewExample(),
- server: &serverRecorder{},
- clock: clockwork.NewFakeClock(),
- cluster: cluster,
- }
-
- wmc := string(`{"members":[{"id":"c","name":"","peerURLs":[],"clientURLs":["http://localhost:8080"]},{"id":"d","name":"","peerURLs":[],"clientURLs":["http://localhost:8081"]}]}`)
-
- tests := []struct {
- path string
- wcode int
- wct string
- wbody string
- }{
- {membersPrefix, http.StatusOK, "application/json", wmc + "\n"},
- {membersPrefix + "/", http.StatusOK, "application/json", wmc + "\n"},
- {path.Join(membersPrefix, "100"), http.StatusNotFound, "application/json", `{"message":"Not found"}`},
- {path.Join(membersPrefix, "foobar"), http.StatusNotFound, "application/json", `{"message":"Not found"}`},
- }
-
- for i, tt := range tests {
- req, err := http.NewRequest("GET", testutil.MustNewURL(t, tt.path).String(), nil)
- if err != nil {
- t.Fatal(err)
- }
- rw := httptest.NewRecorder()
- h.ServeHTTP(rw, req)
-
- if rw.Code != tt.wcode {
- t.Errorf("#%d: code=%d, want %d", i, rw.Code, tt.wcode)
- }
- if gct := rw.Header().Get("Content-Type"); gct != tt.wct {
- t.Errorf("#%d: content-type = %s, want %s", i, gct, tt.wct)
- }
- gcid := rw.Header().Get("X-Etcd-Cluster-ID")
- wcid := cluster.ID().String()
- if gcid != wcid {
- t.Errorf("#%d: cid = %s, want %s", i, gcid, wcid)
- }
- if rw.Body.String() != tt.wbody {
- t.Errorf("#%d: body = %q, want %q", i, rw.Body.String(), tt.wbody)
- }
- }
-}
-
-// TODO: consolidate **ALL** fake server implementations and add no leader test case.
-func TestServeLeader(t *testing.T) {
- memb1 := membership.Member{ID: 1, Attributes: membership.Attributes{ClientURLs: []string{"http://localhost:8080"}}}
- memb2 := membership.Member{ID: 2, Attributes: membership.Attributes{ClientURLs: []string{"http://localhost:8081"}}}
- cluster := &fakeCluster{
- id: 1,
- members: map[uint64]*membership.Member{1: &memb1, 2: &memb2},
- }
- h := &membersHandler{
- lg: zap.NewExample(),
- server: &serverRecorder{},
- clock: clockwork.NewFakeClock(),
- cluster: cluster,
- }
-
- wmc := string(`{"id":"1","name":"","peerURLs":[],"clientURLs":["http://localhost:8080"]}`)
-
- tests := []struct {
- path string
- wcode int
- wct string
- wbody string
- }{
- {membersPrefix + "leader", http.StatusOK, "application/json", wmc + "\n"},
- // TODO: add no leader case
- }
-
- for i, tt := range tests {
- req, err := http.NewRequest("GET", testutil.MustNewURL(t, tt.path).String(), nil)
- if err != nil {
- t.Fatal(err)
- }
- rw := httptest.NewRecorder()
- h.ServeHTTP(rw, req)
-
- if rw.Code != tt.wcode {
- t.Errorf("#%d: code=%d, want %d", i, rw.Code, tt.wcode)
- }
- if gct := rw.Header().Get("Content-Type"); gct != tt.wct {
- t.Errorf("#%d: content-type = %s, want %s", i, gct, tt.wct)
- }
- gcid := rw.Header().Get("X-Etcd-Cluster-ID")
- wcid := cluster.ID().String()
- if gcid != wcid {
- t.Errorf("#%d: cid = %s, want %s", i, gcid, wcid)
- }
- if rw.Body.String() != tt.wbody {
- t.Errorf("#%d: body = %q, want %q", i, rw.Body.String(), tt.wbody)
- }
- }
-}
-
-func TestServeMembersCreate(t *testing.T) {
- u := testutil.MustNewURL(t, membersPrefix)
- b := []byte(`{"peerURLs":["http://127.0.0.1:1"]}`)
- req, err := http.NewRequest("POST", u.String(), bytes.NewReader(b))
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Set("Content-Type", "application/json")
- s := &serverRecorder{}
- h := &membersHandler{
- lg: zap.NewExample(),
- server: s,
- clock: clockwork.NewFakeClock(),
- cluster: &fakeCluster{id: 1},
- }
- rw := httptest.NewRecorder()
-
- h.ServeHTTP(rw, req)
-
- wcode := http.StatusCreated
- if rw.Code != wcode {
- t.Errorf("code=%d, want %d", rw.Code, wcode)
- }
-
- wct := "application/json"
- if gct := rw.Header().Get("Content-Type"); gct != wct {
- t.Errorf("content-type = %s, want %s", gct, wct)
- }
- gcid := rw.Header().Get("X-Etcd-Cluster-ID")
- wcid := h.cluster.ID().String()
- if gcid != wcid {
- t.Errorf("cid = %s, want %s", gcid, wcid)
- }
-
- wb := `{"id":"c29b431f04be0bc7","name":"","peerURLs":["http://127.0.0.1:1"],"clientURLs":[]}` + "\n"
- g := rw.Body.String()
- if g != wb {
- t.Errorf("got body=%q, want %q", g, wb)
- }
-
- wm := membership.Member{
- ID: 14022875665250782151,
- RaftAttributes: membership.RaftAttributes{
- PeerURLs: []string{"http://127.0.0.1:1"},
- },
- }
-
- wactions := []action{{name: "AddMember", params: []interface{}{wm}}}
- if !reflect.DeepEqual(s.actions, wactions) {
- t.Errorf("actions = %+v, want %+v", s.actions, wactions)
- }
-}
-
-func TestServeMembersDelete(t *testing.T) {
- req := &http.Request{
- Method: "DELETE",
- URL: testutil.MustNewURL(t, path.Join(membersPrefix, "BEEF")),
- }
- s := &serverRecorder{}
- h := &membersHandler{
- lg: zap.NewExample(),
- server: s,
- cluster: &fakeCluster{id: 1},
- }
- rw := httptest.NewRecorder()
-
- h.ServeHTTP(rw, req)
-
- wcode := http.StatusNoContent
- if rw.Code != wcode {
- t.Errorf("code=%d, want %d", rw.Code, wcode)
- }
- gcid := rw.Header().Get("X-Etcd-Cluster-ID")
- wcid := h.cluster.ID().String()
- if gcid != wcid {
- t.Errorf("cid = %s, want %s", gcid, wcid)
- }
- g := rw.Body.String()
- if g != "" {
- t.Errorf("got body=%q, want %q", g, "")
- }
- wactions := []action{{name: "RemoveMember", params: []interface{}{uint64(0xBEEF)}}}
- if !reflect.DeepEqual(s.actions, wactions) {
- t.Errorf("actions = %+v, want %+v", s.actions, wactions)
- }
-}
-
-func TestServeMembersUpdate(t *testing.T) {
- u := testutil.MustNewURL(t, path.Join(membersPrefix, "1"))
- b := []byte(`{"peerURLs":["http://127.0.0.1:1"]}`)
- req, err := http.NewRequest("PUT", u.String(), bytes.NewReader(b))
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Set("Content-Type", "application/json")
- s := &serverRecorder{}
- h := &membersHandler{
- lg: zap.NewExample(),
- server: s,
- clock: clockwork.NewFakeClock(),
- cluster: &fakeCluster{id: 1},
- }
- rw := httptest.NewRecorder()
-
- h.ServeHTTP(rw, req)
-
- wcode := http.StatusNoContent
- if rw.Code != wcode {
- t.Errorf("code=%d, want %d", rw.Code, wcode)
- }
-
- gcid := rw.Header().Get("X-Etcd-Cluster-ID")
- wcid := h.cluster.ID().String()
- if gcid != wcid {
- t.Errorf("cid = %s, want %s", gcid, wcid)
- }
-
- wm := membership.Member{
- ID: 1,
- RaftAttributes: membership.RaftAttributes{
- PeerURLs: []string{"http://127.0.0.1:1"},
- },
- }
-
- wactions := []action{{name: "UpdateMember", params: []interface{}{wm}}}
- if !reflect.DeepEqual(s.actions, wactions) {
- t.Errorf("actions = %+v, want %+v", s.actions, wactions)
- }
-}
-
-func TestServeMembersFail(t *testing.T) {
- tests := []struct {
- req *http.Request
- server etcdserver.ServerV2
-
- wcode int
- }{
- {
- // bad method
- &http.Request{
- Method: "CONNECT",
- },
- &resServer{},
-
- http.StatusMethodNotAllowed,
- },
- {
- // bad method
- &http.Request{
- Method: "TRACE",
- },
- &resServer{},
-
- http.StatusMethodNotAllowed,
- },
- {
- // parse body error
- &http.Request{
- URL: testutil.MustNewURL(t, membersPrefix),
- Method: "POST",
- Body: ioutil.NopCloser(strings.NewReader("bad json")),
- Header: map[string][]string{"Content-Type": {"application/json"}},
- },
- &resServer{},
-
- http.StatusBadRequest,
- },
- {
- // bad content type
- &http.Request{
- URL: testutil.MustNewURL(t, membersPrefix),
- Method: "POST",
- Body: ioutil.NopCloser(strings.NewReader(`{"PeerURLs": ["http://127.0.0.1:1"]}`)),
- Header: map[string][]string{"Content-Type": {"application/bad"}},
- },
- &errServer{},
-
- http.StatusUnsupportedMediaType,
- },
- {
- // bad url
- &http.Request{
- URL: testutil.MustNewURL(t, membersPrefix),
- Method: "POST",
- Body: ioutil.NopCloser(strings.NewReader(`{"PeerURLs": ["http://a"]}`)),
- Header: map[string][]string{"Content-Type": {"application/json"}},
- },
- &errServer{},
-
- http.StatusBadRequest,
- },
- {
- // etcdserver.AddMember error
- &http.Request{
- URL: testutil.MustNewURL(t, membersPrefix),
- Method: "POST",
- Body: ioutil.NopCloser(strings.NewReader(`{"PeerURLs": ["http://127.0.0.1:1"]}`)),
- Header: map[string][]string{"Content-Type": {"application/json"}},
- },
- &errServer{
- err: errors.New("Error while adding a member"),
- },
-
- http.StatusInternalServerError,
- },
- {
- // etcdserver.AddMember error
- &http.Request{
- URL: testutil.MustNewURL(t, membersPrefix),
- Method: "POST",
- Body: ioutil.NopCloser(strings.NewReader(`{"PeerURLs": ["http://127.0.0.1:1"]}`)),
- Header: map[string][]string{"Content-Type": {"application/json"}},
- },
- &errServer{
- err: membership.ErrIDExists,
- },
-
- http.StatusConflict,
- },
- {
- // etcdserver.AddMember error
- &http.Request{
- URL: testutil.MustNewURL(t, membersPrefix),
- Method: "POST",
- Body: ioutil.NopCloser(strings.NewReader(`{"PeerURLs": ["http://127.0.0.1:1"]}`)),
- Header: map[string][]string{"Content-Type": {"application/json"}},
- },
- &errServer{
- err: membership.ErrPeerURLexists,
- },
-
- http.StatusConflict,
- },
- {
- // etcdserver.RemoveMember error with arbitrary server error
- &http.Request{
- URL: testutil.MustNewURL(t, path.Join(membersPrefix, "1")),
- Method: "DELETE",
- },
- &errServer{
- err: errors.New("Error while removing member"),
- },
-
- http.StatusInternalServerError,
- },
- {
- // etcdserver.RemoveMember error with previously removed ID
- &http.Request{
- URL: testutil.MustNewURL(t, path.Join(membersPrefix, "0")),
- Method: "DELETE",
- },
- &errServer{
- err: membership.ErrIDRemoved,
- },
-
- http.StatusGone,
- },
- {
- // etcdserver.RemoveMember error with nonexistent ID
- &http.Request{
- URL: testutil.MustNewURL(t, path.Join(membersPrefix, "0")),
- Method: "DELETE",
- },
- &errServer{
- err: membership.ErrIDNotFound,
- },
-
- http.StatusNotFound,
- },
- {
- // etcdserver.RemoveMember error with badly formed ID
- &http.Request{
- URL: testutil.MustNewURL(t, path.Join(membersPrefix, "bad_id")),
- Method: "DELETE",
- },
- nil,
-
- http.StatusNotFound,
- },
- {
- // etcdserver.RemoveMember with no ID
- &http.Request{
- URL: testutil.MustNewURL(t, membersPrefix),
- Method: "DELETE",
- },
- nil,
-
- http.StatusMethodNotAllowed,
- },
- {
- // parse body error
- &http.Request{
- URL: testutil.MustNewURL(t, path.Join(membersPrefix, "0")),
- Method: "PUT",
- Body: ioutil.NopCloser(strings.NewReader("bad json")),
- Header: map[string][]string{"Content-Type": {"application/json"}},
- },
- &resServer{},
-
- http.StatusBadRequest,
- },
- {
- // bad content type
- &http.Request{
- URL: testutil.MustNewURL(t, path.Join(membersPrefix, "0")),
- Method: "PUT",
- Body: ioutil.NopCloser(strings.NewReader(`{"PeerURLs": ["http://127.0.0.1:1"]}`)),
- Header: map[string][]string{"Content-Type": {"application/bad"}},
- },
- &errServer{},
-
- http.StatusUnsupportedMediaType,
- },
- {
- // bad url
- &http.Request{
- URL: testutil.MustNewURL(t, path.Join(membersPrefix, "0")),
- Method: "PUT",
- Body: ioutil.NopCloser(strings.NewReader(`{"PeerURLs": ["http://a"]}`)),
- Header: map[string][]string{"Content-Type": {"application/json"}},
- },
- &errServer{},
-
- http.StatusBadRequest,
- },
- {
- // etcdserver.UpdateMember error
- &http.Request{
- URL: testutil.MustNewURL(t, path.Join(membersPrefix, "0")),
- Method: "PUT",
- Body: ioutil.NopCloser(strings.NewReader(`{"PeerURLs": ["http://127.0.0.1:1"]}`)),
- Header: map[string][]string{"Content-Type": {"application/json"}},
- },
- &errServer{
- err: errors.New("blah"),
- },
-
- http.StatusInternalServerError,
- },
- {
- // etcdserver.UpdateMember error
- &http.Request{
- URL: testutil.MustNewURL(t, path.Join(membersPrefix, "0")),
- Method: "PUT",
- Body: ioutil.NopCloser(strings.NewReader(`{"PeerURLs": ["http://127.0.0.1:1"]}`)),
- Header: map[string][]string{"Content-Type": {"application/json"}},
- },
- &errServer{
- err: membership.ErrPeerURLexists,
- },
-
- http.StatusConflict,
- },
- {
- // etcdserver.UpdateMember error
- &http.Request{
- URL: testutil.MustNewURL(t, path.Join(membersPrefix, "0")),
- Method: "PUT",
- Body: ioutil.NopCloser(strings.NewReader(`{"PeerURLs": ["http://127.0.0.1:1"]}`)),
- Header: map[string][]string{"Content-Type": {"application/json"}},
- },
- &errServer{
- err: membership.ErrIDNotFound,
- },
-
- http.StatusNotFound,
- },
- {
- // etcdserver.UpdateMember error with badly formed ID
- &http.Request{
- URL: testutil.MustNewURL(t, path.Join(membersPrefix, "bad_id")),
- Method: "PUT",
- },
- nil,
-
- http.StatusNotFound,
- },
- {
- // etcdserver.UpdateMember with no ID
- &http.Request{
- URL: testutil.MustNewURL(t, membersPrefix),
- Method: "PUT",
- },
- nil,
-
- http.StatusMethodNotAllowed,
- },
- }
- for i, tt := range tests {
- h := &membersHandler{
- lg: zap.NewExample(),
- server: tt.server,
- cluster: &fakeCluster{id: 1},
- clock: clockwork.NewFakeClock(),
- }
- rw := httptest.NewRecorder()
- h.ServeHTTP(rw, tt.req)
- if rw.Code != tt.wcode {
- t.Errorf("#%d: code=%d, want %d", i, rw.Code, tt.wcode)
- }
- if rw.Code != http.StatusMethodNotAllowed {
- gcid := rw.Header().Get("X-Etcd-Cluster-ID")
- wcid := h.cluster.ID().String()
- if gcid != wcid {
- t.Errorf("#%d: cid = %s, want %s", i, gcid, wcid)
- }
- }
- }
-}
-
-func TestWriteEvent(t *testing.T) {
- // nil event should not panic
- rec := httptest.NewRecorder()
- writeKeyEvent(rec, etcdserver.Response{}, false)
- h := rec.Header()
- if len(h) > 0 {
- t.Fatalf("unexpected non-empty headers: %#v", h)
- }
- b := rec.Body.String()
- if len(b) > 0 {
- t.Fatalf("unexpected non-empty body: %q", b)
- }
-
- tests := []struct {
- ev *v2store.Event
- noValue bool
- idx string
- // TODO(jonboulle): check body as well as just status code
- code int
- err error
- }{
- // standard case, standard 200 response
- {
- &v2store.Event{
- Action: v2store.Get,
- Node: &v2store.NodeExtern{},
- PrevNode: &v2store.NodeExtern{},
- },
- false,
- "0",
- http.StatusOK,
- nil,
- },
- // check new nodes return StatusCreated
- {
- &v2store.Event{
- Action: v2store.Create,
- Node: &v2store.NodeExtern{},
- PrevNode: &v2store.NodeExtern{},
- },
- false,
- "0",
- http.StatusCreated,
- nil,
- },
- }
-
- for i, tt := range tests {
- rw := httptest.NewRecorder()
- resp := etcdserver.Response{Event: tt.ev, Term: 5, Index: 100}
- writeKeyEvent(rw, resp, tt.noValue)
- if gct := rw.Header().Get("Content-Type"); gct != "application/json" {
- t.Errorf("case %d: bad Content-Type: got %q, want application/json", i, gct)
- }
- if gri := rw.Header().Get("X-Raft-Index"); gri != "100" {
- t.Errorf("case %d: bad X-Raft-Index header: got %s, want %s", i, gri, "100")
- }
- if grt := rw.Header().Get("X-Raft-Term"); grt != "5" {
- t.Errorf("case %d: bad X-Raft-Term header: got %s, want %s", i, grt, "5")
- }
- if gei := rw.Header().Get("X-Etcd-Index"); gei != tt.idx {
- t.Errorf("case %d: bad X-Etcd-Index header: got %s, want %s", i, gei, tt.idx)
- }
- if rw.Code != tt.code {
- t.Errorf("case %d: bad response code: got %d, want %v", i, rw.Code, tt.code)
- }
-
- }
-}
-
-func TestV2DMachinesEndpoint(t *testing.T) {
- tests := []struct {
- method string
- wcode int
- }{
- {"GET", http.StatusOK},
- {"HEAD", http.StatusOK},
- {"POST", http.StatusMethodNotAllowed},
- }
-
- m := &machinesHandler{cluster: &fakeCluster{}}
- s := httptest.NewServer(m)
- defer s.Close()
-
- for _, tt := range tests {
- req, err := http.NewRequest(tt.method, s.URL+machinesPrefix, nil)
- if err != nil {
- t.Fatal(err)
- }
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- t.Fatal(err)
- }
-
- if resp.StatusCode != tt.wcode {
- t.Errorf("StatusCode = %d, expected %d", resp.StatusCode, tt.wcode)
- }
- }
-}
-
-func TestServeMachines(t *testing.T) {
- cluster := &fakeCluster{
- clientURLs: []string{"http://localhost:8080", "http://localhost:8081", "http://localhost:8082"},
- }
- writer := httptest.NewRecorder()
- req, err := http.NewRequest("GET", "", nil)
- if err != nil {
- t.Fatal(err)
- }
- h := &machinesHandler{cluster: cluster}
- h.ServeHTTP(writer, req)
- w := "http://localhost:8080, http://localhost:8081, http://localhost:8082"
- if g := writer.Body.String(); g != w {
- t.Errorf("body = %s, want %s", g, w)
- }
- if writer.Code != http.StatusOK {
- t.Errorf("code = %d, want %d", writer.Code, http.StatusOK)
- }
-}
-
-func TestGetID(t *testing.T) {
- tests := []struct {
- path string
-
- wok bool
- wid types.ID
- wcode int
- }{
- {
- "123",
- true, 0x123, http.StatusOK,
- },
- {
- "bad_id",
- false, 0, http.StatusNotFound,
- },
- {
- "",
- false, 0, http.StatusMethodNotAllowed,
- },
- }
-
- for i, tt := range tests {
- w := httptest.NewRecorder()
- id, ok := getID(zap.NewExample(), tt.path, w)
- if id != tt.wid {
- t.Errorf("#%d: id = %d, want %d", i, id, tt.wid)
- }
- if ok != tt.wok {
- t.Errorf("#%d: ok = %t, want %t", i, ok, tt.wok)
- }
- if w.Code != tt.wcode {
- t.Errorf("#%d code = %d, want %d", i, w.Code, tt.wcode)
- }
- }
-}
-
-type dummyStats struct {
- data []byte
-}
-
-func (ds *dummyStats) SelfStats() []byte { return ds.data }
-func (ds *dummyStats) LeaderStats() []byte { return ds.data }
-func (ds *dummyStats) StoreStats() []byte { return ds.data }
-func (ds *dummyStats) UpdateRecvApp(_ types.ID, _ int64) {}
-
-func TestServeSelfStats(t *testing.T) {
- wb := []byte("some statistics")
- w := string(wb)
- sh := &statsHandler{
- lg: zap.NewExample(),
- stats: &dummyStats{data: wb},
- }
- rw := httptest.NewRecorder()
- sh.serveSelf(rw, &http.Request{Method: "GET"})
- if rw.Code != http.StatusOK {
- t.Errorf("code = %d, want %d", rw.Code, http.StatusOK)
- }
- wct := "application/json"
- if gct := rw.Header().Get("Content-Type"); gct != wct {
- t.Errorf("Content-Type = %q, want %q", gct, wct)
- }
- if g := rw.Body.String(); g != w {
- t.Errorf("body = %s, want %s", g, w)
- }
-}
-
-func TestSelfServeStatsBad(t *testing.T) {
- for _, m := range []string{"PUT", "POST", "DELETE"} {
- sh := &statsHandler{lg: zap.NewExample()}
- rw := httptest.NewRecorder()
- sh.serveSelf(
- rw,
- &http.Request{
- Method: m,
- },
- )
- if rw.Code != http.StatusMethodNotAllowed {
- t.Errorf("method %s: code=%d, want %d", m, rw.Code, http.StatusMethodNotAllowed)
- }
- }
-}
-
-func TestLeaderServeStatsBad(t *testing.T) {
- for _, m := range []string{"PUT", "POST", "DELETE"} {
- sh := &statsHandler{lg: zap.NewExample()}
- rw := httptest.NewRecorder()
- sh.serveLeader(
- rw,
- &http.Request{
- Method: m,
- },
- )
- if rw.Code != http.StatusMethodNotAllowed {
- t.Errorf("method %s: code=%d, want %d", m, rw.Code, http.StatusMethodNotAllowed)
- }
- }
-}
-
-func TestServeLeaderStats(t *testing.T) {
- wb := []byte("some statistics")
- w := string(wb)
- sh := &statsHandler{
- lg: zap.NewExample(),
- stats: &dummyStats{data: wb},
- }
- rw := httptest.NewRecorder()
- sh.serveLeader(rw, &http.Request{Method: "GET"})
- if rw.Code != http.StatusOK {
- t.Errorf("code = %d, want %d", rw.Code, http.StatusOK)
- }
- wct := "application/json"
- if gct := rw.Header().Get("Content-Type"); gct != wct {
- t.Errorf("Content-Type = %q, want %q", gct, wct)
- }
- if g := rw.Body.String(); g != w {
- t.Errorf("body = %s, want %s", g, w)
- }
-}
-
-func TestServeStoreStats(t *testing.T) {
- wb := []byte("some statistics")
- w := string(wb)
- sh := &statsHandler{
- lg: zap.NewExample(),
- stats: &dummyStats{data: wb},
- }
- rw := httptest.NewRecorder()
- sh.serveStore(rw, &http.Request{Method: "GET"})
- if rw.Code != http.StatusOK {
- t.Errorf("code = %d, want %d", rw.Code, http.StatusOK)
- }
- wct := "application/json"
- if gct := rw.Header().Get("Content-Type"); gct != wct {
- t.Errorf("Content-Type = %q, want %q", gct, wct)
- }
- if g := rw.Body.String(); g != w {
- t.Errorf("body = %s, want %s", g, w)
- }
-
-}
-
-func TestBadServeKeys(t *testing.T) {
- testBadCases := []struct {
- req *http.Request
- server etcdserver.ServerV2
-
- wcode int
- wbody string
- }{
- {
- // bad method
- &http.Request{
- Method: "CONNECT",
- },
- &resServer{},
-
- http.StatusMethodNotAllowed,
- "Method Not Allowed",
- },
- {
- // bad method
- &http.Request{
- Method: "TRACE",
- },
- &resServer{},
-
- http.StatusMethodNotAllowed,
- "Method Not Allowed",
- },
- {
- // parseRequest error
- &http.Request{
- Body: nil,
- Method: "PUT",
- },
- &resServer{},
-
- http.StatusBadRequest,
- `{"errorCode":210,"message":"Invalid POST form","cause":"missing form body","index":0}`,
- },
- {
- // etcdserver.Server error
- mustNewRequest(t, "foo"),
- &errServer{
- err: errors.New("Internal Server Error"),
- },
-
- http.StatusInternalServerError,
- `{"errorCode":300,"message":"Raft Internal Error","cause":"Internal Server Error","index":0}`,
- },
- {
- // etcdserver.Server etcd error
- mustNewRequest(t, "foo"),
- &errServer{
- err: v2error.NewError(v2error.EcodeKeyNotFound, "/1/pant", 0),
- },
-
- http.StatusNotFound,
- `{"errorCode":100,"message":"Key not found","cause":"/pant","index":0}`,
- },
- {
- // non-event/watcher response from etcdserver.Server
- mustNewRequest(t, "foo"),
- &resServer{
- res: etcdserver.Response{},
- },
-
- http.StatusInternalServerError,
- `{"errorCode":300,"message":"Raft Internal Error","cause":"received response with no Event/Watcher","index":0}`,
- },
- }
- for i, tt := range testBadCases {
- h := &keysHandler{
- lg: zap.NewExample(),
- timeout: 0, // context times out immediately
- server: tt.server,
- cluster: &fakeCluster{id: 1},
- }
- rw := httptest.NewRecorder()
- h.ServeHTTP(rw, tt.req)
- if rw.Code != tt.wcode {
- t.Errorf("#%d: got code=%d, want %d", i, rw.Code, tt.wcode)
- }
- if rw.Code != http.StatusMethodNotAllowed {
- gcid := rw.Header().Get("X-Etcd-Cluster-ID")
- wcid := h.cluster.ID().String()
- if gcid != wcid {
- t.Errorf("#%d: cid = %s, want %s", i, gcid, wcid)
- }
- }
- if g := strings.TrimSuffix(rw.Body.String(), "\n"); g != tt.wbody {
- t.Errorf("#%d: body = %s, want %s", i, g, tt.wbody)
- }
- }
-}
-
-func TestServeKeysGood(t *testing.T) {
- tests := []struct {
- req *http.Request
- wcode int
- }{
- {
- mustNewMethodRequest(t, "HEAD", "foo"),
- http.StatusOK,
- },
- {
- mustNewMethodRequest(t, "GET", "foo"),
- http.StatusOK,
- },
- {
- mustNewForm(t, "foo", url.Values{"value": []string{"bar"}}),
- http.StatusOK,
- },
- {
- mustNewMethodRequest(t, "DELETE", "foo"),
- http.StatusOK,
- },
- {
- mustNewPostForm(t, "foo", url.Values{"value": []string{"bar"}}),
- http.StatusOK,
- },
- }
- server := &resServer{
- res: etcdserver.Response{
- Event: &v2store.Event{
- Action: v2store.Get,
- Node: &v2store.NodeExtern{},
- },
- },
- }
- for i, tt := range tests {
- h := &keysHandler{
- lg: zap.NewExample(),
- timeout: time.Hour,
- server: server,
- cluster: &fakeCluster{id: 1},
- }
- rw := httptest.NewRecorder()
- h.ServeHTTP(rw, tt.req)
- if rw.Code != tt.wcode {
- t.Errorf("#%d: got code=%d, want %d", i, rw.Code, tt.wcode)
- }
- }
-}
-
-func TestServeKeysEvent(t *testing.T) {
- tests := []struct {
- req *http.Request
- rsp etcdserver.Response
- wcode int
- event *v2store.Event
- }{
- {
- mustNewRequest(t, "foo"),
- etcdserver.Response{
- Event: &v2store.Event{
- Action: v2store.Get,
- Node: &v2store.NodeExtern{},
- },
- },
- http.StatusOK,
- &v2store.Event{
- Action: v2store.Get,
- Node: &v2store.NodeExtern{},
- },
- },
- {
- mustNewForm(
- t,
- "foo",
- url.Values{"noValueOnSuccess": []string{"true"}},
- ),
- etcdserver.Response{
- Event: &v2store.Event{
- Action: v2store.CompareAndSwap,
- Node: &v2store.NodeExtern{},
- },
- },
- http.StatusOK,
- &v2store.Event{
- Action: v2store.CompareAndSwap,
- Node: nil,
- },
- },
- }
-
- server := &resServer{}
- h := &keysHandler{
- lg: zap.NewExample(),
- timeout: time.Hour,
- server: server,
- cluster: &fakeCluster{id: 1},
- }
-
- for _, tt := range tests {
- server.res = tt.rsp
- rw := httptest.NewRecorder()
- h.ServeHTTP(rw, tt.req)
-
- wbody := mustMarshalEvent(
- t,
- tt.event,
- )
-
- if rw.Code != tt.wcode {
- t.Errorf("got code=%d, want %d", rw.Code, tt.wcode)
- }
- gcid := rw.Header().Get("X-Etcd-Cluster-ID")
- wcid := h.cluster.ID().String()
- if gcid != wcid {
- t.Errorf("cid = %s, want %s", gcid, wcid)
- }
- g := rw.Body.String()
- if g != wbody {
- t.Errorf("got body=%#v, want %#v", g, wbody)
- }
- }
-}
-
-func TestServeKeysWatch(t *testing.T) {
- req := mustNewRequest(t, "/foo/bar")
- ec := make(chan *v2store.Event)
- dw := &dummyWatcher{
- echan: ec,
- }
- server := &resServer{
- res: etcdserver.Response{
- Watcher: dw,
- },
- }
- h := &keysHandler{
- lg: zap.NewExample(),
- timeout: time.Hour,
- server: server,
- cluster: &fakeCluster{id: 1},
- }
- go func() {
- ec <- &v2store.Event{
- Action: v2store.Get,
- Node: &v2store.NodeExtern{},
- }
- }()
- rw := httptest.NewRecorder()
-
- h.ServeHTTP(rw, req)
-
- wcode := http.StatusOK
- wbody := mustMarshalEvent(
- t,
- &v2store.Event{
- Action: v2store.Get,
- Node: &v2store.NodeExtern{},
- },
- )
-
- if rw.Code != wcode {
- t.Errorf("got code=%d, want %d", rw.Code, wcode)
- }
- gcid := rw.Header().Get("X-Etcd-Cluster-ID")
- wcid := h.cluster.ID().String()
- if gcid != wcid {
- t.Errorf("cid = %s, want %s", gcid, wcid)
- }
- g := rw.Body.String()
- if g != wbody {
- t.Errorf("got body=%#v, want %#v", g, wbody)
- }
-}
-
-type recordingCloseNotifier struct {
- *httptest.ResponseRecorder
- cn chan bool
-}
-
-func (rcn *recordingCloseNotifier) CloseNotify() <-chan bool {
- return rcn.cn
-}
-
-func TestHandleWatch(t *testing.T) {
- defaultRwRr := func() (http.ResponseWriter, *httptest.ResponseRecorder) {
- r := httptest.NewRecorder()
- return r, r
- }
- noopEv := func(chan *v2store.Event) {}
-
- tests := []struct {
- getCtx func() context.Context
- getRwRr func() (http.ResponseWriter, *httptest.ResponseRecorder)
- doToChan func(chan *v2store.Event)
-
- wbody string
- }{
- {
- // Normal case: one event
- context.Background,
- defaultRwRr,
- func(ch chan *v2store.Event) {
- ch <- &v2store.Event{
- Action: v2store.Get,
- Node: &v2store.NodeExtern{},
- }
- },
-
- mustMarshalEvent(
- t,
- &v2store.Event{
- Action: v2store.Get,
- Node: &v2store.NodeExtern{},
- },
- ),
- },
- {
- // Channel is closed, no event
- context.Background,
- defaultRwRr,
- func(ch chan *v2store.Event) {
- close(ch)
- },
-
- "",
- },
- {
- // Simulate a timed-out context
- func() context.Context {
- ctx, cancel := context.WithCancel(context.Background())
- cancel()
- return ctx
- },
- defaultRwRr,
- noopEv,
-
- "",
- },
- {
- // Close-notifying request
- context.Background,
- func() (http.ResponseWriter, *httptest.ResponseRecorder) {
- rw := &recordingCloseNotifier{
- ResponseRecorder: httptest.NewRecorder(),
- cn: make(chan bool, 1),
- }
- rw.cn <- true
- return rw, rw.ResponseRecorder
- },
- noopEv,
-
- "",
- },
- }
-
- for i, tt := range tests {
- rw, rr := tt.getRwRr()
- wa := &dummyWatcher{
- echan: make(chan *v2store.Event, 1),
- sidx: 10,
- }
- tt.doToChan(wa.echan)
-
- resp := etcdserver.Response{Term: 5, Index: 100, Watcher: wa}
- handleKeyWatch(tt.getCtx(), zap.NewExample(), rw, resp, false)
-
- wcode := http.StatusOK
- wct := "application/json"
- wei := "10"
- wri := "100"
- wrt := "5"
-
- if rr.Code != wcode {
- t.Errorf("#%d: got code=%d, want %d", i, rr.Code, wcode)
- }
- h := rr.Header()
- if ct := h.Get("Content-Type"); ct != wct {
- t.Errorf("#%d: Content-Type=%q, want %q", i, ct, wct)
- }
- if ei := h.Get("X-Etcd-Index"); ei != wei {
- t.Errorf("#%d: X-Etcd-Index=%q, want %q", i, ei, wei)
- }
- if ri := h.Get("X-Raft-Index"); ri != wri {
- t.Errorf("#%d: X-Raft-Index=%q, want %q", i, ri, wri)
- }
- if rt := h.Get("X-Raft-Term"); rt != wrt {
- t.Errorf("#%d: X-Raft-Term=%q, want %q", i, rt, wrt)
- }
- g := rr.Body.String()
- if g != tt.wbody {
- t.Errorf("#%d: got body=%#v, want %#v", i, g, tt.wbody)
- }
- }
-}
-
-func TestHandleWatchStreaming(t *testing.T) {
- rw := &flushingRecorder{
- httptest.NewRecorder(),
- make(chan struct{}, 1),
- }
- wa := &dummyWatcher{
- echan: make(chan *v2store.Event),
- }
-
- // Launch the streaming handler in the background with a cancellable context
- ctx, cancel := context.WithCancel(context.Background())
- done := make(chan struct{})
- go func() {
- resp := etcdserver.Response{Watcher: wa}
- handleKeyWatch(ctx, zap.NewExample(), rw, resp, true)
- close(done)
- }()
-
- // Expect one Flush for the headers etc.
- select {
- case <-rw.ch:
- case <-time.After(time.Second):
- t.Fatalf("timed out waiting for flush")
- }
-
- // Expect headers but no body
- wcode := http.StatusOK
- wct := "application/json"
- wbody := ""
-
- if rw.Code != wcode {
- t.Errorf("got code=%d, want %d", rw.Code, wcode)
- }
- h := rw.Header()
- if ct := h.Get("Content-Type"); ct != wct {
- t.Errorf("Content-Type=%q, want %q", ct, wct)
- }
- g := rw.Body.String()
- if g != wbody {
- t.Errorf("got body=%#v, want %#v", g, wbody)
- }
-
- // Now send the first event
- select {
- case wa.echan <- &v2store.Event{
- Action: v2store.Get,
- Node: &v2store.NodeExtern{},
- }:
- case <-time.After(time.Second):
- t.Fatal("timed out waiting for send")
- }
-
- // Wait for it to be flushed...
- select {
- case <-rw.ch:
- case <-time.After(time.Second):
- t.Fatalf("timed out waiting for flush")
- }
-
- // And check the body is as expected
- wbody = mustMarshalEvent(
- t,
- &v2store.Event{
- Action: v2store.Get,
- Node: &v2store.NodeExtern{},
- },
- )
- g = rw.Body.String()
- if g != wbody {
- t.Errorf("got body=%#v, want %#v", g, wbody)
- }
-
- // Rinse and repeat
- select {
- case wa.echan <- &v2store.Event{
- Action: v2store.Get,
- Node: &v2store.NodeExtern{},
- }:
- case <-time.After(time.Second):
- t.Fatal("timed out waiting for send")
- }
-
- select {
- case <-rw.ch:
- case <-time.After(time.Second):
- t.Fatalf("timed out waiting for flush")
- }
-
- // This time, we expect to see both events
- wbody = wbody + wbody
- g = rw.Body.String()
- if g != wbody {
- t.Errorf("got body=%#v, want %#v", g, wbody)
- }
-
- // Finally, time out the connection and ensure the serving goroutine returns
- cancel()
-
- select {
- case <-done:
- case <-time.After(time.Second):
- t.Fatalf("timed out waiting for done")
- }
-}
-
-func TestTrimEventPrefix(t *testing.T) {
- pre := "/abc"
- tests := []struct {
- ev *v2store.Event
- wev *v2store.Event
- }{
- {
- nil,
- nil,
- },
- {
- &v2store.Event{},
- &v2store.Event{},
- },
- {
- &v2store.Event{Node: &v2store.NodeExtern{Key: "/abc/def"}},
- &v2store.Event{Node: &v2store.NodeExtern{Key: "/def"}},
- },
- {
- &v2store.Event{PrevNode: &v2store.NodeExtern{Key: "/abc/ghi"}},
- &v2store.Event{PrevNode: &v2store.NodeExtern{Key: "/ghi"}},
- },
- {
- &v2store.Event{
- Node: &v2store.NodeExtern{Key: "/abc/def"},
- PrevNode: &v2store.NodeExtern{Key: "/abc/ghi"},
- },
- &v2store.Event{
- Node: &v2store.NodeExtern{Key: "/def"},
- PrevNode: &v2store.NodeExtern{Key: "/ghi"},
- },
- },
- }
- for i, tt := range tests {
- ev := trimEventPrefix(tt.ev, pre)
- if !reflect.DeepEqual(ev, tt.wev) {
- t.Errorf("#%d: event = %+v, want %+v", i, ev, tt.wev)
- }
- }
-}
-
-func TestTrimNodeExternPrefix(t *testing.T) {
- pre := "/abc"
- tests := []struct {
- n *v2store.NodeExtern
- wn *v2store.NodeExtern
- }{
- {
- nil,
- nil,
- },
- {
- &v2store.NodeExtern{Key: "/abc/def"},
- &v2store.NodeExtern{Key: "/def"},
- },
- {
- &v2store.NodeExtern{
- Key: "/abc/def",
- Nodes: []*v2store.NodeExtern{
- {Key: "/abc/def/1"},
- {Key: "/abc/def/2"},
- },
- },
- &v2store.NodeExtern{
- Key: "/def",
- Nodes: []*v2store.NodeExtern{
- {Key: "/def/1"},
- {Key: "/def/2"},
- },
- },
- },
- }
- for i, tt := range tests {
- trimNodeExternPrefix(tt.n, pre)
- if !reflect.DeepEqual(tt.n, tt.wn) {
- t.Errorf("#%d: node = %+v, want %+v", i, tt.n, tt.wn)
- }
- }
-}
-
-func TestTrimPrefix(t *testing.T) {
- tests := []struct {
- in string
- prefix string
- w string
- }{
- {"/v2/members", "/v2/members", ""},
- {"/v2/members/", "/v2/members", ""},
- {"/v2/members/foo", "/v2/members", "foo"},
- }
- for i, tt := range tests {
- if g := trimPrefix(tt.in, tt.prefix); g != tt.w {
- t.Errorf("#%d: trimPrefix = %q, want %q", i, g, tt.w)
- }
- }
-}
-
-func TestNewMemberCollection(t *testing.T) {
- fixture := []*membership.Member{
- {
- ID: 12,
- Attributes: membership.Attributes{ClientURLs: []string{"http://localhost:8080", "http://localhost:8081"}},
- RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:8082", "http://localhost:8083"}},
- },
- {
- ID: 13,
- Attributes: membership.Attributes{ClientURLs: []string{"http://localhost:9090", "http://localhost:9091"}},
- RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:9092", "http://localhost:9093"}},
- },
- }
- got := newMemberCollection(fixture)
-
- want := httptypes.MemberCollection([]httptypes.Member{
- {
- ID: "c",
- ClientURLs: []string{"http://localhost:8080", "http://localhost:8081"},
- PeerURLs: []string{"http://localhost:8082", "http://localhost:8083"},
- },
- {
- ID: "d",
- ClientURLs: []string{"http://localhost:9090", "http://localhost:9091"},
- PeerURLs: []string{"http://localhost:9092", "http://localhost:9093"},
- },
- })
-
- if !reflect.DeepEqual(&want, got) {
- t.Fatalf("newMemberCollection failure: want=%#v, got=%#v", &want, got)
- }
-}
-
-func TestNewMember(t *testing.T) {
- fixture := &membership.Member{
- ID: 12,
- Attributes: membership.Attributes{ClientURLs: []string{"http://localhost:8080", "http://localhost:8081"}},
- RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:8082", "http://localhost:8083"}},
- }
- got := newMember(fixture)
-
- want := httptypes.Member{
- ID: "c",
- ClientURLs: []string{"http://localhost:8080", "http://localhost:8081"},
- PeerURLs: []string{"http://localhost:8082", "http://localhost:8083"},
- }
-
- if !reflect.DeepEqual(want, got) {
- t.Fatalf("newMember failure: want=%#v, got=%#v", want, got)
- }
-}
diff --git a/server/etcdserver/api/v2http/doc.go b/server/etcdserver/api/v2http/doc.go
deleted file mode 100644
index 475c4b1f95a..00000000000
--- a/server/etcdserver/api/v2http/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package v2http provides etcd client and server implementations.
-package v2http
diff --git a/server/etcdserver/api/v2http/http.go b/server/etcdserver/api/v2http/http.go
deleted file mode 100644
index 88138b80a8a..00000000000
--- a/server/etcdserver/api/v2http/http.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2http
-
-import (
- "math"
- "net/http"
- "strings"
- "time"
-
- "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2auth"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes"
-
- "go.uber.org/zap"
-)
-
-const (
- // time to wait for a Watch request
- defaultWatchTimeout = time.Duration(math.MaxInt64)
-)
-
-func writeError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) {
- if err == nil {
- return
- }
- if e, ok := err.(v2auth.Error); ok {
- herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error())
- if et := herr.WriteTo(w); et != nil {
- if lg != nil {
- lg.Debug(
- "failed to write v2 HTTP error",
- zap.String("remote-addr", r.RemoteAddr),
- zap.String("v2auth-error", e.Error()),
- zap.Error(et),
- )
- }
- }
- return
- }
- etcdhttp.WriteError(lg, w, r, err)
-}
-
-// allowMethod verifies that the given method is one of the allowed methods,
-// and if not, it writes an error to w. A boolean is returned indicating
-// whether or not the method is allowed.
-func allowMethod(w http.ResponseWriter, m string, ms ...string) bool {
- for _, meth := range ms {
- if m == meth {
- return true
- }
- }
- w.Header().Set("Allow", strings.Join(ms, ","))
- http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
- return false
-}
-
-func requestLogger(lg *zap.Logger, handler http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if lg != nil {
- lg.Debug(
- "handling HTTP request",
- zap.String("method", r.Method),
- zap.String("request-uri", r.RequestURI),
- zap.String("remote-addr", r.RemoteAddr),
- )
- }
- handler.ServeHTTP(w, r)
- })
-}
diff --git a/server/etcdserver/api/v2http/http_test.go b/server/etcdserver/api/v2http/http_test.go
deleted file mode 100644
index 2fcd6b26b94..00000000000
--- a/server/etcdserver/api/v2http/http_test.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2http
-
-import (
- "context"
- "errors"
- "net/http"
- "net/http/httptest"
- "sort"
- "testing"
-
- "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/server/v3/etcdserver"
- "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
-
- "github.com/coreos/go-semver/semver"
- "go.uber.org/zap"
-)
-
-type fakeCluster struct {
- id uint64
- clientURLs []string
- members map[uint64]*membership.Member
-}
-
-func (c *fakeCluster) ID() types.ID { return types.ID(c.id) }
-func (c *fakeCluster) ClientURLs() []string { return c.clientURLs }
-func (c *fakeCluster) Members() []*membership.Member {
- var ms membership.MembersByID
- for _, m := range c.members {
- ms = append(ms, m)
- }
- sort.Sort(ms)
- return []*membership.Member(ms)
-}
-func (c *fakeCluster) Member(id types.ID) *membership.Member { return c.members[uint64(id)] }
-func (c *fakeCluster) Version() *semver.Version { return nil }
-
-// errServer implements the etcd.Server interface for testing.
-// It returns the given error from any Do/Process/AddMember/RemoveMember calls.
-type errServer struct {
- err error
- fakeServer
-}
-
-func (fs *errServer) Do(ctx context.Context, r etcdserverpb.Request) (etcdserver.Response, error) {
- return etcdserver.Response{}, fs.err
-}
-func (fs *errServer) Process(ctx context.Context, m raftpb.Message) error {
- return fs.err
-}
-func (fs *errServer) AddMember(ctx context.Context, m membership.Member) ([]*membership.Member, error) {
- return nil, fs.err
-}
-func (fs *errServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
- return nil, fs.err
-}
-func (fs *errServer) UpdateMember(ctx context.Context, m membership.Member) ([]*membership.Member, error) {
- return nil, fs.err
-}
-func (fs *errServer) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
- return nil, fs.err
-}
-
-func TestWriteError(t *testing.T) {
- // nil error should not panic
- rec := httptest.NewRecorder()
- r := new(http.Request)
- writeError(zap.NewExample(), rec, r, nil)
- h := rec.Header()
- if len(h) > 0 {
- t.Fatalf("unexpected non-empty headers: %#v", h)
- }
- b := rec.Body.String()
- if len(b) > 0 {
- t.Fatalf("unexpected non-empty body: %q", b)
- }
-
- tests := []struct {
- err error
- wcode int
- wi string
- }{
- {
- v2error.NewError(v2error.EcodeKeyNotFound, "/foo/bar", 123),
- http.StatusNotFound,
- "123",
- },
- {
- v2error.NewError(v2error.EcodeTestFailed, "/foo/bar", 456),
- http.StatusPreconditionFailed,
- "456",
- },
- {
- err: errors.New("something went wrong"),
- wcode: http.StatusInternalServerError,
- },
- }
-
- for i, tt := range tests {
- rw := httptest.NewRecorder()
- writeError(zap.NewExample(), rw, r, tt.err)
- if code := rw.Code; code != tt.wcode {
- t.Errorf("#%d: code=%d, want %d", i, code, tt.wcode)
- }
- if idx := rw.Header().Get("X-Etcd-Index"); idx != tt.wi {
- t.Errorf("#%d: X-Etcd-Index=%q, want %q", i, idx, tt.wi)
- }
- }
-}
-
-func TestAllowMethod(t *testing.T) {
- tests := []struct {
- m string
- ms []string
- w bool
- wh string
- }{
- // Accepted methods
- {
- m: "GET",
- ms: []string{"GET", "POST", "PUT"},
- w: true,
- },
- {
- m: "POST",
- ms: []string{"POST"},
- w: true,
- },
- // Made-up methods no good
- {
- m: "FAKE",
- ms: []string{"GET", "POST", "PUT"},
- w: false,
- wh: "GET,POST,PUT",
- },
- // Empty methods no good
- {
- m: "",
- ms: []string{"GET", "POST"},
- w: false,
- wh: "GET,POST",
- },
- // Empty accepted methods no good
- {
- m: "GET",
- ms: []string{""},
- w: false,
- wh: "",
- },
- // No methods accepted
- {
- m: "GET",
- ms: []string{},
- w: false,
- wh: "",
- },
- }
-
- for i, tt := range tests {
- rw := httptest.NewRecorder()
- g := allowMethod(rw, tt.m, tt.ms...)
- if g != tt.w {
- t.Errorf("#%d: got allowMethod()=%t, want %t", i, g, tt.w)
- }
- if !tt.w {
- if rw.Code != http.StatusMethodNotAllowed {
- t.Errorf("#%d: code=%d, want %d", i, rw.Code, http.StatusMethodNotAllowed)
- }
- gh := rw.Header().Get("Allow")
- if gh != tt.wh {
- t.Errorf("#%d: Allow header=%q, want %q", i, gh, tt.wh)
- }
- }
- }
-}
diff --git a/server/etcdserver/api/v2http/httptypes/member.go b/server/etcdserver/api/v2http/httptypes/member.go
deleted file mode 100644
index a5467be91ee..00000000000
--- a/server/etcdserver/api/v2http/httptypes/member.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package httptypes defines how etcd's HTTP API entities are serialized to and
-// deserialized from JSON.
-package httptypes
-
-import (
- "encoding/json"
-
- "go.etcd.io/etcd/client/pkg/v3/types"
-)
-
-type Member struct {
- ID string `json:"id"`
- Name string `json:"name"`
- PeerURLs []string `json:"peerURLs"`
- ClientURLs []string `json:"clientURLs"`
-}
-
-type MemberCreateRequest struct {
- PeerURLs types.URLs
-}
-
-type MemberUpdateRequest struct {
- MemberCreateRequest
-}
-
-func (m *MemberCreateRequest) UnmarshalJSON(data []byte) error {
- s := struct {
- PeerURLs []string `json:"peerURLs"`
- }{}
-
- err := json.Unmarshal(data, &s)
- if err != nil {
- return err
- }
-
- urls, err := types.NewURLs(s.PeerURLs)
- if err != nil {
- return err
- }
-
- m.PeerURLs = urls
- return nil
-}
-
-type MemberCollection []Member
-
-func (c *MemberCollection) MarshalJSON() ([]byte, error) {
- d := struct {
- Members []Member `json:"members"`
- }{
- Members: []Member(*c),
- }
-
- return json.Marshal(d)
-}
diff --git a/server/etcdserver/api/v2http/httptypes/member_test.go b/server/etcdserver/api/v2http/httptypes/member_test.go
deleted file mode 100644
index 6b8056abdc1..00000000000
--- a/server/etcdserver/api/v2http/httptypes/member_test.go
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httptypes
-
-import (
- "encoding/json"
- "net/url"
- "reflect"
- "testing"
-
- "go.etcd.io/etcd/client/pkg/v3/types"
-)
-
-func TestMemberUnmarshal(t *testing.T) {
- tests := []struct {
- body []byte
- wantMember Member
- wantError bool
- }{
- // no URLs, just check ID & Name
- {
- body: []byte(`{"id": "c", "name": "dungarees"}`),
- wantMember: Member{ID: "c", Name: "dungarees", PeerURLs: nil, ClientURLs: nil},
- },
-
- // both client and peer URLs
- {
- body: []byte(`{"peerURLs": ["http://127.0.0.1:2379"], "clientURLs": ["http://127.0.0.1:2379"]}`),
- wantMember: Member{
- PeerURLs: []string{
- "http://127.0.0.1:2379",
- },
- ClientURLs: []string{
- "http://127.0.0.1:2379",
- },
- },
- },
-
- // multiple peer URLs
- {
- body: []byte(`{"peerURLs": ["http://127.0.0.1:2379", "https://example.com"]}`),
- wantMember: Member{
- PeerURLs: []string{
- "http://127.0.0.1:2379",
- "https://example.com",
- },
- ClientURLs: nil,
- },
- },
-
- // multiple client URLs
- {
- body: []byte(`{"clientURLs": ["http://127.0.0.1:2379", "https://example.com"]}`),
- wantMember: Member{
- PeerURLs: nil,
- ClientURLs: []string{
- "http://127.0.0.1:2379",
- "https://example.com",
- },
- },
- },
-
- // invalid JSON
- {
- body: []byte(`{"peerU`),
- wantError: true,
- },
- }
-
- for i, tt := range tests {
- got := Member{}
- err := json.Unmarshal(tt.body, &got)
- if tt.wantError != (err != nil) {
- t.Errorf("#%d: want error %t, got %v", i, tt.wantError, err)
- continue
- }
-
- if !reflect.DeepEqual(tt.wantMember, got) {
- t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.wantMember, got)
- }
- }
-}
-
-func TestMemberCreateRequestUnmarshal(t *testing.T) {
- body := []byte(`{"peerURLs": ["http://127.0.0.1:8081", "https://127.0.0.1:8080"]}`)
- want := MemberCreateRequest{
- PeerURLs: types.URLs([]url.URL{
- {Scheme: "http", Host: "127.0.0.1:8081"},
- {Scheme: "https", Host: "127.0.0.1:8080"},
- }),
- }
-
- var req MemberCreateRequest
- if err := json.Unmarshal(body, &req); err != nil {
- t.Fatalf("Unmarshal returned unexpected err=%v", err)
- }
-
- if !reflect.DeepEqual(want, req) {
- t.Fatalf("Failed to unmarshal MemberCreateRequest: want=%#v, got=%#v", want, req)
- }
-}
-
-func TestMemberCreateRequestUnmarshalFail(t *testing.T) {
- tests := [][]byte{
- // invalid JSON
- []byte(``),
- []byte(`{`),
-
- // spot-check validation done in types.NewURLs
- []byte(`{"peerURLs": "foo"}`),
- []byte(`{"peerURLs": ["."]}`),
- []byte(`{"peerURLs": []}`),
- []byte(`{"peerURLs": ["http://127.0.0.1:2379/foo"]}`),
- []byte(`{"peerURLs": ["http://127.0.0.1"]}`),
- }
-
- for i, tt := range tests {
- var req MemberCreateRequest
- if err := json.Unmarshal(tt, &req); err == nil {
- t.Errorf("#%d: expected err, got nil", i)
- }
- }
-}
diff --git a/server/etcdserver/api/v2http/metrics.go b/server/etcdserver/api/v2http/metrics.go
deleted file mode 100644
index bdbd8c71c1b..00000000000
--- a/server/etcdserver/api/v2http/metrics.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2http
-
-import (
- "net/http"
- "strconv"
- "time"
-
- "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-var (
- incomingEvents = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "http",
- Name: "received_total",
- Help: "Counter of requests received into the system (successfully parsed and authd).",
- }, []string{"method"})
-
- failedEvents = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "http",
- Name: "failed_total",
- Help: "Counter of handle failures of requests (non-watches), by method (GET/PUT etc.) and code (400, 500 etc.).",
- }, []string{"method", "code"})
-
- successfulEventsHandlingSec = prometheus.NewHistogramVec(
- prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "http",
- Name: "successful_duration_seconds",
- Help: "Bucketed histogram of processing time (s) of successfully handled requests (non-watches), by method (GET/PUT etc.).",
-
- // lowest bucket start of upper bound 0.0005 sec (0.5 ms) with factor 2
- // highest bucket start of 0.0005 sec * 2^12 == 2.048 sec
- Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13),
- }, []string{"method"})
-)
-
-func init() {
- prometheus.MustRegister(incomingEvents)
- prometheus.MustRegister(failedEvents)
- prometheus.MustRegister(successfulEventsHandlingSec)
-}
-
-func reportRequestReceived(request etcdserverpb.Request) {
- incomingEvents.WithLabelValues(methodFromRequest(request)).Inc()
-}
-
-func reportRequestCompleted(request etcdserverpb.Request, startTime time.Time) {
- method := methodFromRequest(request)
- successfulEventsHandlingSec.WithLabelValues(method).Observe(time.Since(startTime).Seconds())
-}
-
-func reportRequestFailed(request etcdserverpb.Request, err error) {
- method := methodFromRequest(request)
- failedEvents.WithLabelValues(method, strconv.Itoa(codeFromError(err))).Inc()
-}
-
-func methodFromRequest(request etcdserverpb.Request) string {
- if request.Method == "GET" && request.Quorum {
- return "QGET"
- }
- return request.Method
-}
-
-func codeFromError(err error) int {
- if err == nil {
- return http.StatusInternalServerError
- }
- switch e := err.(type) {
- case *v2error.Error:
- return e.StatusCode()
- case *httptypes.HTTPError:
- return e.Code
- default:
- return http.StatusInternalServerError
- }
-}
diff --git a/server/etcdserver/api/v2http/testdata/ca.pem b/server/etcdserver/api/v2http/testdata/ca.pem
deleted file mode 100644
index 60cbee3bb4b..00000000000
--- a/server/etcdserver/api/v2http/testdata/ca.pem
+++ /dev/null
@@ -1,19 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDEjCCAfqgAwIBAgIIYpX+8HgWGfkwDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE
-AxMKZXRjZCB0ZXN0czAeFw0xNTExMjQwMzA1MDBaFw0yMDExMjIwMzA1MDBaMBUx
-EzARBgNVBAMTCmV0Y2QgdGVzdHMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
-AoIBAQDa9PkwEwiBD8mB+VIKz5r5gRHnNF4Icj6T6R/RsdatecQe6vU0EU4FXtKZ
-drWnCGlATyrQooqHpb+rDc7CUt3mXrIxrNkcGTMaesF7P0GWxVkyOGSjJMxGBv3e
-bAZknBe4eLMi68L1aT/uYmxcp/B3L2mfdFtc1Gd6mYJpNm1PgilRyIrO0mY5ysIX
-4WHCa3yudAv8HrFbQcw7l7OyKA6uSWg6h07lE3d5jw5YOly+hz0iaRtzhb4tJrYD
-Lm1tehb0nnoLuW6yYblRSoyBVDT50MFVlyvW40Po5WkOXw/wnsnyxWRR4yqU23wq
-quQU0HXJEBLFnT+KbLOQ0EAE35vXAgMBAAGjZjBkMA4GA1UdDwEB/wQEAwIBBjAS
-BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBSbUCGB95ochDrbEZlzGGYuA7xu
-xjAfBgNVHSMEGDAWgBSbUCGB95ochDrbEZlzGGYuA7xuxjANBgkqhkiG9w0BAQsF
-AAOCAQEAardO/SGCu7Snz3YRBUinzpZEUFTFend+FJtBkxBXCao1RvTXg8PBMkza
-LUsaR4mLsGoXLIbNCoIinvVG0QULYCZe11N3l1L0G2g5uhEM4MfJ2rwrMD0o17i+
-nwNRRE3tfKAlWhYQg+4ye36kQVxASPniHjdQgjKYUFTNXdyG6DzuAclaVte9iVw6
-cWl61fB2CZya3+uMtih8t/Kgl2KbMO2PvNByfnDjKmW+v58qHbXyoJZqnpvDn14+
-p2Ox+AvvxYiEiUIvFdWy101QB7NJMCtdwq6oG6OvIOgXzLgitTFSq4kfWDfupQjW
-iFoQ+vWmYhK5ld0nBaiz+JmHuemK7A==
------END CERTIFICATE-----
diff --git a/server/etcdserver/api/v2http/testdata/otheruser.pem b/server/etcdserver/api/v2http/testdata/otheruser.pem
deleted file mode 100644
index d0c74eb9f8d..00000000000
--- a/server/etcdserver/api/v2http/testdata/otheruser.pem
+++ /dev/null
@@ -1,20 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDOTCCAiGgAwIBAgIINYpsso1f3SswDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE
-AxMKZXRjZCB0ZXN0czAeFw0xNTExMjQwMzA4MDBaFw0xNjExMjMwMzA4MDBaMBQx
-EjAQBgNVBAMTCW90aGVydXNlcjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
-ggEBAPOAUa5GblwIjHTEnox2c/Am9jV1TMvzBuVXxnp2UnNHMNwstAooFrEs/Z+d
-ft5AOsooP6zVuM3eBQa4i9huJbVNDfPU2H94yA89jYfJYUgo7C838V6NjGsCCptQ
-WzkKPNlDbT9xA/7XpIUJ2WltuYDRrjWq8pXQONqTjcg5n4l0JO8xdHJHRUkFQ76F
-1npXeLndgGaP11lqzpYlglEGi5URhzAT1xxQ0hLSe8WNmiCxxkq++C8Gx4sPg9mX
-M94aoJDzZSnoaqDxckbP/7Q0ZKe/fVdCFkd5+jqT4Mt7hwmz9jTCHcVnAz4EKI+t
-rbWgbCfMK6013GotXz7InStVe+MCAwEAAaOBjTCBijAOBgNVHQ8BAf8EBAMCBaAw
-HQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYD
-VR0OBBYEFFwMmf+pnaejmri6y1T+lfU+MBq/MB8GA1UdIwQYMBaAFJtQIYH3mhyE
-OtsRmXMYZi4DvG7GMAsGA1UdEQQEMAKCADANBgkqhkiG9w0BAQsFAAOCAQEACOn6
-mec29MTMGPt/EPOmSyhvTKSwH+5YWjCbyUFeoB8puxrJlIphK4mvT+sXp2wzno89
-FVCliO/rJurdErKvyOjlK1QrVGPYIt7Wz9ssAfvlwCyBM8PqgEG8dJN9aAkf2h4r
-Ye+hBh1y6Nnataf7lxe9mqAOvD/7wVIgzjCnMD1q5QSY2Mln3HwVQXtbZFbY363Z
-X9Fk3PUpjJSX9jbEz9kIlT8AJAdxl6GB8Z9B8PrA8qf4Bhk15ICRHxb67EhDrGWV
-8q7ArU2XBqs/+GWpUIMoGKNZv+K+/SksZK1KnzaUvApUCJzt+ac+p8HOgMdvDRgr
-GfVVJqcZgyEmeczy0A==
------END CERTIFICATE-----
diff --git a/server/etcdserver/api/v2http/testdata/user.pem b/server/etcdserver/api/v2http/testdata/user.pem
deleted file mode 100644
index 0fc2108651b..00000000000
--- a/server/etcdserver/api/v2http/testdata/user.pem
+++ /dev/null
@@ -1,20 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDNDCCAhygAwIBAgIIcQ0DAfgevocwDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE
-AxMKZXRjZCB0ZXN0czAeFw0xNTExMjQwMzA4MDBaFw0xNjExMjMwMzA4MDBaMA8x
-DTALBgNVBAMTBHVzZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQD0
-+3Lm1SmUJJLufaFTYz+e5qyQEshNRyeAhXIeZ1aw+yBjslXGZQ3/uGOwnOnGqUeA
-Nidc9ty4NsK6RVppHlezUrBnpl4hws8vHWFKZpU2R6kKL8EYLmg+iVqEBj7XqfAp
-8bJqqZI3KOqLXpRH55mA69KP7VEK9ngTVR/tERSrUPT8jcjwbvhSOqD8Qk07BUDR
-6RpDr94Mnaf+fMGG36Sh7iUl+i4Oh6FFar+7+b0+5Bhs2/6uVsK4A1Z3jqqfSQH8
-q8Wf5h9Ka4aqGSw4ia5G3Uw7Jsl2aDgpJ7uwJo1k8SclbMYnYdhZuo+U+esY/Fai
-YdbjG+AroZ+y9TB8bMlHAgMBAAGjgY0wgYowDgYDVR0PAQH/BAQDAgWgMB0GA1Ud
-JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQW
-BBRuTt0lJIVKYaz76aSxl/MQOLRwfDAfBgNVHSMEGDAWgBSbUCGB95ochDrbEZlz
-GGYuA7xuxjALBgNVHREEBDACggAwDQYJKoZIhvcNAQELBQADggEBABLRWZm+Lgjs
-c5qDXbgOJW2pR630syY8ixR9c6HvzPVJim8mFioMX+xrlbOC6BmOUlOb9j83bTKn
-aOg/0xlpxNbd8QYzgRxZmHZLULPdiNeeRvIzsrzrH88+inrmZhRXRVcHjdO6CG6t
-hCdDdRiNU6GkF7dPna0xNcEOKe2wUfzd1ZtKOqzi1w+fKjSeMplZomeWgP4WRvkh
-JJ/0ujlMMckgyTxRh8EEaJ35OnpXX7EdipoWhOMmiUnlPqye2icC8Y+CMdZsrod6
-nkoEQnXDCLf/Iv0qj7B9iKbxn7t3QDVxY4UILUReDuD8yrGULlGOl//aY/T3pkZ6
-R5trduZhI3o=
------END CERTIFICATE-----
diff --git a/server/etcdserver/api/v2stats/queue.go b/server/etcdserver/api/v2stats/queue.go
index 2c3dff3d0ff..e16cec159c7 100644
--- a/server/etcdserver/api/v2stats/queue.go
+++ b/server/etcdserver/api/v2stats/queue.go
@@ -65,7 +65,7 @@ func (q *statsQueue) Insert(p *RequestStats) {
q.back = (q.back + 1) % queueCapacity
- if q.size == queueCapacity { //dequeue
+ if q.size == queueCapacity { // dequeue
q.totalReqSize -= q.items[q.front].Size
q.front = (q.back + 1) % queueCapacity
} else {
@@ -74,7 +74,6 @@ func (q *statsQueue) Insert(p *RequestStats) {
q.items[q.back] = p
q.totalReqSize += q.items[q.back].Size
-
}
// Rate function returns the package rate and byte rate
diff --git a/server/etcdserver/api/v2stats/server.go b/server/etcdserver/api/v2stats/server.go
index 45effb1edc5..e8d218a7209 100644
--- a/server/etcdserver/api/v2stats/server.go
+++ b/server/etcdserver/api/v2stats/server.go
@@ -20,7 +20,7 @@ import (
"sync"
"time"
- "go.etcd.io/etcd/raft/v3"
+ "go.etcd.io/raft/v3"
)
// ServerStats encapsulates various statistics about an EtcdServer and its
@@ -59,7 +59,7 @@ type serverStats struct {
StartTime time.Time `json:"startTime"`
} `json:"leaderInfo"`
- RecvAppendRequestCnt uint64 `json:"recvAppendRequestCnt,"`
+ RecvAppendRequestCnt uint64 `json:"recvAppendRequestCnt"`
RecvingPkgRate float64 `json:"recvPkgRate,omitempty"`
RecvingBandwidthRate float64 `json:"recvBandwidthRate,omitempty"`
diff --git a/server/etcdserver/api/v2stats/stats.go b/server/etcdserver/api/v2stats/stats.go
deleted file mode 100644
index cbf60215a24..00000000000
--- a/server/etcdserver/api/v2stats/stats.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package v2stats defines a standard interface for etcd cluster statistics.
-package v2stats
-
-type Stats interface {
- // SelfStats returns the struct representing statistics of this server
- SelfStats() []byte
- // LeaderStats returns the statistics of all followers in the cluster
- // if this server is leader. Otherwise, nil is returned.
- LeaderStats() []byte
- // StoreStats returns statistics of the store backing this EtcdServer
- StoreStats() []byte
-}
diff --git a/server/etcdserver/api/v2store/event_history.go b/server/etcdserver/api/v2store/event_history.go
index c9bcdca0513..11c8b0176f7 100644
--- a/server/etcdserver/api/v2store/event_history.go
+++ b/server/etcdserver/api/v2store/event_history.go
@@ -125,5 +125,4 @@ func (eh *EventHistory) clone() *EventHistory {
Queue: clonedQueue,
LastIndex: eh.LastIndex,
}
-
}
diff --git a/server/etcdserver/api/v2store/event_queue.go b/server/etcdserver/api/v2store/event_queue.go
index 7ea03de8c9a..aa2a645d6ff 100644
--- a/server/etcdserver/api/v2store/event_queue.go
+++ b/server/etcdserver/api/v2store/event_queue.go
@@ -26,7 +26,7 @@ func (eq *eventQueue) insert(e *Event) {
eq.Events[eq.Back] = e
eq.Back = (eq.Back + 1) % eq.Capacity
- if eq.Size == eq.Capacity { //dequeue
+ if eq.Size == eq.Capacity { // dequeue
eq.Front = (eq.Front + 1) % eq.Capacity
} else {
eq.Size++
diff --git a/server/etcdserver/api/v2store/event_test.go b/server/etcdserver/api/v2store/event_test.go
index 6fc25fd74c9..11553e035a0 100644
--- a/server/etcdserver/api/v2store/event_test.go
+++ b/server/etcdserver/api/v2store/event_test.go
@@ -24,7 +24,6 @@ import (
// Add 200 events into that queue, and test if the
// previous 100 events have been swapped out.
func TestEventQueue(t *testing.T) {
-
eh := newEventHistory(100)
// Add
@@ -114,7 +113,6 @@ func TestEventIndexHistoryCleared(t *testing.T) {
// Add 1000 events into that queue, and test if scanning
// works still for previous events.
func TestFullEventQueue(t *testing.T) {
-
eh := newEventHistory(10)
// Add
diff --git a/server/etcdserver/api/v2store/heap_test.go b/server/etcdserver/api/v2store/heap_test.go
index 9c18e150d7c..7753920d595 100644
--- a/server/etcdserver/api/v2store/heap_test.go
+++ b/server/etcdserver/api/v2store/heap_test.go
@@ -21,7 +21,7 @@ import (
)
func TestHeapPushPop(t *testing.T) {
- h := newTtlKeyHeap()
+ h := newTTLKeyHeap()
// add from older expire time to earlier expire time
// the path is equal to ttl from now
@@ -41,11 +41,10 @@ func TestHeapPushPop(t *testing.T) {
}
min = node.ExpireTime
}
-
}
func TestHeapUpdate(t *testing.T) {
- h := newTtlKeyHeap()
+ h := newTTLKeyHeap()
kvs := make([]*node, 10)
@@ -88,7 +87,5 @@ func TestHeapUpdate(t *testing.T) {
t.Fatal("heap sort wrong!")
}
}
-
}
-
}
diff --git a/server/etcdserver/api/v2store/node.go b/server/etcdserver/api/v2store/node.go
index 9fe6263e2e8..7e5c3e8deb5 100644
--- a/server/etcdserver/api/v2store/node.go
+++ b/server/etcdserver/api/v2store/node.go
@@ -19,9 +19,9 @@ import (
"sort"
"time"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
-
"github.com/jonboulle/clockwork"
+
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
)
// explanations of Compare function result
@@ -277,7 +277,6 @@ func (n *node) Repr(recursive, sorted bool, clock clockwork.Clock) *NodeExtern {
i := 0
for _, child := range children {
-
if child.IsHidden() { // get will not list hidden node
continue
}
diff --git a/server/etcdserver/api/v2store/node_extern.go b/server/etcdserver/api/v2store/node_extern.go
index b3bf5f3c976..ff2e13e6391 100644
--- a/server/etcdserver/api/v2store/node_extern.go
+++ b/server/etcdserver/api/v2store/node_extern.go
@@ -62,7 +62,6 @@ func (eNode *NodeExtern) loadInternalNode(n *node, recursive, sorted bool, clock
if sorted {
sort.Sort(eNode.Nodes)
}
-
} else { // node is a file
value, _ := n.Read()
eNode.Value = &value
diff --git a/server/etcdserver/api/v2store/node_extern_test.go b/server/etcdserver/api/v2store/node_extern_test.go
index 790cfb10602..f2210f98f7d 100644
--- a/server/etcdserver/api/v2store/node_extern_test.go
+++ b/server/etcdserver/api/v2store/node_extern_test.go
@@ -19,7 +19,7 @@ import (
"testing"
"time"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
+ "github.com/stretchr/testify/assert"
)
func TestNodeExternClone(t *testing.T) {
@@ -56,15 +56,15 @@ func TestNodeExternClone(t *testing.T) {
gNode := eNode.Clone()
// Check the clone is as expected
- testutil.AssertEqual(t, gNode.Key, key)
- testutil.AssertEqual(t, gNode.TTL, ttl)
- testutil.AssertEqual(t, gNode.CreatedIndex, ci)
- testutil.AssertEqual(t, gNode.ModifiedIndex, mi)
+ assert.Equal(t, key, gNode.Key)
+ assert.Equal(t, ttl, gNode.TTL)
+ assert.Equal(t, ci, gNode.CreatedIndex)
+ assert.Equal(t, mi, gNode.ModifiedIndex)
// values should be the same
- testutil.AssertEqual(t, *gNode.Value, val)
- testutil.AssertEqual(t, *gNode.Expiration, exp)
- testutil.AssertEqual(t, len(gNode.Nodes), len(childs))
- testutil.AssertEqual(t, *gNode.Nodes[0], child)
+ assert.Equal(t, val, *gNode.Value)
+ assert.Equal(t, exp, *gNode.Expiration)
+ assert.Len(t, gNode.Nodes, len(childs))
+ assert.Equal(t, child, *gNode.Nodes[0])
// but pointers should differ
if gNode.Value == eNode.Value {
t.Fatalf("expected value pointers to differ, but got same!")
@@ -76,12 +76,12 @@ func TestNodeExternClone(t *testing.T) {
t.Fatalf("expected nodes pointers to differ, but got same!")
}
// Original should be the same
- testutil.AssertEqual(t, eNode.Key, key)
- testutil.AssertEqual(t, eNode.TTL, ttl)
- testutil.AssertEqual(t, eNode.CreatedIndex, ci)
- testutil.AssertEqual(t, eNode.ModifiedIndex, mi)
- testutil.AssertEqual(t, eNode.Value, valp)
- testutil.AssertEqual(t, eNode.Expiration, expp)
+ assert.Equal(t, key, eNode.Key)
+ assert.Equal(t, ttl, eNode.TTL)
+ assert.Equal(t, ci, eNode.CreatedIndex)
+ assert.Equal(t, mi, eNode.ModifiedIndex)
+ assert.Equal(t, valp, eNode.Value)
+ assert.Equal(t, expp, eNode.Expiration)
if !sameSlice(eNode.Nodes, childs) {
t.Fatalf("expected nodes pointer to same, but got different!")
}
@@ -89,15 +89,15 @@ func TestNodeExternClone(t *testing.T) {
gNode.Key = "/baz"
gNode.TTL = 0
gNode.Nodes[0].Key = "uno"
- testutil.AssertEqual(t, eNode.Key, key)
- testutil.AssertEqual(t, eNode.TTL, ttl)
- testutil.AssertEqual(t, eNode.CreatedIndex, ci)
- testutil.AssertEqual(t, eNode.ModifiedIndex, mi)
- testutil.AssertEqual(t, *eNode.Nodes[0], child)
+ assert.Equal(t, key, eNode.Key)
+ assert.Equal(t, ttl, eNode.TTL)
+ assert.Equal(t, ci, eNode.CreatedIndex)
+ assert.Equal(t, mi, eNode.ModifiedIndex)
+ assert.Equal(t, child, *eNode.Nodes[0])
// Change the original and ensure the clone is not affected
eNode.Key = "/wuf"
- testutil.AssertEqual(t, eNode.Key, "/wuf")
- testutil.AssertEqual(t, gNode.Key, "/baz")
+ assert.Equal(t, "/wuf", eNode.Key)
+ assert.Equal(t, "/baz", gNode.Key)
}
func sameSlice(a, b []*NodeExtern) bool {
diff --git a/server/etcdserver/api/v2store/stats.go b/server/etcdserver/api/v2store/stats.go
index 9151799da7b..55ede56d10c 100644
--- a/server/etcdserver/api/v2store/stats.go
+++ b/server/etcdserver/api/v2store/stats.go
@@ -104,7 +104,7 @@ func (s *Stats) clone() *Stats {
}
}
-func (s *Stats) toJson() []byte {
+func (s *Stats) toJSON() []byte {
b, _ := json.Marshal(s)
return b
}
diff --git a/server/etcdserver/api/v2store/stats_test.go b/server/etcdserver/api/v2store/stats_test.go
index 1780a8aff8b..dc436b7f346 100644
--- a/server/etcdserver/api/v2store/stats_test.go
+++ b/server/etcdserver/api/v2store/stats_test.go
@@ -18,95 +18,95 @@ import (
"testing"
"time"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
+ "github.com/stretchr/testify/assert"
)
-// Ensure that a successful Get is recorded in the stats.
+// TestStoreStatsGetSuccess ensures that a successful Get is recorded in the stats.
func TestStoreStatsGetSuccess(t *testing.T) {
s := newStore()
s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})
s.Get("/foo", false, false)
- testutil.AssertEqual(t, uint64(1), s.Stats.GetSuccess, "")
+ assert.Equal(t, uint64(1), s.Stats.GetSuccess)
}
-// Ensure that a failed Get is recorded in the stats.
+// TestStoreStatsGetFail ensures that a failed Get is recorded in the stats.
func TestStoreStatsGetFail(t *testing.T) {
s := newStore()
s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})
s.Get("/no_such_key", false, false)
- testutil.AssertEqual(t, uint64(1), s.Stats.GetFail, "")
+ assert.Equal(t, uint64(1), s.Stats.GetFail)
}
-// Ensure that a successful Create is recorded in the stats.
+// TestStoreStatsCreateSuccess ensures that a successful Create is recorded in the stats.
func TestStoreStatsCreateSuccess(t *testing.T) {
s := newStore()
s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})
- testutil.AssertEqual(t, uint64(1), s.Stats.CreateSuccess, "")
+ assert.Equal(t, uint64(1), s.Stats.CreateSuccess)
}
-// Ensure that a failed Create is recorded in the stats.
+// TestStoreStatsCreateFail ensures that a failed Create is recorded in the stats.
func TestStoreStatsCreateFail(t *testing.T) {
s := newStore()
s.Create("/foo", true, "", false, TTLOptionSet{ExpireTime: Permanent})
s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})
- testutil.AssertEqual(t, uint64(1), s.Stats.CreateFail, "")
+ assert.Equal(t, uint64(1), s.Stats.CreateFail)
}
-// Ensure that a successful Update is recorded in the stats.
+// TestStoreStatsUpdateSuccess ensures that a successful Update is recorded in the stats.
func TestStoreStatsUpdateSuccess(t *testing.T) {
s := newStore()
s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})
s.Update("/foo", "baz", TTLOptionSet{ExpireTime: Permanent})
- testutil.AssertEqual(t, uint64(1), s.Stats.UpdateSuccess, "")
+ assert.Equal(t, uint64(1), s.Stats.UpdateSuccess)
}
-// Ensure that a failed Update is recorded in the stats.
+// TestStoreStatsUpdateFail ensures that a failed Update is recorded in the stats.
func TestStoreStatsUpdateFail(t *testing.T) {
s := newStore()
s.Update("/foo", "bar", TTLOptionSet{ExpireTime: Permanent})
- testutil.AssertEqual(t, uint64(1), s.Stats.UpdateFail, "")
+ assert.Equal(t, uint64(1), s.Stats.UpdateFail)
}
-// Ensure that a successful CAS is recorded in the stats.
+// TestStoreStatsCompareAndSwapSuccess ensures that a successful CAS is recorded in the stats.
func TestStoreStatsCompareAndSwapSuccess(t *testing.T) {
s := newStore()
s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})
s.CompareAndSwap("/foo", "bar", 0, "baz", TTLOptionSet{ExpireTime: Permanent})
- testutil.AssertEqual(t, uint64(1), s.Stats.CompareAndSwapSuccess, "")
+ assert.Equal(t, uint64(1), s.Stats.CompareAndSwapSuccess)
}
-// Ensure that a failed CAS is recorded in the stats.
+// TestStoreStatsCompareAndSwapFail ensures that a failed CAS is recorded in the stats.
func TestStoreStatsCompareAndSwapFail(t *testing.T) {
s := newStore()
s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})
s.CompareAndSwap("/foo", "wrong_value", 0, "baz", TTLOptionSet{ExpireTime: Permanent})
- testutil.AssertEqual(t, uint64(1), s.Stats.CompareAndSwapFail, "")
+ assert.Equal(t, uint64(1), s.Stats.CompareAndSwapFail)
}
-// Ensure that a successful Delete is recorded in the stats.
+// TestStoreStatsDeleteSuccess ensures that a successful Delete is recorded in the stats.
func TestStoreStatsDeleteSuccess(t *testing.T) {
s := newStore()
s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})
s.Delete("/foo", false, false)
- testutil.AssertEqual(t, uint64(1), s.Stats.DeleteSuccess, "")
+ assert.Equal(t, uint64(1), s.Stats.DeleteSuccess)
}
-// Ensure that a failed Delete is recorded in the stats.
+// TestStoreStatsDeleteFail ensures that a failed Delete is recorded in the stats.
func TestStoreStatsDeleteFail(t *testing.T) {
s := newStore()
s.Delete("/foo", false, false)
- testutil.AssertEqual(t, uint64(1), s.Stats.DeleteFail, "")
+ assert.Equal(t, uint64(1), s.Stats.DeleteFail)
}
-//Ensure that the number of expirations is recorded in the stats.
+// TestStoreStatsExpireCount ensures that the number of expirations is recorded in the stats.
func TestStoreStatsExpireCount(t *testing.T) {
s := newStore()
fc := newFakeClock()
s.clock = fc
s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)})
- testutil.AssertEqual(t, uint64(0), s.Stats.ExpireCount, "")
+ assert.Equal(t, uint64(0), s.Stats.ExpireCount)
fc.Advance(600 * time.Millisecond)
s.DeleteExpiredKeys(fc.Now())
- testutil.AssertEqual(t, uint64(1), s.Stats.ExpireCount, "")
+ assert.Equal(t, uint64(1), s.Stats.ExpireCount)
}
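// For context on the assertion rewrite above: testify's assert.Equal takes the
// expected value before the observed one, which is why the migrated calls put the
// literal first. A minimal, self-contained sketch of that convention, assuming
// stretchr/testify v1; the adder type below is hypothetical and for illustration only.
package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

type adder struct{ sum uint64 }

func (a *adder) add(n uint64) error {
	if n == 0 {
		return errors.New("nothing to add")
	}
	a.sum += n
	return nil
}

func TestAdder(t *testing.T) {
	a := &adder{}
	// require.* aborts the test on failure, so it suits setup steps whose failure
	// makes later checks meaningless — mirroring require.NoError in the hunks above.
	require.NoError(t, a.add(1))
	// assert.Equal: expected value first, observed value second.
	assert.Equal(t, uint64(1), a.sum)
}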
diff --git a/server/etcdserver/api/v2store/store.go b/server/etcdserver/api/v2store/store.go
index 32cb26ad964..0c6f7b516e1 100644
--- a/server/etcdserver/api/v2store/store.go
+++ b/server/etcdserver/api/v2store/store.go
@@ -23,10 +23,10 @@ import (
"sync"
"time"
+ "github.com/jonboulle/clockwork"
+
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
-
- "github.com/jonboulle/clockwork"
)
// The default version to set when the store is first initialized.
@@ -99,7 +99,7 @@ func newStore(namespaces ...string) *store {
}
s.Stats = newStats()
s.WatcherHub = newWatchHub(1000)
- s.ttlKeyHeap = newTtlKeyHeap()
+ s.ttlKeyHeap = newTTLKeyHeap()
s.readonlySet = types.NewUnsafeSet(append(namespaces, "/")...)
return s
}
@@ -257,8 +257,8 @@ func getCompareFailCause(n *node, which int, prevValue string, prevIndex uint64)
}
func (s *store) CompareAndSwap(nodePath string, prevValue string, prevIndex uint64,
- value string, expireOpts TTLOptionSet) (*Event, error) {
-
+ value string, expireOpts TTLOptionSet,
+) (*Event, error) {
var err *v2error.Error
s.worldLock.Lock()
@@ -535,7 +535,7 @@ func (s *store) Update(nodePath string, newValue string, expireOpts TTLOptionSet
eNode := e.Node
if err := n.Write(newValue, nextIndex); err != nil {
- return nil, fmt.Errorf("nodePath %v : %v", nodePath, err)
+ return nil, fmt.Errorf("nodePath %v : %w", nodePath, err)
}
if n.IsDir() {
@@ -564,8 +564,8 @@ func (s *store) Update(nodePath string, newValue string, expireOpts TTLOptionSet
}
func (s *store) internalCreate(nodePath string, dir bool, value string, unique, replace bool,
- expireTime time.Time, action string) (*Event, *v2error.Error) {
-
+ expireTime time.Time, action string,
+) (*Event, *v2error.Error) {
currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1
if unique { // append unique item under the node path
@@ -589,7 +589,6 @@ func (s *store) internalCreate(nodePath string, dir bool, value string, unique,
// walk through the nodePath, create dirs and get the last directory node
d, err := s.walk(dirName, s.checkDir)
-
if err != nil {
s.Stats.Inc(SetFail)
reportWriteFailure(action)
@@ -624,7 +623,6 @@ func (s *store) internalCreate(nodePath string, dir bool, value string, unique,
eNode.Value = &valueCopy
n = newKV(s, nodePath, value, nextIndex, d, expireTime)
-
} else { // create directory
eNode.Dir = true
@@ -653,7 +651,6 @@ func (s *store) internalGet(nodePath string) (*node, *v2error.Error) {
nodePath = path.Clean(path.Join("/", nodePath))
walkFunc := func(parent *node, name string) (*node, *v2error.Error) {
-
if !parent.IsDir() {
err := v2error.NewError(v2error.EcodeNotDir, parent.Path, s.CurrentIndex)
return nil, err
@@ -668,7 +665,6 @@ func (s *store) internalGet(nodePath string) (*node, *v2error.Error) {
}
f, err := s.walk(nodePath, walkFunc)
-
if err != nil {
return nil, err
}
@@ -707,7 +703,6 @@ func (s *store) DeleteExpiredKeys(cutoff time.Time) {
s.WatcherHub.notify(e)
}
-
}
// checkDir will check whether the component is a directory under parent node.
@@ -776,20 +771,21 @@ func (s *store) Recovery(state []byte) error {
s.worldLock.Lock()
defer s.worldLock.Unlock()
err := json.Unmarshal(state, s)
-
if err != nil {
return err
}
- s.ttlKeyHeap = newTtlKeyHeap()
+ s.ttlKeyHeap = newTTLKeyHeap()
s.Root.recoverAndclean()
return nil
}
+//revive:disable:var-naming
func (s *store) JsonStats() []byte {
+ //revive:enable:var-naming
s.Stats.Watchers = uint64(s.WatcherHub.count)
- return s.Stats.toJson()
+ return s.Stats.toJSON()
}
func (s *store) HasTTLKeys() bool {
diff --git a/server/etcdserver/api/v2store/store_bench_test.go b/server/etcdserver/api/v2store/store_bench_test.go
index f8f939aff19..66eb1b90ece 100644
--- a/server/etcdserver/api/v2store/store_bench_test.go
+++ b/server/etcdserver/api/v2store/store_bench_test.go
@@ -156,7 +156,6 @@ func BenchmarkWatchWithSetBatch(b *testing.B) {
for i := 0; i < b.N; i++ {
<-watchers[i].EventChan()
}
-
}
func BenchmarkWatchOneKey(b *testing.B) {
@@ -174,7 +173,7 @@ func BenchmarkWatchOneKey(b *testing.B) {
}
}
-func benchStoreSet(b *testing.B, valueSize int, process func(interface{}) ([]byte, error)) {
+func benchStoreSet(b *testing.B, valueSize int, process func(any) ([]byte, error)) {
s := newStore()
b.StopTimer()
kvs, size := generateNRandomKV(b.N, valueSize)
diff --git a/server/etcdserver/api/v2store/store_ttl_test.go b/server/etcdserver/api/v2store/store_ttl_test.go
index 22a9f79da5e..8e47b881095 100644
--- a/server/etcdserver/api/v2store/store_ttl_test.go
+++ b/server/etcdserver/api/v2store/store_ttl_test.go
@@ -18,33 +18,34 @@ import (
"testing"
"time"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
-
"github.com/jonboulle/clockwork"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
)
-// Ensure that any TTL <= minExpireTime becomes Permanent
+// TestMinExpireTime ensures that any TTL <= minExpireTime becomes Permanent
func TestMinExpireTime(t *testing.T) {
s := newStore()
- fc := clockwork.NewFakeClock()
+ fc := clockwork.NewFakeClockAt(time.Date(1984, time.April, 4, 0, 0, 0, 0, time.UTC))
s.clock = fc
// FakeClock starts at 0, so minExpireTime should be far in the future.. but just in case
- testutil.AssertTrue(t, minExpireTime.After(fc.Now()), "minExpireTime should be ahead of FakeClock!")
+ assert.Truef(t, minExpireTime.After(fc.Now()), "minExpireTime should be ahead of FakeClock!")
s.Create("/foo", false, "Y", false, TTLOptionSet{ExpireTime: fc.Now().Add(3 * time.Second)})
fc.Advance(5 * time.Second)
// Ensure it hasn't expired
s.DeleteExpiredKeys(fc.Now())
var eidx uint64 = 1
e, err := s.Get("/foo", true, false)
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "get")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
- testutil.AssertEqual(t, e.Node.TTL, int64(0))
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "get", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
+ assert.Equal(t, int64(0), e.Node.TTL)
}
-// Ensure that the store can recursively retrieve a directory listing.
+// TestStoreGetDirectory ensures that the store can recursively retrieve a directory listing.
// Note that hidden files should not be returned.
func TestStoreGetDirectory(t *testing.T) {
s := newStore()
@@ -59,20 +60,20 @@ func TestStoreGetDirectory(t *testing.T) {
s.Create("/foo/baz/ttl", false, "Y", false, TTLOptionSet{ExpireTime: fc.Now().Add(time.Second * 3)})
var eidx uint64 = 7
e, err := s.Get("/foo", true, false)
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "get")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
- testutil.AssertEqual(t, len(e.Node.Nodes), 2)
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "get", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
+ assert.Len(t, e.Node.Nodes, 2)
var bazNodes NodeExterns
for _, node := range e.Node.Nodes {
switch node.Key {
case "/foo/bar":
- testutil.AssertEqual(t, *node.Value, "X")
- testutil.AssertEqual(t, node.Dir, false)
+ assert.Equal(t, "X", *node.Value)
+ assert.False(t, node.Dir)
case "/foo/baz":
- testutil.AssertEqual(t, node.Dir, true)
- testutil.AssertEqual(t, len(node.Nodes), 2)
+ assert.True(t, node.Dir)
+ assert.Len(t, node.Nodes, 2)
bazNodes = node.Nodes
default:
t.Errorf("key = %s, not matched", node.Key)
@@ -81,19 +82,19 @@ func TestStoreGetDirectory(t *testing.T) {
for _, node := range bazNodes {
switch node.Key {
case "/foo/baz/bat":
- testutil.AssertEqual(t, *node.Value, "Y")
- testutil.AssertEqual(t, node.Dir, false)
+ assert.Equal(t, "Y", *node.Value)
+ assert.False(t, node.Dir)
case "/foo/baz/ttl":
- testutil.AssertEqual(t, *node.Value, "Y")
- testutil.AssertEqual(t, node.Dir, false)
- testutil.AssertEqual(t, node.TTL, int64(3))
+ assert.Equal(t, "Y", *node.Value)
+ assert.False(t, node.Dir)
+ assert.Equal(t, int64(3), node.TTL)
default:
t.Errorf("key = %s, not matched", node.Key)
}
}
}
-// Ensure that the store can update the TTL on a value.
+// TestStoreUpdateValueTTL ensures that the store can update the TTL on a value.
func TestStoreUpdateValueTTL(t *testing.T) {
s := newStore()
fc := newFakeClock()
@@ -102,18 +103,20 @@ func TestStoreUpdateValueTTL(t *testing.T) {
var eidx uint64 = 2
s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})
_, err := s.Update("/foo", "baz", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)})
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
e, _ := s.Get("/foo", false, false)
- testutil.AssertEqual(t, *e.Node.Value, "baz")
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
+ assert.Equal(t, "baz", *e.Node.Value)
+ assert.Equal(t, eidx, e.EtcdIndex)
fc.Advance(600 * time.Millisecond)
s.DeleteExpiredKeys(fc.Now())
e, err = s.Get("/foo", false, false)
- testutil.AssertNil(t, e)
- testutil.AssertEqual(t, err.(*v2error.Error).ErrorCode, v2error.EcodeKeyNotFound)
+ assert.Nil(t, e)
+ var v2Err *v2error.Error
+ require.ErrorAs(t, err, &v2Err)
+ assert.Equal(t, v2error.EcodeKeyNotFound, v2Err.ErrorCode)
}
-// Ensure that the store can update the TTL on a directory.
+// TestStoreUpdateDirTTL ensures that the store can update the TTL on a directory.
func TestStoreUpdateDirTTL(t *testing.T) {
s := newStore()
fc := newFakeClock()
@@ -121,25 +124,27 @@ func TestStoreUpdateDirTTL(t *testing.T) {
var eidx uint64 = 3
_, err := s.Create("/foo", true, "", false, TTLOptionSet{ExpireTime: Permanent})
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
_, err = s.Create("/foo/bar", false, "baz", false, TTLOptionSet{ExpireTime: Permanent})
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
e, err := s.Update("/foo/bar", "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)})
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.Node.Dir, false)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
+ require.NoError(t, err)
+ assert.False(t, e.Node.Dir)
+ assert.Equal(t, eidx, e.EtcdIndex)
e, _ = s.Get("/foo/bar", false, false)
- testutil.AssertEqual(t, *e.Node.Value, "")
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
+ assert.Equal(t, "", *e.Node.Value)
+ assert.Equal(t, eidx, e.EtcdIndex)
fc.Advance(600 * time.Millisecond)
s.DeleteExpiredKeys(fc.Now())
e, err = s.Get("/foo/bar", false, false)
- testutil.AssertNil(t, e)
- testutil.AssertEqual(t, err.(*v2error.Error).ErrorCode, v2error.EcodeKeyNotFound)
+ assert.Nil(t, e)
+ var v2Err *v2error.Error
+ require.ErrorAs(t, err, &v2Err)
+ assert.Equal(t, v2error.EcodeKeyNotFound, v2Err.ErrorCode)
}
-// Ensure that the store can watch for key expiration.
+// TestStoreWatchExpire ensures that the store can watch for key expiration.
func TestStoreWatchExpire(t *testing.T) {
s := newStore()
fc := newFakeClock()
@@ -151,33 +156,33 @@ func TestStoreWatchExpire(t *testing.T) {
s.Create("/foodir", true, "", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)})
w, _ := s.Watch("/", true, false, 0)
- testutil.AssertEqual(t, w.StartIndex(), eidx)
+ assert.Equal(t, eidx, w.StartIndex())
c := w.EventChan()
e := nbselect(c)
- testutil.AssertNil(t, e)
+ assert.Nil(t, e)
fc.Advance(600 * time.Millisecond)
s.DeleteExpiredKeys(fc.Now())
eidx = 4
e = nbselect(c)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "expire")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "expire", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
w, _ = s.Watch("/", true, false, 5)
eidx = 6
- testutil.AssertEqual(t, w.StartIndex(), eidx)
+ assert.Equal(t, eidx, w.StartIndex())
e = nbselect(w.EventChan())
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "expire")
- testutil.AssertEqual(t, e.Node.Key, "/foofoo")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "expire", e.Action)
+ assert.Equal(t, "/foofoo", e.Node.Key)
w, _ = s.Watch("/", true, false, 6)
e = nbselect(w.EventChan())
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "expire")
- testutil.AssertEqual(t, e.Node.Key, "/foodir")
- testutil.AssertEqual(t, e.Node.Dir, true)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "expire", e.Action)
+ assert.Equal(t, "/foodir", e.Node.Key)
+ assert.True(t, e.Node.Dir)
}
-// Ensure that the store can watch for key expiration when refreshing.
+// TestStoreWatchExpireRefresh ensures that the store can watch for key expiration when refreshing.
func TestStoreWatchExpireRefresh(t *testing.T) {
s := newStore()
fc := newFakeClock()
@@ -189,31 +194,31 @@ func TestStoreWatchExpireRefresh(t *testing.T) {
// Make sure we set watch updates when Refresh is true for newly created keys
w, _ := s.Watch("/", true, false, 0)
- testutil.AssertEqual(t, w.StartIndex(), eidx)
+ assert.Equal(t, eidx, w.StartIndex())
c := w.EventChan()
e := nbselect(c)
- testutil.AssertNil(t, e)
+ assert.Nil(t, e)
fc.Advance(600 * time.Millisecond)
s.DeleteExpiredKeys(fc.Now())
eidx = 3
e = nbselect(c)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "expire")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "expire", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
s.Update("/foofoo", "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true})
w, _ = s.Watch("/", true, false, 4)
fc.Advance(700 * time.Millisecond)
s.DeleteExpiredKeys(fc.Now())
eidx = 5 // We should skip 4 because a TTL update should occur with no watch notification if set `TTLOptionSet.Refresh` to true
- testutil.AssertEqual(t, w.StartIndex(), eidx-1)
+ assert.Equal(t, eidx-1, w.StartIndex())
e = nbselect(w.EventChan())
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "expire")
- testutil.AssertEqual(t, e.Node.Key, "/foofoo")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "expire", e.Action)
+ assert.Equal(t, "/foofoo", e.Node.Key)
}
-// Ensure that the store can watch for key expiration when refreshing with an empty value.
+// TestStoreWatchExpireEmptyRefresh ensures that the store can watch for key expiration when refreshing with an empty value.
func TestStoreWatchExpireEmptyRefresh(t *testing.T) {
s := newStore()
fc := newFakeClock()
@@ -230,15 +235,15 @@ func TestStoreWatchExpireEmptyRefresh(t *testing.T) {
fc.Advance(700 * time.Millisecond)
s.DeleteExpiredKeys(fc.Now())
eidx = 3 // We should skip 2 because a TTL update should occur with no watch notification if set `TTLOptionSet.Refresh` to true
- testutil.AssertEqual(t, w.StartIndex(), eidx-1)
+ assert.Equal(t, eidx-1, w.StartIndex())
e := nbselect(w.EventChan())
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "expire")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
- testutil.AssertEqual(t, *e.PrevNode.Value, "bar")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "expire", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
+ assert.Equal(t, "bar", *e.PrevNode.Value)
}
-// Update TTL of a key (set TTLOptionSet.Refresh to false) and send notification
+// TestStoreWatchNoRefresh updates the TTL of a key (with TTLOptionSet.Refresh set to false) and expects a watch notification to be sent
func TestStoreWatchNoRefresh(t *testing.T) {
s := newStore()
fc := newFakeClock()
@@ -256,15 +261,15 @@ func TestStoreWatchNoRefresh(t *testing.T) {
fc.Advance(700 * time.Millisecond)
s.DeleteExpiredKeys(fc.Now())
eidx = 2
- testutil.AssertEqual(t, w.StartIndex(), eidx)
+ assert.Equal(t, eidx, w.StartIndex())
e := nbselect(w.EventChan())
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "update")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
- testutil.AssertEqual(t, *e.PrevNode.Value, "bar")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "update", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
+ assert.Equal(t, "bar", *e.PrevNode.Value)
}
-// Ensure that the store can update the TTL on a value with refresh.
+// TestStoreRefresh ensures that the store can update the TTL on a value with refresh.
func TestStoreRefresh(t *testing.T) {
s := newStore()
fc := newFakeClock()
@@ -274,19 +279,19 @@ func TestStoreRefresh(t *testing.T) {
s.Create("/bar", true, "bar", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)})
s.Create("/bar/z", false, "bar", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)})
_, err := s.Update("/foo", "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true})
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
_, err = s.Set("/foo", false, "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true})
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
_, err = s.Update("/bar/z", "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true})
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
_, err = s.CompareAndSwap("/foo", "bar", 0, "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true})
- testutil.AssertNil(t, err)
+ assert.NoError(t, err)
}
-// Ensure that the store can recover from a previously saved state that includes an expiring key.
+// TestStoreRecoverWithExpiration ensures that the store can recover from a previously saved state that includes an expiring key.
func TestStoreRecoverWithExpiration(t *testing.T) {
s := newStore()
s.clock = newFakeClock()
@@ -298,7 +303,7 @@ func TestStoreRecoverWithExpiration(t *testing.T) {
s.Create("/foo/x", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})
s.Create("/foo/y", false, "baz", false, TTLOptionSet{ExpireTime: fc.Now().Add(5 * time.Millisecond)})
b, err := s.Save()
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
time.Sleep(10 * time.Millisecond)
@@ -311,37 +316,37 @@ func TestStoreRecoverWithExpiration(t *testing.T) {
s.DeleteExpiredKeys(fc.Now())
e, err := s.Get("/foo/x", false, false)
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, *e.Node.Value, "bar")
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "bar", *e.Node.Value)
e, err = s.Get("/foo/y", false, false)
- testutil.AssertNotNil(t, err)
- testutil.AssertNil(t, e)
+ require.Error(t, err)
+ assert.Nil(t, e)
}
-// Ensure that the store doesn't see expirations of hidden keys.
+// TestStoreWatchExpireWithHiddenKey ensures that the store doesn't see expirations of hidden keys.
func TestStoreWatchExpireWithHiddenKey(t *testing.T) {
s := newStore()
fc := newFakeClock()
s.clock = fc
s.Create("/_foo", false, "bar", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)})
- s.Create("/foofoo", false, "barbarbar", false, TTLOptionSet{ExpireTime: fc.Now().Add(1000 * time.Millisecond)})
+ s.Create("/foofoo", false, "barbarbar", false, TTLOptionSet{ExpireTime: fc.Now().Add(time.Second)})
w, _ := s.Watch("/", true, false, 0)
c := w.EventChan()
e := nbselect(c)
- testutil.AssertNil(t, e)
+ assert.Nil(t, e)
fc.Advance(600 * time.Millisecond)
s.DeleteExpiredKeys(fc.Now())
e = nbselect(c)
- testutil.AssertNil(t, e)
+ assert.Nil(t, e)
fc.Advance(600 * time.Millisecond)
s.DeleteExpiredKeys(fc.Now())
e = nbselect(c)
- testutil.AssertEqual(t, e.Action, "expire")
- testutil.AssertEqual(t, e.Node.Key, "/foofoo")
+ assert.Equal(t, "expire", e.Action)
+ assert.Equal(t, "/foofoo", e.Node.Key)
}
// newFakeClock creates a new FakeClock that has been advanced to at least minExpireTime
diff --git a/server/etcdserver/api/v2store/ttl_key_heap.go b/server/etcdserver/api/v2store/ttl_key_heap.go
index 477d2b9f3aa..77ca8e9ed60 100644
--- a/server/etcdserver/api/v2store/ttl_key_heap.go
+++ b/server/etcdserver/api/v2store/ttl_key_heap.go
@@ -22,7 +22,7 @@ type ttlKeyHeap struct {
keyMap map[*node]int
}
-func newTtlKeyHeap() *ttlKeyHeap {
+func newTTLKeyHeap() *ttlKeyHeap {
h := &ttlKeyHeap{keyMap: make(map[*node]int)}
heap.Init(h)
return h
@@ -45,13 +45,13 @@ func (h ttlKeyHeap) Swap(i, j int) {
h.keyMap[h.array[j]] = j
}
-func (h *ttlKeyHeap) Push(x interface{}) {
+func (h *ttlKeyHeap) Push(x any) {
n, _ := x.(*node)
h.keyMap[n] = len(h.array)
h.array = append(h.array, n)
}
-func (h *ttlKeyHeap) Pop() interface{} {
+func (h *ttlKeyHeap) Pop() any {
old := h.array
n := len(old)
x := old[n-1]
@@ -77,7 +77,7 @@ func (h *ttlKeyHeap) pop() *node {
return n
}
-func (h *ttlKeyHeap) push(x interface{}) {
+func (h *ttlKeyHeap) push(x any) {
heap.Push(h, x)
}
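// For context on the interface{} -> any changes above: `any` is a Go 1.18+ alias for
// interface{}, so Push(x any) and Pop() any still satisfy container/heap's
// heap.Interface. A minimal sketch of the same pattern on a plain int min-heap,
// illustrative only and not the etcd ttlKeyHeap.
package example

import "container/heap"

type intHeap []int

func (h intHeap) Len() int           { return len(h) }
func (h intHeap) Less(i, j int) bool { return h[i] < h[j] }
func (h intHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

// Push and Pop use `any`, which is interchangeable with interface{}.
func (h *intHeap) Push(x any) { *h = append(*h, x.(int)) }
func (h *intHeap) Pop() any {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

// smallest assumes a non-empty input slice; after heap.Init, index 0 holds the minimum.
func smallest(values []int) int {
	h := intHeap(values)
	heap.Init(&h)
	return h[0]
}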
diff --git a/server/etcdserver/api/v2store/watcher_hub.go b/server/etcdserver/api/v2store/watcher_hub.go
index dc5c8f2bb57..df5ae78b07d 100644
--- a/server/etcdserver/api/v2store/watcher_hub.go
+++ b/server/etcdserver/api/v2store/watcher_hub.go
@@ -59,7 +59,6 @@ func newWatchHub(capacity int) *watcherHub {
func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeIndex uint64) (Watcher, *v2error.Error) {
reportWatchRequest()
event, err := wh.EventHistory.scan(key, recursive, index)
-
if err != nil {
err.Index = storeIndex
return nil, err
diff --git a/server/etcdserver/api/v2store/watcher_test.go b/server/etcdserver/api/v2store/watcher_test.go
index e0901028fba..06a5e4ae054 100644
--- a/server/etcdserver/api/v2store/watcher_test.go
+++ b/server/etcdserver/api/v2store/watcher_test.go
@@ -86,5 +86,4 @@ func TestWatcher(t *testing.T) {
if e != re {
t.Fatal("recv != send")
}
-
}
diff --git a/server/etcdserver/api/v2v3/cluster.go b/server/etcdserver/api/v2v3/cluster.go
deleted file mode 100644
index d275e057183..00000000000
--- a/server/etcdserver/api/v2v3/cluster.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2v3
-
-import (
- "go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
-
- "github.com/coreos/go-semver/semver"
-)
-
-func (s *v2v3Server) ID() types.ID {
- // TODO: use an actual member ID
- return types.ID(0xe7cd2f00d)
-}
-func (s *v2v3Server) ClientURLs() []string { panic("STUB") }
-func (s *v2v3Server) Members() []*membership.Member { panic("STUB") }
-func (s *v2v3Server) Member(id types.ID) *membership.Member { panic("STUB") }
-func (s *v2v3Server) Version() *semver.Version { panic("STUB") }
diff --git a/server/etcdserver/api/v2v3/doc.go b/server/etcdserver/api/v2v3/doc.go
deleted file mode 100644
index 2ff372f1876..00000000000
--- a/server/etcdserver/api/v2v3/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package v2v3 provides a ServerV2 implementation backed by clientv3.Client.
-package v2v3
diff --git a/server/etcdserver/api/v2v3/server.go b/server/etcdserver/api/v2v3/server.go
deleted file mode 100644
index 71557ceb5c8..00000000000
--- a/server/etcdserver/api/v2v3/server.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2v3
-
-import (
- "context"
- "net/http"
- "time"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/server/v3/etcdserver"
- "go.etcd.io/etcd/server/v3/etcdserver/api"
- "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
-
- "github.com/coreos/go-semver/semver"
- "go.uber.org/zap"
-)
-
-type fakeStats struct{}
-
-func (s *fakeStats) SelfStats() []byte { return nil }
-func (s *fakeStats) LeaderStats() []byte { return nil }
-func (s *fakeStats) StoreStats() []byte { return nil }
-
-type v2v3Server struct {
- lg *zap.Logger
- c *clientv3.Client
- store *v2v3Store
- fakeStats
-}
-
-func NewServer(lg *zap.Logger, c *clientv3.Client, pfx string) etcdserver.ServerPeer {
- return &v2v3Server{lg: lg, c: c, store: newStore(c, pfx)}
-}
-
-func (s *v2v3Server) ClientCertAuthEnabled() bool { return false }
-
-func (s *v2v3Server) LeaseHandler() http.Handler { panic("STUB: lease handler") }
-func (s *v2v3Server) RaftHandler() http.Handler { panic("STUB: raft handler") }
-
-func (s *v2v3Server) Leader() types.ID {
- ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
- defer cancel()
- resp, err := s.c.Status(ctx, s.c.Endpoints()[0])
- if err != nil {
- return 0
- }
- return types.ID(resp.Leader)
-}
-
-func (s *v2v3Server) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
- // adding member as learner is not supported by V2 Server.
- resp, err := s.c.MemberAdd(ctx, memb.PeerURLs)
- if err != nil {
- return nil, err
- }
- return v3MembersToMembership(resp.Members), nil
-}
-
-func (s *v2v3Server) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
- resp, err := s.c.MemberRemove(ctx, id)
- if err != nil {
- return nil, err
- }
- return v3MembersToMembership(resp.Members), nil
-}
-
-func (s *v2v3Server) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
- resp, err := s.c.MemberPromote(ctx, id)
- if err != nil {
- return nil, err
- }
- return v3MembersToMembership(resp.Members), nil
-}
-
-func (s *v2v3Server) UpdateMember(ctx context.Context, m membership.Member) ([]*membership.Member, error) {
- resp, err := s.c.MemberUpdate(ctx, uint64(m.ID), m.PeerURLs)
- if err != nil {
- return nil, err
- }
- return v3MembersToMembership(resp.Members), nil
-}
-
-func v3MembersToMembership(v3membs []*pb.Member) []*membership.Member {
- membs := make([]*membership.Member, len(v3membs))
- for i, m := range v3membs {
- membs[i] = &membership.Member{
- ID: types.ID(m.ID),
- RaftAttributes: membership.RaftAttributes{
- PeerURLs: m.PeerURLs,
- IsLearner: m.IsLearner,
- },
- Attributes: membership.Attributes{
- Name: m.Name,
- ClientURLs: m.ClientURLs,
- },
- }
- }
- return membs
-}
-
-func (s *v2v3Server) ClusterVersion() *semver.Version { return s.Version() }
-func (s *v2v3Server) Cluster() api.Cluster { return s }
-func (s *v2v3Server) Alarms() []*pb.AlarmMember { return nil }
-func (s *v2v3Server) LeaderChangedNotify() <-chan struct{} { return nil }
-
-func (s *v2v3Server) Do(ctx context.Context, r pb.Request) (etcdserver.Response, error) {
- applier := etcdserver.NewApplierV2(s.lg, s.store, nil)
- reqHandler := etcdserver.NewStoreRequestV2Handler(s.store, applier)
- req := (*etcdserver.RequestV2)(&r)
- resp, err := req.Handle(ctx, reqHandler)
- if resp.Err != nil {
- return resp, resp.Err
- }
- return resp, err
-}
diff --git a/server/etcdserver/api/v2v3/store.go b/server/etcdserver/api/v2v3/store.go
deleted file mode 100644
index 6d78cab7198..00000000000
--- a/server/etcdserver/api/v2v3/store.go
+++ /dev/null
@@ -1,638 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2v3
-
-import (
- "context"
- "fmt"
- "path"
- "sort"
- "strings"
- "time"
-
- "go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/concurrency"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
-)
-
-// store implements the Store interface for V2 using
-// a v3 client.
-type v2v3Store struct {
- c *clientv3.Client
- // pfx is the v3 prefix where keys should be stored.
- pfx string
- ctx context.Context
-}
-
-const maxPathDepth = 63
-
-var errUnsupported = fmt.Errorf("TTLs are unsupported")
-
-func NewStore(c *clientv3.Client, pfx string) v2store.Store { return newStore(c, pfx) }
-
-func newStore(c *clientv3.Client, pfx string) *v2v3Store { return &v2v3Store{c, pfx, c.Ctx()} }
-
-func (s *v2v3Store) Index() uint64 { panic("STUB") }
-
-func (s *v2v3Store) Get(nodePath string, recursive, sorted bool) (*v2store.Event, error) {
- key := s.mkPath(nodePath)
- resp, err := s.c.Txn(s.ctx).Then(
- clientv3.OpGet(key+"/"),
- clientv3.OpGet(key),
- ).Commit()
- if err != nil {
- return nil, err
- }
-
- if kvs := resp.Responses[0].GetResponseRange().Kvs; len(kvs) != 0 || isRoot(nodePath) {
- nodes, err := s.getDir(nodePath, recursive, sorted, resp.Header.Revision)
- if err != nil {
- return nil, err
- }
- cidx, midx := uint64(0), uint64(0)
- if len(kvs) > 0 {
- cidx, midx = mkV2Rev(kvs[0].CreateRevision), mkV2Rev(kvs[0].ModRevision)
- }
- return &v2store.Event{
- Action: v2store.Get,
- Node: &v2store.NodeExtern{
- Key: nodePath,
- Dir: true,
- Nodes: nodes,
- CreatedIndex: cidx,
- ModifiedIndex: midx,
- },
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
- }
-
- kvs := resp.Responses[1].GetResponseRange().Kvs
- if len(kvs) == 0 {
- return nil, v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision))
- }
-
- return &v2store.Event{
- Action: v2store.Get,
- Node: s.mkV2Node(kvs[0]),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) getDir(nodePath string, recursive, sorted bool, rev int64) ([]*v2store.NodeExtern, error) {
- rootNodes, err := s.getDirDepth(nodePath, 1, rev)
- if err != nil || !recursive {
- if sorted {
- sort.Sort(v2store.NodeExterns(rootNodes))
- }
- return rootNodes, err
- }
- nextNodes := rootNodes
- nodes := make(map[string]*v2store.NodeExtern)
- // Breadth walk the subdirectories
- for i := 2; len(nextNodes) > 0; i++ {
- for _, n := range nextNodes {
- nodes[n.Key] = n
- if parent := nodes[path.Dir(n.Key)]; parent != nil {
- parent.Nodes = append(parent.Nodes, n)
- }
- }
- if nextNodes, err = s.getDirDepth(nodePath, i, rev); err != nil {
- return nil, err
- }
- }
-
- if sorted {
- sort.Sort(v2store.NodeExterns(rootNodes))
- }
- return rootNodes, nil
-}
-
-func (s *v2v3Store) getDirDepth(nodePath string, depth int, rev int64) ([]*v2store.NodeExtern, error) {
- pd := s.mkPathDepth(nodePath, depth)
- resp, err := s.c.Get(s.ctx, pd, clientv3.WithPrefix(), clientv3.WithRev(rev))
- if err != nil {
- return nil, err
- }
-
- nodes := make([]*v2store.NodeExtern, len(resp.Kvs))
- for i, kv := range resp.Kvs {
- nodes[i] = s.mkV2Node(kv)
- }
- return nodes, nil
-}
-
-func (s *v2v3Store) Set(
- nodePath string,
- dir bool,
- value string,
- expireOpts v2store.TTLOptionSet,
-) (*v2store.Event, error) {
- if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
- return nil, errUnsupported
- }
-
- if isRoot(nodePath) {
- return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0)
- }
-
- ecode := 0
- applyf := func(stm concurrency.STM) error {
- // build path if any directories in path do not exist
- dirs := []string{}
- for p := path.Dir(nodePath); !isRoot(p); p = path.Dir(p) {
- pp := s.mkPath(p)
- if stm.Rev(pp) > 0 {
- ecode = v2error.EcodeNotDir
- return nil
- }
- if stm.Rev(pp+"/") == 0 {
- dirs = append(dirs, pp+"/")
- }
- }
- for _, d := range dirs {
- stm.Put(d, "")
- }
-
- key := s.mkPath(nodePath)
- if dir {
- if stm.Rev(key) != 0 {
- // exists as non-dir
- ecode = v2error.EcodeNotDir
- return nil
- }
- key = key + "/"
- } else if stm.Rev(key+"/") != 0 {
- ecode = v2error.EcodeNotFile
- return nil
- }
- stm.Put(key, value, clientv3.WithPrevKV())
- stm.Put(s.mkActionKey(), v2store.Set)
- return nil
- }
-
- resp, err := s.newSTM(applyf)
- if err != nil {
- return nil, err
- }
- if ecode != 0 {
- return nil, v2error.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision))
- }
-
- createRev := resp.Header.Revision
- var pn *v2store.NodeExtern
- if pkv := prevKeyFromPuts(resp); pkv != nil {
- pn = s.mkV2Node(pkv)
- createRev = pkv.CreateRevision
- }
-
- vp := &value
- if dir {
- vp = nil
- }
- return &v2store.Event{
- Action: v2store.Set,
- Node: &v2store.NodeExtern{
- Key: nodePath,
- Value: vp,
- Dir: dir,
- ModifiedIndex: mkV2Rev(resp.Header.Revision),
- CreatedIndex: mkV2Rev(createRev),
- },
- PrevNode: pn,
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) Update(nodePath, newValue string, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) {
- if isRoot(nodePath) {
- return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0)
- }
-
- if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
- return nil, errUnsupported
- }
-
- key := s.mkPath(nodePath)
- ecode := 0
- applyf := func(stm concurrency.STM) error {
- if rev := stm.Rev(key + "/"); rev != 0 {
- ecode = v2error.EcodeNotFile
- return nil
- }
- if rev := stm.Rev(key); rev == 0 {
- ecode = v2error.EcodeKeyNotFound
- return nil
- }
- stm.Put(key, newValue, clientv3.WithPrevKV())
- stm.Put(s.mkActionKey(), v2store.Update)
- return nil
- }
-
- resp, err := s.newSTM(applyf)
- if err != nil {
- return nil, err
- }
- if ecode != 0 {
- return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision))
- }
-
- pkv := prevKeyFromPuts(resp)
- return &v2store.Event{
- Action: v2store.Update,
- Node: &v2store.NodeExtern{
- Key: nodePath,
- Value: &newValue,
- ModifiedIndex: mkV2Rev(resp.Header.Revision),
- CreatedIndex: mkV2Rev(pkv.CreateRevision),
- },
- PrevNode: s.mkV2Node(pkv),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) Create(
- nodePath string,
- dir bool,
- value string,
- unique bool,
- expireOpts v2store.TTLOptionSet,
-) (*v2store.Event, error) {
- if isRoot(nodePath) {
- return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0)
- }
- if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
- return nil, errUnsupported
- }
- ecode := 0
- applyf := func(stm concurrency.STM) error {
- ecode = 0
- key := s.mkPath(nodePath)
- if unique {
- // append unique item under the node path
- for {
- key = nodePath + "/" + fmt.Sprintf("%020s", time.Now())
- key = path.Clean(path.Join("/", key))
- key = s.mkPath(key)
- if stm.Rev(key) == 0 {
- break
- }
- }
- }
- if stm.Rev(key) > 0 || stm.Rev(key+"/") > 0 {
- ecode = v2error.EcodeNodeExist
- return nil
- }
- // build path if any directories in path do not exist
- dirs := []string{}
- for p := path.Dir(nodePath); !isRoot(p); p = path.Dir(p) {
- pp := s.mkPath(p)
- if stm.Rev(pp) > 0 {
- ecode = v2error.EcodeNotDir
- return nil
- }
- if stm.Rev(pp+"/") == 0 {
- dirs = append(dirs, pp+"/")
- }
- }
- for _, d := range dirs {
- stm.Put(d, "")
- }
-
- if dir {
- // directories marked with extra slash in key name
- key += "/"
- }
- stm.Put(key, value)
- stm.Put(s.mkActionKey(), v2store.Create)
- return nil
- }
-
- resp, err := s.newSTM(applyf)
- if err != nil {
- return nil, err
- }
- if ecode != 0 {
- return nil, v2error.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision))
- }
-
- var v *string
- if !dir {
- v = &value
- }
-
- return &v2store.Event{
- Action: v2store.Create,
- Node: &v2store.NodeExtern{
- Key: nodePath,
- Value: v,
- Dir: dir,
- ModifiedIndex: mkV2Rev(resp.Header.Revision),
- CreatedIndex: mkV2Rev(resp.Header.Revision),
- },
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) CompareAndSwap(
- nodePath string,
- prevValue string,
- prevIndex uint64,
- value string,
- expireOpts v2store.TTLOptionSet,
-) (*v2store.Event, error) {
- if isRoot(nodePath) {
- return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0)
- }
- if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
- return nil, errUnsupported
- }
-
- key := s.mkPath(nodePath)
- resp, err := s.c.Txn(s.ctx).If(
- s.mkCompare(nodePath, prevValue, prevIndex)...,
- ).Then(
- clientv3.OpPut(key, value, clientv3.WithPrevKV()),
- clientv3.OpPut(s.mkActionKey(), v2store.CompareAndSwap),
- ).Else(
- clientv3.OpGet(key),
- clientv3.OpGet(key+"/"),
- ).Commit()
-
- if err != nil {
- return nil, err
- }
- if !resp.Succeeded {
- return nil, compareFail(nodePath, prevValue, prevIndex, resp)
- }
-
- pkv := resp.Responses[0].GetResponsePut().PrevKv
- return &v2store.Event{
- Action: v2store.CompareAndSwap,
- Node: &v2store.NodeExtern{
- Key: nodePath,
- Value: &value,
- CreatedIndex: mkV2Rev(pkv.CreateRevision),
- ModifiedIndex: mkV2Rev(resp.Header.Revision),
- },
- PrevNode: s.mkV2Node(pkv),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) Delete(nodePath string, dir, recursive bool) (*v2store.Event, error) {
- if isRoot(nodePath) {
- return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0)
- }
- if !dir && !recursive {
- return s.deleteNode(nodePath)
- }
- if !recursive {
- return s.deleteEmptyDir(nodePath)
- }
-
- dels := make([]clientv3.Op, maxPathDepth+1)
- dels[0] = clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV())
- for i := 1; i < maxPathDepth; i++ {
- dels[i] = clientv3.OpDelete(s.mkPathDepth(nodePath, i), clientv3.WithPrefix())
- }
- dels[maxPathDepth] = clientv3.OpPut(s.mkActionKey(), v2store.Delete)
-
- resp, err := s.c.Txn(s.ctx).If(
- clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), ">", 0),
- clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, maxPathDepth)+"/"), "=", 0),
- ).Then(
- dels...,
- ).Commit()
- if err != nil {
- return nil, err
- }
- if !resp.Succeeded {
- return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision))
- }
- dresp := resp.Responses[0].GetResponseDeleteRange()
- return &v2store.Event{
- Action: v2store.Delete,
- PrevNode: s.mkV2Node(dresp.PrevKvs[0]),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) deleteEmptyDir(nodePath string) (*v2store.Event, error) {
- resp, err := s.c.Txn(s.ctx).If(
- clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, 1)), "=", 0).WithPrefix(),
- ).Then(
- clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV()),
- clientv3.OpPut(s.mkActionKey(), v2store.Delete),
- ).Commit()
- if err != nil {
- return nil, err
- }
- if !resp.Succeeded {
- return nil, v2error.NewError(v2error.EcodeDirNotEmpty, nodePath, mkV2Rev(resp.Header.Revision))
- }
- dresp := resp.Responses[0].GetResponseDeleteRange()
- if len(dresp.PrevKvs) == 0 {
- return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision))
- }
- return &v2store.Event{
- Action: v2store.Delete,
- PrevNode: s.mkV2Node(dresp.PrevKvs[0]),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) deleteNode(nodePath string) (*v2store.Event, error) {
- resp, err := s.c.Txn(s.ctx).If(
- clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), "=", 0),
- ).Then(
- clientv3.OpDelete(s.mkPath(nodePath), clientv3.WithPrevKV()),
- clientv3.OpPut(s.mkActionKey(), v2store.Delete),
- ).Commit()
- if err != nil {
- return nil, err
- }
- if !resp.Succeeded {
- return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision))
- }
- pkvs := resp.Responses[0].GetResponseDeleteRange().PrevKvs
- if len(pkvs) == 0 {
- return nil, v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision))
- }
- pkv := pkvs[0]
- return &v2store.Event{
- Action: v2store.Delete,
- Node: &v2store.NodeExtern{
- Key: nodePath,
- CreatedIndex: mkV2Rev(pkv.CreateRevision),
- ModifiedIndex: mkV2Rev(resp.Header.Revision),
- },
- PrevNode: s.mkV2Node(pkv),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) CompareAndDelete(nodePath, prevValue string, prevIndex uint64) (*v2store.Event, error) {
- if isRoot(nodePath) {
- return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0)
- }
-
- key := s.mkPath(nodePath)
- resp, err := s.c.Txn(s.ctx).If(
- s.mkCompare(nodePath, prevValue, prevIndex)...,
- ).Then(
- clientv3.OpDelete(key, clientv3.WithPrevKV()),
- clientv3.OpPut(s.mkActionKey(), v2store.CompareAndDelete),
- ).Else(
- clientv3.OpGet(key),
- clientv3.OpGet(key+"/"),
- ).Commit()
-
- if err != nil {
- return nil, err
- }
- if !resp.Succeeded {
- return nil, compareFail(nodePath, prevValue, prevIndex, resp)
- }
-
- // len(pkvs) > 1 since txn only succeeds when key exists
- pkv := resp.Responses[0].GetResponseDeleteRange().PrevKvs[0]
- return &v2store.Event{
- Action: v2store.CompareAndDelete,
- Node: &v2store.NodeExtern{
- Key: nodePath,
- CreatedIndex: mkV2Rev(pkv.CreateRevision),
- ModifiedIndex: mkV2Rev(resp.Header.Revision),
- },
- PrevNode: s.mkV2Node(pkv),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func compareFail(nodePath, prevValue string, prevIndex uint64, resp *clientv3.TxnResponse) error {
- if dkvs := resp.Responses[1].GetResponseRange().Kvs; len(dkvs) > 0 {
- return v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision))
- }
- kvs := resp.Responses[0].GetResponseRange().Kvs
- if len(kvs) == 0 {
- return v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision))
- }
- kv := kvs[0]
- indexMatch := prevIndex == 0 || kv.ModRevision == int64(prevIndex)
- valueMatch := prevValue == "" || string(kv.Value) == prevValue
- var cause string
- switch {
- case indexMatch && !valueMatch:
- cause = fmt.Sprintf("[%v != %v]", prevValue, string(kv.Value))
- case valueMatch && !indexMatch:
- cause = fmt.Sprintf("[%v != %v]", prevIndex, kv.ModRevision)
- default:
- cause = fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, string(kv.Value), prevIndex, kv.ModRevision)
- }
- return v2error.NewError(v2error.EcodeTestFailed, cause, mkV2Rev(resp.Header.Revision))
-}
-
-func (s *v2v3Store) mkCompare(nodePath, prevValue string, prevIndex uint64) []clientv3.Cmp {
- key := s.mkPath(nodePath)
- cmps := []clientv3.Cmp{clientv3.Compare(clientv3.Version(key), ">", 0)}
- if prevIndex != 0 {
- cmps = append(cmps, clientv3.Compare(clientv3.ModRevision(key), "=", mkV3Rev(prevIndex)))
- }
- if prevValue != "" {
- cmps = append(cmps, clientv3.Compare(clientv3.Value(key), "=", prevValue))
- }
- return cmps
-}
-
-func (s *v2v3Store) JsonStats() []byte { panic("STUB") }
-func (s *v2v3Store) DeleteExpiredKeys(cutoff time.Time) { panic("STUB") }
-
-func (s *v2v3Store) Version() int { return 2 }
-
-// TODO: move this out of the Store interface?
-
-func (s *v2v3Store) Save() ([]byte, error) { panic("STUB") }
-func (s *v2v3Store) Recovery(state []byte) error { panic("STUB") }
-func (s *v2v3Store) Clone() v2store.Store { panic("STUB") }
-func (s *v2v3Store) SaveNoCopy() ([]byte, error) { panic("STUB") }
-func (s *v2v3Store) HasTTLKeys() bool { panic("STUB") }
-
-func (s *v2v3Store) mkPath(nodePath string) string { return s.mkPathDepth(nodePath, 0) }
-
-func (s *v2v3Store) mkNodePath(p string) string {
- return path.Clean(p[len(s.pfx)+len("/k/000/"):])
-}
-
-// mkPathDepth makes a path to a key that encodes its directory depth
-// for fast directory listing. If a depth is provided, it is added
-// to the computed depth.
-func (s *v2v3Store) mkPathDepth(nodePath string, depth int) string {
- normalForm := path.Clean(path.Join("/", nodePath))
- n := strings.Count(normalForm, "/") + depth
- return fmt.Sprintf("%s/%03d/k/%s", s.pfx, n, normalForm)
-}
-
-func (s *v2v3Store) mkActionKey() string { return s.pfx + "/act" }
-
-func isRoot(s string) bool { return len(s) == 0 || s == "/" || s == "/0" || s == "/1" }
-
-func mkV2Rev(v3Rev int64) uint64 {
- if v3Rev == 0 {
- return 0
- }
- return uint64(v3Rev - 1)
-}
-
-func mkV3Rev(v2Rev uint64) int64 {
- if v2Rev == 0 {
- return 0
- }
- return int64(v2Rev + 1)
-}
-
-// mkV2Node creates a V2 NodeExtern from a V3 KeyValue
-func (s *v2v3Store) mkV2Node(kv *mvccpb.KeyValue) *v2store.NodeExtern {
- if kv == nil {
- return nil
- }
- n := &v2store.NodeExtern{
- Key: s.mkNodePath(string(kv.Key)),
- Dir: kv.Key[len(kv.Key)-1] == '/',
- CreatedIndex: mkV2Rev(kv.CreateRevision),
- ModifiedIndex: mkV2Rev(kv.ModRevision),
- }
- if !n.Dir {
- v := string(kv.Value)
- n.Value = &v
- }
- return n
-}
-
-// prevKeyFromPuts gets the prev key that is being put; ignores
-// the put action response.
-func prevKeyFromPuts(resp *clientv3.TxnResponse) *mvccpb.KeyValue {
- for _, r := range resp.Responses {
- pkv := r.GetResponsePut().PrevKv
- if pkv != nil && pkv.CreateRevision > 0 {
- return pkv
- }
- }
- return nil
-}
-
-func (s *v2v3Store) newSTM(applyf func(concurrency.STM) error) (*clientv3.TxnResponse, error) {
- return concurrency.NewSTM(s.c, applyf, concurrency.WithIsolation(concurrency.Serializable))
-}
diff --git a/server/etcdserver/api/v2v3/watcher.go b/server/etcdserver/api/v2v3/watcher.go
deleted file mode 100644
index 046c25d4509..00000000000
--- a/server/etcdserver/api/v2v3/watcher.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2v3
-
-import (
- "context"
- "strings"
-
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
-)
-
-func (s *v2v3Store) Watch(prefix string, recursive, stream bool, sinceIndex uint64) (v2store.Watcher, error) {
- ctx, cancel := context.WithCancel(s.ctx)
- wch := s.c.Watch(
- ctx,
- // TODO: very pricey; use a single store-wide watch in future
- s.pfx,
- clientv3.WithPrefix(),
- clientv3.WithRev(int64(sinceIndex)),
- clientv3.WithCreatedNotify(),
- clientv3.WithPrevKV())
- resp, ok := <-wch
- if err := resp.Err(); err != nil || !ok {
- cancel()
- return nil, v2error.NewError(v2error.EcodeRaftInternal, prefix, 0)
- }
-
- evc, donec := make(chan *v2store.Event), make(chan struct{})
- go func() {
- defer func() {
- close(evc)
- close(donec)
- }()
- for resp := range wch {
- for _, ev := range s.mkV2Events(resp) {
- k := ev.Node.Key
- if recursive {
- if !strings.HasPrefix(k, prefix) {
- continue
- }
- // accept events on hidden keys given in prefix
- k = strings.Replace(k, prefix, "/", 1)
- // ignore hidden keys deeper than prefix
- if strings.Contains(k, "/_") {
- continue
- }
- }
- if !recursive && k != prefix {
- continue
- }
- select {
- case evc <- ev:
- case <-ctx.Done():
- return
- }
- if !stream {
- return
- }
- }
- }
- }()
-
- return &v2v3Watcher{
- startRev: resp.Header.Revision,
- evc: evc,
- donec: donec,
- cancel: cancel,
- }, nil
-}
-
-func (s *v2v3Store) mkV2Events(wr clientv3.WatchResponse) (evs []*v2store.Event) {
- ak := s.mkActionKey()
- for _, rev := range mkRevs(wr) {
- var act, key *clientv3.Event
- for _, ev := range rev {
- if string(ev.Kv.Key) == ak {
- act = ev
- } else if key != nil && len(key.Kv.Key) < len(ev.Kv.Key) {
- // use longest key to ignore intermediate new
- // directories from Create.
- key = ev
- } else if key == nil {
- key = ev
- }
- }
- if act != nil && act.Kv != nil && key != nil {
- v2ev := &v2store.Event{
- Action: string(act.Kv.Value),
- Node: s.mkV2Node(key.Kv),
- PrevNode: s.mkV2Node(key.PrevKv),
- EtcdIndex: mkV2Rev(wr.Header.Revision),
- }
- evs = append(evs, v2ev)
- }
- }
- return evs
-}
-
-func mkRevs(wr clientv3.WatchResponse) (revs [][]*clientv3.Event) {
- var curRev []*clientv3.Event
- for _, ev := range wr.Events {
- if curRev != nil && ev.Kv.ModRevision != curRev[0].Kv.ModRevision {
- revs = append(revs, curRev)
- curRev = nil
- }
- curRev = append(curRev, ev)
- }
- if curRev != nil {
- revs = append(revs, curRev)
- }
- return revs
-}
-
-type v2v3Watcher struct {
- startRev int64
- evc chan *v2store.Event
- donec chan struct{}
- cancel context.CancelFunc
-}
-
-func (w *v2v3Watcher) StartIndex() uint64 { return mkV2Rev(w.startRev) }
-
-func (w *v2v3Watcher) Remove() {
- w.cancel()
- <-w.donec
-}
-
-func (w *v2v3Watcher) EventChan() chan *v2store.Event { return w.evc }
diff --git a/server/etcdserver/api/v3alarm/alarms.go b/server/etcdserver/api/v3alarm/alarms.go
index a0bb13af19b..e0480da081c 100644
--- a/server/etcdserver/api/v3alarm/alarms.go
+++ b/server/etcdserver/api/v3alarm/alarms.go
@@ -18,21 +18,25 @@ package v3alarm
import (
"sync"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
-
"go.uber.org/zap"
-)
-var (
- alarmBucketName = []byte("alarm")
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/storage/backend"
)
type BackendGetter interface {
Backend() backend.Backend
}
+type AlarmBackend interface {
+ CreateAlarmBucket()
+ MustPutAlarm(member *pb.AlarmMember)
+ MustDeleteAlarm(alarm *pb.AlarmMember)
+ GetAllAlarms() ([]*pb.AlarmMember, error)
+ ForceCommit()
+}
+
type alarmSet map[types.ID]*pb.AlarmMember
// AlarmStore persists alarms to the backend.
@@ -41,14 +45,14 @@ type AlarmStore struct {
mu sync.Mutex
types map[pb.AlarmType]alarmSet
- bg BackendGetter
+ be AlarmBackend
}
-func NewAlarmStore(lg *zap.Logger, bg BackendGetter) (*AlarmStore, error) {
+func NewAlarmStore(lg *zap.Logger, be AlarmBackend) (*AlarmStore, error) {
if lg == nil {
lg = zap.NewNop()
}
- ret := &AlarmStore{lg: lg, types: make(map[pb.AlarmType]alarmSet), bg: bg}
+ ret := &AlarmStore{lg: lg, types: make(map[pb.AlarmType]alarmSet), be: be}
err := ret.restore()
return ret, err
}
@@ -62,16 +66,7 @@ func (a *AlarmStore) Activate(id types.ID, at pb.AlarmType) *pb.AlarmMember {
return m
}
- v, err := newAlarm.Marshal()
- if err != nil {
- a.lg.Panic("failed to marshal alarm member", zap.Error(err))
- }
-
- b := a.bg.Backend()
- b.BatchTx().Lock()
- b.BatchTx().UnsafePut(alarmBucketName, v, nil)
- b.BatchTx().Unlock()
-
+ a.be.MustPutAlarm(newAlarm)
return newAlarm
}
@@ -91,16 +86,7 @@ func (a *AlarmStore) Deactivate(id types.ID, at pb.AlarmType) *pb.AlarmMember {
delete(t, id)
- v, err := m.Marshal()
- if err != nil {
- a.lg.Panic("failed to marshal alarm member", zap.Error(err))
- }
-
- b := a.bg.Backend()
- b.BatchTx().Lock()
- b.BatchTx().UnsafeDelete(alarmBucketName, v)
- b.BatchTx().Unlock()
-
+ a.be.MustDeleteAlarm(m)
return m
}
@@ -122,22 +108,15 @@ func (a *AlarmStore) Get(at pb.AlarmType) (ret []*pb.AlarmMember) {
}
func (a *AlarmStore) restore() error {
- b := a.bg.Backend()
- tx := b.BatchTx()
-
- tx.Lock()
- tx.UnsafeCreateBucket(alarmBucketName)
- err := tx.UnsafeForEach(alarmBucketName, func(k, v []byte) error {
- var m pb.AlarmMember
- if err := m.Unmarshal(k); err != nil {
- return err
- }
- a.addToMap(&m)
- return nil
- })
- tx.Unlock()
-
- b.ForceCommit()
+ a.be.CreateAlarmBucket()
+ ms, err := a.be.GetAllAlarms()
+ if err != nil {
+ return err
+ }
+ for _, m := range ms {
+ a.addToMap(m)
+ }
+ a.be.ForceCommit()
return err
}
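// For context on the AlarmStore refactor above: depending on the narrow AlarmBackend
// interface instead of a BackendGetter makes the store easy to exercise with a fake.
// A minimal in-memory sketch written against the interface shown in the hunk; it is
// illustrative only, not the production backend implementation.
package example

import (
	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
	"go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm"
)

// Compile-time check that the fake satisfies the interface from the hunk above.
var _ v3alarm.AlarmBackend = (*fakeAlarmBackend)(nil)

type fakeAlarmBackend struct {
	alarms []*pb.AlarmMember
}

func (f *fakeAlarmBackend) CreateAlarmBucket() {} // nothing to create in memory

func (f *fakeAlarmBackend) MustPutAlarm(m *pb.AlarmMember) {
	f.alarms = append(f.alarms, m)
}

func (f *fakeAlarmBackend) MustDeleteAlarm(m *pb.AlarmMember) {
	// Pointer comparison is enough for this sketch.
	for i, a := range f.alarms {
		if a == m {
			f.alarms = append(f.alarms[:i], f.alarms[i+1:]...)
			return
		}
	}
}

func (f *fakeAlarmBackend) GetAllAlarms() ([]*pb.AlarmMember, error) {
	return f.alarms, nil
}

func (f *fakeAlarmBackend) ForceCommit() {} // no-op for the in-memory fake

// A test could then construct the store with
// v3alarm.NewAlarmStore(zap.NewNop(), &fakeAlarmBackend{}).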
diff --git a/server/etcdserver/api/v3client/doc.go b/server/etcdserver/api/v3client/doc.go
index 279195e1a1b..a6a4d7edfa9 100644
--- a/server/etcdserver/api/v3client/doc.go
+++ b/server/etcdserver/api/v3client/doc.go
@@ -41,5 +41,4 @@
// if err != nil {
// // handle error!
// }
-//
package v3client
diff --git a/server/etcdserver/api/v3client/v3client.go b/server/etcdserver/api/v3client/v3client.go
index 8342dc434c2..b9d18399f2d 100644
--- a/server/etcdserver/api/v3client/v3client.go
+++ b/server/etcdserver/api/v3client/v3client.go
@@ -18,7 +18,7 @@ import (
"context"
"time"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/etcdserver"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
"go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter"
@@ -39,7 +39,7 @@ func New(s *etcdserver.EtcdServer) *clientv3.Client {
wc := adapter.WatchServerToWatchClient(v3rpc.NewWatchServer(s))
c.Watcher = &watchWrapper{clientv3.NewWatchFromWatchClient(wc, c)}
- mc := adapter.MaintenanceServerToMaintenanceClient(v3rpc.NewMaintenanceServer(s))
+ mc := adapter.MaintenanceServerToMaintenanceClient(v3rpc.NewMaintenanceServer(s, nil))
c.Maintenance = clientv3.NewMaintenanceFromMaintenanceClient(mc, c)
clc := adapter.ClusterServerToClusterClient(v3rpc.NewClusterServer(s))
diff --git a/server/etcdserver/api/v3compactor/compactor.go b/server/etcdserver/api/v3compactor/compactor.go
index e352670c12b..f916e71141b 100644
--- a/server/etcdserver/api/v3compactor/compactor.go
+++ b/server/etcdserver/api/v3compactor/compactor.go
@@ -19,10 +19,10 @@ import (
"fmt"
"time"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-
"github.com/jonboulle/clockwork"
"go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)
const (
diff --git a/server/etcdserver/api/v3compactor/compactor_test.go b/server/etcdserver/api/v3compactor/compactor_test.go
index c7b4252d1c3..b5dc88a2b94 100644
--- a/server/etcdserver/api/v3compactor/compactor_test.go
+++ b/server/etcdserver/api/v3compactor/compactor_test.go
@@ -27,7 +27,7 @@ type fakeCompactable struct {
}
func (fc *fakeCompactable) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
- fc.Record(testutil.Action{Name: "c", Params: []interface{}{r}})
+ fc.Record(testutil.Action{Name: "c", Params: []any{r}})
return &pb.CompactionResponse{}, nil
}
diff --git a/server/etcdserver/api/v3compactor/periodic.go b/server/etcdserver/api/v3compactor/periodic.go
index 083c72ede24..067f69ec36d 100644
--- a/server/etcdserver/api/v3compactor/periodic.go
+++ b/server/etcdserver/api/v3compactor/periodic.go
@@ -16,14 +16,15 @@ package v3compactor
import (
"context"
+ "errors"
"sync"
"time"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/server/v3/mvcc"
-
"github.com/jonboulle/clockwork"
"go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
)
// Periodic compacts the log by purging revisions older than
@@ -54,8 +55,9 @@ func newPeriodic(lg *zap.Logger, clock clockwork.Clock, h time.Duration, rg RevG
period: h,
rg: rg,
c: c,
- revs: make([]int64, 0),
}
+ // revs won't grow longer than the number of retentions.
+ pc.revs = make([]int64, 0, pc.getRetentions())
pc.ctx, pc.cancel = context.WithCancel(context.Background())
return pc
}
@@ -66,7 +68,7 @@ Compaction period 1-hour:
2. record revisions for every 1/10 of 1-hour (6-minute)
3. keep recording revisions with no compaction for first 1-hour
4. do compact with revs[0]
- - success? contiue on for-loop and move sliding window; revs = revs[1:]
+ - success? continue on for-loop and move sliding window; revs = revs[1:]
- failure? update revs, and retry after 1/10 of 1-hour (6-minute)
Compaction period 24-hour:
@@ -74,7 +76,7 @@ Compaction period 24-hour:
2. record revisions for every 1/10 of 1-hour (6-minute)
3. keep recording revisions with no compaction for first 24-hour
4. do compact with revs[0]
- - success? contiue on for-loop and move sliding window; revs = revs[1:]
+ - success? continue on for-loop and move sliding window; revs = revs[1:]
- failure? update revs, and retry after 1/10 of 1-hour (6-minute)
Compaction period 59-min:
@@ -82,7 +84,7 @@ Compaction period 59-min:
2. record revisions for every 1/10 of 59-min (5.9-min)
3. keep recording revisions with no compaction for first 59-min
4. do compact with revs[0]
- - success? contiue on for-loop and move sliding window; revs = revs[1:]
+ - success? continue on for-loop and move sliding window; revs = revs[1:]
- failure? update revs, and retry after 1/10 of 59-min (5.9-min)
Compaction period 5-sec:
@@ -90,7 +92,7 @@ Compaction period 5-sec:
2. record revisions for every 1/10 of 5-sec (0.5-sec)
3. keep recording revisions with no compaction for first 5-sec
4. do compact with revs[0]
- - success? contiue on for-loop and move sliding window; revs = revs[1:]
+ - success? continue on for-loop and move sliding window; revs = revs[1:]
- failure? update revs, and retry after 1/10 of 5-sec (0.5-sec)
*/
@@ -101,6 +103,7 @@ func (pc *Periodic) Run() {
retentions := pc.getRetentions()
go func() {
+ lastRevision := int64(0)
lastSuccess := pc.clock.Now()
baseInterval := pc.period
for {
@@ -113,15 +116,15 @@ func (pc *Periodic) Run() {
case <-pc.ctx.Done():
return
case <-pc.clock.After(retryInterval):
- pc.mu.Lock()
+ pc.mu.RLock()
p := pc.paused
- pc.mu.Unlock()
+ pc.mu.RUnlock()
if p {
continue
}
}
-
- if pc.clock.Now().Sub(lastSuccess) < baseInterval {
+ rev := pc.revs[0]
+ if pc.clock.Now().Sub(lastSuccess) < baseInterval || rev == lastRevision {
continue
}
@@ -129,7 +132,6 @@ func (pc *Periodic) Run() {
if baseInterval == pc.period {
baseInterval = compactInterval
}
- rev := pc.revs[0]
pc.lg.Info(
"starting auto periodic compaction",
@@ -138,13 +140,14 @@ func (pc *Periodic) Run() {
)
startTime := pc.clock.Now()
_, err := pc.c.Compact(pc.ctx, &pb.CompactionRequest{Revision: rev})
- if err == nil || err == mvcc.ErrCompacted {
+ if err == nil || errors.Is(err, mvcc.ErrCompacted) {
pc.lg.Info(
"completed auto periodic compaction",
zap.Int64("revision", rev),
zap.Duration("compact-period", pc.period),
zap.Duration("took", pc.clock.Now().Sub(startTime)),
)
+ lastRevision = rev
lastSuccess = pc.clock.Now()
} else {
pc.lg.Warn(
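
The periodic.go change above does two things: it pre-sizes `revs` to the retention count, and it remembers `lastRevision` so a compaction cycle is skipped when the head of the revision window has not moved since the last successful compaction. Below is a stripped-down sketch of that sliding-window bookkeeping; the names are illustrative and not part of the server's API, and the real loop additionally gates on elapsed time, which is omitted here.

```go
package compactorsketch

// compactionWindow is a toy model of the Periodic compactor's revision
// bookkeeping: it keeps the most recent `retentions` sampled revisions and
// reports whether a compaction should be attempted.
type compactionWindow struct {
	retentions   int
	revs         []int64
	lastRevision int64 // revision of the last successful compaction
}

// observe records a newly sampled revision, trimming the window so it never
// grows past the retention count.
func (w *compactionWindow) observe(rev int64) {
	w.revs = append(w.revs, rev)
	if len(w.revs) > w.retentions {
		w.revs = w.revs[1:]
	}
}

// shouldCompact mirrors the new guard in Periodic.Run: do nothing if the
// window is not full yet, or if the oldest revision equals the revision that
// was already compacted successfully.
func (w *compactionWindow) shouldCompact() (int64, bool) {
	if len(w.revs) < w.retentions {
		return 0, false
	}
	rev := w.revs[0]
	if rev == w.lastRevision {
		return 0, false
	}
	return rev, true
}

// markCompacted records a successful compaction at rev.
func (w *compactionWindow) markCompacted(rev int64) {
	w.lastRevision = rev
}
```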
diff --git a/server/etcdserver/api/v3compactor/periodic_test.go b/server/etcdserver/api/v3compactor/periodic_test.go
index 7a806bfe899..5604dabc5ef 100644
--- a/server/etcdserver/api/v3compactor/periodic_test.go
+++ b/server/etcdserver/api/v3compactor/periodic_test.go
@@ -15,15 +15,16 @@
package v3compactor
import (
+ "errors"
"reflect"
"testing"
"time"
+ "github.com/jonboulle/clockwork"
+ "go.uber.org/zap/zaptest"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/pkg/v3/testutil"
-
- "github.com/jonboulle/clockwork"
- "go.uber.org/zap"
)
func TestPeriodicHourly(t *testing.T) {
@@ -32,9 +33,9 @@ func TestPeriodicHourly(t *testing.T) {
fc := clockwork.NewFakeClock()
// TODO: Do not depend on real time (Recorder.Wait) in unit tests.
- rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond), 0}
+ rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(0), 0}
compactable := &fakeCompactable{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond)}
- tb := newPeriodic(zap.NewExample(), fc, retentionDuration, rg, compactable)
+ tb := newPeriodic(zaptest.NewLogger(t), fc, retentionDuration, rg, compactable)
tb.Run()
defer tb.Stop()
@@ -42,8 +43,8 @@ func TestPeriodicHourly(t *testing.T) {
initialIntervals, intervalsPerPeriod := tb.getRetentions(), 10
// compaction doesn't happen til 2 hours elapse
- for i := 0; i < initialIntervals; i++ {
- rg.Wait(1)
+ for i := 0; i < initialIntervals-1; i++ {
+ waitOneAction(t, rg)
fc.Advance(tb.getRetryInterval())
}
@@ -62,7 +63,7 @@ func TestPeriodicHourly(t *testing.T) {
for i := 0; i < 3; i++ {
// advance one hour, one revision for each interval
for j := 0; j < intervalsPerPeriod; j++ {
- rg.Wait(1)
+ waitOneAction(t, rg)
fc.Advance(tb.getRetryInterval())
}
@@ -83,9 +84,9 @@ func TestPeriodicMinutes(t *testing.T) {
retentionDuration := time.Duration(retentionMinutes) * time.Minute
fc := clockwork.NewFakeClock()
- rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond), 0}
+ rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(0), 0}
compactable := &fakeCompactable{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond)}
- tb := newPeriodic(zap.NewExample(), fc, retentionDuration, rg, compactable)
+ tb := newPeriodic(zaptest.NewLogger(t), fc, retentionDuration, rg, compactable)
tb.Run()
defer tb.Stop()
@@ -93,8 +94,8 @@ func TestPeriodicMinutes(t *testing.T) {
initialIntervals, intervalsPerPeriod := tb.getRetentions(), 10
// compaction doesn't happen til 5 minutes elapse
- for i := 0; i < initialIntervals; i++ {
- rg.Wait(1)
+ for i := 0; i < initialIntervals-1; i++ {
+ waitOneAction(t, rg)
fc.Advance(tb.getRetryInterval())
}
@@ -112,7 +113,7 @@ func TestPeriodicMinutes(t *testing.T) {
for i := 0; i < 5; i++ {
// advance 5-minute, one revision for each interval
for j := 0; j < intervalsPerPeriod; j++ {
- rg.Wait(1)
+ waitOneAction(t, rg)
fc.Advance(tb.getRetryInterval())
}
@@ -131,9 +132,9 @@ func TestPeriodicMinutes(t *testing.T) {
func TestPeriodicPause(t *testing.T) {
fc := clockwork.NewFakeClock()
retentionDuration := time.Hour
- rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond), 0}
+ rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(0), 0}
compactable := &fakeCompactable{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond)}
- tb := newPeriodic(zap.NewExample(), fc, retentionDuration, rg, compactable)
+ tb := newPeriodic(zaptest.NewLogger(t), fc, retentionDuration, rg, compactable)
tb.Run()
tb.Pause()
@@ -142,7 +143,7 @@ func TestPeriodicPause(t *testing.T) {
// tb will collect 3 hours of revisions but not compact since paused
for i := 0; i < n*3; i++ {
- rg.Wait(1)
+ waitOneAction(t, rg)
fc.Advance(tb.getRetryInterval())
}
// t.revs = [21 22 23 24 25 26 27 28 29 30]
@@ -155,7 +156,7 @@ func TestPeriodicPause(t *testing.T) {
// tb resumes to being blocked on the clock
tb.Resume()
- rg.Wait(1)
+ waitOneAction(t, rg)
// unblock clock, will kick off a compaction at T=3h6m by retry
fc.Advance(tb.getRetryInterval())
@@ -172,3 +173,74 @@ func TestPeriodicPause(t *testing.T) {
t.Errorf("compact request = %v, want %v", a[0].Params[0], wreq.Revision)
}
}
+
+func TestPeriodicSkipRevNotChange(t *testing.T) {
+ retentionMinutes := 5
+ retentionDuration := time.Duration(retentionMinutes) * time.Minute
+
+ fc := clockwork.NewFakeClock()
+ rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(0), 0}
+ compactable := &fakeCompactable{testutil.NewRecorderStreamWithWaitTimout(20 * time.Millisecond)}
+ tb := newPeriodic(zaptest.NewLogger(t), fc, retentionDuration, rg, compactable)
+
+ tb.Run()
+ defer tb.Stop()
+
+ initialIntervals, intervalsPerPeriod := tb.getRetentions(), 10
+
+ // the first compaction happens after 5 minutes have elapsed
+ for i := 0; i < initialIntervals-1; i++ {
+ // set the same revision, 100, on every interval
+ rg.SetRev(int64(100))
+ waitOneAction(t, rg)
+ fc.Advance(tb.getRetryInterval())
+ }
+
+ // very first compaction
+ a, err := compactable.Wait(1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // for the first compaction, the compact revision will be 100+1
+ expectedRevision := int64(100 + 1)
+ if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) {
+ t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision})
+ }
+
+ // compaction doesn't happen at any interval because the revision hasn't changed
+ for i := 0; i < 5; i++ {
+ for j := 0; j < intervalsPerPeriod; j++ {
+ rg.SetRev(int64(100))
+ waitOneAction(t, rg)
+ fc.Advance(tb.getRetryInterval())
+ }
+
+ _, err = compactable.Wait(1)
+ if err == nil {
+ t.Fatal(errors.New("should not compact since the revision not change"))
+ }
+ }
+
+ // once the revision changes, compaction resumes as normal
+ for i := 0; i < initialIntervals; i++ {
+ waitOneAction(t, rg)
+ fc.Advance(tb.getRetryInterval())
+ }
+
+ a, err = compactable.Wait(1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedRevision = int64(100 + 2)
+ if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) {
+ t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision})
+ }
+}
+
+func waitOneAction(t *testing.T, r testutil.Recorder) {
+ if actions, _ := r.Wait(1); len(actions) != 1 {
+ t.Errorf("expect 1 action, got %v instead", len(actions))
+ }
+}
diff --git a/server/etcdserver/api/v3compactor/revision.go b/server/etcdserver/api/v3compactor/revision.go
index 83be6279478..41748616903 100644
--- a/server/etcdserver/api/v3compactor/revision.go
+++ b/server/etcdserver/api/v3compactor/revision.go
@@ -16,14 +16,15 @@ package v3compactor
import (
"context"
+ "errors"
"sync"
"time"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/server/v3/mvcc"
-
"github.com/jonboulle/clockwork"
"go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
)
// Revision compacts the log by purging revisions older than
@@ -89,7 +90,7 @@ func (rc *Revision) Run() {
zap.Int64("revision-compaction-retention", rc.retention),
)
_, err := rc.c.Compact(rc.ctx, &pb.CompactionRequest{Revision: rev})
- if err == nil || err == mvcc.ErrCompacted {
+ if err == nil || errors.Is(err, mvcc.ErrCompacted) {
prev = rev
rc.lg.Info(
"completed auto revision compaction",
diff --git a/server/etcdserver/api/v3compactor/revision_test.go b/server/etcdserver/api/v3compactor/revision_test.go
index a3ae3d3478d..54e25f2b88c 100644
--- a/server/etcdserver/api/v3compactor/revision_test.go
+++ b/server/etcdserver/api/v3compactor/revision_test.go
@@ -19,18 +19,18 @@ import (
"testing"
"time"
+ "github.com/jonboulle/clockwork"
+ "go.uber.org/zap/zaptest"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/pkg/v3/testutil"
-
- "github.com/jonboulle/clockwork"
- "go.uber.org/zap"
)
func TestRevision(t *testing.T) {
fc := clockwork.NewFakeClock()
rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond), 0}
compactable := &fakeCompactable{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond)}
- tb := newRevision(zap.NewExample(), fc, 10, rg, compactable)
+ tb := newRevision(zaptest.NewLogger(t), fc, 10, rg, compactable)
tb.Run()
defer tb.Stop()
@@ -73,7 +73,7 @@ func TestRevisionPause(t *testing.T) {
fc := clockwork.NewFakeClock()
rg := &fakeRevGetter{testutil.NewRecorderStream(), 99} // will be 100
compactable := &fakeCompactable{testutil.NewRecorderStream()}
- tb := newRevision(zap.NewExample(), fc, 10, rg, compactable)
+ tb := newRevision(zaptest.NewLogger(t), fc, 10, rg, compactable)
tb.Run()
tb.Pause()
diff --git a/server/etcdserver/api/v3discovery/discovery.go b/server/etcdserver/api/v3discovery/discovery.go
new file mode 100644
index 00000000000..7fe231cdb55
--- /dev/null
+++ b/server/etcdserver/api/v3discovery/discovery.go
@@ -0,0 +1,509 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3discovery provides an implementation of the cluster discovery that
+// is used by etcd with v3 client.
+package v3discovery
+
+import (
+ "context"
+ "errors"
+ "math"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ clientv3 "go.etcd.io/etcd/client/v3"
+)
+
+const (
+ discoveryPrefix = "/_etcd/registry"
+)
+
+var (
+ ErrInvalidURL = errors.New("discovery: invalid peer URL")
+ ErrBadSizeKey = errors.New("discovery: size key is bad")
+ ErrSizeNotFound = errors.New("discovery: size key not found")
+ ErrFullCluster = errors.New("discovery: cluster is full")
+ ErrTooManyRetries = errors.New("discovery: too many retries")
+)
+
+var (
+ // Number of retries discovery will attempt before giving up and erroring out.
+ nRetries = uint(math.MaxUint32)
+ maxExponentialRetries = uint(8)
+)
+
+type DiscoveryConfig struct {
+ clientv3.ConfigSpec `json:"client"`
+ Token string `json:"token"`
+}
+
+type memberInfo struct {
+ // peerRegKey is the key used by the member when registering in the
+ // discovery service.
+ // Format: "/_etcd/registry//members/".
+ peerRegKey string
+ // peerURLsMap format: "peerName=peerURLs", i.e., "member1=http://127.0.0.1:2380".
+ peerURLsMap string
+ // createRev is the member's CreateRevision in the etcd cluster backing
+ // the discovery service.
+ createRev int64
+}
+
+type clusterInfo struct {
+ clusterToken string
+ members []memberInfo
+}
+
+// key prefix for each cluster: "/_etcd/registry/<ClusterToken>".
+func getClusterKeyPrefix(cluster string) string {
+ return path.Join(discoveryPrefix, cluster)
+}
+
+// key format for cluster size: "/_etcd/registry/<ClusterToken>/_config/size".
+func getClusterSizeKey(cluster string) string {
+ return path.Join(getClusterKeyPrefix(cluster), "_config/size")
+}
+
+// key prefix for each member: "/_etcd/registry/<ClusterToken>/members".
+func getMemberKeyPrefix(clusterToken string) string {
+ return path.Join(getClusterKeyPrefix(clusterToken), "members")
+}
+
+// key format for each member: "/_etcd/registry/<ClusterToken>/members/<memberId>".
+func getMemberKey(cluster, memberID string) string {
+ return path.Join(getMemberKeyPrefix(cluster), memberID)
+}
+
+// GetCluster will connect to the discovery service at the given endpoints and
+// retrieve a string describing the cluster
+func GetCluster(lg *zap.Logger, cfg *DiscoveryConfig) (cs string, rerr error) {
+ d, err := newDiscovery(lg, cfg, 0)
+ if err != nil {
+ return "", err
+ }
+
+ defer d.close()
+ defer func() {
+ if rerr != nil {
+ d.lg.Error(
+ "discovery failed to get cluster",
+ zap.String("cluster", cs),
+ zap.Error(rerr),
+ )
+ } else {
+ d.lg.Info(
+ "discovery got cluster successfully",
+ zap.String("cluster", cs),
+ )
+ }
+ }()
+
+ return d.getCluster()
+}
+
+// JoinCluster will connect to the discovery service at the endpoints, and
+// register the server represented by the given id and config to the cluster.
+// The parameter `config` is supposed to be in the format "memberName=peerURLs",
+// such as "member1=http://127.0.0.1:2380".
+//
+// The final returned string has the same format as "--initial-cluster", such as
+// "infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380".
+func JoinCluster(lg *zap.Logger, cfg *DiscoveryConfig, id types.ID, config string) (cs string, rerr error) {
+ d, err := newDiscovery(lg, cfg, id)
+ if err != nil {
+ return "", err
+ }
+
+ defer d.close()
+ defer func() {
+ if rerr != nil {
+ d.lg.Error(
+ "discovery failed to join cluster",
+ zap.String("cluster", cs),
+ zap.Error(rerr),
+ )
+ } else {
+ d.lg.Info(
+ "discovery joined cluster successfully",
+ zap.String("cluster", cs),
+ )
+ }
+ }()
+
+ return d.joinCluster(config)
+}
+
+type discovery struct {
+ lg *zap.Logger
+ clusterToken string
+ memberID types.ID
+ c *clientv3.Client
+ retries uint
+
+ cfg *DiscoveryConfig
+
+ clock clockwork.Clock
+}
+
+func newDiscovery(lg *zap.Logger, dcfg *DiscoveryConfig, id types.ID) (*discovery, error) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+
+ lg = lg.With(zap.String("discovery-token", dcfg.Token), zap.String("discovery-endpoints", strings.Join(dcfg.Endpoints, ",")))
+ cfg, err := clientv3.NewClientConfig(&dcfg.ConfigSpec, lg)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := clientv3.New(*cfg)
+ if err != nil {
+ return nil, err
+ }
+ return &discovery{
+ lg: lg,
+ clusterToken: dcfg.Token,
+ memberID: id,
+ c: c,
+ cfg: dcfg,
+ clock: clockwork.NewRealClock(),
+ }, nil
+}
+
+func (d *discovery) getCluster() (string, error) {
+ cls, clusterSize, rev, err := d.checkCluster()
+ if err != nil {
+ if errors.Is(err, ErrFullCluster) {
+ return cls.getInitClusterStr(clusterSize)
+ }
+ return "", err
+ }
+
+ for cls.Len() < clusterSize {
+ d.waitPeers(cls, clusterSize, rev)
+ }
+
+ return cls.getInitClusterStr(clusterSize)
+}
+
+func (d *discovery) joinCluster(config string) (string, error) {
+ _, _, _, err := d.checkCluster()
+ if err != nil {
+ return "", err
+ }
+
+ if err = d.registerSelf(config); err != nil {
+ return "", err
+ }
+
+ cls, clusterSize, rev, err := d.checkCluster()
+ if err != nil {
+ return "", err
+ }
+
+ for cls.Len() < clusterSize {
+ d.waitPeers(cls, clusterSize, rev)
+ }
+
+ return cls.getInitClusterStr(clusterSize)
+}
+
+func (d *discovery) getClusterSize() (int, error) {
+ configKey := getClusterSizeKey(d.clusterToken)
+ ctx, cancel := context.WithTimeout(context.Background(), d.cfg.RequestTimeout)
+ defer cancel()
+
+ resp, err := d.c.Get(ctx, configKey)
+ if err != nil {
+ d.lg.Warn(
+ "failed to get cluster size from discovery service",
+ zap.String("clusterSizeKey", configKey),
+ zap.Error(err),
+ )
+ return 0, err
+ }
+
+ if len(resp.Kvs) == 0 {
+ return 0, ErrSizeNotFound
+ }
+
+ clusterSize, err := strconv.ParseInt(string(resp.Kvs[0].Value), 10, 0)
+ if err != nil || clusterSize <= 0 {
+ return 0, ErrBadSizeKey
+ }
+
+ return int(clusterSize), nil
+}
+
+func (d *discovery) getClusterMembers() (*clusterInfo, int64, error) {
+ membersKeyPrefix := getMemberKeyPrefix(d.clusterToken)
+ ctx, cancel := context.WithTimeout(context.Background(), d.cfg.RequestTimeout)
+ defer cancel()
+
+ resp, err := d.c.Get(ctx, membersKeyPrefix, clientv3.WithPrefix())
+ if err != nil {
+ d.lg.Warn(
+ "failed to get cluster members from discovery service",
+ zap.String("membersKeyPrefix", membersKeyPrefix),
+ zap.Error(err),
+ )
+ return nil, 0, err
+ }
+
+ cls := &clusterInfo{clusterToken: d.clusterToken}
+ for _, kv := range resp.Kvs {
+ mKey := strings.TrimSpace(string(kv.Key))
+ mValue := strings.TrimSpace(string(kv.Value))
+
+ if err := cls.add(mKey, mValue, kv.CreateRevision); err != nil {
+ d.lg.Warn(
+ err.Error(),
+ zap.String("memberKey", mKey),
+ zap.String("memberInfo", mValue),
+ )
+ } else {
+ d.lg.Info(
+ "found peer from discovery service",
+ zap.String("memberKey", mKey),
+ zap.String("memberInfo", mValue),
+ )
+ }
+ }
+
+ return cls, resp.Header.Revision, nil
+}
+
+func (d *discovery) checkClusterRetry() (*clusterInfo, int, int64, error) {
+ if d.retries < nRetries {
+ d.logAndBackoffForRetry("cluster status check")
+ return d.checkCluster()
+ }
+ return nil, 0, 0, ErrTooManyRetries
+}
+
+func (d *discovery) checkCluster() (*clusterInfo, int, int64, error) {
+ clusterSize, err := d.getClusterSize()
+ if err != nil {
+ if errors.Is(err, ErrSizeNotFound) || errors.Is(err, ErrBadSizeKey) {
+ return nil, 0, 0, err
+ }
+
+ return d.checkClusterRetry()
+ }
+
+ cls, rev, err := d.getClusterMembers()
+ if err != nil {
+ return d.checkClusterRetry()
+ }
+ d.retries = 0
+
+ // find self position
+ memberSelfID := getMemberKey(d.clusterToken, d.memberID.String())
+ idx := 0
+ for _, m := range cls.members {
+ if m.peerRegKey == memberSelfID {
+ break
+ }
+ if idx >= clusterSize-1 {
+ return cls, clusterSize, rev, ErrFullCluster
+ }
+ idx++
+ }
+ return cls, clusterSize, rev, nil
+}
+
+func (d *discovery) registerSelfRetry(contents string) error {
+ if d.retries < nRetries {
+ d.logAndBackoffForRetry("register member itself")
+ return d.registerSelf(contents)
+ }
+ return ErrTooManyRetries
+}
+
+func (d *discovery) registerSelf(contents string) error {
+ ctx, cancel := context.WithTimeout(context.Background(), d.cfg.RequestTimeout)
+ memberKey := getMemberKey(d.clusterToken, d.memberID.String())
+ _, err := d.c.Put(ctx, memberKey, contents)
+ cancel()
+
+ if err != nil {
+ d.lg.Warn(
+ "failed to register members itself to the discovery service",
+ zap.String("memberKey", memberKey),
+ zap.Error(err),
+ )
+ return d.registerSelfRetry(contents)
+ }
+ d.retries = 0
+
+ d.lg.Info(
+ "register member itself successfully",
+ zap.String("memberKey", memberKey),
+ zap.String("memberInfo", contents),
+ )
+
+ return nil
+}
+
+func (d *discovery) waitPeers(cls *clusterInfo, clusterSize int, rev int64) {
+ // watch from the next revision
+ membersKeyPrefix := getMemberKeyPrefix(d.clusterToken)
+ w := d.c.Watch(context.Background(), membersKeyPrefix, clientv3.WithPrefix(), clientv3.WithRev(rev+1))
+
+ d.lg.Info(
+ "waiting for peers from discovery service",
+ zap.Int("clusterSize", clusterSize),
+ zap.Int("found-peers", cls.Len()),
+ )
+
+ // waiting for peers until all needed peers are returned
+ for wresp := range w {
+ for _, ev := range wresp.Events {
+ mKey := strings.TrimSpace(string(ev.Kv.Key))
+ mValue := strings.TrimSpace(string(ev.Kv.Value))
+
+ if err := cls.add(mKey, mValue, ev.Kv.CreateRevision); err != nil {
+ d.lg.Warn(
+ err.Error(),
+ zap.String("memberKey", mKey),
+ zap.String("memberInfo", mValue),
+ )
+ } else {
+ d.lg.Info(
+ "found peer from discovery service",
+ zap.String("memberKey", mKey),
+ zap.String("memberInfo", mValue),
+ )
+ }
+ }
+
+ if cls.Len() >= clusterSize {
+ break
+ }
+ }
+
+ d.lg.Info(
+ "found all needed peers from discovery service",
+ zap.Int("clusterSize", clusterSize),
+ zap.Int("found-peers", cls.Len()),
+ )
+}
+
+func (d *discovery) logAndBackoffForRetry(step string) {
+ d.retries++
+ // logAndBackoffForRetry stops exponential backoff when the retries are
+ // more than maxExponentialRetries and uses a constant backoff afterward.
+ retries := d.retries
+ if retries > maxExponentialRetries {
+ retries = maxExponentialRetries
+ }
+ retryTimeInSecond := time.Duration(0x1< clusterSize {
+ peerURLs = peerURLs[:clusterSize]
+ }
+
+ us := strings.Join(peerURLs, ",")
+ _, err := types.NewURLsMap(us)
+ if err != nil {
+ return us, ErrInvalidURL
+ }
+
+ return us, nil
+}
+
+func (cls *clusterInfo) getPeerURLs() []string {
+ var peerURLs []string
+ for _, peer := range cls.members {
+ peerURLs = append(peerURLs, peer.peerURLsMap)
+ }
+ return peerURLs
+}
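
Everything the new v3discovery package stores lives under `/_etcd/registry/<ClusterToken>`: a `_config/size` key holding the expected cluster size and one `members/<memberId>` key per registering peer, whose values are joined into the same `memberName=peerURLs` list that `--initial-cluster` expects. The sketch below re-derives that key layout for illustration only; the helper names, token, and member IDs are made up, and the real package composes these paths with its unexported getClusterSizeKey/getMemberKey functions.

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

const discoveryPrefix = "/_etcd/registry"

// sizeKey mirrors getClusterSizeKey in the diff: where the expected cluster
// size is stored.
func sizeKey(token string) string {
	return path.Join(discoveryPrefix, token, "_config", "size")
}

// memberKey mirrors getMemberKey: one key per registering peer.
func memberKey(token, memberID string) string {
	return path.Join(discoveryPrefix, token, "members", memberID)
}

func main() {
	token := "6c007a14875d53d9" // hypothetical discovery token

	fmt.Println(sizeKey(token)) // /_etcd/registry/6c007a14875d53d9/_config/size

	// Each member registers "name=peerURLs" under its own key, which is
	// exactly the value JoinCluster receives as `config`.
	members := []struct{ id, value string }{
		{"8e9e05c52164694d", "infra1=http://127.0.0.1:12380"},
		{"91bc3c398fb3c146", "infra2=http://127.0.0.1:22380"},
		{"fd422379fda50e48", "infra3=http://127.0.0.1:32380"},
	}

	var urls []string
	for _, m := range members {
		fmt.Println(memberKey(token, m.id), "=>", m.value)
		urls = append(urls, m.value)
	}

	// Joined together, the registered values form the same string that
	// --initial-cluster expects (the real code orders members by
	// CreateRevision before joining).
	fmt.Println(strings.Join(urls, ","))
}
```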
diff --git a/server/etcdserver/api/v3discovery/discovery_test.go b/server/etcdserver/api/v3discovery/discovery_test.go
new file mode 100644
index 00000000000..a442ba9bcf0
--- /dev/null
+++ b/server/etcdserver/api/v3discovery/discovery_test.go
@@ -0,0 +1,780 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3discovery
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "testing"
+
+ "github.com/jonboulle/clockwork"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ clientv3 "go.etcd.io/etcd/client/v3"
+)
+
+// fakeKVForClusterSize is used to test getClusterSize.
+type fakeKVForClusterSize struct {
+ *fakeBaseKV
+ clusterSizeStr string
+}
+
+// Get is the only method this fake needs to overwrite.
+func (fkv *fakeKVForClusterSize) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
+ if fkv.clusterSizeStr == "" {
+ // cluster size isn't configured in this case.
+ return &clientv3.GetResponse{}, nil
+ }
+
+ return &clientv3.GetResponse{
+ Kvs: []*mvccpb.KeyValue{
+ {
+ Value: []byte(fkv.clusterSizeStr),
+ },
+ },
+ }, nil
+}
+
+func TestGetClusterSize(t *testing.T) {
+ cases := []struct {
+ name string
+ clusterSizeStr string
+ expectedErr error
+ expectedSize int
+ }{
+ {
+ name: "cluster size not defined",
+ clusterSizeStr: "",
+ expectedErr: ErrSizeNotFound,
+ },
+ {
+ name: "invalid cluster size",
+ clusterSizeStr: "invalidSize",
+ expectedErr: ErrBadSizeKey,
+ },
+ {
+ name: "valid cluster size",
+ clusterSizeStr: "3",
+ expectedErr: nil,
+ expectedSize: 3,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ d := &discovery{
+ lg: lg,
+ c: &clientv3.Client{
+ KV: &fakeKVForClusterSize{
+ fakeBaseKV: &fakeBaseKV{},
+ clusterSizeStr: tc.clusterSizeStr,
+ },
+ },
+ cfg: &DiscoveryConfig{},
+ clusterToken: "fakeToken",
+ }
+
+ if cs, err := d.getClusterSize(); !errors.Is(err, tc.expectedErr) {
+ t.Errorf("Unexpected error, expected: %v got: %v", tc.expectedErr, err)
+ } else {
+ if err == nil && cs != tc.expectedSize {
+ t.Errorf("Unexpected cluster size, expected: %d got: %d", tc.expectedSize, cs)
+ }
+ }
+ })
+ }
+}
+
+// fakeKVForClusterMembers is used to test getClusterMembers.
+type fakeKVForClusterMembers struct {
+ *fakeBaseKV
+ members []memberInfo
+}
+
+// Get is the only method this fake needs to overwrite.
+func (fkv *fakeKVForClusterMembers) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
+ kvs := memberInfoToKeyValues(fkv.members)
+
+ return &clientv3.GetResponse{
+ Header: &etcdserverpb.ResponseHeader{
+ Revision: 10,
+ },
+ Kvs: kvs,
+ }, nil
+}
+
+func memberInfoToKeyValues(members []memberInfo) []*mvccpb.KeyValue {
+ kvs := make([]*mvccpb.KeyValue, 0)
+ for _, mi := range members {
+ kvs = append(kvs, &mvccpb.KeyValue{
+ Key: []byte(mi.peerRegKey),
+ Value: []byte(mi.peerURLsMap),
+ CreateRevision: mi.createRev,
+ })
+ }
+
+ return kvs
+}
+
+func TestGetClusterMembers(t *testing.T) {
+ actualMemberInfo := []memberInfo{
+ {
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(101).String(),
+ peerURLsMap: "infra1=http://192.168.0.100:2380",
+ createRev: 8,
+ },
+ {
+ // invalid peer registry key
+ peerRegKey: "/invalidPrefix/fakeToken/members/" + types.ID(102).String(),
+ peerURLsMap: "infra2=http://192.168.0.102:2380",
+ createRev: 6,
+ },
+ {
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(102).String(),
+ peerURLsMap: "infra2=http://192.168.0.102:2380",
+ createRev: 6,
+ },
+ {
+ // invalid peer info format
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(102).String(),
+ peerURLsMap: "http://192.168.0.102:2380",
+ createRev: 6,
+ },
+ {
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(103).String(),
+ peerURLsMap: "infra3=http://192.168.0.103:2380",
+ createRev: 7,
+ },
+ {
+ // duplicate peer
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(101).String(),
+ peerURLsMap: "infra1=http://192.168.0.100:2380",
+ createRev: 2,
+ },
+ }
+
+ // sort by CreateRevision
+ expectedMemberInfo := []memberInfo{
+ {
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(102).String(),
+ peerURLsMap: "infra2=http://192.168.0.102:2380",
+ createRev: 6,
+ },
+ {
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(103).String(),
+ peerURLsMap: "infra3=http://192.168.0.103:2380",
+ createRev: 7,
+ },
+ {
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(101).String(),
+ peerURLsMap: "infra1=http://192.168.0.100:2380",
+ createRev: 8,
+ },
+ }
+
+ lg := zaptest.NewLogger(t)
+
+ d := &discovery{
+ lg: lg,
+ c: &clientv3.Client{
+ KV: &fakeKVForClusterMembers{
+ fakeBaseKV: &fakeBaseKV{},
+ members: actualMemberInfo,
+ },
+ },
+ cfg: &DiscoveryConfig{},
+ clusterToken: "fakeToken",
+ }
+
+ clsInfo, _, err := d.getClusterMembers()
+ if err != nil {
+ t.Errorf("Failed to get cluster members, error: %v", err)
+ }
+
+ if clsInfo.Len() != len(expectedMemberInfo) {
+ t.Errorf("unexpected member count, expected: %d, got: %d", len(expectedMemberInfo), clsInfo.Len())
+ }
+
+ for i, m := range clsInfo.members {
+ if m != expectedMemberInfo[i] {
+ t.Errorf("unexpected member[%d], expected: %v, got: %v", i, expectedMemberInfo[i], m)
+ }
+ }
+}
+
+// fakeKVForCheckCluster is used to test checkCluster.
+type fakeKVForCheckCluster struct {
+ *fakeBaseKV
+ t *testing.T
+ token string
+ clusterSizeStr string
+ members []memberInfo
+ getSizeRetries int
+ getMembersRetries int
+}
+
+// Get is the only method this fake needs to overwrite.
+func (fkv *fakeKVForCheckCluster) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
+ clusterSizeKey := fmt.Sprintf("/_etcd/registry/%s/_config/size", fkv.token)
+ clusterMembersKey := fmt.Sprintf("/_etcd/registry/%s/members", fkv.token)
+
+ if key == clusterSizeKey {
+ if fkv.getSizeRetries > 0 {
+ fkv.getSizeRetries--
+ // discovery client should retry on error.
+ return nil, errors.New("get cluster size failed")
+ }
+ return &clientv3.GetResponse{
+ Kvs: []*mvccpb.KeyValue{
+ {
+ Value: []byte(fkv.clusterSizeStr),
+ },
+ },
+ }, nil
+ }
+ if key == clusterMembersKey {
+ if fkv.getMembersRetries > 0 {
+ fkv.getMembersRetries--
+ // discovery client should retry on error.
+ return nil, errors.New("get cluster members failed")
+ }
+ kvs := memberInfoToKeyValues(fkv.members)
+
+ return &clientv3.GetResponse{
+ Header: &etcdserverpb.ResponseHeader{
+ Revision: 10,
+ },
+ Kvs: kvs,
+ }, nil
+ }
+ fkv.t.Errorf("unexpected key: %s", key)
+ return nil, fmt.Errorf("unexpected key: %s", key)
+}
+
+func TestCheckCluster(t *testing.T) {
+ actualMemberInfo := []memberInfo{
+ {
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(101).String(),
+ peerURLsMap: "infra1=http://192.168.0.100:2380",
+ createRev: 8,
+ },
+ {
+ // invalid peer registry key
+ peerRegKey: "/invalidPrefix/fakeToken/members/" + types.ID(102).String(),
+ peerURLsMap: "infra2=http://192.168.0.102:2380",
+ createRev: 6,
+ },
+ {
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(102).String(),
+ peerURLsMap: "infra2=http://192.168.0.102:2380",
+ createRev: 6,
+ },
+ {
+ // invalid peer info format
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(102).String(),
+ peerURLsMap: "http://192.168.0.102:2380",
+ createRev: 6,
+ },
+ {
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(103).String(),
+ peerURLsMap: "infra3=http://192.168.0.103:2380",
+ createRev: 7,
+ },
+ {
+ // duplicate peer
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(101).String(),
+ peerURLsMap: "infra1=http://192.168.0.100:2380",
+ createRev: 2,
+ },
+ }
+
+ // sort by CreateRevision
+ expectedMemberInfo := []memberInfo{
+ {
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(102).String(),
+ peerURLsMap: "infra2=http://192.168.0.102:2380",
+ createRev: 6,
+ },
+ {
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(103).String(),
+ peerURLsMap: "infra3=http://192.168.0.103:2380",
+ createRev: 7,
+ },
+ {
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(101).String(),
+ peerURLsMap: "infra1=http://192.168.0.100:2380",
+ createRev: 8,
+ },
+ }
+
+ cases := []struct {
+ name string
+ memberID types.ID
+ getSizeRetries int
+ getMembersRetries int
+ expectedError error
+ }{
+ {
+ name: "no retries",
+ memberID: 101,
+ getSizeRetries: 0,
+ getMembersRetries: 0,
+ expectedError: nil,
+ },
+ {
+ name: "2 retries for getClusterSize",
+ memberID: 102,
+ getSizeRetries: 2,
+ getMembersRetries: 0,
+ expectedError: nil,
+ },
+ {
+ name: "2 retries for getClusterMembers",
+ memberID: 103,
+ getSizeRetries: 0,
+ getMembersRetries: 2,
+ expectedError: nil,
+ },
+ {
+ name: "error due to cluster full",
+ memberID: 104,
+ getSizeRetries: 0,
+ getMembersRetries: 0,
+ expectedError: ErrFullCluster,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+
+ fkv := &fakeKVForCheckCluster{
+ fakeBaseKV: &fakeBaseKV{},
+ t: t,
+ token: "fakeToken",
+ clusterSizeStr: "3",
+ members: actualMemberInfo,
+ getSizeRetries: tc.getSizeRetries,
+ getMembersRetries: tc.getMembersRetries,
+ }
+
+ d := &discovery{
+ lg: lg,
+ c: &clientv3.Client{
+ KV: fkv,
+ },
+ cfg: &DiscoveryConfig{},
+ clusterToken: "fakeToken",
+ memberID: tc.memberID,
+ clock: clockwork.NewRealClock(),
+ }
+
+ clsInfo, _, _, err := d.checkCluster()
+ if !errors.Is(err, tc.expectedError) {
+ t.Errorf("Unexpected error, expected: %v, got: %v", tc.expectedError, err)
+ }
+
+ if err == nil {
+ if fkv.getSizeRetries != 0 || fkv.getMembersRetries != 0 {
+ t.Errorf("Discovery client did not retry checking cluster on error, remaining etries: (%d, %d)", fkv.getSizeRetries, fkv.getMembersRetries)
+ }
+
+ if clsInfo.Len() != len(expectedMemberInfo) {
+ t.Errorf("Unexpected member count, expected: %d, got: %d", len(expectedMemberInfo), clsInfo.Len())
+ }
+
+ for mIdx, m := range clsInfo.members {
+ if m != expectedMemberInfo[mIdx] {
+ t.Errorf("Unexpected member[%d], expected: %v, got: %v", mIdx, expectedMemberInfo[mIdx], m)
+ }
+ }
+ }
+ })
+ }
+}
+
+// fakeKVForRegisterSelf is used to test registerSelf.
+type fakeKVForRegisterSelf struct {
+ *fakeBaseKV
+ t *testing.T
+ expectedRegKey string
+ expectedRegValue string
+ retries int
+}
+
+// Put is the only method this fake needs to overwrite.
+func (fkv *fakeKVForRegisterSelf) Put(ctx context.Context, key string, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) {
+ if key != fkv.expectedRegKey {
+ fkv.t.Errorf("unexpected register key, expected: %s, got: %s", fkv.expectedRegKey, key)
+ }
+
+ if val != fkv.expectedRegValue {
+ fkv.t.Errorf("unexpected register value, expected: %s, got: %s", fkv.expectedRegValue, val)
+ }
+
+ if fkv.retries > 0 {
+ fkv.retries--
+ // discovery client should retry on error.
+ return nil, errors.New("register self failed")
+ }
+
+ return nil, nil
+}
+
+func TestRegisterSelf(t *testing.T) {
+ cases := []struct {
+ name string
+ token string
+ memberID types.ID
+ expectedRegKey string
+ expectedRegValue string
+ retries int // when retries > 0, the Put request returns an error.
+ }{
+ {
+ name: "no retry with token1",
+ token: "token1",
+ memberID: 101,
+ expectedRegKey: "/_etcd/registry/token1/members/" + types.ID(101).String(),
+ expectedRegValue: "infra=http://127.0.0.1:2380",
+ retries: 0,
+ },
+ {
+ name: "no retry with token2",
+ token: "token2",
+ memberID: 102,
+ expectedRegKey: "/_etcd/registry/token2/members/" + types.ID(102).String(),
+ expectedRegValue: "infra=http://127.0.0.1:2380",
+ retries: 0,
+ },
+ {
+ name: "2 retries",
+ token: "token3",
+ memberID: 103,
+ expectedRegKey: "/_etcd/registry/token3/members/" + types.ID(103).String(),
+ expectedRegValue: "infra=http://127.0.0.1:2380",
+ retries: 2,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ fkv := &fakeKVForRegisterSelf{
+ fakeBaseKV: &fakeBaseKV{},
+ t: t,
+ expectedRegKey: tc.expectedRegKey,
+ expectedRegValue: tc.expectedRegValue,
+ retries: tc.retries,
+ }
+
+ d := &discovery{
+ lg: lg,
+ clusterToken: tc.token,
+ memberID: tc.memberID,
+ cfg: &DiscoveryConfig{},
+ c: &clientv3.Client{
+ KV: fkv,
+ },
+ clock: clockwork.NewRealClock(),
+ }
+
+ if err := d.registerSelf(tc.expectedRegValue); err != nil {
+ t.Errorf("Error occuring on register member self: %v", err)
+ }
+
+ if fkv.retries != 0 {
+ t.Errorf("Discovery client did not retry registering itself on error, remaining retries: %d", fkv.retries)
+ }
+ })
+ }
+}
+
+// fakeWatcherForWaitPeers is used to test waitPeers.
+type fakeWatcherForWaitPeers struct {
+ *fakeBaseWatcher
+ t *testing.T
+ token string
+ members []memberInfo
+}
+
+// Watch is the only method this fake needs to overwrite.
+func (fw *fakeWatcherForWaitPeers) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
+ expectedWatchKey := fmt.Sprintf("/_etcd/registry/%s/members", fw.token)
+ if key != expectedWatchKey {
+ fw.t.Errorf("unexpected watch key, expected: %s, got: %s", expectedWatchKey, key)
+ }
+
+ ch := make(chan clientv3.WatchResponse, 1)
+ go func() {
+ for _, mi := range fw.members {
+ ch <- clientv3.WatchResponse{
+ Events: []*clientv3.Event{
+ {
+ Kv: &mvccpb.KeyValue{
+ Key: []byte(mi.peerRegKey),
+ Value: []byte(mi.peerURLsMap),
+ CreateRevision: mi.createRev,
+ },
+ },
+ },
+ }
+ }
+ close(ch)
+ }()
+ return ch
+}
+
+func TestWaitPeers(t *testing.T) {
+ actualMemberInfo := []memberInfo{
+ {
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(101).String(),
+ peerURLsMap: "infra1=http://192.168.0.100:2380",
+ createRev: 8,
+ },
+ {
+ // invalid peer registry key
+ peerRegKey: "/invalidPrefix/fakeToken/members/" + types.ID(102).String(),
+ peerURLsMap: "infra2=http://192.168.0.102:2380",
+ createRev: 6,
+ },
+ {
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(102).String(),
+ peerURLsMap: "infra2=http://192.168.0.102:2380",
+ createRev: 6,
+ },
+ {
+ // invalid peer info format
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(102).String(),
+ peerURLsMap: "http://192.168.0.102:2380",
+ createRev: 6,
+ },
+ {
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(103).String(),
+ peerURLsMap: "infra3=http://192.168.0.103:2380",
+ createRev: 7,
+ },
+ {
+ // duplicate peer
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(101).String(),
+ peerURLsMap: "infra1=http://192.168.0.100:2380",
+ createRev: 2,
+ },
+ }
+
+ // sort by CreateRevision
+ expectedMemberInfo := []memberInfo{
+ {
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(102).String(),
+ peerURLsMap: "infra2=http://192.168.0.102:2380",
+ createRev: 6,
+ },
+ {
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(103).String(),
+ peerURLsMap: "infra3=http://192.168.0.103:2380",
+ createRev: 7,
+ },
+ {
+ peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(101).String(),
+ peerURLsMap: "infra1=http://192.168.0.100:2380",
+ createRev: 8,
+ },
+ }
+
+ lg := zaptest.NewLogger(t)
+
+ d := &discovery{
+ lg: lg,
+ c: &clientv3.Client{
+ KV: &fakeBaseKV{},
+ Watcher: &fakeWatcherForWaitPeers{
+ fakeBaseWatcher: &fakeBaseWatcher{},
+ t: t,
+ token: "fakeToken",
+ members: actualMemberInfo,
+ },
+ },
+ cfg: &DiscoveryConfig{},
+ clusterToken: "fakeToken",
+ }
+
+ cls := clusterInfo{
+ clusterToken: "fakeToken",
+ }
+
+ d.waitPeers(&cls, 3, 0)
+
+ if cls.Len() != len(expectedMemberInfo) {
+ t.Errorf("unexpected member number returned by watch, expected: %d, got: %d", len(expectedMemberInfo), cls.Len())
+ }
+
+ for i, m := range cls.members {
+ if m != expectedMemberInfo[i] {
+ t.Errorf("unexpected member[%d] returned by watch, expected: %v, got: %v", i, expectedMemberInfo[i], m)
+ }
+ }
+}
+
+func TestGetInitClusterStr(t *testing.T) {
+ cases := []struct {
+ name string
+ members []memberInfo
+ clusterSize int
+ expectedResult string
+ expectedError error
+ }{
+ {
+ name: "1 member",
+ members: []memberInfo{
+ {
+ peerURLsMap: "infra2=http://192.168.0.102:2380",
+ },
+ },
+ clusterSize: 1,
+ expectedResult: "infra2=http://192.168.0.102:2380",
+ expectedError: nil,
+ },
+ {
+ name: "2 members",
+ members: []memberInfo{
+ {
+ peerURLsMap: "infra2=http://192.168.0.102:2380",
+ },
+ {
+ peerURLsMap: "infra3=http://192.168.0.103:2380",
+ },
+ },
+ clusterSize: 2,
+ expectedResult: "infra2=http://192.168.0.102:2380,infra3=http://192.168.0.103:2380",
+ expectedError: nil,
+ },
+ {
+ name: "3 members",
+ members: []memberInfo{
+ {
+ peerURLsMap: "infra2=http://192.168.0.102:2380",
+ },
+ {
+ peerURLsMap: "infra3=http://192.168.0.103:2380",
+ },
+ {
+ peerURLsMap: "infra1=http://192.168.0.100:2380",
+ },
+ },
+ clusterSize: 3,
+ expectedResult: "infra2=http://192.168.0.102:2380,infra3=http://192.168.0.103:2380,infra1=http://192.168.0.100:2380",
+ expectedError: nil,
+ },
+ {
+ name: "should ignore redundant member",
+ members: []memberInfo{
+ {
+ peerURLsMap: "infra2=http://192.168.0.102:2380",
+ },
+ {
+ peerURLsMap: "infra3=http://192.168.0.103:2380",
+ },
+ {
+ peerURLsMap: "infra1=http://192.168.0.100:2380",
+ },
+ {
+ peerURLsMap: "infra4=http://192.168.0.104:2380",
+ },
+ },
+ clusterSize: 3,
+ expectedResult: "infra2=http://192.168.0.102:2380,infra3=http://192.168.0.103:2380,infra1=http://192.168.0.100:2380",
+ expectedError: nil,
+ },
+ {
+ name: "invalid_peer_url",
+ members: []memberInfo{
+ {
+ peerURLsMap: "infra2=http://192.168.0.102:2380",
+ },
+ {
+ peerURLsMap: "infra3=http://192.168.0.103", // not host:port
+ },
+ },
+ clusterSize: 2,
+ expectedResult: "infra2=http://192.168.0.102:2380,infra3=http://192.168.0.103:2380",
+ expectedError: ErrInvalidURL,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ clsInfo := &clusterInfo{
+ members: tc.members,
+ }
+
+ retStr, err := clsInfo.getInitClusterStr(tc.clusterSize)
+ if !errors.Is(err, tc.expectedError) {
+ t.Errorf("Unexpected error, expected: %v, got: %v", tc.expectedError, err)
+ }
+
+ if err == nil {
+ if retStr != tc.expectedResult {
+ t.Errorf("Unexpected result, expected: %s, got: %s", tc.expectedResult, retStr)
+ }
+ }
+ })
+ }
+}
+
+// fakeBaseKV is the base struct implementing the interface `clientv3.KV`.
+type fakeBaseKV struct{}
+
+func (fkv *fakeBaseKV) Put(ctx context.Context, key string, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) {
+ return nil, nil
+}
+
+func (fkv *fakeBaseKV) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
+ return nil, nil
+}
+
+func (fkv *fakeBaseKV) Delete(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.DeleteResponse, error) {
+ return nil, nil
+}
+
+func (fkv *fakeBaseKV) Compact(ctx context.Context, rev int64, opts ...clientv3.CompactOption) (*clientv3.CompactResponse, error) {
+ return nil, nil
+}
+
+func (fkv *fakeBaseKV) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse, error) {
+ return clientv3.OpResponse{}, nil
+}
+
+func (fkv *fakeBaseKV) Txn(ctx context.Context) clientv3.Txn {
+ return nil
+}
+
+// fakeBaseWatcher is the base struct implementing the interface `clientv3.Watcher`.
+type fakeBaseWatcher struct{}
+
+func (fw *fakeBaseWatcher) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
+ return nil
+}
+
+func (fw *fakeBaseWatcher) RequestProgress(ctx context.Context) error {
+ return nil
+}
+
+func (fw *fakeBaseWatcher) Close() error {
+ return nil
+}
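
The test fakes above all follow one pattern: `fakeBaseKV` and `fakeBaseWatcher` give every method of the wide clientv3 interfaces a no-op body, and each scenario-specific fake embeds the base struct and overrides the single method it cares about. Below is a generic illustration of that embed-and-override idiom, using made-up types rather than the clientv3 interfaces.

```go
package main

import "fmt"

// Store is a stand-in for a wide interface such as clientv3.KV.
type Store interface {
	Get(key string) (string, error)
	Put(key, val string) error
	Delete(key string) error
}

// baseStore gives every method a harmless default, like fakeBaseKV does.
type baseStore struct{}

func (baseStore) Get(key string) (string, error) { return "", nil }
func (baseStore) Put(key, val string) error      { return nil }
func (baseStore) Delete(key string) error        { return nil }

// getOnlyStore embeds the base and overrides just Get, mirroring how
// fakeKVForClusterSize overrides only clientv3.KV's Get.
type getOnlyStore struct {
	baseStore
	value string
}

func (s *getOnlyStore) Get(key string) (string, error) { return s.value, nil }

func main() {
	var s Store = &getOnlyStore{value: "3"}
	v, _ := s.Get("/_etcd/registry/token/_config/size")
	fmt.Println(v) // "3" — Put and Delete are still the promoted no-ops
}
```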
diff --git a/server/etcdserver/api/v3election/election.go b/server/etcdserver/api/v3election/election.go
index 78b26ad7cc9..77a9c4bcb42 100644
--- a/server/etcdserver/api/v3election/election.go
+++ b/server/etcdserver/api/v3election/election.go
@@ -18,7 +18,7 @@ import (
"context"
"errors"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
)
diff --git a/server/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go b/server/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go
index 4a6e5e724cc..912149f5ab5 100644
--- a/server/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go
+++ b/server/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go
@@ -9,143 +9,115 @@ It translates gRPC into RESTful JSON APIs.
package gw
import (
+ protov1 "github.com/golang/protobuf/proto"
+
"context"
+ "errors"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
"io"
"net/http"
- "github.com/golang/protobuf/descriptor"
- "github.com/golang/protobuf/proto"
- "github.com/grpc-ecosystem/grpc-gateway/runtime"
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
+ "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
)
// Suppress "imported and not used" errors
-var _ codes.Code
-var _ io.Reader
-var _ status.Status
-var _ = runtime.String
-var _ = utilities.NewDoubleArray
-var _ = descriptor.ForMessage
+var (
+ _ codes.Code
+ _ io.Reader
+ _ status.Status
+ _ = errors.New
+ _ = runtime.String
+ _ = utilities.NewDoubleArray
+ _ = metadata.Join
+)
func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.CampaignRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq v3electionpb.CampaignRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Campaign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, server v3electionpb.ElectionServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.CampaignRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq v3electionpb.CampaignRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Campaign(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.ProclaimRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq v3electionpb.ProclaimRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Proclaim(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, server v3electionpb.ElectionServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.ProclaimRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq v3electionpb.ProclaimRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Proclaim(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.LeaderRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq v3electionpb.LeaderRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Leader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, server v3electionpb.ElectionServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.LeaderRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq v3electionpb.LeaderRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Leader(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (v3electionpb.Election_ObserveClient, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.LeaderRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq v3electionpb.LeaderRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
stream, err := client.Observe(ctx, &protoReq)
if err != nil {
return nil, metadata, err
@@ -156,133 +128,124 @@ func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler
}
metadata.HeaderMD = header
return stream, metadata, nil
-
}
func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.ResignRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq v3electionpb.ResignRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Resign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, server v3electionpb.ElectionServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.ResignRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq v3electionpb.ResignRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Resign(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
// v3electionpb.RegisterElectionHandlerServer registers the http handlers for service Election to "mux".
// UnaryRPC :call v3electionpb.ElectionServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterElectionHandlerFromEndpoint instead.
+// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, server v3electionpb.ElectionServer) error {
-
- mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/v3electionpb.Election/Campaign", runtime.WithHTTPPathPattern("/v3/election/campaign"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Election_Campaign_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Election_Campaign_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Election_Campaign_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/v3electionpb.Election/Proclaim", runtime.WithHTTPPathPattern("/v3/election/proclaim"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Election_Proclaim_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Election_Proclaim_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Election_Proclaim_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/v3electionpb.Election/Leader", runtime.WithHTTPPathPattern("/v3/election/leader"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Election_Leader_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Election_Leader_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Election_Leader_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
- mux.Handle("POST", pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport")
_, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
})
-
- mux.Handle("POST", pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/v3electionpb.Election/Resign", runtime.WithHTTPPathPattern("/v3/election/resign"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Election_Resign_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Election_Resign_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Election_Resign_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
@@ -291,25 +254,24 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s
// RegisterElectionHandlerFromEndpoint is same as RegisterElectionHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterElectionHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
+ conn, err := grpc.NewClient(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
-
return RegisterElectionHandler(ctx, mux, conn)
}
@@ -323,132 +285,111 @@ func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *g
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ElectionClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ElectionClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "ElectionClient" to call the correct interceptors.
+// "ElectionClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3electionpb.ElectionClient) error {
-
- mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/v3electionpb.Election/Campaign", runtime.WithHTTPPathPattern("/v3/election/campaign"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Election_Campaign_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Election_Campaign_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Election_Campaign_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/v3electionpb.Election/Proclaim", runtime.WithHTTPPathPattern("/v3/election/proclaim"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Election_Proclaim_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Election_Proclaim_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Election_Proclaim_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/v3electionpb.Election/Leader", runtime.WithHTTPPathPattern("/v3/election/leader"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Election_Leader_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Election_Leader_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Election_Leader_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/v3electionpb.Election/Observe", runtime.WithHTTPPathPattern("/v3/election/observe"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Election_Observe_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Election_Observe_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Election_Observe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
-
+ forward_Election_Observe_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) {
+ m1, err := resp.Recv()
+ return protov1.MessageV2(m1), err
+ }, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/v3electionpb.Election/Resign", runtime.WithHTTPPathPattern("/v3/election/resign"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Election_Resign_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Election_Resign_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Election_Resign_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_Election_Campaign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "campaign"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Election_Proclaim_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "proclaim"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Election_Leader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "leader"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Election_Observe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "observe"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Election_Resign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "resign"}, "", runtime.AssumeColonVerbOpt(true)))
+ pattern_Election_Campaign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "campaign"}, ""))
+ pattern_Election_Proclaim_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "proclaim"}, ""))
+ pattern_Election_Leader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "leader"}, ""))
+ pattern_Election_Observe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "observe"}, ""))
+ pattern_Election_Resign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "resign"}, ""))
)
var (
forward_Election_Campaign_0 = runtime.ForwardResponseMessage
-
forward_Election_Proclaim_0 = runtime.ForwardResponseMessage
-
- forward_Election_Leader_0 = runtime.ForwardResponseMessage
-
- forward_Election_Observe_0 = runtime.ForwardResponseStream
-
- forward_Election_Resign_0 = runtime.ForwardResponseMessage
+ forward_Election_Leader_0 = runtime.ForwardResponseMessage
+ forward_Election_Observe_0 = runtime.ForwardResponseStream
+ forward_Election_Resign_0 = runtime.ForwardResponseMessage
)
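
The regenerated election gateway reflects the grpc-gateway v1 to v2 migration: request bodies are decoded straight from `req.Body` instead of through `utilities.IOReaderFactory`, gogo-generated messages are wrapped with `protov1.MessageV2` so the v2 runtime marshalers can handle them, EOF is matched with `errors.Is`, HTTP methods use the `http.MethodPost` constant, and `AnnotateIncomingContext` now receives the full RPC name and HTTP path pattern. A minimal sketch of the new decode shape, mirroring the generated handlers above (the helper function name is illustrative, not part of the generated code):

```go
// Sketch of the grpc-gateway v2 decode pattern used in the generated handlers;
// decodeLeaderRequest is a hypothetical helper, not generated code.
package gw

import (
	"errors"
	"io"
	"net/http"

	protov1 "github.com/golang/protobuf/proto"
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
)

func decodeLeaderRequest(marshaler runtime.Marshaler, req *http.Request) (*v3electionpb.LeaderRequest, error) {
	var protoReq v3electionpb.LeaderRequest
	// protov1.MessageV2 adapts the gogo/v1 message to the protobuf v2 API that
	// the v2 runtime expects; an empty body (io.EOF) is not treated as an error.
	if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
		return nil, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	return &protoReq, nil
}
```
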
diff --git a/server/etcdserver/api/v3election/v3electionpb/v3election.pb.go b/server/etcdserver/api/v3election/v3electionpb/v3election.pb.go
index 81cf59b9b25..4a022f1b628 100644
--- a/server/etcdserver/api/v3election/v3electionpb/v3election.pb.go
+++ b/server/etcdserver/api/v3election/v3electionpb/v3election.pb.go
@@ -552,41 +552,42 @@ func init() {
func init() { proto.RegisterFile("v3election.proto", fileDescriptor_c9b1f26cc432a035) }
var fileDescriptor_c9b1f26cc432a035 = []byte{
- // 531 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xcf, 0x6e, 0xd3, 0x40,
- 0x10, 0xc6, 0x59, 0x27, 0x84, 0x32, 0xa4, 0xad, 0x65, 0x82, 0x08, 0x21, 0xb8, 0xd1, 0x72, 0xa9,
- 0x72, 0xb0, 0x51, 0xc3, 0x29, 0x27, 0x04, 0x02, 0x55, 0x2a, 0x12, 0xe0, 0x03, 0x82, 0xe3, 0xda,
- 0x1d, 0xb9, 0x91, 0x1d, 0xaf, 0xb1, 0x5d, 0x4b, 0xb9, 0xf2, 0x0a, 0x1c, 0xe0, 0x91, 0x38, 0x22,
- 0xf1, 0x02, 0x28, 0xf0, 0x20, 0x68, 0x77, 0xed, 0xfa, 0x8f, 0x12, 0x84, 0x9a, 0xdb, 0x78, 0xe7,
- 0xdb, 0xf9, 0xcd, 0x37, 0x3b, 0x09, 0xe8, 0xf9, 0x0c, 0x43, 0xf4, 0xb2, 0x05, 0x8f, 0xac, 0x38,
- 0xe1, 0x19, 0x37, 0xfa, 0xd5, 0x49, 0xec, 0x8e, 0x06, 0x3e, 0xf7, 0xb9, 0x4c, 0xd8, 0x22, 0x52,
- 0x9a, 0xd1, 0x11, 0x66, 0xde, 0xb9, 0xcd, 0xe2, 0x85, 0x2d, 0x82, 0x14, 0x93, 0x1c, 0x93, 0xd8,
- 0xb5, 0x93, 0xd8, 0x2b, 0x04, 0xc3, 0x2b, 0xc1, 0x32, 0xf7, 0xbc, 0xd8, 0xb5, 0x83, 0xbc, 0xc8,
- 0x8c, 0x7d, 0xce, 0xfd, 0x10, 0x65, 0x8e, 0x45, 0x11, 0xcf, 0x98, 0x20, 0xa5, 0x2a, 0x4b, 0xdf,
- 0xc1, 0xe1, 0x0b, 0xb6, 0x8c, 0xd9, 0xc2, 0x8f, 0x1c, 0xfc, 0x74, 0x89, 0x69, 0x66, 0x18, 0xd0,
- 0x8d, 0xd8, 0x12, 0x87, 0x64, 0x42, 0x8e, 0xfb, 0x8e, 0x8c, 0x8d, 0x01, 0xdc, 0x0c, 0x91, 0xa5,
- 0x38, 0xd4, 0x26, 0xe4, 0xb8, 0xe3, 0xa8, 0x0f, 0x71, 0x9a, 0xb3, 0xf0, 0x12, 0x87, 0x1d, 0x29,
- 0x55, 0x1f, 0x74, 0x05, 0x7a, 0x55, 0x32, 0x8d, 0x79, 0x94, 0xa2, 0xf1, 0x14, 0x7a, 0x17, 0xc8,
- 0xce, 0x31, 0x91, 0x55, 0xef, 0x9c, 0x8c, 0xad, 0xba, 0x0f, 0xab, 0xd4, 0x9d, 0x4a, 0x8d, 0x53,
- 0x68, 0x0d, 0x1b, 0x7a, 0xa1, 0xba, 0xa5, 0xc9, 0x5b, 0xf7, 0xad, 0xfa, 0xa8, 0xac, 0xd7, 0x32,
- 0x77, 0x86, 0x2b, 0xa7, 0x90, 0xd1, 0x8f, 0x70, 0xfb, 0xea, 0x70, 0xa3, 0x0f, 0x1d, 0x3a, 0x01,
- 0xae, 0x64, 0xb9, 0xbe, 0x23, 0x42, 0x71, 0x92, 0x60, 0x2e, 0x1d, 0x74, 0x1c, 0x11, 0x56, 0x5e,
- 0xbb, 0x35, 0xaf, 0xf4, 0x31, 0xec, 0xab, 0xd2, 0xff, 0x18, 0x13, 0xbd, 0x80, 0x83, 0x52, 0xb4,
- 0x93, 0xf1, 0x09, 0x68, 0x41, 0x5e, 0x98, 0xd6, 0x2d, 0xf5, 0xa2, 0xd6, 0x19, 0xae, 0xde, 0x8b,
- 0x01, 0x3b, 0x5a, 0x90, 0xd3, 0x67, 0xb0, 0xef, 0x60, 0x5a, 0x7b, 0xb5, 0x6a, 0x56, 0xe4, 0xff,
- 0x66, 0xf5, 0x0a, 0x0e, 0xca, 0x0a, 0xbb, 0xf4, 0x4a, 0x3f, 0xc0, 0xe1, 0xdb, 0x84, 0x7b, 0x21,
- 0x5b, 0x2c, 0xaf, 0xdb, 0x4b, 0xb5, 0x48, 0x5a, 0x7d, 0x91, 0x4e, 0x41, 0xaf, 0x2a, 0xef, 0xd2,
- 0xe3, 0xc9, 0xd7, 0x2e, 0xec, 0xbd, 0x2c, 0x1a, 0x30, 0x02, 0xd8, 0x2b, 0xf7, 0xd3, 0x78, 0xd4,
- 0xec, 0xac, 0xf5, 0x53, 0x18, 0x99, 0xdb, 0xd2, 0x8a, 0x42, 0x27, 0x9f, 0x7f, 0xfe, 0xf9, 0xa2,
- 0x8d, 0xe8, 0x3d, 0x3b, 0x9f, 0xd9, 0xa5, 0xd0, 0xf6, 0x0a, 0xd9, 0x9c, 0x4c, 0x05, 0xac, 0xf4,
- 0xd0, 0x86, 0xb5, 0xa6, 0xd6, 0x86, 0xb5, 0xad, 0x6f, 0x81, 0xc5, 0x85, 0x4c, 0xc0, 0x3c, 0xe8,
- 0xa9, 0xd9, 0x1a, 0x0f, 0x37, 0x4d, 0xbc, 0x04, 0x8d, 0x37, 0x27, 0x0b, 0x8c, 0x29, 0x31, 0x43,
- 0x7a, 0xb7, 0x81, 0x51, 0x0f, 0x25, 0x20, 0x3e, 0xdc, 0x7a, 0xe3, 0xca, 0x81, 0xef, 0x42, 0x39,
- 0x92, 0x94, 0x07, 0x74, 0xd0, 0xa0, 0x70, 0x55, 0x78, 0x4e, 0xa6, 0x4f, 0x88, 0x70, 0xa3, 0x16,
- 0xb4, 0xcd, 0x69, 0x2c, 0x7e, 0x9b, 0xd3, 0xdc, 0xe9, 0x2d, 0x6e, 0x12, 0x29, 0x9a, 0x93, 0xe9,
- 0x73, 0xfd, 0xfb, 0xda, 0x24, 0x3f, 0xd6, 0x26, 0xf9, 0xb5, 0x36, 0xc9, 0xb7, 0xdf, 0xe6, 0x0d,
- 0xb7, 0x27, 0xff, 0x18, 0x67, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xe6, 0x7c, 0x66, 0xa9,
- 0x05, 0x00, 0x00,
+ // 556 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0x41, 0x6f, 0xd3, 0x30,
+ 0x14, 0xc7, 0x71, 0x5a, 0xca, 0x78, 0x74, 0x5b, 0x15, 0x8a, 0x28, 0xa5, 0x64, 0x95, 0xb9, 0x4c,
+ 0x3d, 0xc4, 0x68, 0xe5, 0xd4, 0xd3, 0x04, 0x02, 0x4d, 0x1a, 0x12, 0xe0, 0x03, 0x02, 0x6e, 0x6e,
+ 0xf6, 0x94, 0x55, 0x4d, 0xe3, 0x90, 0x74, 0x91, 0x7a, 0xe5, 0x2b, 0x70, 0x80, 0x8f, 0xc4, 0x11,
+ 0x89, 0x2f, 0x80, 0x0a, 0x1f, 0x04, 0xd9, 0x4e, 0x9a, 0x34, 0x6a, 0x11, 0x5a, 0x6f, 0x8e, 0xdf,
+ 0xdf, 0xef, 0xf7, 0xfe, 0xcf, 0x2f, 0x86, 0x56, 0x3a, 0xc4, 0x00, 0xbd, 0xf9, 0x44, 0x86, 0x6e,
+ 0x14, 0xcb, 0xb9, 0xb4, 0x9b, 0xc5, 0x4e, 0x34, 0xee, 0xb6, 0x7d, 0xe9, 0x4b, 0x1d, 0x60, 0x6a,
+ 0x65, 0x34, 0xdd, 0x23, 0x9c, 0x7b, 0x17, 0x4c, 0x44, 0x13, 0xa6, 0x16, 0x09, 0xc6, 0x29, 0xc6,
+ 0xd1, 0x98, 0xc5, 0x91, 0x97, 0x09, 0x3a, 0x2b, 0xc1, 0x2c, 0xf5, 0xbc, 0x68, 0xcc, 0xa6, 0x69,
+ 0x16, 0xe9, 0xf9, 0x52, 0xfa, 0x01, 0xea, 0x98, 0x08, 0x43, 0x39, 0x17, 0x8a, 0x94, 0x98, 0x28,
+ 0x7d, 0x0b, 0x87, 0xcf, 0xc5, 0x2c, 0x12, 0x13, 0x3f, 0xe4, 0xf8, 0xe9, 0x0a, 0x93, 0xb9, 0x6d,
+ 0x43, 0x3d, 0x14, 0x33, 0xec, 0x90, 0x3e, 0x39, 0x6e, 0x72, 0xbd, 0xb6, 0xdb, 0x70, 0x33, 0x40,
+ 0x91, 0x60, 0xc7, 0xea, 0x93, 0xe3, 0x1a, 0x37, 0x1f, 0x6a, 0x37, 0x15, 0xc1, 0x15, 0x76, 0x6a,
+ 0x5a, 0x6a, 0x3e, 0xe8, 0x02, 0x5a, 0x45, 0xca, 0x24, 0x92, 0x61, 0x82, 0xf6, 0x53, 0x68, 0x5c,
+ 0xa2, 0xb8, 0xc0, 0x58, 0x67, 0xbd, 0x73, 0xd2, 0x73, 0xcb, 0x3e, 0xdc, 0x5c, 0x77, 0xa6, 0x35,
+ 0x3c, 0xd3, 0xda, 0x0c, 0x1a, 0x81, 0x39, 0x65, 0xe9, 0x53, 0xf7, 0xdd, 0x72, 0xab, 0xdc, 0x57,
+ 0x3a, 0x76, 0x8e, 0x0b, 0x9e, 0xc9, 0xe8, 0x07, 0xb8, 0xbd, 0xda, 0xdc, 0xe8, 0xa3, 0x05, 0xb5,
+ 0x29, 0x2e, 0x74, 0xba, 0x26, 0x57, 0x4b, 0xb5, 0x13, 0x63, 0xaa, 0x1d, 0xd4, 0xb8, 0x5a, 0x16,
+ 0x5e, 0xeb, 0x25, 0xaf, 0xf4, 0x31, 0xec, 0x9b, 0xd4, 0xff, 0x68, 0x13, 0xbd, 0x84, 0x83, 0x5c,
+ 0xb4, 0x93, 0xf1, 0x3e, 0x58, 0xd3, 0x34, 0x33, 0xdd, 0x72, 0xcd, 0x8d, 0xba, 0xe7, 0xb8, 0x78,
+ 0xa7, 0x1a, 0xcc, 0xad, 0x69, 0x4a, 0x4f, 0x61, 0x9f, 0x63, 0x52, 0xba, 0xb5, 0xa2, 0x57, 0xe4,
+ 0xff, 0x7a, 0xf5, 0x12, 0x0e, 0xf2, 0x0c, 0xbb, 0xd4, 0x4a, 0xdf, 0xc3, 0xe1, 0x9b, 0x58, 0x7a,
+ 0x81, 0x98, 0xcc, 0xae, 0x5b, 0x4b, 0x31, 0x48, 0x56, 0x79, 0x90, 0xce, 0xa0, 0x55, 0x64, 0xde,
+ 0xa5, 0xc6, 0x93, 0xaf, 0x75, 0xd8, 0x7b, 0x91, 0x15, 0x60, 0x4f, 0x61, 0x2f, 0x9f, 0x4f, 0xfb,
+ 0xd1, 0x7a, 0x65, 0x95, 0x5f, 0xa1, 0xeb, 0x6c, 0x0b, 0x1b, 0x0a, 0xed, 0x7f, 0xfe, 0xf9, 0xe7,
+ 0x8b, 0xd5, 0xa5, 0xf7, 0x58, 0x3a, 0x64, 0xb9, 0x90, 0x79, 0x99, 0x6c, 0x44, 0x06, 0x0a, 0x96,
+ 0x7b, 0xa8, 0xc2, 0x2a, 0x5d, 0xab, 0xc2, 0xaa, 0xd6, 0xb7, 0xc0, 0xa2, 0x4c, 0xa6, 0x60, 0x1e,
+ 0x34, 0x4c, 0x6f, 0xed, 0x87, 0x9b, 0x3a, 0x9e, 0x83, 0x7a, 0x9b, 0x83, 0x19, 0xc6, 0xd1, 0x98,
+ 0x0e, 0xbd, 0xbb, 0x86, 0x31, 0x17, 0xa5, 0x20, 0x3e, 0xdc, 0x7a, 0x3d, 0xd6, 0x0d, 0xdf, 0x85,
+ 0x72, 0xa4, 0x29, 0x0f, 0x68, 0x7b, 0x8d, 0x22, 0x4d, 0xe2, 0x11, 0x19, 0x3c, 0x21, 0xca, 0x8d,
+ 0x19, 0xd0, 0x2a, 0x67, 0x6d, 0xf0, 0xab, 0x9c, 0xf5, 0x99, 0xde, 0xe2, 0x26, 0xd6, 0xa2, 0x11,
+ 0x19, 0x3c, 0xe3, 0xdf, 0x97, 0x0e, 0xf9, 0xb1, 0x74, 0xc8, 0xaf, 0xa5, 0x43, 0xbe, 0xfd, 0x76,
+ 0x6e, 0x7c, 0x3c, 0xf5, 0xa5, 0x9e, 0x29, 0x77, 0x22, 0xf5, 0x63, 0xcb, 0xcc, 0x70, 0xe9, 0xf3,
+ 0xab, 0x51, 0xd3, 0xaf, 0x69, 0xc1, 0x65, 0xe5, 0x12, 0xc6, 0x0d, 0xfd, 0xb4, 0x0e, 0xff, 0x06,
+ 0x00, 0x00, 0xff, 0xff, 0xcd, 0x58, 0x82, 0xe2, 0xeb, 0x05, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
diff --git a/server/etcdserver/api/v3election/v3electionpb/v3election.proto b/server/etcdserver/api/v3election/v3electionpb/v3election.proto
index 24ccaf138d8..0bcaad87578 100644
--- a/server/etcdserver/api/v3election/v3electionpb/v3election.proto
+++ b/server/etcdserver/api/v3election/v3electionpb/v3election.proto
@@ -8,6 +8,8 @@ import "etcd/api/mvccpb/kv.proto";
// for grpc-gateway
import "google/api/annotations.proto";
+option go_package = "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb";
+
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
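
The new `go_package` option pins the import path that the protobuf toolchain records for generated code, so importers and the generated package agree on one canonical path. A small sketch of a consumer, assuming nothing beyond the path declared in the option above:

```go
// Minimal sketch: with go_package set, callers import the generated package
// at this canonical path (the package itself is named v3electionpb).
package example

import (
	"go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
)

// Reference a generated type to show the import in use.
var _ = v3electionpb.LeaderRequest{}
```
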
diff --git a/server/etcdserver/api/v3lock/lock.go b/server/etcdserver/api/v3lock/lock.go
index 8f9623361c7..c8ef56ebaeb 100644
--- a/server/etcdserver/api/v3lock/lock.go
+++ b/server/etcdserver/api/v3lock/lock.go
@@ -17,7 +17,7 @@ package v3lock
import (
"context"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
)
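
The lock service switches to an explicit `clientv3` import name because the package's declared name (`clientv3`) does not match the final path element (`v3`); naming it makes call sites self-documenting. A short illustration using only the two packages already imported above:

```go
// With the explicit name, references read as clientv3.<Symbol> and the
// import line documents where the identifier comes from.
package example

import (
	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

// A concurrency session is built on a clientv3.Client lease, as in the
// surrounding lock service code.
func newSession(c *clientv3.Client) (*concurrency.Session, error) {
	return concurrency.NewSession(c)
}
```
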
diff --git a/server/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go b/server/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
index dc573d79cf0..5efb75939c9 100644
--- a/server/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
+++ b/server/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
@@ -9,140 +9,128 @@ It translates gRPC into RESTful JSON APIs.
package gw
import (
+ protov1 "github.com/golang/protobuf/proto"
+
"context"
+ "errors"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
"io"
"net/http"
- "github.com/golang/protobuf/descriptor"
- "github.com/golang/protobuf/proto"
- "github.com/grpc-ecosystem/grpc-gateway/runtime"
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
+ "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
)
// Suppress "imported and not used" errors
-var _ codes.Code
-var _ io.Reader
-var _ status.Status
-var _ = runtime.String
-var _ = utilities.NewDoubleArray
-var _ = descriptor.ForMessage
+var (
+ _ codes.Code
+ _ io.Reader
+ _ status.Status
+ _ = errors.New
+ _ = runtime.String
+ _ = utilities.NewDoubleArray
+ _ = metadata.Join
+)
func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3lockpb.LockRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq v3lockpb.LockRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Lock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, server v3lockpb.LockServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3lockpb.LockRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq v3lockpb.LockRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Lock(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3lockpb.UnlockRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq v3lockpb.UnlockRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Unlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
func local_request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, server v3lockpb.LockServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3lockpb.UnlockRequest
- var metadata runtime.ServerMetadata
-
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
- }
- if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq v3lockpb.UnlockRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Unlock(ctx, &protoReq)
- return msg, metadata, err
-
+ return protov1.MessageV2(msg), metadata, err
}
// v3lockpb.RegisterLockHandlerServer registers the http handlers for service Lock to "mux".
// UnaryRPC :call v3lockpb.LockServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterLockHandlerFromEndpoint instead.
+// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterLockHandlerServer(ctx context.Context, mux *runtime.ServeMux, server v3lockpb.LockServer) error {
-
- mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/v3lockpb.Lock/Lock", runtime.WithHTTPPathPattern("/v3/lock/lock"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Lock_Lock_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Lock_Lock_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Lock_Lock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/v3lockpb.Lock/Unlock", runtime.WithHTTPPathPattern("/v3/lock/unlock"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_Lock_Unlock_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := local_request_Lock_Unlock_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Lock_Unlock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
@@ -151,25 +139,24 @@ func RegisterLockHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve
// RegisterLockHandlerFromEndpoint is same as RegisterLockHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterLockHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
+ conn, err := grpc.NewClient(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
-
return RegisterLockHandler(ctx, mux, conn)
}
@@ -183,60 +170,51 @@ func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "LockClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LockClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "LockClient" to call the correct interceptors.
+// "LockClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3lockpb.LockClient) error {
-
- mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/v3lockpb.Lock/Lock", runtime.WithHTTPPathPattern("/v3/lock/lock"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Lock_Lock_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Lock_Lock_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Lock_Lock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/v3lockpb.Lock/Unlock", runtime.WithHTTPPathPattern("/v3/lock/unlock"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_Lock_Unlock_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_Lock_Unlock_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
- forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
+ forward_Lock_Unlock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_Lock_Lock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1}, []string{"v3", "lock"}, "", runtime.AssumeColonVerbOpt(true)))
-
- pattern_Lock_Unlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lock", "unlock"}, "", runtime.AssumeColonVerbOpt(true)))
+ pattern_Lock_Lock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1}, []string{"v3", "lock"}, ""))
+ pattern_Lock_Unlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lock", "unlock"}, ""))
)
var (
- forward_Lock_Lock_0 = runtime.ForwardResponseMessage
-
+ forward_Lock_Lock_0 = runtime.ForwardResponseMessage
forward_Lock_Unlock_0 = runtime.ForwardResponseMessage
)
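
Both regenerated gateways now create the upstream connection with `grpc.NewClient` instead of the deprecated `grpc.Dial`, and failures to close it are logged at error rather than info level. `grpc.NewClient` returns immediately with a lazily connecting `ClientConn`, so registration no longer implies a blocking dial. A hedged sketch of wiring the regenerated lock handler; the endpoint, port, and dial options below are placeholders, not values from etcd's configuration:

```go
// Hypothetical wiring of the regenerated gateway handler; endpoint, listen
// address, and credentials are placeholders for illustration only.
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/gw"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	// RegisterLockHandlerFromEndpoint now calls grpc.NewClient internally:
	// the connection is created lazily, so this returns without dialing.
	if err := gw.RegisterLockHandlerFromEndpoint(ctx, mux, "127.0.0.1:2379",
		[]grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", mux))
}
```
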
diff --git a/server/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go b/server/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go
index 4282ddc85e0..39c04abe179 100644
--- a/server/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go
+++ b/server/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go
@@ -254,28 +254,30 @@ func init() {
func init() { proto.RegisterFile("v3lock.proto", fileDescriptor_52389b3e2f253201) }
var fileDescriptor_52389b3e2f253201 = []byte{
- // 330 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x33, 0xce, 0xc9,
- 0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf0, 0x0a, 0x92, 0xa4, 0x44,
- 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x82, 0xfa, 0x20, 0x16, 0x44, 0x5e, 0x4a, 0x3e, 0xb5, 0x24, 0x39,
- 0x45, 0x3f, 0xb1, 0x20, 0x53, 0x1f, 0xc4, 0x28, 0x4e, 0x2d, 0x2a, 0x4b, 0x2d, 0x2a, 0x48, 0xd2,
- 0x2f, 0x2a, 0x48, 0x86, 0x2a, 0x90, 0x49, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x05, 0x2b, 0x49, 0xcc,
- 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0x86, 0xc8, 0x2a, 0x99, 0x73, 0x71, 0xfb,
- 0xe4, 0x27, 0x67, 0x07, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, 0x09, 0x71, 0xb1, 0xe4, 0x25,
- 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x42, 0x22, 0x5c, 0xac, 0x39,
- 0xa9, 0x89, 0xc5, 0xa9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x10, 0x8e, 0x52, 0x18, 0x17,
- 0x0f, 0x44, 0x63, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0x90, 0x09, 0x17, 0x5b, 0x46, 0x6a, 0x62,
- 0x4a, 0x6a, 0x11, 0x58, 0x2f, 0xb7, 0x91, 0x8c, 0x1e, 0xb2, 0x7b, 0xf4, 0x60, 0xea, 0x3c, 0xc0,
- 0x6a, 0x82, 0xa0, 0x6a, 0x85, 0x04, 0xb8, 0x98, 0xb3, 0x53, 0x2b, 0xc1, 0x26, 0xf3, 0x04, 0x81,
- 0x98, 0x4a, 0x8a, 0x5c, 0xbc, 0xa1, 0x79, 0x39, 0x48, 0x4e, 0x82, 0x2a, 0x61, 0x44, 0x28, 0x71,
- 0xe3, 0xe2, 0x83, 0x29, 0xa1, 0xc4, 0x72, 0xa3, 0x0d, 0x8c, 0x5c, 0x2c, 0x20, 0x3f, 0x08, 0xf9,
- 0x43, 0x69, 0x51, 0x3d, 0x58, 0x60, 0xeb, 0x21, 0x05, 0x8a, 0x94, 0x18, 0xba, 0x30, 0xc4, 0x34,
- 0x25, 0x89, 0xa6, 0xcb, 0x4f, 0x26, 0x33, 0x09, 0x29, 0xf1, 0xea, 0x97, 0x19, 0xeb, 0x83, 0x14,
- 0x80, 0x09, 0x2b, 0x46, 0x2d, 0xa1, 0x70, 0x2e, 0x36, 0x88, 0x0b, 0x85, 0xc4, 0x11, 0x7a, 0x51,
- 0xbc, 0x25, 0x25, 0x81, 0x29, 0x01, 0x35, 0x56, 0x0a, 0x6c, 0xac, 0x88, 0x12, 0x3f, 0xdc, 0xd8,
- 0xd2, 0x3c, 0xa8, 0xc1, 0x4e, 0x02, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0,
- 0x91, 0x1c, 0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0xe0, 0x78, 0x34, 0x06, 0x04, 0x00, 0x00,
- 0xff, 0xff, 0x4a, 0x4d, 0xca, 0xbb, 0x36, 0x02, 0x00, 0x00,
+ // 356 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x92, 0xcd, 0x4a, 0xc3, 0x40,
+ 0x10, 0xc7, 0xdd, 0xb6, 0x16, 0xd9, 0xa6, 0x2a, 0x4b, 0xd5, 0x10, 0x4a, 0xac, 0x39, 0x15, 0x0f,
+ 0x59, 0x68, 0x05, 0xc5, 0xa3, 0x07, 0x11, 0x11, 0x84, 0x80, 0x0a, 0xde, 0xd2, 0x74, 0x88, 0xa5,
+ 0x71, 0x27, 0x26, 0x69, 0xc1, 0xab, 0xaf, 0xe0, 0xc5, 0xc7, 0xf0, 0x31, 0x3c, 0x0a, 0xbe, 0x80,
+ 0x54, 0x1f, 0x44, 0x32, 0x9b, 0xd8, 0xaa, 0x47, 0x2f, 0xc9, 0xec, 0xce, 0x6f, 0xfe, 0xf3, 0xb1,
+ 0xc3, 0x8d, 0x69, 0x3f, 0xc2, 0x60, 0xec, 0xc6, 0x09, 0x66, 0x28, 0x56, 0xf4, 0x29, 0x1e, 0x58,
+ 0xad, 0x10, 0x43, 0xa4, 0x4b, 0x99, 0x5b, 0xda, 0x6f, 0x6d, 0x43, 0x16, 0x0c, 0xa5, 0x1f, 0x8f,
+ 0x64, 0x6e, 0xa4, 0x90, 0x4c, 0x21, 0x89, 0x07, 0x32, 0x89, 0x83, 0x02, 0x68, 0x87, 0x88, 0x61,
+ 0x04, 0x84, 0xf8, 0x4a, 0x61, 0xe6, 0x67, 0x23, 0x54, 0xa9, 0xf6, 0x3a, 0xfb, 0xbc, 0x71, 0x86,
+ 0xc1, 0xd8, 0x83, 0xbb, 0x09, 0xa4, 0x99, 0x10, 0xbc, 0xa6, 0xfc, 0x5b, 0x30, 0x59, 0x87, 0x75,
+ 0x0d, 0x8f, 0x6c, 0xd1, 0xe2, 0xcb, 0x11, 0xf8, 0x29, 0x98, 0x95, 0x0e, 0xeb, 0x56, 0x3d, 0x7d,
+ 0x70, 0x2e, 0xb9, 0xa1, 0x03, 0xd3, 0x18, 0x55, 0x0a, 0x62, 0x8f, 0xd7, 0x6f, 0xc0, 0x1f, 0x42,
+ 0x42, 0xb1, 0x8d, 0x5e, 0xdb, 0x5d, 0xac, 0xc7, 0x2d, 0xb9, 0x13, 0x62, 0xbc, 0x82, 0x15, 0xeb,
+ 0xbc, 0x3a, 0x86, 0x7b, 0x52, 0x36, 0xbc, 0xdc, 0x74, 0x76, 0x78, 0xf3, 0x42, 0x45, 0x0b, 0x25,
+ 0x15, 0x08, 0x9b, 0x23, 0xc7, 0x7c, 0xb5, 0x44, 0xfe, 0x93, 0xbc, 0xf7, 0xcc, 0x78, 0x2d, 0xef,
+ 0x41, 0x9c, 0x17, 0xff, 0x0d, 0xb7, 0x1c, 0xb6, 0xbb, 0x30, 0x14, 0x6b, 0xf3, 0xf7, 0xb5, 0x56,
+ 0x73, 0xcc, 0x87, 0xb7, 0xcf, 0xc7, 0x8a, 0x70, 0x9a, 0x72, 0xda, 0x97, 0x39, 0x40, 0x9f, 0x43,
+ 0xb6, 0x2b, 0xae, 0x78, 0x5d, 0x57, 0x28, 0xb6, 0xe6, 0xb1, 0x3f, 0xda, 0xb2, 0xcc, 0xbf, 0x8e,
+ 0x42, 0xd6, 0x22, 0xd9, 0x96, 0xb3, 0xf6, 0x2d, 0x3b, 0x51, 0x85, 0xf0, 0xd1, 0xe9, 0xcb, 0xcc,
+ 0x66, 0xaf, 0x33, 0x9b, 0xbd, 0xcf, 0x6c, 0xf6, 0xf4, 0x61, 0x2f, 0x5d, 0x1f, 0x84, 0x48, 0xcd,
+ 0xba, 0x23, 0xa4, 0x0d, 0x90, 0xba, 0xeb, 0x3c, 0x76, 0x3e, 0x03, 0x7a, 0x7c, 0x9d, 0x4f, 0x96,
+ 0x69, 0x07, 0x75, 0xda, 0x80, 0xfe, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x48, 0x31, 0x4a,
+ 0x70, 0x02, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
diff --git a/server/etcdserver/api/v3lock/v3lockpb/v3lock.proto b/server/etcdserver/api/v3lock/v3lockpb/v3lock.proto
index 1b5c456ae23..88a1c824284 100644
--- a/server/etcdserver/api/v3lock/v3lockpb/v3lock.proto
+++ b/server/etcdserver/api/v3lock/v3lockpb/v3lock.proto
@@ -7,6 +7,8 @@ import "etcd/api/etcdserverpb/rpc.proto";
// for grpc-gateway
import "google/api/annotations.proto";
+option go_package = "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb";
+
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
diff --git a/server/etcdserver/api/v3rpc/auth.go b/server/etcdserver/api/v3rpc/auth.go
index d986037a1b4..6c5db76cb8e 100644
--- a/server/etcdserver/api/v3rpc/auth.go
+++ b/server/etcdserver/api/v3rpc/auth.go
@@ -18,6 +18,7 @@ import (
"context"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/server/v3/auth"
"go.etcd.io/etcd/server/v3/etcdserver"
)
@@ -164,3 +165,23 @@ func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChan
}
return resp, nil
}
+
+type AuthGetter interface {
+ AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error)
+ AuthStore() auth.AuthStore
+}
+
+type AuthAdmin struct {
+ ag AuthGetter
+}
+
+// isPermitted verifies the user has admin privilege.
+// Only users with "root" role are permitted.
+func (aa *AuthAdmin) isPermitted(ctx context.Context) error {
+ authInfo, err := aa.ag.AuthInfoFromCtx(ctx)
+ if err != nil {
+ return err
+ }
+
+ return aa.ag.AuthStore().IsAdminPermitted(authInfo)
+}
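
auth.go gains a small AuthGetter/AuthAdmin helper: it resolves the caller's AuthInfo from the request context and asks the auth store whether that user holds the root role. A hypothetical wrapper showing how an admin-only RPC would be gated on it (the wrapper type below is illustrative, not code from this patch):

```go
// adminOnlyMaintenance is an illustrative wrapper showing the intended use of
// the new AuthAdmin helper; it is not part of this patch.
package v3rpc

import (
	"context"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)

type adminOnlyMaintenance struct {
	pb.MaintenanceServer
	authAdmin *AuthAdmin
}

func (ms *adminOnlyMaintenance) Defragment(ctx context.Context, r *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
	// isPermitted resolves AuthInfo from ctx and requires the root role.
	if err := ms.authAdmin.isPermitted(ctx); err != nil {
		return nil, err
	}
	return ms.MaintenanceServer.Defragment(ctx, r)
}
```
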
diff --git a/server/etcdserver/api/v3rpc/codec.go b/server/etcdserver/api/v3rpc/codec.go
index d599ff63cc3..1bbed839712 100644
--- a/server/etcdserver/api/v3rpc/codec.go
+++ b/server/etcdserver/api/v3rpc/codec.go
@@ -18,13 +18,13 @@ import "github.com/golang/protobuf/proto"
type codec struct{}
-func (c *codec) Marshal(v interface{}) ([]byte, error) {
+func (c *codec) Marshal(v any) ([]byte, error) {
b, err := proto.Marshal(v.(proto.Message))
sentBytes.Add(float64(len(b)))
return b, err
}
-func (c *codec) Unmarshal(data []byte, v interface{}) error {
+func (c *codec) Unmarshal(data []byte, v any) error {
receivedBytes.Add(float64(len(data)))
return proto.Unmarshal(data, v.(proto.Message))
}
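
The codec only swaps `interface{}` for `any`, which has been a predeclared alias for `interface{}` since Go 1.18, so the method set is unchanged and anything that accepted the old signatures still does. A tiny sketch of the equivalence; `legacyCodec` below is illustrative, not a real gRPC interface:

```go
// `any` is a predeclared alias for `interface{}` (Go 1.18+), so a method
// declared with `any` still satisfies an interface spelled with `interface{}`.
package main

import "fmt"

type codec struct{}

func (c *codec) Marshal(v any) ([]byte, error)      { return []byte(fmt.Sprint(v)), nil }
func (c *codec) Unmarshal(data []byte, v any) error { return nil }

// legacyCodec is illustrative; it spells the same method set with interface{}.
type legacyCodec interface {
	Marshal(v interface{}) ([]byte, error)
	Unmarshal(data []byte, v interface{}) error
}

// Compile-time check: the two spellings are interchangeable.
var _ legacyCodec = (*codec)(nil)

func main() {}
```
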
diff --git a/server/etcdserver/api/v3rpc/grpc.go b/server/etcdserver/api/v3rpc/grpc.go
index 26c52b385b4..32949207805 100644
--- a/server/etcdserver/api/v3rpc/grpc.go
+++ b/server/etcdserver/api/v3rpc/grpc.go
@@ -18,37 +18,35 @@ import (
"crypto/tls"
"math"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/v3/credentials"
- "go.etcd.io/etcd/server/v3/etcdserver"
-
- grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
"google.golang.org/grpc/health"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/client/v3/credentials"
+ "go.etcd.io/etcd/server/v3/etcdserver"
)
const (
- grpcOverheadBytes = 512 * 1024
- maxStreams = math.MaxUint32
- maxSendBytes = math.MaxInt32
+ maxSendBytes = math.MaxInt32
)
-func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOption) *grpc.Server {
+func Server(s *etcdserver.EtcdServer, tls *tls.Config, interceptor grpc.UnaryServerInterceptor, gopts ...grpc.ServerOption) *grpc.Server {
var opts []grpc.ServerOption
opts = append(opts, grpc.CustomCodec(&codec{}))
if tls != nil {
- bundle := credentials.NewBundle(credentials.Config{TLSConfig: tls})
- opts = append(opts, grpc.Creds(bundle.TransportCredentials()))
+ opts = append(opts, grpc.Creds(credentials.NewTransportCredential(tls)))
}
-
chainUnaryInterceptors := []grpc.UnaryServerInterceptor{
newLogUnaryInterceptor(s),
newUnaryInterceptor(s),
grpc_prometheus.UnaryServerInterceptor,
}
+ if interceptor != nil {
+ chainUnaryInterceptors = append(chainUnaryInterceptors, interceptor)
+ }
chainStreamInterceptors := []grpc.StreamServerInterceptor{
newStreamInterceptor(s),
@@ -58,15 +56,14 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOptio
if s.Cfg.ExperimentalEnableDistributedTracing {
chainUnaryInterceptors = append(chainUnaryInterceptors, otelgrpc.UnaryServerInterceptor(s.Cfg.ExperimentalTracerOptions...))
chainStreamInterceptors = append(chainStreamInterceptors, otelgrpc.StreamServerInterceptor(s.Cfg.ExperimentalTracerOptions...))
-
}
- opts = append(opts, grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(chainUnaryInterceptors...)))
- opts = append(opts, grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(chainStreamInterceptors...)))
+ opts = append(opts, grpc.ChainUnaryInterceptor(chainUnaryInterceptors...))
+ opts = append(opts, grpc.ChainStreamInterceptor(chainStreamInterceptors...))
- opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes)))
+ opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytesWithOverhead())))
opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes))
- opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
+ opts = append(opts, grpc.MaxConcurrentStreams(s.Cfg.MaxConcurrentStreams))
grpcServer := grpc.NewServer(append(opts, gopts...)...)
@@ -75,14 +72,11 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOptio
pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s))
pb.RegisterClusterServer(grpcServer, NewClusterServer(s))
pb.RegisterAuthServer(grpcServer, NewAuthServer(s))
- pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s))
- // server should register all the services manually
- // use empty service name for all etcd services' health status,
- // see https://github.com/grpc/grpc/blob/master/doc/health-checking.md for more
hsrv := health.NewServer()
- hsrv.SetServingStatus("", healthpb.HealthCheckResponse_SERVING)
+ healthNotifier := newHealthNotifier(hsrv, s)
healthpb.RegisterHealthServer(grpcServer, hsrv)
+ pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s, healthNotifier))
// set zero values for metrics registered for this grpc server
grpc_prometheus.Register(grpcServer)
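
Server() now accepts an optional extra unary interceptor, chains interceptors with the built-in `grpc.ChainUnaryInterceptor`/`grpc.ChainStreamInterceptor` instead of the external go-grpc-middleware package, derives the receive limit from `MaxRequestBytesWithOverhead()`, caps concurrent streams from config, and registers the maintenance service with a health notifier. A minimal sketch of the chaining change in isolation; the interceptor variables and sizes are placeholders, not etcd's actual values:

```go
// Sketch of interceptor chaining with built-in grpc options; the interceptor
// and size values are placeholders for the ones assembled in Server().
package main

import (
	"context"
	"math"

	"google.golang.org/grpc"
)

func newServer(unary []grpc.UnaryServerInterceptor, stream []grpc.StreamServerInterceptor, maxRecvBytes int) *grpc.Server {
	return grpc.NewServer(
		// grpc.ChainUnaryInterceptor replaces grpc_middleware.ChainUnaryServer:
		// interceptors run in the order given, with no third-party dependency.
		grpc.ChainUnaryInterceptor(unary...),
		grpc.ChainStreamInterceptor(stream...),
		grpc.MaxRecvMsgSize(maxRecvBytes),
		grpc.MaxSendMsgSize(math.MaxInt32),
	)
}

func main() {
	logging := func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
		return handler(ctx, req)
	}
	_ = newServer([]grpc.UnaryServerInterceptor{logging}, nil, 10*1024*1024)
}
```
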
diff --git a/server/etcdserver/api/v3rpc/header.go b/server/etcdserver/api/v3rpc/header.go
index 112cc922ea1..8fe4e5863b8 100644
--- a/server/etcdserver/api/v3rpc/header.go
+++ b/server/etcdserver/api/v3rpc/header.go
@@ -17,19 +17,20 @@ package v3rpc
import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/apply"
)
type header struct {
clusterID int64
memberID int64
- sg etcdserver.RaftStatusGetter
+ sg apply.RaftStatusGetter
rev func() int64
}
func newHeader(s *etcdserver.EtcdServer) header {
return header{
clusterID: int64(s.Cluster().ID()),
- memberID: int64(s.ID()),
+ memberID: int64(s.MemberID()),
sg: s,
rev: func() int64 { return s.KV().Rev() },
}
diff --git a/server/etcdserver/api/v3rpc/health.go b/server/etcdserver/api/v3rpc/health.go
new file mode 100644
index 00000000000..2861e11e6d3
--- /dev/null
+++ b/server/etcdserver/api/v3rpc/health.go
@@ -0,0 +1,79 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "go.uber.org/zap"
+ "google.golang.org/grpc/health"
+ healthpb "google.golang.org/grpc/health/grpc_health_v1"
+
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/features"
+)
+
+const (
+ allGRPCServices = ""
+)
+
+type notifier interface {
+ defragStarted()
+ defragFinished()
+}
+
+func newHealthNotifier(hs *health.Server, s *etcdserver.EtcdServer) notifier {
+ if hs == nil {
+ panic("unexpected nil gRPC health server")
+ }
+ hc := &healthNotifier{hs: hs, lg: s.Logger(), stopGRPCServiceOnDefrag: s.FeatureEnabled(features.StopGRPCServiceOnDefrag)}
+ // set grpc health server as serving status blindly since
+ // the grpc server will serve iff s.ReadyNotify() is closed.
+ hc.startServe()
+ return hc
+}
+
+type healthNotifier struct {
+ hs *health.Server
+ lg *zap.Logger
+
+ stopGRPCServiceOnDefrag bool
+}
+
+func (hc *healthNotifier) defragStarted() {
+ if !hc.stopGRPCServiceOnDefrag {
+ return
+ }
+ hc.stopServe("defrag is active")
+}
+
+func (hc *healthNotifier) defragFinished() { hc.startServe() }
+
+func (hc *healthNotifier) startServe() {
+ hc.lg.Info(
+ "grpc service status changed",
+ zap.String("service", allGRPCServices),
+ zap.String("status", healthpb.HealthCheckResponse_SERVING.String()),
+ )
+ hc.hs.SetServingStatus(allGRPCServices, healthpb.HealthCheckResponse_SERVING)
+}
+
+func (hc *healthNotifier) stopServe(reason string) {
+ hc.lg.Warn(
+ "grpc service status changed",
+ zap.String("service", allGRPCServices),
+ zap.String("status", healthpb.HealthCheckResponse_NOT_SERVING.String()),
+ zap.String("reason", reason),
+ )
+ hc.hs.SetServingStatus(allGRPCServices, healthpb.HealthCheckResponse_NOT_SERVING)
+}
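
The new healthNotifier flips the empty-name gRPC health service between SERVING and NOT_SERVING when defrag starts and finishes, but only when the StopGRPCServiceOnDefrag feature is enabled, so health-checking load balancers drain a member while it defragments. A hypothetical caller on the maintenance path would bracket the defrag like this (the type and field names below are illustrative, not code from this patch):

```go
// Hypothetical defrag path showing the intended use of the notifier; the
// maintenanceExample type and its fields are illustrative only.
package v3rpc

import "context"

type maintenanceExample struct {
	healthNotifier notifier                        // the interface defined above
	defragBackend  func(ctx context.Context) error // placeholder for the real defrag
}

func (ms *maintenanceExample) defrag(ctx context.Context) error {
	// Flip the empty-name health service to NOT_SERVING (when the feature
	// gate is on) so health-checking balancers drain this member first.
	ms.healthNotifier.defragStarted()
	// Restore SERVING once the defrag has finished, success or failure.
	defer ms.healthNotifier.defragFinished()

	return ms.defragBackend(ctx)
}
```
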
diff --git a/server/etcdserver/api/v3rpc/interceptor.go b/server/etcdserver/api/v3rpc/interceptor.go
index 5c80fcf041e..697d0b075ed 100644
--- a/server/etcdserver/api/v3rpc/interceptor.go
+++ b/server/etcdserver/api/v3rpc/interceptor.go
@@ -18,24 +18,24 @@ import (
"context"
"sync"
"time"
+ "unicode/utf8"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/raft/v3"
- "go.etcd.io/etcd/server/v3/etcdserver"
- "go.etcd.io/etcd/server/v3/etcdserver/api"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/api"
+ "go.etcd.io/raft/v3"
)
const (
- maxNoLeaderCnt = 3
- warnUnaryRequestLatency = 300 * time.Millisecond
- snapshotMethod = "/etcdserverpb.Maintenance/Snapshot"
+ maxNoLeaderCnt = 3
+ snapshotMethod = "/etcdserverpb.Maintenance/Snapshot"
)
type streamsMap struct {
@@ -44,13 +44,13 @@ type streamsMap struct {
}
func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
- return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+ return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
if !api.IsCapabilityEnabled(api.V3rpcCapability) {
return nil, rpctypes.ErrGRPCNotCapable
}
- if s.IsMemberExist(s.ID()) && s.IsLearner() && !isRPCSupportedForLearner(req) {
- return nil, rpctypes.ErrGPRCNotSupportedForLearner
+ if s.IsMemberExist(s.MemberID()) && s.IsLearner() && !isRPCSupportedForLearner(req) {
+ return nil, rpctypes.ErrGRPCNotSupportedForLearner
}
md, ok := metadata.FromIncomingContext(ctx)
@@ -59,6 +59,9 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
if len(vs) > 0 {
ver = vs[0]
}
+ if !utf8.ValidString(ver) {
+ return nil, rpctypes.ErrGRPCInvalidClientAPIVersion
+ }
clientRequests.WithLabelValues("unary", ver).Inc()
if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
@@ -73,24 +76,24 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
}
func newLogUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
- return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+ return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
startTime := time.Now()
resp, err := handler(ctx, req)
lg := s.Logger()
- if lg != nil { // acquire stats if debug level is enabled or request is expensive
- defer logUnaryRequestStats(ctx, lg, info, startTime, req, resp)
+ if lg != nil { // acquire stats if debug level is enabled or the request is expensive
+ defer logUnaryRequestStats(ctx, lg, s.Cfg.WarningUnaryRequestDuration, info, startTime, req, resp)
}
return resp, err
}
}
-func logUnaryRequestStats(ctx context.Context, lg *zap.Logger, info *grpc.UnaryServerInfo, startTime time.Time, req interface{}, resp interface{}) {
+func logUnaryRequestStats(ctx context.Context, lg *zap.Logger, warnLatency time.Duration, info *grpc.UnaryServerInfo, startTime time.Time, req any, resp any) {
duration := time.Since(startTime)
var enabledDebugLevel, expensiveRequest bool
if lg.Core().Enabled(zap.DebugLevel) {
enabledDebugLevel = true
}
- if duration > warnUnaryRequestLatency {
+ if duration > warnLatency {
expensiveRequest = true
}
if !enabledDebugLevel && !expensiveRequest {
@@ -178,7 +181,8 @@ func logUnaryRequestStats(ctx context.Context, lg *zap.Logger, info *grpc.UnaryS
}
func logGenericRequestStats(lg *zap.Logger, startTime time.Time, duration time.Duration, remote string, responseType string,
- reqCount int64, reqSize int, respCount int64, respSize int, reqContent string) {
+ reqCount int64, reqSize int, respCount int64, respSize int, reqContent string,
+) {
lg.Debug("request stats",
zap.Time("start time", startTime),
zap.Duration("time spent", duration),
@@ -193,7 +197,8 @@ func logGenericRequestStats(lg *zap.Logger, startTime time.Time, duration time.D
}
func logExpensiveRequestStats(lg *zap.Logger, startTime time.Time, duration time.Duration, remote string, responseType string,
- reqCount int64, reqSize int, respCount int64, respSize int, reqContent string) {
+ reqCount int64, reqSize int, respCount int64, respSize int, reqContent string,
+) {
lg.Warn("request stats",
zap.Time("start time", startTime),
zap.Duration("time spent", duration),
@@ -210,13 +215,13 @@ func logExpensiveRequestStats(lg *zap.Logger, startTime time.Time, duration time
func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor {
smap := monitorLeader(s)
- return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
if !api.IsCapabilityEnabled(api.V3rpcCapability) {
return rpctypes.ErrGRPCNotCapable
}
- if s.IsMemberExist(s.ID()) && s.IsLearner() && info.FullMethod != snapshotMethod { // learner does not support stream RPC except Snapshot
- return rpctypes.ErrGPRCNotSupportedForLearner
+ if s.IsMemberExist(s.MemberID()) && s.IsLearner() && info.FullMethod != snapshotMethod { // learner does not support stream RPC except Snapshot
+ return rpctypes.ErrGRPCNotSupportedForLearner
}
md, ok := metadata.FromIncomingContext(ss.Context())
@@ -225,6 +230,9 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor
if len(vs) > 0 {
ver = vs[0]
}
+ if !utf8.ValidString(ver) {
+ return rpctypes.ErrGRPCInvalidClientAPIVersion
+ }
clientRequests.WithLabelValues("stream", ver).Inc()
if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
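
Both interceptors now reject a client API version header that is not valid UTF-8 before it is used as a metrics label value (Prometheus label values must be valid UTF-8). A small sketch of that check in isolation; the header key and helper name are hypothetical:

```go
package main

import (
	"context"
	"fmt"
	"unicode/utf8"

	"google.golang.org/grpc/metadata"
)

// clientAPIVersion extracts a version header from incoming gRPC metadata and
// rejects values that are not valid UTF-8, mirroring the interceptor check above.
func clientAPIVersion(ctx context.Context, key string) (string, error) {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return "unknown", nil
	}
	vs := md.Get(key)
	if len(vs) == 0 {
		return "unknown", nil
	}
	if !utf8.ValidString(vs[0]) {
		return "", fmt.Errorf("invalid client api version %q", vs[0])
	}
	return vs[0], nil
}

func main() {
	ctx := metadata.NewIncomingContext(context.Background(),
		metadata.Pairs("client-api-version", "3.6.0"))
	fmt.Println(clientAPIVersion(ctx, "client-api-version"))
}
```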
diff --git a/server/etcdserver/api/v3rpc/key.go b/server/etcdserver/api/v3rpc/key.go
index d1a7ee63345..2c1de2a90de 100644
--- a/server/etcdserver/api/v3rpc/key.go
+++ b/server/etcdserver/api/v3rpc/key.go
@@ -115,6 +115,15 @@ func checkRangeRequest(r *pb.RangeRequest) error {
if len(r.Key) == 0 {
return rpctypes.ErrGRPCEmptyKey
}
+
+ if _, ok := pb.RangeRequest_SortOrder_name[int32(r.SortOrder)]; !ok {
+ return rpctypes.ErrGRPCInvalidSortOption
+ }
+
+ if _, ok := pb.RangeRequest_SortTarget_name[int32(r.SortTarget)]; !ok {
+ return rpctypes.ErrGRPCInvalidSortOption
+ }
+
return nil
}
diff --git a/server/etcdserver/api/v3rpc/key_test.go b/server/etcdserver/api/v3rpc/key_test.go
new file mode 100644
index 00000000000..a585ee89cc0
--- /dev/null
+++ b/server/etcdserver/api/v3rpc/key_test.go
@@ -0,0 +1,68 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "testing"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+)
+
+func TestCheckRangeRequest(t *testing.T) {
+ rangeReqs := []struct {
+ sortOrder pb.RangeRequest_SortOrder
+ sortTarget pb.RangeRequest_SortTarget
+ expectedError error
+ }{
+ {
+ sortOrder: pb.RangeRequest_ASCEND,
+ sortTarget: pb.RangeRequest_CREATE,
+ expectedError: nil,
+ },
+ {
+ sortOrder: pb.RangeRequest_ASCEND,
+ sortTarget: 100,
+ expectedError: rpctypes.ErrGRPCInvalidSortOption,
+ },
+ {
+ sortOrder: 200,
+ sortTarget: pb.RangeRequest_MOD,
+ expectedError: rpctypes.ErrGRPCInvalidSortOption,
+ },
+ }
+
+ for _, req := range rangeReqs {
+ rangeReq := pb.RangeRequest{
+ Key: []byte{1, 2, 3},
+ SortOrder: req.sortOrder,
+ SortTarget: req.sortTarget,
+ }
+
+ actualRet := checkRangeRequest(&rangeReq)
+ if getError(actualRet) != getError(req.expectedError) {
+ t.Errorf("expected sortOrder (%d) and sortTarget (%d) to be %q, but got %q",
+ req.sortOrder, req.sortTarget, getError(req.expectedError), getError(actualRet))
+ }
+ }
+}
+
+func getError(err error) string {
+ if err == nil {
+ return ""
+ }
+
+ return err.Error()
+}
diff --git a/server/etcdserver/api/v3rpc/lease.go b/server/etcdserver/api/v3rpc/lease.go
index e123dd2a37c..f51334eadeb 100644
--- a/server/etcdserver/api/v3rpc/lease.go
+++ b/server/etcdserver/api/v3rpc/lease.go
@@ -16,14 +16,15 @@ package v3rpc
import (
"context"
+ "errors"
"io"
+ "go.uber.org/zap"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/server/v3/etcdserver"
"go.etcd.io/etcd/server/v3/lease"
-
- "go.uber.org/zap"
)
type LeaseServer struct {
@@ -42,7 +43,6 @@ func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
resp, err := ls.le.LeaseGrant(ctx, cr)
-
if err != nil {
return nil, togRPCError(err)
}
@@ -61,10 +61,10 @@ func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeReques
func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
resp, err := ls.le.LeaseTimeToLive(ctx, rr)
- if err != nil && err != lease.ErrLeaseNotFound {
+ if err != nil && !errors.Is(err, lease.ErrLeaseNotFound) {
return nil, togRPCError(err)
}
- if err == lease.ErrLeaseNotFound {
+ if errors.Is(err, lease.ErrLeaseNotFound) {
resp = &pb.LeaseTimeToLiveResponse{
Header: &pb.ResponseHeader{},
ID: rr.ID,
@@ -77,10 +77,10 @@ func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLi
func (ls *LeaseServer) LeaseLeases(ctx context.Context, rr *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
resp, err := ls.le.LeaseLeases(ctx, rr)
- if err != nil && err != lease.ErrLeaseNotFound {
+ if err != nil && !errors.Is(err, lease.ErrLeaseNotFound) {
return nil, togRPCError(err)
}
- if err == lease.ErrLeaseNotFound {
+ if errors.Is(err, lease.ErrLeaseNotFound) {
resp = &pb.LeaseLeasesResponse{
Header: &pb.ResponseHeader{},
Leases: []*pb.LeaseStatus{},
@@ -100,7 +100,7 @@ func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err
case <-stream.Context().Done():
// the only server-side cancellation is noleader for now.
err = stream.Context().Err()
- if err == context.Canceled {
+ if errors.Is(err, context.Canceled) {
err = rpctypes.ErrGRPCNoLeader
}
}
@@ -110,7 +110,7 @@ func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err
func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
for {
req, err := stream.Recv()
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
return nil
}
if err != nil {
@@ -133,7 +133,7 @@ func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro
ls.hdr.fill(resp.Header)
ttl, err := ls.le.LeaseRenew(stream.Context(), lease.LeaseID(req.ID))
- if err == lease.ErrLeaseNotFound {
+ if errors.Is(err, lease.ErrLeaseNotFound) {
err = nil
ttl = 0
}
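
The lease server changes above swap direct `==` comparisons against sentinel errors for `errors.Is`, which also matches errors wrapped with `%w`. A short illustration with a stand-in sentinel:

```go
package main

import (
	"errors"
	"fmt"
)

// ErrLeaseNotFound stands in for a sentinel such as lease.ErrLeaseNotFound.
var ErrLeaseNotFound = errors.New("lease not found")

func lookup(id int64) error {
	// Wrapping with %w preserves the sentinel for errors.Is.
	return fmt.Errorf("lookup lease %d: %w", id, ErrLeaseNotFound)
}

func main() {
	err := lookup(42)
	fmt.Println(err == ErrLeaseNotFound)          // false: wrapped error is not identical
	fmt.Println(errors.Is(err, ErrLeaseNotFound)) // true: errors.Is unwraps the chain
}
```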
diff --git a/server/etcdserver/api/v3rpc/maintenance.go b/server/etcdserver/api/v3rpc/maintenance.go
index 38cc9137163..92b41c31703 100644
--- a/server/etcdserver/api/v3rpc/maintenance.go
+++ b/server/etcdserver/api/v3rpc/maintenance.go
@@ -17,20 +17,25 @@ package v3rpc
import (
"context"
"crypto/sha256"
+ errorspkg "errors"
"io"
"time"
"github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/api/v3/version"
- "go.etcd.io/etcd/raft/v3"
- "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/config"
"go.etcd.io/etcd/server/v3/etcdserver"
- "go.etcd.io/etcd/server/v3/mvcc"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
-
- "go.uber.org/zap"
+ "go.etcd.io/etcd/server/v3/etcdserver/apply"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+ serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ "go.etcd.io/raft/v3"
)
type KVGetter interface {
@@ -56,41 +61,59 @@ type LeaderTransferrer interface {
MoveLeader(ctx context.Context, lead, target uint64) error
}
-type AuthGetter interface {
- AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error)
- AuthStore() auth.AuthStore
-}
-
type ClusterStatusGetter interface {
IsLearner() bool
}
+type ConfigGetter interface {
+ Config() config.ServerConfig
+}
+
type maintenanceServer struct {
- lg *zap.Logger
- rg etcdserver.RaftStatusGetter
- kg KVGetter
- bg BackendGetter
- a Alarmer
- lt LeaderTransferrer
- hdr header
- cs ClusterStatusGetter
- d Downgrader
+ lg *zap.Logger
+ rg apply.RaftStatusGetter
+ hasher mvcc.HashStorage
+ bg BackendGetter
+ a Alarmer
+ lt LeaderTransferrer
+ hdr header
+ cs ClusterStatusGetter
+ d Downgrader
+ vs serverversion.Server
+ cg ConfigGetter
+
+ healthNotifier notifier
}
-func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
- srv := &maintenanceServer{lg: s.Cfg.Logger, rg: s, kg: s, bg: s, a: s, lt: s, hdr: newHeader(s), cs: s, d: s}
+func NewMaintenanceServer(s *etcdserver.EtcdServer, healthNotifier notifier) pb.MaintenanceServer {
+ srv := &maintenanceServer{
+ lg: s.Cfg.Logger,
+ rg: s,
+ hasher: s.KV().HashStorage(),
+ bg: s,
+ a: s,
+ lt: s,
+ hdr: newHeader(s),
+ cs: s,
+ d: s,
+ vs: etcdserver.NewServerVersionAdapter(s),
+ healthNotifier: healthNotifier,
+ cg: s,
+ }
if srv.lg == nil {
srv.lg = zap.NewNop()
}
- return &authMaintenanceServer{srv, s}
+ return &authMaintenanceServer{srv, &AuthAdmin{s}}
}
func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
ms.lg.Info("starting defragment")
+ ms.healthNotifier.defragStarted()
+ defer ms.healthNotifier.defragFinished()
err := ms.bg.Backend().Defrag()
if err != nil {
ms.lg.Warn("failed to defragment", zap.Error(err))
- return nil, err
+ return nil, togRPCError(err)
}
ms.lg.Info("finished defragment")
return &pb.DefragmentResponse{}, nil
@@ -100,6 +123,11 @@ func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRe
const snapshotSendBufferSize = 32 * 1024
func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
+ ver := schema.ReadStorageVersion(ms.bg.Backend().ReadTx())
+ storageVersion := ""
+ if ver != nil {
+ storageVersion = ver.String()
+ }
snap := ms.bg.Backend().Snapshot()
pr, pw := io.Pipe()
@@ -125,6 +153,7 @@ func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance
ms.lg.Info("sending database snapshot to client",
zap.Int64("total-bytes", total),
zap.String("size", size),
+ zap.String("storage-version", storageVersion),
)
for total-sent > 0 {
// buffer just holds read bytes from stream
@@ -135,7 +164,7 @@ func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance
buf := make([]byte, snapshotSendBufferSize)
n, err := io.ReadFull(pr, buf)
- if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ if err != nil && !errorspkg.Is(err, io.EOF) && !errorspkg.Is(err, io.ErrUnexpectedEOF) {
return togRPCError(err)
}
sent += int64(n)
@@ -151,6 +180,7 @@ func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance
resp := &pb.SnapshotResponse{
RemainingBytes: uint64(total - sent),
Blob: buf[:n],
+ Version: storageVersion,
}
if err = srv.Send(resp); err != nil {
return togRPCError(err)
@@ -166,7 +196,7 @@ func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance
zap.Int64("total-bytes", total),
zap.Int("checksum-size", len(sha)),
)
- hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha}
+ hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha, Version: storageVersion}
if err := srv.Send(hresp); err != nil {
return togRPCError(err)
}
@@ -174,13 +204,14 @@ func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance
ms.lg.Info("successfully sent database snapshot to client",
zap.Int64("total-bytes", total),
zap.String("size", size),
- zap.String("took", humanize.Time(start)),
+ zap.Duration("took", time.Since(start)),
+ zap.String("storage-version", storageVersion),
)
return nil
}
func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
- h, rev, err := ms.kg.KV().Hash()
+ h, rev, err := ms.hasher.Hash()
if err != nil {
return nil, togRPCError(err)
}
@@ -190,12 +221,17 @@ func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.H
}
func (ms *maintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
- h, rev, compactRev, err := ms.kg.KV().HashByRev(r.Revision)
+ h, rev, err := ms.hasher.HashByRev(r.Revision)
if err != nil {
return nil, togRPCError(err)
}
- resp := &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h, CompactRevision: compactRev}
+ resp := &pb.HashKVResponse{
+ Header: &pb.ResponseHeader{Revision: rev},
+ Hash: h.Hash,
+ CompactRevision: h.CompactRevision,
+ HashRevision: h.Revision,
+ }
ms.hdr.fill(resp.Header)
return resp, nil
}
@@ -225,9 +261,13 @@ func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (
DbSize: ms.bg.Backend().Size(),
DbSizeInUse: ms.bg.Backend().SizeInUse(),
IsLearner: ms.cs.IsLearner(),
+ DbSizeQuota: ms.cg.Config().QuotaBackendBytes,
+ }
+ if storageVersion := ms.vs.GetStorageVersion(); storageVersion != nil {
+ resp.StorageVersion = storageVersion.String()
}
if resp.Leader == raft.None {
- resp.Errors = append(resp.Errors, etcdserver.ErrNoLeader.Error())
+ resp.Errors = append(resp.Errors, errors.ErrNoLeader.Error())
}
for _, a := range ms.a.Alarms() {
resp.Errors = append(resp.Errors, a.String())
@@ -236,7 +276,7 @@ func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (
}
func (ms *maintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
- if ms.rg.ID() != ms.rg.Leader() {
+ if ms.rg.MemberID() != ms.rg.Leader() {
return nil, rpctypes.ErrGRPCNotLeader
}
@@ -258,57 +298,60 @@ func (ms *maintenanceServer) Downgrade(ctx context.Context, r *pb.DowngradeReque
type authMaintenanceServer struct {
*maintenanceServer
- ag AuthGetter
-}
-
-func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error {
- authInfo, err := ams.ag.AuthInfoFromCtx(ctx)
- if err != nil {
- return err
- }
-
- return ams.ag.AuthStore().IsAdminPermitted(authInfo)
+ *AuthAdmin
}
func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
- if err := ams.isAuthenticated(ctx); err != nil {
- return nil, err
+ if err := ams.isPermitted(ctx); err != nil {
+ return nil, togRPCError(err)
}
return ams.maintenanceServer.Defragment(ctx, sr)
}
func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
- if err := ams.isAuthenticated(srv.Context()); err != nil {
- return err
+ if err := ams.isPermitted(srv.Context()); err != nil {
+ return togRPCError(err)
}
return ams.maintenanceServer.Snapshot(sr, srv)
}
func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
- if err := ams.isAuthenticated(ctx); err != nil {
- return nil, err
+ if err := ams.isPermitted(ctx); err != nil {
+ return nil, togRPCError(err)
}
return ams.maintenanceServer.Hash(ctx, r)
}
func (ams *authMaintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
- if err := ams.isAuthenticated(ctx); err != nil {
- return nil, err
+ if err := ams.isPermitted(ctx); err != nil {
+ return nil, togRPCError(err)
}
return ams.maintenanceServer.HashKV(ctx, r)
}
func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
+ if err := ams.isPermitted(ctx); err != nil {
+ return nil, togRPCError(err)
+ }
+
return ams.maintenanceServer.Status(ctx, ar)
}
func (ams *authMaintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
+ if err := ams.isPermitted(ctx); err != nil {
+ return nil, togRPCError(err)
+ }
+
return ams.maintenanceServer.MoveLeader(ctx, tr)
}
func (ams *authMaintenanceServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) {
+ if err := ams.isPermitted(ctx); err != nil {
+ return nil, togRPCError(err)
+ }
+
return ams.maintenanceServer.Downgrade(ctx, r)
}
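
The Defragment handler above pairs defragStarted() with a deferred defragFinished(), so the health status is restored even when the backend defrag fails. A compact sketch of that ordering, using hypothetical stand-in interfaces rather than the real maintenance server types:

```go
package main

import "fmt"

// notifier and backend are hypothetical stand-ins for the healthNotifier
// and BackendGetter used by the maintenance server above.
type notifier interface {
	defragStarted()
	defragFinished()
}

type backend interface {
	Defrag() error
}

// defragment marks the server not-serving for the duration of the defrag,
// even when the defrag itself fails, by pairing the start call with a defer.
func defragment(n notifier, b backend) error {
	n.defragStarted()
	defer n.defragFinished()
	return b.Defrag()
}

type fakeNotifier struct{}

func (fakeNotifier) defragStarted()  { fmt.Println("health: NOT_SERVING") }
func (fakeNotifier) defragFinished() { fmt.Println("health: SERVING") }

type fakeBackend struct{}

func (fakeBackend) Defrag() error { fmt.Println("defragmenting"); return nil }

func main() {
	_ = defragment(fakeNotifier{}, fakeBackend{})
}
```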
diff --git a/server/etcdserver/api/v3rpc/member.go b/server/etcdserver/api/v3rpc/member.go
index 54fcc24843d..7fd68fe2d6f 100644
--- a/server/etcdserver/api/v3rpc/member.go
+++ b/server/etcdserver/api/v3rpc/member.go
@@ -106,7 +106,7 @@ func (cs *ClusterServer) MemberPromote(ctx context.Context, r *pb.MemberPromoteR
}
func (cs *ClusterServer) header() *pb.ResponseHeader {
- return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.server.Term()}
+ return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.MemberID()), RaftTerm: cs.server.Term()}
}
func membersToProtoMembers(membs []*membership.Member) []*pb.Member {
diff --git a/server/etcdserver/api/v3rpc/quota.go b/server/etcdserver/api/v3rpc/quota.go
index 7f53bd966bd..13bb83ffcd6 100644
--- a/server/etcdserver/api/v3rpc/quota.go
+++ b/server/etcdserver/api/v3rpc/quota.go
@@ -21,6 +21,7 @@ import (
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/storage"
)
type quotaKVServer struct {
@@ -29,14 +30,14 @@ type quotaKVServer struct {
}
type quotaAlarmer struct {
- q etcdserver.Quota
+ q storage.Quota
a Alarmer
id types.ID
}
// check whether request satisfies the quota. If there is not enough space,
// ignore request and raise the free space alarm.
-func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error {
+func (qa *quotaAlarmer) check(ctx context.Context, r any) error {
if qa.q.Available(r) {
return nil
}
@@ -52,7 +53,7 @@ func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error {
func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer {
return "aKVServer{
NewKVServer(s),
- quotaAlarmer{etcdserver.NewBackendQuota(s, "kv"), s, s.ID()},
+ quotaAlarmer{newBackendQuota(s, "kv"), s, s.MemberID()},
}
}
@@ -85,6 +86,10 @@ func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequ
func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
return "aLeaseServer{
NewLeaseServer(s),
- quotaAlarmer{etcdserver.NewBackendQuota(s, "lease"), s, s.ID()},
+ quotaAlarmer{newBackendQuota(s, "lease"), s, s.MemberID()},
}
}
+
+func newBackendQuota(s *etcdserver.EtcdServer, name string) storage.Quota {
+ return storage.NewBackendQuota(s.Logger(), s.Cfg.QuotaBackendBytes, s.Backend(), name)
+}
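
The quota wrappers keep the same check-then-alarm shape: if the backend quota reports no room for the request, raise the space alarm and fail fast. A reduced sketch of that pattern with hypothetical interfaces; the real storage.Quota and Alarmer carry more context than shown here:

```go
package main

import (
	"errors"
	"fmt"
)

var errNoSpace = errors.New("no space")

// quota and alarmer mirror the shape of the quota and alarm dependencies
// above, reduced to what the check needs.
type quota interface {
	Available(req any) bool
}

type alarmer interface {
	RaiseNoSpaceAlarm(memberID uint64)
}

func checkQuota(q quota, a alarmer, memberID uint64, req any) error {
	if q.Available(req) {
		return nil
	}
	a.RaiseNoSpaceAlarm(memberID)
	return errNoSpace
}

type fixedQuota struct{ ok bool }

func (f fixedQuota) Available(any) bool { return f.ok }

type logAlarmer struct{}

func (logAlarmer) RaiseNoSpaceAlarm(id uint64) { fmt.Printf("space alarm for member %x\n", id) }

func main() {
	fmt.Println(checkQuota(fixedQuota{ok: false}, logAlarmer{}, 0xabc, "put request"))
}
```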
diff --git a/server/etcdserver/api/v3rpc/util.go b/server/etcdserver/api/v3rpc/util.go
index 51cbdc66a16..2354b0cb243 100644
--- a/server/etcdserver/api/v3rpc/util.go
+++ b/server/etcdserver/api/v3rpc/util.go
@@ -16,53 +16,56 @@ package v3rpc
import (
"context"
+ errorspkg "errors"
"strings"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/server/v3/auth"
- "go.etcd.io/etcd/server/v3/etcdserver"
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+ "go.etcd.io/etcd/server/v3/etcdserver/version"
"go.etcd.io/etcd/server/v3/lease"
- "go.etcd.io/etcd/server/v3/mvcc"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
)
var toGRPCErrorMap = map[error]error{
- membership.ErrIDRemoved: rpctypes.ErrGRPCMemberNotFound,
- membership.ErrIDNotFound: rpctypes.ErrGRPCMemberNotFound,
- membership.ErrIDExists: rpctypes.ErrGRPCMemberExist,
- membership.ErrPeerURLexists: rpctypes.ErrGRPCPeerURLExist,
- membership.ErrMemberNotLearner: rpctypes.ErrGRPCMemberNotLearner,
- membership.ErrTooManyLearners: rpctypes.ErrGRPCTooManyLearners,
- etcdserver.ErrNotEnoughStartedMembers: rpctypes.ErrMemberNotEnoughStarted,
- etcdserver.ErrLearnerNotReady: rpctypes.ErrGRPCLearnerNotReady,
-
- mvcc.ErrCompacted: rpctypes.ErrGRPCCompacted,
- mvcc.ErrFutureRev: rpctypes.ErrGRPCFutureRev,
- etcdserver.ErrRequestTooLarge: rpctypes.ErrGRPCRequestTooLarge,
- etcdserver.ErrNoSpace: rpctypes.ErrGRPCNoSpace,
- etcdserver.ErrTooManyRequests: rpctypes.ErrTooManyRequests,
-
- etcdserver.ErrNoLeader: rpctypes.ErrGRPCNoLeader,
- etcdserver.ErrNotLeader: rpctypes.ErrGRPCNotLeader,
- etcdserver.ErrLeaderChanged: rpctypes.ErrGRPCLeaderChanged,
- etcdserver.ErrStopped: rpctypes.ErrGRPCStopped,
- etcdserver.ErrTimeout: rpctypes.ErrGRPCTimeout,
- etcdserver.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail,
- etcdserver.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost,
- etcdserver.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy,
- etcdserver.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound,
- etcdserver.ErrCorrupt: rpctypes.ErrGRPCCorrupt,
- etcdserver.ErrBadLeaderTransferee: rpctypes.ErrGRPCBadLeaderTransferee,
-
- etcdserver.ErrClusterVersionUnavailable: rpctypes.ErrGRPCClusterVersionUnavailable,
- etcdserver.ErrWrongDowngradeVersionFormat: rpctypes.ErrGRPCWrongDowngradeVersionFormat,
- etcdserver.ErrInvalidDowngradeTargetVersion: rpctypes.ErrGRPCInvalidDowngradeTargetVersion,
- etcdserver.ErrDowngradeInProcess: rpctypes.ErrGRPCDowngradeInProcess,
- etcdserver.ErrNoInflightDowngrade: rpctypes.ErrGRPCNoInflightDowngrade,
+ membership.ErrIDRemoved: rpctypes.ErrGRPCMemberNotFound,
+ membership.ErrIDNotFound: rpctypes.ErrGRPCMemberNotFound,
+ membership.ErrIDExists: rpctypes.ErrGRPCMemberExist,
+ membership.ErrPeerURLexists: rpctypes.ErrGRPCPeerURLExist,
+ membership.ErrMemberNotLearner: rpctypes.ErrGRPCMemberNotLearner,
+ membership.ErrTooManyLearners: rpctypes.ErrGRPCTooManyLearners,
+ errors.ErrNotEnoughStartedMembers: rpctypes.ErrMemberNotEnoughStarted,
+ errors.ErrLearnerNotReady: rpctypes.ErrGRPCLearnerNotReady,
+
+ mvcc.ErrCompacted: rpctypes.ErrGRPCCompacted,
+ mvcc.ErrFutureRev: rpctypes.ErrGRPCFutureRev,
+ errors.ErrRequestTooLarge: rpctypes.ErrGRPCRequestTooLarge,
+ errors.ErrNoSpace: rpctypes.ErrGRPCNoSpace,
+ errors.ErrTooManyRequests: rpctypes.ErrTooManyRequests,
+
+ errors.ErrNoLeader: rpctypes.ErrGRPCNoLeader,
+ errors.ErrNotLeader: rpctypes.ErrGRPCNotLeader,
+ errors.ErrLeaderChanged: rpctypes.ErrGRPCLeaderChanged,
+ errors.ErrStopped: rpctypes.ErrGRPCStopped,
+ errors.ErrTimeout: rpctypes.ErrGRPCTimeout,
+ errors.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail,
+ errors.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost,
+ errors.ErrTimeoutWaitAppliedIndex: rpctypes.ErrGRPCTimeoutWaitAppliedIndex,
+ errors.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy,
+ errors.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound,
+ errors.ErrCorrupt: rpctypes.ErrGRPCCorrupt,
+ errors.ErrBadLeaderTransferee: rpctypes.ErrGRPCBadLeaderTransferee,
+
+ errors.ErrClusterVersionUnavailable: rpctypes.ErrGRPCClusterVersionUnavailable,
+ errors.ErrWrongDowngradeVersionFormat: rpctypes.ErrGRPCWrongDowngradeVersionFormat,
+ version.ErrInvalidDowngradeTargetVersion: rpctypes.ErrGRPCInvalidDowngradeTargetVersion,
+ version.ErrDowngradeInProcess: rpctypes.ErrGRPCDowngradeInProcess,
+ version.ErrNoInflightDowngrade: rpctypes.ErrGRPCNoInflightDowngrade,
lease.ErrLeaseNotFound: rpctypes.ErrGRPCLeaseNotFound,
lease.ErrLeaseExists: rpctypes.ErrGRPCLeaseExist,
@@ -77,12 +80,14 @@ var toGRPCErrorMap = map[error]error{
auth.ErrRoleNotFound: rpctypes.ErrGRPCRoleNotFound,
auth.ErrRoleEmpty: rpctypes.ErrGRPCRoleEmpty,
auth.ErrAuthFailed: rpctypes.ErrGRPCAuthFailed,
+ auth.ErrPermissionNotGiven: rpctypes.ErrGRPCPermissionNotGiven,
auth.ErrPermissionDenied: rpctypes.ErrGRPCPermissionDenied,
auth.ErrRoleNotGranted: rpctypes.ErrGRPCRoleNotGranted,
auth.ErrPermissionNotGranted: rpctypes.ErrGRPCPermissionNotGranted,
auth.ErrAuthNotEnabled: rpctypes.ErrGRPCAuthNotEnabled,
auth.ErrInvalidAuthToken: rpctypes.ErrGRPCInvalidAuthToken,
auth.ErrInvalidAuthMgmt: rpctypes.ErrGRPCInvalidAuthMgmt,
+ auth.ErrAuthOldRevision: rpctypes.ErrGRPCAuthOldRevision,
// In sync with status.FromContextError
context.Canceled: rpctypes.ErrGRPCCanceled,
@@ -91,7 +96,7 @@ var toGRPCErrorMap = map[error]error{
func togRPCError(err error) error {
// let gRPC server convert to codes.Canceled, codes.DeadlineExceeded
- if err == context.Canceled || err == context.DeadlineExceeded {
+ if errorspkg.Is(err, context.Canceled) || errorspkg.Is(err, context.DeadlineExceeded) {
return err
}
grpcErr, ok := toGRPCErrorMap[err]
@@ -134,7 +139,7 @@ func isClientCtxErr(ctxErr error, err error) bool {
}
// in v3.4, learner is allowed to serve serializable read and endpoint status
-func isRPCSupportedForLearner(req interface{}) bool {
+func isRPCSupportedForLearner(req any) bool {
switch r := req.(type) {
case *pb.StatusRequest:
return true
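
togRPCError keeps its table-driven mapping from internal sentinel errors to pre-built gRPC status errors, falling back to codes.Unknown for anything unrecognized. A minimal version of the same lookup with made-up sentinels; the concrete codes are illustrative only:

```go
package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Hypothetical internal sentinel errors standing in for the server-side ones.
var (
	errNoLeader    = errors.New("no leader")
	errKeyNotFound = errors.New("key not found")
)

// errMap maps known internal errors to gRPC status errors.
var errMap = map[error]error{
	errNoLeader:    status.Error(codes.Unavailable, "server: no leader"),
	errKeyNotFound: status.Error(codes.InvalidArgument, "server: key not found"),
}

// toGRPCError wraps anything unrecognized as codes.Unknown, mirroring the
// fallback behaviour of togRPCError above.
func toGRPCError(err error) error {
	if grpcErr, ok := errMap[err]; ok {
		return grpcErr
	}
	return status.Error(codes.Unknown, err.Error())
}

func main() {
	fmt.Println(toGRPCError(errNoLeader))
	fmt.Println(toGRPCError(errors.New("boom")))
}
```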
diff --git a/server/etcdserver/api/v3rpc/util_test.go b/server/etcdserver/api/v3rpc/util_test.go
index d7f5a3ce92f..8bd569301c4 100644
--- a/server/etcdserver/api/v3rpc/util_test.go
+++ b/server/etcdserver/api/v3rpc/util_test.go
@@ -19,11 +19,11 @@ import (
"errors"
"testing"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/server/v3/mvcc"
-
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
+
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
)
func TestGRPCError(t *testing.T) {
@@ -38,7 +38,7 @@ func TestGRPCError(t *testing.T) {
{err: errors.New("foo"), exp: status.Error(codes.Unknown, "foo")},
}
for i := range tt {
- if err := togRPCError(tt[i].err); err != tt[i].exp {
+ if err := togRPCError(tt[i].err); !errors.Is(err, tt[i].exp) {
if _, ok := status.FromError(err); ok {
if err.Error() == tt[i].exp.Error() {
continue
diff --git a/server/etcdserver/api/v3rpc/validationfuzz_test.go b/server/etcdserver/api/v3rpc/validationfuzz_test.go
new file mode 100644
index 00000000000..d921c9602b7
--- /dev/null
+++ b/server/etcdserver/api/v3rpc/validationfuzz_test.go
@@ -0,0 +1,182 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ "testing"
+
+ "go.uber.org/zap/zaptest"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ txn "go.etcd.io/etcd/server/v3/etcdserver/txn"
+ "go.etcd.io/etcd/server/v3/lease"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+)
+
+func FuzzTxnRangeRequest(f *testing.F) {
+ testcases := []pb.RangeRequest{
+ {
+ Key: []byte{2},
+ RangeEnd: []byte{2},
+ Limit: 3,
+ Revision: 3,
+ SortOrder: 2,
+ SortTarget: 2,
+ },
+ }
+
+ for _, tc := range testcases {
+ soValue := pb.RangeRequest_SortOrder_value[tc.SortOrder.String()]
+ soTarget := pb.RangeRequest_SortTarget_value[tc.SortTarget.String()]
+ f.Add(tc.Key, tc.RangeEnd, tc.Limit, tc.Revision, soValue, soTarget)
+ }
+
+ f.Fuzz(func(t *testing.T,
+ key []byte,
+ rangeEnd []byte,
+ limit int64,
+ revision int64,
+ sortOrder int32,
+ sortTarget int32,
+ ) {
+ fuzzRequest := &pb.RangeRequest{
+ Key: key,
+ RangeEnd: rangeEnd,
+ Limit: limit,
+ SortOrder: pb.RangeRequest_SortOrder(sortOrder),
+ SortTarget: pb.RangeRequest_SortTarget(sortTarget),
+ }
+
+ verifyCheck(t, func() error {
+ return checkRangeRequest(fuzzRequest)
+ })
+
+ execTransaction(t, &pb.RequestOp{
+ Request: &pb.RequestOp_RequestRange{
+ RequestRange: fuzzRequest,
+ },
+ })
+ })
+}
+
+func FuzzTxnPutRequest(f *testing.F) {
+ testcases := []pb.PutRequest{
+ {
+ Key: []byte{2},
+ Value: []byte{2},
+ Lease: 2,
+ PrevKv: false,
+ IgnoreValue: false,
+ IgnoreLease: false,
+ },
+ }
+
+ for _, tc := range testcases {
+ f.Add(tc.Key, tc.Value, tc.Lease, tc.PrevKv, tc.IgnoreValue, tc.IgnoreLease)
+ }
+
+ f.Fuzz(func(t *testing.T,
+ key []byte,
+ value []byte,
+ leaseValue int64,
+ prevKv bool,
+ ignoreValue bool,
+ IgnoreLease bool,
+ ) {
+ fuzzRequest := &pb.PutRequest{
+ Key: key,
+ Value: value,
+ Lease: leaseValue,
+ PrevKv: prevKv,
+ IgnoreValue: ignoreValue,
+ IgnoreLease: IgnoreLease,
+ }
+
+ verifyCheck(t, func() error {
+ return checkPutRequest(fuzzRequest)
+ })
+
+ execTransaction(t, &pb.RequestOp{
+ Request: &pb.RequestOp_RequestPut{
+ RequestPut: fuzzRequest,
+ },
+ })
+ })
+}
+
+func FuzzTxnDeleteRangeRequest(f *testing.F) {
+ testcases := []pb.DeleteRangeRequest{
+ {
+ Key: []byte{2},
+ RangeEnd: []byte{2},
+ PrevKv: false,
+ },
+ }
+
+ for _, tc := range testcases {
+ f.Add(tc.Key, tc.RangeEnd, tc.PrevKv)
+ }
+
+ f.Fuzz(func(t *testing.T,
+ key []byte,
+ rangeEnd []byte,
+ prevKv bool,
+ ) {
+ fuzzRequest := &pb.DeleteRangeRequest{
+ Key: key,
+ RangeEnd: rangeEnd,
+ PrevKv: prevKv,
+ }
+
+ verifyCheck(t, func() error {
+ return checkDeleteRequest(fuzzRequest)
+ })
+
+ execTransaction(t, &pb.RequestOp{
+ Request: &pb.RequestOp_RequestDeleteRange{
+ RequestDeleteRange: fuzzRequest,
+ },
+ })
+ })
+}
+
+func verifyCheck(t *testing.T, check func() error) {
+ errCheck := check()
+ if errCheck != nil {
+ t.Skip("Validation did not pass, skipping the apply.")
+ }
+}
+
+func execTransaction(t *testing.T, req *pb.RequestOp) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, b)
+ s := mvcc.NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, mvcc.StoreConfig{})
+ defer s.Close()
+
+ // set up a cancelled context
+ ctx, cancel := context.WithCancel(context.TODO())
+ cancel()
+
+ request := &pb.TxnRequest{
+ Success: []*pb.RequestOp{req},
+ }
+
+ _, _, err := txn.Txn(ctx, zaptest.NewLogger(t), request, false, s, &lease.FakeLessor{})
+ if err != nil {
+ t.Skipf("Apply returned an error, skipping: %s", err.Error())
+ }
+}
diff --git a/server/etcdserver/api/v3rpc/watch.go b/server/etcdserver/api/v3rpc/watch.go
index c56ec4b26e7..b0a7e4a1926 100644
--- a/server/etcdserver/api/v3rpc/watch.go
+++ b/server/etcdserver/api/v3rpc/watch.go
@@ -16,19 +16,23 @@ package v3rpc
import (
"context"
+ "errors"
"io"
"math/rand"
"sync"
"time"
+ "go.uber.org/zap"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/auth"
"go.etcd.io/etcd/server/v3/etcdserver"
- "go.etcd.io/etcd/server/v3/mvcc"
-
- "go.uber.org/zap"
+ "go.etcd.io/etcd/server/v3/etcdserver/apply"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
)
const minWatchProgressInterval = 100 * time.Millisecond
@@ -39,9 +43,9 @@ type watchServer struct {
clusterID int64
memberID int64
- maxRequestBytes int
+ maxRequestBytes uint
- sg etcdserver.RaftStatusGetter
+ sg apply.RaftStatusGetter
watchable mvcc.WatchableKV
ag AuthGetter
}
@@ -52,9 +56,9 @@ func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
lg: s.Cfg.Logger,
clusterID: int64(s.Cluster().ID()),
- memberID: int64(s.ID()),
+ memberID: int64(s.MemberID()),
- maxRequestBytes: int(s.Cfg.MaxRequestBytes + grpcOverheadBytes),
+ maxRequestBytes: s.Cfg.MaxRequestBytesWithOverhead(),
sg: s,
watchable: s.Watchable(),
@@ -122,9 +126,9 @@ type serverWatchStream struct {
clusterID int64
memberID int64
- maxRequestBytes int
+ maxRequestBytes uint
- sg etcdserver.RaftStatusGetter
+ sg apply.RaftStatusGetter
watchable mvcc.WatchableKV
ag AuthGetter
@@ -208,13 +212,13 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
// revisited.
select {
case err = <-errc:
- if err == context.Canceled {
+ if errors.Is(err, context.Canceled) {
err = rpctypes.ErrGRPCWatchCanceled
}
close(sws.ctrlStream)
case <-stream.Context().Done():
err = stream.Context().Err()
- if err == context.Canceled {
+ if errors.Is(err, context.Canceled) {
err = rpctypes.ErrGRPCWatchCanceled
}
}
@@ -223,22 +227,22 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
return err
}
-func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool {
+func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) error {
authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context())
if err != nil {
- return false
+ return err
}
if authInfo == nil {
// if auth is enabled, IsRangePermitted() can cause an error
authInfo = &auth.AuthInfo{}
}
- return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil
+ return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd)
}
func (sws *serverWatchStream) recvLoop() error {
for {
req, err := sws.gRPCStream.Recv()
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
return nil
}
if err != nil {
@@ -266,13 +270,29 @@ func (sws *serverWatchStream) recvLoop() error {
creq.RangeEnd = []byte{}
}
- if !sws.isWatchPermitted(creq) {
+ err := sws.isWatchPermitted(creq)
+ if err != nil {
+ var cancelReason string
+ switch {
+ case errors.Is(err, auth.ErrInvalidAuthToken):
+ cancelReason = rpctypes.ErrGRPCInvalidAuthToken.Error()
+ case errors.Is(err, auth.ErrAuthOldRevision):
+ cancelReason = rpctypes.ErrGRPCAuthOldRevision.Error()
+ case errors.Is(err, auth.ErrUserEmpty):
+ cancelReason = rpctypes.ErrGRPCUserEmpty.Error()
+ default:
+ if !errors.Is(err, auth.ErrPermissionDenied) {
+ sws.lg.Error("unexpected error code", zap.Error(err))
+ }
+ cancelReason = rpctypes.ErrGRPCPermissionDenied.Error()
+ }
+
wr := &pb.WatchResponse{
Header: sws.newResponseHeader(sws.watchStream.Rev()),
- WatchId: creq.WatchId,
+ WatchId: clientv3.InvalidWatchID,
Canceled: true,
Created: true,
- CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(),
+ CancelReason: cancelReason,
}
select {
@@ -303,7 +323,10 @@ func (sws *serverWatchStream) recvLoop() error {
sws.fragment[id] = true
}
sws.mu.Unlock()
+ } else {
+ id = clientv3.InvalidWatchID
}
+
wr := &pb.WatchResponse{
Header: sws.newResponseHeader(wsrev),
WatchId: int64(id),
@@ -324,11 +347,17 @@ func (sws *serverWatchStream) recvLoop() error {
id := uv.CancelRequest.WatchId
err := sws.watchStream.Cancel(mvcc.WatchID(id))
if err == nil {
- sws.ctrlStream <- &pb.WatchResponse{
+ wr := &pb.WatchResponse{
Header: sws.newResponseHeader(sws.watchStream.Rev()),
WatchId: id,
Canceled: true,
}
+ select {
+ case sws.ctrlStream <- wr:
+ case <-sws.closec:
+ return nil
+ }
+
sws.mu.Lock()
delete(sws.progress, mvcc.WatchID(id))
delete(sws.prevKV, mvcc.WatchID(id))
@@ -338,15 +367,15 @@ func (sws *serverWatchStream) recvLoop() error {
}
case *pb.WatchRequest_ProgressRequest:
if uv.ProgressRequest != nil {
- sws.ctrlStream <- &pb.WatchResponse{
- Header: sws.newResponseHeader(sws.watchStream.Rev()),
- WatchId: -1, // response is not associated with any WatchId and will be broadcast to all watch channels
- }
+ sws.mu.Lock()
+ sws.watchStream.RequestProgressAll()
+ sws.mu.Unlock()
}
default:
// we probably should not shutdown the entire stream when
- // receive an valid command.
+ // receive an invalid command.
// so just do nothing instead.
+ sws.lg.Sugar().Infof("invalid watch request type %T received in gRPC stream", uv)
continue
}
}
@@ -409,11 +438,15 @@ func (sws *serverWatchStream) sendLoop() {
Canceled: canceled,
}
- if _, okID := ids[wresp.WatchID]; !okID {
- // buffer if id not yet announced
- wrs := append(pending[wresp.WatchID], wr)
- pending[wresp.WatchID] = wrs
- continue
+ // Progress notifications can have WatchID -1
+ // if they announce on behalf of multiple watchers
+ if wresp.WatchID != clientv3.InvalidWatchID {
+ if _, okID := ids[wresp.WatchID]; !okID {
+ // buffer if id not yet announced
+ wrs := append(pending[wresp.WatchID], wr)
+ pending[wresp.WatchID] = wrs
+ continue
+ }
}
mvcc.ReportEventReceived(len(evs))
@@ -423,6 +456,7 @@ func (sws *serverWatchStream) sendLoop() {
sws.mu.RUnlock()
var serr error
+ // gofail: var beforeSendWatchResponse struct{}
if !fragmented && !ok {
serr = sws.gRPCStream.Send(wr)
} else {
@@ -463,7 +497,10 @@ func (sws *serverWatchStream) sendLoop() {
// track id creation
wid := mvcc.WatchID(c.WatchId)
- if c.Canceled {
+
+ verify.Assert(!(c.Canceled && c.Created) || wid == clientv3.InvalidWatchID, "unexpected watchId: %d, wanted: %d, since both 'Canceled' and 'Created' are true", wid, clientv3.InvalidWatchID)
+
+ if c.Canceled && wid != clientv3.InvalidWatchID {
delete(ids, wid)
continue
}
@@ -507,11 +544,12 @@ func IsCreateEvent(e mvccpb.Event) bool {
func sendFragments(
wr *pb.WatchResponse,
- maxRequestBytes int,
- sendFunc func(*pb.WatchResponse) error) error {
+ maxRequestBytes uint,
+ sendFunc func(*pb.WatchResponse) error,
+) error {
// no need to fragment if total request size is smaller
// than max request limit or response contains only one event
- if wr.Size() < maxRequestBytes || len(wr.Events) < 2 {
+ if uint(wr.Size()) < maxRequestBytes || len(wr.Events) < 2 {
return sendFunc(wr)
}
@@ -524,7 +562,7 @@ func sendFragments(
cur := ow
for _, ev := range wr.Events[idx:] {
cur.Events = append(cur.Events, ev)
- if len(cur.Events) > 1 && cur.Size() >= maxRequestBytes {
+ if len(cur.Events) > 1 && uint(cur.Size()) >= maxRequestBytes {
cur.Events = cur.Events[:len(cur.Events)-1]
break
}
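
sendFragments now measures responses against an unsigned byte budget; the splitting itself still sends whole responses that fit and otherwise chunks events, marking every chunk except the last as a fragment. A simplified, self-contained sketch of that loop, where event and response are stand-ins for mvccpb.Event and pb.WatchResponse:

```go
package main

import "fmt"

// event and response are hypothetical stand-ins reduced to what
// fragmentation needs.
type event struct{ payload []byte }

type response struct {
	events   []event
	fragment bool // true on every chunk except the last one
}

func size(evs []event) uint {
	var n uint
	for _, e := range evs {
		n += uint(len(e.payload))
	}
	return n
}

// sendFragments splits a response whose events exceed maxBytes into several
// sends, keeping at least one event per fragment so progress is always made.
func sendFragments(evs []event, maxBytes uint, send func(response) error) error {
	if size(evs) < maxBytes || len(evs) < 2 {
		return send(response{events: evs})
	}
	for len(evs) > 0 {
		n := 1
		for n < len(evs) && size(evs[:n+1]) < maxBytes {
			n++
		}
		chunk := response{events: evs[:n], fragment: n < len(evs)}
		if err := send(chunk); err != nil {
			return err
		}
		evs = evs[n:]
	}
	return nil
}

func main() {
	evs := []event{{make([]byte, 40)}, {make([]byte, 40)}, {make([]byte, 40)}}
	_ = sendFragments(evs, 64, func(r response) error {
		fmt.Printf("sent %d events, fragment=%v\n", len(r.events), r.fragment)
		return nil
	})
}
```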
diff --git a/server/etcdserver/api/v3rpc/watch_test.go b/server/etcdserver/api/v3rpc/watch_test.go
index bd3f4943b2e..caa86f91ad7 100644
--- a/server/etcdserver/api/v3rpc/watch_test.go
+++ b/server/etcdserver/api/v3rpc/watch_test.go
@@ -16,6 +16,7 @@ package v3rpc
import (
"bytes"
+ "errors"
"math"
"testing"
@@ -26,7 +27,7 @@ import (
func TestSendFragment(t *testing.T) {
tt := []struct {
wr *pb.WatchResponse
- maxRequestBytes int
+ maxRequestBytes uint
fragments int
werr error
}{
@@ -69,7 +70,7 @@ func TestSendFragment(t *testing.T) {
return nil
}
err := sendFragments(tt[i].wr, tt[i].maxRequestBytes, testSend)
- if err != tt[i].werr {
+ if !errors.Is(err, tt[i].werr) {
t.Errorf("#%d: expected error %v, got %v", i, tt[i].werr, err)
}
got := len(fragmentedResp)
diff --git a/server/etcdserver/apply.go b/server/etcdserver/apply.go
deleted file mode 100644
index cf281d1a9c7..00000000000
--- a/server/etcdserver/apply.go
+++ /dev/null
@@ -1,1148 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "bytes"
- "context"
- "fmt"
- "sort"
- "strconv"
- "time"
-
- "github.com/coreos/go-semver/semver"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/membershippb"
- "go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/pkg/v3/traceutil"
- "go.etcd.io/etcd/server/v3/auth"
- "go.etcd.io/etcd/server/v3/etcdserver/api"
- "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
- "go.etcd.io/etcd/server/v3/lease"
- "go.etcd.io/etcd/server/v3/mvcc"
-
- "github.com/gogo/protobuf/proto"
- "go.uber.org/zap"
-)
-
-const (
- v3Version = "v3"
-)
-
-type applyResult struct {
- resp proto.Message
- err error
- // physc signals the physical effect of the request has completed in addition
- // to being logically reflected by the node. Currently only used for
- // Compaction requests.
- physc <-chan struct{}
- trace *traceutil.Trace
-}
-
-// applierV3Internal is the interface for processing internal V3 raft request
-type applierV3Internal interface {
- ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3)
- ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3)
- DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3)
-}
-
-// applierV3 is the interface for processing V3 raft messages
-type applierV3 interface {
- Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult
-
- Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error)
- Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error)
- DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
- Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error)
- Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error)
-
- LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
- LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
-
- LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error)
-
- Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error)
-
- Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error)
-
- AuthEnable() (*pb.AuthEnableResponse, error)
- AuthDisable() (*pb.AuthDisableResponse, error)
- AuthStatus() (*pb.AuthStatusResponse, error)
-
- UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
- UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
- UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
- UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
- UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
- UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
- RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
- RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
- RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
- RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
- RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
- UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
- RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
-}
-
-type checkReqFunc func(mvcc.ReadView, *pb.RequestOp) error
-
-type applierV3backend struct {
- s *EtcdServer
-
- checkPut checkReqFunc
- checkRange checkReqFunc
-}
-
-func (s *EtcdServer) newApplierV3Backend() applierV3 {
- base := &applierV3backend{s: s}
- base.checkPut = func(rv mvcc.ReadView, req *pb.RequestOp) error {
- return base.checkRequestPut(rv, req)
- }
- base.checkRange = func(rv mvcc.ReadView, req *pb.RequestOp) error {
- return base.checkRequestRange(rv, req)
- }
- return base
-}
-
-func (s *EtcdServer) newApplierV3Internal() applierV3Internal {
- base := &applierV3backend{s: s}
- return base
-}
-
-func (s *EtcdServer) newApplierV3() applierV3 {
- return newAuthApplierV3(
- s.AuthStore(),
- newQuotaApplierV3(s, s.newApplierV3Backend()),
- s.lessor,
- )
-}
-
-func (a *applierV3backend) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult {
- op := "unknown"
- ar := &applyResult{}
- defer func(start time.Time) {
- success := ar.err == nil || ar.err == mvcc.ErrCompacted
- applySec.WithLabelValues(v3Version, op, strconv.FormatBool(success)).Observe(time.Since(start).Seconds())
- warnOfExpensiveRequest(a.s.Logger(), a.s.Cfg.WarningApplyDuration, start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
- if !success {
- warnOfFailedRequest(a.s.Logger(), start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
- }
- }(time.Now())
-
- switch {
- case r.ClusterVersionSet != nil: // Implemented in 3.5.x
- op = "ClusterVersionSet"
- a.s.applyV3Internal.ClusterVersionSet(r.ClusterVersionSet, shouldApplyV3)
- return nil
- case r.ClusterMemberAttrSet != nil:
- op = "ClusterMemberAttrSet" // Implemented in 3.5.x
- a.s.applyV3Internal.ClusterMemberAttrSet(r.ClusterMemberAttrSet, shouldApplyV3)
- return nil
- case r.DowngradeInfoSet != nil:
- op = "DowngradeInfoSet" // Implemented in 3.5.x
- a.s.applyV3Internal.DowngradeInfoSet(r.DowngradeInfoSet, shouldApplyV3)
- return nil
- }
-
- if !shouldApplyV3 {
- return nil
- }
-
- // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls
- switch {
- case r.Range != nil:
- op = "Range"
- ar.resp, ar.err = a.s.applyV3.Range(context.TODO(), nil, r.Range)
- case r.Put != nil:
- op = "Put"
- ar.resp, ar.trace, ar.err = a.s.applyV3.Put(context.TODO(), nil, r.Put)
- case r.DeleteRange != nil:
- op = "DeleteRange"
- ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange)
- case r.Txn != nil:
- op = "Txn"
- ar.resp, ar.trace, ar.err = a.s.applyV3.Txn(context.TODO(), r.Txn)
- case r.Compaction != nil:
- op = "Compaction"
- ar.resp, ar.physc, ar.trace, ar.err = a.s.applyV3.Compaction(r.Compaction)
- case r.LeaseGrant != nil:
- op = "LeaseGrant"
- ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant)
- case r.LeaseRevoke != nil:
- op = "LeaseRevoke"
- ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke)
- case r.LeaseCheckpoint != nil:
- op = "LeaseCheckpoint"
- ar.resp, ar.err = a.s.applyV3.LeaseCheckpoint(r.LeaseCheckpoint)
- case r.Alarm != nil:
- op = "Alarm"
- ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm)
- case r.Authenticate != nil:
- op = "Authenticate"
- ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate)
- case r.AuthEnable != nil:
- op = "AuthEnable"
- ar.resp, ar.err = a.s.applyV3.AuthEnable()
- case r.AuthDisable != nil:
- op = "AuthDisable"
- ar.resp, ar.err = a.s.applyV3.AuthDisable()
- case r.AuthStatus != nil:
- ar.resp, ar.err = a.s.applyV3.AuthStatus()
- case r.AuthUserAdd != nil:
- op = "AuthUserAdd"
- ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd)
- case r.AuthUserDelete != nil:
- op = "AuthUserDelete"
- ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete)
- case r.AuthUserChangePassword != nil:
- op = "AuthUserChangePassword"
- ar.resp, ar.err = a.s.applyV3.UserChangePassword(r.AuthUserChangePassword)
- case r.AuthUserGrantRole != nil:
- op = "AuthUserGrantRole"
- ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole)
- case r.AuthUserGet != nil:
- op = "AuthUserGet"
- ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet)
- case r.AuthUserRevokeRole != nil:
- op = "AuthUserRevokeRole"
- ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole)
- case r.AuthRoleAdd != nil:
- op = "AuthRoleAdd"
- ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd)
- case r.AuthRoleGrantPermission != nil:
- op = "AuthRoleGrantPermission"
- ar.resp, ar.err = a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission)
- case r.AuthRoleGet != nil:
- op = "AuthRoleGet"
- ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet)
- case r.AuthRoleRevokePermission != nil:
- op = "AuthRoleRevokePermission"
- ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission)
- case r.AuthRoleDelete != nil:
- op = "AuthRoleDelete"
- ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete)
- case r.AuthUserList != nil:
- op = "AuthUserList"
- ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList)
- case r.AuthRoleList != nil:
- op = "AuthRoleList"
- ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList)
- default:
- a.s.lg.Panic("not implemented apply", zap.Stringer("raft-request", r))
- }
- return ar
-}
-
-func (a *applierV3backend) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) {
- resp = &pb.PutResponse{}
- resp.Header = &pb.ResponseHeader{}
- trace = traceutil.Get(ctx)
- // create put tracing if the trace in context is empty
- if trace.IsEmpty() {
- trace = traceutil.New("put",
- a.s.Logger(),
- traceutil.Field{Key: "key", Value: string(p.Key)},
- traceutil.Field{Key: "req_size", Value: p.Size()},
- )
- }
- val, leaseID := p.Value, lease.LeaseID(p.Lease)
- if txn == nil {
- if leaseID != lease.NoLease {
- if l := a.s.lessor.Lookup(leaseID); l == nil {
- return nil, nil, lease.ErrLeaseNotFound
- }
- }
- txn = a.s.KV().Write(trace)
- defer txn.End()
- }
-
- var rr *mvcc.RangeResult
- if p.IgnoreValue || p.IgnoreLease || p.PrevKv {
- trace.StepWithFunction(func() {
- rr, err = txn.Range(context.TODO(), p.Key, nil, mvcc.RangeOptions{})
- }, "get previous kv pair")
-
- if err != nil {
- return nil, nil, err
- }
- }
- if p.IgnoreValue || p.IgnoreLease {
- if rr == nil || len(rr.KVs) == 0 {
- // ignore_{lease,value} flag expects previous key-value pair
- return nil, nil, ErrKeyNotFound
- }
- }
- if p.IgnoreValue {
- val = rr.KVs[0].Value
- }
- if p.IgnoreLease {
- leaseID = lease.LeaseID(rr.KVs[0].Lease)
- }
- if p.PrevKv {
- if rr != nil && len(rr.KVs) != 0 {
- resp.PrevKv = &rr.KVs[0]
- }
- }
-
- resp.Header.Revision = txn.Put(p.Key, val, leaseID)
- trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision})
- return resp, trace, nil
-}
-
-func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
- resp := &pb.DeleteRangeResponse{}
- resp.Header = &pb.ResponseHeader{}
- end := mkGteRange(dr.RangeEnd)
-
- if txn == nil {
- txn = a.s.kv.Write(traceutil.TODO())
- defer txn.End()
- }
-
- if dr.PrevKv {
- rr, err := txn.Range(context.TODO(), dr.Key, end, mvcc.RangeOptions{})
- if err != nil {
- return nil, err
- }
- if rr != nil {
- resp.PrevKvs = make([]*mvccpb.KeyValue, len(rr.KVs))
- for i := range rr.KVs {
- resp.PrevKvs[i] = &rr.KVs[i]
- }
- }
- }
-
- resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, end)
- return resp, nil
-}
-
-func (a *applierV3backend) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
- trace := traceutil.Get(ctx)
-
- resp := &pb.RangeResponse{}
- resp.Header = &pb.ResponseHeader{}
-
- if txn == nil {
- txn = a.s.kv.Read(mvcc.ConcurrentReadTxMode, trace)
- defer txn.End()
- }
-
- limit := r.Limit
- if r.SortOrder != pb.RangeRequest_NONE ||
- r.MinModRevision != 0 || r.MaxModRevision != 0 ||
- r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 {
- // fetch everything; sort and truncate afterwards
- limit = 0
- }
- if limit > 0 {
- // fetch one extra for 'more' flag
- limit = limit + 1
- }
-
- ro := mvcc.RangeOptions{
- Limit: limit,
- Rev: r.Revision,
- Count: r.CountOnly,
- }
-
- rr, err := txn.Range(ctx, r.Key, mkGteRange(r.RangeEnd), ro)
- if err != nil {
- return nil, err
- }
-
- if r.MaxModRevision != 0 {
- f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision }
- pruneKVs(rr, f)
- }
- if r.MinModRevision != 0 {
- f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision }
- pruneKVs(rr, f)
- }
- if r.MaxCreateRevision != 0 {
- f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision }
- pruneKVs(rr, f)
- }
- if r.MinCreateRevision != 0 {
- f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision }
- pruneKVs(rr, f)
- }
-
- sortOrder := r.SortOrder
- if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE {
- // Since current mvcc.Range implementation returns results
- // sorted by keys in lexiographically ascending order,
- // sort ASCEND by default only when target is not 'KEY'
- sortOrder = pb.RangeRequest_ASCEND
- }
- if sortOrder != pb.RangeRequest_NONE {
- var sorter sort.Interface
- switch {
- case r.SortTarget == pb.RangeRequest_KEY:
- sorter = &kvSortByKey{&kvSort{rr.KVs}}
- case r.SortTarget == pb.RangeRequest_VERSION:
- sorter = &kvSortByVersion{&kvSort{rr.KVs}}
- case r.SortTarget == pb.RangeRequest_CREATE:
- sorter = &kvSortByCreate{&kvSort{rr.KVs}}
- case r.SortTarget == pb.RangeRequest_MOD:
- sorter = &kvSortByMod{&kvSort{rr.KVs}}
- case r.SortTarget == pb.RangeRequest_VALUE:
- sorter = &kvSortByValue{&kvSort{rr.KVs}}
- }
- switch {
- case sortOrder == pb.RangeRequest_ASCEND:
- sort.Sort(sorter)
- case sortOrder == pb.RangeRequest_DESCEND:
- sort.Sort(sort.Reverse(sorter))
- }
- }
-
- if r.Limit > 0 && len(rr.KVs) > int(r.Limit) {
- rr.KVs = rr.KVs[:r.Limit]
- resp.More = true
- }
- trace.Step("filter and sort the key-value pairs")
- resp.Header.Revision = rr.Rev
- resp.Count = int64(rr.Count)
- resp.Kvs = make([]*mvccpb.KeyValue, len(rr.KVs))
- for i := range rr.KVs {
- if r.KeysOnly {
- rr.KVs[i].Value = nil
- }
- resp.Kvs[i] = &rr.KVs[i]
- }
- trace.Step("assemble the response")
- return resp, nil
-}
-
-func (a *applierV3backend) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
- trace := traceutil.Get(ctx)
- if trace.IsEmpty() {
- trace = traceutil.New("transaction", a.s.Logger())
- ctx = context.WithValue(ctx, traceutil.TraceKey, trace)
- }
- isWrite := !isTxnReadonly(rt)
-
- // When the transaction contains write operations, we use ReadTx instead of
- // ConcurrentReadTx to avoid extra overhead of copying buffer.
- var txn mvcc.TxnWrite
- if isWrite && a.s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer {
- txn = mvcc.NewReadOnlyTxnWrite(a.s.KV().Read(mvcc.SharedBufReadTxMode, trace))
- } else {
- txn = mvcc.NewReadOnlyTxnWrite(a.s.KV().Read(mvcc.ConcurrentReadTxMode, trace))
- }
-
- var txnPath []bool
- trace.StepWithFunction(
- func() {
- txnPath = compareToPath(txn, rt)
- },
- "compare",
- )
-
- if isWrite {
- trace.AddField(traceutil.Field{Key: "read_only", Value: false})
- if _, err := checkRequests(txn, rt, txnPath, a.checkPut); err != nil {
- txn.End()
- return nil, nil, err
- }
- }
- if _, err := checkRequests(txn, rt, txnPath, a.checkRange); err != nil {
- txn.End()
- return nil, nil, err
- }
- trace.Step("check requests")
- txnResp, _ := newTxnResp(rt, txnPath)
-
- // When executing mutable txn ops, etcd must hold the txn lock so
- // readers do not see any intermediate results. Since writes are
- // serialized on the raft loop, the revision in the read view will
- // be the revision of the write txn.
- if isWrite {
- txn.End()
- txn = a.s.KV().Write(trace)
- }
- a.applyTxn(ctx, txn, rt, txnPath, txnResp)
- rev := txn.Rev()
- if len(txn.Changes()) != 0 {
- rev++
- }
- txn.End()
-
- txnResp.Header.Revision = rev
- trace.AddField(
- traceutil.Field{Key: "number_of_response", Value: len(txnResp.Responses)},
- traceutil.Field{Key: "response_revision", Value: txnResp.Header.Revision},
- )
- return txnResp, trace, nil
-}
-
-// newTxnResp allocates a txn response for a txn request given a path.
-func newTxnResp(rt *pb.TxnRequest, txnPath []bool) (txnResp *pb.TxnResponse, txnCount int) {
- reqs := rt.Success
- if !txnPath[0] {
- reqs = rt.Failure
- }
- resps := make([]*pb.ResponseOp, len(reqs))
- txnResp = &pb.TxnResponse{
- Responses: resps,
- Succeeded: txnPath[0],
- Header: &pb.ResponseHeader{},
- }
- for i, req := range reqs {
- switch tv := req.Request.(type) {
- case *pb.RequestOp_RequestRange:
- resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{}}
- case *pb.RequestOp_RequestPut:
- resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{}}
- case *pb.RequestOp_RequestDeleteRange:
- resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{}}
- case *pb.RequestOp_RequestTxn:
- resp, txns := newTxnResp(tv.RequestTxn, txnPath[1:])
- resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseTxn{ResponseTxn: resp}}
- txnPath = txnPath[1+txns:]
- txnCount += txns + 1
- default:
- }
- }
- return txnResp, txnCount
-}
-
-func compareToPath(rv mvcc.ReadView, rt *pb.TxnRequest) []bool {
- txnPath := make([]bool, 1)
- ops := rt.Success
- if txnPath[0] = applyCompares(rv, rt.Compare); !txnPath[0] {
- ops = rt.Failure
- }
- for _, op := range ops {
- tv, ok := op.Request.(*pb.RequestOp_RequestTxn)
- if !ok || tv.RequestTxn == nil {
- continue
- }
- txnPath = append(txnPath, compareToPath(rv, tv.RequestTxn)...)
- }
- return txnPath
-}
-
-func applyCompares(rv mvcc.ReadView, cmps []*pb.Compare) bool {
- for _, c := range cmps {
- if !applyCompare(rv, c) {
- return false
- }
- }
- return true
-}
-
-// applyCompare applies the compare request.
-// If the comparison succeeds, it returns true. Otherwise, returns false.
-func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool {
- // TODO: possible optimizations
- // * chunk reads for large ranges to conserve memory
- // * rewrite rules for common patterns:
- // ex. "[a, b) createrev > 0" => "limit 1 /\ kvs > 0"
- // * caching
- rr, err := rv.Range(context.TODO(), c.Key, mkGteRange(c.RangeEnd), mvcc.RangeOptions{})
- if err != nil {
- return false
- }
- if len(rr.KVs) == 0 {
- if c.Target == pb.Compare_VALUE {
- // Always fail if comparing a value on a key/keys that doesn't exist;
- // nil == empty string in grpc; no way to represent missing value
- return false
- }
- return compareKV(c, mvccpb.KeyValue{})
- }
- for _, kv := range rr.KVs {
- if !compareKV(c, kv) {
- return false
- }
- }
- return true
-}
-
-func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool {
- var result int
- rev := int64(0)
- switch c.Target {
- case pb.Compare_VALUE:
- v := []byte{}
- if tv, _ := c.TargetUnion.(*pb.Compare_Value); tv != nil {
- v = tv.Value
- }
- result = bytes.Compare(ckv.Value, v)
- case pb.Compare_CREATE:
- if tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision); tv != nil {
- rev = tv.CreateRevision
- }
- result = compareInt64(ckv.CreateRevision, rev)
- case pb.Compare_MOD:
- if tv, _ := c.TargetUnion.(*pb.Compare_ModRevision); tv != nil {
- rev = tv.ModRevision
- }
- result = compareInt64(ckv.ModRevision, rev)
- case pb.Compare_VERSION:
- if tv, _ := c.TargetUnion.(*pb.Compare_Version); tv != nil {
- rev = tv.Version
- }
- result = compareInt64(ckv.Version, rev)
- case pb.Compare_LEASE:
- if tv, _ := c.TargetUnion.(*pb.Compare_Lease); tv != nil {
- rev = tv.Lease
- }
- result = compareInt64(ckv.Lease, rev)
- }
- switch c.Result {
- case pb.Compare_EQUAL:
- return result == 0
- case pb.Compare_NOT_EQUAL:
- return result != 0
- case pb.Compare_GREATER:
- return result > 0
- case pb.Compare_LESS:
- return result < 0
- }
- return true
-}
-
-func (a *applierV3backend) applyTxn(ctx context.Context, txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPath []bool, tresp *pb.TxnResponse) (txns int) {
- trace := traceutil.Get(ctx)
- reqs := rt.Success
- if !txnPath[0] {
- reqs = rt.Failure
- }
-
- lg := a.s.Logger()
- for i, req := range reqs {
- respi := tresp.Responses[i].Response
- switch tv := req.Request.(type) {
- case *pb.RequestOp_RequestRange:
- trace.StartSubTrace(
- traceutil.Field{Key: "req_type", Value: "range"},
- traceutil.Field{Key: "range_begin", Value: string(tv.RequestRange.Key)},
- traceutil.Field{Key: "range_end", Value: string(tv.RequestRange.RangeEnd)})
- resp, err := a.Range(ctx, txn, tv.RequestRange)
- if err != nil {
- lg.Panic("unexpected error during txn", zap.Error(err))
- }
- respi.(*pb.ResponseOp_ResponseRange).ResponseRange = resp
- trace.StopSubTrace()
- case *pb.RequestOp_RequestPut:
- trace.StartSubTrace(
- traceutil.Field{Key: "req_type", Value: "put"},
- traceutil.Field{Key: "key", Value: string(tv.RequestPut.Key)},
- traceutil.Field{Key: "req_size", Value: tv.RequestPut.Size()})
- resp, _, err := a.Put(ctx, txn, tv.RequestPut)
- if err != nil {
- lg.Panic("unexpected error during txn", zap.Error(err))
- }
- respi.(*pb.ResponseOp_ResponsePut).ResponsePut = resp
- trace.StopSubTrace()
- case *pb.RequestOp_RequestDeleteRange:
- resp, err := a.DeleteRange(txn, tv.RequestDeleteRange)
- if err != nil {
- lg.Panic("unexpected error during txn", zap.Error(err))
- }
- respi.(*pb.ResponseOp_ResponseDeleteRange).ResponseDeleteRange = resp
- case *pb.RequestOp_RequestTxn:
- resp := respi.(*pb.ResponseOp_ResponseTxn).ResponseTxn
- applyTxns := a.applyTxn(ctx, txn, tv.RequestTxn, txnPath[1:], resp)
- txns += applyTxns + 1
- txnPath = txnPath[applyTxns+1:]
- default:
- // empty union
- }
- }
- return txns
-}
-
-func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) {
- resp := &pb.CompactionResponse{}
- resp.Header = &pb.ResponseHeader{}
- trace := traceutil.New("compact",
- a.s.Logger(),
- traceutil.Field{Key: "revision", Value: compaction.Revision},
- )
-
- ch, err := a.s.KV().Compact(trace, compaction.Revision)
- if err != nil {
- return nil, ch, nil, err
- }
- // get the current revision. which key to get is not important.
- rr, _ := a.s.KV().Range(context.TODO(), []byte("compaction"), nil, mvcc.RangeOptions{})
- resp.Header.Revision = rr.Rev
- return resp, ch, trace, err
-}
-
-func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL)
- resp := &pb.LeaseGrantResponse{}
- if err == nil {
- resp.ID = int64(l.ID)
- resp.TTL = l.TTL()
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
- err := a.s.lessor.Revoke(lease.LeaseID(lc.ID))
- return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err
-}
-
-func (a *applierV3backend) LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error) {
- for _, c := range lc.Checkpoints {
- err := a.s.lessor.Checkpoint(lease.LeaseID(c.ID), c.Remaining_TTL)
- if err != nil {
- return &pb.LeaseCheckpointResponse{Header: newHeader(a.s)}, err
- }
- }
- return &pb.LeaseCheckpointResponse{Header: newHeader(a.s)}, nil
-}
-
-func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
- resp := &pb.AlarmResponse{}
- oldCount := len(a.s.alarmStore.Get(ar.Alarm))
-
- lg := a.s.Logger()
- switch ar.Action {
- case pb.AlarmRequest_GET:
- resp.Alarms = a.s.alarmStore.Get(ar.Alarm)
- case pb.AlarmRequest_ACTIVATE:
- m := a.s.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm)
- if m == nil {
- break
- }
- resp.Alarms = append(resp.Alarms, m)
- activated := oldCount == 0 && len(a.s.alarmStore.Get(m.Alarm)) == 1
- if !activated {
- break
- }
-
- lg.Warn("alarm raised", zap.String("alarm", m.Alarm.String()), zap.String("from", types.ID(m.MemberID).String()))
- switch m.Alarm {
- case pb.AlarmType_CORRUPT:
- a.s.applyV3 = newApplierV3Corrupt(a)
- case pb.AlarmType_NOSPACE:
- a.s.applyV3 = newApplierV3Capped(a)
- default:
- lg.Warn("unimplemented alarm activation", zap.String("alarm", fmt.Sprintf("%+v", m)))
- }
- case pb.AlarmRequest_DEACTIVATE:
- m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm)
- if m == nil {
- break
- }
- resp.Alarms = append(resp.Alarms, m)
- deactivated := oldCount > 0 && len(a.s.alarmStore.Get(ar.Alarm)) == 0
- if !deactivated {
- break
- }
-
- switch m.Alarm {
- case pb.AlarmType_NOSPACE, pb.AlarmType_CORRUPT:
- // TODO: check kv hash before deactivating CORRUPT?
- lg.Warn("alarm disarmed", zap.String("alarm", m.Alarm.String()), zap.String("from", types.ID(m.MemberID).String()))
- a.s.applyV3 = a.s.newApplierV3()
- default:
- lg.Warn("unimplemented alarm deactivation", zap.String("alarm", fmt.Sprintf("%+v", m)))
- }
- default:
- return nil, nil
- }
- return resp, nil
-}
-
-type applierV3Capped struct {
- applierV3
- q backendQuota
-}
-
-// newApplierV3Capped creates an applyV3 that will reject Puts and transactions
-// with Puts so that the number of keys in the store is capped.
-func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} }
-
-func (a *applierV3Capped) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
- return nil, nil, ErrNoSpace
-}
-
-func (a *applierV3Capped) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
- if a.q.Cost(r) > 0 {
- return nil, nil, ErrNoSpace
- }
- return a.applierV3.Txn(ctx, r)
-}
-
-func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- return nil, ErrNoSpace
-}
-
-func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) {
- err := a.s.AuthStore().AuthEnable()
- if err != nil {
- return nil, err
- }
- return &pb.AuthEnableResponse{Header: newHeader(a.s)}, nil
-}
-
-func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) {
- a.s.AuthStore().AuthDisable()
- return &pb.AuthDisableResponse{Header: newHeader(a.s)}, nil
-}
-
-func (a *applierV3backend) AuthStatus() (*pb.AuthStatusResponse, error) {
- enabled := a.s.AuthStore().IsAuthEnabled()
- authRevision := a.s.AuthStore().Revision()
- return &pb.AuthStatusResponse{Header: newHeader(a.s), Enabled: enabled, AuthRevision: authRevision}, nil
-}
-
-func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) {
- ctx := context.WithValue(context.WithValue(a.s.ctx, auth.AuthenticateParamIndex{}, a.s.consistIndex.ConsistentIndex()), auth.AuthenticateParamSimpleTokenPrefix{}, r.SimpleToken)
- resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
- resp, err := a.s.AuthStore().UserAdd(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
- resp, err := a.s.AuthStore().UserDelete(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
- resp, err := a.s.AuthStore().UserChangePassword(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
- resp, err := a.s.AuthStore().UserGrantRole(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
- resp, err := a.s.AuthStore().UserGet(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
- resp, err := a.s.AuthStore().UserRevokeRole(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
- resp, err := a.s.AuthStore().RoleAdd(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
- resp, err := a.s.AuthStore().RoleGrantPermission(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
- resp, err := a.s.AuthStore().RoleGet(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
- resp, err := a.s.AuthStore().RoleRevokePermission(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
- resp, err := a.s.AuthStore().RoleDelete(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
- resp, err := a.s.AuthStore().UserList(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
- resp, err := a.s.AuthStore().RoleList(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3) {
- a.s.cluster.SetVersion(semver.Must(semver.NewVersion(r.Ver)), api.UpdateCapability, shouldApplyV3)
-}
-
-func (a *applierV3backend) ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3) {
- a.s.cluster.UpdateAttributes(
- types.ID(r.Member_ID),
- membership.Attributes{
- Name: r.MemberAttributes.Name,
- ClientURLs: r.MemberAttributes.ClientUrls,
- },
- shouldApplyV3,
- )
-}
-
-func (a *applierV3backend) DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3) {
- d := membership.DowngradeInfo{Enabled: false}
- if r.Enabled {
- d = membership.DowngradeInfo{Enabled: true, TargetVersion: r.Ver}
- }
- a.s.cluster.SetDowngradeInfo(&d, shouldApplyV3)
-}
-
-type quotaApplierV3 struct {
- applierV3
- q Quota
-}
-
-func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 {
- return "aApplierV3{app, NewBackendQuota(s, "v3-applier")}
-}
-
-func (a *quotaApplierV3) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
- ok := a.q.Available(p)
- resp, trace, err := a.applierV3.Put(ctx, txn, p)
- if err == nil && !ok {
- err = ErrNoSpace
- }
- return resp, trace, err
-}
-
-func (a *quotaApplierV3) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
- ok := a.q.Available(rt)
- resp, trace, err := a.applierV3.Txn(ctx, rt)
- if err == nil && !ok {
- err = ErrNoSpace
- }
- return resp, trace, err
-}
-
-func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- ok := a.q.Available(lc)
- resp, err := a.applierV3.LeaseGrant(lc)
- if err == nil && !ok {
- err = ErrNoSpace
- }
- return resp, err
-}
-
-type kvSort struct{ kvs []mvccpb.KeyValue }
-
-func (s *kvSort) Swap(i, j int) {
- t := s.kvs[i]
- s.kvs[i] = s.kvs[j]
- s.kvs[j] = t
-}
-func (s *kvSort) Len() int { return len(s.kvs) }
-
-type kvSortByKey struct{ *kvSort }
-
-func (s *kvSortByKey) Less(i, j int) bool {
- return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0
-}
-
-type kvSortByVersion struct{ *kvSort }
-
-func (s *kvSortByVersion) Less(i, j int) bool {
- return (s.kvs[i].Version - s.kvs[j].Version) < 0
-}
-
-type kvSortByCreate struct{ *kvSort }
-
-func (s *kvSortByCreate) Less(i, j int) bool {
- return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0
-}
-
-type kvSortByMod struct{ *kvSort }
-
-func (s *kvSortByMod) Less(i, j int) bool {
- return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0
-}
-
-type kvSortByValue struct{ *kvSort }
-
-func (s *kvSortByValue) Less(i, j int) bool {
- return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0
-}
-
-func checkRequests(rv mvcc.ReadView, rt *pb.TxnRequest, txnPath []bool, f checkReqFunc) (int, error) {
- txnCount := 0
- reqs := rt.Success
- if !txnPath[0] {
- reqs = rt.Failure
- }
- for _, req := range reqs {
- if tv, ok := req.Request.(*pb.RequestOp_RequestTxn); ok && tv.RequestTxn != nil {
- txns, err := checkRequests(rv, tv.RequestTxn, txnPath[1:], f)
- if err != nil {
- return 0, err
- }
- txnCount += txns + 1
- txnPath = txnPath[txns+1:]
- continue
- }
- if err := f(rv, req); err != nil {
- return 0, err
- }
- }
- return txnCount, nil
-}
-
-func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqOp *pb.RequestOp) error {
- tv, ok := reqOp.Request.(*pb.RequestOp_RequestPut)
- if !ok || tv.RequestPut == nil {
- return nil
- }
- req := tv.RequestPut
- if req.IgnoreValue || req.IgnoreLease {
- // expects previous key-value, error if not exist
- rr, err := rv.Range(context.TODO(), req.Key, nil, mvcc.RangeOptions{})
- if err != nil {
- return err
- }
- if rr == nil || len(rr.KVs) == 0 {
- return ErrKeyNotFound
- }
- }
- if lease.LeaseID(req.Lease) != lease.NoLease {
- if l := a.s.lessor.Lookup(lease.LeaseID(req.Lease)); l == nil {
- return lease.ErrLeaseNotFound
- }
- }
- return nil
-}
-
-func (a *applierV3backend) checkRequestRange(rv mvcc.ReadView, reqOp *pb.RequestOp) error {
- tv, ok := reqOp.Request.(*pb.RequestOp_RequestRange)
- if !ok || tv.RequestRange == nil {
- return nil
- }
- req := tv.RequestRange
- switch {
- case req.Revision == 0:
- return nil
- case req.Revision > rv.Rev():
- return mvcc.ErrFutureRev
- case req.Revision < rv.FirstRev():
- return mvcc.ErrCompacted
- }
- return nil
-}
-
-func compareInt64(a, b int64) int {
- switch {
- case a < b:
- return -1
- case a > b:
- return 1
- default:
- return 0
- }
-}
-
-// mkGteRange determines if the range end is a >= range. This works around grpc
-// sending empty byte strings as nil; >= is encoded in the range end as '\0'.
-// If it is a GTE range, then []byte{} is returned to indicate the empty byte
-// string (vs nil being no byte string).
-func mkGteRange(rangeEnd []byte) []byte {
- if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
- return []byte{}
- }
- return rangeEnd
-}
-
-func noSideEffect(r *pb.InternalRaftRequest) bool {
- return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil || r.AuthStatus != nil
-}
-
-func removeNeedlessRangeReqs(txn *pb.TxnRequest) {
- f := func(ops []*pb.RequestOp) []*pb.RequestOp {
- j := 0
- for i := 0; i < len(ops); i++ {
- if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok {
- continue
- }
- ops[j] = ops[i]
- j++
- }
-
- return ops[:j]
- }
-
- txn.Success = f(txn.Success)
- txn.Failure = f(txn.Failure)
-}
-
-func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) {
- j := 0
- for i := range rr.KVs {
- rr.KVs[j] = rr.KVs[i]
- if !isPrunable(&rr.KVs[i]) {
- j++
- }
- }
- rr.KVs = rr.KVs[:j]
-}
-
-func newHeader(s *EtcdServer) *pb.ResponseHeader {
- return &pb.ResponseHeader{
- ClusterId: uint64(s.Cluster().ID()),
- MemberId: uint64(s.ID()),
- Revision: s.KV().Rev(),
- RaftTerm: s.Term(),
- }
-}
diff --git a/server/etcdserver/apply/apply.go b/server/etcdserver/apply/apply.go
new file mode 100644
index 00000000000..e45d53e17b1
--- /dev/null
+++ b/server/etcdserver/apply/apply.go
@@ -0,0 +1,493 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package apply
+
+import (
+ "context"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/gogo/protobuf/proto"
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/membershippb"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/etcdserver/api"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm"
+ "go.etcd.io/etcd/server/v3/etcdserver/cindex"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+ mvcctxn "go.etcd.io/etcd/server/v3/etcdserver/txn"
+ "go.etcd.io/etcd/server/v3/etcdserver/version"
+ "go.etcd.io/etcd/server/v3/lease"
+ serverstorage "go.etcd.io/etcd/server/v3/storage"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+)
+
+const (
+ v3Version = "v3"
+)
+
+// RaftStatusGetter represents etcd server and Raft progress.
+type RaftStatusGetter interface {
+ MemberID() types.ID
+ Leader() types.ID
+ CommittedIndex() uint64
+ AppliedIndex() uint64
+ Term() uint64
+}
+
+type Result struct {
+ Resp proto.Message
+ Err error
+ // Physc signals that the physical effect of the request has completed, in
+ // addition to it being logically reflected by the node. Currently it is only
+ // used for Compaction requests.
+ Physc <-chan struct{}
+ Trace *traceutil.Trace
+}
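+
+ // Illustrative sketch (an assumption, not part of the original change): a
+ // caller that needs the physical effect of a compaction, not just its logical
+ // application, can block on Physc. The names applier, req and applyFunc below
+ // are hypothetical.
+ //
+ //    res := applier.Apply(req, applyFunc)
+ //    if res.Physc != nil {
+ //        <-res.Physc // wait until the backend has physically compacted
+ //    }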
+
+type applyFunc func(r *pb.InternalRaftRequest) *Result
+
+// applierV3 is the interface for processing V3 raft messages
+type applierV3 interface {
+ // Apply executes the generic portion of application logic for the current applier, but
+ // delegates the actual execution to the supplied applyFunc.
+ Apply(r *pb.InternalRaftRequest, applyFunc applyFunc) *Result
+
+ Put(p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error)
+ Range(r *pb.RangeRequest) (*pb.RangeResponse, *traceutil.Trace, error)
+ DeleteRange(dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, *traceutil.Trace, error)
+ Txn(rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error)
+ Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error)
+
+ LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
+ LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
+
+ LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error)
+
+ Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error)
+
+ Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error)
+
+ AuthEnable() (*pb.AuthEnableResponse, error)
+ AuthDisable() (*pb.AuthDisableResponse, error)
+ AuthStatus() (*pb.AuthStatusResponse, error)
+
+ UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
+ UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
+ UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
+ UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
+ UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
+ UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
+ RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
+ RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
+ RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
+ RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
+ RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
+ UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
+ RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
+}
+
+type SnapshotServer interface {
+ ForceSnapshot()
+}
+
+type applierV3backend struct {
+ lg *zap.Logger
+ kv mvcc.KV
+ alarmStore *v3alarm.AlarmStore
+ authStore auth.AuthStore
+ lessor lease.Lessor
+ cluster *membership.RaftCluster
+ raftStatus RaftStatusGetter
+ snapshotServer SnapshotServer
+ consistentIndex cindex.ConsistentIndexer
+
+ txnModeWriteWithSharedBuffer bool
+}
+
+func newApplierV3Backend(
+ lg *zap.Logger,
+ kv mvcc.KV,
+ alarmStore *v3alarm.AlarmStore,
+ authStore auth.AuthStore,
+ lessor lease.Lessor,
+ cluster *membership.RaftCluster,
+ raftStatus RaftStatusGetter,
+ snapshotServer SnapshotServer,
+ consistentIndex cindex.ConsistentIndexer,
+ txnModeWriteWithSharedBuffer bool,
+) applierV3 {
+ return &applierV3backend{
+ lg: lg,
+ kv: kv,
+ alarmStore: alarmStore,
+ authStore: authStore,
+ lessor: lessor,
+ cluster: cluster,
+ raftStatus: raftStatus,
+ snapshotServer: snapshotServer,
+ consistentIndex: consistentIndex,
+ txnModeWriteWithSharedBuffer: txnModeWriteWithSharedBuffer,
+ }
+}
+
+func (a *applierV3backend) Apply(r *pb.InternalRaftRequest, applyFunc applyFunc) *Result {
+ return applyFunc(r)
+}
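+
+ // A minimal sketch of how this delegation is typically driven: the caller
+ // supplies a dispatcher closure that routes on whichever request field is
+ // set. The closure below is only illustrative, not part of this change.
+ //
+ //    res := a.Apply(r, func(r *pb.InternalRaftRequest) *Result {
+ //        if r.Put != nil {
+ //            resp, trace, err := a.Put(r.Put)
+ //            return &Result{Resp: resp, Trace: trace, Err: err}
+ //        }
+ //        return &Result{} // other request types are routed the same way
+ //    })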
+
+func (a *applierV3backend) Put(p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) {
+ return mvcctxn.Put(context.TODO(), a.lg, a.lessor, a.kv, p)
+}
+
+func (a *applierV3backend) DeleteRange(dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, *traceutil.Trace, error) {
+ return mvcctxn.DeleteRange(context.TODO(), a.lg, a.kv, dr)
+}
+
+func (a *applierV3backend) Range(r *pb.RangeRequest) (*pb.RangeResponse, *traceutil.Trace, error) {
+ return mvcctxn.Range(context.TODO(), a.lg, a.kv, r)
+}
+
+func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
+ return mvcctxn.Txn(context.TODO(), a.lg, rt, a.txnModeWriteWithSharedBuffer, a.kv, a.lessor)
+}
+
+func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) {
+ resp := &pb.CompactionResponse{}
+ resp.Header = &pb.ResponseHeader{}
+ trace := traceutil.New("compact",
+ a.lg,
+ traceutil.Field{Key: "revision", Value: compaction.Revision},
+ )
+
+ ch, err := a.kv.Compact(trace, compaction.Revision)
+ if err != nil {
+ return nil, ch, nil, err
+ }
+ // Get the current revision; which key we range over is not important.
+ rr, _ := a.kv.Range(context.TODO(), []byte("compaction"), nil, mvcc.RangeOptions{})
+ resp.Header.Revision = rr.Rev
+ return resp, ch, trace, err
+}
+
+func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ l, err := a.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL)
+ resp := &pb.LeaseGrantResponse{}
+ if err == nil {
+ resp.ID = int64(l.ID)
+ resp.TTL = l.TTL()
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ err := a.lessor.Revoke(lease.LeaseID(lc.ID))
+ return &pb.LeaseRevokeResponse{Header: a.newHeader()}, err
+}
+
+func (a *applierV3backend) LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error) {
+ for _, c := range lc.Checkpoints {
+ err := a.lessor.Checkpoint(lease.LeaseID(c.ID), c.Remaining_TTL)
+ if err != nil {
+ return &pb.LeaseCheckpointResponse{Header: a.newHeader()}, err
+ }
+ }
+ return &pb.LeaseCheckpointResponse{Header: a.newHeader()}, nil
+}
+
+func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
+ resp := &pb.AlarmResponse{}
+
+ switch ar.Action {
+ case pb.AlarmRequest_GET:
+ resp.Alarms = a.alarmStore.Get(ar.Alarm)
+ case pb.AlarmRequest_ACTIVATE:
+ if ar.Alarm == pb.AlarmType_NONE {
+ break
+ }
+ m := a.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm)
+ if m == nil {
+ break
+ }
+ resp.Alarms = append(resp.Alarms, m)
+ alarms.WithLabelValues(types.ID(ar.MemberID).String(), m.Alarm.String()).Inc()
+ case pb.AlarmRequest_DEACTIVATE:
+ m := a.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm)
+ if m == nil {
+ break
+ }
+ resp.Alarms = append(resp.Alarms, m)
+ alarms.WithLabelValues(types.ID(ar.MemberID).String(), m.Alarm.String()).Dec()
+ default:
+ return nil, nil
+ }
+ return resp, nil
+}
+
+type applierV3Capped struct {
+ applierV3
+ q serverstorage.BackendQuota
+}
+
+// newApplierV3Capped creates an applyV3 that will reject Puts and transactions
+// with Puts so that the number of keys in the store is capped.
+func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} }
+
+func (a *applierV3Capped) Put(_ *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
+ return nil, nil, errors.ErrNoSpace
+}
+
+func (a *applierV3Capped) Txn(r *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
+ if a.q.Cost(r) > 0 {
+ return nil, nil, errors.ErrNoSpace
+ }
+ return a.applierV3.Txn(r)
+}
+
+func (a *applierV3Capped) LeaseGrant(_ *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ return nil, errors.ErrNoSpace
+}
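+
+ // Note (illustrative, based on the removed applierV3backend.Alarm above): the
+ // capped applier is meant to be layered over a base applier, for example when
+ // a NOSPACE alarm is raised, so that further Puts and Txns containing Puts
+ // are rejected with ErrNoSpace.
+ //
+ //    capped := newApplierV3Capped(base)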
+
+func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) {
+ err := a.authStore.AuthEnable()
+ if err != nil {
+ return nil, err
+ }
+ return &pb.AuthEnableResponse{Header: a.newHeader()}, nil
+}
+
+func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) {
+ a.authStore.AuthDisable()
+ return &pb.AuthDisableResponse{Header: a.newHeader()}, nil
+}
+
+func (a *applierV3backend) AuthStatus() (*pb.AuthStatusResponse, error) {
+ enabled := a.authStore.IsAuthEnabled()
+ authRevision := a.authStore.Revision()
+ return &pb.AuthStatusResponse{Header: a.newHeader(), Enabled: enabled, AuthRevision: authRevision}, nil
+}
+
+func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) {
+ ctx := context.WithValue(context.WithValue(context.Background(), auth.AuthenticateParamIndex{}, a.consistentIndex.ConsistentIndex()), auth.AuthenticateParamSimpleTokenPrefix{}, r.SimpleToken)
+ resp, err := a.authStore.Authenticate(ctx, r.Name, r.Password)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+ resp, err := a.authStore.UserAdd(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+ resp, err := a.authStore.UserDelete(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+ resp, err := a.authStore.UserChangePassword(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+ resp, err := a.authStore.UserGrantRole(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+ resp, err := a.authStore.UserGet(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+ resp, err := a.authStore.UserRevokeRole(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+ resp, err := a.authStore.RoleAdd(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+ resp, err := a.authStore.RoleGrantPermission(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+ resp, err := a.authStore.RoleGet(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+ resp, err := a.authStore.RoleRevokePermission(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+ resp, err := a.authStore.RoleDelete(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+ resp, err := a.authStore.UserList(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+ resp, err := a.authStore.RoleList(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+type applierMembership struct {
+ lg *zap.Logger
+ cluster *membership.RaftCluster
+ snapshotServer SnapshotServer
+}
+
+func NewApplierMembership(lg *zap.Logger, cluster *membership.RaftCluster, snapshotServer SnapshotServer) *applierMembership {
+ return &applierMembership{
+ lg: lg,
+ cluster: cluster,
+ snapshotServer: snapshotServer,
+ }
+}
+
+func (a *applierMembership) ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3) {
+ prevVersion := a.cluster.Version()
+ newVersion := semver.Must(semver.NewVersion(r.Ver))
+ a.cluster.SetVersion(newVersion, api.UpdateCapability, shouldApplyV3)
+ // Force snapshot after cluster version downgrade.
+ if prevVersion != nil && newVersion.LessThan(*prevVersion) {
+ lg := a.lg
+ if lg != nil {
+ lg.Info("Cluster version downgrade detected, forcing snapshot",
+ zap.String("prev-cluster-version", prevVersion.String()),
+ zap.String("new-cluster-version", newVersion.String()),
+ )
+ }
+ a.snapshotServer.ForceSnapshot()
+ }
+}
+
+func (a *applierMembership) ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3) {
+ a.cluster.UpdateAttributes(
+ types.ID(r.Member_ID),
+ membership.Attributes{
+ Name: r.MemberAttributes.Name,
+ ClientURLs: r.MemberAttributes.ClientUrls,
+ },
+ shouldApplyV3,
+ )
+}
+
+func (a *applierMembership) DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3) {
+ d := version.DowngradeInfo{Enabled: false}
+ if r.Enabled {
+ d = version.DowngradeInfo{Enabled: true, TargetVersion: r.Ver}
+ }
+ a.cluster.SetDowngradeInfo(&d, shouldApplyV3)
+}
+
+type quotaApplierV3 struct {
+ applierV3
+ q serverstorage.Quota
+}
+
+func newQuotaApplierV3(lg *zap.Logger, quotaBackendBytesCfg int64, be backend.Backend, app applierV3) applierV3 {
+ return "aApplierV3{app, serverstorage.NewBackendQuota(lg, quotaBackendBytesCfg, be, "v3-applier")}
+}
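+
+ // Usage sketch (variable names are assumptions): the quota applier wraps a
+ // base applier so that Puts, Txns and LeaseGrants are rejected with
+ // ErrNoSpace once the backend quota is exhausted.
+ //
+ //    base := newApplierV3Backend(lg, kv, alarmStore, authStore, lessor,
+ //        cluster, raftStatus, snapshotServer, ci, false)
+ //    applier := newQuotaApplierV3(lg, quotaBackendBytesCfg, be, base)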
+
+func (a *quotaApplierV3) Put(p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
+ ok := a.q.Available(p)
+ resp, trace, err := a.applierV3.Put(p)
+ if err == nil && !ok {
+ err = errors.ErrNoSpace
+ }
+ return resp, trace, err
+}
+
+func (a *quotaApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
+ ok := a.q.Available(rt)
+ resp, trace, err := a.applierV3.Txn(rt)
+ if err == nil && !ok {
+ err = errors.ErrNoSpace
+ }
+ return resp, trace, err
+}
+
+func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ ok := a.q.Available(lc)
+ resp, err := a.applierV3.LeaseGrant(lc)
+ if err == nil && !ok {
+ err = errors.ErrNoSpace
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) newHeader() *pb.ResponseHeader {
+ return &pb.ResponseHeader{
+ ClusterId: uint64(a.cluster.ID()),
+ MemberId: uint64(a.raftStatus.MemberID()),
+ Revision: a.kv.Rev(),
+ RaftTerm: a.raftStatus.Term(),
+ }
+}
diff --git a/server/etcdserver/apply/apply_auth.go b/server/etcdserver/apply/apply_auth.go
new file mode 100644
index 00000000000..3922deebd01
--- /dev/null
+++ b/server/etcdserver/apply/apply_auth.go
@@ -0,0 +1,201 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package apply
+
+import (
+ "sync"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/etcdserver/txn"
+ "go.etcd.io/etcd/server/v3/lease"
+)
+
+type authApplierV3 struct {
+ applierV3
+ as auth.AuthStore
+ lessor lease.Lessor
+
+ // mu serializes Apply so that the cached authInfo isn't corrupted by
+ // concurrent requests and so that serialized requests don't leak data
+ // through TOCTOU errors.
+ mu sync.Mutex
+
+ authInfo auth.AuthInfo
+}
+
+func newAuthApplierV3(as auth.AuthStore, base applierV3, lessor lease.Lessor) *authApplierV3 {
+ return &authApplierV3{applierV3: base, as: as, lessor: lessor}
+}
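+
+ // Wiring sketch, mirroring how defaultAuthApplierV3 in apply_auth_test.go
+ // builds the chain: the auth applier wraps a base applier and shares the
+ // lessor so lease-attached keys can be permission-checked.
+ //
+ //    applier := newAuthApplierV3(authStore, newApplierV3Backend(...), lessor)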
+
+func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest, applyFunc applyFunc) *Result {
+ aa.mu.Lock()
+ defer aa.mu.Unlock()
+ if r.Header != nil {
+ // backward-compatible with pre-3.0 releases when internalRaftRequest
+ // does not have header field
+ aa.authInfo.Username = r.Header.Username
+ aa.authInfo.Revision = r.Header.AuthRevision
+ }
+ if needAdminPermission(r) {
+ if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil {
+ aa.authInfo.Username = ""
+ aa.authInfo.Revision = 0
+ return &Result{Err: err}
+ }
+ }
+ ret := aa.applierV3.Apply(r, applyFunc)
+ aa.authInfo.Username = ""
+ aa.authInfo.Revision = 0
+ return ret
+}
+
+func (aa *authApplierV3) Put(r *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
+ if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil {
+ return nil, nil, err
+ }
+
+ if err := aa.checkLeasePuts(lease.LeaseID(r.Lease)); err != nil {
+ // The specified lease is already attached with a key that cannot
+ // be written by this user. It means the user cannot revoke the
+ // lease so attaching the lease to the newly written key should
+ // be forbidden.
+ return nil, nil, err
+ }
+
+ if r.PrevKv {
+ err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ return aa.applierV3.Put(r)
+}
+
+func (aa *authApplierV3) Range(r *pb.RangeRequest) (*pb.RangeResponse, *traceutil.Trace, error) {
+ if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
+ return nil, nil, err
+ }
+ return aa.applierV3.Range(r)
+}
+
+func (aa *authApplierV3) DeleteRange(r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, *traceutil.Trace, error) {
+ if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
+ return nil, nil, err
+ }
+ if r.PrevKv {
+ err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return aa.applierV3.DeleteRange(r)
+}
+
+func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
+ if err := txn.CheckTxnAuth(aa.as, &aa.authInfo, rt); err != nil {
+ return nil, nil, err
+ }
+ return aa.applierV3.Txn(rt)
+}
+
+func (aa *authApplierV3) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ if err := aa.checkLeasePuts(lease.LeaseID(lc.ID)); err != nil {
+ return nil, err
+ }
+ return aa.applierV3.LeaseRevoke(lc)
+}
+
+func (aa *authApplierV3) checkLeasePuts(leaseID lease.LeaseID) error {
+ l := aa.lessor.Lookup(leaseID)
+ if l != nil {
+ return aa.checkLeasePutsKeys(l)
+ }
+
+ return nil
+}
+
+func (aa *authApplierV3) checkLeasePutsKeys(l *lease.Lease) error {
+ // early return for most-common scenario of either disabled auth or admin user.
+ // IsAdminPermitted also checks whether auth is enabled
+ if err := aa.as.IsAdminPermitted(&aa.authInfo); err == nil {
+ return nil
+ }
+
+ for _, key := range l.Keys() {
+ if err := aa.as.IsPutPermitted(&aa.authInfo, []byte(key)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (aa *authApplierV3) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+ err := aa.as.IsAdminPermitted(&aa.authInfo)
+ if err != nil && r.Name != aa.authInfo.Username {
+ aa.authInfo.Username = ""
+ aa.authInfo.Revision = 0
+ return &pb.AuthUserGetResponse{}, err
+ }
+
+ return aa.applierV3.UserGet(r)
+}
+
+func (aa *authApplierV3) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+ err := aa.as.IsAdminPermitted(&aa.authInfo)
+ if err != nil && !aa.as.HasRole(aa.authInfo.Username, r.Role) {
+ aa.authInfo.Username = ""
+ aa.authInfo.Revision = 0
+ return &pb.AuthRoleGetResponse{}, err
+ }
+
+ return aa.applierV3.RoleGet(r)
+}
+
+func needAdminPermission(r *pb.InternalRaftRequest) bool {
+ switch {
+ case r.AuthEnable != nil:
+ return true
+ case r.AuthDisable != nil:
+ return true
+ case r.AuthStatus != nil:
+ return true
+ case r.AuthUserAdd != nil:
+ return true
+ case r.AuthUserDelete != nil:
+ return true
+ case r.AuthUserChangePassword != nil:
+ return true
+ case r.AuthUserGrantRole != nil:
+ return true
+ case r.AuthUserRevokeRole != nil:
+ return true
+ case r.AuthRoleAdd != nil:
+ return true
+ case r.AuthRoleGrantPermission != nil:
+ return true
+ case r.AuthRoleRevokePermission != nil:
+ return true
+ case r.AuthRoleDelete != nil:
+ return true
+ case r.AuthUserList != nil:
+ return true
+ case r.AuthRoleList != nil:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/server/etcdserver/apply/apply_auth_test.go b/server/etcdserver/apply/apply_auth_test.go
new file mode 100644
index 00000000000..cbd1893266c
--- /dev/null
+++ b/server/etcdserver/apply/apply_auth_test.go
@@ -0,0 +1,827 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package apply
+
+import (
+ "errors"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
+ "golang.org/x/crypto/bcrypt"
+
+ "go.etcd.io/etcd/api/v3/authpb"
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/membershippb"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm"
+ "go.etcd.io/etcd/server/v3/etcdserver/cindex"
+ "go.etcd.io/etcd/server/v3/lease"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+func dummyIndexWaiter(_ uint64) <-chan struct{} {
+ ch := make(chan struct{}, 1)
+ ch <- struct{}{}
+ return ch
+}
+
+func dummyApplyFunc(_ *pb.InternalRaftRequest) *Result {
+ return &Result{}
+}
+
+type fakeRaftStatusGetter struct{}
+
+func (*fakeRaftStatusGetter) MemberID() types.ID {
+ return 0
+}
+
+func (*fakeRaftStatusGetter) Leader() types.ID {
+ return 0
+}
+
+func (*fakeRaftStatusGetter) CommittedIndex() uint64 {
+ return 0
+}
+
+func (*fakeRaftStatusGetter) AppliedIndex() uint64 {
+ return 0
+}
+
+func (*fakeRaftStatusGetter) Term() uint64 {
+ return 0
+}
+
+type fakeSnapshotServer struct{}
+
+func (*fakeSnapshotServer) ForceSnapshot() {}
+
+func defaultAuthApplierV3(t *testing.T) *authApplierV3 {
+ lg := zaptest.NewLogger(t)
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ t.Cleanup(func() {
+ betesting.Close(t, be)
+ })
+
+ cluster := membership.NewCluster(lg)
+ lessor := lease.NewLessor(lg, be, cluster, lease.LessorConfig{})
+ kv := mvcc.NewStore(lg, be, lessor, mvcc.StoreConfig{})
+ alarmStore, err := v3alarm.NewAlarmStore(lg, schema.NewAlarmBackend(lg, be))
+ require.NoError(t, err)
+
+ tp, err := auth.NewTokenProvider(lg, "simple", dummyIndexWaiter, 300*time.Second)
+ require.NoError(t, err)
+ authStore := auth.NewAuthStore(
+ lg,
+ schema.NewAuthBackend(lg, be),
+ tp,
+ bcrypt.DefaultCost,
+ )
+ consistentIndex := cindex.NewConsistentIndex(be)
+ return newAuthApplierV3(
+ authStore,
+ newApplierV3Backend(
+ lg,
+ kv,
+ alarmStore,
+ authStore,
+ lessor,
+ cluster,
+ &fakeRaftStatusGetter{},
+ &fakeSnapshotServer{},
+ consistentIndex,
+ false,
+ ),
+ lessor)
+}
+
+const (
+ userRoot = "root"
+ roleRoot = "root"
+ userReadOnly = "user_read_only"
+ roleReadOnly = "role_read_only"
+ userWriteOnly = "user_write_only"
+ roleWriteOnly = "role_write_only"
+
+ key = "key"
+ rangeEnd = "rangeEnd"
+ keyOutsideRange = "rangeEnd_outside"
+
+ leaseID = 1
+)
+
+func mustCreateRolesAndEnableAuth(t *testing.T, authApplier *authApplierV3) {
+ _, err := authApplier.UserAdd(&pb.AuthUserAddRequest{Name: userRoot, Options: &authpb.UserAddOptions{NoPassword: true}})
+ require.NoError(t, err)
+ _, err = authApplier.RoleAdd(&pb.AuthRoleAddRequest{Name: roleRoot})
+ require.NoError(t, err)
+ _, err = authApplier.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: userRoot, Role: roleRoot})
+ require.NoError(t, err)
+
+ _, err = authApplier.UserAdd(&pb.AuthUserAddRequest{Name: userReadOnly, Options: &authpb.UserAddOptions{NoPassword: true}})
+ require.NoError(t, err)
+ _, err = authApplier.RoleAdd(&pb.AuthRoleAddRequest{Name: roleReadOnly})
+ require.NoError(t, err)
+ _, err = authApplier.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: userReadOnly, Role: roleReadOnly})
+ require.NoError(t, err)
+ _, err = authApplier.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{Name: roleReadOnly, Perm: &authpb.Permission{
+ PermType: authpb.READ,
+ Key: []byte(key),
+ RangeEnd: []byte(rangeEnd),
+ }})
+ require.NoError(t, err)
+
+ _, err = authApplier.UserAdd(&pb.AuthUserAddRequest{Name: userWriteOnly, Options: &authpb.UserAddOptions{NoPassword: true}})
+ require.NoError(t, err)
+ _, err = authApplier.RoleAdd(&pb.AuthRoleAddRequest{Name: roleWriteOnly})
+ require.NoError(t, err)
+ _, err = authApplier.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: userWriteOnly, Role: roleWriteOnly})
+ require.NoError(t, err)
+ _, err = authApplier.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{Name: roleWriteOnly, Perm: &authpb.Permission{
+ PermType: authpb.WRITE,
+ Key: []byte(key),
+ RangeEnd: []byte(rangeEnd),
+ }})
+ require.NoError(t, err)
+
+ _, err = authApplier.AuthEnable()
+ require.NoError(t, err)
+}
+
+// setAuthInfo manually sets the authInfo of the applier. In reality, authInfo is filled before Apply()
+func setAuthInfo(authApplier *authApplierV3, userName string) {
+ authApplier.authInfo = auth.AuthInfo{
+ Username: userName,
+ Revision: authApplier.as.Revision(),
+ }
+}
+
+// TestAuthApplierV3_Apply ensures Apply() calls applyFunc() when permission is granted
+// and returns an error when permission is denied
+func TestAuthApplierV3_Apply(t *testing.T) {
+ tcs := []struct {
+ name string
+ request *pb.InternalRaftRequest
+ expectResult *Result
+ }{
+ {
+ name: "request does not need admin permission",
+ request: &pb.InternalRaftRequest{
+ Header: &pb.RequestHeader{},
+ },
+ expectResult: &Result{},
+ },
+ {
+ name: "request needs admin permission but permission denied",
+ request: &pb.InternalRaftRequest{
+ Header: &pb.RequestHeader{
+ Username: userReadOnly,
+ },
+ AuthEnable: &pb.AuthEnableRequest{},
+ },
+ expectResult: &Result{
+ Err: auth.ErrPermissionDenied,
+ },
+ },
+ {
+ name: "request needs admin permission and permitted",
+ request: &pb.InternalRaftRequest{
+ Header: &pb.RequestHeader{
+ Username: userRoot,
+ },
+ AuthEnable: &pb.AuthEnableRequest{},
+ },
+ expectResult: &Result{},
+ },
+ }
+
+ authApplier := defaultAuthApplierV3(t)
+ mustCreateRolesAndEnableAuth(t, authApplier)
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ result := authApplier.Apply(tc.request, dummyApplyFunc)
+ require.Equalf(t, tc.expectResult, result, "Apply: got %v, expect: %v", result, tc.expectResult)
+ })
+ }
+}
+
+// TestAuthApplierV3_AdminPermission ensures the admin permission is checked for certain
+// operations
+func TestAuthApplierV3_AdminPermission(t *testing.T) {
+ tcs := []struct {
+ name string
+ request *pb.InternalRaftRequest
+ adminPermissionNeeded bool
+ }{
+ {
+ name: "Range does not need admin permission",
+ request: &pb.InternalRaftRequest{Range: &pb.RangeRequest{}},
+ adminPermissionNeeded: false,
+ },
+ {
+ name: "Put does not need admin permission",
+ request: &pb.InternalRaftRequest{Put: &pb.PutRequest{}},
+ adminPermissionNeeded: false,
+ },
+ {
+ name: "DeleteRange does not need admin permission",
+ request: &pb.InternalRaftRequest{DeleteRange: &pb.DeleteRangeRequest{}},
+ adminPermissionNeeded: false,
+ },
+ {
+ name: "Txn does not need admin permission",
+ request: &pb.InternalRaftRequest{Txn: &pb.TxnRequest{}},
+ adminPermissionNeeded: false,
+ },
+ {
+ name: "Compaction does not need admin permission",
+ request: &pb.InternalRaftRequest{Compaction: &pb.CompactionRequest{}},
+ adminPermissionNeeded: false,
+ },
+ {
+ name: "LeaseGrant does not need admin permission",
+ request: &pb.InternalRaftRequest{LeaseGrant: &pb.LeaseGrantRequest{}},
+ adminPermissionNeeded: false,
+ },
+ {
+ name: "LeaseRevoke does not need admin permission",
+ request: &pb.InternalRaftRequest{LeaseRevoke: &pb.LeaseRevokeRequest{}},
+ adminPermissionNeeded: false,
+ },
+ {
+ name: "Alarm does not need admin permission",
+ request: &pb.InternalRaftRequest{Alarm: &pb.AlarmRequest{}},
+ adminPermissionNeeded: false,
+ },
+ {
+ name: "LeaseCheckpoint does not need admin permission",
+ request: &pb.InternalRaftRequest{LeaseCheckpoint: &pb.LeaseCheckpointRequest{}},
+ adminPermissionNeeded: false,
+ },
+ {
+ name: "Authenticate does not need admin permission",
+ request: &pb.InternalRaftRequest{Authenticate: &pb.InternalAuthenticateRequest{}},
+ adminPermissionNeeded: false,
+ },
+ {
+ name: "ClusterVersionSet does not need admin permission",
+ request: &pb.InternalRaftRequest{ClusterVersionSet: &membershippb.ClusterVersionSetRequest{}},
+ adminPermissionNeeded: false,
+ },
+ {
+ name: "ClusterMemberAttrSet does not need admin permission",
+ request: &pb.InternalRaftRequest{ClusterMemberAttrSet: &membershippb.ClusterMemberAttrSetRequest{}},
+ adminPermissionNeeded: false,
+ },
+ {
+ name: "DowngradeInfoSet does not need admin permission",
+ request: &pb.InternalRaftRequest{DowngradeInfoSet: &membershippb.DowngradeInfoSetRequest{}},
+ adminPermissionNeeded: false,
+ },
+ {
+ name: "AuthUserGet does not need admin permission",
+ request: &pb.InternalRaftRequest{AuthUserGet: &pb.AuthUserGetRequest{}},
+ adminPermissionNeeded: false,
+ },
+ {
+ name: "AuthRoleGet does not need admin permission",
+ request: &pb.InternalRaftRequest{AuthRoleGet: &pb.AuthRoleGetRequest{}},
+ adminPermissionNeeded: false,
+ },
+ {
+ name: "AuthEnable needs admin permission",
+ request: &pb.InternalRaftRequest{AuthEnable: &pb.AuthEnableRequest{}},
+ adminPermissionNeeded: true,
+ },
+ {
+ name: "AuthDisable needs admin permission",
+ request: &pb.InternalRaftRequest{AuthDisable: &pb.AuthDisableRequest{}},
+ adminPermissionNeeded: true,
+ },
+ {
+ name: "AuthStatus needs admin permission",
+ request: &pb.InternalRaftRequest{AuthStatus: &pb.AuthStatusRequest{}},
+ adminPermissionNeeded: true,
+ },
+ {
+ name: "AuthUserAdd needs admin permission",
+ request: &pb.InternalRaftRequest{AuthUserAdd: &pb.AuthUserAddRequest{}},
+ adminPermissionNeeded: true,
+ },
+ {
+ name: "AuthUserDelete needs admin permission",
+ request: &pb.InternalRaftRequest{AuthUserDelete: &pb.AuthUserDeleteRequest{}},
+ adminPermissionNeeded: true,
+ },
+ {
+ name: "AuthUserChangePassword needs admin permission",
+ request: &pb.InternalRaftRequest{AuthUserChangePassword: &pb.AuthUserChangePasswordRequest{}},
+ adminPermissionNeeded: true,
+ },
+ {
+ name: "AuthUserGrantRole needs admin permission",
+ request: &pb.InternalRaftRequest{AuthUserGrantRole: &pb.AuthUserGrantRoleRequest{}},
+ adminPermissionNeeded: true,
+ },
+ {
+ name: "AuthUserRevokeRole needs admin permission",
+ request: &pb.InternalRaftRequest{AuthUserRevokeRole: &pb.AuthUserRevokeRoleRequest{}},
+ adminPermissionNeeded: true,
+ },
+ {
+ name: "AuthUserList needs admin permission",
+ request: &pb.InternalRaftRequest{AuthUserList: &pb.AuthUserListRequest{}},
+ adminPermissionNeeded: true,
+ },
+ {
+ name: "AuthRoleList needs admin permission",
+ request: &pb.InternalRaftRequest{AuthRoleList: &pb.AuthRoleListRequest{}},
+ adminPermissionNeeded: true,
+ },
+ {
+ name: "AuthRoleAdd needs admin permission",
+ request: &pb.InternalRaftRequest{AuthRoleAdd: &pb.AuthRoleAddRequest{}},
+ adminPermissionNeeded: true,
+ },
+ {
+ name: "AuthRoleDelete needs admin permission",
+ request: &pb.InternalRaftRequest{AuthRoleDelete: &pb.AuthRoleDeleteRequest{}},
+ adminPermissionNeeded: true,
+ },
+ {
+ name: "AuthRoleGrantPermission needs admin permission",
+ request: &pb.InternalRaftRequest{AuthRoleGrantPermission: &pb.AuthRoleGrantPermissionRequest{}},
+ adminPermissionNeeded: true,
+ },
+ {
+ name: "AuthRoleRevokePermission needs admin permission",
+ request: &pb.InternalRaftRequest{AuthRoleRevokePermission: &pb.AuthRoleRevokePermissionRequest{}},
+ adminPermissionNeeded: true,
+ },
+ }
+ authApplier := defaultAuthApplierV3(t)
+ mustCreateRolesAndEnableAuth(t, authApplier)
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ if tc.adminPermissionNeeded {
+ tc.request.Header = &pb.RequestHeader{Username: userReadOnly}
+ }
+ result := authApplier.Apply(tc.request, dummyApplyFunc)
+ require.Equalf(t, errors.Is(result.Err, auth.ErrPermissionDenied), tc.adminPermissionNeeded, "Admin permission needed")
+ })
+ }
+}
+
+// TestAuthApplierV3_Put verifies only users with write permissions in the key range can put
+func TestAuthApplierV3_Put(t *testing.T) {
+ tcs := []struct {
+ name string
+ userName string
+ request *pb.PutRequest
+ expectError error
+ }{
+ {
+ name: "put permission denied",
+ userName: userReadOnly,
+ request: &pb.PutRequest{},
+ expectError: auth.ErrPermissionDenied,
+ },
+ {
+ name: "prevKv is set, but user does not have read permission",
+ userName: userWriteOnly,
+ request: &pb.PutRequest{
+ Key: []byte(key),
+ Value: []byte("1"),
+ PrevKv: true,
+ },
+ expectError: auth.ErrPermissionDenied,
+ },
+ {
+ name: "put success",
+ userName: userWriteOnly,
+ request: &pb.PutRequest{
+ Key: []byte(key),
+ Value: []byte("1"),
+ },
+ expectError: nil,
+ },
+ {
+ name: "put success with PrevKv set",
+ userName: userRoot,
+ request: &pb.PutRequest{
+ Key: []byte(key),
+ Value: []byte("1"),
+ PrevKv: true,
+ },
+ expectError: nil,
+ },
+ }
+
+ authApplier := defaultAuthApplierV3(t)
+ mustCreateRolesAndEnableAuth(t, authApplier)
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ setAuthInfo(authApplier, tc.userName)
+ _, _, err := authApplier.Put(tc.request)
+ require.Equalf(t, tc.expectError, err, "Put returned unexpected error (or lack thereof), expected: %v, got: %v", tc.expectError, err)
+ })
+ }
+}
+
+// TestAuthApplierV3_LeasePut verifies users cannot put with lease if the lease is attached with a key out of range
+func TestAuthApplierV3_LeasePut(t *testing.T) {
+ authApplier := defaultAuthApplierV3(t)
+ mustCreateRolesAndEnableAuth(t, authApplier)
+
+ _, err := authApplier.LeaseGrant(&pb.LeaseGrantRequest{
+ TTL: lease.MaxLeaseTTL,
+ ID: leaseID,
+ })
+ require.NoError(t, err)
+
+ // The user should be able to put the key
+ setAuthInfo(authApplier, userWriteOnly)
+ _, _, err = authApplier.Put(&pb.PutRequest{
+ Key: []byte(key),
+ Value: []byte("1"),
+ Lease: leaseID,
+ })
+ require.NoError(t, err)
+
+ // Attach a key outside the user's key range to the lease
+ setAuthInfo(authApplier, userRoot)
+ _, _, err = authApplier.Put(&pb.PutRequest{
+ Key: []byte(keyOutsideRange),
+ Value: []byte("1"),
+ Lease: leaseID,
+ })
+ require.NoError(t, err)
+
+ // The user should not be able to put the key anymore
+ setAuthInfo(authApplier, userWriteOnly)
+ _, _, err = authApplier.Put(&pb.PutRequest{
+ Key: []byte(key),
+ Value: []byte("1"),
+ Lease: leaseID,
+ })
+ require.Equal(t, auth.ErrPermissionDenied, err)
+}
+
+// TestAuthApplierV3_Range verifies that only users with read permission on the key range can perform range requests
+func TestAuthApplierV3_Range(t *testing.T) {
+ tcs := []struct {
+ name string
+ userName string
+ request *pb.RangeRequest
+ expectError error
+ }{
+ {
+ name: "range permission denied",
+ userName: userWriteOnly,
+ request: &pb.RangeRequest{},
+ expectError: auth.ErrPermissionDenied,
+ },
+ {
+ name: "range key out of range",
+ userName: userReadOnly,
+ request: &pb.RangeRequest{
+ Key: []byte(keyOutsideRange),
+ },
+ expectError: auth.ErrPermissionDenied,
+ },
+ {
+ name: "range success",
+ userName: userReadOnly,
+ request: &pb.RangeRequest{
+ Key: []byte(key),
+ RangeEnd: []byte(rangeEnd),
+ },
+ expectError: nil,
+ },
+ }
+
+ authApplier := defaultAuthApplierV3(t)
+ mustCreateRolesAndEnableAuth(t, authApplier)
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ setAuthInfo(authApplier, tc.userName)
+ _, _, err := authApplier.Range(tc.request)
+ require.Equalf(t, tc.expectError, err, "Range returned unexpected error (or lack thereof), expected: %v, got: %v", tc.expectError, err)
+ })
+ }
+}
+
+// TestAuthApplierV3_DeleteRange verifies that only users with write permission on the key range can perform delete range requests
+func TestAuthApplierV3_DeleteRange(t *testing.T) {
+ tcs := []struct {
+ name string
+ userName string
+ request *pb.DeleteRangeRequest
+ expectError error
+ }{
+ {
+ name: "delete range permission denied",
+ userName: userReadOnly,
+ request: &pb.DeleteRangeRequest{},
+ expectError: auth.ErrPermissionDenied,
+ },
+ {
+ name: "delete range key out of range",
+ userName: userWriteOnly,
+ request: &pb.DeleteRangeRequest{
+ Key: []byte(keyOutsideRange),
+ },
+ expectError: auth.ErrPermissionDenied,
+ },
+ {
+ name: "prevKv is set, but user does not have read permission",
+ userName: userWriteOnly,
+ request: &pb.DeleteRangeRequest{
+ Key: []byte(key),
+ RangeEnd: []byte(rangeEnd),
+ PrevKv: true,
+ },
+ expectError: auth.ErrPermissionDenied,
+ },
+ {
+ name: "delete range success",
+ userName: userWriteOnly,
+ request: &pb.DeleteRangeRequest{
+ Key: []byte(key),
+ RangeEnd: []byte(rangeEnd),
+ },
+ expectError: nil,
+ },
+ {
+ name: "delete range success with PrevKv",
+ userName: userRoot,
+ request: &pb.DeleteRangeRequest{
+ Key: []byte(key),
+ RangeEnd: []byte(rangeEnd),
+ PrevKv: true,
+ },
+ expectError: nil,
+ },
+ }
+
+ authApplier := defaultAuthApplierV3(t)
+ mustCreateRolesAndEnableAuth(t, authApplier)
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ setAuthInfo(authApplier, tc.userName)
+ _, _, err := authApplier.DeleteRange(tc.request)
+ require.Equalf(t, tc.expectError, err, "Range returned unexpected error (or lack thereof), expected: %v, got: %v", tc.expectError, err)
+ })
+ }
+}
+
+// TestAuthApplierV3_Txn verifies txns can only be applied with proper permissions
+func TestAuthApplierV3_Txn(t *testing.T) {
+ tcs := []struct {
+ name string
+ userName string
+ request *pb.TxnRequest
+ expectError error
+ }{
+ {
+ name: "txn range permission denied",
+ userName: userWriteOnly,
+ request: &pb.TxnRequest{
+ Compare: []*pb.Compare{
+ {
+ Key: []byte(key),
+ },
+ },
+ },
+ expectError: auth.ErrPermissionDenied,
+ },
+ {
+ name: "txn put permission denied",
+ userName: userReadOnly,
+ request: &pb.TxnRequest{
+ Success: []*pb.RequestOp{
+ {
+ Request: &pb.RequestOp_RequestPut{
+ RequestPut: &pb.PutRequest{
+ Key: []byte(key),
+ },
+ },
+ },
+ },
+ },
+ expectError: auth.ErrPermissionDenied,
+ },
+ {
+ name: "txn success",
+ userName: userRoot,
+ request: &pb.TxnRequest{
+ Compare: []*pb.Compare{
+ {
+ Key: []byte(key),
+ },
+ },
+ Success: []*pb.RequestOp{
+ {
+ Request: &pb.RequestOp_RequestPut{
+ RequestPut: &pb.PutRequest{
+ Key: []byte(key),
+ },
+ },
+ },
+ },
+ },
+ expectError: nil,
+ },
+ }
+
+ authApplier := defaultAuthApplierV3(t)
+ mustCreateRolesAndEnableAuth(t, authApplier)
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ setAuthInfo(authApplier, tc.userName)
+ _, _, err := authApplier.Txn(tc.request)
+ require.Equalf(t, tc.expectError, err, "Range returned unexpected error (or lack thereof), expected: %v, got: %v", tc.expectError, err)
+ })
+ }
+}
+
+// TestAuthApplierV3_LeaseRevoke verifies that a user cannot revoke a lease once someone else has
+// attached a key outside the user's key range to it
+func TestAuthApplierV3_LeaseRevoke(t *testing.T) {
+ authApplier := defaultAuthApplierV3(t)
+ mustCreateRolesAndEnableAuth(t, authApplier)
+
+ _, err := authApplier.LeaseGrant(&pb.LeaseGrantRequest{
+ TTL: lease.MaxLeaseTTL,
+ ID: leaseID,
+ })
+ require.NoError(t, err)
+
+ // The user should be able to revoke the lease
+ setAuthInfo(authApplier, userWriteOnly)
+ _, err = authApplier.LeaseRevoke(&pb.LeaseRevokeRequest{
+ ID: leaseID,
+ })
+ require.NoError(t, err)
+
+ _, err = authApplier.LeaseGrant(&pb.LeaseGrantRequest{
+ TTL: lease.MaxLeaseTTL,
+ ID: leaseID,
+ })
+ require.NoError(t, err)
+
+ // Attach a key outside the user's key range to the lease
+ setAuthInfo(authApplier, userRoot)
+ _, _, err = authApplier.Put(&pb.PutRequest{
+ Key: []byte(keyOutsideRange),
+ Value: []byte("1"),
+ Lease: leaseID,
+ })
+ require.NoError(t, err)
+
+ // The user should not be able to revoke the lease anymore
+ setAuthInfo(authApplier, userWriteOnly)
+ _, err = authApplier.LeaseRevoke(&pb.LeaseRevokeRequest{
+ ID: leaseID,
+ })
+ require.Equal(t, auth.ErrPermissionDenied, err)
+}
+
+// TestAuthApplierV3_UserGet verifies that UserGet can only be performed by the user itself or by root
+func TestAuthApplierV3_UserGet(t *testing.T) {
+ tcs := []struct {
+ name string
+ userName string
+ request *pb.AuthUserGetRequest
+ expectError error
+ }{
+ {
+ name: "UserGet permission denied with non-root role and requests other user",
+ userName: userWriteOnly,
+ request: &pb.AuthUserGetRequest{Name: userReadOnly},
+ expectError: auth.ErrPermissionDenied,
+ },
+ {
+ name: "UserGet success with non-root role but requests itself",
+ userName: userWriteOnly,
+ request: &pb.AuthUserGetRequest{Name: userWriteOnly},
+ expectError: nil,
+ },
+ {
+ name: "UserGet success with root role",
+ userName: userRoot,
+ request: &pb.AuthUserGetRequest{Name: userWriteOnly},
+ expectError: nil,
+ },
+ }
+
+ authApplier := defaultAuthApplierV3(t)
+ mustCreateRolesAndEnableAuth(t, authApplier)
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ setAuthInfo(authApplier, tc.userName)
+ _, err := authApplier.UserGet(tc.request)
+ require.Equalf(t, tc.expectError, err, "Range returned unexpected error (or lack thereof), expected: %v, got: %v", tc.expectError, err)
+ })
+ }
+}
+
+// TestAuthApplierV3_RoleGet verifies that RoleGet can only be performed by a user holding the role or by root
+func TestAuthApplierV3_RoleGet(t *testing.T) {
+ tcs := []struct {
+ name string
+ userName string
+ request *pb.AuthRoleGetRequest
+ expectError error
+ }{
+ {
+ name: "RoleGet permission denied with non-root role and requests other role",
+ userName: userWriteOnly,
+ request: &pb.AuthRoleGetRequest{Role: roleReadOnly},
+ expectError: auth.ErrPermissionDenied,
+ },
+ {
+ name: "RoleGet success with non-root role but requests itself",
+ userName: userWriteOnly,
+ request: &pb.AuthRoleGetRequest{Role: roleWriteOnly},
+ expectError: nil,
+ },
+ {
+ name: "RoleGet success with root role",
+ userName: userRoot,
+ request: &pb.AuthRoleGetRequest{Role: roleWriteOnly},
+ expectError: nil,
+ },
+ }
+
+ authApplier := defaultAuthApplierV3(t)
+ mustCreateRolesAndEnableAuth(t, authApplier)
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ setAuthInfo(authApplier, tc.userName)
+ _, err := authApplier.RoleGet(tc.request)
+ require.Equalf(t, tc.expectError, err, "Range returned unexpected error (or lack thereof), expected: %v, got: %v", tc.expectError, err)
+ })
+ }
+}
+
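+// TestCheckLeasePutsKeys verifies checkLeasePutsKeys: puts are allowed when auth is disabled or for root,
+// rejected for unknown users, stale auth revisions and missing permissions, and allowed again once the
+// user is granted a role covering the lease's key.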
+func TestCheckLeasePutsKeys(t *testing.T) {
+ aa := defaultAuthApplierV3(t)
+ require.NoErrorf(t, aa.checkLeasePutsKeys(lease.NewLease(lease.LeaseID(1), 3600)), "auth is disabled, should allow puts")
+ mustCreateRolesAndEnableAuth(t, aa)
+ aa.authInfo = auth.AuthInfo{Username: "root"}
+ require.NoErrorf(t, aa.checkLeasePutsKeys(lease.NewLease(lease.LeaseID(1), 3600)), "auth is enabled, should allow puts for root")
+
+ l := lease.NewLease(lease.LeaseID(1), 3600)
+ l.SetLeaseItem(lease.LeaseItem{Key: "a"})
+ aa.authInfo = auth.AuthInfo{Username: "bob", Revision: 0}
+ require.ErrorIsf(t, aa.checkLeasePutsKeys(l), auth.ErrUserEmpty, "auth is enabled, should not allow bob, non-existent at rev 0")
+ aa.authInfo = auth.AuthInfo{Username: "bob", Revision: 1}
+ require.ErrorIsf(t, aa.checkLeasePutsKeys(l), auth.ErrAuthOldRevision, "auth is enabled, old revision")
+
+ aa.authInfo = auth.AuthInfo{Username: "bob", Revision: aa.as.Revision()}
+ require.ErrorIsf(t, aa.checkLeasePutsKeys(l), auth.ErrPermissionDenied, "auth is enabled, bob does not have permissions, bob does not exist")
+ _, err := aa.as.UserAdd(&pb.AuthUserAddRequest{Name: "bob", Options: &authpb.UserAddOptions{NoPassword: true}})
+ require.NoErrorf(t, err, "bob should be added without error")
+ aa.authInfo = auth.AuthInfo{Username: "bob", Revision: aa.as.Revision()}
+ require.ErrorIsf(t, aa.checkLeasePutsKeys(l), auth.ErrPermissionDenied, "auth is enabled, bob exists yet does not have permissions")
+
+ // allow bob to access "a"
+ _, err = aa.as.RoleAdd(&pb.AuthRoleAddRequest{Name: "bobsrole"})
+ require.NoErrorf(t, err, "bobsrole should be added without error")
+ _, err = aa.as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{
+ Name: "bobsrole",
+ Perm: &authpb.Permission{
+ PermType: authpb.READWRITE,
+ Key: []byte("a"),
+ RangeEnd: nil,
+ },
+ })
+ require.NoErrorf(t, err, "bobsrole should be granted permissions without error")
+ _, err = aa.as.UserGrantRole(&pb.AuthUserGrantRoleRequest{
+ User: "bob",
+ Role: "bobsrole",
+ })
+ require.NoErrorf(t, err, "bob should be granted bobsrole without error")
+
+ aa.authInfo = auth.AuthInfo{Username: "bob", Revision: aa.as.Revision()}
+ assert.NoErrorf(t, aa.checkLeasePutsKeys(l), "bob should be able to access key 'a'")
+}
diff --git a/server/etcdserver/apply/corrupt.go b/server/etcdserver/apply/corrupt.go
new file mode 100644
index 00000000000..c198119510a
--- /dev/null
+++ b/server/etcdserver/apply/corrupt.go
@@ -0,0 +1,55 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package apply
+
+import (
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+)
+
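+// applierV3Corrupt wraps another applierV3 and rejects every KV, compaction and lease request with
+// ErrCorrupt; it is installed on top of the applier chain while a CORRUPT alarm is raised.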
+type applierV3Corrupt struct {
+ applierV3
+}
+
+func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} }
+
+func (a *applierV3Corrupt) Put(_ *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
+ return nil, nil, errors.ErrCorrupt
+}
+
+func (a *applierV3Corrupt) Range(_ *pb.RangeRequest) (*pb.RangeResponse, *traceutil.Trace, error) {
+ return nil, nil, errors.ErrCorrupt
+}
+
+func (a *applierV3Corrupt) DeleteRange(_ *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, *traceutil.Trace, error) {
+ return nil, nil, errors.ErrCorrupt
+}
+
+func (a *applierV3Corrupt) Txn(_ *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
+ return nil, nil, errors.ErrCorrupt
+}
+
+func (a *applierV3Corrupt) Compaction(_ *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) {
+ return nil, nil, nil, errors.ErrCorrupt
+}
+
+func (a *applierV3Corrupt) LeaseGrant(_ *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ return nil, errors.ErrCorrupt
+}
+
+func (a *applierV3Corrupt) LeaseRevoke(_ *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ return nil, errors.ErrCorrupt
+}
diff --git a/server/etcdserver/apply/metrics.go b/server/etcdserver/apply/metrics.go
new file mode 100644
index 00000000000..c6b43864633
--- /dev/null
+++ b/server/etcdserver/apply/metrics.go
@@ -0,0 +1,29 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package apply
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var alarms = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "server",
+ Name: "alarms",
+ Help: "Alarms for every member in cluster. 1 for 'server_id' label with current ID. 2 for 'alarm_type' label with type of this alarm",
+},
+ []string{"server_id", "alarm_type"})
+
+func init() {
+ prometheus.MustRegister(alarms)
+}
diff --git a/server/etcdserver/apply/uber_applier.go b/server/etcdserver/apply/uber_applier.go
new file mode 100644
index 00000000000..ec7e2aae663
--- /dev/null
+++ b/server/etcdserver/apply/uber_applier.go
@@ -0,0 +1,228 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package apply
+
+import (
+ "errors"
+ "time"
+
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm"
+ "go.etcd.io/etcd/server/v3/etcdserver/cindex"
+ "go.etcd.io/etcd/server/v3/etcdserver/txn"
+ "go.etcd.io/etcd/server/v3/lease"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+)
+
+type UberApplier interface {
+ Apply(r *pb.InternalRaftRequest) *Result
+}
+
+type uberApplier struct {
+ lg *zap.Logger
+
+ alarmStore *v3alarm.AlarmStore
+ warningApplyDuration time.Duration
+
+ // This is the applier that takes the currently active alarms into consideration
+ applyV3 applierV3
+
+ // This is the base applier that gets re-wrapped whenever the set of alarms changes
+ applyV3base applierV3
+}
+
+func NewUberApplier(
+ lg *zap.Logger,
+ be backend.Backend,
+ kv mvcc.KV,
+ alarmStore *v3alarm.AlarmStore,
+ authStore auth.AuthStore,
+ lessor lease.Lessor,
+ cluster *membership.RaftCluster,
+ raftStatus RaftStatusGetter,
+ snapshotServer SnapshotServer,
+ consistentIndex cindex.ConsistentIndexer,
+ warningApplyDuration time.Duration,
+ txnModeWriteWithSharedBuffer bool,
+ quotaBackendBytesCfg int64,
+) UberApplier {
+ applyV3base := newApplierV3(lg, be, kv, alarmStore, authStore, lessor, cluster, raftStatus, snapshotServer, consistentIndex, txnModeWriteWithSharedBuffer, quotaBackendBytesCfg)
+
+ ua := &uberApplier{
+ lg: lg,
+ alarmStore: alarmStore,
+ warningApplyDuration: warningApplyDuration,
+ applyV3: applyV3base,
+ applyV3base: applyV3base,
+ }
+ ua.restoreAlarms()
+ return ua
+}
+
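+// newApplierV3 builds the base applier chain: auth checks wrap quota enforcement, which wraps the backend applier.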
+func newApplierV3(
+ lg *zap.Logger,
+ be backend.Backend,
+ kv mvcc.KV,
+ alarmStore *v3alarm.AlarmStore,
+ authStore auth.AuthStore,
+ lessor lease.Lessor,
+ cluster *membership.RaftCluster,
+ raftStatus RaftStatusGetter,
+ snapshotServer SnapshotServer,
+ consistentIndex cindex.ConsistentIndexer,
+ txnModeWriteWithSharedBuffer bool,
+ quotaBackendBytesCfg int64,
+) applierV3 {
+ applierBackend := newApplierV3Backend(lg, kv, alarmStore, authStore, lessor, cluster, raftStatus, snapshotServer, consistentIndex, txnModeWriteWithSharedBuffer)
+ return newAuthApplierV3(
+ authStore,
+ newQuotaApplierV3(lg, quotaBackendBytesCfg, be, applierBackend),
+ lessor,
+ )
+}
+
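+// restoreAlarms re-derives the effective applier from the currently raised alarms: a NOSPACE alarm adds
+// the capped wrapper and a CORRUPT alarm adds the corrupt wrapper on top of the base applier.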
+func (a *uberApplier) restoreAlarms() {
+ noSpaceAlarms := len(a.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0
+ corruptAlarms := len(a.alarmStore.Get(pb.AlarmType_CORRUPT)) > 0
+ a.applyV3 = a.applyV3base
+ if noSpaceAlarms {
+ a.applyV3 = newApplierV3Capped(a.applyV3)
+ }
+ if corruptAlarms {
+ a.applyV3 = newApplierV3Corrupt(a.applyV3)
+ }
+}
+
+func (a *uberApplier) Apply(r *pb.InternalRaftRequest) *Result {
+ // We first execute the chain of Apply() calls down the hierarchy
+ // (i.e. CorruptApplier -> CappedApplier -> Auth -> Quota -> Backend),
+ // then dispatch() unpacks the request into a specific method (like Put),
+ // which is executed down the hierarchy again:
+ // i.e. CorruptApplier.Put(CappedApplier.Put(...(BackendApplier.Put(...)))).
+ return a.applyV3.Apply(r, a.dispatch)
+}
+
+// dispatch translates the request (r) into the appropriate call (such as Put) on
+// the underlying applyV3 object.
+func (a *uberApplier) dispatch(r *pb.InternalRaftRequest) *Result {
+ op := "unknown"
+ ar := &Result{}
+ defer func(start time.Time) {
+ success := ar.Err == nil || errors.Is(ar.Err, mvcc.ErrCompacted)
+ txn.ApplySecObserve(v3Version, op, success, time.Since(start))
+ txn.WarnOfExpensiveRequest(a.lg, a.warningApplyDuration, start, &pb.InternalRaftStringer{Request: r}, ar.Resp, ar.Err)
+ if !success {
+ txn.WarnOfFailedRequest(a.lg, start, &pb.InternalRaftStringer{Request: r}, ar.Resp, ar.Err)
+ }
+ }(time.Now())
+
+ switch {
+ case r.Range != nil:
+ op = "Range"
+ ar.Resp, ar.Trace, ar.Err = a.applyV3.Range(r.Range)
+ case r.Put != nil:
+ op = "Put"
+ ar.Resp, ar.Trace, ar.Err = a.applyV3.Put(r.Put)
+ case r.DeleteRange != nil:
+ op = "DeleteRange"
+ ar.Resp, ar.Trace, ar.Err = a.applyV3.DeleteRange(r.DeleteRange)
+ case r.Txn != nil:
+ op = "Txn"
+ ar.Resp, ar.Trace, ar.Err = a.applyV3.Txn(r.Txn)
+ case r.Compaction != nil:
+ op = "Compaction"
+ ar.Resp, ar.Physc, ar.Trace, ar.Err = a.applyV3.Compaction(r.Compaction)
+ case r.LeaseGrant != nil:
+ op = "LeaseGrant"
+ ar.Resp, ar.Err = a.applyV3.LeaseGrant(r.LeaseGrant)
+ case r.LeaseRevoke != nil:
+ op = "LeaseRevoke"
+ ar.Resp, ar.Err = a.applyV3.LeaseRevoke(r.LeaseRevoke)
+ case r.LeaseCheckpoint != nil:
+ op = "LeaseCheckpoint"
+ ar.Resp, ar.Err = a.applyV3.LeaseCheckpoint(r.LeaseCheckpoint)
+ case r.Alarm != nil:
+ op = "Alarm"
+ ar.Resp, ar.Err = a.Alarm(r.Alarm)
+ case r.Authenticate != nil:
+ op = "Authenticate"
+ ar.Resp, ar.Err = a.applyV3.Authenticate(r.Authenticate)
+ case r.AuthEnable != nil:
+ op = "AuthEnable"
+ ar.Resp, ar.Err = a.applyV3.AuthEnable()
+ case r.AuthDisable != nil:
+ op = "AuthDisable"
+ ar.Resp, ar.Err = a.applyV3.AuthDisable()
+ case r.AuthStatus != nil:
+ op = "AuthStatus"
+ ar.Resp, ar.Err = a.applyV3.AuthStatus()
+ case r.AuthUserAdd != nil:
+ op = "AuthUserAdd"
+ ar.Resp, ar.Err = a.applyV3.UserAdd(r.AuthUserAdd)
+ case r.AuthUserDelete != nil:
+ op = "AuthUserDelete"
+ ar.Resp, ar.Err = a.applyV3.UserDelete(r.AuthUserDelete)
+ case r.AuthUserChangePassword != nil:
+ op = "AuthUserChangePassword"
+ ar.Resp, ar.Err = a.applyV3.UserChangePassword(r.AuthUserChangePassword)
+ case r.AuthUserGrantRole != nil:
+ op = "AuthUserGrantRole"
+ ar.Resp, ar.Err = a.applyV3.UserGrantRole(r.AuthUserGrantRole)
+ case r.AuthUserGet != nil:
+ op = "AuthUserGet"
+ ar.Resp, ar.Err = a.applyV3.UserGet(r.AuthUserGet)
+ case r.AuthUserRevokeRole != nil:
+ op = "AuthUserRevokeRole"
+ ar.Resp, ar.Err = a.applyV3.UserRevokeRole(r.AuthUserRevokeRole)
+ case r.AuthRoleAdd != nil:
+ op = "AuthRoleAdd"
+ ar.Resp, ar.Err = a.applyV3.RoleAdd(r.AuthRoleAdd)
+ case r.AuthRoleGrantPermission != nil:
+ op = "AuthRoleGrantPermission"
+ ar.Resp, ar.Err = a.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission)
+ case r.AuthRoleGet != nil:
+ op = "AuthRoleGet"
+ ar.Resp, ar.Err = a.applyV3.RoleGet(r.AuthRoleGet)
+ case r.AuthRoleRevokePermission != nil:
+ op = "AuthRoleRevokePermission"
+ ar.Resp, ar.Err = a.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission)
+ case r.AuthRoleDelete != nil:
+ op = "AuthRoleDelete"
+ ar.Resp, ar.Err = a.applyV3.RoleDelete(r.AuthRoleDelete)
+ case r.AuthUserList != nil:
+ op = "AuthUserList"
+ ar.Resp, ar.Err = a.applyV3.UserList(r.AuthUserList)
+ case r.AuthRoleList != nil:
+ op = "AuthRoleList"
+ ar.Resp, ar.Err = a.applyV3.RoleList(r.AuthRoleList)
+ default:
+ a.lg.Panic("not implemented apply", zap.Stringer("raft-request", r))
+ }
+ return ar
+}
+
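+// Alarm applies the alarm request and, whenever an alarm is activated or deactivated, refreshes the
+// applier wrapping so the new alarm state takes effect immediately.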
+func (a *uberApplier) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
+ resp, err := a.applyV3.Alarm(ar)
+
+ if ar.Action == pb.AlarmRequest_ACTIVATE ||
+ ar.Action == pb.AlarmRequest_DEACTIVATE {
+ a.restoreAlarms()
+ }
+ return resp, err
+}
diff --git a/server/etcdserver/apply/uber_applier_test.go b/server/etcdserver/apply/uber_applier_test.go
new file mode 100644
index 00000000000..086db23c6cf
--- /dev/null
+++ b/server/etcdserver/apply/uber_applier_test.go
@@ -0,0 +1,279 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package apply
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
+ "golang.org/x/crypto/bcrypt"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm"
+ "go.etcd.io/etcd/server/v3/etcdserver/cindex"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+ "go.etcd.io/etcd/server/v3/lease"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+const memberID = 111195
+
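+// defaultUberApplier wires up a complete UberApplier for tests: a temporary backend, a single-member
+// cluster, an in-memory lessor, an alarm store and a simple-token auth store.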
+func defaultUberApplier(t *testing.T) UberApplier {
+ lg := zaptest.NewLogger(t)
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ t.Cleanup(func() {
+ betesting.Close(t, be)
+ })
+
+ cluster := membership.NewCluster(lg)
+ cluster.AddMember(&membership.Member{ID: memberID}, true)
+ lessor := lease.NewLessor(lg, be, cluster, lease.LessorConfig{})
+ kv := mvcc.NewStore(lg, be, lessor, mvcc.StoreConfig{})
+ alarmStore, err := v3alarm.NewAlarmStore(lg, schema.NewAlarmBackend(lg, be))
+ require.NoError(t, err)
+
+ tp, err := auth.NewTokenProvider(lg, "simple", dummyIndexWaiter, 300*time.Second)
+ require.NoError(t, err)
+ authStore := auth.NewAuthStore(
+ lg,
+ schema.NewAuthBackend(lg, be),
+ tp,
+ bcrypt.DefaultCost,
+ )
+ consistentIndex := cindex.NewConsistentIndex(be)
+ return NewUberApplier(
+ lg,
+ be,
+ kv,
+ alarmStore,
+ authStore,
+ lessor,
+ cluster,
+ &fakeRaftStatusGetter{},
+ &fakeSnapshotServer{},
+ consistentIndex,
+ 1*time.Hour,
+ false,
+ 16*1024*1024, // 16MB
+ )
+}
+
+// TestUberApplier_Alarm_Corrupt tests the applier returns ErrCorrupt after alarm CORRUPT is activated
+func TestUberApplier_Alarm_Corrupt(t *testing.T) {
+ tcs := []struct {
+ name string
+ request *pb.InternalRaftRequest
+ expectError error
+ }{
+ {
+ name: "Put request returns ErrCorrupt after alarm CORRUPT is activated",
+ request: &pb.InternalRaftRequest{Put: &pb.PutRequest{}},
+ expectError: errors.ErrCorrupt,
+ },
+ {
+ name: "Range request returns ErrCorrupt after alarm CORRUPT is activated",
+ request: &pb.InternalRaftRequest{Range: &pb.RangeRequest{}},
+ expectError: errors.ErrCorrupt,
+ },
+ {
+ name: "DeleteRange request returns ErrCorrupt after alarm CORRUPT is activated",
+ request: &pb.InternalRaftRequest{DeleteRange: &pb.DeleteRangeRequest{}},
+ expectError: errors.ErrCorrupt,
+ },
+ {
+ name: "Txn request returns ErrCorrupt after alarm CORRUPT is activated",
+ request: &pb.InternalRaftRequest{Txn: &pb.TxnRequest{}},
+ expectError: errors.ErrCorrupt,
+ },
+ {
+ name: "Compaction request returns ErrCorrupt after alarm CORRUPT is activated",
+ request: &pb.InternalRaftRequest{Compaction: &pb.CompactionRequest{}},
+ expectError: errors.ErrCorrupt,
+ },
+ {
+ name: "LeaseGrant request returns ErrCorrupt after alarm CORRUPT is activated",
+ request: &pb.InternalRaftRequest{LeaseGrant: &pb.LeaseGrantRequest{}},
+ expectError: errors.ErrCorrupt,
+ },
+ {
+ name: "LeaseRevoke request returns ErrCorrupt after alarm CORRUPT is activated",
+ request: &pb.InternalRaftRequest{LeaseRevoke: &pb.LeaseRevokeRequest{}},
+ expectError: errors.ErrCorrupt,
+ },
+ }
+
+ ua := defaultUberApplier(t)
+ result := ua.Apply(&pb.InternalRaftRequest{
+ Header: &pb.RequestHeader{},
+ Alarm: &pb.AlarmRequest{
+ Action: pb.AlarmRequest_ACTIVATE,
+ MemberID: memberID,
+ Alarm: pb.AlarmType_CORRUPT,
+ },
+ })
+ require.NotNil(t, result)
+ require.NoError(t, result.Err)
+
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ result = ua.Apply(tc.request)
+ require.NotNil(t, result)
+ require.Equalf(t, tc.expectError, result.Err, "Apply: got %v, expect: %v", result.Err, tc.expectError)
+ })
+ }
+}
+
+// TestUberApplier_Alarm_Quota tests the applier returns ErrNoSpace after alarm NOSPACE is activated
+func TestUberApplier_Alarm_Quota(t *testing.T) {
+ tcs := []struct {
+ name string
+ request *pb.InternalRaftRequest
+ expectError error
+ }{
+ {
+ name: "Put request returns ErrCorrupt after alarm NOSPACE is activated",
+ request: &pb.InternalRaftRequest{Put: &pb.PutRequest{Key: []byte(key)}},
+ expectError: errors.ErrNoSpace,
+ },
+ {
+ name: "Txn request cost > 0 returns ErrCorrupt after alarm NOSPACE is activated",
+ request: &pb.InternalRaftRequest{Txn: &pb.TxnRequest{
+ Success: []*pb.RequestOp{
+ {
+ Request: &pb.RequestOp_RequestPut{
+ RequestPut: &pb.PutRequest{
+ Key: []byte(key),
+ },
+ },
+ },
+ },
+ }},
+ expectError: errors.ErrNoSpace,
+ },
+ {
+ name: "Txn request cost = 0 is still allowed after alarm NOSPACE is activated",
+ request: &pb.InternalRaftRequest{Txn: &pb.TxnRequest{
+ Success: []*pb.RequestOp{
+ {
+ Request: &pb.RequestOp_RequestRange{
+ RequestRange: &pb.RangeRequest{
+ Key: []byte(key),
+ },
+ },
+ },
+ },
+ }},
+ expectError: nil,
+ },
+ {
+ name: "Txn request cost = 0 in both branches is still allowed after alarm NOSPACE is activated",
+ request: &pb.InternalRaftRequest{Txn: &pb.TxnRequest{
+ Compare: []*pb.Compare{
+ {
+ Key: []byte(key),
+ Result: pb.Compare_EQUAL,
+ Target: pb.Compare_CREATE,
+ TargetUnion: &pb.Compare_CreateRevision{CreateRevision: 0},
+ },
+ },
+ Success: []*pb.RequestOp{
+ {
+ Request: &pb.RequestOp_RequestRange{
+ RequestRange: &pb.RangeRequest{
+ Key: []byte(key),
+ },
+ },
+ },
+ },
+ Failure: []*pb.RequestOp{
+ {
+ Request: &pb.RequestOp_RequestDeleteRange{
+ RequestDeleteRange: &pb.DeleteRangeRequest{
+ Key: []byte(key),
+ },
+ },
+ },
+ },
+ }},
+ expectError: nil,
+ },
+ {
+ name: "LeaseGrant request returns ErrCorrupt after alarm NOSPACE is activated",
+ request: &pb.InternalRaftRequest{LeaseGrant: &pb.LeaseGrantRequest{}},
+ expectError: errors.ErrNoSpace,
+ },
+ }
+
+ ua := defaultUberApplier(t)
+ result := ua.Apply(&pb.InternalRaftRequest{
+ Header: &pb.RequestHeader{},
+ Alarm: &pb.AlarmRequest{
+ Action: pb.AlarmRequest_ACTIVATE,
+ MemberID: memberID,
+ Alarm: pb.AlarmType_NOSPACE,
+ },
+ })
+ require.NotNil(t, result)
+ require.NoError(t, result.Err)
+
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ result = ua.Apply(tc.request)
+ require.NotNil(t, result)
+ require.Equalf(t, tc.expectError, result.Err, "Apply: got %v, expect: %v", result.Err, tc.expectError)
+ })
+ }
+}
+
+// TestUberApplier_Alarm_Deactivate tests the applier should be able to apply after alarm is deactivated
+func TestUberApplier_Alarm_Deactivate(t *testing.T) {
+ ua := defaultUberApplier(t)
+ result := ua.Apply(&pb.InternalRaftRequest{
+ Header: &pb.RequestHeader{},
+ Alarm: &pb.AlarmRequest{
+ Action: pb.AlarmRequest_ACTIVATE,
+ MemberID: memberID,
+ Alarm: pb.AlarmType_NOSPACE,
+ },
+ })
+ require.NotNil(t, result)
+ require.NoError(t, result.Err)
+
+ result = ua.Apply(&pb.InternalRaftRequest{Put: &pb.PutRequest{Key: []byte(key)}})
+ require.NotNil(t, result)
+ require.Equalf(t, errors.ErrNoSpace, result.Err, "Apply: got %v, expect: %v", result.Err, errors.ErrNoSpace)
+
+ result = ua.Apply(&pb.InternalRaftRequest{
+ Header: &pb.RequestHeader{},
+ Alarm: &pb.AlarmRequest{
+ Action: pb.AlarmRequest_DEACTIVATE,
+ MemberID: memberID,
+ Alarm: pb.AlarmType_NOSPACE,
+ },
+ })
+ require.NotNil(t, result)
+ require.NoError(t, result.Err)
+
+ result = ua.Apply(&pb.InternalRaftRequest{Put: &pb.PutRequest{Key: []byte(key)}})
+ require.NotNil(t, result)
+ assert.NoError(t, result.Err)
+}
diff --git a/server/etcdserver/apply_auth.go b/server/etcdserver/apply_auth.go
deleted file mode 100644
index 74fd2b4fc37..00000000000
--- a/server/etcdserver/apply_auth.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "context"
- "sync"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/pkg/v3/traceutil"
- "go.etcd.io/etcd/server/v3/auth"
- "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
- "go.etcd.io/etcd/server/v3/lease"
- "go.etcd.io/etcd/server/v3/mvcc"
-)
-
-type authApplierV3 struct {
- applierV3
- as auth.AuthStore
- lessor lease.Lessor
-
- // mu serializes Apply so that user isn't corrupted and so that
- // serialized requests don't leak data from TOCTOU errors
- mu sync.Mutex
-
- authInfo auth.AuthInfo
-}
-
-func newAuthApplierV3(as auth.AuthStore, base applierV3, lessor lease.Lessor) *authApplierV3 {
- return &authApplierV3{applierV3: base, as: as, lessor: lessor}
-}
-
-func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult {
- aa.mu.Lock()
- defer aa.mu.Unlock()
- if r.Header != nil {
- // backward-compatible with pre-3.0 releases when internalRaftRequest
- // does not have header field
- aa.authInfo.Username = r.Header.Username
- aa.authInfo.Revision = r.Header.AuthRevision
- }
- if needAdminPermission(r) {
- if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil {
- aa.authInfo.Username = ""
- aa.authInfo.Revision = 0
- return &applyResult{err: err}
- }
- }
- ret := aa.applierV3.Apply(r, shouldApplyV3)
- aa.authInfo.Username = ""
- aa.authInfo.Revision = 0
- return ret
-}
-
-func (aa *authApplierV3) Put(ctx context.Context, txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
- if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil {
- return nil, nil, err
- }
-
- if err := aa.checkLeasePuts(lease.LeaseID(r.Lease)); err != nil {
- // The specified lease is already attached with a key that cannot
- // be written by this user. It means the user cannot revoke the
- // lease so attaching the lease to the newly written key should
- // be forbidden.
- return nil, nil, err
- }
-
- if r.PrevKv {
- err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil)
- if err != nil {
- return nil, nil, err
- }
- }
- return aa.applierV3.Put(ctx, txn, r)
-}
-
-func (aa *authApplierV3) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
- if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
- return nil, err
- }
- return aa.applierV3.Range(ctx, txn, r)
-}
-
-func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
- if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
- return nil, err
- }
- if r.PrevKv {
- err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd)
- if err != nil {
- return nil, err
- }
- }
-
- return aa.applierV3.DeleteRange(txn, r)
-}
-
-func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error {
- for _, requ := range reqs {
- switch tv := requ.Request.(type) {
- case *pb.RequestOp_RequestRange:
- if tv.RequestRange == nil {
- continue
- }
-
- if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil {
- return err
- }
-
- case *pb.RequestOp_RequestPut:
- if tv.RequestPut == nil {
- continue
- }
-
- if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil {
- return err
- }
-
- case *pb.RequestOp_RequestDeleteRange:
- if tv.RequestDeleteRange == nil {
- continue
- }
-
- if tv.RequestDeleteRange.PrevKv {
- err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
- if err != nil {
- return err
- }
- }
-
- err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error {
- for _, c := range rt.Compare {
- if err := as.IsRangePermitted(ai, c.Key, c.RangeEnd); err != nil {
- return err
- }
- }
- if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil {
- return err
- }
- return checkTxnReqsPermission(as, ai, rt.Failure)
-}
-
-func (aa *authApplierV3) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
- if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil {
- return nil, nil, err
- }
- return aa.applierV3.Txn(ctx, rt)
-}
-
-func (aa *authApplierV3) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
- if err := aa.checkLeasePuts(lease.LeaseID(lc.ID)); err != nil {
- return nil, err
- }
- return aa.applierV3.LeaseRevoke(lc)
-}
-
-func (aa *authApplierV3) checkLeasePuts(leaseID lease.LeaseID) error {
- lease := aa.lessor.Lookup(leaseID)
- if lease != nil {
- for _, key := range lease.Keys() {
- if err := aa.as.IsPutPermitted(&aa.authInfo, []byte(key)); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (aa *authApplierV3) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
- err := aa.as.IsAdminPermitted(&aa.authInfo)
- if err != nil && r.Name != aa.authInfo.Username {
- aa.authInfo.Username = ""
- aa.authInfo.Revision = 0
- return &pb.AuthUserGetResponse{}, err
- }
-
- return aa.applierV3.UserGet(r)
-}
-
-func (aa *authApplierV3) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
- err := aa.as.IsAdminPermitted(&aa.authInfo)
- if err != nil && !aa.as.HasRole(aa.authInfo.Username, r.Role) {
- aa.authInfo.Username = ""
- aa.authInfo.Revision = 0
- return &pb.AuthRoleGetResponse{}, err
- }
-
- return aa.applierV3.RoleGet(r)
-}
-
-func needAdminPermission(r *pb.InternalRaftRequest) bool {
- switch {
- case r.AuthEnable != nil:
- return true
- case r.AuthDisable != nil:
- return true
- case r.AuthStatus != nil:
- return true
- case r.AuthUserAdd != nil:
- return true
- case r.AuthUserDelete != nil:
- return true
- case r.AuthUserChangePassword != nil:
- return true
- case r.AuthUserGrantRole != nil:
- return true
- case r.AuthUserRevokeRole != nil:
- return true
- case r.AuthRoleAdd != nil:
- return true
- case r.AuthRoleGrantPermission != nil:
- return true
- case r.AuthRoleRevokePermission != nil:
- return true
- case r.AuthRoleDelete != nil:
- return true
- case r.AuthUserList != nil:
- return true
- case r.AuthRoleList != nil:
- return true
- default:
- return false
- }
-}
diff --git a/server/etcdserver/apply_v2.go b/server/etcdserver/apply_v2.go
index f2e3d89da57..60442fcdfde 100644
--- a/server/etcdserver/apply_v2.go
+++ b/server/etcdserver/apply_v2.go
@@ -16,139 +16,49 @@ package etcdserver
import (
"encoding/json"
- "fmt"
+ "net/http"
"path"
- "strconv"
- "time"
-
- "go.etcd.io/etcd/pkg/v3/pbutil"
- "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
"go.uber.org/zap"
-)
-
-const v2Version = "v2"
-
-// ApplierV2 is the interface for processing V2 raft messages
-type ApplierV2 interface {
- Delete(r *RequestV2) Response
- Post(r *RequestV2) Response
- Put(r *RequestV2) Response
- QGet(r *RequestV2) Response
- Sync(r *RequestV2) Response
-}
-
-func NewApplierV2(lg *zap.Logger, s v2store.Store, c *membership.RaftCluster) ApplierV2 {
- if lg == nil {
- lg = zap.NewNop()
- }
- return &applierV2store{lg: lg, store: s, cluster: c}
-}
-type applierV2store struct {
- lg *zap.Logger
- store v2store.Store
- cluster *membership.RaftCluster
-}
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/membershippb"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+)
-func (a *applierV2store) Delete(r *RequestV2) Response {
- switch {
- case r.PrevIndex > 0 || r.PrevValue != "":
- return toResponse(a.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
- default:
- return toResponse(a.store.Delete(r.Path, r.Dir, r.Recursive))
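+// v2ToV3Request converts the only v2 requests still permitted in the --v2-deprecation=write-only stage
+// (member attribute and cluster version puts) into their v3 equivalents; any other v2 WAL entry panics.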
+func v2ToV3Request(lg *zap.Logger, r *RequestV2) pb.InternalRaftRequest {
+ if r.Method != http.MethodPut || (!storeMemberAttributeRegexp.MatchString(r.Path) && r.Path != membership.StoreClusterVersionKey()) {
+ lg.Panic("detected disallowed v2 WAL for stage --v2-deprecation=write-only", zap.String("method", r.Method))
}
-}
-
-func (a *applierV2store) Post(r *RequestV2) Response {
- return toResponse(a.store.Create(r.Path, r.Dir, r.Val, true, r.TTLOptions()))
-}
-
-func (a *applierV2store) Put(r *RequestV2) Response {
- ttlOptions := r.TTLOptions()
- exists, existsSet := pbutil.GetBool(r.PrevExist)
- switch {
- case existsSet:
- if exists {
- if r.PrevIndex == 0 && r.PrevValue == "" {
- return toResponse(a.store.Update(r.Path, r.Val, ttlOptions))
- }
- return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
+ if storeMemberAttributeRegexp.MatchString(r.Path) {
+ id := membership.MustParseMemberIDFromKey(lg, path.Dir(r.Path))
+ var attr membership.Attributes
+ if err := json.Unmarshal([]byte(r.Val), &attr); err != nil {
+ lg.Panic("failed to unmarshal", zap.String("value", r.Val), zap.Error(err))
}
- return toResponse(a.store.Create(r.Path, r.Dir, r.Val, false, ttlOptions))
- case r.PrevIndex > 0 || r.PrevValue != "":
- return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
- default:
- if storeMemberAttributeRegexp.MatchString(r.Path) {
- id := membership.MustParseMemberIDFromKey(a.lg, path.Dir(r.Path))
- var attr membership.Attributes
- if err := json.Unmarshal([]byte(r.Val), &attr); err != nil {
- a.lg.Panic("failed to unmarshal", zap.String("value", r.Val), zap.Error(err))
- }
- if a.cluster != nil {
- a.cluster.UpdateAttributes(id, attr, true)
- }
- // return an empty response since there is no consumer.
- return Response{}
+ return pb.InternalRaftRequest{
+ Header: &pb.RequestHeader{
+ ID: r.ID,
+ },
+ ClusterMemberAttrSet: &membershippb.ClusterMemberAttrSetRequest{
+ Member_ID: uint64(id),
+ MemberAttributes: &membershippb.Attributes{
+ Name: attr.Name,
+ ClientUrls: attr.ClientURLs,
+ },
+ },
}
- // remove v2 version set to avoid the conflict between v2 and v3.
- if r.Path == membership.StoreClusterVersionKey() {
- // return an empty response since there is no consumer.
- return Response{}
- }
- return toResponse(a.store.Set(r.Path, r.Dir, r.Val, ttlOptions))
- }
-}
-
-func (a *applierV2store) QGet(r *RequestV2) Response {
- return toResponse(a.store.Get(r.Path, r.Recursive, r.Sorted))
-}
-
-func (a *applierV2store) Sync(r *RequestV2) Response {
- a.store.DeleteExpiredKeys(time.Unix(0, r.Time))
- return Response{}
-}
-
-// applyV2Request interprets r as a call to v2store.X
-// and returns a Response interpreted from v2store.Event
-func (s *EtcdServer) applyV2Request(r *RequestV2) (resp Response) {
- stringer := panicAlternativeStringer{
- stringer: r,
- alternative: func() string { return fmt.Sprintf("id:%d,method:%s,path:%s", r.ID, r.Method, r.Path) },
}
- defer func(start time.Time) {
- success := resp.Err == nil
- applySec.WithLabelValues(v2Version, r.Method, strconv.FormatBool(success)).Observe(time.Since(start).Seconds())
- warnOfExpensiveRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, stringer, nil, nil)
- }(time.Now())
-
- switch r.Method {
- case "POST":
- return s.applyV2.Post(r)
- case "PUT":
- return s.applyV2.Put(r)
- case "DELETE":
- return s.applyV2.Delete(r)
- case "QGET":
- return s.applyV2.QGet(r)
- case "SYNC":
- return s.applyV2.Sync(r)
- default:
- // This should never be reached, but just in case:
- return Response{Err: ErrUnknownMethod}
- }
-}
-
-func (r *RequestV2) TTLOptions() v2store.TTLOptionSet {
- refresh, _ := pbutil.GetBool(r.Refresh)
- ttlOptions := v2store.TTLOptionSet{Refresh: refresh}
- if r.Expiration != 0 {
- ttlOptions.ExpireTime = time.Unix(0, r.Expiration)
+ if r.Path == membership.StoreClusterVersionKey() {
+ return pb.InternalRaftRequest{
+ Header: &pb.RequestHeader{
+ ID: r.ID,
+ },
+ ClusterVersionSet: &membershippb.ClusterVersionSetRequest{
+ Ver: r.Val,
+ },
+ }
}
- return ttlOptions
-}
-
-func toResponse(ev *v2store.Event, err error) Response {
- return Response{Event: ev, Err: err}
+ lg.Panic("detected disallowed v2 WAL for stage --v2-deprecation=write-only", zap.String("method", r.Method))
+ return pb.InternalRaftRequest{}
}
diff --git a/server/etcdserver/backend.go b/server/etcdserver/backend.go
deleted file mode 100644
index 081be2b5259..00000000000
--- a/server/etcdserver/backend.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "fmt"
- "os"
- "time"
-
- "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/server/v3/config"
- "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
- "go.etcd.io/etcd/server/v3/etcdserver/cindex"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
-
- "go.uber.org/zap"
-)
-
-func newBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend {
- bcfg := backend.DefaultBackendConfig()
- bcfg.Path = cfg.BackendPath()
- bcfg.UnsafeNoFsync = cfg.UnsafeNoFsync
- if cfg.BackendBatchLimit != 0 {
- bcfg.BatchLimit = cfg.BackendBatchLimit
- if cfg.Logger != nil {
- cfg.Logger.Info("setting backend batch limit", zap.Int("batch limit", cfg.BackendBatchLimit))
- }
- }
- if cfg.BackendBatchInterval != 0 {
- bcfg.BatchInterval = cfg.BackendBatchInterval
- if cfg.Logger != nil {
- cfg.Logger.Info("setting backend batch interval", zap.Duration("batch interval", cfg.BackendBatchInterval))
- }
- }
- bcfg.BackendFreelistType = cfg.BackendFreelistType
- bcfg.Logger = cfg.Logger
- if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes {
- // permit 10% excess over quota for disarm
- bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10)
- }
- bcfg.Mlock = cfg.ExperimentalMemoryMlock
- bcfg.Hooks = hooks
- return backend.New(bcfg)
-}
-
-// openSnapshotBackend renames a snapshot db to the current etcd db and opens it.
-func openSnapshotBackend(cfg config.ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot, hooks backend.Hooks) (backend.Backend, error) {
- snapPath, err := ss.DBFilePath(snapshot.Metadata.Index)
- if err != nil {
- return nil, fmt.Errorf("failed to find database snapshot file (%v)", err)
- }
- if err := os.Rename(snapPath, cfg.BackendPath()); err != nil {
- return nil, fmt.Errorf("failed to rename database snapshot file (%v)", err)
- }
- return openBackend(cfg, hooks), nil
-}
-
-// openBackend returns a backend using the current etcd db.
-func openBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend {
- fn := cfg.BackendPath()
-
- now, beOpened := time.Now(), make(chan backend.Backend)
- go func() {
- beOpened <- newBackend(cfg, hooks)
- }()
-
- select {
- case be := <-beOpened:
- cfg.Logger.Info("opened backend db", zap.String("path", fn), zap.Duration("took", time.Since(now)))
- return be
-
- case <-time.After(10 * time.Second):
- cfg.Logger.Info(
- "db file is flocked by another process, or taking too long",
- zap.String("path", fn),
- zap.Duration("took", time.Since(now)),
- )
- }
-
- return <-beOpened
-}
-
-// recoverBackendSnapshot recovers the DB from a snapshot in case etcd crashes
-// before updating the backend db after persisting raft snapshot to disk,
-// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this
-// case, replace the db with the snapshot db sent by the leader.
-func recoverSnapshotBackend(cfg config.ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot, beExist bool, hooks backend.Hooks) (backend.Backend, error) {
- consistentIndex := uint64(0)
- if beExist {
- consistentIndex, _ = cindex.ReadConsistentIndex(oldbe.BatchTx())
- }
- if snapshot.Metadata.Index <= consistentIndex {
- return oldbe, nil
- }
- oldbe.Close()
- return openSnapshotBackend(cfg, snap.New(cfg.Logger, cfg.SnapDir()), snapshot, hooks)
-}
diff --git a/server/etcdserver/bootstrap.go b/server/etcdserver/bootstrap.go
new file mode 100644
index 00000000000..992e26d4f96
--- /dev/null
+++ b/server/etcdserver/bootstrap.go
@@ -0,0 +1,713 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ "go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/etcd/server/v3/etcdserver/api"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery"
+ "go.etcd.io/etcd/server/v3/etcdserver/cindex"
+ servererrors "go.etcd.io/etcd/server/v3/etcdserver/errors"
+ serverstorage "go.etcd.io/etcd/server/v3/storage"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ "go.etcd.io/etcd/server/v3/storage/wal"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
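+// bootstrap prepares everything the etcd server needs before it can start: the snapshot directory,
+// the backend, the WAL, cluster membership and the raft node.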
+func bootstrap(cfg config.ServerConfig) (b *bootstrappedServer, err error) {
+ if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
+ cfg.Logger.Warn(
+ "exceeded recommended request limit",
+ zap.Uint("max-request-bytes", cfg.MaxRequestBytes),
+ zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))),
+ zap.Int("recommended-request-bytes", recommendedMaxRequestBytes),
+ zap.String("recommended-request-size", recommendedMaxRequestBytesString),
+ )
+ }
+
+ if terr := fileutil.TouchDirAll(cfg.Logger, cfg.DataDir); terr != nil {
+ return nil, fmt.Errorf("cannot access data directory: %w", terr)
+ }
+
+ if terr := fileutil.TouchDirAll(cfg.Logger, cfg.MemberDir()); terr != nil {
+ return nil, fmt.Errorf("cannot access member directory: %w", terr)
+ }
+ ss := bootstrapSnapshot(cfg)
+ prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.PeerDialTimeout())
+ if err != nil {
+ return nil, err
+ }
+
+ haveWAL := wal.Exist(cfg.WALDir())
+ st := v2store.New(StoreClusterPrefix, StoreKeysPrefix)
+ backend, err := bootstrapBackend(cfg, haveWAL, st, ss)
+ if err != nil {
+ return nil, err
+ }
+ var bwal *bootstrappedWAL
+
+ if haveWAL {
+ if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
+ return nil, fmt.Errorf("cannot write to WAL directory: %w", err)
+ }
+ bwal = bootstrapWALFromSnapshot(cfg, backend.snapshot)
+ }
+
+ cfg.Logger.Info("bootstrapping cluster")
+ cluster, err := bootstrapCluster(cfg, bwal, prt)
+ if err != nil {
+ backend.Close()
+ return nil, err
+ }
+
+ cfg.Logger.Info("bootstrapping storage")
+ s := bootstrapStorage(cfg, st, backend, bwal, cluster)
+
+ if err = cluster.Finalize(cfg, s); err != nil {
+ backend.Close()
+ return nil, err
+ }
+
+ cfg.Logger.Info("bootstrapping raft")
+ raft := bootstrapRaft(cfg, cluster, s.wal)
+ return &bootstrappedServer{
+ prt: prt,
+ ss: ss,
+ storage: s,
+ cluster: cluster,
+ raft: raft,
+ }, nil
+}
+
+type bootstrappedServer struct {
+ storage *bootstrappedStorage
+ cluster *bootstrappedCluster
+ raft *bootstrappedRaft
+ prt http.RoundTripper
+ ss *snap.Snapshotter
+}
+
+func (s *bootstrappedServer) Close() {
+ s.storage.Close()
+}
+
+type bootstrappedStorage struct {
+ backend *bootstrappedBackend
+ wal *bootstrappedWAL
+ st v2store.Store
+}
+
+func (s *bootstrappedStorage) Close() {
+ s.backend.Close()
+}
+
+type bootstrappedBackend struct {
+ beHooks *serverstorage.BackendHooks
+ be backend.Backend
+ ci cindex.ConsistentIndexer
+ beExist bool
+ snapshot *raftpb.Snapshot
+}
+
+func (s *bootstrappedBackend) Close() {
+ s.be.Close()
+}
+
+type bootstrappedCluster struct {
+ remotes []*membership.Member
+ cl *membership.RaftCluster
+ nodeID types.ID
+}
+
+type bootstrappedRaft struct {
+ lg *zap.Logger
+ heartbeat time.Duration
+
+ peers []raft.Peer
+ config *raft.Config
+ storage *raft.MemoryStorage
+}
+
+func bootstrapStorage(cfg config.ServerConfig, st v2store.Store, be *bootstrappedBackend, wal *bootstrappedWAL, cl *bootstrappedCluster) *bootstrappedStorage {
+ if wal == nil {
+ wal = bootstrapNewWAL(cfg, cl)
+ }
+
+ return &bootstrappedStorage{
+ backend: be,
+ st: st,
+ wal: wal,
+ }
+}
+
+func bootstrapSnapshot(cfg config.ServerConfig) *snap.Snapshotter {
+ if err := fileutil.TouchDirAll(cfg.Logger, cfg.SnapDir()); err != nil {
+ cfg.Logger.Fatal(
+ "failed to create snapshot directory",
+ zap.String("path", cfg.SnapDir()),
+ zap.Error(err),
+ )
+ }
+
+ if err := fileutil.RemoveMatchFile(cfg.Logger, cfg.SnapDir(), func(fileName string) bool {
+ return strings.HasPrefix(fileName, "tmp")
+ }); err != nil {
+ cfg.Logger.Error(
+ "failed to remove temp file(s) in snapshot directory",
+ zap.String("path", cfg.SnapDir()),
+ zap.Error(err),
+ )
+ }
+ return snap.New(cfg.Logger, cfg.SnapDir())
+}
+
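+// bootstrapBackend opens the bolt backend, wires up the consistent-index hooks, optionally defragments
+// it, and, when a WAL already exists, recovers the backend from the latest usable snapshot.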
+func bootstrapBackend(cfg config.ServerConfig, haveWAL bool, st v2store.Store, ss *snap.Snapshotter) (backend *bootstrappedBackend, err error) {
+ beExist := fileutil.Exist(cfg.BackendPath())
+ ci := cindex.NewConsistentIndex(nil)
+ beHooks := serverstorage.NewBackendHooks(cfg.Logger, ci)
+ be := serverstorage.OpenBackend(cfg, beHooks)
+ defer func() {
+ if err != nil && be != nil {
+ be.Close()
+ }
+ }()
+ ci.SetBackend(be)
+ schema.CreateMetaBucket(be.BatchTx())
+ if cfg.ExperimentalBootstrapDefragThresholdMegabytes != 0 {
+ err = maybeDefragBackend(cfg, be)
+ if err != nil {
+ return nil, err
+ }
+ }
+ cfg.Logger.Debug("restore consistentIndex", zap.Uint64("index", ci.ConsistentIndex()))
+
+ // TODO(serathius): Implement schema setup in fresh storage
+ var snapshot *raftpb.Snapshot
+ if haveWAL {
+ snapshot, be, err = recoverSnapshot(cfg, st, be, beExist, beHooks, ci, ss)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if beExist {
+ s1, s2 := be.Size(), be.SizeInUse()
+ cfg.Logger.Info(
+ "recovered v3 backend",
+ zap.Int64("backend-size-bytes", s1),
+ zap.String("backend-size", humanize.Bytes(uint64(s1))),
+ zap.Int64("backend-size-in-use-bytes", s2),
+ zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))),
+ )
+ if err = schema.Validate(cfg.Logger, be.ReadTx()); err != nil {
+ cfg.Logger.Error("Failed to validate schema", zap.Error(err))
+ return nil, err
+ }
+ }
+
+ return &bootstrappedBackend{
+ beHooks: beHooks,
+ be: be,
+ ci: ci,
+ beExist: beExist,
+ snapshot: snapshot,
+ }, nil
+}
+
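+// maybeDefragBackend defragments the backend at bootstrap time when the amount of freeable space
+// exceeds the configured experimental threshold.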
+func maybeDefragBackend(cfg config.ServerConfig, be backend.Backend) error {
+ size := be.Size()
+ sizeInUse := be.SizeInUse()
+ freeableMemory := uint(size - sizeInUse)
+ thresholdBytes := cfg.ExperimentalBootstrapDefragThresholdMegabytes * 1024 * 1024
+ if freeableMemory < thresholdBytes {
+ cfg.Logger.Info("Skipping defragmentation",
+ zap.Int64("current-db-size-bytes", size),
+ zap.String("current-db-size", humanize.Bytes(uint64(size))),
+ zap.Int64("current-db-size-in-use-bytes", sizeInUse),
+ zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse))),
+ zap.Uint("experimental-bootstrap-defrag-threshold-bytes", thresholdBytes),
+ zap.String("experimental-bootstrap-defrag-threshold", humanize.Bytes(uint64(thresholdBytes))),
+ )
+ return nil
+ }
+ return be.Defrag()
+}
+
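+// bootstrapCluster builds the membership information either from existing WAL metadata or, for a fresh
+// member, from the configured (or discovered) peer URLs.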
+func bootstrapCluster(cfg config.ServerConfig, bwal *bootstrappedWAL, prt http.RoundTripper) (c *bootstrappedCluster, err error) {
+ switch {
+ case bwal == nil && !cfg.NewCluster:
+ c, err = bootstrapExistingClusterNoWAL(cfg, prt)
+ case bwal == nil && cfg.NewCluster:
+ c, err = bootstrapNewClusterNoWAL(cfg, prt)
+ case bwal != nil && bwal.haveWAL:
+ c, err = bootstrapClusterWithWAL(cfg, bwal.meta)
+ default:
+ return nil, fmt.Errorf("unsupported bootstrap config")
+ }
+ if err != nil {
+ return nil, err
+ }
+ return c, nil
+}
+
+func bootstrapExistingClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper) (*bootstrappedCluster, error) {
+ if err := cfg.VerifyJoinExisting(); err != nil {
+ return nil, err
+ }
+ cl, err := membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap, membership.WithMaxLearners(cfg.ExperimentalMaxLearners))
+ if err != nil {
+ return nil, err
+ }
+ existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt)
+ if gerr != nil {
+ return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %w", gerr)
+ }
+ if err := membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil {
+ return nil, fmt.Errorf("error validating peerURLs %s: %w", existingCluster, err)
+ }
+ if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt, cfg.ReqTimeout()) {
+ return nil, fmt.Errorf("incompatible with current running cluster")
+ }
+ scaleUpLearners := false
+ if err := membership.ValidateMaxLearnerConfig(cfg.ExperimentalMaxLearners, existingCluster.Members(), scaleUpLearners); err != nil {
+ return nil, err
+ }
+ remotes := existingCluster.Members()
+ cl.SetID(types.ID(0), existingCluster.ID())
+ member := cl.MemberByName(cfg.Name)
+ return &bootstrappedCluster{
+ remotes: remotes,
+ cl: cl,
+ nodeID: member.ID,
+ }, nil
+}
+
+func bootstrapNewClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper) (*bootstrappedCluster, error) {
+ if err := cfg.VerifyBootstrap(); err != nil {
+ return nil, err
+ }
+ cl, err := membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap, membership.WithMaxLearners(cfg.ExperimentalMaxLearners))
+ if err != nil {
+ return nil, err
+ }
+ m := cl.MemberByName(cfg.Name)
+ if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.BootstrapTimeoutEffective()) {
+ return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
+ }
+ if cfg.ShouldDiscover() {
+ var str string
+ if cfg.DiscoveryURL != "" {
+ cfg.Logger.Warn("V2 discovery is deprecated!")
+ str, err = v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
+ } else {
+ cfg.Logger.Info("Bootstrapping cluster using v3 discovery.")
+ str, err = v3discovery.JoinCluster(cfg.Logger, &cfg.DiscoveryCfg, m.ID, cfg.InitialPeerURLsMap.String())
+ }
+ if err != nil {
+ return nil, &servererrors.DiscoveryError{Op: "join", Err: err}
+ }
+ var urlsmap types.URLsMap
+ urlsmap, err = types.NewURLsMap(str)
+ if err != nil {
+ return nil, err
+ }
+ if config.CheckDuplicateURL(urlsmap) {
+ return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
+ }
+ if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap, membership.WithMaxLearners(cfg.ExperimentalMaxLearners)); err != nil {
+ return nil, err
+ }
+ }
+ return &bootstrappedCluster{
+ remotes: nil,
+ cl: cl,
+ nodeID: m.ID,
+ }, nil
+}
+
+func bootstrapClusterWithWAL(cfg config.ServerConfig, meta *snapshotMetadata) (*bootstrappedCluster, error) {
+ if err := fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
+ return nil, fmt.Errorf("cannot write to member directory: %w", err)
+ }
+
+ if cfg.ShouldDiscover() {
+ cfg.Logger.Warn(
+ "discovery token is ignored since cluster already initialized; valid logs are found",
+ zap.String("wal-dir", cfg.WALDir()),
+ )
+ }
+ cl := membership.NewCluster(cfg.Logger, membership.WithMaxLearners(cfg.ExperimentalMaxLearners))
+
+ scaleUpLearners := false
+ if err := membership.ValidateMaxLearnerConfig(cfg.ExperimentalMaxLearners, cl.Members(), scaleUpLearners); err != nil {
+ return nil, err
+ }
+
+ cl.SetID(meta.nodeID, meta.clusterID)
+ return &bootstrappedCluster{
+ cl: cl,
+ nodeID: meta.nodeID,
+ }, nil
+}
+
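+// recoverSnapshot loads the newest snapshot that is covered by the WAL,
+// restores the v2 store from it and, if needed, swaps in the backend recovered
+// from the snapshot db before re-binding the consistent index to it.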
+func recoverSnapshot(cfg config.ServerConfig, st v2store.Store, be backend.Backend, beExist bool, beHooks *serverstorage.BackendHooks, ci cindex.ConsistentIndexer, ss *snap.Snapshotter) (*raftpb.Snapshot, backend.Backend, error) {
+ // Find a snapshot to start/restart a raft node
+ walSnaps, err := wal.ValidSnapshotEntries(cfg.Logger, cfg.WALDir())
+ if err != nil {
+ return nil, be, err
+ }
+ // snapshot files can be orphaned if etcd crashes after writing them but before writing the corresponding
+ // WAL log entries
+ snapshot, err := ss.LoadNewestAvailable(walSnaps)
+ if err != nil && !errors.Is(err, snap.ErrNoSnapshot) {
+ return nil, be, err
+ }
+
+ if snapshot != nil {
+ if err = st.Recovery(snapshot.Data); err != nil {
+ cfg.Logger.Panic("failed to recover from snapshot", zap.Error(err))
+ }
+
+ if err = serverstorage.AssertNoV2StoreContent(cfg.Logger, st, cfg.V2Deprecation); err != nil {
+ cfg.Logger.Error("illegal v2store content", zap.Error(err))
+ return nil, be, err
+ }
+
+ cfg.Logger.Info(
+ "recovered v2 store from snapshot",
+ zap.Uint64("snapshot-index", snapshot.Metadata.Index),
+ zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))),
+ )
+
+ if be, err = serverstorage.RecoverSnapshotBackend(cfg, be, *snapshot, beExist, beHooks); err != nil {
+ cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err))
+ }
+ // A snapshot db may have already been recovered, and the old db should have
+ // already been closed in this case, so we should set the backend again.
+ ci.SetBackend(be)
+
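+ // Sanity check: an existing backend must not lag behind the snapshot,
+ // unless it never stored a consistent index (pre-3.0 data).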
+ if beExist {
+ // TODO: remove kvindex != 0 checking when we do not expect users to upgrade
+ // etcd from pre-3.0 release.
+ kvindex := ci.ConsistentIndex()
+ if kvindex < snapshot.Metadata.Index {
+ if kvindex != 0 {
+ return nil, be, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", cfg.BackendPath(), kvindex, snapshot.Metadata.Index)
+ }
+ cfg.Logger.Warn(
+ "consistent index was never saved",
+ zap.Uint64("snapshot-index", snapshot.Metadata.Index),
+ )
+ }
+ }
+ } else {
+ cfg.Logger.Info("No snapshot found. Recovering WAL from scratch!")
+ }
+ return snapshot, be, nil
+}
+
+func (c *bootstrappedCluster) Finalize(cfg config.ServerConfig, s *bootstrappedStorage) error {
+ if !s.wal.haveWAL {
+ c.cl.SetID(c.nodeID, c.cl.ID())
+ }
+ c.cl.SetStore(s.st)
+ c.cl.SetBackend(schema.NewMembershipBackend(cfg.Logger, s.backend.be))
+ if s.wal.haveWAL {
+ c.cl.Recover(api.UpdateCapability)
+ if c.databaseFileMissing(s) {
+ bepath := cfg.BackendPath()
+ os.RemoveAll(bepath)
+ return fmt.Errorf("database file (%v) of the backend is missing", bepath)
+ }
+ }
+ scaleUpLearners := false
+ return membership.ValidateMaxLearnerConfig(cfg.ExperimentalMaxLearners, c.cl.Members(), scaleUpLearners)
+}
+
+func (c *bootstrappedCluster) databaseFileMissing(s *bootstrappedStorage) bool {
+ v3Cluster := c.cl.Version() != nil && !c.cl.Version().LessThan(semver.Version{Major: 3})
+ return v3Cluster && !s.backend.beExist
+}
+
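+// bootstrapRaft mirrors the cluster bootstrap paths: members without a WAL
+// start raft from the cluster configuration (passing the full peer list only
+// for a brand new cluster), while members with a WAL restart raft from the
+// recovered log.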
+func bootstrapRaft(cfg config.ServerConfig, cluster *bootstrappedCluster, bwal *bootstrappedWAL) *bootstrappedRaft {
+ switch {
+ case !bwal.haveWAL && !cfg.NewCluster:
+ return bootstrapRaftFromCluster(cfg, cluster.cl, nil, bwal)
+ case !bwal.haveWAL && cfg.NewCluster:
+ return bootstrapRaftFromCluster(cfg, cluster.cl, cluster.cl.MemberIDs(), bwal)
+ case bwal.haveWAL:
+ return bootstrapRaftFromWAL(cfg, bwal)
+ default:
+ cfg.Logger.Panic("unsupported bootstrap config")
+ return nil
+ }
+}
+
+func bootstrapRaftFromCluster(cfg config.ServerConfig, cl *membership.RaftCluster, ids []types.ID, bwal *bootstrappedWAL) *bootstrappedRaft {
+ member := cl.MemberByName(cfg.Name)
+ peers := make([]raft.Peer, len(ids))
+ for i, id := range ids {
+ var ctx []byte
+ ctx, err := json.Marshal((*cl).Member(id))
+ if err != nil {
+ cfg.Logger.Panic("failed to marshal member", zap.Error(err))
+ }
+ peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
+ }
+ cfg.Logger.Info(
+ "starting local member",
+ zap.String("local-member-id", member.ID.String()),
+ zap.String("cluster-id", cl.ID().String()),
+ )
+ s := bwal.MemoryStorage()
+ return &bootstrappedRaft{
+ lg: cfg.Logger,
+ heartbeat: time.Duration(cfg.TickMs) * time.Millisecond,
+ config: raftConfig(cfg, uint64(member.ID), s),
+ peers: peers,
+ storage: s,
+ }
+}
+
+func bootstrapRaftFromWAL(cfg config.ServerConfig, bwal *bootstrappedWAL) *bootstrappedRaft {
+ s := bwal.MemoryStorage()
+ return &bootstrappedRaft{
+ lg: cfg.Logger,
+ heartbeat: time.Duration(cfg.TickMs) * time.Millisecond,
+ config: raftConfig(cfg, uint64(bwal.meta.nodeID), s),
+ storage: s,
+ }
+}
+
+func raftConfig(cfg config.ServerConfig, id uint64, s *raft.MemoryStorage) *raft.Config {
+ return &raft.Config{
+ ID: id,
+ ElectionTick: cfg.ElectionTicks,
+ HeartbeatTick: 1,
+ Storage: s,
+ MaxSizePerMsg: maxSizePerMsg,
+ MaxInflightMsgs: maxInflightMsgs,
+ CheckQuorum: true,
+ PreVote: cfg.PreVote,
+ Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")),
+ }
+}
+
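+// newRaftNode starts a fresh raft node when bootstrap peers are present (new
+// cluster) and restarts an existing node from storage when they are not.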
+func (b *bootstrappedRaft) newRaftNode(ss *snap.Snapshotter, wal *wal.WAL, cl *membership.RaftCluster) *raftNode {
+ var n raft.Node
+ if len(b.peers) == 0 {
+ n = raft.RestartNode(b.config)
+ } else {
+ n = raft.StartNode(b.config, b.peers)
+ }
+ raftStatusMu.Lock()
+ raftStatus = n.Status
+ raftStatusMu.Unlock()
+ return newRaftNode(
+ raftNodeConfig{
+ lg: b.lg,
+ isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
+ Node: n,
+ heartbeat: b.heartbeat,
+ raftStorage: b.storage,
+ storage: serverstorage.NewStorage(b.lg, wal, ss),
+ },
+ )
+}
+
+func bootstrapWALFromSnapshot(cfg config.ServerConfig, snapshot *raftpb.Snapshot) *bootstrappedWAL {
+ wal, st, ents, snap, meta := openWALFromSnapshot(cfg, snapshot)
+ bwal := &bootstrappedWAL{
+ lg: cfg.Logger,
+ w: wal,
+ st: st,
+ ents: ents,
+ snapshot: snap,
+ meta: meta,
+ haveWAL: true,
+ }
+
+ if cfg.ForceNewCluster {
+ // discard the previously uncommitted entries
+ bwal.ents = bwal.CommitedEntries()
+ entries := bwal.NewConfigChangeEntries()
+ // force commit config change entries
+ bwal.AppendAndCommitEntries(entries)
+ cfg.Logger.Info(
+ "forcing restart member",
+ zap.String("cluster-id", meta.clusterID.String()),
+ zap.String("local-member-id", meta.nodeID.String()),
+ zap.Uint64("commit-index", bwal.st.Commit),
+ )
+ } else {
+ cfg.Logger.Info(
+ "restarting local member",
+ zap.String("cluster-id", meta.clusterID.String()),
+ zap.String("local-member-id", meta.nodeID.String()),
+ zap.Uint64("commit-index", bwal.st.Commit),
+ )
+ }
+ return bwal
+}
+
+// openWALFromSnapshot reads the WAL at the given snap and returns the wal, its latest HardState, all entries that appear
+// after the position of the given snap in the WAL, the snap itself, and the node/cluster ID metadata stored in the WAL.
+// The snap must have been previously saved to the WAL, or this call will panic.
+func openWALFromSnapshot(cfg config.ServerConfig, snapshot *raftpb.Snapshot) (*wal.WAL, *raftpb.HardState, []raftpb.Entry, *raftpb.Snapshot, *snapshotMetadata) {
+ var walsnap walpb.Snapshot
+ if snapshot != nil {
+ walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
+ }
+ repaired := false
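+ // Retry opening and reading the WAL, repairing a torn tail
+ // (io.ErrUnexpectedEOF) at most once.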
+ for {
+ w, err := wal.Open(cfg.Logger, cfg.WALDir(), walsnap)
+ if err != nil {
+ cfg.Logger.Fatal("failed to open WAL", zap.Error(err))
+ }
+ if cfg.UnsafeNoFsync {
+ w.SetUnsafeNoFsync()
+ }
+ wmetadata, st, ents, err := w.ReadAll()
+ if err != nil {
+ w.Close()
+ // we can only repair ErrUnexpectedEOF and we never repair twice.
+ if repaired || !errors.Is(err, io.ErrUnexpectedEOF) {
+ cfg.Logger.Fatal("failed to read WAL, cannot be repaired", zap.Error(err))
+ }
+ if !wal.Repair(cfg.Logger, cfg.WALDir()) {
+ cfg.Logger.Fatal("failed to repair WAL", zap.Error(err))
+ } else {
+ cfg.Logger.Info("repaired WAL", zap.Error(err))
+ repaired = true
+ }
+ continue
+ }
+ var metadata etcdserverpb.Metadata
+ pbutil.MustUnmarshal(&metadata, wmetadata)
+ id := types.ID(metadata.NodeID)
+ cid := types.ID(metadata.ClusterID)
+ meta := &snapshotMetadata{clusterID: cid, nodeID: id}
+ return w, &st, ents, snapshot, meta
+ }
+}
+
+type snapshotMetadata struct {
+ nodeID, clusterID types.ID
+}
+
+func bootstrapNewWAL(cfg config.ServerConfig, cl *bootstrappedCluster) *bootstrappedWAL {
+ metadata := pbutil.MustMarshal(
+ &etcdserverpb.Metadata{
+ NodeID: uint64(cl.nodeID),
+ ClusterID: uint64(cl.cl.ID()),
+ },
+ )
+ w, err := wal.Create(cfg.Logger, cfg.WALDir(), metadata)
+ if err != nil {
+ cfg.Logger.Panic("failed to create WAL", zap.Error(err))
+ }
+ if cfg.UnsafeNoFsync {
+ w.SetUnsafeNoFsync()
+ }
+ return &bootstrappedWAL{
+ lg: cfg.Logger,
+ w: w,
+ }
+}
+
+type bootstrappedWAL struct {
+ lg *zap.Logger
+
+ haveWAL bool
+ w *wal.WAL
+ st *raftpb.HardState
+ ents []raftpb.Entry
+ snapshot *raftpb.Snapshot
+ meta *snapshotMetadata
+}
+
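+// MemoryStorage seeds raft's in-memory storage for restart: the snapshot is
+// applied first, then the persisted HardState, then the WAL entries that
+// follow the snapshot.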
+func (wal *bootstrappedWAL) MemoryStorage() *raft.MemoryStorage {
+ s := raft.NewMemoryStorage()
+ if wal.snapshot != nil {
+ s.ApplySnapshot(*wal.snapshot)
+ }
+ if wal.st != nil {
+ s.SetHardState(*wal.st)
+ }
+ if len(wal.ents) != 0 {
+ s.Append(wal.ents)
+ }
+ return s
+}
+
+func (wal *bootstrappedWAL) CommitedEntries() []raftpb.Entry {
+ for i, ent := range wal.ents {
+ if ent.Index > wal.st.Commit {
+ wal.lg.Info(
+ "discarding uncommitted WAL entries",
+ zap.Uint64("entry-index", ent.Index),
+ zap.Uint64("commit-index-from-wal", wal.st.Commit),
+ zap.Int("number-of-discarded-entries", len(wal.ents)-i),
+ )
+ return wal.ents[:i]
+ }
+ }
+ return wal.ents
+}
+
+func (wal *bootstrappedWAL) NewConfigChangeEntries() []raftpb.Entry {
+ return serverstorage.CreateConfigChangeEnts(
+ wal.lg,
+ serverstorage.GetEffectiveNodeIDsFromWALEntries(wal.lg, wal.snapshot, wal.ents),
+ uint64(wal.meta.nodeID),
+ wal.st.Term,
+ wal.st.Commit,
+ )
+}
+
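+// AppendAndCommitEntries appends the given entries, persists them to the WAL,
+// and advances the cached commit index so they are treated as committed on
+// restart.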
+func (wal *bootstrappedWAL) AppendAndCommitEntries(ents []raftpb.Entry) {
+ wal.ents = append(wal.ents, ents...)
+ err := wal.w.Save(raftpb.HardState{}, ents)
+ if err != nil {
+ wal.lg.Fatal("failed to save hard state and entries", zap.Error(err))
+ }
+ if len(wal.ents) != 0 {
+ wal.st.Commit = wal.ents[len(wal.ents)-1].Index
+ }
+}
diff --git a/server/etcdserver/bootstrap_test.go b/server/etcdserver/bootstrap_test.go
new file mode 100644
index 00000000000..a692a68566c
--- /dev/null
+++ b/server/etcdserver/bootstrap_test.go
@@ -0,0 +1,316 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file contains tests for the etcd server bootstrap process
+// (cluster bootstrap and backend bootstrap).
+
+package etcdserver
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "go.uber.org/zap/zaptest"
+
+ bolt "go.etcd.io/bbolt"
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
+ serverstorage "go.etcd.io/etcd/server/v3/storage"
+ "go.etcd.io/etcd/server/v3/storage/datadir"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ "go.etcd.io/etcd/server/v3/storage/wal"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+func TestBootstrapExistingClusterNoWALMaxLearner(t *testing.T) {
+ tests := []struct {
+ name string
+ members []etcdserverpb.Member
+ maxLearner int
+ hasError bool
+ expectedError error
+ }{
+ {
+ name: "bootstrap success: maxLearner gt learner count",
+ members: []etcdserverpb.Member{
+ {ID: 4512484362714696085, PeerURLs: []string{"http://localhost:2380"}},
+ {ID: 5321713336100798248, PeerURLs: []string{"http://localhost:2381"}},
+ {ID: 5670219998796287055, PeerURLs: []string{"http://localhost:2382"}},
+ },
+ maxLearner: 1,
+ hasError: false,
+ expectedError: nil,
+ },
+ {
+ name: "bootstrap success: maxLearner eq learner count",
+ members: []etcdserverpb.Member{
+ {ID: 4512484362714696085, PeerURLs: []string{"http://localhost:2380"}, IsLearner: true},
+ {ID: 5321713336100798248, PeerURLs: []string{"http://localhost:2381"}},
+ {ID: 5670219998796287055, PeerURLs: []string{"http://localhost:2382"}, IsLearner: true},
+ },
+ maxLearner: 2,
+ hasError: false,
+ expectedError: nil,
+ },
+ {
+ name: "bootstrap fail: maxLearner lt learner count",
+ members: []etcdserverpb.Member{
+ {ID: 4512484362714696085, PeerURLs: []string{"http://localhost:2380"}},
+ {ID: 5321713336100798248, PeerURLs: []string{"http://localhost:2381"}, IsLearner: true},
+ {ID: 5670219998796287055, PeerURLs: []string{"http://localhost:2382"}, IsLearner: true},
+ },
+ maxLearner: 1,
+ hasError: true,
+ expectedError: membership.ErrTooManyLearners,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cluster, err := types.NewURLsMap("node0=http://localhost:2380,node1=http://localhost:2381,node2=http://localhost:2382")
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ cfg := config.ServerConfig{
+ Name: "node0",
+ InitialPeerURLsMap: cluster,
+ Logger: zaptest.NewLogger(t),
+ ExperimentalMaxLearners: tt.maxLearner,
+ }
+ _, err = bootstrapExistingClusterNoWAL(cfg, mockBootstrapRoundTrip(tt.members))
+ hasError := err != nil
+ if hasError != tt.hasError {
+ t.Errorf("expected error: %v got: %v", tt.hasError, err)
+ }
+ if hasError && !strings.Contains(err.Error(), tt.expectedError.Error()) {
+ t.Fatalf("expected error to contain: %q, got: %q", tt.expectedError.Error(), err.Error())
+ }
+ })
+ }
+}
+
+type roundTripFunc func(r *http.Request) (*http.Response, error)
+
+func (s roundTripFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+ return s(r)
+}
+
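+// mockBootstrapRoundTrip fakes the peer /members, /version and
+// downgrade-enabled endpoints so cluster bootstrap can be exercised without a
+// live cluster.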
+func mockBootstrapRoundTrip(members []etcdserverpb.Member) roundTripFunc {
+ return func(r *http.Request) (*http.Response, error) {
+ switch {
+ case strings.Contains(r.URL.String(), "/members"):
+ return &http.Response{
+ StatusCode: http.StatusOK,
+ Body: io.NopCloser(strings.NewReader(mockMembersJSON(members))),
+ Header: http.Header{"X-Etcd-Cluster-Id": []string{"f4588138892a16b0"}},
+ }, nil
+ case strings.Contains(r.URL.String(), "/version"):
+ return &http.Response{
+ StatusCode: http.StatusOK,
+ Body: io.NopCloser(strings.NewReader(mockVersionJSON())),
+ }, nil
+ case strings.Contains(r.URL.String(), DowngradeEnabledPath):
+ return &http.Response{
+ StatusCode: http.StatusOK,
+ Body: io.NopCloser(strings.NewReader(`true`)),
+ }, nil
+ }
+ return nil, nil
+ }
+}
+
+func mockVersionJSON() string {
+ v := version.Versions{Server: "3.7.0", Cluster: "3.7.0"}
+ version, _ := json.Marshal(v)
+ return string(version)
+}
+
+func mockMembersJSON(m []etcdserverpb.Member) string {
+ members, _ := json.Marshal(m)
+ return string(members)
+}
+
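+// TestBootstrapBackend verifies that bootstrapBackend reports a consistent
+// index of 0 for a fresh data dir and of 5 when recovering from the snapshot
+// db prepared by prepareData below.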
+func TestBootstrapBackend(t *testing.T) {
+ tests := []struct {
+ name string
+ prepareData func(config.ServerConfig) error
+ expectedConsistentIdx uint64
+ expectedError error
+ }{
+ {
+ name: "bootstrap backend success: no data files",
+ prepareData: nil,
+ expectedConsistentIdx: 0,
+ expectedError: nil,
+ },
+ {
+ name: "bootstrap backend success: have data files and snapshot db file",
+ prepareData: prepareData,
+ expectedConsistentIdx: 5,
+ expectedError: nil,
+ },
+ // TODO(ahrtr): add more test cases
+ // https://github.com/etcd-io/etcd/issues/13507
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ dataDir, err := createDataDir(t)
+ if err != nil {
+ t.Fatalf("Failed to create the data dir, unexpected error: %v", err)
+ }
+
+ cfg := config.ServerConfig{
+ Name: "demoNode",
+ DataDir: dataDir,
+ BackendFreelistType: bolt.FreelistArrayType,
+ Logger: zaptest.NewLogger(t),
+ }
+
+ if tt.prepareData != nil {
+ if err = tt.prepareData(cfg); err != nil {
+ t.Fatalf("failed to prepare data, unexpected error: %v", err)
+ }
+ }
+
+ haveWAL := wal.Exist(cfg.WALDir())
+ st := v2store.New(StoreClusterPrefix, StoreKeysPrefix)
+ ss := snap.New(cfg.Logger, cfg.SnapDir())
+ backend, err := bootstrapBackend(cfg, haveWAL, st, ss)
+ t.Cleanup(func() {
+ backend.Close()
+ })
+
+ hasError := err != nil
+ expectedHasError := tt.expectedError != nil
+ if hasError != expectedHasError {
+ t.Errorf("expected error: %v got: %v", expectedHasError, err)
+ }
+ if hasError && !strings.Contains(err.Error(), tt.expectedError.Error()) {
+ t.Fatalf("expected error to contain: %q, got: %q", tt.expectedError.Error(), err.Error())
+ }
+
+ if backend.ci.ConsistentIndex() != tt.expectedConsistentIdx {
+ t.Errorf("expected consistent index: %d, got: %d", tt.expectedConsistentIdx, backend.ci.ConsistentIndex())
+ }
+ })
+ }
+}
+
+func createDataDir(t *testing.T) (string, error) {
+ var err error
+
+ // create the temporary data dir
+ dataDir := t.TempDir()
+
+ // create ${dataDir}/member/snap
+ if err = os.MkdirAll(datadir.ToSnapDir(dataDir), 0o700); err != nil {
+ return "", err
+ }
+
+ // create ${dataDir}/member/wal
+ err = os.MkdirAll(datadir.ToWALDir(dataDir), 0o700)
+ if err != nil {
+ return "", err
+ }
+
+ return dataDir, nil
+}
+
+// prepareData seeds the data dir for the recovery test case: a WAL containing
+// a snapshot record (term 2, index 5), a matching snapshot file, a snapshot db
+// whose consistent index is 5, and a stale backend db whose consistent index is 1.
+func prepareData(cfg config.ServerConfig) error {
+ var snapshotTerm, snapshotIndex uint64 = 2, 5
+
+ if err := createWALFileWithSnapshotRecord(cfg, snapshotTerm, snapshotIndex); err != nil {
+ return err
+ }
+
+ return createSnapshotAndBackendDB(cfg, snapshotTerm, snapshotIndex)
+}
+
+func createWALFileWithSnapshotRecord(cfg config.ServerConfig, snapshotTerm, snapshotIndex uint64) (err error) {
+ var w *wal.WAL
+ if w, err = wal.Create(cfg.Logger, cfg.WALDir(), []byte("somedata")); err != nil {
+ return err
+ }
+
+ defer func() {
+ err = w.Close()
+ }()
+
+ walSnap := walpb.Snapshot{
+ Index: snapshotIndex,
+ Term: snapshotTerm,
+ ConfState: &raftpb.ConfState{
+ Voters: []uint64{0x00ffca74},
+ AutoLeave: false,
+ },
+ }
+
+ if err = w.SaveSnapshot(walSnap); err != nil {
+ return err
+ }
+
+ return w.Save(raftpb.HardState{Term: snapshotTerm, Vote: 3, Commit: snapshotIndex}, nil)
+}
+
+func createSnapshotAndBackendDB(cfg config.ServerConfig, snapshotTerm, snapshotIndex uint64) error {
+ var err error
+
+ confState := raftpb.ConfState{
+ Voters: []uint64{1, 2, 3},
+ }
+
+ // create snapshot file
+ ss := snap.New(cfg.Logger, cfg.SnapDir())
+ if err = ss.SaveSnap(raftpb.Snapshot{
+ Data: []byte("{}"),
+ Metadata: raftpb.SnapshotMetadata{
+ ConfState: confState,
+ Index: snapshotIndex,
+ Term: snapshotTerm,
+ },
+ }); err != nil {
+ return err
+ }
+
+ // create snapshot db file: "%016x.snap.db"
+ be := serverstorage.OpenBackend(cfg, nil)
+ schema.CreateMetaBucket(be.BatchTx())
+ schema.UnsafeUpdateConsistentIndex(be.BatchTx(), snapshotIndex, snapshotTerm)
+ schema.MustUnsafeSaveConfStateToBackend(cfg.Logger, be.BatchTx(), &confState)
+ if err = be.Close(); err != nil {
+ return err
+ }
+ sdb := filepath.Join(cfg.SnapDir(), fmt.Sprintf("%016x.snap.db", snapshotIndex))
+ if err = os.Rename(cfg.BackendPath(), sdb); err != nil {
+ return err
+ }
+
+ // create backend db file
+ be = serverstorage.OpenBackend(cfg, nil)
+ schema.CreateMetaBucket(be.BatchTx())
+ schema.UnsafeUpdateConsistentIndex(be.BatchTx(), 1, 1)
+ return be.Close()
+}
diff --git a/server/etcdserver/cindex/cindex.go b/server/etcdserver/cindex/cindex.go
index 5086490f3cb..b865742014d 100644
--- a/server/etcdserver/cindex/cindex.go
+++ b/server/etcdserver/cindex/cindex.go
@@ -15,36 +15,37 @@
package cindex
import (
- "encoding/binary"
"sync"
"sync/atomic"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
-)
-
-var (
- MetaBucketName = []byte("meta")
-
- ConsistentIndexKeyName = []byte("consistent_index")
- TermKeyName = []byte("term")
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
)
type Backend interface {
- BatchTx() backend.BatchTx
+ ReadTx() backend.ReadTx
}
// ConsistentIndexer is an interface that wraps the Get/Set/Save method for consistentIndex.
type ConsistentIndexer interface {
-
// ConsistentIndex returns the consistent index of current executing entry.
ConsistentIndex() uint64
+ // ConsistentApplyingIndex returns the consistent applying index of current executing entry.
+ ConsistentApplyingIndex() (uint64, uint64)
+
+ // UnsafeConsistentIndex is similar to ConsistentIndex, but it doesn't lock the transaction.
+ UnsafeConsistentIndex() uint64
+
// SetConsistentIndex set the consistent index of current executing entry.
SetConsistentIndex(v uint64, term uint64)
+ // SetConsistentApplyingIndex set the consistent applying index of current executing entry.
+ SetConsistentApplyingIndex(v uint64, term uint64)
+
// UnsafeSave must be called holding the lock on the tx.
// It saves consistentIndex to the underlying stable storage.
- UnsafeSave(tx backend.BatchTx)
+ UnsafeSave(tx backend.UnsafeReadWriter)
// SetBackend set the available backend.BatchTx for ConsistentIndexer.
SetBackend(be Backend)
@@ -61,6 +62,19 @@ type consistentIndex struct {
// The value is being persisted in the backend since v3.5.
term uint64
+ // applyingIndex and applyingTerm are just a temporary cache of the raftpb.Entry.Index
+ // and raftpb.Entry.Term, and they are not ready to be persisted yet. They will be
+ // saved to consistentIndex and term above in the txPostLockInsideApplyHook.
+ //
+ // TODO(ahrtr): try to remove the OnPreCommitUnsafe, and compare the
+ // performance difference. Afterwards we can make a decision on whether
+ // or not we should remove OnPreCommitUnsafe. If we do remove it, then we
+ // can remove applyingIndex and applyingTerm, and save the e.Index and
+ // e.Term to consistentIndex and term directly in applyEntries, and
+ // persist them into db in the txPostLockInsideApplyHook.
+ applyingIndex uint64
+ applyingTerm uint64
+
// be is used for initial read consistentIndex
be Backend
// mutex is protecting be.
@@ -80,7 +94,17 @@ func (ci *consistentIndex) ConsistentIndex() uint64 {
ci.mutex.Lock()
defer ci.mutex.Unlock()
- v, term := ReadConsistentIndex(ci.be.BatchTx())
+ v, term := schema.ReadConsistentIndex(ci.be.ReadTx())
+ ci.SetConsistentIndex(v, term)
+ return v
+}
+
+func (ci *consistentIndex) UnsafeConsistentIndex() uint64 {
+ if index := atomic.LoadUint64(&ci.consistentIndex); index > 0 {
+ return index
+ }
+
+ v, term := schema.UnsafeReadConsistentIndex(ci.be.ReadTx())
ci.SetConsistentIndex(v, term)
return v
}
@@ -90,10 +114,10 @@ func (ci *consistentIndex) SetConsistentIndex(v uint64, term uint64) {
atomic.StoreUint64(&ci.term, term)
}
-func (ci *consistentIndex) UnsafeSave(tx backend.BatchTx) {
+func (ci *consistentIndex) UnsafeSave(tx backend.UnsafeReadWriter) {
index := atomic.LoadUint64(&ci.consistentIndex)
term := atomic.LoadUint64(&ci.term)
- UnsafeUpdateConsistentIndex(tx, index, term, true)
+ schema.UnsafeUpdateConsistentIndex(tx, index, term)
}
func (ci *consistentIndex) SetBackend(be Backend) {
@@ -104,6 +128,15 @@ func (ci *consistentIndex) SetBackend(be Backend) {
ci.SetConsistentIndex(0, 0)
}
+func (ci *consistentIndex) ConsistentApplyingIndex() (uint64, uint64) {
+ return atomic.LoadUint64(&ci.applyingIndex), atomic.LoadUint64(&ci.applyingTerm)
+}
+
+func (ci *consistentIndex) SetConsistentApplyingIndex(v uint64, term uint64) {
+ atomic.StoreUint64(&ci.applyingIndex, v)
+ atomic.StoreUint64(&ci.applyingTerm, term)
+}
+
func NewFakeConsistentIndex(index uint64) ConsistentIndexer {
return &fakeConsistentIndex{index: index}
}
@@ -113,83 +146,33 @@ type fakeConsistentIndex struct {
term uint64
}
-func (f *fakeConsistentIndex) ConsistentIndex() uint64 { return f.index }
-
-func (f *fakeConsistentIndex) SetConsistentIndex(index uint64, term uint64) {
- atomic.StoreUint64(&f.index, index)
- atomic.StoreUint64(&f.term, term)
+func (f *fakeConsistentIndex) ConsistentIndex() uint64 {
+ return atomic.LoadUint64(&f.index)
}
-func (f *fakeConsistentIndex) UnsafeSave(_ backend.BatchTx) {}
-func (f *fakeConsistentIndex) SetBackend(_ Backend) {}
-
-// UnsafeCreateMetaBucket creates the `meta` bucket (if it does not exists yet).
-func UnsafeCreateMetaBucket(tx backend.BatchTx) {
- tx.UnsafeCreateBucket(MetaBucketName)
+func (f *fakeConsistentIndex) ConsistentApplyingIndex() (uint64, uint64) {
+ return atomic.LoadUint64(&f.index), atomic.LoadUint64(&f.term)
}
-// CreateMetaBucket creates the `meta` bucket (if it does not exists yet).
-func CreateMetaBucket(tx backend.BatchTx) {
- tx.Lock()
- defer tx.Unlock()
- tx.UnsafeCreateBucket(MetaBucketName)
+func (f *fakeConsistentIndex) UnsafeConsistentIndex() uint64 {
+ return atomic.LoadUint64(&f.index)
}
-// unsafeGetConsistentIndex loads consistent index & term from given transaction.
-// returns 0,0 if the data are not found.
-// Term is persisted since v3.5.
-func unsafeReadConsistentIndex(tx backend.ReadTx) (uint64, uint64) {
- _, vs := tx.UnsafeRange(MetaBucketName, ConsistentIndexKeyName, nil, 0)
- if len(vs) == 0 {
- return 0, 0
- }
- v := binary.BigEndian.Uint64(vs[0])
- _, ts := tx.UnsafeRange(MetaBucketName, TermKeyName, nil, 0)
- if len(ts) == 0 {
- return v, 0
- }
- t := binary.BigEndian.Uint64(ts[0])
- return v, t
+func (f *fakeConsistentIndex) SetConsistentIndex(index uint64, term uint64) {
+ atomic.StoreUint64(&f.index, index)
+ atomic.StoreUint64(&f.term, term)
}
-// ReadConsistentIndex loads consistent index and term from given transaction.
-// returns 0 if the data are not found.
-func ReadConsistentIndex(tx backend.ReadTx) (uint64, uint64) {
- tx.Lock()
- defer tx.Unlock()
- return unsafeReadConsistentIndex(tx)
+func (f *fakeConsistentIndex) SetConsistentApplyingIndex(index uint64, term uint64) {
+ atomic.StoreUint64(&f.index, index)
+ atomic.StoreUint64(&f.term, term)
}
-func UnsafeUpdateConsistentIndex(tx backend.BatchTx, index uint64, term uint64, onlyGrow bool) {
- if index == 0 {
- // Never save 0 as it means that we didn't loaded the real index yet.
- return
- }
-
- if onlyGrow {
- oldi, oldTerm := unsafeReadConsistentIndex(tx)
- if term < oldTerm {
- return
- }
- if term == oldTerm && index <= oldi {
- return
- }
- }
-
- bs1 := make([]byte, 8)
- binary.BigEndian.PutUint64(bs1, index)
- // put the index into the underlying backend
- // tx has been locked in TxnBegin, so there is no need to lock it again
- tx.UnsafePut(MetaBucketName, ConsistentIndexKeyName, bs1)
- if term > 0 {
- bs2 := make([]byte, 8)
- binary.BigEndian.PutUint64(bs2, term)
- tx.UnsafePut(MetaBucketName, TermKeyName, bs2)
- }
-}
+func (f *fakeConsistentIndex) UnsafeSave(_ backend.UnsafeReadWriter) {}
+func (f *fakeConsistentIndex) SetBackend(_ Backend) {}
-func UpdateConsistentIndex(tx backend.BatchTx, index uint64, term uint64, onlyGrow bool) {
- tx.Lock()
+func UpdateConsistentIndexForce(tx backend.BatchTx, index uint64, term uint64) {
+ tx.LockOutsideApply()
defer tx.Unlock()
- UnsafeUpdateConsistentIndex(tx, index, term, onlyGrow)
+ schema.UnsafeUpdateConsistentIndexForce(tx, index, term)
}
diff --git a/server/etcdserver/cindex/cindex_test.go b/server/etcdserver/cindex/cindex_test.go
index 1e111b9e823..0afb0a2f5fe 100644
--- a/server/etcdserver/cindex/cindex_test.go
+++ b/server/etcdserver/cindex/cindex_test.go
@@ -20,13 +20,16 @@ import (
"time"
"github.com/stretchr/testify/assert"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
- betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+ "go.etcd.io/etcd/server/v3/storage/schema"
)
// TestConsistentIndex ensures that LoadConsistentIndex/Save/ConsistentIndex and backend.BatchTx can work well together.
func TestConsistentIndex(t *testing.T) {
-
be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10)
ci := NewConsistentIndex(be)
@@ -36,7 +39,7 @@ func TestConsistentIndex(t *testing.T) {
}
tx.Lock()
- UnsafeCreateMetaBucket(tx)
+ schema.UnsafeCreateMetaBucket(tx)
tx.Unlock()
be.ForceCommit()
r := uint64(7890123)
@@ -52,7 +55,7 @@ func TestConsistentIndex(t *testing.T) {
be.ForceCommit()
be.Close()
- b := backend.NewDefaultBackend(tmpPath)
+ b := backend.NewDefaultBackend(zaptest.NewLogger(t), tmpPath)
defer b.Close()
ci.SetBackend(b)
index = ci.ConsistentIndex()
@@ -63,8 +66,72 @@ func TestConsistentIndex(t *testing.T) {
assert.Equal(t, r, index)
}
-func TestFakeConsistentIndex(t *testing.T) {
+func TestConsistentIndexDecrease(t *testing.T) {
+ testutil.BeforeTest(t)
+ initIndex := uint64(100)
+ initTerm := uint64(10)
+
+ tcs := []struct {
+ name string
+ index uint64
+ term uint64
+ panicExpected bool
+ }{
+ {
+ name: "Decrease term",
+ index: initIndex + 1,
+ term: initTerm - 1,
+ panicExpected: false, // TODO: Change in v3.7
+ },
+ {
+ name: "Decrease CI",
+ index: initIndex - 1,
+ term: initTerm + 1,
+ panicExpected: true,
+ },
+ {
+ name: "Decrease CI and term",
+ index: initIndex - 1,
+ term: initTerm - 1,
+ panicExpected: true,
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10)
+ tx := be.BatchTx()
+ tx.Lock()
+ schema.UnsafeCreateMetaBucket(tx)
+ schema.UnsafeUpdateConsistentIndex(tx, initIndex, initTerm)
+ tx.Unlock()
+ be.ForceCommit()
+ be.Close()
+
+ be = backend.NewDefaultBackend(zaptest.NewLogger(t), tmpPath)
+ defer be.Close()
+ ci := NewConsistentIndex(be)
+ ci.SetConsistentIndex(tc.index, tc.term)
+ tx = be.BatchTx()
+ func() {
+ tx.Lock()
+ defer tx.Unlock()
+ if tc.panicExpected {
+ assert.Panicsf(t, func() { ci.UnsafeSave(tx) }, "Should refuse to decrease cindex")
+ return
+ }
+ ci.UnsafeSave(tx)
+ }()
+ if !tc.panicExpected {
+ assert.Equal(t, tc.index, ci.ConsistentIndex())
+ ci = NewConsistentIndex(be)
+ assert.Equal(t, tc.index, ci.ConsistentIndex())
+ }
+ })
+ }
+}
+
+func TestFakeConsistentIndex(t *testing.T) {
r := rand.Uint64()
ci := NewFakeConsistentIndex(r)
index := ci.ConsistentIndex()
@@ -77,5 +144,4 @@ func TestFakeConsistentIndex(t *testing.T) {
if index != r {
t.Errorf("expected %d,got %d", r, index)
}
-
}
diff --git a/server/etcdserver/cluster_util.go b/server/etcdserver/cluster_util.go
index 595586e2012..089d2acf71b 100644
--- a/server/etcdserver/cluster_util.go
+++ b/server/etcdserver/cluster_util.go
@@ -18,19 +18,21 @@ import (
"context"
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"sort"
"strconv"
"strings"
"time"
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
-
- "github.com/coreos/go-semver/semver"
- "go.uber.org/zap"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
)
// isMemberBootstrapped tries to check if the given member has been bootstrapped
@@ -70,6 +72,9 @@ func getClusterFromRemotePeers(lg *zap.Logger, urls []string, timeout time.Durat
cc := &http.Client{
Transport: rt,
Timeout: timeout,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
}
for _, u := range urls {
addr := u + "/members"
@@ -80,7 +85,7 @@ func getClusterFromRemotePeers(lg *zap.Logger, urls []string, timeout time.Durat
}
continue
}
- b, err := ioutil.ReadAll(resp.Body)
+ b, err := io.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
if logerr {
@@ -134,11 +139,11 @@ func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string {
return us
}
-// getVersions returns the versions of the members in the given cluster.
+// getMembersVersions returns the versions of the members in the given cluster.
// The key of the returned map is the member's ID. The value of the returned map
// is the semver versions string, including server and cluster.
// If it fails to get the version of a member, the key will be nil.
-func getVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions {
+func getMembersVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper, timeout time.Duration) map[string]*version.Versions {
members := cl.Members()
vers := make(map[string]*version.Versions)
for _, m := range members {
@@ -150,7 +155,7 @@ func getVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt
vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv}
continue
}
- ver, err := getVersion(lg, m, rt)
+ ver, err := getVersion(lg, m, rt, timeout)
if err != nil {
lg.Warn("failed to get version", zap.String("remote-member-id", m.ID.String()), zap.Error(err))
vers[m.ID.String()] = nil
@@ -161,44 +166,6 @@ func getVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt
return vers
}
-// decideClusterVersion decides the cluster version based on the versions map.
-// The returned version is the min server version in the map, or nil if the min
-// version in unknown.
-func decideClusterVersion(lg *zap.Logger, vers map[string]*version.Versions) *semver.Version {
- var cv *semver.Version
- lv := semver.Must(semver.NewVersion(version.Version))
-
- for mid, ver := range vers {
- if ver == nil {
- return nil
- }
- v, err := semver.NewVersion(ver.Server)
- if err != nil {
- lg.Warn(
- "failed to parse server version of remote member",
- zap.String("remote-member-id", mid),
- zap.String("remote-member-version", ver.Server),
- zap.Error(err),
- )
- return nil
- }
- if lv.LessThan(*v) {
- lg.Warn(
- "leader found higher-versioned member",
- zap.String("local-member-version", lv.String()),
- zap.String("remote-member-id", mid),
- zap.String("remote-member-version", ver.Server),
- )
- }
- if cv == nil {
- cv = v
- } else if v.LessThan(*cv) {
- cv = v
- }
- }
- return cv
-}
-
// allowedVersionRange decides the available version range of the cluster that local server can join in;
// if the downgrade enabled status is true, the version window is [oneMinorHigher, oneMinorHigher]
// if the downgrade is not enabled, the version window is [MinClusterVersion, localVersion]
@@ -221,9 +188,9 @@ func allowedVersionRange(downgradeEnabled bool) (minV *semver.Version, maxV *sem
// cluster version in the range of [MinV, MaxV] and no known members has a cluster version
// out of the range.
// We set this rule since when the local member joins, another member might be offline.
-func isCompatibleWithCluster(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool {
- vers := getVersions(lg, cl, local, rt)
- minV, maxV := allowedVersionRange(getDowngradeEnabledFromRemotePeers(lg, cl, local, rt))
+func isCompatibleWithCluster(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper, timeout time.Duration) bool {
+ vers := getMembersVersions(lg, cl, local, rt, timeout)
+ minV, maxV := allowedVersionRange(getDowngradeEnabledFromRemotePeers(lg, cl, local, rt, timeout))
return isCompatibleWithVers(lg, vers, local, minV, maxV)
}
@@ -261,7 +228,7 @@ func isCompatibleWithVers(lg *zap.Logger, vers map[string]*version.Versions, loc
"cluster version of remote member is not compatible; too high",
zap.String("remote-member-id", id),
zap.String("remote-member-cluster-version", clusterv.String()),
- zap.String("minimum-cluster-version-supported", minV.String()),
+ zap.String("maximum-cluster-version-supported", maxV.String()),
)
return false
}
@@ -272,9 +239,13 @@ func isCompatibleWithVers(lg *zap.Logger, vers map[string]*version.Versions, loc
// getVersion returns the Versions of the given member via its
// peerURLs. Returns the last error if it fails to get the version.
-func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (*version.Versions, error) {
+func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper, timeout time.Duration) (*version.Versions, error) {
cc := &http.Client{
Transport: rt,
+ Timeout: timeout,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
}
var (
err error
@@ -294,7 +265,7 @@ func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (*ve
continue
}
var b []byte
- b, err = ioutil.ReadAll(resp.Body)
+ b, err = io.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
lg.Warn(
@@ -321,11 +292,16 @@ func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (*ve
}
func promoteMemberHTTP(ctx context.Context, url string, id uint64, peerRt http.RoundTripper) ([]*membership.Member, error) {
- cc := &http.Client{Transport: peerRt}
+ cc := &http.Client{
+ Transport: peerRt,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ }
// TODO: refactor member http handler code
// cannot import etcdhttp, so manually construct url
- requestUrl := url + "/members/promote/" + fmt.Sprintf("%d", id)
- req, err := http.NewRequest("POST", requestUrl, nil)
+ requestURL := url + "/members/promote/" + fmt.Sprintf("%d", id)
+ req, err := http.NewRequest(http.MethodPost, requestURL, nil)
if err != nil {
return nil, err
}
@@ -335,30 +311,30 @@ func promoteMemberHTTP(ctx context.Context, url string, id uint64, peerRt http.R
return nil, err
}
defer resp.Body.Close()
- b, err := ioutil.ReadAll(resp.Body)
+ b, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if resp.StatusCode == http.StatusRequestTimeout {
- return nil, ErrTimeout
+ return nil, errors.ErrTimeout
}
if resp.StatusCode == http.StatusPreconditionFailed {
// both ErrMemberNotLearner and ErrLearnerNotReady have same http status code
- if strings.Contains(string(b), ErrLearnerNotReady.Error()) {
- return nil, ErrLearnerNotReady
+ if strings.Contains(string(b), errors.ErrLearnerNotReady.Error()) {
+ return nil, errors.ErrLearnerNotReady
}
if strings.Contains(string(b), membership.ErrMemberNotLearner.Error()) {
return nil, membership.ErrMemberNotLearner
}
- return nil, fmt.Errorf("member promote: unknown error(%s)", string(b))
+ return nil, fmt.Errorf("member promote: unknown error(%s)", b)
}
if resp.StatusCode == http.StatusNotFound {
return nil, membership.ErrIDNotFound
}
if resp.StatusCode != http.StatusOK { // all other types of errors
- return nil, fmt.Errorf("member promote: unknown error(%s)", string(b))
+ return nil, fmt.Errorf("member promote: unknown error(%s)", b)
}
var membs []*membership.Member
@@ -369,14 +345,14 @@ func promoteMemberHTTP(ctx context.Context, url string, id uint64, peerRt http.R
}
// getDowngradeEnabledFromRemotePeers will get the downgrade enabled status of the cluster.
-func getDowngradeEnabledFromRemotePeers(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool {
+func getDowngradeEnabledFromRemotePeers(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper, timeout time.Duration) bool {
members := cl.Members()
for _, m := range members {
if m.ID == local {
continue
}
- enable, err := getDowngradeEnabled(lg, m, rt)
+ enable, err := getDowngradeEnabled(lg, m, rt, timeout)
if err != nil {
lg.Warn("failed to get downgrade enabled status", zap.String("remote-member-id", m.ID.String()), zap.Error(err))
} else {
@@ -390,9 +366,13 @@ func getDowngradeEnabledFromRemotePeers(lg *zap.Logger, cl *membership.RaftClust
// getDowngradeEnabled returns the downgrade enabled status of the given member
// via its peerURLs. Returns the last error if it fails to get it.
-func getDowngradeEnabled(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (bool, error) {
+func getDowngradeEnabled(lg *zap.Logger, m *membership.Member, rt http.RoundTripper, timeout time.Duration) (bool, error) {
cc := &http.Client{
Transport: rt,
+ Timeout: timeout,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
}
var (
err error
@@ -412,7 +392,7 @@ func getDowngradeEnabled(lg *zap.Logger, m *membership.Member, rt http.RoundTrip
continue
}
var b []byte
- b, err = ioutil.ReadAll(resp.Body)
+ b, err = io.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
lg.Warn(
@@ -438,45 +418,26 @@ func getDowngradeEnabled(lg *zap.Logger, m *membership.Member, rt http.RoundTrip
return false, err
}
-// isMatchedVersions returns true if all server versions are equal to target version, otherwise return false.
-// It can be used to decide the whether the cluster finishes downgrading to target version.
-func isMatchedVersions(lg *zap.Logger, targetVersion *semver.Version, vers map[string]*version.Versions) bool {
- for mid, ver := range vers {
- if ver == nil {
- return false
- }
- v, err := semver.NewVersion(ver.Cluster)
- if err != nil {
- lg.Warn(
- "failed to parse server version of remote member",
- zap.String("remote-member-id", mid),
- zap.String("remote-member-version", ver.Server),
- zap.Error(err),
- )
- return false
- }
- if !targetVersion.Equal(*v) {
- lg.Warn("remotes server has mismatching etcd version",
- zap.String("remote-member-id", mid),
- zap.String("current-server-version", v.String()),
- zap.String("target-version", targetVersion.String()),
- )
- return false
- }
- }
- return true
-}
-
func convertToClusterVersion(v string) (*semver.Version, error) {
ver, err := semver.NewVersion(v)
if err != nil {
// allow input version format Major.Minor
ver, err = semver.NewVersion(v + ".0")
if err != nil {
- return nil, ErrWrongDowngradeVersionFormat
+ return nil, errors.ErrWrongDowngradeVersionFormat
}
}
// cluster version only keeps major.minor, remove patch version
ver = &semver.Version{Major: ver.Major, Minor: ver.Minor}
return ver, nil
}
+
+func GetMembershipInfoInV2Format(lg *zap.Logger, cl *membership.RaftCluster) []byte {
+ st := v2store.New(StoreClusterPrefix, StoreKeysPrefix)
+ cl.Store(st)
+ d, err := st.SaveNoCopy()
+ if err != nil {
+ lg.Panic("failed to save v2 store", zap.Error(err))
+ }
+ return d
+}
diff --git a/server/etcdserver/cluster_util_test.go b/server/etcdserver/cluster_util_test.go
index f2196b84dc3..1ca9ec1345f 100644
--- a/server/etcdserver/cluster_util_test.go
+++ b/server/etcdserver/cluster_util_test.go
@@ -15,54 +15,15 @@
package etcdserver
import (
- "reflect"
"testing"
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/client/pkg/v3/types"
"github.com/coreos/go-semver/semver"
- "go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
)
-var testLogger = zap.NewExample()
-
-func TestDecideClusterVersion(t *testing.T) {
- tests := []struct {
- vers map[string]*version.Versions
- wdver *semver.Version
- }{
- {
- map[string]*version.Versions{"a": {Server: "2.0.0"}},
- semver.Must(semver.NewVersion("2.0.0")),
- },
- // unknown
- {
- map[string]*version.Versions{"a": nil},
- nil,
- },
- {
- map[string]*version.Versions{"a": {Server: "2.0.0"}, "b": {Server: "2.1.0"}, "c": {Server: "2.1.0"}},
- semver.Must(semver.NewVersion("2.0.0")),
- },
- {
- map[string]*version.Versions{"a": {Server: "2.1.0"}, "b": {Server: "2.1.0"}, "c": {Server: "2.1.0"}},
- semver.Must(semver.NewVersion("2.1.0")),
- },
- {
- map[string]*version.Versions{"a": nil, "b": {Server: "2.1.0"}, "c": {Server: "2.1.0"}},
- nil,
- },
- }
-
- for i, tt := range tests {
- dver := decideClusterVersion(testLogger, tt.vers)
- if !reflect.DeepEqual(dver, tt.wdver) {
- t.Errorf("#%d: ver = %+v, want %+v", i, dver, tt.wdver)
- }
- }
-}
-
func TestIsCompatibleWithVers(t *testing.T) {
tests := []struct {
vers map[string]*version.Versions
@@ -127,7 +88,7 @@ func TestIsCompatibleWithVers(t *testing.T) {
}
for i, tt := range tests {
- ok := isCompatibleWithVers(testLogger, tt.vers, tt.local, tt.minV, tt.maxV)
+ ok := isCompatibleWithVers(zaptest.NewLogger(t), tt.vers, tt.local, tt.minV, tt.maxV)
if ok != tt.wok {
t.Errorf("#%d: ok = %+v, want %+v", i, ok, tt.wok)
}
@@ -215,52 +176,3 @@ func TestDecideAllowedVersionRange(t *testing.T) {
})
}
}
-
-func TestIsMatchedVersions(t *testing.T) {
- tests := []struct {
- name string
- targetVersion *semver.Version
- versionMap map[string]*version.Versions
- expectedFinished bool
- }{
- {
- "When downgrade finished",
- &semver.Version{Major: 3, Minor: 4},
- map[string]*version.Versions{
- "mem1": {Server: "3.4.1", Cluster: "3.4.0"},
- "mem2": {Server: "3.4.2-pre", Cluster: "3.4.0"},
- "mem3": {Server: "3.4.2", Cluster: "3.4.0"},
- },
- true,
- },
- {
- "When cannot parse peer version",
- &semver.Version{Major: 3, Minor: 4},
- map[string]*version.Versions{
- "mem1": {Server: "3.4.1", Cluster: "3.4"},
- "mem2": {Server: "3.4.2-pre", Cluster: "3.4.0"},
- "mem3": {Server: "3.4.2", Cluster: "3.4.0"},
- },
- false,
- },
- {
- "When downgrade not finished",
- &semver.Version{Major: 3, Minor: 4},
- map[string]*version.Versions{
- "mem1": {Server: "3.4.1", Cluster: "3.4.0"},
- "mem2": {Server: "3.4.2-pre", Cluster: "3.4.0"},
- "mem3": {Server: "3.5.2", Cluster: "3.5.0"},
- },
- false,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- actual := isMatchedVersions(zap.NewNop(), tt.targetVersion, tt.versionMap)
- if actual != tt.expectedFinished {
- t.Errorf("expected downgrade finished is %v; got %v", tt.expectedFinished, actual)
- }
- })
- }
-}
diff --git a/server/etcdserver/corrupt.go b/server/etcdserver/corrupt.go
index 3a4bab6d5c4..5ec111bef43 100644
--- a/server/etcdserver/corrupt.go
+++ b/server/etcdserver/corrupt.go
@@ -18,51 +18,96 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
+ "sort"
"strings"
+ "sync"
"time"
+ "go.uber.org/zap"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/pkg/v3/traceutil"
- "go.etcd.io/etcd/server/v3/mvcc"
-
- "go.uber.org/zap"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
)
-// CheckInitialHashKV compares initial hash values with its peers
-// before serving any peer/client traffic. Only mismatch when hashes
-// are different at requested revision, with same compact revision.
-func (s *EtcdServer) CheckInitialHashKV() error {
- if !s.Cfg.InitialCorruptCheck {
- return nil
+type CorruptionChecker interface {
+ InitialCheck() error
+ PeriodicCheck() error
+ CompactHashCheck()
+}
+
+type corruptionChecker struct {
+ lg *zap.Logger
+
+ hasher Hasher
+
+ mux sync.RWMutex
+ latestRevisionChecked int64
+}
+
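+// Hasher abstracts the hashing and peer-communication operations the
+// corruption checker needs from EtcdServer and the KV hash storage.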
+type Hasher interface {
+ mvcc.HashStorage
+ ReqTimeout() time.Duration
+ MemberID() types.ID
+ PeerHashByRev(int64) []*peerHashKVResp
+ LinearizableReadNotify(context.Context) error
+ TriggerCorruptAlarm(types.ID)
+}
+
+func newCorruptionChecker(lg *zap.Logger, s *EtcdServer, storage mvcc.HashStorage) *corruptionChecker {
+ return &corruptionChecker{
+ lg: lg,
+ hasher: hasherAdapter{s, storage},
}
+}
- lg := s.Logger()
+type hasherAdapter struct {
+ *EtcdServer
+ mvcc.HashStorage
+}
+
+func (h hasherAdapter) ReqTimeout() time.Duration {
+ return h.EtcdServer.Cfg.ReqTimeout()
+}
+
+func (h hasherAdapter) PeerHashByRev(rev int64) []*peerHashKVResp {
+ return h.EtcdServer.getPeerHashKVs(rev)
+}
+
+func (h hasherAdapter) TriggerCorruptAlarm(memberID types.ID) {
+ h.EtcdServer.triggerCorruptAlarm(memberID)
+}
- lg.Info(
+// InitialCheck compares initial hash values with its peers
+// before serving any peer/client traffic. Only mismatch when hashes
+// are different at requested revision, with same compact revision.
+func (cm *corruptionChecker) InitialCheck() error {
+ cm.lg.Info(
"starting initial corruption check",
- zap.String("local-member-id", s.ID().String()),
- zap.Duration("timeout", s.Cfg.ReqTimeout()),
+ zap.String("local-member-id", cm.hasher.MemberID().String()),
+ zap.Duration("timeout", cm.hasher.ReqTimeout()),
)
- h, rev, crev, err := s.kv.HashByRev(0)
+ h, _, err := cm.hasher.HashByRev(0)
if err != nil {
- return fmt.Errorf("%s failed to fetch hash (%v)", s.ID(), err)
+ return fmt.Errorf("%s failed to fetch hash (%w)", cm.hasher.MemberID(), err)
}
- peers := s.getPeerHashKVs(rev)
+ peers := cm.hasher.PeerHashByRev(h.Revision)
mismatch := 0
for _, p := range peers {
if p.resp != nil {
peerID := types.ID(p.resp.Header.MemberId)
fields := []zap.Field{
- zap.String("local-member-id", s.ID().String()),
- zap.Int64("local-member-revision", rev),
- zap.Int64("local-member-compact-revision", crev),
- zap.Uint32("local-member-hash", h),
+ zap.String("local-member-id", cm.hasher.MemberID().String()),
+ zap.Int64("local-member-revision", h.Revision),
+ zap.Int64("local-member-compact-revision", h.CompactRevision),
+ zap.Uint32("local-member-hash", h.Hash),
zap.String("remote-peer-id", peerID.String()),
zap.Strings("remote-peer-endpoints", p.eps),
zap.Int64("remote-peer-revision", p.resp.Header.Revision),
@@ -70,12 +115,12 @@ func (s *EtcdServer) CheckInitialHashKV() error {
zap.Uint32("remote-peer-hash", p.resp.Hash),
}
- if h != p.resp.Hash {
- if crev == p.resp.CompactRevision {
- lg.Warn("found different hash values from remote peer", fields...)
+ if h.Hash != p.resp.Hash {
+ if h.CompactRevision == p.resp.CompactRevision {
+ cm.lg.Warn("found different hash values from remote peer", fields...)
mismatch++
} else {
- lg.Warn("found different compact revision values from remote peer", fields...)
+ cm.lg.Warn("found different compact revision values from remote peer", fields...)
}
}
@@ -83,25 +128,36 @@ func (s *EtcdServer) CheckInitialHashKV() error {
}
if p.err != nil {
- switch p.err {
- case rpctypes.ErrFutureRev:
- lg.Warn(
+ switch {
+ case errors.Is(p.err, rpctypes.ErrFutureRev):
+ cm.lg.Warn(
"cannot fetch hash from slow remote peer",
- zap.String("local-member-id", s.ID().String()),
- zap.Int64("local-member-revision", rev),
- zap.Int64("local-member-compact-revision", crev),
- zap.Uint32("local-member-hash", h),
+ zap.String("local-member-id", cm.hasher.MemberID().String()),
+ zap.Int64("local-member-revision", h.Revision),
+ zap.Int64("local-member-compact-revision", h.CompactRevision),
+ zap.Uint32("local-member-hash", h.Hash),
zap.String("remote-peer-id", p.id.String()),
zap.Strings("remote-peer-endpoints", p.eps),
zap.Error(err),
)
- case rpctypes.ErrCompacted:
- lg.Warn(
+ case errors.Is(p.err, rpctypes.ErrCompacted):
+ cm.lg.Warn(
"cannot fetch hash from remote peer; local member is behind",
- zap.String("local-member-id", s.ID().String()),
- zap.Int64("local-member-revision", rev),
- zap.Int64("local-member-compact-revision", crev),
- zap.Uint32("local-member-hash", h),
+ zap.String("local-member-id", cm.hasher.MemberID().String()),
+ zap.Int64("local-member-revision", h.Revision),
+ zap.Int64("local-member-compact-revision", h.CompactRevision),
+ zap.Uint32("local-member-hash", h.Hash),
+ zap.String("remote-peer-id", p.id.String()),
+ zap.Strings("remote-peer-endpoints", p.eps),
+ zap.Error(err),
+ )
+ case errors.Is(p.err, rpctypes.ErrClusterIDMismatch):
+ cm.lg.Warn(
+ "cluster ID mismatch",
+ zap.String("local-member-id", cm.hasher.MemberID().String()),
+ zap.Int64("local-member-revision", h.Revision),
+ zap.Int64("local-member-compact-revision", h.CompactRevision),
+ zap.Uint32("local-member-hash", h.Hash),
zap.String("remote-peer-id", p.id.String()),
zap.Strings("remote-peer-endpoints", p.eps),
zap.Error(err),
@@ -110,92 +166,55 @@ func (s *EtcdServer) CheckInitialHashKV() error {
}
}
if mismatch > 0 {
- return fmt.Errorf("%s found data inconsistency with peers", s.ID())
+ return fmt.Errorf("%s found data inconsistency with peers", cm.hasher.MemberID())
}
- lg.Info(
+ cm.lg.Info(
"initial corruption checking passed; no corruption",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", cm.hasher.MemberID().String()),
)
return nil
}
-func (s *EtcdServer) monitorKVHash() {
- t := s.Cfg.CorruptCheckTime
- if t == 0 {
- return
- }
-
- lg := s.Logger()
- lg.Info(
- "enabled corruption checking",
- zap.String("local-member-id", s.ID().String()),
- zap.Duration("interval", t),
- )
-
- for {
- select {
- case <-s.stopping:
- return
- case <-time.After(t):
- }
- if !s.isLeader() {
- continue
- }
- if err := s.checkHashKV(); err != nil {
- lg.Warn("failed to check hash KV", zap.Error(err))
- }
- }
-}
-
-func (s *EtcdServer) checkHashKV() error {
- lg := s.Logger()
-
- h, rev, crev, err := s.kv.HashByRev(0)
+func (cm *corruptionChecker) PeriodicCheck() error {
+ h, _, err := cm.hasher.HashByRev(0)
if err != nil {
return err
}
- peers := s.getPeerHashKVs(rev)
+ peers := cm.hasher.PeerHashByRev(h.Revision)
- ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
- err = s.linearizableReadNotify(ctx)
+ ctx, cancel := context.WithTimeout(context.Background(), cm.hasher.ReqTimeout())
+ err = cm.hasher.LinearizableReadNotify(ctx)
cancel()
if err != nil {
return err
}
- h2, rev2, crev2, err := s.kv.HashByRev(0)
+ h2, rev2, err := cm.hasher.HashByRev(0)
if err != nil {
return err
}
alarmed := false
- mismatch := func(id uint64) {
+ mismatch := func(id types.ID) {
if alarmed {
return
}
alarmed = true
- a := &pb.AlarmRequest{
- MemberID: id,
- Action: pb.AlarmRequest_ACTIVATE,
- Alarm: pb.AlarmType_CORRUPT,
- }
- s.GoAttach(func() {
- s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
- })
+ cm.hasher.TriggerCorruptAlarm(id)
}
- if h2 != h && rev2 == rev && crev == crev2 {
- lg.Warn(
+ if h2.Hash != h.Hash && h2.Revision == h.Revision && h.CompactRevision == h2.CompactRevision {
+ cm.lg.Warn(
"found hash mismatch",
- zap.Int64("revision-1", rev),
- zap.Int64("compact-revision-1", crev),
- zap.Uint32("hash-1", h),
- zap.Int64("revision-2", rev2),
- zap.Int64("compact-revision-2", crev2),
- zap.Uint32("hash-2", h2),
+ zap.Int64("revision-1", h.Revision),
+ zap.Int64("compact-revision-1", h.CompactRevision),
+ zap.Uint32("hash-1", h.Hash),
+ zap.Int64("revision-2", h2.Revision),
+ zap.Int64("compact-revision-2", h2.CompactRevision),
+ zap.Uint32("hash-2", h2.Hash),
)
- mismatch(uint64(s.ID()))
+ mismatch(cm.hasher.MemberID())
}
checkedCount := 0
@@ -204,47 +223,225 @@ func (s *EtcdServer) checkHashKV() error {
continue
}
checkedCount++
- id := p.resp.Header.MemberId
// leader expects follower's latest revision less than or equal to leader's
if p.resp.Header.Revision > rev2 {
- lg.Warn(
+ cm.lg.Warn(
"revision from follower must be less than or equal to leader's",
zap.Int64("leader-revision", rev2),
zap.Int64("follower-revision", p.resp.Header.Revision),
- zap.String("follower-peer-id", types.ID(id).String()),
+ zap.String("follower-peer-id", p.id.String()),
)
- mismatch(id)
+ mismatch(p.id)
}
// leader expects follower's latest compact revision less than or equal to leader's
- if p.resp.CompactRevision > crev2 {
- lg.Warn(
+ if p.resp.CompactRevision > h2.CompactRevision {
+ cm.lg.Warn(
"compact revision from follower must be less than or equal to leader's",
- zap.Int64("leader-compact-revision", crev2),
+ zap.Int64("leader-compact-revision", h2.CompactRevision),
zap.Int64("follower-compact-revision", p.resp.CompactRevision),
- zap.String("follower-peer-id", types.ID(id).String()),
+ zap.String("follower-peer-id", p.id.String()),
)
- mismatch(id)
+ mismatch(p.id)
}
// follower's compact revision is leader's old one, then hashes must match
- if p.resp.CompactRevision == crev && p.resp.Hash != h {
- lg.Warn(
+ if p.resp.CompactRevision == h.CompactRevision && p.resp.Hash != h.Hash {
+ cm.lg.Warn(
"same compact revision then hashes must match",
- zap.Int64("leader-compact-revision", crev2),
- zap.Uint32("leader-hash", h),
+ zap.Int64("leader-compact-revision", h2.CompactRevision),
+ zap.Uint32("leader-hash", h.Hash),
zap.Int64("follower-compact-revision", p.resp.CompactRevision),
zap.Uint32("follower-hash", p.resp.Hash),
- zap.String("follower-peer-id", types.ID(id).String()),
+ zap.String("follower-peer-id", p.id.String()),
)
- mismatch(id)
+ mismatch(p.id)
}
}
- lg.Info("finished peer corruption check", zap.Int("number-of-peers-checked", checkedCount))
+ cm.lg.Info("finished peer corruption check", zap.Int("number-of-peers-checked", checkedCount))
return nil
}
+// CompactHashCheck is based on the fact that 'compactions' are coordinated
+// between raft members and performed at the same revision. For each compacted
+// revision a KV store hash is computed and retained for some time.
+//
+// This method communicates with peers to find a recent common revision across
+// members, and raises an alarm if two or more members at the same compact
+// revision have different hashes.
+//
+// We might miss the opportunity to perform the check if the compaction is
+// still ongoing on one of the members, or if a member was unresponsive. In
+// such a situation the method still passes without raising an alarm.
+func (cm *corruptionChecker) CompactHashCheck() {
+ cm.lg.Info("starting compact hash check",
+ zap.String("local-member-id", cm.hasher.MemberID().String()),
+ zap.Duration("timeout", cm.hasher.ReqTimeout()),
+ )
+ hashes := cm.uncheckedRevisions()
+ // Assume that revisions are ordered from largest to smallest
+ for i, hash := range hashes {
+ peers := cm.hasher.PeerHashByRev(hash.Revision)
+ if len(peers) == 0 {
+ continue
+ }
+ if cm.checkPeerHashes(hash, peers) {
+ cm.lg.Info("finished compaction hash check", zap.Int("number-of-hashes-checked", i+1))
+ return
+ }
+ }
+ cm.lg.Info("finished compaction hash check", zap.Int("number-of-hashes-checked", len(hashes)))
+}
+
+// checkPeerHashes compares the peers' hashes against the leader's and raises alarms if corruption is detected.
+// It returns a bool indicating whether the next hash needs to be checked:
+//
+// true: the hash was successfully checked on the whole cluster, or alarms were raised, so there is no need to check the next hash
+// false: some members were skipped, so the next hash needs to be checked
+func (cm *corruptionChecker) checkPeerHashes(leaderHash mvcc.KeyValueHash, peers []*peerHashKVResp) bool {
+ leaderID := cm.hasher.MemberID()
+ hash2members := map[uint32]types.IDSlice{leaderHash.Hash: {leaderID}}
+
+ peersChecked := 0
+ // group all peers by hash
+ for _, peer := range peers {
+ skipped := false
+ reason := ""
+
+ if peer.resp == nil {
+ skipped = true
+ reason = "no response"
+ } else if peer.resp.CompactRevision != leaderHash.CompactRevision {
+ skipped = true
+ reason = fmt.Sprintf("the peer's CompactRevision %d doesn't match leader's CompactRevision %d",
+ peer.resp.CompactRevision, leaderHash.CompactRevision)
+ }
+ if skipped {
+ cm.lg.Warn("Skipped peer's hash", zap.Int("number-of-peers", len(peers)),
+ zap.String("leader-id", leaderID.String()),
+ zap.String("peer-id", peer.id.String()),
+ zap.String("reason", reason))
+ continue
+ }
+
+ peersChecked++
+ if ids, ok := hash2members[peer.resp.Hash]; !ok {
+ hash2members[peer.resp.Hash] = []types.ID{peer.id}
+ } else {
+ ids = append(ids, peer.id)
+ hash2members[peer.resp.Hash] = ids
+ }
+ }
+
+ // All members have the same CompactRevision and Hash.
+ if len(hash2members) == 1 {
+ return cm.handleConsistentHash(leaderHash, peersChecked, len(peers))
+ }
+
+ // Detected hashes mismatch
+ // The first step is to figure out the majority with the same hash.
+ memberCnt := len(peers) + 1
+ quorum := memberCnt/2 + 1
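+ // e.g. in a 5-member cluster (leader + 4 peers) the quorum is 3; a hash shared
+ // by at least 3 members is treated as the correct one.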
+ quorumExist := false
+ for k, v := range hash2members {
+ if len(v) >= quorum {
+ quorumExist = true
+ // remove the majority; alarms may be raised for the remaining members.
+ delete(hash2members, k)
+ break
+ }
+ }
+
+ if !quorumExist {
+ // If no quorum exists, we don't know which members' data are
+ // corrupted. In such a situation, we intentionally set the memberID
+ // to 0, which means the alarm affects the whole cluster.
+ cm.lg.Error("Detected compaction hash mismatch but cannot identify the corrupted members, so intentionally set the memberID as 0",
+ zap.String("leader-id", leaderID.String()),
+ zap.Int64("leader-revision", leaderHash.Revision),
+ zap.Int64("leader-compact-revision", leaderHash.CompactRevision),
+ zap.Uint32("leader-hash", leaderHash.Hash),
+ )
+ cm.hasher.TriggerCorruptAlarm(0)
+ }
+
+ // Raise an alarm for the remaining members if a quorum is present.
+ // An error log is always generated for debugging, regardless.
+ for k, v := range hash2members {
+ if quorumExist {
+ for _, pid := range v {
+ cm.hasher.TriggerCorruptAlarm(pid)
+ }
+ }
+
+ cm.lg.Error("Detected compaction hash mismatch",
+ zap.String("leader-id", leaderID.String()),
+ zap.Int64("leader-revision", leaderHash.Revision),
+ zap.Int64("leader-compact-revision", leaderHash.CompactRevision),
+ zap.Uint32("leader-hash", leaderHash.Hash),
+ zap.Uint32("peer-hash", k),
+ zap.String("peer-ids", v.String()),
+ zap.Bool("quorum-exist", quorumExist),
+ )
+ }
+
+ return true
+}
+
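+// handleConsistentHash is called when every responding peer reported the same
+// CompactRevision and Hash as the leader. It bumps latestRevisionChecked only
+// when all peers responded, so partially checked revisions are retried later.
+// It returns true when the whole cluster was checked, false otherwise.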
+func (cm *corruptionChecker) handleConsistentHash(hash mvcc.KeyValueHash, peersChecked, peerCnt int) bool {
+ if peersChecked == peerCnt {
+ cm.lg.Info("successfully checked hash on whole cluster",
+ zap.Int("number-of-peers-checked", peersChecked),
+ zap.Int64("revision", hash.Revision),
+ zap.Int64("compactRevision", hash.CompactRevision),
+ )
+ cm.mux.Lock()
+ if hash.Revision > cm.latestRevisionChecked {
+ cm.latestRevisionChecked = hash.Revision
+ }
+ cm.mux.Unlock()
+ return true
+ }
+ cm.lg.Warn("skipped revision in compaction hash check; was not able to check all peers",
+ zap.Int("number-of-peers-checked", peersChecked),
+ zap.Int("number-of-peers", peerCnt),
+ zap.Int64("revision", hash.Revision),
+ zap.Int64("compactRevision", hash.CompactRevision),
+ )
+ // This is the only case in which the next hash needs to be checked.
+ return false
+}
+
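+// uncheckedRevisions returns the stored hashes whose revisions are newer than
+// the latest revision already checked, ordered from largest to smallest. For
+// example, with hashes at revisions [1, 2, 3, 4] and latestRevisionChecked of
+// 2, only the hashes for revisions 4 and 3 are returned.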
+func (cm *corruptionChecker) uncheckedRevisions() []mvcc.KeyValueHash {
+ cm.mux.RLock()
+ lastRevisionChecked := cm.latestRevisionChecked
+ cm.mux.RUnlock()
+
+ hashes := cm.hasher.Hashes()
+ // Sort in descending order
+ sort.Slice(hashes, func(i, j int) bool {
+ return hashes[i].Revision > hashes[j].Revision
+ })
+ for i, hash := range hashes {
+ if hash.Revision <= lastRevisionChecked {
+ return hashes[:i]
+ }
+ }
+ return hashes
+}
+
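+// triggerCorruptAlarm asynchronously submits an ACTIVATE CORRUPT alarm request
+// for the given member through raft.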
+func (s *EtcdServer) triggerCorruptAlarm(id types.ID) {
+ a := &pb.AlarmRequest{
+ MemberID: uint64(id),
+ Action: pb.AlarmRequest_ACTIVATE,
+ Alarm: pb.AlarmType_CORRUPT,
+ }
+ s.GoAttach(func() {
+ s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
+ })
+}
+
type peerInfo struct {
id types.ID
eps []string
@@ -262,7 +459,7 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp {
members := s.cluster.Members()
peers := make([]peerInfo, 0, len(members))
for _, m := range members {
- if m.ID == s.ID() {
+ if m.ID == s.MemberID() {
continue
}
peers = append(peers, peerInfo{id: m.ID, eps: m.PeerURLs})
@@ -270,6 +467,12 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp {
lg := s.Logger()
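+ // the client reuses the peer transport and never follows redirects:
+ // returning http.ErrUseLastResponse makes it hand back the redirect response as-is.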
+ cc := &http.Client{
+ Transport: s.peerRt,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ }
var resps []*peerHashKVResp
for _, p := range peers {
if len(p.eps) == 0 {
@@ -279,8 +482,10 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp {
respsLen := len(resps)
var lastErr error
for _, ep := range p.eps {
+ var resp *pb.HashKVResponse
+
ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
- resp, lastErr := s.getPeerHashKVHTTP(ctx, ep, rev)
+ resp, lastErr = HashByRev(ctx, s.cluster.ID(), cc, ep, rev)
cancel()
if lastErr == nil {
resps = append(resps, &peerHashKVResp{peerInfo: p, resp: resp, err: nil})
@@ -288,7 +493,7 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp {
}
lg.Warn(
"failed hash kv request",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.Int64("requested-revision", rev),
zap.String("remote-peer-endpoint", ep),
zap.Error(lastErr),
@@ -303,40 +508,6 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp {
return resps
}
-type applierV3Corrupt struct {
- applierV3
-}
-
-func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} }
-
-func (a *applierV3Corrupt) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
- return nil, nil, ErrCorrupt
-}
-
-func (a *applierV3Corrupt) Range(ctx context.Context, txn mvcc.TxnRead, p *pb.RangeRequest) (*pb.RangeResponse, error) {
- return nil, ErrCorrupt
-}
-
-func (a *applierV3Corrupt) DeleteRange(txn mvcc.TxnWrite, p *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
- return nil, ErrCorrupt
-}
-
-func (a *applierV3Corrupt) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
- return nil, nil, ErrCorrupt
-}
-
-func (a *applierV3Corrupt) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) {
- return nil, nil, nil, ErrCorrupt
-}
-
-func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- return nil, ErrCorrupt
-}
-
-func (a *applierV3Corrupt) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
- return nil, ErrCorrupt
-}
-
const PeerHashKVPath = "/members/hashkv"
type hashKVHandler struct {
@@ -358,21 +529,25 @@ func (h *hashKVHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
http.Error(w, "bad path", http.StatusBadRequest)
return
}
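+ // reject hash requests that carry a different cluster ID, so hashes are never
+ // compared across clusters.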
+ if gcid := r.Header.Get("X-Etcd-Cluster-ID"); gcid != "" && gcid != h.server.cluster.ID().String() {
+ http.Error(w, rafthttp.ErrClusterIDMismatch.Error(), http.StatusPreconditionFailed)
+ return
+ }
defer r.Body.Close()
- b, err := ioutil.ReadAll(r.Body)
+ b, err := io.ReadAll(r.Body)
if err != nil {
http.Error(w, "error reading body", http.StatusBadRequest)
return
}
req := &pb.HashKVRequest{}
- if err := json.Unmarshal(b, req); err != nil {
+ if err = json.Unmarshal(b, req); err != nil {
h.lg.Warn("failed to unmarshal request", zap.Error(err))
http.Error(w, "error unmarshalling request", http.StatusBadRequest)
return
}
- hash, rev, compactRev, err := h.server.KV().HashByRev(req.Revision)
+ hash, rev, err := h.server.KV().HashStorage().HashByRev(req.Revision)
if err != nil {
h.lg.Warn(
"failed to get hashKV",
@@ -382,7 +557,12 @@ func (h *hashKVHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
- resp := &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: hash, CompactRevision: compactRev}
+ resp := &pb.HashKVResponse{
+ Header: &pb.ResponseHeader{Revision: rev},
+ Hash: hash.Hash,
+ CompactRevision: hash.CompactRevision,
+ HashRevision: hash.Revision,
+ }
respBytes, err := json.Marshal(resp)
if err != nil {
h.lg.Warn("failed to marshal hashKV response", zap.Error(err))
@@ -395,21 +575,21 @@ func (h *hashKVHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Write(respBytes)
}
-// getPeerHashKVHTTP fetch hash of kv store at the given rev via http call to the given url
-func (s *EtcdServer) getPeerHashKVHTTP(ctx context.Context, url string, rev int64) (*pb.HashKVResponse, error) {
- cc := &http.Client{Transport: s.peerRt}
+// HashByRev fetches the hash of the kv store at the given rev via an http call to the given url
+func HashByRev(ctx context.Context, cid types.ID, cc *http.Client, url string, rev int64) (*pb.HashKVResponse, error) {
hashReq := &pb.HashKVRequest{Revision: rev}
hashReqBytes, err := json.Marshal(hashReq)
if err != nil {
return nil, err
}
- requestUrl := url + PeerHashKVPath
- req, err := http.NewRequest(http.MethodGet, requestUrl, bytes.NewReader(hashReqBytes))
+ requestURL := url + PeerHashKVPath
+ req, err := http.NewRequest(http.MethodGet, requestURL, bytes.NewReader(hashReqBytes))
if err != nil {
return nil, err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("X-Etcd-Cluster-ID", cid.String())
req.Cancel = ctx.Done()
resp, err := cc.Do(req)
@@ -417,7 +597,7 @@ func (s *EtcdServer) getPeerHashKVHTTP(ctx context.Context, url string, rev int6
return nil, err
}
defer resp.Body.Close()
- b, err := ioutil.ReadAll(resp.Body)
+ b, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
@@ -429,9 +609,13 @@ func (s *EtcdServer) getPeerHashKVHTTP(ctx context.Context, url string, rev int6
if strings.Contains(string(b), mvcc.ErrFutureRev.Error()) {
return nil, rpctypes.ErrFutureRev
}
+ } else if resp.StatusCode == http.StatusPreconditionFailed {
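+ // the peer-side handler returns 412 when the X-Etcd-Cluster-ID header does not match its own cluster ID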
+ if strings.Contains(string(b), rafthttp.ErrClusterIDMismatch.Error()) {
+ return nil, rpctypes.ErrClusterIDMismatch
+ }
}
if resp.StatusCode != http.StatusOK {
- return nil, fmt.Errorf("unknown error: %s", string(b))
+ return nil, fmt.Errorf("unknown error: %s", b)
}
hashResp := &pb.HashKVResponse{}
diff --git a/server/etcdserver/corrupt_test.go b/server/etcdserver/corrupt_test.go
new file mode 100644
index 00000000000..6001542d348
--- /dev/null
+++ b/server/etcdserver/corrupt_test.go
@@ -0,0 +1,595 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/lease"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+)
+
+func TestInitialCheck(t *testing.T) {
+ tcs := []struct {
+ name string
+ hasher fakeHasher
+ expectError bool
+ expectCorrupt bool
+ expectActions []string
+ }{
+ {
+ name: "No peers",
+ hasher: fakeHasher{
+ hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Revision: 10}}},
+ },
+ expectActions: []string{"MemberID()", "ReqTimeout()", "HashByRev(0)", "PeerHashByRev(10)", "MemberID()"},
+ },
+ {
+ name: "Error getting hash",
+ hasher: fakeHasher{hashByRevResponses: []hashByRev{{err: fmt.Errorf("error getting hash")}}},
+ expectActions: []string{"MemberID()", "ReqTimeout()", "HashByRev(0)", "MemberID()"},
+ expectError: true,
+ },
+ {
+ name: "Peer with empty response",
+ hasher: fakeHasher{peerHashes: []*peerHashKVResp{{}}},
+ expectActions: []string{"MemberID()", "ReqTimeout()", "HashByRev(0)", "PeerHashByRev(0)", "MemberID()"},
+ },
+ {
+ name: "Peer returned ErrFutureRev",
+ hasher: fakeHasher{peerHashes: []*peerHashKVResp{{err: rpctypes.ErrFutureRev}}},
+ expectActions: []string{"MemberID()", "ReqTimeout()", "HashByRev(0)", "PeerHashByRev(0)", "MemberID()", "MemberID()"},
+ },
+ {
+ name: "Peer returned ErrCompacted",
+ hasher: fakeHasher{peerHashes: []*peerHashKVResp{{err: rpctypes.ErrCompacted}}},
+ expectActions: []string{"MemberID()", "ReqTimeout()", "HashByRev(0)", "PeerHashByRev(0)", "MemberID()", "MemberID()"},
+ },
+ {
+ name: "Peer returned other error",
+ hasher: fakeHasher{peerHashes: []*peerHashKVResp{{err: rpctypes.ErrCorrupt}}},
+ expectActions: []string{"MemberID()", "ReqTimeout()", "HashByRev(0)", "PeerHashByRev(0)", "MemberID()"},
+ },
+ {
+ name: "Peer returned same hash",
+ hasher: fakeHasher{hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Hash: 1}}}, peerHashes: []*peerHashKVResp{{resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{}, Hash: 1}}}},
+ expectActions: []string{"MemberID()", "ReqTimeout()", "HashByRev(0)", "PeerHashByRev(0)", "MemberID()", "MemberID()"},
+ },
+ {
+ name: "Peer returned different hash with same compaction rev",
+ hasher: fakeHasher{hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Hash: 1, CompactRevision: 1}}}, peerHashes: []*peerHashKVResp{{resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{}, Hash: 2, CompactRevision: 1}}}},
+ expectActions: []string{"MemberID()", "ReqTimeout()", "HashByRev(0)", "PeerHashByRev(0)", "MemberID()", "MemberID()"},
+ expectError: true,
+ },
+ {
+ name: "Peer returned different hash and compaction rev",
+ hasher: fakeHasher{hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Hash: 1, CompactRevision: 1}}}, peerHashes: []*peerHashKVResp{{resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{}, Hash: 2, CompactRevision: 2}}}},
+ expectActions: []string{"MemberID()", "ReqTimeout()", "HashByRev(0)", "PeerHashByRev(0)", "MemberID()", "MemberID()"},
+ },
+ {
+ name: "Cluster ID Mismatch does not fail CorruptionChecker.InitialCheck()",
+ hasher: fakeHasher{
+ peerHashes: []*peerHashKVResp{{err: rpctypes.ErrClusterIDMismatch}},
+ },
+ expectActions: []string{"MemberID()", "ReqTimeout()", "HashByRev(0)", "PeerHashByRev(0)", "MemberID()", "MemberID()"},
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ monitor := corruptionChecker{
+ lg: zaptest.NewLogger(t),
+ hasher: &tc.hasher,
+ }
+ err := monitor.InitialCheck()
+ if gotError := err != nil; gotError != tc.expectError {
+ t.Errorf("Unexpected error, got: %v, expected?: %v", err, tc.expectError)
+ }
+ if tc.hasher.alarmTriggered != tc.expectCorrupt {
+ t.Errorf("Unexpected corrupt triggered, got: %v, expected?: %v", tc.hasher.alarmTriggered, tc.expectCorrupt)
+ }
+ assert.Equal(t, tc.expectActions, tc.hasher.actions)
+ })
+ }
+}
+
+func TestPeriodicCheck(t *testing.T) {
+ tcs := []struct {
+ name string
+ hasher fakeHasher
+ expectError bool
+ expectCorrupt bool
+ expectActions []string
+ }{
+ {
+ name: "Same local hash and no peers",
+ hasher: fakeHasher{hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Revision: 10}}, {hash: mvcc.KeyValueHash{Revision: 10}}}},
+ expectActions: []string{"HashByRev(0)", "PeerHashByRev(10)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)"},
+ },
+ {
+ name: "Error getting hash first time",
+ hasher: fakeHasher{hashByRevResponses: []hashByRev{{err: fmt.Errorf("error getting hash")}}},
+ expectActions: []string{"HashByRev(0)"},
+ expectError: true,
+ },
+ {
+ name: "Error getting hash second time",
+ hasher: fakeHasher{hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Revision: 11}}, {err: fmt.Errorf("error getting hash")}}},
+ expectActions: []string{"HashByRev(0)", "PeerHashByRev(11)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)"},
+ expectError: true,
+ },
+ {
+ name: "Error linearizableReadNotify",
+ hasher: fakeHasher{linearizableReadNotify: fmt.Errorf("error getting linearizableReadNotify")},
+ expectActions: []string{"HashByRev(0)", "PeerHashByRev(0)", "ReqTimeout()", "LinearizableReadNotify()"},
+ expectError: true,
+ },
+ {
+ name: "Different local hash and revision",
+ hasher: fakeHasher{hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Hash: 1, Revision: 1}, revision: 1}, {hash: mvcc.KeyValueHash{Hash: 2}, revision: 2}}},
+ expectActions: []string{"HashByRev(0)", "PeerHashByRev(1)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)"},
+ },
+ {
+ name: "Different local hash and compaction revision",
+ hasher: fakeHasher{hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Hash: 1, CompactRevision: 1}}, {hash: mvcc.KeyValueHash{Hash: 2, CompactRevision: 2}}}},
+ expectActions: []string{"HashByRev(0)", "PeerHashByRev(0)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)"},
+ },
+ {
+ name: "Different local hash and same revisions",
+ hasher: fakeHasher{hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Hash: 1, CompactRevision: 1, Revision: 1}, revision: 1}, {hash: mvcc.KeyValueHash{Hash: 2, CompactRevision: 1, Revision: 1}, revision: 1}}},
+ expectActions: []string{"HashByRev(0)", "PeerHashByRev(1)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)", "MemberID()", "TriggerCorruptAlarm(1)"},
+ expectCorrupt: true,
+ },
+ {
+ name: "Peer with nil response",
+ hasher: fakeHasher{
+ peerHashes: []*peerHashKVResp{{}},
+ },
+ expectActions: []string{"HashByRev(0)", "PeerHashByRev(0)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)"},
+ },
+ {
+ name: "Peer with newer revision",
+ hasher: fakeHasher{
+ peerHashes: []*peerHashKVResp{{peerInfo: peerInfo{id: 42}, resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: 1}}}},
+ },
+ expectActions: []string{"HashByRev(0)", "PeerHashByRev(0)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)", "TriggerCorruptAlarm(42)"},
+ expectCorrupt: true,
+ },
+ {
+ name: "Peer with newer compact revision",
+ hasher: fakeHasher{
+ peerHashes: []*peerHashKVResp{{peerInfo: peerInfo{id: 88}, resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: 10}, CompactRevision: 2}}},
+ },
+ expectActions: []string{"HashByRev(0)", "PeerHashByRev(0)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)", "TriggerCorruptAlarm(88)"},
+ expectCorrupt: true,
+ },
+ {
+ name: "Peer with same hash and compact revision",
+ hasher: fakeHasher{
+ hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Hash: 1, CompactRevision: 1, Revision: 1}, revision: 1}, {hash: mvcc.KeyValueHash{Hash: 2, CompactRevision: 2, Revision: 2}, revision: 2}},
+ peerHashes: []*peerHashKVResp{{resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: 1}, CompactRevision: 1, Hash: 1}}},
+ },
+ expectActions: []string{"HashByRev(0)", "PeerHashByRev(1)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)"},
+ },
+ {
+ name: "Peer with different hash and same compact revision as first local",
+ hasher: fakeHasher{
+ hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Hash: 1, CompactRevision: 1, Revision: 1}, revision: 1}, {hash: mvcc.KeyValueHash{Hash: 2, CompactRevision: 2}, revision: 2}},
+ peerHashes: []*peerHashKVResp{{peerInfo: peerInfo{id: 666}, resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: 1}, CompactRevision: 1, Hash: 2}}},
+ },
+ expectActions: []string{"HashByRev(0)", "PeerHashByRev(1)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)", "TriggerCorruptAlarm(666)"},
+ expectCorrupt: true,
+ },
+ {
+ name: "Multiple corrupted peers trigger one alarm",
+ hasher: fakeHasher{
+ peerHashes: []*peerHashKVResp{
+ {peerInfo: peerInfo{id: 88}, resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: 10}, CompactRevision: 2}},
+ {peerInfo: peerInfo{id: 89}, resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: 10}, CompactRevision: 2}},
+ },
+ },
+ expectActions: []string{"HashByRev(0)", "PeerHashByRev(0)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)", "TriggerCorruptAlarm(88)"},
+ expectCorrupt: true,
+ },
+ {
+ name: "Cluster ID Mismatch does not fail CorruptionChecker.PeriodicCheck()",
+ hasher: fakeHasher{
+ peerHashes: []*peerHashKVResp{{err: rpctypes.ErrClusterIDMismatch}},
+ },
+ expectActions: []string{"HashByRev(0)", "PeerHashByRev(0)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)"},
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ monitor := corruptionChecker{
+ lg: zaptest.NewLogger(t),
+ hasher: &tc.hasher,
+ }
+ err := monitor.PeriodicCheck()
+ if gotError := err != nil; gotError != tc.expectError {
+ t.Errorf("Unexpected error, got: %v, expected?: %v", err, tc.expectError)
+ }
+ if tc.hasher.alarmTriggered != tc.expectCorrupt {
+ t.Errorf("Unexpected corrupt triggered, got: %v, expected?: %v", tc.hasher.alarmTriggered, tc.expectCorrupt)
+ }
+ assert.Equal(t, tc.expectActions, tc.hasher.actions)
+ })
+ }
+}
+
+func TestCompactHashCheck(t *testing.T) {
+ tcs := []struct {
+ name string
+ hasher fakeHasher
+ lastRevisionChecked int64
+
+ expectError bool
+ expectCorrupt bool
+ expectActions []string
+ expectLastRevisionChecked int64
+ }{
+ {
+ name: "No hashes",
+ expectActions: []string{"MemberID()", "ReqTimeout()", "Hashes()"},
+ },
+ {
+ name: "No peers, check new checked from largest to smallest",
+ hasher: fakeHasher{
+ hashes: []mvcc.KeyValueHash{{Revision: 1}, {Revision: 2}, {Revision: 3}, {Revision: 4}},
+ },
+ lastRevisionChecked: 2,
+ expectActions: []string{"MemberID()", "ReqTimeout()", "Hashes()", "PeerHashByRev(4)", "PeerHashByRev(3)"},
+ expectLastRevisionChecked: 2,
+ },
+ {
+ name: "Peer error",
+ hasher: fakeHasher{
+ hashes: []mvcc.KeyValueHash{{Revision: 1}, {Revision: 2}},
+ peerHashes: []*peerHashKVResp{{err: fmt.Errorf("failed getting hash")}},
+ },
+ expectActions: []string{"MemberID()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberID()", "PeerHashByRev(1)", "MemberID()"},
+ },
+ {
+ name: "Peer returned different compaction revision is skipped",
+ hasher: fakeHasher{
+ hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1}, {Revision: 2, CompactRevision: 2}},
+ peerHashes: []*peerHashKVResp{{resp: &pb.HashKVResponse{CompactRevision: 3}}},
+ },
+ expectActions: []string{"MemberID()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberID()", "PeerHashByRev(1)", "MemberID()"},
+ },
+ {
+ name: "Etcd can identify two corrupted members in 5 member cluster",
+ hasher: fakeHasher{
+ hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 1}, {Revision: 2, CompactRevision: 1, Hash: 2}},
+ peerHashes: []*peerHashKVResp{
+ {peerInfo: peerInfo{id: 42}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}},
+ {peerInfo: peerInfo{id: 43}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}},
+ {peerInfo: peerInfo{id: 44}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 7}},
+ {peerInfo: peerInfo{id: 45}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 7}},
+ },
+ },
+ expectActions: []string{"MemberID()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberID()", "TriggerCorruptAlarm(44)", "TriggerCorruptAlarm(45)"},
+ expectCorrupt: true,
+ },
+ {
+ name: "Etcd checks next hash when one member is unresponsive in 3 member cluster",
+ hasher: fakeHasher{
+ hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 2}, {Revision: 2, CompactRevision: 1, Hash: 2}},
+ peerHashes: []*peerHashKVResp{
+ {err: fmt.Errorf("failed getting hash")},
+ {peerInfo: peerInfo{id: 43}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}},
+ },
+ },
+ expectActions: []string{"MemberID()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberID()", "PeerHashByRev(1)", "MemberID()"},
+ expectCorrupt: false,
+ },
+ {
+ name: "Etcd can identify single corrupted member in 3 member cluster",
+ hasher: fakeHasher{
+ hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 2}, {Revision: 2, CompactRevision: 1, Hash: 2}},
+ peerHashes: []*peerHashKVResp{
+ {peerInfo: peerInfo{id: 42}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}},
+ {peerInfo: peerInfo{id: 43}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 3}},
+ },
+ },
+ expectActions: []string{"MemberID()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberID()", "TriggerCorruptAlarm(43)"},
+ expectCorrupt: true,
+ },
+ {
+ name: "Etcd can identify single corrupted member in 5 member cluster",
+ hasher: fakeHasher{
+ hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 2}, {Revision: 2, CompactRevision: 1, Hash: 2}},
+ peerHashes: []*peerHashKVResp{
+ {peerInfo: peerInfo{id: 42}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}},
+ {peerInfo: peerInfo{id: 43}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}},
+ {peerInfo: peerInfo{id: 44}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 3}},
+ {peerInfo: peerInfo{id: 45}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}},
+ },
+ },
+ expectActions: []string{"MemberID()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberID()", "TriggerCorruptAlarm(44)"},
+ expectCorrupt: true,
+ },
+ {
+ name: "Etcd triggers corrupted alarm on whole cluster if in 3 member cluster one member is down and one member corrupted",
+ hasher: fakeHasher{
+ hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 2}, {Revision: 2, CompactRevision: 1, Hash: 2}},
+ peerHashes: []*peerHashKVResp{
+ {err: fmt.Errorf("failed getting hash")},
+ {peerInfo: peerInfo{id: 43}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 3}},
+ },
+ },
+ expectActions: []string{"MemberID()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberID()", "TriggerCorruptAlarm(0)"},
+ expectCorrupt: true,
+ },
+ {
+ name: "Etcd triggers corrupted alarm on whole cluster if no quorum in 5 member cluster",
+ hasher: fakeHasher{
+ hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 1}, {Revision: 2, CompactRevision: 1, Hash: 2}},
+ peerHashes: []*peerHashKVResp{
+ {peerInfo: peerInfo{id: 42}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}},
+ {peerInfo: peerInfo{id: 43}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 3}},
+ {peerInfo: peerInfo{id: 44}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 3}},
+ {peerInfo: peerInfo{id: 45}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 3}},
+ {peerInfo: peerInfo{id: 46}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 4}},
+ {peerInfo: peerInfo{id: 47}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}},
+ },
+ },
+ expectActions: []string{"MemberID()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberID()", "TriggerCorruptAlarm(0)"},
+ expectCorrupt: true,
+ },
+ {
+ name: "Etcd can identify corrupted member in 5 member cluster even if one member is down",
+ hasher: fakeHasher{
+ hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 2}, {Revision: 2, CompactRevision: 1, Hash: 2}},
+ peerHashes: []*peerHashKVResp{
+ {peerInfo: peerInfo{id: 42}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}},
+ {err: fmt.Errorf("failed getting hash")},
+ {peerInfo: peerInfo{id: 44}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 3}},
+ {peerInfo: peerInfo{id: 45}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}},
+ },
+ },
+ expectActions: []string{"MemberID()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberID()", "TriggerCorruptAlarm(44)"},
+ expectCorrupt: true,
+ },
+ {
+ name: "Etcd can identify that leader is corrupted",
+ hasher: fakeHasher{
+ hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 2}, {Revision: 2, CompactRevision: 1, Hash: 2}},
+ peerHashes: []*peerHashKVResp{
+ {peerInfo: peerInfo{id: 42}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 3}},
+ {peerInfo: peerInfo{id: 43}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 3}},
+ },
+ },
+ expectActions: []string{"MemberID()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberID()", "TriggerCorruptAlarm(1)"},
+ expectCorrupt: true,
+ },
+ {
+ name: "Peer returned same hash bumps last revision checked",
+ hasher: fakeHasher{
+ hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 1}, {Revision: 2, CompactRevision: 1, Hash: 1}},
+ peerHashes: []*peerHashKVResp{{resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{MemberId: 42}, CompactRevision: 1, Hash: 1}}},
+ },
+ expectActions: []string{"MemberID()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberID()"},
+ expectLastRevisionChecked: 2,
+ },
+ {
+ name: "Only one peer succeeded check",
+ hasher: fakeHasher{
+ hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 1}},
+ peerHashes: []*peerHashKVResp{
+ {resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{MemberId: 42}, CompactRevision: 1, Hash: 1}},
+ {err: fmt.Errorf("failed getting hash")},
+ },
+ },
+ expectActions: []string{"MemberID()", "ReqTimeout()", "Hashes()", "PeerHashByRev(1)", "MemberID()"},
+ },
+ {
+ name: "Cluster ID Mismatch does not fail CorruptionChecker.CompactHashCheck()",
+ hasher: fakeHasher{
+ hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 1}},
+ peerHashes: []*peerHashKVResp{{err: rpctypes.ErrClusterIDMismatch}},
+ },
+ expectActions: []string{"MemberID()", "ReqTimeout()", "Hashes()", "PeerHashByRev(1)", "MemberID()"},
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ monitor := corruptionChecker{
+ latestRevisionChecked: tc.lastRevisionChecked,
+ lg: zaptest.NewLogger(t),
+ hasher: &tc.hasher,
+ }
+ monitor.CompactHashCheck()
+ if tc.hasher.alarmTriggered != tc.expectCorrupt {
+ t.Errorf("Unexpected corrupt triggered, got: %v, expected?: %v", tc.hasher.alarmTriggered, tc.expectCorrupt)
+ }
+ if tc.expectLastRevisionChecked != monitor.latestRevisionChecked {
+ t.Errorf("Unexpected last revision checked, got: %v, expected?: %v", monitor.latestRevisionChecked, tc.expectLastRevisionChecked)
+ }
+ assert.Equal(t, tc.expectActions, tc.hasher.actions)
+ })
+ }
+}
+
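+// fakeHasher is the test double backing the corruption checker tests: it records
+// every call in actions so tests can assert the exact call sequence, and the
+// canned fields drive each scenario's responses.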
+type fakeHasher struct {
+ peerHashes []*peerHashKVResp
+ hashByRevIndex int
+ hashByRevResponses []hashByRev
+ linearizableReadNotify error
+ hashes []mvcc.KeyValueHash
+
+ alarmTriggered bool
+ actions []string
+}
+
+type hashByRev struct {
+ hash mvcc.KeyValueHash
+ revision int64
+ err error
+}
+
+func (f *fakeHasher) Hash() (hash uint32, revision int64, err error) {
+ panic("not implemented")
+}
+
+func (f *fakeHasher) HashByRev(rev int64) (hash mvcc.KeyValueHash, revision int64, err error) {
+ f.actions = append(f.actions, fmt.Sprintf("HashByRev(%d)", rev))
+ if len(f.hashByRevResponses) == 0 {
+ return mvcc.KeyValueHash{}, 0, nil
+ }
+ hashByRev := f.hashByRevResponses[f.hashByRevIndex]
+ f.hashByRevIndex++
+ return hashByRev.hash, hashByRev.revision, hashByRev.err
+}
+
+func (f *fakeHasher) Store(hash mvcc.KeyValueHash) {
+ f.actions = append(f.actions, fmt.Sprintf("Store(%v)", hash))
+ f.hashes = append(f.hashes, hash)
+}
+
+func (f *fakeHasher) Hashes() []mvcc.KeyValueHash {
+ f.actions = append(f.actions, "Hashes()")
+ return f.hashes
+}
+
+func (f *fakeHasher) ReqTimeout() time.Duration {
+ f.actions = append(f.actions, "ReqTimeout()")
+ return time.Second
+}
+
+func (f *fakeHasher) MemberID() types.ID {
+ f.actions = append(f.actions, "MemberID()")
+ return 1
+}
+
+func (f *fakeHasher) PeerHashByRev(rev int64) []*peerHashKVResp {
+ f.actions = append(f.actions, fmt.Sprintf("PeerHashByRev(%d)", rev))
+ return f.peerHashes
+}
+
+func (f *fakeHasher) LinearizableReadNotify(ctx context.Context) error {
+ f.actions = append(f.actions, "LinearizableReadNotify()")
+ return f.linearizableReadNotify
+}
+
+func (f *fakeHasher) TriggerCorruptAlarm(memberID types.ID) {
+ f.actions = append(f.actions, fmt.Sprintf("TriggerCorruptAlarm(%d)", memberID))
+ f.alarmTriggered = true
+}
+
+func TestHashKVHandler(t *testing.T) {
+ remoteClusterID := 111195
+ localClusterID := 111196
+ revision := 1
+
+ etcdSrv := &EtcdServer{}
+ etcdSrv.cluster = newTestCluster(t)
+ etcdSrv.cluster.SetID(types.ID(localClusterID), types.ID(localClusterID))
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
+ etcdSrv.kv = mvcc.New(zap.NewNop(), be, &lease.FakeLessor{}, mvcc.StoreConfig{})
+ defer func() {
+ assert.NoError(t, etcdSrv.kv.Close())
+ }()
+ ph := &hashKVHandler{
+ lg: zap.NewNop(),
+ server: etcdSrv,
+ }
+ srv := httptest.NewServer(ph)
+ defer srv.Close()
+
+ tests := []struct {
+ name string
+ remoteClusterID int
+ wcode int
+ wKeyWords string
+ }{
+ {
+ name: "HashKV returns 200 if cluster ID matches",
+ remoteClusterID: localClusterID,
+ wcode: http.StatusOK,
+ wKeyWords: "",
+ },
+ {
+ name: "HashKV returns 412 if cluster ID doesn't match",
+ remoteClusterID: remoteClusterID,
+ wcode: http.StatusPreconditionFailed,
+ wKeyWords: "cluster ID mismatch",
+ },
+ }
+ for i, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ hashReq := &pb.HashKVRequest{Revision: int64(revision)}
+ hashReqBytes, err := json.Marshal(hashReq)
+ if err != nil {
+ t.Fatalf("failed to marshal request: %v", err)
+ }
+ req, err := http.NewRequest(http.MethodGet, srv.URL+PeerHashKVPath, bytes.NewReader(hashReqBytes))
+ if err != nil {
+ t.Fatalf("failed to create request: %v", err)
+ }
+ req.Header.Set("X-Etcd-Cluster-ID", strconv.FormatUint(uint64(tt.remoteClusterID), 16))
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ t.Fatalf("failed to get http response: %v", err)
+ }
+ body, err := io.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ t.Fatalf("unexpected io.ReadAll error: %v", err)
+ }
+ if resp.StatusCode != tt.wcode {
+ t.Fatalf("#%d: code = %d, want %d", i, resp.StatusCode, tt.wcode)
+ }
+ if resp.StatusCode != http.StatusOK {
+ if !strings.Contains(string(body), tt.wKeyWords) {
+ t.Errorf("#%d: body: %s, want body to contain keywords: %s", i, body, tt.wKeyWords)
+ }
+ return
+ }
+
+ hashKVResponse := pb.HashKVResponse{}
+ err = json.Unmarshal(body, &hashKVResponse)
+ if err != nil {
+ t.Fatalf("unmarshal response error: %v", err)
+ }
+ hashValue, _, err := etcdSrv.KV().HashStorage().HashByRev(int64(revision))
+ if err != nil {
+ t.Fatalf("etcd server hash failed: %v", err)
+ }
+ if hashKVResponse.Hash != hashValue.Hash {
+ t.Fatalf("hash value inconsistent: %d != %d", hashKVResponse.Hash, hashValue)
+ }
+ })
+ }
+}
diff --git a/server/etcdserver/errors.go b/server/etcdserver/errors.go
deleted file mode 100644
index dc2a85fdd47..00000000000
--- a/server/etcdserver/errors.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "errors"
- "fmt"
-)
-
-var (
- ErrUnknownMethod = errors.New("etcdserver: unknown method")
- ErrStopped = errors.New("etcdserver: server stopped")
- ErrCanceled = errors.New("etcdserver: request cancelled")
- ErrTimeout = errors.New("etcdserver: request timed out")
- ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure")
- ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost")
- ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long")
- ErrLeaderChanged = errors.New("etcdserver: leader changed")
- ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members")
- ErrLearnerNotReady = errors.New("etcdserver: can only promote a learner member which is in sync with leader")
- ErrNoLeader = errors.New("etcdserver: no leader")
- ErrNotLeader = errors.New("etcdserver: not leader")
- ErrRequestTooLarge = errors.New("etcdserver: request is too large")
- ErrNoSpace = errors.New("etcdserver: no space")
- ErrTooManyRequests = errors.New("etcdserver: too many requests")
- ErrUnhealthy = errors.New("etcdserver: unhealthy cluster")
- ErrKeyNotFound = errors.New("etcdserver: key not found")
- ErrCorrupt = errors.New("etcdserver: corrupt cluster")
- ErrBadLeaderTransferee = errors.New("etcdserver: bad leader transferee")
- ErrClusterVersionUnavailable = errors.New("etcdserver: cluster version not found during downgrade")
- ErrWrongDowngradeVersionFormat = errors.New("etcdserver: wrong downgrade target version format")
- ErrInvalidDowngradeTargetVersion = errors.New("etcdserver: invalid downgrade target version")
- ErrDowngradeInProcess = errors.New("etcdserver: cluster has a downgrade job in progress")
- ErrNoInflightDowngrade = errors.New("etcdserver: no inflight downgrade job")
-)
-
-type DiscoveryError struct {
- Op string
- Err error
-}
-
-func (e DiscoveryError) Error() string {
- return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err)
-}
diff --git a/server/etcdserver/errors/errors.go b/server/etcdserver/errors/errors.go
new file mode 100644
index 00000000000..8de698a1df3
--- /dev/null
+++ b/server/etcdserver/errors/errors.go
@@ -0,0 +1,54 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+ "errors"
+ "fmt"
+)
+
+var (
+ ErrUnknownMethod = errors.New("etcdserver: unknown method")
+ ErrStopped = errors.New("etcdserver: server stopped")
+ ErrCanceled = errors.New("etcdserver: request cancelled")
+ ErrTimeout = errors.New("etcdserver: request timed out")
+ ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure")
+ ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost")
+ ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long")
+ ErrTimeoutWaitAppliedIndex = errors.New("etcdserver: request timed out, waiting for the applied index took too long")
+ ErrLeaderChanged = errors.New("etcdserver: leader changed")
+ ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members")
+ ErrLearnerNotReady = errors.New("etcdserver: can only promote a learner member which is in sync with leader")
+ ErrNoLeader = errors.New("etcdserver: no leader")
+ ErrNotLeader = errors.New("etcdserver: not leader")
+ ErrRequestTooLarge = errors.New("etcdserver: request is too large")
+ ErrNoSpace = errors.New("etcdserver: no space")
+ ErrTooManyRequests = errors.New("etcdserver: too many requests")
+ ErrUnhealthy = errors.New("etcdserver: unhealthy cluster")
+ ErrCorrupt = errors.New("etcdserver: corrupt cluster")
+ ErrBadLeaderTransferee = errors.New("etcdserver: bad leader transferee")
+ ErrClusterVersionUnavailable = errors.New("etcdserver: cluster version not found during downgrade")
+ ErrWrongDowngradeVersionFormat = errors.New("etcdserver: wrong downgrade target version format")
+ ErrKeyNotFound = errors.New("etcdserver: key not found")
+)
+
+type DiscoveryError struct {
+ Op string
+ Err error
+}
+
+func (e DiscoveryError) Error() string {
+ return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err)
+}
diff --git a/server/etcdserver/metrics.go b/server/etcdserver/metrics.go
index 06263a9cd29..4be44a57a6c 100644
--- a/server/etcdserver/metrics.go
+++ b/server/etcdserver/metrics.go
@@ -18,11 +18,11 @@ import (
goruntime "runtime"
"time"
- "go.etcd.io/etcd/api/v3/version"
- "go.etcd.io/etcd/pkg/v3/runtime"
-
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/pkg/v3/runtime"
)
var (
@@ -44,12 +44,6 @@ var (
Name: "leader_changes_seen_total",
Help: "The number of leader changes seen.",
})
- isLearner = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "is_learner",
- Help: "Whether or not this member is a learner. 1 if is, 0 otherwise.",
- })
learnerPromoteFailed = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: "etcd",
Subsystem: "server",
@@ -70,12 +64,6 @@ var (
Name: "heartbeat_send_failures_total",
Help: "The total number of leader heartbeat send failures (likely overloaded from slow disk).",
})
- slowApplies = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "slow_apply_total",
- Help: "The total number of slow apply requests (likely overloaded from slow disk).",
- })
applySnapshotInProgress = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "etcd",
Subsystem: "server",
@@ -124,12 +112,7 @@ var (
Name: "lease_expired_total",
Help: "The total number of expired leases.",
})
- quotaBackendBytes = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "quota_backend_bytes",
- Help: "Current backend storage quota size in bytes.",
- })
+
currentVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "etcd",
Subsystem: "server",
@@ -164,17 +147,6 @@ var (
Name: "limit",
Help: "The file descriptor limit.",
})
- applySec = prometheus.NewHistogramVec(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "apply_duration_seconds",
- Help: "The latency distributions of v2 apply called by backend.",
-
- // lowest bucket start of upper bound 0.0001 sec (0.1 ms) with factor 2
- // highest bucket start of 0.0001 sec * 2^19 == 52.4288 sec
- Buckets: prometheus.ExponentialBuckets(0.0001, 2, 20),
- },
- []string{"version", "op", "success"})
)
func init() {
@@ -182,7 +154,6 @@ func init() {
prometheus.MustRegister(isLeader)
prometheus.MustRegister(leaderChanges)
prometheus.MustRegister(heartbeatSendFailures)
- prometheus.MustRegister(slowApplies)
prometheus.MustRegister(applySnapshotInProgress)
prometheus.MustRegister(proposalsCommitted)
prometheus.MustRegister(proposalsApplied)
@@ -191,16 +162,13 @@ func init() {
prometheus.MustRegister(slowReadIndex)
prometheus.MustRegister(readIndexFailed)
prometheus.MustRegister(leaseExpired)
- prometheus.MustRegister(quotaBackendBytes)
prometheus.MustRegister(currentVersion)
prometheus.MustRegister(currentGoVersion)
prometheus.MustRegister(serverID)
- prometheus.MustRegister(isLearner)
prometheus.MustRegister(learnerPromoteSucceed)
prometheus.MustRegister(learnerPromoteFailed)
prometheus.MustRegister(fdUsed)
prometheus.MustRegister(fdLimit)
- prometheus.MustRegister(applySec)
currentVersion.With(prometheus.Labels{
"server_version": version.Version,
diff --git a/server/etcdserver/quota.go b/server/etcdserver/quota.go
deleted file mode 100644
index 33c06e61900..00000000000
--- a/server/etcdserver/quota.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "sync"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-
- humanize "github.com/dustin/go-humanize"
- "go.uber.org/zap"
-)
-
-const (
- // DefaultQuotaBytes is the number of bytes the backend Size may
- // consume before exceeding the space quota.
- DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB
- // MaxQuotaBytes is the maximum number of bytes suggested for a backend
- // quota. A larger quota may lead to degraded performance.
- MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB
-)
-
-// Quota represents an arbitrary quota against arbitrary requests. Each request
-// costs some charge; if there is not enough remaining charge, then there are
-// too few resources available within the quota to apply the request.
-type Quota interface {
- // Available judges whether the given request fits within the quota.
- Available(req interface{}) bool
- // Cost computes the charge against the quota for a given request.
- Cost(req interface{}) int
- // Remaining is the amount of charge left for the quota.
- Remaining() int64
-}
-
-type passthroughQuota struct{}
-
-func (*passthroughQuota) Available(interface{}) bool { return true }
-func (*passthroughQuota) Cost(interface{}) int { return 0 }
-func (*passthroughQuota) Remaining() int64 { return 1 }
-
-type backendQuota struct {
- s *EtcdServer
- maxBackendBytes int64
-}
-
-const (
- // leaseOverhead is an estimate for the cost of storing a lease
- leaseOverhead = 64
- // kvOverhead is an estimate for the cost of storing a key's metadata
- kvOverhead = 256
-)
-
-var (
- // only log once
- quotaLogOnce sync.Once
-
- DefaultQuotaSize = humanize.Bytes(uint64(DefaultQuotaBytes))
- maxQuotaSize = humanize.Bytes(uint64(MaxQuotaBytes))
-)
-
-// NewBackendQuota creates a quota layer with the given storage limit.
-func NewBackendQuota(s *EtcdServer, name string) Quota {
- lg := s.Logger()
- quotaBackendBytes.Set(float64(s.Cfg.QuotaBackendBytes))
-
- if s.Cfg.QuotaBackendBytes < 0 {
- // disable quotas if negative
- quotaLogOnce.Do(func() {
- lg.Info(
- "disabled backend quota",
- zap.String("quota-name", name),
- zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
- )
- })
- return &passthroughQuota{}
- }
-
- if s.Cfg.QuotaBackendBytes == 0 {
- // use default size if no quota size given
- quotaLogOnce.Do(func() {
- if lg != nil {
- lg.Info(
- "enabled backend quota with default value",
- zap.String("quota-name", name),
- zap.Int64("quota-size-bytes", DefaultQuotaBytes),
- zap.String("quota-size", DefaultQuotaSize),
- )
- }
- })
- quotaBackendBytes.Set(float64(DefaultQuotaBytes))
- return &backendQuota{s, DefaultQuotaBytes}
- }
-
- quotaLogOnce.Do(func() {
- if s.Cfg.QuotaBackendBytes > MaxQuotaBytes {
- lg.Warn(
- "quota exceeds the maximum value",
- zap.String("quota-name", name),
- zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
- zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))),
- zap.Int64("quota-maximum-size-bytes", MaxQuotaBytes),
- zap.String("quota-maximum-size", maxQuotaSize),
- )
- }
- lg.Info(
- "enabled backend quota",
- zap.String("quota-name", name),
- zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
- zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))),
- )
- })
- return &backendQuota{s, s.Cfg.QuotaBackendBytes}
-}
-
-func (b *backendQuota) Available(v interface{}) bool {
- // TODO: maybe optimize backend.Size()
- return b.s.Backend().Size()+int64(b.Cost(v)) < b.maxBackendBytes
-}
-
-func (b *backendQuota) Cost(v interface{}) int {
- switch r := v.(type) {
- case *pb.PutRequest:
- return costPut(r)
- case *pb.TxnRequest:
- return costTxn(r)
- case *pb.LeaseGrantRequest:
- return leaseOverhead
- default:
- panic("unexpected cost")
- }
-}
-
-func costPut(r *pb.PutRequest) int { return kvOverhead + len(r.Key) + len(r.Value) }
-
-func costTxnReq(u *pb.RequestOp) int {
- r := u.GetRequestPut()
- if r == nil {
- return 0
- }
- return costPut(r)
-}
-
-func costTxn(r *pb.TxnRequest) int {
- sizeSuccess := 0
- for _, u := range r.Success {
- sizeSuccess += costTxnReq(u)
- }
- sizeFailure := 0
- for _, u := range r.Failure {
- sizeFailure += costTxnReq(u)
- }
- if sizeFailure > sizeSuccess {
- return sizeFailure
- }
- return sizeSuccess
-}
-
-func (b *backendQuota) Remaining() int64 {
- return b.maxBackendBytes - b.s.Backend().Size()
-}
diff --git a/server/etcdserver/raft.go b/server/etcdserver/raft.go
index 8b9600d39ce..fd4b5dac337 100644
--- a/server/etcdserver/raft.go
+++ b/server/etcdserver/raft.go
@@ -15,27 +15,20 @@
package etcdserver
import (
- "encoding/json"
"expvar"
"fmt"
"log"
- "sort"
"sync"
"time"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.uber.org/zap"
+
"go.etcd.io/etcd/client/pkg/v3/logutil"
- "go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/pkg/v3/contention"
- "go.etcd.io/etcd/pkg/v3/pbutil"
- "go.etcd.io/etcd/raft/v3"
- "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/server/v3/config"
- "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
"go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
- "go.etcd.io/etcd/server/v3/wal"
- "go.etcd.io/etcd/server/v3/wal/walpb"
- "go.uber.org/zap"
+ serverstorage "go.etcd.io/etcd/server/v3/storage"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
)
const (
@@ -59,7 +52,7 @@ var (
)
func init() {
- expvar.Publish("raft.status", expvar.Func(func() interface{} {
+ expvar.Publish("raft.status", expvar.Func(func() any {
raftStatusMu.Lock()
defer raftStatusMu.Unlock()
if raftStatus == nil {
@@ -69,28 +62,34 @@ func init() {
}))
}
-// apply contains entries, snapshot to be applied. Once
-// an apply is consumed, the entries will be persisted to
-// to raft storage concurrently; the application must read
-// raftDone before assuming the raft messages are stable.
-type apply struct {
+// toApply contains entries, snapshot to be applied. Once
+// a toApply is consumed, the entries will be persisted to
+// raft storage concurrently; the application must read
+// notifyc before assuming the raft messages are stable.
+type toApply struct {
entries []raftpb.Entry
snapshot raftpb.Snapshot
// notifyc synchronizes etcd server applies with the raft node
notifyc chan struct{}
+ // raftAdvancedC notifies EtcdServer.apply that
+ // 'raftLog.applied' has advanced by r.Advance.
+ // It should be used only when the entries contain raftpb.EntryConfChange.
+ raftAdvancedC <-chan struct{}
}
type raftNode struct {
lg *zap.Logger
- tickMu *sync.Mutex
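+ // tickMu is an RWMutex: tick() takes the write lock to advance the clock and
+ // record latestTickTs, while getLatestTickTs() only needs a read lock.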
+ tickMu *sync.RWMutex
+ // timestamp of the latest tick
+ latestTickTs time.Time
raftNodeConfig
// a chan to send/receive snapshot
msgSnapC chan raftpb.Message
// a chan to send out apply
- applyc chan apply
+ applyc chan toApply
// a chan to send out readState
readStateC chan raft.ReadState
@@ -111,7 +110,7 @@ type raftNodeConfig struct {
isIDRemoved func(id uint64) bool
raft.Node
raftStorage *raft.MemoryStorage
- storage Storage
+ storage serverstorage.Storage
heartbeat time.Duration // for logging
// transport specifies the transport to send and receive msgs to members.
// Sending messages MUST NOT block. It is okay to drop messages, since
@@ -135,14 +134,15 @@ func newRaftNode(cfg raftNodeConfig) *raftNode {
raft.SetLogger(lg)
r := &raftNode{
lg: cfg.lg,
- tickMu: new(sync.Mutex),
+ tickMu: new(sync.RWMutex),
raftNodeConfig: cfg,
+ latestTickTs: time.Now(),
// set up contention detectors for raft heartbeat message.
// expect to send a heartbeat within 2 heartbeat intervals.
td: contention.NewTimeoutDetector(2 * cfg.heartbeat),
readStateC: make(chan raft.ReadState, 1),
msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap),
- applyc: make(chan apply),
+ applyc: make(chan toApply),
stopped: make(chan struct{}),
done: make(chan struct{}),
}
@@ -158,9 +158,16 @@ func newRaftNode(cfg raftNodeConfig) *raftNode {
func (r *raftNode) tick() {
r.tickMu.Lock()
r.Tick()
+ r.latestTickTs = time.Now()
r.tickMu.Unlock()
}
+func (r *raftNode) getLatestTickTs() time.Time {
+ r.tickMu.RLock()
+ defer r.tickMu.RUnlock()
+ return r.latestTickTs
+}
+
// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
func (r *raftNode) start(rh *raftReadyHandler) {
@@ -209,10 +216,12 @@ func (r *raftNode) start(rh *raftReadyHandler) {
}
notifyc := make(chan struct{}, 1)
- ap := apply{
- entries: rd.CommittedEntries,
- snapshot: rd.Snapshot,
- notifyc: notifyc,
+ raftAdvancedC := make(chan struct{}, 1)
+ ap := toApply{
+ entries: rd.CommittedEntries,
+ snapshot: rd.Snapshot,
+ notifyc: notifyc,
+ raftAdvancedC: raftAdvancedC,
}
updateCommittedIndex(&ap, rh)
@@ -223,7 +232,7 @@ func (r *raftNode) start(rh *raftReadyHandler) {
return
}
- // the leader can write to its disk in parallel with replicating to the followers and them
+ // the leader can write to its disk in parallel with replicating to the followers and then
// writing to their disks.
// For more details, check raft thesis 10.2.1
if islead {
@@ -275,8 +284,16 @@ func (r *raftNode) start(rh *raftReadyHandler) {
r.raftStorage.Append(rd.Entries)
+ confChanged := false
+ for _, ent := range rd.CommittedEntries {
+ if ent.Type == raftpb.EntryConfChange {
+ confChanged = true
+ break
+ }
+ }
+
if !islead {
- // finish processing incoming messages before we signal raftdone chan
+ // finish processing incoming messages before we signal notifyc chan
msgs := r.processMessages(rd.Messages)
// now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
@@ -286,17 +303,11 @@ func (r *raftNode) start(rh *raftReadyHandler) {
// changes to be applied before sending messages.
// Otherwise we might incorrectly count votes (e.g. votes from removed members).
// Also slow machine's follower raft-layer could proceed to become the leader
- // on its own single-node cluster, before apply-layer applies the config change.
+ // on its own single-node cluster, before the apply layer applies the config change.
// We simply wait for ALL pending entries to be applied for now.
// We might improve this later on if it causes unnecessary long blocking issues.
- waitApply := false
- for _, ent := range rd.CommittedEntries {
- if ent.Type == raftpb.EntryConfChange {
- waitApply = true
- break
- }
- }
- if waitApply {
+
+ if confChanged {
// blocks until 'applyAll' calls 'applyWait.Trigger'
// to be in sync with scheduled config-change job
// (assume notifyc has cap of 1)
@@ -314,7 +325,13 @@ func (r *raftNode) start(rh *raftReadyHandler) {
notifyc <- struct{}{}
}
+ // gofail: var raftBeforeAdvance struct{}
r.Advance()
+
+ if confChanged {
+ // notify etcdserver that raft has already advanced.
+ raftAdvancedC <- struct{}{}
+ }
case <-r.stopped:
return
}
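
The hunk above wires two synchronization channels between the raft loop and the apply side: notifyc (raft log writes are durable) and the new raftAdvancedC (r.Advance has run, only signalled for conf changes). The following minimal sketch shows roughly how the two signals are meant to be consumed; the control flow is simplified and the prints are illustrative, it is not the etcd code path itself.

```go
package main

import "fmt"

func main() {
	notifyc := make(chan struct{}, 1)
	raftAdvancedC := make(chan struct{}, 1)
	confChanged := true // the batch of committed entries includes an EntryConfChange

	// Raft-loop side (compare raftNode.start above).
	go func() {
		// ... rd.Entries persisted to WAL and raft storage here ...
		notifyc <- struct{}{} // unblock the apply side's snapshot trigger
		// r.Advance() would run here.
		if confChanged {
			raftAdvancedC <- struct{}{} // report that raft has advanced
		}
	}()

	// Apply side (compare EtcdServer.applyAll / apply).
	fmt.Println("applying committed entries")
	<-notifyc // raft log writes are durable; safe to trigger a snapshot
	if confChanged {
		<-raftAdvancedC // raftLog.applied has advanced; the conf change is settled
	}
	fmt.Println("done")
}
```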
@@ -322,7 +339,7 @@ func (r *raftNode) start(rh *raftReadyHandler) {
}()
}
-func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
+func updateCommittedIndex(ap *toApply, rh *raftReadyHandler) {
var ci uint64
if len(ap.entries) != 0 {
ci = ap.entries[len(ap.entries)-1].Index
@@ -340,6 +357,7 @@ func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
for i := len(ms) - 1; i >= 0; i-- {
if r.isIDRemoved(ms[i].To) {
ms[i].To = 0
+ continue
}
if ms[i].Type == raftpb.MsgAppResp {
@@ -380,12 +398,19 @@ func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
return ms
}
-func (r *raftNode) apply() chan apply {
+func (r *raftNode) apply() chan toApply {
return r.applyc
}
func (r *raftNode) stop() {
- r.stopped <- struct{}{}
+ select {
+ case r.stopped <- struct{}{}:
+ // Not already stopped, so trigger it
+ case <-r.done:
+ // Has already been stopped - no need to do anything
+ return
+ }
+ // Block until the stop has been acknowledged by start()
<-r.done
}
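
The reworked stop() above selects over the stopped and done channels so that repeated calls no longer block (exercised by TestStopRaftNodeMoreThanOnce below). A standalone sketch of that pattern, with hypothetical loop/Stop names rather than etcd's types:

```go
package main

import "fmt"

// loop stands in for raftNode: stopped carries stop requests, done is closed
// once the run goroutine has exited.
type loop struct {
	stopped chan struct{}
	done    chan struct{}
}

func newLoop() *loop {
	l := &loop{stopped: make(chan struct{}), done: make(chan struct{})}
	go func() {
		<-l.stopped   // wait for the first (and only delivered) stop request
		close(l.done) // closing makes every later receive succeed immediately
	}()
	return l
}

// Stop can be called any number of times. Only the first call's send is
// received; later calls fall into the <-l.done case and return right away.
func (l *loop) Stop() {
	select {
	case l.stopped <- struct{}{}:
		// not stopped yet, the request was delivered
	case <-l.done:
		// already stopped
		return
	}
	<-l.done // block until the goroutine has acknowledged the stop
}

func main() {
	l := newLoop()
	l.Stop()
	l.Stop() // safe: does not block or panic
	fmt.Println("stopped twice without blocking")
}
```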
@@ -419,271 +444,3 @@ func (r *raftNode) advanceTicks(ticks int) {
r.tick()
}
}
-
-func startNode(cfg config.ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
- var err error
- member := cl.MemberByName(cfg.Name)
- metadata := pbutil.MustMarshal(
- &pb.Metadata{
- NodeID: uint64(member.ID),
- ClusterID: uint64(cl.ID()),
- },
- )
- if w, err = wal.Create(cfg.Logger, cfg.WALDir(), metadata); err != nil {
- cfg.Logger.Panic("failed to create WAL", zap.Error(err))
- }
- if cfg.UnsafeNoFsync {
- w.SetUnsafeNoFsync()
- }
- peers := make([]raft.Peer, len(ids))
- for i, id := range ids {
- var ctx []byte
- ctx, err = json.Marshal((*cl).Member(id))
- if err != nil {
- cfg.Logger.Panic("failed to marshal member", zap.Error(err))
- }
- peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
- }
- id = member.ID
- cfg.Logger.Info(
- "starting local member",
- zap.String("local-member-id", id.String()),
- zap.String("cluster-id", cl.ID().String()),
- )
- s = raft.NewMemoryStorage()
- c := &raft.Config{
- ID: uint64(id),
- ElectionTick: cfg.ElectionTicks,
- HeartbeatTick: 1,
- Storage: s,
- MaxSizePerMsg: maxSizePerMsg,
- MaxInflightMsgs: maxInflightMsgs,
- CheckQuorum: true,
- PreVote: cfg.PreVote,
- Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")),
- }
- if len(peers) == 0 {
- n = raft.RestartNode(c)
- } else {
- n = raft.StartNode(c, peers)
- }
- raftStatusMu.Lock()
- raftStatus = n.Status
- raftStatusMu.Unlock()
- return id, n, s, w
-}
-
-func restartNode(cfg config.ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
- var walsnap walpb.Snapshot
- if snapshot != nil {
- walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
- }
- w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap, cfg.UnsafeNoFsync)
-
- cfg.Logger.Info(
- "restarting local member",
- zap.String("cluster-id", cid.String()),
- zap.String("local-member-id", id.String()),
- zap.Uint64("commit-index", st.Commit),
- )
- cl := membership.NewCluster(cfg.Logger)
- cl.SetID(id, cid)
- s := raft.NewMemoryStorage()
- if snapshot != nil {
- s.ApplySnapshot(*snapshot)
- }
- s.SetHardState(st)
- s.Append(ents)
- c := &raft.Config{
- ID: uint64(id),
- ElectionTick: cfg.ElectionTicks,
- HeartbeatTick: 1,
- Storage: s,
- MaxSizePerMsg: maxSizePerMsg,
- MaxInflightMsgs: maxInflightMsgs,
- CheckQuorum: true,
- PreVote: cfg.PreVote,
- Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")),
- }
-
- n := raft.RestartNode(c)
- raftStatusMu.Lock()
- raftStatus = n.Status
- raftStatusMu.Unlock()
- return id, cl, n, s, w
-}
-
-func restartAsStandaloneNode(cfg config.ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
- var walsnap walpb.Snapshot
- if snapshot != nil {
- walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
- }
- w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap, cfg.UnsafeNoFsync)
-
- // discard the previously uncommitted entries
- for i, ent := range ents {
- if ent.Index > st.Commit {
- cfg.Logger.Info(
- "discarding uncommitted WAL entries",
- zap.Uint64("entry-index", ent.Index),
- zap.Uint64("commit-index-from-wal", st.Commit),
- zap.Int("number-of-discarded-entries", len(ents)-i),
- )
- ents = ents[:i]
- break
- }
- }
-
- // force append the configuration change entries
- toAppEnts := createConfigChangeEnts(
- cfg.Logger,
- getIDs(cfg.Logger, snapshot, ents),
- uint64(id),
- st.Term,
- st.Commit,
- )
- ents = append(ents, toAppEnts...)
-
- // force commit newly appended entries
- err := w.Save(raftpb.HardState{}, toAppEnts)
- if err != nil {
- cfg.Logger.Fatal("failed to save hard state and entries", zap.Error(err))
- }
- if len(ents) != 0 {
- st.Commit = ents[len(ents)-1].Index
- }
-
- cfg.Logger.Info(
- "forcing restart member",
- zap.String("cluster-id", cid.String()),
- zap.String("local-member-id", id.String()),
- zap.Uint64("commit-index", st.Commit),
- )
-
- cl := membership.NewCluster(cfg.Logger)
- cl.SetID(id, cid)
- s := raft.NewMemoryStorage()
- if snapshot != nil {
- s.ApplySnapshot(*snapshot)
- }
- s.SetHardState(st)
- s.Append(ents)
- c := &raft.Config{
- ID: uint64(id),
- ElectionTick: cfg.ElectionTicks,
- HeartbeatTick: 1,
- Storage: s,
- MaxSizePerMsg: maxSizePerMsg,
- MaxInflightMsgs: maxInflightMsgs,
- CheckQuorum: true,
- PreVote: cfg.PreVote,
- Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")),
- }
-
- n := raft.RestartNode(c)
- raftStatus = n.Status
- return id, cl, n, s, w
-}
-
-// getIDs returns an ordered set of IDs included in the given snapshot and
-// the entries. The given snapshot/entries can contain three kinds of
-// ID-related entry:
-// - ConfChangeAddNode, in which case the contained ID will be added into the set.
-// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
-// - ConfChangeAddLearnerNode, in which the contained ID will be added into the set.
-func getIDs(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
- ids := make(map[uint64]bool)
- if snap != nil {
- for _, id := range snap.Metadata.ConfState.Voters {
- ids[id] = true
- }
- }
- for _, e := range ents {
- if e.Type != raftpb.EntryConfChange {
- continue
- }
- var cc raftpb.ConfChange
- pbutil.MustUnmarshal(&cc, e.Data)
- switch cc.Type {
- case raftpb.ConfChangeAddLearnerNode:
- ids[cc.NodeID] = true
- case raftpb.ConfChangeAddNode:
- ids[cc.NodeID] = true
- case raftpb.ConfChangeRemoveNode:
- delete(ids, cc.NodeID)
- case raftpb.ConfChangeUpdateNode:
- // do nothing
- default:
- lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String()))
- }
- }
- sids := make(types.Uint64Slice, 0, len(ids))
- for id := range ids {
- sids = append(sids, id)
- }
- sort.Sort(sids)
- return []uint64(sids)
-}
-
-// createConfigChangeEnts creates a series of Raft entries (i.e.
-// EntryConfChange) to remove the set of given IDs from the cluster. The ID
-// `self` is _not_ removed, even if present in the set.
-// If `self` is not inside the given ids, it creates a Raft entry to add a
-// default member with the given `self`.
-func createConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
- found := false
- for _, id := range ids {
- if id == self {
- found = true
- }
- }
-
- var ents []raftpb.Entry
- next := index + 1
-
- // NB: always add self first, then remove other nodes. Raft will panic if the
- // set of voters ever becomes empty.
- if !found {
- m := membership.Member{
- ID: types.ID(self),
- RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
- }
- ctx, err := json.Marshal(m)
- if err != nil {
- lg.Panic("failed to marshal member", zap.Error(err))
- }
- cc := &raftpb.ConfChange{
- Type: raftpb.ConfChangeAddNode,
- NodeID: self,
- Context: ctx,
- }
- e := raftpb.Entry{
- Type: raftpb.EntryConfChange,
- Data: pbutil.MustMarshal(cc),
- Term: term,
- Index: next,
- }
- ents = append(ents, e)
- next++
- }
-
- for _, id := range ids {
- if id == self {
- continue
- }
- cc := &raftpb.ConfChange{
- Type: raftpb.ConfChangeRemoveNode,
- NodeID: id,
- }
- e := raftpb.Entry{
- Type: raftpb.EntryConfChange,
- Data: pbutil.MustMarshal(cc),
- Term: term,
- Index: next,
- }
- ents = append(ents, e)
- next++
- }
-
- return ents
-}
diff --git a/server/etcdserver/raft_test.go b/server/etcdserver/raft_test.go
index 3eb5345dc25..2cfa4816232 100644
--- a/server/etcdserver/raft_test.go
+++ b/server/etcdserver/raft_test.go
@@ -22,16 +22,19 @@ import (
"testing"
"time"
+ "go.uber.org/zap/zaptest"
+
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/pkg/v3/pbutil"
- "go.etcd.io/etcd/raft/v3"
- "go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
"go.etcd.io/etcd/server/v3/mock/mockstorage"
- "go.uber.org/zap"
+ serverstorage "go.etcd.io/etcd/server/v3/storage"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
)
func TestGetIDs(t *testing.T) {
+ lg := zaptest.NewLogger(t)
addcc := &raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 2}
addEntry := raftpb.Entry{Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(addcc)}
removecc := &raftpb.ConfChange{Type: raftpb.ConfChangeRemoveNode, NodeID: 2}
@@ -47,18 +50,36 @@ func TestGetIDs(t *testing.T) {
widSet []uint64
}{
{nil, []raftpb.Entry{}, []uint64{}},
- {&raftpb.ConfState{Voters: []uint64{1}},
- []raftpb.Entry{}, []uint64{1}},
- {&raftpb.ConfState{Voters: []uint64{1}},
- []raftpb.Entry{addEntry}, []uint64{1, 2}},
- {&raftpb.ConfState{Voters: []uint64{1}},
- []raftpb.Entry{addEntry, removeEntry}, []uint64{1}},
- {&raftpb.ConfState{Voters: []uint64{1}},
- []raftpb.Entry{addEntry, normalEntry}, []uint64{1, 2}},
- {&raftpb.ConfState{Voters: []uint64{1}},
- []raftpb.Entry{addEntry, normalEntry, updateEntry}, []uint64{1, 2}},
- {&raftpb.ConfState{Voters: []uint64{1}},
- []raftpb.Entry{addEntry, removeEntry, normalEntry}, []uint64{1}},
+ {
+ &raftpb.ConfState{Voters: []uint64{1}},
+ []raftpb.Entry{},
+ []uint64{1},
+ },
+ {
+ &raftpb.ConfState{Voters: []uint64{1}},
+ []raftpb.Entry{addEntry},
+ []uint64{1, 2},
+ },
+ {
+ &raftpb.ConfState{Voters: []uint64{1}},
+ []raftpb.Entry{addEntry, removeEntry},
+ []uint64{1},
+ },
+ {
+ &raftpb.ConfState{Voters: []uint64{1}},
+ []raftpb.Entry{addEntry, normalEntry},
+ []uint64{1, 2},
+ },
+ {
+ &raftpb.ConfState{Voters: []uint64{1}},
+ []raftpb.Entry{addEntry, normalEntry, updateEntry},
+ []uint64{1, 2},
+ },
+ {
+ &raftpb.ConfState{Voters: []uint64{1}},
+ []raftpb.Entry{addEntry, removeEntry, normalEntry},
+ []uint64{1},
+ },
}
for i, tt := range tests {
@@ -66,7 +87,7 @@ func TestGetIDs(t *testing.T) {
if tt.confState != nil {
snap.Metadata.ConfState = *tt.confState
}
- idSet := getIDs(testLogger, &snap, tt.ents)
+ idSet := serverstorage.GetEffectiveNodeIDsFromWALEntries(lg, &snap, tt.ents)
if !reflect.DeepEqual(idSet, tt.widSet) {
t.Errorf("#%d: idset = %#v, want %#v", i, idSet, tt.widSet)
}
@@ -74,6 +95,7 @@ func TestGetIDs(t *testing.T) {
}
func TestCreateConfigChangeEnts(t *testing.T) {
+ lg := zaptest.NewLogger(t)
m := membership.Member{
ID: types.ID(1),
RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
@@ -146,7 +168,7 @@ func TestCreateConfigChangeEnts(t *testing.T) {
}
for i, tt := range tests {
- gents := createConfigChangeEnts(testLogger, tt.ids, tt.self, tt.term, tt.index)
+ gents := serverstorage.CreateConfigChangeEnts(lg, tt.ids, tt.self, tt.term, tt.index)
if !reflect.DeepEqual(gents, tt.wents) {
t.Errorf("#%d: ents = %v, want %v", i, gents, tt.wents)
}
@@ -156,48 +178,54 @@ func TestCreateConfigChangeEnts(t *testing.T) {
func TestStopRaftWhenWaitingForApplyDone(t *testing.T) {
n := newNopReadyNode()
r := newRaftNode(raftNodeConfig{
- lg: zap.NewExample(),
+ lg: zaptest.NewLogger(t),
Node: n,
storage: mockstorage.NewStorageRecorder(""),
raftStorage: raft.NewMemoryStorage(),
transport: newNopTransporter(),
})
- srv := &EtcdServer{lgMu: new(sync.RWMutex), lg: zap.NewExample(), r: *r}
+ srv := &EtcdServer{lgMu: new(sync.RWMutex), lg: zaptest.NewLogger(t), r: *r}
srv.r.start(nil)
n.readyc <- raft.Ready{}
- select {
- case <-srv.r.applyc:
- case <-time.After(time.Second):
- t.Fatalf("failed to receive apply struct")
+
+ stop := func() {
+ srv.r.stopped <- struct{}{}
+ select {
+ case <-srv.r.done:
+ case <-time.After(time.Second):
+ t.Fatalf("failed to stop raft loop")
+ }
}
- srv.r.stopped <- struct{}{}
select {
- case <-srv.r.done:
+ case <-srv.r.applyc:
case <-time.After(time.Second):
- t.Fatalf("failed to stop raft loop")
+ stop()
+ t.Fatalf("failed to receive toApply struct")
}
+
+ stop()
}
-// TestConfigChangeBlocksApply ensures apply blocks if committed entries contain config-change.
+// TestConfigChangeBlocksApply ensures the apply routine blocks if committed entries contain a config change.
func TestConfigChangeBlocksApply(t *testing.T) {
n := newNopReadyNode()
r := newRaftNode(raftNodeConfig{
- lg: zap.NewExample(),
+ lg: zaptest.NewLogger(t),
Node: n,
storage: mockstorage.NewStorageRecorder(""),
raftStorage: raft.NewMemoryStorage(),
transport: newNopTransporter(),
})
- srv := &EtcdServer{lgMu: new(sync.RWMutex), lg: zap.NewExample(), r: *r}
+ srv := &EtcdServer{lgMu: new(sync.RWMutex), lg: zaptest.NewLogger(t), r: *r}
srv.r.start(&raftReadyHandler{
getLead: func() uint64 { return 0 },
updateLead: func(uint64) {},
updateLeadership: func(bool) {},
})
- defer srv.r.Stop()
+ defer srv.r.stop()
n.readyc <- raft.Ready{
SoftState: &raft.SoftState{RaftState: raft.StateFollower},
@@ -214,13 +242,18 @@ func TestConfigChangeBlocksApply(t *testing.T) {
select {
case <-continueC:
- t.Fatalf("unexpected execution: raft routine should block waiting for apply")
+ t.Fatalf("unexpected execution: raft routine should block waiting for toApply")
case <-time.After(time.Second):
}
- // finish apply, unblock raft routine
+ // finish the toApply, unblocking the raft routine
<-ap.notifyc
+ select {
+ case <-ap.raftAdvancedC:
+ t.Log("recevied raft advance notification")
+ }
+
select {
case <-continueC:
case <-time.After(time.Second):
@@ -230,13 +263,13 @@ func TestConfigChangeBlocksApply(t *testing.T) {
func TestProcessDuplicatedAppRespMessage(t *testing.T) {
n := newNopReadyNode()
- cl := membership.NewCluster(zap.NewExample())
+ cl := membership.NewCluster(zaptest.NewLogger(t))
rs := raft.NewMemoryStorage()
p := mockstorage.NewStorageRecorder("")
tr, sendc := newSendMsgAppRespTransporter()
r := newRaftNode(raftNodeConfig{
- lg: zap.NewExample(),
+ lg: zaptest.NewLogger(t),
isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
Node: n,
transport: tr,
@@ -246,7 +279,7 @@ func TestProcessDuplicatedAppRespMessage(t *testing.T) {
s := &EtcdServer{
lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
+ lg: zaptest.NewLogger(t),
r: *r,
cluster: cl,
SyncTicker: &time.Ticker{},
@@ -269,9 +302,8 @@ func TestProcessDuplicatedAppRespMessage(t *testing.T) {
}
}
-// Test that none of the expvars that get added during init panic.
-// This matters if another package imports etcdserver,
-// doesn't use it, but does use expvars.
+// TestExpvarWithNoRaftStatus tests that none of the expvars that get added during init panic.
+// This matters if another package imports etcdserver, doesn't use it, but does use expvars.
func TestExpvarWithNoRaftStatus(t *testing.T) {
defer func() {
if err := recover(); err != nil {
@@ -282,3 +314,29 @@ func TestExpvarWithNoRaftStatus(t *testing.T) {
_ = kv.Value.String()
})
}
+
+func TestStopRaftNodeMoreThanOnce(t *testing.T) {
+ n := newNopReadyNode()
+ r := newRaftNode(raftNodeConfig{
+ lg: zaptest.NewLogger(t),
+ Node: n,
+ storage: mockstorage.NewStorageRecorder(""),
+ raftStorage: raft.NewMemoryStorage(),
+ transport: newNopTransporter(),
+ })
+ r.start(&raftReadyHandler{})
+
+ for i := 0; i < 2; i++ {
+ stopped := make(chan struct{})
+ go func() {
+ r.stop()
+ close(stopped)
+ }()
+
+ select {
+ case <-stopped:
+ case <-time.After(time.Second):
+ t.Errorf("*raftNode.stop() is blocked !")
+ }
+ }
+}
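
The server.go changes below replace the hand-rolled leaderChanged channel plus mutex with notify.Notifier (and reuse it for firstCommitInTerm and clusterVersionChanged). The snippet below is a hypothetical minimal re-implementation of that broadcast pattern; the Receive/Notify names are assumptions and may not match the real pkg/v3/notify API exactly.

```go
package main

import (
	"fmt"
	"sync"
)

// Notifier broadcasts "something changed" by closing a channel and replacing
// it with a fresh one, so every waiter holding the old channel is released.
type Notifier struct {
	mu sync.RWMutex
	c  chan struct{}
}

func NewNotifier() *Notifier {
	return &Notifier{c: make(chan struct{})}
}

// Receive returns the channel that will be closed by the next Notify call.
func (n *Notifier) Receive() <-chan struct{} {
	n.mu.RLock()
	defer n.mu.RUnlock()
	return n.c
}

// Notify wakes all current waiters and installs a channel for future ones -
// the same close-and-replace dance the removed leaderChanged/leaderChangedMu
// fields performed inline.
func (n *Notifier) Notify() {
	n.mu.Lock()
	defer n.mu.Unlock()
	close(n.c)
	n.c = make(chan struct{})
}

func main() {
	n := NewNotifier()
	ch := n.Receive() // e.g. the linearizable read loop grabs this per iteration
	go n.Notify()     // e.g. run() observed a new leader
	<-ch
	fmt.Println("observed leader change")
}
```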
diff --git a/server/etcdserver/server.go b/server/etcdserver/server.go
index 83babf95276..778e786c3af 100644
--- a/server/etcdserver/server.go
+++ b/server/etcdserver/server.go
@@ -17,16 +17,14 @@ package etcdserver
import (
"context"
"encoding/json"
+ errorspkg "errors"
"expvar"
"fmt"
"math"
- "math/rand"
"net/http"
- "os"
"path"
"regexp"
"strconv"
- "strings"
"sync"
"sync/atomic"
"time"
@@ -34,7 +32,6 @@ import (
"github.com/coreos/go-semver/semver"
humanize "github.com/dustin/go-humanize"
"github.com/prometheus/client_golang/prometheus"
- "go.etcd.io/etcd/server/v3/config"
"go.uber.org/zap"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
@@ -42,35 +39,44 @@ import (
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+ "go.etcd.io/etcd/pkg/v3/featuregate"
"go.etcd.io/etcd/pkg/v3/idutil"
+ "go.etcd.io/etcd/pkg/v3/notify"
"go.etcd.io/etcd/pkg/v3/pbutil"
"go.etcd.io/etcd/pkg/v3/runtime"
"go.etcd.io/etcd/pkg/v3/schedule"
"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/pkg/v3/wait"
- "go.etcd.io/etcd/raft/v3"
- "go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/config"
"go.etcd.io/etcd/server/v3/etcdserver/api"
+ httptypes "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/types"
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
"go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes"
stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor"
+ "go.etcd.io/etcd/server/v3/etcdserver/apply"
"go.etcd.io/etcd/server/v3/etcdserver/cindex"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+ "go.etcd.io/etcd/server/v3/etcdserver/txn"
+ serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
+ "go.etcd.io/etcd/server/v3/features"
"go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/lease/leasehttp"
- "go.etcd.io/etcd/server/v3/mvcc"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
- "go.etcd.io/etcd/server/v3/wal"
+ serverstorage "go.etcd.io/etcd/server/v3/storage"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
)
const (
- DefaultSnapshotCount = 100000
+ DefaultSnapshotCount = 10000
// DefaultSnapshotCatchUpEntries is the number of entries for a slow follower
// to catch-up after compacting the raft storage entries.
@@ -83,7 +89,7 @@ const (
StoreKeysPrefix = "/1"
// HealthInterval is the minimum time the cluster should be healthy
- // before accepting add member requests.
+ // before accepting add and delete member requests.
HealthInterval = 5 * time.Second
purgeFileInterval = 30 * time.Second
@@ -99,9 +105,12 @@ const (
recommendedMaxRequestBytes = 10 * 1024 * 1024
- readyPercent = 0.9
+ // readyPercentThreshold is a threshold used to determine
+ // whether a learner is ready for a transition into a full voting member or not.
+ readyPercentThreshold = 0.9
DowngradeEnabledPath = "/downgrade/enabled"
+ memorySnapshotCount = 100
)
var (
@@ -115,12 +124,10 @@ var (
)
func init() {
- rand.Seed(time.Now().UnixNano())
-
expvar.Publish(
"file_descriptor_limit",
expvar.Func(
- func() interface{} {
+ func() any {
n, _ := runtime.FDLimit()
return n
},
@@ -140,15 +147,12 @@ type ServerV2 interface {
Server
Leader() types.ID
- // Do takes a V2 request and attempts to fulfill it, returning a Response.
- Do(ctx context.Context, r pb.Request) (Response, error)
- stats.Stats
ClientCertAuthEnabled() bool
}
type ServerV3 interface {
Server
- RaftStatusGetter
+ apply.RaftStatusGetter
}
func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled }
@@ -186,6 +190,9 @@ type Server interface {
// the leader is etcd 2.0. etcd 2.0 leader will not update clusterVersion since
// this feature is introduced post 2.0.
ClusterVersion() *semver.Version
+ // StorageVersion is the storage schema version. It's supported starting
+ // from 3.6.
+ StorageVersion() *semver.Version
Cluster() api.Cluster
Alarms() []*pb.AlarmMember
@@ -234,11 +241,10 @@ type EtcdServer struct {
// done is closed when all goroutines from start() complete.
done chan struct{}
// leaderChanged is used to notify the linearizable read loop to drop the old read requests.
- leaderChanged chan struct{}
- leaderChangedMu sync.RWMutex
+ leaderChanged *notify.Notifier
errorc chan error
- id types.ID
+ memberID types.ID
attributes membership.Attributes
cluster *membership.RaftCluster
@@ -246,21 +252,15 @@ type EtcdServer struct {
v2store v2store.Store
snapshotter *snap.Snapshotter
- applyV2 ApplierV2
+ uberApply apply.UberApplier
- // applyV3 is the applier with auth and quotas
- applyV3 applierV3
- // applyV3Base is the core applier without auth or quotas
- applyV3Base applierV3
- // applyV3Internal is the applier for internal request
- applyV3Internal applierV3Internal
- applyWait wait.WaitTime
+ applyWait wait.WaitTime
kv mvcc.WatchableKV
lessor lease.Lessor
- bemu sync.Mutex
+ bemu sync.RWMutex
be backend.Backend
- beHooks *backendHooks
+ beHooks *serverstorage.BackendHooks
authStore auth.AuthStore
alarmStore *v3alarm.AlarmStore
@@ -289,312 +289,72 @@ type EtcdServer struct {
leadTimeMu sync.RWMutex
leadElectedTime time.Time
- firstCommitInTermMu sync.RWMutex
- firstCommitInTermC chan struct{}
+ firstCommitInTerm *notify.Notifier
+ clusterVersionChanged *notify.Notifier
*AccessController
-}
-
-type backendHooks struct {
- indexer cindex.ConsistentIndexer
- lg *zap.Logger
-
- // confState to be written in the next submitted backend transaction (if dirty)
- confState raftpb.ConfState
- // first write changes it to 'dirty'. false by default, so
- // not initialized `confState` is meaningless.
- confStateDirty bool
- confStateLock sync.Mutex
-}
-
-func (bh *backendHooks) OnPreCommitUnsafe(tx backend.BatchTx) {
- bh.indexer.UnsafeSave(tx)
- bh.confStateLock.Lock()
- defer bh.confStateLock.Unlock()
- if bh.confStateDirty {
- membership.MustUnsafeSaveConfStateToBackend(bh.lg, tx, &bh.confState)
- // save bh.confState
- bh.confStateDirty = false
- }
-}
-
-func (bh *backendHooks) SetConfState(confState *raftpb.ConfState) {
- bh.confStateLock.Lock()
- defer bh.confStateLock.Unlock()
- bh.confState = *confState
- bh.confStateDirty = true
+ // forceDiskSnapshot can force a snapshot to be triggered after apply, independent of the snapshotCount.
+ // It should only be set within the apply code path, and is used to force a snapshot after a cluster version downgrade.
+ // TODO: Replace with flush db in v3.7 assuming v3.6 bootstraps from db file.
+ forceDiskSnapshot bool
+ corruptionChecker CorruptionChecker
}
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) {
- st := v2store.New(StoreClusterPrefix, StoreKeysPrefix)
-
- var (
- w *wal.WAL
- n raft.Node
- s *raft.MemoryStorage
- id types.ID
- cl *membership.RaftCluster
- )
-
- if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
- cfg.Logger.Warn(
- "exceeded recommended request limit",
- zap.Uint("max-request-bytes", cfg.MaxRequestBytes),
- zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))),
- zap.Int("recommended-request-bytes", recommendedMaxRequestBytes),
- zap.String("recommended-request-size", recommendedMaxRequestBytesString),
- )
- }
-
- if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
- return nil, fmt.Errorf("cannot access data directory: %v", terr)
- }
-
- haveWAL := wal.Exist(cfg.WALDir())
-
- if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
- cfg.Logger.Fatal(
- "failed to create snapshot directory",
- zap.String("path", cfg.SnapDir()),
- zap.Error(err),
- )
- }
-
- if err = fileutil.RemoveMatchFile(cfg.Logger, cfg.SnapDir(), func(fileName string) bool {
- return strings.HasPrefix(fileName, "tmp")
- }); err != nil {
- cfg.Logger.Error(
- "failed to remove temp file(s) in snapshot directory",
- zap.String("path", cfg.SnapDir()),
- zap.Error(err),
- )
- }
-
- ss := snap.New(cfg.Logger, cfg.SnapDir())
-
- bepath := cfg.BackendPath()
- beExist := fileutil.Exist(bepath)
-
- ci := cindex.NewConsistentIndex(nil)
- beHooks := &backendHooks{lg: cfg.Logger, indexer: ci}
- be := openBackend(cfg, beHooks)
- ci.SetBackend(be)
- cindex.CreateMetaBucket(be.BatchTx())
-
- if cfg.ExperimentalBootstrapDefragThresholdMegabytes != 0 {
- err := maybeDefragBackend(cfg, be)
- if err != nil {
- return nil, err
- }
- }
-
- defer func() {
- if err != nil {
- be.Close()
- }
- }()
-
- prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.PeerDialTimeout())
+ b, err := bootstrap(cfg)
if err != nil {
+ cfg.Logger.Error("bootstrap failed", zap.Error(err))
return nil, err
}
- var (
- remotes []*membership.Member
- snapshot *raftpb.Snapshot
- )
+ cfg.Logger.Info("bootstrap successfully")
- switch {
- case !haveWAL && !cfg.NewCluster:
- if err = cfg.VerifyJoinExisting(); err != nil {
- return nil, err
- }
- cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
- if err != nil {
- return nil, err
- }
- existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt)
- if gerr != nil {
- return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
- }
- if err = membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil {
- return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
- }
- if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt) {
- return nil, fmt.Errorf("incompatible with current running cluster")
- }
-
- remotes = existingCluster.Members()
- cl.SetID(types.ID(0), existingCluster.ID())
- cl.SetStore(st)
- cl.SetBackend(be)
- id, n, s, w = startNode(cfg, cl, nil)
- cl.SetID(id, existingCluster.ID())
-
- case !haveWAL && cfg.NewCluster:
- if err = cfg.VerifyBootstrap(); err != nil {
- return nil, err
- }
- cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
- if err != nil {
- return nil, err
- }
- m := cl.MemberByName(cfg.Name)
- if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.BootstrapTimeoutEffective()) {
- return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
- }
- if cfg.ShouldDiscover() {
- var str string
- str, err = v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
- if err != nil {
- return nil, &DiscoveryError{Op: "join", Err: err}
- }
- var urlsmap types.URLsMap
- urlsmap, err = types.NewURLsMap(str)
- if err != nil {
- return nil, err
- }
- if config.CheckDuplicateURL(urlsmap) {
- return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
- }
- if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap); err != nil {
- return nil, err
- }
- }
- cl.SetStore(st)
- cl.SetBackend(be)
- id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
- cl.SetID(id, cl.ID())
-
- case haveWAL:
- if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
- return nil, fmt.Errorf("cannot write to member directory: %v", err)
- }
-
- if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
- return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
- }
-
- if cfg.ShouldDiscover() {
- cfg.Logger.Warn(
- "discovery token is ignored since cluster already initialized; valid logs are found",
- zap.String("wal-dir", cfg.WALDir()),
- )
- }
-
- // Find a snapshot to start/restart a raft node
- walSnaps, err := wal.ValidSnapshotEntries(cfg.Logger, cfg.WALDir())
+ defer func() {
if err != nil {
- return nil, err
+ b.Close()
}
- // snapshot files can be orphaned if etcd crashes after writing them but before writing the corresponding
- // wal log entries
- snapshot, err := ss.LoadNewestAvailable(walSnaps)
- if err != nil && err != snap.ErrNoSnapshot {
- return nil, err
- }
-
- if snapshot != nil {
- if err = st.Recovery(snapshot.Data); err != nil {
- cfg.Logger.Panic("failed to recover from snapshot", zap.Error(err))
- }
-
- if err = assertNoV2StoreContent(cfg.Logger, st, cfg.V2Deprecation); err != nil {
- cfg.Logger.Error("illegal v2store content", zap.Error(err))
- return nil, err
- }
-
- cfg.Logger.Info(
- "recovered v2 store from snapshot",
- zap.Uint64("snapshot-index", snapshot.Metadata.Index),
- zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))),
- )
-
- if be, err = recoverSnapshotBackend(cfg, be, *snapshot, beExist, beHooks); err != nil {
- cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err))
- }
- s1, s2 := be.Size(), be.SizeInUse()
- cfg.Logger.Info(
- "recovered v3 backend from snapshot",
- zap.Int64("backend-size-bytes", s1),
- zap.String("backend-size", humanize.Bytes(uint64(s1))),
- zap.Int64("backend-size-in-use-bytes", s2),
- zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))),
- )
- } else {
- cfg.Logger.Info("No snapshot found. Recovering WAL from scratch!")
- }
-
- if !cfg.ForceNewCluster {
- id, cl, n, s, w = restartNode(cfg, snapshot)
- } else {
- id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot)
- }
-
- cl.SetStore(st)
- cl.SetBackend(be)
- cl.Recover(api.UpdateCapability)
- if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist {
- os.RemoveAll(bepath)
- return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath)
- }
-
- default:
- return nil, fmt.Errorf("unsupported bootstrap config")
- }
-
- if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
- return nil, fmt.Errorf("cannot access member directory: %v", terr)
- }
+ }()
- sstats := stats.NewServerStats(cfg.Name, id.String())
- lstats := stats.NewLeaderStats(cfg.Logger, id.String())
+ sstats := stats.NewServerStats(cfg.Name, b.cluster.cl.String())
+ lstats := stats.NewLeaderStats(cfg.Logger, b.cluster.nodeID.String())
heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
srv = &EtcdServer{
- readych: make(chan struct{}),
- Cfg: cfg,
- lgMu: new(sync.RWMutex),
- lg: cfg.Logger,
- errorc: make(chan error, 1),
- v2store: st,
- snapshotter: ss,
- r: *newRaftNode(
- raftNodeConfig{
- lg: cfg.Logger,
- isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
- Node: n,
- heartbeat: heartbeat,
- raftStorage: s,
- storage: NewStorage(w, ss),
- },
- ),
- id: id,
- attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
- cluster: cl,
- stats: sstats,
- lstats: lstats,
- SyncTicker: time.NewTicker(500 * time.Millisecond),
- peerRt: prt,
- reqIDGen: idutil.NewGenerator(uint16(id), time.Now()),
- AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist},
- consistIndex: ci,
- firstCommitInTermC: make(chan struct{}),
- }
- serverID.With(prometheus.Labels{"server_id": id.String()}).Set(1)
-
- srv.applyV2 = NewApplierV2(cfg.Logger, srv.v2store, srv.cluster)
-
- srv.be = be
- srv.beHooks = beHooks
+ readych: make(chan struct{}),
+ Cfg: cfg,
+ lgMu: new(sync.RWMutex),
+ lg: cfg.Logger,
+ errorc: make(chan error, 1),
+ v2store: b.storage.st,
+ snapshotter: b.ss,
+ r: *b.raft.newRaftNode(b.ss, b.storage.wal.w, b.cluster.cl),
+ memberID: b.cluster.nodeID,
+ attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
+ cluster: b.cluster.cl,
+ stats: sstats,
+ lstats: lstats,
+ SyncTicker: time.NewTicker(500 * time.Millisecond),
+ peerRt: b.prt,
+ reqIDGen: idutil.NewGenerator(uint16(b.cluster.nodeID), time.Now()),
+ AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist},
+ consistIndex: b.storage.backend.ci,
+ firstCommitInTerm: notify.NewNotifier(),
+ clusterVersionChanged: notify.NewNotifier(),
+ }
+ serverID.With(prometheus.Labels{"server_id": b.cluster.nodeID.String()}).Set(1)
+ srv.cluster.SetVersionChangedNotifier(srv.clusterVersionChanged)
+
+ srv.be = b.storage.backend.be
+ srv.beHooks = b.storage.backend.beHooks
minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat
// always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
// If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
- srv.lessor = lease.NewLessor(srv.Logger(), srv.be, lease.LessorConfig{
+ srv.lessor = lease.NewLessor(srv.Logger(), srv.be, srv.cluster, lease.LessorConfig{
MinLeaseTTL: int64(math.Ceil(minTTL.Seconds())),
CheckpointInterval: cfg.LeaseCheckpointInterval,
+ CheckpointPersist: cfg.LeaseCheckpointPersist,
ExpiredLeasesRetryInterval: srv.Cfg.ReqTimeout(),
})
@@ -608,25 +368,15 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) {
cfg.Logger.Warn("failed to create token provider", zap.Error(err))
return nil, err
}
- srv.kv = mvcc.New(srv.Logger(), srv.be, srv.lessor, mvcc.StoreConfig{CompactionBatchLimit: cfg.CompactionBatchLimit})
- kvindex := ci.ConsistentIndex()
- srv.lg.Debug("restore consistentIndex", zap.Uint64("index", kvindex))
- if beExist {
- // TODO: remove kvindex != 0 checking when we do not expect users to upgrade
- // etcd from pre-3.0 release.
- if snapshot != nil && kvindex < snapshot.Metadata.Index {
- if kvindex != 0 {
- return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", bepath, kvindex, snapshot.Metadata.Index)
- }
- cfg.Logger.Warn(
- "consistent index was never saved",
- zap.Uint64("snapshot-index", snapshot.Metadata.Index),
- )
- }
+ mvccStoreConfig := mvcc.StoreConfig{
+ CompactionBatchLimit: cfg.CompactionBatchLimit,
+ CompactionSleepInterval: cfg.CompactionSleepInterval,
}
+ srv.kv = mvcc.New(srv.Logger(), srv.be, srv.lessor, mvccStoreConfig)
+ srv.corruptionChecker = newCorruptionChecker(cfg.Logger, srv, srv.kv.HashStorage())
- srv.authStore = auth.NewAuthStore(srv.Logger(), srv.be, tp, int(cfg.BcryptCost))
+ srv.authStore = auth.NewAuthStore(srv.Logger(), schema.NewAuthBackend(srv.Logger(), srv.be), tp, int(cfg.BcryptCost))
newSrv := srv // since srv == nil in defer if srv is returned as nil
defer func() {
@@ -644,29 +394,39 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) {
srv.compactor.Run()
}
- srv.applyV3Base = srv.newApplierV3Backend()
- srv.applyV3Internal = srv.newApplierV3Internal()
if err = srv.restoreAlarms(); err != nil {
return nil, err
}
+ srv.uberApply = srv.NewUberApplier()
if srv.Cfg.EnableLeaseCheckpoint {
// setting checkpointer enables lease checkpoint feature.
- srv.lessor.SetCheckpointer(func(ctx context.Context, cp *pb.LeaseCheckpointRequest) {
+ srv.lessor.SetCheckpointer(func(ctx context.Context, cp *pb.LeaseCheckpointRequest) error {
+ if !srv.ensureLeadership() {
+ srv.lg.Warn("Ignore the checkpoint request because current member isn't a leader",
+ zap.Uint64("local-member-id", uint64(srv.MemberID())))
+ return lease.ErrNotPrimary
+ }
+
srv.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseCheckpoint: cp})
+ return nil
})
}
+ // Set the hook after EtcdServer finishes the initialization to avoid
+ // the hook being called during the initialization process.
+ srv.be.SetTxPostLockInsideApplyHook(srv.getTxPostLockInsideApplyHook())
+
// TODO: move transport initialization near the definition of remote
tr := &rafthttp.Transport{
Logger: cfg.Logger,
TLSInfo: cfg.PeerTLSInfo,
DialTimeout: cfg.PeerDialTimeout(),
- ID: id,
+ ID: b.cluster.nodeID,
URLs: cfg.PeerURLs,
- ClusterID: cl.ID(),
+ ClusterID: b.cluster.cl.ID(),
Raft: srv,
- Snapshotter: ss,
+ Snapshotter: b.ss,
ServerStats: sstats,
LeaderStats: lstats,
ErrorC: srv.errorc,
@@ -675,13 +435,13 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) {
return nil, err
}
// add all remotes into transport
- for _, m := range remotes {
- if m.ID != id {
+ for _, m := range b.cluster.remotes {
+ if m.ID != b.cluster.nodeID {
tr.AddRemote(m.ID, m.PeerURLs)
}
}
- for _, m := range cl.Members() {
- if m.ID != id {
+ for _, m := range b.cluster.cl.Members() {
+ if m.ID != b.cluster.nodeID {
tr.AddPeer(m.ID, m.PeerURLs)
}
}
@@ -690,23 +450,6 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) {
return srv, nil
}
-// assertNoV2StoreContent -> depending on the deprecation stage, warns or report an error
-// if the v2store contains custom content.
-func assertNoV2StoreContent(lg *zap.Logger, st v2store.Store, deprecationStage config.V2DeprecationEnum) error {
- metaOnly, err := membership.IsMetaStoreOnly(st)
- if err != nil {
- return err
- }
- if metaOnly {
- return nil
- }
- if deprecationStage.IsAtLeast(config.V2_DEPR_1_WRITE_ONLY) {
- return fmt.Errorf("detected disallowed custom content in v2store for stage --v2-deprecation=%s", deprecationStage)
- }
- lg.Warn("detected custom v2store content. Etcd v3.5 is the last version allowing to access it using API v2. Please remove the content.")
- return nil
-}
-
func (s *EtcdServer) Logger() *zap.Logger {
s.lgMu.RLock()
l := s.lg
@@ -714,6 +457,15 @@ func (s *EtcdServer) Logger() *zap.Logger {
return l
}
+func (s *EtcdServer) Config() config.ServerConfig {
+ return s.Cfg
+}
+
+// FeatureEnabled returns true if the feature is enabled by the etcd server, false otherwise.
+func (s *EtcdServer) FeatureEnabled(f featuregate.Feature) bool {
+ return s.Cfg.ServerFeatureGate.Enabled(f)
+}
+
func tickToDur(ticks int, tickMs uint) string {
return fmt.Sprintf("%v", time.Duration(ticks)*time.Duration(tickMs)*time.Millisecond)
}
@@ -727,7 +479,7 @@ func (s *EtcdServer) adjustTicks() {
ticks := s.Cfg.ElectionTicks - 1
lg.Info(
"started as single-node; fast-forwarding election ticks",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.Int("forward-ticks", ticks),
zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)),
zap.Int("election-ticks", s.Cfg.ElectionTicks),
@@ -766,7 +518,7 @@ func (s *EtcdServer) adjustTicks() {
lg.Info(
"initialized peer connections; fast-forwarding election ticks",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.Int("forward-ticks", ticks),
zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)),
zap.Int("election-ticks", s.Cfg.ElectionTicks),
@@ -787,14 +539,14 @@ func (s *EtcdServer) adjustTicks() {
func (s *EtcdServer) Start() {
s.start()
s.GoAttach(func() { s.adjustTicks() })
- // TODO: Switch to publishV3 in 3.6.
- // Support for cluster_member_set_attr was added in 3.5.
- s.GoAttach(func() { s.publish(s.Cfg.ReqTimeout()) })
+ s.GoAttach(func() { s.publishV3(s.Cfg.ReqTimeout()) })
s.GoAttach(s.purgeFile)
s.GoAttach(func() { monitorFileDescriptor(s.Logger(), s.stopping) })
- s.GoAttach(s.monitorVersions)
+ s.GoAttach(s.monitorClusterVersions)
+ s.GoAttach(s.monitorStorageVersion)
s.GoAttach(s.linearizableReadLoop)
s.GoAttach(s.monitorKVHash)
+ s.GoAttach(s.monitorCompactHash)
s.GoAttach(s.monitorDowngrade)
}
@@ -829,11 +581,11 @@ func (s *EtcdServer) start() {
s.ctx, s.cancel = context.WithCancel(context.Background())
s.readwaitc = make(chan struct{}, 1)
s.readNotifier = newNotifier()
- s.leaderChanged = make(chan struct{})
+ s.leaderChanged = notify.NewNotifier()
if s.ClusterVersion() != nil {
lg.Info(
"starting etcd server",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.String("local-server-version", version.Version),
zap.String("cluster-id", s.Cluster().ID().String()),
zap.String("cluster-version", version.Cluster(s.ClusterVersion().String())),
@@ -842,7 +594,7 @@ func (s *EtcdServer) start() {
} else {
lg.Info(
"starting etcd server",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.String("local-server-version", version.Version),
zap.String("cluster-version", "to_be_decided"),
)
@@ -858,8 +610,8 @@ func (s *EtcdServer) purgeFile() {
var dberrc, serrc, werrc <-chan error
var dbdonec, sdonec, wdonec <-chan struct{}
if s.Cfg.MaxSnapFiles > 0 {
- dbdonec, dberrc = fileutil.PurgeFileWithDoneNotify(lg, s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping)
- sdonec, serrc = fileutil.PurgeFileWithDoneNotify(lg, s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping)
+ dbdonec, dberrc = fileutil.PurgeFileWithoutFlock(lg, s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping)
+ sdonec, serrc = fileutil.PurgeFileWithoutFlock(lg, s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping)
}
if s.Cfg.MaxWALFiles > 0 {
wdonec, werrc = fileutil.PurgeFileWithDoneNotify(lg, s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.stopping)
@@ -911,7 +663,7 @@ type ServerPeerV2 interface {
DowngradeEnabledHandler() http.Handler
}
-func (s *EtcdServer) DowngradeInfo() *membership.DowngradeInfo { return s.cluster.DowngradeInfo() }
+func (s *EtcdServer) DowngradeInfo() *serverversion.DowngradeInfo { return s.cluster.DowngradeInfo() }
type downgradeEnabledHandler struct {
lg *zap.Logger
@@ -962,11 +714,19 @@ func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
if s.cluster.IsIDRemoved(types.ID(m.From)) {
lg.Warn(
"rejected Raft message from removed member",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.String("removed-member-id", types.ID(m.From).String()),
)
return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member")
}
+ if s.MemberID() != types.ID(m.To) {
+ lg.Warn(
+ "rejected Raft message to mismatch member",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.String("mismatch-member-id", types.ID(m.To).String()),
+ )
+ return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message to mismatched member")
+ }
if m.Type == raftpb.MsgApp {
s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size())
}
@@ -984,15 +744,16 @@ func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
}
type etcdProgress struct {
- confState raftpb.ConfState
- snapi uint64
- appliedt uint64
- appliedi uint64
+ confState raftpb.ConfState
+ diskSnapshotIndex uint64
+ memorySnapshotIndex uint64
+ appliedt uint64
+ appliedi uint64
}
// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode,
// and helps decouple state machine logic from Raft algorithms.
-// TODO: add a state machine interface to apply the commit entries and do snapshot/recover
+// TODO: add a state machine interface to apply the committed entries and do snapshot/recover
type raftReadyHandler struct {
getLead func() (lead uint64)
updateLead func(lead uint64)
@@ -1008,24 +769,9 @@ func (s *EtcdServer) run() {
lg.Panic("failed to get snapshot from Raft storage", zap.Error(err))
}
- // asynchronously accept apply packets, dispatch progress in-order
- sched := schedule.NewFIFOScheduler()
+ // asynchronously accept toApply packets, dispatch progress in-order
+ sched := schedule.NewFIFOScheduler(lg)
- var (
- smu sync.RWMutex
- syncC <-chan time.Time
- )
- setSyncC := func(ch <-chan time.Time) {
- smu.Lock()
- syncC = ch
- smu.Unlock()
- }
- getSyncC := func() (ch <-chan time.Time) {
- smu.RLock()
- ch = syncC
- smu.RUnlock()
- return
- }
rh := &raftReadyHandler{
getLead: func() (lead uint64) { return s.getLead() },
updateLead: func(lead uint64) { s.setLead(lead) },
@@ -1037,7 +783,6 @@ func (s *EtcdServer) run() {
if s.compactor != nil {
s.compactor.Pause()
}
- setSyncC(nil)
} else {
if newLeader {
t := time.Now()
@@ -1045,17 +790,12 @@ func (s *EtcdServer) run() {
s.leadElectedTime = t
s.leadTimeMu.Unlock()
}
- setSyncC(s.SyncTicker.C)
if s.compactor != nil {
s.compactor.Resume()
}
}
if newLeader {
- s.leaderChangedMu.Lock()
- lc := s.leaderChanged
- s.leaderChanged = make(chan struct{})
- close(lc)
- s.leaderChangedMu.Unlock()
+ s.leaderChanged.Notify()
}
// TODO: remove the nil checking
// current test utility does not provide the stats
@@ -1073,10 +813,11 @@ func (s *EtcdServer) run() {
s.r.start(rh)
ep := etcdProgress{
- confState: sn.Metadata.ConfState,
- snapi: sn.Metadata.Index,
- appliedt: sn.Metadata.Term,
- appliedi: sn.Metadata.Index,
+ confState: sn.Metadata.ConfState,
+ diskSnapshotIndex: sn.Metadata.Index,
+ memorySnapshotIndex: sn.Metadata.Index,
+ appliedt: sn.Metadata.Term,
+ appliedi: sn.Metadata.Index,
}
defer func() {
@@ -1086,7 +827,7 @@ func (s *EtcdServer) run() {
s.cancel()
sched.Stop()
- // wait for gouroutines before closing raft so wal stays open
+ // wait for goroutines before closing raft so wal stays open
s.wg.Wait()
s.SyncTicker.Stop()
@@ -1108,50 +849,106 @@ func (s *EtcdServer) run() {
for {
select {
case ap := <-s.r.apply():
- f := func(context.Context) { s.applyAll(&ep, &ap) }
+ f := schedule.NewJob("server_applyAll", func(context.Context) { s.applyAll(&ep, &ap) })
sched.Schedule(f)
case leases := <-expiredLeaseC:
- s.GoAttach(func() {
- // Increases throughput of expired leases deletion process through parallelization
- c := make(chan struct{}, maxPendingRevokes)
- for _, lease := range leases {
- select {
- case c <- struct{}{}:
- case <-s.stopping:
- return
- }
- lid := lease.ID
- s.GoAttach(func() {
- ctx := s.authStore.WithRoot(s.ctx)
- _, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: int64(lid)})
- if lerr == nil {
- leaseExpired.Inc()
- } else {
- lg.Warn(
- "failed to revoke lease",
- zap.String("lease-id", fmt.Sprintf("%016x", lid)),
- zap.Error(lerr),
- )
- }
-
- <-c
- })
- }
- })
+ s.revokeExpiredLeases(leases)
case err := <-s.errorc:
lg.Warn("server error", zap.Error(err))
lg.Warn("data-dir used by this member must be removed")
return
- case <-getSyncC():
- if s.v2store.HasTTLKeys() {
- s.sync(s.Cfg.ReqTimeout())
- }
case <-s.stop:
return
}
}
}
+func (s *EtcdServer) revokeExpiredLeases(leases []*lease.Lease) {
+ s.GoAttach(func() {
+ // We shouldn't revoke any leases if current member isn't a leader,
+ // because the operation should only be performed by the leader. When
+ // the leader gets blocked on the raft loop, such as writing WAL entries,
+ // it can't process any events or messages from raft. It may think it
+ // is still the leader even though the leader has already changed.
+ // Refer to https://github.com/etcd-io/etcd/issues/15247
+ lg := s.Logger()
+ if !s.ensureLeadership() {
+ lg.Warn("Ignore the lease revoking request because current member isn't a leader",
+ zap.Uint64("local-member-id", uint64(s.MemberID())))
+ return
+ }
+
+ // Increases throughput of expired leases deletion process through parallelization
+ c := make(chan struct{}, maxPendingRevokes)
+ for _, curLease := range leases {
+ select {
+ case c <- struct{}{}:
+ case <-s.stopping:
+ return
+ }
+
+ f := func(lid int64) {
+ s.GoAttach(func() {
+ ctx := s.authStore.WithRoot(s.ctx)
+ _, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lid})
+ if lerr == nil {
+ leaseExpired.Inc()
+ } else {
+ lg.Warn(
+ "failed to revoke lease",
+ zap.String("lease-id", fmt.Sprintf("%016x", lid)),
+ zap.Error(lerr),
+ )
+ }
+
+ <-c
+ })
+ }
+
+ f(int64(curLease.ID))
+ }
+ })
+}
+
+// isActive checks whether the raft loop of this etcd instance is still
+// actively ticking. It returns false if no tick has been recorded within
+// 3 * tickMs.
+func (s *EtcdServer) isActive() bool {
+ latestTickTs := s.r.getLatestTickTs()
+ threshold := 3 * time.Duration(s.Cfg.TickMs) * time.Millisecond
+ return latestTickTs.Add(threshold).After(time.Now())
+}
+
+// ensureLeadership checks whether the current member is still the leader.
+func (s *EtcdServer) ensureLeadership() bool {
+ lg := s.Logger()
+
+ if s.isActive() {
+ lg.Debug("The member is active, skip checking leadership",
+ zap.Time("latestTickTs", s.r.getLatestTickTs()),
+ zap.Time("now", time.Now()))
+ return true
+ }
+
+ ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout())
+ defer cancel()
+ if err := s.linearizableReadNotify(ctx); err != nil {
+ lg.Warn("Failed to check current member's leadership",
+ zap.Error(err))
+ return false
+ }
+
+ newLeaderID := s.raftStatus().Lead
+ if newLeaderID != uint64(s.MemberID()) {
+ lg.Warn("Current member isn't a leader",
+ zap.Uint64("local-member-id", uint64(s.MemberID())),
+ zap.Uint64("new-lead", newLeaderID))
+ return false
+ }
+
+ return true
+}
+
// Cleanup removes allocated objects by EtcdServer.NewServer in
// situation that EtcdServer::Start was not called (that takes care of cleanup).
func (s *EtcdServer) Cleanup() {
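
To summarize the guard introduced above: ensureLeadership first consults the tick timestamp (isActive), and only when the raft loop looks stalled does it pay for a linearizable read and a leader-ID comparison. A condensed, self-contained sketch with placeholder dependencies (latestTick, readNotify, leaderID are stand-ins, not etcd APIs):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// leadershipGuard bundles the inputs ensureLeadership needs; every field is a
// placeholder for the corresponding EtcdServer/raftNode call.
type leadershipGuard struct {
	latestTick   func() time.Time            // r.getLatestTickTs()
	tickInterval time.Duration               // time.Duration(cfg.TickMs) * time.Millisecond
	readNotify   func(context.Context) error // s.linearizableReadNotify
	leaderID     func() uint64               // s.raftStatus().Lead
	localID      uint64                      // uint64(s.MemberID())
}

func (g *leadershipGuard) ensureLeadership(ctx context.Context) bool {
	// Recently ticked raft loop: the member looks live, skip the expensive check.
	if time.Since(g.latestTick()) < 3*g.tickInterval {
		return true
	}
	// Otherwise force a round trip through raft; this fails without quorum.
	if err := g.readNotify(ctx); err != nil {
		return false
	}
	// Finally confirm raft still reports this member as the leader.
	return g.leaderID() == g.localID
}

func main() {
	g := &leadershipGuard{
		latestTick:   func() time.Time { return time.Now().Add(-time.Second) },
		tickInterval: 100 * time.Millisecond,
		readNotify:   func(context.Context) error { return errors.New("no quorum") },
		leaderID:     func() uint64 { return 2 },
		localID:      1,
	}
	fmt.Println("still leader:", g.ensureLeadership(context.Background()))
}
```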
@@ -1174,19 +971,20 @@ func (s *EtcdServer) Cleanup() {
}
}
-func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
+func (s *EtcdServer) applyAll(ep *etcdProgress, apply *toApply) {
s.applySnapshot(ep, apply)
s.applyEntries(ep, apply)
+ backend.VerifyBackendConsistency(s.Backend(), s.Logger(), true, schema.AllBuckets...)
proposalsApplied.Set(float64(ep.appliedi))
s.applyWait.Trigger(ep.appliedi)
// wait for the raft routine to finish the disk writes before triggering a
// snapshot. or applied index might be greater than the last index in raft
- // storage, since the raft routine might be slower than apply routine.
+ // storage, since the raft routine might be slower than the toApply routine.
<-apply.notifyc
- s.triggerSnapshot(ep)
+ s.snapshotIfNeededAndCompactRaftLog(ep)
select {
// snapshot requested via send()
case m := <-s.r.msgSnapC:
@@ -1196,8 +994,8 @@ func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
}
}
-func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
- if raft.IsEmptySnap(apply.snapshot) {
+func (s *EtcdServer) applySnapshot(ep *etcdProgress, toApply *toApply) {
+ if raft.IsEmptySnap(toApply.snapshot) {
return
}
applySnapshotInProgress.Inc()
@@ -1205,40 +1003,49 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
lg := s.Logger()
lg.Info(
"applying snapshot",
- zap.Uint64("current-snapshot-index", ep.snapi),
+ zap.Uint64("current-snapshot-index", ep.diskSnapshotIndex),
zap.Uint64("current-applied-index", ep.appliedi),
- zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index),
- zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term),
+ zap.Uint64("incoming-leader-snapshot-index", toApply.snapshot.Metadata.Index),
+ zap.Uint64("incoming-leader-snapshot-term", toApply.snapshot.Metadata.Term),
)
defer func() {
lg.Info(
"applied snapshot",
- zap.Uint64("current-snapshot-index", ep.snapi),
+ zap.Uint64("current-snapshot-index", ep.diskSnapshotIndex),
zap.Uint64("current-applied-index", ep.appliedi),
- zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index),
- zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term),
+ zap.Uint64("incoming-leader-snapshot-index", toApply.snapshot.Metadata.Index),
+ zap.Uint64("incoming-leader-snapshot-term", toApply.snapshot.Metadata.Term),
)
applySnapshotInProgress.Dec()
}()
- if apply.snapshot.Metadata.Index <= ep.appliedi {
+ if toApply.snapshot.Metadata.Index <= ep.appliedi {
lg.Panic(
"unexpected leader snapshot from outdated index",
- zap.Uint64("current-snapshot-index", ep.snapi),
+ zap.Uint64("current-snapshot-index", ep.diskSnapshotIndex),
zap.Uint64("current-applied-index", ep.appliedi),
- zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index),
- zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term),
+ zap.Uint64("incoming-leader-snapshot-index", toApply.snapshot.Metadata.Index),
+ zap.Uint64("incoming-leader-snapshot-term", toApply.snapshot.Metadata.Term),
)
}
// wait for raftNode to persist snapshot onto the disk
- <-apply.notifyc
+ <-toApply.notifyc
- newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot, s.beHooks)
+ // gofail: var applyBeforeOpenSnapshot struct{}
+ newbe, err := serverstorage.OpenSnapshotBackend(s.Cfg, s.snapshotter, toApply.snapshot, s.beHooks)
if err != nil {
lg.Panic("failed to open snapshot backend", zap.Error(err))
}
+ // We need to set the backend on consistIndex before recovering the lessor,
+ // because lessor.Recover commits the boltDB transaction, and OnPreCommitUnsafe
+ // would then persist the old consistent_index into the db, overwriting the
+ // new consistent_index value coming from the snapshot.
+ s.consistIndex.SetBackend(newbe)
+ verifySnapshotIndex(toApply.snapshot, s.consistIndex.ConsistentIndex())
+
// always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
// If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
if s.lessor != nil {
@@ -1255,7 +1062,8 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
lg.Panic("failed to restore mvcc store", zap.Error(err))
}
- s.consistIndex.SetBackend(newbe)
+ newbe.SetTxPostLockInsideApplyHook(s.getTxPostLockInsideApplyHook())
+
lg.Info("restored mvcc store", zap.Uint64("consistent-index", s.consistIndex.ConsistentIndex()))
// Closing old backend might block until all the txns
@@ -1287,23 +1095,23 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
if s.authStore != nil {
lg.Info("restoring auth store")
- s.authStore.Recover(newbe)
+ s.authStore.Recover(schema.NewAuthBackend(lg, newbe))
lg.Info("restored auth store")
}
lg.Info("restoring v2 store")
- if err := s.v2store.Recovery(apply.snapshot.Data); err != nil {
+ if err := s.v2store.Recovery(toApply.snapshot.Data); err != nil {
lg.Panic("failed to restore v2 store", zap.Error(err))
}
- if err := assertNoV2StoreContent(lg, s.v2store, s.Cfg.V2Deprecation); err != nil {
+ if err := serverstorage.AssertNoV2StoreContent(lg, s.v2store, s.Cfg.V2Deprecation); err != nil {
lg.Panic("illegal v2store content", zap.Error(err))
}
lg.Info("restored v2 store")
- s.cluster.SetBackend(newbe)
+ s.cluster.SetBackend(schema.NewMembershipBackend(lg, newbe))
lg.Info("restoring cluster configuration")
@@ -1319,7 +1127,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
lg.Info("adding peers from new cluster configuration")
for _, m := range s.cluster.Members() {
- if m.ID == s.ID() {
+ if m.ID == s.MemberID() {
continue
}
s.r.transport.AddPeer(m.ID, m.PeerURLs)
@@ -1327,13 +1135,39 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
lg.Info("added peers from new cluster configuration")
- ep.appliedt = apply.snapshot.Metadata.Term
- ep.appliedi = apply.snapshot.Metadata.Index
- ep.snapi = ep.appliedi
- ep.confState = apply.snapshot.Metadata.ConfState
+ ep.appliedt = toApply.snapshot.Metadata.Term
+ ep.appliedi = toApply.snapshot.Metadata.Index
+ ep.diskSnapshotIndex = ep.appliedi
+ ep.memorySnapshotIndex = ep.appliedi
+ ep.confState = toApply.snapshot.Metadata.ConfState
+
+ // As the backend and implementations like alarmStore have changed, we need
+ // to re-bootstrap the appliers.
+ s.uberApply = s.NewUberApplier()
+}
+
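+// NewUberApplier builds a new UberApplier wired to the server's current backend,
+// KV store, lessor, auth, alarm and membership state.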
+func (s *EtcdServer) NewUberApplier() apply.UberApplier {
+ return apply.NewUberApplier(s.lg, s.be, s.KV(), s.alarmStore, s.authStore, s.lessor, s.cluster, s, s, s.consistIndex,
+ s.Cfg.WarningApplyDuration, s.Cfg.ServerFeatureGate.Enabled(features.TxnModeWriteWithSharedBuffer), s.Cfg.QuotaBackendBytes)
+}
+
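+// verifySnapshotIndex asserts, when verification is enabled, that the backend's
+// consistent index equals the index of the snapshot being applied.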
+func verifySnapshotIndex(snapshot raftpb.Snapshot, cindex uint64) {
+ verify.Verify(func() {
+ if cindex != snapshot.Metadata.Index {
+ panic(fmt.Sprintf("consistent_index(%d) isn't equal to snapshot index (%d)", cindex, snapshot.Metadata.Index))
+ }
+ })
+}
+
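+// verifyConsistentIndexIsLatest asserts, when verification is enabled, that the
+// consistent index is not older than the snapshot index.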
+func verifyConsistentIndexIsLatest(lg *zap.Logger, snapshot raftpb.Snapshot, cindex uint64) {
+ verify.Verify(func() {
+ if cindex < snapshot.Metadata.Index {
+ lg.Panic(fmt.Sprintf("consistent_index(%d) is older than snapshot index (%d)", cindex, snapshot.Metadata.Index))
+ }
+ })
}
-func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) {
+func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *toApply) {
if len(apply.entries) == 0 {
return
}
@@ -1354,27 +1188,32 @@ func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) {
return
}
var shouldstop bool
- if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
+ if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState, apply.raftAdvancedC); shouldstop {
go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster"))
}
}
-func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) {
- if ep.appliedi-ep.snapi <= s.Cfg.SnapshotCount {
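+// ForceSnapshot marks the server so that the next snapshot cycle persists a
+// snapshot to disk regardless of SnapshotCount.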
+func (s *EtcdServer) ForceSnapshot() {
+ s.forceDiskSnapshot = true
+}
+
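+// snapshotIfNeededAndCompactRaftLog creates a disk and/or in-memory snapshot once
+// the configured thresholds are exceeded, and then compacts the raft log accordingly.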
+func (s *EtcdServer) snapshotIfNeededAndCompactRaftLog(ep *etcdProgress) {
+ // TODO: Remove disk snapshot in v3.7
+ shouldSnapshotToDisk := s.shouldSnapshotToDisk(ep)
+ shouldSnapshotToMemory := s.shouldSnapshotToMemory(ep)
+ if !shouldSnapshotToDisk && !shouldSnapshotToMemory {
return
}
+ s.snapshot(ep, shouldSnapshotToDisk)
+ s.compactRaftLog(ep.appliedi)
+}
- lg := s.Logger()
- lg.Info(
- "triggering snapshot",
- zap.String("local-member-id", s.ID().String()),
- zap.Uint64("local-member-applied-index", ep.appliedi),
- zap.Uint64("local-member-snapshot-index", ep.snapi),
- zap.Uint64("local-member-snapshot-count", s.Cfg.SnapshotCount),
- )
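+// shouldSnapshotToDisk reports whether a snapshot must be persisted to disk: either a
+// disk snapshot was forced and new entries have been applied since the last one, or
+// more than SnapshotCount entries have been applied since the last disk snapshot.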
+func (s *EtcdServer) shouldSnapshotToDisk(ep *etcdProgress) bool {
+ return (s.forceDiskSnapshot && ep.appliedi != ep.diskSnapshotIndex) || (ep.appliedi-ep.diskSnapshotIndex > s.Cfg.SnapshotCount)
+}
- s.snapshot(ep.appliedi, ep.confState)
- ep.snapi = ep.appliedi
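+// shouldSnapshotToMemory reports whether more than memorySnapshotCount entries have
+// been applied since the last in-memory snapshot.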
+func (s *EtcdServer) shouldSnapshotToMemory(ep *etcdProgress) bool {
+ return ep.appliedi > ep.memorySnapshotIndex+memorySnapshotCount
}
func (s *EtcdServer) hasMultipleVotingMembers() bool {
@@ -1382,13 +1221,14 @@ func (s *EtcdServer) hasMultipleVotingMembers() bool {
}
func (s *EtcdServer) isLeader() bool {
- return uint64(s.ID()) == s.Lead()
+ return uint64(s.MemberID()) == s.Lead()
}
// MoveLeader transfers the leader to the given transferee.
func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error {
- if !s.cluster.IsMemberExist(types.ID(transferee)) || s.cluster.Member(types.ID(transferee)).IsLearner {
- return ErrBadLeaderTransferee
+ member := s.cluster.Member(types.ID(transferee))
+ if member == nil || member.IsLearner {
+ return errors.ErrBadLeaderTransferee
}
now := time.Now()
@@ -1397,7 +1237,7 @@ func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) er
lg := s.Logger()
lg.Info(
"leadership transfer starting",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.String("current-leader-member-id", types.ID(lead).String()),
zap.String("transferee-member-id", types.ID(transferee).String()),
)
@@ -1406,7 +1246,7 @@ func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) er
for s.Lead() != transferee {
select {
case <-ctx.Done(): // time out
- return ErrTimeoutLeaderTransfer
+ return errors.ErrTimeoutLeaderTransfer
case <-time.After(interval):
}
}
@@ -1414,7 +1254,7 @@ func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) er
// TODO: drain all requests, or drop all messages to the old leader
lg.Info(
"leadership transfer finished",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.String("old-leader-member-id", types.ID(lead).String()),
zap.String("new-leader-member-id", types.ID(transferee).String()),
zap.Duration("took", time.Since(now)),
@@ -1422,13 +1262,13 @@ func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) er
return nil
}
-// TransferLeadership transfers the leader to the chosen transferee.
-func (s *EtcdServer) TransferLeadership() error {
+// TryTransferLeadershipOnShutdown transfers the leader to the chosen transferee. It is only used in server graceful shutdown.
+func (s *EtcdServer) TryTransferLeadershipOnShutdown() error {
lg := s.Logger()
if !s.isLeader() {
lg.Info(
"skipped leadership transfer; local server is not leader",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.String("current-leader-member-id", types.ID(s.Lead()).String()),
)
return nil
@@ -1437,7 +1277,7 @@ func (s *EtcdServer) TransferLeadership() error {
if !s.hasMultipleVotingMembers() {
lg.Info(
"skipped leadership transfer for single voting member cluster",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.String("current-leader-member-id", types.ID(s.Lead()).String()),
)
return nil
@@ -1445,7 +1285,7 @@ func (s *EtcdServer) TransferLeadership() error {
transferee, ok := longestConnected(s.r.transport, s.cluster.VotingMemberIDs())
if !ok {
- return ErrUnhealthy
+ return errors.ErrUnhealthy
}
tm := s.Cfg.ReqTimeout()
@@ -1473,8 +1313,8 @@ func (s *EtcdServer) HardStop() {
// Do and Process cannot be called after Stop has been invoked.
func (s *EtcdServer) Stop() {
lg := s.Logger()
- if err := s.TransferLeadership(); err != nil {
- lg.Warn("leadership transfer failed", zap.String("local-member-id", s.ID().String()), zap.Error(err))
+ if err := s.TryTransferLeadershipOnShutdown(); err != nil {
+ lg.Warn("leadership transfer failed", zap.String("local-member-id", s.MemberID().String()), zap.Error(err))
}
s.HardStop()
}
@@ -1494,26 +1334,14 @@ func (s *EtcdServer) stopWithDelay(d time.Duration, err error) {
}
}
-// StopNotify returns a channel that receives a empty struct
+// StopNotify returns a channel that receives an empty struct
// when the server is stopped.
func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done }
-// StoppingNotify returns a channel that receives a empty struct
+// StoppingNotify returns a channel that receives an empty struct
// when the server is being stopped.
func (s *EtcdServer) StoppingNotify() <-chan struct{} { return s.stopping }
-func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() }
-
-func (s *EtcdServer) LeaderStats() []byte {
- lead := s.getLead()
- if lead != uint64(s.id) {
- return nil
- }
- return s.lstats.JSON()
-}
-
-func (s *EtcdServer) StoreStats() []byte { return s.v2store.JsonStats() }
-
func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error {
if s.authStore == nil {
// In the context of ordinary etcd process, s.authStore will never be nil.
@@ -1523,7 +1351,7 @@ func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) err
// Note that this permission check is done in the API layer,
// so TOCTOU problem can be caused potentially in a schedule like this:
- // update membership with user A -> revoke root role of A -> apply membership change
+ // update membership with user A -> revoke root role of A -> apply the membership change
// in the state machine layer
// However, both of membership change and role management requires the root privilege.
// So careful operation by admins can prevent the problem.
@@ -1574,21 +1402,21 @@ func (s *EtcdServer) mayAddMember(memb membership.Member) error {
if !memb.IsLearner && !s.cluster.IsReadyToAddVotingMember() {
lg.Warn(
"rejecting member add request; not enough healthy members",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.String("requested-member-add", fmt.Sprintf("%+v", memb)),
- zap.Error(ErrNotEnoughStartedMembers),
+ zap.Error(errors.ErrNotEnoughStartedMembers),
)
- return ErrNotEnoughStartedMembers
+ return errors.ErrNotEnoughStartedMembers
}
- if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.VotingMembers()) {
+ if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.MemberID(), s.cluster.VotingMembers()) {
lg.Warn(
"rejecting member add request; local member has not been connected to all peers, reconfigure breaks active quorum",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.String("requested-member-add", fmt.Sprintf("%+v", memb)),
- zap.Error(ErrUnhealthy),
+ zap.Error(errors.ErrUnhealthy),
)
- return ErrUnhealthy
+ return errors.ErrUnhealthy
}
return nil
@@ -1621,7 +1449,7 @@ func (s *EtcdServer) PromoteMember(ctx context.Context, id uint64) ([]*membershi
learnerPromoteSucceed.Inc()
return resp, nil
}
- if err != ErrNotLeader {
+ if !errorspkg.Is(err, errors.ErrNotLeader) {
learnerPromoteFailed.WithLabelValues(err.Error()).Inc()
return resp, err
}
@@ -1640,16 +1468,16 @@ func (s *EtcdServer) PromoteMember(ctx context.Context, id uint64) ([]*membershi
return resp, nil
}
// If member promotion failed, return early. Otherwise keep retry.
- if err == ErrLearnerNotReady || err == membership.ErrIDNotFound || err == membership.ErrMemberNotLearner {
+ if errorspkg.Is(err, errors.ErrLearnerNotReady) || errorspkg.Is(err, membership.ErrIDNotFound) || errorspkg.Is(err, membership.ErrMemberNotLearner) {
return nil, err
}
}
}
- if cctx.Err() == context.DeadlineExceeded {
- return nil, ErrTimeout
+ if errorspkg.Is(cctx.Err(), context.DeadlineExceeded) {
+ return nil, errors.ErrTimeout
}
- return nil, ErrCanceled
+ return nil, errors.ErrCanceled
}
// promoteMember checks whether the to-be-promoted learner node is ready before sending the promote
@@ -1692,8 +1520,7 @@ func (s *EtcdServer) promoteMember(ctx context.Context, id uint64) ([]*membershi
func (s *EtcdServer) mayPromoteMember(id types.ID) error {
lg := s.Logger()
- err := s.isLearnerReady(uint64(id))
- if err != nil {
+ if err := s.isLearnerReady(lg, uint64(id)); err != nil {
return err
}
@@ -1703,11 +1530,11 @@ func (s *EtcdServer) mayPromoteMember(id types.ID) error {
if !s.cluster.IsReadyToPromoteMember(uint64(id)) {
lg.Warn(
"rejecting member promote request; not enough healthy members",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.String("requested-member-remove-id", id.String()),
- zap.Error(ErrNotEnoughStartedMembers),
+ zap.Error(errors.ErrNotEnoughStartedMembers),
)
- return ErrNotEnoughStartedMembers
+ return errors.ErrNotEnoughStartedMembers
}
return nil
@@ -1715,13 +1542,17 @@ func (s *EtcdServer) mayPromoteMember(id types.ID) error {
// check whether the learner catches up with leader or not.
// Note: it will return nil if member is not found in cluster or if member is not learner.
-// These two conditions will be checked before apply phase later.
-func (s *EtcdServer) isLearnerReady(id uint64) error {
+// These two conditions will be checked later, before the apply phase.
+func (s *EtcdServer) isLearnerReady(lg *zap.Logger, id uint64) error {
+ if err := s.waitAppliedIndex(); err != nil {
+ return err
+ }
+
rs := s.raftStatus()
// leader's raftStatus.Progress is not nil
if rs.Progress == nil {
- return ErrNotLeader
+ return errors.ErrNotLeader
}
var learnerMatch uint64
@@ -1736,12 +1567,24 @@ func (s *EtcdServer) isLearnerReady(id uint64) error {
}
}
- if isFound {
- leaderMatch := rs.Progress[leaderID].Match
- // the learner's Match not caught up with leader yet
- if float64(learnerMatch) < float64(leaderMatch)*readyPercent {
- return ErrLearnerNotReady
- }
+ // Return an error from the API directly, to avoid the request
+ // being unnecessarily delivered to raft.
+ if !isFound {
+ return membership.ErrIDNotFound
+ }
+
+ leaderMatch := rs.Progress[leaderID].Match
+
+ learnerReadyPercent := float64(learnerMatch) / float64(leaderMatch)
+
+ // the learner's Match not caught up with leader yet
+ if learnerReadyPercent < readyPercentThreshold {
+ lg.Error(
+ "rejecting promote learner: learner is not ready",
+ zap.Float64("learner-ready-percent", learnerReadyPercent),
+ zap.Float64("ready-percent-threshold", readyPercentThreshold),
+ )
+ return errors.ErrLearnerNotReady
}
return nil
@@ -1753,39 +1596,39 @@ func (s *EtcdServer) mayRemoveMember(id types.ID) error {
}
lg := s.Logger()
- isLearner := s.cluster.IsMemberExist(id) && s.cluster.Member(id).IsLearner
+ member := s.cluster.Member(id)
// no need to check quorum when removing non-voting member
- if isLearner {
+ if member != nil && member.IsLearner {
return nil
}
if !s.cluster.IsReadyToRemoveVotingMember(uint64(id)) {
lg.Warn(
"rejecting member remove request; not enough healthy members",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.String("requested-member-remove-id", id.String()),
- zap.Error(ErrNotEnoughStartedMembers),
+ zap.Error(errors.ErrNotEnoughStartedMembers),
)
- return ErrNotEnoughStartedMembers
+ return errors.ErrNotEnoughStartedMembers
}
// downed member is safe to remove since it's not part of the active quorum
- if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() {
+ if t := s.r.transport.ActiveSince(id); id != s.MemberID() && t.IsZero() {
return nil
}
// protect quorum if some members are down
m := s.cluster.VotingMembers()
- active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m)
+ active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.MemberID(), m)
if (active - 1) < 1+((len(m)-1)/2) {
lg.Warn(
"rejecting member remove request; local member has not been connected to all peers, reconfigure breaks active quorum",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.String("requested-member-remove", id.String()),
zap.Int("active-peers", active),
- zap.Error(ErrUnhealthy),
+ zap.Error(errors.ErrUnhealthy),
)
- return ErrUnhealthy
+ return errors.ErrUnhealthy
}
return nil
@@ -1841,9 +1684,7 @@ func (s *EtcdServer) getLead() uint64 {
}
func (s *EtcdServer) LeaderChangedNotify() <-chan struct{} {
- s.leaderChangedMu.RLock()
- defer s.leaderChangedMu.RUnlock()
- return s.leaderChanged
+ return s.leaderChanged.Receive()
}
// FirstCommitInTermNotify returns channel that will be unlocked on first
@@ -1851,21 +1692,18 @@ func (s *EtcdServer) LeaderChangedNotify() <-chan struct{} {
// read-only requests (leader is not able to respond any read-only requests
// as long as linearizable semantic is required)
func (s *EtcdServer) FirstCommitInTermNotify() <-chan struct{} {
- s.firstCommitInTermMu.RLock()
- defer s.firstCommitInTermMu.RUnlock()
- return s.firstCommitInTermC
+ return s.firstCommitInTerm.Receive()
}
-// RaftStatusGetter represents etcd server and Raft progress.
-type RaftStatusGetter interface {
- ID() types.ID
- Leader() types.ID
- CommittedIndex() uint64
- AppliedIndex() uint64
- Term() uint64
-}
+// MemberId returns the ID of the local member.
+// Deprecated: Please use (*EtcdServer) MemberID instead.
+//
+//revive:disable:var-naming
+func (s *EtcdServer) MemberId() types.ID { return s.MemberID() }
+
+//revive:enable:var-naming
-func (s *EtcdServer) ID() types.ID { return s.id }
+func (s *EtcdServer) MemberID() types.ID { return s.memberID }
func (s *EtcdServer) Leader() types.ID { return types.ID(s.getLead()) }
@@ -1878,8 +1716,9 @@ func (s *EtcdServer) AppliedIndex() uint64 { return s.getAppliedIndex() }
func (s *EtcdServer) Term() uint64 { return s.getTerm() }
type confChangeResponse struct {
- membs []*membership.Member
- err error
+ membs []*membership.Member
+ raftAdvanceC <-chan struct{}
+ err error
}
// configure sends a configuration change through consensus and
@@ -1902,9 +1741,14 @@ func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*me
lg.Panic("failed to configure")
}
resp := x.(*confChangeResponse)
+ // etcdserver needs to ensure that raft has already been notified
+ // or advanced before it responds to the client. Otherwise, a
+ // subsequent config change request may be rejected.
+ // See https://github.com/etcd-io/etcd/issues/15528.
+ <-resp.raftAdvanceC
lg.Info(
"applied a configuration change through raft",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.String("raft-conf-change", cc.Type.String()),
zap.String("raft-conf-change-node-id", types.ID(cc.NodeID).String()),
)
@@ -1915,29 +1759,10 @@ func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*me
return nil, s.parseProposeCtxErr(ctx.Err(), start)
case <-s.stopping:
- return nil, ErrStopped
+ return nil, errors.ErrStopped
}
}
-// sync proposes a SYNC request and is non-blocking.
-// This makes no guarantee that the request will be proposed or performed.
-// The request will be canceled after the given timeout.
-func (s *EtcdServer) sync(timeout time.Duration) {
- req := pb.Request{
- Method: "SYNC",
- ID: s.reqIDGen.Next(),
- Time: time.Now().UnixNano(),
- }
- data := pbutil.MustMarshal(&req)
- // There is no promise that node has leader when do SYNC request,
- // so it uses goroutine to propose.
- ctx, cancel := context.WithTimeout(s.ctx, timeout)
- s.GoAttach(func() {
- s.r.Propose(ctx, data)
- cancel()
- })
-}
-
// publishV3 registers server information into the cluster using v3 request. The
// information is the JSON representation of this server's member struct, updated
// with the static clientURLs of the server.
@@ -1945,7 +1770,7 @@ func (s *EtcdServer) sync(timeout time.Duration) {
// or its server is stopped.
func (s *EtcdServer) publishV3(timeout time.Duration) {
req := &membershippb.ClusterMemberAttrSetRequest{
- Member_ID: uint64(s.id),
+ Member_ID: uint64(s.MemberID()),
MemberAttributes: &membershippb.Attributes{
Name: s.attributes.Name,
ClientUrls: s.attributes.ClientURLs,
@@ -1957,7 +1782,7 @@ func (s *EtcdServer) publishV3(timeout time.Duration) {
case <-s.stopping:
lg.Warn(
"stopped publish because server is stopping",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
zap.Duration("publish-timeout", timeout),
)
@@ -1974,7 +1799,7 @@ func (s *EtcdServer) publishV3(timeout time.Duration) {
close(s.readych)
lg.Info(
"published local member to cluster through raft",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
zap.String("cluster-id", s.cluster.ID().String()),
zap.Duration("publish-timeout", timeout),
@@ -1984,7 +1809,7 @@ func (s *EtcdServer) publishV3(timeout time.Duration) {
default:
lg.Warn(
"failed to publish local member to cluster through raft",
- zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-id", s.MemberID().String()),
zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
zap.Duration("publish-timeout", timeout),
zap.Error(err),
@@ -1993,76 +1818,12 @@ func (s *EtcdServer) publishV3(timeout time.Duration) {
}
}
-// publish registers server information into the cluster. The information
-// is the JSON representation of this server's member struct, updated with the
-// static clientURLs of the server.
-// The function keeps attempting to register until it succeeds,
-// or its server is stopped.
-//
-// Use v2 store to encode member attributes, and apply through Raft
-// but does not go through v2 API endpoint, which means even with v2
-// client handler disabled (e.g. --enable-v2=false), cluster can still
-// process publish requests through rafthttp
-// TODO: Remove in 3.6 (start using publishV3)
-func (s *EtcdServer) publish(timeout time.Duration) {
- lg := s.Logger()
- b, err := json.Marshal(s.attributes)
- if err != nil {
- lg.Panic("failed to marshal JSON", zap.Error(err))
- return
- }
- req := pb.Request{
- Method: "PUT",
- Path: membership.MemberAttributesStorePath(s.id),
- Val: string(b),
- }
-
- for {
- ctx, cancel := context.WithTimeout(s.ctx, timeout)
- _, err := s.Do(ctx, req)
- cancel()
- switch err {
- case nil:
- close(s.readych)
- lg.Info(
- "published local member to cluster through raft",
- zap.String("local-member-id", s.ID().String()),
- zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
- zap.String("request-path", req.Path),
- zap.String("cluster-id", s.cluster.ID().String()),
- zap.Duration("publish-timeout", timeout),
- )
- return
-
- case ErrStopped:
- lg.Warn(
- "stopped publish because server is stopped",
- zap.String("local-member-id", s.ID().String()),
- zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
- zap.Duration("publish-timeout", timeout),
- zap.Error(err),
- )
- return
-
- default:
- lg.Warn(
- "failed to publish local member to cluster through raft",
- zap.String("local-member-id", s.ID().String()),
- zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
- zap.String("request-path", req.Path),
- zap.Duration("publish-timeout", timeout),
- zap.Error(err),
- )
- }
- }
-}
-
func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
atomic.AddInt64(&s.inflightSnapshots, 1)
lg := s.Logger()
fields := []zap.Field{
- zap.String("from", s.ID().String()),
+ zap.String("from", s.MemberID().String()),
zap.String("to", types.ID(merged.To).String()),
zap.Int64("bytes", merged.TotalSize),
zap.String("size", humanize.Bytes(uint64(merged.TotalSize))),
@@ -2097,44 +1858,48 @@ func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
})
}
-// apply takes entries received from Raft (after it has been committed) and
+// apply takes entries received from Raft (after they have been committed) and
// applies them to the current state of the EtcdServer.
// The given entries should not be empty.
func (s *EtcdServer) apply(
es []raftpb.Entry,
confState *raftpb.ConfState,
+ raftAdvancedC <-chan struct{},
) (appliedt uint64, appliedi uint64, shouldStop bool) {
s.lg.Debug("Applying entries", zap.Int("num-entries", len(es)))
for i := range es {
e := es[i]
+ index := s.consistIndex.ConsistentIndex()
s.lg.Debug("Applying entry",
- zap.Uint64("index", e.Index),
- zap.Uint64("term", e.Term),
- zap.Stringer("type", e.Type))
+ zap.Uint64("consistent-index", index),
+ zap.Uint64("entry-index", e.Index),
+ zap.Uint64("entry-term", e.Term),
+ zap.Stringer("entry-type", e.Type))
+
+ // We need to apply all WAL entries on top of v2store,
+ // and only 'unapplied' entries (e.Index > backend.ConsistentIndex) on the backend.
+ shouldApplyV3 := membership.ApplyV2storeOnly
+ if e.Index > index {
+ shouldApplyV3 = membership.ApplyBoth
+ // record the consistent applying index of the entry currently being executed
+ s.consistIndex.SetConsistentApplyingIndex(e.Index, e.Term)
+ }
switch e.Type {
case raftpb.EntryNormal:
- s.applyEntryNormal(&e)
+ // gofail: var beforeApplyOneEntryNormal struct{}
+ s.applyEntryNormal(&e, shouldApplyV3)
s.setAppliedIndex(e.Index)
s.setTerm(e.Term)
case raftpb.EntryConfChange:
- // We need to apply all WAL entries on top of v2store
- // and only 'unapplied' (e.Index>backend.ConsistentIndex) on the backend.
- shouldApplyV3 := membership.ApplyV2storeOnly
-
- // set the consistent index of current executing entry
- if e.Index > s.consistIndex.ConsistentIndex() {
- s.consistIndex.SetConsistentIndex(e.Index, e.Term)
- shouldApplyV3 = membership.ApplyBoth
- }
-
+ // gofail: var beforeApplyOneConfChange struct{}
var cc raftpb.ConfChange
pbutil.MustUnmarshal(&cc, e.Data)
removedSelf, err := s.applyConfChange(cc, confState, shouldApplyV3)
s.setAppliedIndex(e.Index)
s.setTerm(e.Term)
shouldStop = shouldStop || removedSelf
- s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err})
+ s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), raftAdvancedC, err})
default:
lg := s.Logger()
@@ -2148,24 +1913,24 @@ func (s *EtcdServer) apply(
return appliedt, appliedi, shouldStop
}
-// applyEntryNormal apples an EntryNormal type raftpb request to the EtcdServer
-func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
- shouldApplyV3 := membership.ApplyV2storeOnly
- index := s.consistIndex.ConsistentIndex()
- if e.Index > index {
- // set the consistent index of current executing entry
- s.consistIndex.SetConsistentIndex(e.Index, e.Term)
- shouldApplyV3 = membership.ApplyBoth
+// applyEntryNormal applies an EntryNormal type raftpb request to the EtcdServer
+func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry, shouldApplyV3 membership.ShouldApplyV3) {
+ var ar *apply.Result
+ if shouldApplyV3 {
+ defer func() {
+ // The txPostLockInsideApplyHook will not get called in some cases,
+ // in which case we should move the consistent index forward directly.
+ newIndex := s.consistIndex.ConsistentIndex()
+ if newIndex < e.Index {
+ s.consistIndex.SetConsistentIndex(e.Index, e.Term)
+ }
+ }()
}
- s.lg.Debug("apply entry normal",
- zap.Uint64("consistent-index", index),
- zap.Uint64("entry-index", e.Index),
- zap.Bool("should-applyV3", bool(shouldApplyV3)))
// raft state machine may generate noop entry when leader confirmation.
// skip it in advance to avoid some potential bug in the future
if len(e.Data) == 0 {
- s.notifyAboutFirstCommitInTerm()
+ s.firstCommitInTerm.Notify()
// promote lessor when the local member is leader and finished
// applying all entries from the last term.
@@ -2181,32 +1946,32 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
rp := &r
pbutil.MustUnmarshal(rp, e.Data)
s.lg.Debug("applyEntryNormal", zap.Stringer("V2request", rp))
- s.w.Trigger(r.ID, s.applyV2Request((*RequestV2)(rp)))
- return
+ raftReq = v2ToV3Request(s.lg, (*RequestV2)(rp))
}
s.lg.Debug("applyEntryNormal", zap.Stringer("raftReq", &raftReq))
if raftReq.V2 != nil {
req := (*RequestV2)(raftReq.V2)
- s.w.Trigger(req.ID, s.applyV2Request(req))
- return
+ raftReq = v2ToV3Request(s.lg, req)
}
id := raftReq.ID
if id == 0 {
+ if raftReq.Header == nil {
+ s.lg.Panic("applyEntryNormal, could not find a header")
+ }
id = raftReq.Header.ID
}
- var ar *applyResult
needResult := s.w.IsRegistered(id)
if needResult || !noSideEffect(&raftReq) {
if !needResult && raftReq.Txn != nil {
removeNeedlessRangeReqs(raftReq.Txn)
}
- ar = s.applyV3.Apply(&raftReq, shouldApplyV3)
+ ar = s.applyInternalRaftRequest(&raftReq, shouldApplyV3)
}
- // do not re-apply applied entries.
+ // do not re-apply already applied entries.
if !shouldApplyV3 {
return
}
@@ -2215,7 +1980,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
return
}
- if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 {
+ if !errorspkg.Is(ar.Err, errors.ErrNoSpace) || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 {
s.w.Trigger(id, ar)
return
}
@@ -2225,12 +1990,12 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
"message exceeded backend quota; raising alarm",
zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))),
- zap.Error(ar.err),
+ zap.Error(ar.Err),
)
s.GoAttach(func() {
a := &pb.AlarmRequest{
- MemberID: uint64(s.ID()),
+ MemberID: uint64(s.MemberID()),
Action: pb.AlarmRequest_ACTIVATE,
Alarm: pb.AlarmType_NOSPACE,
}
@@ -2239,25 +2004,77 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
})
}
-func (s *EtcdServer) notifyAboutFirstCommitInTerm() {
- newNotifier := make(chan struct{})
- s.firstCommitInTermMu.Lock()
- notifierToClose := s.firstCommitInTermC
- s.firstCommitInTermC = newNotifier
- s.firstCommitInTermMu.Unlock()
- close(notifierToClose)
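+// applyInternalRaftRequest routes regular v3 requests through the uber applier, and handles
+// cluster version, member attribute and downgrade requests through the membership applier.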
+func (s *EtcdServer) applyInternalRaftRequest(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *apply.Result {
+ if r.ClusterVersionSet == nil && r.ClusterMemberAttrSet == nil && r.DowngradeInfoSet == nil {
+ if !shouldApplyV3 {
+ return nil
+ }
+ return s.uberApply.Apply(r)
+ }
+ membershipApplier := apply.NewApplierMembership(s.lg, s.cluster, s)
+ op := "unknown"
+ defer func(start time.Time) {
+ txn.ApplySecObserve("v3", op, true, time.Since(start))
+ txn.WarnOfExpensiveRequest(s.lg, s.Cfg.WarningApplyDuration, start, &pb.InternalRaftStringer{Request: r}, nil, nil)
+ }(time.Now())
+ switch {
+ case r.ClusterVersionSet != nil:
+ op = "ClusterVersionSet" // Implemented in 3.5.x
+ membershipApplier.ClusterVersionSet(r.ClusterVersionSet, shouldApplyV3)
+ return &apply.Result{}
+ case r.ClusterMemberAttrSet != nil:
+ op = "ClusterMemberAttrSet" // Implemented in 3.5.x
+ membershipApplier.ClusterMemberAttrSet(r.ClusterMemberAttrSet, shouldApplyV3)
+ case r.DowngradeInfoSet != nil:
+ op = "DowngradeInfoSet" // Implemented in 3.5.x
+ membershipApplier.DowngradeInfoSet(r.DowngradeInfoSet, shouldApplyV3)
+ default:
+ s.lg.Panic("not implemented apply", zap.Stringer("raft-request", r))
+ return nil
+ }
+ return &apply.Result{}
+}
+
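+// noSideEffect reports whether the request is read-only (Range, AuthUserGet, AuthRoleGet
+// or AuthStatus) and therefore has no side effect on the state machine.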
+func noSideEffect(r *pb.InternalRaftRequest) bool {
+ return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil || r.AuthStatus != nil
+}
+
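+// removeNeedlessRangeReqs strips Range operations from both the success and failure
+// branches of the transaction in place; it is only called when no client is waiting
+// for the result.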
+func removeNeedlessRangeReqs(txn *pb.TxnRequest) {
+ f := func(ops []*pb.RequestOp) []*pb.RequestOp {
+ j := 0
+ for i := 0; i < len(ops); i++ {
+ if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok {
+ continue
+ }
+ ops[j] = ops[i]
+ j++
+ }
+
+ return ops[:j]
+ }
+
+ txn.Success = f(txn.Success)
+ txn.Failure = f(txn.Failure)
}
// applyConfChange applies a ConfChange to the server. It is only
// invoked with a ConfChange that has already passed through Raft
func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState, shouldApplyV3 membership.ShouldApplyV3) (bool, error) {
+ lg := s.Logger()
if err := s.cluster.ValidateConfigurationChange(cc); err != nil {
+ lg.Error("Validation on configuration change failed", zap.Bool("shouldApplyV3", bool(shouldApplyV3)), zap.Error(err))
cc.NodeID = raft.None
s.r.ApplyConfChange(cc)
+
+ // The txPostLock callback will not get called in this case,
+ // so we should set the consistent index directly.
+ if s.consistIndex != nil && membership.ApplyBoth == shouldApplyV3 {
+ applyingIndex, applyingTerm := s.consistIndex.ConsistentApplyingIndex()
+ s.consistIndex.SetConsistentIndex(applyingIndex, applyingTerm)
+ }
return false, err
}
- lg := s.Logger()
*confState = *s.r.ApplyConfChange(cc)
s.beHooks.SetConfState(confState)
switch cc.Type {
@@ -2278,24 +2095,15 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con
} else {
s.cluster.AddMember(&confChangeContext.Member, shouldApplyV3)
- if confChangeContext.Member.ID != s.id {
+ if confChangeContext.Member.ID != s.MemberID() {
s.r.transport.AddPeer(confChangeContext.Member.ID, confChangeContext.PeerURLs)
}
}
- // update the isLearner metric when this server id is equal to the id in raft member confChange
- if confChangeContext.Member.ID == s.id {
- if cc.Type == raftpb.ConfChangeAddLearnerNode {
- isLearner.Set(1)
- } else {
- isLearner.Set(0)
- }
- }
-
case raftpb.ConfChangeRemoveNode:
id := types.ID(cc.NodeID)
s.cluster.RemoveMember(id, shouldApplyV3)
- if id == s.id {
+ if id == s.MemberID() {
return true, nil
}
s.r.transport.RemovePeer(id)
@@ -2313,7 +2121,7 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con
)
}
s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes, shouldApplyV3)
- if m.ID != s.id {
+ if m.ID != s.MemberID() {
s.r.transport.UpdatePeer(m.ID, m.PeerURLs)
}
}
@@ -2321,80 +2129,93 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con
}
// TODO: non-blocking snapshot
-func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
- clone := s.v2store.Clone()
- // commit kv to write metadata (for example: consistent index) to disk.
- //
- // This guarantees that Backend's consistent_index is >= index of last snapshot.
- //
- // KV().commit() updates the consistent index in backend.
- // All operations that update consistent index must be called sequentially
- // from applyAll function.
- // So KV().Commit() cannot run in parallel with apply. It has to be called outside
- // the go routine created below.
- s.KV().Commit()
+func (s *EtcdServer) snapshot(ep *etcdProgress, toDisk bool) {
+ lg := s.Logger()
+ d := GetMembershipInfoInV2Format(lg, s.cluster)
+ if toDisk {
+ s.Logger().Info(
+ "triggering snapshot",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.Uint64("local-member-applied-index", ep.appliedi),
+ zap.Uint64("local-member-snapshot-index", ep.diskSnapshotIndex),
+ zap.Uint64("local-member-snapshot-count", s.Cfg.SnapshotCount),
+ zap.Bool("snapshot-forced", s.forceDiskSnapshot),
+ )
+ s.forceDiskSnapshot = false
+ // commit kv to write metadata (for example: consistent index) to disk.
+ //
+ // This guarantees that Backend's consistent_index is >= index of last snapshot.
+ //
+ // KV().commit() updates the consistent index in backend.
+ // All operations that update consistent index must be called sequentially
+ // from applyAll function.
+ // So KV().Commit() cannot run in parallel with apply. It has to be called outside
+ // the go routine created below.
+ s.KV().Commit()
+ }
+
+ // For backward compatibility, generate v2 snapshot from v3 state.
+ snap, err := s.r.raftStorage.CreateSnapshot(ep.appliedi, &ep.confState, d)
+ if err != nil {
+ // the snapshot was done asynchronously with the progress of raft;
+ // raft might have already taken a newer snapshot.
+ if errorspkg.Is(err, raft.ErrSnapOutOfDate) {
+ return
+ }
+ lg.Panic("failed to create snapshot", zap.Error(err))
+ }
+ ep.memorySnapshotIndex = ep.appliedi
- s.GoAttach(func() {
- lg := s.Logger()
+ verifyConsistentIndexIsLatest(lg, snap, s.consistIndex.ConsistentIndex())
- d, err := clone.SaveNoCopy()
- // TODO: current store will never fail to do a snapshot
- // what should we do if the store might fail?
- if err != nil {
- lg.Panic("failed to save v2 store", zap.Error(err))
- }
- snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d)
- if err != nil {
- // the snapshot was done asynchronously with the progress of raft.
- // raft might have already got a newer snapshot.
- if err == raft.ErrSnapOutOfDate {
- return
- }
- lg.Panic("failed to create snapshot", zap.Error(err))
- }
+ if toDisk {
// SaveSnap saves the snapshot to file and appends the corresponding WAL entry.
if err = s.r.storage.SaveSnap(snap); err != nil {
lg.Panic("failed to save snapshot", zap.Error(err))
}
+ ep.diskSnapshotIndex = ep.appliedi
if err = s.r.storage.Release(snap); err != nil {
lg.Panic("failed to release wal", zap.Error(err))
}
lg.Info(
- "saved snapshot",
+ "saved snapshot to disk",
zap.Uint64("snapshot-index", snap.Metadata.Index),
)
+ }
+}
- // When sending a snapshot, etcd will pause compaction.
- // After receives a snapshot, the slow follower needs to get all the entries right after
- // the snapshot sent to catch up. If we do not pause compaction, the log entries right after
- // the snapshot sent might already be compacted. It happens when the snapshot takes long time
- // to send and save. Pausing compaction avoids triggering a snapshot sending cycle.
- if atomic.LoadInt64(&s.inflightSnapshots) != 0 {
- lg.Info("skip compaction since there is an inflight snapshot")
- return
- }
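+// compactRaftLog discards in-memory raft log entries up to snapi-SnapshotCatchUpEntries,
+// unless a snapshot is currently in flight to a slow follower.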
+func (s *EtcdServer) compactRaftLog(snapi uint64) {
+ lg := s.Logger()
- // keep some in memory log entries for slow followers.
- compacti := uint64(1)
- if snapi > s.Cfg.SnapshotCatchUpEntries {
- compacti = snapi - s.Cfg.SnapshotCatchUpEntries
- }
+ // When sending a snapshot, etcd will pause compaction.
+ // After receiving a snapshot, the slow follower needs to get all the entries right after
+ // the snapshot's index in order to catch up. If we did not pause compaction, the log entries
+ // right after the snapshot might already be compacted; this happens when the snapshot takes a
+ // long time to send and save. Pausing compaction avoids triggering another snapshot-sending cycle.
+ if atomic.LoadInt64(&s.inflightSnapshots) != 0 {
+ lg.Info("skip compaction since there is an inflight snapshot")
+ return
+ }
- err = s.r.raftStorage.Compact(compacti)
- if err != nil {
- // the compaction was done asynchronously with the progress of raft.
- // raft log might already been compact.
- if err == raft.ErrCompacted {
- return
- }
- lg.Panic("failed to compact", zap.Error(err))
+ // keep some in-memory log entries for slow followers.
+ compacti := uint64(1)
+ if snapi > s.Cfg.SnapshotCatchUpEntries {
+ compacti = snapi - s.Cfg.SnapshotCatchUpEntries
+ }
+ err := s.r.raftStorage.Compact(compacti)
+ if err != nil {
+ // the compaction was done asynchronously with the progress of raft;
+ // the raft log might have already been compacted.
+ if errorspkg.Is(err, raft.ErrCompacted) {
+ return
}
- lg.Info(
- "compacted Raft logs",
- zap.Uint64("compact-index", compacti),
- )
- })
+ lg.Panic("failed to compact", zap.Error(err))
+ }
+ lg.Debug(
+ "compacted Raft logs",
+ zap.Uint64("compact-index", compacti),
+ )
}
// CutPeer drops messages to the specified peer.
@@ -2424,61 +2245,120 @@ func (s *EtcdServer) ClusterVersion() *semver.Version {
return s.cluster.Version()
}
-// monitorVersions checks the member's version every monitorVersionInterval.
-// It updates the cluster version if all members agrees on a higher one.
-// It prints out log if there is a member with a higher version than the
-// local version.
-func (s *EtcdServer) monitorVersions() {
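+// StorageVersion returns the storage schema version detected in the backend, or nil if detection fails.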
+func (s *EtcdServer) StorageVersion() *semver.Version {
+ // `applySnapshot` sets a new backend instance, so we need to acquire the bemu lock.
+ s.bemu.RLock()
+ defer s.bemu.RUnlock()
+
+ v, err := schema.DetectSchemaVersion(s.lg, s.be.ReadTx())
+ if err != nil {
+ s.lg.Warn("Failed to detect schema version", zap.Error(err))
+ return nil
+ }
+ return &v
+}
+
+// monitorClusterVersions checks every monitorVersionInterval whether this member is the leader and updates the cluster version if needed.
+func (s *EtcdServer) monitorClusterVersions() {
+ lg := s.Logger()
+ monitor := serverversion.NewMonitor(lg, NewServerVersionAdapter(s))
for {
select {
- case <-s.FirstCommitInTermNotify():
+ case <-s.firstCommitInTerm.Receive():
case <-time.After(monitorVersionInterval):
case <-s.stopping:
+ lg.Info("server has stopped; stopping cluster version's monitor")
return
}
- if s.Leader() != s.ID() {
+ if s.Leader() != s.MemberID() {
continue
}
+ err := monitor.UpdateClusterVersionIfNeeded()
+ if err != nil {
+ s.lg.Error("Failed to monitor cluster version", zap.Error(err))
+ }
+ }
+}
- v := decideClusterVersion(s.Logger(), getVersions(s.Logger(), s.cluster, s.id, s.peerRt))
- if v != nil {
- // only keep major.minor version for comparison
- v = &semver.Version{
- Major: v.Major,
- Minor: v.Minor,
- }
+// monitorStorageVersion updates the storage version every monitorVersionInterval, or when the cluster version changes, if needed.
+func (s *EtcdServer) monitorStorageVersion() {
+ lg := s.Logger()
+ monitor := serverversion.NewMonitor(lg, NewServerVersionAdapter(s))
+ for {
+ select {
+ case <-time.After(monitorVersionInterval):
+ case <-s.clusterVersionChanged.Receive():
+ case <-s.stopping:
+ lg.Info("server has stopped; stopping storage version's monitor")
+ return
}
+ monitor.UpdateStorageVersionIfNeeded()
+ }
+}
- // if the current version is nil:
- // 1. use the decided version if possible
- // 2. or use the min cluster version
- if s.cluster.Version() == nil {
- verStr := version.MinClusterVersion
- if v != nil {
- verStr = v.String()
- }
- s.GoAttach(func() { s.updateClusterVersion(verStr) })
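+// monitorKVHash periodically verifies backend consistency and, on the leader, runs the
+// periodic KV hash corruption check; it is a no-op when CorruptCheckTime is zero.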
+func (s *EtcdServer) monitorKVHash() {
+ t := s.Cfg.CorruptCheckTime
+ if t == 0 {
+ return
+ }
+ checkTicker := time.NewTicker(t)
+ defer checkTicker.Stop()
+
+ lg := s.Logger()
+ lg.Info(
+ "enabled corruption checking",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.Duration("interval", t),
+ )
+ for {
+ select {
+ case <-s.stopping:
+ lg.Info("server has stopped; stopping kv hash's monitor")
+ return
+ case <-checkTicker.C:
+ }
+ backend.VerifyBackendConsistency(s.be, lg, false, schema.AllBuckets...)
+ if !s.isLeader() {
continue
}
+ if err := s.corruptionChecker.PeriodicCheck(); err != nil {
+ lg.Warn("failed to check hash KV", zap.Error(err))
+ }
+ }
+}
- if v != nil && membership.IsValidVersionChange(s.cluster.Version(), v) {
- s.GoAttach(func() { s.updateClusterVersion(v.String()) })
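+// monitorCompactHash periodically runs the compact hash corruption check on the leader
+// when the CompactHashCheck feature is enabled.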
+func (s *EtcdServer) monitorCompactHash() {
+ if !s.FeatureEnabled(features.CompactHashCheck) {
+ return
+ }
+ t := s.Cfg.CompactHashCheckTime
+ for {
+ select {
+ case <-time.After(t):
+ case <-s.stopping:
+ lg := s.Logger()
+ lg.Info("server has stopped; stopping compact hash's monitor")
+ return
+ }
+ if !s.isLeader() {
+ continue
}
+ s.corruptionChecker.CompactHashCheck()
}
}
-func (s *EtcdServer) updateClusterVersion(ver string) {
+func (s *EtcdServer) updateClusterVersionV3(ver string) {
lg := s.Logger()
if s.cluster.Version() == nil {
lg.Info(
- "setting up initial cluster version",
+ "setting up initial cluster version using v3 API",
zap.String("cluster-version", version.Cluster(ver)),
)
} else {
lg.Info(
- "updating cluster version",
+ "updating cluster version using v3 API",
zap.String("from", version.Cluster(s.cluster.Version().String())),
zap.String("to", version.Cluster(ver)),
)
@@ -2490,12 +2370,12 @@ func (s *EtcdServer) updateClusterVersion(ver string) {
_, err := s.raftRequest(ctx, pb.InternalRaftRequest{ClusterVersionSet: &req})
cancel()
- switch err {
- case nil:
+ switch {
+ case errorspkg.Is(err, nil):
lg.Info("cluster version is updated", zap.String("cluster-version", version.Cluster(ver)))
return
- case ErrStopped:
+ case errorspkg.Is(err, errors.ErrStopped):
lg.Warn("aborting cluster version update; server is stopped", zap.Error(err))
return
@@ -2504,12 +2384,13 @@ func (s *EtcdServer) updateClusterVersion(ver string) {
}
}
+// monitorDowngrade checks every DowngradeCheckTime whether this member is the leader and cancels an in-progress downgrade if needed.
func (s *EtcdServer) monitorDowngrade() {
+ monitor := serverversion.NewMonitor(s.Logger(), NewServerVersionAdapter(s))
t := s.Cfg.DowngradeCheckTime
if t == 0 {
return
}
- lg := s.Logger()
for {
select {
case <-time.After(t):
@@ -2520,52 +2401,37 @@ func (s *EtcdServer) monitorDowngrade() {
if !s.isLeader() {
continue
}
-
- d := s.cluster.DowngradeInfo()
- if !d.Enabled {
- continue
- }
-
- targetVersion := d.TargetVersion
- v := semver.Must(semver.NewVersion(targetVersion))
- if isMatchedVersions(s.Logger(), v, getVersions(s.Logger(), s.cluster, s.id, s.peerRt)) {
- lg.Info("the cluster has been downgraded", zap.String("cluster-version", targetVersion))
- ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
- if _, err := s.downgradeCancel(ctx); err != nil {
- lg.Warn("failed to cancel downgrade", zap.Error(err))
- }
- cancel()
- }
+ monitor.CancelDowngradeIfNeeded()
}
}
func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error {
- switch err {
- case context.Canceled:
- return ErrCanceled
+ switch {
+ case errorspkg.Is(err, context.Canceled):
+ return errors.ErrCanceled
- case context.DeadlineExceeded:
+ case errorspkg.Is(err, context.DeadlineExceeded):
s.leadTimeMu.RLock()
curLeadElected := s.leadElectedTime
s.leadTimeMu.RUnlock()
prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond)
if start.After(prevLeadLost) && start.Before(curLeadElected) {
- return ErrTimeoutDueToLeaderFail
+ return errors.ErrTimeoutDueToLeaderFail
}
lead := types.ID(s.getLead())
switch lead {
case types.ID(raft.None):
// TODO: return error to specify it happens because the cluster does not have leader now
- case s.ID():
- if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) {
- return ErrTimeoutDueToConnectionLost
+ case s.MemberID():
+ if !isConnectedToQuorumSince(s.r.transport, start, s.MemberID(), s.cluster.Members()) {
+ return errors.ErrTimeoutDueToConnectionLost
}
default:
if !isConnectedSince(s.r.transport, start, lead) {
- return ErrTimeoutDueToConnectionLost
+ return errors.ErrTimeoutDueToConnectionLost
}
}
- return ErrTimeout
+ return errors.ErrTimeout
default:
return err
@@ -2574,26 +2440,19 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error {
func (s *EtcdServer) KV() mvcc.WatchableKV { return s.kv }
func (s *EtcdServer) Backend() backend.Backend {
- s.bemu.Lock()
- defer s.bemu.Unlock()
+ s.bemu.RLock()
+ defer s.bemu.RUnlock()
return s.be
}
func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore }
func (s *EtcdServer) restoreAlarms() error {
- s.applyV3 = s.newApplierV3()
- as, err := v3alarm.NewAlarmStore(s.lg, s)
+ as, err := v3alarm.NewAlarmStore(s.lg, schema.NewAlarmBackend(s.lg, s.be))
if err != nil {
return err
}
s.alarmStore = as
- if len(as.Get(pb.AlarmType_NOSPACE)) > 0 {
- s.applyV3 = newApplierV3Capped(s.applyV3)
- }
- if len(as.Get(pb.AlarmType_CORRUPT)) > 0 {
- s.applyV3 = newApplierV3Corrupt(s.applyV3)
- }
return nil
}
@@ -2638,21 +2497,19 @@ func (s *EtcdServer) raftStatus() raft.Status {
return s.r.Node.Status()
}
-func maybeDefragBackend(cfg config.ServerConfig, be backend.Backend) error {
- size := be.Size()
- sizeInUse := be.SizeInUse()
- freeableMemory := uint(size - sizeInUse)
- thresholdBytes := cfg.ExperimentalBootstrapDefragThresholdMegabytes * 1024 * 1024
- if freeableMemory < thresholdBytes {
- cfg.Logger.Info("Skipping defragmentation",
- zap.Int64("current-db-size-bytes", size),
- zap.String("current-db-size", humanize.Bytes(uint64(size))),
- zap.Int64("current-db-size-in-use-bytes", sizeInUse),
- zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse))),
- zap.Uint("experimental-bootstrap-defrag-threshold-bytes", thresholdBytes),
- zap.String("experimental-bootstrap-defrag-threshold", humanize.Bytes(uint64(thresholdBytes))),
- )
- return nil
+func (s *EtcdServer) Version() *serverversion.Manager {
+ return serverversion.NewManager(s.Logger(), NewServerVersionAdapter(s))
+}
+
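+// getTxPostLockInsideApplyHook returns the hook invoked after the backend write-transaction
+// lock is acquired during apply; it moves the consistent index forward to the entry
+// currently being applied.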
+func (s *EtcdServer) getTxPostLockInsideApplyHook() func() {
+ return func() {
+ applyingIdx, applyingTerm := s.consistIndex.ConsistentApplyingIndex()
+ if applyingIdx > s.consistIndex.UnsafeConsistentIndex() {
+ s.consistIndex.SetConsistentIndex(applyingIdx, applyingTerm)
+ }
}
- return be.Defrag()
+}
+
+func (s *EtcdServer) CorruptionChecker() CorruptionChecker {
+ return s.corruptionChecker
}
diff --git a/server/etcdserver/server_access_control_test.go b/server/etcdserver/server_access_control_test.go
new file mode 100644
index 00000000000..4f64c78e228
--- /dev/null
+++ b/server/etcdserver/server_access_control_test.go
@@ -0,0 +1,119 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestOriginAllowed(t *testing.T) {
+ tests := []struct {
+ accessController *AccessController
+ origin string
+ allowed bool
+ }{
+ {
+ &AccessController{
+ CORS: map[string]struct{}{},
+ },
+ "https://example.com",
+ true,
+ },
+ {
+ &AccessController{
+ CORS: map[string]struct{}{"*": {}},
+ },
+ "https://example.com",
+ true,
+ },
+ {
+ &AccessController{
+ CORS: map[string]struct{}{"https://example.com": {}, "http://example.org": {}},
+ },
+ "https://example.com",
+ true,
+ },
+ {
+ &AccessController{
+ CORS: map[string]struct{}{"http://example.org": {}},
+ },
+ "https://example.com",
+ false,
+ },
+ {
+ &AccessController{
+ CORS: map[string]struct{}{"*": {}, "http://example.org/": {}},
+ },
+ "https://example.com",
+ true,
+ },
+ }
+
+ for _, tt := range tests {
+ allowed := tt.accessController.OriginAllowed(tt.origin)
+ assert.Equal(t, tt.allowed, allowed)
+ }
+}
+
+func TestIsHostWhitelisted(t *testing.T) {
+ tests := []struct {
+ accessController *AccessController
+ host string
+ whitelisted bool
+ }{
+ {
+ &AccessController{
+ HostWhitelist: map[string]struct{}{},
+ },
+ "example.com",
+ true,
+ },
+ {
+ &AccessController{
+ HostWhitelist: map[string]struct{}{"*": {}},
+ },
+ "example.com",
+ true,
+ },
+ {
+ &AccessController{
+ HostWhitelist: map[string]struct{}{"example.com": {}, "example.org": {}},
+ },
+ "example.com",
+ true,
+ },
+ {
+ &AccessController{
+ HostWhitelist: map[string]struct{}{"example.org": {}},
+ },
+ "example.com",
+ false,
+ },
+ {
+ &AccessController{
+ HostWhitelist: map[string]struct{}{"*": {}, "example.org/": {}},
+ },
+ "example.com",
+ true,
+ },
+ }
+
+ for _, tt := range tests {
+ whitelisted := tt.accessController.IsHostWhitelisted(tt.host)
+ assert.Equal(t, tt.whitelisted, whitelisted)
+ }
+}
diff --git a/server/etcdserver/server_test.go b/server/etcdserver/server_test.go
index 07a22e2b0ea..7db595f5fc3 100644
--- a/server/etcdserver/server_test.go
+++ b/server/etcdserver/server_test.go
@@ -17,8 +17,8 @@ package etcdserver
import (
"context"
"encoding/json"
+ errorspkg "errors"
"fmt"
- "io/ioutil"
"math"
"net/http"
"os"
@@ -28,163 +28,66 @@ import (
"testing"
"time"
+ "github.com/coreos/go-semver/semver"
+ "github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/membershippb"
+ "go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/client/pkg/v3/testutil"
"go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/client/pkg/v3/verify"
"go.etcd.io/etcd/pkg/v3/idutil"
+ "go.etcd.io/etcd/pkg/v3/notify"
"go.etcd.io/etcd/pkg/v3/pbutil"
"go.etcd.io/etcd/pkg/v3/wait"
- "go.etcd.io/etcd/raft/v3"
- "go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/auth"
"go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/etcd/server/v3/etcdserver/api"
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
"go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm"
+ apply2 "go.etcd.io/etcd/server/v3/etcdserver/apply"
"go.etcd.io/etcd/server/v3/etcdserver/cindex"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+ "go.etcd.io/etcd/server/v3/features"
"go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/mock/mockstorage"
"go.etcd.io/etcd/server/v3/mock/mockstore"
"go.etcd.io/etcd/server/v3/mock/mockwait"
- "go.etcd.io/etcd/server/v3/mvcc"
- betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
- "go.uber.org/zap"
- "go.uber.org/zap/zaptest"
+ serverstorage "go.etcd.io/etcd/server/v3/storage"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
)
-// TestDoLocalAction tests requests which do not need to go through raft to be applied,
-// and are served through local data.
-func TestDoLocalAction(t *testing.T) {
- tests := []struct {
- req pb.Request
-
- wresp Response
- werr error
- wactions []testutil.Action
- }{
- {
- pb.Request{Method: "GET", ID: 1, Wait: true},
- Response{Watcher: v2store.NewNopWatcher()}, nil, []testutil.Action{{Name: "Watch"}},
- },
- {
- pb.Request{Method: "GET", ID: 1},
- Response{Event: &v2store.Event{}}, nil,
- []testutil.Action{
- {
- Name: "Get",
- Params: []interface{}{"", false, false},
- },
- },
- },
- {
- pb.Request{Method: "HEAD", ID: 1},
- Response{Event: &v2store.Event{}}, nil,
- []testutil.Action{
- {
- Name: "Get",
- Params: []interface{}{"", false, false},
- },
- },
- },
- {
- pb.Request{Method: "BADMETHOD", ID: 1},
- Response{}, ErrUnknownMethod, []testutil.Action{},
- },
- }
- for i, tt := range tests {
- st := mockstore.NewRecorder()
- srv := &EtcdServer{
- lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
- v2store: st,
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
- }
- resp, err := srv.Do(context.Background(), tt.req)
-
- if err != tt.werr {
- t.Fatalf("#%d: err = %+v, want %+v", i, err, tt.werr)
- }
- if !reflect.DeepEqual(resp, tt.wresp) {
- t.Errorf("#%d: resp = %+v, want %+v", i, resp, tt.wresp)
- }
- gaction := st.Action()
- if !reflect.DeepEqual(gaction, tt.wactions) {
- t.Errorf("#%d: action = %+v, want %+v", i, gaction, tt.wactions)
- }
- }
-}
-
-// TestDoBadLocalAction tests server requests which do not need to go through consensus,
-// and return errors when they fetch from local data.
-func TestDoBadLocalAction(t *testing.T) {
- storeErr := fmt.Errorf("bah")
- tests := []struct {
- req pb.Request
-
- wactions []testutil.Action
- }{
- {
- pb.Request{Method: "GET", ID: 1, Wait: true},
- []testutil.Action{{Name: "Watch"}},
- },
- {
- pb.Request{Method: "GET", ID: 1},
- []testutil.Action{
- {
- Name: "Get",
- Params: []interface{}{"", false, false},
- },
- },
- },
- {
- pb.Request{Method: "HEAD", ID: 1},
- []testutil.Action{
- {
- Name: "Get",
- Params: []interface{}{"", false, false},
- },
- },
- },
- }
- for i, tt := range tests {
- st := mockstore.NewErrRecorder(storeErr)
- srv := &EtcdServer{
- lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
- v2store: st,
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
- }
- resp, err := srv.Do(context.Background(), tt.req)
-
- if err != storeErr {
- t.Fatalf("#%d: err = %+v, want %+v", i, err, storeErr)
- }
- if !reflect.DeepEqual(resp, Response{}) {
- t.Errorf("#%d: resp = %+v, want %+v", i, resp, Response{})
- }
- gaction := st.Action()
- if !reflect.DeepEqual(gaction, tt.wactions) {
- t.Errorf("#%d: action = %+v, want %+v", i, gaction, tt.wactions)
- }
- }
-}
-
// TestApplyRepeat tests that server handles repeat raft messages gracefully
func TestApplyRepeat(t *testing.T) {
+ lg := zaptest.NewLogger(t)
n := newNodeConfChangeCommitterStream()
n.readyc <- raft.Ready{
SoftState: &raft.SoftState{RaftState: raft.StateLeader},
}
- cl := newTestCluster(t, nil)
+ cl := newTestCluster(t)
st := v2store.New()
cl.SetStore(v2store.New())
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
+ cl.SetBackend(schema.NewMembershipBackend(lg, be))
+
cl.AddMember(&membership.Member{ID: 1234}, true)
r := newRaftNode(raftNodeConfig{
- lg: zap.NewExample(),
+ lg: zaptest.NewLogger(t),
Node: n,
raftStorage: raft.NewMemoryStorage(),
storage: mockstorage.NewStorageRecorder(""),
@@ -192,17 +95,20 @@ func TestApplyRepeat(t *testing.T) {
})
s := &EtcdServer{
lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
+ lg: zaptest.NewLogger(t),
r: *r,
v2store: st,
cluster: cl,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
SyncTicker: &time.Ticker{},
consistIndex: cindex.NewFakeConsistentIndex(0),
+ uberApply: uberApplierMock{},
}
- s.applyV2 = &applierV2store{store: s.v2store, cluster: s.cluster}
s.start()
- req := &pb.Request{Method: "QGET", ID: uint64(1)}
+ req := &pb.InternalRaftRequest{
+ Header: &pb.RequestHeader{ID: 1},
+ Put: &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")},
+ }
ents := []raftpb.Entry{{Index: 1, Data: pbutil.MustMarshal(req)}}
n.readyc <- raft.Ready{CommittedEntries: ents}
// dup msg
@@ -240,275 +146,222 @@ func TestApplyRepeat(t *testing.T) {
}
}
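+// uberApplierMock is a stub applier whose Apply always returns an empty result, letting tests bypass the real v3 apply path.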
-func TestApplyRequest(t *testing.T) {
- tests := []struct {
- req pb.Request
+type uberApplierMock struct{}
- wresp Response
- wactions []testutil.Action
- }{
- // POST ==> Create
- {
- pb.Request{Method: "POST", ID: 1},
- Response{Event: &v2store.Event{}},
- []testutil.Action{
- {
- Name: "Create",
- Params: []interface{}{"", false, "", true, v2store.TTLOptionSet{ExpireTime: time.Time{}}},
- },
- },
- },
- // POST ==> Create, with expiration
- {
- pb.Request{Method: "POST", ID: 1, Expiration: 1337},
- Response{Event: &v2store.Event{}},
- []testutil.Action{
- {
- Name: "Create",
- Params: []interface{}{"", false, "", true, v2store.TTLOptionSet{ExpireTime: time.Unix(0, 1337)}},
- },
- },
- },
- // POST ==> Create, with dir
- {
- pb.Request{Method: "POST", ID: 1, Dir: true},
- Response{Event: &v2store.Event{}},
- []testutil.Action{
- {
- Name: "Create",
- Params: []interface{}{"", true, "", true, v2store.TTLOptionSet{ExpireTime: time.Time{}}},
- },
- },
- },
- // PUT ==> Set
- {
- pb.Request{Method: "PUT", ID: 1},
- Response{Event: &v2store.Event{}},
- []testutil.Action{
- {
- Name: "Set",
- Params: []interface{}{"", false, "", v2store.TTLOptionSet{ExpireTime: time.Time{}}},
- },
- },
- },
- // PUT ==> Set, with dir
- {
- pb.Request{Method: "PUT", ID: 1, Dir: true},
- Response{Event: &v2store.Event{}},
- []testutil.Action{
- {
- Name: "Set",
- Params: []interface{}{"", true, "", v2store.TTLOptionSet{ExpireTime: time.Time{}}},
- },
- },
- },
- // PUT with PrevExist=true ==> Update
- {
- pb.Request{Method: "PUT", ID: 1, PrevExist: pbutil.Boolp(true)},
- Response{Event: &v2store.Event{}},
- []testutil.Action{
- {
- Name: "Update",
- Params: []interface{}{"", "", v2store.TTLOptionSet{ExpireTime: time.Time{}}},
- },
- },
- },
- // PUT with PrevExist=false ==> Create
- {
- pb.Request{Method: "PUT", ID: 1, PrevExist: pbutil.Boolp(false)},
- Response{Event: &v2store.Event{}},
- []testutil.Action{
- {
- Name: "Create",
- Params: []interface{}{"", false, "", false, v2store.TTLOptionSet{ExpireTime: time.Time{}}},
- },
- },
- },
- // PUT with PrevExist=true *and* PrevIndex set ==> CompareAndSwap
- {
- pb.Request{Method: "PUT", ID: 1, PrevExist: pbutil.Boolp(true), PrevIndex: 1},
- Response{Event: &v2store.Event{}},
- []testutil.Action{
- {
- Name: "CompareAndSwap",
- Params: []interface{}{"", "", uint64(1), "", v2store.TTLOptionSet{ExpireTime: time.Time{}}},
- },
- },
- },
- // PUT with PrevExist=false *and* PrevIndex set ==> Create
- {
- pb.Request{Method: "PUT", ID: 1, PrevExist: pbutil.Boolp(false), PrevIndex: 1},
- Response{Event: &v2store.Event{}},
- []testutil.Action{
- {
- Name: "Create",
- Params: []interface{}{"", false, "", false, v2store.TTLOptionSet{ExpireTime: time.Time{}}},
- },
- },
- },
- // PUT with PrevIndex set ==> CompareAndSwap
- {
- pb.Request{Method: "PUT", ID: 1, PrevIndex: 1},
- Response{Event: &v2store.Event{}},
- []testutil.Action{
- {
- Name: "CompareAndSwap",
- Params: []interface{}{"", "", uint64(1), "", v2store.TTLOptionSet{ExpireTime: time.Time{}}},
- },
- },
- },
- // PUT with PrevValue set ==> CompareAndSwap
- {
- pb.Request{Method: "PUT", ID: 1, PrevValue: "bar"},
- Response{Event: &v2store.Event{}},
- []testutil.Action{
- {
- Name: "CompareAndSwap",
- Params: []interface{}{"", "bar", uint64(0), "", v2store.TTLOptionSet{ExpireTime: time.Time{}}},
- },
- },
- },
- // PUT with PrevIndex and PrevValue set ==> CompareAndSwap
- {
- pb.Request{Method: "PUT", ID: 1, PrevIndex: 1, PrevValue: "bar"},
- Response{Event: &v2store.Event{}},
- []testutil.Action{
- {
- Name: "CompareAndSwap",
- Params: []interface{}{"", "bar", uint64(1), "", v2store.TTLOptionSet{ExpireTime: time.Time{}}},
- },
- },
- },
- // DELETE ==> Delete
- {
- pb.Request{Method: "DELETE", ID: 1},
- Response{Event: &v2store.Event{}},
- []testutil.Action{
- {
- Name: "Delete",
- Params: []interface{}{"", false, false},
- },
- },
- },
- // DELETE with PrevIndex set ==> CompareAndDelete
- {
- pb.Request{Method: "DELETE", ID: 1, PrevIndex: 1},
- Response{Event: &v2store.Event{}},
- []testutil.Action{
- {
- Name: "CompareAndDelete",
- Params: []interface{}{"", "", uint64(1)},
- },
- },
- },
- // DELETE with PrevValue set ==> CompareAndDelete
+func (uberApplierMock) Apply(r *pb.InternalRaftRequest) *apply2.Result {
+ return &apply2.Result{}
+}
+
+// TestV2SetMemberAttributes validates support for a hybrid v3.5 cluster that still uses v2 requests.
+// TODO: Remove in v3.7
+func TestV2SetMemberAttributes(t *testing.T) {
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
+ cl := newTestClusterWithBackend(t, []*membership.Member{{ID: 1}}, be)
+
+ cfg := config.ServerConfig{
+ ServerFeatureGate: features.NewDefaultServerFeatureGate("test", nil),
+ }
+
+ srv := &EtcdServer{
+ lgMu: new(sync.RWMutex),
+ lg: zaptest.NewLogger(t),
+ v2store: mockstore.NewRecorder(),
+ cluster: cl,
+ consistIndex: cindex.NewConsistentIndex(be),
+ w: wait.New(),
+ Cfg: cfg,
+ }
+ as, err := v3alarm.NewAlarmStore(srv.lg, schema.NewAlarmBackend(srv.lg, be))
+ if err != nil {
+ t.Fatal(err)
+ }
+ srv.alarmStore = as
+ srv.uberApply = srv.NewUberApplier()
+
+ req := pb.Request{
+ Method: "PUT",
+ ID: 1,
+ Path: membership.MemberAttributesStorePath(1),
+ Val: `{"Name":"abc","ClientURLs":["http://127.0.0.1:2379"]}`,
+ }
+ data, err := proto.Marshal(&req)
+ if err != nil {
+ t.Fatal(err)
+ }
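+ // Apply the legacy v2 request as a normal raft entry and verify the member attributes are updated.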
+ srv.applyEntryNormal(&raftpb.Entry{
+ Data: data,
+ }, membership.ApplyV2storeOnly)
+ w := membership.Attributes{Name: "abc", ClientURLs: []string{"http://127.0.0.1:2379"}}
+ if g := cl.Member(1).Attributes; !reflect.DeepEqual(g, w) {
+ t.Errorf("attributes = %v, want %v", g, w)
+ }
+}
+
+// TestV2SetClusterVersion validates support for a hybrid v3.5 cluster that still uses v2 requests.
+// TODO: Remove in v3.7
+func TestV2SetClusterVersion(t *testing.T) {
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
+ cl := newTestClusterWithBackend(t, []*membership.Member{}, be)
+ cl.SetVersion(semver.New("3.4.0"), api.UpdateCapability, membership.ApplyBoth)
+ cfg := config.ServerConfig{
+ ServerFeatureGate: features.NewDefaultServerFeatureGate("test", nil),
+ }
+
+ srv := &EtcdServer{
+ lgMu: new(sync.RWMutex),
+ lg: zaptest.NewLogger(t),
+ v2store: mockstore.NewRecorder(),
+ cluster: cl,
+ consistIndex: cindex.NewConsistentIndex(be),
+ w: wait.New(),
+ Cfg: cfg,
+ }
+ as, err := v3alarm.NewAlarmStore(srv.lg, schema.NewAlarmBackend(srv.lg, be))
+ if err != nil {
+ t.Fatal(err)
+ }
+ srv.alarmStore = as
+ srv.uberApply = srv.NewUberApplier()
+
+ req := pb.Request{
+ Method: "PUT",
+ ID: 1,
+ Path: membership.StoreClusterVersionKey(),
+ Val: "3.5.0",
+ }
+ data, err := proto.Marshal(&req)
+ if err != nil {
+ t.Fatal(err)
+ }
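+ // Apply the legacy v2 request and verify the cluster version is bumped to 3.5.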
+ srv.applyEntryNormal(&raftpb.Entry{
+ Data: data,
+ }, membership.ApplyV2storeOnly)
+ if g := cl.Version(); !reflect.DeepEqual(*g, version.V3_5) {
+ t.Errorf("attributes = %v, want %v", *g, version.V3_5)
+ }
+}
+
+func TestApplyConfStateWithRestart(t *testing.T) {
+ n := newNodeRecorder()
+ srv := newServer(t, n)
+ defer srv.Cleanup()
+
+ assert.Equal(t, uint64(0), srv.consistIndex.ConsistentIndex())
+
+ var nodeID uint64 = 1
+ memberData, err := json.Marshal(&membership.Member{ID: types.ID(nodeID), RaftAttributes: membership.RaftAttributes{PeerURLs: []string{""}}})
+ if err != nil {
+ t.Fatal(err)
+ }
+
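+ // Three conf changes applied in order: add member 1, remove it, then try to update the removed member.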
+ entries := []raftpb.Entry{
{
- pb.Request{Method: "DELETE", ID: 1, PrevValue: "bar"},
- Response{Event: &v2store.Event{}},
- []testutil.Action{
- {
- Name: "CompareAndDelete",
- Params: []interface{}{"", "bar", uint64(0)},
- },
- },
+ Term: 1,
+ Index: 1,
+ Type: raftpb.EntryConfChange,
+ Data: pbutil.MustMarshal(&raftpb.ConfChange{
+ Type: raftpb.ConfChangeAddNode,
+ NodeID: nodeID,
+ Context: memberData,
+ }),
},
- // DELETE with PrevIndex *and* PrevValue set ==> CompareAndDelete
{
- pb.Request{Method: "DELETE", ID: 1, PrevIndex: 5, PrevValue: "bar"},
- Response{Event: &v2store.Event{}},
- []testutil.Action{
- {
- Name: "CompareAndDelete",
- Params: []interface{}{"", "bar", uint64(5)},
- },
- },
+ Term: 1,
+ Index: 2,
+ Type: raftpb.EntryConfChange,
+ Data: pbutil.MustMarshal(&raftpb.ConfChange{
+ Type: raftpb.ConfChangeRemoveNode,
+ NodeID: nodeID,
+ }),
},
- // QGET ==> Get
{
- pb.Request{Method: "QGET", ID: 1},
- Response{Event: &v2store.Event{}},
- []testutil.Action{
- {
- Name: "Get",
- Params: []interface{}{"", false, false},
- },
- },
+ Term: 1,
+ Index: 3,
+ Type: raftpb.EntryConfChange,
+ Data: pbutil.MustMarshal(&raftpb.ConfChange{
+ Type: raftpb.ConfChangeUpdateNode,
+ NodeID: nodeID,
+ Context: memberData,
+ }),
},
- // SYNC ==> DeleteExpiredKeys
+ }
+ want := []testutil.Action{
{
- pb.Request{Method: "SYNC", ID: 1},
- Response{},
- []testutil.Action{
- {
- Name: "DeleteExpiredKeys",
- Params: []interface{}{time.Unix(0, 0)},
- },
- },
+ Name: "ApplyConfChange",
+ Params: []any{raftpb.ConfChange{
+ Type: raftpb.ConfChangeAddNode,
+ NodeID: nodeID,
+ Context: memberData,
+ }},
},
{
- pb.Request{Method: "SYNC", ID: 1, Time: 12345},
- Response{},
- []testutil.Action{
- {
- Name: "DeleteExpiredKeys",
- Params: []interface{}{time.Unix(0, 12345)},
- },
- },
+ Name: "ApplyConfChange",
+ Params: []any{raftpb.ConfChange{
+ Type: raftpb.ConfChangeRemoveNode,
+ NodeID: nodeID,
+ }},
},
- // Unknown method - error
+ // This action is expected to fail validation, thus NodeID is set to 0
{
- pb.Request{Method: "BADMETHOD", ID: 1},
- Response{Err: ErrUnknownMethod},
- []testutil.Action{},
+ Name: "ApplyConfChange",
+ Params: []any{raftpb.ConfChange{
+ Type: raftpb.ConfChangeUpdateNode,
+ Context: memberData,
+ NodeID: 0,
+ }},
},
}
- for i, tt := range tests {
- st := mockstore.NewRecorder()
- srv := &EtcdServer{
- lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
- v2store: st,
- }
- srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}
- resp := srv.applyV2Request((*RequestV2)(&tt.req))
+ confState := raftpb.ConfState{}
- if !reflect.DeepEqual(resp, tt.wresp) {
- t.Errorf("#%d: resp = %+v, want %+v", i, resp, tt.wresp)
- }
- gaction := st.Action()
- if !reflect.DeepEqual(gaction, tt.wactions) {
- t.Errorf("#%d: action = %#v, want %#v", i, gaction, tt.wactions)
- }
+ t.Log("Applying entries for the first time")
+ srv.apply(entries, &confState, nil)
+ if got, _ := n.Wait(len(want)); !reflect.DeepEqual(got, want) {
+ t.Errorf("actions don't match\n got %+v\n want %+v", got, want)
}
-}
-func TestApplyRequestOnAdminMemberAttributes(t *testing.T) {
- cl := newTestCluster(t, []*membership.Member{{ID: 1}})
- srv := &EtcdServer{
- lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
- v2store: mockstore.NewRecorder(),
- cluster: cl,
- }
- srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}
+ t.Log("Simulating etcd restart by clearing v2 store")
+ srv.cluster.SetStore(v2store.New())
- req := pb.Request{
- Method: "PUT",
- ID: 1,
- Path: membership.MemberAttributesStorePath(1),
- Val: `{"Name":"abc","ClientURLs":["http://127.0.0.1:2379"]}`,
+ t.Log("Reapplying same entries after restart")
+ srv.apply(entries, &confState, nil)
+ if got, _ := n.Wait(2 * len(want)); !reflect.DeepEqual(got[len(want):], want) {
+ t.Errorf("actions don't match\n got %+v\n want %+v", got, want)
}
- srv.applyV2Request((*RequestV2)(&req))
- w := membership.Attributes{Name: "abc", ClientURLs: []string{"http://127.0.0.1:2379"}}
- if g := cl.Member(1).Attributes; !reflect.DeepEqual(g, w) {
- t.Errorf("attributes = %v, want %v", g, w)
+}
+
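+// newServer assembles a minimal EtcdServer around the given node recorder, backed by a temporary backend; it is used by TestApplyConfStateWithRestart.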
+func newServer(t *testing.T, recorder *nodeRecorder) *EtcdServer {
+ lg := zaptest.NewLogger(t)
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ t.Cleanup(func() {
+ betesting.Close(t, be)
+ })
+ srv := &EtcdServer{
+ lgMu: new(sync.RWMutex),
+ lg: zaptest.NewLogger(t),
+ r: *newRaftNode(raftNodeConfig{lg: lg, Node: recorder}),
+ cluster: membership.NewCluster(lg),
+ consistIndex: cindex.NewConsistentIndex(be),
}
+ srv.cluster.SetBackend(schema.NewMembershipBackend(lg, be))
+ srv.cluster.SetStore(v2store.New())
+ srv.beHooks = serverstorage.NewBackendHooks(lg, srv.consistIndex)
+ srv.r.transport = newNopTransporter()
+ srv.w = mockwait.NewNop()
+ return srv
}
func TestApplyConfChangeError(t *testing.T) {
- cl := membership.NewCluster(zaptest.NewLogger(t))
+ lg := zaptest.NewLogger(t)
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
+
+ cl := membership.NewCluster(lg)
+ cl.SetBackend(schema.NewMembershipBackend(lg, be))
cl.SetStore(v2store.New())
+
for i := 1; i <= 4; i++ {
cl.AddMember(&membership.Member{ID: types.ID(i)}, true)
}
@@ -573,19 +426,19 @@ func TestApplyConfChangeError(t *testing.T) {
n := newNodeRecorder()
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
- r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: n}),
+ lg: zaptest.NewLogger(t),
+ r: *newRaftNode(raftNodeConfig{lg: zaptest.NewLogger(t), Node: n}),
cluster: cl,
}
_, err := srv.applyConfChange(tt.cc, nil, true)
- if err != tt.werr {
+ if !errorspkg.Is(err, tt.werr) {
t.Errorf("#%d: applyConfChange error = %v, want %v", i, err, tt.werr)
}
cc := raftpb.ConfChange{Type: tt.cc.Type, NodeID: raft.None, Context: tt.cc.Context}
w := []testutil.Action{
{
Name: "ApplyConfChange",
- Params: []interface{}{cc},
+ Params: []any{cc},
},
}
if g, _ := n.Wait(1); !reflect.DeepEqual(g, w) {
@@ -595,24 +448,29 @@ func TestApplyConfChangeError(t *testing.T) {
}
func TestApplyConfChangeShouldStop(t *testing.T) {
- cl := membership.NewCluster(zaptest.NewLogger(t))
+ lg := zaptest.NewLogger(t)
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
+
+ cl := membership.NewCluster(lg)
+ cl.SetBackend(schema.NewMembershipBackend(lg, be))
cl.SetStore(v2store.New())
+
for i := 1; i <= 3; i++ {
cl.AddMember(&membership.Member{ID: types.ID(i)}, true)
}
r := newRaftNode(raftNodeConfig{
- lg: zap.NewExample(),
+ lg: zaptest.NewLogger(t),
Node: newNodeNop(),
transport: newNopTransporter(),
})
- lg := zaptest.NewLogger(t)
srv := &EtcdServer{
- lgMu: new(sync.RWMutex),
- lg: lg,
- id: 1,
- r: *r,
- cluster: cl,
- beHooks: &backendHooks{lg: lg},
+ lgMu: new(sync.RWMutex),
+ lg: lg,
+ memberID: 1,
+ r: *r,
+ cluster: cl,
+ beHooks: serverstorage.NewBackendHooks(lg, nil),
}
cc := raftpb.ConfChange{
Type: raftpb.ConfChangeRemoveNode,
@@ -642,26 +500,29 @@ func TestApplyConfChangeShouldStop(t *testing.T) {
 // where consistIndex equals the applied index.
func TestApplyConfigChangeUpdatesConsistIndex(t *testing.T) {
lg := zaptest.NewLogger(t)
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
cl := membership.NewCluster(zaptest.NewLogger(t))
cl.SetStore(v2store.New())
+ cl.SetBackend(schema.NewMembershipBackend(lg, be))
+
cl.AddMember(&membership.Member{ID: types.ID(1)}, true)
- be, _ := betesting.NewDefaultTmpBackend(t)
- defer betesting.Close(t, be)
- cindex.CreateMetaBucket(be.BatchTx())
+ schema.CreateMetaBucket(be.BatchTx())
ci := cindex.NewConsistentIndex(be)
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: lg,
- id: 1,
- r: *realisticRaftNode(lg),
+ memberID: 1,
+ r: *realisticRaftNode(lg, 1, nil),
cluster: cl,
w: wait.New(),
consistIndex: ci,
- beHooks: &backendHooks{lg: lg, indexer: ci},
+ beHooks: serverstorage.NewBackendHooks(lg, ci),
}
+ defer srv.r.raftNodeConfig.Stop()
// create EntryConfChange entry
now := time.Now()
@@ -683,29 +544,34 @@ func TestApplyConfigChangeUpdatesConsistIndex(t *testing.T) {
Data: pbutil.MustMarshal(cc),
}}
- _, appliedi, _ := srv.apply(ents, &raftpb.ConfState{})
+ raftAdvancedC := make(chan struct{}, 1)
+ raftAdvancedC <- struct{}{}
+ _, appliedi, _ := srv.apply(ents, &raftpb.ConfState{}, raftAdvancedC)
consistIndex := srv.consistIndex.ConsistentIndex()
- if consistIndex != appliedi {
- t.Fatalf("consistIndex = %v, want %v", consistIndex, appliedi)
- }
+ assert.Equal(t, uint64(2), appliedi)
t.Run("verify-backend", func(t *testing.T) {
tx := be.BatchTx()
tx.Lock()
defer tx.Unlock()
srv.beHooks.OnPreCommitUnsafe(tx)
- assert.Equal(t, raftpb.ConfState{Voters: []uint64{2}}, *membership.UnsafeConfStateFromBackend(lg, tx))
+ assert.Equal(t, raftpb.ConfState{Voters: []uint64{2}}, *schema.UnsafeConfStateFromBackend(lg, tx))
})
- rindex, rterm := cindex.ReadConsistentIndex(be.BatchTx())
+ rindex, _ := schema.ReadConsistentIndex(be.ReadTx())
assert.Equal(t, consistIndex, rindex)
- assert.Equal(t, uint64(4), rterm)
}
-func realisticRaftNode(lg *zap.Logger) *raftNode {
+func realisticRaftNode(lg *zap.Logger, id uint64, snap *raftpb.Snapshot) *raftNode {
storage := raft.NewMemoryStorage()
storage.SetHardState(raftpb.HardState{Commit: 0, Term: 0})
+ if snap != nil {
+ err := storage.ApplySnapshot(*snap)
+ if err != nil {
+ panic(err)
+ }
+ }
c := &raft.Config{
- ID: 1,
+ ID: id,
ElectionTick: 10,
HeartbeatTick: 1,
Storage: storage,
@@ -721,12 +587,16 @@ func realisticRaftNode(lg *zap.Logger) *raftNode {
return r
}
-// TestApplyMultiConfChangeShouldStop ensures that apply will return shouldStop
+// TestApplyMultiConfChangeShouldStop ensures that toApply will return shouldStop
// if the local member is removed along with other conf updates.
func TestApplyMultiConfChangeShouldStop(t *testing.T) {
lg := zaptest.NewLogger(t)
cl := membership.NewCluster(lg)
cl.SetStore(v2store.New())
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
+ cl.SetBackend(schema.NewMembershipBackend(lg, be))
+
for i := 1; i <= 5; i++ {
cl.AddMember(&membership.Member{ID: types.ID(i)}, true)
}
@@ -739,14 +609,14 @@ func TestApplyMultiConfChangeShouldStop(t *testing.T) {
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: lg,
- id: 2,
+ memberID: 2,
r: *r,
cluster: cl,
w: wait.New(),
consistIndex: ci,
- beHooks: &backendHooks{lg: lg, indexer: ci},
+ beHooks: serverstorage.NewBackendHooks(lg, ci),
}
- ents := []raftpb.Entry{}
+ var ents []raftpb.Entry
for i := 1; i <= 4; i++ {
ent := raftpb.Entry{
Term: 1,
@@ -755,349 +625,146 @@ func TestApplyMultiConfChangeShouldStop(t *testing.T) {
Data: pbutil.MustMarshal(
&raftpb.ConfChange{
Type: raftpb.ConfChangeRemoveNode,
- NodeID: uint64(i)}),
+ NodeID: uint64(i),
+ }),
}
ents = append(ents, ent)
}
- _, _, shouldStop := srv.apply(ents, &raftpb.ConfState{})
+ raftAdvancedC := make(chan struct{}, 1)
+ raftAdvancedC <- struct{}{}
+ _, _, shouldStop := srv.apply(ents, &raftpb.ConfState{}, raftAdvancedC)
if !shouldStop {
t.Errorf("shouldStop = %t, want %t", shouldStop, true)
}
}
-func TestDoProposal(t *testing.T) {
- tests := []pb.Request{
- {Method: "POST", ID: 1},
- {Method: "PUT", ID: 1},
- {Method: "DELETE", ID: 1},
- {Method: "GET", ID: 1, Quorum: true},
- }
- for i, tt := range tests {
- st := mockstore.NewRecorder()
- r := newRaftNode(raftNodeConfig{
- lg: zap.NewExample(),
- Node: newNodeCommitter(),
- storage: mockstorage.NewStorageRecorder(""),
- raftStorage: raft.NewMemoryStorage(),
- transport: newNopTransporter(),
- })
- srv := &EtcdServer{
- lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
- Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
- r: *r,
- v2store: st,
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
- SyncTicker: &time.Ticker{},
- consistIndex: cindex.NewFakeConsistentIndex(0),
- }
- srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}
- srv.start()
- resp, err := srv.Do(context.Background(), tt)
- srv.Stop()
-
- action := st.Action()
- if len(action) != 1 {
- t.Errorf("#%d: len(action) = %d, want 1", i, len(action))
- }
- if err != nil {
- t.Fatalf("#%d: err = %v, want nil", i, err)
- }
- // resp.Index is set in Do() based on the raft state; may either be 0 or 1
- wresp := Response{Event: &v2store.Event{}, Index: resp.Index}
- if !reflect.DeepEqual(resp, wresp) {
- t.Errorf("#%d: resp = %v, want %v", i, resp, wresp)
- }
- }
-}
-
-func TestDoProposalCancelled(t *testing.T) {
- wt := mockwait.NewRecorder()
- srv := &EtcdServer{
- lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
- Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
- r: *newRaftNode(raftNodeConfig{Node: newNodeNop()}),
- w: wt,
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
- }
- srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}
-
- ctx, cancel := context.WithCancel(context.Background())
- cancel()
- _, err := srv.Do(ctx, pb.Request{Method: "PUT"})
-
- if err != ErrCanceled {
- t.Fatalf("err = %v, want %v", err, ErrCanceled)
- }
- w := []testutil.Action{{Name: "Register"}, {Name: "Trigger"}}
- if !reflect.DeepEqual(wt.Action(), w) {
- t.Errorf("wt.action = %+v, want %+v", wt.Action(), w)
- }
-}
+// TestSnapshotDisk verifies that the snapshot is saved to disk and old snapshots are released.
+func TestSnapshotDisk(t *testing.T) {
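+ // Snapshot index verification is disabled because this unit test does not follow the e2e apply logic.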
+ revertFunc := verify.DisableVerifications()
+ defer revertFunc()
-func TestDoProposalTimeout(t *testing.T) {
- srv := &EtcdServer{
- lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
- Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
- r: *newRaftNode(raftNodeConfig{Node: newNodeNop()}),
- w: mockwait.NewNop(),
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
- }
- srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}
-
- ctx, cancel := context.WithTimeout(context.Background(), 0)
- _, err := srv.Do(ctx, pb.Request{Method: "PUT"})
- cancel()
- if err != ErrTimeout {
- t.Fatalf("err = %v, want %v", err, ErrTimeout)
- }
-}
-
-func TestDoProposalStopped(t *testing.T) {
- srv := &EtcdServer{
- lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
- Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
- r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: newNodeNop()}),
- w: mockwait.NewNop(),
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
- }
- srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}
-
- srv.stopping = make(chan struct{})
- close(srv.stopping)
- _, err := srv.Do(context.Background(), pb.Request{Method: "PUT", ID: 1})
- if err != ErrStopped {
- t.Errorf("err = %v, want %v", err, ErrStopped)
- }
-}
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
-// TestSync tests sync 1. is nonblocking 2. proposes SYNC request.
-func TestSync(t *testing.T) {
- n := newNodeRecorder()
- ctx, cancel := context.WithCancel(context.Background())
+ s := raft.NewMemoryStorage()
+ s.Append([]raftpb.Entry{{Index: 1}})
+ st := mockstore.NewRecorderStream()
+ p := mockstorage.NewStorageRecorderStream("")
+ r := newRaftNode(raftNodeConfig{
+ lg: zaptest.NewLogger(t),
+ Node: newNodeNop(),
+ raftStorage: s,
+ storage: p,
+ })
srv := &EtcdServer{
- lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
- r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: n}),
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
- ctx: ctx,
- cancel: cancel,
+ lgMu: new(sync.RWMutex),
+ lg: zaptest.NewLogger(t),
+ r: *r,
+ v2store: st,
+ consistIndex: cindex.NewConsistentIndex(be),
}
- srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}
-
- // check that sync is non-blocking
- done := make(chan struct{}, 1)
- go func() {
- srv.sync(10 * time.Second)
- done <- struct{}{}
+ srv.kv = mvcc.New(zaptest.NewLogger(t), be, &lease.FakeLessor{}, mvcc.StoreConfig{})
+ defer func() {
+ assert.NoError(t, srv.kv.Close())
}()
+ srv.be = be
- select {
- case <-done:
- case <-time.After(time.Second):
- t.Fatal("sync should be non-blocking but did not return after 1s!")
- }
-
- action, _ := n.Wait(1)
- if len(action) != 1 {
- t.Fatalf("len(action) = %d, want 1", len(action))
- }
- if action[0].Name != "Propose" {
- t.Fatalf("action = %s, want Propose", action[0].Name)
- }
- data := action[0].Params[0].([]byte)
- var r pb.Request
- if err := r.Unmarshal(data); err != nil {
- t.Fatalf("unmarshal request error: %v", err)
- }
- if r.Method != "SYNC" {
- t.Errorf("method = %s, want SYNC", r.Method)
- }
-}
+ cl := membership.NewCluster(zaptest.NewLogger(t))
+ srv.cluster = cl
-// TestSyncTimeout tests the case that sync 1. is non-blocking 2. cancel request
-// after timeout
-func TestSyncTimeout(t *testing.T) {
- n := newProposalBlockerRecorder()
- ctx, cancel := context.WithCancel(context.Background())
- srv := &EtcdServer{
- lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
- r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: n}),
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
- ctx: ctx,
- cancel: cancel,
- }
- srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}
+ ch := make(chan struct{}, 1)
- // check that sync is non-blocking
- done := make(chan struct{}, 1)
go func() {
- srv.sync(0)
- done <- struct{}{}
- }()
-
- select {
- case <-done:
- case <-time.After(time.Second):
- t.Fatal("sync should be non-blocking but did not return after 1s!")
- }
-
- w := []testutil.Action{{Name: "Propose blocked"}}
- if g, _ := n.Wait(1); !reflect.DeepEqual(g, w) {
- t.Errorf("action = %v, want %v", g, w)
- }
-}
-
-// TODO: TestNoSyncWhenNoLeader
-
-// TestSyncTrigger tests that the server proposes a SYNC request when its sync timer ticks
-func TestSyncTrigger(t *testing.T) {
- n := newReadyNode()
- st := make(chan time.Time, 1)
- tk := &time.Ticker{C: st}
- r := newRaftNode(raftNodeConfig{
- lg: zap.NewExample(),
- Node: n,
- raftStorage: raft.NewMemoryStorage(),
- transport: newNopTransporter(),
- storage: mockstorage.NewStorageRecorder(""),
- })
-
- srv := &EtcdServer{
- lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
- Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
- r: *r,
- v2store: mockstore.NewNop(),
- SyncTicker: tk,
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
- }
+ gaction, _ := p.Wait(2)
+ defer func() { ch <- struct{}{} }()
- // trigger the server to become a leader and accept sync requests
- go func() {
- srv.start()
- n.readyc <- raft.Ready{
- SoftState: &raft.SoftState{
- RaftState: raft.StateLeader,
- },
- }
- // trigger a sync request
- st <- time.Time{}
+ assert.Len(t, gaction, 2)
+ assert.Equal(t, testutil.Action{Name: "SaveSnap"}, gaction[0])
+ assert.Equal(t, testutil.Action{Name: "Release"}, gaction[1])
}()
-
- action, _ := n.Wait(1)
- go srv.Stop()
-
- if len(action) != 1 {
- t.Fatalf("len(action) = %d, want 1", len(action))
- }
- if action[0].Name != "Propose" {
- t.Fatalf("action = %s, want Propose", action[0].Name)
- }
- data := action[0].Params[0].([]byte)
- var req pb.Request
- if err := req.Unmarshal(data); err != nil {
- t.Fatalf("error unmarshalling data: %v", err)
- }
- if req.Method != "SYNC" {
- t.Fatalf("unexpected proposed request: %#v", req.Method)
- }
-
- // wait on stop message
- <-n.Chan()
+ ep := etcdProgress{appliedi: 1, confState: raftpb.ConfState{Voters: []uint64{1}}}
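+ // Trigger a snapshot to disk; the goroutine above expects SaveSnap followed by Release.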
+ srv.snapshot(&ep, true)
+ <-ch
+ assert.Empty(t, st.Action())
+ assert.Equal(t, uint64(1), ep.diskSnapshotIndex)
+ assert.Equal(t, uint64(1), ep.memorySnapshotIndex)
}
-// snapshot should snapshot the store and cut the persistent
-func TestSnapshot(t *testing.T) {
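+// TestSnapshotMemory verifies that an in-memory snapshot only advances the memory snapshot index and persists nothing to disk.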
+func TestSnapshotMemory(t *testing.T) {
+ revertFunc := verify.DisableVerifications()
+ defer revertFunc()
+
be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
s := raft.NewMemoryStorage()
s.Append([]raftpb.Entry{{Index: 1}})
st := mockstore.NewRecorderStream()
p := mockstorage.NewStorageRecorderStream("")
r := newRaftNode(raftNodeConfig{
- lg: zap.NewExample(),
+ lg: zaptest.NewLogger(t),
Node: newNodeNop(),
raftStorage: s,
storage: p,
})
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
+ lg: zaptest.NewLogger(t),
r: *r,
v2store: st,
consistIndex: cindex.NewConsistentIndex(be),
}
- srv.kv = mvcc.New(zap.NewExample(), be, &lease.FakeLessor{}, mvcc.StoreConfig{})
+ srv.kv = mvcc.New(zaptest.NewLogger(t), be, &lease.FakeLessor{}, mvcc.StoreConfig{})
+ defer func() {
+ assert.NoError(t, srv.kv.Close())
+ }()
srv.be = be
- ch := make(chan struct{}, 2)
-
- go func() {
- gaction, _ := p.Wait(2)
- defer func() { ch <- struct{}{} }()
-
- if len(gaction) != 2 {
- t.Errorf("len(action) = %d, want 2", len(gaction))
- return
- }
- if !reflect.DeepEqual(gaction[0], testutil.Action{Name: "SaveSnap"}) {
- t.Errorf("action = %s, want SaveSnap", gaction[0])
- }
+ cl := membership.NewCluster(zaptest.NewLogger(t))
+ srv.cluster = cl
- if !reflect.DeepEqual(gaction[1], testutil.Action{Name: "Release"}) {
- t.Errorf("action = %s, want Release", gaction[1])
- }
- }()
+ ch := make(chan struct{}, 1)
go func() {
- gaction, _ := st.Wait(2)
+ gaction, _ := p.Wait(1)
defer func() { ch <- struct{}{} }()
- if len(gaction) != 2 {
- t.Errorf("len(action) = %d, want 2", len(gaction))
- }
- if !reflect.DeepEqual(gaction[0], testutil.Action{Name: "Clone"}) {
- t.Errorf("action = %s, want Clone", gaction[0])
- }
- if !reflect.DeepEqual(gaction[1], testutil.Action{Name: "SaveNoCopy"}) {
- t.Errorf("action = %s, want SaveNoCopy", gaction[1])
- }
+ assert.Empty(t, gaction)
}()
-
- srv.snapshot(1, raftpb.ConfState{Voters: []uint64{1}})
- <-ch
+ ep := etcdProgress{appliedi: 1, confState: raftpb.ConfState{Voters: []uint64{1}}}
+ srv.snapshot(&ep, false)
<-ch
+ assert.Empty(t, st.Action())
+ assert.Equal(t, uint64(0), ep.diskSnapshotIndex)
+ assert.Equal(t, uint64(1), ep.memorySnapshotIndex)
}
// TestSnapshotOrdering ensures raft persists snapshot onto disk before
// snapshot db is applied.
func TestSnapshotOrdering(t *testing.T) {
+ // Ignore the snapshot index verification in this unit test, because
+ // it doesn't follow the e2e apply logic.
+ revertFunc := verify.DisableVerifications()
+ defer revertFunc()
+
lg := zaptest.NewLogger(t)
n := newNopReadyNode()
st := v2store.New()
cl := membership.NewCluster(lg)
- cl.SetStore(st)
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ cl.SetBackend(schema.NewMembershipBackend(lg, be))
- testdir, err := ioutil.TempDir(t.TempDir(), "testsnapdir")
- if err != nil {
- t.Fatalf("couldn't open tempdir (%v)", err)
- }
- defer os.RemoveAll(testdir)
+ testdir := t.TempDir()
snapdir := filepath.Join(testdir, "member", "snap")
- if err := os.MkdirAll(snapdir, 0755); err != nil {
+ if err := os.MkdirAll(snapdir, 0o755); err != nil {
t.Fatalf("couldn't make snap dir (%v)", err)
}
rs := raft.NewMemoryStorage()
p := mockstorage.NewStorageRecorderStream(testdir)
- tr, snapDoneC := newSnapTransporter(snapdir)
+ tr, snapDoneC := newSnapTransporter(lg, snapdir)
r := newRaftNode(raftNodeConfig{
lg: lg,
isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
@@ -1106,21 +773,26 @@ func TestSnapshotOrdering(t *testing.T) {
storage: p,
raftStorage: rs,
})
- be, _ := betesting.NewDefaultTmpBackend(t)
ci := cindex.NewConsistentIndex(be)
+ cfg := config.ServerConfig{
+ Logger: lg,
+ DataDir: testdir,
+ SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries,
+ ServerFeatureGate: features.NewDefaultServerFeatureGate("test", lg),
+ }
+
s := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: lg,
- Cfg: config.ServerConfig{Logger: lg, DataDir: testdir, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
+ Cfg: cfg,
r: *r,
v2store: st,
snapshotter: snap.New(lg, snapdir),
cluster: cl,
SyncTicker: &time.Ticker{},
consistIndex: ci,
- beHooks: &backendHooks{lg: lg, indexer: ci},
+ beHooks: serverstorage.NewBackendHooks(lg, ci),
}
- s.applyV2 = &applierV2store{store: s.v2store, cluster: s.cluster}
s.kv = mvcc.New(lg, be, &lease.FakeLessor{}, mvcc.StoreConfig{})
s.be = be
@@ -1135,122 +807,61 @@ func TestSnapshotOrdering(t *testing.T) {
 // Snapshot first triggers raftnode to persist the snapshot onto disk
// before renaming db snapshot file to db
snapMsg.Snapshot.Metadata.Index = 1
- n.readyc <- raft.Ready{Snapshot: snapMsg.Snapshot}
- }()
-
- ac := <-p.Chan()
- if ac.Name != "Save" {
- t.Fatalf("expected Save, got %+v", ac)
- }
-
- if ac := <-p.Chan(); ac.Name != "SaveSnap" {
- t.Fatalf("expected SaveSnap, got %+v", ac)
- }
-
- if ac := <-p.Chan(); ac.Name != "Save" {
- t.Fatalf("expected Save, got %+v", ac)
- }
-
- // confirm snapshot file still present before calling SaveSnap
- snapPath := filepath.Join(snapdir, fmt.Sprintf("%016x.snap.db", 1))
- if !fileutil.Exist(snapPath) {
- t.Fatalf("expected file %q, got missing", snapPath)
- }
-
- // unblock SaveSnapshot, etcdserver now permitted to move snapshot file
- if ac := <-p.Chan(); ac.Name != "Sync" {
- t.Fatalf("expected Sync, got %+v", ac)
- }
-
- if ac := <-p.Chan(); ac.Name != "Release" {
- t.Fatalf("expected Release, got %+v", ac)
- }
-}
-
-// Applied > SnapshotCount should trigger a SaveSnap event
-func TestTriggerSnap(t *testing.T) {
- be, tmpPath := betesting.NewDefaultTmpBackend(t)
- defer func() {
- os.RemoveAll(tmpPath)
- }()
-
- snapc := 10
- st := mockstore.NewRecorder()
- p := mockstorage.NewStorageRecorderStream("")
- r := newRaftNode(raftNodeConfig{
- lg: zap.NewExample(),
- Node: newNodeCommitter(),
- raftStorage: raft.NewMemoryStorage(),
- storage: p,
- transport: newNopTransporter(),
- })
- srv := &EtcdServer{
- lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
- Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCount: uint64(snapc), SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
- r: *r,
- v2store: st,
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
- SyncTicker: &time.Ticker{},
- consistIndex: cindex.NewConsistentIndex(be),
- }
- srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}
+ n.readyc <- raft.Ready{Snapshot: *snapMsg.Snapshot}
+ }()
- srv.kv = mvcc.New(zap.NewExample(), be, &lease.FakeLessor{}, mvcc.StoreConfig{})
- srv.be = be
+ ac := <-p.Chan()
+ if ac.Name != "Save" {
+ t.Fatalf("expected Save, got %+v", ac)
+ }
- srv.start()
+ if ac := <-p.Chan(); ac.Name != "SaveSnap" {
+ t.Fatalf("expected SaveSnap, got %+v", ac)
+ }
- donec := make(chan struct{})
- go func() {
- defer close(donec)
- wcnt := 3 + snapc
- gaction, _ := p.Wait(wcnt)
-
- // each operation is recorded as a Save
- // (SnapshotCount+1) * Puts + SaveSnap = (SnapshotCount+1) * Save + SaveSnap + Release
- if len(gaction) != wcnt {
- t.Logf("gaction: %v", gaction)
- t.Errorf("len(action) = %d, want %d", len(gaction), wcnt)
- return
- }
- if !reflect.DeepEqual(gaction[wcnt-2], testutil.Action{Name: "SaveSnap"}) {
- t.Errorf("action = %s, want SaveSnap", gaction[wcnt-2])
- }
+ if ac := <-p.Chan(); ac.Name != "Save" {
+ t.Fatalf("expected Save, got %+v", ac)
+ }
- if !reflect.DeepEqual(gaction[wcnt-1], testutil.Action{Name: "Release"}) {
- t.Errorf("action = %s, want Release", gaction[wcnt-1])
- }
- }()
+ // confirm snapshot file still present before calling SaveSnap
+ snapPath := filepath.Join(snapdir, fmt.Sprintf("%016x.snap.db", 1))
+ if !fileutil.Exist(snapPath) {
+ t.Fatalf("expected file %q, got missing", snapPath)
+ }
- for i := 0; i < snapc+1; i++ {
- srv.Do(context.Background(), pb.Request{Method: "PUT"})
+ // unblock SaveSnapshot, etcdserver now permitted to move snapshot file
+ if ac := <-p.Chan(); ac.Name != "Sync" {
+ t.Fatalf("expected Sync, got %+v", ac)
}
- <-donec
- srv.Stop()
+ if ac := <-p.Chan(); ac.Name != "Release" {
+ t.Fatalf("expected Release, got %+v", ac)
+ }
}
// TestConcurrentApplyAndSnapshotV3 will send out snapshots concurrently with
// proposals.
func TestConcurrentApplyAndSnapshotV3(t *testing.T) {
+ // Ignore the snapshot index verification in this unit test, because
+ // it doesn't follow the e2e apply logic.
+ revertFunc := verify.DisableVerifications()
+ defer revertFunc()
+
lg := zaptest.NewLogger(t)
n := newNopReadyNode()
st := v2store.New()
cl := membership.NewCluster(lg)
cl.SetStore(st)
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ cl.SetBackend(schema.NewMembershipBackend(lg, be))
- testdir, err := ioutil.TempDir(t.TempDir(), "testsnapdir")
- if err != nil {
- t.Fatalf("Couldn't open tempdir (%v)", err)
- }
- defer os.RemoveAll(testdir)
- if err := os.MkdirAll(testdir+"/member/snap", 0755); err != nil {
+ testdir := t.TempDir()
+ if err := os.MkdirAll(testdir+"/member/snap", 0o755); err != nil {
t.Fatalf("Couldn't make snap dir (%v)", err)
}
rs := raft.NewMemoryStorage()
- tr, snapDoneC := newSnapTransporter(testdir)
+ tr, snapDoneC := newSnapTransporter(lg, testdir)
r := newRaftNode(raftNodeConfig{
lg: lg,
isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
@@ -1259,21 +870,28 @@ func TestConcurrentApplyAndSnapshotV3(t *testing.T) {
storage: mockstorage.NewStorageRecorder(testdir),
raftStorage: rs,
})
- be, _ := betesting.NewDefaultTmpBackend(t)
ci := cindex.NewConsistentIndex(be)
s := &EtcdServer{
- lgMu: new(sync.RWMutex),
- lg: lg,
- Cfg: config.ServerConfig{Logger: lg, DataDir: testdir, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
- r: *r,
- v2store: st,
- snapshotter: snap.New(lg, testdir),
- cluster: cl,
- SyncTicker: &time.Ticker{},
- consistIndex: ci,
- beHooks: &backendHooks{lg: lg, indexer: ci},
+ lgMu: new(sync.RWMutex),
+ lg: lg,
+ Cfg: config.ServerConfig{
+ Logger: lg,
+ DataDir: testdir,
+ SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries,
+ ServerFeatureGate: features.NewDefaultServerFeatureGate("test", lg),
+ },
+ r: *r,
+ v2store: st,
+ snapshotter: snap.New(lg, testdir),
+ cluster: cl,
+ SyncTicker: &time.Ticker{},
+ consistIndex: ci,
+ beHooks: serverstorage.NewBackendHooks(lg, ci),
+ firstCommitInTerm: notify.NewNotifier(),
+ lessor: &lease.FakeLessor{},
+ uberApply: uberApplierMock{},
+ authStore: auth.NewAuthStore(lg, schema.NewAuthBackend(lg, be), nil, 1),
}
- s.applyV2 = &applierV2store{store: s.v2store, cluster: s.cluster}
s.kv = mvcc.New(lg, be, &lease.FakeLessor{}, mvcc.StoreConfig{})
s.be = be
@@ -1288,7 +906,10 @@ func TestConcurrentApplyAndSnapshotV3(t *testing.T) {
for k := 1; k <= 101; k++ {
idx++
ch := s.w.Register(idx)
- req := &pb.Request{Method: "QGET", ID: idx}
+ req := &pb.InternalRaftRequest{
+ Header: &pb.RequestHeader{ID: idx},
+ Put: &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")},
+ }
ent := raftpb.Entry{Index: idx, Data: pbutil.MustMarshal(req)}
ready := raft.Ready{Entries: []raftpb.Entry{ent}}
n.readyc <- ready
@@ -1314,7 +935,7 @@ func TestConcurrentApplyAndSnapshotV3(t *testing.T) {
if snapMsg.Snapshot.Metadata.Index == idx {
idx++
snapMsg.Snapshot.Metadata.Index = idx
- ready = raft.Ready{Snapshot: snapMsg.Snapshot}
+ ready = raft.Ready{Snapshot: *snapMsg.Snapshot}
n.readyc <- ready
accepted++
} else {
@@ -1337,9 +958,13 @@ func TestAddMember(t *testing.T) {
n.readyc <- raft.Ready{
SoftState: &raft.SoftState{RaftState: raft.StateLeader},
}
- cl := newTestCluster(t, nil)
+ cl := newTestCluster(t)
st := v2store.New()
cl.SetStore(st)
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
+ cl.SetBackend(schema.NewMembershipBackend(lg, be))
+
r := newRaftNode(raftNodeConfig{
lg: lg,
Node: n,
@@ -1356,7 +981,7 @@ func TestAddMember(t *testing.T) {
reqIDGen: idutil.NewGenerator(0, time.Time{}),
SyncTicker: &time.Ticker{},
consistIndex: cindex.NewFakeConsistentIndex(0),
- beHooks: &backendHooks{lg: lg},
+ beHooks: serverstorage.NewBackendHooks(lg, nil),
}
s.start()
m := membership.Member{ID: 1234, RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"foo"}}}
@@ -1376,6 +1001,62 @@ func TestAddMember(t *testing.T) {
}
}
+// TestProcessIgnoreMismatchMessage verifies that Process ignores messages
+// addressed to a mismatched member.
+func TestProcessIgnoreMismatchMessage(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ cl := newTestCluster(t)
+ st := v2store.New()
+ cl.SetStore(st)
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
+ cl.SetBackend(schema.NewMembershipBackend(lg, be))
+
+ // Bootstrap a 3-node cluster, member IDs: 1 2 3.
+ cl.AddMember(&membership.Member{ID: types.ID(1)}, true)
+ cl.AddMember(&membership.Member{ID: types.ID(2)}, true)
+ cl.AddMember(&membership.Member{ID: types.ID(3)}, true)
+ // r is initialized with ID 1.
+ r := realisticRaftNode(lg, 1, &raftpb.Snapshot{
+ Metadata: raftpb.SnapshotMetadata{
+ Index: 11, // Magic number.
+ Term: 11, // Magic number.
+ ConfState: raftpb.ConfState{
+ // Member ID list.
+ Voters: []uint64{1, 2, 3},
+ },
+ },
+ })
+ defer r.raftNodeConfig.Stop()
+ s := &EtcdServer{
+ lgMu: new(sync.RWMutex),
+ lg: lg,
+ memberID: 1,
+ r: *r,
+ v2store: st,
+ cluster: cl,
+ reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ SyncTicker: &time.Ticker{},
+ consistIndex: cindex.NewFakeConsistentIndex(0),
+ beHooks: serverstorage.NewBackendHooks(lg, nil),
+ }
+ // Mock a misbehaving switch dispatching messages to the wrong node.
+ m := raftpb.Message{
+ Type: raftpb.MsgHeartbeat,
+ To: 2, // Wrong ID, s.MemberID() is 1.
+ From: 3,
+ Term: 11,
+ Commit: 42, // Commit is larger than the last index 11.
+ }
+ if types.ID(m.To) == s.MemberID() {
+ t.Fatalf("m.To (%d) is expected to mismatch s.MemberID (%d)", m.To, s.MemberID())
+ }
+ err := s.Process(context.Background(), m)
+ if err == nil {
+ t.Fatalf("Must ignore the message and return an error")
+ }
+}
+
// TestRemoveMember tests RemoveMember can propose and perform node removal.
func TestRemoveMember(t *testing.T) {
lg := zaptest.NewLogger(t)
@@ -1383,9 +1064,13 @@ func TestRemoveMember(t *testing.T) {
n.readyc <- raft.Ready{
SoftState: &raft.SoftState{RaftState: raft.StateLeader},
}
- cl := newTestCluster(t, nil)
+ cl := newTestCluster(t)
st := v2store.New()
cl.SetStore(v2store.New())
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
+ cl.SetBackend(schema.NewMembershipBackend(lg, be))
+
cl.AddMember(&membership.Member{ID: 1234}, true)
r := newRaftNode(raftNodeConfig{
lg: lg,
@@ -1396,14 +1081,14 @@ func TestRemoveMember(t *testing.T) {
})
s := &EtcdServer{
lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
+ lg: zaptest.NewLogger(t),
r: *r,
v2store: st,
cluster: cl,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
SyncTicker: &time.Ticker{},
consistIndex: cindex.NewFakeConsistentIndex(0),
- beHooks: &backendHooks{lg: lg},
+ beHooks: serverstorage.NewBackendHooks(lg, nil),
}
s.start()
_, err := s.RemoveMember(context.Background(), 1234)
@@ -1425,13 +1110,16 @@ func TestRemoveMember(t *testing.T) {
 // TestUpdateMember tests that UpdateMember can propose and perform a node update.
func TestUpdateMember(t *testing.T) {
lg := zaptest.NewLogger(t)
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
n := newNodeConfChangeCommitterRecorder()
n.readyc <- raft.Ready{
SoftState: &raft.SoftState{RaftState: raft.StateLeader},
}
- cl := newTestCluster(t, nil)
+ cl := newTestCluster(t)
st := v2store.New()
cl.SetStore(st)
+ cl.SetBackend(schema.NewMembershipBackend(lg, be))
cl.AddMember(&membership.Member{ID: 1234}, true)
r := newRaftNode(raftNodeConfig{
lg: lg,
@@ -1449,7 +1137,7 @@ func TestUpdateMember(t *testing.T) {
reqIDGen: idutil.NewGenerator(0, time.Time{}),
SyncTicker: &time.Ticker{},
consistIndex: cindex.NewFakeConsistentIndex(0),
- beHooks: &backendHooks{lg: lg},
+ beHooks: serverstorage.NewBackendHooks(lg, nil),
}
s.start()
wm := membership.Member{ID: 1234, RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://127.0.0.1:1"}}}
@@ -1471,31 +1159,34 @@ func TestUpdateMember(t *testing.T) {
// TODO: test server could stop itself when being removed
-func TestPublish(t *testing.T) {
- lg := zaptest.NewLogger(t)
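+// TestPublishV3 tests that publish proposes the local member attributes in a ClusterMemberAttrSet request.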
+func TestPublishV3(t *testing.T) {
n := newNodeRecorder()
- ch := make(chan interface{}, 1)
+ ch := make(chan any, 1)
// simulate that request has gone through consensus
- ch <- Response{}
+ ch <- &apply2.Result{}
w := wait.NewWithResponse(ch)
ctx, cancel := context.WithCancel(context.Background())
+ lg := zaptest.NewLogger(t)
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: lg,
readych: make(chan struct{}),
- Cfg: config.ServerConfig{Logger: lg, TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
- id: 1,
+ Cfg: config.ServerConfig{Logger: lg, TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries, MaxRequestBytes: 1000},
+ memberID: 1,
r: *newRaftNode(raftNodeConfig{lg: lg, Node: n}),
attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}},
cluster: &membership.RaftCluster{},
w: w,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
SyncTicker: &time.Ticker{},
-
- ctx: ctx,
- cancel: cancel,
+ authStore: auth.NewAuthStore(lg, schema.NewAuthBackend(lg, be), nil, 0),
+ be: be,
+ ctx: ctx,
+ cancel: cancel,
}
- srv.publish(time.Hour)
+ srv.publishV3(time.Hour)
action := n.Action()
if len(action) != 1 {
@@ -1505,39 +1196,27 @@ func TestPublish(t *testing.T) {
t.Fatalf("action = %s, want Propose", action[0].Name)
}
data := action[0].Params[0].([]byte)
- var r pb.Request
+ var r pb.InternalRaftRequest
if err := r.Unmarshal(data); err != nil {
t.Fatalf("unmarshal request error: %v", err)
}
- if r.Method != "PUT" {
- t.Errorf("method = %s, want PUT", r.Method)
- }
- wm := membership.Member{ID: 1, Attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}}}
- if wpath := membership.MemberAttributesStorePath(wm.ID); r.Path != wpath {
- t.Errorf("path = %s, want %s", r.Path, wpath)
- }
- var gattr membership.Attributes
- if err := json.Unmarshal([]byte(r.Val), &gattr); err != nil {
- t.Fatalf("unmarshal val error: %v", err)
- }
- if !reflect.DeepEqual(gattr, wm.Attributes) {
- t.Errorf("member = %v, want %v", gattr, wm.Attributes)
- }
+ assert.Equal(t, &membershippb.ClusterMemberAttrSetRequest{Member_ID: 0x1, MemberAttributes: &membershippb.Attributes{
+ Name: "node1", ClientUrls: []string{"http://a", "http://b"},
+ }}, r.ClusterMemberAttrSet)
}
-// TestPublishStopped tests that publish will be stopped if server is stopped.
-func TestPublishStopped(t *testing.T) {
- lg := zaptest.NewLogger(t)
+// TestPublishV3Stopped tests that publish stops when the server is stopped.
+func TestPublishV3Stopped(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
r := newRaftNode(raftNodeConfig{
- lg: lg,
+ lg: zaptest.NewLogger(t),
Node: newNodeNop(),
transport: newNopTransporter(),
})
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
- lg: lg,
- Cfg: config.ServerConfig{Logger: lg, TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
+ lg: zaptest.NewLogger(t),
+ Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *r,
cluster: &membership.RaftCluster{},
w: mockwait.NewNop(),
@@ -1551,27 +1230,36 @@ func TestPublishStopped(t *testing.T) {
cancel: cancel,
}
close(srv.stopping)
- srv.publish(time.Hour)
+ srv.publishV3(time.Hour)
}
-// TestPublishRetry tests that publish will keep retry until success.
-func TestPublishRetry(t *testing.T) {
- lg := zaptest.NewLogger(t)
-
+// TestPublishV3Retry tests that publish keeps retrying until it succeeds.
+func TestPublishV3Retry(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
n := newNodeRecorderStream()
+
+ lg := zaptest.NewLogger(t)
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: lg,
- Cfg: config.ServerConfig{Logger: lg, TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
+ readych: make(chan struct{}),
+ Cfg: config.ServerConfig{Logger: lg, TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries, MaxRequestBytes: 1000},
+ memberID: 1,
r: *newRaftNode(raftNodeConfig{lg: lg, Node: n}),
w: mockwait.NewNop(),
stopping: make(chan struct{}),
+ attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}},
+ cluster: &membership.RaftCluster{},
reqIDGen: idutil.NewGenerator(0, time.Time{}),
SyncTicker: &time.Ticker{},
+ authStore: auth.NewAuthStore(lg, schema.NewAuthBackend(lg, be), nil, 0),
+ be: be,
ctx: ctx,
cancel: cancel,
}
+
// expect multiple proposals from retrying
ch := make(chan struct{})
go func() {
@@ -1590,38 +1278,40 @@ func TestPublishRetry(t *testing.T) {
}
}
}()
- srv.publish(10 * time.Nanosecond)
+ srv.publishV3(10 * time.Nanosecond)
ch <- struct{}{}
<-ch
}
-func TestPublishV3(t *testing.T) {
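+// TestUpdateVersionV3 tests that updateClusterVersionV3 proposes a ClusterVersionSet request carrying the target version.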
+func TestUpdateVersionV3(t *testing.T) {
n := newNodeRecorder()
- ch := make(chan interface{}, 1)
+ ch := make(chan any, 1)
// simulate that request has gone through consensus
- ch <- &applyResult{}
+ ch <- &apply2.Result{}
w := wait.NewWithResponse(ch)
- ctx, cancel := context.WithCancel(context.Background())
+ ctx, cancel := context.WithCancel(context.TODO())
lg := zaptest.NewLogger(t)
be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
- lg: lg,
- readych: make(chan struct{}),
+ lg: zaptest.NewLogger(t),
+ memberID: 1,
Cfg: config.ServerConfig{Logger: lg, TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries, MaxRequestBytes: 1000},
- id: 1,
- r: *newRaftNode(raftNodeConfig{lg: lg, Node: n}),
- attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}},
+ r: *newRaftNode(raftNodeConfig{lg: zaptest.NewLogger(t), Node: n}),
+ attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://node1.com"}},
cluster: &membership.RaftCluster{},
w: w,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
SyncTicker: &time.Ticker{},
- authStore: auth.NewAuthStore(lg, be, nil, 0),
+ authStore: auth.NewAuthStore(lg, schema.NewAuthBackend(lg, be), nil, 0),
be: be,
- ctx: ctx,
- cancel: cancel,
+
+ ctx: ctx,
+ cancel: cancel,
}
- srv.publishV3(time.Hour)
+ ver := "2.0.0"
+ srv.updateClusterVersionV3(ver)
action := n.Action()
if len(action) != 1 {
@@ -1635,91 +1325,13 @@ func TestPublishV3(t *testing.T) {
if err := r.Unmarshal(data); err != nil {
t.Fatalf("unmarshal request error: %v", err)
}
- assert.Equal(t, &membershippb.ClusterMemberAttrSetRequest{Member_ID: 0x1, MemberAttributes: &membershippb.Attributes{
- Name: "node1", ClientUrls: []string{"http://a", "http://b"}}}, r.ClusterMemberAttrSet)
-}
-
-// TestPublishStopped tests that publish will be stopped if server is stopped.
-func TestPublishV3Stopped(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- r := newRaftNode(raftNodeConfig{
- lg: zap.NewExample(),
- Node: newNodeNop(),
- transport: newNopTransporter(),
- })
- srv := &EtcdServer{
- lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
- Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
- r: *r,
- cluster: &membership.RaftCluster{},
- w: mockwait.NewNop(),
- done: make(chan struct{}),
- stopping: make(chan struct{}),
- stop: make(chan struct{}),
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
- SyncTicker: &time.Ticker{},
-
- ctx: ctx,
- cancel: cancel,
- }
- close(srv.stopping)
- srv.publishV3(time.Hour)
-}
-
-// TestPublishRetry tests that publish will keep retry until success.
-func TestPublishV3Retry(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- n := newNodeRecorderStream()
-
- lg := zaptest.NewLogger(t)
- be, _ := betesting.NewDefaultTmpBackend(t)
- srv := &EtcdServer{
- lgMu: new(sync.RWMutex),
- lg: lg,
- readych: make(chan struct{}),
- Cfg: config.ServerConfig{Logger: lg, TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries, MaxRequestBytes: 1000},
- id: 1,
- r: *newRaftNode(raftNodeConfig{lg: lg, Node: n}),
- w: mockwait.NewNop(),
- stopping: make(chan struct{}),
- attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}},
- cluster: &membership.RaftCluster{},
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
- SyncTicker: &time.Ticker{},
- authStore: auth.NewAuthStore(lg, be, nil, 0),
- be: be,
- ctx: ctx,
- cancel: cancel,
- }
-
- // expect multiple proposals from retrying
- ch := make(chan struct{})
- go func() {
- defer close(ch)
- if action, err := n.Wait(2); err != nil {
- t.Errorf("len(action) = %d, want >= 2 (%v)", len(action), err)
- }
- close(srv.stopping)
- // drain remaining actions, if any, so publish can terminate
- for {
- select {
- case <-ch:
- return
- default:
- n.Action()
- }
- }
- }()
- srv.publishV3(10 * time.Nanosecond)
- ch <- struct{}{}
- <-ch
+ assert.Equal(t, &membershippb.ClusterVersionSetRequest{Ver: ver}, r.ClusterVersionSet)
}
func TestStopNotify(t *testing.T) {
s := &EtcdServer{
lgMu: new(sync.RWMutex),
- lg: zap.NewExample(),
+ lg: zaptest.NewLogger(t),
stop: make(chan struct{}),
done: make(chan struct{}),
}
@@ -1792,14 +1404,17 @@ func (n *nodeRecorder) Campaign(ctx context.Context) error {
n.Record(testutil.Action{Name: "Campaign"})
return nil
}
+
func (n *nodeRecorder) Propose(ctx context.Context, data []byte) error {
- n.Record(testutil.Action{Name: "Propose", Params: []interface{}{data}})
+ n.Record(testutil.Action{Name: "Propose", Params: []any{data}})
return nil
}
+
func (n *nodeRecorder) ProposeConfChange(ctx context.Context, conf raftpb.ConfChangeI) error {
n.Record(testutil.Action{Name: "ProposeConfChange"})
return nil
}
+
func (n *nodeRecorder) Step(ctx context.Context, msg raftpb.Message) error {
n.Record(testutil.Action{Name: "Step"})
return nil
@@ -1810,7 +1425,7 @@ func (n *nodeRecorder) TransferLeadership(ctx context.Context, lead, transferee
func (n *nodeRecorder) ReadIndex(ctx context.Context, rctx []byte) error { return nil }
func (n *nodeRecorder) Advance() {}
func (n *nodeRecorder) ApplyConfChange(conf raftpb.ConfChangeI) *raftpb.ConfState {
- n.Record(testutil.Action{Name: "ApplyConfChange", Params: []interface{}{conf}})
+ n.Record(testutil.Action{Name: "ApplyConfChange", Params: []any{conf}})
return &raftpb.ConfState{}
}
@@ -1826,17 +1441,7 @@ func (n *nodeRecorder) Compact(index uint64, nodes []uint64, d []byte) {
n.Record(testutil.Action{Name: "Compact"})
}
-type nodeProposalBlockerRecorder struct {
- nodeRecorder
-}
-
-func newProposalBlockerRecorder() *nodeProposalBlockerRecorder {
- return &nodeProposalBlockerRecorder{*newNodeRecorderStream()}
-}
-
-func (n *nodeProposalBlockerRecorder) Propose(ctx context.Context, data []byte) error {
- <-ctx.Done()
- n.Record(testutil.Action{Name: "Propose blocked"})
+func (n *nodeRecorder) ForgetLeader(ctx context.Context) error {
return nil
}
@@ -1849,8 +1454,10 @@ type readyNode struct {
func newReadyNode() *readyNode {
return &readyNode{
nodeRecorder{testutil.NewRecorderStream()},
- make(chan raft.Ready, 1)}
+ make(chan raft.Ready, 1),
+ }
}
+
func newNopReadyNode() *readyNode {
return &readyNode{*newNodeRecorder(), make(chan raft.Ready, 1)}
}
@@ -1896,35 +1503,24 @@ func (n *nodeConfChangeCommitterRecorder) ProposeConfChange(ctx context.Context,
n.readyc <- raft.Ready{CommittedEntries: []raftpb.Entry{{Index: n.index, Type: typ, Data: data}}}
return nil
}
+
func (n *nodeConfChangeCommitterRecorder) Ready() <-chan raft.Ready {
return n.readyc
}
+
func (n *nodeConfChangeCommitterRecorder) ApplyConfChange(conf raftpb.ConfChangeI) *raftpb.ConfState {
n.Record(testutil.Action{Name: "ApplyConfChange:" + confChangeActionName(conf)})
return &raftpb.ConfState{}
}
-// nodeCommitter commits proposed data immediately.
-type nodeCommitter struct {
- readyNode
- index uint64
-}
-
-func newNodeCommitter() raft.Node {
- return &nodeCommitter{*newNopReadyNode(), 0}
-}
-func (n *nodeCommitter) Propose(ctx context.Context, data []byte) error {
- n.index++
- ents := []raftpb.Entry{{Index: n.index, Data: data}}
- n.readyc <- raft.Ready{
- Entries: ents,
- CommittedEntries: ents,
- }
- return nil
+func newTestCluster(t testing.TB) *membership.RaftCluster {
+ return membership.NewCluster(zaptest.NewLogger(t))
}
-func newTestCluster(t testing.TB, membs []*membership.Member) *membership.RaftCluster {
- c := membership.NewCluster(zaptest.NewLogger(t))
+func newTestClusterWithBackend(t testing.TB, membs []*membership.Member, be backend.Backend) *membership.RaftCluster {
+ lg := zaptest.NewLogger(t)
+ c := membership.NewCluster(lg)
+ c.SetBackend(schema.NewMembershipBackend(lg, be))
for _, m := range membs {
c.AddMember(m, true)
}
@@ -1956,16 +1552,17 @@ type snapTransporter struct {
nopTransporter
snapDoneC chan snap.Message
snapDir string
+ lg *zap.Logger
}
-func newSnapTransporter(snapDir string) (rafthttp.Transporter, <-chan snap.Message) {
+func newSnapTransporter(lg *zap.Logger, snapDir string) (rafthttp.Transporter, <-chan snap.Message) {
ch := make(chan snap.Message, 1)
- tr := &snapTransporter{snapDoneC: ch, snapDir: snapDir}
+ tr := &snapTransporter{snapDoneC: ch, snapDir: snapDir, lg: lg}
return tr, ch
}
func (s *snapTransporter) SendSnapshot(m snap.Message) {
- ss := snap.New(zap.NewExample(), s.snapDir)
+ ss := snap.New(s.lg, s.snapDir)
ss.SaveDBFrom(m.ReadCloser, m.Snapshot.Metadata.Index+1)
m.CloseWithError(nil)
s.snapDoneC <- m
@@ -1991,3 +1588,101 @@ func (s *sendMsgAppRespTransporter) Send(m []raftpb.Message) {
}
s.sendC <- send
}
+
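+// TestWaitAppliedIndex verifies that waitAppliedIndex returns once the applied
+// index reaches the committed index, and fails fast on server stop or timeout.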
+func TestWaitAppliedIndex(t *testing.T) {
+ cases := []struct {
+ name string
+ appliedIndex uint64
+ committedIndex uint64
+ action func(s *EtcdServer)
+ ExpectedError error
+ }{
+ {
+ name: "The applied Id is already equal to the commitId",
+ appliedIndex: 10,
+ committedIndex: 10,
+ action: func(s *EtcdServer) {
+ s.applyWait.Trigger(10)
+ },
+ ExpectedError: nil,
+ },
+ {
+ name: "The etcd server has already stopped",
+ appliedIndex: 10,
+ committedIndex: 12,
+ action: func(s *EtcdServer) {
+ s.stopping <- struct{}{}
+ },
+ ExpectedError: errors.ErrStopped,
+ },
+ {
+ name: "Timed out waiting for the applied index",
+ appliedIndex: 10,
+ committedIndex: 12,
+ action: nil,
+ ExpectedError: errors.ErrTimeoutWaitAppliedIndex,
+ },
+ }
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ s := &EtcdServer{
+ appliedIndex: tc.appliedIndex,
+ committedIndex: tc.committedIndex,
+ stopping: make(chan struct{}, 1),
+ applyWait: wait.NewTimeList(),
+ }
+
+ if tc.action != nil {
+ go tc.action(s)
+ }
+
+ err := s.waitAppliedIndex()
+
+ if !errorspkg.Is(err, tc.ExpectedError) {
+ t.Errorf("Unexpected error, want (%v), got (%v)", tc.ExpectedError, err)
+ }
+ })
+ }
+}
+
+func TestIsActive(t *testing.T) {
+ cases := []struct {
+ name string
+ tickMs uint
+ durationSinceLastTick time.Duration
+ expectActive bool
+ }{
+ {
+ name: "1.5*tickMs,active",
+ tickMs: 100,
+ durationSinceLastTick: 150 * time.Millisecond,
+ expectActive: true,
+ },
+ {
+ name: "2*tickMs,active",
+ tickMs: 200,
+ durationSinceLastTick: 400 * time.Millisecond,
+ expectActive: true,
+ },
+ {
+ name: "4*tickMs,not active",
+ tickMs: 150,
+ durationSinceLastTick: 600 * time.Millisecond,
+ expectActive: false,
+ },
+ }
+
+ for _, tc := range cases {
+ s := EtcdServer{
+ Cfg: config.ServerConfig{
+ TickMs: tc.tickMs,
+ },
+ r: raftNode{
+ tickMu: new(sync.RWMutex),
+ latestTickTs: time.Now().Add(-tc.durationSinceLastTick),
+ },
+ }
+
+ require.Equal(t, tc.expectActive, s.isActive())
+ }
+}
diff --git a/server/etcdserver/snapshot_merge.go b/server/etcdserver/snapshot_merge.go
index 72d10c1796d..cc3c545bee2 100644
--- a/server/etcdserver/snapshot_merge.go
+++ b/server/etcdserver/snapshot_merge.go
@@ -17,12 +17,12 @@ package etcdserver
import (
"io"
- "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
-
humanize "github.com/dustin/go-humanize"
"go.uber.org/zap"
+
+ "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/raft/v3/raftpb"
)
// createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf),
@@ -31,11 +31,7 @@ import (
func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message {
lg := s.Logger()
// get a snapshot of v2 store as []byte
- clone := s.v2store.Clone()
- d, err := clone.SaveNoCopy()
- if err != nil {
- lg.Panic("failed to save v2 store data", zap.Error(err))
- }
+ d := GetMembershipInfoInV2Format(lg, s.cluster)
// commit kv to write metadata(for example: consistent index).
s.KV().Commit()
@@ -53,7 +49,9 @@ func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi
},
Data: d,
}
- m.Snapshot = snapshot
+ m.Snapshot = &snapshot
+
+ verifySnapshotIndex(snapshot, s.consistIndex.ConsistentIndex())
return *snap.NewMessage(m, rc, dbsnap.Size())
}
diff --git a/server/etcdserver/storage.go b/server/etcdserver/storage.go
deleted file mode 100644
index e662537d368..00000000000
--- a/server/etcdserver/storage.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "io"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/pkg/v3/pbutil"
- "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
- "go.etcd.io/etcd/server/v3/wal"
- "go.etcd.io/etcd/server/v3/wal/walpb"
-
- "go.uber.org/zap"
-)
-
-type Storage interface {
- // Save function saves ents and state to the underlying stable storage.
- // Save MUST block until st and ents are on stable storage.
- Save(st raftpb.HardState, ents []raftpb.Entry) error
- // SaveSnap function saves snapshot to the underlying stable storage.
- SaveSnap(snap raftpb.Snapshot) error
- // Close closes the Storage and performs finalization.
- Close() error
- // Release releases the locked wal files older than the provided snapshot.
- Release(snap raftpb.Snapshot) error
- // Sync WAL
- Sync() error
-}
-
-type storage struct {
- *wal.WAL
- *snap.Snapshotter
-}
-
-func NewStorage(w *wal.WAL, s *snap.Snapshotter) Storage {
- return &storage{w, s}
-}
-
-// SaveSnap saves the snapshot file to disk and writes the WAL snapshot entry.
-func (st *storage) SaveSnap(snap raftpb.Snapshot) error {
- walsnap := walpb.Snapshot{
- Index: snap.Metadata.Index,
- Term: snap.Metadata.Term,
- ConfState: &snap.Metadata.ConfState,
- }
- // save the snapshot file before writing the snapshot to the wal.
- // This makes it possible for the snapshot file to become orphaned, but prevents
- // a WAL snapshot entry from having no corresponding snapshot file.
- err := st.Snapshotter.SaveSnap(snap)
- if err != nil {
- return err
- }
- // gofail: var raftBeforeWALSaveSnaphot struct{}
-
- return st.WAL.SaveSnapshot(walsnap)
-}
-
-// Release releases resources older than the given snap and are no longer needed:
-// - releases the locks to the wal files that are older than the provided wal for the given snap.
-// - deletes any .snap.db files that are older than the given snap.
-func (st *storage) Release(snap raftpb.Snapshot) error {
- if err := st.WAL.ReleaseLockTo(snap.Metadata.Index); err != nil {
- return err
- }
- return st.Snapshotter.ReleaseSnapDBs(snap)
-}
-
-// readWAL reads the WAL at the given snap and returns the wal, its latest HardState and cluster ID, and all entries that appear
-// after the position of the given snap in the WAL.
-// The snap must have been previously saved to the WAL, or this call will panic.
-func readWAL(lg *zap.Logger, waldir string, snap walpb.Snapshot, unsafeNoFsync bool) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) {
- var (
- err error
- wmetadata []byte
- )
-
- repaired := false
- for {
- if w, err = wal.Open(lg, waldir, snap); err != nil {
- lg.Fatal("failed to open WAL", zap.Error(err))
- }
- if unsafeNoFsync {
- w.SetUnsafeNoFsync()
- }
- if wmetadata, st, ents, err = w.ReadAll(); err != nil {
- w.Close()
- // we can only repair ErrUnexpectedEOF and we never repair twice.
- if repaired || err != io.ErrUnexpectedEOF {
- lg.Fatal("failed to read WAL, cannot be repaired", zap.Error(err))
- }
- if !wal.Repair(lg, waldir) {
- lg.Fatal("failed to repair WAL", zap.Error(err))
- } else {
- lg.Info("repaired WAL", zap.Error(err))
- repaired = true
- }
- continue
- }
- break
- }
- var metadata pb.Metadata
- pbutil.MustUnmarshal(&metadata, wmetadata)
- id = types.ID(metadata.NodeID)
- cid = types.ID(metadata.ClusterID)
- return w, id, cid, st, ents
-}
diff --git a/server/etcdserver/txn/metrics.go b/server/etcdserver/txn/metrics.go
new file mode 100644
index 00000000000..e66254b1c60
--- /dev/null
+++ b/server/etcdserver/txn/metrics.go
@@ -0,0 +1,67 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package txn
+
+import (
+ "strconv"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ slowApplies = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "slow_apply_total",
+ Help: "The total number of slow apply requests (likely overloaded from slow disk).",
+ })
+ applySec = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "apply_duration_seconds",
+ Help: "The latency distributions of v2 apply called by backend.",
+
+ // lowest bucket start of upper bound 0.0001 sec (0.1 ms) with factor 2
+ // highest bucket start of 0.0001 sec * 2^19 == 52.4288 sec
+ Buckets: prometheus.ExponentialBuckets(0.0001, 2, 20),
+ },
+ []string{"version", "op", "success"})
+ rangeSec = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "range_duration_seconds",
+ Help: "The latency distributions of txn.Range",
+
+ // lowest bucket start of upper bound 0.0001 sec (0.1 ms) with factor 2
+ // highest bucket start of 0.0001 sec * 2^19 == 52.4288 sec
+ Buckets: prometheus.ExponentialBuckets(0.0001, 2, 20),
+ },
+ []string{"success"})
+)
+
+func ApplySecObserve(version, op string, success bool, latency time.Duration) {
+ applySec.WithLabelValues(version, op, strconv.FormatBool(success)).Observe(float64(latency.Microseconds()) / 1000000.0)
+}
+
+func RangeSecObserve(success bool, latency time.Duration) {
+ rangeSec.WithLabelValues(strconv.FormatBool(success)).Observe(float64(latency.Microseconds()) / 1000000.0)
+}
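+
+// Illustrative usage (not part of this change): observing a 500ms successful
+// range as RangeSecObserve(true, 500*time.Millisecond) records 0.5s in the
+// {success="true"} series, which lands in the le="0.8192" bucket.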
+
+func init() {
+ prometheus.MustRegister(applySec)
+ prometheus.MustRegister(rangeSec)
+ prometheus.MustRegister(slowApplies)
+}
diff --git a/server/etcdserver/txn/metrics_test.go b/server/etcdserver/txn/metrics_test.go
new file mode 100644
index 00000000000..4a1cd3e1ee3
--- /dev/null
+++ b/server/etcdserver/txn/metrics_test.go
@@ -0,0 +1,62 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package txn
+
+import (
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRangeSecObserve(t *testing.T) {
+ // Simulate a range operation taking 500 milliseconds.
+ latency := 500 * time.Millisecond
+ RangeSecObserve(true, latency)
+
+ // Use testutil to collect the results and check against expected value
+ expected := `
+# HELP etcd_server_range_duration_seconds The latency distributions of txn.Range
+# TYPE etcd_server_range_duration_seconds histogram
+etcd_server_range_duration_seconds_bucket{success="true",le="0.0001"} 0
+etcd_server_range_duration_seconds_bucket{success="true",le="0.0002"} 0
+etcd_server_range_duration_seconds_bucket{success="true",le="0.0004"} 0
+etcd_server_range_duration_seconds_bucket{success="true",le="0.0008"} 0
+etcd_server_range_duration_seconds_bucket{success="true",le="0.0016"} 0
+etcd_server_range_duration_seconds_bucket{success="true",le="0.0032"} 0
+etcd_server_range_duration_seconds_bucket{success="true",le="0.0064"} 0
+etcd_server_range_duration_seconds_bucket{success="true",le="0.0128"} 0
+etcd_server_range_duration_seconds_bucket{success="true",le="0.0256"} 0
+etcd_server_range_duration_seconds_bucket{success="true",le="0.0512"} 0
+etcd_server_range_duration_seconds_bucket{success="true",le="0.1024"} 0
+etcd_server_range_duration_seconds_bucket{success="true",le="0.2048"} 0
+etcd_server_range_duration_seconds_bucket{success="true",le="0.4096"} 0
+etcd_server_range_duration_seconds_bucket{success="true",le="0.8192"} 1
+etcd_server_range_duration_seconds_bucket{success="true",le="1.6384"} 1
+etcd_server_range_duration_seconds_bucket{success="true",le="3.2768"} 1
+etcd_server_range_duration_seconds_bucket{success="true",le="6.5536"} 1
+etcd_server_range_duration_seconds_bucket{success="true",le="13.1072"} 1
+etcd_server_range_duration_seconds_bucket{success="true",le="26.2144"} 1
+etcd_server_range_duration_seconds_bucket{success="true",le="52.4288"} 1
+etcd_server_range_duration_seconds_bucket{success="true",le="+Inf"} 1
+etcd_server_range_duration_seconds_sum{success="true"} 0.5
+etcd_server_range_duration_seconds_count{success="true"} 1
+`
+
+ err := testutil.CollectAndCompare(rangeSec, strings.NewReader(expected))
+ require.NoErrorf(t, err, "Collected metrics did not match expected metrics")
+}
diff --git a/server/etcdserver/txn/txn.go b/server/etcdserver/txn/txn.go
new file mode 100644
index 00000000000..51f70a06a42
--- /dev/null
+++ b/server/etcdserver/txn/txn.go
@@ -0,0 +1,723 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package txn
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "sort"
+ "time"
+
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+ "go.etcd.io/etcd/server/v3/lease"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+)
+
+func Put(ctx context.Context, lg *zap.Logger, lessor lease.Lessor, kv mvcc.KV, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) {
+ trace = traceutil.Get(ctx)
+ // create put tracing if the trace in context is empty
+ if trace.IsEmpty() {
+ trace = traceutil.New("put",
+ lg,
+ traceutil.Field{Key: "key", Value: string(p.Key)},
+ traceutil.Field{Key: "req_size", Value: p.Size()},
+ )
+ ctx = context.WithValue(ctx, traceutil.TraceKey{}, trace)
+ }
+ leaseID := lease.LeaseID(p.Lease)
+ if leaseID != lease.NoLease {
+ if l := lessor.Lookup(leaseID); l == nil {
+ return nil, nil, lease.ErrLeaseNotFound
+ }
+ }
+ txnWrite := kv.Write(trace)
+ defer txnWrite.End()
+ resp, err = put(ctx, txnWrite, p)
+ return resp, trace, err
+}
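+
+// A minimal sketch of calling Put (illustrative; ctx, lg, lessor and kv are
+// assumed to be an existing context, logger, lease.Lessor and mvcc.KV):
+//
+//	resp, _, err := Put(ctx, lg, lessor, kv, &pb.PutRequest{Key: []byte("k"), Value: []byte("v")})
+//	// On success, resp.Header.Revision is the revision created by the write;
+//	// a non-zero Lease unknown to the lessor fails with lease.ErrLeaseNotFound.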
+
+func put(ctx context.Context, txnWrite mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) {
+ trace := traceutil.Get(ctx)
+ resp = &pb.PutResponse{}
+ resp.Header = &pb.ResponseHeader{}
+ val, leaseID := p.Value, lease.LeaseID(p.Lease)
+
+ var rr *mvcc.RangeResult
+ if p.IgnoreValue || p.IgnoreLease || p.PrevKv {
+ trace.StepWithFunction(func() {
+ rr, err = txnWrite.Range(context.TODO(), p.Key, nil, mvcc.RangeOptions{})
+ }, "get previous kv pair")
+
+ if err != nil {
+ return nil, err
+ }
+ }
+ if p.IgnoreValue || p.IgnoreLease {
+ if rr == nil || len(rr.KVs) == 0 {
+ // ignore_{lease,value} flag expects previous key-value pair
+ return nil, errors.ErrKeyNotFound
+ }
+ }
+ if p.IgnoreValue {
+ val = rr.KVs[0].Value
+ }
+ if p.IgnoreLease {
+ leaseID = lease.LeaseID(rr.KVs[0].Lease)
+ }
+ if p.PrevKv {
+ if rr != nil && len(rr.KVs) != 0 {
+ resp.PrevKv = &rr.KVs[0]
+ }
+ }
+
+ resp.Header.Revision = txnWrite.Put(p.Key, val, leaseID)
+ trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision})
+ return resp, nil
+}
+
+func DeleteRange(ctx context.Context, lg *zap.Logger, kv mvcc.KV, dr *pb.DeleteRangeRequest) (resp *pb.DeleteRangeResponse, trace *traceutil.Trace, err error) {
+ trace = traceutil.Get(ctx)
+ // create delete tracing if the trace in context is empty
+ if trace.IsEmpty() {
+ trace = traceutil.New("delete_range",
+ lg,
+ traceutil.Field{Key: "key", Value: string(dr.Key)},
+ traceutil.Field{Key: "range_end", Value: string(dr.RangeEnd)},
+ )
+ ctx = context.WithValue(ctx, traceutil.TraceKey{}, trace)
+ }
+ txnWrite := kv.Write(trace)
+ defer txnWrite.End()
+ resp, err = deleteRange(ctx, txnWrite, dr)
+ return resp, trace, err
+}
+
+func deleteRange(ctx context.Context, txnWrite mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+ resp := &pb.DeleteRangeResponse{}
+ resp.Header = &pb.ResponseHeader{}
+ end := mkGteRange(dr.RangeEnd)
+
+ if dr.PrevKv {
+ rr, err := txnWrite.Range(ctx, dr.Key, end, mvcc.RangeOptions{})
+ if err != nil {
+ return nil, err
+ }
+ if rr != nil {
+ resp.PrevKvs = make([]*mvccpb.KeyValue, len(rr.KVs))
+ for i := range rr.KVs {
+ resp.PrevKvs[i] = &rr.KVs[i]
+ }
+ }
+ }
+
+ resp.Deleted, resp.Header.Revision = txnWrite.DeleteRange(dr.Key, end)
+ return resp, nil
+}
+
+func Range(ctx context.Context, lg *zap.Logger, kv mvcc.KV, r *pb.RangeRequest) (resp *pb.RangeResponse, trace *traceutil.Trace, err error) {
+ trace = traceutil.Get(ctx)
+ if trace.IsEmpty() {
+ trace = traceutil.New("range", lg)
+ ctx = context.WithValue(ctx, traceutil.TraceKey{}, trace)
+ }
+ defer func(start time.Time) {
+ success := err == nil
+ RangeSecObserve(success, time.Since(start))
+ }(time.Now())
+ txnRead := kv.Read(mvcc.ConcurrentReadTxMode, trace)
+ defer txnRead.End()
+ resp, err = executeRange(ctx, lg, txnRead, r)
+ return resp, trace, err
+}
+
+func executeRange(ctx context.Context, lg *zap.Logger, txnRead mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+ trace := traceutil.Get(ctx)
+
+ resp := &pb.RangeResponse{}
+ resp.Header = &pb.ResponseHeader{}
+
+ limit := r.Limit
+ if r.SortOrder != pb.RangeRequest_NONE ||
+ r.MinModRevision != 0 || r.MaxModRevision != 0 ||
+ r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 {
+ // fetch everything; sort and truncate afterwards
+ limit = 0
+ }
+ if limit > 0 {
+ // fetch one extra for 'more' flag
+ limit = limit + 1
+ }
+
+ ro := mvcc.RangeOptions{
+ Limit: limit,
+ Rev: r.Revision,
+ Count: r.CountOnly,
+ }
+
+ rr, err := txnRead.Range(ctx, r.Key, mkGteRange(r.RangeEnd), ro)
+ if err != nil {
+ return nil, err
+ }
+
+ if r.MaxModRevision != 0 {
+ f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision }
+ pruneKVs(rr, f)
+ }
+ if r.MinModRevision != 0 {
+ f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision }
+ pruneKVs(rr, f)
+ }
+ if r.MaxCreateRevision != 0 {
+ f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision }
+ pruneKVs(rr, f)
+ }
+ if r.MinCreateRevision != 0 {
+ f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision }
+ pruneKVs(rr, f)
+ }
+
+ sortOrder := r.SortOrder
+ if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE {
+ // Since current mvcc.Range implementation returns results
+ // sorted by keys in lexicographically ascending order,
+ // sort ASCEND by default only when target is not 'KEY'
+ sortOrder = pb.RangeRequest_ASCEND
+ } else if r.SortTarget == pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_ASCEND {
+ // Since current mvcc.Range implementation returns results
+ // sorted by keys in lexicographically ascending order,
+ // don't re-sort when target is 'KEY' and order is ASCEND
+ sortOrder = pb.RangeRequest_NONE
+ }
+ if sortOrder != pb.RangeRequest_NONE {
+ var sorter sort.Interface
+ switch {
+ case r.SortTarget == pb.RangeRequest_KEY:
+ sorter = &kvSortByKey{&kvSort{rr.KVs}}
+ case r.SortTarget == pb.RangeRequest_VERSION:
+ sorter = &kvSortByVersion{&kvSort{rr.KVs}}
+ case r.SortTarget == pb.RangeRequest_CREATE:
+ sorter = &kvSortByCreate{&kvSort{rr.KVs}}
+ case r.SortTarget == pb.RangeRequest_MOD:
+ sorter = &kvSortByMod{&kvSort{rr.KVs}}
+ case r.SortTarget == pb.RangeRequest_VALUE:
+ sorter = &kvSortByValue{&kvSort{rr.KVs}}
+ default:
+ lg.Panic("unexpected sort target", zap.Int32("sort-target", int32(r.SortTarget)))
+ }
+ switch {
+ case sortOrder == pb.RangeRequest_ASCEND:
+ sort.Sort(sorter)
+ case sortOrder == pb.RangeRequest_DESCEND:
+ sort.Sort(sort.Reverse(sorter))
+ }
+ }
+
+ if r.Limit > 0 && len(rr.KVs) > int(r.Limit) {
+ rr.KVs = rr.KVs[:r.Limit]
+ resp.More = true
+ }
+ trace.Step("filter and sort the key-value pairs")
+ resp.Header.Revision = rr.Rev
+ resp.Count = int64(rr.Count)
+ resp.Kvs = make([]*mvccpb.KeyValue, len(rr.KVs))
+ for i := range rr.KVs {
+ if r.KeysOnly {
+ rr.KVs[i].Value = nil
+ }
+ resp.Kvs[i] = &rr.KVs[i]
+ }
+ trace.Step("assemble the response")
+ return resp, nil
+}
+
+func Txn(ctx context.Context, lg *zap.Logger, rt *pb.TxnRequest, txnModeWriteWithSharedBuffer bool, kv mvcc.KV, lessor lease.Lessor) (*pb.TxnResponse, *traceutil.Trace, error) {
+ trace := traceutil.Get(ctx)
+ if trace.IsEmpty() {
+ trace = traceutil.New("transaction", lg)
+ ctx = context.WithValue(ctx, traceutil.TraceKey{}, trace)
+ }
+ isWrite := !IsTxnReadonly(rt)
+ // When the transaction contains write operations, we use ReadTx instead of
+ // ConcurrentReadTx to avoid extra overhead of copying buffer.
+ var mode mvcc.ReadTxMode
+ if isWrite && txnModeWriteWithSharedBuffer /*a.s.Cfg.ServerFeatureGate.Enabled(features.TxnModeWriteWithSharedBuffer)*/ {
+ mode = mvcc.SharedBufReadTxMode
+ } else {
+ mode = mvcc.ConcurrentReadTxMode
+ }
+ txnRead := kv.Read(mode, trace)
+ var txnPath []bool
+ trace.StepWithFunction(
+ func() {
+ txnPath = compareToPath(txnRead, rt)
+ },
+ "compare",
+ )
+ if isWrite {
+ trace.AddField(traceutil.Field{Key: "read_only", Value: false})
+ }
+ _, err := checkTxn(txnRead, rt, lessor, txnPath)
+ if err != nil {
+ txnRead.End()
+ return nil, nil, err
+ }
+ trace.Step("check requests")
+ // When executing mutable txnWrite ops, etcd must hold the write txn lock so
+ // readers do not see any intermediate results. Since writes are
+ // serialized on the raft loop, the revision in the read view will
+ // be the revision of the write txn.
+ var txnWrite mvcc.TxnWrite
+ if isWrite {
+ txnRead.End()
+ txnWrite = kv.Write(trace)
+ } else {
+ txnWrite = mvcc.NewReadOnlyTxnWrite(txnRead)
+ }
+ txnResp, err := txn(ctx, lg, txnWrite, rt, isWrite, txnPath)
+ txnWrite.End()
+
+ trace.AddField(
+ traceutil.Field{Key: "number_of_response", Value: len(txnResp.Responses)},
+ traceutil.Field{Key: "response_revision", Value: txnResp.Header.Revision},
+ )
+ return txnResp, trace, err
+}
+
+func txn(ctx context.Context, lg *zap.Logger, txnWrite mvcc.TxnWrite, rt *pb.TxnRequest, isWrite bool, txnPath []bool) (*pb.TxnResponse, error) {
+ txnResp, _ := newTxnResp(rt, txnPath)
+ _, err := executeTxn(ctx, lg, txnWrite, rt, txnPath, txnResp)
+ if err != nil {
+ if isWrite {
+ // CAUTION: When a txn performing write operations starts, we always expect it to be successful.
+ // If a write failure is seen we SHOULD NOT try to recover the server, but crash with a panic to make the failure explicit.
+ // Trying to silently recover (e.g. by ignoring the failed txn or calling txn.End() early) poses serious risks:
+ // - violation of transaction atomicity if some write operations have been partially executed
+ // - data inconsistency across different etcd members if they applied the txn asymmetrically
+ lg.Panic("unexpected error during txn with writes", zap.Error(err))
+ } else {
+ lg.Error("unexpected error during readonly txn", zap.Error(err))
+ }
+ }
+ rev := txnWrite.Rev()
+ if len(txnWrite.Changes()) != 0 {
+ rev++
+ }
+ txnResp.Header.Revision = rev
+ return txnResp, err
+}
+
+// newTxnResp allocates a txn response for a txn request given a path.
+func newTxnResp(rt *pb.TxnRequest, txnPath []bool) (txnResp *pb.TxnResponse, txnCount int) {
+ reqs := rt.Success
+ if !txnPath[0] {
+ reqs = rt.Failure
+ }
+ resps := make([]*pb.ResponseOp, len(reqs))
+ txnResp = &pb.TxnResponse{
+ Responses: resps,
+ Succeeded: txnPath[0],
+ Header: &pb.ResponseHeader{},
+ }
+ for i, req := range reqs {
+ switch tv := req.Request.(type) {
+ case *pb.RequestOp_RequestRange:
+ resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{}}
+ case *pb.RequestOp_RequestPut:
+ resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{}}
+ case *pb.RequestOp_RequestDeleteRange:
+ resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{}}
+ case *pb.RequestOp_RequestTxn:
+ resp, txns := newTxnResp(tv.RequestTxn, txnPath[1:])
+ resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseTxn{ResponseTxn: resp}}
+ txnPath = txnPath[1+txns:]
+ txnCount += txns + 1
+ default:
+ }
+ }
+ return txnResp, txnCount
+}
+
+func executeTxn(ctx context.Context, lg *zap.Logger, txnWrite mvcc.TxnWrite, rt *pb.TxnRequest, txnPath []bool, tresp *pb.TxnResponse) (txns int, err error) {
+ trace := traceutil.Get(ctx)
+ reqs := rt.Success
+ if !txnPath[0] {
+ reqs = rt.Failure
+ }
+
+ for i, req := range reqs {
+ respi := tresp.Responses[i].Response
+ switch tv := req.Request.(type) {
+ case *pb.RequestOp_RequestRange:
+ trace.StartSubTrace(
+ traceutil.Field{Key: "req_type", Value: "range"},
+ traceutil.Field{Key: "range_begin", Value: string(tv.RequestRange.Key)},
+ traceutil.Field{Key: "range_end", Value: string(tv.RequestRange.RangeEnd)})
+ resp, err := executeRange(ctx, lg, txnWrite, tv.RequestRange)
+ if err != nil {
+ return 0, fmt.Errorf("applyTxn: failed Range: %w", err)
+ }
+ respi.(*pb.ResponseOp_ResponseRange).ResponseRange = resp
+ trace.StopSubTrace()
+ case *pb.RequestOp_RequestPut:
+ trace.StartSubTrace(
+ traceutil.Field{Key: "req_type", Value: "put"},
+ traceutil.Field{Key: "key", Value: string(tv.RequestPut.Key)},
+ traceutil.Field{Key: "req_size", Value: tv.RequestPut.Size()})
+ resp, err := put(ctx, txnWrite, tv.RequestPut)
+ if err != nil {
+ return 0, fmt.Errorf("applyTxn: failed Put: %w", err)
+ }
+ respi.(*pb.ResponseOp_ResponsePut).ResponsePut = resp
+ trace.StopSubTrace()
+ case *pb.RequestOp_RequestDeleteRange:
+ resp, err := deleteRange(ctx, txnWrite, tv.RequestDeleteRange)
+ if err != nil {
+ return 0, fmt.Errorf("applyTxn: failed DeleteRange: %w", err)
+ }
+ respi.(*pb.ResponseOp_ResponseDeleteRange).ResponseDeleteRange = resp
+ case *pb.RequestOp_RequestTxn:
+ resp := respi.(*pb.ResponseOp_ResponseTxn).ResponseTxn
+ applyTxns, err := executeTxn(ctx, lg, txnWrite, tv.RequestTxn, txnPath[1:], resp)
+ if err != nil {
+ // don't wrap the error. It's a recursive call and err should be already wrapped
+ return 0, err
+ }
+ txns += applyTxns + 1
+ txnPath = txnPath[applyTxns+1:]
+ default:
+ // empty union
+ }
+ }
+ return txns, nil
+}
+
+func checkPut(rv mvcc.ReadView, lessor lease.Lessor, req *pb.PutRequest) error {
+ if req.IgnoreValue || req.IgnoreLease {
+ // expects a previous key-value pair; returns an error if it does not exist
+ rr, err := rv.Range(context.TODO(), req.Key, nil, mvcc.RangeOptions{})
+ if err != nil {
+ return err
+ }
+ if rr == nil || len(rr.KVs) == 0 {
+ return errors.ErrKeyNotFound
+ }
+ }
+ if lease.LeaseID(req.Lease) != lease.NoLease {
+ if l := lessor.Lookup(lease.LeaseID(req.Lease)); l == nil {
+ return lease.ErrLeaseNotFound
+ }
+ }
+ return nil
+}
+
+func checkRange(rv mvcc.ReadView, req *pb.RangeRequest) error {
+ switch {
+ case req.Revision == 0:
+ return nil
+ case req.Revision > rv.Rev():
+ return mvcc.ErrFutureRev
+ case req.Revision < rv.FirstRev():
+ return mvcc.ErrCompacted
+ }
+ return nil
+}
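+
+// For example (mirroring the test cases elsewhere in this change): after
+// compacting to revision 10, a RangeRequest with Revision 9 fails here with
+// mvcc.ErrCompacted, a revision beyond the current one fails with
+// mvcc.ErrFutureRev, and Revision 0 (read latest) always passes.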
+
+func checkTxn(rv mvcc.ReadView, rt *pb.TxnRequest, lessor lease.Lessor, txnPath []bool) (int, error) {
+ txnCount := 0
+ reqs := rt.Success
+ if !txnPath[0] {
+ reqs = rt.Failure
+ }
+ for _, req := range reqs {
+ var err error
+ var txns int
+ switch tv := req.Request.(type) {
+ case *pb.RequestOp_RequestRange:
+ err = checkRange(rv, tv.RequestRange)
+ case *pb.RequestOp_RequestPut:
+ err = checkPut(rv, lessor, tv.RequestPut)
+ case *pb.RequestOp_RequestDeleteRange:
+ case *pb.RequestOp_RequestTxn:
+ txns, err = checkTxn(rv, tv.RequestTxn, lessor, txnPath[1:])
+ txnCount += txns + 1
+ txnPath = txnPath[txns+1:]
+ default:
+ // empty union
+ }
+ if err != nil {
+ return 0, err
+ }
+ }
+ return txnCount, nil
+}
+
+// mkGteRange determines if the range end is a >= range. This works around grpc
+// sending empty byte strings as nil; >= is encoded in the range end as '\0'.
+// If it is a GTE range, then []byte{} is returned to indicate the empty byte
+// string (vs nil being no byte string).
+func mkGteRange(rangeEnd []byte) []byte {
+ if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
+ return []byte{}
+ }
+ return rangeEnd
+}
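+
+// For example (illustrative): clientv3's WithFromKey (and `etcdctl get
+// --from-key`) encodes ">= key" by sending RangeEnd = "\x00"; mkGteRange maps
+// that single zero byte to []byte{} so the mvcc layer sees an open-ended range
+// instead of a nil range end (single-key lookup).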
+
+func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) {
+ j := 0
+ for i := range rr.KVs {
+ rr.KVs[j] = rr.KVs[i]
+ if !isPrunable(&rr.KVs[i]) {
+ j++
+ }
+ }
+ rr.KVs = rr.KVs[:j]
+}
+
+type kvSort struct{ kvs []mvccpb.KeyValue }
+
+func (s *kvSort) Swap(i, j int) {
+ t := s.kvs[i]
+ s.kvs[i] = s.kvs[j]
+ s.kvs[j] = t
+}
+func (s *kvSort) Len() int { return len(s.kvs) }
+
+type kvSortByKey struct{ *kvSort }
+
+func (s *kvSortByKey) Less(i, j int) bool {
+ return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0
+}
+
+type kvSortByVersion struct{ *kvSort }
+
+func (s *kvSortByVersion) Less(i, j int) bool {
+ return (s.kvs[i].Version - s.kvs[j].Version) < 0
+}
+
+type kvSortByCreate struct{ *kvSort }
+
+func (s *kvSortByCreate) Less(i, j int) bool {
+ return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0
+}
+
+type kvSortByMod struct{ *kvSort }
+
+func (s *kvSortByMod) Less(i, j int) bool {
+ return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0
+}
+
+type kvSortByValue struct{ *kvSort }
+
+func (s *kvSortByValue) Less(i, j int) bool {
+ return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0
+}
+
+func compareInt64(a, b int64) int {
+ switch {
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ default:
+ return 0
+ }
+}
+
+func compareToPath(rv mvcc.ReadView, rt *pb.TxnRequest) []bool {
+ txnPath := make([]bool, 1)
+ ops := rt.Success
+ if txnPath[0] = applyCompares(rv, rt.Compare); !txnPath[0] {
+ ops = rt.Failure
+ }
+ for _, op := range ops {
+ tv, ok := op.Request.(*pb.RequestOp_RequestTxn)
+ if !ok || tv.RequestTxn == nil {
+ continue
+ }
+ txnPath = append(txnPath, compareToPath(rv, tv.RequestTxn)...)
+ }
+ return txnPath
+}
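+
+// Illustrative shape (assumption): for a request whose top-level compares
+// succeed and whose Success branch contains one nested txn with failing
+// compares, compareToPath returns []bool{true, false}, one decision per
+// (sub)transaction in depth-first order.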
+
+func applyCompares(rv mvcc.ReadView, cmps []*pb.Compare) bool {
+ for _, c := range cmps {
+ if !applyCompare(rv, c) {
+ return false
+ }
+ }
+ return true
+}
+
+// applyCompare applies the compare request.
+// If the comparison succeeds, it returns true. Otherwise, returns false.
+func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool {
+ // TODO: possible optimizations
+ // * chunk reads for large ranges to conserve memory
+ // * rewrite rules for common patterns:
+ // ex. "[a, b) createrev > 0" => "limit 1 /\ kvs > 0"
+ // * caching
+ rr, err := rv.Range(context.TODO(), c.Key, mkGteRange(c.RangeEnd), mvcc.RangeOptions{})
+ if err != nil {
+ return false
+ }
+ if len(rr.KVs) == 0 {
+ if c.Target == pb.Compare_VALUE {
+ // Always fail if comparing a value on keys that do not exist;
+ // nil == empty string in grpc; no way to represent missing value
+ return false
+ }
+ return compareKV(c, mvccpb.KeyValue{})
+ }
+ for _, kv := range rr.KVs {
+ if !compareKV(c, kv) {
+ return false
+ }
+ }
+ return true
+}
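+
+// Note (illustrative): when the key does not exist and the target is not
+// VALUE, the compare is evaluated against a zero mvccpb.KeyValue, so a
+// "create_revision == 0" compare succeeds only when the key is absent, which
+// is the usual guard for a missing key.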
+
+func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool {
+ var result int
+ rev := int64(0)
+ switch c.Target {
+ case pb.Compare_VALUE:
+ var v []byte
+ if tv, _ := c.TargetUnion.(*pb.Compare_Value); tv != nil {
+ v = tv.Value
+ }
+ result = bytes.Compare(ckv.Value, v)
+ case pb.Compare_CREATE:
+ if tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision); tv != nil {
+ rev = tv.CreateRevision
+ }
+ result = compareInt64(ckv.CreateRevision, rev)
+ case pb.Compare_MOD:
+ if tv, _ := c.TargetUnion.(*pb.Compare_ModRevision); tv != nil {
+ rev = tv.ModRevision
+ }
+ result = compareInt64(ckv.ModRevision, rev)
+ case pb.Compare_VERSION:
+ if tv, _ := c.TargetUnion.(*pb.Compare_Version); tv != nil {
+ rev = tv.Version
+ }
+ result = compareInt64(ckv.Version, rev)
+ case pb.Compare_LEASE:
+ if tv, _ := c.TargetUnion.(*pb.Compare_Lease); tv != nil {
+ rev = tv.Lease
+ }
+ result = compareInt64(ckv.Lease, rev)
+ }
+ switch c.Result {
+ case pb.Compare_EQUAL:
+ return result == 0
+ case pb.Compare_NOT_EQUAL:
+ return result != 0
+ case pb.Compare_GREATER:
+ return result > 0
+ case pb.Compare_LESS:
+ return result < 0
+ }
+ return true
+}
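+
+// For instance (illustrative): a compare with target MOD, result GREATER and a
+// target mod revision of 5 returns true only for keys whose ModRevision is
+// strictly greater than 5.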
+
+func IsTxnSerializable(r *pb.TxnRequest) bool {
+ for _, u := range r.Success {
+ if r := u.GetRequestRange(); r == nil || !r.Serializable {
+ return false
+ }
+ }
+ for _, u := range r.Failure {
+ if r := u.GetRequestRange(); r == nil || !r.Serializable {
+ return false
+ }
+ }
+ return true
+}
+
+func IsTxnReadonly(r *pb.TxnRequest) bool {
+ for _, u := range r.Success {
+ if r := u.GetRequestRange(); r == nil {
+ return false
+ }
+ }
+ for _, u := range r.Failure {
+ if r := u.GetRequestRange(); r == nil {
+ return false
+ }
+ }
+ return true
+}
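+
+// Illustrative classification (assumption): a txn whose Success and Failure
+// branches contain only range requests is read-only and takes the read path in
+// Txn above; any put, delete, or nested txn op makes IsTxnReadonly return
+// false, so the txn acquires a write transaction instead.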
+
+func CheckTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error {
+ for _, c := range rt.Compare {
+ if err := as.IsRangePermitted(ai, c.Key, c.RangeEnd); err != nil {
+ return err
+ }
+ }
+ if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil {
+ return err
+ }
+ return checkTxnReqsPermission(as, ai, rt.Failure)
+}
+
+func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error {
+ for _, requ := range reqs {
+ switch tv := requ.Request.(type) {
+ case *pb.RequestOp_RequestRange:
+ if tv.RequestRange == nil {
+ continue
+ }
+
+ if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil {
+ return err
+ }
+
+ case *pb.RequestOp_RequestPut:
+ if tv.RequestPut == nil {
+ continue
+ }
+
+ if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil {
+ return err
+ }
+
+ case *pb.RequestOp_RequestDeleteRange:
+ if tv.RequestDeleteRange == nil {
+ continue
+ }
+
+ if tv.RequestDeleteRange.PrevKv {
+ err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
+ if err != nil {
+ return err
+ }
+ }
+
+ err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/server/etcdserver/txn/txn_test.go b/server/etcdserver/txn/txn_test.go
new file mode 100644
index 00000000000..850c8a95b9b
--- /dev/null
+++ b/server/etcdserver/txn/txn_test.go
@@ -0,0 +1,677 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package txn
+
+import (
+ "context"
+ "crypto/sha256"
+ "io"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/api/v3/authpb"
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/lease"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+type testCase struct {
+ name string
+ setup testSetup
+ op *pb.RequestOp
+ expectError string
+}
+
+type testSetup struct {
+ compactRevision int64
+ lease int64
+ key []byte
+}
+
+var futureRev int64 = 1000
+
+var rangeTestCases = []testCase{
+ {
+ name: "Range with revision 0 should succeed",
+ op: &pb.RequestOp{
+ Request: &pb.RequestOp_RequestRange{
+ RequestRange: &pb.RangeRequest{
+ Revision: 0,
+ },
+ },
+ },
+ },
+ {
+ name: "Range on future rev should fail",
+ op: &pb.RequestOp{
+ Request: &pb.RequestOp_RequestRange{
+ RequestRange: &pb.RangeRequest{
+ Revision: futureRev,
+ },
+ },
+ },
+ expectError: "mvcc: required revision is a future revision",
+ },
+ {
+ name: "Range on compacted rev should fail",
+ setup: testSetup{compactRevision: 10},
+ op: &pb.RequestOp{
+ Request: &pb.RequestOp_RequestRange{
+ RequestRange: &pb.RangeRequest{
+ Revision: 9,
+ },
+ },
+ },
+ expectError: "mvcc: required revision has been compacted",
+ },
+}
+
+var putTestCases = []testCase{
+ {
+ name: "Put without lease should succeed",
+ op: &pb.RequestOp{
+ Request: &pb.RequestOp_RequestPut{
+ RequestPut: &pb.PutRequest{},
+ },
+ },
+ },
+ {
+ name: "Put with non-existing lease should fail",
+ op: &pb.RequestOp{
+ Request: &pb.RequestOp_RequestPut{
+ RequestPut: &pb.PutRequest{
+ Lease: 123,
+ },
+ },
+ },
+ expectError: "lease not found",
+ },
+ {
+ name: "Put with existing lease should succeed",
+ setup: testSetup{lease: 123},
+ op: &pb.RequestOp{
+ Request: &pb.RequestOp_RequestPut{
+ RequestPut: &pb.PutRequest{
+ Lease: 123,
+ },
+ },
+ },
+ },
+ {
+ name: "Put with ignore value without previous key should fail",
+ op: &pb.RequestOp{
+ Request: &pb.RequestOp_RequestPut{
+ RequestPut: &pb.PutRequest{
+ IgnoreValue: true,
+ },
+ },
+ },
+ expectError: "etcdserver: key not found",
+ },
+ {
+ name: "Put with ignore lease without previous key should fail",
+ op: &pb.RequestOp{
+ Request: &pb.RequestOp_RequestPut{
+ RequestPut: &pb.PutRequest{
+ IgnoreLease: true,
+ },
+ },
+ },
+ expectError: "etcdserver: key not found",
+ },
+ {
+ name: "Put with ignore value with previous key should succeeded",
+ setup: testSetup{key: []byte("ignore-value")},
+ op: &pb.RequestOp{
+ Request: &pb.RequestOp_RequestPut{
+ RequestPut: &pb.PutRequest{
+ IgnoreValue: true,
+ Key: []byte("ignore-value"),
+ },
+ },
+ },
+ },
+ {
+ name: "Put with ignore lease with previous key should succeed ",
+ setup: testSetup{key: []byte("ignore-lease")},
+ op: &pb.RequestOp{
+ Request: &pb.RequestOp_RequestPut{
+ RequestPut: &pb.PutRequest{
+ IgnoreLease: true,
+ Key: []byte("ignore-lease"),
+ },
+ },
+ },
+ },
+}
+
+func TestCheckTxn(t *testing.T) {
+ type txnTestCase struct {
+ name string
+ setup testSetup
+ txn *pb.TxnRequest
+ expectError string
+ }
+ testCases := []txnTestCase{}
+ for _, tc := range append(rangeTestCases, putTestCases...) {
+ testCases = append(testCases, txnTestCase{
+ name: tc.name,
+ setup: tc.setup,
+ txn: &pb.TxnRequest{
+ Success: []*pb.RequestOp{
+ tc.op,
+ },
+ },
+ expectError: tc.expectError,
+ })
+ }
+ invalidOperation := &pb.RequestOp{
+ Request: &pb.RequestOp_RequestRange{
+ RequestRange: &pb.RangeRequest{
+ Revision: futureRev,
+ },
+ },
+ }
+ testCases = append(testCases, txnTestCase{
+ name: "Invalid operation on failed path should succeed",
+ txn: &pb.TxnRequest{
+ Failure: []*pb.RequestOp{
+ invalidOperation,
+ },
+ },
+ })
+
+ testCases = append(testCases, txnTestCase{
+ name: "Invalid operation on subtransaction should fail",
+ txn: &pb.TxnRequest{
+ Success: []*pb.RequestOp{
+ {
+ Request: &pb.RequestOp_RequestTxn{
+ RequestTxn: &pb.TxnRequest{
+ Success: []*pb.RequestOp{
+ invalidOperation,
+ },
+ },
+ },
+ },
+ },
+ },
+ expectError: "mvcc: required revision is a future revision",
+ })
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ s, lessor := setup(t, tc.setup)
+
+ ctx, cancel := context.WithCancel(context.TODO())
+ defer cancel()
+ _, _, err := Txn(ctx, zaptest.NewLogger(t), tc.txn, false, s, lessor)
+
+ gotErr := ""
+ if err != nil {
+ gotErr = err.Error()
+ }
+ if gotErr != tc.expectError {
+ t.Errorf("Error not matching, got %q, expected %q", gotErr, tc.expectError)
+ }
+ })
+ }
+}
+
+func TestCheckPut(t *testing.T) {
+ for _, tc := range putTestCases {
+ t.Run(tc.name, func(t *testing.T) {
+ s, lessor := setup(t, tc.setup)
+
+ ctx, cancel := context.WithCancel(context.TODO())
+ defer cancel()
+ _, _, err := Put(ctx, zaptest.NewLogger(t), lessor, s, tc.op.GetRequestPut())
+
+ gotErr := ""
+ if err != nil {
+ gotErr = err.Error()
+ }
+ if gotErr != tc.expectError {
+ t.Errorf("Error not matching, got %q, expected %q", gotErr, tc.expectError)
+ }
+ })
+ }
+}
+
+func TestCheckRange(t *testing.T) {
+ for _, tc := range rangeTestCases {
+ t.Run(tc.name, func(t *testing.T) {
+ s, _ := setup(t, tc.setup)
+
+ ctx, cancel := context.WithCancel(context.TODO())
+ defer cancel()
+ _, _, err := Range(ctx, zaptest.NewLogger(t), s, tc.op.GetRequestRange())
+
+ gotErr := ""
+ if err != nil {
+ gotErr = err.Error()
+ }
+ if gotErr != tc.expectError {
+ t.Errorf("Error not matching, got %q, expected %q", gotErr, tc.expectError)
+ }
+ })
+ }
+}
+
+func setup(t *testing.T, setup testSetup) (mvcc.KV, lease.Lessor) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ t.Cleanup(func() {
+ betesting.Close(t, b)
+ })
+ lessor := &lease.FakeLessor{LeaseSet: map[lease.LeaseID]struct{}{}}
+ s := mvcc.NewStore(zaptest.NewLogger(t), b, lessor, mvcc.StoreConfig{})
+ t.Cleanup(func() {
+ s.Close()
+ })
+
+ if setup.compactRevision != 0 {
+ for i := 0; int64(i) < setup.compactRevision; i++ {
+ s.Put([]byte("a"), []byte("b"), 0)
+ }
+ s.Compact(traceutil.TODO(), setup.compactRevision)
+ }
+ if setup.lease != 0 {
+ lessor.Grant(lease.LeaseID(setup.lease), 0)
+ }
+ if len(setup.key) != 0 {
+ s.Put(setup.key, []byte("b"), 0)
+ }
+ return s, lessor
+}
+
+func TestReadonlyTxnError(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, b)
+ s := mvcc.NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, mvcc.StoreConfig{})
+ defer s.Close()
+
+ // set up a cancelled context
+ ctx, cancel := context.WithCancel(context.TODO())
+ cancel()
+
+ // put some data to prevent early termination in rangeKeys
+ // we expect the failure to come from the cancelled-context check
+ s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
+
+ txn := &pb.TxnRequest{
+ Success: []*pb.RequestOp{
+ {
+ Request: &pb.RequestOp_RequestRange{
+ RequestRange: &pb.RangeRequest{
+ Key: []byte("foo"),
+ },
+ },
+ },
+ },
+ }
+
+ _, _, err := Txn(ctx, zaptest.NewLogger(t), txn, false, s, &lease.FakeLessor{})
+ if err == nil || !strings.Contains(err.Error(), "applyTxn: failed Range: rangeKeys: context cancelled: context canceled") {
+ t.Fatalf("Expected context canceled error, got %v", err)
+ }
+}
+
+func TestWriteTxnPanicWithoutApply(t *testing.T) {
+ b, bePath := betesting.NewDefaultTmpBackend(t)
+ s := mvcc.NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, mvcc.StoreConfig{})
+ defer s.Close()
+
+ // set up a cancelled context
+ ctx, cancel := context.WithCancel(context.TODO())
+ cancel()
+
+ // write txn that puts some data and then fails in range due to cancelled context
+ txn := &pb.TxnRequest{
+ Success: []*pb.RequestOp{
+ {
+ Request: &pb.RequestOp_RequestPut{
+ RequestPut: &pb.PutRequest{
+ Key: []byte("foo"),
+ Value: []byte("bar"),
+ },
+ },
+ },
+ {
+ Request: &pb.RequestOp_RequestRange{
+ RequestRange: &pb.RangeRequest{
+ Key: []byte("foo"),
+ },
+ },
+ },
+ },
+ }
+
+ // compute DB file hash before applying the txn
+ dbHashBefore, err := computeFileHash(bePath)
+ require.NoErrorf(t, err, "failed to compute DB file hash before txn")
+
+ // we verify the following properties below:
+ // 1. server panics after a write txn apply fails (invariant: server should never try to move on from a failed write)
+ // 2. no writes from the txn are applied to the backend (invariant: failed write should have no side-effect on DB state besides panic)
+ assert.Panicsf(t, func() { Txn(ctx, zaptest.NewLogger(t), txn, false, s, &lease.FakeLessor{}) }, "Expected panic in Txn with writes")
+ dbHashAfter, err := computeFileHash(bePath)
+ require.NoErrorf(t, err, "failed to compute DB file hash after txn")
+ require.Equalf(t, dbHashBefore, dbHashAfter, "mismatch in DB hash before and after failed write txn")
+}
+
+func TestCheckTxnAuth(t *testing.T) {
+ be, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, be)
+ as := setupAuth(t, be)
+
+ tests := []struct {
+ name string
+ txnRequest *pb.TxnRequest
+ err error
+ }{
+ {
+ name: "Out of range compare is unauthorized",
+ txnRequest: &pb.TxnRequest{
+ Compare: []*pb.Compare{outOfRangeCompare},
+ },
+ err: auth.ErrPermissionDenied,
+ },
+ {
+ name: "In range compare is authorized",
+ txnRequest: &pb.TxnRequest{
+ Compare: []*pb.Compare{inRangeCompare},
+ },
+ err: nil,
+ },
+ {
+ name: "Nil request range is always authorized",
+ txnRequest: &pb.TxnRequest{
+ Success: []*pb.RequestOp{nilRequestRange},
+ },
+ err: nil,
+ },
+ {
+ name: "Range request in range is authorized",
+ txnRequest: &pb.TxnRequest{
+ Success: []*pb.RequestOp{inRangeRequestRange},
+ Failure: []*pb.RequestOp{inRangeRequestRange},
+ },
+ err: nil,
+ },
+ {
+ name: "Range request out of range success case is unauthorized",
+ txnRequest: &pb.TxnRequest{
+ Success: []*pb.RequestOp{outOfRangeRequestRange},
+ Failure: []*pb.RequestOp{inRangeRequestRange},
+ },
+ err: auth.ErrPermissionDenied,
+ },
+ {
+ name: "Range request out of range failure case is unauthorized",
+ txnRequest: &pb.TxnRequest{
+ Success: []*pb.RequestOp{inRangeRequestRange},
+ Failure: []*pb.RequestOp{outOfRangeRequestRange},
+ },
+ err: auth.ErrPermissionDenied,
+ },
+ {
+ name: "Nil Put request is always authorized",
+ txnRequest: &pb.TxnRequest{
+ Success: []*pb.RequestOp{nilRequestPut},
+ },
+ err: nil,
+ },
+ {
+ name: "Put request in range in authorized",
+ txnRequest: &pb.TxnRequest{
+ Success: []*pb.RequestOp{inRangeRequestPut},
+ Failure: []*pb.RequestOp{inRangeRequestPut},
+ },
+ err: nil,
+ },
+ {
+ name: "Put request out of range success case is unauthorized",
+ txnRequest: &pb.TxnRequest{
+ Success: []*pb.RequestOp{outOfRangeRequestPut},
+ Failure: []*pb.RequestOp{inRangeRequestPut},
+ },
+ err: auth.ErrPermissionDenied,
+ },
+ {
+ name: "Put request out of range failure case is unauthorized",
+ txnRequest: &pb.TxnRequest{
+ Success: []*pb.RequestOp{inRangeRequestPut},
+ Failure: []*pb.RequestOp{outOfRangeRequestPut},
+ },
+ err: auth.ErrPermissionDenied,
+ },
+ {
+ name: "Nil delete request is authorized",
+ txnRequest: &pb.TxnRequest{
+ Success: []*pb.RequestOp{nilRequestDeleteRange},
+ },
+ err: nil,
+ },
+ {
+ name: "Delete range request in range is authorized",
+ txnRequest: &pb.TxnRequest{
+ Success: []*pb.RequestOp{inRangeRequestDeleteRange},
+ Failure: []*pb.RequestOp{inRangeRequestDeleteRange},
+ },
+ err: nil,
+ },
+ {
+ name: "Delete range request out of range success case is unauthorized",
+ txnRequest: &pb.TxnRequest{
+ Success: []*pb.RequestOp{outOfRangeRequestDeleteRange},
+ Failure: []*pb.RequestOp{inRangeRequestDeleteRange},
+ },
+ err: auth.ErrPermissionDenied,
+ },
+ {
+ name: "Delete range request out of range failure case is unauthorized",
+ txnRequest: &pb.TxnRequest{
+ Success: []*pb.RequestOp{inRangeRequestDeleteRange},
+ Failure: []*pb.RequestOp{outOfRangeRequestDeleteRange},
+ },
+ err: auth.ErrPermissionDenied,
+ },
+ {
+ name: "Delete range request out of range and PrevKv false success case is unauthorized",
+ txnRequest: &pb.TxnRequest{
+ Success: []*pb.RequestOp{outOfRangeRequestDeleteRangeKvFalse},
+ Failure: []*pb.RequestOp{inRangeRequestDeleteRange},
+ },
+ err: auth.ErrPermissionDenied,
+ },
+ {
+ name: "Delete range request out of range and PrevKv false failure case is unauthorized",
+ txnRequest: &pb.TxnRequest{
+ Success: []*pb.RequestOp{inRangeRequestDeleteRange},
+ Failure: []*pb.RequestOp{outOfRangeRequestDeleteRangeKvFalse},
+ },
+ err: auth.ErrPermissionDenied,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := CheckTxnAuth(as, &auth.AuthInfo{Username: "foo", Revision: 8}, tt.txnRequest)
+ assert.Equal(t, tt.err, err)
+ })
+ }
+}
+
+// CheckTxnAuth test setup.
+func setupAuth(t *testing.T, be backend.Backend) auth.AuthStore {
+ lg := zaptest.NewLogger(t)
+
+ simpleTokenTTLDefault := 300 * time.Second
+ tokenTypeSimple := "simple"
+ dummyIndexWaiter := func(index uint64) <-chan struct{} {
+ ch := make(chan struct{}, 1)
+ go func() {
+ ch <- struct{}{}
+ }()
+ return ch
+ }
+
+ tp, _ := auth.NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
+
+ as := auth.NewAuthStore(lg, schema.NewAuthBackend(lg, be), tp, 4)
+
+ // create "root" user and "foo" user with limited range
+ _, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "root"})
+ require.NoError(t, err)
+
+ _, err = as.RoleAdd(&pb.AuthRoleAddRequest{Name: "rw"})
+ require.NoError(t, err)
+
+ _, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{
+ Name: "rw",
+ Perm: &authpb.Permission{
+ PermType: authpb.READWRITE,
+ Key: []byte("foo"),
+ RangeEnd: []byte("zoo"),
+ },
+ })
+ require.NoError(t, err)
+
+ _, err = as.UserAdd(&pb.AuthUserAddRequest{Name: "root", Password: "foo"})
+ require.NoError(t, err)
+
+ _, err = as.UserAdd(&pb.AuthUserAddRequest{Name: "foo", Password: "foo"})
+ require.NoError(t, err)
+
+ _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "root", Role: "root"})
+ require.NoError(t, err)
+
+ _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "foo", Role: "rw"})
+ require.NoError(t, err)
+
+ err = as.AuthEnable()
+ require.NoError(t, err)
+
+ return as
+}
+
+func computeFileHash(filePath string) (string, error) {
+ file, err := os.Open(filePath)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ h := sha256.New()
+ if _, err := io.Copy(h, file); err != nil {
+ return "", err
+ }
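+ // Return the raw digest bytes; callers only compare hashes for equality,
+ // so a hex encoding is not necessary here.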
+ return string(h.Sum(nil)), nil
+}
+
+// CheckTxnAuth variables setup.
+var (
+ inRangeCompare = &pb.Compare{
+ Key: []byte("foo"),
+ RangeEnd: []byte("zoo"),
+ }
+ outOfRangeCompare = &pb.Compare{
+ Key: []byte("boo"),
+ RangeEnd: []byte("zoo"),
+ }
+ nilRequestPut = &pb.RequestOp{
+ Request: &pb.RequestOp_RequestPut{
+ RequestPut: nil,
+ },
+ }
+ inRangeRequestPut = &pb.RequestOp{
+ Request: &pb.RequestOp_RequestPut{
+ RequestPut: &pb.PutRequest{
+ Key: []byte("foo"),
+ },
+ },
+ }
+ outOfRangeRequestPut = &pb.RequestOp{
+ Request: &pb.RequestOp_RequestPut{
+ RequestPut: &pb.PutRequest{
+ Key: []byte("boo"),
+ },
+ },
+ }
+ nilRequestRange = &pb.RequestOp{
+ Request: &pb.RequestOp_RequestRange{
+ RequestRange: nil,
+ },
+ }
+ inRangeRequestRange = &pb.RequestOp{
+ Request: &pb.RequestOp_RequestRange{
+ RequestRange: &pb.RangeRequest{
+ Key: []byte("foo"),
+ RangeEnd: []byte("zoo"),
+ },
+ },
+ }
+ outOfRangeRequestRange = &pb.RequestOp{
+ Request: &pb.RequestOp_RequestRange{
+ RequestRange: &pb.RangeRequest{
+ Key: []byte("boo"),
+ RangeEnd: []byte("zoo"),
+ },
+ },
+ }
+ nilRequestDeleteRange = &pb.RequestOp{
+ Request: &pb.RequestOp_RequestDeleteRange{
+ RequestDeleteRange: nil,
+ },
+ }
+ inRangeRequestDeleteRange = &pb.RequestOp{
+ Request: &pb.RequestOp_RequestDeleteRange{
+ RequestDeleteRange: &pb.DeleteRangeRequest{
+ Key: []byte("foo"),
+ RangeEnd: []byte("zoo"),
+ PrevKv: true,
+ },
+ },
+ }
+ outOfRangeRequestDeleteRange = &pb.RequestOp{
+ Request: &pb.RequestOp_RequestDeleteRange{
+ RequestDeleteRange: &pb.DeleteRangeRequest{
+ Key: []byte("boo"),
+ RangeEnd: []byte("zoo"),
+ PrevKv: true,
+ },
+ },
+ }
+ outOfRangeRequestDeleteRangeKvFalse = &pb.RequestOp{
+ Request: &pb.RequestOp_RequestDeleteRange{
+ RequestDeleteRange: &pb.DeleteRangeRequest{
+ Key: []byte("boo"),
+ RangeEnd: []byte("zoo"),
+ PrevKv: false,
+ },
+ },
+ }
+)
diff --git a/server/etcdserver/txn/util.go b/server/etcdserver/txn/util.go
new file mode 100644
index 00000000000..f9987c6d5d1
--- /dev/null
+++ b/server/etcdserver/txn/util.go
@@ -0,0 +1,107 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package txn
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+func WarnOfExpensiveRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
+ if time.Since(now) <= warningApplyDuration {
+ return
+ }
+ var resp string
+ if !isNil(respMsg) {
+ resp = fmt.Sprintf("size:%d", proto.Size(respMsg))
+ }
+ warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "", resp, err)
+}
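+
+// A minimal sketch of the intended call pattern (illustrative; applyRequest is
+// a hypothetical stand-in for the caller's apply step):
+//
+//	start := time.Now()
+//	resp, err := applyRequest(req)
+//	WarnOfExpensiveRequest(lg, warningApplyDuration, start, req, resp, err)
+//
+// The warning (and the slow_apply_total counter) only fires when the elapsed
+// time exceeds warningApplyDuration.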
+
+func WarnOfFailedRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
+ var resp string
+ if !isNil(respMsg) {
+ resp = fmt.Sprintf("size:%d", proto.Size(respMsg))
+ }
+ d := time.Since(now)
+ lg.Warn(
+ "failed to apply request",
+ zap.Duration("took", d),
+ zap.String("request", reqStringer.String()),
+ zap.String("response", resp),
+ zap.Error(err),
+ )
+}
+
+func WarnOfExpensiveReadOnlyTxnRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, r *pb.TxnRequest, txnResponse *pb.TxnResponse, err error) {
+ if time.Since(now) <= warningApplyDuration {
+ return
+ }
+ reqStringer := pb.NewLoggableTxnRequest(r)
+ var resp string
+ if !isNil(txnResponse) {
+ var resps []string
+ for _, r := range txnResponse.Responses {
+ switch r.Response.(type) {
+ case *pb.ResponseOp_ResponseRange:
+ if op := r.GetResponseRange(); op != nil {
+ resps = append(resps, fmt.Sprintf("range_response_count:%d", len(op.GetKvs())))
+ } else {
+ resps = append(resps, "range_response:nil")
+ }
+ default:
+ // only range responses should be in a read-only txn request
+ }
+ }
+ resp = fmt.Sprintf("responses:<%s> size:%d", strings.Join(resps, " "), txnResponse.Size())
+ }
+ warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "read-only txn ", resp, err)
+}
+
+func WarnOfExpensiveReadOnlyRangeRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, rangeResponse *pb.RangeResponse, err error) {
+ if time.Since(now) <= warningApplyDuration {
+ return
+ }
+ var resp string
+ if !isNil(rangeResponse) {
+ resp = fmt.Sprintf("range_response_count:%d size:%d", len(rangeResponse.Kvs), rangeResponse.Size())
+ }
+ warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "read-only range ", resp, err)
+}
+
+// Callers must make sure the elapsed time has exceeded warningApplyDuration before calling.
+func warnOfExpensiveGenericRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, prefix string, resp string, err error) {
+ lg.Warn(
+ "apply request took too long",
+ zap.Duration("took", time.Since(now)),
+ zap.Duration("expected-duration", warningApplyDuration),
+ zap.String("prefix", prefix),
+ zap.String("request", reqStringer.String()),
+ zap.String("response", resp),
+ zap.Error(err),
+ )
+ slowApplies.Inc()
+}
+
+func isNil(msg proto.Message) bool {
+ return msg == nil || reflect.ValueOf(msg).IsNil()
+}
diff --git a/server/etcdserver/txn/util_bench_test.go b/server/etcdserver/txn/util_bench_test.go
new file mode 100644
index 00000000000..5a84f62f537
--- /dev/null
+++ b/server/etcdserver/txn/util_bench_test.go
@@ -0,0 +1,54 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package txn
+
+import (
+ "errors"
+ "testing"
+ "time"
+
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+func BenchmarkWarnOfExpensiveRequestNoLog(b *testing.B) {
+ m := &raftpb.Message{
+ Type: 0,
+ To: 0,
+ From: 1,
+ Term: 2,
+ LogTerm: 3,
+ Index: 0,
+ Entries: []raftpb.Entry{
+ {
+ Term: 0,
+ Index: 0,
+ Type: 0,
+ Data: make([]byte, 1024),
+ },
+ },
+ Commit: 0,
+ Snapshot: nil,
+ Reject: false,
+ RejectHint: 0,
+ Context: nil,
+ }
+ err := errors.New("benchmarking warn of expensive request")
+ lg := zaptest.NewLogger(b)
+ for n := 0; n < b.N; n++ {
+ WarnOfExpensiveRequest(lg, time.Second, time.Now(), nil, m, err)
+ }
+}
diff --git a/server/etcdserver/txn/util_test.go b/server/etcdserver/txn/util_test.go
new file mode 100644
index 00000000000..016f8211cd8
--- /dev/null
+++ b/server/etcdserver/txn/util_test.go
@@ -0,0 +1,141 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package txn
+
+import (
+ "testing"
+ "time"
+
+ "go.uber.org/zap/zaptest"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/mvccpb"
+)
+
+// TestWarnOfExpensiveReadOnlyTxnRequest verifies WarnOfExpensiveReadOnlyTxnRequest
+// never panics, no matter what data the txnResponse contains.
+func TestWarnOfExpensiveReadOnlyTxnRequest(t *testing.T) {
+ kvs := []*mvccpb.KeyValue{
+ {Key: []byte("k1"), Value: []byte("v1")},
+ {Key: []byte("k2"), Value: []byte("v2")},
+ }
+
+ testCases := []struct {
+ name string
+ txnResp *pb.TxnResponse
+ }{
+ {
+ name: "all readonly responses",
+ txnResp: &pb.TxnResponse{
+ Responses: []*pb.ResponseOp{
+ {
+ Response: &pb.ResponseOp_ResponseRange{
+ ResponseRange: &pb.RangeResponse{
+ Kvs: kvs,
+ },
+ },
+ },
+ {
+ Response: &pb.ResponseOp_ResponseRange{
+ ResponseRange: &pb.RangeResponse{},
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "all readonly responses with partial nil responses",
+ txnResp: &pb.TxnResponse{
+ Responses: []*pb.ResponseOp{
+ {
+ Response: &pb.ResponseOp_ResponseRange{
+ ResponseRange: &pb.RangeResponse{},
+ },
+ },
+ {
+ Response: &pb.ResponseOp_ResponseRange{
+ ResponseRange: nil,
+ },
+ },
+ {
+ Response: &pb.ResponseOp_ResponseRange{
+ ResponseRange: &pb.RangeResponse{
+ Kvs: kvs,
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "all readonly responses with all nil responses",
+ txnResp: &pb.TxnResponse{
+ Responses: []*pb.ResponseOp{
+ {
+ Response: &pb.ResponseOp_ResponseRange{
+ ResponseRange: nil,
+ },
+ },
+ {
+ Response: &pb.ResponseOp_ResponseRange{
+ ResponseRange: nil,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "partial non readonly responses",
+ txnResp: &pb.TxnResponse{
+ Responses: []*pb.ResponseOp{
+ {
+ Response: &pb.ResponseOp_ResponseRange{
+ ResponseRange: nil,
+ },
+ },
+ {
+ Response: &pb.ResponseOp_ResponsePut{},
+ },
+ {
+ Response: &pb.ResponseOp_ResponseDeleteRange{},
+ },
+ },
+ },
+ },
+ {
+ name: "all non readonly responses",
+ txnResp: &pb.TxnResponse{
+ Responses: []*pb.ResponseOp{
+ {
+ Response: &pb.ResponseOp_ResponsePut{},
+ },
+ {
+ Response: &pb.ResponseOp_ResponseDeleteRange{},
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ start := time.Now().Add(-1 * time.Second)
+ // WarnOfExpensiveReadOnlyTxnRequest shouldn't panic.
+ WarnOfExpensiveReadOnlyTxnRequest(lg, 0, start, &pb.TxnRequest{}, tc.txnResp, nil)
+ })
+ }
+}
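Aside (not part of the patch): the cases above feed nil nested responses into the helper; it stays panic-free because a range response is only dereferenced through the nil-checked `GetResponseRange()` accessor (see util.go above). A tiny self-contained sketch of that access pattern; `describe` is a hypothetical helper mirroring the loop in WarnOfExpensiveReadOnlyTxnRequest.

```go
package main

import (
	"fmt"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)

// describe returns the same per-response summary the warning helper builds:
// GetResponseRange is checked for nil before its Kvs are counted.
func describe(op *pb.ResponseOp) string {
	if rr := op.GetResponseRange(); rr != nil {
		return fmt.Sprintf("range_response_count:%d", len(rr.GetKvs()))
	}
	return "range_response:nil"
}

func main() {
	fmt.Println(describe(&pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{ResponseRange: nil}}))
	fmt.Println(describe(&pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{ResponseRange: &pb.RangeResponse{}}}))
}
```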
diff --git a/server/etcdserver/util.go b/server/etcdserver/util.go
index 265dbae6737..fbba5491b07 100644
--- a/server/etcdserver/util.go
+++ b/server/etcdserver/util.go
@@ -16,17 +16,11 @@ package etcdserver
import (
"fmt"
- "reflect"
- "strings"
"time"
- "github.com/golang/protobuf/proto"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
"go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
-
- "go.uber.org/zap"
)
// isConnectedToQuorumSince checks whether the local member is connected to the
@@ -103,76 +97,6 @@ func (nc *notifier) notify(err error) {
close(nc.c)
}
-func warnOfExpensiveRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
- var resp string
- if !isNil(respMsg) {
- resp = fmt.Sprintf("size:%d", proto.Size(respMsg))
- }
- warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "", resp, err)
-}
-
-func warnOfFailedRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
- var resp string
- if !isNil(respMsg) {
- resp = fmt.Sprintf("size:%d", proto.Size(respMsg))
- }
- d := time.Since(now)
- lg.Warn(
- "failed to apply request",
- zap.Duration("took", d),
- zap.String("request", reqStringer.String()),
- zap.String("response", resp),
- zap.Error(err),
- )
-}
-
-func warnOfExpensiveReadOnlyTxnRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, r *pb.TxnRequest, txnResponse *pb.TxnResponse, err error) {
- reqStringer := pb.NewLoggableTxnRequest(r)
- var resp string
- if !isNil(txnResponse) {
- var resps []string
- for _, r := range txnResponse.Responses {
- switch op := r.Response.(type) {
- case *pb.ResponseOp_ResponseRange:
- resps = append(resps, fmt.Sprintf("range_response_count:%d", len(op.ResponseRange.Kvs)))
- default:
- // only range responses should be in a read only txn request
- }
- }
- resp = fmt.Sprintf("responses:<%s> size:%d", strings.Join(resps, " "), txnResponse.Size())
- }
- warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "read-only txn ", resp, err)
-}
-
-func warnOfExpensiveReadOnlyRangeRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, rangeResponse *pb.RangeResponse, err error) {
- var resp string
- if !isNil(rangeResponse) {
- resp = fmt.Sprintf("range_response_count:%d size:%d", len(rangeResponse.Kvs), rangeResponse.Size())
- }
- warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "read-only range ", resp, err)
-}
-
-func warnOfExpensiveGenericRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, prefix string, resp string, err error) {
- d := time.Since(now)
-
- if d > warningApplyDuration {
- lg.Warn(
- "apply request took too long",
- zap.Duration("took", d),
- zap.Duration("expected-duration", warningApplyDuration),
- zap.String("prefix", prefix),
- zap.String("request", reqStringer.String()),
- zap.String("response", resp),
- zap.Error(err),
- )
- slowApplies.Inc()
- }
-}
-
-func isNil(msg proto.Message) bool {
- return msg == nil || reflect.ValueOf(msg).IsNil()
-}
-
// panicAlternativeStringer wraps a fmt.Stringer, and if calling String() panics, calls the alternative instead.
// This is needed to ensure logging slow v2 requests does not panic, which occurs when running integration tests
// with the embedded server with github.com/golang/protobuf v1.4.0+. See https://github.com/etcd-io/etcd/issues/12197.
diff --git a/server/etcdserver/util_test.go b/server/etcdserver/util_test.go
index dd57c7e1d2e..cad7c3cf452 100644
--- a/server/etcdserver/util_test.go
+++ b/server/etcdserver/util_test.go
@@ -19,13 +19,13 @@ import (
"testing"
"time"
- "go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
"go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
"go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
+ "go.etcd.io/raft/v3/raftpb"
)
func TestLongestConnected(t *testing.T) {
@@ -33,7 +33,7 @@ func TestLongestConnected(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- clus, err := membership.NewClusterFromURLsMap(zap.NewExample(), "test", umap)
+ clus, err := membership.NewClusterFromURLsMap(zaptest.NewLogger(t), "test", umap)
if err != nil {
t.Fatal(err)
}
diff --git a/server/etcdserver/v2_server.go b/server/etcdserver/v2_server.go
index 7372823c0b3..8636204b544 100644
--- a/server/etcdserver/v2_server.go
+++ b/server/etcdserver/v2_server.go
@@ -15,150 +15,11 @@
package etcdserver
import (
- "context"
- "time"
-
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
)
type RequestV2 pb.Request
-type RequestV2Handler interface {
- Post(ctx context.Context, r *RequestV2) (Response, error)
- Put(ctx context.Context, r *RequestV2) (Response, error)
- Delete(ctx context.Context, r *RequestV2) (Response, error)
- QGet(ctx context.Context, r *RequestV2) (Response, error)
- Get(ctx context.Context, r *RequestV2) (Response, error)
- Head(ctx context.Context, r *RequestV2) (Response, error)
-}
-
-type reqV2HandlerEtcdServer struct {
- reqV2HandlerStore
- s *EtcdServer
-}
-
-type reqV2HandlerStore struct {
- store v2store.Store
- applier ApplierV2
-}
-
-func NewStoreRequestV2Handler(s v2store.Store, applier ApplierV2) RequestV2Handler {
- return &reqV2HandlerStore{s, applier}
-}
-
-func (a *reqV2HandlerStore) Post(ctx context.Context, r *RequestV2) (Response, error) {
- return a.applier.Post(r), nil
-}
-
-func (a *reqV2HandlerStore) Put(ctx context.Context, r *RequestV2) (Response, error) {
- return a.applier.Put(r), nil
-}
-
-func (a *reqV2HandlerStore) Delete(ctx context.Context, r *RequestV2) (Response, error) {
- return a.applier.Delete(r), nil
-}
-
-func (a *reqV2HandlerStore) QGet(ctx context.Context, r *RequestV2) (Response, error) {
- return a.applier.QGet(r), nil
-}
-
-func (a *reqV2HandlerStore) Get(ctx context.Context, r *RequestV2) (Response, error) {
- if r.Wait {
- wc, err := a.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
- return Response{Watcher: wc}, err
- }
- ev, err := a.store.Get(r.Path, r.Recursive, r.Sorted)
- return Response{Event: ev}, err
-}
-
-func (a *reqV2HandlerStore) Head(ctx context.Context, r *RequestV2) (Response, error) {
- ev, err := a.store.Get(r.Path, r.Recursive, r.Sorted)
- return Response{Event: ev}, err
-}
-
-func (a *reqV2HandlerEtcdServer) Post(ctx context.Context, r *RequestV2) (Response, error) {
- return a.processRaftRequest(ctx, r)
-}
-
-func (a *reqV2HandlerEtcdServer) Put(ctx context.Context, r *RequestV2) (Response, error) {
- return a.processRaftRequest(ctx, r)
-}
-
-func (a *reqV2HandlerEtcdServer) Delete(ctx context.Context, r *RequestV2) (Response, error) {
- return a.processRaftRequest(ctx, r)
-}
-
-func (a *reqV2HandlerEtcdServer) QGet(ctx context.Context, r *RequestV2) (Response, error) {
- return a.processRaftRequest(ctx, r)
-}
-
-func (a *reqV2HandlerEtcdServer) processRaftRequest(ctx context.Context, r *RequestV2) (Response, error) {
- data, err := ((*pb.Request)(r)).Marshal()
- if err != nil {
- return Response{}, err
- }
- ch := a.s.w.Register(r.ID)
-
- start := time.Now()
- a.s.r.Propose(ctx, data)
- proposalsPending.Inc()
- defer proposalsPending.Dec()
-
- select {
- case x := <-ch:
- resp := x.(Response)
- return resp, resp.Err
- case <-ctx.Done():
- proposalsFailed.Inc()
- a.s.w.Trigger(r.ID, nil) // GC wait
- return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start)
- case <-a.s.stopping:
- }
- return Response{}, ErrStopped
-}
-
-func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
- r.ID = s.reqIDGen.Next()
- h := &reqV2HandlerEtcdServer{
- reqV2HandlerStore: reqV2HandlerStore{
- store: s.v2store,
- applier: s.applyV2,
- },
- s: s,
- }
- rp := &r
- resp, err := ((*RequestV2)(rp)).Handle(ctx, h)
- resp.Term, resp.Index = s.Term(), s.CommittedIndex()
- return resp, err
-}
-
-// Handle interprets r and performs an operation on s.store according to r.Method
-// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
-// Quorum == true, r will be sent through consensus before performing its
-// respective operation. Do will block until an action is performed or there is
-// an error.
-func (r *RequestV2) Handle(ctx context.Context, v2api RequestV2Handler) (Response, error) {
- if r.Method == "GET" && r.Quorum {
- r.Method = "QGET"
- }
- switch r.Method {
- case "POST":
- return v2api.Post(ctx, r)
- case "PUT":
- return v2api.Put(ctx, r)
- case "DELETE":
- return v2api.Delete(ctx, r)
- case "QGET":
- return v2api.QGet(ctx, r)
- case "GET":
- return v2api.Get(ctx, r)
- case "HEAD":
- return v2api.Head(ctx, r)
- }
- return Response{}, ErrUnknownMethod
-}
-
func (r *RequestV2) String() string {
rpb := pb.Request(*r)
return rpb.String()
diff --git a/server/etcdserver/v3_server.go b/server/etcdserver/v3_server.go
index 442288a6ee2..c6953604aa2 100644
--- a/server/etcdserver/v3_server.go
+++ b/server/etcdserver/v3_server.go
@@ -19,32 +19,41 @@ import (
"context"
"encoding/base64"
"encoding/binary"
+ errorspkg "errors"
"strconv"
"time"
+ "github.com/gogo/protobuf/proto"
+ "go.uber.org/zap"
+ "golang.org/x/crypto/bcrypt"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/membershippb"
+ "go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/pkg/v3/traceutil"
- "go.etcd.io/etcd/raft/v3"
"go.etcd.io/etcd/server/v3/auth"
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ apply2 "go.etcd.io/etcd/server/v3/etcdserver/apply"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+ "go.etcd.io/etcd/server/v3/etcdserver/txn"
+ "go.etcd.io/etcd/server/v3/features"
"go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/lease/leasehttp"
- "go.etcd.io/etcd/server/v3/mvcc"
-
- "github.com/gogo/protobuf/proto"
- "go.uber.org/zap"
- "golang.org/x/crypto/bcrypt"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+ "go.etcd.io/raft/v3"
)
const (
// In the health case, there might be a small gap (10s of entries) between
// the applied index and committed index.
- // However, if the committed entries are very heavy to apply, the gap might grow.
+ // However, if the committed entries are very expensive to apply, the gap might grow.
// We should stop accepting new proposals if the gap growing to a certain point.
maxGapBetweenApplyAndCommitIndex = 5000
traceThreshold = 100 * time.Millisecond
readIndexRetryTime = 500 * time.Millisecond
+
+ // applyTimeout is the timeout for the node to catch up on its applied index.
+ // It is used in lease-related operations, such as LeaseRenew and LeaseTimeToLive.
+ applyTimeout = time.Second
)
type RaftKV interface {
@@ -56,9 +65,9 @@ type RaftKV interface {
}
type Lessor interface {
- // LeaseGrant sends LeaseGrant request to raft and apply it after committed.
+ // LeaseGrant sends a LeaseGrant request to raft and applies it after it is committed.
LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
- // LeaseRevoke sends LeaseRevoke request to raft and apply it after committed.
+ // LeaseRevoke sends a LeaseRevoke request to raft and applies it after it is committed.
LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
// LeaseRenew renews the lease with given ID. The renewed TTL is returned. Or an error
@@ -98,12 +107,12 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe
traceutil.Field{Key: "range_begin", Value: string(r.Key)},
traceutil.Field{Key: "range_end", Value: string(r.RangeEnd)},
)
- ctx = context.WithValue(ctx, traceutil.TraceKey, trace)
+ ctx = context.WithValue(ctx, traceutil.TraceKey{}, trace)
var resp *pb.RangeResponse
var err error
defer func(start time.Time) {
- warnOfExpensiveReadOnlyRangeRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, r, resp, err)
+ txn.WarnOfExpensiveReadOnlyRangeRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, r, resp, err)
if resp != nil {
trace.AddField(
traceutil.Field{Key: "response_count", Value: len(resp.Kvs)},
@@ -124,7 +133,7 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe
return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd)
}
- get := func() { resp, err = s.applyV3Base.Range(ctx, nil, r) }
+ get := func() { resp, _, err = txn.Range(ctx, s.Logger(), s.KV(), r) }
if serr := s.doSerialize(ctx, chk, get); serr != nil {
err = serr
return nil, err
@@ -133,7 +142,7 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe
}
func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
- ctx = context.WithValue(ctx, traceutil.StartTimeKey, time.Now())
+ ctx = context.WithValue(ctx, traceutil.StartTimeKey{}, time.Now())
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r})
if err != nil {
return nil, err
@@ -150,13 +159,13 @@ func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest)
}
func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
- if isTxnReadonly(r) {
+ if txn.IsTxnReadonly(r) {
trace := traceutil.New("transaction",
s.Logger(),
traceutil.Field{Key: "read_only", Value: true},
)
- ctx = context.WithValue(ctx, traceutil.TraceKey, trace)
- if !isTxnSerializable(r) {
+ ctx = context.WithValue(ctx, traceutil.TraceKey{}, trace)
+ if !txn.IsTxnSerializable(r) {
err := s.linearizableReadNotify(ctx)
trace.Step("agreement among raft nodes before linearized reading")
if err != nil {
@@ -166,22 +175,24 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse
var resp *pb.TxnResponse
var err error
chk := func(ai *auth.AuthInfo) error {
- return checkTxnAuth(s.authStore, ai, r)
+ return txn.CheckTxnAuth(s.authStore, ai, r)
}
defer func(start time.Time) {
- warnOfExpensiveReadOnlyTxnRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, r, resp, err)
+ txn.WarnOfExpensiveReadOnlyTxnRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, r, resp, err)
trace.LogIfLong(traceThreshold)
}(time.Now())
- get := func() { resp, _, err = s.applyV3Base.Txn(ctx, r) }
+ get := func() {
+ resp, _, err = txn.Txn(ctx, s.Logger(), r, s.Cfg.ServerFeatureGate.Enabled(features.TxnModeWriteWithSharedBuffer), s.KV(), s.lessor)
+ }
if serr := s.doSerialize(ctx, chk, get); serr != nil {
return nil, serr
}
return resp, err
}
- ctx = context.WithValue(ctx, traceutil.StartTimeKey, time.Now())
+ ctx = context.WithValue(ctx, traceutil.StartTimeKey{}, time.Now())
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r})
if err != nil {
return nil, err
@@ -189,64 +200,40 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse
return resp.(*pb.TxnResponse), nil
}
-func isTxnSerializable(r *pb.TxnRequest) bool {
- for _, u := range r.Success {
- if r := u.GetRequestRange(); r == nil || !r.Serializable {
- return false
- }
- }
- for _, u := range r.Failure {
- if r := u.GetRequestRange(); r == nil || !r.Serializable {
- return false
- }
- }
- return true
-}
-
-func isTxnReadonly(r *pb.TxnRequest) bool {
- for _, u := range r.Success {
- if r := u.GetRequestRange(); r == nil {
- return false
- }
- }
- for _, u := range r.Failure {
- if r := u.GetRequestRange(); r == nil {
- return false
- }
- }
- return true
-}
-
func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
startTime := time.Now()
result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r})
trace := traceutil.TODO()
- if result != nil && result.trace != nil {
- trace = result.trace
+ if result != nil && result.Trace != nil {
+ trace = result.Trace
defer func() {
trace.LogIfLong(traceThreshold)
}()
- applyStart := result.trace.GetStartTime()
- result.trace.SetStartTime(startTime)
+ applyStart := result.Trace.GetStartTime()
+ result.Trace.SetStartTime(startTime)
trace.InsertStep(0, applyStart, "process raft request")
}
- if r.Physical && result != nil && result.physc != nil {
- <-result.physc
+ if r.Physical && result != nil && result.Physc != nil {
+ <-result.Physc
// The compaction is done deleting keys; the hash is now settled
// but the data is not necessarily committed. If there's a crash,
// the hash may revert to a hash prior to compaction completing
// if the compaction resumes. Force the finished compaction to
// commit so it won't resume following a crash.
+ //
+ // `applySnapshot` sets a new backend instance, so we need to acquire the bemu lock.
+ s.bemu.RLock()
s.be.ForceCommit()
- trace.Step("physically apply compaction")
+ s.bemu.RUnlock()
+ trace.Step("physically toApply compaction")
}
if err != nil {
return nil, err
}
- if result.err != nil {
- return nil, result.err
+ if result.Err != nil {
+ return nil, result.Err
}
- resp := result.resp.(*pb.CompactionResponse)
+ resp := result.Resp.(*pb.CompactionResponse)
if resp == nil {
resp = &pb.CompactionResponse{}
}
@@ -271,6 +258,18 @@ func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*
return resp.(*pb.LeaseGrantResponse), nil
}
+func (s *EtcdServer) waitAppliedIndex() error {
+ select {
+ case <-s.ApplyWait():
+ case <-s.stopping:
+ return errors.ErrStopped
+ case <-time.After(applyTimeout):
+ return errors.ErrTimeoutWaitAppliedIndex
+ }
+
+ return nil
+}
+
func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
if err != nil {
@@ -280,27 +279,43 @@ func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest)
}
func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) {
- ttl, err := s.lessor.Renew(id)
- if err == nil { // already requested to primary lessor(leader)
- return ttl, nil
- }
- if err != lease.ErrNotPrimary {
- return -1, err
+ if s.isLeader() {
+ // If s.isLeader() returns true, but we fail to ensure the current
+ // member's leadership, there are a couple of possibilities:
+ // 1. current member gets stuck on writing WAL entries;
+ // 2. current member is network-isolated;
+ // 3. current member isn't a leader anymore (possibly due to #1 above).
+ // In such cases, we just return an error to the client, so that the client
+ // can switch to another member to continue the lease keep-alive operation.
+ if !s.ensureLeadership() {
+ return -1, lease.ErrNotPrimary
+ }
+ if err := s.waitAppliedIndex(); err != nil {
+ return 0, err
+ }
+
+ ttl, err := s.lessor.Renew(id)
+ if err == nil { // already requested to primary lessor(leader)
+ return ttl, nil
+ }
+ if !errorspkg.Is(err, lease.ErrNotPrimary) {
+ return -1, err
+ }
}
cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
defer cancel()
// renewals don't go through raft; forward to leader manually
- for cctx.Err() == nil && err != nil {
+ for cctx.Err() == nil {
leader, lerr := s.waitLeader(cctx)
if lerr != nil {
return -1, lerr
}
for _, url := range leader.PeerURLs {
lurl := url + leasehttp.LeasePrefix
- ttl, err = leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt)
- if err == nil || err == lease.ErrLeaseNotFound {
+ ttl, err := leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt)
+ if err == nil || errorspkg.Is(err, lease.ErrLeaseNotFound) {
return ttl, err
}
}
@@ -308,14 +323,45 @@ func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, e
time.Sleep(50 * time.Millisecond)
}
- if cctx.Err() == context.DeadlineExceeded {
- return -1, ErrTimeout
+ if errorspkg.Is(cctx.Err(), context.DeadlineExceeded) {
+ return -1, errors.ErrTimeout
}
- return -1, ErrCanceled
+ return -1, errors.ErrCanceled
}
-func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
- if s.Leader() == s.ID() {
+func (s *EtcdServer) checkLeaseTimeToLive(ctx context.Context, leaseID lease.LeaseID) (uint64, error) {
+ rev := s.AuthStore().Revision()
+ if !s.AuthStore().IsAuthEnabled() {
+ return rev, nil
+ }
+ authInfo, err := s.AuthInfoFromCtx(ctx)
+ if err != nil {
+ return rev, err
+ }
+ if authInfo == nil {
+ return rev, auth.ErrUserEmpty
+ }
+
+ l := s.lessor.Lookup(leaseID)
+ if l != nil {
+ for _, key := range l.Keys() {
+ if err := s.AuthStore().IsRangePermitted(authInfo, []byte(key), []byte{}); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ return rev, nil
+}
+
+func (s *EtcdServer) leaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
+ if s.isLeader() {
+ if err := s.waitAppliedIndex(); err != nil {
+ return nil, err
+ }
+
+ // gofail: var beforeLookupWhenLeaseTimeToLive struct{}
+
// primary; timetolive directly from leader
le := s.lessor.Lookup(lease.LeaseID(r.ID))
if le == nil {
@@ -331,6 +377,15 @@ func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveR
}
resp.Keys = kbs
}
+
+ // The lessor could be demoted if the leader changed during the lookup.
+ // We should return an error to force a retry instead of returning an
+ // incorrect remaining TTL.
+ if le.Demoted() {
+ // NOTE: lease.ErrNotPrimary is not a retryable error for the
+ // client. Instead, use ErrLeaderChanged.
+ return nil, errors.ErrLeaderChanged
+ }
return resp, nil
}
@@ -349,25 +404,59 @@ func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveR
if err == nil {
return resp.LeaseTimeToLiveResponse, nil
}
- if err == lease.ErrLeaseNotFound {
+ if errorspkg.Is(err, lease.ErrLeaseNotFound) {
return nil, err
}
}
}
- if cctx.Err() == context.DeadlineExceeded {
- return nil, ErrTimeout
+ if errorspkg.Is(cctx.Err(), context.DeadlineExceeded) {
+ return nil, errors.ErrTimeout
+ }
+ return nil, errors.ErrCanceled
+}
+
+func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
+ var rev uint64
+ var err error
+ if r.Keys {
+ // check RBAC permission only if Keys is true
+ rev, err = s.checkLeaseTimeToLive(ctx, lease.LeaseID(r.ID))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ resp, err := s.leaseTimeToLive(ctx, r)
+ if err != nil {
+ return nil, err
+ }
+
+ if r.Keys {
+ if s.AuthStore().IsAuthEnabled() && rev != s.AuthStore().Revision() {
+ return nil, auth.ErrAuthOldRevision
+ }
}
- return nil, ErrCanceled
+ return resp, nil
}
-func (s *EtcdServer) LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
+func (s *EtcdServer) newHeader() *pb.ResponseHeader {
+ return &pb.ResponseHeader{
+ ClusterId: uint64(s.cluster.ID()),
+ MemberId: uint64(s.MemberID()),
+ Revision: s.KV().Rev(),
+ RaftTerm: s.Term(),
+ }
+}
+
+// LeaseLeases lists all current leases; despite its name, it is effectively a ListLeases operation.
+func (s *EtcdServer) LeaseLeases(_ context.Context, _ *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
ls := s.lessor.Leases()
lss := make([]*pb.LeaseStatus, len(ls))
for i := range ls {
lss[i] = &pb.LeaseStatus{ID: int64(ls[i].ID)}
}
- return &pb.LeaseLeasesResponse{Header: newHeader(s), Leases: lss}, nil
+ return &pb.LeaseLeasesResponse{Header: s.newHeader(), Leases: lss}, nil
}
func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) {
@@ -379,13 +468,13 @@ func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error)
case <-time.After(dur):
leader = s.cluster.Member(s.Leader())
case <-s.stopping:
- return nil, ErrStopped
+ return nil, errors.ErrStopped
case <-ctx.Done():
- return nil, ErrNoLeader
+ return nil, errors.ErrNoLeader
}
}
- if leader == nil || len(leader.PeerURLs) == 0 {
- return nil, ErrNoLeader
+ if len(leader.PeerURLs) == 0 {
+ return nil, errors.ErrNoLeader
}
return leader, nil
}
@@ -429,11 +518,18 @@ func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest
lg := s.Logger()
+ // fix https://nvd.nist.gov/vuln/detail/CVE-2021-28235
+ defer func() {
+ if r != nil {
+ r.Password = ""
+ }
+ }()
+
var resp proto.Message
for {
checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password)
if err != nil {
- if err != auth.ErrAuthNotEnabled {
+ if !errorspkg.Is(err, auth.ErrAuthNotEnabled) {
lg.Warn(
"invalid authentication was requested",
zap.String("user", r.Name),
@@ -596,19 +692,19 @@ func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftReque
if err != nil {
return nil, err
}
- if result.err != nil {
- return nil, result.err
+ if result.Err != nil {
+ return nil, result.Err
}
- if startTime, ok := ctx.Value(traceutil.StartTimeKey).(time.Time); ok && result.trace != nil {
- applyStart := result.trace.GetStartTime()
- // The trace object is created in apply. Here reset the start time to trace
+ if startTime, ok := ctx.Value(traceutil.StartTimeKey{}).(time.Time); ok && result.Trace != nil {
+ applyStart := result.Trace.GetStartTime()
+ // The trace object is created in the apply phase. Here reset the start time to trace
// the raft request time by the difference between the request start time
- // and apply start time
- result.trace.SetStartTime(startTime)
- result.trace.InsertStep(0, applyStart, "process raft request")
- result.trace.LogIfLong(traceThreshold)
+ // and the apply start time
+ result.Trace.SetStartTime(startTime)
+ result.Trace.InsertStep(0, applyStart, "process raft request")
+ result.Trace.LogIfLong(traceThreshold)
}
- return result.resp, nil
+ return result.Resp, nil
}
func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
@@ -640,11 +736,11 @@ func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) e
return nil
}
-func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
+func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*apply2.Result, error) {
ai := s.getAppliedIndex()
ci := s.getCommittedIndex()
if ci > ai+maxGapBetweenApplyAndCommitIndex {
- return nil, ErrTooManyRequests
+ return nil, errors.ErrTooManyRequests
}
r.Header = &pb.RequestHeader{
@@ -669,7 +765,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In
}
if len(data) > int(s.Cfg.MaxRequestBytes) {
- return nil, ErrRequestTooLarge
+ return nil, errors.ErrRequestTooLarge
}
id := r.ID
@@ -693,13 +789,13 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In
select {
case x := <-ch:
- return x.(*applyResult), nil
+ return x.(*apply2.Result), nil
case <-cctx.Done():
proposalsFailed.Inc()
s.w.Trigger(id, nil) // GC wait
return nil, s.parseProposeCtxErr(cctx.Err(), start)
case <-s.done:
- return nil, ErrStopped
+ return nil, errors.ErrStopped
}
}
@@ -708,8 +804,8 @@ func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() }
func (s *EtcdServer) linearizableReadLoop() {
for {
- requestId := s.reqIDGen.Next()
- leaderChangedNotifier := s.LeaderChangedNotify()
+ requestID := s.reqIDGen.Next()
+ leaderChangedNotifier := s.leaderChanged.Receive()
select {
case <-leaderChangedNotifier:
continue
@@ -728,7 +824,7 @@ func (s *EtcdServer) linearizableReadLoop() {
s.readNotifier = nextnr
s.readMu.Unlock()
- confirmedIndex, err := s.requestCurrentIndex(leaderChangedNotifier, requestId)
+ confirmedIndex, err := s.requestCurrentIndex(leaderChangedNotifier, requestID)
if isStopped(err) {
return
}
@@ -760,11 +856,11 @@ func (s *EtcdServer) linearizableReadLoop() {
}
func isStopped(err error) bool {
- return err == raft.ErrStopped || err == ErrStopped
+ return errorspkg.Is(err, raft.ErrStopped) || errorspkg.Is(err, errors.ErrStopped)
}
-func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, requestId uint64) (uint64, error) {
- err := s.sendReadIndex(requestId)
+func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, requestID uint64) (uint64, error) {
+ err := s.sendReadIndex(requestID)
if err != nil {
return 0, err
}
@@ -775,24 +871,24 @@ func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{},
retryTimer := time.NewTimer(readIndexRetryTime)
defer retryTimer.Stop()
- firstCommitInTermNotifier := s.FirstCommitInTermNotify()
+ firstCommitInTermNotifier := s.firstCommitInTerm.Receive()
for {
select {
case rs := <-s.r.readStateC:
- requestIdBytes := uint64ToBigEndianBytes(requestId)
- gotOwnResponse := bytes.Equal(rs.RequestCtx, requestIdBytes)
+ requestIDBytes := uint64ToBigEndianBytes(requestID)
+ gotOwnResponse := bytes.Equal(rs.RequestCtx, requestIDBytes)
if !gotOwnResponse {
// a previous request might time out. now we should ignore the response of it and
// continue waiting for the response of the current requests.
- responseId := uint64(0)
+ responseID := uint64(0)
if len(rs.RequestCtx) == 8 {
- responseId = binary.BigEndian.Uint64(rs.RequestCtx)
+ responseID = binary.BigEndian.Uint64(rs.RequestCtx)
}
lg.Warn(
"ignored out-of-date read index response; local node read indexes queueing up and waiting to be in sync with leader",
- zap.Uint64("sent-request-id", requestId),
- zap.Uint64("received-request-id", responseId),
+ zap.Uint64("sent-request-id", requestID),
+ zap.Uint64("received-request-id", responseID),
)
slowReadIndex.Inc()
continue
@@ -801,11 +897,11 @@ func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{},
case <-leaderChangedNotifier:
readIndexFailed.Inc()
// return a retryable error.
- return 0, ErrLeaderChanged
+ return 0, errors.ErrLeaderChanged
case <-firstCommitInTermNotifier:
- firstCommitInTermNotifier = s.FirstCommitInTermNotify()
+ firstCommitInTermNotifier = s.firstCommitInTerm.Receive()
lg.Info("first commit in current term: resending ReadIndex request")
- err := s.sendReadIndex(requestId)
+ err := s.sendReadIndex(requestID)
if err != nil {
return 0, err
}
@@ -814,10 +910,10 @@ func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{},
case <-retryTimer.C:
lg.Warn(
"waiting for ReadIndex response took too long, retrying",
- zap.Uint64("sent-request-id", requestId),
+ zap.Uint64("sent-request-id", requestID),
zap.Duration("retry-timeout", readIndexRetryTime),
)
- err := s.sendReadIndex(requestId)
+ err := s.sendReadIndex(requestID)
if err != nil {
return 0, err
}
@@ -829,9 +925,9 @@ func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{},
zap.Duration("timeout", s.Cfg.ReqTimeout()),
)
slowReadIndex.Inc()
- return 0, ErrTimeout
+ return 0, errors.ErrTimeout
case <-s.stopping:
- return 0, ErrStopped
+ return 0, errors.ErrStopped
}
}
}
@@ -848,7 +944,7 @@ func (s *EtcdServer) sendReadIndex(requestIndex uint64) error {
cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
err := s.r.ReadIndex(cctx, ctxToSend)
cancel()
- if err == raft.ErrStopped {
+ if errorspkg.Is(err, raft.ErrStopped) {
return err
}
if err != nil {
@@ -882,7 +978,7 @@ func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error {
case <-ctx.Done():
return ctx.Err()
case <-s.done:
- return ErrStopped
+ return errors.ErrStopped
}
}
@@ -907,7 +1003,7 @@ func (s *EtcdServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb
case pb.DowngradeRequest_CANCEL:
return s.downgradeCancel(ctx)
default:
- return nil, ErrUnknownMethod
+ return nil, errors.ErrUnknownMethod
}
}
@@ -919,73 +1015,40 @@ func (s *EtcdServer) downgradeValidate(ctx context.Context, v string) (*pb.Downg
return nil, err
}
- // gets leaders commit index and wait for local store to finish applying that index
- // to avoid using stale downgrade information
- err = s.linearizableReadNotify(ctx)
- if err != nil {
- return nil, err
- }
-
cv := s.ClusterVersion()
if cv == nil {
- return nil, ErrClusterVersionUnavailable
+ return nil, errors.ErrClusterVersionUnavailable
}
- resp.Version = cv.String()
-
- allowedTargetVersion := membership.AllowedDowngradeVersion(cv)
- if !targetVersion.Equal(*allowedTargetVersion) {
- return nil, ErrInvalidDowngradeTargetVersion
+ resp.Version = version.Cluster(cv.String())
+ err = s.Version().DowngradeValidate(ctx, targetVersion)
+ if err != nil {
+ return nil, err
}
- downgradeInfo := s.cluster.DowngradeInfo()
- if downgradeInfo.Enabled {
- // Todo: return the downgrade status along with the error msg
- return nil, ErrDowngradeInProcess
- }
return resp, nil
}
func (s *EtcdServer) downgradeEnable(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) {
- // validate downgrade capability before starting downgrade
- v := r.Version
lg := s.Logger()
- if resp, err := s.downgradeValidate(ctx, v); err != nil {
- lg.Warn("reject downgrade request", zap.Error(err))
- return resp, err
- }
- targetVersion, err := convertToClusterVersion(v)
+ targetVersion, err := convertToClusterVersion(r.Version)
if err != nil {
lg.Warn("reject downgrade request", zap.Error(err))
return nil, err
}
-
- raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: true, Ver: targetVersion.String()}
- _, err = s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest})
+ err = s.Version().DowngradeEnable(ctx, targetVersion)
if err != nil {
lg.Warn("reject downgrade request", zap.Error(err))
return nil, err
}
- resp := pb.DowngradeResponse{Version: s.ClusterVersion().String()}
+ resp := pb.DowngradeResponse{Version: version.Cluster(s.ClusterVersion().String())}
return &resp, nil
}
func (s *EtcdServer) downgradeCancel(ctx context.Context) (*pb.DowngradeResponse, error) {
- // gets leaders commit index and wait for local store to finish applying that index
- // to avoid using stale downgrade information
- if err := s.linearizableReadNotify(ctx); err != nil {
- return nil, err
- }
-
- downgradeInfo := s.cluster.DowngradeInfo()
- if !downgradeInfo.Enabled {
- return nil, ErrNoInflightDowngrade
- }
-
- raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: false}
- _, err := s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest})
+ err := s.Version().DowngradeCancel(ctx)
if err != nil {
- return nil, err
+ s.lg.Warn("failed to cancel downgrade", zap.Error(err))
}
- resp := pb.DowngradeResponse{Version: s.ClusterVersion().String()}
+ resp := pb.DowngradeResponse{Version: version.Cluster(s.ClusterVersion().String())}
return &resp, nil
}
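Aside (not part of the patch): the new waitAppliedIndex helper above gates lease operations (LeaseRenew, LeaseTimeToLive) on local apply progress. Below is a stand-alone sketch of the same select-with-timeout pattern; the channel and error names are made up rather than the EtcdServer fields.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var (
	errStopped                 = errors.New("server stopped")
	errTimeoutWaitAppliedIndex = errors.New("timed out waiting for applied index")
)

// waitApplied mirrors EtcdServer.waitAppliedIndex: applyWait is closed once the
// applied index catches up with the committed index, stopping is closed on shutdown.
func waitApplied(applyWait, stopping <-chan struct{}, timeout time.Duration) error {
	select {
	case <-applyWait:
		return nil
	case <-stopping:
		return errStopped
	case <-time.After(timeout):
		return errTimeoutWaitAppliedIndex
	}
}

func main() {
	applyWait := make(chan struct{})
	stopping := make(chan struct{})
	close(applyWait) // pretend the apply loop has already caught up
	fmt.Println(waitApplied(applyWait, stopping, time.Second)) // <nil>
}
```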
diff --git a/server/etcdserver/version/doc.go b/server/etcdserver/version/doc.go
new file mode 100644
index 00000000000..c34f9051195
--- /dev/null
+++ b/server/etcdserver/version/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package version provides functions for getting/saving storage version.
+package version
diff --git a/server/etcdserver/version/downgrade.go b/server/etcdserver/version/downgrade.go
new file mode 100644
index 00000000000..f2c6e119467
--- /dev/null
+++ b/server/etcdserver/version/downgrade.go
@@ -0,0 +1,76 @@
+// Copyright 2020 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/version"
+)
+
+type DowngradeInfo struct {
+ // TargetVersion is the target downgrade version. If the cluster is not
+ // downgrading, TargetVersion is an empty string.
+ TargetVersion string `json:"target-version"`
+ // Enabled indicates whether downgrading is enabled for the cluster
+ Enabled bool `json:"enabled"`
+}
+
+func (d *DowngradeInfo) GetTargetVersion() *semver.Version {
+ return semver.Must(semver.NewVersion(d.TargetVersion))
+}
+
+// isValidDowngrade verifies whether the cluster can be downgraded from verFrom to verTo
+func isValidDowngrade(verFrom *semver.Version, verTo *semver.Version) bool {
+ return verTo.Equal(*allowedDowngradeVersion(verFrom))
+}
+
+// MustDetectDowngrade panics if the local server is joining a cluster that doesn't support its version.
+func MustDetectDowngrade(lg *zap.Logger, sv, cv *semver.Version) {
+ // only keep major.minor version for comparison against cluster version
+ sv = &semver.Version{Major: sv.Major, Minor: sv.Minor}
+
+ // If the cluster disables downgrade, check the local version against the determined cluster version.
+ // The validation passes when the local version is not less than the cluster version.
+ if cv != nil && sv.LessThan(*cv) {
+ lg.Panic(
+ "invalid downgrade; server version is lower than determined cluster version",
+ zap.String("current-server-version", sv.String()),
+ zap.String("determined-cluster-version", version.Cluster(cv.String())),
+ )
+ }
+}
+
+func allowedDowngradeVersion(ver *semver.Version) *semver.Version {
+ // TODO: handle downgrades from a higher major version (e.g. from v4.0 to v3.x)
+ return &semver.Version{Major: ver.Major, Minor: ver.Minor - 1}
+}
+
+// IsValidClusterVersionChange checks the two scenarios in which the cluster version may change:
+// 1. Downgrade: the cluster version is one minor version higher than the local version;
+// the cluster version should change.
+// 2. Cluster start: while not all member versions are available, the cluster version
+// is set to MinVersion (3.0); once all members are at a higher version, the cluster version
+// is lower than the minimal server version and should change.
+func IsValidClusterVersionChange(verFrom *semver.Version, verTo *semver.Version) bool {
+ verFrom = &semver.Version{Major: verFrom.Major, Minor: verFrom.Minor}
+ verTo = &semver.Version{Major: verTo.Major, Minor: verTo.Minor}
+
+ if isValidDowngrade(verFrom, verTo) || (verFrom.Major == verTo.Major && verFrom.LessThan(*verTo)) {
+ return true
+ }
+ return false
+}
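Aside (not part of the patch): a quick illustration of the version-change rules encoded above, assuming only the coreos/go-semver dependency this file already uses; the version literals are arbitrary examples.

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	from35 := semver.New("3.5.0")
	to34 := semver.New("3.4.0")
	to36 := semver.New("3.6.0")

	// Mirrors isValidDowngrade: a downgrade is only valid to the immediately
	// preceding minor version, so 3.5 -> 3.4 is allowed.
	fmt.Println(to34.Equal(semver.Version{Major: from35.Major, Minor: from35.Minor - 1})) // true

	// Mirrors the upgrade branch of IsValidClusterVersionChange: an upgrade is
	// valid only within the same major version, so 3.5 -> 3.6 is allowed.
	fmt.Println(from35.Major == to36.Major && from35.LessThan(*to36)) // true
}
```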
diff --git a/server/etcdserver/version/downgrade_test.go b/server/etcdserver/version/downgrade_test.go
new file mode 100644
index 00000000000..252fcb57c3b
--- /dev/null
+++ b/server/etcdserver/version/downgrade_test.go
@@ -0,0 +1,188 @@
+// Copyright 2020 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/api/v3/version"
+)
+
+func TestMustDetectDowngrade(t *testing.T) {
+ lv := semver.Must(semver.NewVersion(version.Version))
+ lv = &semver.Version{Major: lv.Major, Minor: lv.Minor}
+ oneMinorHigher := &semver.Version{Major: lv.Major, Minor: lv.Minor + 1}
+ oneMinorLower := &semver.Version{Major: lv.Major, Minor: lv.Minor - 1}
+
+ tests := []struct {
+ name string
+ clusterVersion *semver.Version
+ success bool
+ message string
+ }{
+ {
+ "Succeeded when cluster version is nil",
+ nil,
+ true,
+ "",
+ },
+ {
+ "Succeeded when cluster version is one minor lower",
+ oneMinorLower,
+ true,
+ "",
+ },
+ {
+ "Succeeded when cluster version is server version",
+ lv,
+ true,
+ "",
+ },
+ {
+ "Failed when server version is lower than determined cluster version ",
+ oneMinorHigher,
+ false,
+ "invalid downgrade; server version is lower than determined cluster version",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ sv := semver.Must(semver.NewVersion(version.Version))
+ err := tryMustDetectDowngrade(lg, sv, tt.clusterVersion)
+
+ if tt.success != (err == nil) {
+ t.Errorf("Unexpected success, got: %v, wanted: %v", err == nil, tt.success)
+ // TODO test err
+ }
+ if err != nil && tt.message != fmt.Sprintf("%s", err) {
+ t.Errorf("Unexpected message, got %q, wanted: %v", err, tt.message)
+ }
+ })
+ }
+}
+
+func tryMustDetectDowngrade(lg *zap.Logger, sv, cv *semver.Version) (err any) {
+ defer func() {
+ err = recover()
+ }()
+ MustDetectDowngrade(lg, sv, cv)
+ return err
+}
+
+func TestIsValidDowngrade(t *testing.T) {
+ tests := []struct {
+ name string
+ verFrom string
+ verTo string
+ result bool
+ }{
+ {
+ "Valid downgrade",
+ "3.5.0",
+ "3.4.0",
+ true,
+ },
+ {
+ "Invalid downgrade",
+ "3.5.2",
+ "3.3.0",
+ false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ res := isValidDowngrade(
+ semver.Must(semver.NewVersion(tt.verFrom)), semver.Must(semver.NewVersion(tt.verTo)))
+ if res != tt.result {
+ t.Errorf("Expected downgrade valid is %v; Got %v", tt.result, res)
+ }
+ })
+ }
+}
+
+func TestIsVersionChangable(t *testing.T) {
+ tests := []struct {
+ name string
+ verFrom string
+ verTo string
+ expectedResult bool
+ }{
+ {
+ name: "When local version is one minor lower than cluster version",
+ verFrom: "3.5.0",
+ verTo: "3.4.0",
+ expectedResult: true,
+ },
+ {
+ name: "When local version is one minor and one patch lower than cluster version",
+ verFrom: "3.5.1",
+ verTo: "3.4.0",
+ expectedResult: true,
+ },
+ {
+ name: "When local version is one minor higher than cluster version",
+ verFrom: "3.4.0",
+ verTo: "3.5.0",
+ expectedResult: true,
+ },
+ {
+ name: "When local version is two minor higher than cluster version",
+ verFrom: "3.4.0",
+ verTo: "3.6.0",
+ expectedResult: true,
+ },
+ {
+ name: "When local version is one major higher than cluster version",
+ verFrom: "2.4.0",
+ verTo: "3.4.0",
+ expectedResult: false,
+ },
+ {
+ name: "When local version is equal to cluster version",
+ verFrom: "3.4.0",
+ verTo: "3.4.0",
+ expectedResult: false,
+ },
+ {
+ name: "When local version is one patch higher than cluster version",
+ verFrom: "3.5.0",
+ verTo: "3.5.1",
+ expectedResult: false,
+ },
+ {
+ name: "When local version is two minor lower than cluster version",
+ verFrom: "3.6.0",
+ verTo: "3.4.0",
+ expectedResult: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ verFrom := semver.Must(semver.NewVersion(tt.verFrom))
+ verTo := semver.Must(semver.NewVersion(tt.verTo))
+ ret := IsValidClusterVersionChange(verFrom, verTo)
+ assert.Equal(t, tt.expectedResult, ret)
+ })
+ }
+}
diff --git a/server/etcdserver/version/errors.go b/server/etcdserver/version/errors.go
new file mode 100644
index 00000000000..906aa9f413f
--- /dev/null
+++ b/server/etcdserver/version/errors.go
@@ -0,0 +1,23 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import "errors"
+
+var (
+ ErrInvalidDowngradeTargetVersion = errors.New("etcdserver: invalid downgrade target version")
+ ErrDowngradeInProcess = errors.New("etcdserver: cluster has a downgrade job in progress")
+ ErrNoInflightDowngrade = errors.New("etcdserver: no inflight downgrade job")
+)
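Aside (not part of the patch): these sentinel errors are expected to be matched with errors.Is, consistent with the errorspkg.Is migration in v3_server.go earlier in this diff. A short sketch; the error wrapping shown is purely illustrative.

```go
package main

import (
	"errors"
	"fmt"

	serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
)

func main() {
	// Wrap one of the sentinel errors as a caller might when adding context.
	err := fmt.Errorf("downgrade enable failed: %w", serverversion.ErrDowngradeInProcess)

	fmt.Println(errors.Is(err, serverversion.ErrDowngradeInProcess))  // true
	fmt.Println(errors.Is(err, serverversion.ErrNoInflightDowngrade)) // false
}
```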
diff --git a/server/etcdserver/version/monitor.go b/server/etcdserver/version/monitor.go
new file mode 100644
index 00000000000..b3e7f5804e1
--- /dev/null
+++ b/server/etcdserver/version/monitor.go
@@ -0,0 +1,221 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "context"
+ "errors"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/version"
+)
+
+// Monitor contains the logic used by the cluster leader to monitor version changes and decide on the cluster version or downgrade progress.
+type Monitor struct {
+ lg *zap.Logger
+ s Server
+}
+
+// Server lists EtcdServer methods needed by Monitor
+type Server interface {
+ GetClusterVersion() *semver.Version
+ GetDowngradeInfo() *DowngradeInfo
+ GetMembersVersions() map[string]*version.Versions
+ UpdateClusterVersion(string)
+ LinearizableReadNotify(ctx context.Context) error
+ DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error
+ DowngradeCancel(ctx context.Context) error
+
+ GetStorageVersion() *semver.Version
+ UpdateStorageVersion(semver.Version) error
+}
+
+func NewMonitor(lg *zap.Logger, storage Server) *Monitor {
+ return &Monitor{
+ lg: lg,
+ s: storage,
+ }
+}
+
+// UpdateClusterVersionIfNeeded updates the cluster version if a new version can be decided.
+func (m *Monitor) UpdateClusterVersionIfNeeded() error {
+ newClusterVersion, err := m.decideClusterVersion()
+ if newClusterVersion != nil {
+ newClusterVersion = &semver.Version{Major: newClusterVersion.Major, Minor: newClusterVersion.Minor}
+ m.s.UpdateClusterVersion(newClusterVersion.String())
+ }
+ return err
+}
+
+// decideClusterVersion decides whether to change cluster version and its next value.
+// The new cluster version is based on the members' server versions and whether the cluster is downgrading.
+// Returns nil if cluster version should be left unchanged.
+func (m *Monitor) decideClusterVersion() (*semver.Version, error) {
+ clusterVersion := m.s.GetClusterVersion()
+ minimalServerVersion := m.membersMinimalServerVersion()
+ if clusterVersion == nil {
+ if minimalServerVersion != nil {
+ return minimalServerVersion, nil
+ }
+ return semver.New(version.MinClusterVersion), nil
+ }
+ if minimalServerVersion == nil {
+ return nil, nil
+ }
+ downgrade := m.s.GetDowngradeInfo()
+ if downgrade != nil && downgrade.Enabled {
+ if downgrade.GetTargetVersion().Equal(*clusterVersion) {
+ return nil, nil
+ }
+ if !isValidDowngrade(clusterVersion, downgrade.GetTargetVersion()) {
+ m.lg.Error("Cannot downgrade from cluster-version to downgrade-target",
+ zap.String("downgrade-target", downgrade.TargetVersion),
+ zap.String("cluster-version", clusterVersion.String()),
+ )
+ return nil, errors.New("invalid downgrade target")
+ }
+ if !isValidDowngrade(minimalServerVersion, downgrade.GetTargetVersion()) {
+ m.lg.Error("Cannot downgrade from minimal-server-version to downgrade-target",
+ zap.String("downgrade-target", downgrade.TargetVersion),
+ zap.String("minimal-server-version", minimalServerVersion.String()),
+ )
+ return nil, errors.New("invalid downgrade target")
+ }
+ return downgrade.GetTargetVersion(), nil
+ }
+ if clusterVersion.LessThan(*minimalServerVersion) && IsValidClusterVersionChange(clusterVersion, minimalServerVersion) {
+ return minimalServerVersion, nil
+ }
+ return nil, nil
+}
+
+// UpdateStorageVersionIfNeeded updates the storage version if it differs from cluster version.
+func (m *Monitor) UpdateStorageVersionIfNeeded() {
+ cv := m.s.GetClusterVersion()
+ if cv == nil || cv.String() == version.MinClusterVersion {
+ return
+ }
+ sv := m.s.GetStorageVersion()
+
+ if sv == nil || sv.Major != cv.Major || sv.Minor != cv.Minor {
+ if sv != nil {
+ m.lg.Info("cluster version differs from storage version.", zap.String("cluster-version", cv.String()), zap.String("storage-version", sv.String()))
+ }
+ err := m.s.UpdateStorageVersion(semver.Version{Major: cv.Major, Minor: cv.Minor})
+ if err != nil {
+ m.lg.Error("failed to update storage version", zap.String("cluster-version", cv.String()), zap.Error(err))
+ return
+ }
+ d := m.s.GetDowngradeInfo()
+ if d != nil && d.Enabled {
+ m.lg.Info(
+ "The server is ready to downgrade",
+ zap.String("target-version", d.TargetVersion),
+ zap.String("server-version", version.Version),
+ )
+ }
+ }
+}
+
+func (m *Monitor) CancelDowngradeIfNeeded() {
+ d := m.s.GetDowngradeInfo()
+ if d == nil || !d.Enabled {
+ return
+ }
+
+ targetVersion := d.TargetVersion
+ v := semver.Must(semver.NewVersion(targetVersion))
+ if m.versionsMatchTarget(v) {
+ m.lg.Info("the cluster has been downgraded", zap.String("cluster-version", targetVersion))
+ err := m.s.DowngradeCancel(context.Background())
+ if err != nil {
+ m.lg.Warn("failed to cancel downgrade", zap.Error(err))
+ }
+ }
+}
+
+// membersMinimalServerVersion returns the minimal server version in the map, or nil if the
+// minimal version is unknown.
+// It logs a warning if there is a member with a higher version than the
+// local version.
+func (m *Monitor) membersMinimalServerVersion() *semver.Version {
+ vers := m.s.GetMembersVersions()
+ var minV *semver.Version
+ lv := semver.Must(semver.NewVersion(version.Version))
+
+ for mid, ver := range vers {
+ if ver == nil {
+ return nil
+ }
+ v, err := semver.NewVersion(ver.Server)
+ if err != nil {
+ m.lg.Warn(
+ "failed to parse server version of remote member",
+ zap.String("remote-member-id", mid),
+ zap.String("remote-member-version", ver.Server),
+ zap.Error(err),
+ )
+ return nil
+ }
+ if lv.LessThan(*v) {
+ m.lg.Warn(
+ "leader found higher-versioned member",
+ zap.String("local-member-version", lv.String()),
+ zap.String("remote-member-id", mid),
+ zap.String("remote-member-version", ver.Server),
+ )
+ }
+ if minV == nil {
+ minV = v
+ } else if v.LessThan(*minV) {
+ minV = v
+ }
+ }
+ return minV
+}
+
+// versionsMatchTarget returns true if all server versions are equal to the target version; otherwise it returns false.
+// It can be used to decide whether the cluster has finished downgrading to the target version.
+func (m *Monitor) versionsMatchTarget(targetVersion *semver.Version) bool {
+ vers := m.s.GetMembersVersions()
+ targetVersion = &semver.Version{Major: targetVersion.Major, Minor: targetVersion.Minor}
+ for mid, ver := range vers {
+ if ver == nil {
+ return false
+ }
+ v, err := semver.NewVersion(ver.Server)
+ if err != nil {
+ m.lg.Warn(
+ "failed to parse server version of remote member",
+ zap.String("remote-member-id", mid),
+ zap.String("remote-member-version", ver.Server),
+ zap.Error(err),
+ )
+ return false
+ }
+ v = &semver.Version{Major: v.Major, Minor: v.Minor}
+ if !targetVersion.Equal(*v) {
+ m.lg.Warn("remotes server has mismatching etcd version",
+ zap.String("remote-member-id", mid),
+ zap.String("current-server-version", v.String()),
+ zap.String("target-version", targetVersion.String()),
+ )
+ return false
+ }
+ }
+ return true
+}
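Aside (not part of the patch): a hedged sketch of how a leader-side loop might drive this Monitor. The real wiring lives in EtcdServer's version-monitoring goroutine, which is outside this excerpt, so the package name, ticker interval, and the srv parameter are assumptions.

```go
package versionmonitor

import (
	"time"

	"go.uber.org/zap"

	serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
)

// runVersionMonitor periodically lets the Monitor reconcile the cluster version,
// the storage version, and any in-flight downgrade. srv must implement the Server
// interface defined in monitor.go above.
func runVersionMonitor(lg *zap.Logger, srv serverversion.Server, stop <-chan struct{}) {
	monitor := serverversion.NewMonitor(lg, srv)
	ticker := time.NewTicker(5 * time.Second) // illustrative interval
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if err := monitor.UpdateClusterVersionIfNeeded(); err != nil {
				lg.Warn("failed to update cluster version", zap.Error(err))
			}
			monitor.UpdateStorageVersionIfNeeded()
			monitor.CancelDowngradeIfNeeded()
		case <-stop:
			return
		}
	}
}
```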
diff --git a/server/etcdserver/version/monitor_test.go b/server/etcdserver/version/monitor_test.go
new file mode 100644
index 00000000000..917d82d086b
--- /dev/null
+++ b/server/etcdserver/version/monitor_test.go
@@ -0,0 +1,453 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/api/v3/version"
+)
+
+func TestMemberMinimalVersion(t *testing.T) {
+ tests := []struct {
+ memberVersions map[string]*version.Versions
+ wantVersion *semver.Version
+ }{
+ {
+ map[string]*version.Versions{"a": {Server: "2.0.0"}},
+ semver.Must(semver.NewVersion("2.0.0")),
+ },
+ // unknown
+ {
+ map[string]*version.Versions{"a": nil},
+ nil,
+ },
+ {
+ map[string]*version.Versions{"a": {Server: "2.0.0"}, "b": {Server: "2.1.0"}, "c": {Server: "2.1.0"}},
+ semver.Must(semver.NewVersion("2.0.0")),
+ },
+ {
+ map[string]*version.Versions{"a": {Server: "2.1.0"}, "b": {Server: "2.1.0"}, "c": {Server: "2.1.0"}},
+ semver.Must(semver.NewVersion("2.1.0")),
+ },
+ {
+ map[string]*version.Versions{"a": nil, "b": {Server: "2.1.0"}, "c": {Server: "2.1.0"}},
+ nil,
+ },
+ }
+
+ for i, tt := range tests {
+ monitor := NewMonitor(zaptest.NewLogger(t), &storageMock{
+ memberVersions: tt.memberVersions,
+ })
+ minV := monitor.membersMinimalServerVersion()
+ if !reflect.DeepEqual(minV, tt.wantVersion) {
+ t.Errorf("#%d: ver = %+v, want %+v", i, minV, tt.wantVersion)
+ }
+ }
+}
+
+func TestDecideStorageVersion(t *testing.T) {
+ tests := []struct {
+ name string
+ clusterVersion *semver.Version
+ storageVersion *semver.Version
+ expectStorageVersion *semver.Version
+ }{
+ {
+ name: "No action if cluster version is nil",
+ },
+ {
+ name: "Should set storage version if cluster version is set",
+ clusterVersion: &version.V3_5,
+ expectStorageVersion: &version.V3_5,
+ },
+ {
+ name: "No action if storage version was already set",
+ storageVersion: &version.V3_5,
+ expectStorageVersion: &version.V3_5,
+ },
+ {
+ name: "No action if storage version equals cluster version",
+ clusterVersion: &version.V3_5,
+ storageVersion: &version.V3_5,
+ expectStorageVersion: &version.V3_5,
+ },
+ {
+ name: "Should set storage version to cluster version",
+ clusterVersion: &version.V3_6,
+ storageVersion: &version.V3_5,
+ expectStorageVersion: &version.V3_6,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ s := &storageMock{
+ clusterVersion: tt.clusterVersion,
+ storageVersion: tt.storageVersion,
+ }
+ monitor := NewMonitor(zaptest.NewLogger(t), s)
+ monitor.UpdateStorageVersionIfNeeded()
+ if !reflect.DeepEqual(s.storageVersion, tt.expectStorageVersion) {
+ t.Errorf("Unexpected storage version value, got = %+v, want %+v", s.storageVersion, tt.expectStorageVersion)
+ }
+ })
+ }
+}
+
+func TestVersionMatchTarget(t *testing.T) {
+ tests := []struct {
+ name string
+ targetVersion *semver.Version
+ versionMap map[string]*version.Versions
+ expectedFinished bool
+ }{
+ {
+ "When downgrade finished",
+ &semver.Version{Major: 3, Minor: 4},
+ map[string]*version.Versions{
+ "mem1": {Server: "3.4.1", Cluster: "3.4.0"},
+ "mem2": {Server: "3.4.2-pre", Cluster: "3.4.0"},
+ "mem3": {Server: "3.4.2", Cluster: "3.4.0"},
+ },
+ true,
+ },
+ {
+ "When cannot parse peer version",
+ &semver.Version{Major: 3, Minor: 4},
+ map[string]*version.Versions{
+ "mem1": {Server: "3.4", Cluster: "3.4.0"},
+ "mem2": {Server: "3.4.2-pre", Cluster: "3.4.0"},
+ "mem3": {Server: "3.4.2", Cluster: "3.4.0"},
+ },
+ false,
+ },
+ {
+ "When downgrade not finished",
+ &semver.Version{Major: 3, Minor: 4},
+ map[string]*version.Versions{
+ "mem1": {Server: "3.4.1", Cluster: "3.4.0"},
+ "mem2": {Server: "3.4.2-pre", Cluster: "3.4.0"},
+ "mem3": {Server: "3.5.2", Cluster: "3.5.0"},
+ },
+ false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ monitor := NewMonitor(zaptest.NewLogger(t), &storageMock{
+ memberVersions: tt.versionMap,
+ })
+ actual := monitor.versionsMatchTarget(tt.targetVersion)
+ if actual != tt.expectedFinished {
+ t.Errorf("expected downgrade finished is %v; got %v", tt.expectedFinished, actual)
+ }
+ })
+ }
+}
+
+func TestUpdateClusterVersionIfNeeded(t *testing.T) {
+ tests := []struct {
+ name string
+ clusterVersion *semver.Version
+ memberVersions map[string]*version.Versions
+ downgrade *DowngradeInfo
+ expectClusterVersion *semver.Version
+ expectError error
+ }{
+ {
+ name: "Default to 3.0 if there are no members",
+ expectClusterVersion: &version.V3_0,
+ },
+ {
+ name: "Should pick lowest server version from members",
+ memberVersions: map[string]*version.Versions{
+ "a": {Server: "3.6.0"},
+ "b": {Server: "3.5.0"},
+ },
+ expectClusterVersion: &version.V3_5,
+ },
+ {
+			name: "Should support pre-release versions",
+ memberVersions: map[string]*version.Versions{
+ "b": {Server: "3.5.0-alpha.0"},
+ },
+ expectClusterVersion: &version.V3_5,
+ },
+ {
+			name: "Sets minimal version when a member has a broken version",
+ memberVersions: map[string]*version.Versions{
+ "a": {Server: "3.6.0"},
+ "b": {Server: "yyyy"},
+ },
+ expectClusterVersion: &version.V3_0,
+ },
+ {
+ name: "Should not downgrade cluster version without explicit downgrade request",
+ memberVersions: map[string]*version.Versions{
+ "a": {Server: "3.5.0"},
+ "b": {Server: "3.6.0"},
+ },
+ clusterVersion: &version.V3_6,
+ expectClusterVersion: &version.V3_6,
+ },
+ {
+			name: "Should not upgrade cluster version if there is still an old member",
+ memberVersions: map[string]*version.Versions{
+ "a": {Server: "3.5.0"},
+ "b": {Server: "3.6.0"},
+ },
+ clusterVersion: &version.V3_5,
+ expectClusterVersion: &version.V3_5,
+ },
+ {
+ name: "Should upgrade cluster version if all members have upgraded (have higher server version)",
+ memberVersions: map[string]*version.Versions{
+ "a": {Server: "3.6.0"},
+ "b": {Server: "3.6.0"},
+ },
+ clusterVersion: &version.V3_5,
+ expectClusterVersion: &version.V3_6,
+ },
+ {
+ name: "Should downgrade cluster version if downgrade is set to allow older members to join",
+ memberVersions: map[string]*version.Versions{
+ "a": {Server: "3.6.0"},
+ "b": {Server: "3.6.0"},
+ },
+ clusterVersion: &version.V3_6,
+ downgrade: &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true},
+ expectClusterVersion: &version.V3_5,
+ },
+ {
+ name: "Don't downgrade below supported range",
+ memberVersions: map[string]*version.Versions{
+ "a": {Server: "3.6.0"},
+ "b": {Server: "3.6.0"},
+ },
+ clusterVersion: &version.V3_5,
+ downgrade: &DowngradeInfo{TargetVersion: "3.4.0", Enabled: true},
+ expectClusterVersion: &version.V3_5,
+ expectError: fmt.Errorf("invalid downgrade target"),
+ },
+ {
+ name: "Don't downgrade above cluster version",
+ memberVersions: map[string]*version.Versions{
+ "a": {Server: "3.5.0"},
+ "b": {Server: "3.5.0"},
+ },
+ clusterVersion: &version.V3_5,
+ downgrade: &DowngradeInfo{TargetVersion: "3.6.0", Enabled: true},
+ expectClusterVersion: &version.V3_5,
+ expectError: fmt.Errorf("invalid downgrade target"),
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ s := &storageMock{
+ clusterVersion: tt.clusterVersion,
+ memberVersions: tt.memberVersions,
+ downgradeInfo: tt.downgrade,
+ }
+ monitor := NewMonitor(zaptest.NewLogger(t), s)
+
+ err := monitor.UpdateClusterVersionIfNeeded()
+ assert.Equal(t, tt.expectClusterVersion, s.clusterVersion)
+ assert.Equal(t, tt.expectError, err)
+
+ // Ensure results are stable
+ newVersion, err := monitor.decideClusterVersion()
+ assert.Nil(t, newVersion)
+ assert.Equal(t, tt.expectError, err)
+ })
+ }
+}
+
+func TestCancelDowngradeIfNeeded(t *testing.T) {
+ tests := []struct {
+ name string
+ memberVersions map[string]*version.Versions
+ downgrade *DowngradeInfo
+ expectDowngrade *DowngradeInfo
+ }{
+ {
+			name: "No action if there is no downgrade in progress",
+ },
+ {
+ name: "Cancel downgrade if there are no members",
+ downgrade: &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true},
+ expectDowngrade: nil,
+ },
+		// The next entries go through all the states that should occur during a downgrade
+ {
+ name: "No action if downgrade was not started",
+ memberVersions: map[string]*version.Versions{
+ "a": {Cluster: "3.6.0", Server: "3.6.1"},
+ "b": {Cluster: "3.6.0", Server: "3.6.2"},
+ },
+ },
+ {
+ name: "Continue downgrade if just started",
+ memberVersions: map[string]*version.Versions{
+ "a": {Cluster: "3.5.0", Server: "3.6.1"},
+ "b": {Cluster: "3.5.0", Server: "3.6.2"},
+ },
+ downgrade: &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true},
+ expectDowngrade: &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true},
+ },
+ {
+			name: "Continue downgrade if there is at least one member with a non-matching version",
+ memberVersions: map[string]*version.Versions{
+ "a": {Cluster: "3.5.0", Server: "3.5.1"},
+ "b": {Cluster: "3.5.0", Server: "3.6.2"},
+ },
+ downgrade: &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true},
+ expectDowngrade: &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true},
+ },
+ {
+ name: "Cancel downgrade if all members have downgraded",
+ memberVersions: map[string]*version.Versions{
+ "a": {Cluster: "3.5.0", Server: "3.5.1"},
+ "b": {Cluster: "3.5.0", Server: "3.5.2"},
+ },
+ downgrade: &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true},
+ expectDowngrade: nil,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ s := &storageMock{
+ memberVersions: tt.memberVersions,
+ downgradeInfo: tt.downgrade,
+ }
+ monitor := NewMonitor(zaptest.NewLogger(t), s)
+
+ // Run multiple times to ensure that results are stable
+ for i := 0; i < 3; i++ {
+ monitor.CancelDowngradeIfNeeded()
+ assert.Equal(t, tt.expectDowngrade, s.downgradeInfo)
+ }
+ })
+ }
+}
+
+func TestUpdateStorageVersionIfNeeded(t *testing.T) {
+ tests := []struct {
+ name string
+ clusterVersion *semver.Version
+ storageVersion *semver.Version
+ expectStorageVersion *semver.Version
+ }{
+ {
+ name: "No action if cluster version is nil",
+ },
+ {
+ name: "Should set storage version if cluster version is set",
+ clusterVersion: &version.V3_5,
+ expectStorageVersion: &version.V3_5,
+ },
+ {
+ name: "No action if storage version was already set",
+ storageVersion: &version.V3_5,
+ expectStorageVersion: &version.V3_5,
+ },
+ {
+ name: "No action if storage version equals cluster version",
+ clusterVersion: &version.V3_5,
+ storageVersion: &version.V3_5,
+ expectStorageVersion: &version.V3_5,
+ },
+ {
+ name: "Should set storage version to cluster version",
+ clusterVersion: &version.V3_6,
+ storageVersion: &version.V3_5,
+ expectStorageVersion: &version.V3_6,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ s := &storageMock{
+ clusterVersion: tt.clusterVersion,
+ storageVersion: tt.storageVersion,
+ }
+ monitor := NewMonitor(zaptest.NewLogger(t), s)
+
+ // Run multiple times to ensure that results are stable
+ for i := 0; i < 3; i++ {
+ monitor.UpdateStorageVersionIfNeeded()
+ assert.Equal(t, tt.expectStorageVersion, s.storageVersion)
+ }
+ })
+ }
+}
+
+type storageMock struct {
+ memberVersions map[string]*version.Versions
+ clusterVersion *semver.Version
+ storageVersion *semver.Version
+ downgradeInfo *DowngradeInfo
+}
+
+var _ Server = (*storageMock)(nil)
+
+func (s *storageMock) UpdateClusterVersion(version string) {
+ s.clusterVersion = semver.New(version)
+}
+
+func (s *storageMock) LinearizableReadNotify(ctx context.Context) error {
+ return nil
+}
+
+func (s *storageMock) DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error {
+ return nil
+}
+
+func (s *storageMock) DowngradeCancel(ctx context.Context) error {
+ s.downgradeInfo = nil
+ return nil
+}
+
+func (s *storageMock) GetClusterVersion() *semver.Version {
+ return s.clusterVersion
+}
+
+func (s *storageMock) GetDowngradeInfo() *DowngradeInfo {
+ return s.downgradeInfo
+}
+
+func (s *storageMock) GetMembersVersions() map[string]*version.Versions {
+ return s.memberVersions
+}
+
+func (s *storageMock) GetStorageVersion() *semver.Version {
+ return s.storageVersion
+}
+
+func (s *storageMock) UpdateStorageVersion(v semver.Version) error {
+ s.storageVersion = &v
+ return nil
+}
diff --git a/server/etcdserver/version/version.go b/server/etcdserver/version/version.go
new file mode 100644
index 00000000000..0a2f99a1faf
--- /dev/null
+++ b/server/etcdserver/version/version.go
@@ -0,0 +1,81 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "context"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+)
+
+// Manager contains logic to manage etcd cluster version downgrade process.
+type Manager struct {
+ lg *zap.Logger
+ s Server
+}
+
+// NewManager returns a new manager instance
+func NewManager(lg *zap.Logger, s Server) *Manager {
+ return &Manager{
+ lg: lg,
+ s: s,
+ }
+}
+
+// DowngradeValidate validates whether the cluster is downgradable to the provided target version and returns an error if not.
+func (m *Manager) DowngradeValidate(ctx context.Context, targetVersion *semver.Version) error {
+	// Get the leader's commit index and wait for the local store to finish applying that index,
+	// to avoid using stale downgrade information.
+ err := m.s.LinearizableReadNotify(ctx)
+ if err != nil {
+ return err
+ }
+ cv := m.s.GetClusterVersion()
+ allowedTargetVersion := allowedDowngradeVersion(cv)
+ if !targetVersion.Equal(*allowedTargetVersion) {
+ return ErrInvalidDowngradeTargetVersion
+ }
+
+ downgradeInfo := m.s.GetDowngradeInfo()
+ if downgradeInfo != nil && downgradeInfo.Enabled {
+		// TODO: return the downgrade status along with the error message
+ return ErrDowngradeInProcess
+ }
+ return nil
+}
+
+// DowngradeEnable initiates etcd cluster version downgrade process.
+func (m *Manager) DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error {
+ // validate downgrade capability before starting downgrade
+ err := m.DowngradeValidate(ctx, targetVersion)
+ if err != nil {
+ return err
+ }
+ return m.s.DowngradeEnable(ctx, targetVersion)
+}
+
+// DowngradeCancel cancels ongoing downgrade process.
+func (m *Manager) DowngradeCancel(ctx context.Context) error {
+ err := m.s.LinearizableReadNotify(ctx)
+ if err != nil {
+ return err
+ }
+ downgradeInfo := m.s.GetDowngradeInfo()
+ if !downgradeInfo.Enabled {
+ return ErrNoInflightDowngrade
+ }
+ return m.s.DowngradeCancel(ctx)
+}
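
The allowed target computed by allowedDowngradeVersion is referenced but not shown in this hunk; judging from the downgrade tests earlier in this diff, the rule is that the target must be exactly one minor version below the current cluster version. A hedged sketch of that rule (allowedTarget is an illustrative re-implementation, not the PR's code):

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

// allowedTarget re-derives the rule implied by the tests: the only accepted
// downgrade target is exactly one minor version below the current cluster version.
// (The real check lives in allowedDowngradeVersion, which is not part of this hunk.)
func allowedTarget(clusterVersion semver.Version) semver.Version {
	return semver.Version{Major: clusterVersion.Major, Minor: clusterVersion.Minor - 1}
}

func main() {
	cv := semver.Version{Major: 3, Minor: 6}
	allowed := allowedTarget(cv)

	ok := semver.Version{Major: 3, Minor: 5}
	tooOld := semver.Version{Major: 3, Minor: 4}

	fmt.Println(ok.Equal(allowed))     // true: DowngradeValidate would proceed
	fmt.Println(tooOld.Equal(allowed)) // false: rejected with ErrInvalidDowngradeTargetVersion
}
```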
diff --git a/server/etcdserver/version/version_test.go b/server/etcdserver/version/version_test.go
new file mode 100644
index 00000000000..cae8e70a3b5
--- /dev/null
+++ b/server/etcdserver/version/version_test.go
@@ -0,0 +1,255 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "testing"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/api/v3/version"
+)
+
+func TestUpgradeSingleNode(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ c := newCluster(lg, 1, version.V3_6)
+ c.StepMonitors()
+ assert.Equal(t, newCluster(lg, 1, version.V3_6), c)
+
+ c.ReplaceMemberBinary(0, version.V3_7)
+ c.StepMonitors()
+ c.StepMonitors()
+
+ assert.Equal(t, newCluster(lg, 1, version.V3_7), c)
+}
+
+func TestUpgradeThreeNodes(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ c := newCluster(lg, 3, version.V3_6)
+ c.StepMonitors()
+ assert.Equal(t, newCluster(lg, 3, version.V3_6), c)
+
+ c.ReplaceMemberBinary(0, version.V3_7)
+ c.StepMonitors()
+ c.ReplaceMemberBinary(1, version.V3_7)
+ c.StepMonitors()
+ c.ReplaceMemberBinary(2, version.V3_7)
+ c.StepMonitors()
+ c.StepMonitors()
+
+ assert.Equal(t, newCluster(lg, 3, version.V3_7), c)
+}
+
+func TestDowngradeSingleNode(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ c := newCluster(lg, 1, version.V3_6)
+ c.StepMonitors()
+ assert.Equal(t, newCluster(lg, 1, version.V3_6), c)
+
+ require.NoError(t, c.Version().DowngradeEnable(context.Background(), &version.V3_5))
+ c.StepMonitors()
+ assert.Equal(t, version.V3_5, c.clusterVersion)
+
+ c.ReplaceMemberBinary(0, version.V3_5)
+ c.StepMonitors()
+
+ assert.Equal(t, newCluster(lg, 1, version.V3_5), c)
+}
+
+func TestDowngradeThreeNode(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ c := newCluster(lg, 3, version.V3_6)
+ c.StepMonitors()
+ assert.Equal(t, newCluster(lg, 3, version.V3_6), c)
+
+ require.NoError(t, c.Version().DowngradeEnable(context.Background(), &version.V3_5))
+ c.StepMonitors()
+ assert.Equal(t, version.V3_5, c.clusterVersion)
+
+ c.ReplaceMemberBinary(0, version.V3_5)
+ c.StepMonitors()
+ c.ReplaceMemberBinary(1, version.V3_5)
+ c.StepMonitors()
+ c.ReplaceMemberBinary(2, version.V3_5)
+ c.StepMonitors()
+
+ assert.Equal(t, newCluster(lg, 3, version.V3_5), c)
+}
+
+func TestNewerMemberCanReconnectDuringDowngrade(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ c := newCluster(lg, 3, version.V3_6)
+ c.StepMonitors()
+ assert.Equal(t, newCluster(lg, 3, version.V3_6), c)
+
+ require.NoError(t, c.Version().DowngradeEnable(context.Background(), &version.V3_5))
+ c.StepMonitors()
+ assert.Equal(t, version.V3_5, c.clusterVersion)
+
+ c.ReplaceMemberBinary(0, version.V3_5)
+ c.StepMonitors()
+
+ c.MemberCrashes(2)
+ c.StepMonitors()
+ c.MemberReconnects(2)
+ c.StepMonitors()
+
+ c.ReplaceMemberBinary(1, version.V3_5)
+ c.StepMonitors()
+ c.ReplaceMemberBinary(2, version.V3_5)
+ c.StepMonitors()
+
+ assert.Equal(t, newCluster(lg, 3, version.V3_5), c)
+}
+
+func newCluster(lg *zap.Logger, memberCount int, ver semver.Version) *clusterMock {
+ cluster := &clusterMock{
+ lg: lg,
+ clusterVersion: ver,
+ members: make([]*memberMock, 0, memberCount),
+ }
+ majorMinVer := semver.Version{Major: ver.Major, Minor: ver.Minor}
+ for i := 0; i < memberCount; i++ {
+ m := &memberMock{
+ isRunning: true,
+ cluster: cluster,
+ serverVersion: ver,
+ storageVersion: majorMinVer,
+ }
+ m.monitor = NewMonitor(lg.Named(fmt.Sprintf("m%d", i)), m)
+ cluster.members = append(cluster.members, m)
+ }
+ cluster.members[0].isLeader = true
+ return cluster
+}
+
+func (c *clusterMock) StepMonitors() {
+	// Execute monitor functions in random order, as their execution order is not guaranteed
+ var fs []func()
+ for _, m := range c.members {
+ fs = append(fs, m.monitor.UpdateStorageVersionIfNeeded)
+ if m.isLeader {
+ fs = append(fs, m.monitor.CancelDowngradeIfNeeded, func() { m.monitor.UpdateClusterVersionIfNeeded() })
+ }
+ }
+ rand.Shuffle(len(fs), func(i, j int) {
+ fs[i], fs[j] = fs[j], fs[i]
+ })
+ for _, f := range fs {
+ f()
+ }
+}
+
+type clusterMock struct {
+ lg *zap.Logger
+ clusterVersion semver.Version
+ downgradeInfo *DowngradeInfo
+ members []*memberMock
+}
+
+func (c *clusterMock) Version() *Manager {
+ return NewManager(c.lg, c.members[0])
+}
+
+func (c *clusterMock) MembersVersions() map[string]*version.Versions {
+ result := map[string]*version.Versions{}
+ for i, m := range c.members {
+ if m.isRunning {
+ result[fmt.Sprintf("%d", i)] = &version.Versions{
+ Server: m.serverVersion.String(),
+ Cluster: c.clusterVersion.String(),
+ }
+ }
+ }
+ return result
+}
+
+func (c *clusterMock) ReplaceMemberBinary(mid int, newServerVersion semver.Version) {
+ MustDetectDowngrade(c.lg, &c.members[mid].serverVersion, &c.clusterVersion)
+ c.members[mid].serverVersion = newServerVersion
+}
+
+func (c *clusterMock) MemberCrashes(mid int) {
+ c.members[mid].isRunning = false
+}
+
+func (c *clusterMock) MemberReconnects(mid int) {
+ MustDetectDowngrade(c.lg, &c.members[mid].serverVersion, &c.clusterVersion)
+ c.members[mid].isRunning = true
+}
+
+type memberMock struct {
+ cluster *clusterMock
+
+ isRunning bool
+ isLeader bool
+ serverVersion semver.Version
+ storageVersion semver.Version
+ monitor *Monitor
+}
+
+var _ Server = (*memberMock)(nil)
+
+func (m *memberMock) UpdateClusterVersion(version string) {
+ m.cluster.clusterVersion = *semver.New(version)
+}
+
+func (m *memberMock) LinearizableReadNotify(ctx context.Context) error {
+ return nil
+}
+
+func (m *memberMock) DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error {
+ m.cluster.downgradeInfo = &DowngradeInfo{
+ TargetVersion: targetVersion.String(),
+ Enabled: true,
+ }
+ return nil
+}
+
+func (m *memberMock) DowngradeCancel(context.Context) error {
+ m.cluster.downgradeInfo = nil
+ return nil
+}
+
+func (m *memberMock) GetClusterVersion() *semver.Version {
+ return &m.cluster.clusterVersion
+}
+
+func (m *memberMock) GetDowngradeInfo() *DowngradeInfo {
+ return m.cluster.downgradeInfo
+}
+
+func (m *memberMock) GetMembersVersions() map[string]*version.Versions {
+ return m.cluster.MembersVersions()
+}
+
+func (m *memberMock) GetStorageVersion() *semver.Version {
+ return &m.storageVersion
+}
+
+func (m *memberMock) UpdateStorageVersion(v semver.Version) error {
+ m.storageVersion = v
+ return nil
+}
+
+func (m *memberMock) TriggerSnapshot() {
+}
diff --git a/server/etcdserver/zap_raft.go b/server/etcdserver/zap_raft.go
index e8174f396ff..7672bdfbc4c 100644
--- a/server/etcdserver/zap_raft.go
+++ b/server/etcdserver/zap_raft.go
@@ -17,10 +17,10 @@ package etcdserver
import (
"errors"
- "go.etcd.io/etcd/raft/v3"
-
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
+
+ "go.etcd.io/raft/v3"
)
// NewRaftLogger builds "raft.Logger" from "*zap.Config".
@@ -37,7 +37,8 @@ func NewRaftLogger(lcfg *zap.Config) (raft.Logger, error) {
// NewRaftLoggerZap converts "*zap.Logger" to "raft.Logger".
func NewRaftLoggerZap(lg *zap.Logger) raft.Logger {
- return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
+ skipCallerLg := lg.WithOptions(zap.AddCallerSkip(1))
+ return &zapRaftLogger{lg: skipCallerLg, sugar: skipCallerLg.Sugar()}
}
// NewRaftLoggerFromZapCore creates "raft.Logger" from "zap.Core"
@@ -53,50 +54,50 @@ type zapRaftLogger struct {
sugar *zap.SugaredLogger
}
-func (zl *zapRaftLogger) Debug(args ...interface{}) {
+func (zl *zapRaftLogger) Debug(args ...any) {
zl.sugar.Debug(args...)
}
-func (zl *zapRaftLogger) Debugf(format string, args ...interface{}) {
+func (zl *zapRaftLogger) Debugf(format string, args ...any) {
zl.sugar.Debugf(format, args...)
}
-func (zl *zapRaftLogger) Error(args ...interface{}) {
+func (zl *zapRaftLogger) Error(args ...any) {
zl.sugar.Error(args...)
}
-func (zl *zapRaftLogger) Errorf(format string, args ...interface{}) {
+func (zl *zapRaftLogger) Errorf(format string, args ...any) {
zl.sugar.Errorf(format, args...)
}
-func (zl *zapRaftLogger) Info(args ...interface{}) {
+func (zl *zapRaftLogger) Info(args ...any) {
zl.sugar.Info(args...)
}
-func (zl *zapRaftLogger) Infof(format string, args ...interface{}) {
+func (zl *zapRaftLogger) Infof(format string, args ...any) {
zl.sugar.Infof(format, args...)
}
-func (zl *zapRaftLogger) Warning(args ...interface{}) {
+func (zl *zapRaftLogger) Warning(args ...any) {
zl.sugar.Warn(args...)
}
-func (zl *zapRaftLogger) Warningf(format string, args ...interface{}) {
+func (zl *zapRaftLogger) Warningf(format string, args ...any) {
zl.sugar.Warnf(format, args...)
}
-func (zl *zapRaftLogger) Fatal(args ...interface{}) {
+func (zl *zapRaftLogger) Fatal(args ...any) {
zl.sugar.Fatal(args...)
}
-func (zl *zapRaftLogger) Fatalf(format string, args ...interface{}) {
+func (zl *zapRaftLogger) Fatalf(format string, args ...any) {
zl.sugar.Fatalf(format, args...)
}
-func (zl *zapRaftLogger) Panic(args ...interface{}) {
+func (zl *zapRaftLogger) Panic(args ...any) {
zl.sugar.Panic(args...)
}
-func (zl *zapRaftLogger) Panicf(format string, args ...interface{}) {
+func (zl *zapRaftLogger) Panicf(format string, args ...any) {
zl.sugar.Panicf(format, args...)
}
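
The zap_raft.go change above wraps the logger with zap.AddCallerSkip(1) so that caller annotations point at the raft call site rather than the adapter method. A small stand-alone sketch of why the extra skip is needed; raftLogger here is a stand-in for zapRaftLogger, not the real type:

```go
package main

import "go.uber.org/zap"

// raftLogger stands in for zapRaftLogger: it adds one stack frame between the
// caller and zap, which is why NewRaftLoggerZap now applies zap.AddCallerSkip(1).
type raftLogger struct {
	sugar *zap.SugaredLogger
}

func (l *raftLogger) Infof(format string, args ...any) {
	l.sugar.Infof(format, args...)
}

func main() {
	lg, err := zap.NewDevelopment() // development config annotates every entry with its caller
	if err != nil {
		panic(err)
	}
	defer func() { _ = lg.Sync() }()

	// Skip the adapter frame so the "caller" field points at main.main, not raftLogger.Infof.
	skipped := lg.WithOptions(zap.AddCallerSkip(1))
	l := &raftLogger{sugar: skipped.Sugar()}
	l.Infof("applied entry %d", 42)
}
```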
diff --git a/server/etcdserver/zap_raft_test.go b/server/etcdserver/zap_raft_test.go
index 0b458a4094d..0952ac8c23a 100644
--- a/server/etcdserver/zap_raft_test.go
+++ b/server/etcdserver/zap_raft_test.go
@@ -17,16 +17,16 @@ package etcdserver
import (
"bytes"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"time"
- "go.etcd.io/etcd/client/pkg/v3/logutil"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
+
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
)
func TestNewRaftLogger(t *testing.T) {
@@ -51,7 +51,7 @@ func TestNewRaftLogger(t *testing.T) {
}
gl.Info("etcd-logutil-1")
- data, err := ioutil.ReadFile(logPath)
+ data, err := os.ReadFile(logPath)
if err != nil {
t.Fatal(err)
}
@@ -60,7 +60,7 @@ func TestNewRaftLogger(t *testing.T) {
}
gl.Warning("etcd-logutil-2")
- data, err = ioutil.ReadFile(logPath)
+ data, err = os.ReadFile(logPath)
if err != nil {
t.Fatal(err)
}
@@ -76,7 +76,7 @@ func TestNewRaftLoggerFromZapCore(t *testing.T) {
buf := bytes.NewBuffer(nil)
syncer := zapcore.AddSync(buf)
cr := zapcore.NewCore(
- zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
+ zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig),
syncer,
zap.NewAtomicLevelAt(zap.InfoLevel),
)
diff --git a/server/features/etcd_features.go b/server/features/etcd_features.go
new file mode 100644
index 00000000000..0a5d85ce28f
--- /dev/null
+++ b/server/features/etcd_features.go
@@ -0,0 +1,90 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package features
+
+import (
+ "fmt"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/pkg/v3/featuregate"
+)
+
+const (
+	// Every feature gate should be added here following this template:
+ //
+ // // owner: @username
+ // // kep: https://kep.k8s.io/NNN (or issue: https://github.com/etcd-io/etcd/issues/NNN, or main PR: https://github.com/etcd-io/etcd/pull/NNN)
+ // // alpha: v3.X
+ // MyFeature featuregate.Feature = "MyFeature"
+ //
+ // Feature gates should be listed in alphabetical, case-sensitive
+ // (upper before any lower case character) order. This reduces the risk
+ // of code conflicts because changes are more likely to be scattered
+ // across the file.
+
+ // DistributedTracing enables experimental distributed tracing using OpenTelemetry Tracing.
+ // owner: @dashpole
+ // alpha: v3.5
+ // issue: https://github.com/etcd-io/etcd/issues/12460
+ DistributedTracing featuregate.Feature = "DistributedTracing"
+	// StopGRPCServiceOnDefrag enables the etcd gRPC service to stop serving client requests during defragmentation.
+ // owner: @chaochn47
+ // alpha: v3.6
+ // main PR: https://github.com/etcd-io/etcd/pull/18279
+ StopGRPCServiceOnDefrag featuregate.Feature = "StopGRPCServiceOnDefrag"
+ // TxnModeWriteWithSharedBuffer enables the write transaction to use a shared buffer in its readonly check operations.
+ // owner: @wilsonwang371
+ // beta: v3.5
+ // main PR: https://github.com/etcd-io/etcd/pull/12896
+ TxnModeWriteWithSharedBuffer featuregate.Feature = "TxnModeWriteWithSharedBuffer"
+	// InitialCorruptCheck enables checking for data corruption before serving any client/peer traffic.
+ // owner: @serathius
+ // alpha: v3.6
+ // main PR: https://github.com/etcd-io/etcd/pull/10524
+ InitialCorruptCheck featuregate.Feature = "InitialCorruptCheck"
+	// CompactHashCheck enables the leader to periodically check followers' compaction hashes.
+ // owner: @serathius
+ // alpha: v3.6
+ // main PR: https://github.com/etcd-io/etcd/pull/14120
+ CompactHashCheck featuregate.Feature = "CompactHashCheck"
+)
+
+var (
+ DefaultEtcdServerFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
+ DistributedTracing: {Default: false, PreRelease: featuregate.Alpha},
+ StopGRPCServiceOnDefrag: {Default: false, PreRelease: featuregate.Alpha},
+ InitialCorruptCheck: {Default: false, PreRelease: featuregate.Alpha},
+ CompactHashCheck: {Default: false, PreRelease: featuregate.Alpha},
+ TxnModeWriteWithSharedBuffer: {Default: true, PreRelease: featuregate.Beta},
+ }
+ // ExperimentalFlagToFeatureMap is the map from the cmd line flags of experimental features
+ // to their corresponding feature gates.
+ // Deprecated: only add existing experimental features here. DO NOT use for new features.
+ ExperimentalFlagToFeatureMap = map[string]featuregate.Feature{
+ "experimental-stop-grpc-service-on-defrag": StopGRPCServiceOnDefrag,
+ "experimental-initial-corrupt-check": InitialCorruptCheck,
+ "experimental-compact-hash-check-enabled": CompactHashCheck,
+ "experimental-txn-mode-write-with-shared-buffer": TxnModeWriteWithSharedBuffer,
+ }
+)
+
+func NewDefaultServerFeatureGate(name string, lg *zap.Logger) featuregate.FeatureGate {
+ fg := featuregate.New(fmt.Sprintf("%sServerFeatureGate", name), lg)
+ if err := fg.Add(DefaultEtcdServerFeatureGates); err != nil {
+ panic(err)
+ }
+ return fg
+}
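
A short usage sketch for the new feature-gate helper, assuming the featuregate package exposes an Enabled method in the style of the Kubernetes feature gates it is modelled on (that method is not shown in this diff; the gate name "Example" is arbitrary):

```go
package main

import (
	"fmt"

	"go.uber.org/zap"

	"go.etcd.io/etcd/server/v3/features"
)

func main() {
	lg := zap.NewExample()

	// Gate pre-populated with DefaultEtcdServerFeatureGates.
	fg := features.NewDefaultServerFeatureGate("Example", lg)

	// Assumed API: FeatureGate exposes Enabled, as in the Kubernetes featuregate
	// design this package follows; defaults come from the spec map above.
	fmt.Println(fg.Enabled(features.TxnModeWriteWithSharedBuffer)) // true  (Beta, on by default)
	fmt.Println(fg.Enabled(features.StopGRPCServiceOnDefrag))      // false (Alpha, off by default)
}
```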
diff --git a/server/go.mod b/server/go.mod
index a2e33bb28f6..700fb89fa56 100644
--- a/server/go.mod
+++ b/server/go.mod
@@ -1,65 +1,93 @@
module go.etcd.io/etcd/server/v3
-go 1.16
+go 1.23
+
+toolchain go1.23.4
require (
- github.com/coreos/go-semver v0.3.0
- github.com/coreos/go-systemd/v22 v22.3.1
- github.com/dustin/go-humanize v1.0.0
- github.com/form3tech-oss/jwt-go v3.2.3+incompatible
+ github.com/coreos/go-semver v0.3.1
+ github.com/coreos/go-systemd/v22 v22.5.0
+ github.com/dustin/go-humanize v1.0.1
github.com/gogo/protobuf v1.3.2
+ github.com/golang-jwt/jwt/v4 v4.5.1
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
- github.com/golang/protobuf v1.5.2
- github.com/google/btree v1.0.0
+ github.com/golang/protobuf v1.5.4
+ github.com/google/btree v1.1.3
+ github.com/google/go-cmp v0.6.0
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
- github.com/grpc-ecosystem/grpc-gateway v1.16.0
- github.com/jonboulle/clockwork v0.2.2
- github.com/kr/text v0.2.0 // indirect
- github.com/prometheus/client_golang v1.5.1
- github.com/prometheus/client_model v0.2.0
- github.com/prometheus/common v0.10.0 // indirect
- github.com/prometheus/procfs v0.2.0 // indirect
- github.com/sirupsen/logrus v1.7.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1
+ github.com/jonboulle/clockwork v0.4.0
+ github.com/prometheus/client_golang v1.20.5
+ github.com/prometheus/client_model v0.6.1
github.com/soheilhy/cmux v0.1.5
- github.com/spf13/cobra v1.1.3
- github.com/stretchr/testify v1.7.0
+ github.com/spf13/cobra v1.8.1
+ github.com/stretchr/testify v1.10.0
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2
- go.etcd.io/bbolt v1.3.6-0.20210426205525-9c92be978ae0
- go.etcd.io/etcd/api/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/client/pkg/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/client/v2 v2.305.0-alpha.0
- go.etcd.io/etcd/client/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/raft/v3 v3.5.0-alpha.0
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0
- go.opentelemetry.io/otel v0.20.0
- go.opentelemetry.io/otel/exporters/otlp v0.20.0
- go.opentelemetry.io/otel/sdk v0.20.0
- go.uber.org/multierr v1.6.0
- go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19
- golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0
- golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb
- golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba
- google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013
- google.golang.org/grpc v1.37.0
- gopkg.in/natefinch/lumberjack.v2 v2.0.0
- sigs.k8s.io/yaml v1.2.0
+ go.etcd.io/bbolt v1.4.0-beta.0
+ go.etcd.io/etcd/api/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/client/v2 v2.306.0-alpha.0
+ go.etcd.io/etcd/client/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0
+ go.etcd.io/raft/v3 v3.6.0-beta.0
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0
+ go.opentelemetry.io/otel v1.33.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0
+ go.opentelemetry.io/otel/sdk v1.33.0
+ go.uber.org/zap v1.27.0
+ golang.org/x/crypto v0.31.0
+ golang.org/x/net v0.33.0
+ golang.org/x/time v0.8.0
+ google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb
+ google.golang.org/grpc v1.69.2
+ google.golang.org/protobuf v1.36.1
+ gopkg.in/natefinch/lumberjack.v2 v2.2.1
+ sigs.k8s.io/yaml v1.4.0
+)
+
+require (
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/gorilla/websocket v1.4.2 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/klauspost/compress v1.17.9 // indirect
+ github.com/kylelemons/godebug v1.1.0 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/prometheus/common v0.61.0 // indirect
+ github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/sirupsen/logrus v1.9.3 // indirect
+ github.com/spf13/pflag v1.0.5 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
+ go.opentelemetry.io/otel/metric v1.33.0 // indirect
+ go.opentelemetry.io/otel/trace v1.33.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.4.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/text v0.21.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+ sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
)
replace (
go.etcd.io/etcd/api/v3 => ../api
go.etcd.io/etcd/client/pkg/v3 => ../client/pkg
- go.etcd.io/etcd/client/v2 => ../client/v2
+ go.etcd.io/etcd/client/v2 => ./../client/internal/v2
go.etcd.io/etcd/client/v3 => ../client/v3
go.etcd.io/etcd/pkg/v3 => ../pkg
- go.etcd.io/etcd/raft/v3 => ../raft
)
// Bad imports are sometimes causing attempts to pull that code.
// This makes the error more explicit.
-replace (
- go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY
- go.etcd.io/tests/v3 => ./FORBIDDEN_DEPENDENCY
-)
+replace go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY
+
+replace go.etcd.io/tests/v3 => ./FORBIDDEN_DEPENDENCY
diff --git a/server/go.sum b/server/go.sum
index 3ccf56bb48c..2386a7c23e4 100644
--- a/server/go.sum
+++ b/server/go.sum
@@ -1,523 +1,243 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg=
-github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
-github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs=
-github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.3.1 h1:7OO2CXWMYNDdaAzP51t4lCCZWwpQHmvPbm9sxWjm3So=
-github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
+github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
-github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
+github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
-github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
+github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA=
-github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
+github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
-github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.6-0.20210426205525-9c92be978ae0 h1:FPuyGXkE6qPKJ71PyS0sdXuxUvYGXAXxV0XHpx0qjHE=
-go.etcd.io/bbolt v1.3.6-0.20210426205525-9c92be978ae0/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
-go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 h1:sO4WKdPAudZGKPcpZT4MJn6JaDmpyLrMPDGGyA1SttE=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
-go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g=
-go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
-go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8=
-go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
-go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw=
-go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
-go.opentelemetry.io/otel/sdk v0.20.0 h1:JsxtGXd06J8jrnya7fdI/U/MR6yXA5DtbZy+qoHQlr8=
-go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0 h1:c5VRjxCXdQlx1HjzwGdQHzZaVI82b5EbBgOu2ljD92g=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
-go.opentelemetry.io/otel/sdk/metric v0.20.0 h1:7ao1wpzHRVKf0OQ7GIxiQJA6X7DLX9o14gmVon7mMK8=
-go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
-go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw=
-go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
-go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.etcd.io/bbolt v1.4.0-beta.0 h1:U7Y9yH6ZojEo5/BDFMXDXD1RNx9L7iKxudzqR68jLaM=
+go.etcd.io/bbolt v1.4.0-beta.0/go.mod h1:Qv5yHB6jkQESXT/uVfxJgUPMqgAyhL0GLxcQaz9bSec=
+go.etcd.io/raft/v3 v3.6.0-beta.0 h1:MZFQVjCQxPJj5K9oS69Y+atNvYnGNyOQBnroTdw56jQ=
+go.etcd.io/raft/v3 v3.6.0-beta.0/go.mod h1:C2JoekRXfvImSrk5GnqD0aZ3a+cGVRnyem9qqn2DCEw=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
+go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
+go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
+go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
+go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
+go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
+go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
+go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
+go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
+go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19 h1:040c3dLNhgFQkoojH2AMpHCy4SrvhmxdU72d9GLGGE0=
-go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19/go.mod h1:aMfIlz3TDBfB0BwTCKFU1XbEmj9zevr5S5LcBr85MXw=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 h1:F5Gozwx4I1xtr/sr/8CFbb57iKi3297KFs0QDbGN60A=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
+golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb h1:B7GIB7sr443wZ/EAEl7VZjmh1V6qzkt5V+RYcUYtS1U=
+google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:E5//3O5ZIG2l71Xnt+P/CYUY8Bxs8E7WMoZ9tlcMbAY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb h1:3oy2tynMOP1QbTC0MsNNAV+Se8M2Bd0A5+x1QHyw+pI=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
+google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
+google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
+sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/server/lease/lease.go b/server/lease/lease.go
new file mode 100644
index 00000000000..95f3eb6f756
--- /dev/null
+++ b/server/lease/lease.go
@@ -0,0 +1,135 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lease
+
+import (
+ "math"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/server/v3/lease/leasepb"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+type Lease struct {
+ ID LeaseID
+ ttl int64 // time to live of the lease in seconds
+ remainingTTL int64 // remaining time to live in seconds; if zero-valued, it is considered unset and the full ttl should be used
+ // expiryMu protects concurrent accesses to expiry
+ expiryMu sync.RWMutex
+ // expiry is the time when the lease should expire; there is no expiration when expiry.IsZero() is true
+ expiry time.Time
+
+ // mu protects concurrent accesses to itemSet
+ mu sync.RWMutex
+ itemSet map[LeaseItem]struct{}
+ revokec chan struct{}
+}
+
+func NewLease(id LeaseID, ttl int64) *Lease {
+ return &Lease{
+ ID: id,
+ ttl: ttl,
+ itemSet: make(map[LeaseItem]struct{}),
+ revokec: make(chan struct{}),
+ }
+}
+
+func (l *Lease) expired() bool {
+ return l.Remaining() <= 0
+}
+
+func (l *Lease) persistTo(b backend.Backend) {
+ lpb := leasepb.Lease{ID: int64(l.ID), TTL: l.ttl, RemainingTTL: l.remainingTTL}
+ tx := b.BatchTx()
+ tx.LockInsideApply()
+ defer tx.Unlock()
+ schema.MustUnsafePutLease(tx, &lpb)
+}
+
+// TTL returns the TTL of the Lease.
+func (l *Lease) TTL() int64 {
+ return l.ttl
+}
+
+// SetLeaseItem sets the given lease item; this func is thread-safe.
+func (l *Lease) SetLeaseItem(item LeaseItem) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.itemSet[item] = struct{}{}
+}
+
+// getRemainingTTL returns the last checkpointed remaining TTL of the lease.
+func (l *Lease) getRemainingTTL() int64 {
+ if l.remainingTTL > 0 {
+ return l.remainingTTL
+ }
+ return l.ttl
+}
+
+// refresh refreshes the expiry of the lease.
+func (l *Lease) refresh(extend time.Duration) {
+ newExpiry := time.Now().Add(extend + time.Duration(l.getRemainingTTL())*time.Second)
+ l.expiryMu.Lock()
+ defer l.expiryMu.Unlock()
+ l.expiry = newExpiry
+}
+
+// forever sets the expiry of the lease to be forever.
+func (l *Lease) forever() {
+ l.expiryMu.Lock()
+ defer l.expiryMu.Unlock()
+ l.expiry = forever
+}
+
+// Demoted returns true if the lease's expiry has been reset to forever.
+func (l *Lease) Demoted() bool {
+ l.expiryMu.Lock()
+ defer l.expiryMu.Unlock()
+ return l.expiry == forever
+}
+
+// Keys returns all the keys attached to the lease.
+func (l *Lease) Keys() []string {
+ l.mu.RLock()
+ keys := make([]string, 0, len(l.itemSet))
+ for k := range l.itemSet {
+ keys = append(keys, k.Key)
+ }
+ l.mu.RUnlock()
+ return keys
+}
+
+// Remaining returns the remaining time of the lease.
+func (l *Lease) Remaining() time.Duration {
+ l.expiryMu.RLock()
+ defer l.expiryMu.RUnlock()
+ if l.expiry.IsZero() {
+ return time.Duration(math.MaxInt64)
+ }
+ return time.Until(l.expiry)
+}
+
+type LeaseItem struct {
+ Key string
+}
+
+// leasesByExpiry implements sort.Interface, ordering leases by remaining time.
+type leasesByExpiry []*Lease
+
+func (le leasesByExpiry) Len() int { return len(le) }
+func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() }
+func (le leasesByExpiry) Swap(i, j int) { le[i], le[j] = le[j], le[i] }
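The new `Lease` type above treats a zero `expiry` as "never expires" and guards it with its own `expiryMu`, so `Remaining()` can run concurrently with `refresh()`-style updates. A minimal standalone sketch of that pattern (illustrative only, not etcd's API; the `sketchLease` type and its methods are hypothetical names):

```go
package main

import (
	"fmt"
	"math"
	"sync"
	"time"
)

type sketchLease struct {
	mu     sync.RWMutex
	expiry time.Time // zero value means no expiration
}

// refresh moves the expiry forward by ttl from now.
func (l *sketchLease) refresh(ttl time.Duration) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.expiry = time.Now().Add(ttl)
}

// remaining mirrors the Remaining pattern: a zero expiry reports an
// effectively infinite duration instead of a negative one.
func (l *sketchLease) remaining() time.Duration {
	l.mu.RLock()
	defer l.mu.RUnlock()
	if l.expiry.IsZero() {
		return time.Duration(math.MaxInt64)
	}
	return time.Until(l.expiry)
}

func main() {
	l := &sketchLease{}
	fmt.Println(l.remaining()) // effectively infinite while no expiry is set
	l.refresh(5 * time.Second)
	fmt.Println(l.remaining() > 0) // true until the TTL elapses
}
```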
diff --git a/server/lease/lease_queue.go b/server/lease/lease_queue.go
index ffb7285ec08..74ffee5c696 100644
--- a/server/lease/lease_queue.go
+++ b/server/lease/lease_queue.go
@@ -42,14 +42,14 @@ func (pq LeaseQueue) Swap(i, j int) {
pq[j].index = j
}
-func (pq *LeaseQueue) Push(x interface{}) {
+func (pq *LeaseQueue) Push(x any) {
n := len(*pq)
item := x.(*LeaseWithTime)
item.index = n
*pq = append(*pq, item)
}
-func (pq *LeaseQueue) Pop() interface{} {
+func (pq *LeaseQueue) Pop() any {
old := *pq
n := len(old)
item := old[n-1]
@@ -96,7 +96,7 @@ func (mq *LeaseExpiredNotifier) Unregister() *LeaseWithTime {
return item
}
-func (mq *LeaseExpiredNotifier) Poll() *LeaseWithTime {
+func (mq *LeaseExpiredNotifier) Peek() *LeaseWithTime {
if mq.Len() == 0 {
return nil
}
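`Peek` (formerly `Poll`) only inspects the head of the expiry min-heap without removing it, which is what the new name conveys. A minimal standalone sketch of the same container/heap pattern (illustrative only; `expiryHeap` and `peek` are hypothetical names, not etcd's):

```go
package main

import (
	"container/heap"
	"fmt"
)

type item struct {
	id     int64
	expiry int64 // unix nanos; the smallest value expires first
}

type expiryHeap []*item

func (h expiryHeap) Len() int           { return len(h) }
func (h expiryHeap) Less(i, j int) bool { return h[i].expiry < h[j].expiry }
func (h expiryHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *expiryHeap) Push(x any)        { *h = append(*h, x.(*item)) }
func (h *expiryHeap) Pop() any {
	old := *h
	n := len(old)
	it := old[n-1]
	*h = old[:n-1]
	return it
}

// peek returns the soonest-expiring item without removing it; with
// container/heap the minimum is always at index 0.
func peek(h expiryHeap) *item {
	if len(h) == 0 {
		return nil
	}
	return h[0]
}

func main() {
	h := &expiryHeap{}
	heap.Init(h)
	heap.Push(h, &item{id: 2, expiry: 200})
	heap.Push(h, &item{id: 1, expiry: 100})
	fmt.Println(peek(*h).id, h.Len())            // 1 2 — peek does not shrink the heap
	fmt.Println(heap.Pop(h).(*item).id, h.Len()) // 1 1 — pop does
}
```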
diff --git a/server/lease/lease_queue_test.go b/server/lease/lease_queue_test.go
index c1200356152..a25af1a4f8d 100644
--- a/server/lease/lease_queue_test.go
+++ b/server/lease/lease_queue_test.go
@@ -40,18 +40,18 @@ func TestLeaseQueue(t *testing.T) {
}
// first element is expired.
- if le.leaseExpiredNotifier.Poll().id != LeaseID(1) {
- t.Fatalf("first item expected lease ID %d, got %d", LeaseID(1), le.leaseExpiredNotifier.Poll().id)
+ if le.leaseExpiredNotifier.Peek().id != LeaseID(1) {
+ t.Fatalf("first item expected lease ID %d, got %d", LeaseID(1), le.leaseExpiredNotifier.Peek().id)
}
existExpiredEvent := func() {
- l, ok, more := le.expireExists()
+ l, more := le.expireExists()
+ if l == nil {
+ t.Fatalf("expect expiry lease exists")
+ }
if l.ID != 1 {
t.Fatalf("first item expected lease ID %d, got %d", 1, l.ID)
}
- if !ok {
- t.Fatal("expect expiry lease exists")
- }
if more {
t.Fatal("expect no more expiry lease")
}
@@ -60,15 +60,15 @@ func TestLeaseQueue(t *testing.T) {
t.Fatalf("expected the expired lease to be pushed back to the heap, heap size got %d", le.leaseExpiredNotifier.Len())
}
- if le.leaseExpiredNotifier.Poll().id != LeaseID(1) {
- t.Fatalf("first item expected lease ID %d, got %d", LeaseID(1), le.leaseExpiredNotifier.Poll().id)
+ if le.leaseExpiredNotifier.Peek().id != LeaseID(1) {
+ t.Fatalf("first item expected lease ID %d, got %d", LeaseID(1), le.leaseExpiredNotifier.Peek().id)
}
}
noExpiredEvent := func() {
// re-acquire the expired item, nothing exists
- _, ok, more := le.expireExists()
- if ok {
+ l, more := le.expireExists()
+ if l != nil {
t.Fatal("expect no expiry lease exists")
}
if more {
diff --git a/server/lease/leasehttp/http.go b/server/lease/leasehttp/http.go
index 4b0a60a9be6..d5572c3aafc 100644
--- a/server/lease/leasehttp/http.go
+++ b/server/lease/leasehttp/http.go
@@ -19,7 +19,7 @@ import (
"context"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"time"
@@ -47,13 +47,13 @@ type leaseHandler struct {
}
func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if r.Method != "POST" {
+ if r.Method != http.MethodPost {
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
return
}
defer r.Body.Close()
- b, err := ioutil.ReadAll(r.Body)
+ b, err := io.ReadAll(r.Body)
if err != nil {
http.Error(w, "error reading body", http.StatusBadRequest)
return
@@ -75,7 +75,7 @@ func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
ttl, rerr := h.l.Renew(lease.LeaseID(lreq.ID))
if rerr != nil {
- if rerr == lease.ErrLeaseNotFound {
+ if errors.Is(rerr, lease.ErrLeaseNotFound) {
http.Error(w, rerr.Error(), http.StatusNotFound)
return
}
@@ -103,6 +103,9 @@ func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout)
return
}
+
+ // gofail: var beforeLookupWhenForwardLeaseTimeToLive struct{}
+
l := h.l.Lookup(lease.LeaseID(lreq.LeaseTimeToLiveRequest.ID))
if l == nil {
http.Error(w, lease.ErrLeaseNotFound.Error(), http.StatusNotFound)
@@ -126,6 +129,14 @@ func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
resp.LeaseTimeToLiveResponse.Keys = kbs
}
+ // The lessor could be demoted if the leader changed during the lookup.
+ // We should return an error to force a retry instead of returning an
+ // incorrect remaining TTL.
+ if l.Demoted() {
+ http.Error(w, lease.ErrNotPrimary.Error(), http.StatusInternalServerError)
+ return
+ }
+
v, err = resp.Marshal()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -150,8 +161,13 @@ func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundT
return -1, err
}
- cc := &http.Client{Transport: rt}
- req, err := http.NewRequest("POST", url, bytes.NewReader(lreq))
+ cc := &http.Client{
+ Transport: rt,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ }
+ req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(lreq))
if err != nil {
return -1, err
}
@@ -176,12 +192,12 @@ func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundT
}
if resp.StatusCode != http.StatusOK {
- return -1, fmt.Errorf("lease: unknown error(%s)", string(b))
+ return -1, fmt.Errorf("lease: unknown error(%s)", b)
}
lresp := &pb.LeaseKeepAliveResponse{}
if err := lresp.Unmarshal(b); err != nil {
- return -1, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b))
+ return -1, fmt.Errorf(`lease: %w. data = "%s"`, err, b)
}
if lresp.ID != int64(id) {
return -1, fmt.Errorf("lease: renew id mismatch")
@@ -202,7 +218,7 @@ func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string
return nil, err
}
- req, err := http.NewRequest("POST", url, bytes.NewReader(lreq))
+ req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(lreq))
if err != nil {
return nil, err
}
@@ -210,7 +226,12 @@ func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string
req = req.WithContext(ctx)
- cc := &http.Client{Transport: rt}
+ cc := &http.Client{
+ Transport: rt,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ }
var b []byte
// buffer errc channel so that errc doesn't block inside the goroutine
resp, err := cc.Do(req)
@@ -233,16 +254,16 @@ func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string
lresp := &leasepb.LeaseInternalResponse{}
if err := lresp.Unmarshal(b); err != nil {
- return nil, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b))
+ return nil, fmt.Errorf(`lease: %w. data = "%s"`, err, string(b))
}
if lresp.LeaseTimeToLiveResponse.ID != int64(id) {
- return nil, fmt.Errorf("lease: renew id mismatch")
+ return nil, fmt.Errorf("lease: TTL id mismatch")
}
return lresp, nil
}
func readResponse(resp *http.Response) (b []byte, err error) {
- b, err = ioutil.ReadAll(resp.Body)
+ b, err = io.ReadAll(resp.Body)
httputil.GracefulClose(resp)
return
}
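Both HTTP clients above now set `CheckRedirect` to return `http.ErrUseLastResponse`, which stops the client at the redirect response instead of transparently following it, so the caller sees the 3xx status rather than a response from an unexpected peer. A minimal standard-library sketch of that behavior (illustrative only, not etcd's code):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// A test server that always answers with a redirect.
	redirecting := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.Redirect(w, r, "http://example.invalid/elsewhere", http.StatusTemporaryRedirect)
	}))
	defer redirecting.Close()

	client := &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			// Do not follow redirects; hand the redirect response back to the caller.
			return http.ErrUseLastResponse
		},
	}

	resp, err := client.Post(redirecting.URL, "application/protobuf", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.StatusCode) // 307: the redirect itself, not the target's response
}
```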
diff --git a/server/lease/leasehttp/http_test.go b/server/lease/leasehttp/http_test.go
index ada3d3a2e2a..7fb284ff41f 100644
--- a/server/lease/leasehttp/http_test.go
+++ b/server/lease/leasehttp/http_test.go
@@ -21,9 +21,10 @@ import (
"testing"
"time"
- "go.etcd.io/etcd/server/v3/lease"
- betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
"go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/server/v3/lease"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
)
func TestRenewHTTP(t *testing.T) {
@@ -31,7 +32,7 @@ func TestRenewHTTP(t *testing.T) {
be, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
defer betesting.Close(t, be)
- le := lease.NewLessor(lg, be, lease.LessorConfig{MinLeaseTTL: int64(5)})
+ le := lease.NewLessor(lg, be, nil, lease.LessorConfig{MinLeaseTTL: int64(5)})
le.Promote(time.Second)
l, err := le.Grant(1, int64(5))
if err != nil {
@@ -55,7 +56,7 @@ func TestTimeToLiveHTTP(t *testing.T) {
be, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
defer betesting.Close(t, be)
- le := lease.NewLessor(lg, be, lease.LessorConfig{MinLeaseTTL: int64(5)})
+ le := lease.NewLessor(lg, be, nil, lease.LessorConfig{MinLeaseTTL: int64(5)})
le.Promote(time.Second)
l, err := le.Grant(1, int64(5))
if err != nil {
@@ -96,7 +97,7 @@ func testApplyTimeout(t *testing.T, f func(*lease.Lease, string) error) {
be, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
defer betesting.Close(t, be)
- le := lease.NewLessor(lg, be, lease.LessorConfig{MinLeaseTTL: int64(5)})
+ le := lease.NewLessor(lg, be, nil, lease.LessorConfig{MinLeaseTTL: int64(5)})
le.Promote(time.Second)
l, err := le.Grant(1, int64(5))
if err != nil {
diff --git a/server/lease/leasepb/lease.pb.go b/server/lease/leasepb/lease.pb.go
index 8a1c54922fc..ee833d31a73 100644
--- a/server/lease/leasepb/lease.pb.go
+++ b/server/lease/leasepb/lease.pb.go
@@ -156,7 +156,7 @@ func init() {
func init() { proto.RegisterFile("lease.proto", fileDescriptor_3dd57e402472b33a) }
var fileDescriptor_3dd57e402472b33a = []byte{
- // 256 bytes of a gzipped FileDescriptorProto
+ // 283 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c,
0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, 0x73, 0x0a, 0x92, 0xa4, 0x44, 0xd2,
0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x4a, 0x3e, 0xb5, 0x24, 0x39, 0x45,
@@ -170,9 +170,11 @@ var fileDescriptor_3dd57e402472b33a = []byte{
0x54, 0xa8, 0x0c, 0xd8, 0x46, 0x6e, 0x23, 0x15, 0x3d, 0x64, 0xf7, 0xe9, 0x61, 0x57, 0x1b, 0x84,
0xc3, 0x0c, 0xa5, 0x0a, 0x2e, 0x51, 0x34, 0x5b, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0xe2,
0xb9, 0xc4, 0x31, 0xb4, 0x40, 0xa4, 0xa0, 0xf6, 0xaa, 0x12, 0xb0, 0x17, 0xa2, 0x38, 0x08, 0x97,
- 0x29, 0x4e, 0x12, 0x27, 0x1e, 0xca, 0x31, 0x5c, 0x78, 0x28, 0xc7, 0x70, 0xe2, 0x91, 0x1c, 0xe3,
- 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xce, 0x78, 0x2c, 0xc7, 0x90, 0xc4, 0x06, 0x0e,
- 0x5f, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x8a, 0x94, 0xb9, 0xae, 0x01, 0x00, 0x00,
+ 0x29, 0x4e, 0x9e, 0x27, 0x1e, 0xca, 0x31, 0x5c, 0x78, 0x28, 0xc7, 0x70, 0xe2, 0x91, 0x1c, 0xe3,
+ 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xce, 0x78, 0x2c, 0xc7, 0x10, 0xa5, 0x9f, 0x9e,
+ 0x0f, 0x36, 0x5b, 0x2f, 0x33, 0x1f, 0x1c, 0xf6, 0xfa, 0x10, 0x4b, 0xf4, 0xcb, 0x8c, 0xf5, 0xc1,
+ 0x51, 0xa6, 0x0f, 0x8d, 0x38, 0x6b, 0x28, 0x9d, 0xc4, 0x06, 0x8e, 0x10, 0x63, 0x40, 0x00, 0x00,
+ 0x00, 0xff, 0xff, 0x0e, 0x16, 0x3b, 0xc4, 0xdf, 0x01, 0x00, 0x00,
}
func (m *Lease) Marshal() (dAtA []byte, err error) {
diff --git a/server/lease/leasepb/lease.proto b/server/lease/leasepb/lease.proto
index 5b40e3b17b6..d6317975483 100644
--- a/server/lease/leasepb/lease.proto
+++ b/server/lease/leasepb/lease.proto
@@ -4,6 +4,8 @@ package leasepb;
import "gogoproto/gogo.proto";
import "etcd/api/etcdserverpb/rpc.proto";
+option go_package = "go.etcd.io/etcd/server/v3/lease/leasepb;leasepb";
+
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
diff --git a/server/lease/lessor.go b/server/lease/lessor.go
index 5dba54db02e..cf2028933d5 100644
--- a/server/lease/lessor.go
+++ b/server/lease/lessor.go
@@ -17,17 +17,20 @@ package lease
import (
"container/heap"
"context"
- "encoding/binary"
"errors"
"math"
"sort"
"sync"
"time"
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/server/v3/lease/leasepb"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
- "go.uber.org/zap"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
)
// NoLease is a special LeaseID representing the absence of a lease.
@@ -39,10 +42,8 @@ const MaxLeaseTTL = 9000000000
var (
forever = time.Time{}
- leaseBucketName = []byte("lease")
-
- // maximum number of leases to revoke per second; configurable for tests
- leaseRevokeRate = 1000
+ // default number of leases to revoke per second; configurable for tests
+ defaultLeaseRevokeRate = 1000
// maximum number of lease checkpoints recorded to the consensus log per second; configurable for tests
leaseCheckpointRate = 1000
@@ -74,7 +75,7 @@ type RangeDeleter func() TxnDelete
// Checkpointer permits checkpointing of lease remaining TTLs to the consensus log. Defined here to
// avoid circular dependency with mvcc.
-type Checkpointer func(ctx context.Context, lc *pb.LeaseCheckpointRequest)
+type Checkpointer func(ctx context.Context, lc *pb.LeaseCheckpointRequest) error
type LeaseID int64
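Because the Checkpointer callback now returns an error, the lessor's callers (Renew and checkpointScheduledLeases below) can surface a failed checkpoint instead of silently dropping it. A minimal sketch of wiring a conforming checkpointer, where "propose" is a hypothetical stand-in for the real consensus-proposal path:

    // Sketch only: "propose" is assumed, not part of this diff; any error it
    // returns is propagated back through the lessor to the caller.
    func wireCheckpointer(le Lessor, propose func(context.Context, *pb.LeaseCheckpointRequest) error) {
    	le.SetCheckpointer(func(ctx context.Context, lc *pb.LeaseCheckpointRequest) error {
    		return propose(ctx, lc)
    	})
    }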
@@ -169,6 +170,9 @@ type lessor struct {
// requests for shorter TTLs are extended to the minimum TTL.
minLeaseTTL int64
+ // maximum number of leases to revoke per second
+ leaseRevokeRate int
+
expiredC chan []*Lease
// stopC is a channel whose closure indicates that the lessor should be stopped.
stopC chan struct{}
@@ -181,27 +185,43 @@ type lessor struct {
checkpointInterval time.Duration
// the interval to check if the expired lease is revoked
expiredLeaseRetryInterval time.Duration
+ // whether lessor should always persist remaining TTL (always enabled in v3.6).
+ checkpointPersist bool
+ // cluster is used to adapt lessor logic based on cluster version
+ cluster cluster
+}
+
+type cluster interface {
+ // Version is the cluster-wide minimum major.minor version.
+ Version() *semver.Version
}
type LessorConfig struct {
MinLeaseTTL int64
CheckpointInterval time.Duration
ExpiredLeasesRetryInterval time.Duration
+ CheckpointPersist bool
+
+ leaseRevokeRate int
}
-func NewLessor(lg *zap.Logger, b backend.Backend, cfg LessorConfig) Lessor {
- return newLessor(lg, b, cfg)
+func NewLessor(lg *zap.Logger, b backend.Backend, cluster cluster, cfg LessorConfig) Lessor {
+ return newLessor(lg, b, cluster, cfg)
}
-func newLessor(lg *zap.Logger, b backend.Backend, cfg LessorConfig) *lessor {
+func newLessor(lg *zap.Logger, b backend.Backend, cluster cluster, cfg LessorConfig) *lessor {
checkpointInterval := cfg.CheckpointInterval
expiredLeaseRetryInterval := cfg.ExpiredLeasesRetryInterval
+ leaseRevokeRate := cfg.leaseRevokeRate
if checkpointInterval == 0 {
checkpointInterval = defaultLeaseCheckpointInterval
}
if expiredLeaseRetryInterval == 0 {
expiredLeaseRetryInterval = defaultExpiredleaseRetryInterval
}
+ if leaseRevokeRate == 0 {
+ leaseRevokeRate = defaultLeaseRevokeRate
+ }
l := &lessor{
leaseMap: make(map[LeaseID]*Lease),
itemMap: make(map[LeaseItem]LeaseID),
@@ -209,13 +229,16 @@ func newLessor(lg *zap.Logger, b backend.Backend, cfg LessorConfig) *lessor {
leaseCheckpointHeap: make(LeaseQueue, 0),
b: b,
minLeaseTTL: cfg.MinLeaseTTL,
+ leaseRevokeRate: leaseRevokeRate,
checkpointInterval: checkpointInterval,
expiredLeaseRetryInterval: expiredLeaseRetryInterval,
+ checkpointPersist: cfg.CheckpointPersist,
// expiredC is a small buffered chan to avoid unnecessary blocking.
expiredC: make(chan []*Lease, 16),
stopC: make(chan struct{}),
doneC: make(chan struct{}),
lg: lg,
+ cluster: cluster,
}
l.initAndRecover()
@@ -266,12 +289,7 @@ func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) {
// TODO: when lessor is under high load, it should give out lease
// with longer TTL to reduce renew load.
- l := &Lease{
- ID: id,
- ttl: ttl,
- itemSet: make(map[LeaseItem]struct{}),
- revokec: make(chan struct{}),
- }
+ l := NewLease(id, ttl)
le.mu.Lock()
defer le.mu.Unlock()
@@ -313,6 +331,7 @@ func (le *lessor) Revoke(id LeaseID) error {
le.mu.Unlock()
return ErrLeaseNotFound
}
+
defer close(l.revokec)
// unlock before doing external work
le.mu.Unlock()
@@ -337,7 +356,7 @@ func (le *lessor) Revoke(id LeaseID) error {
// lease deletion needs to be in the same backend transaction as the
// kv deletion. Otherwise we might end up not executing the revoke or not
// deleting the keys if etcdserver fails in between.
- le.b.BatchTx().UnsafeDelete(leaseBucketName, int64ToBytes(int64(l.ID)))
+ schema.UnsafeDeleteLease(le.b.BatchTx(), &leasepb.Lease{ID: int64(l.ID)})
txn.End()
@@ -352,6 +371,9 @@ func (le *lessor) Checkpoint(id LeaseID, remainingTTL int64) error {
if l, ok := le.leaseMap[id]; ok {
// when checkpointing, we only update the remainingTTL, Promote is responsible for applying this to lease expiry
l.remainingTTL = remainingTTL
+ if le.shouldPersistCheckpoints() {
+ l.persistTo(le.b)
+ }
if le.isPrimary() {
// schedule the next checkpoint as needed
le.scheduleCheckpointIfNeeded(l)
@@ -360,6 +382,15 @@ func (le *lessor) Checkpoint(id LeaseID, remainingTTL int64) error {
return nil
}
+func (le *lessor) shouldPersistCheckpoints() bool {
+ cv := le.cluster.Version()
+ return le.checkpointPersist || (cv != nil && greaterOrEqual(*cv, version.V3_6))
+}
+
+func greaterOrEqual(first, second semver.Version) bool {
+ return !version.LessThan(first, second)
+}
+
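A short note on the gate above: shouldPersistCheckpoints returns true either when CheckpointPersist was requested explicitly or when the cluster-wide minimum version reported by the cluster interface is at least 3.6. A hedged sketch of a caller providing that version source (the memberCluster type is illustrative, not part of this change):

    // Illustrative only: memberCluster stands in for whatever type the server
    // uses to report the cluster-wide minimum version.
    type memberCluster struct{ v *semver.Version }

    func (c memberCluster) Version() *semver.Version { return c.v }

    func newVersionAwareLessor(lg *zap.Logger, be backend.Backend) Lessor {
    	// On a >=3.6 cluster the remaining TTL is persisted even with
    	// CheckpointPersist left unset.
    	return NewLessor(lg, be, memberCluster{v: semver.New("3.6.0")}, LessorConfig{MinLeaseTTL: 1})
    }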
// Renew renews an existing lease. If the given lease does not exist or
// has expired, an error will be returned.
func (le *lessor) Renew(id LeaseID) (int64, error) {
@@ -401,7 +432,9 @@ func (le *lessor) Renew(id LeaseID) (int64, error) {
// By applying a RAFT entry only when the remainingTTL is already set, we limit the number
// of RAFT entries written per lease to a max of 2 per checkpoint interval.
if clearRemainingTTL {
- le.cp(context.Background(), &pb.LeaseCheckpointRequest{Checkpoints: []*pb.LeaseCheckpoint{{ID: int64(l.ID), Remaining_TTL: 0}}})
+ if err := le.cp(context.Background(), &pb.LeaseCheckpointRequest{Checkpoints: []*pb.LeaseCheckpoint{{ID: int64(l.ID), Remaining_TTL: 0}}}); err != nil {
+ return -1, err
+ }
}
le.mu.Lock()
@@ -447,9 +480,10 @@ func (le *lessor) Promote(extend time.Duration) {
l.refresh(extend)
item := &LeaseWithTime{id: l.ID, time: l.expiry}
le.leaseExpiredNotifier.RegisterOrUpdate(item)
+ le.scheduleCheckpointIfNeeded(l)
}
- if len(le.leaseMap) < leaseRevokeRate {
+ if len(le.leaseMap) < le.leaseRevokeRate {
// no possibility of lease pile-up
return
}
@@ -463,7 +497,7 @@ func (le *lessor) Promote(extend time.Duration) {
expires := 0
// keep expirations below the total revoke rate so piled-up leases
// don't consume the entire revoke limit
- targetExpiresPerSecond := (3 * leaseRevokeRate) / 4
+ targetExpiresPerSecond := (3 * le.leaseRevokeRate) / 4
for _, l := range leases {
remaining := l.Remaining()
if remaining > nextWindow {
@@ -489,12 +523,6 @@ func (le *lessor) Promote(extend time.Duration) {
}
}
-type leasesByExpiry []*Lease
-
-func (le leasesByExpiry) Len() int { return len(le) }
-func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() }
-func (le leasesByExpiry) Swap(i, j int) { le[i], le[j] = le[j], le[i] }
-
func (le *lessor) Demote() {
le.mu.Lock()
defer le.mu.Unlock()
@@ -584,12 +612,15 @@ func (le *lessor) Stop() {
func (le *lessor) runLoop() {
defer close(le.doneC)
+ delayTicker := time.NewTicker(500 * time.Millisecond)
+ defer delayTicker.Stop()
+
for {
le.revokeExpiredLeases()
le.checkpointScheduledLeases()
select {
- case <-time.After(500 * time.Millisecond):
+ case <-delayTicker.C:
case <-le.stopC:
return
}
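The loop above now reuses a single ticker instead of calling time.After on every iteration, which otherwise allocates a fresh timer (and channel) each time around. The same pattern in isolation, with doPeriodicWork and stopc as assumed placeholders:

    // Sketch of the reused-ticker pattern; doPeriodicWork and stopc are assumptions.
    ticker := time.NewTicker(500 * time.Millisecond)
    defer ticker.Stop()
    for {
    	doPeriodicWork()
    	select {
    	case <-ticker.C:
    	case <-stopc:
    		return
    	}
    }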
@@ -602,7 +633,7 @@ func (le *lessor) revokeExpiredLeases() {
var ls []*Lease
// rate limit
- revokeLimit := leaseRevokeRate / 2
+ revokeLimit := le.leaseRevokeRate / 2
le.mu.RLock()
if le.isPrimary() {
@@ -626,10 +657,10 @@ func (le *lessor) revokeExpiredLeases() {
// checkpointScheduledLeases finds all scheduled lease checkpoints that are due and
// submits them to the checkpointer to persist them to the consensus log.
func (le *lessor) checkpointScheduledLeases() {
- var cps []*pb.LeaseCheckpoint
-
// rate limit
for i := 0; i < leaseCheckpointRate/2; i++ {
+ var cps []*pb.LeaseCheckpoint
+
le.mu.Lock()
if le.isPrimary() {
cps = le.findDueScheduledCheckpoints(maxLeaseCheckpointBatchSize)
@@ -637,7 +668,9 @@ func (le *lessor) checkpointScheduledLeases() {
le.mu.Unlock()
if len(cps) != 0 {
- le.cp(context.Background(), &pb.LeaseCheckpointRequest{Checkpoints: cps})
+ if err := le.cp(context.Background(), &pb.LeaseCheckpointRequest{Checkpoints: cps}); err != nil {
+ return
+ }
}
if len(cps) < maxLeaseCheckpointBatchSize {
return
@@ -653,33 +686,33 @@ func (le *lessor) clearLeaseExpiredNotifier() {
le.leaseExpiredNotifier = newLeaseExpiredNotifier()
}
-// expireExists returns true if expiry items exist.
+// expireExists returns a non-nil lease "l" if an expiry item exists.
// It pops only when expiry item exists.
// "next" is true, to indicate that it may exist in next attempt.
-func (le *lessor) expireExists() (l *Lease, ok bool, next bool) {
+func (le *lessor) expireExists() (l *Lease, next bool) {
if le.leaseExpiredNotifier.Len() == 0 {
- return nil, false, false
+ return nil, false
}
- item := le.leaseExpiredNotifier.Poll()
+ item := le.leaseExpiredNotifier.Peek()
l = le.leaseMap[item.id]
if l == nil {
// lease has expired or been revoked
// no need to revoke (nothing has expired)
le.leaseExpiredNotifier.Unregister() // O(log N)
- return nil, false, true
+ return nil, true
}
now := time.Now()
if now.Before(item.time) /* item.time: expiration time */ {
// Candidate expirations are caught up, reinsert this item
// and no need to revoke (nothing has expired)
- return l, false, false
+ return nil, false
}
// recheck if revoke is complete after retry interval
item.time = now.Add(le.expiredLeaseRetryInterval)
le.leaseExpiredNotifier.RegisterOrUpdate(item)
- return l, true, false
+ return l, false
}
// findExpiredLeases loops leases in the leaseMap until reaching expired limit
@@ -688,13 +721,10 @@ func (le *lessor) findExpiredLeases(limit int) []*Lease {
leases := make([]*Lease, 0, 16)
for {
- l, ok, next := le.expireExists()
- if !ok && !next {
+ l, next := le.expireExists()
+ if l == nil && !next {
break
}
- if !ok {
- continue
- }
if next {
continue
}
@@ -717,7 +747,7 @@ func (le *lessor) scheduleCheckpointIfNeeded(lease *Lease) {
return
}
- if lease.RemainingTTL() > int64(le.checkpointInterval.Seconds()) {
+ if lease.getRemainingTTL() > int64(le.checkpointInterval.Seconds()) {
if le.lg != nil {
le.lg.Debug("Scheduling lease checkpoint",
zap.Int64("leaseID", int64(lease.ID)),
@@ -737,7 +767,7 @@ func (le *lessor) findDueScheduledCheckpoints(checkpointLimit int) []*pb.LeaseCh
}
now := time.Now()
- cps := []*pb.LeaseCheckpoint{}
+ var cps []*pb.LeaseCheckpoint
for le.leaseCheckpointHeap.Len() > 0 && len(cps) < checkpointLimit {
lt := le.leaseCheckpointHeap[0]
if lt.time.After(now) /* lt.time: next checkpoint time */ {
@@ -769,18 +799,12 @@ func (le *lessor) findDueScheduledCheckpoints(checkpointLimit int) []*pb.LeaseCh
func (le *lessor) initAndRecover() {
tx := le.b.BatchTx()
- tx.Lock()
-
- tx.UnsafeCreateBucket(leaseBucketName)
- _, vs := tx.UnsafeRange(leaseBucketName, int64ToBytes(0), int64ToBytes(math.MaxInt64), 0)
- // TODO: copy vs and do decoding outside tx lock if lock contention becomes an issue.
- for i := range vs {
- var lpb leasepb.Lease
- err := lpb.Unmarshal(vs[i])
- if err != nil {
- tx.Unlock()
- panic("failed to unmarshal lease proto item")
- }
+
+ tx.LockOutsideApply()
+ schema.UnsafeCreateLeaseBucket(tx)
+ lpbs := schema.MustUnsafeGetAllLeases(tx)
+ tx.Unlock()
+ for _, lpb := range lpbs {
ID := LeaseID(lpb.ID)
if lpb.TTL < le.minLeaseTTL {
lpb.TTL = le.minLeaseTTL
@@ -790,120 +814,32 @@ func (le *lessor) initAndRecover() {
ttl: lpb.TTL,
// itemSet will be filled in when recover key-value pairs
// set expiry to forever, refresh when promoted
- itemSet: make(map[LeaseItem]struct{}),
- expiry: forever,
- revokec: make(chan struct{}),
+ itemSet: make(map[LeaseItem]struct{}),
+ expiry: forever,
+ revokec: make(chan struct{}),
+ remainingTTL: lpb.RemainingTTL,
}
}
le.leaseExpiredNotifier.Init()
heap.Init(&le.leaseCheckpointHeap)
- tx.Unlock()
le.b.ForceCommit()
}
-type Lease struct {
- ID LeaseID
- ttl int64 // time to live of the lease in seconds
- remainingTTL int64 // remaining time to live in seconds, if zero valued it is considered unset and the full ttl should be used
- // expiryMu protects concurrent accesses to expiry
- expiryMu sync.RWMutex
- // expiry is time when lease should expire. no expiration when expiry.IsZero() is true
- expiry time.Time
-
- // mu protects concurrent accesses to itemSet
- mu sync.RWMutex
- itemSet map[LeaseItem]struct{}
- revokec chan struct{}
-}
-
-func (l *Lease) expired() bool {
- return l.Remaining() <= 0
-}
-
-func (l *Lease) persistTo(b backend.Backend) {
- key := int64ToBytes(int64(l.ID))
-
- lpb := leasepb.Lease{ID: int64(l.ID), TTL: l.ttl, RemainingTTL: l.remainingTTL}
- val, err := lpb.Marshal()
- if err != nil {
- panic("failed to marshal lease proto item")
- }
-
- b.BatchTx().Lock()
- b.BatchTx().UnsafePut(leaseBucketName, key, val)
- b.BatchTx().Unlock()
-}
-
-// TTL returns the TTL of the Lease.
-func (l *Lease) TTL() int64 {
- return l.ttl
-}
-
-// RemainingTTL returns the last checkpointed remaining TTL of the lease.
-// TODO(jpbetz): do not expose this utility method
-func (l *Lease) RemainingTTL() int64 {
- if l.remainingTTL > 0 {
- return l.remainingTTL
- }
- return l.ttl
-}
-
-// refresh refreshes the expiry of the lease.
-func (l *Lease) refresh(extend time.Duration) {
- newExpiry := time.Now().Add(extend + time.Duration(l.RemainingTTL())*time.Second)
- l.expiryMu.Lock()
- defer l.expiryMu.Unlock()
- l.expiry = newExpiry
-}
-
-// forever sets the expiry of lease to be forever.
-func (l *Lease) forever() {
- l.expiryMu.Lock()
- defer l.expiryMu.Unlock()
- l.expiry = forever
-}
-
-// Keys returns all the keys attached to the lease.
-func (l *Lease) Keys() []string {
- l.mu.RLock()
- keys := make([]string, 0, len(l.itemSet))
- for k := range l.itemSet {
- keys = append(keys, k.Key)
- }
- l.mu.RUnlock()
- return keys
-}
-
-// Remaining returns the remaining time of the lease.
-func (l *Lease) Remaining() time.Duration {
- l.expiryMu.RLock()
- defer l.expiryMu.RUnlock()
- if l.expiry.IsZero() {
- return time.Duration(math.MaxInt64)
- }
- return time.Until(l.expiry)
-}
-
-type LeaseItem struct {
- Key string
-}
-
-func int64ToBytes(n int64) []byte {
- bytes := make([]byte, 8)
- binary.BigEndian.PutUint64(bytes, uint64(n))
- return bytes
-}
-
// FakeLessor is a fake implementation of Lessor interface.
// Used for testing only.
-type FakeLessor struct{}
+type FakeLessor struct {
+ LeaseSet map[LeaseID]struct{}
+}
func (fl *FakeLessor) SetRangeDeleter(dr RangeDeleter) {}
func (fl *FakeLessor) SetCheckpointer(cp Checkpointer) {}
-func (fl *FakeLessor) Grant(id LeaseID, ttl int64) (*Lease, error) { return nil, nil }
+func (fl *FakeLessor) Grant(id LeaseID, ttl int64) (*Lease, error) {
+ fl.LeaseSet[id] = struct{}{}
+ return nil, nil
+}
func (fl *FakeLessor) Revoke(id LeaseID) error { return nil }
@@ -920,7 +856,12 @@ func (fl *FakeLessor) Demote() {}
func (fl *FakeLessor) Renew(id LeaseID) (int64, error) { return 10, nil }
-func (fl *FakeLessor) Lookup(id LeaseID) *Lease { return nil }
+func (fl *FakeLessor) Lookup(id LeaseID) *Lease {
+ if _, ok := fl.LeaseSet[id]; ok {
+ return &Lease{ID: id}
+ }
+ return nil
+}
func (fl *FakeLessor) Leases() []*Lease { return nil }
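Since FakeLessor.Grant now records IDs in LeaseSet, tests that construct a FakeLessor by hand need to initialize the map first; writing to a nil map panics. A minimal usage sketch:

    // Initialize LeaseSet before Grant is called, otherwise the write panics.
    fl := &FakeLessor{LeaseSet: make(map[LeaseID]struct{})}
    fl.Grant(1, 60)
    if fl.Lookup(1) == nil {
    	panic("expected lease 1 to be tracked by the fake")
    }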
diff --git a/server/lease/lessor_bench_test.go b/server/lease/lessor_bench_test.go
index 06feec810c5..8e6ff791b19 100644
--- a/server/lease/lessor_bench_test.go
+++ b/server/lease/lessor_bench_test.go
@@ -19,8 +19,9 @@ import (
"testing"
"time"
- betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
"go.uber.org/zap"
+
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
)
func BenchmarkLessorGrant1000(b *testing.B) { benchmarkLessorGrant(1000, b) }
@@ -32,14 +33,10 @@ func BenchmarkLessorRevoke100000(b *testing.B) { benchmarkLessorRevoke(100000, b
func BenchmarkLessorRenew1000(b *testing.B) { benchmarkLessorRenew(1000, b) }
func BenchmarkLessorRenew100000(b *testing.B) { benchmarkLessorRenew(100000, b) }
-// Use findExpired10000 replace findExpired1000, which takes too long.
+// BenchmarkLessorFindExpired10000 uses findExpired10000 in place of findExpired1000, which takes too long.
func BenchmarkLessorFindExpired10000(b *testing.B) { benchmarkLessorFindExpired(10000, b) }
func BenchmarkLessorFindExpired100000(b *testing.B) { benchmarkLessorFindExpired(100000, b) }
-func init() {
- rand.Seed(time.Now().UTC().UnixNano())
-}
-
const (
// minTTL keeps leases from auto-expiring in benchmarks
minTTL = 1000
@@ -68,7 +65,7 @@ func setUp(t testing.TB) (le *lessor, tearDown func()) {
be, _ := betesting.NewDefaultTmpBackend(t)
// MinLeaseTTL is negative, so we can grant expired leases in benchmarks.
// ExpiredLeasesRetryInterval should be small, so the findExpired benchmark will recheck expired leases.
- le = newLessor(lg, be, LessorConfig{MinLeaseTTL: -1000, ExpiredLeasesRetryInterval: 10 * time.Microsecond})
+ le = newLessor(lg, be, nil, LessorConfig{MinLeaseTTL: -1000, ExpiredLeasesRetryInterval: 10 * time.Microsecond})
le.SetRangeDeleter(func() TxnDelete {
ftd := &FakeTxnDelete{be.BatchTx()}
ftd.Lock()
@@ -93,7 +90,6 @@ func benchmarkLessorGrant(benchSize int, b *testing.B) {
b.StopTimer()
if tearDown != nil {
tearDown()
- tearDown = nil
}
le, tearDown = setUp(b)
b.StartTimer()
@@ -120,7 +116,6 @@ func benchmarkLessorRevoke(benchSize int, b *testing.B) {
b.StopTimer()
if tearDown != nil {
tearDown()
- tearDown = nil
}
le, tearDown = setUp(b)
for j := 1; j <= benchSize; j++ {
@@ -151,7 +146,6 @@ func benchmarkLessorRenew(benchSize int, b *testing.B) {
b.StopTimer()
if tearDown != nil {
tearDown()
- tearDown = nil
}
le, tearDown = setUp(b)
for j := 1; j <= benchSize; j++ {
@@ -184,7 +178,6 @@ func benchmarkLessorFindExpired(benchSize int, b *testing.B) {
b.StopTimer()
if tearDown != nil {
tearDown()
- tearDown = nil
}
le, tearDown = setUp(b)
for j := 1; j <= benchSize; j++ {
diff --git a/server/lease/lessor_test.go b/server/lease/lessor_test.go
index c6cb0518e84..f46e2b39949 100644
--- a/server/lease/lessor_test.go
+++ b/server/lease/lessor_test.go
@@ -16,8 +16,8 @@ package lease
import (
"context"
+ "errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"reflect"
@@ -26,9 +26,14 @@ import (
"testing"
"time"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
+ "github.com/coreos/go-semver/semver"
"go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
)
const (
@@ -45,7 +50,7 @@ func TestLessorGrant(t *testing.T) {
defer os.RemoveAll(dir)
defer be.Close()
- le := newLessor(lg, be, LessorConfig{MinLeaseTTL: minLeaseTTL})
+ le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL})
defer le.Stop()
le.Promote(0)
@@ -91,12 +96,13 @@ func TestLessorGrant(t *testing.T) {
}
}
- be.BatchTx().Lock()
- _, vs := be.BatchTx().UnsafeRange(leaseBucketName, int64ToBytes(int64(l.ID)), nil, 0)
- if len(vs) != 1 {
- t.Errorf("len(vs) = %d, want 1", len(vs))
+ tx := be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+ lpb := schema.MustUnsafeGetLease(tx, int64(l.ID))
+ if lpb == nil {
+ t.Errorf("lpb = %d, want not nil", lpb)
}
- be.BatchTx().Unlock()
}
// TestLeaseConcurrentKeys ensures Lease.Keys method calls are guarded
@@ -107,7 +113,7 @@ func TestLeaseConcurrentKeys(t *testing.T) {
defer os.RemoveAll(dir)
defer be.Close()
- le := newLessor(lg, be, LessorConfig{MinLeaseTTL: minLeaseTTL})
+ le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL})
defer le.Stop()
le.SetRangeDeleter(func() TxnDelete { return newFakeDeleter(be) })
@@ -156,7 +162,7 @@ func TestLessorRevoke(t *testing.T) {
defer os.RemoveAll(dir)
defer be.Close()
- le := newLessor(lg, be, LessorConfig{MinLeaseTTL: minLeaseTTL})
+ le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL})
defer le.Stop()
var fd *fakeDeleter
le.SetRangeDeleter(func() TxnDelete {
@@ -194,12 +200,36 @@ func TestLessorRevoke(t *testing.T) {
t.Errorf("deleted= %v, want %v", fd.deleted, wdeleted)
}
- be.BatchTx().Lock()
- _, vs := be.BatchTx().UnsafeRange(leaseBucketName, int64ToBytes(int64(l.ID)), nil, 0)
- if len(vs) != 0 {
- t.Errorf("len(vs) = %d, want 0", len(vs))
+ tx := be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+ lpb := schema.MustUnsafeGetLease(tx, int64(l.ID))
+ if lpb != nil {
+ t.Errorf("lpb = %d, want nil", lpb)
+ }
+}
+
+func renew(t *testing.T, le *lessor, id LeaseID) int64 {
+ ch := make(chan int64, 1)
+ errch := make(chan error, 1)
+ go func() {
+ ttl, err := le.Renew(id)
+ if err != nil {
+ errch <- err
+ } else {
+ ch <- ttl
+ }
+ }()
+
+ select {
+ case ttl := <-ch:
+ return ttl
+ case err := <-errch:
+ t.Fatalf("failed to renew lease (%v)", err)
+ case <-time.After(10 * time.Second):
+ t.Fatal("timed out while renewing lease")
}
- be.BatchTx().Unlock()
+ panic("unreachable")
}
// TestLessorRenew ensures Lessor can renew an existing lease.
@@ -209,7 +239,7 @@ func TestLessorRenew(t *testing.T) {
defer be.Close()
defer os.RemoveAll(dir)
- le := newLessor(lg, be, LessorConfig{MinLeaseTTL: minLeaseTTL})
+ le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL})
defer le.Stop()
le.Promote(0)
@@ -222,10 +252,7 @@ func TestLessorRenew(t *testing.T) {
le.mu.Lock()
l.ttl = 10
le.mu.Unlock()
- ttl, err := le.Renew(l.ID)
- if err != nil {
- t.Fatalf("failed to renew lease (%v)", err)
- }
+ ttl := renew(t, le, l.ID)
if ttl != l.ttl {
t.Errorf("ttl = %d, want %d", ttl, l.ttl)
}
@@ -242,11 +269,12 @@ func TestLessorRenewWithCheckpointer(t *testing.T) {
defer be.Close()
defer os.RemoveAll(dir)
- le := newLessor(lg, be, LessorConfig{MinLeaseTTL: minLeaseTTL})
- fakerCheckerpointer := func(ctx context.Context, cp *pb.LeaseCheckpointRequest) {
+ le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL})
+ fakerCheckerpointer := func(ctx context.Context, cp *pb.LeaseCheckpointRequest) error {
for _, cp := range cp.GetCheckpoints() {
le.Checkpoint(LeaseID(cp.GetID()), cp.GetRemaining_TTL())
}
+ return nil
}
defer le.Stop()
// Set checkpointer
@@ -263,10 +291,7 @@ func TestLessorRenewWithCheckpointer(t *testing.T) {
l.ttl = 10
l.remainingTTL = 10
le.mu.Unlock()
- ttl, err := le.Renew(l.ID)
- if err != nil {
- t.Fatalf("failed to renew lease (%v)", err)
- }
+ ttl := renew(t, le, l.ID)
if ttl != l.ttl {
t.Errorf("ttl = %d, want %d", ttl, l.ttl)
}
@@ -283,17 +308,15 @@ func TestLessorRenewWithCheckpointer(t *testing.T) {
// TestLessorRenewExtendPileup ensures Lessor extends leases on promotion if too many
// expire at the same time.
func TestLessorRenewExtendPileup(t *testing.T) {
- oldRevokeRate := leaseRevokeRate
- defer func() { leaseRevokeRate = oldRevokeRate }()
+ leaseRevokeRate := 10
lg := zap.NewNop()
- leaseRevokeRate = 10
dir, be := NewTestBackend(t)
defer os.RemoveAll(dir)
- le := newLessor(lg, be, LessorConfig{MinLeaseTTL: minLeaseTTL})
+ le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL, leaseRevokeRate: leaseRevokeRate})
ttl := int64(10)
- for i := 1; i <= leaseRevokeRate*10; i++ {
+ for i := 1; i <= le.leaseRevokeRate*10; i++ {
if _, err := le.Grant(LeaseID(2*i), ttl); err != nil {
t.Fatal(err)
}
@@ -306,11 +329,11 @@ func TestLessorRenewExtendPileup(t *testing.T) {
// simulate stop and recovery
le.Stop()
be.Close()
- bcfg := backend.DefaultBackendConfig()
+ bcfg := backend.DefaultBackendConfig(lg)
bcfg.Path = filepath.Join(dir, "be")
be = backend.New(bcfg)
defer be.Close()
- le = newLessor(lg, be, LessorConfig{MinLeaseTTL: minLeaseTTL})
+ le = newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL, leaseRevokeRate: leaseRevokeRate})
defer le.Stop()
// extend after recovery should extend expiration on lease pile-up
@@ -325,11 +348,11 @@ func TestLessorRenewExtendPileup(t *testing.T) {
for i := ttl; i < ttl+20; i++ {
c := windowCounts[i]
- if c > leaseRevokeRate {
- t.Errorf("expected at most %d expiring at %ds, got %d", leaseRevokeRate, i, c)
+ if c > le.leaseRevokeRate {
+ t.Errorf("expected at most %d expiring at %ds, got %d", le.leaseRevokeRate, i, c)
}
- if c < leaseRevokeRate/2 {
- t.Errorf("expected at least %d expiring at %ds, got %d", leaseRevokeRate/2, i, c)
+ if c < le.leaseRevokeRate/2 {
+ t.Errorf("expected at least %d expiring at %ds, got %d", le.leaseRevokeRate/2, i, c)
}
}
}
@@ -340,7 +363,7 @@ func TestLessorDetach(t *testing.T) {
defer os.RemoveAll(dir)
defer be.Close()
- le := newLessor(lg, be, LessorConfig{MinLeaseTTL: minLeaseTTL})
+ le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL})
defer le.Stop()
le.SetRangeDeleter(func() TxnDelete { return newFakeDeleter(be) })
@@ -381,7 +404,7 @@ func TestLessorRecover(t *testing.T) {
defer os.RemoveAll(dir)
defer be.Close()
- le := newLessor(lg, be, LessorConfig{MinLeaseTTL: minLeaseTTL})
+ le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL})
defer le.Stop()
l1, err1 := le.Grant(1, 10)
l2, err2 := le.Grant(2, 20)
@@ -390,7 +413,7 @@ func TestLessorRecover(t *testing.T) {
}
// Create a new lessor with the same backend
- nle := newLessor(lg, be, LessorConfig{MinLeaseTTL: minLeaseTTL})
+ nle := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL})
defer nle.Stop()
nl1 := nle.Lookup(l1.ID)
if nl1 == nil || nl1.ttl != l1.ttl {
@@ -411,7 +434,7 @@ func TestLessorExpire(t *testing.T) {
testMinTTL := int64(1)
- le := newLessor(lg, be, LessorConfig{MinLeaseTTL: testMinTTL})
+ le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: testMinTTL})
defer le.Stop()
le.Promote(1 * time.Second)
@@ -432,7 +455,7 @@ func TestLessorExpire(t *testing.T) {
donec := make(chan struct{}, 1)
go func() {
// expired lease cannot be renewed
- if _, err := le.Renew(l.ID); err != ErrLeaseNotFound {
+ if _, err := le.Renew(l.ID); !errors.Is(err, ErrLeaseNotFound) {
t.Errorf("unexpected renew")
}
donec <- struct{}{}
@@ -464,7 +487,7 @@ func TestLessorExpireAndDemote(t *testing.T) {
testMinTTL := int64(1)
- le := newLessor(lg, be, LessorConfig{MinLeaseTTL: testMinTTL})
+ le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: testMinTTL})
defer le.Stop()
le.Promote(1 * time.Second)
@@ -485,7 +508,7 @@ func TestLessorExpireAndDemote(t *testing.T) {
donec := make(chan struct{}, 1)
go func() {
// expired lease cannot be renewed
- if _, err := le.Renew(l.ID); err != ErrNotPrimary {
+ if _, err := le.Renew(l.ID); !errors.Is(err, ErrNotPrimary) {
t.Errorf("unexpected renew: %v", err)
}
donec <- struct{}{}
@@ -513,11 +536,11 @@ func TestLessorMaxTTL(t *testing.T) {
defer os.RemoveAll(dir)
defer be.Close()
- le := newLessor(lg, be, LessorConfig{MinLeaseTTL: minLeaseTTL})
+ le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL})
defer le.Stop()
_, err := le.Grant(1, MaxLeaseTTL+1)
- if err != ErrLeaseTTLTooLarge {
+ if !errors.Is(err, ErrLeaseTTLTooLarge) {
t.Fatalf("grant unexpectedly succeeded")
}
}
@@ -529,10 +552,11 @@ func TestLessorCheckpointScheduling(t *testing.T) {
defer os.RemoveAll(dir)
defer be.Close()
- le := newLessor(lg, be, LessorConfig{MinLeaseTTL: minLeaseTTL, CheckpointInterval: 1 * time.Second})
+ le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL, CheckpointInterval: 1 * time.Second})
+ defer le.Stop()
le.minLeaseTTL = 1
checkpointedC := make(chan struct{})
- le.SetCheckpointer(func(ctx context.Context, lc *pb.LeaseCheckpointRequest) {
+ le.SetCheckpointer(func(ctx context.Context, lc *pb.LeaseCheckpointRequest) error {
close(checkpointedC)
if len(lc.Checkpoints) != 1 {
t.Errorf("expected 1 checkpoint but got %d", len(lc.Checkpoints))
@@ -541,14 +565,13 @@ func TestLessorCheckpointScheduling(t *testing.T) {
if c.Remaining_TTL != 1 {
t.Errorf("expected checkpoint to be called with Remaining_TTL=%d but got %d", 1, c.Remaining_TTL)
}
+ return nil
})
- defer le.Stop()
- le.Promote(0)
-
_, err := le.Grant(1, 2)
if err != nil {
t.Fatal(err)
}
+ le.Promote(0)
// TODO: Is there any way to avoid doing this wait? Lease TTL granularity is in seconds.
select {
@@ -564,7 +587,7 @@ func TestLessorCheckpointsRestoredOnPromote(t *testing.T) {
defer os.RemoveAll(dir)
defer be.Close()
- le := newLessor(lg, be, LessorConfig{MinLeaseTTL: minLeaseTTL})
+ le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL})
defer le.Stop()
l, err := le.Grant(1, 10)
if err != nil {
@@ -578,6 +601,75 @@ func TestLessorCheckpointsRestoredOnPromote(t *testing.T) {
}
}
+func TestLessorCheckpointPersistenceAfterRestart(t *testing.T) {
+ const ttl int64 = 10
+ const checkpointTTL int64 = 5
+
+ tcs := []struct {
+ name string
+ cluster cluster
+ checkpointPersist bool
+ expectRemainingTTL int64
+ }{
+ {
+ name: "Etcd v3.6 and newer persist remainingTTL on checkpoint",
+ cluster: clusterLatest(),
+ expectRemainingTTL: checkpointTTL,
+ },
+ {
+ name: "Etcd v3.5 and older persist remainingTTL if CheckpointPersist is set",
+ cluster: clusterV3_5(),
+ checkpointPersist: true,
+ expectRemainingTTL: checkpointTTL,
+ },
+ {
+ name: "Etcd with version unknown persists remainingTTL if CheckpointPersist is set",
+ cluster: clusterNil(),
+ checkpointPersist: true,
+ expectRemainingTTL: checkpointTTL,
+ },
+ {
+ name: "Etcd v3.5 and older reset remainingTTL on checkpoint",
+ cluster: clusterV3_5(),
+ expectRemainingTTL: ttl,
+ },
+ {
+ name: "Etcd with version unknown fallbacks to v3.5 behavior",
+ cluster: clusterNil(),
+ expectRemainingTTL: ttl,
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ lg := zap.NewNop()
+ dir, be := NewTestBackend(t)
+ defer os.RemoveAll(dir)
+ defer be.Close()
+
+ cfg := LessorConfig{MinLeaseTTL: minLeaseTTL}
+ cfg.CheckpointPersist = tc.checkpointPersist
+ le := newLessor(lg, be, tc.cluster, cfg)
+ l, err := le.Grant(2, ttl)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if l.getRemainingTTL() != ttl {
+ t.Errorf("getRemainingTTL() = %d, expected: %d", l.getRemainingTTL(), ttl)
+ }
+ le.Checkpoint(2, checkpointTTL)
+ if l.getRemainingTTL() != checkpointTTL {
+ t.Errorf("getRemainingTTL() = %d, expected: %d", l.getRemainingTTL(), checkpointTTL)
+ }
+ le.Stop()
+ le2 := newLessor(lg, be, clusterLatest(), cfg)
+ l = le2.Lookup(2)
+ if l.getRemainingTTL() != tc.expectRemainingTTL {
+ t.Errorf("getRemainingTTL() = %d, expected: %d", l.getRemainingTTL(), tc.expectRemainingTTL)
+ }
+ })
+ }
+}
+
type fakeDeleter struct {
deleted []string
tx backend.BatchTx
@@ -597,11 +689,29 @@ func (fd *fakeDeleter) DeleteRange(key, end []byte) (int64, int64) {
}
func NewTestBackend(t *testing.T) (string, backend.Backend) {
- tmpPath, err := ioutil.TempDir("", "lease")
- if err != nil {
- t.Fatalf("failed to create tmpdir (%v)", err)
- }
- bcfg := backend.DefaultBackendConfig()
+ lg := zaptest.NewLogger(t)
+ tmpPath := t.TempDir()
+ bcfg := backend.DefaultBackendConfig(lg)
bcfg.Path = filepath.Join(tmpPath, "be")
return tmpPath, backend.New(bcfg)
}
+
+func clusterLatest() cluster {
+ return fakeCluster{semver.New(version.Cluster(version.Version) + ".0")}
+}
+
+func clusterV3_5() cluster {
+ return fakeCluster{semver.New("3.5.0")}
+}
+
+func clusterNil() cluster {
+ return fakeCluster{}
+}
+
+type fakeCluster struct {
+ version *semver.Version
+}
+
+func (c fakeCluster) Version() *semver.Version {
+ return c.version
+}
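The fakeCluster helpers above make it straightforward to pin a lessor to an arbitrary cluster version in a test. A hypothetical example (not part of this change) exercising the pre-3.6 gate directly:

    // Hypothetical test sketch in package lease, reusing helpers from this file.
    func TestCheckpointNotPersistedOnOldCluster(t *testing.T) {
    	lg := zaptest.NewLogger(t)
    	dir, be := NewTestBackend(t)
    	defer os.RemoveAll(dir)
    	defer be.Close()

    	le := newLessor(lg, be, clusterV3_5(), LessorConfig{MinLeaseTTL: minLeaseTTL})
    	defer le.Stop()
    	if le.shouldPersistCheckpoints() {
    		t.Fatal("expected checkpoints not to be persisted on a 3.5 cluster")
    	}
    }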
diff --git a/server/main.go b/server/main.go
index 88bb1a9d12f..0468094fff3 100644
--- a/server/main.go
+++ b/server/main.go
@@ -19,7 +19,6 @@
//
// This package should NOT be extended or modified in any way; to modify the
// etcd binary, work in the `go.etcd.io/etcd/etcdmain` package.
-//
package main
import (
diff --git a/server/main_test.go b/server/main_test.go
deleted file mode 100644
index 1b630fb1146..00000000000
--- a/server/main_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "log"
- "os"
- "os/signal"
- "strings"
- "syscall"
- "testing"
-
- "go.etcd.io/etcd/server/v3/etcdmain"
-)
-
-func SplitTestArgs(args []string) (testArgs, appArgs []string) {
- for i, arg := range os.Args {
- switch {
- case strings.HasPrefix(arg, "-test."):
- testArgs = append(testArgs, arg)
- case i == 0:
- appArgs = append(appArgs, arg)
- testArgs = append(testArgs, arg)
- default:
- appArgs = append(appArgs, arg)
- }
- }
- return
-}
-
-func TestEmpty(t *testing.T) {}
-
-/**
- * The purpose of this "test" is to run etcd server with code-coverage
- * collection turned on. The technique is documented here:
- *
- * https://www.cyphar.com/blog/post/20170412-golang-integration-coverage
- */
-func TestMain(m *testing.M) {
- // don't launch etcd server when invoked via go test
- if strings.HasSuffix(os.Args[0], ".test") {
- log.Printf("skip launching etcd server when invoked via go test")
- return
- }
-
- testArgs, appArgs := SplitTestArgs(os.Args)
-
- notifier := make(chan os.Signal, 1)
- signal.Notify(notifier, syscall.SIGINT, syscall.SIGTERM)
- go etcdmain.Main(appArgs)
- <-notifier
-
- // This will generate coverage files:
- os.Args = testArgs
- m.Run()
-}
diff --git a/server/mock/mockstorage/storage_recorder.go b/server/mock/mockstorage/storage_recorder.go
index db989cd2c45..41d2952e8a1 100644
--- a/server/mock/mockstorage/storage_recorder.go
+++ b/server/mock/mockstorage/storage_recorder.go
@@ -15,9 +15,11 @@
package mockstorage
import (
+ "github.com/coreos/go-semver/semver"
+
"go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/raft/v3"
- "go.etcd.io/etcd/raft/v3/raftpb"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
)
type storageRecorder struct {
@@ -57,4 +59,5 @@ func (p *storageRecorder) Sync() error {
return nil
}
-func (p *storageRecorder) Close() error { return nil }
+func (p *storageRecorder) Close() error { return nil }
+func (p *storageRecorder) MinimalEtcdVersion() *semver.Version { return nil }
diff --git a/server/mock/mockstore/store_recorder.go b/server/mock/mockstore/store_recorder.go
index 64aa46e9c2a..9edade1f29d 100644
--- a/server/mock/mockstore/store_recorder.go
+++ b/server/mock/mockstore/store_recorder.go
@@ -40,6 +40,7 @@ func NewRecorder() *StoreRecorder {
sr := &storeRecorder{Recorder: &testutil.RecorderBuffered{}}
return &StoreRecorder{Store: sr, Recorder: sr.Recorder}
}
+
func NewRecorderStream() *StoreRecorder {
sr := &storeRecorder{Recorder: testutil.NewRecorderStream()}
return &StoreRecorder{Store: sr, Recorder: sr.Recorder}
@@ -50,60 +51,69 @@ func (s *storeRecorder) Index() uint64 { return 0 }
func (s *storeRecorder) Get(path string, recursive, sorted bool) (*v2store.Event, error) {
s.Record(testutil.Action{
Name: "Get",
- Params: []interface{}{path, recursive, sorted},
+ Params: []any{path, recursive, sorted},
})
return &v2store.Event{}, nil
}
+
func (s *storeRecorder) Set(path string, dir bool, val string, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) {
s.Record(testutil.Action{
Name: "Set",
- Params: []interface{}{path, dir, val, expireOpts},
+ Params: []any{path, dir, val, expireOpts},
})
return &v2store.Event{}, nil
}
+
func (s *storeRecorder) Update(path, val string, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) {
s.Record(testutil.Action{
Name: "Update",
- Params: []interface{}{path, val, expireOpts},
+ Params: []any{path, val, expireOpts},
})
return &v2store.Event{}, nil
}
+
func (s *storeRecorder) Create(path string, dir bool, val string, uniq bool, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) {
s.Record(testutil.Action{
Name: "Create",
- Params: []interface{}{path, dir, val, uniq, expireOpts},
+ Params: []any{path, dir, val, uniq, expireOpts},
})
return &v2store.Event{}, nil
}
+
func (s *storeRecorder) CompareAndSwap(path, prevVal string, prevIdx uint64, val string, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) {
s.Record(testutil.Action{
Name: "CompareAndSwap",
- Params: []interface{}{path, prevVal, prevIdx, val, expireOpts},
+ Params: []any{path, prevVal, prevIdx, val, expireOpts},
})
return &v2store.Event{}, nil
}
+
func (s *storeRecorder) Delete(path string, dir, recursive bool) (*v2store.Event, error) {
s.Record(testutil.Action{
Name: "Delete",
- Params: []interface{}{path, dir, recursive},
+ Params: []any{path, dir, recursive},
})
return &v2store.Event{}, nil
}
+
func (s *storeRecorder) CompareAndDelete(path, prevVal string, prevIdx uint64) (*v2store.Event, error) {
s.Record(testutil.Action{
Name: "CompareAndDelete",
- Params: []interface{}{path, prevVal, prevIdx},
+ Params: []any{path, prevVal, prevIdx},
})
return &v2store.Event{}, nil
}
+
func (s *storeRecorder) Watch(_ string, _, _ bool, _ uint64) (v2store.Watcher, error) {
s.Record(testutil.Action{Name: "Watch"})
return v2store.NewNopWatcher(), nil
}
+
func (s *storeRecorder) Save() ([]byte, error) {
s.Record(testutil.Action{Name: "Save"})
return nil, nil
}
+
func (s *storeRecorder) Recovery(b []byte) error {
s.Record(testutil.Action{Name: "Recovery"})
return nil
@@ -119,11 +129,15 @@ func (s *storeRecorder) Clone() v2store.Store {
return s
}
+//revive:disable:var-naming
func (s *storeRecorder) JsonStats() []byte { return nil }
+
+//revive:enable:var-naming
+
func (s *storeRecorder) DeleteExpiredKeys(cutoff time.Time) {
s.Record(testutil.Action{
Name: "DeleteExpiredKeys",
- Params: []interface{}{cutoff},
+ Params: []any{cutoff},
})
}
@@ -151,6 +165,7 @@ func (s *errStoreRecorder) Get(path string, recursive, sorted bool) (*v2store.Ev
s.storeRecorder.Get(path, recursive, sorted)
return nil, s.err
}
+
func (s *errStoreRecorder) Watch(path string, recursive, sorted bool, index uint64) (v2store.Watcher, error) {
s.storeRecorder.Watch(path, recursive, sorted, index)
return nil, s.err
diff --git a/server/mock/mockwait/wait_recorder.go b/server/mock/mockwait/wait_recorder.go
index df16cc3b0e5..608ea73f501 100644
--- a/server/mock/mockwait/wait_recorder.go
+++ b/server/mock/mockwait/wait_recorder.go
@@ -34,11 +34,12 @@ func NewRecorder() *WaitRecorder {
}
func NewNop() wait.Wait { return NewRecorder() }
-func (w *waitRecorder) Register(id uint64) <-chan interface{} {
+func (w *waitRecorder) Register(id uint64) <-chan any {
w.Record(testutil.Action{Name: "Register"})
return nil
}
-func (w *waitRecorder) Trigger(id uint64, x interface{}) {
+
+func (w *waitRecorder) Trigger(id uint64, x any) {
w.Record(testutil.Action{Name: "Trigger"})
}
diff --git a/server/mvcc/backend/backend.go b/server/mvcc/backend/backend.go
deleted file mode 100644
index 055aedaff69..00000000000
--- a/server/mvcc/backend/backend.go
+++ /dev/null
@@ -1,578 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend
-
-import (
- "fmt"
- "hash/crc32"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "sync"
- "sync/atomic"
- "time"
-
- humanize "github.com/dustin/go-humanize"
- bolt "go.etcd.io/bbolt"
- "go.uber.org/zap"
-)
-
-var (
- defaultBatchLimit = 10000
- defaultBatchInterval = 100 * time.Millisecond
-
- defragLimit = 10000
-
- // initialMmapSize is the initial size of the mmapped region. Setting this larger than
- // the potential max db size can prevent writer from blocking reader.
- // This only works for linux.
- initialMmapSize = uint64(10 * 1024 * 1024 * 1024)
-
- // minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning.
- minSnapshotWarningTimeout = 30 * time.Second
-)
-
-type Backend interface {
- // ReadTx returns a read transaction. It is replaced by ConcurrentReadTx in the main data path, see #10523.
- ReadTx() ReadTx
- BatchTx() BatchTx
- // ConcurrentReadTx returns a non-blocking read transaction.
- ConcurrentReadTx() ReadTx
-
- Snapshot() Snapshot
- Hash(ignores map[IgnoreKey]struct{}) (uint32, error)
- // Size returns the current size of the backend physically allocated.
- // The backend can hold DB space that is not utilized at the moment,
- // since it can conduct pre-allocation or spare unused space for recycling.
- // Use SizeInUse() instead for the actual DB size.
- Size() int64
- // SizeInUse returns the current size of the backend logically in use.
- // Since the backend can manage free space in a non-byte unit such as
- // number of pages, the returned value can be not exactly accurate in bytes.
- SizeInUse() int64
- // OpenReadTxN returns the number of currently open read transactions in the backend.
- OpenReadTxN() int64
- Defrag() error
- ForceCommit()
- Close() error
-}
-
-type Snapshot interface {
- // Size gets the size of the snapshot.
- Size() int64
- // WriteTo writes the snapshot into the given writer.
- WriteTo(w io.Writer) (n int64, err error)
- // Close closes the snapshot.
- Close() error
-}
-
-type backend struct {
- // size and commits are used with atomic operations so they must be
- // 64-bit aligned, otherwise 32-bit tests will crash
-
- // size is the number of bytes allocated in the backend
- size int64
- // sizeInUse is the number of bytes actually used in the backend
- sizeInUse int64
- // commits counts number of commits since start
- commits int64
- // openReadTxN is the number of currently open read transactions in the backend
- openReadTxN int64
- // mlock prevents backend database file to be swapped
- mlock bool
-
- mu sync.RWMutex
- db *bolt.DB
-
- batchInterval time.Duration
- batchLimit int
- batchTx *batchTxBuffered
-
- readTx *readTx
-
- stopc chan struct{}
- donec chan struct{}
-
- hooks Hooks
-
- lg *zap.Logger
-}
-
-type BackendConfig struct {
- // Path is the file path to the backend file.
- Path string
- // BatchInterval is the maximum time before flushing the BatchTx.
- BatchInterval time.Duration
- // BatchLimit is the maximum puts before flushing the BatchTx.
- BatchLimit int
- // BackendFreelistType is the backend boltdb's freelist type.
- BackendFreelistType bolt.FreelistType
- // MmapSize is the number of bytes to mmap for the backend.
- MmapSize uint64
- // Logger logs backend-side operations.
- Logger *zap.Logger
- // UnsafeNoFsync disables all uses of fsync.
- UnsafeNoFsync bool `json:"unsafe-no-fsync"`
- // Mlock prevents backend database file to be swapped
- Mlock bool
-
- // Hooks are getting executed during lifecycle of Backend's transactions.
- Hooks Hooks
-}
-
-func DefaultBackendConfig() BackendConfig {
- return BackendConfig{
- BatchInterval: defaultBatchInterval,
- BatchLimit: defaultBatchLimit,
- MmapSize: initialMmapSize,
- }
-}
-
-func New(bcfg BackendConfig) Backend {
- return newBackend(bcfg)
-}
-
-func NewDefaultBackend(path string) Backend {
- bcfg := DefaultBackendConfig()
- bcfg.Path = path
- return newBackend(bcfg)
-}
-
-func newBackend(bcfg BackendConfig) *backend {
- if bcfg.Logger == nil {
- bcfg.Logger = zap.NewNop()
- }
-
- bopts := &bolt.Options{}
- if boltOpenOptions != nil {
- *bopts = *boltOpenOptions
- }
- bopts.InitialMmapSize = bcfg.mmapSize()
- bopts.FreelistType = bcfg.BackendFreelistType
- bopts.NoSync = bcfg.UnsafeNoFsync
- bopts.NoGrowSync = bcfg.UnsafeNoFsync
- bopts.Mlock = bcfg.Mlock
-
- db, err := bolt.Open(bcfg.Path, 0600, bopts)
- if err != nil {
- bcfg.Logger.Panic("failed to open database", zap.String("path", bcfg.Path), zap.Error(err))
- }
-
- // In future, may want to make buffering optional for low-concurrency systems
- // or dynamically swap between buffered/non-buffered depending on workload.
- b := &backend{
- db: db,
-
- batchInterval: bcfg.BatchInterval,
- batchLimit: bcfg.BatchLimit,
- mlock: bcfg.Mlock,
-
- readTx: &readTx{
- baseReadTx: baseReadTx{
- buf: txReadBuffer{
- txBuffer: txBuffer{make(map[string]*bucketBuffer)},
- },
- buckets: make(map[string]*bolt.Bucket),
- txWg: new(sync.WaitGroup),
- txMu: new(sync.RWMutex),
- },
- },
-
- stopc: make(chan struct{}),
- donec: make(chan struct{}),
-
- lg: bcfg.Logger,
- }
- b.batchTx = newBatchTxBuffered(b)
- // We set it after newBatchTxBuffered to skip the 'empty' commit.
- b.hooks = bcfg.Hooks
-
- go b.run()
- return b
-}
-
-// BatchTx returns the current batch tx in coalescer. The tx can be used for read and
-// write operations. The write result can be retrieved within the same tx immediately.
-// The write result is isolated with other txs until the current one get committed.
-func (b *backend) BatchTx() BatchTx {
- return b.batchTx
-}
-
-func (b *backend) ReadTx() ReadTx { return b.readTx }
-
-// ConcurrentReadTx creates and returns a new ReadTx, which:
-// A) creates and keeps a copy of backend.readTx.txReadBuffer,
-// B) references the boltdb read Tx (and its bucket cache) of current batch interval.
-func (b *backend) ConcurrentReadTx() ReadTx {
- b.readTx.RLock()
- defer b.readTx.RUnlock()
- // prevent boltdb read Tx from been rolled back until store read Tx is done. Needs to be called when holding readTx.RLock().
- b.readTx.txWg.Add(1)
- // TODO: might want to copy the read buffer lazily - create copy when A) end of a write transaction B) end of a batch interval.
- return &concurrentReadTx{
- baseReadTx: baseReadTx{
- buf: b.readTx.buf.unsafeCopy(),
- txMu: b.readTx.txMu,
- tx: b.readTx.tx,
- buckets: b.readTx.buckets,
- txWg: b.readTx.txWg,
- },
- }
-}
-
-// ForceCommit forces the current batching tx to commit.
-func (b *backend) ForceCommit() {
- b.batchTx.Commit()
-}
-
-func (b *backend) Snapshot() Snapshot {
- b.batchTx.Commit()
-
- b.mu.RLock()
- defer b.mu.RUnlock()
- tx, err := b.db.Begin(false)
- if err != nil {
- b.lg.Fatal("failed to begin tx", zap.Error(err))
- }
-
- stopc, donec := make(chan struct{}), make(chan struct{})
- dbBytes := tx.Size()
- go func() {
- defer close(donec)
- // sendRateBytes is based on transferring snapshot data over a 1 gigabit/s connection
- // assuming a min tcp throughput of 100MB/s.
- var sendRateBytes int64 = 100 * 1024 * 1024
- warningTimeout := time.Duration(int64((float64(dbBytes) / float64(sendRateBytes)) * float64(time.Second)))
- if warningTimeout < minSnapshotWarningTimeout {
- warningTimeout = minSnapshotWarningTimeout
- }
- start := time.Now()
- ticker := time.NewTicker(warningTimeout)
- defer ticker.Stop()
- for {
- select {
- case <-ticker.C:
- b.lg.Warn(
- "snapshotting taking too long to transfer",
- zap.Duration("taking", time.Since(start)),
- zap.Int64("bytes", dbBytes),
- zap.String("size", humanize.Bytes(uint64(dbBytes))),
- )
-
- case <-stopc:
- snapshotTransferSec.Observe(time.Since(start).Seconds())
- return
- }
- }
- }()
-
- return &snapshot{tx, stopc, donec}
-}
-
-type IgnoreKey struct {
- Bucket string
- Key string
-}
-
-func (b *backend) Hash(ignores map[IgnoreKey]struct{}) (uint32, error) {
- h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
-
- b.mu.RLock()
- defer b.mu.RUnlock()
- err := b.db.View(func(tx *bolt.Tx) error {
- c := tx.Cursor()
- for next, _ := c.First(); next != nil; next, _ = c.Next() {
- b := tx.Bucket(next)
- if b == nil {
- return fmt.Errorf("cannot get hash of bucket %s", string(next))
- }
- h.Write(next)
- b.ForEach(func(k, v []byte) error {
- bk := IgnoreKey{Bucket: string(next), Key: string(k)}
- if _, ok := ignores[bk]; !ok {
- h.Write(k)
- h.Write(v)
- }
- return nil
- })
- }
- return nil
- })
-
- if err != nil {
- return 0, err
- }
-
- return h.Sum32(), nil
-}
-
-func (b *backend) Size() int64 {
- return atomic.LoadInt64(&b.size)
-}
-
-func (b *backend) SizeInUse() int64 {
- return atomic.LoadInt64(&b.sizeInUse)
-}
-
-func (b *backend) run() {
- defer close(b.donec)
- t := time.NewTimer(b.batchInterval)
- defer t.Stop()
- for {
- select {
- case <-t.C:
- case <-b.stopc:
- b.batchTx.CommitAndStop()
- return
- }
- if b.batchTx.safePending() != 0 {
- b.batchTx.Commit()
- }
- t.Reset(b.batchInterval)
- }
-}
-
-func (b *backend) Close() error {
- close(b.stopc)
- <-b.donec
- return b.db.Close()
-}
-
-// Commits returns total number of commits since start
-func (b *backend) Commits() int64 {
- return atomic.LoadInt64(&b.commits)
-}
-
-func (b *backend) Defrag() error {
- return b.defrag()
-}
-
-func (b *backend) defrag() error {
- now := time.Now()
-
- // TODO: make this non-blocking?
- // lock batchTx to ensure nobody is using previous tx, and then
- // close previous ongoing tx.
- b.batchTx.Lock()
- defer b.batchTx.Unlock()
-
- // lock database after lock tx to avoid deadlock.
- b.mu.Lock()
- defer b.mu.Unlock()
-
- // block concurrent read requests while resetting tx
- b.readTx.Lock()
- defer b.readTx.Unlock()
-
- b.batchTx.unsafeCommit(true)
-
- b.batchTx.tx = nil
-
- // Create a temporary file to ensure we start with a clean slate.
- // Snapshotter.cleanupSnapdir cleans up any of these that are found during startup.
- dir := filepath.Dir(b.db.Path())
- temp, err := ioutil.TempFile(dir, "db.tmp.*")
- if err != nil {
- return err
- }
- options := bolt.Options{}
- if boltOpenOptions != nil {
- options = *boltOpenOptions
- }
- options.OpenFile = func(_ string, _ int, _ os.FileMode) (file *os.File, err error) {
- return temp, nil
- }
- // Don't load tmp db into memory regardless of opening options
- options.Mlock = false
- tdbp := temp.Name()
- tmpdb, err := bolt.Open(tdbp, 0600, &options)
- if err != nil {
- return err
- }
-
- dbp := b.db.Path()
- size1, sizeInUse1 := b.Size(), b.SizeInUse()
- if b.lg != nil {
- b.lg.Info(
- "defragmenting",
- zap.String("path", dbp),
- zap.Int64("current-db-size-bytes", size1),
- zap.String("current-db-size", humanize.Bytes(uint64(size1))),
- zap.Int64("current-db-size-in-use-bytes", sizeInUse1),
- zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse1))),
- )
- }
- // gofail: var defragBeforeCopy struct{}
- err = defragdb(b.db, tmpdb, defragLimit)
- if err != nil {
- tmpdb.Close()
- if rmErr := os.RemoveAll(tmpdb.Path()); rmErr != nil {
- b.lg.Error("failed to remove db.tmp after defragmentation completed", zap.Error(rmErr))
- }
- return err
- }
-
- err = b.db.Close()
- if err != nil {
- b.lg.Fatal("failed to close database", zap.Error(err))
- }
- err = tmpdb.Close()
- if err != nil {
- b.lg.Fatal("failed to close tmp database", zap.Error(err))
- }
- // gofail: var defragBeforeRename struct{}
- err = os.Rename(tdbp, dbp)
- if err != nil {
- b.lg.Fatal("failed to rename tmp database", zap.Error(err))
- }
-
- defragmentedBoltOptions := bolt.Options{}
- if boltOpenOptions != nil {
- defragmentedBoltOptions = *boltOpenOptions
- }
- defragmentedBoltOptions.Mlock = b.mlock
-
- b.db, err = bolt.Open(dbp, 0600, &defragmentedBoltOptions)
- if err != nil {
- b.lg.Fatal("failed to open database", zap.String("path", dbp), zap.Error(err))
- }
- b.batchTx.tx = b.unsafeBegin(true)
-
- b.readTx.reset()
- b.readTx.tx = b.unsafeBegin(false)
-
- size := b.readTx.tx.Size()
- db := b.readTx.tx.DB()
- atomic.StoreInt64(&b.size, size)
- atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize)))
-
- took := time.Since(now)
- defragSec.Observe(took.Seconds())
-
- size2, sizeInUse2 := b.Size(), b.SizeInUse()
- if b.lg != nil {
- b.lg.Info(
- "finished defragmenting directory",
- zap.String("path", dbp),
- zap.Int64("current-db-size-bytes-diff", size2-size1),
- zap.Int64("current-db-size-bytes", size2),
- zap.String("current-db-size", humanize.Bytes(uint64(size2))),
- zap.Int64("current-db-size-in-use-bytes-diff", sizeInUse2-sizeInUse1),
- zap.Int64("current-db-size-in-use-bytes", sizeInUse2),
- zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse2))),
- zap.Duration("took", took),
- )
- }
- return nil
-}
-
-func defragdb(odb, tmpdb *bolt.DB, limit int) error {
- // open a tx on tmpdb for writes
- tmptx, err := tmpdb.Begin(true)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- tmptx.Rollback()
- }
- }()
-
- // open a tx on old db for read
- tx, err := odb.Begin(false)
- if err != nil {
- return err
- }
- defer tx.Rollback()
-
- c := tx.Cursor()
-
- count := 0
- for next, _ := c.First(); next != nil; next, _ = c.Next() {
- b := tx.Bucket(next)
- if b == nil {
- return fmt.Errorf("backend: cannot defrag bucket %s", string(next))
- }
-
- tmpb, berr := tmptx.CreateBucketIfNotExists(next)
- if berr != nil {
- return berr
- }
- tmpb.FillPercent = 0.9 // for seq write in for each
-
- if err = b.ForEach(func(k, v []byte) error {
- count++
- if count > limit {
- err = tmptx.Commit()
- if err != nil {
- return err
- }
- tmptx, err = tmpdb.Begin(true)
- if err != nil {
- return err
- }
- tmpb = tmptx.Bucket(next)
- tmpb.FillPercent = 0.9 // for seq write in for each
-
- count = 0
- }
- return tmpb.Put(k, v)
- }); err != nil {
- return err
- }
- }
-
- return tmptx.Commit()
-}
-
-func (b *backend) begin(write bool) *bolt.Tx {
- b.mu.RLock()
- tx := b.unsafeBegin(write)
- b.mu.RUnlock()
-
- size := tx.Size()
- db := tx.DB()
- stats := db.Stats()
- atomic.StoreInt64(&b.size, size)
- atomic.StoreInt64(&b.sizeInUse, size-(int64(stats.FreePageN)*int64(db.Info().PageSize)))
- atomic.StoreInt64(&b.openReadTxN, int64(stats.OpenTxN))
-
- return tx
-}
-
-func (b *backend) unsafeBegin(write bool) *bolt.Tx {
- tx, err := b.db.Begin(write)
- if err != nil {
- b.lg.Fatal("failed to begin tx", zap.Error(err))
- }
- return tx
-}
-
-func (b *backend) OpenReadTxN() int64 {
- return atomic.LoadInt64(&b.openReadTxN)
-}
-
-type snapshot struct {
- *bolt.Tx
- stopc chan struct{}
- donec chan struct{}
-}
-
-func (s *snapshot) Close() error {
- close(s.stopc)
- <-s.donec
- return s.Tx.Rollback()
-}
diff --git a/server/mvcc/backend/backend_test.go b/server/mvcc/backend/backend_test.go
deleted file mode 100644
index bb7a34e6d3d..00000000000
--- a/server/mvcc/backend/backend_test.go
+++ /dev/null
@@ -1,331 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend_test
-
-import (
- "fmt"
- "io/ioutil"
- "reflect"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- bolt "go.etcd.io/bbolt"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
- betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
-)
-
-func TestBackendClose(t *testing.T) {
- b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
-
- // check close could work
- done := make(chan struct{})
- go func() {
- err := b.Close()
- if err != nil {
- t.Errorf("close error = %v, want nil", err)
- }
- done <- struct{}{}
- }()
- select {
- case <-done:
- case <-time.After(10 * time.Second):
- t.Errorf("failed to close database in 10s")
- }
-}
-
-func TestBackendSnapshot(t *testing.T) {
- b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
- defer betesting.Close(t, b)
-
- tx := b.BatchTx()
- tx.Lock()
- tx.UnsafeCreateBucket([]byte("test"))
- tx.UnsafePut([]byte("test"), []byte("foo"), []byte("bar"))
- tx.Unlock()
- b.ForceCommit()
-
- // write snapshot to a new file
- f, err := ioutil.TempFile(t.TempDir(), "etcd_backend_test")
- if err != nil {
- t.Fatal(err)
- }
- snap := b.Snapshot()
- defer func() { assert.NoError(t, snap.Close()) }()
- if _, err := snap.WriteTo(f); err != nil {
- t.Fatal(err)
- }
- assert.NoError(t, f.Close())
-
- // bootstrap new backend from the snapshot
- bcfg := backend.DefaultBackendConfig()
- bcfg.Path, bcfg.BatchInterval, bcfg.BatchLimit = f.Name(), time.Hour, 10000
- nb := backend.New(bcfg)
- defer betesting.Close(t, nb)
-
- newTx := nb.BatchTx()
- newTx.Lock()
- ks, _ := newTx.UnsafeRange([]byte("test"), []byte("foo"), []byte("goo"), 0)
- if len(ks) != 1 {
- t.Errorf("len(kvs) = %d, want 1", len(ks))
- }
- newTx.Unlock()
-}
-
-func TestBackendBatchIntervalCommit(t *testing.T) {
- // start backend with super short batch interval so
- // we do not need to wait long before commit to happen.
- b, _ := betesting.NewTmpBackend(t, time.Nanosecond, 10000)
- defer betesting.Close(t, b)
-
- pc := backend.CommitsForTest(b)
-
- tx := b.BatchTx()
- tx.Lock()
- tx.UnsafeCreateBucket([]byte("test"))
- tx.UnsafePut([]byte("test"), []byte("foo"), []byte("bar"))
- tx.Unlock()
-
- for i := 0; i < 10; i++ {
- if backend.CommitsForTest(b) >= pc+1 {
- break
- }
- time.Sleep(time.Duration(i*100) * time.Millisecond)
- }
-
- // check whether put happens via db view
- assert.NoError(t, backend.DbFromBackendForTest(b).View(func(tx *bolt.Tx) error {
- bucket := tx.Bucket([]byte("test"))
- if bucket == nil {
- t.Errorf("bucket test does not exit")
- return nil
- }
- v := bucket.Get([]byte("foo"))
- if v == nil {
- t.Errorf("foo key failed to written in backend")
- }
- return nil
- }))
-}
-
-func TestBackendDefrag(t *testing.T) {
- b, _ := betesting.NewDefaultTmpBackend(t)
- defer betesting.Close(t, b)
-
- tx := b.BatchTx()
- tx.Lock()
- tx.UnsafeCreateBucket([]byte("test"))
- for i := 0; i < backend.DefragLimitForTest()+100; i++ {
- tx.UnsafePut([]byte("test"), []byte(fmt.Sprintf("foo_%d", i)), []byte("bar"))
- }
- tx.Unlock()
- b.ForceCommit()
-
- // remove some keys to ensure the disk space will be reclaimed after defrag
- tx = b.BatchTx()
- tx.Lock()
- for i := 0; i < 50; i++ {
- tx.UnsafeDelete([]byte("test"), []byte(fmt.Sprintf("foo_%d", i)))
- }
- tx.Unlock()
- b.ForceCommit()
-
- size := b.Size()
-
- // shrink and check hash
- oh, err := b.Hash(nil)
- if err != nil {
- t.Fatal(err)
- }
-
- err = b.Defrag()
- if err != nil {
- t.Fatal(err)
- }
-
- nh, err := b.Hash(nil)
- if err != nil {
- t.Fatal(err)
- }
- if oh != nh {
- t.Errorf("hash = %v, want %v", nh, oh)
- }
-
- nsize := b.Size()
- if nsize >= size {
- t.Errorf("new size = %v, want < %d", nsize, size)
- }
-
- // try put more keys after shrink.
- tx = b.BatchTx()
- tx.Lock()
- tx.UnsafeCreateBucket([]byte("test"))
- tx.UnsafePut([]byte("test"), []byte("more"), []byte("bar"))
- tx.Unlock()
- b.ForceCommit()
-}
-
-// TestBackendWriteback ensures writes are stored to the read txn on write txn unlock.
-func TestBackendWriteback(t *testing.T) {
- b, _ := betesting.NewDefaultTmpBackend(t)
- defer betesting.Close(t, b)
-
- tx := b.BatchTx()
- tx.Lock()
- tx.UnsafeCreateBucket([]byte("key"))
- tx.UnsafePut([]byte("key"), []byte("abc"), []byte("bar"))
- tx.UnsafePut([]byte("key"), []byte("def"), []byte("baz"))
- tx.UnsafePut([]byte("key"), []byte("overwrite"), []byte("1"))
- tx.Unlock()
-
- // overwrites should be propagated too
- tx.Lock()
- tx.UnsafePut([]byte("key"), []byte("overwrite"), []byte("2"))
- tx.Unlock()
-
- keys := []struct {
- key []byte
- end []byte
- limit int64
-
- wkey [][]byte
- wval [][]byte
- }{
- {
- key: []byte("abc"),
- end: nil,
-
- wkey: [][]byte{[]byte("abc")},
- wval: [][]byte{[]byte("bar")},
- },
- {
- key: []byte("abc"),
- end: []byte("def"),
-
- wkey: [][]byte{[]byte("abc")},
- wval: [][]byte{[]byte("bar")},
- },
- {
- key: []byte("abc"),
- end: []byte("deg"),
-
- wkey: [][]byte{[]byte("abc"), []byte("def")},
- wval: [][]byte{[]byte("bar"), []byte("baz")},
- },
- {
- key: []byte("abc"),
- end: []byte("\xff"),
- limit: 1,
-
- wkey: [][]byte{[]byte("abc")},
- wval: [][]byte{[]byte("bar")},
- },
- {
- key: []byte("abc"),
- end: []byte("\xff"),
-
- wkey: [][]byte{[]byte("abc"), []byte("def"), []byte("overwrite")},
- wval: [][]byte{[]byte("bar"), []byte("baz"), []byte("2")},
- },
- }
- rtx := b.ReadTx()
- for i, tt := range keys {
- rtx.RLock()
- k, v := rtx.UnsafeRange([]byte("key"), tt.key, tt.end, tt.limit)
- rtx.RUnlock()
- if !reflect.DeepEqual(tt.wkey, k) || !reflect.DeepEqual(tt.wval, v) {
- t.Errorf("#%d: want k=%+v, v=%+v; got k=%+v, v=%+v", i, tt.wkey, tt.wval, k, v)
- }
- }
-}
-
-// TestConcurrentReadTx ensures that current read transaction can see all prior writes stored in read buffer
-func TestConcurrentReadTx(t *testing.T) {
- b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
- defer betesting.Close(t, b)
-
- wtx1 := b.BatchTx()
- wtx1.Lock()
- wtx1.UnsafeCreateBucket([]byte("key"))
- wtx1.UnsafePut([]byte("key"), []byte("abc"), []byte("ABC"))
- wtx1.UnsafePut([]byte("key"), []byte("overwrite"), []byte("1"))
- wtx1.Unlock()
-
- wtx2 := b.BatchTx()
- wtx2.Lock()
- wtx2.UnsafePut([]byte("key"), []byte("def"), []byte("DEF"))
- wtx2.UnsafePut([]byte("key"), []byte("overwrite"), []byte("2"))
- wtx2.Unlock()
-
- rtx := b.ConcurrentReadTx()
- rtx.RLock() // no-op
- k, v := rtx.UnsafeRange([]byte("key"), []byte("abc"), []byte("\xff"), 0)
- rtx.RUnlock()
- wKey := [][]byte{[]byte("abc"), []byte("def"), []byte("overwrite")}
- wVal := [][]byte{[]byte("ABC"), []byte("DEF"), []byte("2")}
- if !reflect.DeepEqual(wKey, k) || !reflect.DeepEqual(wVal, v) {
- t.Errorf("want k=%+v, v=%+v; got k=%+v, v=%+v", wKey, wVal, k, v)
- }
-}
-
-// TestBackendWritebackForEach checks that partially written / buffered
-// data is visited in the same order as fully committed data.
-func TestBackendWritebackForEach(t *testing.T) {
- b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
- defer betesting.Close(t, b)
-
- tx := b.BatchTx()
- tx.Lock()
- tx.UnsafeCreateBucket([]byte("key"))
- for i := 0; i < 5; i++ {
- k := []byte(fmt.Sprintf("%04d", i))
- tx.UnsafePut([]byte("key"), k, []byte("bar"))
- }
- tx.Unlock()
-
- // writeback
- b.ForceCommit()
-
- tx.Lock()
- tx.UnsafeCreateBucket([]byte("key"))
- for i := 5; i < 20; i++ {
- k := []byte(fmt.Sprintf("%04d", i))
- tx.UnsafePut([]byte("key"), k, []byte("bar"))
- }
- tx.Unlock()
-
- seq := ""
- getSeq := func(k, v []byte) error {
- seq += string(k)
- return nil
- }
- rtx := b.ReadTx()
- rtx.RLock()
- assert.NoError(t, rtx.UnsafeForEach([]byte("key"), getSeq))
- rtx.RUnlock()
-
- partialSeq := seq
-
- seq = ""
- b.ForceCommit()
-
- tx.Lock()
- assert.NoError(t, tx.UnsafeForEach([]byte("key"), getSeq))
- tx.Unlock()
-
- if seq != partialSeq {
- t.Fatalf("expected %q, got %q", seq, partialSeq)
- }
-}
diff --git a/server/mvcc/backend/batch_tx.go b/server/mvcc/backend/batch_tx.go
deleted file mode 100644
index 74107b445e5..00000000000
--- a/server/mvcc/backend/batch_tx.go
+++ /dev/null
@@ -1,327 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend
-
-import (
- "bytes"
- "math"
- "sync"
- "sync/atomic"
- "time"
-
- bolt "go.etcd.io/bbolt"
- "go.uber.org/zap"
-)
-
-type BatchTx interface {
- ReadTx
- UnsafeCreateBucket(name []byte)
- UnsafeDeleteBucket(name []byte)
- UnsafePut(bucketName []byte, key []byte, value []byte)
- UnsafeSeqPut(bucketName []byte, key []byte, value []byte)
- UnsafeDelete(bucketName []byte, key []byte)
- // Commit commits a previous tx and begins a new writable one.
- Commit()
- // CommitAndStop commits the previous tx and does not create a new one.
- CommitAndStop()
-}
-
-type batchTx struct {
- sync.Mutex
- tx *bolt.Tx
- backend *backend
-
- pending int
-}
-
-func (t *batchTx) Lock() {
- t.Mutex.Lock()
-}
-
-func (t *batchTx) Unlock() {
- if t.pending >= t.backend.batchLimit {
- t.commit(false)
- }
- t.Mutex.Unlock()
-}
-
-// BatchTx interface embeds ReadTx interface. But RLock() and RUnlock() do not
-// have appropriate semantics in BatchTx interface. Therefore should not be called.
-// TODO: might want to decouple ReadTx and BatchTx
-
-func (t *batchTx) RLock() {
- panic("unexpected RLock")
-}
-
-func (t *batchTx) RUnlock() {
- panic("unexpected RUnlock")
-}
-
-func (t *batchTx) UnsafeCreateBucket(name []byte) {
- _, err := t.tx.CreateBucket(name)
- if err != nil && err != bolt.ErrBucketExists {
- t.backend.lg.Fatal(
- "failed to create a bucket",
- zap.String("bucket-name", string(name)),
- zap.Error(err),
- )
- }
- t.pending++
-}
-
-func (t *batchTx) UnsafeDeleteBucket(name []byte) {
- err := t.tx.DeleteBucket(name)
- if err != nil && err != bolt.ErrBucketNotFound {
- t.backend.lg.Fatal(
- "failed to delete a bucket",
- zap.String("bucket-name", string(name)),
- zap.Error(err),
- )
- }
- t.pending++
-}
-
-// UnsafePut must be called holding the lock on the tx.
-func (t *batchTx) UnsafePut(bucketName []byte, key []byte, value []byte) {
- t.unsafePut(bucketName, key, value, false)
-}
-
-// UnsafeSeqPut must be called holding the lock on the tx.
-func (t *batchTx) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
- t.unsafePut(bucketName, key, value, true)
-}
-
-func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq bool) {
- bucket := t.tx.Bucket(bucketName)
- if bucket == nil {
- t.backend.lg.Fatal(
- "failed to find a bucket",
- zap.String("bucket-name", string(bucketName)),
- zap.Stack("stack"),
- )
- }
- if seq {
- // it is useful to increase fill percent when the workloads are mostly append-only.
- // this can delay the page split and reduce space usage.
- bucket.FillPercent = 0.9
- }
- if err := bucket.Put(key, value); err != nil {
- t.backend.lg.Fatal(
- "failed to write to a bucket",
- zap.String("bucket-name", string(bucketName)),
- zap.Error(err),
- )
- }
- t.pending++
-}
-
-// UnsafeRange must be called holding the lock on the tx.
-func (t *batchTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
- bucket := t.tx.Bucket(bucketName)
- if bucket == nil {
- t.backend.lg.Fatal(
- "failed to find a bucket",
- zap.String("bucket-name", string(bucketName)),
- zap.Stack("stack"),
- )
- }
- return unsafeRange(bucket.Cursor(), key, endKey, limit)
-}
-
-func unsafeRange(c *bolt.Cursor, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte) {
- if limit <= 0 {
- limit = math.MaxInt64
- }
- var isMatch func(b []byte) bool
- if len(endKey) > 0 {
- isMatch = func(b []byte) bool { return bytes.Compare(b, endKey) < 0 }
- } else {
- isMatch = func(b []byte) bool { return bytes.Equal(b, key) }
- limit = 1
- }
-
- for ck, cv := c.Seek(key); ck != nil && isMatch(ck); ck, cv = c.Next() {
- vs = append(vs, cv)
- keys = append(keys, ck)
- if limit == int64(len(keys)) {
- break
- }
- }
- return keys, vs
-}
-
-// UnsafeDelete must be called holding the lock on the tx.
-func (t *batchTx) UnsafeDelete(bucketName []byte, key []byte) {
- bucket := t.tx.Bucket(bucketName)
- if bucket == nil {
- t.backend.lg.Fatal(
- "failed to find a bucket",
- zap.String("bucket-name", string(bucketName)),
- zap.Stack("stack"),
- )
- }
- err := bucket.Delete(key)
- if err != nil {
- t.backend.lg.Fatal(
- "failed to delete a key",
- zap.String("bucket-name", string(bucketName)),
- zap.Error(err),
- )
- }
- t.pending++
-}
-
-// UnsafeForEach must be called holding the lock on the tx.
-func (t *batchTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
- return unsafeForEach(t.tx, bucketName, visitor)
-}
-
-func unsafeForEach(tx *bolt.Tx, bucket []byte, visitor func(k, v []byte) error) error {
- if b := tx.Bucket(bucket); b != nil {
- return b.ForEach(visitor)
- }
- return nil
-}
-
-// Commit commits a previous tx and begins a new writable one.
-func (t *batchTx) Commit() {
- t.Lock()
- t.commit(false)
- t.Unlock()
-}
-
-// CommitAndStop commits the previous tx and does not create a new one.
-func (t *batchTx) CommitAndStop() {
- t.Lock()
- t.commit(true)
- t.Unlock()
-}
-
-func (t *batchTx) safePending() int {
- t.Mutex.Lock()
- defer t.Mutex.Unlock()
- return t.pending
-}
-
-func (t *batchTx) commit(stop bool) {
- // commit the last tx
- if t.tx != nil {
- if t.pending == 0 && !stop {
- return
- }
-
- start := time.Now()
-
- // gofail: var beforeCommit struct{}
- err := t.tx.Commit()
- // gofail: var afterCommit struct{}
-
- rebalanceSec.Observe(t.tx.Stats().RebalanceTime.Seconds())
- spillSec.Observe(t.tx.Stats().SpillTime.Seconds())
- writeSec.Observe(t.tx.Stats().WriteTime.Seconds())
- commitSec.Observe(time.Since(start).Seconds())
- atomic.AddInt64(&t.backend.commits, 1)
-
- t.pending = 0
- if err != nil {
- t.backend.lg.Fatal("failed to commit tx", zap.Error(err))
- }
- }
- if !stop {
- t.tx = t.backend.begin(true)
- }
-}
-
-type batchTxBuffered struct {
- batchTx
- buf txWriteBuffer
-}
-
-func newBatchTxBuffered(backend *backend) *batchTxBuffered {
- tx := &batchTxBuffered{
- batchTx: batchTx{backend: backend},
- buf: txWriteBuffer{
- txBuffer: txBuffer{make(map[string]*bucketBuffer)},
- seq: true,
- },
- }
- tx.Commit()
- return tx
-}
-
-func (t *batchTxBuffered) Unlock() {
- if t.pending != 0 {
- t.backend.readTx.Lock() // blocks txReadBuffer for writing.
- t.buf.writeback(&t.backend.readTx.buf)
- t.backend.readTx.Unlock()
- if t.pending >= t.backend.batchLimit {
- t.commit(false)
- }
- }
- t.batchTx.Unlock()
-}
-
-func (t *batchTxBuffered) Commit() {
- t.Lock()
- t.commit(false)
- t.Unlock()
-}
-
-func (t *batchTxBuffered) CommitAndStop() {
- t.Lock()
- t.commit(true)
- t.Unlock()
-}
-
-func (t *batchTxBuffered) commit(stop bool) {
- if t.backend.hooks != nil {
- t.backend.hooks.OnPreCommitUnsafe(t)
- }
-
- // all read txs must be closed to acquire boltdb commit rwlock
- t.backend.readTx.Lock()
- t.unsafeCommit(stop)
- t.backend.readTx.Unlock()
-}
-
-func (t *batchTxBuffered) unsafeCommit(stop bool) {
- if t.backend.readTx.tx != nil {
- // wait all store read transactions using the current boltdb tx to finish,
- // then close the boltdb tx
- go func(tx *bolt.Tx, wg *sync.WaitGroup) {
- wg.Wait()
- if err := tx.Rollback(); err != nil {
- t.backend.lg.Fatal("failed to rollback tx", zap.Error(err))
- }
- }(t.backend.readTx.tx, t.backend.readTx.txWg)
- t.backend.readTx.reset()
- }
-
- t.batchTx.commit(stop)
-
- if !stop {
- t.backend.readTx.tx = t.backend.begin(false)
- }
-}
-
-func (t *batchTxBuffered) UnsafePut(bucketName []byte, key []byte, value []byte) {
- t.batchTx.UnsafePut(bucketName, key, value)
- t.buf.put(bucketName, key, value)
-}
-
-func (t *batchTxBuffered) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
- t.batchTx.UnsafeSeqPut(bucketName, key, value)
- t.buf.putSeq(bucketName, key, value)
-}
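`batchTxBuffered` above is the write-through piece of the backend: every put lands in both the open boltdb transaction and the in-memory `txWriteBuffer`, and `Unlock` writes that buffer back into the backend's read buffer so readers can see pending writes before the batch interval commits them. A stripped-down sketch of just that buffer/writeback idea, using plain maps instead of the sorted `bucketBuffer` (all names here are illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

// readBuffer is what readers consult; pending writes are merged into it
// on Unlock, before any boltdb commit happens.
type readBuffer struct {
	mu   sync.RWMutex
	data map[string]string
}

func (rb *readBuffer) Get(k string) (string, bool) {
	rb.mu.RLock()
	defer rb.mu.RUnlock()
	v, ok := rb.data[k]
	return v, ok
}

// bufferedTx mimics batchTxBuffered: Put records a pending write, Unlock
// writes the pending set back into the read buffer (the writeback step).
type bufferedTx struct {
	mu      sync.Mutex
	pending map[string]string
	rb      *readBuffer
}

func (t *bufferedTx) Lock() { t.mu.Lock() }

func (t *bufferedTx) Put(k, v string) { t.pending[k] = v }

func (t *bufferedTx) Unlock() {
	if len(t.pending) > 0 {
		t.rb.mu.Lock()
		for k, v := range t.pending {
			t.rb.data[k] = v
		}
		t.rb.mu.Unlock()
		t.pending = map[string]string{}
	}
	t.mu.Unlock()
}

func main() {
	rb := &readBuffer{data: map[string]string{}}
	tx := &bufferedTx{pending: map[string]string{}, rb: rb}

	tx.Lock()
	tx.Put("foo", "bar")
	tx.Unlock() // writeback happens here

	if v, ok := rb.Get("foo"); ok {
		fmt.Println("read buffer already sees the pending write:", v)
	}
}
```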
diff --git a/server/mvcc/backend/batch_tx_test.go b/server/mvcc/backend/batch_tx_test.go
deleted file mode 100644
index 95375253d1a..00000000000
--- a/server/mvcc/backend/batch_tx_test.go
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend_test
-
-import (
- "reflect"
- "testing"
- "time"
-
- bolt "go.etcd.io/bbolt"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
- betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
-)
-
-func TestBatchTxPut(t *testing.T) {
- b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
- defer betesting.Close(t, b)
-
- tx := b.BatchTx()
-
- tx.Lock()
-
- // create bucket
- tx.UnsafeCreateBucket([]byte("test"))
-
- // put
- v := []byte("bar")
- tx.UnsafePut([]byte("test"), []byte("foo"), v)
-
- tx.Unlock()
-
- // check put result before and after tx is committed
- for k := 0; k < 2; k++ {
- tx.Lock()
- _, gv := tx.UnsafeRange([]byte("test"), []byte("foo"), nil, 0)
- tx.Unlock()
- if !reflect.DeepEqual(gv[0], v) {
- t.Errorf("v = %s, want %s", string(gv[0]), string(v))
- }
- tx.Commit()
- }
-}
-
-func TestBatchTxRange(t *testing.T) {
- b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
- defer betesting.Close(t, b)
-
- tx := b.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- tx.UnsafeCreateBucket([]byte("test"))
- // put keys
- allKeys := [][]byte{[]byte("foo"), []byte("foo1"), []byte("foo2")}
- allVals := [][]byte{[]byte("bar"), []byte("bar1"), []byte("bar2")}
- for i := range allKeys {
- tx.UnsafePut([]byte("test"), allKeys[i], allVals[i])
- }
-
- tests := []struct {
- key []byte
- endKey []byte
- limit int64
-
- wkeys [][]byte
- wvals [][]byte
- }{
- // single key
- {
- []byte("foo"), nil, 0,
- allKeys[:1], allVals[:1],
- },
- // single key, bad
- {
- []byte("doo"), nil, 0,
- nil, nil,
- },
- // key range
- {
- []byte("foo"), []byte("foo1"), 0,
- allKeys[:1], allVals[:1],
- },
- // key range, get all keys
- {
- []byte("foo"), []byte("foo3"), 0,
- allKeys, allVals,
- },
- // key range, bad
- {
- []byte("goo"), []byte("goo3"), 0,
- nil, nil,
- },
- // key range with effective limit
- {
- []byte("foo"), []byte("foo3"), 1,
- allKeys[:1], allVals[:1],
- },
- // key range with limit
- {
- []byte("foo"), []byte("foo3"), 4,
- allKeys, allVals,
- },
- }
- for i, tt := range tests {
- keys, vals := tx.UnsafeRange([]byte("test"), tt.key, tt.endKey, tt.limit)
- if !reflect.DeepEqual(keys, tt.wkeys) {
- t.Errorf("#%d: keys = %+v, want %+v", i, keys, tt.wkeys)
- }
- if !reflect.DeepEqual(vals, tt.wvals) {
- t.Errorf("#%d: vals = %+v, want %+v", i, vals, tt.wvals)
- }
- }
-}
-
-func TestBatchTxDelete(t *testing.T) {
- b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
- defer betesting.Close(t, b)
-
- tx := b.BatchTx()
- tx.Lock()
-
- tx.UnsafeCreateBucket([]byte("test"))
- tx.UnsafePut([]byte("test"), []byte("foo"), []byte("bar"))
-
- tx.UnsafeDelete([]byte("test"), []byte("foo"))
-
- tx.Unlock()
-
- // check put result before and after tx is committed
- for k := 0; k < 2; k++ {
- tx.Lock()
- ks, _ := tx.UnsafeRange([]byte("test"), []byte("foo"), nil, 0)
- tx.Unlock()
- if len(ks) != 0 {
- t.Errorf("keys on foo = %v, want nil", ks)
- }
- tx.Commit()
- }
-}
-
-func TestBatchTxCommit(t *testing.T) {
- b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
- defer betesting.Close(t, b)
-
- tx := b.BatchTx()
- tx.Lock()
- tx.UnsafeCreateBucket([]byte("test"))
- tx.UnsafePut([]byte("test"), []byte("foo"), []byte("bar"))
- tx.Unlock()
-
- tx.Commit()
-
- // check whether put happens via db view
- backend.DbFromBackendForTest(b).View(func(tx *bolt.Tx) error {
- bucket := tx.Bucket([]byte("test"))
- if bucket == nil {
- t.Errorf("bucket test does not exit")
- return nil
- }
- v := bucket.Get([]byte("foo"))
- if v == nil {
- t.Errorf("foo key failed to written in backend")
- }
- return nil
- })
-}
-
-func TestBatchTxBatchLimitCommit(t *testing.T) {
- // start backend with batch limit 1 so one write can
- // trigger a commit
- b, _ := betesting.NewTmpBackend(t, time.Hour, 1)
- defer betesting.Close(t, b)
-
- tx := b.BatchTx()
- tx.Lock()
- tx.UnsafeCreateBucket([]byte("test"))
- tx.UnsafePut([]byte("test"), []byte("foo"), []byte("bar"))
- tx.Unlock()
-
- // batch limit commit should have been triggered
- // check whether put happens via db view
- backend.DbFromBackendForTest(b).View(func(tx *bolt.Tx) error {
- bucket := tx.Bucket([]byte("test"))
- if bucket == nil {
- t.Errorf("bucket test does not exit")
- return nil
- }
- v := bucket.Get([]byte("foo"))
- if v == nil {
- t.Errorf("foo key failed to written in backend")
- }
- return nil
- })
-}
diff --git a/server/mvcc/backend/export_test.go b/server/mvcc/backend/export_test.go
deleted file mode 100644
index 6cf96c35d77..00000000000
--- a/server/mvcc/backend/export_test.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package backend
-
-import bolt "go.etcd.io/bbolt"
-
-func DbFromBackendForTest(b Backend) *bolt.DB {
- return b.(*backend).db
-}
-
-func DefragLimitForTest() int {
- return defragLimit
-}
-
-func CommitsForTest(b Backend) int64 {
- return b.(*backend).Commits()
-}
diff --git a/server/mvcc/backend/hooks.go b/server/mvcc/backend/hooks.go
deleted file mode 100644
index 9750828ef7b..00000000000
--- a/server/mvcc/backend/hooks.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend
-
-type HookFunc func(tx BatchTx)
-
-// Hooks allow adding additional logic to be executed during the transaction lifetime.
-type Hooks interface {
- // OnPreCommitUnsafe is executed before Commit of transactions.
- // The given transaction is already locked.
- OnPreCommitUnsafe(tx BatchTx)
-}
-
-type hooks struct {
- onPreCommitUnsafe HookFunc
-}
-
-func (h hooks) OnPreCommitUnsafe(tx BatchTx) {
- h.onPreCommitUnsafe(tx)
-}
-
-func NewHooks(onPreCommitUnsafe HookFunc) Hooks {
- return hooks{onPreCommitUnsafe: onPreCommitUnsafe}
-}
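The only hook point defined here is pre-commit: `batchTxBuffered.commit` (in batch_tx.go above) calls `OnPreCommitUnsafe` with the already-locked transaction right before each boltdb commit. A self-contained sketch of that shape, with a trivial stand-in for `BatchTx` (the `putter` interface and `memTx` type are illustrative, not the real wiring):

```go
package main

import "fmt"

// putter is a minimal stand-in for the BatchTx methods a hook typically uses.
type putter interface {
	UnsafePut(bucket, key, value []byte)
}

type HookFunc func(tx putter)

type Hooks interface {
	OnPreCommitUnsafe(tx putter)
}

type hooks struct{ onPreCommitUnsafe HookFunc }

func (h hooks) OnPreCommitUnsafe(tx putter) { h.onPreCommitUnsafe(tx) }

func NewHooks(f HookFunc) Hooks { return hooks{onPreCommitUnsafe: f} }

// memTx just records puts so the example is observable.
type memTx struct{ puts int }

func (m *memTx) UnsafePut(bucket, key, value []byte) { m.puts++ }

// commit mirrors batchTxBuffered.commit: run the hook, then "commit".
func commit(tx *memTx, h Hooks) {
	if h != nil {
		h.OnPreCommitUnsafe(tx)
	}
	fmt.Printf("committed with %d hook write(s)\n", tx.puts)
}

func main() {
	h := NewHooks(func(tx putter) {
		// e.g. persist bookkeeping (such as a consistency marker) before commit
		tx.UnsafePut([]byte("meta"), []byte("marker"), []byte("42"))
	})
	commit(&memTx{}, h)
}
```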
diff --git a/server/mvcc/backend/metrics.go b/server/mvcc/backend/metrics.go
deleted file mode 100644
index d9641af7ae2..00000000000
--- a/server/mvcc/backend/metrics.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend
-
-import "github.com/prometheus/client_golang/prometheus"
-
-var (
- commitSec = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "disk",
- Name: "backend_commit_duration_seconds",
- Help: "The latency distributions of commit called by backend.",
-
- // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
- // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
- Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
- })
-
- rebalanceSec = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd_debugging",
- Subsystem: "disk",
- Name: "backend_commit_rebalance_duration_seconds",
- Help: "The latency distributions of commit.rebalance called by bboltdb backend.",
-
- // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
- // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
- Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
- })
-
- spillSec = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd_debugging",
- Subsystem: "disk",
- Name: "backend_commit_spill_duration_seconds",
- Help: "The latency distributions of commit.spill called by bboltdb backend.",
-
- // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
- // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
- Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
- })
-
- writeSec = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd_debugging",
- Subsystem: "disk",
- Name: "backend_commit_write_duration_seconds",
- Help: "The latency distributions of commit.write called by bboltdb backend.",
-
- // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
- // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
- Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
- })
-
- defragSec = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "disk",
- Name: "backend_defrag_duration_seconds",
- Help: "The latency distribution of backend defragmentation.",
-
- // 100 MB usually takes 1 sec, so start with 10 MB of 100 ms
- // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
- // highest bucket start of 0.1 sec * 2^12 == 409.6 sec
- Buckets: prometheus.ExponentialBuckets(.1, 2, 13),
- })
-
- snapshotTransferSec = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "disk",
- Name: "backend_snapshot_duration_seconds",
- Help: "The latency distribution of backend snapshots.",
-
- // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
- // highest bucket start of 0.01 sec * 2^16 == 655.36 sec
- Buckets: prometheus.ExponentialBuckets(.01, 2, 17),
- })
-)
-
-func init() {
- prometheus.MustRegister(commitSec)
- prometheus.MustRegister(rebalanceSec)
- prometheus.MustRegister(spillSec)
- prometheus.MustRegister(writeSec)
- prometheus.MustRegister(defragSec)
- prometheus.MustRegister(snapshotTransferSec)
-}
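The bucket comments above follow from how `prometheus.ExponentialBuckets` builds its upper bounds: `count` values starting at `start` and multiplying by `factor`, so `ExponentialBuckets(0.001, 2, 14)` spans roughly 1 ms to 0.001 * 2^13 = 8.192 s. A quick check using the same client_golang call:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// 14 commit buckets: 0.001, 0.002, ... up to 0.001 * 2^13 = 8.192 seconds.
	commit := prometheus.ExponentialBuckets(0.001, 2, 14)
	fmt.Println(len(commit), commit[0], commit[len(commit)-1]) // 14, ~0.001, ~8.192

	// 13 defrag buckets: 0.1, 0.2, ... up to 0.1 * 2^12 = 409.6 seconds.
	defrag := prometheus.ExponentialBuckets(0.1, 2, 13)
	fmt.Println(len(defrag), defrag[0], defrag[len(defrag)-1]) // 13, ~0.1, ~409.6
}
```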
diff --git a/server/mvcc/backend/read_tx.go b/server/mvcc/backend/read_tx.go
deleted file mode 100644
index 3658786277b..00000000000
--- a/server/mvcc/backend/read_tx.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend
-
-import (
- "bytes"
- "math"
- "sync"
-
- bolt "go.etcd.io/bbolt"
-)
-
-// safeRangeBucket is a hack to avoid inadvertently reading duplicate keys;
-// overwrites on a bucket should only fetch with limit=1, but safeRangeBucket
-// is known to never overwrite any key so range is safe.
-var safeRangeBucket = []byte("key")
-
-type ReadTx interface {
- Lock()
- Unlock()
- RLock()
- RUnlock()
-
- UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte)
- UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error
-}
-
-// Base type for readTx and concurrentReadTx to eliminate duplicate functions between the two.
-type baseReadTx struct {
- // mu protects accesses to the txReadBuffer
- mu sync.RWMutex
- buf txReadBuffer
-
- // TODO: group and encapsulate {txMu, tx, buckets, txWg}, as they share the same lifecycle.
- // txMu protects accesses to buckets and tx on Range requests.
- txMu *sync.RWMutex
- tx *bolt.Tx
- buckets map[string]*bolt.Bucket
- // txWg protects tx from being rolled back at the end of a batch interval until all reads using this tx are done.
- txWg *sync.WaitGroup
-}
-
-func (baseReadTx *baseReadTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
- dups := make(map[string]struct{})
- getDups := func(k, v []byte) error {
- dups[string(k)] = struct{}{}
- return nil
- }
- visitNoDup := func(k, v []byte) error {
- if _, ok := dups[string(k)]; ok {
- return nil
- }
- return visitor(k, v)
- }
- if err := baseReadTx.buf.ForEach(bucketName, getDups); err != nil {
- return err
- }
- baseReadTx.txMu.Lock()
- err := unsafeForEach(baseReadTx.tx, bucketName, visitNoDup)
- baseReadTx.txMu.Unlock()
- if err != nil {
- return err
- }
- return baseReadTx.buf.ForEach(bucketName, visitor)
-}
-
-func (baseReadTx *baseReadTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
- if endKey == nil {
- // forbid duplicates for single keys
- limit = 1
- }
- if limit <= 0 {
- limit = math.MaxInt64
- }
- if limit > 1 && !bytes.Equal(bucketName, safeRangeBucket) {
- panic("do not use unsafeRange on non-keys bucket")
- }
- keys, vals := baseReadTx.buf.Range(bucketName, key, endKey, limit)
- if int64(len(keys)) == limit {
- return keys, vals
- }
-
- // find/cache bucket
- bn := string(bucketName)
- baseReadTx.txMu.RLock()
- bucket, ok := baseReadTx.buckets[bn]
- baseReadTx.txMu.RUnlock()
- lockHeld := false
- if !ok {
- baseReadTx.txMu.Lock()
- lockHeld = true
- bucket = baseReadTx.tx.Bucket(bucketName)
- baseReadTx.buckets[bn] = bucket
- }
-
- // ignore missing bucket since it may have been created in this batch
- if bucket == nil {
- if lockHeld {
- baseReadTx.txMu.Unlock()
- }
- return keys, vals
- }
- if !lockHeld {
- baseReadTx.txMu.Lock()
- }
- c := bucket.Cursor()
- baseReadTx.txMu.Unlock()
-
- k2, v2 := unsafeRange(c, key, endKey, limit-int64(len(keys)))
- return append(k2, keys...), append(v2, vals...)
-}
-
-type readTx struct {
- baseReadTx
-}
-
-func (rt *readTx) Lock() { rt.mu.Lock() }
-func (rt *readTx) Unlock() { rt.mu.Unlock() }
-func (rt *readTx) RLock() { rt.mu.RLock() }
-func (rt *readTx) RUnlock() { rt.mu.RUnlock() }
-
-func (rt *readTx) reset() {
- rt.buf.reset()
- rt.buckets = make(map[string]*bolt.Bucket)
- rt.tx = nil
- rt.txWg = new(sync.WaitGroup)
-}
-
-type concurrentReadTx struct {
- baseReadTx
-}
-
-func (rt *concurrentReadTx) Lock() {}
-func (rt *concurrentReadTx) Unlock() {}
-
-// RLock is no-op. concurrentReadTx does not need to be locked after it is created.
-func (rt *concurrentReadTx) RLock() {}
-
-// RUnlock signals the end of concurrentReadTx.
-func (rt *concurrentReadTx) RUnlock() { rt.txWg.Done() }
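A `concurrentReadTx` ties its lifetime to the backend's `txWg`: handing one out is expected to `Add(1)` to the wait group (done by the backend's `ConcurrentReadTx`, not shown here), `RUnlock` calls `Done`, and `unsafeCommit` in batch_tx.go waits on the group in a goroutine before rolling back the shared bolt read transaction. A minimal sketch of that handoff, with the rollback simulated (names are illustrative):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// sharedTx stands in for the backend's current bolt read transaction.
type sharedTx struct {
	wg sync.WaitGroup // one count per outstanding concurrent read tx
}

// acquire mirrors handing out a concurrentReadTx: register the reader and
// return the function its RUnlock will call.
func (s *sharedTx) acquire() func() {
	s.wg.Add(1)
	return s.wg.Done
}

// retire mirrors unsafeCommit: roll the old tx back only after every reader
// is done, without blocking the writer that triggered the commit.
func (s *sharedTx) retire() {
	go func() {
		s.wg.Wait()
		fmt.Println("all readers done; old read tx rolled back")
	}()
}

func main() {
	tx := &sharedTx{}
	release := tx.acquire() // a concurrent read tx is handed out

	tx.retire() // the writer commits and moves on immediately

	time.Sleep(10 * time.Millisecond)
	release() // the reader's RUnlock (txWg.Done) unblocks the rollback

	time.Sleep(10 * time.Millisecond) // let the goroutine print
}
```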
diff --git a/server/mvcc/backend/tx_buffer.go b/server/mvcc/backend/tx_buffer.go
deleted file mode 100644
index 4df6d0c5951..00000000000
--- a/server/mvcc/backend/tx_buffer.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend
-
-import (
- "bytes"
- "sort"
-)
-
-// txBuffer handles functionality shared between txWriteBuffer and txReadBuffer.
-type txBuffer struct {
- buckets map[string]*bucketBuffer
-}
-
-func (txb *txBuffer) reset() {
- for k, v := range txb.buckets {
- if v.used == 0 {
- // demote
- delete(txb.buckets, k)
- }
- v.used = 0
- }
-}
-
-// txWriteBuffer buffers writes of pending updates that have not yet committed.
-type txWriteBuffer struct {
- txBuffer
- seq bool
-}
-
-func (txw *txWriteBuffer) put(bucket, k, v []byte) {
- txw.seq = false
- txw.putSeq(bucket, k, v)
-}
-
-func (txw *txWriteBuffer) putSeq(bucket, k, v []byte) {
- b, ok := txw.buckets[string(bucket)]
- if !ok {
- b = newBucketBuffer()
- txw.buckets[string(bucket)] = b
- }
- b.add(k, v)
-}
-
-func (txw *txWriteBuffer) writeback(txr *txReadBuffer) {
- for k, wb := range txw.buckets {
- rb, ok := txr.buckets[k]
- if !ok {
- delete(txw.buckets, k)
- txr.buckets[k] = wb
- continue
- }
- if !txw.seq && wb.used > 1 {
- // assume no duplicate keys
- sort.Sort(wb)
- }
- rb.merge(wb)
- }
- txw.reset()
-}
-
-// txReadBuffer accesses buffered updates.
-type txReadBuffer struct{ txBuffer }
-
-func (txr *txReadBuffer) Range(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
- if b := txr.buckets[string(bucketName)]; b != nil {
- return b.Range(key, endKey, limit)
- }
- return nil, nil
-}
-
-func (txr *txReadBuffer) ForEach(bucketName []byte, visitor func(k, v []byte) error) error {
- if b := txr.buckets[string(bucketName)]; b != nil {
- return b.ForEach(visitor)
- }
- return nil
-}
-
-// unsafeCopy returns a copy of txReadBuffer, caller should acquire backend.readTx.RLock()
-func (txr *txReadBuffer) unsafeCopy() txReadBuffer {
- txrCopy := txReadBuffer{
- txBuffer: txBuffer{
- buckets: make(map[string]*bucketBuffer, len(txr.txBuffer.buckets)),
- },
- }
- for bucketName, bucket := range txr.txBuffer.buckets {
- txrCopy.txBuffer.buckets[bucketName] = bucket.Copy()
- }
- return txrCopy
-}
-
-type kv struct {
- key []byte
- val []byte
-}
-
-// bucketBuffer buffers key-value pairs that are pending commit.
-type bucketBuffer struct {
- buf []kv
- // used tracks number of elements in use so buf can be reused without reallocation.
- used int
-}
-
-func newBucketBuffer() *bucketBuffer {
- return &bucketBuffer{buf: make([]kv, 512), used: 0}
-}
-
-func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) {
- f := func(i int) bool { return bytes.Compare(bb.buf[i].key, key) >= 0 }
- idx := sort.Search(bb.used, f)
- if idx < 0 {
- return nil, nil
- }
- if len(endKey) == 0 {
- if bytes.Equal(key, bb.buf[idx].key) {
- keys = append(keys, bb.buf[idx].key)
- vals = append(vals, bb.buf[idx].val)
- }
- return keys, vals
- }
- if bytes.Compare(endKey, bb.buf[idx].key) <= 0 {
- return nil, nil
- }
- for i := idx; i < bb.used && int64(len(keys)) < limit; i++ {
- if bytes.Compare(endKey, bb.buf[i].key) <= 0 {
- break
- }
- keys = append(keys, bb.buf[i].key)
- vals = append(vals, bb.buf[i].val)
- }
- return keys, vals
-}
-
-func (bb *bucketBuffer) ForEach(visitor func(k, v []byte) error) error {
- for i := 0; i < bb.used; i++ {
- if err := visitor(bb.buf[i].key, bb.buf[i].val); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (bb *bucketBuffer) add(k, v []byte) {
- bb.buf[bb.used].key, bb.buf[bb.used].val = k, v
- bb.used++
- if bb.used == len(bb.buf) {
- buf := make([]kv, (3*len(bb.buf))/2)
- copy(buf, bb.buf)
- bb.buf = buf
- }
-}
-
-// merge merges data from bbsrc into bb.
-func (bb *bucketBuffer) merge(bbsrc *bucketBuffer) {
- for i := 0; i < bbsrc.used; i++ {
- bb.add(bbsrc.buf[i].key, bbsrc.buf[i].val)
- }
- if bb.used == bbsrc.used {
- return
- }
- if bytes.Compare(bb.buf[(bb.used-bbsrc.used)-1].key, bbsrc.buf[0].key) < 0 {
- return
- }
-
- sort.Stable(bb)
-
- // remove duplicates, using only newest update
- widx := 0
- for ridx := 1; ridx < bb.used; ridx++ {
- if !bytes.Equal(bb.buf[ridx].key, bb.buf[widx].key) {
- widx++
- }
- bb.buf[widx] = bb.buf[ridx]
- }
- bb.used = widx + 1
-}
-
-func (bb *bucketBuffer) Len() int { return bb.used }
-func (bb *bucketBuffer) Less(i, j int) bool {
- return bytes.Compare(bb.buf[i].key, bb.buf[j].key) < 0
-}
-func (bb *bucketBuffer) Swap(i, j int) { bb.buf[i], bb.buf[j] = bb.buf[j], bb.buf[i] }
-
-func (bb *bucketBuffer) Copy() *bucketBuffer {
- bbCopy := bucketBuffer{
- buf: make([]kv, len(bb.buf)),
- used: bb.used,
- }
- copy(bbCopy.buf, bb.buf)
- return &bbCopy
-}
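The subtle part of `bucketBuffer.merge` above is duplicate handling: the source entries are appended after the existing ones, the whole buffer is stable-sorted, and for equal keys only the last (newest) entry is kept. A standalone re-run of that dedup logic on plain slices (same algorithm, minus the early-exit optimizations):

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

type kv struct{ key, val []byte }

type buffer struct{ buf []kv }

func (b *buffer) Len() int           { return len(b.buf) }
func (b *buffer) Less(i, j int) bool { return bytes.Compare(b.buf[i].key, b.buf[j].key) < 0 }
func (b *buffer) Swap(i, j int)      { b.buf[i], b.buf[j] = b.buf[j], b.buf[i] }

// merge appends src, stable-sorts, and keeps only the newest value per key,
// mirroring bucketBuffer.merge.
func (b *buffer) merge(src []kv) {
	b.buf = append(b.buf, src...)
	sort.Stable(b)

	widx := 0
	for ridx := 1; ridx < len(b.buf); ridx++ {
		if !bytes.Equal(b.buf[ridx].key, b.buf[widx].key) {
			widx++
		}
		b.buf[widx] = b.buf[ridx]
	}
	b.buf = b.buf[:widx+1]
}

func main() {
	b := &buffer{buf: []kv{
		{[]byte("a"), []byte("old")},
		{[]byte("c"), []byte("1")},
	}}
	b.merge([]kv{
		{[]byte("a"), []byte("new")}, // overwrites the older "a"
		{[]byte("b"), []byte("2")},
	})
	for _, e := range b.buf {
		fmt.Printf("%s=%s\n", e.key, e.val)
	}
	// prints a=new, b=2, c=1 (one per line)
}
```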
diff --git a/server/mvcc/index.go b/server/mvcc/index.go
deleted file mode 100644
index 57ba1bab46f..00000000000
--- a/server/mvcc/index.go
+++ /dev/null
@@ -1,279 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "sort"
- "sync"
-
- "github.com/google/btree"
- "go.uber.org/zap"
-)
-
-type index interface {
- Get(key []byte, atRev int64) (rev, created revision, ver int64, err error)
- Range(key, end []byte, atRev int64) ([][]byte, []revision)
- Revisions(key, end []byte, atRev int64, limit int) []revision
- CountRevisions(key, end []byte, atRev int64, limit int) int
- Put(key []byte, rev revision)
- Tombstone(key []byte, rev revision) error
- RangeSince(key, end []byte, rev int64) []revision
- Compact(rev int64) map[revision]struct{}
- Keep(rev int64) map[revision]struct{}
- Equal(b index) bool
-
- Insert(ki *keyIndex)
- KeyIndex(ki *keyIndex) *keyIndex
-}
-
-type treeIndex struct {
- sync.RWMutex
- tree *btree.BTree
- lg *zap.Logger
-}
-
-func newTreeIndex(lg *zap.Logger) index {
- return &treeIndex{
- tree: btree.New(32),
- lg: lg,
- }
-}
-
-func (ti *treeIndex) Put(key []byte, rev revision) {
- keyi := &keyIndex{key: key}
-
- ti.Lock()
- defer ti.Unlock()
- item := ti.tree.Get(keyi)
- if item == nil {
- keyi.put(ti.lg, rev.main, rev.sub)
- ti.tree.ReplaceOrInsert(keyi)
- return
- }
- okeyi := item.(*keyIndex)
- okeyi.put(ti.lg, rev.main, rev.sub)
-}
-
-func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) {
- keyi := &keyIndex{key: key}
- ti.RLock()
- defer ti.RUnlock()
- if keyi = ti.keyIndex(keyi); keyi == nil {
- return revision{}, revision{}, 0, ErrRevisionNotFound
- }
- return keyi.get(ti.lg, atRev)
-}
-
-func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex {
- ti.RLock()
- defer ti.RUnlock()
- return ti.keyIndex(keyi)
-}
-
-func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex {
- if item := ti.tree.Get(keyi); item != nil {
- return item.(*keyIndex)
- }
- return nil
-}
-
-func (ti *treeIndex) visit(key, end []byte, f func(ki *keyIndex) bool) {
- keyi, endi := &keyIndex{key: key}, &keyIndex{key: end}
-
- ti.RLock()
- defer ti.RUnlock()
-
- ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool {
- if len(endi.key) > 0 && !item.Less(endi) {
- return false
- }
- if !f(item.(*keyIndex)) {
- return false
- }
- return true
- })
-}
-
-func (ti *treeIndex) Revisions(key, end []byte, atRev int64, limit int) (revs []revision) {
- if end == nil {
- rev, _, _, err := ti.Get(key, atRev)
- if err != nil {
- return nil
- }
- return []revision{rev}
- }
- ti.visit(key, end, func(ki *keyIndex) bool {
- if rev, _, _, err := ki.get(ti.lg, atRev); err == nil {
- revs = append(revs, rev)
- if len(revs) == limit {
- return false
- }
- }
- return true
- })
- return revs
-}
-
-func (ti *treeIndex) CountRevisions(key, end []byte, atRev int64, limit int) int {
- if end == nil {
- _, _, _, err := ti.Get(key, atRev)
- if err != nil {
- return 0
- }
- return 1
- }
- total := 0
- ti.visit(key, end, func(ki *keyIndex) bool {
- if _, _, _, err := ki.get(ti.lg, atRev); err == nil {
- total++
- if total == limit {
- return false
- }
- }
- return true
- })
- return total
-}
-
-func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) {
- if end == nil {
- rev, _, _, err := ti.Get(key, atRev)
- if err != nil {
- return nil, nil
- }
- return [][]byte{key}, []revision{rev}
- }
- ti.visit(key, end, func(ki *keyIndex) bool {
- if rev, _, _, err := ki.get(ti.lg, atRev); err == nil {
- revs = append(revs, rev)
- keys = append(keys, ki.key)
- }
- return true
- })
- return keys, revs
-}
-
-func (ti *treeIndex) Tombstone(key []byte, rev revision) error {
- keyi := &keyIndex{key: key}
-
- ti.Lock()
- defer ti.Unlock()
- item := ti.tree.Get(keyi)
- if item == nil {
- return ErrRevisionNotFound
- }
-
- ki := item.(*keyIndex)
- return ki.tombstone(ti.lg, rev.main, rev.sub)
-}
-
-// RangeSince returns all revisions from key (inclusive) to end (exclusive)
-// at or after the given rev. The returned slice is sorted in the order
-// of revision.
-func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision {
- keyi := &keyIndex{key: key}
-
- ti.RLock()
- defer ti.RUnlock()
-
- if end == nil {
- item := ti.tree.Get(keyi)
- if item == nil {
- return nil
- }
- keyi = item.(*keyIndex)
- return keyi.since(ti.lg, rev)
- }
-
- endi := &keyIndex{key: end}
- var revs []revision
- ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool {
- if len(endi.key) > 0 && !item.Less(endi) {
- return false
- }
- curKeyi := item.(*keyIndex)
- revs = append(revs, curKeyi.since(ti.lg, rev)...)
- return true
- })
- sort.Sort(revisions(revs))
-
- return revs
-}
-
-func (ti *treeIndex) Compact(rev int64) map[revision]struct{} {
- available := make(map[revision]struct{})
- ti.lg.Info("compact tree index", zap.Int64("revision", rev))
- ti.Lock()
- clone := ti.tree.Clone()
- ti.Unlock()
-
- clone.Ascend(func(item btree.Item) bool {
- keyi := item.(*keyIndex)
- // Lock is needed here to prevent modification to the keyIndex while
- // compaction is in progress, or a revision being added to an empty keyIndex before deletion.
- ti.Lock()
- keyi.compact(ti.lg, rev, available)
- if keyi.isEmpty() {
- item := ti.tree.Delete(keyi)
- if item == nil {
- ti.lg.Panic("failed to delete during compaction")
- }
- }
- ti.Unlock()
- return true
- })
- return available
-}
-
-// Keep finds all revisions to be kept for a Compaction at the given rev.
-func (ti *treeIndex) Keep(rev int64) map[revision]struct{} {
- available := make(map[revision]struct{})
- ti.RLock()
- defer ti.RUnlock()
- ti.tree.Ascend(func(i btree.Item) bool {
- keyi := i.(*keyIndex)
- keyi.keep(rev, available)
- return true
- })
- return available
-}
-
-func (ti *treeIndex) Equal(bi index) bool {
- b := bi.(*treeIndex)
-
- if ti.tree.Len() != b.tree.Len() {
- return false
- }
-
- equal := true
-
- ti.tree.Ascend(func(item btree.Item) bool {
- aki := item.(*keyIndex)
- bki := b.tree.Get(item).(*keyIndex)
- if !aki.equal(bki) {
- equal = false
- return false
- }
- return true
- })
-
- return equal
-}
-
-func (ti *treeIndex) Insert(ki *keyIndex) {
- ti.Lock()
- defer ti.Unlock()
- ti.tree.ReplaceOrInsert(ki)
-}
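`treeIndex` keeps one `keyIndex` per key in a google/btree ordered by key bytes, and range lookups walk the tree with `AscendGreaterOrEqual` until the exclusive end key, as the `visit` helper above does. A small standalone sketch of that traversal using the same btree package (the `item` type here is illustrative, standing in for `keyIndex`):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/google/btree"
)

// item orders entries by key bytes, like keyIndex does in the real code.
type item struct{ key []byte }

func (a *item) Less(b btree.Item) bool {
	return bytes.Compare(a.key, b.(*item).key) < 0
}

func main() {
	tree := btree.New(32)
	for _, k := range []string{"foo", "foo1", "foo2", "zoo"} {
		tree.ReplaceOrInsert(&item{key: []byte(k)})
	}

	// Visit all keys in [foo, fop), mirroring treeIndex.visit.
	start, end := &item{key: []byte("foo")}, &item{key: []byte("fop")}
	tree.AscendGreaterOrEqual(start, func(i btree.Item) bool {
		if !i.Less(end) { // reached or passed the exclusive end key
			return false
		}
		fmt.Println(string(i.(*item).key))
		return true
	})
	// prints foo, foo1, foo2 (one per line)
}
```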
diff --git a/server/mvcc/index_bench_test.go b/server/mvcc/index_bench_test.go
deleted file mode 100644
index 5d2e5e3f49b..00000000000
--- a/server/mvcc/index_bench_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "testing"
-
- "go.uber.org/zap"
-)
-
-func BenchmarkIndexCompact1(b *testing.B) { benchmarkIndexCompact(b, 1) }
-func BenchmarkIndexCompact100(b *testing.B) { benchmarkIndexCompact(b, 100) }
-func BenchmarkIndexCompact10000(b *testing.B) { benchmarkIndexCompact(b, 10000) }
-func BenchmarkIndexCompact100000(b *testing.B) { benchmarkIndexCompact(b, 100000) }
-func BenchmarkIndexCompact1000000(b *testing.B) { benchmarkIndexCompact(b, 1000000) }
-
-func benchmarkIndexCompact(b *testing.B, size int) {
- log := zap.NewNop()
- kvindex := newTreeIndex(log)
-
- bytesN := 64
- keys := createBytesSlice(bytesN, size)
- for i := 1; i < size; i++ {
- kvindex.Put(keys[i], revision{main: int64(i), sub: int64(i)})
- }
- b.ResetTimer()
- for i := 1; i < b.N; i++ {
- kvindex.Compact(int64(i))
- }
-}
diff --git a/server/mvcc/index_test.go b/server/mvcc/index_test.go
deleted file mode 100644
index 87c31dd905f..00000000000
--- a/server/mvcc/index_test.go
+++ /dev/null
@@ -1,293 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "reflect"
- "testing"
-
- "github.com/google/btree"
- "go.uber.org/zap"
-)
-
-func TestIndexGet(t *testing.T) {
- ti := newTreeIndex(zap.NewExample())
- ti.Put([]byte("foo"), revision{main: 2})
- ti.Put([]byte("foo"), revision{main: 4})
- ti.Tombstone([]byte("foo"), revision{main: 6})
-
- tests := []struct {
- rev int64
-
- wrev revision
- wcreated revision
- wver int64
- werr error
- }{
- {0, revision{}, revision{}, 0, ErrRevisionNotFound},
- {1, revision{}, revision{}, 0, ErrRevisionNotFound},
- {2, revision{main: 2}, revision{main: 2}, 1, nil},
- {3, revision{main: 2}, revision{main: 2}, 1, nil},
- {4, revision{main: 4}, revision{main: 2}, 2, nil},
- {5, revision{main: 4}, revision{main: 2}, 2, nil},
- {6, revision{}, revision{}, 0, ErrRevisionNotFound},
- }
- for i, tt := range tests {
- rev, created, ver, err := ti.Get([]byte("foo"), tt.rev)
- if err != tt.werr {
- t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
- }
- if rev != tt.wrev {
- t.Errorf("#%d: rev = %+v, want %+v", i, rev, tt.wrev)
- }
- if created != tt.wcreated {
- t.Errorf("#%d: created = %+v, want %+v", i, created, tt.wcreated)
- }
- if ver != tt.wver {
- t.Errorf("#%d: ver = %d, want %d", i, ver, tt.wver)
- }
- }
-}
-
-func TestIndexRange(t *testing.T) {
- allKeys := [][]byte{[]byte("foo"), []byte("foo1"), []byte("foo2")}
- allRevs := []revision{{main: 1}, {main: 2}, {main: 3}}
-
- ti := newTreeIndex(zap.NewExample())
- for i := range allKeys {
- ti.Put(allKeys[i], allRevs[i])
- }
-
- atRev := int64(3)
- tests := []struct {
- key, end []byte
- wkeys [][]byte
- wrevs []revision
- }{
- // single key that is not found
- {
- []byte("bar"), nil, nil, nil,
- },
- // single key that is found
- {
- []byte("foo"), nil, allKeys[:1], allRevs[:1],
- },
- // range keys, return first member
- {
- []byte("foo"), []byte("foo1"), allKeys[:1], allRevs[:1],
- },
- // range keys, return first two members
- {
- []byte("foo"), []byte("foo2"), allKeys[:2], allRevs[:2],
- },
- // range keys, return all members
- {
- []byte("foo"), []byte("fop"), allKeys, allRevs,
- },
- // range keys, return last two members
- {
- []byte("foo1"), []byte("fop"), allKeys[1:], allRevs[1:],
- },
- // range keys, return last member
- {
- []byte("foo2"), []byte("fop"), allKeys[2:], allRevs[2:],
- },
- // range keys, return nothing
- {
- []byte("foo3"), []byte("fop"), nil, nil,
- },
- }
- for i, tt := range tests {
- keys, revs := ti.Range(tt.key, tt.end, atRev)
- if !reflect.DeepEqual(keys, tt.wkeys) {
- t.Errorf("#%d: keys = %+v, want %+v", i, keys, tt.wkeys)
- }
- if !reflect.DeepEqual(revs, tt.wrevs) {
- t.Errorf("#%d: revs = %+v, want %+v", i, revs, tt.wrevs)
- }
- }
-}
-
-func TestIndexTombstone(t *testing.T) {
- ti := newTreeIndex(zap.NewExample())
- ti.Put([]byte("foo"), revision{main: 1})
-
- err := ti.Tombstone([]byte("foo"), revision{main: 2})
- if err != nil {
- t.Errorf("tombstone error = %v, want nil", err)
- }
-
- _, _, _, err = ti.Get([]byte("foo"), 2)
- if err != ErrRevisionNotFound {
- t.Errorf("get error = %v, want ErrRevisionNotFound", err)
- }
- err = ti.Tombstone([]byte("foo"), revision{main: 3})
- if err != ErrRevisionNotFound {
- t.Errorf("tombstone error = %v, want %v", err, ErrRevisionNotFound)
- }
-}
-
-func TestIndexRangeSince(t *testing.T) {
- allKeys := [][]byte{[]byte("foo"), []byte("foo1"), []byte("foo2"), []byte("foo2"), []byte("foo1"), []byte("foo")}
- allRevs := []revision{{main: 1}, {main: 2}, {main: 3}, {main: 4}, {main: 5}, {main: 6}}
-
- ti := newTreeIndex(zap.NewExample())
- for i := range allKeys {
- ti.Put(allKeys[i], allRevs[i])
- }
-
- atRev := int64(1)
- tests := []struct {
- key, end []byte
- wrevs []revision
- }{
- // single key that is not found
- {
- []byte("bar"), nil, nil,
- },
- // single key that is found
- {
- []byte("foo"), nil, []revision{{main: 1}, {main: 6}},
- },
- // range keys, return first member
- {
- []byte("foo"), []byte("foo1"), []revision{{main: 1}, {main: 6}},
- },
- // range keys, return first two members
- {
- []byte("foo"), []byte("foo2"), []revision{{main: 1}, {main: 2}, {main: 5}, {main: 6}},
- },
- // range keys, return all members
- {
- []byte("foo"), []byte("fop"), allRevs,
- },
- // range keys, return last two members
- {
- []byte("foo1"), []byte("fop"), []revision{{main: 2}, {main: 3}, {main: 4}, {main: 5}},
- },
- // range keys, return last member
- {
- []byte("foo2"), []byte("fop"), []revision{{main: 3}, {main: 4}},
- },
- // range keys, return nothing
- {
- []byte("foo3"), []byte("fop"), nil,
- },
- }
- for i, tt := range tests {
- revs := ti.RangeSince(tt.key, tt.end, atRev)
- if !reflect.DeepEqual(revs, tt.wrevs) {
- t.Errorf("#%d: revs = %+v, want %+v", i, revs, tt.wrevs)
- }
- }
-}
-
-func TestIndexCompactAndKeep(t *testing.T) {
- maxRev := int64(20)
- tests := []struct {
- key []byte
- remove bool
- rev revision
- created revision
- ver int64
- }{
- {[]byte("foo"), false, revision{main: 1}, revision{main: 1}, 1},
- {[]byte("foo1"), false, revision{main: 2}, revision{main: 2}, 1},
- {[]byte("foo2"), false, revision{main: 3}, revision{main: 3}, 1},
- {[]byte("foo2"), false, revision{main: 4}, revision{main: 3}, 2},
- {[]byte("foo"), false, revision{main: 5}, revision{main: 1}, 2},
- {[]byte("foo1"), false, revision{main: 6}, revision{main: 2}, 2},
- {[]byte("foo1"), true, revision{main: 7}, revision{}, 0},
- {[]byte("foo2"), true, revision{main: 8}, revision{}, 0},
- {[]byte("foo"), true, revision{main: 9}, revision{}, 0},
- {[]byte("foo"), false, revision{10, 0}, revision{10, 0}, 1},
- {[]byte("foo1"), false, revision{10, 1}, revision{10, 1}, 1},
- }
-
- // Continuous Compact and Keep
- ti := newTreeIndex(zap.NewExample())
- for _, tt := range tests {
- if tt.remove {
- ti.Tombstone(tt.key, tt.rev)
- } else {
- ti.Put(tt.key, tt.rev)
- }
- }
- for i := int64(1); i < maxRev; i++ {
- am := ti.Compact(i)
- keep := ti.Keep(i)
- if !(reflect.DeepEqual(am, keep)) {
- t.Errorf("#%d: compact keep %v != Keep keep %v", i, am, keep)
- }
- wti := &treeIndex{tree: btree.New(32)}
- for _, tt := range tests {
- if _, ok := am[tt.rev]; ok || tt.rev.GreaterThan(revision{main: i}) {
- if tt.remove {
- wti.Tombstone(tt.key, tt.rev)
- } else {
- restore(wti, tt.key, tt.created, tt.rev, tt.ver)
- }
- }
- }
- if !ti.Equal(wti) {
- t.Errorf("#%d: not equal ti", i)
- }
- }
-
- // Once Compact and Keep
- for i := int64(1); i < maxRev; i++ {
- ti := newTreeIndex(zap.NewExample())
- for _, tt := range tests {
- if tt.remove {
- ti.Tombstone(tt.key, tt.rev)
- } else {
- ti.Put(tt.key, tt.rev)
- }
- }
- am := ti.Compact(i)
- keep := ti.Keep(i)
- if !(reflect.DeepEqual(am, keep)) {
- t.Errorf("#%d: compact keep %v != Keep keep %v", i, am, keep)
- }
- wti := &treeIndex{tree: btree.New(32)}
- for _, tt := range tests {
- if _, ok := am[tt.rev]; ok || tt.rev.GreaterThan(revision{main: i}) {
- if tt.remove {
- wti.Tombstone(tt.key, tt.rev)
- } else {
- restore(wti, tt.key, tt.created, tt.rev, tt.ver)
- }
- }
- }
- if !ti.Equal(wti) {
- t.Errorf("#%d: not equal ti", i)
- }
- }
-}
-
-func restore(ti *treeIndex, key []byte, created, modified revision, ver int64) {
- keyi := &keyIndex{key: key}
-
- ti.Lock()
- defer ti.Unlock()
- item := ti.tree.Get(keyi)
- if item == nil {
- keyi.restore(ti.lg, created, modified, ver)
- ti.tree.ReplaceOrInsert(keyi)
- return
- }
- okeyi := item.(*keyIndex)
- okeyi.put(ti.lg, modified.main, modified.sub)
-}
diff --git a/server/mvcc/key_index_test.go b/server/mvcc/key_index_test.go
deleted file mode 100644
index 9e7da6ad90b..00000000000
--- a/server/mvcc/key_index_test.go
+++ /dev/null
@@ -1,700 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "reflect"
- "testing"
-
- "go.uber.org/zap"
-)
-
-func TestKeyIndexGet(t *testing.T) {
- // key: "foo"
- // rev: 16
- // generations:
- // {empty}
- // {{14, 0}[1], {14, 1}[2], {16, 0}(t)[3]}
- // {{8, 0}[1], {10, 0}[2], {12, 0}(t)[3]}
- // {{2, 0}[1], {4, 0}[2], {6, 0}(t)[3]}
- ki := newTestKeyIndex()
- ki.compact(zap.NewExample(), 4, make(map[revision]struct{}))
-
- tests := []struct {
- rev int64
-
- wmod revision
- wcreat revision
- wver int64
- werr error
- }{
- {17, revision{}, revision{}, 0, ErrRevisionNotFound},
- {16, revision{}, revision{}, 0, ErrRevisionNotFound},
-
- // get on generation 3
- {15, revision{14, 1}, revision{14, 0}, 2, nil},
- {14, revision{14, 1}, revision{14, 0}, 2, nil},
-
- {13, revision{}, revision{}, 0, ErrRevisionNotFound},
- {12, revision{}, revision{}, 0, ErrRevisionNotFound},
-
- // get on generation 2
- {11, revision{10, 0}, revision{8, 0}, 2, nil},
- {10, revision{10, 0}, revision{8, 0}, 2, nil},
- {9, revision{8, 0}, revision{8, 0}, 1, nil},
- {8, revision{8, 0}, revision{8, 0}, 1, nil},
-
- {7, revision{}, revision{}, 0, ErrRevisionNotFound},
- {6, revision{}, revision{}, 0, ErrRevisionNotFound},
-
- // get on generation 1
- {5, revision{4, 0}, revision{2, 0}, 2, nil},
- {4, revision{4, 0}, revision{2, 0}, 2, nil},
-
- {3, revision{}, revision{}, 0, ErrRevisionNotFound},
- {2, revision{}, revision{}, 0, ErrRevisionNotFound},
- {1, revision{}, revision{}, 0, ErrRevisionNotFound},
- {0, revision{}, revision{}, 0, ErrRevisionNotFound},
- }
-
- for i, tt := range tests {
- mod, creat, ver, err := ki.get(zap.NewExample(), tt.rev)
- if err != tt.werr {
- t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
- }
- if mod != tt.wmod {
- t.Errorf("#%d: modified = %+v, want %+v", i, mod, tt.wmod)
- }
- if creat != tt.wcreat {
- t.Errorf("#%d: created = %+v, want %+v", i, creat, tt.wcreat)
- }
- if ver != tt.wver {
- t.Errorf("#%d: version = %d, want %d", i, ver, tt.wver)
- }
- }
-}
-
-func TestKeyIndexSince(t *testing.T) {
- ki := newTestKeyIndex()
- ki.compact(zap.NewExample(), 4, make(map[revision]struct{}))
-
- allRevs := []revision{{4, 0}, {6, 0}, {8, 0}, {10, 0}, {12, 0}, {14, 1}, {16, 0}}
- tests := []struct {
- rev int64
-
- wrevs []revision
- }{
- {17, nil},
- {16, allRevs[6:]},
- {15, allRevs[6:]},
- {14, allRevs[5:]},
- {13, allRevs[5:]},
- {12, allRevs[4:]},
- {11, allRevs[4:]},
- {10, allRevs[3:]},
- {9, allRevs[3:]},
- {8, allRevs[2:]},
- {7, allRevs[2:]},
- {6, allRevs[1:]},
- {5, allRevs[1:]},
- {4, allRevs},
- {3, allRevs},
- {2, allRevs},
- {1, allRevs},
- {0, allRevs},
- }
-
- for i, tt := range tests {
- revs := ki.since(zap.NewExample(), tt.rev)
- if !reflect.DeepEqual(revs, tt.wrevs) {
- t.Errorf("#%d: revs = %+v, want %+v", i, revs, tt.wrevs)
- }
- }
-}
-
-func TestKeyIndexPut(t *testing.T) {
- ki := &keyIndex{key: []byte("foo")}
- ki.put(zap.NewExample(), 5, 0)
-
- wki := &keyIndex{
- key: []byte("foo"),
- modified: revision{5, 0},
- generations: []generation{{created: revision{5, 0}, ver: 1, revs: []revision{{main: 5}}}},
- }
- if !reflect.DeepEqual(ki, wki) {
- t.Errorf("ki = %+v, want %+v", ki, wki)
- }
-
- ki.put(zap.NewExample(), 7, 0)
-
- wki = &keyIndex{
- key: []byte("foo"),
- modified: revision{7, 0},
- generations: []generation{{created: revision{5, 0}, ver: 2, revs: []revision{{main: 5}, {main: 7}}}},
- }
- if !reflect.DeepEqual(ki, wki) {
- t.Errorf("ki = %+v, want %+v", ki, wki)
- }
-}
-
-func TestKeyIndexRestore(t *testing.T) {
- ki := &keyIndex{key: []byte("foo")}
- ki.restore(zap.NewExample(), revision{5, 0}, revision{7, 0}, 2)
-
- wki := &keyIndex{
- key: []byte("foo"),
- modified: revision{7, 0},
- generations: []generation{{created: revision{5, 0}, ver: 2, revs: []revision{{main: 7}}}},
- }
- if !reflect.DeepEqual(ki, wki) {
- t.Errorf("ki = %+v, want %+v", ki, wki)
- }
-}
-
-func TestKeyIndexTombstone(t *testing.T) {
- ki := &keyIndex{key: []byte("foo")}
- ki.put(zap.NewExample(), 5, 0)
-
- err := ki.tombstone(zap.NewExample(), 7, 0)
- if err != nil {
- t.Errorf("unexpected tombstone error: %v", err)
- }
-
- wki := &keyIndex{
- key: []byte("foo"),
- modified: revision{7, 0},
- generations: []generation{{created: revision{5, 0}, ver: 2, revs: []revision{{main: 5}, {main: 7}}}, {}},
- }
- if !reflect.DeepEqual(ki, wki) {
- t.Errorf("ki = %+v, want %+v", ki, wki)
- }
-
- ki.put(zap.NewExample(), 8, 0)
- ki.put(zap.NewExample(), 9, 0)
- err = ki.tombstone(zap.NewExample(), 15, 0)
- if err != nil {
- t.Errorf("unexpected tombstone error: %v", err)
- }
-
- wki = &keyIndex{
- key: []byte("foo"),
- modified: revision{15, 0},
- generations: []generation{
- {created: revision{5, 0}, ver: 2, revs: []revision{{main: 5}, {main: 7}}},
- {created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 9}, {main: 15}}},
- {},
- },
- }
- if !reflect.DeepEqual(ki, wki) {
- t.Errorf("ki = %+v, want %+v", ki, wki)
- }
-
- err = ki.tombstone(zap.NewExample(), 16, 0)
- if err != ErrRevisionNotFound {
- t.Errorf("tombstone error = %v, want %v", err, ErrRevisionNotFound)
- }
-}
-
-func TestKeyIndexCompactAndKeep(t *testing.T) {
- tests := []struct {
- compact int64
-
- wki *keyIndex
- wam map[revision]struct{}
- }{
- {
- 1,
- &keyIndex{
- key: []byte("foo"),
- modified: revision{16, 0},
- generations: []generation{
- {created: revision{2, 0}, ver: 3, revs: []revision{{main: 2}, {main: 4}, {main: 6}}},
- {created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 10}, {main: 12}}},
- {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}},
- {},
- },
- },
- map[revision]struct{}{},
- },
- {
- 2,
- &keyIndex{
- key: []byte("foo"),
- modified: revision{16, 0},
- generations: []generation{
- {created: revision{2, 0}, ver: 3, revs: []revision{{main: 2}, {main: 4}, {main: 6}}},
- {created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 10}, {main: 12}}},
- {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}},
- {},
- },
- },
- map[revision]struct{}{
- {main: 2}: {},
- },
- },
- {
- 3,
- &keyIndex{
- key: []byte("foo"),
- modified: revision{16, 0},
- generations: []generation{
- {created: revision{2, 0}, ver: 3, revs: []revision{{main: 2}, {main: 4}, {main: 6}}},
- {created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 10}, {main: 12}}},
- {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}},
- {},
- },
- },
- map[revision]struct{}{
- {main: 2}: {},
- },
- },
- {
- 4,
- &keyIndex{
- key: []byte("foo"),
- modified: revision{16, 0},
- generations: []generation{
- {created: revision{2, 0}, ver: 3, revs: []revision{{main: 4}, {main: 6}}},
- {created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 10}, {main: 12}}},
- {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}},
- {},
- },
- },
- map[revision]struct{}{
- {main: 4}: {},
- },
- },
- {
- 5,
- &keyIndex{
- key: []byte("foo"),
- modified: revision{16, 0},
- generations: []generation{
- {created: revision{2, 0}, ver: 3, revs: []revision{{main: 4}, {main: 6}}},
- {created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 10}, {main: 12}}},
- {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}},
- {},
- },
- },
- map[revision]struct{}{
- {main: 4}: {},
- },
- },
- {
- 6,
- &keyIndex{
- key: []byte("foo"),
- modified: revision{16, 0},
- generations: []generation{
- {created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 10}, {main: 12}}},
- {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}},
- {},
- },
- },
- map[revision]struct{}{},
- },
- {
- 7,
- &keyIndex{
- key: []byte("foo"),
- modified: revision{16, 0},
- generations: []generation{
- {created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 10}, {main: 12}}},
- {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}},
- {},
- },
- },
- map[revision]struct{}{},
- },
- {
- 8,
- &keyIndex{
- key: []byte("foo"),
- modified: revision{16, 0},
- generations: []generation{
- {created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 10}, {main: 12}}},
- {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}},
- {},
- },
- },
- map[revision]struct{}{
- {main: 8}: {},
- },
- },
- {
- 9,
- &keyIndex{
- key: []byte("foo"),
- modified: revision{16, 0},
- generations: []generation{
- {created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 10}, {main: 12}}},
- {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}},
- {},
- },
- },
- map[revision]struct{}{
- {main: 8}: {},
- },
- },
- {
- 10,
- &keyIndex{
- key: []byte("foo"),
- modified: revision{16, 0},
- generations: []generation{
- {created: revision{8, 0}, ver: 3, revs: []revision{{main: 10}, {main: 12}}},
- {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}},
- {},
- },
- },
- map[revision]struct{}{
- {main: 10}: {},
- },
- },
- {
- 11,
- &keyIndex{
- key: []byte("foo"),
- modified: revision{16, 0},
- generations: []generation{
- {created: revision{8, 0}, ver: 3, revs: []revision{{main: 10}, {main: 12}}},
- {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}},
- {},
- },
- },
- map[revision]struct{}{
- {main: 10}: {},
- },
- },
- {
- 12,
- &keyIndex{
- key: []byte("foo"),
- modified: revision{16, 0},
- generations: []generation{
- {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}},
- {},
- },
- },
- map[revision]struct{}{},
- },
- {
- 13,
- &keyIndex{
- key: []byte("foo"),
- modified: revision{16, 0},
- generations: []generation{
- {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}},
- {},
- },
- },
- map[revision]struct{}{},
- },
- {
- 14,
- &keyIndex{
- key: []byte("foo"),
- modified: revision{16, 0},
- generations: []generation{
- {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14, sub: 1}, {main: 16}}},
- {},
- },
- },
- map[revision]struct{}{
- {main: 14, sub: 1}: {},
- },
- },
- {
- 15,
- &keyIndex{
- key: []byte("foo"),
- modified: revision{16, 0},
- generations: []generation{
- {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14, sub: 1}, {main: 16}}},
- {},
- },
- },
- map[revision]struct{}{
- {main: 14, sub: 1}: {},
- },
- },
- {
- 16,
- &keyIndex{
- key: []byte("foo"),
- modified: revision{16, 0},
- generations: []generation{
- {},
- },
- },
- map[revision]struct{}{},
- },
- }
-
- // Continuous Compaction and finding Keep
- ki := newTestKeyIndex()
- for i, tt := range tests {
- am := make(map[revision]struct{})
- kiclone := cloneKeyIndex(ki)
- ki.keep(tt.compact, am)
- if !reflect.DeepEqual(ki, kiclone) {
- t.Errorf("#%d: ki = %+v, want %+v", i, ki, kiclone)
- }
- if !reflect.DeepEqual(am, tt.wam) {
- t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam)
- }
- am = make(map[revision]struct{})
- ki.compact(zap.NewExample(), tt.compact, am)
- if !reflect.DeepEqual(ki, tt.wki) {
- t.Errorf("#%d: ki = %+v, want %+v", i, ki, tt.wki)
- }
- if !reflect.DeepEqual(am, tt.wam) {
- t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam)
- }
- }
-
- // Jump Compaction and finding Keep
- ki = newTestKeyIndex()
- for i, tt := range tests {
- if (i%2 == 0 && i < 6) || (i%2 == 1 && i > 6) {
- am := make(map[revision]struct{})
- kiclone := cloneKeyIndex(ki)
- ki.keep(tt.compact, am)
- if !reflect.DeepEqual(ki, kiclone) {
- t.Errorf("#%d: ki = %+v, want %+v", i, ki, kiclone)
- }
- if !reflect.DeepEqual(am, tt.wam) {
- t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam)
- }
- am = make(map[revision]struct{})
- ki.compact(zap.NewExample(), tt.compact, am)
- if !reflect.DeepEqual(ki, tt.wki) {
- t.Errorf("#%d: ki = %+v, want %+v", i, ki, tt.wki)
- }
- if !reflect.DeepEqual(am, tt.wam) {
- t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam)
- }
- }
- }
-
- kiClone := newTestKeyIndex()
- // Once Compaction and finding Keep
- for i, tt := range tests {
- ki := newTestKeyIndex()
- am := make(map[revision]struct{})
- ki.keep(tt.compact, am)
- if !reflect.DeepEqual(ki, kiClone) {
- t.Errorf("#%d: ki = %+v, want %+v", i, ki, kiClone)
- }
- if !reflect.DeepEqual(am, tt.wam) {
- t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam)
- }
- am = make(map[revision]struct{})
- ki.compact(zap.NewExample(), tt.compact, am)
- if !reflect.DeepEqual(ki, tt.wki) {
- t.Errorf("#%d: ki = %+v, want %+v", i, ki, tt.wki)
- }
- if !reflect.DeepEqual(am, tt.wam) {
- t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam)
- }
- }
-}
-
-func cloneKeyIndex(ki *keyIndex) *keyIndex {
- generations := make([]generation, len(ki.generations))
- for i, gen := range ki.generations {
- generations[i] = *cloneGeneration(&gen)
- }
- return &keyIndex{ki.key, ki.modified, generations}
-}
-
-func cloneGeneration(g *generation) *generation {
- if g.revs == nil {
- return &generation{g.ver, g.created, nil}
- }
- tmp := make([]revision, len(g.revs))
- copy(tmp, g.revs)
- return &generation{g.ver, g.created, tmp}
-}
-
-// test that compacting at a revision higher than the last modified revision works well
-func TestKeyIndexCompactOnFurtherRev(t *testing.T) {
- ki := &keyIndex{key: []byte("foo")}
- ki.put(zap.NewExample(), 1, 0)
- ki.put(zap.NewExample(), 2, 0)
- am := make(map[revision]struct{})
- ki.compact(zap.NewExample(), 3, am)
-
- wki := &keyIndex{
- key: []byte("foo"),
- modified: revision{2, 0},
- generations: []generation{
- {created: revision{1, 0}, ver: 2, revs: []revision{{main: 2}}},
- },
- }
- wam := map[revision]struct{}{
- {main: 2}: {},
- }
- if !reflect.DeepEqual(ki, wki) {
- t.Errorf("ki = %+v, want %+v", ki, wki)
- }
- if !reflect.DeepEqual(am, wam) {
- t.Errorf("am = %+v, want %+v", am, wam)
- }
-}
-
-func TestKeyIndexIsEmpty(t *testing.T) {
- tests := []struct {
- ki *keyIndex
- w bool
- }{
- {
- &keyIndex{
- key: []byte("foo"),
- generations: []generation{{}},
- },
- true,
- },
- {
- &keyIndex{
- key: []byte("foo"),
- modified: revision{2, 0},
- generations: []generation{
- {created: revision{1, 0}, ver: 2, revs: []revision{{main: 2}}},
- },
- },
- false,
- },
- }
- for i, tt := range tests {
- g := tt.ki.isEmpty()
- if g != tt.w {
- t.Errorf("#%d: isEmpty = %v, want %v", i, g, tt.w)
- }
- }
-}
-
-func TestKeyIndexFindGeneration(t *testing.T) {
- ki := newTestKeyIndex()
-
- tests := []struct {
- rev int64
- wg *generation
- }{
- {0, nil},
- {1, nil},
- {2, &ki.generations[0]},
- {3, &ki.generations[0]},
- {4, &ki.generations[0]},
- {5, &ki.generations[0]},
- {6, nil},
- {7, nil},
- {8, &ki.generations[1]},
- {9, &ki.generations[1]},
- {10, &ki.generations[1]},
- {11, &ki.generations[1]},
- {12, nil},
- {13, nil},
- }
- for i, tt := range tests {
- g := ki.findGeneration(tt.rev)
- if g != tt.wg {
- t.Errorf("#%d: generation = %+v, want %+v", i, g, tt.wg)
- }
- }
-}
-
-func TestKeyIndexLess(t *testing.T) {
- ki := &keyIndex{key: []byte("foo")}
-
- tests := []struct {
- ki *keyIndex
- w bool
- }{
- {&keyIndex{key: []byte("doo")}, false},
- {&keyIndex{key: []byte("foo")}, false},
- {&keyIndex{key: []byte("goo")}, true},
- }
- for i, tt := range tests {
- g := ki.Less(tt.ki)
- if g != tt.w {
- t.Errorf("#%d: Less = %v, want %v", i, g, tt.w)
- }
- }
-}
-
-func TestGenerationIsEmpty(t *testing.T) {
- tests := []struct {
- g *generation
- w bool
- }{
- {nil, true},
- {&generation{}, true},
- {&generation{revs: []revision{{main: 1}}}, false},
- }
- for i, tt := range tests {
- g := tt.g.isEmpty()
- if g != tt.w {
- t.Errorf("#%d: isEmpty = %v, want %v", i, g, tt.w)
- }
- }
-}
-
-func TestGenerationWalk(t *testing.T) {
- g := &generation{
- ver: 3,
- created: revision{2, 0},
- revs: []revision{{main: 2}, {main: 4}, {main: 6}},
- }
- tests := []struct {
- f func(rev revision) bool
- wi int
- }{
- {func(rev revision) bool { return rev.main >= 7 }, 2},
- {func(rev revision) bool { return rev.main >= 6 }, 1},
- {func(rev revision) bool { return rev.main >= 5 }, 1},
- {func(rev revision) bool { return rev.main >= 4 }, 0},
- {func(rev revision) bool { return rev.main >= 3 }, 0},
- {func(rev revision) bool { return rev.main >= 2 }, -1},
- }
- for i, tt := range tests {
- idx := g.walk(tt.f)
- if idx != tt.wi {
- t.Errorf("#%d: index = %d, want %d", i, idx, tt.wi)
- }
- }
-}
-
-func newTestKeyIndex() *keyIndex {
- // key: "foo"
- // rev: 16
- // generations:
- // {empty}
- // {{14, 0}[1], {14, 1}[2], {16, 0}(t)[3]}
- // {{8, 0}[1], {10, 0}[2], {12, 0}(t)[3]}
- // {{2, 0}[1], {4, 0}[2], {6, 0}(t)[3]}
-
- ki := &keyIndex{key: []byte("foo")}
- ki.put(zap.NewExample(), 2, 0)
- ki.put(zap.NewExample(), 4, 0)
- ki.tombstone(zap.NewExample(), 6, 0)
- ki.put(zap.NewExample(), 8, 0)
- ki.put(zap.NewExample(), 10, 0)
- ki.tombstone(zap.NewExample(), 12, 0)
- ki.put(zap.NewExample(), 14, 0)
- ki.put(zap.NewExample(), 14, 1)
- ki.tombstone(zap.NewExample(), 16, 0)
- return ki
-}
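
For readers tracing the compaction tests above, the index built by newTestKeyIndex deep-equals the literal below (a sketch inferred from the expected value in TestKeyIndexCompactAndKeep at compact=1). Note that the generations slice is stored oldest-first, while the comment inside the helper lists them newest-first; the trailing empty generation is opened by the final tombstone. The test name here is hypothetical and not part of the original file.

```go
// Hypothetical sketch, not part of the original test file.
func TestNewTestKeyIndexLayoutSketch(t *testing.T) {
	wantKi := &keyIndex{
		key:      []byte("foo"),
		modified: revision{16, 0},
		generations: []generation{
			{created: revision{2, 0}, ver: 3, revs: []revision{{main: 2}, {main: 4}, {main: 6}}},
			{created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 10}, {main: 12}}},
			{created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}},
			{}, // empty generation opened by the tombstone at revision 16
		},
	}
	if ki := newTestKeyIndex(); !reflect.DeepEqual(ki, wantKi) {
		t.Errorf("ki = %+v, want %+v", ki, wantKi)
	}
}
```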
diff --git a/server/mvcc/kv_test.go b/server/mvcc/kv_test.go
deleted file mode 100644
index d7df00ebf68..00000000000
--- a/server/mvcc/kv_test.go
+++ /dev/null
@@ -1,840 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "context"
- "fmt"
- "os"
- "reflect"
- "testing"
- "time"
-
- "go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/pkg/v3/traceutil"
- "go.etcd.io/etcd/server/v3/lease"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
- betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
-
- "github.com/prometheus/client_golang/prometheus"
- dto "github.com/prometheus/client_model/go"
- "go.uber.org/zap"
-)
-
-// Functional tests for features implemented in the v3 store. They treat the v3 store
-// as a black box and test it by feeding input and validating the output.
-
-// TODO: add similar tests on operations in one txn/rev
-
-type (
- rangeFunc func(kv KV, key, end []byte, ro RangeOptions) (*RangeResult, error)
- putFunc func(kv KV, key, value []byte, lease lease.LeaseID) int64
- deleteRangeFunc func(kv KV, key, end []byte) (n, rev int64)
-)
-
-var (
- normalRangeFunc = func(kv KV, key, end []byte, ro RangeOptions) (*RangeResult, error) {
- return kv.Range(context.TODO(), key, end, ro)
- }
- txnRangeFunc = func(kv KV, key, end []byte, ro RangeOptions) (*RangeResult, error) {
- txn := kv.Read(ConcurrentReadTxMode, traceutil.TODO())
- defer txn.End()
- return txn.Range(context.TODO(), key, end, ro)
- }
-
- normalPutFunc = func(kv KV, key, value []byte, lease lease.LeaseID) int64 {
- return kv.Put(key, value, lease)
- }
- txnPutFunc = func(kv KV, key, value []byte, lease lease.LeaseID) int64 {
- txn := kv.Write(traceutil.TODO())
- defer txn.End()
- return txn.Put(key, value, lease)
- }
-
- normalDeleteRangeFunc = func(kv KV, key, end []byte) (n, rev int64) {
- return kv.DeleteRange(key, end)
- }
- txnDeleteRangeFunc = func(kv KV, key, end []byte) (n, rev int64) {
- txn := kv.Write(traceutil.TODO())
- defer txn.End()
- return txn.DeleteRange(key, end)
- }
-)
-
-func TestKVRange(t *testing.T) { testKVRange(t, normalRangeFunc) }
-func TestKVTxnRange(t *testing.T) { testKVRange(t, txnRangeFunc) }
-
-func testKVRange(t *testing.T, f rangeFunc) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, b, tmpPath)
-
- kvs := put3TestKVs(s)
-
- wrev := int64(4)
- tests := []struct {
- key, end []byte
- wkvs []mvccpb.KeyValue
- }{
- // get no keys
- {
- []byte("doo"), []byte("foo"),
- nil,
- },
- // get no keys when key == end
- {
- []byte("foo"), []byte("foo"),
- nil,
- },
- // get no keys when ranging single key
- {
- []byte("doo"), nil,
- nil,
- },
- // get all keys
- {
- []byte("foo"), []byte("foo3"),
- kvs,
- },
- // get partial keys
- {
- []byte("foo"), []byte("foo1"),
- kvs[:1],
- },
- // get single key
- {
- []byte("foo"), nil,
- kvs[:1],
- },
- // get entire keyspace
- {
- []byte(""), []byte(""),
- kvs,
- },
- }
-
- for i, tt := range tests {
- r, err := f(s, tt.key, tt.end, RangeOptions{})
- if err != nil {
- t.Fatal(err)
- }
- if r.Rev != wrev {
- t.Errorf("#%d: rev = %d, want %d", i, r.Rev, wrev)
- }
- if !reflect.DeepEqual(r.KVs, tt.wkvs) {
- t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, tt.wkvs)
- }
- }
-}
-
-func TestKVRangeRev(t *testing.T) { testKVRangeRev(t, normalRangeFunc) }
-func TestKVTxnRangeRev(t *testing.T) { testKVRangeRev(t, txnRangeFunc) }
-
-func testKVRangeRev(t *testing.T, f rangeFunc) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, b, tmpPath)
-
- kvs := put3TestKVs(s)
-
- tests := []struct {
- rev int64
- wrev int64
- wkvs []mvccpb.KeyValue
- }{
- {-1, 4, kvs},
- {0, 4, kvs},
- {2, 4, kvs[:1]},
- {3, 4, kvs[:2]},
- {4, 4, kvs},
- }
-
- for i, tt := range tests {
- r, err := f(s, []byte("foo"), []byte("foo3"), RangeOptions{Rev: tt.rev})
- if err != nil {
- t.Fatal(err)
- }
- if r.Rev != tt.wrev {
- t.Errorf("#%d: rev = %d, want %d", i, r.Rev, tt.wrev)
- }
- if !reflect.DeepEqual(r.KVs, tt.wkvs) {
- t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, tt.wkvs)
- }
- }
-}
-
-func TestKVRangeBadRev(t *testing.T) { testKVRangeBadRev(t, normalRangeFunc) }
-func TestKVTxnRangeBadRev(t *testing.T) { testKVRangeBadRev(t, txnRangeFunc) }
-
-func testKVRangeBadRev(t *testing.T, f rangeFunc) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, b, tmpPath)
-
- put3TestKVs(s)
- if _, err := s.Compact(traceutil.TODO(), 4); err != nil {
- t.Fatalf("compact error (%v)", err)
- }
-
- tests := []struct {
- rev int64
- werr error
- }{
- {-1, nil}, // <= 0 is most recent store
- {0, nil},
- {1, ErrCompacted},
- {2, ErrCompacted},
- {4, nil},
- {5, ErrFutureRev},
- {100, ErrFutureRev},
- }
- for i, tt := range tests {
- _, err := f(s, []byte("foo"), []byte("foo3"), RangeOptions{Rev: tt.rev})
- if err != tt.werr {
- t.Errorf("#%d: error = %v, want %v", i, err, tt.werr)
- }
- }
-}
-
-func TestKVRangeLimit(t *testing.T) { testKVRangeLimit(t, normalRangeFunc) }
-func TestKVTxnRangeLimit(t *testing.T) { testKVRangeLimit(t, txnRangeFunc) }
-
-func testKVRangeLimit(t *testing.T, f rangeFunc) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, b, tmpPath)
-
- kvs := put3TestKVs(s)
-
- wrev := int64(4)
- tests := []struct {
- limit int64
- wkvs []mvccpb.KeyValue
- }{
- // no limit
- {-1, kvs},
- // no limit
- {0, kvs},
- {1, kvs[:1]},
- {2, kvs[:2]},
- {3, kvs},
- {100, kvs},
- }
- for i, tt := range tests {
- r, err := f(s, []byte("foo"), []byte("foo3"), RangeOptions{Limit: tt.limit})
- if err != nil {
- t.Fatalf("#%d: range error (%v)", i, err)
- }
- if !reflect.DeepEqual(r.KVs, tt.wkvs) {
- t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, tt.wkvs)
- }
- if r.Rev != wrev {
- t.Errorf("#%d: rev = %d, want %d", i, r.Rev, wrev)
- }
- if tt.limit <= 0 || int(tt.limit) > len(kvs) {
- if r.Count != len(kvs) {
- t.Errorf("#%d: count = %d, want %d", i, r.Count, len(kvs))
- }
- } else if r.Count != int(tt.limit) {
- t.Errorf("#%d: count = %d, want %d", i, r.Count, tt.limit)
- }
- }
-}
-
-func TestKVPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, normalPutFunc) }
-func TestKVTxnPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, txnPutFunc) }
-
-func testKVPutMultipleTimes(t *testing.T, f putFunc) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, b, tmpPath)
-
- for i := 0; i < 10; i++ {
- base := int64(i + 1)
-
- rev := f(s, []byte("foo"), []byte("bar"), lease.LeaseID(base))
- if rev != base+1 {
- t.Errorf("#%d: rev = %d, want %d", i, rev, base+1)
- }
-
- r, err := s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{})
- if err != nil {
- t.Fatal(err)
- }
- wkvs := []mvccpb.KeyValue{
- {Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: base + 1, Version: base, Lease: base},
- }
- if !reflect.DeepEqual(r.KVs, wkvs) {
- t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, wkvs)
- }
- }
-}
-
-func TestKVDeleteRange(t *testing.T) { testKVDeleteRange(t, normalDeleteRangeFunc) }
-func TestKVTxnDeleteRange(t *testing.T) { testKVDeleteRange(t, txnDeleteRangeFunc) }
-
-func testKVDeleteRange(t *testing.T, f deleteRangeFunc) {
- tests := []struct {
- key, end []byte
-
- wrev int64
- wN int64
- }{
- {
- []byte("foo"), nil,
- 5, 1,
- },
- {
- []byte("foo"), []byte("foo1"),
- 5, 1,
- },
- {
- []byte("foo"), []byte("foo2"),
- 5, 2,
- },
- {
- []byte("foo"), []byte("foo3"),
- 5, 3,
- },
- {
- []byte("foo3"), []byte("foo8"),
- 4, 0,
- },
- {
- []byte("foo3"), nil,
- 4, 0,
- },
- }
-
- for i, tt := range tests {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
-
- s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
- s.Put([]byte("foo1"), []byte("bar1"), lease.NoLease)
- s.Put([]byte("foo2"), []byte("bar2"), lease.NoLease)
-
- n, rev := f(s, tt.key, tt.end)
- if n != tt.wN || rev != tt.wrev {
- t.Errorf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, tt.wN, tt.wrev)
- }
-
- cleanup(s, b, tmpPath)
- }
-}
-
-func TestKVDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, normalDeleteRangeFunc) }
-func TestKVTxnDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, txnDeleteRangeFunc) }
-
-func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, b, tmpPath)
-
- s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
-
- n, rev := f(s, []byte("foo"), nil)
- if n != 1 || rev != 3 {
- t.Fatalf("n = %d, rev = %d, want (%d, %d)", n, rev, 1, 3)
- }
-
- for i := 0; i < 10; i++ {
- n, rev := f(s, []byte("foo"), nil)
- if n != 0 || rev != 3 {
- t.Fatalf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, 0, 3)
- }
- }
-}
-
-// test that range, put, delete on a single key in sequence repeatedly works correctly.
-func TestKVOperationInSequence(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, b, tmpPath)
-
- for i := 0; i < 10; i++ {
- base := int64(i*2 + 1)
-
- // put foo
- rev := s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
- if rev != base+1 {
- t.Errorf("#%d: put rev = %d, want %d", i, rev, base+1)
- }
-
- r, err := s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{Rev: base + 1})
- if err != nil {
- t.Fatal(err)
- }
- wkvs := []mvccpb.KeyValue{
- {Key: []byte("foo"), Value: []byte("bar"), CreateRevision: base + 1, ModRevision: base + 1, Version: 1, Lease: int64(lease.NoLease)},
- }
- if !reflect.DeepEqual(r.KVs, wkvs) {
- t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, wkvs)
- }
- if r.Rev != base+1 {
-			t.Errorf("#%d: range rev = %d, want %d", i, r.Rev, base+1)
- }
-
- // delete foo
- n, rev := s.DeleteRange([]byte("foo"), nil)
- if n != 1 || rev != base+2 {
- t.Errorf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, 1, base+2)
- }
-
- r, err = s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{Rev: base + 2})
- if err != nil {
- t.Fatal(err)
- }
- if r.KVs != nil {
- t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, nil)
- }
- if r.Rev != base+2 {
- t.Errorf("#%d: range rev = %d, want %d", i, r.Rev, base+2)
- }
- }
-}
-
-func TestKVTxnBlockWriteOperations(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
-
- tests := []func(){
- func() { s.Put([]byte("foo"), nil, lease.NoLease) },
- func() { s.DeleteRange([]byte("foo"), nil) },
- }
- for i, tt := range tests {
- tf := tt
- txn := s.Write(traceutil.TODO())
- done := make(chan struct{}, 1)
- go func() {
- tf()
- done <- struct{}{}
- }()
- select {
- case <-done:
- t.Fatalf("#%d: operation failed to be blocked", i)
- case <-time.After(10 * time.Millisecond):
- }
-
- txn.End()
- select {
- case <-done:
- case <-time.After(10 * time.Second):
- testutil.FatalStack(t, fmt.Sprintf("#%d: operation failed to be unblocked", i))
- }
- }
-
-	// only close the backend when we know all the txns are finished
- cleanup(s, b, tmpPath)
-}
-
-func TestKVTxnNonBlockRange(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, b, tmpPath)
-
- txn := s.Write(traceutil.TODO())
- defer txn.End()
-
- donec := make(chan struct{})
- go func() {
- defer close(donec)
- s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{})
- }()
- select {
- case <-donec:
- case <-time.After(100 * time.Millisecond):
- t.Fatalf("range operation blocked on write txn")
- }
-}
-
-// test that txn range, put, delete on a single key in sequence repeatedly works correctly.
-func TestKVTxnOperationInSequence(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, b, tmpPath)
-
- for i := 0; i < 10; i++ {
- txn := s.Write(traceutil.TODO())
- base := int64(i + 1)
-
- // put foo
- rev := txn.Put([]byte("foo"), []byte("bar"), lease.NoLease)
- if rev != base+1 {
- t.Errorf("#%d: put rev = %d, want %d", i, rev, base+1)
- }
-
- r, err := txn.Range(context.TODO(), []byte("foo"), nil, RangeOptions{Rev: base + 1})
- if err != nil {
- t.Fatal(err)
- }
- wkvs := []mvccpb.KeyValue{
- {Key: []byte("foo"), Value: []byte("bar"), CreateRevision: base + 1, ModRevision: base + 1, Version: 1, Lease: int64(lease.NoLease)},
- }
- if !reflect.DeepEqual(r.KVs, wkvs) {
- t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, wkvs)
- }
- if r.Rev != base+1 {
- t.Errorf("#%d: range rev = %d, want %d", i, r.Rev, base+1)
- }
-
- // delete foo
- n, rev := txn.DeleteRange([]byte("foo"), nil)
- if n != 1 || rev != base+1 {
- t.Errorf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, 1, base+1)
- }
-
- r, err = txn.Range(context.TODO(), []byte("foo"), nil, RangeOptions{Rev: base + 1})
- if err != nil {
- t.Errorf("#%d: range error (%v)", i, err)
- }
- if r.KVs != nil {
- t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, nil)
- }
- if r.Rev != base+1 {
- t.Errorf("#%d: range rev = %d, want %d", i, r.Rev, base+1)
- }
-
- txn.End()
- }
-}
-
-func TestKVCompactReserveLastValue(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, b, tmpPath)
-
- s.Put([]byte("foo"), []byte("bar0"), 1)
- s.Put([]byte("foo"), []byte("bar1"), 2)
- s.DeleteRange([]byte("foo"), nil)
- s.Put([]byte("foo"), []byte("bar2"), 3)
-
-	// the revs in tests will be passed to Compact() one by one on the same store
- tests := []struct {
- rev int64
- // wanted kvs right after the compacted rev
- wkvs []mvccpb.KeyValue
- }{
- {
- 1,
- []mvccpb.KeyValue{
- {Key: []byte("foo"), Value: []byte("bar0"), CreateRevision: 2, ModRevision: 2, Version: 1, Lease: 1},
- },
- },
- {
- 2,
- []mvccpb.KeyValue{
- {Key: []byte("foo"), Value: []byte("bar1"), CreateRevision: 2, ModRevision: 3, Version: 2, Lease: 2},
- },
- },
- {
- 3,
- nil,
- },
- {
- 4,
- []mvccpb.KeyValue{
- {Key: []byte("foo"), Value: []byte("bar2"), CreateRevision: 5, ModRevision: 5, Version: 1, Lease: 3},
- },
- },
- }
- for i, tt := range tests {
- _, err := s.Compact(traceutil.TODO(), tt.rev)
- if err != nil {
-			t.Errorf("#%d: unexpected compact error %v", i, err)
- }
- r, err := s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{Rev: tt.rev + 1})
- if err != nil {
-			t.Errorf("#%d: unexpected range error %v", i, err)
- }
- if !reflect.DeepEqual(r.KVs, tt.wkvs) {
- t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, tt.wkvs)
- }
- }
-}
-
-func TestKVCompactBad(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, b, tmpPath)
-
- s.Put([]byte("foo"), []byte("bar0"), lease.NoLease)
- s.Put([]byte("foo"), []byte("bar1"), lease.NoLease)
- s.Put([]byte("foo"), []byte("bar2"), lease.NoLease)
-
-	// the revs in tests will be passed to Compact() one by one on the same store
- tests := []struct {
- rev int64
- werr error
- }{
- {0, nil},
- {1, nil},
- {1, ErrCompacted},
- {4, nil},
- {5, ErrFutureRev},
- {100, ErrFutureRev},
- }
- for i, tt := range tests {
- _, err := s.Compact(traceutil.TODO(), tt.rev)
- if err != tt.werr {
- t.Errorf("#%d: compact error = %v, want %v", i, err, tt.werr)
- }
- }
-}
-
-func TestKVHash(t *testing.T) {
- hashes := make([]uint32, 3)
-
- for i := 0; i < len(hashes); i++ {
- var err error
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- kv := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- kv.Put([]byte("foo0"), []byte("bar0"), lease.NoLease)
- kv.Put([]byte("foo1"), []byte("bar0"), lease.NoLease)
- hashes[i], _, err = kv.Hash()
- if err != nil {
- t.Fatalf("failed to get hash: %v", err)
- }
- cleanup(kv, b, tmpPath)
- }
-
- for i := 1; i < len(hashes); i++ {
- if hashes[i-1] != hashes[i] {
- t.Errorf("hash[%d](%d) != hash[%d](%d)", i-1, hashes[i-1], i, hashes[i])
- }
- }
-}
-
-func TestKVRestore(t *testing.T) {
- tests := []func(kv KV){
- func(kv KV) {
- kv.Put([]byte("foo"), []byte("bar0"), 1)
- kv.Put([]byte("foo"), []byte("bar1"), 2)
- kv.Put([]byte("foo"), []byte("bar2"), 3)
- kv.Put([]byte("foo2"), []byte("bar0"), 1)
- },
- func(kv KV) {
- kv.Put([]byte("foo"), []byte("bar0"), 1)
- kv.DeleteRange([]byte("foo"), nil)
- kv.Put([]byte("foo"), []byte("bar1"), 2)
- },
- func(kv KV) {
- kv.Put([]byte("foo"), []byte("bar0"), 1)
- kv.Put([]byte("foo"), []byte("bar1"), 2)
- kv.Compact(traceutil.TODO(), 1)
- },
- }
- for i, tt := range tests {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- tt(s)
- var kvss [][]mvccpb.KeyValue
- for k := int64(0); k < 10; k++ {
- r, _ := s.Range(context.TODO(), []byte("a"), []byte("z"), RangeOptions{Rev: k})
- kvss = append(kvss, r.KVs)
- }
-
- keysBefore := readGaugeInt(keysGauge)
- s.Close()
-
-		// ns should recover the previous state from the backend.
- ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
-
- if keysRestore := readGaugeInt(keysGauge); keysBefore != keysRestore {
- t.Errorf("#%d: got %d key count, expected %d", i, keysRestore, keysBefore)
- }
-
- // wait for possible compaction to finish
- testutil.WaitSchedule()
- var nkvss [][]mvccpb.KeyValue
- for k := int64(0); k < 10; k++ {
- r, _ := ns.Range(context.TODO(), []byte("a"), []byte("z"), RangeOptions{Rev: k})
- nkvss = append(nkvss, r.KVs)
- }
- cleanup(ns, b, tmpPath)
-
- if !reflect.DeepEqual(nkvss, kvss) {
- t.Errorf("#%d: kvs history = %+v, want %+v", i, nkvss, kvss)
- }
- }
-}
-
-func readGaugeInt(g prometheus.Gauge) int {
- ch := make(chan prometheus.Metric, 1)
- g.Collect(ch)
- m := <-ch
- mm := &dto.Metric{}
- m.Write(mm)
- return int(mm.GetGauge().GetValue())
-}
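
readGaugeInt reads a Prometheus gauge's current value by draining a single sample from its Collect channel and decoding it into a dto.Metric. A minimal, hedged usage sketch follows; the test and metric names are made up for illustration.

```go
// Hypothetical sketch, not part of the original test file.
func TestReadGaugeIntSketch(t *testing.T) {
	g := prometheus.NewGauge(prometheus.GaugeOpts{Name: "example_keys_total"}) // made-up metric name
	g.Set(42)
	if n := readGaugeInt(g); n != 42 {
		t.Errorf("gauge value = %d, want 42", n)
	}
}
```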
-
-func TestKVSnapshot(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, b, tmpPath)
-
- wkvs := put3TestKVs(s)
-
- newPath := "new_test"
- f, err := os.Create(newPath)
- if err != nil {
- t.Fatal(err)
- }
- defer os.Remove(newPath)
-
- snap := s.b.Snapshot()
- defer snap.Close()
- _, err = snap.WriteTo(f)
- if err != nil {
- t.Fatal(err)
- }
- f.Close()
-
- ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer ns.Close()
- r, err := ns.Range(context.TODO(), []byte("a"), []byte("z"), RangeOptions{})
- if err != nil {
-		t.Errorf("unexpected range error (%v)", err)
- }
- if !reflect.DeepEqual(r.KVs, wkvs) {
- t.Errorf("kvs = %+v, want %+v", r.KVs, wkvs)
- }
- if r.Rev != 4 {
- t.Errorf("rev = %d, want %d", r.Rev, 4)
- }
-}
-
-func TestWatchableKVWatch(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
- defer cleanup(s, b, tmpPath)
-
- w := s.NewWatchStream()
- defer w.Close()
-
- wid, _ := w.Watch(0, []byte("foo"), []byte("fop"), 0)
-
- wev := []mvccpb.Event{
- {Type: mvccpb.PUT,
- Kv: &mvccpb.KeyValue{
- Key: []byte("foo"),
- Value: []byte("bar"),
- CreateRevision: 2,
- ModRevision: 2,
- Version: 1,
- Lease: 1,
- },
- },
- {
- Type: mvccpb.PUT,
- Kv: &mvccpb.KeyValue{
- Key: []byte("foo1"),
- Value: []byte("bar1"),
- CreateRevision: 3,
- ModRevision: 3,
- Version: 1,
- Lease: 2,
- },
- },
- {
- Type: mvccpb.PUT,
- Kv: &mvccpb.KeyValue{
- Key: []byte("foo1"),
- Value: []byte("bar11"),
- CreateRevision: 3,
- ModRevision: 4,
- Version: 2,
- Lease: 3,
- },
- },
- }
-
- s.Put([]byte("foo"), []byte("bar"), 1)
- select {
- case resp := <-w.Chan():
- if resp.WatchID != wid {
- t.Errorf("resp.WatchID got = %d, want = %d", resp.WatchID, wid)
- }
- ev := resp.Events[0]
- if !reflect.DeepEqual(ev, wev[0]) {
- t.Errorf("watched event = %+v, want %+v", ev, wev[0])
- }
- case <-time.After(5 * time.Second):
-		// CPU might be too slow, and the goroutine might not get scheduled in time
- testutil.FatalStack(t, "failed to watch the event")
- }
-
- s.Put([]byte("foo1"), []byte("bar1"), 2)
- select {
- case resp := <-w.Chan():
- if resp.WatchID != wid {
- t.Errorf("resp.WatchID got = %d, want = %d", resp.WatchID, wid)
- }
- ev := resp.Events[0]
- if !reflect.DeepEqual(ev, wev[1]) {
- t.Errorf("watched event = %+v, want %+v", ev, wev[1])
- }
- case <-time.After(5 * time.Second):
- testutil.FatalStack(t, "failed to watch the event")
- }
-
- w = s.NewWatchStream()
- wid, _ = w.Watch(0, []byte("foo1"), []byte("foo2"), 3)
-
- select {
- case resp := <-w.Chan():
- if resp.WatchID != wid {
- t.Errorf("resp.WatchID got = %d, want = %d", resp.WatchID, wid)
- }
- ev := resp.Events[0]
- if !reflect.DeepEqual(ev, wev[1]) {
- t.Errorf("watched event = %+v, want %+v", ev, wev[1])
- }
- case <-time.After(5 * time.Second):
- testutil.FatalStack(t, "failed to watch the event")
- }
-
- s.Put([]byte("foo1"), []byte("bar11"), 3)
- select {
- case resp := <-w.Chan():
- if resp.WatchID != wid {
- t.Errorf("resp.WatchID got = %d, want = %d", resp.WatchID, wid)
- }
- ev := resp.Events[0]
- if !reflect.DeepEqual(ev, wev[2]) {
- t.Errorf("watched event = %+v, want %+v", ev, wev[2])
- }
- case <-time.After(5 * time.Second):
- testutil.FatalStack(t, "failed to watch the event")
- }
-}
-
-func cleanup(s KV, b backend.Backend, path string) {
- s.Close()
- b.Close()
- os.Remove(path)
-}
-
-func put3TestKVs(s KV) []mvccpb.KeyValue {
- s.Put([]byte("foo"), []byte("bar"), 1)
- s.Put([]byte("foo1"), []byte("bar1"), 2)
- s.Put([]byte("foo2"), []byte("bar2"), 3)
- return []mvccpb.KeyValue{
- {Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1, Lease: 1},
- {Key: []byte("foo1"), Value: []byte("bar1"), CreateRevision: 3, ModRevision: 3, Version: 1, Lease: 2},
- {Key: []byte("foo2"), Value: []byte("bar2"), CreateRevision: 4, ModRevision: 4, Version: 1, Lease: 3},
- }
-}
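
The expected CreateRevision values returned by put3TestKVs start at 2 because a fresh store begins at currentRev 1 and every write bumps the revision (cf. TestStoreRev in kvstore_test.go). A small hedged sketch making that explicit; the test name is hypothetical.

```go
// Hypothetical sketch, not part of the original test file.
func TestFirstPutRevisionSketch(t *testing.T) {
	b, tmpPath := betesting.NewDefaultTmpBackend(t)
	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
	defer cleanup(s, b, tmpPath)

	// The store starts at revision 1, so the first Put commits at revision 2.
	if rev := s.Put([]byte("foo"), []byte("bar"), lease.NoLease); rev != 2 {
		t.Errorf("first put rev = %d, want 2", rev)
	}
}
```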
diff --git a/server/mvcc/kvstore.go b/server/mvcc/kvstore.go
deleted file mode 100644
index 5116b15cbd0..00000000000
--- a/server/mvcc/kvstore.go
+++ /dev/null
@@ -1,570 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "context"
- "errors"
- "fmt"
- "hash/crc32"
- "math"
- "sync"
- "time"
-
- "go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/pkg/v3/schedule"
- "go.etcd.io/etcd/pkg/v3/traceutil"
- "go.etcd.io/etcd/server/v3/etcdserver/cindex"
- "go.etcd.io/etcd/server/v3/lease"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
-
- "go.uber.org/zap"
-)
-
-var (
- keyBucketName = []byte("key")
- MetaBucketName = cindex.MetaBucketName
-
- scheduledCompactKeyName = []byte("scheduledCompactRev")
- finishedCompactKeyName = []byte("finishedCompactRev")
-
- ErrCompacted = errors.New("mvcc: required revision has been compacted")
- ErrFutureRev = errors.New("mvcc: required revision is a future revision")
-)
-
-const (
-	// markedRevBytesLen is the byte length of a marked revision.
-	// The first `revBytesLen` bytes represent a normal revision. The last
-	// byte is the mark.
- markedRevBytesLen = revBytesLen + 1
- markBytePosition = markedRevBytesLen - 1
- markTombstone byte = 't'
-)
-
-var restoreChunkKeys = 10000 // non-const for testing
-var defaultCompactBatchLimit = 1000
-
-type StoreConfig struct {
- CompactionBatchLimit int
-}
-
-type store struct {
- ReadView
- WriteView
-
- cfg StoreConfig
-
- // mu read locks for txns and write locks for non-txn store changes.
- mu sync.RWMutex
-
- b backend.Backend
- kvindex index
-
- le lease.Lessor
-
-	// revMu protects currentRev and compactMainRev.
-	// Locked at the end of a write txn and released after the write txn unlocks its lock.
-	// Locked before locking a read txn and released after locking it.
- revMu sync.RWMutex
- // currentRev is the revision of the last completed transaction.
- currentRev int64
- // compactMainRev is the main revision of the last compaction.
- compactMainRev int64
-
- fifoSched schedule.Scheduler
-
- stopc chan struct{}
-
- lg *zap.Logger
-}
-
-// NewStore returns a new store. It is useful for creating a store inside
-// the mvcc package. Externally it should only be used for testing.
-func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *store {
- if lg == nil {
- lg = zap.NewNop()
- }
- if cfg.CompactionBatchLimit == 0 {
- cfg.CompactionBatchLimit = defaultCompactBatchLimit
- }
- s := &store{
- cfg: cfg,
- b: b,
- kvindex: newTreeIndex(lg),
-
- le: le,
-
- currentRev: 1,
- compactMainRev: -1,
-
- fifoSched: schedule.NewFIFOScheduler(),
-
- stopc: make(chan struct{}),
-
- lg: lg,
- }
- s.ReadView = &readView{s}
- s.WriteView = &writeView{s}
- if s.le != nil {
- s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write(traceutil.TODO()) })
- }
-
- tx := s.b.BatchTx()
- tx.Lock()
- tx.UnsafeCreateBucket(keyBucketName)
- cindex.UnsafeCreateMetaBucket(tx)
- tx.Unlock()
- s.b.ForceCommit()
-
- s.mu.Lock()
- defer s.mu.Unlock()
- if err := s.restore(); err != nil {
- // TODO: return the error instead of panic here?
- panic("failed to recover store from backend")
- }
-
- return s
-}
-
-func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) {
- if ctx == nil || ctx.Err() != nil {
- select {
- case <-s.stopc:
- default:
-			// fix deadlock in mvcc; for more information, please refer to pr 11817.
- // s.stopc is only updated in restore operation, which is called by apply
- // snapshot call, compaction and apply snapshot requests are serialized by
- // raft, and do not happen at the same time.
- s.mu.Lock()
- f := func(ctx context.Context) { s.compactBarrier(ctx, ch) }
- s.fifoSched.Schedule(f)
- s.mu.Unlock()
- }
- return
- }
- close(ch)
-}
-
-func (s *store) Hash() (hash uint32, revision int64, err error) {
-	// TODO: hash and revision could be inconsistent; one possible fix is to add s.revMu.RLock() at the beginning of this function, but that is costly.
- start := time.Now()
-
- s.b.ForceCommit()
- h, err := s.b.Hash(DefaultIgnores)
-
- hashSec.Observe(time.Since(start).Seconds())
- return h, s.currentRev, err
-}
-
-func (s *store) HashByRev(rev int64) (hash uint32, currentRev int64, compactRev int64, err error) {
- start := time.Now()
-
- s.mu.RLock()
- s.revMu.RLock()
- compactRev, currentRev = s.compactMainRev, s.currentRev
- s.revMu.RUnlock()
-
- if rev > 0 && rev <= compactRev {
- s.mu.RUnlock()
- return 0, 0, compactRev, ErrCompacted
- } else if rev > 0 && rev > currentRev {
- s.mu.RUnlock()
- return 0, currentRev, 0, ErrFutureRev
- }
-
- if rev == 0 {
- rev = currentRev
- }
- keep := s.kvindex.Keep(rev)
-
- tx := s.b.ReadTx()
- tx.RLock()
- defer tx.RUnlock()
- s.mu.RUnlock()
-
- upper := revision{main: rev + 1}
- lower := revision{main: compactRev + 1}
- h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
-
- h.Write(keyBucketName)
- err = tx.UnsafeForEach(keyBucketName, func(k, v []byte) error {
- kr := bytesToRev(k)
- if !upper.GreaterThan(kr) {
- return nil
- }
-		// skip revisions that are scheduled for deletion
-		// due to compaction; don't skip if there is nothing to keep.
- if lower.GreaterThan(kr) && len(keep) > 0 {
- if _, ok := keep[kr]; !ok {
- return nil
- }
- }
- h.Write(k)
- h.Write(v)
- return nil
- })
- hash = h.Sum32()
-
- hashRevSec.Observe(time.Since(start).Seconds())
- return hash, currentRev, compactRev, err
-}
-
-func (s *store) updateCompactRev(rev int64) (<-chan struct{}, error) {
- s.revMu.Lock()
- if rev <= s.compactMainRev {
- ch := make(chan struct{})
- f := func(ctx context.Context) { s.compactBarrier(ctx, ch) }
- s.fifoSched.Schedule(f)
- s.revMu.Unlock()
- return ch, ErrCompacted
- }
- if rev > s.currentRev {
- s.revMu.Unlock()
- return nil, ErrFutureRev
- }
-
- s.compactMainRev = rev
-
- rbytes := newRevBytes()
- revToBytes(revision{main: rev}, rbytes)
-
- tx := s.b.BatchTx()
- tx.Lock()
- tx.UnsafePut(MetaBucketName, scheduledCompactKeyName, rbytes)
- tx.Unlock()
- // ensure that desired compaction is persisted
- s.b.ForceCommit()
-
- s.revMu.Unlock()
-
- return nil, nil
-}
-
-func (s *store) compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) {
- ch := make(chan struct{})
- var j = func(ctx context.Context) {
- if ctx.Err() != nil {
- s.compactBarrier(ctx, ch)
- return
- }
- start := time.Now()
- keep := s.kvindex.Compact(rev)
- indexCompactionPauseMs.Observe(float64(time.Since(start) / time.Millisecond))
- if !s.scheduleCompaction(rev, keep) {
- s.compactBarrier(context.TODO(), ch)
- return
- }
- close(ch)
- }
-
- s.fifoSched.Schedule(j)
- trace.Step("schedule compaction")
- return ch, nil
-}
-
-func (s *store) compactLockfree(rev int64) (<-chan struct{}, error) {
- ch, err := s.updateCompactRev(rev)
- if err != nil {
- return ch, err
- }
-
- return s.compact(traceutil.TODO(), rev)
-}
-
-func (s *store) Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) {
- s.mu.Lock()
-
- ch, err := s.updateCompactRev(rev)
- trace.Step("check and update compact revision")
- if err != nil {
- s.mu.Unlock()
- return ch, err
- }
- s.mu.Unlock()
-
- return s.compact(trace, rev)
-}
-
-// DefaultIgnores is a map of keys to ignore in hash checking.
-var DefaultIgnores map[backend.IgnoreKey]struct{}
-
-func init() {
- DefaultIgnores = map[backend.IgnoreKey]struct{}{
- // consistent index might be changed due to v2 internal sync, which
- // is not controllable by the user.
- {Bucket: string(MetaBucketName), Key: string(cindex.ConsistentIndexKeyName)}: {},
- {Bucket: string(MetaBucketName), Key: string(cindex.TermKeyName)}: {},
- }
-}
-
-func (s *store) Commit() {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.b.ForceCommit()
-}
-
-func (s *store) Restore(b backend.Backend) error {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- close(s.stopc)
- s.fifoSched.Stop()
-
- s.b = b
- s.kvindex = newTreeIndex(s.lg)
-
- {
- // During restore the metrics might report 'special' values
- s.revMu.Lock()
- s.currentRev = 1
- s.compactMainRev = -1
- s.revMu.Unlock()
- }
-
- s.fifoSched = schedule.NewFIFOScheduler()
- s.stopc = make(chan struct{})
-
- return s.restore()
-}
-
-func (s *store) restore() error {
- s.setupMetricsReporter()
-
- min, max := newRevBytes(), newRevBytes()
- revToBytes(revision{main: 1}, min)
- revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max)
-
- keyToLease := make(map[string]lease.LeaseID)
-
- // restore index
- tx := s.b.BatchTx()
- tx.Lock()
-
- _, finishedCompactBytes := tx.UnsafeRange(MetaBucketName, finishedCompactKeyName, nil, 0)
- if len(finishedCompactBytes) != 0 {
- s.revMu.Lock()
- s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main
-
- s.lg.Info(
- "restored last compact revision",
- zap.String("meta-bucket-name", string(MetaBucketName)),
- zap.String("meta-bucket-name-key", string(finishedCompactKeyName)),
- zap.Int64("restored-compact-revision", s.compactMainRev),
- )
- s.revMu.Unlock()
- }
- _, scheduledCompactBytes := tx.UnsafeRange(MetaBucketName, scheduledCompactKeyName, nil, 0)
- scheduledCompact := int64(0)
- if len(scheduledCompactBytes) != 0 {
- scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main
- }
-
- // index keys concurrently as they're loaded in from tx
- keysGauge.Set(0)
- rkvc, revc := restoreIntoIndex(s.lg, s.kvindex)
- for {
- keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys))
- if len(keys) == 0 {
- break
- }
- // rkvc blocks if the total pending keys exceeds the restore
- // chunk size to keep keys from consuming too much memory.
- restoreChunk(s.lg, rkvc, keys, vals, keyToLease)
- if len(keys) < restoreChunkKeys {
- // partial set implies final set
- break
- }
- // next set begins after where this one ended
- newMin := bytesToRev(keys[len(keys)-1][:revBytesLen])
- newMin.sub++
- revToBytes(newMin, min)
- }
- close(rkvc)
-
- {
- s.revMu.Lock()
- s.currentRev = <-revc
-
-		// keys in the range [compacted revision -N, compaction] might all be deleted due to compaction.
-		// the correct revision should be set to the compaction revision in that case, not the largest
-		// revision we have seen.
- if s.currentRev < s.compactMainRev {
- s.currentRev = s.compactMainRev
- }
- s.revMu.Unlock()
- }
-
- if scheduledCompact <= s.compactMainRev {
- scheduledCompact = 0
- }
-
- for key, lid := range keyToLease {
- if s.le == nil {
- tx.Unlock()
- panic("no lessor to attach lease")
- }
- err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}})
- if err != nil {
- s.lg.Error(
- "failed to attach a lease",
- zap.String("lease-id", fmt.Sprintf("%016x", lid)),
- zap.Error(err),
- )
- }
- }
-
- tx.Unlock()
-
- s.lg.Info("kvstore restored", zap.Int64("current-rev", s.currentRev))
-
- if scheduledCompact != 0 {
- if _, err := s.compactLockfree(scheduledCompact); err != nil {
- s.lg.Warn("compaction encountered error", zap.Error(err))
- }
-
- s.lg.Info(
- "resume scheduled compaction",
- zap.String("meta-bucket-name", string(MetaBucketName)),
- zap.String("meta-bucket-name-key", string(scheduledCompactKeyName)),
- zap.Int64("scheduled-compact-revision", scheduledCompact),
- )
- }
-
- return nil
-}
-
-type revKeyValue struct {
- key []byte
- kv mvccpb.KeyValue
- kstr string
-}
-
-func restoreIntoIndex(lg *zap.Logger, idx index) (chan<- revKeyValue, <-chan int64) {
- rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1)
- go func() {
- currentRev := int64(1)
- defer func() { revc <- currentRev }()
-		// restore the tree index by streaming in the unordered index entries.
- kiCache := make(map[string]*keyIndex, restoreChunkKeys)
- for rkv := range rkvc {
- ki, ok := kiCache[rkv.kstr]
-			// purge kiCache if it holds many keys but the key is still missing from the cache
- if !ok && len(kiCache) >= restoreChunkKeys {
- i := 10
- for k := range kiCache {
- delete(kiCache, k)
- if i--; i == 0 {
- break
- }
- }
- }
- // cache miss, fetch from tree index if there
- if !ok {
- ki = &keyIndex{key: rkv.kv.Key}
- if idxKey := idx.KeyIndex(ki); idxKey != nil {
- kiCache[rkv.kstr], ki = idxKey, idxKey
- ok = true
- }
- }
- rev := bytesToRev(rkv.key)
- currentRev = rev.main
- if ok {
- if isTombstone(rkv.key) {
- if err := ki.tombstone(lg, rev.main, rev.sub); err != nil {
- lg.Warn("tombstone encountered error", zap.Error(err))
- }
- continue
- }
- ki.put(lg, rev.main, rev.sub)
- } else if !isTombstone(rkv.key) {
- ki.restore(lg, revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version)
- idx.Insert(ki)
- kiCache[rkv.kstr] = ki
- }
- }
- }()
- return rkvc, revc
-}
-
-func restoreChunk(lg *zap.Logger, kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) {
- for i, key := range keys {
- rkv := revKeyValue{key: key}
- if err := rkv.kv.Unmarshal(vals[i]); err != nil {
- lg.Fatal("failed to unmarshal mvccpb.KeyValue", zap.Error(err))
- }
- rkv.kstr = string(rkv.kv.Key)
- if isTombstone(key) {
- delete(keyToLease, rkv.kstr)
- } else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease {
- keyToLease[rkv.kstr] = lid
- } else {
- delete(keyToLease, rkv.kstr)
- }
- kvc <- rkv
- }
-}
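
restoreIntoIndex and restoreChunk form the producer/consumer pipeline used by (*store).restore: decoded key-value chunks are pushed into rkvc, a background goroutine rebuilds the tree index, and the main revision of the last restored key is returned on revc once rkvc is closed. A hedged sketch of the wiring; the helper name is hypothetical.

```go
// restoreOneChunkSketch is a hypothetical helper mirroring the wiring in
// (*store).restore above; it assumes one chunk of keys/vals already read
// from the key bucket.
func restoreOneChunkSketch(lg *zap.Logger, idx index, keys, vals [][]byte) int64 {
	rkvc, revc := restoreIntoIndex(lg, idx)
	keyToLease := make(map[string]lease.LeaseID)
	restoreChunk(lg, rkvc, keys, vals, keyToLease) // blocks if too many keys are pending
	close(rkvc)                                    // no more chunks; the indexing goroutine drains and exits
	return <-revc                                  // main revision of the last restored key
}
```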
-
-func (s *store) Close() error {
- close(s.stopc)
- s.fifoSched.Stop()
- return nil
-}
-
-func (s *store) setupMetricsReporter() {
- b := s.b
- reportDbTotalSizeInBytesMu.Lock()
- reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) }
- reportDbTotalSizeInBytesMu.Unlock()
- reportDbTotalSizeInBytesDebugMu.Lock()
- reportDbTotalSizeInBytesDebug = func() float64 { return float64(b.Size()) }
- reportDbTotalSizeInBytesDebugMu.Unlock()
- reportDbTotalSizeInUseInBytesMu.Lock()
- reportDbTotalSizeInUseInBytes = func() float64 { return float64(b.SizeInUse()) }
- reportDbTotalSizeInUseInBytesMu.Unlock()
- reportDbOpenReadTxNMu.Lock()
- reportDbOpenReadTxN = func() float64 { return float64(b.OpenReadTxN()) }
- reportDbOpenReadTxNMu.Unlock()
- reportCurrentRevMu.Lock()
- reportCurrentRev = func() float64 {
- s.revMu.RLock()
- defer s.revMu.RUnlock()
- return float64(s.currentRev)
- }
- reportCurrentRevMu.Unlock()
- reportCompactRevMu.Lock()
- reportCompactRev = func() float64 {
- s.revMu.RLock()
- defer s.revMu.RUnlock()
- return float64(s.compactMainRev)
- }
- reportCompactRevMu.Unlock()
-}
-
-// appendMarkTombstone appends the tombstone mark to normal revision bytes.
-func appendMarkTombstone(lg *zap.Logger, b []byte) []byte {
- if len(b) != revBytesLen {
- lg.Panic(
- "cannot append tombstone mark to non-normal revision bytes",
- zap.Int("expected-revision-bytes-size", revBytesLen),
- zap.Int("given-revision-bytes-size", len(b)),
- )
- }
- return append(b, markTombstone)
-}
-
-// isTombstone checks whether the given revision bytes represent a tombstone.
-func isTombstone(b []byte) bool {
- return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone
-}
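
A revision key in the key bucket is revBytesLen bytes long (produced by newRevBytes/revToBytes elsewhere in this package, as used by restore above); a tombstone is the same bytes with a trailing markTombstone byte. Below is a hedged sketch of the marking round trip; the helper name is hypothetical.

```go
// tombstoneBytesSketch is a hypothetical helper illustrating tombstone marking.
func tombstoneBytesSketch(lg *zap.Logger) {
	rb := newRevBytes()
	revToBytes(revision{main: 6, sub: 0}, rb)

	tb := appendMarkTombstone(lg, rb) // rb plus the trailing markTombstone byte
	_ = isTombstone(rb)               // false: plain revBytesLen-sized revision
	_ = isTombstone(tb)               // true: markedRevBytesLen-sized, ends in markTombstone
}
```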
diff --git a/server/mvcc/kvstore_compaction.go b/server/mvcc/kvstore_compaction.go
deleted file mode 100644
index e056fedcc63..00000000000
--- a/server/mvcc/kvstore_compaction.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "encoding/binary"
- "time"
-
- "go.uber.org/zap"
-)
-
-func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struct{}) bool {
- totalStart := time.Now()
- defer func() { dbCompactionTotalMs.Observe(float64(time.Since(totalStart) / time.Millisecond)) }()
- keyCompactions := 0
- defer func() { dbCompactionKeysCounter.Add(float64(keyCompactions)) }()
- defer func() { dbCompactionLast.Set(float64(time.Now().Unix())) }()
-
- end := make([]byte, 8)
- binary.BigEndian.PutUint64(end, uint64(compactMainRev+1))
-
- last := make([]byte, 8+1+8)
- for {
- var rev revision
-
- start := time.Now()
-
- tx := s.b.BatchTx()
- tx.Lock()
- keys, _ := tx.UnsafeRange(keyBucketName, last, end, int64(s.cfg.CompactionBatchLimit))
- for _, key := range keys {
- rev = bytesToRev(key)
- if _, ok := keep[rev]; !ok {
- tx.UnsafeDelete(keyBucketName, key)
- keyCompactions++
- }
- }
-
- if len(keys) < s.cfg.CompactionBatchLimit {
- rbytes := make([]byte, 8+1+8)
- revToBytes(revision{main: compactMainRev}, rbytes)
- tx.UnsafePut(MetaBucketName, finishedCompactKeyName, rbytes)
- tx.Unlock()
- s.lg.Info(
- "finished scheduled compaction",
- zap.Int64("compact-revision", compactMainRev),
- zap.Duration("took", time.Since(totalStart)),
- )
- return true
- }
-
- // update last
- revToBytes(revision{main: rev.main, sub: rev.sub + 1}, last)
- tx.Unlock()
- // Immediately commit the compaction deletes instead of letting them accumulate in the write buffer
- s.b.ForceCommit()
- dbCompactionPauseMs.Observe(float64(time.Since(start) / time.Millisecond))
-
- select {
- case <-time.After(10 * time.Millisecond):
- case <-s.stopc:
- return false
- }
- }
-}
diff --git a/server/mvcc/kvstore_compaction_test.go b/server/mvcc/kvstore_compaction_test.go
deleted file mode 100644
index 4dbf8291300..00000000000
--- a/server/mvcc/kvstore_compaction_test.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "context"
- "os"
- "reflect"
- "testing"
- "time"
-
- "go.etcd.io/etcd/pkg/v3/traceutil"
- "go.etcd.io/etcd/server/v3/lease"
- betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
- "go.uber.org/zap"
-)
-
-func TestScheduleCompaction(t *testing.T) {
- revs := []revision{{1, 0}, {2, 0}, {3, 0}}
-
- tests := []struct {
- rev int64
- keep map[revision]struct{}
- wrevs []revision
- }{
- // compact at 1 and discard all history
- {
- 1,
- nil,
- revs[1:],
- },
- // compact at 3 and discard all history
- {
- 3,
- nil,
- nil,
- },
- // compact at 1 and keeps history one step earlier
- {
- 1,
- map[revision]struct{}{
- {main: 1}: {},
- },
- revs,
- },
- // compact at 1 and keeps history two steps earlier
- {
- 3,
- map[revision]struct{}{
- {main: 2}: {},
- {main: 3}: {},
- },
- revs[1:],
- },
- }
- for i, tt := range tests {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- tx := s.b.BatchTx()
-
- tx.Lock()
- ibytes := newRevBytes()
- for _, rev := range revs {
- revToBytes(rev, ibytes)
- tx.UnsafePut(keyBucketName, ibytes, []byte("bar"))
- }
- tx.Unlock()
-
- s.scheduleCompaction(tt.rev, tt.keep)
-
- tx.Lock()
- for _, rev := range tt.wrevs {
- revToBytes(rev, ibytes)
- keys, _ := tx.UnsafeRange(keyBucketName, ibytes, nil, 0)
- if len(keys) != 1 {
- t.Errorf("#%d: range on %v = %d, want 1", i, rev, len(keys))
- }
- }
- _, vals := tx.UnsafeRange(MetaBucketName, finishedCompactKeyName, nil, 0)
- revToBytes(revision{main: tt.rev}, ibytes)
- if w := [][]byte{ibytes}; !reflect.DeepEqual(vals, w) {
- t.Errorf("#%d: vals on %v = %+v, want %+v", i, finishedCompactKeyName, vals, w)
- }
- tx.Unlock()
-
- cleanup(s, b, tmpPath)
- }
-}
-
-func TestCompactAllAndRestore(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s0 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer os.Remove(tmpPath)
-
- s0.Put([]byte("foo"), []byte("bar"), lease.NoLease)
- s0.Put([]byte("foo"), []byte("bar1"), lease.NoLease)
- s0.Put([]byte("foo"), []byte("bar2"), lease.NoLease)
- s0.DeleteRange([]byte("foo"), nil)
-
- rev := s0.Rev()
- // compact all keys
- done, err := s0.Compact(traceutil.TODO(), rev)
- if err != nil {
- t.Fatal(err)
- }
-
- select {
- case <-done:
- case <-time.After(10 * time.Second):
- t.Fatal("timeout waiting for compaction to finish")
- }
-
- err = s0.Close()
- if err != nil {
- t.Fatal(err)
- }
-
- s1 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- if s1.Rev() != rev {
- t.Errorf("rev = %v, want %v", s1.Rev(), rev)
- }
- _, err = s1.Range(context.TODO(), []byte("foo"), nil, RangeOptions{})
- if err != nil {
-		t.Errorf("unexpected range error %v", err)
- }
-}
diff --git a/server/mvcc/kvstore_test.go b/server/mvcc/kvstore_test.go
deleted file mode 100644
index a60cb8de409..00000000000
--- a/server/mvcc/kvstore_test.go
+++ /dev/null
@@ -1,1000 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "bytes"
- "context"
- "crypto/rand"
- "encoding/binary"
- "fmt"
- "math"
- mrand "math/rand"
- "os"
- "reflect"
- "sort"
- "strconv"
- "sync"
- "testing"
- "time"
-
- "go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/pkg/v3/schedule"
- "go.etcd.io/etcd/pkg/v3/traceutil"
- "go.etcd.io/etcd/server/v3/lease"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
- betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
-
- "go.uber.org/zap"
-)
-
-func TestStoreRev(t *testing.T) {
- b, _ := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer s.Close()
-
- for i := 1; i <= 3; i++ {
- s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
- if r := s.Rev(); r != int64(i+1) {
- t.Errorf("#%d: rev = %d, want %d", i, r, i+1)
- }
- }
-}
-
-func TestStorePut(t *testing.T) {
- kv := mvccpb.KeyValue{
- Key: []byte("foo"),
- Value: []byte("bar"),
- CreateRevision: 1,
- ModRevision: 2,
- Version: 1,
- }
- kvb, err := kv.Marshal()
- if err != nil {
- t.Fatal(err)
- }
-
- tests := []struct {
- rev revision
- r indexGetResp
- rr *rangeResp
-
- wrev revision
- wkey []byte
- wkv mvccpb.KeyValue
- wputrev revision
- }{
- {
- revision{1, 0},
- indexGetResp{revision{}, revision{}, 0, ErrRevisionNotFound},
- nil,
-
- revision{2, 0},
- newTestKeyBytes(revision{2, 0}, false),
- mvccpb.KeyValue{
- Key: []byte("foo"),
- Value: []byte("bar"),
- CreateRevision: 2,
- ModRevision: 2,
- Version: 1,
- Lease: 1,
- },
- revision{2, 0},
- },
- {
- revision{1, 1},
- indexGetResp{revision{2, 0}, revision{2, 0}, 1, nil},
- &rangeResp{[][]byte{newTestKeyBytes(revision{2, 1}, false)}, [][]byte{kvb}},
-
- revision{2, 0},
- newTestKeyBytes(revision{2, 0}, false),
- mvccpb.KeyValue{
- Key: []byte("foo"),
- Value: []byte("bar"),
- CreateRevision: 2,
- ModRevision: 2,
- Version: 2,
- Lease: 2,
- },
- revision{2, 0},
- },
- {
- revision{2, 0},
- indexGetResp{revision{2, 1}, revision{2, 0}, 2, nil},
- &rangeResp{[][]byte{newTestKeyBytes(revision{2, 1}, false)}, [][]byte{kvb}},
-
- revision{3, 0},
- newTestKeyBytes(revision{3, 0}, false),
- mvccpb.KeyValue{
- Key: []byte("foo"),
- Value: []byte("bar"),
- CreateRevision: 2,
- ModRevision: 3,
- Version: 3,
- Lease: 3,
- },
- revision{3, 0},
- },
- }
- for i, tt := range tests {
- s := newFakeStore()
- b := s.b.(*fakeBackend)
- fi := s.kvindex.(*fakeIndex)
-
- s.currentRev = tt.rev.main
- fi.indexGetRespc <- tt.r
- if tt.rr != nil {
- b.tx.rangeRespc <- *tt.rr
- }
-
- s.Put([]byte("foo"), []byte("bar"), lease.LeaseID(i+1))
-
- data, err := tt.wkv.Marshal()
- if err != nil {
- t.Errorf("#%d: marshal err = %v, want nil", i, err)
- }
-
- wact := []testutil.Action{
- {Name: "seqput", Params: []interface{}{keyBucketName, tt.wkey, data}},
- }
-
- if tt.rr != nil {
- wact = []testutil.Action{
- {Name: "seqput", Params: []interface{}{keyBucketName, tt.wkey, data}},
- }
- }
-
- if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
- t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact)
- }
- wact = []testutil.Action{
- {Name: "get", Params: []interface{}{[]byte("foo"), tt.wputrev.main}},
- {Name: "put", Params: []interface{}{[]byte("foo"), tt.wputrev}},
- }
- if g := fi.Action(); !reflect.DeepEqual(g, wact) {
- t.Errorf("#%d: index action = %+v, want %+v", i, g, wact)
- }
- if s.currentRev != tt.wrev.main {
- t.Errorf("#%d: rev = %+v, want %+v", i, s.currentRev, tt.wrev)
- }
-
- s.Close()
- }
-}
-
-func TestStoreRange(t *testing.T) {
- key := newTestKeyBytes(revision{2, 0}, false)
- kv := mvccpb.KeyValue{
- Key: []byte("foo"),
- Value: []byte("bar"),
- CreateRevision: 1,
- ModRevision: 2,
- Version: 1,
- }
- kvb, err := kv.Marshal()
- if err != nil {
- t.Fatal(err)
- }
- wrev := int64(2)
-
- tests := []struct {
- idxr indexRangeResp
- r rangeResp
- }{
- {
- indexRangeResp{[][]byte{[]byte("foo")}, []revision{{2, 0}}},
- rangeResp{[][]byte{key}, [][]byte{kvb}},
- },
- {
- indexRangeResp{[][]byte{[]byte("foo"), []byte("foo1")}, []revision{{2, 0}, {3, 0}}},
- rangeResp{[][]byte{key}, [][]byte{kvb}},
- },
- }
-
- ro := RangeOptions{Limit: 1, Rev: 0, Count: false}
- for i, tt := range tests {
- s := newFakeStore()
- b := s.b.(*fakeBackend)
- fi := s.kvindex.(*fakeIndex)
-
- s.currentRev = 2
- b.tx.rangeRespc <- tt.r
- fi.indexRangeRespc <- tt.idxr
-
- ret, err := s.Range(context.TODO(), []byte("foo"), []byte("goo"), ro)
- if err != nil {
- t.Errorf("#%d: err = %v, want nil", i, err)
- }
- if w := []mvccpb.KeyValue{kv}; !reflect.DeepEqual(ret.KVs, w) {
- t.Errorf("#%d: kvs = %+v, want %+v", i, ret.KVs, w)
- }
- if ret.Rev != wrev {
- t.Errorf("#%d: rev = %d, want %d", i, ret.Rev, wrev)
- }
-
- wstart := newRevBytes()
- revToBytes(tt.idxr.revs[0], wstart)
- wact := []testutil.Action{
- {Name: "range", Params: []interface{}{keyBucketName, wstart, []byte(nil), int64(0)}},
- }
- if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
- t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact)
- }
- wact = []testutil.Action{
- {Name: "range", Params: []interface{}{[]byte("foo"), []byte("goo"), wrev}},
- }
- if g := fi.Action(); !reflect.DeepEqual(g, wact) {
- t.Errorf("#%d: index action = %+v, want %+v", i, g, wact)
- }
- if s.currentRev != 2 {
- t.Errorf("#%d: current rev = %+v, want %+v", i, s.currentRev, 2)
- }
-
- s.Close()
- }
-}
-
-func TestStoreDeleteRange(t *testing.T) {
- key := newTestKeyBytes(revision{2, 0}, false)
- kv := mvccpb.KeyValue{
- Key: []byte("foo"),
- Value: []byte("bar"),
- CreateRevision: 1,
- ModRevision: 2,
- Version: 1,
- }
- kvb, err := kv.Marshal()
- if err != nil {
- t.Fatal(err)
- }
-
- tests := []struct {
- rev revision
- r indexRangeResp
- rr rangeResp
-
- wkey []byte
- wrev revision
- wrrev int64
- wdelrev revision
- }{
- {
- revision{2, 0},
- indexRangeResp{[][]byte{[]byte("foo")}, []revision{{2, 0}}},
- rangeResp{[][]byte{key}, [][]byte{kvb}},
-
- newTestKeyBytes(revision{3, 0}, true),
- revision{3, 0},
- 2,
- revision{3, 0},
- },
- }
- for i, tt := range tests {
- s := newFakeStore()
- b := s.b.(*fakeBackend)
- fi := s.kvindex.(*fakeIndex)
-
- s.currentRev = tt.rev.main
- fi.indexRangeRespc <- tt.r
- b.tx.rangeRespc <- tt.rr
-
- n, _ := s.DeleteRange([]byte("foo"), []byte("goo"))
- if n != 1 {
- t.Errorf("#%d: n = %d, want 1", i, n)
- }
-
- data, err := (&mvccpb.KeyValue{
- Key: []byte("foo"),
- }).Marshal()
- if err != nil {
- t.Errorf("#%d: marshal err = %v, want nil", i, err)
- }
- wact := []testutil.Action{
- {Name: "seqput", Params: []interface{}{keyBucketName, tt.wkey, data}},
- }
- if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
- t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact)
- }
- wact = []testutil.Action{
- {Name: "range", Params: []interface{}{[]byte("foo"), []byte("goo"), tt.wrrev}},
- {Name: "tombstone", Params: []interface{}{[]byte("foo"), tt.wdelrev}},
- }
- if g := fi.Action(); !reflect.DeepEqual(g, wact) {
- t.Errorf("#%d: index action = %+v, want %+v", i, g, wact)
- }
- if s.currentRev != tt.wrev.main {
- t.Errorf("#%d: rev = %+v, want %+v", i, s.currentRev, tt.wrev)
- }
- }
-}
-
-func TestStoreCompact(t *testing.T) {
- s := newFakeStore()
- defer s.Close()
- b := s.b.(*fakeBackend)
- fi := s.kvindex.(*fakeIndex)
-
- s.currentRev = 3
- fi.indexCompactRespc <- map[revision]struct{}{{1, 0}: {}}
- key1 := newTestKeyBytes(revision{1, 0}, false)
- key2 := newTestKeyBytes(revision{2, 0}, false)
- b.tx.rangeRespc <- rangeResp{[][]byte{key1, key2}, nil}
-
- s.Compact(traceutil.TODO(), 3)
- s.fifoSched.WaitFinish(1)
-
- if s.compactMainRev != 3 {
- t.Errorf("compact main rev = %d, want 3", s.compactMainRev)
- }
- end := make([]byte, 8)
- binary.BigEndian.PutUint64(end, uint64(4))
- wact := []testutil.Action{
- {Name: "put", Params: []interface{}{MetaBucketName, scheduledCompactKeyName, newTestRevBytes(revision{3, 0})}},
- {Name: "range", Params: []interface{}{keyBucketName, make([]byte, 17), end, int64(10000)}},
- {Name: "delete", Params: []interface{}{keyBucketName, key2}},
- {Name: "put", Params: []interface{}{MetaBucketName, finishedCompactKeyName, newTestRevBytes(revision{3, 0})}},
- }
- if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
- t.Errorf("tx actions = %+v, want %+v", g, wact)
- }
- wact = []testutil.Action{
- {Name: "compact", Params: []interface{}{int64(3)}},
- }
- if g := fi.Action(); !reflect.DeepEqual(g, wact) {
- t.Errorf("index action = %+v, want %+v", g, wact)
- }
-}
-
-func TestStoreRestore(t *testing.T) {
- s := newFakeStore()
- b := s.b.(*fakeBackend)
- fi := s.kvindex.(*fakeIndex)
-
- putkey := newTestKeyBytes(revision{3, 0}, false)
- putkv := mvccpb.KeyValue{
- Key: []byte("foo"),
- Value: []byte("bar"),
- CreateRevision: 4,
- ModRevision: 4,
- Version: 1,
- }
- putkvb, err := putkv.Marshal()
- if err != nil {
- t.Fatal(err)
- }
- delkey := newTestKeyBytes(revision{5, 0}, true)
- delkv := mvccpb.KeyValue{
- Key: []byte("foo"),
- }
- delkvb, err := delkv.Marshal()
- if err != nil {
- t.Fatal(err)
- }
- b.tx.rangeRespc <- rangeResp{[][]byte{finishedCompactKeyName}, [][]byte{newTestRevBytes(revision{3, 0})}}
- b.tx.rangeRespc <- rangeResp{[][]byte{scheduledCompactKeyName}, [][]byte{newTestRevBytes(revision{3, 0})}}
-
- b.tx.rangeRespc <- rangeResp{[][]byte{putkey, delkey}, [][]byte{putkvb, delkvb}}
- b.tx.rangeRespc <- rangeResp{nil, nil}
-
- s.restore()
-
- if s.compactMainRev != 3 {
- t.Errorf("compact rev = %d, want 3", s.compactMainRev)
- }
- if s.currentRev != 5 {
- t.Errorf("current rev = %v, want 5", s.currentRev)
- }
- wact := []testutil.Action{
- {Name: "range", Params: []interface{}{MetaBucketName, finishedCompactKeyName, []byte(nil), int64(0)}},
- {Name: "range", Params: []interface{}{MetaBucketName, scheduledCompactKeyName, []byte(nil), int64(0)}},
- {Name: "range", Params: []interface{}{keyBucketName, newTestRevBytes(revision{1, 0}), newTestRevBytes(revision{math.MaxInt64, math.MaxInt64}), int64(restoreChunkKeys)}},
- }
- if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
- t.Errorf("tx actions = %+v, want %+v", g, wact)
- }
-
- gens := []generation{
- {created: revision{4, 0}, ver: 2, revs: []revision{{3, 0}, {5, 0}}},
- {created: revision{0, 0}, ver: 0, revs: nil},
- }
- ki := &keyIndex{key: []byte("foo"), modified: revision{5, 0}, generations: gens}
- wact = []testutil.Action{
- {Name: "keyIndex", Params: []interface{}{ki}},
- {Name: "insert", Params: []interface{}{ki}},
- }
- if g := fi.Action(); !reflect.DeepEqual(g, wact) {
- t.Errorf("index action = %+v, want %+v", g, wact)
- }
-}
-
-func TestRestoreDelete(t *testing.T) {
- oldChunk := restoreChunkKeys
- restoreChunkKeys = mrand.Intn(3) + 2
- defer func() { restoreChunkKeys = oldChunk }()
-
- b, _ := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
-
- keys := make(map[string]struct{})
- for i := 0; i < 20; i++ {
- ks := fmt.Sprintf("foo-%d", i)
- k := []byte(ks)
- s.Put(k, []byte("bar"), lease.NoLease)
- keys[ks] = struct{}{}
- switch mrand.Intn(3) {
- case 0:
- // put random key from past via random range on map
- ks = fmt.Sprintf("foo-%d", mrand.Intn(i+1))
- s.Put([]byte(ks), []byte("baz"), lease.NoLease)
- keys[ks] = struct{}{}
- case 1:
- // delete random key via random range on map
- for k := range keys {
- s.DeleteRange([]byte(k), nil)
- delete(keys, k)
- break
- }
- }
- }
- s.Close()
-
- s = NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer s.Close()
- for i := 0; i < 20; i++ {
- ks := fmt.Sprintf("foo-%d", i)
- r, err := s.Range(context.TODO(), []byte(ks), nil, RangeOptions{})
- if err != nil {
- t.Fatal(err)
- }
- if _, ok := keys[ks]; ok {
- if len(r.KVs) == 0 {
- t.Errorf("#%d: expected %q, got deleted", i, ks)
- }
- } else if len(r.KVs) != 0 {
- t.Errorf("#%d: expected deleted, got %q", i, ks)
- }
- }
-}
-
-func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
- tests := []string{"recreate", "restore"}
- for _, test := range tests {
- b, _ := betesting.NewDefaultTmpBackend(t)
- s0 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
-
- s0.Put([]byte("foo"), []byte("bar"), lease.NoLease)
- s0.Put([]byte("foo"), []byte("bar1"), lease.NoLease)
- s0.Put([]byte("foo"), []byte("bar2"), lease.NoLease)
-
-		// write the scheduled compaction, but do not actually run the compaction
- rbytes := newRevBytes()
- revToBytes(revision{main: 2}, rbytes)
- tx := s0.b.BatchTx()
- tx.Lock()
- tx.UnsafePut(MetaBucketName, scheduledCompactKeyName, rbytes)
- tx.Unlock()
-
- s0.Close()
-
- var s *store
- switch test {
- case "recreate":
- s = NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- case "restore":
- s0.Restore(b)
- s = s0
- }
-
- // wait for scheduled compaction to be finished
- time.Sleep(100 * time.Millisecond)
-
- if _, err := s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{Rev: 1}); err != ErrCompacted {
- t.Errorf("range on compacted rev error = %v, want %v", err, ErrCompacted)
- }
- // check the key in backend is deleted
- revbytes := newRevBytes()
- revToBytes(revision{main: 1}, revbytes)
-
- // The disk compaction is done asynchronously and requires more time on slow disk.
- // try 5 times for CI with slow IO.
- for i := 0; i < 5; i++ {
- tx = s.b.BatchTx()
- tx.Lock()
- ks, _ := tx.UnsafeRange(keyBucketName, revbytes, nil, 0)
- tx.Unlock()
- if len(ks) != 0 {
- time.Sleep(100 * time.Millisecond)
- continue
- }
- return
- }
-
- t.Errorf("key for rev %+v still exists, want deleted", bytesToRev(revbytes))
- }
-}
-
-type hashKVResult struct {
- hash uint32
- compactRev int64
-}
-
-// TestHashKVWhenCompacting ensures that HashKV returns correct hash when compacting.
-func TestHashKVWhenCompacting(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer os.Remove(tmpPath)
-
- rev := 10000
- for i := 2; i <= rev; i++ {
- s.Put([]byte("foo"), []byte(fmt.Sprintf("bar%d", i)), lease.NoLease)
- }
-
- hashCompactc := make(chan hashKVResult, 1)
-
- donec := make(chan struct{})
- var wg sync.WaitGroup
- for i := 0; i < 10; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- for {
- hash, _, compactRev, err := s.HashByRev(int64(rev))
- if err != nil {
- t.Error(err)
- }
- select {
- case <-donec:
- return
- case hashCompactc <- hashKVResult{hash, compactRev}:
- }
- }
- }()
- }
-
- go func() {
- defer close(donec)
- revHash := make(map[int64]uint32)
- for round := 0; round < 1000; round++ {
- r := <-hashCompactc
- if revHash[r.compactRev] == 0 {
- revHash[r.compactRev] = r.hash
- }
- if r.hash != revHash[r.compactRev] {
- t.Errorf("Hashes differ (current %v) != (saved %v)", r.hash, revHash[r.compactRev])
- }
- }
- }()
-
- wg.Add(1)
- go func() {
- defer wg.Done()
- for i := 100; i >= 0; i-- {
- _, err := s.Compact(traceutil.TODO(), int64(rev-1-i))
- if err != nil {
- t.Error(err)
- }
- time.Sleep(10 * time.Millisecond)
- }
- }()
-
- select {
- case <-donec:
- wg.Wait()
- case <-time.After(10 * time.Second):
- testutil.FatalStack(t, "timeout")
- }
-}
-
-// TestHashKVZeroRevision ensures that "HashByRev(0)" computes
-// the correct hash value using the latest revision.
-func TestHashKVZeroRevision(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer os.Remove(tmpPath)
-
- rev := 10000
- for i := 2; i <= rev; i++ {
- s.Put([]byte("foo"), []byte(fmt.Sprintf("bar%d", i)), lease.NoLease)
- }
- if _, err := s.Compact(traceutil.TODO(), int64(rev/2)); err != nil {
- t.Fatal(err)
- }
-
- hash1, _, _, err := s.HashByRev(int64(rev))
- if err != nil {
- t.Fatal(err)
- }
- var hash2 uint32
- hash2, _, _, err = s.HashByRev(0)
- if err != nil {
- t.Fatal(err)
- }
- if hash1 != hash2 {
- t.Errorf("hash %d (rev %d) != hash %d (rev 0)", hash1, rev, hash2)
- }
-}
-
-func TestTxnPut(t *testing.T) {
- // assign arbitrary size
- bytesN := 30
- sliceN := 100
- keys := createBytesSlice(bytesN, sliceN)
- vals := createBytesSlice(bytesN, sliceN)
-
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, b, tmpPath)
-
- for i := 0; i < sliceN; i++ {
- txn := s.Write(traceutil.TODO())
- base := int64(i + 2)
- if rev := txn.Put(keys[i], vals[i], lease.NoLease); rev != base {
- t.Errorf("#%d: rev = %d, want %d", i, rev, base)
- }
- txn.End()
- }
-}
-
-// TestConcurrentReadNotBlockingWrite ensures a Read does not block Writes after its creation
-func TestConcurrentReadNotBlockingWrite(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer os.Remove(tmpPath)
-
- // write something to read later
- s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
-
- // readTx simulates a long read request
- readTx1 := s.Read(ConcurrentReadTxMode, traceutil.TODO())
-
- // write should not be blocked by reads
- done := make(chan struct{}, 1)
- go func() {
- s.Put([]byte("foo"), []byte("newBar"), lease.NoLease) // this is a write Txn
- done <- struct{}{}
- }()
- select {
- case <-done:
- case <-time.After(1 * time.Second):
- t.Fatalf("write should not be blocked by read")
- }
-
- // readTx2 simulates a short read request
- readTx2 := s.Read(ConcurrentReadTxMode, traceutil.TODO())
- ro := RangeOptions{Limit: 1, Rev: 0, Count: false}
- ret, err := readTx2.Range(context.TODO(), []byte("foo"), nil, ro)
- if err != nil {
- t.Fatalf("failed to range: %v", err)
- }
- // readTx2 should see the result of new write
- w := mvccpb.KeyValue{
- Key: []byte("foo"),
- Value: []byte("newBar"),
- CreateRevision: 2,
- ModRevision: 3,
- Version: 2,
- }
- if !reflect.DeepEqual(ret.KVs[0], w) {
- t.Fatalf("range result = %+v, want = %+v", ret.KVs[0], w)
- }
- readTx2.End()
-
- ret, err = readTx1.Range(context.TODO(), []byte("foo"), nil, ro)
- if err != nil {
- t.Fatalf("failed to range: %v", err)
- }
- // readTx1 should not see the result of new write
- w = mvccpb.KeyValue{
- Key: []byte("foo"),
- Value: []byte("bar"),
- CreateRevision: 2,
- ModRevision: 2,
- Version: 1,
- }
- if !reflect.DeepEqual(ret.KVs[0], w) {
- t.Fatalf("range result = %+v, want = %+v", ret.KVs[0], w)
- }
- readTx1.End()
-}
-
-// TestConcurrentReadTxAndWrite creates random concurrent Reads and Writes, and ensures Reads always see latest Writes
-func TestConcurrentReadTxAndWrite(t *testing.T) {
- var (
- numOfReads = 100
- numOfWrites = 100
- maxNumOfPutsPerWrite = 10
- committedKVs kvs // committedKVs records the key-value pairs written by the finished Write Txns
- mu sync.Mutex // mu protects committedKVs
- )
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer os.Remove(tmpPath)
-
- var wg sync.WaitGroup
- wg.Add(numOfWrites)
- for i := 0; i < numOfWrites; i++ {
- go func() {
- defer wg.Done()
- time.Sleep(time.Duration(mrand.Intn(100)) * time.Millisecond) // random starting time
-
- tx := s.Write(traceutil.TODO())
- numOfPuts := mrand.Intn(maxNumOfPutsPerWrite) + 1
- var pendingKvs kvs
- for j := 0; j < numOfPuts; j++ {
- k := []byte(strconv.Itoa(mrand.Int()))
- v := []byte(strconv.Itoa(mrand.Int()))
- tx.Put(k, v, lease.NoLease)
- pendingKvs = append(pendingKvs, kv{k, v})
- }
-			// reads should not see the above Puts until the write txn is finished
- mu.Lock()
- committedKVs = merge(committedKVs, pendingKvs) // update shared data structure
- tx.End()
- mu.Unlock()
- }()
- }
-
- wg.Add(numOfReads)
- for i := 0; i < numOfReads; i++ {
- go func() {
- defer wg.Done()
- time.Sleep(time.Duration(mrand.Intn(100)) * time.Millisecond) // random starting time
-
- mu.Lock()
- wKVs := make(kvs, len(committedKVs))
- copy(wKVs, committedKVs)
- tx := s.Read(ConcurrentReadTxMode, traceutil.TODO())
- mu.Unlock()
- // get all keys in backend store, and compare with wKVs
- ret, err := tx.Range(context.TODO(), []byte("\x00000000"), []byte("\xffffffff"), RangeOptions{})
- tx.End()
- if err != nil {
- t.Errorf("failed to range keys: %v", err)
- return
- }
- if len(wKVs) == 0 && len(ret.KVs) == 0 { // no committed KVs yet
- return
- }
- var result kvs
- for _, keyValue := range ret.KVs {
- result = append(result, kv{keyValue.Key, keyValue.Value})
- }
- if !reflect.DeepEqual(wKVs, result) {
- t.Errorf("unexpected range result") // too many key value pairs, skip printing them
- }
- }()
- }
-
- // wait until goroutines finish or timeout
- doneC := make(chan struct{})
- go func() {
- wg.Wait()
- close(doneC)
- }()
- select {
- case <-doneC:
- case <-time.After(5 * time.Minute):
- testutil.FatalStack(t, "timeout")
- }
-}
-
-type kv struct {
- key []byte
- val []byte
-}
-
-type kvs []kv
-
-func (kvs kvs) Len() int { return len(kvs) }
-func (kvs kvs) Less(i, j int) bool { return bytes.Compare(kvs[i].key, kvs[j].key) < 0 }
-func (kvs kvs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] }
-
-func merge(dst, src kvs) kvs {
- dst = append(dst, src...)
- sort.Stable(dst)
- // remove duplicates, using only the newest value
- // ref: tx_buffer.go
- widx := 0
- for ridx := 1; ridx < len(dst); ridx++ {
- if !bytes.Equal(dst[widx].key, dst[ridx].key) {
- widx++
- }
- dst[widx] = dst[ridx]
- }
- return dst[:widx+1]
-}
-
-// TODO: test attach key to lessor
-
-func newTestRevBytes(rev revision) []byte {
- bytes := newRevBytes()
- revToBytes(rev, bytes)
- return bytes
-}
-
-func newTestKeyBytes(rev revision, tombstone bool) []byte {
- bytes := newRevBytes()
- revToBytes(rev, bytes)
- if tombstone {
- bytes = appendMarkTombstone(zap.NewExample(), bytes)
- }
- return bytes
-}
-
-func newFakeStore() *store {
- b := &fakeBackend{&fakeBatchTx{
- Recorder: &testutil.RecorderBuffered{},
- rangeRespc: make(chan rangeResp, 5)}}
- fi := &fakeIndex{
- Recorder: &testutil.RecorderBuffered{},
- indexGetRespc: make(chan indexGetResp, 1),
- indexRangeRespc: make(chan indexRangeResp, 1),
- indexRangeEventsRespc: make(chan indexRangeEventsResp, 1),
- indexCompactRespc: make(chan map[revision]struct{}, 1),
- }
- s := &store{
- cfg: StoreConfig{CompactionBatchLimit: 10000},
- b: b,
- le: &lease.FakeLessor{},
- kvindex: fi,
- currentRev: 0,
- compactMainRev: -1,
- fifoSched: schedule.NewFIFOScheduler(),
- stopc: make(chan struct{}),
- lg: zap.NewExample(),
- }
- s.ReadView, s.WriteView = &readView{s}, &writeView{s}
- return s
-}
-
-type rangeResp struct {
- keys [][]byte
- vals [][]byte
-}
-
-type fakeBatchTx struct {
- testutil.Recorder
- rangeRespc chan rangeResp
-}
-
-func (b *fakeBatchTx) Lock() {}
-func (b *fakeBatchTx) Unlock() {}
-func (b *fakeBatchTx) RLock() {}
-func (b *fakeBatchTx) RUnlock() {}
-func (b *fakeBatchTx) UnsafeCreateBucket(name []byte) {}
-func (b *fakeBatchTx) UnsafeDeleteBucket(name []byte) {}
-func (b *fakeBatchTx) UnsafePut(bucketName []byte, key []byte, value []byte) {
- b.Recorder.Record(testutil.Action{Name: "put", Params: []interface{}{bucketName, key, value}})
-}
-func (b *fakeBatchTx) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
- b.Recorder.Record(testutil.Action{Name: "seqput", Params: []interface{}{bucketName, key, value}})
-}
-func (b *fakeBatchTx) UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) {
- b.Recorder.Record(testutil.Action{Name: "range", Params: []interface{}{bucketName, key, endKey, limit}})
- r := <-b.rangeRespc
- return r.keys, r.vals
-}
-func (b *fakeBatchTx) UnsafeDelete(bucketName []byte, key []byte) {
- b.Recorder.Record(testutil.Action{Name: "delete", Params: []interface{}{bucketName, key}})
-}
-func (b *fakeBatchTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
- return nil
-}
-func (b *fakeBatchTx) Commit() {}
-func (b *fakeBatchTx) CommitAndStop() {}
-
-type fakeBackend struct {
- tx *fakeBatchTx
-}
-
-func (b *fakeBackend) BatchTx() backend.BatchTx { return b.tx }
-func (b *fakeBackend) ReadTx() backend.ReadTx { return b.tx }
-func (b *fakeBackend) ConcurrentReadTx() backend.ReadTx { return b.tx }
-func (b *fakeBackend) Hash(ignores map[backend.IgnoreKey]struct{}) (uint32, error) { return 0, nil }
-func (b *fakeBackend) Size() int64 { return 0 }
-func (b *fakeBackend) SizeInUse() int64 { return 0 }
-func (b *fakeBackend) OpenReadTxN() int64 { return 0 }
-func (b *fakeBackend) Snapshot() backend.Snapshot { return nil }
-func (b *fakeBackend) ForceCommit() {}
-func (b *fakeBackend) Defrag() error { return nil }
-func (b *fakeBackend) Close() error { return nil }
-
-type indexGetResp struct {
- rev revision
- created revision
- ver int64
- err error
-}
-
-type indexRangeResp struct {
- keys [][]byte
- revs []revision
-}
-
-type indexRangeEventsResp struct {
- revs []revision
-}
-
-type fakeIndex struct {
- testutil.Recorder
- indexGetRespc chan indexGetResp
- indexRangeRespc chan indexRangeResp
- indexRangeEventsRespc chan indexRangeEventsResp
- indexCompactRespc chan map[revision]struct{}
-}
-
-func (i *fakeIndex) Revisions(key, end []byte, atRev int64, limit int) []revision {
- _, rev := i.Range(key, end, atRev)
- return rev
-}
-
-func (i *fakeIndex) CountRevisions(key, end []byte, atRev int64, limit int) int {
- _, rev := i.Range(key, end, atRev)
- return len(rev)
-}
-
-func (i *fakeIndex) Get(key []byte, atRev int64) (rev, created revision, ver int64, err error) {
- i.Recorder.Record(testutil.Action{Name: "get", Params: []interface{}{key, atRev}})
- r := <-i.indexGetRespc
- return r.rev, r.created, r.ver, r.err
-}
-func (i *fakeIndex) Range(key, end []byte, atRev int64) ([][]byte, []revision) {
- i.Recorder.Record(testutil.Action{Name: "range", Params: []interface{}{key, end, atRev}})
- r := <-i.indexRangeRespc
- return r.keys, r.revs
-}
-func (i *fakeIndex) Put(key []byte, rev revision) {
- i.Recorder.Record(testutil.Action{Name: "put", Params: []interface{}{key, rev}})
-}
-func (i *fakeIndex) Tombstone(key []byte, rev revision) error {
- i.Recorder.Record(testutil.Action{Name: "tombstone", Params: []interface{}{key, rev}})
- return nil
-}
-func (i *fakeIndex) RangeSince(key, end []byte, rev int64) []revision {
- i.Recorder.Record(testutil.Action{Name: "rangeEvents", Params: []interface{}{key, end, rev}})
- r := <-i.indexRangeEventsRespc
- return r.revs
-}
-func (i *fakeIndex) Compact(rev int64) map[revision]struct{} {
- i.Recorder.Record(testutil.Action{Name: "compact", Params: []interface{}{rev}})
- return <-i.indexCompactRespc
-}
-func (i *fakeIndex) Keep(rev int64) map[revision]struct{} {
- i.Recorder.Record(testutil.Action{Name: "keep", Params: []interface{}{rev}})
- return <-i.indexCompactRespc
-}
-func (i *fakeIndex) Equal(b index) bool { return false }
-
-func (i *fakeIndex) Insert(ki *keyIndex) {
- i.Recorder.Record(testutil.Action{Name: "insert", Params: []interface{}{ki}})
-}
-
-func (i *fakeIndex) KeyIndex(ki *keyIndex) *keyIndex {
- i.Recorder.Record(testutil.Action{Name: "keyIndex", Params: []interface{}{ki}})
- return nil
-}
-
-func createBytesSlice(bytesN, sliceN int) [][]byte {
- rs := [][]byte{}
- for len(rs) != sliceN {
- v := make([]byte, bytesN)
- if _, err := rand.Read(v); err != nil {
- panic(err)
- }
- rs = append(rs, v)
- }
- return rs
-}
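
The concurrent read/write test above merges committed and pending key-value pairs with a small helper that keeps only the newest value per key. Below is a self-contained sketch of that dedup-by-newest behavior, rewritten with string keys for readability; the names are illustrative, not etcd API.

```go
package main

import (
	"fmt"
	"sort"
)

type pair struct{ key, val string }

// merge mirrors the helper in the deleted kvstore_test.go: append src to dst,
// stable-sort by key, then keep only the last (newest) value for each key.
func merge(dst, src []pair) []pair {
	dst = append(dst, src...)
	sort.SliceStable(dst, func(i, j int) bool { return dst[i].key < dst[j].key })
	widx := 0
	for ridx := 1; ridx < len(dst); ridx++ {
		if dst[widx].key != dst[ridx].key {
			widx++
		}
		dst[widx] = dst[ridx]
	}
	return dst[:widx+1]
}

func main() {
	committed := []pair{{"a", "1"}, {"b", "1"}}
	pending := []pair{{"b", "2"}, {"c", "1"}}
	fmt.Println(merge(committed, pending)) // [{a 1} {b 2} {c 1}]
}
```
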
diff --git a/server/mvcc/kvstore_txn.go b/server/mvcc/kvstore_txn.go
deleted file mode 100644
index 42cfb2e2bee..00000000000
--- a/server/mvcc/kvstore_txn.go
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "context"
-
- "go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/pkg/v3/traceutil"
- "go.etcd.io/etcd/server/v3/lease"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
- "go.uber.org/zap"
-)
-
-type storeTxnRead struct {
- s *store
- tx backend.ReadTx
-
- firstRev int64
- rev int64
-
- trace *traceutil.Trace
-}
-
-func (s *store) Read(mode ReadTxMode, trace *traceutil.Trace) TxnRead {
- s.mu.RLock()
- s.revMu.RLock()
-	// For read-only workloads, we copy the transaction read buffer (ConcurrentReadTxMode)
-	// so reads can run concurrently with ongoing blocking writes.
-	// For write and write-read transactions, we use the shared buffer (ReadTxMode)
-	// rather than duplicating the transaction read buffer, to avoid the copy overhead.
- var tx backend.ReadTx
- if mode == ConcurrentReadTxMode {
- tx = s.b.ConcurrentReadTx()
- } else {
- tx = s.b.ReadTx()
- }
-
- tx.RLock() // RLock is no-op. concurrentReadTx does not need to be locked after it is created.
- firstRev, rev := s.compactMainRev, s.currentRev
- s.revMu.RUnlock()
- return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev, trace})
-}
-
-func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev }
-func (tr *storeTxnRead) Rev() int64 { return tr.rev }
-
-func (tr *storeTxnRead) Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
- return tr.rangeKeys(ctx, key, end, tr.Rev(), ro)
-}
-
-func (tr *storeTxnRead) End() {
- tr.tx.RUnlock() // RUnlock signals the end of concurrentReadTx.
- tr.s.mu.RUnlock()
-}
-
-type storeTxnWrite struct {
- storeTxnRead
- tx backend.BatchTx
- // beginRev is the revision where the txn begins; it will write to the next revision.
- beginRev int64
- changes []mvccpb.KeyValue
-}
-
-func (s *store) Write(trace *traceutil.Trace) TxnWrite {
- s.mu.RLock()
- tx := s.b.BatchTx()
- tx.Lock()
- tw := &storeTxnWrite{
- storeTxnRead: storeTxnRead{s, tx, 0, 0, trace},
- tx: tx,
- beginRev: s.currentRev,
- changes: make([]mvccpb.KeyValue, 0, 4),
- }
- return newMetricsTxnWrite(tw)
-}
-
-func (tw *storeTxnWrite) Rev() int64 { return tw.beginRev }
-
-func (tw *storeTxnWrite) Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
- rev := tw.beginRev
- if len(tw.changes) > 0 {
- rev++
- }
- return tw.rangeKeys(ctx, key, end, rev, ro)
-}
-
-func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) {
- if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 {
- return n, tw.beginRev + 1
- }
- return 0, tw.beginRev
-}
-
-func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 {
- tw.put(key, value, lease)
- return tw.beginRev + 1
-}
-
-func (tw *storeTxnWrite) End() {
- // only update index if the txn modifies the mvcc state.
- if len(tw.changes) != 0 {
- // hold revMu lock to prevent new read txns from opening until writeback.
- tw.s.revMu.Lock()
- tw.s.currentRev++
- }
- tw.tx.Unlock()
- if len(tw.changes) != 0 {
- tw.s.revMu.Unlock()
- }
- tw.s.mu.RUnlock()
-}
-
-func (tr *storeTxnRead) rangeKeys(ctx context.Context, key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) {
- rev := ro.Rev
- if rev > curRev {
- return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev
- }
- if rev <= 0 {
- rev = curRev
- }
- if rev < tr.s.compactMainRev {
- return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted
- }
- if ro.Count {
- total := tr.s.kvindex.CountRevisions(key, end, rev, int(ro.Limit))
- tr.trace.Step("count revisions from in-memory index tree")
- return &RangeResult{KVs: nil, Count: total, Rev: curRev}, nil
- }
- revpairs := tr.s.kvindex.Revisions(key, end, rev, int(ro.Limit))
- tr.trace.Step("range keys from in-memory index tree")
- if len(revpairs) == 0 {
- return &RangeResult{KVs: nil, Count: 0, Rev: curRev}, nil
- }
-
- limit := int(ro.Limit)
- if limit <= 0 || limit > len(revpairs) {
- limit = len(revpairs)
- }
-
- kvs := make([]mvccpb.KeyValue, limit)
- revBytes := newRevBytes()
- for i, revpair := range revpairs[:len(kvs)] {
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- default:
- }
- revToBytes(revpair, revBytes)
- _, vs := tr.tx.UnsafeRange(keyBucketName, revBytes, nil, 0)
- if len(vs) != 1 {
- tr.s.lg.Fatal(
- "range failed to find revision pair",
- zap.Int64("revision-main", revpair.main),
- zap.Int64("revision-sub", revpair.sub),
- )
- }
- if err := kvs[i].Unmarshal(vs[0]); err != nil {
- tr.s.lg.Fatal(
- "failed to unmarshal mvccpb.KeyValue",
- zap.Error(err),
- )
- }
- }
- tr.trace.Step("range keys from bolt db")
- return &RangeResult{KVs: kvs, Count: len(revpairs), Rev: curRev}, nil
-}
-
-func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) {
- rev := tw.beginRev + 1
- c := rev
- oldLease := lease.NoLease
-
-	// if the key existed before, reuse its previous created revision and
-	// fetch its previous leaseID
- _, created, ver, err := tw.s.kvindex.Get(key, rev)
- if err == nil {
- c = created.main
- oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)})
- tw.trace.Step("get key's previous created_revision and leaseID")
- }
- ibytes := newRevBytes()
- idxRev := revision{main: rev, sub: int64(len(tw.changes))}
- revToBytes(idxRev, ibytes)
-
- ver = ver + 1
- kv := mvccpb.KeyValue{
- Key: key,
- Value: value,
- CreateRevision: c,
- ModRevision: rev,
- Version: ver,
- Lease: int64(leaseID),
- }
-
- d, err := kv.Marshal()
- if err != nil {
- tw.storeTxnRead.s.lg.Fatal(
- "failed to marshal mvccpb.KeyValue",
- zap.Error(err),
- )
- }
-
- tw.trace.Step("marshal mvccpb.KeyValue")
- tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d)
- tw.s.kvindex.Put(key, idxRev)
- tw.changes = append(tw.changes, kv)
- tw.trace.Step("store kv pair into bolt db")
-
- if oldLease != lease.NoLease {
- if tw.s.le == nil {
- panic("no lessor to detach lease")
- }
- err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}})
- if err != nil {
- tw.storeTxnRead.s.lg.Error(
- "failed to detach old lease from a key",
- zap.Error(err),
- )
- }
- }
- if leaseID != lease.NoLease {
- if tw.s.le == nil {
- panic("no lessor to attach lease")
- }
- err = tw.s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}})
- if err != nil {
- panic("unexpected error from lease Attach")
- }
- }
- tw.trace.Step("attach lease to kv pair")
-}
-
-func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 {
- rrev := tw.beginRev
- if len(tw.changes) > 0 {
- rrev++
- }
- keys, _ := tw.s.kvindex.Range(key, end, rrev)
- if len(keys) == 0 {
- return 0
- }
- for _, key := range keys {
- tw.delete(key)
- }
- return int64(len(keys))
-}
-
-func (tw *storeTxnWrite) delete(key []byte) {
- ibytes := newRevBytes()
- idxRev := revision{main: tw.beginRev + 1, sub: int64(len(tw.changes))}
- revToBytes(idxRev, ibytes)
-
- ibytes = appendMarkTombstone(tw.storeTxnRead.s.lg, ibytes)
-
- kv := mvccpb.KeyValue{Key: key}
-
- d, err := kv.Marshal()
- if err != nil {
- tw.storeTxnRead.s.lg.Fatal(
- "failed to marshal mvccpb.KeyValue",
- zap.Error(err),
- )
- }
-
- tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d)
- err = tw.s.kvindex.Tombstone(key, idxRev)
- if err != nil {
- tw.storeTxnRead.s.lg.Fatal(
- "failed to tombstone an existing key",
- zap.String("key", string(key)),
- zap.Error(err),
- )
- }
- tw.changes = append(tw.changes, kv)
-
- item := lease.LeaseItem{Key: string(key)}
- leaseID := tw.s.le.GetLease(item)
-
- if leaseID != lease.NoLease {
- err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item})
- if err != nil {
- tw.storeTxnRead.s.lg.Error(
- "failed to detach old lease from a key",
- zap.Error(err),
- )
- }
- }
-}
-
-func (tw *storeTxnWrite) Changes() []mvccpb.KeyValue { return tw.changes }
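
The write path above assigns every change in a txn to revision beginRev+1, with a sub revision equal to the change's position in the txn, and lets Range inside the txn read its own writes. Below is a toy sketch of that bookkeeping; `writeTxn` and its methods are invented for illustration and are not etcd code.

```go
package main

import "fmt"

// writeTxn is a stand-in for storeTxnWrite: it begins at beginRev and all of
// its changes land in revision beginRev+1 with increasing sub revisions.
type writeTxn struct {
	beginRev int64
	changes  []string
}

// put records a change and returns the (main, sub) revision assigned to it,
// mirroring how storeTxnWrite.put builds idxRev.
func (tw *writeTxn) put(key string) (main, sub int64) {
	main, sub = tw.beginRev+1, int64(len(tw.changes))
	tw.changes = append(tw.changes, key)
	return main, sub
}

// readRev mirrors storeTxnWrite.Range: once the txn has pending changes,
// reads inside it use beginRev+1, so the txn sees its own writes.
func (tw *writeTxn) readRev() int64 {
	if len(tw.changes) > 0 {
		return tw.beginRev + 1
	}
	return tw.beginRev
}

func main() {
	tw := &writeTxn{beginRev: 5}
	fmt.Println(tw.readRev())  // 5: nothing written yet
	fmt.Println(tw.put("foo")) // 6 0
	fmt.Println(tw.put("bar")) // 6 1
	fmt.Println(tw.readRev())  // 6: the txn reads its own writes
}
```
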
diff --git a/server/mvcc/metrics.go b/server/mvcc/metrics.go
deleted file mode 100644
index f28d114e2bc..00000000000
--- a/server/mvcc/metrics.go
+++ /dev/null
@@ -1,314 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "sync"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-var (
- rangeCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "mvcc",
- Name: "range_total",
- Help: "Total number of ranges seen by this member.",
- })
- rangeCounterDebug = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "range_total",
- Help: "Total number of ranges seen by this member.",
- })
-
- putCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "mvcc",
- Name: "put_total",
- Help: "Total number of puts seen by this member.",
- })
-
- deleteCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "mvcc",
- Name: "delete_total",
- Help: "Total number of deletes seen by this member.",
- })
-
- txnCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "mvcc",
- Name: "txn_total",
- Help: "Total number of txns seen by this member.",
- })
-
- keysGauge = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "keys_total",
- Help: "Total number of keys.",
- })
-
- watchStreamGauge = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "watch_stream_total",
- Help: "Total number of watch streams.",
- })
-
- watcherGauge = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "watcher_total",
- Help: "Total number of watchers.",
- })
-
- slowWatcherGauge = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "slow_watcher_total",
- Help: "Total number of unsynced slow watchers.",
- })
-
- totalEventsCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "events_total",
- Help: "Total number of events sent by this member.",
- })
-
- pendingEventsGauge = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "pending_events_total",
- Help: "Total number of pending events to be sent.",
- })
-
- indexCompactionPauseMs = prometheus.NewHistogram(
- prometheus.HistogramOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "index_compaction_pause_duration_milliseconds",
- Help: "Bucketed histogram of index compaction pause duration.",
-
- // lowest bucket start of upper bound 0.5 ms with factor 2
- // highest bucket start of 0.5 ms * 2^13 == 4.096 sec
- Buckets: prometheus.ExponentialBuckets(0.5, 2, 14),
- })
-
- dbCompactionPauseMs = prometheus.NewHistogram(
- prometheus.HistogramOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "db_compaction_pause_duration_milliseconds",
- Help: "Bucketed histogram of db compaction pause duration.",
-
- // lowest bucket start of upper bound 1 ms with factor 2
- // highest bucket start of 1 ms * 2^12 == 4.096 sec
- Buckets: prometheus.ExponentialBuckets(1, 2, 13),
- })
-
- dbCompactionTotalMs = prometheus.NewHistogram(
- prometheus.HistogramOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "db_compaction_total_duration_milliseconds",
- Help: "Bucketed histogram of db compaction total duration.",
-
- // lowest bucket start of upper bound 100 ms with factor 2
- // highest bucket start of 100 ms * 2^13 == 8.192 sec
- Buckets: prometheus.ExponentialBuckets(100, 2, 14),
- })
-
- dbCompactionLast = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "db_compaction_last",
- Help: "The unix time of the last db compaction. Resets to 0 on start.",
- })
-
- dbCompactionKeysCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "db_compaction_keys_total",
- Help: "Total number of db keys compacted.",
- })
-
- dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "mvcc",
- Name: "db_total_size_in_bytes",
- Help: "Total size of the underlying database physically allocated in bytes.",
- },
- func() float64 {
- reportDbTotalSizeInBytesMu.RLock()
- defer reportDbTotalSizeInBytesMu.RUnlock()
- return reportDbTotalSizeInBytes()
- },
- )
- // overridden by mvcc initialization
- reportDbTotalSizeInBytesMu sync.RWMutex
- reportDbTotalSizeInBytes = func() float64 { return 0 }
-
- // overridden by mvcc initialization
- reportDbTotalSizeInBytesDebugMu sync.RWMutex
- reportDbTotalSizeInBytesDebug = func() float64 { return 0 }
-
- dbTotalSizeInUse = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "mvcc",
- Name: "db_total_size_in_use_in_bytes",
- Help: "Total size of the underlying database logically in use in bytes.",
- },
- func() float64 {
- reportDbTotalSizeInUseInBytesMu.RLock()
- defer reportDbTotalSizeInUseInBytesMu.RUnlock()
- return reportDbTotalSizeInUseInBytes()
- },
- )
- // overridden by mvcc initialization
- reportDbTotalSizeInUseInBytesMu sync.RWMutex
- reportDbTotalSizeInUseInBytes = func() float64 { return 0 }
-
- dbOpenReadTxN = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "mvcc",
- Name: "db_open_read_transactions",
- Help: "The number of currently open read transactions",
- },
-
- func() float64 {
- reportDbOpenReadTxNMu.RLock()
- defer reportDbOpenReadTxNMu.RUnlock()
- return reportDbOpenReadTxN()
- },
- )
- // overridden by mvcc initialization
- reportDbOpenReadTxNMu sync.RWMutex
- reportDbOpenReadTxN = func() float64 { return 0 }
-
- hashSec = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "mvcc",
- Name: "hash_duration_seconds",
- Help: "The latency distribution of storage hash operation.",
-
- // 100 MB usually takes 100 ms, so start with 10 MB of 10 ms
- // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
- // highest bucket start of 0.01 sec * 2^14 == 163.84 sec
- Buckets: prometheus.ExponentialBuckets(.01, 2, 15),
- })
-
- hashRevSec = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "mvcc",
- Name: "hash_rev_duration_seconds",
- Help: "The latency distribution of storage hash by revision operation.",
-
- // 100 MB usually takes 100 ms, so start with 10 MB of 10 ms
- // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
- // highest bucket start of 0.01 sec * 2^14 == 163.84 sec
- Buckets: prometheus.ExponentialBuckets(.01, 2, 15),
- })
-
- currentRev = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "current_revision",
- Help: "The current revision of store.",
- },
- func() float64 {
- reportCurrentRevMu.RLock()
- defer reportCurrentRevMu.RUnlock()
- return reportCurrentRev()
- },
- )
- // overridden by mvcc initialization
- reportCurrentRevMu sync.RWMutex
- reportCurrentRev = func() float64 { return 0 }
-
- compactRev = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "compact_revision",
- Help: "The revision of the last compaction in store.",
- },
- func() float64 {
- reportCompactRevMu.RLock()
- defer reportCompactRevMu.RUnlock()
- return reportCompactRev()
- },
- )
- // overridden by mvcc initialization
- reportCompactRevMu sync.RWMutex
- reportCompactRev = func() float64 { return 0 }
-
- totalPutSizeGauge = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "total_put_size_in_bytes",
- Help: "The total size of put kv pairs seen by this member.",
- })
-)
-
-func init() {
- prometheus.MustRegister(rangeCounter)
- prometheus.MustRegister(rangeCounterDebug)
- prometheus.MustRegister(putCounter)
- prometheus.MustRegister(deleteCounter)
- prometheus.MustRegister(txnCounter)
- prometheus.MustRegister(keysGauge)
- prometheus.MustRegister(watchStreamGauge)
- prometheus.MustRegister(watcherGauge)
- prometheus.MustRegister(slowWatcherGauge)
- prometheus.MustRegister(totalEventsCounter)
- prometheus.MustRegister(pendingEventsGauge)
- prometheus.MustRegister(indexCompactionPauseMs)
- prometheus.MustRegister(dbCompactionPauseMs)
- prometheus.MustRegister(dbCompactionTotalMs)
- prometheus.MustRegister(dbCompactionLast)
- prometheus.MustRegister(dbCompactionKeysCounter)
- prometheus.MustRegister(dbTotalSize)
- prometheus.MustRegister(dbTotalSizeInUse)
- prometheus.MustRegister(dbOpenReadTxN)
- prometheus.MustRegister(hashSec)
- prometheus.MustRegister(hashRevSec)
- prometheus.MustRegister(currentRev)
- prometheus.MustRegister(compactRev)
- prometheus.MustRegister(totalPutSizeGauge)
-}
-
-// ReportEventReceived reports that an event is received.
-// This function should be called when the external systems received an
-// event from mvcc.Watcher.
-func ReportEventReceived(n int) {
- pendingEventsGauge.Sub(float64(n))
- totalEventsCounter.Add(float64(n))
-}
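
Several of the gauges above read through mutex-guarded function variables that start out as stubs and are swapped in during mvcc initialization. Below is a minimal sketch of that pattern, assuming only the Prometheus Go client; the metric and variable names are made up for the example.

```go
package main

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	// Stub reporter, overridden later by the component that owns the value.
	reportSizeMu sync.RWMutex
	reportSize   = func() float64 { return 0 }

	// The GaugeFunc reads through the guarded function variable on every scrape.
	sizeGauge = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Namespace: "example",
		Subsystem: "store",
		Name:      "size_in_bytes",
		Help:      "Current store size in bytes.",
	}, func() float64 {
		reportSizeMu.RLock()
		defer reportSizeMu.RUnlock()
		return reportSize()
	})
)

func init() { prometheus.MustRegister(sizeGauge) }

func main() {
	// Later, during initialization, the real reporting function is wired in.
	reportSizeMu.Lock()
	reportSize = func() float64 { return 42 }
	reportSizeMu.Unlock()
}
```
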
diff --git a/server/mvcc/revision.go b/server/mvcc/revision.go
deleted file mode 100644
index d6213866f26..00000000000
--- a/server/mvcc/revision.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import "encoding/binary"
-
-// revBytesLen is the byte length of a normal revision.
-// The first 8 bytes are the revision.main in big-endian format. The 9th byte
-// is a '_'. The last 8 bytes are the revision.sub in big-endian format.
-const revBytesLen = 8 + 1 + 8
-
-// A revision indicates modification of the key-value space.
-// The set of changes that share the same main revision changes the key-value space atomically.
-type revision struct {
- // main is the main revision of a set of changes that happen atomically.
- main int64
-
- // sub is the sub revision of a change in a set of changes that happen
-	// atomically. Each change has a different, increasing sub revision in
-	// that set.
- sub int64
-}
-
-func (a revision) GreaterThan(b revision) bool {
- if a.main > b.main {
- return true
- }
- if a.main < b.main {
- return false
- }
- return a.sub > b.sub
-}
-
-func newRevBytes() []byte {
- return make([]byte, revBytesLen, markedRevBytesLen)
-}
-
-func revToBytes(rev revision, bytes []byte) {
- binary.BigEndian.PutUint64(bytes, uint64(rev.main))
- bytes[8] = '_'
- binary.BigEndian.PutUint64(bytes[9:], uint64(rev.sub))
-}
-
-func bytesToRev(bytes []byte) revision {
- return revision{
- main: int64(binary.BigEndian.Uint64(bytes[0:8])),
- sub: int64(binary.BigEndian.Uint64(bytes[9:])),
- }
-}
-
-type revisions []revision
-
-func (a revisions) Len() int { return len(a) }
-func (a revisions) Less(i, j int) bool { return a[j].GreaterThan(a[i]) }
-func (a revisions) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
diff --git a/server/mvcc/revision_test.go b/server/mvcc/revision_test.go
deleted file mode 100644
index 46fcb483cf0..00000000000
--- a/server/mvcc/revision_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "bytes"
- "math"
- "reflect"
- "testing"
-)
-
-// TestRevision tests that a revision can be encoded to and decoded from
-// a byte slice. Moreover, the lexicographical order of its byte slice representation
-// follows the order of (main, sub).
-func TestRevision(t *testing.T) {
- tests := []revision{
- // order in (main, sub)
- {},
- {main: 1, sub: 0},
- {main: 1, sub: 1},
- {main: 2, sub: 0},
- {main: math.MaxInt64, sub: math.MaxInt64},
- }
-
- bs := make([][]byte, len(tests))
- for i, tt := range tests {
- b := newRevBytes()
- revToBytes(tt, b)
- bs[i] = b
-
- if grev := bytesToRev(b); !reflect.DeepEqual(grev, tt) {
- t.Errorf("#%d: revision = %+v, want %+v", i, grev, tt)
- }
- }
-
- for i := 0; i < len(tests)-1; i++ {
- if bytes.Compare(bs[i], bs[i+1]) >= 0 {
- t.Errorf("#%d: %v (%+v) should be smaller than %v (%+v)", i, bs[i], tests[i], bs[i+1], tests[i+1])
- }
- }
-}
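
The deleted revision.go encodes a revision as 8 big-endian bytes of main, a '_' separator, and 8 big-endian bytes of sub, so byte-wise comparison follows (main, sub) order, which is exactly what TestRevision above verifies. Below is a standalone sketch mirroring those helpers.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// revBytesLen mirrors the deleted revision.go: 8-byte main, '_' separator, 8-byte sub.
const revBytesLen = 8 + 1 + 8

type revision struct{ main, sub int64 }

func revToBytes(rev revision, b []byte) {
	binary.BigEndian.PutUint64(b, uint64(rev.main))
	b[8] = '_'
	binary.BigEndian.PutUint64(b[9:], uint64(rev.sub))
}

func bytesToRev(b []byte) revision {
	return revision{
		main: int64(binary.BigEndian.Uint64(b[0:8])),
		sub:  int64(binary.BigEndian.Uint64(b[9:])),
	}
}

func main() {
	a, b := make([]byte, revBytesLen), make([]byte, revBytesLen)
	revToBytes(revision{main: 1, sub: 1}, a)
	revToBytes(revision{main: 2, sub: 0}, b)
	fmt.Println(bytes.Compare(a, b) < 0) // true: byte order follows (main, sub)
	fmt.Println(bytesToRev(a))           // {1 1}
}
```
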
diff --git a/server/mvcc/util.go b/server/mvcc/util.go
deleted file mode 100644
index 25467609054..00000000000
--- a/server/mvcc/util.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "fmt"
-
- "go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
-)
-
-func WriteKV(be backend.Backend, kv mvccpb.KeyValue) {
- ibytes := newRevBytes()
- revToBytes(revision{main: kv.ModRevision}, ibytes)
-
- d, err := kv.Marshal()
- if err != nil {
- panic(fmt.Errorf("cannot marshal event: %v", err))
- }
-
- be.BatchTx().Lock()
- be.BatchTx().UnsafePut(keyBucketName, ibytes, d)
- be.BatchTx().Unlock()
-}
diff --git a/server/mvcc/watchable_store.go b/server/mvcc/watchable_store.go
deleted file mode 100644
index 63529ed672e..00000000000
--- a/server/mvcc/watchable_store.go
+++ /dev/null
@@ -1,544 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "sync"
- "time"
-
- "go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/pkg/v3/traceutil"
- "go.etcd.io/etcd/server/v3/lease"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
-
- "go.uber.org/zap"
-)
-
-// non-const so modifiable by tests
-var (
- // chanBufLen is the length of the buffered chan
- // for sending out watched events.
- // See https://github.com/etcd-io/etcd/issues/11906 for more detail.
- chanBufLen = 128
-
- // maxWatchersPerSync is the number of watchers to sync in a single batch
- maxWatchersPerSync = 512
-)
-
-type watchable interface {
- watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc)
- progress(w *watcher)
- rev() int64
-}
-
-type watchableStore struct {
- *store
-
- // mu protects watcher groups and batches. It should never be locked
- // before locking store.mu to avoid deadlock.
- mu sync.RWMutex
-
- // victims are watcher batches that were blocked on the watch channel
- victims []watcherBatch
- victimc chan struct{}
-
-	// contains all unsynced watchers that need to sync with events that have happened
- unsynced watcherGroup
-
- // contains all synced watchers that are in sync with the progress of the store.
- // The key of the map is the key that the watcher watches on.
- synced watcherGroup
-
- stopc chan struct{}
- wg sync.WaitGroup
-}
-
-// cancelFunc updates unsynced and synced maps when running
-// cancel operations.
-type cancelFunc func()
-
-func New(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) WatchableKV {
- return newWatchableStore(lg, b, le, cfg)
-}
-
-func newWatchableStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *watchableStore {
- if lg == nil {
- lg = zap.NewNop()
- }
- s := &watchableStore{
- store: NewStore(lg, b, le, cfg),
- victimc: make(chan struct{}, 1),
- unsynced: newWatcherGroup(),
- synced: newWatcherGroup(),
- stopc: make(chan struct{}),
- }
- s.store.ReadView = &readView{s}
- s.store.WriteView = &writeView{s}
- if s.le != nil {
- // use this store as the deleter so revokes trigger watch events
- s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write(traceutil.TODO()) })
- }
- s.wg.Add(2)
- go s.syncWatchersLoop()
- go s.syncVictimsLoop()
- return s
-}
-
-func (s *watchableStore) Close() error {
- close(s.stopc)
- s.wg.Wait()
- return s.store.Close()
-}
-
-func (s *watchableStore) NewWatchStream() WatchStream {
- watchStreamGauge.Inc()
- return &watchStream{
- watchable: s,
- ch: make(chan WatchResponse, chanBufLen),
- cancels: make(map[WatchID]cancelFunc),
- watchers: make(map[WatchID]*watcher),
- }
-}
-
-func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) {
- wa := &watcher{
- key: key,
- end: end,
- minRev: startRev,
- id: id,
- ch: ch,
- fcs: fcs,
- }
-
- s.mu.Lock()
- s.revMu.RLock()
- synced := startRev > s.store.currentRev || startRev == 0
- if synced {
- wa.minRev = s.store.currentRev + 1
- if startRev > wa.minRev {
- wa.minRev = startRev
- }
- s.synced.add(wa)
- } else {
- slowWatcherGauge.Inc()
- s.unsynced.add(wa)
- }
- s.revMu.RUnlock()
- s.mu.Unlock()
-
- watcherGauge.Inc()
-
- return wa, func() { s.cancelWatcher(wa) }
-}
-
-// cancelWatcher removes references of the watcher from the watchableStore
-func (s *watchableStore) cancelWatcher(wa *watcher) {
- for {
- s.mu.Lock()
- if s.unsynced.delete(wa) {
- slowWatcherGauge.Dec()
- watcherGauge.Dec()
- break
- } else if s.synced.delete(wa) {
- watcherGauge.Dec()
- break
- } else if wa.compacted {
- watcherGauge.Dec()
- break
- } else if wa.ch == nil {
- // already canceled (e.g., cancel/close race)
- break
- }
-
- if !wa.victim {
- s.mu.Unlock()
- panic("watcher not victim but not in watch groups")
- }
-
- var victimBatch watcherBatch
- for _, wb := range s.victims {
- if wb[wa] != nil {
- victimBatch = wb
- break
- }
- }
- if victimBatch != nil {
- slowWatcherGauge.Dec()
- watcherGauge.Dec()
- delete(victimBatch, wa)
- break
- }
-
- // victim being processed so not accessible; retry
- s.mu.Unlock()
- time.Sleep(time.Millisecond)
- }
-
- wa.ch = nil
- s.mu.Unlock()
-}
-
-func (s *watchableStore) Restore(b backend.Backend) error {
- s.mu.Lock()
- defer s.mu.Unlock()
- err := s.store.Restore(b)
- if err != nil {
- return err
- }
-
- for wa := range s.synced.watchers {
- wa.restore = true
- s.unsynced.add(wa)
- }
- s.synced = newWatcherGroup()
- return nil
-}
-
-// syncWatchersLoop syncs the watchers in the unsynced map every 100ms.
-func (s *watchableStore) syncWatchersLoop() {
- defer s.wg.Done()
-
- for {
- s.mu.RLock()
- st := time.Now()
- lastUnsyncedWatchers := s.unsynced.size()
- s.mu.RUnlock()
-
- unsyncedWatchers := 0
- if lastUnsyncedWatchers > 0 {
- unsyncedWatchers = s.syncWatchers()
- }
- syncDuration := time.Since(st)
-
- waitDuration := 100 * time.Millisecond
- // more work pending?
- if unsyncedWatchers != 0 && lastUnsyncedWatchers > unsyncedWatchers {
- // be fair to other store operations by yielding time taken
- waitDuration = syncDuration
- }
-
- select {
- case <-time.After(waitDuration):
- case <-s.stopc:
- return
- }
- }
-}
-
-// syncVictimsLoop tries to write precomputed watcher responses to
-// watchers that had a blocked watcher channel
-func (s *watchableStore) syncVictimsLoop() {
- defer s.wg.Done()
-
- for {
- for s.moveVictims() != 0 {
- // try to update all victim watchers
- }
- s.mu.RLock()
- isEmpty := len(s.victims) == 0
- s.mu.RUnlock()
-
- var tickc <-chan time.Time
- if !isEmpty {
- tickc = time.After(10 * time.Millisecond)
- }
-
- select {
- case <-tickc:
- case <-s.victimc:
- case <-s.stopc:
- return
- }
- }
-}
-
-// moveVictims tries to update watches with already pending event data
-func (s *watchableStore) moveVictims() (moved int) {
- s.mu.Lock()
- victims := s.victims
- s.victims = nil
- s.mu.Unlock()
-
- var newVictim watcherBatch
- for _, wb := range victims {
- // try to send responses again
- for w, eb := range wb {
- // watcher has observed the store up to, but not including, w.minRev
- rev := w.minRev - 1
- if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
- pendingEventsGauge.Add(float64(len(eb.evs)))
- } else {
- if newVictim == nil {
- newVictim = make(watcherBatch)
- }
- newVictim[w] = eb
- continue
- }
- moved++
- }
-
- // assign completed victim watchers to unsync/sync
- s.mu.Lock()
- s.store.revMu.RLock()
- curRev := s.store.currentRev
- for w, eb := range wb {
- if newVictim != nil && newVictim[w] != nil {
- // couldn't send watch response; stays victim
- continue
- }
- w.victim = false
- if eb.moreRev != 0 {
- w.minRev = eb.moreRev
- }
- if w.minRev <= curRev {
- s.unsynced.add(w)
- } else {
- slowWatcherGauge.Dec()
- s.synced.add(w)
- }
- }
- s.store.revMu.RUnlock()
- s.mu.Unlock()
- }
-
- if len(newVictim) > 0 {
- s.mu.Lock()
- s.victims = append(s.victims, newVictim)
- s.mu.Unlock()
- }
-
- return moved
-}
-
-// syncWatchers syncs unsynced watchers by:
-// 1. choose a set of watchers from the unsynced watcher group
-// 2. iterate over the set to get the minimum revision and remove compacted watchers
-// 3. use minimum revision to get all key-value pairs and send those events to watchers
-// 4. remove synced watchers in set from unsynced group and move to synced group
-func (s *watchableStore) syncWatchers() int {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if s.unsynced.size() == 0 {
- return 0
- }
-
- s.store.revMu.RLock()
- defer s.store.revMu.RUnlock()
-
-	// to find key-value pairs for the unsynced watchers, we need the
-	// minimum revision; revisions from there up to the current revision
-	// are used to query the backend store for key-value pairs
- curRev := s.store.currentRev
- compactionRev := s.store.compactMainRev
-
- wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev)
- minBytes, maxBytes := newRevBytes(), newRevBytes()
- revToBytes(revision{main: minRev}, minBytes)
- revToBytes(revision{main: curRev + 1}, maxBytes)
-
-	// UnsafeRange returns keys and values. In boltdb, keys are revisions
-	// and values are the actual key-value pairs in the backend.
- tx := s.store.b.ReadTx()
- tx.RLock()
- revs, vs := tx.UnsafeRange(keyBucketName, minBytes, maxBytes, 0)
- tx.RUnlock()
- evs := kvsToEvents(s.store.lg, wg, revs, vs)
-
- var victims watcherBatch
- wb := newWatcherBatch(wg, evs)
- for w := range wg.watchers {
- w.minRev = curRev + 1
-
- eb, ok := wb[w]
- if !ok {
- // bring un-notified watcher to synced
- s.synced.add(w)
- s.unsynced.delete(w)
- continue
- }
-
- if eb.moreRev != 0 {
- w.minRev = eb.moreRev
- }
-
- if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) {
- pendingEventsGauge.Add(float64(len(eb.evs)))
- } else {
- if victims == nil {
- victims = make(watcherBatch)
- }
- w.victim = true
- }
-
- if w.victim {
- victims[w] = eb
- } else {
- if eb.moreRev != 0 {
- // stay unsynced; more to read
- continue
- }
- s.synced.add(w)
- }
- s.unsynced.delete(w)
- }
- s.addVictim(victims)
-
- vsz := 0
- for _, v := range s.victims {
- vsz += len(v)
- }
- slowWatcherGauge.Set(float64(s.unsynced.size() + vsz))
-
- return s.unsynced.size()
-}
-
-// kvsToEvents gets all events for the watchers from all key-value pairs
-func kvsToEvents(lg *zap.Logger, wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) {
- for i, v := range vals {
- var kv mvccpb.KeyValue
- if err := kv.Unmarshal(v); err != nil {
- lg.Panic("failed to unmarshal mvccpb.KeyValue", zap.Error(err))
- }
-
- if !wg.contains(string(kv.Key)) {
- continue
- }
-
- ty := mvccpb.PUT
- if isTombstone(revs[i]) {
- ty = mvccpb.DELETE
- // patch in mod revision so watchers won't skip
- kv.ModRevision = bytesToRev(revs[i]).main
- }
- evs = append(evs, mvccpb.Event{Kv: &kv, Type: ty})
- }
- return evs
-}
-
-// notify sends out the given events at the given rev to the watchers
-// that watch on the keys of those events.
-func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
- var victim watcherBatch
- for w, eb := range newWatcherBatch(&s.synced, evs) {
- if eb.revs != 1 {
- s.store.lg.Panic(
- "unexpected multiple revisions in watch notification",
- zap.Int("number-of-revisions", eb.revs),
- )
- }
- if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
- pendingEventsGauge.Add(float64(len(eb.evs)))
- } else {
- // move slow watcher to victims
- w.minRev = rev + 1
- if victim == nil {
- victim = make(watcherBatch)
- }
- w.victim = true
- victim[w] = eb
- s.synced.delete(w)
- slowWatcherGauge.Inc()
- }
- }
- s.addVictim(victim)
-}
-
-func (s *watchableStore) addVictim(victim watcherBatch) {
- if victim == nil {
- return
- }
- s.victims = append(s.victims, victim)
- select {
- case s.victimc <- struct{}{}:
- default:
- }
-}
-
-func (s *watchableStore) rev() int64 { return s.store.Rev() }
-
-func (s *watchableStore) progress(w *watcher) {
- s.mu.RLock()
- defer s.mu.RUnlock()
-
- if _, ok := s.synced.watchers[w]; ok {
- w.send(WatchResponse{WatchID: w.id, Revision: s.rev()})
- // If the ch is full, this watcher is receiving events.
- // We do not need to send progress at all.
- }
-}
-
-type watcher struct {
- // the watcher key
- key []byte
- // end indicates the end of the range to watch.
- // If end is set, the watcher is on a range.
- end []byte
-
- // victim is set when ch is blocked and undergoing victim processing
- victim bool
-
- // compacted is set when the watcher is removed because of compaction
- compacted bool
-
-	// restore is true when the watcher is being restored from a leader snapshot,
-	// which means this watcher has just been moved from the "synced" to the
-	// "unsynced" watcher group, possibly with a future revision from when it
-	// was first added to the synced group.
-	// An "unsynced" watcher's revision must always be <= the current revision,
-	// except when the watcher was moved out of the "synced" watcher group.
- restore bool
-
- // minRev is the minimum revision update the watcher will accept
- minRev int64
- id WatchID
-
- fcs []FilterFunc
- // a chan to send out the watch response.
- // The chan might be shared with other watchers.
- ch chan<- WatchResponse
-}
-
-func (w *watcher) send(wr WatchResponse) bool {
- progressEvent := len(wr.Events) == 0
-
- if len(w.fcs) != 0 {
- ne := make([]mvccpb.Event, 0, len(wr.Events))
- for i := range wr.Events {
- filtered := false
- for _, filter := range w.fcs {
- if filter(wr.Events[i]) {
- filtered = true
- break
- }
- }
- if !filtered {
- ne = append(ne, wr.Events[i])
- }
- }
- wr.Events = ne
- }
-
- // if all events are filtered out, we should send nothing.
- if !progressEvent && len(wr.Events) == 0 {
- return true
- }
- select {
- case w.ch <- wr:
- return true
- default:
- return false
- }
-}
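
The watchable store code removed above hinges on a non-blocking send: when a watcher's channel is full, the precomputed response is parked as a "victim" and retried later by syncVictimsLoop/moveVictims. The following is a minimal, self-contained sketch of that pattern with illustrative types only, not etcd's:

```go
// Non-blocking send that flags the receiver as a "victim" when its channel
// is full, mirroring watcher.send and the victim queue above.
package main

import "fmt"

type response struct{ rev int64 }

type subscriber struct {
	ch     chan response
	victim bool
}

// trySend attempts a non-blocking send; on a full channel the subscriber is
// flagged so a background loop can retry the delivery later.
func trySend(s *subscriber, r response) bool {
	select {
	case s.ch <- r:
		return true
	default:
		s.victim = true
		return false
	}
}

func main() {
	s := &subscriber{ch: make(chan response, 1)}
	fmt.Println(trySend(s, response{rev: 1})) // true: the buffered slot is free
	fmt.Println(trySend(s, response{rev: 2})) // false: channel full, marked victim
	fmt.Println(s.victim)                     // true
}
```
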
diff --git a/server/mvcc/watchable_store_bench_test.go b/server/mvcc/watchable_store_bench_test.go
deleted file mode 100644
index 0cdc09e3ba0..00000000000
--- a/server/mvcc/watchable_store_bench_test.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "math/rand"
- "os"
- "testing"
-
- "go.etcd.io/etcd/pkg/v3/traceutil"
- "go.etcd.io/etcd/server/v3/lease"
- betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
-
- "go.uber.org/zap"
-)
-
-func BenchmarkWatchableStorePut(b *testing.B) {
- be, tmpPath := betesting.NewDefaultTmpBackend(b)
- s := New(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, be, tmpPath)
-
- // arbitrary number of bytes
- bytesN := 64
- keys := createBytesSlice(bytesN, b.N)
- vals := createBytesSlice(bytesN, b.N)
-
- b.ResetTimer()
- b.ReportAllocs()
- for i := 0; i < b.N; i++ {
- s.Put(keys[i], vals[i], lease.NoLease)
- }
-}
-
-// BenchmarkWatchableStoreTxnPut benchmarks the Put operation
-// with transaction begin and end, where transaction involves
-// some synchronization operations, such as mutex locking.
-func BenchmarkWatchableStoreTxnPut(b *testing.B) {
- be, tmpPath := betesting.NewDefaultTmpBackend(b)
- s := New(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, be, tmpPath)
-
- // arbitrary number of bytes
- bytesN := 64
- keys := createBytesSlice(bytesN, b.N)
- vals := createBytesSlice(bytesN, b.N)
-
- b.ResetTimer()
- b.ReportAllocs()
- for i := 0; i < b.N; i++ {
- txn := s.Write(traceutil.TODO())
- txn.Put(keys[i], vals[i], lease.NoLease)
- txn.End()
- }
-}
-
-// BenchmarkWatchableStoreWatchPutSync benchmarks the case of
-// many synced watchers receiving a Put notification.
-func BenchmarkWatchableStoreWatchPutSync(b *testing.B) {
- benchmarkWatchableStoreWatchPut(b, true)
-}
-
-// BenchmarkWatchableStoreWatchPutUnsync benchmarks the case of
-// many unsynced watchers receiving a Put notification.
-func BenchmarkWatchableStoreWatchPutUnsync(b *testing.B) {
- benchmarkWatchableStoreWatchPut(b, false)
-}
-
-func benchmarkWatchableStoreWatchPut(b *testing.B, synced bool) {
- be, tmpPath := betesting.NewDefaultTmpBackend(b)
- s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, be, tmpPath)
-
- k := []byte("testkey")
- v := []byte("testval")
-
- rev := int64(0)
- if !synced {
- // non-0 value to keep watchers in unsynced
- rev = 1
- }
-
- w := s.NewWatchStream()
- defer w.Close()
- watchIDs := make([]WatchID, b.N)
- for i := range watchIDs {
- watchIDs[i], _ = w.Watch(0, k, nil, rev)
- }
-
- b.ResetTimer()
- b.ReportAllocs()
-
- // trigger watchers
- s.Put(k, v, lease.NoLease)
- for range watchIDs {
- <-w.Chan()
- }
- select {
- case wc := <-w.Chan():
- b.Fatalf("unexpected data %v", wc)
- default:
- }
-}
-
-// Benchmarks on cancel function performance for unsynced watchers
-// in a WatchableStore. It creates k*N watchers to populate unsynced
-// with a reasonably large number of watchers, and measures the time it
-// takes to cancel N watchers out of the k*N watchers. The performance is
-// expected to differ depending on the unsynced member implementation.
-// TODO: k is an arbitrary constant. We need to figure out what factor
-// we should put to simulate the real-world use cases.
-func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
- be, tmpPath := betesting.NewDefaultTmpBackend(b)
- s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
-
- // manually create watchableStore instead of newWatchableStore
- // because newWatchableStore periodically calls syncWatchersLoop
- // method to sync watchers in unsynced map. We want to keep watchers
- // in unsynced for this benchmark.
- ws := &watchableStore{
- store: s,
- unsynced: newWatcherGroup(),
-
- // to make the test not crash from assigning to nil map.
- // 'synced' doesn't get populated in this test.
- synced: newWatcherGroup(),
- }
-
- defer func() {
- ws.store.Close()
- os.Remove(tmpPath)
- }()
-
- // Put a key so that we can spawn watchers on that key
- // (testKey in this test). This increases the rev to 1,
-	// and later we can set the watcher's startRev to 1,
- // and force watchers to be in unsynced.
- testKey := []byte("foo")
- testValue := []byte("bar")
- s.Put(testKey, testValue, lease.NoLease)
-
- w := ws.NewWatchStream()
-
- const k int = 2
- benchSampleN := b.N
- watcherN := k * benchSampleN
-
- watchIDs := make([]WatchID, watcherN)
- for i := 0; i < watcherN; i++ {
- // non-0 value to keep watchers in unsynced
- watchIDs[i], _ = w.Watch(0, testKey, nil, 1)
- }
-
-	// randomly cancel N watchers so the result is not biased towards
-	// ordered data structures, such as a slice.
- ix := rand.Perm(watcherN)
-
- b.ResetTimer()
- b.ReportAllocs()
-
- // cancel N watchers
- for _, idx := range ix[:benchSampleN] {
- if err := w.Cancel(watchIDs[idx]); err != nil {
- b.Error(err)
- }
- }
-}
-
-func BenchmarkWatchableStoreSyncedCancel(b *testing.B) {
- be, tmpPath := betesting.NewDefaultTmpBackend(b)
- s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
-
- defer func() {
- s.store.Close()
- os.Remove(tmpPath)
- }()
-
- // Put a key so that we can spawn watchers on that key
- testKey := []byte("foo")
- testValue := []byte("bar")
- s.Put(testKey, testValue, lease.NoLease)
-
- w := s.NewWatchStream()
-
- // put 1 million watchers on the same key
- const watcherN = 1000000
-
- watchIDs := make([]WatchID, watcherN)
- for i := 0; i < watcherN; i++ {
- // 0 for startRev to keep watchers in synced
- watchIDs[i], _ = w.Watch(0, testKey, nil, 0)
- }
-
-	// randomly cancel watchers so the result is not biased towards
-	// ordered data structures, such as a slice.
- ix := rand.Perm(watcherN)
-
- b.ResetTimer()
- b.ReportAllocs()
-
- for _, idx := range ix {
- if err := w.Cancel(watchIDs[idx]); err != nil {
- b.Error(err)
- }
- }
-}
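
The benchmarks deleted above all follow the same structure: build fixtures sized by b.N, then call b.ResetTimer and b.ReportAllocs so only the measured loop is timed. A generic sketch of that pattern with placeholder names:

```go
// Generic benchmark skeleton: setup excluded from timing, allocations reported.
package bench

import "testing"

func buildFixture(n int) [][]byte {
	keys := make([][]byte, n)
	for i := range keys {
		keys[i] = []byte{byte(i)}
	}
	return keys
}

func BenchmarkMapInsert(b *testing.B) {
	keys := buildFixture(b.N) // fixture construction is not measured
	m := make(map[string]struct{}, b.N)

	b.ResetTimer()   // exclude the setup above from the timing
	b.ReportAllocs() // include allocs/op in the benchmark output

	for i := 0; i < b.N; i++ {
		m[string(keys[i])] = struct{}{}
	}
}
```
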
diff --git a/server/mvcc/watchable_store_test.go b/server/mvcc/watchable_store_test.go
deleted file mode 100644
index bc09a4a0366..00000000000
--- a/server/mvcc/watchable_store_test.go
+++ /dev/null
@@ -1,654 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "bytes"
- "fmt"
- "os"
- "reflect"
- "sync"
- "testing"
- "time"
-
- "go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/pkg/v3/traceutil"
- "go.etcd.io/etcd/server/v3/lease"
- betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
- "go.uber.org/zap"
-)
-
-func TestWatch(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
-
- defer func() {
- s.store.Close()
- os.Remove(tmpPath)
- }()
-
- testKey := []byte("foo")
- testValue := []byte("bar")
- s.Put(testKey, testValue, lease.NoLease)
-
- w := s.NewWatchStream()
- w.Watch(0, testKey, nil, 0)
-
- if !s.synced.contains(string(testKey)) {
- // the key must have had an entry in synced
- t.Errorf("existence = false, want true")
- }
-}
-
-func TestNewWatcherCancel(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
-
- defer func() {
- s.store.Close()
- os.Remove(tmpPath)
- }()
- testKey := []byte("foo")
- testValue := []byte("bar")
- s.Put(testKey, testValue, lease.NoLease)
-
- w := s.NewWatchStream()
- wt, _ := w.Watch(0, testKey, nil, 0)
-
- if err := w.Cancel(wt); err != nil {
- t.Error(err)
- }
-
- if s.synced.contains(string(testKey)) {
-		// the key should have been deleted
- t.Errorf("existence = true, want false")
- }
-}
-
-// TestCancelUnsynced tests if running CancelFunc removes watchers from unsynced.
-func TestCancelUnsynced(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
-
- // manually create watchableStore instead of newWatchableStore
- // because newWatchableStore automatically calls syncWatchers
- // method to sync watchers in unsynced map. We want to keep watchers
- // in unsynced to test if syncWatchers works as expected.
- s := &watchableStore{
- store: NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}),
- unsynced: newWatcherGroup(),
-
- // to make the test not crash from assigning to nil map.
- // 'synced' doesn't get populated in this test.
- synced: newWatcherGroup(),
- }
-
- defer func() {
- s.store.Close()
- os.Remove(tmpPath)
- }()
-
-	// Put a key so that we can spawn watchers on that key
-	// (testKey in this test). This increases the rev to 1,
-	// and later we can set the watcher's startRev to 1,
- // and force watchers to be in unsynced.
- testKey := []byte("foo")
- testValue := []byte("bar")
- s.Put(testKey, testValue, lease.NoLease)
-
- w := s.NewWatchStream()
-
- // arbitrary number for watchers
- watcherN := 100
-
- // create watcherN of watch ids to cancel
- watchIDs := make([]WatchID, watcherN)
- for i := 0; i < watcherN; i++ {
- // use 1 to keep watchers in unsynced
- watchIDs[i], _ = w.Watch(0, testKey, nil, 1)
- }
-
- for _, idx := range watchIDs {
- if err := w.Cancel(idx); err != nil {
- t.Error(err)
- }
- }
-
- // After running CancelFunc
- //
- // unsynced should be empty
- // because cancel removes watcher from unsynced
- if size := s.unsynced.size(); size != 0 {
- t.Errorf("unsynced size = %d, want 0", size)
- }
-}
-
-// TestSyncWatchers populates unsynced watcher map and tests syncWatchers
-// method to see if it correctly sends events to channel of unsynced watchers
-// and moves these watchers to synced.
-func TestSyncWatchers(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
-
- s := &watchableStore{
- store: NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}),
- unsynced: newWatcherGroup(),
- synced: newWatcherGroup(),
- }
-
- defer func() {
- s.store.Close()
- os.Remove(tmpPath)
- }()
-
- testKey := []byte("foo")
- testValue := []byte("bar")
- s.Put(testKey, testValue, lease.NoLease)
-
- w := s.NewWatchStream()
-
- // arbitrary number for watchers
- watcherN := 100
-
- for i := 0; i < watcherN; i++ {
- // specify rev as 1 to keep watchers in unsynced
- w.Watch(0, testKey, nil, 1)
- }
-
- // Before running s.syncWatchers() synced should be empty because we manually
- // populate unsynced only
- sws := s.synced.watcherSetByKey(string(testKey))
- uws := s.unsynced.watcherSetByKey(string(testKey))
-
- if len(sws) != 0 {
- t.Fatalf("synced[string(testKey)] size = %d, want 0", len(sws))
- }
- // unsynced should not be empty because we manually populated unsynced only
- if len(uws) != watcherN {
- t.Errorf("unsynced size = %d, want %d", len(uws), watcherN)
- }
-
- // this should move all unsynced watchers to synced ones
- s.syncWatchers()
-
- sws = s.synced.watcherSetByKey(string(testKey))
- uws = s.unsynced.watcherSetByKey(string(testKey))
-
-	// After running s.syncWatchers(), synced should not be empty because syncWatchers
- // populates synced in this test case
- if len(sws) != watcherN {
- t.Errorf("synced[string(testKey)] size = %d, want %d", len(sws), watcherN)
- }
-
-	// unsynced should be empty because syncWatchers is expected to move all watchers
- // from unsynced to synced in this test case
- if len(uws) != 0 {
- t.Errorf("unsynced size = %d, want 0", len(uws))
- }
-
- for w := range sws {
- if w.minRev != s.Rev()+1 {
- t.Errorf("w.minRev = %d, want %d", w.minRev, s.Rev()+1)
- }
- }
-
- if len(w.(*watchStream).ch) != watcherN {
- t.Errorf("watched event size = %d, want %d", len(w.(*watchStream).ch), watcherN)
- }
-
- evs := (<-w.(*watchStream).ch).Events
- if len(evs) != 1 {
- t.Errorf("len(evs) got = %d, want = 1", len(evs))
- }
- if evs[0].Type != mvccpb.PUT {
- t.Errorf("got = %v, want = %v", evs[0].Type, mvccpb.PUT)
- }
- if !bytes.Equal(evs[0].Kv.Key, testKey) {
- t.Errorf("got = %s, want = %s", evs[0].Kv.Key, testKey)
- }
- if !bytes.Equal(evs[0].Kv.Value, testValue) {
- t.Errorf("got = %s, want = %s", evs[0].Kv.Value, testValue)
- }
-}
-
-// TestWatchCompacted tests a watcher that watches on a compacted revision.
-func TestWatchCompacted(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
-
- defer func() {
- s.store.Close()
- os.Remove(tmpPath)
- }()
- testKey := []byte("foo")
- testValue := []byte("bar")
-
- maxRev := 10
- compactRev := int64(5)
- for i := 0; i < maxRev; i++ {
- s.Put(testKey, testValue, lease.NoLease)
- }
- _, err := s.Compact(traceutil.TODO(), compactRev)
- if err != nil {
- t.Fatalf("failed to compact kv (%v)", err)
- }
-
- w := s.NewWatchStream()
- wt, _ := w.Watch(0, testKey, nil, compactRev-1)
-
- select {
- case resp := <-w.Chan():
- if resp.WatchID != wt {
- t.Errorf("resp.WatchID = %x, want %x", resp.WatchID, wt)
- }
- if resp.CompactRevision == 0 {
- t.Errorf("resp.Compacted = %v, want %v", resp.CompactRevision, compactRev)
- }
- case <-time.After(1 * time.Second):
- t.Fatalf("failed to receive response (timeout)")
- }
-}
-
-func TestWatchFutureRev(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
-
- defer func() {
- s.store.Close()
- os.Remove(tmpPath)
- }()
-
- testKey := []byte("foo")
- testValue := []byte("bar")
-
- w := s.NewWatchStream()
- wrev := int64(10)
- w.Watch(0, testKey, nil, wrev)
-
- for i := 0; i < 10; i++ {
- rev := s.Put(testKey, testValue, lease.NoLease)
- if rev >= wrev {
- break
- }
- }
-
- select {
- case resp := <-w.Chan():
- if resp.Revision != wrev {
- t.Fatalf("rev = %d, want %d", resp.Revision, wrev)
- }
- if len(resp.Events) != 1 {
- t.Fatalf("failed to get events from the response")
- }
- if resp.Events[0].Kv.ModRevision != wrev {
- t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, wrev)
- }
- case <-time.After(time.Second):
- t.Fatal("failed to receive event in 1 second.")
- }
-}
-
-func TestWatchRestore(t *testing.T) {
- test := func(delay time.Duration) func(t *testing.T) {
- return func(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, b, tmpPath)
-
- testKey := []byte("foo")
- testValue := []byte("bar")
- rev := s.Put(testKey, testValue, lease.NoLease)
-
- newBackend, newPath := betesting.NewDefaultTmpBackend(t)
- newStore := newWatchableStore(zap.NewExample(), newBackend, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(newStore, newBackend, newPath)
-
- w := newStore.NewWatchStream()
- w.Watch(0, testKey, nil, rev-1)
-
- time.Sleep(delay)
-
- newStore.Restore(b)
- select {
- case resp := <-w.Chan():
- if resp.Revision != rev {
- t.Fatalf("rev = %d, want %d", resp.Revision, rev)
- }
- if len(resp.Events) != 1 {
- t.Fatalf("failed to get events from the response")
- }
- if resp.Events[0].Kv.ModRevision != rev {
- t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, rev)
- }
- case <-time.After(time.Second):
- t.Fatal("failed to receive event in 1 second.")
- }
- }
- }
-
- t.Run("Normal", test(0))
- t.Run("RunSyncWatchLoopBeforeRestore", test(time.Millisecond*120)) // longer than default waitDuration
-}
-
-// TestWatchRestoreSyncedWatcher tests such a case that:
-// 1. watcher is created with a future revision "math.MaxInt64 - 2"
-// 2. watcher with a future revision is added to "synced" watcher group
-// 3. restore/overwrite storage with a snapshot of a higher last revision
-// 4. restore operation moves "synced" to "unsynced" watcher group
-// 5. choose the watcher from step 1, without panic
-func TestWatchRestoreSyncedWatcher(t *testing.T) {
- b1, b1Path := betesting.NewDefaultTmpBackend(t)
- s1 := newWatchableStore(zap.NewExample(), b1, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s1, b1, b1Path)
-
- b2, b2Path := betesting.NewDefaultTmpBackend(t)
- s2 := newWatchableStore(zap.NewExample(), b2, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s2, b2, b2Path)
-
- testKey, testValue := []byte("foo"), []byte("bar")
- rev := s1.Put(testKey, testValue, lease.NoLease)
- startRev := rev + 2
-
- // create a watcher with a future revision
- // add to "synced" watcher group (startRev > s.store.currentRev)
- w1 := s1.NewWatchStream()
- w1.Watch(0, testKey, nil, startRev)
-
-	// make "s2" end up with a higher last revision
- s2.Put(testKey, testValue, lease.NoLease)
- s2.Put(testKey, testValue, lease.NoLease)
-
- // overwrite storage with higher revisions
- if err := s1.Restore(b2); err != nil {
- t.Fatal(err)
- }
-
- // wait for next "syncWatchersLoop" iteration
- // and the unsynced watcher should be chosen
- time.Sleep(2 * time.Second)
-
- // trigger events for "startRev"
- s1.Put(testKey, testValue, lease.NoLease)
-
- select {
- case resp := <-w1.Chan():
- if resp.Revision != startRev {
- t.Fatalf("resp.Revision expect %d, got %d", startRev, resp.Revision)
- }
- if len(resp.Events) != 1 {
- t.Fatalf("len(resp.Events) expect 1, got %d", len(resp.Events))
- }
- if resp.Events[0].Kv.ModRevision != startRev {
- t.Fatalf("resp.Events[0].Kv.ModRevision expect %d, got %d", startRev, resp.Events[0].Kv.ModRevision)
- }
- case <-time.After(time.Second):
- t.Fatal("failed to receive event in 1 second")
- }
-}
-
-// TestWatchBatchUnsynced tests batching on unsynced watchers
-func TestWatchBatchUnsynced(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
-
- oldMaxRevs := watchBatchMaxRevs
- defer func() {
- watchBatchMaxRevs = oldMaxRevs
- s.store.Close()
- os.Remove(tmpPath)
- }()
- batches := 3
- watchBatchMaxRevs = 4
-
- v := []byte("foo")
- for i := 0; i < watchBatchMaxRevs*batches; i++ {
- s.Put(v, v, lease.NoLease)
- }
-
- w := s.NewWatchStream()
- w.Watch(0, v, nil, 1)
- for i := 0; i < batches; i++ {
- if resp := <-w.Chan(); len(resp.Events) != watchBatchMaxRevs {
- t.Fatalf("len(events) = %d, want %d", len(resp.Events), watchBatchMaxRevs)
- }
- }
-
- s.store.revMu.Lock()
- defer s.store.revMu.Unlock()
- if size := s.synced.size(); size != 1 {
- t.Errorf("synced size = %d, want 1", size)
- }
-}
-
-func TestNewMapwatcherToEventMap(t *testing.T) {
- k0, k1, k2 := []byte("foo0"), []byte("foo1"), []byte("foo2")
- v0, v1, v2 := []byte("bar0"), []byte("bar1"), []byte("bar2")
-
- ws := []*watcher{{key: k0}, {key: k1}, {key: k2}}
-
- evs := []mvccpb.Event{
- {
- Type: mvccpb.PUT,
- Kv: &mvccpb.KeyValue{Key: k0, Value: v0},
- },
- {
- Type: mvccpb.PUT,
- Kv: &mvccpb.KeyValue{Key: k1, Value: v1},
- },
- {
- Type: mvccpb.PUT,
- Kv: &mvccpb.KeyValue{Key: k2, Value: v2},
- },
- }
-
- tests := []struct {
- sync []*watcher
- evs []mvccpb.Event
-
- wwe map[*watcher][]mvccpb.Event
- }{
- // no watcher in sync, some events should return empty wwe
- {
- nil,
- evs,
- map[*watcher][]mvccpb.Event{},
- },
-
- // one watcher in sync, one event that does not match the key of that
- // watcher should return empty wwe
- {
- []*watcher{ws[2]},
- evs[:1],
- map[*watcher][]mvccpb.Event{},
- },
-
- // one watcher in sync, one event that matches the key of that
- // watcher should return wwe with that matching watcher
- {
- []*watcher{ws[1]},
- evs[1:2],
- map[*watcher][]mvccpb.Event{
- ws[1]: evs[1:2],
- },
- },
-
- // two watchers in sync that watches two different keys, one event
- // that matches the key of only one of the watcher should return wwe
- // with the matching watcher
- {
- []*watcher{ws[0], ws[2]},
- evs[2:],
- map[*watcher][]mvccpb.Event{
- ws[2]: evs[2:],
- },
- },
-
- // two watchers in sync that watches the same key, two events that
- // match the keys should return wwe with those two watchers
- {
- []*watcher{ws[0], ws[1]},
- evs[:2],
- map[*watcher][]mvccpb.Event{
- ws[0]: evs[:1],
- ws[1]: evs[1:2],
- },
- },
- }
-
- for i, tt := range tests {
- wg := newWatcherGroup()
- for _, w := range tt.sync {
- wg.add(w)
- }
-
- gwe := newWatcherBatch(&wg, tt.evs)
- if len(gwe) != len(tt.wwe) {
- t.Errorf("#%d: len(gwe) got = %d, want = %d", i, len(gwe), len(tt.wwe))
- }
- // compare gwe and tt.wwe
- for w, eb := range gwe {
- if len(eb.evs) != len(tt.wwe[w]) {
- t.Errorf("#%d: len(eb.evs) got = %d, want = %d", i, len(eb.evs), len(tt.wwe[w]))
- }
- if !reflect.DeepEqual(eb.evs, tt.wwe[w]) {
- t.Errorf("#%d: reflect.DeepEqual events got = %v, want = true", i, false)
- }
- }
- }
-}
-
-// TestWatchVictims tests that watchable store delivers watch events
-// when the watch channel is temporarily clogged with too many events.
-func TestWatchVictims(t *testing.T) {
- oldChanBufLen, oldMaxWatchersPerSync := chanBufLen, maxWatchersPerSync
-
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
-
- defer func() {
- s.store.Close()
- os.Remove(tmpPath)
- chanBufLen, maxWatchersPerSync = oldChanBufLen, oldMaxWatchersPerSync
- }()
-
- chanBufLen, maxWatchersPerSync = 1, 2
- numPuts := chanBufLen * 64
- testKey, testValue := []byte("foo"), []byte("bar")
-
- var wg sync.WaitGroup
- numWatches := maxWatchersPerSync * 128
- errc := make(chan error, numWatches)
- wg.Add(numWatches)
- for i := 0; i < numWatches; i++ {
- go func() {
- w := s.NewWatchStream()
- w.Watch(0, testKey, nil, 1)
- defer func() {
- w.Close()
- wg.Done()
- }()
- tc := time.After(10 * time.Second)
- evs, nextRev := 0, int64(2)
- for evs < numPuts {
- select {
- case <-tc:
- errc <- fmt.Errorf("time out")
- return
- case wr := <-w.Chan():
- evs += len(wr.Events)
- for _, ev := range wr.Events {
- if ev.Kv.ModRevision != nextRev {
- errc <- fmt.Errorf("expected rev=%d, got %d", nextRev, ev.Kv.ModRevision)
- return
- }
- nextRev++
- }
- time.Sleep(time.Millisecond)
- }
- }
- if evs != numPuts {
- errc <- fmt.Errorf("expected %d events, got %d", numPuts, evs)
- return
- }
- select {
- case <-w.Chan():
- errc <- fmt.Errorf("unexpected response")
- default:
- }
- }()
- time.Sleep(time.Millisecond)
- }
-
- var wgPut sync.WaitGroup
- wgPut.Add(numPuts)
- for i := 0; i < numPuts; i++ {
- go func() {
- defer wgPut.Done()
- s.Put(testKey, testValue, lease.NoLease)
- }()
- }
- wgPut.Wait()
-
- wg.Wait()
- select {
- case err := <-errc:
- t.Fatal(err)
- default:
- }
-}
-
-// TestStressWatchCancelClose tests closing a watch stream while
-// canceling its watches.
-func TestStressWatchCancelClose(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
-
- defer func() {
- s.store.Close()
- os.Remove(tmpPath)
- }()
-
- testKey, testValue := []byte("foo"), []byte("bar")
- var wg sync.WaitGroup
- readyc := make(chan struct{})
- wg.Add(100)
- for i := 0; i < 100; i++ {
- go func() {
- defer wg.Done()
- w := s.NewWatchStream()
- ids := make([]WatchID, 10)
- for i := range ids {
- ids[i], _ = w.Watch(0, testKey, nil, 0)
- }
- <-readyc
- wg.Add(1 + len(ids)/2)
- for i := range ids[:len(ids)/2] {
- go func(n int) {
- defer wg.Done()
- w.Cancel(ids[n])
- }(i)
- }
- go func() {
- defer wg.Done()
- w.Close()
- }()
- }()
- }
-
- close(readyc)
- for i := 0; i < 100; i++ {
- s.Put(testKey, testValue, lease.NoLease)
- }
-
- wg.Wait()
-}
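
The concurrency tests removed above (TestWatchVictims, TestStressWatchCancelClose) share one pattern: worker goroutines report failures over a buffered error channel, a WaitGroup gates completion, and the test drains the channel at the end. A stripped-down, illustrative sketch:

```go
// Worker goroutines report errors on a buffered channel; the test checks it
// after the WaitGroup releases. Names and the failure condition are placeholders.
package example

import (
	"fmt"
	"sync"
	"testing"
)

func TestWorkersReportErrors(t *testing.T) {
	const workers = 8
	errc := make(chan error, workers) // buffered so workers never block on send
	var wg sync.WaitGroup

	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func(id int) {
			defer wg.Done()
			if id < 0 { // placeholder failure condition, never true here
				errc <- fmt.Errorf("worker %d failed", id)
			}
		}(i)
	}

	wg.Wait()
	select {
	case err := <-errc:
		t.Fatal(err)
	default: // no errors were reported
	}
}
```
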
diff --git a/server/mvcc/watcher.go b/server/mvcc/watcher.go
deleted file mode 100644
index f48a9ef3b33..00000000000
--- a/server/mvcc/watcher.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "bytes"
- "errors"
- "sync"
-
- "go.etcd.io/etcd/api/v3/mvccpb"
-)
-
-// AutoWatchID is the watcher ID passed in WatchStream.Watch when no
-// user-provided ID is available. If pass, an ID will automatically be assigned.
-const AutoWatchID WatchID = 0
-
-var (
- ErrWatcherNotExist = errors.New("mvcc: watcher does not exist")
- ErrEmptyWatcherRange = errors.New("mvcc: watcher range is empty")
- ErrWatcherDuplicateID = errors.New("mvcc: duplicate watch ID provided on the WatchStream")
-)
-
-type WatchID int64
-
-// FilterFunc returns true if the given event should be filtered out.
-type FilterFunc func(e mvccpb.Event) bool
-
-type WatchStream interface {
-	// Watch creates a watcher. The watcher watches events that happen or
-	// happened on the given key or key range [key, end) from the given startRev.
- //
- // The whole event history can be watched unless compacted.
- // If "startRev" <=0, watch observes events after currentRev.
- //
-	// The returned "id" is the ID of this watcher. It appears as WatchID
-	// in events that are sent to the created watcher through the stream channel.
- // The watch ID is used when it's not equal to AutoWatchID. Otherwise,
- // an auto-generated watch ID is returned.
- Watch(id WatchID, key, end []byte, startRev int64, fcs ...FilterFunc) (WatchID, error)
-
-	// Chan returns a chan. All watch responses will be sent to the returned chan.
- Chan() <-chan WatchResponse
-
-	// RequestProgress requests the progress of the watcher with the given ID. The
-	// response will only be sent if the watcher is currently synced.
-	// The responses will be sent through the WatchResponse Chan attached
-	// to this stream to ensure correct ordering.
-	// The responses contain no events. The revision in the response is the progress
-	// of the watcher since the watcher is currently synced.
- RequestProgress(id WatchID)
-
- // Cancel cancels a watcher by giving its ID. If watcher does not exist, an error will be
- // returned.
- Cancel(id WatchID) error
-
-	// Close closes Chan and releases all related resources.
- Close()
-
- // Rev returns the current revision of the KV the stream watches on.
- Rev() int64
-}
-
-type WatchResponse struct {
- // WatchID is the WatchID of the watcher this response sent to.
- WatchID WatchID
-
-	// Events contains all the events that need to be sent.
- Events []mvccpb.Event
-
- // Revision is the revision of the KV when the watchResponse is created.
- // For a normal response, the revision should be the same as the last
-	// modified revision inside Events. For a delayed response to an unsynced
- // watcher, the revision is greater than the last modified revision
- // inside Events.
- Revision int64
-
- // CompactRevision is set when the watcher is cancelled due to compaction.
- CompactRevision int64
-}
-
-// watchStream contains a collection of watchers that share
-// one streaming chan to send out watched events and other control events.
-type watchStream struct {
- watchable watchable
- ch chan WatchResponse
-
- mu sync.Mutex // guards fields below it
- // nextID is the ID pre-allocated for next new watcher in this stream
- nextID WatchID
- closed bool
- cancels map[WatchID]cancelFunc
- watchers map[WatchID]*watcher
-}
-
-// Watch creates a new watcher in the stream and returns its WatchID.
-func (ws *watchStream) Watch(id WatchID, key, end []byte, startRev int64, fcs ...FilterFunc) (WatchID, error) {
- // prevent wrong range where key >= end lexicographically
- // watch request with 'WithFromKey' has empty-byte range end
- if len(end) != 0 && bytes.Compare(key, end) != -1 {
- return -1, ErrEmptyWatcherRange
- }
-
- ws.mu.Lock()
- defer ws.mu.Unlock()
- if ws.closed {
- return -1, ErrEmptyWatcherRange
- }
-
- if id == AutoWatchID {
- for ws.watchers[ws.nextID] != nil {
- ws.nextID++
- }
- id = ws.nextID
- ws.nextID++
- } else if _, ok := ws.watchers[id]; ok {
- return -1, ErrWatcherDuplicateID
- }
-
- w, c := ws.watchable.watch(key, end, startRev, id, ws.ch, fcs...)
-
- ws.cancels[id] = c
- ws.watchers[id] = w
- return id, nil
-}
-
-func (ws *watchStream) Chan() <-chan WatchResponse {
- return ws.ch
-}
-
-func (ws *watchStream) Cancel(id WatchID) error {
- ws.mu.Lock()
- cancel, ok := ws.cancels[id]
- w := ws.watchers[id]
- ok = ok && !ws.closed
- ws.mu.Unlock()
-
- if !ok {
- return ErrWatcherNotExist
- }
- cancel()
-
- ws.mu.Lock()
- // The watch isn't removed until cancel so that if Close() is called,
- // it will wait for the cancel. Otherwise, Close() could close the
- // watch channel while the store is still posting events.
- if ww := ws.watchers[id]; ww == w {
- delete(ws.cancels, id)
- delete(ws.watchers, id)
- }
- ws.mu.Unlock()
-
- return nil
-}
-
-func (ws *watchStream) Close() {
- ws.mu.Lock()
- defer ws.mu.Unlock()
-
- for _, cancel := range ws.cancels {
- cancel()
- }
- ws.closed = true
- close(ws.ch)
- watchStreamGauge.Dec()
-}
-
-func (ws *watchStream) Rev() int64 {
- ws.mu.Lock()
- defer ws.mu.Unlock()
- return ws.watchable.rev()
-}
-
-func (ws *watchStream) RequestProgress(id WatchID) {
- ws.mu.Lock()
- w, ok := ws.watchers[id]
- ws.mu.Unlock()
- if !ok {
- return
- }
- ws.watchable.progress(w)
-}
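
The removed watcher.go defines the WatchStream API, including the AutoWatchID rule in Watch: an ID of 0 means "assign the next free ID", while an explicitly supplied ID that is already in use is rejected. A simplified sketch of that assignment logic, using stand-in types rather than etcd's:

```go
// Auto-assignment of watch IDs: 0 requests the next unused ID, a duplicate
// explicit ID is rejected. Types and names are illustrative only.
package main

import (
	"errors"
	"fmt"
)

const autoID int64 = 0

var errDuplicateID = errors.New("duplicate watch ID")

type stream struct {
	nextID   int64
	watchers map[int64]struct{}
}

func (s *stream) assign(id int64) (int64, error) {
	if id == autoID {
		for {
			if _, ok := s.watchers[s.nextID]; !ok {
				break
			}
			s.nextID++ // skip IDs already taken by explicit registrations
		}
		id = s.nextID
		s.nextID++
	} else if _, ok := s.watchers[id]; ok {
		return -1, errDuplicateID
	}
	s.watchers[id] = struct{}{}
	return id, nil
}

func main() {
	s := &stream{watchers: map[int64]struct{}{}}
	fmt.Println(s.assign(autoID)) // 0 <nil>
	fmt.Println(s.assign(autoID)) // 1 <nil>
	fmt.Println(s.assign(1))      // -1 duplicate watch ID
}
```
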
diff --git a/server/proxy/grpcproxy/adapter/auth_client_adapter.go b/server/proxy/grpcproxy/adapter/auth_client_adapter.go
index 140212b9620..0baa10c5f16 100644
--- a/server/proxy/grpcproxy/adapter/auth_client_adapter.go
+++ b/server/proxy/grpcproxy/adapter/auth_client_adapter.go
@@ -17,9 +17,9 @@ package adapter
import (
"context"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-
grpc "google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)
type as2ac struct{ as pb.AuthServer }
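
The import edits in this and the following adapter files move the go.etcd.io packages into a trailing group after third-party modules (a gci-style layout; the exact blank-line placement may differ per file). A generic example of the resulting shape, with a placeholder helper function:

```go
// Placeholder file illustrating the import grouping only.
package adapter

import (
	"context"

	"google.golang.org/grpc"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)

// newAuthClient shows each import group in use; it is not part of the proxy.
func newAuthClient(ctx context.Context, conn *grpc.ClientConn) pb.AuthClient {
	_ = ctx // context kept only to exercise the stdlib import
	return pb.NewAuthClient(conn)
}
```
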
diff --git a/server/proxy/grpcproxy/adapter/chan_stream.go b/server/proxy/grpcproxy/adapter/chan_stream.go
index 1af514b1fdd..eaf4309c978 100644
--- a/server/proxy/grpcproxy/adapter/chan_stream.go
+++ b/server/proxy/grpcproxy/adapter/chan_stream.go
@@ -96,15 +96,15 @@ func (cs *chanClientStream) CloseSend() error {
// chanStream implements grpc.Stream using channels
type chanStream struct {
- recvc <-chan interface{}
- sendc chan<- interface{}
+ recvc <-chan any
+ sendc chan<- any
ctx context.Context
cancel context.CancelFunc
}
func (s *chanStream) Context() context.Context { return s.ctx }
-func (s *chanStream) SendMsg(m interface{}) error {
+func (s *chanStream) SendMsg(m any) error {
select {
case s.sendc <- m:
if err, ok := m.(error); ok {
@@ -116,8 +116,8 @@ func (s *chanStream) SendMsg(m interface{}) error {
return s.ctx.Err()
}
-func (s *chanStream) RecvMsg(m interface{}) error {
- v := m.(*interface{})
+func (s *chanStream) RecvMsg(m any) error {
+ v := m.(*any)
for {
select {
case msg, ok := <-s.recvc:
@@ -141,7 +141,7 @@ func (s *chanStream) RecvMsg(m interface{}) error {
func newPipeStream(ctx context.Context, ssHandler func(chanServerStream) error) chanClientStream {
// ch1 is buffered so server can send error on close
- ch1, ch2 := make(chan interface{}, 1), make(chan interface{})
+ ch1, ch2 := make(chan any, 1), make(chan any)
headerc, trailerc := make(chan metadata.MD, 1), make(chan metadata.MD, 1)
cctx, ccancel := context.WithCancel(ctx)
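
The interface{}-to-any edits rely on any being a built-in alias for interface{} since Go 1.18, so signatures such as RecvMsg(m any) keep their behavior. A small illustrative sketch, including the receive-into-*any pattern the adapters use:

```go
// any is an alias for interface{}; values received through *any are
// recovered with a type assertion, as in the adapter Recv methods.
package main

import "fmt"

// recvInto mimics receiving a message into a *any destination.
func recvInto(dst *any, msg any) {
	*dst = msg
}

func main() {
	var v any // identical to: var v interface{}
	recvInto(&v, "watch response payload")

	s, ok := v.(string) // concrete type recovered via type assertion
	fmt.Println(s, ok)  // watch response payload true
}
```
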
diff --git a/server/proxy/grpcproxy/adapter/cluster_client_adapter.go b/server/proxy/grpcproxy/adapter/cluster_client_adapter.go
index c1fff054de4..4c9fbbb0826 100644
--- a/server/proxy/grpcproxy/adapter/cluster_client_adapter.go
+++ b/server/proxy/grpcproxy/adapter/cluster_client_adapter.go
@@ -17,9 +17,9 @@ package adapter
import (
"context"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-
"google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)
type cls2clc struct{ cls pb.ClusterServer }
diff --git a/server/proxy/grpcproxy/adapter/election_client_adapter.go b/server/proxy/grpcproxy/adapter/election_client_adapter.go
index 81d7434474a..c7edaf70a47 100644
--- a/server/proxy/grpcproxy/adapter/election_client_adapter.go
+++ b/server/proxy/grpcproxy/adapter/election_client_adapter.go
@@ -17,9 +17,9 @@ package adapter
import (
"context"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
-
"google.golang.org/grpc"
+
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
)
type es2ec struct{ es v3electionpb.ElectionServer }
@@ -60,8 +60,9 @@ type es2ecServerStream struct{ chanServerStream }
func (s *es2ecClientStream) Send(rr *v3electionpb.LeaderRequest) error {
return s.SendMsg(rr)
}
+
func (s *es2ecClientStream) Recv() (*v3electionpb.LeaderResponse, error) {
- var v interface{}
+ var v any
if err := s.RecvMsg(&v); err != nil {
return nil, err
}
@@ -71,8 +72,9 @@ func (s *es2ecClientStream) Recv() (*v3electionpb.LeaderResponse, error) {
func (s *es2ecServerStream) Send(rr *v3electionpb.LeaderResponse) error {
return s.SendMsg(rr)
}
+
func (s *es2ecServerStream) Recv() (*v3electionpb.LeaderRequest, error) {
- var v interface{}
+ var v any
if err := s.RecvMsg(&v); err != nil {
return nil, err
}
diff --git a/server/proxy/grpcproxy/adapter/kv_client_adapter.go b/server/proxy/grpcproxy/adapter/kv_client_adapter.go
index ddb6ada4732..69e3a113c4f 100644
--- a/server/proxy/grpcproxy/adapter/kv_client_adapter.go
+++ b/server/proxy/grpcproxy/adapter/kv_client_adapter.go
@@ -17,9 +17,9 @@ package adapter
import (
"context"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-
grpc "google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)
type kvs2kvc struct{ kvs pb.KVServer }
diff --git a/server/proxy/grpcproxy/adapter/lease_client_adapter.go b/server/proxy/grpcproxy/adapter/lease_client_adapter.go
index 6640d1d39e3..bf76a5563e5 100644
--- a/server/proxy/grpcproxy/adapter/lease_client_adapter.go
+++ b/server/proxy/grpcproxy/adapter/lease_client_adapter.go
@@ -17,9 +17,9 @@ package adapter
import (
"context"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-
"google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)
type ls2lc struct {
@@ -62,8 +62,9 @@ type ls2lcServerStream struct{ chanServerStream }
func (s *ls2lcClientStream) Send(rr *pb.LeaseKeepAliveRequest) error {
return s.SendMsg(rr)
}
+
func (s *ls2lcClientStream) Recv() (*pb.LeaseKeepAliveResponse, error) {
- var v interface{}
+ var v any
if err := s.RecvMsg(&v); err != nil {
return nil, err
}
@@ -73,8 +74,9 @@ func (s *ls2lcClientStream) Recv() (*pb.LeaseKeepAliveResponse, error) {
func (s *ls2lcServerStream) Send(rr *pb.LeaseKeepAliveResponse) error {
return s.SendMsg(rr)
}
+
func (s *ls2lcServerStream) Recv() (*pb.LeaseKeepAliveRequest, error) {
- var v interface{}
+ var v any
if err := s.RecvMsg(&v); err != nil {
return nil, err
}
diff --git a/server/proxy/grpcproxy/adapter/lock_client_adapter.go b/server/proxy/grpcproxy/adapter/lock_client_adapter.go
index a3ceaf26dae..de5ba049e19 100644
--- a/server/proxy/grpcproxy/adapter/lock_client_adapter.go
+++ b/server/proxy/grpcproxy/adapter/lock_client_adapter.go
@@ -17,9 +17,9 @@ package adapter
import (
"context"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
-
"google.golang.org/grpc"
+
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
)
type ls2lsc struct{ ls v3lockpb.LockServer }
diff --git a/server/proxy/grpcproxy/adapter/maintenance_client_adapter.go b/server/proxy/grpcproxy/adapter/maintenance_client_adapter.go
index 6369a16d8b4..7b204451681 100644
--- a/server/proxy/grpcproxy/adapter/maintenance_client_adapter.go
+++ b/server/proxy/grpcproxy/adapter/maintenance_client_adapter.go
@@ -17,9 +17,9 @@ package adapter
import (
"context"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-
"google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)
type mts2mtc struct{ mts pb.MaintenanceServer }
@@ -72,8 +72,9 @@ type ss2scServerStream struct{ chanServerStream }
func (s *ss2scClientStream) Send(rr *pb.SnapshotRequest) error {
return s.SendMsg(rr)
}
+
func (s *ss2scClientStream) Recv() (*pb.SnapshotResponse, error) {
- var v interface{}
+ var v any
if err := s.RecvMsg(&v); err != nil {
return nil, err
}
@@ -83,8 +84,9 @@ func (s *ss2scClientStream) Recv() (*pb.SnapshotResponse, error) {
func (s *ss2scServerStream) Send(rr *pb.SnapshotResponse) error {
return s.SendMsg(rr)
}
+
func (s *ss2scServerStream) Recv() (*pb.SnapshotRequest, error) {
- var v interface{}
+ var v any
if err := s.RecvMsg(&v); err != nil {
return nil, err
}
diff --git a/server/proxy/grpcproxy/adapter/watch_client_adapter.go b/server/proxy/grpcproxy/adapter/watch_client_adapter.go
index fbc09f6ff64..828ed1b7e60 100644
--- a/server/proxy/grpcproxy/adapter/watch_client_adapter.go
+++ b/server/proxy/grpcproxy/adapter/watch_client_adapter.go
@@ -18,8 +18,9 @@ import (
"context"
"errors"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)
var errAlreadySentHeader = errors.New("adapter: already sent header")
@@ -46,8 +47,9 @@ type ws2wcServerStream struct{ chanServerStream }
func (s *ws2wcClientStream) Send(wr *pb.WatchRequest) error {
return s.SendMsg(wr)
}
+
func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) {
- var v interface{}
+ var v any
if err := s.RecvMsg(&v); err != nil {
return nil, err
}
@@ -57,8 +59,9 @@ func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) {
func (s *ws2wcServerStream) Send(wr *pb.WatchResponse) error {
return s.SendMsg(wr)
}
+
func (s *ws2wcServerStream) Recv() (*pb.WatchRequest, error) {
- var v interface{}
+ var v any
if err := s.RecvMsg(&v); err != nil {
return nil, err
}
diff --git a/server/proxy/grpcproxy/auth.go b/server/proxy/grpcproxy/auth.go
index 0cabfc146e0..753dfa47acf 100644
--- a/server/proxy/grpcproxy/auth.go
+++ b/server/proxy/grpcproxy/auth.go
@@ -18,98 +18,81 @@ import (
"context"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
type AuthProxy struct {
- client *clientv3.Client
+ authClient pb.AuthClient
}
func NewAuthProxy(c *clientv3.Client) pb.AuthServer {
- return &AuthProxy{client: c}
+ return &AuthProxy{authClient: pb.NewAuthClient(c.ActiveConnection())}
}
func (ap *AuthProxy) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
- conn := ap.client.ActiveConnection()
- return pb.NewAuthClient(conn).AuthEnable(ctx, r)
+ return ap.authClient.AuthEnable(ctx, r)
}
func (ap *AuthProxy) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
- conn := ap.client.ActiveConnection()
- return pb.NewAuthClient(conn).AuthDisable(ctx, r)
+ return ap.authClient.AuthDisable(ctx, r)
}
func (ap *AuthProxy) AuthStatus(ctx context.Context, r *pb.AuthStatusRequest) (*pb.AuthStatusResponse, error) {
- conn := ap.client.ActiveConnection()
- return pb.NewAuthClient(conn).AuthStatus(ctx, r)
+ return ap.authClient.AuthStatus(ctx, r)
}
func (ap *AuthProxy) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
- conn := ap.client.ActiveConnection()
- return pb.NewAuthClient(conn).Authenticate(ctx, r)
+ return ap.authClient.Authenticate(ctx, r)
}
func (ap *AuthProxy) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
- conn := ap.client.ActiveConnection()
- return pb.NewAuthClient(conn).RoleAdd(ctx, r)
+ return ap.authClient.RoleAdd(ctx, r)
}
func (ap *AuthProxy) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
- conn := ap.client.ActiveConnection()
- return pb.NewAuthClient(conn).RoleDelete(ctx, r)
+ return ap.authClient.RoleDelete(ctx, r)
}
func (ap *AuthProxy) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
- conn := ap.client.ActiveConnection()
- return pb.NewAuthClient(conn).RoleGet(ctx, r)
+ return ap.authClient.RoleGet(ctx, r)
}
func (ap *AuthProxy) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
- conn := ap.client.ActiveConnection()
- return pb.NewAuthClient(conn).RoleList(ctx, r)
+ return ap.authClient.RoleList(ctx, r)
}
func (ap *AuthProxy) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
- conn := ap.client.ActiveConnection()
- return pb.NewAuthClient(conn).RoleRevokePermission(ctx, r)
+ return ap.authClient.RoleRevokePermission(ctx, r)
}
func (ap *AuthProxy) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
- conn := ap.client.ActiveConnection()
- return pb.NewAuthClient(conn).RoleGrantPermission(ctx, r)
+ return ap.authClient.RoleGrantPermission(ctx, r)
}
func (ap *AuthProxy) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
- conn := ap.client.ActiveConnection()
- return pb.NewAuthClient(conn).UserAdd(ctx, r)
+ return ap.authClient.UserAdd(ctx, r)
}
func (ap *AuthProxy) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
- conn := ap.client.ActiveConnection()
- return pb.NewAuthClient(conn).UserDelete(ctx, r)
+ return ap.authClient.UserDelete(ctx, r)
}
func (ap *AuthProxy) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
- conn := ap.client.ActiveConnection()
- return pb.NewAuthClient(conn).UserGet(ctx, r)
+ return ap.authClient.UserGet(ctx, r)
}
func (ap *AuthProxy) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
- conn := ap.client.ActiveConnection()
- return pb.NewAuthClient(conn).UserList(ctx, r)
+ return ap.authClient.UserList(ctx, r)
}
func (ap *AuthProxy) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
- conn := ap.client.ActiveConnection()
- return pb.NewAuthClient(conn).UserGrantRole(ctx, r)
+ return ap.authClient.UserGrantRole(ctx, r)
}
func (ap *AuthProxy) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
- conn := ap.client.ActiveConnection()
- return pb.NewAuthClient(conn).UserRevokeRole(ctx, r)
+ return ap.authClient.UserRevokeRole(ctx, r)
}
func (ap *AuthProxy) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
- conn := ap.client.ActiveConnection()
- return pb.NewAuthClient(conn).UserChangePassword(ctx, r)
+ return ap.authClient.UserChangePassword(ctx, r)
}
diff --git a/server/proxy/grpcproxy/cache/store.go b/server/proxy/grpcproxy/cache/store.go
index 2c189644a86..69fb38c070f 100644
--- a/server/proxy/grpcproxy/cache/store.go
+++ b/server/proxy/grpcproxy/cache/store.go
@@ -21,6 +21,7 @@ import (
"sync"
"github.com/golang/groupcache/lru"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/pkg/v3/adt"
diff --git a/server/proxy/grpcproxy/cluster.go b/server/proxy/grpcproxy/cluster.go
index 1f7dccbe74f..a528e161ef3 100644
--- a/server/proxy/grpcproxy/cluster.go
+++ b/server/proxy/grpcproxy/cluster.go
@@ -21,12 +21,12 @@ import (
"os"
"sync"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/naming/endpoints"
+ "go.uber.org/zap"
"golang.org/x/time/rate"
- "go.uber.org/zap"
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/client/v3/naming/endpoints"
)
// allow maximum 1 retry per second
@@ -34,7 +34,7 @@ const resolveRetryRate = 1
type clusterProxy struct {
lg *zap.Logger
- clus clientv3.Cluster
+ clus pb.ClusterClient
ctx context.Context
// advertise client URL
@@ -67,7 +67,7 @@ func NewClusterProxy(lg *zap.Logger, c *clientv3.Client, advaddr string, prefix
cp := &clusterProxy{
lg: lg,
- clus: c.Cluster,
+ clus: pb.NewClusterClient(c.ActiveConnection()),
ctx: c.Ctx(),
advaddr: advaddr,
@@ -112,9 +112,9 @@ func (cp *clusterProxy) monitor(wa endpoints.WatchChannel) {
for _, up := range updates {
switch up.Op {
case endpoints.Add:
- cp.umap[up.Endpoint.Addr] = up.Endpoint
+ cp.umap[up.Key] = up.Endpoint
case endpoints.Delete:
- delete(cp.umap, up.Endpoint.Addr)
+ delete(cp.umap, up.Key)
}
}
cp.umu.Unlock()
@@ -123,58 +123,27 @@ func (cp *clusterProxy) monitor(wa endpoints.WatchChannel) {
}
func (cp *clusterProxy) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) {
- if r.IsLearner {
- return cp.memberAddAsLearner(ctx, r.PeerURLs)
- }
- return cp.memberAdd(ctx, r.PeerURLs)
-}
-
-func (cp *clusterProxy) memberAdd(ctx context.Context, peerURLs []string) (*pb.MemberAddResponse, error) {
- mresp, err := cp.clus.MemberAdd(ctx, peerURLs)
- if err != nil {
- return nil, err
- }
- resp := (pb.MemberAddResponse)(*mresp)
- return &resp, err
-}
-
-func (cp *clusterProxy) memberAddAsLearner(ctx context.Context, peerURLs []string) (*pb.MemberAddResponse, error) {
- mresp, err := cp.clus.MemberAddAsLearner(ctx, peerURLs)
- if err != nil {
- return nil, err
- }
- resp := (pb.MemberAddResponse)(*mresp)
- return &resp, err
+ return cp.clus.MemberAdd(ctx, r)
}
func (cp *clusterProxy) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
- mresp, err := cp.clus.MemberRemove(ctx, r.ID)
- if err != nil {
- return nil, err
- }
- resp := (pb.MemberRemoveResponse)(*mresp)
- return &resp, err
+ return cp.clus.MemberRemove(ctx, r)
}
func (cp *clusterProxy) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) {
- mresp, err := cp.clus.MemberUpdate(ctx, r.ID, r.PeerURLs)
- if err != nil {
- return nil, err
- }
- resp := (pb.MemberUpdateResponse)(*mresp)
- return &resp, err
+ return cp.clus.MemberUpdate(ctx, r)
}
func (cp *clusterProxy) membersFromUpdates() ([]*pb.Member, error) {
cp.umu.RLock()
defer cp.umu.RUnlock()
mbs := make([]*pb.Member, 0, len(cp.umap))
- for addr, upt := range cp.umap {
+ for _, upt := range cp.umap {
m, err := decodeMeta(fmt.Sprint(upt.Metadata))
if err != nil {
return nil, err
}
- mbs = append(mbs, &pb.Member{Name: m.Name, ClientURLs: []string{addr}})
+ mbs = append(mbs, &pb.Member{Name: m.Name, ClientURLs: []string{upt.Addr}})
}
return mbs, nil
}
@@ -199,12 +168,7 @@ func (cp *clusterProxy) MemberList(ctx context.Context, r *pb.MemberListRequest)
hostname, _ := os.Hostname()
return &pb.MemberListResponse{Members: []*pb.Member{{Name: hostname, ClientURLs: []string{cp.advaddr}}}}, nil
}
- mresp, err := cp.clus.MemberList(ctx)
- if err != nil {
- return nil, err
- }
- resp := (pb.MemberListResponse)(*mresp)
- return &resp, err
+ return cp.clus.MemberList(ctx, r)
}
func (cp *clusterProxy) MemberPromote(ctx context.Context, r *pb.MemberPromoteRequest) (*pb.MemberPromoteResponse, error) {
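
The monitor change above keys the endpoint map by the update's Key rather than Endpoint.Addr, which keeps Add and Delete updates paired even when a delete carries no (or a changed) address. A stand-alone illustration with simplified types, not the clientv3 naming API:

```go
// Keying the map by a stable registration key makes deletes reliable.
package main

import "fmt"

type endpoint struct{ Addr string }

type update struct {
	Op       string // "add" or "delete"
	Key      string // stable registration key
	Endpoint endpoint
}

func apply(m map[string]endpoint, ups []update) {
	for _, up := range ups {
		switch up.Op {
		case "add":
			m[up.Key] = up.Endpoint
		case "delete":
			delete(m, up.Key) // works even if the address is absent or changed
		}
	}
}

func main() {
	m := map[string]endpoint{}
	apply(m, []update{
		{Op: "add", Key: "member-a", Endpoint: endpoint{Addr: "10.0.0.1:2379"}},
		{Op: "delete", Key: "member-a"}, // delete updates may omit the address
	})
	fmt.Println(len(m)) // 0
}
```
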
diff --git a/server/proxy/grpcproxy/election.go b/server/proxy/grpcproxy/election.go
index a9ec0fddd3c..9ea8d961576 100644
--- a/server/proxy/grpcproxy/election.go
+++ b/server/proxy/grpcproxy/election.go
@@ -17,35 +17,34 @@ package grpcproxy
import (
"context"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
)
type electionProxy struct {
- client *clientv3.Client
+ electionClient v3electionpb.ElectionClient
}
func NewElectionProxy(client *clientv3.Client) v3electionpb.ElectionServer {
- return &electionProxy{client: client}
+ return &electionProxy{electionClient: v3electionpb.NewElectionClient(client.ActiveConnection())}
}
func (ep *electionProxy) Campaign(ctx context.Context, req *v3electionpb.CampaignRequest) (*v3electionpb.CampaignResponse, error) {
- return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Campaign(ctx, req)
+ return ep.electionClient.Campaign(ctx, req)
}
func (ep *electionProxy) Proclaim(ctx context.Context, req *v3electionpb.ProclaimRequest) (*v3electionpb.ProclaimResponse, error) {
- return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Proclaim(ctx, req)
+ return ep.electionClient.Proclaim(ctx, req)
}
func (ep *electionProxy) Leader(ctx context.Context, req *v3electionpb.LeaderRequest) (*v3electionpb.LeaderResponse, error) {
- return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Leader(ctx, req)
+ return ep.electionClient.Leader(ctx, req)
}
func (ep *electionProxy) Observe(req *v3electionpb.LeaderRequest, s v3electionpb.Election_ObserveServer) error {
- conn := ep.client.ActiveConnection()
ctx, cancel := context.WithCancel(s.Context())
defer cancel()
- sc, err := v3electionpb.NewElectionClient(conn).Observe(ctx, req)
+ sc, err := ep.electionClient.Observe(ctx, req)
if err != nil {
return err
}
@@ -61,5 +60,5 @@ func (ep *electionProxy) Observe(req *v3electionpb.LeaderRequest, s v3electionpb
}
func (ep *electionProxy) Resign(ctx context.Context, req *v3electionpb.ResignRequest) (*v3electionpb.ResignResponse, error) {
- return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Resign(ctx, req)
+ return ep.electionClient.Resign(ctx, req)
}
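
Observe above forwards an upstream election stream to the downstream server stream; the forwarding loop itself sits outside this hunk. The sketch below only illustrates the general shape of such a relay, under simplified hypothetical interfaces rather than the v3electionpb types:

```go
// Relay messages from a receive stream to a send stream until either fails.
package proxy

type recvStream interface {
	Recv() (string, error)
}

type sendStream interface {
	Send(string) error
}

// forward copies messages until Recv or Send returns an error, which is
// then reported to the caller.
func forward(src recvStream, dst sendStream) error {
	for {
		msg, err := src.Recv()
		if err != nil {
			return err
		}
		if err := dst.Send(msg); err != nil {
			return err
		}
	}
}
```
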
diff --git a/server/proxy/grpcproxy/health.go b/server/proxy/grpcproxy/health.go
index 1d6f7a2d8b9..3bbfa9e2b0f 100644
--- a/server/proxy/grpcproxy/health.go
+++ b/server/proxy/grpcproxy/health.go
@@ -16,14 +16,16 @@ package grpcproxy
import (
"context"
+ "errors"
"fmt"
"net/http"
"time"
+ "go.uber.org/zap"
+
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp"
- "go.uber.org/zap"
)
// HandleHealth registers health handler on '/health'.
@@ -31,7 +33,9 @@ func HandleHealth(lg *zap.Logger, mux *http.ServeMux, c *clientv3.Client) {
if lg == nil {
lg = zap.NewNop()
}
- mux.Handle(etcdhttp.PathHealth, etcdhttp.NewHealthHandler(lg, func(excludedAlarms etcdhttp.AlarmSet) etcdhttp.Health { return checkHealth(c) }))
+ mux.Handle(etcdhttp.PathHealth, etcdhttp.NewHealthHandler(lg, func(ctx context.Context, excludedAlarms etcdhttp.StringSet, serializable bool) etcdhttp.Health {
+ return checkHealth(c)
+ }))
}
// HandleProxyHealth registers health handler on '/proxy/health'.
@@ -39,7 +43,9 @@ func HandleProxyHealth(lg *zap.Logger, mux *http.ServeMux, c *clientv3.Client) {
if lg == nil {
lg = zap.NewNop()
}
- mux.Handle(etcdhttp.PathProxyHealth, etcdhttp.NewHealthHandler(lg, func(excludedAlarms etcdhttp.AlarmSet) etcdhttp.Health { return checkProxyHealth(c) }))
+ mux.Handle(etcdhttp.PathProxyHealth, etcdhttp.NewHealthHandler(lg, func(ctx context.Context, excludedAlarms etcdhttp.StringSet, serializable bool) etcdhttp.Health {
+ return checkProxyHealth(c)
+ }))
}
func checkHealth(c *clientv3.Client) etcdhttp.Health {
@@ -47,7 +53,7 @@ func checkHealth(c *clientv3.Client) etcdhttp.Health {
ctx, cancel := context.WithTimeout(c.Ctx(), time.Second)
_, err := c.Get(ctx, "a")
cancel()
- if err == nil || err == rpctypes.ErrPermissionDenied {
+ if err == nil || errors.Is(err, rpctypes.ErrPermissionDenied) {
h.Health = "true"
} else {
h.Reason = fmt.Sprintf("GET ERROR:%s", err)
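
The == comparisons against sentinel errors are replaced by errors.Is here and in the kv, lease, and maintenance proxies; errors.Is still matches when the sentinel has been wrapped with %w, which plain equality does not. A minimal sketch, with errPermissionDenied as an illustrative stand-in for rpctypes.ErrPermissionDenied:

package main

import (
	"errors"
	"fmt"
)

// errPermissionDenied stands in for a package-level sentinel such as rpctypes.ErrPermissionDenied.
var errPermissionDenied = errors.New("permission denied")

// get returns the sentinel wrapped with %w, as callers along the way often do.
func get() error {
	return fmt.Errorf("range failed: %w", errPermissionDenied)
}

func main() {
	err := get()
	fmt.Println(err == errPermissionDenied)          // false: the error is wrapped
	fmt.Println(errors.Is(err, errPermissionDenied)) // true: errors.Is unwraps the chain
}
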
diff --git a/server/proxy/grpcproxy/kv.go b/server/proxy/grpcproxy/kv.go
index 6e88eb9fb95..3df361d644e 100644
--- a/server/proxy/grpcproxy/kv.go
+++ b/server/proxy/grpcproxy/kv.go
@@ -16,9 +16,10 @@ package grpcproxy
import (
"context"
+ "errors"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/proxy/grpcproxy/cache"
)
@@ -40,11 +41,11 @@ func NewKvProxy(c *clientv3.Client) (pb.KVServer, <-chan struct{}) {
func (p *kvProxy) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
if r.Serializable {
resp, err := p.cache.Get(r)
- switch err {
- case nil:
+ switch {
+ case err == nil:
cacheHits.Inc()
return resp, nil
- case cache.ErrCompacted:
+ case errors.Is(err, cache.ErrCompacted):
cacheHits.Inc()
return nil, err
}
@@ -162,7 +163,7 @@ func requestOpToOp(union *pb.RequestOp) clientv3.Op {
}
func RangeRequestToOp(r *pb.RangeRequest) clientv3.Op {
- opts := []clientv3.OpOption{}
+ var opts []clientv3.OpOption
if len(r.RangeEnd) != 0 {
opts = append(opts, clientv3.WithRange(string(r.RangeEnd)))
}
@@ -190,7 +191,7 @@ func RangeRequestToOp(r *pb.RangeRequest) clientv3.Op {
}
func PutRequestToOp(r *pb.PutRequest) clientv3.Op {
- opts := []clientv3.OpOption{}
+ var opts []clientv3.OpOption
opts = append(opts, clientv3.WithLease(clientv3.LeaseID(r.Lease)))
if r.IgnoreValue {
opts = append(opts, clientv3.WithIgnoreValue())
@@ -205,7 +206,7 @@ func PutRequestToOp(r *pb.PutRequest) clientv3.Op {
}
func DelRequestToOp(r *pb.DeleteRangeRequest) clientv3.Op {
- opts := []clientv3.OpOption{}
+ var opts []clientv3.OpOption
if len(r.RangeEnd) != 0 {
opts = append(opts, clientv3.WithRange(string(r.RangeEnd)))
}
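
The opts := []clientv3.OpOption{} to var opts []clientv3.OpOption changes are stylistic: append behaves identically on a nil slice, so the empty composite literal adds nothing. A tiny demonstration with a plain string slice:

package main

import "fmt"

func main() {
	var opts []string // nil slice: len 0, cap 0, no backing array yet
	fmt.Println(opts == nil, len(opts)) // true 0

	// append allocates on first use, exactly as it would for []string{}
	opts = append(opts, "WithRange", "WithLimit")
	fmt.Println(opts) // [WithRange WithLimit]
}
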
diff --git a/server/proxy/grpcproxy/leader.go b/server/proxy/grpcproxy/leader.go
index c078c89bb3b..18e0a838232 100644
--- a/server/proxy/grpcproxy/leader.go
+++ b/server/proxy/grpcproxy/leader.go
@@ -19,9 +19,9 @@ import (
"math"
"sync"
- "go.etcd.io/etcd/client/v3"
-
"golang.org/x/time/rate"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
)
const (
diff --git a/server/proxy/grpcproxy/lease.go b/server/proxy/grpcproxy/lease.go
index 875256c4329..dc42dc5a0d8 100644
--- a/server/proxy/grpcproxy/lease.go
+++ b/server/proxy/grpcproxy/lease.go
@@ -16,19 +16,20 @@ package grpcproxy
import (
"context"
+ "errors"
"io"
"sync"
"sync/atomic"
"time"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3"
-
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
type leaseProxy struct {
@@ -245,7 +246,7 @@ type leaseProxyStream struct {
func (lps *leaseProxyStream) recvLoop() error {
for {
rr, err := lps.stream.Recv()
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
return nil
}
if err != nil {
diff --git a/server/proxy/grpcproxy/lock.go b/server/proxy/grpcproxy/lock.go
index 9859b936995..9458080db7b 100644
--- a/server/proxy/grpcproxy/lock.go
+++ b/server/proxy/grpcproxy/lock.go
@@ -17,22 +17,22 @@ package grpcproxy
import (
"context"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
)
type lockProxy struct {
- client *clientv3.Client
+ lockClient v3lockpb.LockClient
}
func NewLockProxy(client *clientv3.Client) v3lockpb.LockServer {
- return &lockProxy{client: client}
+ return &lockProxy{lockClient: v3lockpb.NewLockClient(client.ActiveConnection())}
}
func (lp *lockProxy) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) {
- return v3lockpb.NewLockClient(lp.client.ActiveConnection()).Lock(ctx, req)
+ return lp.lockClient.Lock(ctx, req)
}
func (lp *lockProxy) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) {
- return v3lockpb.NewLockClient(lp.client.ActiveConnection()).Unlock(ctx, req)
+ return lp.lockClient.Unlock(ctx, req)
}
diff --git a/server/proxy/grpcproxy/maintenance.go b/server/proxy/grpcproxy/maintenance.go
index 3e81656259a..553b66c4b73 100644
--- a/server/proxy/grpcproxy/maintenance.go
+++ b/server/proxy/grpcproxy/maintenance.go
@@ -16,35 +16,34 @@ package grpcproxy
import (
"context"
+ "errors"
"io"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
type maintenanceProxy struct {
- client *clientv3.Client
+ maintenanceClient pb.MaintenanceClient
}
func NewMaintenanceProxy(c *clientv3.Client) pb.MaintenanceServer {
return &maintenanceProxy{
- client: c,
+ maintenanceClient: pb.NewMaintenanceClient(c.ActiveConnection()),
}
}
func (mp *maintenanceProxy) Defragment(ctx context.Context, dr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
- conn := mp.client.ActiveConnection()
- return pb.NewMaintenanceClient(conn).Defragment(ctx, dr)
+ return mp.maintenanceClient.Defragment(ctx, dr)
}
func (mp *maintenanceProxy) Snapshot(sr *pb.SnapshotRequest, stream pb.Maintenance_SnapshotServer) error {
- conn := mp.client.ActiveConnection()
ctx, cancel := context.WithCancel(stream.Context())
defer cancel()
ctx = withClientAuthToken(ctx, stream.Context())
- sc, err := pb.NewMaintenanceClient(conn).Snapshot(ctx, sr)
+ sc, err := mp.maintenanceClient.Snapshot(ctx, sr)
if err != nil {
return err
}
@@ -52,7 +51,7 @@ func (mp *maintenanceProxy) Snapshot(sr *pb.SnapshotRequest, stream pb.Maintenan
for {
rr, err := sc.Recv()
if err != nil {
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
return nil
}
return err
@@ -65,31 +64,25 @@ func (mp *maintenanceProxy) Snapshot(sr *pb.SnapshotRequest, stream pb.Maintenan
}
func (mp *maintenanceProxy) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
- conn := mp.client.ActiveConnection()
- return pb.NewMaintenanceClient(conn).Hash(ctx, r)
+ return mp.maintenanceClient.Hash(ctx, r)
}
func (mp *maintenanceProxy) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
- conn := mp.client.ActiveConnection()
- return pb.NewMaintenanceClient(conn).HashKV(ctx, r)
+ return mp.maintenanceClient.HashKV(ctx, r)
}
func (mp *maintenanceProxy) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) {
- conn := mp.client.ActiveConnection()
- return pb.NewMaintenanceClient(conn).Alarm(ctx, r)
+ return mp.maintenanceClient.Alarm(ctx, r)
}
func (mp *maintenanceProxy) Status(ctx context.Context, r *pb.StatusRequest) (*pb.StatusResponse, error) {
- conn := mp.client.ActiveConnection()
- return pb.NewMaintenanceClient(conn).Status(ctx, r)
+ return mp.maintenanceClient.Status(ctx, r)
}
func (mp *maintenanceProxy) MoveLeader(ctx context.Context, r *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
- conn := mp.client.ActiveConnection()
- return pb.NewMaintenanceClient(conn).MoveLeader(ctx, r)
+ return mp.maintenanceClient.MoveLeader(ctx, r)
}
func (mp *maintenanceProxy) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) {
- conn := mp.client.ActiveConnection()
- return pb.NewMaintenanceClient(conn).Downgrade(ctx, r)
+ return mp.maintenanceClient.Downgrade(ctx, r)
}
diff --git a/server/proxy/grpcproxy/metrics.go b/server/proxy/grpcproxy/metrics.go
index 01a7a94c89e..cc94b7f6e9f 100644
--- a/server/proxy/grpcproxy/metrics.go
+++ b/server/proxy/grpcproxy/metrics.go
@@ -16,7 +16,7 @@ package grpcproxy
import (
"fmt"
- "io/ioutil"
+ "io"
"math/rand"
"net/http"
"strings"
@@ -24,6 +24,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
+
"go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp"
)
@@ -94,7 +95,7 @@ func HandleMetrics(mux *http.ServeMux, c *http.Client, eps []string) {
}
defer resp.Body.Close()
w.Header().Set("Content-Type", "text/plain; version=0.0.4")
- body, _ := ioutil.ReadAll(resp.Body)
+ body, _ := io.ReadAll(resp.Body)
fmt.Fprintf(w, "%s", body)
})
}
diff --git a/server/proxy/grpcproxy/register.go b/server/proxy/grpcproxy/register.go
index 14ec034eadf..bb860405a1d 100644
--- a/server/proxy/grpcproxy/register.go
+++ b/server/proxy/grpcproxy/register.go
@@ -18,12 +18,12 @@ import (
"encoding/json"
"os"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/concurrency"
- "go.etcd.io/etcd/client/v3/naming/endpoints"
-
"go.uber.org/zap"
"golang.org/x/time/rate"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/client/v3/concurrency"
+ "go.etcd.io/etcd/client/v3/naming/endpoints"
)
// allow maximum 1 retry per second
@@ -69,10 +69,12 @@ func registerSession(lg *zap.Logger, c *clientv3.Client, prefix string, addr str
em, err := endpoints.NewManager(c, prefix)
if err != nil {
+ ss.Close()
return nil, err
}
endpoint := endpoints.Endpoint{Addr: addr, Metadata: getMeta()}
if err = em.AddEndpoint(c.Ctx(), prefix+"/"+addr, endpoint, clientv3.WithLease(ss.Lease())); err != nil {
+ ss.Close()
return nil, err
}
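
The two added ss.Close() calls make registerSession release the concurrency session, and with it the lease keepalive, when endpoint registration fails partway through. A minimal sketch of the cleanup-on-error shape, with session and newSession as illustrative stand-ins for the clientv3 concurrency types:

package main

import (
	"errors"
	"fmt"
)

// session stands in for concurrency.Session: something that owns a resource
// (here, a lease keepalive) that must be released if later setup fails.
type session struct{ closed bool }

func newSession() (*session, error) { return &session{}, nil }
func (s *session) Close()           { s.closed = true }

// register acquires the session first, then performs follow-up setup; every
// error return after acquisition closes the session so nothing leaks.
func register(fail bool) (*session, error) {
	ss, err := newSession()
	if err != nil {
		return nil, err
	}
	if fail { // e.g. endpoints.NewManager or AddEndpoint returning an error
		ss.Close() // without this, the session (and its lease keepalive) would leak
		return nil, errors.New("add endpoint failed")
	}
	return ss, nil
}

func main() {
	_, err := register(true)
	fmt.Println(err)
}
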
diff --git a/server/proxy/grpcproxy/util.go b/server/proxy/grpcproxy/util.go
index 856ac5769e1..7e3d3193b31 100644
--- a/server/proxy/grpcproxy/util.go
+++ b/server/proxy/grpcproxy/util.go
@@ -17,10 +17,10 @@ package grpcproxy
import (
"context"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
-
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
+
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
)
func getAuthTokenFromClient(ctx context.Context) string {
@@ -37,7 +37,7 @@ func getAuthTokenFromClient(ctx context.Context) string {
func withClientAuthToken(ctx, ctxWithToken context.Context) context.Context {
token := getAuthTokenFromClient(ctxWithToken)
if token != "" {
- ctx = context.WithValue(ctx, rpctypes.TokenFieldNameGRPC, token)
+ ctx = context.WithValue(ctx, rpctypes.TokenFieldNameGRPCKey{}, token)
}
return ctx
}
@@ -56,7 +56,7 @@ func (cred *proxyTokenCredential) GetRequestMetadata(ctx context.Context, s ...s
}, nil
}
-func AuthUnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+func AuthUnaryClientInterceptor(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
token := getAuthTokenFromClient(ctx)
if token != "" {
tokenCred := &proxyTokenCredential{token}
@@ -66,7 +66,7 @@ func AuthUnaryClientInterceptor(ctx context.Context, method string, req, reply i
}
func AuthStreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
- tokenif := ctx.Value(rpctypes.TokenFieldNameGRPC)
+ tokenif := ctx.Value(rpctypes.TokenFieldNameGRPCKey{})
if tokenif != nil {
tokenCred := &proxyTokenCredential{tokenif.(string)}
opts = append(opts, grpc.PerRPCCredentials(tokenCred))
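
Switching the context key from the string rpctypes.TokenFieldNameGRPC to the struct value rpctypes.TokenFieldNameGRPCKey{} follows the go vet guidance that context keys should be package-specific types rather than bare strings, so they cannot collide with keys from other packages. A minimal sketch of the pattern with a hypothetical tokenKey type:

package main

import (
	"context"
	"fmt"
)

// tokenKey is an unexported struct type used as a context key. Distinct struct
// types can never collide, unlike bare string keys, which is what `go vet` flags.
type tokenKey struct{}

func withToken(ctx context.Context, token string) context.Context {
	return context.WithValue(ctx, tokenKey{}, token)
}

func tokenFrom(ctx context.Context) (string, bool) {
	t, ok := ctx.Value(tokenKey{}).(string)
	return t, ok
}

func main() {
	ctx := withToken(context.Background(), "abc123")
	if t, ok := tokenFrom(ctx); ok {
		fmt.Println("token:", t)
	}
}
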
diff --git a/server/proxy/grpcproxy/watch.go b/server/proxy/grpcproxy/watch.go
index 3ec38d600c3..90eb21d4a40 100644
--- a/server/proxy/grpcproxy/watch.go
+++ b/server/proxy/grpcproxy/watch.go
@@ -18,15 +18,15 @@ import (
"context"
"sync"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
-
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
)
type watchProxy struct {
@@ -238,7 +238,7 @@ func (wps *watchProxyStream) recvLoop() error {
if err := wps.checkPermissionForWatch(cr.Key, cr.RangeEnd); err != nil {
wps.watchCh <- &pb.WatchResponse{
Header: &pb.ResponseHeader{},
- WatchId: -1,
+ WatchId: clientv3.InvalidWatchID,
Created: true,
Canceled: true,
CancelReason: err.Error(),
@@ -258,7 +258,7 @@ func (wps *watchProxyStream) recvLoop() error {
filters: v3rpc.FiltersFromRequest(cr),
}
if !w.wr.valid() {
- w.post(&pb.WatchResponse{WatchId: -1, Created: true, Canceled: true})
+ w.post(&pb.WatchResponse{WatchId: clientv3.InvalidWatchID, Created: true, Canceled: true})
wps.mu.Unlock()
continue
}
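
Replacing the bare -1 with clientv3.InvalidWatchID gives the "watch create failed" responses a named sentinel instead of a magic number. A small sketch of the idea using local illustrative types rather than the etcd protobufs:

package main

import "fmt"

// invalidWatchID mirrors the role of clientv3.InvalidWatchID: a named sentinel
// for "no valid watch was created" instead of a bare -1 scattered through the code.
const invalidWatchID int64 = -1

// watchResponse is a cut-down stand-in for pb.WatchResponse.
type watchResponse struct {
	WatchID      int64
	Created      bool
	Canceled     bool
	CancelReason string
}

// rejectedWatch reports a create that failed (e.g. a permission check) using the sentinel ID.
func rejectedWatch(reason string) watchResponse {
	return watchResponse{WatchID: invalidWatchID, Created: true, Canceled: true, CancelReason: reason}
}

func main() {
	resp := rejectedWatch("permission denied")
	if resp.Created && resp.Canceled && resp.WatchID == invalidWatchID {
		fmt.Println("watch create was rejected:", resp.CancelReason)
	}
}
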
diff --git a/server/proxy/grpcproxy/watch_broadcast.go b/server/proxy/grpcproxy/watch_broadcast.go
index 1d9a43df143..5a395df08a0 100644
--- a/server/proxy/grpcproxy/watch_broadcast.go
+++ b/server/proxy/grpcproxy/watch_broadcast.go
@@ -19,10 +19,10 @@ import (
"sync"
"time"
+ "go.uber.org/zap"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
clientv3 "go.etcd.io/etcd/client/v3"
-
- "go.uber.org/zap"
)
// watchBroadcast broadcasts a server watcher to many client watchers.
@@ -126,6 +126,7 @@ func (wb *watchBroadcast) add(w *watcher) bool {
return true
}
+
func (wb *watchBroadcast) delete(w *watcher) {
wb.mu.Lock()
defer wb.mu.Unlock()
diff --git a/server/proxy/grpcproxy/watcher.go b/server/proxy/grpcproxy/watcher.go
index 5f6c3db8084..45d3a5352f5 100644
--- a/server/proxy/grpcproxy/watcher.go
+++ b/server/proxy/grpcproxy/watcher.go
@@ -19,8 +19,8 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/server/v3/mvcc"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
)
type watchRange struct {
diff --git a/server/proxy/httpproxy/director.go b/server/proxy/httpproxy/director.go
deleted file mode 100644
index e20e2226a0d..00000000000
--- a/server/proxy/httpproxy/director.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httpproxy
-
-import (
- "math/rand"
- "net/url"
- "sync"
- "time"
-
- "go.uber.org/zap"
-)
-
-// defaultRefreshInterval is the default proxyRefreshIntervalMs value
-// as in etcdmain/config.go.
-const defaultRefreshInterval = 30000 * time.Millisecond
-
-var once sync.Once
-
-func init() {
- rand.Seed(time.Now().UnixNano())
-}
-
-func newDirector(lg *zap.Logger, urlsFunc GetProxyURLs, failureWait time.Duration, refreshInterval time.Duration) *director {
- if lg == nil {
- lg = zap.NewNop()
- }
- d := &director{
- lg: lg,
- uf: urlsFunc,
- failureWait: failureWait,
- }
- d.refresh()
- go func() {
- // In order to prevent missing proxy endpoints in the first try:
- // when given refresh interval of defaultRefreshInterval or greater
- // and whenever there is no available proxy endpoints,
- // give 1-second refreshInterval.
- for {
- es := d.endpoints()
- ri := refreshInterval
- if ri >= defaultRefreshInterval {
- if len(es) == 0 {
- ri = time.Second
- }
- }
- if len(es) > 0 {
- once.Do(func() {
- var sl []string
- for _, e := range es {
- sl = append(sl, e.URL.String())
- }
- lg.Info("endpoints found", zap.Strings("endpoints", sl))
- })
- }
- time.Sleep(ri)
- d.refresh()
- }
- }()
- return d
-}
-
-type director struct {
- sync.Mutex
- lg *zap.Logger
- ep []*endpoint
- uf GetProxyURLs
- failureWait time.Duration
-}
-
-func (d *director) refresh() {
- urls := d.uf()
- d.Lock()
- defer d.Unlock()
- var endpoints []*endpoint
- for _, u := range urls {
- uu, err := url.Parse(u)
- if err != nil {
- d.lg.Info("upstream URL invalid", zap.Error(err))
- continue
- }
- endpoints = append(endpoints, newEndpoint(d.lg, *uu, d.failureWait))
- }
-
- // shuffle array to avoid connections being "stuck" to a single endpoint
- for i := range endpoints {
- j := rand.Intn(i + 1)
- endpoints[i], endpoints[j] = endpoints[j], endpoints[i]
- }
-
- d.ep = endpoints
-}
-
-func (d *director) endpoints() []*endpoint {
- d.Lock()
- defer d.Unlock()
- filtered := make([]*endpoint, 0)
- for _, ep := range d.ep {
- if ep.Available {
- filtered = append(filtered, ep)
- }
- }
-
- return filtered
-}
-
-func newEndpoint(lg *zap.Logger, u url.URL, failureWait time.Duration) *endpoint {
- ep := endpoint{
- lg: lg,
- URL: u,
- Available: true,
- failFunc: timedUnavailabilityFunc(failureWait),
- }
-
- return &ep
-}
-
-type endpoint struct {
- sync.Mutex
-
- lg *zap.Logger
- URL url.URL
- Available bool
-
- failFunc func(ep *endpoint)
-}
-
-func (ep *endpoint) Failed() {
- ep.Lock()
- if !ep.Available {
- ep.Unlock()
- return
- }
-
- ep.Available = false
- ep.Unlock()
-
- if ep.lg != nil {
- ep.lg.Info("marked endpoint unavailable", zap.String("endpoint", ep.URL.String()))
- }
-
- if ep.failFunc == nil {
- if ep.lg != nil {
- ep.lg.Info(
- "no failFunc defined, endpoint will be unavailable forever",
- zap.String("endpoint", ep.URL.String()),
- )
- }
- return
- }
-
- ep.failFunc(ep)
-}
-
-func timedUnavailabilityFunc(wait time.Duration) func(*endpoint) {
- return func(ep *endpoint) {
- time.AfterFunc(wait, func() {
- ep.Available = true
- if ep.lg != nil {
- ep.lg.Info(
- "marked endpoint available, to retest connectivity",
- zap.String("endpoint", ep.URL.String()),
- )
- }
- })
- }
-}
diff --git a/server/proxy/httpproxy/director_test.go b/server/proxy/httpproxy/director_test.go
deleted file mode 100644
index 952506729dc..00000000000
--- a/server/proxy/httpproxy/director_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httpproxy
-
-import (
- "net/url"
- "reflect"
- "sort"
- "testing"
- "time"
-
- "go.uber.org/zap"
-)
-
-func TestNewDirectorScheme(t *testing.T) {
- tests := []struct {
- urls []string
- want []string
- }{
- {
- urls: []string{"http://192.0.2.8:4002", "http://example.com:8080"},
- want: []string{"http://192.0.2.8:4002", "http://example.com:8080"},
- },
- {
- urls: []string{"https://192.0.2.8:4002", "https://example.com:8080"},
- want: []string{"https://192.0.2.8:4002", "https://example.com:8080"},
- },
-
- // accept urls without a port
- {
- urls: []string{"http://192.0.2.8"},
- want: []string{"http://192.0.2.8"},
- },
-
- // accept urls even if they are garbage
- {
- urls: []string{"http://."},
- want: []string{"http://."},
- },
- }
-
- for i, tt := range tests {
- uf := func() []string {
- return tt.urls
- }
- got := newDirector(zap.NewExample(), uf, time.Minute, time.Minute)
-
- var gep []string
- for _, ep := range got.ep {
- gep = append(gep, ep.URL.String())
- }
- sort.Strings(tt.want)
- sort.Strings(gep)
- if !reflect.DeepEqual(tt.want, gep) {
- t.Errorf("#%d: want endpoints = %#v, got = %#v", i, tt.want, gep)
- }
- }
-}
-
-func TestDirectorEndpointsFiltering(t *testing.T) {
- d := director{
- ep: []*endpoint{
- {
- URL: url.URL{Scheme: "http", Host: "192.0.2.5:5050"},
- Available: false,
- },
- {
- URL: url.URL{Scheme: "http", Host: "192.0.2.4:4000"},
- Available: true,
- },
- },
- }
-
- got := d.endpoints()
- want := []*endpoint{
- {
- URL: url.URL{Scheme: "http", Host: "192.0.2.4:4000"},
- Available: true,
- },
- }
-
- if !reflect.DeepEqual(want, got) {
- t.Fatalf("directed to incorrect endpoint: want = %#v, got = %#v", want, got)
- }
-}
diff --git a/server/proxy/httpproxy/doc.go b/server/proxy/httpproxy/doc.go
deleted file mode 100644
index 7a45099120c..00000000000
--- a/server/proxy/httpproxy/doc.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package httpproxy implements etcd httpproxy. The etcd proxy acts as a reverse
-// http proxy forwarding client requests to active etcd cluster members, and does
-// not participate in consensus.
-package httpproxy
diff --git a/server/proxy/httpproxy/metrics.go b/server/proxy/httpproxy/metrics.go
deleted file mode 100644
index fcbedc28a88..00000000000
--- a/server/proxy/httpproxy/metrics.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httpproxy
-
-import (
- "net/http"
- "strconv"
- "time"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-var (
- requestsIncoming = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "proxy",
- Name: "requests_total",
- Help: "Counter requests incoming by method.",
- }, []string{"method"})
-
- requestsHandled = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "proxy",
- Name: "handled_total",
- Help: "Counter of requests fully handled (by authoratitave servers)",
- }, []string{"method", "code"})
-
- requestsDropped = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "proxy",
- Name: "dropped_total",
- Help: "Counter of requests dropped on the proxy.",
- }, []string{"method", "proxying_error"})
-
- requestsHandlingSec = prometheus.NewHistogramVec(
- prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "proxy",
- Name: "handling_duration_seconds",
- Help: "Bucketed histogram of handling time of successful events (non-watches), by method (GET/PUT etc.).",
-
- // lowest bucket start of upper bound 0.0005 sec (0.5 ms) with factor 2
- // highest bucket start of 0.0005 sec * 2^12 == 2.048 sec
- Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13),
- }, []string{"method"})
-)
-
-type forwardingError string
-
-const (
- zeroEndpoints forwardingError = "zero_endpoints"
- failedSendingRequest forwardingError = "failed_sending_request"
- failedGettingResponse forwardingError = "failed_getting_response"
-)
-
-func init() {
- prometheus.MustRegister(requestsIncoming)
- prometheus.MustRegister(requestsHandled)
- prometheus.MustRegister(requestsDropped)
- prometheus.MustRegister(requestsHandlingSec)
-}
-
-func reportIncomingRequest(request *http.Request) {
- requestsIncoming.WithLabelValues(request.Method).Inc()
-}
-
-func reportRequestHandled(request *http.Request, response *http.Response, startTime time.Time) {
- method := request.Method
- requestsHandled.WithLabelValues(method, strconv.Itoa(response.StatusCode)).Inc()
- requestsHandlingSec.WithLabelValues(method).Observe(time.Since(startTime).Seconds())
-}
-
-func reportRequestDropped(request *http.Request, err forwardingError) {
- requestsDropped.WithLabelValues(request.Method, string(err)).Inc()
-}
diff --git a/server/proxy/httpproxy/proxy.go b/server/proxy/httpproxy/proxy.go
deleted file mode 100644
index c8f27bf01df..00000000000
--- a/server/proxy/httpproxy/proxy.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httpproxy
-
-import (
- "encoding/json"
- "net/http"
- "strings"
- "time"
-
- "go.uber.org/zap"
- "golang.org/x/net/http2"
-)
-
-const (
- // DefaultMaxIdleConnsPerHost indicates the default maximum idle connection
- // count maintained between proxy and each member. We set it to 128 to
- // let proxy handle 128 concurrent requests in long term smoothly.
- // If the number of concurrent requests is bigger than this value,
- // proxy needs to create one new connection when handling each request in
- // the delta, which is bad because the creation consumes resource and
- // may eat up ephemeral ports.
- DefaultMaxIdleConnsPerHost = 128
-)
-
-// GetProxyURLs is a function which should return the current set of URLs to
-// which client requests should be proxied. This function will be queried
-// periodically by the proxy Handler to refresh the set of available
-// backends.
-type GetProxyURLs func() []string
-
-// NewHandler creates a new HTTP handler, listening on the given transport,
-// which will proxy requests to an etcd cluster.
-// The handler will periodically update its view of the cluster.
-func NewHandler(lg *zap.Logger, t *http.Transport, urlsFunc GetProxyURLs, failureWait time.Duration, refreshInterval time.Duration) http.Handler {
- if lg == nil {
- lg = zap.NewNop()
- }
- if t.TLSClientConfig != nil {
- // Enable http2, see Issue 5033.
- err := http2.ConfigureTransport(t)
- if err != nil {
- lg.Info("Error enabling Transport HTTP/2 support", zap.Error(err))
- }
- }
-
- p := &reverseProxy{
- lg: lg,
- director: newDirector(lg, urlsFunc, failureWait, refreshInterval),
- transport: t,
- }
-
- mux := http.NewServeMux()
- mux.Handle("/", p)
- mux.HandleFunc("/v2/config/local/proxy", p.configHandler)
-
- return mux
-}
-
-// NewReadonlyHandler wraps the given HTTP handler to allow only GET requests
-func NewReadonlyHandler(hdlr http.Handler) http.Handler {
- readonly := readonlyHandlerFunc(hdlr)
- return http.HandlerFunc(readonly)
-}
-
-func readonlyHandlerFunc(next http.Handler) func(http.ResponseWriter, *http.Request) {
- return func(w http.ResponseWriter, req *http.Request) {
- if req.Method != "GET" {
- w.WriteHeader(http.StatusNotImplemented)
- return
- }
-
- next.ServeHTTP(w, req)
- }
-}
-
-func (p *reverseProxy) configHandler(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET") {
- return
- }
-
- eps := p.director.endpoints()
- epstr := make([]string, len(eps))
- for i, e := range eps {
- epstr[i] = e.URL.String()
- }
-
- proxyConfig := struct {
- Endpoints []string `json:"endpoints"`
- }{
- Endpoints: epstr,
- }
-
- json.NewEncoder(w).Encode(proxyConfig)
-}
-
-// allowMethod verifies that the given method is one of the allowed methods,
-// and if not, it writes an error to w. A boolean is returned indicating
-// whether or not the method is allowed.
-func allowMethod(w http.ResponseWriter, m string, ms ...string) bool {
- for _, meth := range ms {
- if m == meth {
- return true
- }
- }
- w.Header().Set("Allow", strings.Join(ms, ","))
- http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
- return false
-}
diff --git a/server/proxy/httpproxy/proxy_test.go b/server/proxy/httpproxy/proxy_test.go
deleted file mode 100644
index ae4291a44a0..00000000000
--- a/server/proxy/httpproxy/proxy_test.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httpproxy
-
-import (
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "net/url"
- "testing"
- "time"
-
- "go.uber.org/zap"
-)
-
-func TestReadonlyHandler(t *testing.T) {
- fixture := func(w http.ResponseWriter, req *http.Request) {
- w.WriteHeader(http.StatusOK)
- }
- hdlrFunc := readonlyHandlerFunc(http.HandlerFunc(fixture))
-
- tests := []struct {
- method string
- want int
- }{
- // GET is only passing method
- {"GET", http.StatusOK},
-
- // everything but GET is StatusNotImplemented
- {"POST", http.StatusNotImplemented},
- {"PUT", http.StatusNotImplemented},
- {"PATCH", http.StatusNotImplemented},
- {"DELETE", http.StatusNotImplemented},
- {"FOO", http.StatusNotImplemented},
- }
-
- for i, tt := range tests {
- req, _ := http.NewRequest(tt.method, "http://example.com", nil)
- rr := httptest.NewRecorder()
- hdlrFunc(rr, req)
-
- if tt.want != rr.Code {
- t.Errorf("#%d: incorrect HTTP status code: method=%s want=%d got=%d", i, tt.method, tt.want, rr.Code)
- }
- }
-}
-
-func TestConfigHandlerGET(t *testing.T) {
- var err error
- us := make([]*url.URL, 3)
- us[0], err = url.Parse("http://example1.com")
- if err != nil {
- t.Fatal(err)
- }
- us[1], err = url.Parse("http://example2.com")
- if err != nil {
- t.Fatal(err)
- }
- us[2], err = url.Parse("http://example3.com")
- if err != nil {
- t.Fatal(err)
- }
-
- lg := zap.NewExample()
- rp := reverseProxy{
- lg: lg,
- director: &director{
- lg: lg,
- ep: []*endpoint{
- newEndpoint(lg, *us[0], 1*time.Second),
- newEndpoint(lg, *us[1], 1*time.Second),
- newEndpoint(lg, *us[2], 1*time.Second),
- },
- },
- }
-
- req, _ := http.NewRequest("GET", "http://example.com//v2/config/local/proxy", nil)
- rr := httptest.NewRecorder()
- rp.configHandler(rr, req)
-
- wbody := "{\"endpoints\":[\"http://example1.com\",\"http://example2.com\",\"http://example3.com\"]}\n"
-
- body, err := ioutil.ReadAll(rr.Body)
- if err != nil {
- t.Fatal(err)
- }
-
- if string(body) != wbody {
- t.Errorf("body = %s, want %s", string(body), wbody)
- }
-}
diff --git a/server/proxy/httpproxy/reverse.go b/server/proxy/httpproxy/reverse.go
deleted file mode 100644
index 83247486b11..00000000000
--- a/server/proxy/httpproxy/reverse.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httpproxy
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/url"
- "strings"
- "sync/atomic"
- "time"
-
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes"
-
- "go.uber.org/zap"
-)
-
-var (
- // Hop-by-hop headers. These are removed when sent to the backend.
- // http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html
- // This list of headers borrowed from stdlib httputil.ReverseProxy
- singleHopHeaders = []string{
- "Connection",
- "Keep-Alive",
- "Proxy-Authenticate",
- "Proxy-Authorization",
- "Te", // canonicalized version of "TE"
- "Trailers",
- "Transfer-Encoding",
- "Upgrade",
- }
-)
-
-func removeSingleHopHeaders(hdrs *http.Header) {
- for _, h := range singleHopHeaders {
- hdrs.Del(h)
- }
-}
-
-type reverseProxy struct {
- lg *zap.Logger
- director *director
- transport http.RoundTripper
-}
-
-func (p *reverseProxy) ServeHTTP(rw http.ResponseWriter, clientreq *http.Request) {
- reportIncomingRequest(clientreq)
- proxyreq := new(http.Request)
- *proxyreq = *clientreq
- startTime := time.Now()
-
- var (
- proxybody []byte
- err error
- )
-
- if clientreq.Body != nil {
- proxybody, err = ioutil.ReadAll(clientreq.Body)
- if err != nil {
- msg := fmt.Sprintf("failed to read request body: %v", err)
- p.lg.Info("failed to read request body", zap.Error(err))
- e := httptypes.NewHTTPError(http.StatusInternalServerError, "httpproxy: "+msg)
- if we := e.WriteTo(rw); we != nil {
- p.lg.Debug(
- "error writing HTTPError to remote addr",
- zap.String("remote-addr", clientreq.RemoteAddr),
- zap.Error(we),
- )
- }
- return
- }
- }
-
- // deep-copy the headers, as these will be modified below
- proxyreq.Header = make(http.Header)
- copyHeader(proxyreq.Header, clientreq.Header)
-
- normalizeRequest(proxyreq)
- removeSingleHopHeaders(&proxyreq.Header)
- maybeSetForwardedFor(proxyreq)
-
- endpoints := p.director.endpoints()
- if len(endpoints) == 0 {
- msg := "zero endpoints currently available"
- reportRequestDropped(clientreq, zeroEndpoints)
-
- // TODO: limit the rate of the error logging.
- p.lg.Info(msg)
- e := httptypes.NewHTTPError(http.StatusServiceUnavailable, "httpproxy: "+msg)
- if we := e.WriteTo(rw); we != nil {
- p.lg.Debug(
- "error writing HTTPError to remote addr",
- zap.String("remote-addr", clientreq.RemoteAddr),
- zap.Error(we),
- )
- }
- return
- }
-
- var requestClosed int32
- completeCh := make(chan bool, 1)
- closeNotifier, ok := rw.(http.CloseNotifier)
- ctx, cancel := context.WithCancel(context.Background())
- proxyreq = proxyreq.WithContext(ctx)
- defer cancel()
- if ok {
- closeCh := closeNotifier.CloseNotify()
- go func() {
- select {
- case <-closeCh:
- atomic.StoreInt32(&requestClosed, 1)
- p.lg.Info(
- "client closed request prematurely",
- zap.String("remote-addr", clientreq.RemoteAddr),
- )
- cancel()
- case <-completeCh:
- }
- }()
-
- defer func() {
- completeCh <- true
- }()
- }
-
- var res *http.Response
-
- for _, ep := range endpoints {
- if proxybody != nil {
- proxyreq.Body = ioutil.NopCloser(bytes.NewBuffer(proxybody))
- }
- redirectRequest(proxyreq, ep.URL)
-
- res, err = p.transport.RoundTrip(proxyreq)
- if atomic.LoadInt32(&requestClosed) == 1 {
- return
- }
- if err != nil {
- reportRequestDropped(clientreq, failedSendingRequest)
- p.lg.Info(
- "failed to direct request",
- zap.String("url", ep.URL.String()),
- zap.Error(err),
- )
- ep.Failed()
- continue
- }
-
- break
- }
-
- if res == nil {
- // TODO: limit the rate of the error logging.
- msg := fmt.Sprintf("unable to get response from %d endpoint(s)", len(endpoints))
- reportRequestDropped(clientreq, failedGettingResponse)
- p.lg.Info(msg)
- e := httptypes.NewHTTPError(http.StatusBadGateway, "httpproxy: "+msg)
- if we := e.WriteTo(rw); we != nil {
- p.lg.Debug(
- "error writing HTTPError to remote addr",
- zap.String("remote-addr", clientreq.RemoteAddr),
- zap.Error(we),
- )
- }
- return
- }
-
- defer res.Body.Close()
- reportRequestHandled(clientreq, res, startTime)
- removeSingleHopHeaders(&res.Header)
- copyHeader(rw.Header(), res.Header)
-
- rw.WriteHeader(res.StatusCode)
- io.Copy(rw, res.Body)
-}
-
-func copyHeader(dst, src http.Header) {
- for k, vv := range src {
- for _, v := range vv {
- dst.Add(k, v)
- }
- }
-}
-
-func redirectRequest(req *http.Request, loc url.URL) {
- req.URL.Scheme = loc.Scheme
- req.URL.Host = loc.Host
-}
-
-func normalizeRequest(req *http.Request) {
- req.Proto = "HTTP/1.1"
- req.ProtoMajor = 1
- req.ProtoMinor = 1
- req.Close = false
-}
-
-func maybeSetForwardedFor(req *http.Request) {
- clientIP, _, err := net.SplitHostPort(req.RemoteAddr)
- if err != nil {
- return
- }
-
- // If we aren't the first proxy retain prior
- // X-Forwarded-For information as a comma+space
- // separated list and fold multiple headers into one.
- if prior, ok := req.Header["X-Forwarded-For"]; ok {
- clientIP = strings.Join(prior, ", ") + ", " + clientIP
- }
- req.Header.Set("X-Forwarded-For", clientIP)
-}
diff --git a/server/proxy/httpproxy/reverse_test.go b/server/proxy/httpproxy/reverse_test.go
deleted file mode 100644
index cbcb3cf1904..00000000000
--- a/server/proxy/httpproxy/reverse_test.go
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httpproxy
-
-import (
- "bytes"
- "errors"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "net/url"
- "reflect"
- "testing"
-
- "go.uber.org/zap"
-)
-
-type staticRoundTripper struct {
- res *http.Response
- err error
-}
-
-func (srt *staticRoundTripper) RoundTrip(*http.Request) (*http.Response, error) {
- return srt.res, srt.err
-}
-
-func TestReverseProxyServe(t *testing.T) {
- u := url.URL{Scheme: "http", Host: "192.0.2.3:4040"}
- lg := zap.NewExample()
-
- tests := []struct {
- eps []*endpoint
- rt http.RoundTripper
- want int
- }{
- // no endpoints available so no requests are even made
- {
- eps: []*endpoint{},
- rt: &staticRoundTripper{
- res: &http.Response{
- StatusCode: http.StatusCreated,
- Body: ioutil.NopCloser(&bytes.Reader{}),
- },
- },
- want: http.StatusServiceUnavailable,
- },
-
- // error is returned from one endpoint that should be available
- {
- eps: []*endpoint{{URL: u, Available: true}},
- rt: &staticRoundTripper{err: errors.New("what a bad trip")},
- want: http.StatusBadGateway,
- },
-
- // endpoint is available and returns success
- {
- eps: []*endpoint{{URL: u, Available: true}},
- rt: &staticRoundTripper{
- res: &http.Response{
- StatusCode: http.StatusCreated,
- Body: ioutil.NopCloser(&bytes.Reader{}),
- Header: map[string][]string{"Content-Type": {"application/json"}},
- },
- },
- want: http.StatusCreated,
- },
- }
-
- for i, tt := range tests {
- rp := reverseProxy{
- lg: lg,
- director: &director{lg: lg, ep: tt.eps},
- transport: tt.rt,
- }
-
- req, _ := http.NewRequest("GET", "http://192.0.2.2:2379", nil)
- rr := httptest.NewRecorder()
- rp.ServeHTTP(rr, req)
-
- if rr.Code != tt.want {
- t.Errorf("#%d: unexpected HTTP status code: want = %d, got = %d", i, tt.want, rr.Code)
- }
- if gct := rr.Header().Get("Content-Type"); gct != "application/json" {
- t.Errorf("#%d: Content-Type = %s, want %s", i, gct, "application/json")
- }
- }
-}
-
-func TestRedirectRequest(t *testing.T) {
- loc := url.URL{
- Scheme: "http",
- Host: "bar.example.com",
- }
-
- req := &http.Request{
- Method: "GET",
- Host: "foo.example.com",
- URL: &url.URL{
- Host: "foo.example.com",
- Path: "/v2/keys/baz",
- },
- }
-
- redirectRequest(req, loc)
-
- want := &http.Request{
- Method: "GET",
- // this field must not change
- Host: "foo.example.com",
- URL: &url.URL{
- // the Scheme field is updated to that of the provided URL
- Scheme: "http",
- // the Host field is updated to that of the provided URL
- Host: "bar.example.com",
- Path: "/v2/keys/baz",
- },
- }
-
- if !reflect.DeepEqual(want, req) {
- t.Fatalf("HTTP request does not match expected criteria: want=%#v got=%#v", want, req)
- }
-}
-
-func TestMaybeSetForwardedFor(t *testing.T) {
- tests := []struct {
- raddr string
- fwdFor string
- want string
- }{
- {"192.0.2.3:8002", "", "192.0.2.3"},
- {"192.0.2.3:8002", "192.0.2.2", "192.0.2.2, 192.0.2.3"},
- {"192.0.2.3:8002", "192.0.2.1, 192.0.2.2", "192.0.2.1, 192.0.2.2, 192.0.2.3"},
- {"example.com:8002", "", "example.com"},
-
- // While these cases look valid, golang net/http will not let it happen
- // The RemoteAddr field will always be a valid host:port
- {":8002", "", ""},
- {"192.0.2.3", "", ""},
-
- // blatantly invalid host w/o a port
- {"12", "", ""},
- {"12", "192.0.2.3", "192.0.2.3"},
- }
-
- for i, tt := range tests {
- req := &http.Request{
- RemoteAddr: tt.raddr,
- Header: make(http.Header),
- }
-
- if tt.fwdFor != "" {
- req.Header.Set("X-Forwarded-For", tt.fwdFor)
- }
-
- maybeSetForwardedFor(req)
- got := req.Header.Get("X-Forwarded-For")
- if tt.want != got {
- t.Errorf("#%d: incorrect header: want = %q, got = %q", i, tt.want, got)
- }
- }
-}
-
-func TestRemoveSingleHopHeaders(t *testing.T) {
- hdr := http.Header(map[string][]string{
- // single-hop headers that should be removed
- "Connection": {"close"},
- "Keep-Alive": {"foo"},
- "Proxy-Authenticate": {"Basic realm=example.com"},
- "Proxy-Authorization": {"foo"},
- "Te": {"deflate,gzip"},
- "Trailers": {"ETag"},
- "Transfer-Encoding": {"chunked"},
- "Upgrade": {"WebSocket"},
-
- // headers that should persist
- "Accept": {"application/json"},
- "X-Foo": {"Bar"},
- })
-
- removeSingleHopHeaders(&hdr)
-
- want := http.Header(map[string][]string{
- "Accept": {"application/json"},
- "X-Foo": {"Bar"},
- })
-
- if !reflect.DeepEqual(want, hdr) {
- t.Fatalf("unexpected result: want = %#v, got = %#v", want, hdr)
- }
-}
-
-func TestCopyHeader(t *testing.T) {
- tests := []struct {
- src http.Header
- dst http.Header
- want http.Header
- }{
- {
- src: http.Header(map[string][]string{
- "Foo": {"bar", "baz"},
- }),
- dst: http.Header(map[string][]string{}),
- want: http.Header(map[string][]string{
- "Foo": {"bar", "baz"},
- }),
- },
- {
- src: http.Header(map[string][]string{
- "Foo": {"bar"},
- "Ping": {"pong"},
- }),
- dst: http.Header(map[string][]string{}),
- want: http.Header(map[string][]string{
- "Foo": {"bar"},
- "Ping": {"pong"},
- }),
- },
- {
- src: http.Header(map[string][]string{
- "Foo": {"bar", "baz"},
- }),
- dst: http.Header(map[string][]string{
- "Foo": {"qux"},
- }),
- want: http.Header(map[string][]string{
- "Foo": {"qux", "bar", "baz"},
- }),
- },
- }
-
- for i, tt := range tests {
- copyHeader(tt.dst, tt.src)
- if !reflect.DeepEqual(tt.dst, tt.want) {
- t.Errorf("#%d: unexpected headers: want = %v, got = %v", i, tt.want, tt.dst)
- }
- }
-}
diff --git a/server/proxy/tcpproxy/userspace.go b/server/proxy/tcpproxy/userspace.go
index 81421bffa70..c579584b613 100644
--- a/server/proxy/tcpproxy/userspace.go
+++ b/server/proxy/tcpproxy/userspace.go
@@ -75,11 +75,11 @@ func (tp *TCPProxy) Run() error {
tp.MonitorInterval = 5 * time.Minute
}
for _, srv := range tp.Endpoints {
- addr := fmt.Sprintf("%s:%d", srv.Target, srv.Port)
+ addr := net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port))
tp.remotes = append(tp.remotes, &remote{srv: srv, addr: addr})
}
- eps := []string{}
+ var eps []string
for _, ep := range tp.Endpoints {
eps = append(eps, fmt.Sprintf("%s:%d", ep.Target, ep.Port))
}
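
net.JoinHostPort is used instead of fmt.Sprintf("%s:%d", ...) because it brackets IPv6 literals, producing an address that net.Dial can actually parse. A quick comparison:

package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	// fmt.Sprintf produces an ambiguous address for IPv6 literals...
	fmt.Printf("%s:%d\n", "::1", 2379) // ::1:2379 (not parsable as host:port)

	// ...while net.JoinHostPort brackets them correctly.
	fmt.Println(net.JoinHostPort("::1", strconv.Itoa(2379))) // [::1]:2379
	fmt.Println(net.JoinHostPort("etcd-0.example", "2379"))  // etcd-0.example:2379
}
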
diff --git a/server/proxy/tcpproxy/userspace_test.go b/server/proxy/tcpproxy/userspace_test.go
index 9b4642dfec7..2b98cd004d3 100644
--- a/server/proxy/tcpproxy/userspace_test.go
+++ b/server/proxy/tcpproxy/userspace_test.go
@@ -16,7 +16,7 @@ package tcpproxy
import (
"fmt"
- "io/ioutil"
+ "io"
"net"
"net/http"
"net/http/httptest"
@@ -57,7 +57,7 @@ func TestUserspaceProxy(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- got, gerr := ioutil.ReadAll(res.Body)
+ got, gerr := io.ReadAll(res.Body)
res.Body.Close()
if gerr != nil {
t.Fatal(gerr)
@@ -118,7 +118,7 @@ func TestUserspaceProxyPriority(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- got, gerr := ioutil.ReadAll(res.Body)
+ got, gerr := io.ReadAll(res.Body)
res.Body.Close()
if gerr != nil {
t.Fatal(gerr)
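
The io/ioutil to io moves here and in grpcproxy/metrics.go track the Go 1.16 deprecation of io/ioutil; since that release, ioutil.ReadAll is a thin wrapper around io.ReadAll. A minimal sketch of the replacement call against a throwaway test server:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "hello")
	}))
	defer ts.Close()

	res, err := http.Get(ts.URL)
	if err != nil {
		panic(err)
	}
	body, err := io.ReadAll(res.Body) // direct replacement for ioutil.ReadAll
	res.Body.Close()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
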
diff --git a/server/storage/backend.go b/server/storage/backend.go
new file mode 100644
index 00000000000..b7b0d6861ad
--- /dev/null
+++ b/server/storage/backend.go
@@ -0,0 +1,114 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+func newBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend {
+ bcfg := backend.DefaultBackendConfig(cfg.Logger)
+ bcfg.Path = cfg.BackendPath()
+ bcfg.UnsafeNoFsync = cfg.UnsafeNoFsync
+ if cfg.BackendBatchLimit != 0 {
+ bcfg.BatchLimit = cfg.BackendBatchLimit
+ if cfg.Logger != nil {
+ cfg.Logger.Info("setting backend batch limit", zap.Int("batch limit", cfg.BackendBatchLimit))
+ }
+ }
+ if cfg.BackendBatchInterval != 0 {
+ bcfg.BatchInterval = cfg.BackendBatchInterval
+ if cfg.Logger != nil {
+ cfg.Logger.Info("setting backend batch interval", zap.Duration("batch interval", cfg.BackendBatchInterval))
+ }
+ }
+ bcfg.BackendFreelistType = cfg.BackendFreelistType
+ bcfg.Logger = cfg.Logger
+ if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes {
+ // permit 10% excess over quota for disarm
+ bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10)
+ }
+ bcfg.Mlock = cfg.ExperimentalMemoryMlock
+ bcfg.Hooks = hooks
+ return backend.New(bcfg)
+}
+
+// OpenSnapshotBackend renames a snapshot db to the current etcd db and opens it.
+func OpenSnapshotBackend(cfg config.ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot, hooks *BackendHooks) (backend.Backend, error) {
+ snapPath, err := ss.DBFilePath(snapshot.Metadata.Index)
+ if err != nil {
+ return nil, fmt.Errorf("failed to find database snapshot file (%w)", err)
+ }
+ if err := os.Rename(snapPath, cfg.BackendPath()); err != nil {
+ return nil, fmt.Errorf("failed to rename database snapshot file (%w)", err)
+ }
+ return OpenBackend(cfg, hooks), nil
+}
+
+// OpenBackend returns a backend using the current etcd db.
+func OpenBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend {
+ fn := cfg.BackendPath()
+
+ now, beOpened := time.Now(), make(chan backend.Backend)
+ go func() {
+ beOpened <- newBackend(cfg, hooks)
+ }()
+
+ defer func() {
+ cfg.Logger.Info("opened backend db", zap.String("path", fn), zap.Duration("took", time.Since(now)))
+ }()
+
+ select {
+ case be := <-beOpened:
+ return be
+
+ case <-time.After(10 * time.Second):
+ cfg.Logger.Info(
+ "db file is flocked by another process, or taking too long",
+ zap.String("path", fn),
+ zap.Duration("took", time.Since(now)),
+ )
+ }
+
+ return <-beOpened
+}
+
+// RecoverSnapshotBackend recovers the DB from a snapshot in case etcd crashes
+// before updating the backend db after persisting raft snapshot to disk,
+// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this
+// case, replace the db with the snapshot db sent by the leader.
+func RecoverSnapshotBackend(cfg config.ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot, beExist bool, hooks *BackendHooks) (backend.Backend, error) {
+ consistentIndex := uint64(0)
+ if beExist {
+ consistentIndex, _ = schema.ReadConsistentIndex(oldbe.ReadTx())
+ }
+ if snapshot.Metadata.Index <= consistentIndex {
+ cfg.Logger.Info("Skipping snapshot backend", zap.Uint64("consistent-index", consistentIndex), zap.Uint64("snapshot-index", snapshot.Metadata.Index))
+ return oldbe, nil
+ }
+ cfg.Logger.Info("Recovering from snapshot backend", zap.Uint64("consistent-index", consistentIndex), zap.Uint64("snapshot-index", snapshot.Metadata.Index))
+ oldbe.Close()
+ return OpenSnapshotBackend(cfg, snap.New(cfg.Logger, cfg.SnapDir()), snapshot, hooks)
+}
diff --git a/server/storage/backend/backend.go b/server/storage/backend/backend.go
new file mode 100644
index 00000000000..275064f083b
--- /dev/null
+++ b/server/storage/backend/backend.go
@@ -0,0 +1,724 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "fmt"
+ "hash/crc32"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ humanize "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+
+ bolt "go.etcd.io/bbolt"
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+)
+
+var (
+ defaultBatchLimit = 10000
+ defaultBatchInterval = 100 * time.Millisecond
+
+ defragLimit = 10000
+
+ // InitialMmapSize is the initial size of the mmapped region. Setting this larger than
+ // the potential max db size can prevent the writer from blocking readers.
+ // This only works on Linux.
+ InitialMmapSize = uint64(10 * 1024 * 1024 * 1024)
+
+ // minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning.
+ minSnapshotWarningTimeout = 30 * time.Second
+)
+
+type Backend interface {
+ // ReadTx returns a read transaction. It is replaced by ConcurrentReadTx in the main data path, see #10523.
+ ReadTx() ReadTx
+ BatchTx() BatchTx
+ // ConcurrentReadTx returns a non-blocking read transaction.
+ ConcurrentReadTx() ReadTx
+
+ Snapshot() Snapshot
+ Hash(ignores func(bucketName, keyName []byte) bool) (uint32, error)
+ // Size returns the current physically allocated size of the backend.
+ // The backend can hold DB space that is not utilized at the moment,
+ // since it may pre-allocate space or keep unused space for recycling.
+ // Use SizeInUse() instead for the actual DB size.
+ Size() int64
+ // SizeInUse returns the current size of the backend logically in use.
+ // Since the backend can manage free space in a non-byte unit such as
+ // a number of pages, the returned value may not be exactly accurate in bytes.
+ SizeInUse() int64
+ // OpenReadTxN returns the number of currently open read transactions in the backend.
+ OpenReadTxN() int64
+ Defrag() error
+ ForceCommit()
+ Close() error
+
+ // SetTxPostLockInsideApplyHook sets a txPostLockInsideApplyHook.
+ SetTxPostLockInsideApplyHook(func())
+}
+
+type Snapshot interface {
+ // Size gets the size of the snapshot.
+ Size() int64
+ // WriteTo writes the snapshot into the given writer.
+ WriteTo(w io.Writer) (n int64, err error)
+ // Close closes the snapshot.
+ Close() error
+}
+
+type txReadBufferCache struct {
+ mu sync.Mutex
+ buf *txReadBuffer
+ bufVersion uint64
+}
+
+type backend struct {
+ // size and commits are used with atomic operations so they must be
+ // 64-bit aligned, otherwise 32-bit tests will crash
+
+ // size is the number of bytes allocated in the backend
+ size int64
+ // sizeInUse is the number of bytes actually used in the backend
+ sizeInUse int64
+ // commits counts number of commits since start
+ commits int64
+ // openReadTxN is the number of currently open read transactions in the backend
+ openReadTxN int64
+ // mlock prevents the backend database file from being swapped out
+ mlock bool
+
+ mu sync.RWMutex
+ bopts *bolt.Options
+ db *bolt.DB
+
+ batchInterval time.Duration
+ batchLimit int
+ batchTx *batchTxBuffered
+
+ readTx *readTx
+ // txReadBufferCache mirrors "txReadBuffer" within "readTx" -- readTx.baseReadTx.buf.
+ // When creating "concurrentReadTx":
+ // - if the cache is up-to-date, "readTx.baseReadTx.buf" copy can be skipped
+ // - if the cache is empty or outdated, "readTx.baseReadTx.buf" copy is required
+ txReadBufferCache txReadBufferCache
+
+ stopc chan struct{}
+ donec chan struct{}
+
+ hooks Hooks
+
+ // txPostLockInsideApplyHook is called each time right after locking the tx.
+ txPostLockInsideApplyHook func()
+
+ lg *zap.Logger
+}
+
+type BackendConfig struct {
+ // Path is the file path to the backend file.
+ Path string
+ // BatchInterval is the maximum time before flushing the BatchTx.
+ BatchInterval time.Duration
+ // BatchLimit is the maximum puts before flushing the BatchTx.
+ BatchLimit int
+ // BackendFreelistType is the backend boltdb's freelist type.
+ BackendFreelistType bolt.FreelistType
+ // MmapSize is the number of bytes to mmap for the backend.
+ MmapSize uint64
+ // Logger logs backend-side operations.
+ Logger *zap.Logger
+ // UnsafeNoFsync disables all uses of fsync.
+ UnsafeNoFsync bool `json:"unsafe-no-fsync"`
+ // Mlock prevents the backend database file from being swapped out
+ Mlock bool
+
+ // Hooks are getting executed during lifecycle of Backend's transactions.
+ Hooks Hooks
+}
+
+type BackendConfigOption func(*BackendConfig)
+
+func DefaultBackendConfig(lg *zap.Logger) BackendConfig {
+ return BackendConfig{
+ BatchInterval: defaultBatchInterval,
+ BatchLimit: defaultBatchLimit,
+ MmapSize: InitialMmapSize,
+ Logger: lg,
+ }
+}
+
+func New(bcfg BackendConfig) Backend {
+ return newBackend(bcfg)
+}
+
+func WithMmapSize(size uint64) BackendConfigOption {
+ return func(bcfg *BackendConfig) {
+ bcfg.MmapSize = size
+ }
+}
+
+func NewDefaultBackend(lg *zap.Logger, path string, opts ...BackendConfigOption) Backend {
+ bcfg := DefaultBackendConfig(lg)
+ bcfg.Path = path
+ for _, opt := range opts {
+ opt(&bcfg)
+ }
+
+ return newBackend(bcfg)
+}
+
+func newBackend(bcfg BackendConfig) *backend {
+ bopts := &bolt.Options{}
+ if boltOpenOptions != nil {
+ *bopts = *boltOpenOptions
+ }
+
+ if bcfg.Logger == nil {
+ bcfg.Logger = zap.NewNop()
+ }
+
+ bopts.InitialMmapSize = bcfg.mmapSize()
+ bopts.FreelistType = bcfg.BackendFreelistType
+ bopts.NoSync = bcfg.UnsafeNoFsync
+ bopts.NoGrowSync = bcfg.UnsafeNoFsync
+ bopts.Mlock = bcfg.Mlock
+ bopts.Logger = newBoltLoggerZap(bcfg)
+
+ db, err := bolt.Open(bcfg.Path, 0o600, bopts)
+ if err != nil {
+ bcfg.Logger.Panic("failed to open database", zap.String("path", bcfg.Path), zap.Error(err))
+ }
+
+ // In the future, we may want to make buffering optional for low-concurrency systems
+ // or dynamically swap between buffered/non-buffered depending on workload.
+ b := &backend{
+ bopts: bopts,
+ db: db,
+
+ batchInterval: bcfg.BatchInterval,
+ batchLimit: bcfg.BatchLimit,
+ mlock: bcfg.Mlock,
+
+ readTx: &readTx{
+ baseReadTx: baseReadTx{
+ buf: txReadBuffer{
+ txBuffer: txBuffer{make(map[BucketID]*bucketBuffer)},
+ bufVersion: 0,
+ },
+ buckets: make(map[BucketID]*bolt.Bucket),
+ txWg: new(sync.WaitGroup),
+ txMu: new(sync.RWMutex),
+ },
+ },
+ txReadBufferCache: txReadBufferCache{
+ mu: sync.Mutex{},
+ bufVersion: 0,
+ buf: nil,
+ },
+
+ stopc: make(chan struct{}),
+ donec: make(chan struct{}),
+
+ lg: bcfg.Logger,
+ }
+
+ b.batchTx = newBatchTxBuffered(b)
+ // We set it after newBatchTxBuffered to skip the 'empty' commit.
+ b.hooks = bcfg.Hooks
+
+ go b.run()
+ return b
+}
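+
+// Lifecycle note: newBackend starts the periodic commit goroutine (run) right away;
+// callers are expected to call Close, which stops that goroutine (via stopc/donec)
+// before closing the bolt DB.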
+
+// BatchTx returns the current batch tx in the coalescer. The tx can be used for read and
+// write operations. The write result can be retrieved within the same tx immediately.
+// The write result is isolated from other txs until the current one gets committed.
+func (b *backend) BatchTx() BatchTx {
+ return b.batchTx
+}
+
+func (b *backend) SetTxPostLockInsideApplyHook(hook func()) {
+ // It needs to lock the batchTx, because the periodic commit
+ // may be accessing the txPostLockInsideApplyHook at the moment.
+ b.batchTx.lock()
+ defer b.batchTx.Unlock()
+ b.txPostLockInsideApplyHook = hook
+}
+
+func (b *backend) ReadTx() ReadTx { return b.readTx }
+
+// ConcurrentReadTx creates and returns a new ReadTx, which:
+// A) creates and keeps a copy of backend.readTx.txReadBuffer,
+// B) references the boltdb read Tx (and its bucket cache) of current batch interval.
+func (b *backend) ConcurrentReadTx() ReadTx {
+ b.readTx.RLock()
+ defer b.readTx.RUnlock()
+ // prevent the boltdb read Tx from being rolled back until the store read Tx is done. Needs to be called while holding readTx.RLock().
+ b.readTx.txWg.Add(1)
+
+ // TODO: might want to copy the read buffer lazily - create copy when A) end of a write transaction B) end of a batch interval.
+
+ // inspect/update cache recency iff there's no ongoing update to the cache
+ // this falls through if there's no cache update
+
+ // By this line, the "ConcurrentReadTx" code path is already protected against concurrent "writeback" operations,
+ // which require the write lock to update "readTx.baseReadTx.buf".
+ // This means a "buf *txReadBuffer" set via "readTx.buf.unsafeCopy()" is guaranteed to be up-to-date,
+ // whereas "txReadBufferCache.buf" may be stale due to concurrent "writeback" operations.
+ // We only update "txReadBufferCache.buf" if we know "buf *txReadBuffer" is up-to-date.
+ // The update to "txReadBufferCache.buf" benefits the following "ConcurrentReadTx" creations
+ // by avoiding a copy of "readTx.baseReadTx.buf".
+ b.txReadBufferCache.mu.Lock()
+
+ curCache := b.txReadBufferCache.buf
+ curCacheVer := b.txReadBufferCache.bufVersion
+ curBufVer := b.readTx.buf.bufVersion
+
+ isEmptyCache := curCache == nil
+ isStaleCache := curCacheVer != curBufVer
+
+ var buf *txReadBuffer
+ switch {
+ case isEmptyCache:
+ // perform safe copy of buffer while holding "b.txReadBufferCache.mu.Lock"
+ // this is only supposed to run once so there won't be much overhead
+ curBuf := b.readTx.buf.unsafeCopy()
+ buf = &curBuf
+ case isStaleCache:
+ // to maximize the concurrency, try unsafe copy of buffer
+ // release the lock while copying buffer -- cache may become stale again and
+ // get overwritten by someone else.
+ // therefore, we need to check the readTx buffer version again
+ b.txReadBufferCache.mu.Unlock()
+ curBuf := b.readTx.buf.unsafeCopy()
+ b.txReadBufferCache.mu.Lock()
+ buf = &curBuf
+ default:
+ // neither empty nor stale cache, just use the current buffer
+ buf = curCache
+ }
+ // txReadBufferCache.bufVersion can be modified while we are doing an unsafeCopy();
+ // as a result, curCacheVer may no longer be the same as
+ // txReadBufferCache.bufVersion.
+ // if !isEmptyCache && curCacheVer != b.txReadBufferCache.bufVersion
+ // then the cache became stale while copying "readTx.baseReadTx.buf".
+ // It is safe to not update "txReadBufferCache.buf", because the next following
+ // "ConcurrentReadTx" creation will trigger a new "readTx.baseReadTx.buf" copy
+ // and "buf" is still used for the current "concurrentReadTx.baseReadTx.buf".
+ if isEmptyCache || curCacheVer == b.txReadBufferCache.bufVersion {
+ // continue if the cache is never set or no one has modified the cache
+ b.txReadBufferCache.buf = buf
+ b.txReadBufferCache.bufVersion = curBufVer
+ }
+
+ b.txReadBufferCache.mu.Unlock()
+
+ // concurrentReadTx is not supposed to write to its txReadBuffer
+ return &concurrentReadTx{
+ baseReadTx: baseReadTx{
+ buf: *buf,
+ txMu: b.readTx.txMu,
+ tx: b.readTx.tx,
+ buckets: b.readTx.buckets,
+ txWg: b.readTx.txWg,
+ },
+ }
+}
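+
+// A typical read path, as a sketch (the real callers live outside this package;
+// "bucket", "start", "end" and "limit" are placeholders):
+//
+//	rtx := b.ConcurrentReadTx()
+//	rtx.RLock() // no-op for a concurrent read tx (see TestConcurrentReadTx below)
+//	keys, vals := rtx.UnsafeRange(bucket, start, end, limit)
+//	rtx.RUnlock() // releases the txWg reference so the boltdb read tx can be rolled back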
+
+// ForceCommit forces the current batching tx to commit.
+func (b *backend) ForceCommit() {
+ b.batchTx.Commit()
+}
+
+func (b *backend) Snapshot() Snapshot {
+ b.batchTx.Commit()
+
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+ tx, err := b.db.Begin(false)
+ if err != nil {
+ b.lg.Fatal("failed to begin tx", zap.Error(err))
+ }
+
+ stopc, donec := make(chan struct{}), make(chan struct{})
+ dbBytes := tx.Size()
+ go func() {
+ defer close(donec)
+ // sendRateBytes is based on transferring snapshot data over a 1 gigabit/s connection
+ // assuming a min tcp throughput of 100MB/s.
+ var sendRateBytes int64 = 100 * 1024 * 1024
+ warningTimeout := time.Duration(int64((float64(dbBytes) / float64(sendRateBytes)) * float64(time.Second)))
+ if warningTimeout < minSnapshotWarningTimeout {
+ warningTimeout = minSnapshotWarningTimeout
+ }
+ start := time.Now()
+ ticker := time.NewTicker(warningTimeout)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ b.lg.Warn(
+ "snapshotting taking too long to transfer",
+ zap.Duration("taking", time.Since(start)),
+ zap.Int64("bytes", dbBytes),
+ zap.String("size", humanize.Bytes(uint64(dbBytes))),
+ )
+
+ case <-stopc:
+ snapshotTransferSec.Observe(time.Since(start).Seconds())
+ return
+ }
+ }
+ }()
+
+ return &snapshot{tx, stopc, donec}
+}
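+
+// Illustrative snapshot usage (mirrors TestBackendSnapshot; "f" is an *os.File
+// opened by the caller):
+//
+//	snap := b.Snapshot()
+//	defer snap.Close() // stops the slow-transfer warning goroutine and rolls back the tx
+//	if _, err := snap.WriteTo(f); err != nil { /* handle error */ }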
+
+func (b *backend) Hash(ignores func(bucketName, keyName []byte) bool) (uint32, error) {
+ h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+ err := b.db.View(func(tx *bolt.Tx) error {
+ c := tx.Cursor()
+ for next, _ := c.First(); next != nil; next, _ = c.Next() {
+ b := tx.Bucket(next)
+ if b == nil {
+ return fmt.Errorf("cannot get hash of bucket %s", next)
+ }
+ h.Write(next)
+ b.ForEach(func(k, v []byte) error {
+ if ignores != nil && !ignores(next, k) {
+ h.Write(k)
+ h.Write(v)
+ }
+ return nil
+ })
+ }
+ return nil
+ })
+ if err != nil {
+ return 0, err
+ }
+
+ return h.Sum32(), nil
+}
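+
+// Example invariant exercised in TestBackendDefrag (sketch): with no writes in
+// between, the hash is expected to be unchanged by a defragmentation:
+//
+//	before, _ := b.Hash(nil)
+//	_ = b.Defrag()
+//	after, _ := b.Hash(nil) // expect after == before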
+
+func (b *backend) Size() int64 {
+ return atomic.LoadInt64(&b.size)
+}
+
+func (b *backend) SizeInUse() int64 {
+ return atomic.LoadInt64(&b.sizeInUse)
+}
+
+func (b *backend) run() {
+ defer close(b.donec)
+ t := time.NewTimer(b.batchInterval)
+ defer t.Stop()
+ for {
+ select {
+ case <-t.C:
+ case <-b.stopc:
+ b.batchTx.CommitAndStop()
+ return
+ }
+ if b.batchTx.safePending() != 0 {
+ b.batchTx.Commit()
+ }
+ t.Reset(b.batchInterval)
+ }
+}
+
+func (b *backend) Close() error {
+ close(b.stopc)
+ <-b.donec
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ return b.db.Close()
+}
+
+// Commits returns the total number of commits since start.
+func (b *backend) Commits() int64 {
+ return atomic.LoadInt64(&b.commits)
+}
+
+func (b *backend) Defrag() error {
+ return b.defrag()
+}
+
+func (b *backend) defrag() error {
+ verify.Assert(b.lg != nil, "the logger should not be nil")
+ now := time.Now()
+ isDefragActive.Set(1)
+ defer isDefragActive.Set(0)
+
+ // TODO: make this non-blocking?
+ // lock batchTx to ensure nobody is using previous tx, and then
+ // close previous ongoing tx.
+ b.batchTx.LockOutsideApply()
+ defer b.batchTx.Unlock()
+
+ // lock database after lock tx to avoid deadlock.
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ // block concurrent read requests while resetting tx
+ b.readTx.Lock()
+ defer b.readTx.Unlock()
+
+ // Create a temporary file to ensure we start with a clean slate.
+ // Snapshotter.cleanupSnapdir cleans up any of these that are found during startup.
+ dir := filepath.Dir(b.db.Path())
+ temp, err := os.CreateTemp(dir, "db.tmp.*")
+ if err != nil {
+ return err
+ }
+
+ options := bolt.Options{}
+ if boltOpenOptions != nil {
+ options = *boltOpenOptions
+ }
+ options.OpenFile = func(_ string, _ int, _ os.FileMode) (file *os.File, err error) {
+ // gofail: var defragOpenFileError string
+ // return nil, fmt.Errorf(defragOpenFileError)
+ return temp, nil
+ }
+ // Don't load tmp db into memory regardless of opening options
+ options.Mlock = false
+ tdbp := temp.Name()
+ tmpdb, err := bolt.Open(tdbp, 0o600, &options)
+ if err != nil {
+ temp.Close()
+ if rmErr := os.Remove(temp.Name()); rmErr != nil {
+ b.lg.Error(
+ "failed to remove temporary file",
+ zap.String("path", temp.Name()),
+ zap.Error(rmErr),
+ )
+ }
+
+ return err
+ }
+
+ dbp := b.db.Path()
+ size1, sizeInUse1 := b.Size(), b.SizeInUse()
+ b.lg.Info(
+ "defragmenting",
+ zap.String("path", dbp),
+ zap.Int64("current-db-size-bytes", size1),
+ zap.String("current-db-size", humanize.Bytes(uint64(size1))),
+ zap.Int64("current-db-size-in-use-bytes", sizeInUse1),
+ zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse1))),
+ )
+
+ defer func() {
+ // NOTE: We should exit as soon as possible because that tx
+ // might already be closed. An inflight request might then use the invalid
+ // tx and panic as well, and the real panic reason could be
+ // shadowed by the new panic. So we fatal here while still holding the lock.
+ if rerr := recover(); rerr != nil {
+ b.lg.Fatal("unexpected panic during defrag", zap.Any("panic", rerr))
+ }
+ }()
+
+ // Commit/stop and then reset current transactions (including the readTx)
+ b.batchTx.unsafeCommit(true)
+ b.batchTx.tx = nil
+
+ // gofail: var defragBeforeCopy struct{}
+ err = defragdb(b.db, tmpdb, defragLimit)
+ if err != nil {
+ tmpdb.Close()
+ if rmErr := os.RemoveAll(tmpdb.Path()); rmErr != nil {
+ b.lg.Error("failed to remove db.tmp after defragmentation completed", zap.Error(rmErr))
+ }
+
+ // restore the bbolt transactions if defragmentation fails
+ b.batchTx.tx = b.unsafeBegin(true)
+ b.readTx.tx = b.unsafeBegin(false)
+
+ return err
+ }
+
+ err = b.db.Close()
+ if err != nil {
+ b.lg.Fatal("failed to close database", zap.Error(err))
+ }
+ err = tmpdb.Close()
+ if err != nil {
+ b.lg.Fatal("failed to close tmp database", zap.Error(err))
+ }
+ // gofail: var defragBeforeRename struct{}
+ err = os.Rename(tdbp, dbp)
+ if err != nil {
+ b.lg.Fatal("failed to rename tmp database", zap.Error(err))
+ }
+
+ b.db, err = bolt.Open(dbp, 0o600, b.bopts)
+ if err != nil {
+ b.lg.Fatal("failed to open database", zap.String("path", dbp), zap.Error(err))
+ }
+ b.batchTx.tx = b.unsafeBegin(true)
+
+ b.readTx.reset()
+ b.readTx.tx = b.unsafeBegin(false)
+
+ size := b.readTx.tx.Size()
+ db := b.readTx.tx.DB()
+ atomic.StoreInt64(&b.size, size)
+ atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize)))
+
+ took := time.Since(now)
+ defragSec.Observe(took.Seconds())
+
+ size2, sizeInUse2 := b.Size(), b.SizeInUse()
+ b.lg.Info(
+ "finished defragmenting directory",
+ zap.String("path", dbp),
+ zap.Int64("current-db-size-bytes-diff", size2-size1),
+ zap.Int64("current-db-size-bytes", size2),
+ zap.String("current-db-size", humanize.Bytes(uint64(size2))),
+ zap.Int64("current-db-size-in-use-bytes-diff", sizeInUse2-sizeInUse1),
+ zap.Int64("current-db-size-in-use-bytes", sizeInUse2),
+ zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse2))),
+ zap.Duration("took", took),
+ )
+ return nil
+}
+
+func defragdb(odb, tmpdb *bolt.DB, limit int) error {
+ // gofail: var defragdbFail string
+ // return fmt.Errorf(defragdbFail)
+
+ // open a tx on tmpdb for writes
+ tmptx, err := tmpdb.Begin(true)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ tmptx.Rollback()
+ }
+ }()
+
+ // open a tx on old db for read
+ tx, err := odb.Begin(false)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ c := tx.Cursor()
+
+ count := 0
+ for next, _ := c.First(); next != nil; next, _ = c.Next() {
+ b := tx.Bucket(next)
+ if b == nil {
+ return fmt.Errorf("backend: cannot defrag bucket %s", next)
+ }
+
+ tmpb, berr := tmptx.CreateBucketIfNotExists(next)
+ if berr != nil {
+ return berr
+ }
+ tmpb.FillPercent = 0.9 // for the sequential (bucket2seq) writes in the ForEach below
+
+ if err = b.ForEach(func(k, v []byte) error {
+ count++
+ if count > limit {
+ err = tmptx.Commit()
+ if err != nil {
+ return err
+ }
+ tmptx, err = tmpdb.Begin(true)
+ if err != nil {
+ return err
+ }
+ tmpb = tmptx.Bucket(next)
+ tmpb.FillPercent = 0.9 // for the sequential (bucket2seq) writes in the ForEach below
+
+ count = 0
+ }
+ return tmpb.Put(k, v)
+ }); err != nil {
+ return err
+ }
+ }
+
+ return tmptx.Commit()
+}
+
+func (b *backend) begin(write bool) *bolt.Tx {
+ b.mu.RLock()
+ tx := b.unsafeBegin(write)
+ b.mu.RUnlock()
+
+ size := tx.Size()
+ db := tx.DB()
+ stats := db.Stats()
+ atomic.StoreInt64(&b.size, size)
+ atomic.StoreInt64(&b.sizeInUse, size-(int64(stats.FreePageN)*int64(db.Info().PageSize)))
+ atomic.StoreInt64(&b.openReadTxN, int64(stats.OpenTxN))
+
+ return tx
+}
+
+func (b *backend) unsafeBegin(write bool) *bolt.Tx {
+ // gofail: var beforeStartDBTxn struct{}
+ tx, err := b.db.Begin(write)
+ // gofail: var afterStartDBTxn struct{}
+ if err != nil {
+ b.lg.Fatal("failed to begin tx", zap.Error(err))
+ }
+ return tx
+}
+
+func (b *backend) OpenReadTxN() int64 {
+ return atomic.LoadInt64(&b.openReadTxN)
+}
+
+type snapshot struct {
+ *bolt.Tx
+ stopc chan struct{}
+ donec chan struct{}
+}
+
+func (s *snapshot) Close() error {
+ close(s.stopc)
+ <-s.donec
+ return s.Tx.Rollback()
+}
+
+func newBoltLoggerZap(bcfg BackendConfig) bolt.Logger {
+ lg := bcfg.Logger.Named("bbolt")
+ return &zapBoltLogger{lg.WithOptions(zap.AddCallerSkip(1)).Sugar()}
+}
+
+type zapBoltLogger struct {
+ *zap.SugaredLogger
+}
+
+func (zl *zapBoltLogger) Warning(args ...any) {
+ zl.SugaredLogger.Warn(args...)
+}
+
+func (zl *zapBoltLogger) Warningf(format string, args ...any) {
+ zl.SugaredLogger.Warnf(format, args...)
+}
diff --git a/server/mvcc/backend/backend_bench_test.go b/server/storage/backend/backend_bench_test.go
similarity index 80%
rename from server/mvcc/backend/backend_bench_test.go
rename to server/storage/backend/backend_bench_test.go
index 4f3599a8710..60cad1437a4 100644
--- a/server/mvcc/backend/backend_bench_test.go
+++ b/server/storage/backend/backend_bench_test.go
@@ -19,8 +19,10 @@ import (
"testing"
"time"
- "github.com/stretchr/testify/assert"
- betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
+ "github.com/stretchr/testify/require"
+
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+ "go.etcd.io/etcd/server/v3/storage/schema"
)
func BenchmarkBackendPut(b *testing.B) {
@@ -32,22 +34,22 @@ func BenchmarkBackendPut(b *testing.B) {
for i := 0; i < b.N; i++ {
keys[i] = make([]byte, 64)
_, err := rand.Read(keys[i])
- assert.NoError(b, err)
+ require.NoError(b, err)
}
value := make([]byte, 128)
_, err := rand.Read(value)
- assert.NoError(b, err)
+ require.NoError(b, err)
batchTx := backend.BatchTx()
batchTx.Lock()
- batchTx.UnsafeCreateBucket([]byte("test"))
+ batchTx.UnsafeCreateBucket(schema.Test)
batchTx.Unlock()
b.ResetTimer()
for i := 0; i < b.N; i++ {
batchTx.Lock()
- batchTx.UnsafePut([]byte("test"), keys[i], value)
+ batchTx.UnsafePut(schema.Test, keys[i], value)
batchTx.Unlock()
}
}
diff --git a/server/storage/backend/backend_test.go b/server/storage/backend/backend_test.go
new file mode 100644
index 00000000000..fc024b88bc1
--- /dev/null
+++ b/server/storage/backend/backend_test.go
@@ -0,0 +1,351 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend_test
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
+
+ bolt "go.etcd.io/bbolt"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+func TestBackendClose(t *testing.T) {
+ b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
+
+ // check that Close works
+ done := make(chan struct{}, 1)
+ go func() {
+ err := b.Close()
+ if err != nil {
+ t.Errorf("close error = %v, want nil", err)
+ }
+ done <- struct{}{}
+ }()
+ select {
+ case <-done:
+ case <-time.After(10 * time.Second):
+ t.Errorf("failed to close database in 10s")
+ }
+}
+
+func TestBackendSnapshot(t *testing.T) {
+ b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
+ defer betesting.Close(t, b)
+
+ tx := b.BatchTx()
+ tx.Lock()
+ tx.UnsafeCreateBucket(schema.Test)
+ tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))
+ tx.Unlock()
+ b.ForceCommit()
+
+ // write snapshot to a new file
+ f, err := os.CreateTemp(t.TempDir(), "etcd_backend_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ snap := b.Snapshot()
+ defer func() { assert.NoError(t, snap.Close()) }()
+ if _, err := snap.WriteTo(f); err != nil {
+ t.Fatal(err)
+ }
+ require.NoError(t, f.Close())
+
+ // bootstrap new backend from the snapshot
+ bcfg := backend.DefaultBackendConfig(zaptest.NewLogger(t))
+ bcfg.Path, bcfg.BatchInterval, bcfg.BatchLimit = f.Name(), time.Hour, 10000
+ nb := backend.New(bcfg)
+ defer betesting.Close(t, nb)
+
+ newTx := nb.BatchTx()
+ newTx.Lock()
+ ks, _ := newTx.UnsafeRange(schema.Test, []byte("foo"), []byte("goo"), 0)
+ if len(ks) != 1 {
+ t.Errorf("len(kvs) = %d, want 1", len(ks))
+ }
+ newTx.Unlock()
+}
+
+func TestBackendBatchIntervalCommit(t *testing.T) {
+ // start backend with a super short batch interval so
+ // we do not need to wait long for the commit to happen.
+ b, _ := betesting.NewTmpBackend(t, time.Nanosecond, 10000)
+ defer betesting.Close(t, b)
+
+ pc := backend.CommitsForTest(b)
+
+ tx := b.BatchTx()
+ tx.Lock()
+ tx.UnsafeCreateBucket(schema.Test)
+ tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))
+ tx.Unlock()
+
+ for i := 0; i < 10; i++ {
+ if backend.CommitsForTest(b) >= pc+1 {
+ break
+ }
+ time.Sleep(time.Duration(i*100) * time.Millisecond)
+ }
+
+ // check whether put happens via db view
+ assert.NoError(t, backend.DbFromBackendForTest(b).View(func(tx *bolt.Tx) error {
+ bucket := tx.Bucket([]byte("test"))
+ if bucket == nil {
+ t.Errorf("bucket test does not exit")
+ return nil
+ }
+ v := bucket.Get([]byte("foo"))
+ if v == nil {
+ t.Errorf("foo key failed to written in backend")
+ }
+ return nil
+ }))
+}
+
+func TestBackendDefrag(t *testing.T) {
+ bcfg := backend.DefaultBackendConfig(zaptest.NewLogger(t))
+ // Make sure we change BackendFreelistType.
+ // The goal is to verify that we restore the config option after defrag.
+ if bcfg.BackendFreelistType == bolt.FreelistMapType {
+ bcfg.BackendFreelistType = bolt.FreelistArrayType
+ } else {
+ bcfg.BackendFreelistType = bolt.FreelistMapType
+ }
+
+ b, _ := betesting.NewTmpBackendFromCfg(t, bcfg)
+
+ defer betesting.Close(t, b)
+
+ tx := b.BatchTx()
+ tx.Lock()
+ tx.UnsafeCreateBucket(schema.Test)
+ for i := 0; i < backend.DefragLimitForTest()+100; i++ {
+ tx.UnsafePut(schema.Test, []byte(fmt.Sprintf("foo_%d", i)), []byte("bar"))
+ }
+ tx.Unlock()
+ b.ForceCommit()
+
+ // remove some keys to ensure the disk space will be reclaimed after defrag
+ tx = b.BatchTx()
+ tx.Lock()
+ for i := 0; i < 50; i++ {
+ tx.UnsafeDelete(schema.Test, []byte(fmt.Sprintf("foo_%d", i)))
+ }
+ tx.Unlock()
+ b.ForceCommit()
+
+ size := b.Size()
+
+ // shrink and check hash
+ oh, err := b.Hash(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = b.Defrag()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ nh, err := b.Hash(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if oh != nh {
+ t.Errorf("hash = %v, want %v", nh, oh)
+ }
+
+ nsize := b.Size()
+ if nsize >= size {
+ t.Errorf("new size = %v, want < %d", nsize, size)
+ }
+ db := backend.DbFromBackendForTest(b)
+ if db.FreelistType != bcfg.BackendFreelistType {
+ t.Errorf("db FreelistType = [%v], want [%v]", db.FreelistType, bcfg.BackendFreelistType)
+ }
+
+ // try put more keys after shrink.
+ tx = b.BatchTx()
+ tx.Lock()
+ tx.UnsafeCreateBucket(schema.Test)
+ tx.UnsafePut(schema.Test, []byte("more"), []byte("bar"))
+ tx.Unlock()
+ b.ForceCommit()
+}
+
+// TestBackendWriteback ensures writes are stored to the read txn on write txn unlock.
+func TestBackendWriteback(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ defer betesting.Close(t, b)
+
+ tx := b.BatchTx()
+ tx.Lock()
+ tx.UnsafeCreateBucket(schema.Key)
+ tx.UnsafePut(schema.Key, []byte("abc"), []byte("bar"))
+ tx.UnsafePut(schema.Key, []byte("def"), []byte("baz"))
+ tx.UnsafePut(schema.Key, []byte("overwrite"), []byte("1"))
+ tx.Unlock()
+
+ // overwrites should be propagated too
+ tx.Lock()
+ tx.UnsafePut(schema.Key, []byte("overwrite"), []byte("2"))
+ tx.Unlock()
+
+ keys := []struct {
+ key []byte
+ end []byte
+ limit int64
+
+ wkey [][]byte
+ wval [][]byte
+ }{
+ {
+ key: []byte("abc"),
+ end: nil,
+
+ wkey: [][]byte{[]byte("abc")},
+ wval: [][]byte{[]byte("bar")},
+ },
+ {
+ key: []byte("abc"),
+ end: []byte("def"),
+
+ wkey: [][]byte{[]byte("abc")},
+ wval: [][]byte{[]byte("bar")},
+ },
+ {
+ key: []byte("abc"),
+ end: []byte("deg"),
+
+ wkey: [][]byte{[]byte("abc"), []byte("def")},
+ wval: [][]byte{[]byte("bar"), []byte("baz")},
+ },
+ {
+ key: []byte("abc"),
+ end: []byte("\xff"),
+ limit: 1,
+
+ wkey: [][]byte{[]byte("abc")},
+ wval: [][]byte{[]byte("bar")},
+ },
+ {
+ key: []byte("abc"),
+ end: []byte("\xff"),
+
+ wkey: [][]byte{[]byte("abc"), []byte("def"), []byte("overwrite")},
+ wval: [][]byte{[]byte("bar"), []byte("baz"), []byte("2")},
+ },
+ }
+ rtx := b.ReadTx()
+ for i, tt := range keys {
+ func() {
+ rtx.RLock()
+ defer rtx.RUnlock()
+ k, v := rtx.UnsafeRange(schema.Key, tt.key, tt.end, tt.limit)
+ if !reflect.DeepEqual(tt.wkey, k) || !reflect.DeepEqual(tt.wval, v) {
+ t.Errorf("#%d: want k=%+v, v=%+v; got k=%+v, v=%+v", i, tt.wkey, tt.wval, k, v)
+ }
+ }()
+ }
+}
+
+// TestConcurrentReadTx ensures that the current read transaction can see all prior writes stored in the read buffer.
+func TestConcurrentReadTx(t *testing.T) {
+ b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
+ defer betesting.Close(t, b)
+
+ wtx1 := b.BatchTx()
+ wtx1.Lock()
+ wtx1.UnsafeCreateBucket(schema.Key)
+ wtx1.UnsafePut(schema.Key, []byte("abc"), []byte("ABC"))
+ wtx1.UnsafePut(schema.Key, []byte("overwrite"), []byte("1"))
+ wtx1.Unlock()
+
+ wtx2 := b.BatchTx()
+ wtx2.Lock()
+ wtx2.UnsafePut(schema.Key, []byte("def"), []byte("DEF"))
+ wtx2.UnsafePut(schema.Key, []byte("overwrite"), []byte("2"))
+ wtx2.Unlock()
+
+ rtx := b.ConcurrentReadTx()
+ rtx.RLock() // no-op
+ k, v := rtx.UnsafeRange(schema.Key, []byte("abc"), []byte("\xff"), 0)
+ rtx.RUnlock()
+ wKey := [][]byte{[]byte("abc"), []byte("def"), []byte("overwrite")}
+ wVal := [][]byte{[]byte("ABC"), []byte("DEF"), []byte("2")}
+ if !reflect.DeepEqual(wKey, k) || !reflect.DeepEqual(wVal, v) {
+ t.Errorf("want k=%+v, v=%+v; got k=%+v, v=%+v", wKey, wVal, k, v)
+ }
+}
+
+// TestBackendWritebackForEach checks that partially written / buffered
+// data is visited in the same order as fully committed data.
+func TestBackendWritebackForEach(t *testing.T) {
+ b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
+ defer betesting.Close(t, b)
+
+ tx := b.BatchTx()
+ tx.Lock()
+ tx.UnsafeCreateBucket(schema.Key)
+ for i := 0; i < 5; i++ {
+ k := []byte(fmt.Sprintf("%04d", i))
+ tx.UnsafePut(schema.Key, k, []byte("bar"))
+ }
+ tx.Unlock()
+
+ // writeback
+ b.ForceCommit()
+
+ tx.Lock()
+ tx.UnsafeCreateBucket(schema.Key)
+ for i := 5; i < 20; i++ {
+ k := []byte(fmt.Sprintf("%04d", i))
+ tx.UnsafePut(schema.Key, k, []byte("bar"))
+ }
+ tx.Unlock()
+
+ seq := ""
+ getSeq := func(k, v []byte) error {
+ seq += string(k)
+ return nil
+ }
+ rtx := b.ReadTx()
+ rtx.RLock()
+ require.NoError(t, rtx.UnsafeForEach(schema.Key, getSeq))
+ rtx.RUnlock()
+
+ partialSeq := seq
+
+ seq = ""
+ b.ForceCommit()
+
+ tx.Lock()
+ require.NoError(t, tx.UnsafeForEach(schema.Key, getSeq))
+ tx.Unlock()
+
+ if seq != partialSeq {
+ t.Fatalf("expected %q, got %q", seq, partialSeq)
+ }
+}
diff --git a/server/storage/backend/batch_tx.go b/server/storage/backend/batch_tx.go
new file mode 100644
index 00000000000..5af557cb428
--- /dev/null
+++ b/server/storage/backend/batch_tx.go
@@ -0,0 +1,406 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "bytes"
+ "errors"
+ "math"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "go.uber.org/zap"
+
+ bolt "go.etcd.io/bbolt"
+ bolterrors "go.etcd.io/bbolt/errors"
+)
+
+type BucketID int
+
+type Bucket interface {
+ // ID returns a unique identifier of a bucket.
+ // The id must NOT be persisted and can be used as lightweight identificator
+ // in the in-memory maps.
+ ID() BucketID
+ Name() []byte
+ // String implements Stringer (human readable name).
+ String() string
+
+ // IsSafeRangeBucket is a hack to avoid inadvertently reading duplicate keys;
+ // overwrites on a bucket should only fetch with limit=1, but safeRangeBucket
+ // is known to never overwrite any key so range is safe.
+ IsSafeRangeBucket() bool
+}
+
+type BatchTx interface {
+ Lock()
+ Unlock()
+ // Commit commits a previous tx and begins a new writable one.
+ Commit()
+ // CommitAndStop commits the previous tx and does not create a new one.
+ CommitAndStop()
+ LockInsideApply()
+ LockOutsideApply()
+ UnsafeReadWriter
+}
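+
+// A typical write pattern, sketched from the tests in this package ("someBucket"
+// is a placeholder; the tests use Lock(), production callers use
+// LockInsideApply/LockOutsideApply):
+//
+//	tx := be.BatchTx()
+//	tx.LockOutsideApply()
+//	tx.UnsafeCreateBucket(someBucket)
+//	tx.UnsafePut(someBucket, []byte("foo"), []byte("bar"))
+//	tx.Unlock() // writes back to the read buffer and may trigger a commit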
+
+type UnsafeReadWriter interface {
+ UnsafeReader
+ UnsafeWriter
+}
+
+type UnsafeWriter interface {
+ UnsafeCreateBucket(bucket Bucket)
+ UnsafeDeleteBucket(bucket Bucket)
+ UnsafePut(bucket Bucket, key []byte, value []byte)
+ UnsafeSeqPut(bucket Bucket, key []byte, value []byte)
+ UnsafeDelete(bucket Bucket, key []byte)
+}
+
+type batchTx struct {
+ sync.Mutex
+ tx *bolt.Tx
+ backend *backend
+
+ pending int
+}
+
+// Lock is supposed to be called only by unit tests.
+func (t *batchTx) Lock() {
+ ValidateCalledInsideUnittest(t.backend.lg)
+ t.lock()
+}
+
+func (t *batchTx) lock() {
+ t.Mutex.Lock()
+}
+
+func (t *batchTx) LockInsideApply() {
+ t.lock()
+ if t.backend.txPostLockInsideApplyHook != nil {
+ // The callers of some methods (i.e., (*RaftCluster).AddMember)
+ // can be coming from both InsideApply and OutsideApply, but the
+ // callers from OutsideApply will have a nil txPostLockInsideApplyHook.
+ // So we should check the txPostLockInsideApplyHook before validating
+ // the callstack.
+ ValidateCalledInsideApply(t.backend.lg)
+ t.backend.txPostLockInsideApplyHook()
+ }
+}
+
+func (t *batchTx) LockOutsideApply() {
+ ValidateCalledOutSideApply(t.backend.lg)
+ t.lock()
+}
+
+func (t *batchTx) Unlock() {
+ if t.pending >= t.backend.batchLimit {
+ t.commit(false)
+ }
+ t.Mutex.Unlock()
+}
+
+func (t *batchTx) UnsafeCreateBucket(bucket Bucket) {
+ if _, err := t.tx.CreateBucketIfNotExists(bucket.Name()); err != nil {
+ t.backend.lg.Fatal(
+ "failed to create a bucket",
+ zap.Stringer("bucket-name", bucket),
+ zap.Error(err),
+ )
+ }
+ t.pending++
+}
+
+func (t *batchTx) UnsafeDeleteBucket(bucket Bucket) {
+ err := t.tx.DeleteBucket(bucket.Name())
+ if err != nil && !errors.Is(err, bolterrors.ErrBucketNotFound) {
+ t.backend.lg.Fatal(
+ "failed to delete a bucket",
+ zap.Stringer("bucket-name", bucket),
+ zap.Error(err),
+ )
+ }
+ t.pending++
+}
+
+// UnsafePut must be called holding the lock on the tx.
+func (t *batchTx) UnsafePut(bucket Bucket, key []byte, value []byte) {
+ t.unsafePut(bucket, key, value, false)
+}
+
+// UnsafeSeqPut must be called holding the lock on the tx.
+func (t *batchTx) UnsafeSeqPut(bucket Bucket, key []byte, value []byte) {
+ t.unsafePut(bucket, key, value, true)
+}
+
+func (t *batchTx) unsafePut(bucketType Bucket, key []byte, value []byte, seq bool) {
+ bucket := t.tx.Bucket(bucketType.Name())
+ if bucket == nil {
+ t.backend.lg.Fatal(
+ "failed to find a bucket",
+ zap.Stringer("bucket-name", bucketType),
+ zap.Stack("stack"),
+ )
+ }
+ if seq {
+ // it is useful to increase fill percent when the workloads are mostly append-only.
+ // this can delay the page split and reduce space usage.
+ bucket.FillPercent = 0.9
+ }
+ if err := bucket.Put(key, value); err != nil {
+ t.backend.lg.Fatal(
+ "failed to write to a bucket",
+ zap.Stringer("bucket-name", bucketType),
+ zap.Error(err),
+ )
+ }
+ t.pending++
+}
+
+// UnsafeRange must be called holding the lock on the tx.
+func (t *batchTx) UnsafeRange(bucketType Bucket, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+ bucket := t.tx.Bucket(bucketType.Name())
+ if bucket == nil {
+ t.backend.lg.Fatal(
+ "failed to find a bucket",
+ zap.Stringer("bucket-name", bucketType),
+ zap.Stack("stack"),
+ )
+ }
+ return unsafeRange(bucket.Cursor(), key, endKey, limit)
+}
+
+func unsafeRange(c *bolt.Cursor, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte) {
+ if limit <= 0 {
+ limit = math.MaxInt64
+ }
+ var isMatch func(b []byte) bool
+ if len(endKey) > 0 {
+ isMatch = func(b []byte) bool { return bytes.Compare(b, endKey) < 0 }
+ } else {
+ isMatch = func(b []byte) bool { return bytes.Equal(b, key) }
+ limit = 1
+ }
+
+ for ck, cv := c.Seek(key); ck != nil && isMatch(ck); ck, cv = c.Next() {
+ vs = append(vs, cv)
+ keys = append(keys, ck)
+ if limit == int64(len(keys)) {
+ break
+ }
+ }
+ return keys, vs
+}
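+
+// For example (sketch), given keys {"foo", "foo1", "foo2"} in the bucket:
+//
+//	unsafeRange(c, []byte("foo"), nil, 0)            // -> ["foo"] only: no endKey forces an exact match with limit 1
+//	unsafeRange(c, []byte("foo"), []byte("foo3"), 0) // -> ["foo", "foo1", "foo2"]: limit <= 0 means "no limit"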
+
+// UnsafeDelete must be called holding the lock on the tx.
+func (t *batchTx) UnsafeDelete(bucketType Bucket, key []byte) {
+ bucket := t.tx.Bucket(bucketType.Name())
+ if bucket == nil {
+ t.backend.lg.Fatal(
+ "failed to find a bucket",
+ zap.Stringer("bucket-name", bucketType),
+ zap.Stack("stack"),
+ )
+ }
+ err := bucket.Delete(key)
+ if err != nil {
+ t.backend.lg.Fatal(
+ "failed to delete a key",
+ zap.Stringer("bucket-name", bucketType),
+ zap.Error(err),
+ )
+ }
+ t.pending++
+}
+
+// UnsafeForEach must be called holding the lock on the tx.
+func (t *batchTx) UnsafeForEach(bucket Bucket, visitor func(k, v []byte) error) error {
+ return unsafeForEach(t.tx, bucket, visitor)
+}
+
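+// unsafeForEach is a no-op and returns nil when the bucket has not been
+// created yet; only existing buckets are visited.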
+func unsafeForEach(tx *bolt.Tx, bucket Bucket, visitor func(k, v []byte) error) error {
+ if b := tx.Bucket(bucket.Name()); b != nil {
+ return b.ForEach(visitor)
+ }
+ return nil
+}
+
+// Commit commits a previous tx and begins a new writable one.
+func (t *batchTx) Commit() {
+ t.lock()
+ t.commit(false)
+ t.Unlock()
+}
+
+// CommitAndStop commits the previous tx and does not create a new one.
+func (t *batchTx) CommitAndStop() {
+ t.lock()
+ t.commit(true)
+ t.Unlock()
+}
+
+func (t *batchTx) safePending() int {
+ t.Mutex.Lock()
+ defer t.Mutex.Unlock()
+ return t.pending
+}
+
+func (t *batchTx) commit(stop bool) {
+ // commit the last tx
+ if t.tx != nil {
+ if t.pending == 0 && !stop {
+ return
+ }
+
+ start := time.Now()
+
+ // gofail: var beforeCommit struct{}
+ err := t.tx.Commit()
+ // gofail: var afterCommit struct{}
+
+ rebalanceSec.Observe(t.tx.Stats().RebalanceTime.Seconds())
+ spillSec.Observe(t.tx.Stats().SpillTime.Seconds())
+ writeSec.Observe(t.tx.Stats().WriteTime.Seconds())
+ commitSec.Observe(time.Since(start).Seconds())
+ atomic.AddInt64(&t.backend.commits, 1)
+
+ t.pending = 0
+ if err != nil {
+ t.backend.lg.Fatal("failed to commit tx", zap.Error(err))
+ }
+ }
+ if !stop {
+ t.tx = t.backend.begin(true)
+ }
+}
+
+type batchTxBuffered struct {
+ batchTx
+ buf txWriteBuffer
+ pendingDeleteOperations int
+}
+
+func newBatchTxBuffered(backend *backend) *batchTxBuffered {
+ tx := &batchTxBuffered{
+ batchTx: batchTx{backend: backend},
+ buf: txWriteBuffer{
+ txBuffer: txBuffer{make(map[BucketID]*bucketBuffer)},
+ bucket2seq: make(map[BucketID]bool),
+ },
+ }
+ tx.Commit()
+ return tx
+}
+
+func (t *batchTxBuffered) Unlock() {
+ if t.pending != 0 {
+ t.backend.readTx.Lock() // blocks txReadBuffer for writing.
+ // gofail: var beforeWritebackBuf struct{}
+ t.buf.writeback(&t.backend.readTx.buf)
+ // gofail: var afterWritebackBuf struct{}
+ t.backend.readTx.Unlock()
+ // We commit the transaction when the number of pending operations
+ // reaches the configured limit (batchLimit) to prevent it from
+ // becoming excessively large.
+ //
+ // But we also need to commit the transaction immediately if there
+ // is any pending delete operation; otherwise etcd might not have
+ // finished committing the data into the backend storage (note: etcd
+ // periodically commits the bbolt transactions instead of committing
+ // on each request) by the time it applies the next request. It could
+ // then still read stale data from bbolt while processing that next
+ // request, which breaks linearizability.
+ //
+ // Note we don't need to commit the transaction for put requests if
+ // it doesn't exceed the batch limit, because there is a buffer on top
+ // of bbolt. Each time etcd reads data from the backend storage,
+ // it reads from both bbolt and the buffer. But there is no
+ // such buffer for delete requests.
+ //
+ // Please also refer to
+ // https://github.com/etcd-io/etcd/pull/17119#issuecomment-1857547158
+ if t.pending >= t.backend.batchLimit || t.pendingDeleteOperations > 0 {
+ t.commit(false)
+ }
+ }
+ t.batchTx.Unlock()
+}
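+
+// Concretely (sketch of the behaviour described above; "someBucket", "key", "val"
+// and "oldKey" are placeholders): a buffered put stays visible to readers via the
+// read buffer until the next periodic commit, whereas a delete increments
+// pendingDeleteOperations and therefore forces the commit on Unlock:
+//
+//	tx.Lock()
+//	tx.UnsafePut(someBucket, key, val)   // buffered; readers see it immediately
+//	tx.UnsafeDelete(someBucket, oldKey)  // pendingDeleteOperations++
+//	tx.Unlock()                          // commits because a delete is pending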
+
+func (t *batchTxBuffered) Commit() {
+ t.lock()
+ t.commit(false)
+ t.Unlock()
+}
+
+func (t *batchTxBuffered) CommitAndStop() {
+ t.lock()
+ t.commit(true)
+ t.Unlock()
+}
+
+func (t *batchTxBuffered) commit(stop bool) {
+ // all read txs must be closed to acquire boltdb commit rwlock
+ t.backend.readTx.Lock()
+ t.unsafeCommit(stop)
+ t.backend.readTx.Unlock()
+}
+
+func (t *batchTxBuffered) unsafeCommit(stop bool) {
+ if t.backend.hooks != nil {
+ // gofail: var commitBeforePreCommitHook struct{}
+ t.backend.hooks.OnPreCommitUnsafe(t)
+ // gofail: var commitAfterPreCommitHook struct{}
+ }
+
+ if t.backend.readTx.tx != nil {
+ // wait all store read transactions using the current boltdb tx to finish,
+ // then close the boltdb tx
+ go func(tx *bolt.Tx, wg *sync.WaitGroup) {
+ wg.Wait()
+ if err := tx.Rollback(); err != nil {
+ t.backend.lg.Fatal("failed to rollback tx", zap.Error(err))
+ }
+ }(t.backend.readTx.tx, t.backend.readTx.txWg)
+ t.backend.readTx.reset()
+ }
+
+ t.batchTx.commit(stop)
+ t.pendingDeleteOperations = 0
+
+ if !stop {
+ t.backend.readTx.tx = t.backend.begin(false)
+ }
+}
+
+func (t *batchTxBuffered) UnsafePut(bucket Bucket, key []byte, value []byte) {
+ t.batchTx.UnsafePut(bucket, key, value)
+ t.buf.put(bucket, key, value)
+}
+
+func (t *batchTxBuffered) UnsafeSeqPut(bucket Bucket, key []byte, value []byte) {
+ t.batchTx.UnsafeSeqPut(bucket, key, value)
+ t.buf.putSeq(bucket, key, value)
+}
+
+func (t *batchTxBuffered) UnsafeDelete(bucketType Bucket, key []byte) {
+ t.batchTx.UnsafeDelete(bucketType, key)
+ t.pendingDeleteOperations++
+}
+
+func (t *batchTxBuffered) UnsafeDeleteBucket(bucket Bucket) {
+ t.batchTx.UnsafeDeleteBucket(bucket)
+ t.pendingDeleteOperations++
+}
diff --git a/server/storage/backend/batch_tx_test.go b/server/storage/backend/batch_tx_test.go
new file mode 100644
index 00000000000..279f199a5db
--- /dev/null
+++ b/server/storage/backend/batch_tx_test.go
@@ -0,0 +1,476 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend_test
+
+import (
+ "fmt"
+ "math/rand"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+
+ bolt "go.etcd.io/bbolt"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+func TestBatchTxPut(t *testing.T) {
+ b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
+ defer betesting.Close(t, b)
+
+ tx := b.BatchTx()
+
+ tx.Lock()
+
+ // create bucket
+ tx.UnsafeCreateBucket(schema.Test)
+
+ // put
+ v := []byte("bar")
+ tx.UnsafePut(schema.Test, []byte("foo"), v)
+
+ tx.Unlock()
+
+ // check put result before and after tx is committed
+ for k := 0; k < 2; k++ {
+ tx.Lock()
+ _, gv := tx.UnsafeRange(schema.Test, []byte("foo"), nil, 0)
+ tx.Unlock()
+ if !reflect.DeepEqual(gv[0], v) {
+ t.Errorf("v = %s, want %s", gv[0], v)
+ }
+ tx.Commit()
+ }
+}
+
+func TestBatchTxRange(t *testing.T) {
+ b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
+ defer betesting.Close(t, b)
+
+ tx := b.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ tx.UnsafeCreateBucket(schema.Test)
+ // put keys
+ allKeys := [][]byte{[]byte("foo"), []byte("foo1"), []byte("foo2")}
+ allVals := [][]byte{[]byte("bar"), []byte("bar1"), []byte("bar2")}
+ for i := range allKeys {
+ tx.UnsafePut(schema.Test, allKeys[i], allVals[i])
+ }
+
+ tests := []struct {
+ key []byte
+ endKey []byte
+ limit int64
+
+ wkeys [][]byte
+ wvals [][]byte
+ }{
+ // single key
+ {
+ []byte("foo"), nil, 0,
+ allKeys[:1], allVals[:1],
+ },
+ // single key, bad
+ {
+ []byte("doo"), nil, 0,
+ nil, nil,
+ },
+ // key range
+ {
+ []byte("foo"), []byte("foo1"), 0,
+ allKeys[:1], allVals[:1],
+ },
+ // key range, get all keys
+ {
+ []byte("foo"), []byte("foo3"), 0,
+ allKeys, allVals,
+ },
+ // key range, bad
+ {
+ []byte("goo"), []byte("goo3"), 0,
+ nil, nil,
+ },
+ // key range with effective limit
+ {
+ []byte("foo"), []byte("foo3"), 1,
+ allKeys[:1], allVals[:1],
+ },
+ // key range with limit
+ {
+ []byte("foo"), []byte("foo3"), 4,
+ allKeys, allVals,
+ },
+ }
+ for i, tt := range tests {
+ keys, vals := tx.UnsafeRange(schema.Test, tt.key, tt.endKey, tt.limit)
+ if !reflect.DeepEqual(keys, tt.wkeys) {
+ t.Errorf("#%d: keys = %+v, want %+v", i, keys, tt.wkeys)
+ }
+ if !reflect.DeepEqual(vals, tt.wvals) {
+ t.Errorf("#%d: vals = %+v, want %+v", i, vals, tt.wvals)
+ }
+ }
+}
+
+func TestBatchTxDelete(t *testing.T) {
+ b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
+ defer betesting.Close(t, b)
+
+ tx := b.BatchTx()
+ tx.Lock()
+
+ tx.UnsafeCreateBucket(schema.Test)
+ tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))
+
+ tx.UnsafeDelete(schema.Test, []byte("foo"))
+
+ tx.Unlock()
+
+ // check put result before and after tx is committed
+ for k := 0; k < 2; k++ {
+ tx.Lock()
+ ks, _ := tx.UnsafeRange(schema.Test, []byte("foo"), nil, 0)
+ tx.Unlock()
+ if len(ks) != 0 {
+ t.Errorf("keys on foo = %v, want nil", ks)
+ }
+ tx.Commit()
+ }
+}
+
+func TestBatchTxCommit(t *testing.T) {
+ b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
+ defer betesting.Close(t, b)
+
+ tx := b.BatchTx()
+ tx.Lock()
+ tx.UnsafeCreateBucket(schema.Test)
+ tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))
+ tx.Unlock()
+
+ tx.Commit()
+
+ // check whether put happens via db view
+ backend.DbFromBackendForTest(b).View(func(tx *bolt.Tx) error {
+ bucket := tx.Bucket(schema.Test.Name())
+ if bucket == nil {
+ t.Errorf("bucket test does not exit")
+ return nil
+ }
+ v := bucket.Get([]byte("foo"))
+ if v == nil {
+ t.Errorf("foo key failed to written in backend")
+ }
+ return nil
+ })
+}
+
+func TestBatchTxBatchLimitCommit(t *testing.T) {
+ // start backend with batch limit 1 so one write can
+ // trigger a commit
+ b, _ := betesting.NewTmpBackend(t, time.Hour, 1)
+ defer betesting.Close(t, b)
+
+ tx := b.BatchTx()
+ tx.Lock()
+ tx.UnsafeCreateBucket(schema.Test)
+ tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))
+ tx.Unlock()
+
+ // batch limit commit should have been triggered
+ // check whether put happens via db view
+ backend.DbFromBackendForTest(b).View(func(tx *bolt.Tx) error {
+ bucket := tx.Bucket(schema.Test.Name())
+ if bucket == nil {
+ t.Errorf("bucket test does not exit")
+ return nil
+ }
+ v := bucket.Get([]byte("foo"))
+ if v == nil {
+ t.Errorf("foo key failed to written in backend")
+ }
+ return nil
+ })
+}
+
+func TestRangeAfterDeleteBucketMatch(t *testing.T) {
+ b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
+ defer betesting.Close(t, b)
+
+ tx := b.BatchTx()
+
+ tx.Lock()
+ tx.UnsafeCreateBucket(schema.Test)
+ tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))
+ tx.Unlock()
+ tx.Commit()
+
+ checkForEach(t, b.BatchTx(), b.ReadTx(), [][]byte{[]byte("foo")}, [][]byte{[]byte("bar")})
+
+ tx.Lock()
+ tx.UnsafeDeleteBucket(schema.Test)
+ tx.Unlock()
+
+ checkForEach(t, b.BatchTx(), b.ReadTx(), nil, nil)
+}
+
+func TestRangeAfterDeleteMatch(t *testing.T) {
+ b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
+ defer betesting.Close(t, b)
+
+ tx := b.BatchTx()
+
+ tx.Lock()
+ tx.UnsafeCreateBucket(schema.Test)
+ tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))
+ tx.Unlock()
+ tx.Commit()
+
+ checkRangeResponseMatch(t, b.BatchTx(), b.ReadTx(), schema.Test, []byte("foo"), nil, 0)
+ checkForEach(t, b.BatchTx(), b.ReadTx(), [][]byte{[]byte("foo")}, [][]byte{[]byte("bar")})
+
+ tx.Lock()
+ tx.UnsafeDelete(schema.Test, []byte("foo"))
+ tx.Unlock()
+
+ checkRangeResponseMatch(t, b.BatchTx(), b.ReadTx(), schema.Test, []byte("foo"), nil, 0)
+ checkForEach(t, b.BatchTx(), b.ReadTx(), nil, nil)
+}
+
+func TestRangeAfterUnorderedKeyWriteMatch(t *testing.T) {
+ b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
+ defer betesting.Close(t, b)
+
+ tx := b.BatchTx()
+
+ tx.Lock()
+ tx.UnsafeCreateBucket(schema.Test)
+ tx.UnsafePut(schema.Test, []byte("foo5"), []byte("bar5"))
+ tx.UnsafePut(schema.Test, []byte("foo2"), []byte("bar2"))
+ tx.UnsafePut(schema.Test, []byte("foo1"), []byte("bar1"))
+ tx.UnsafePut(schema.Test, []byte("foo3"), []byte("bar3"))
+ tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))
+ tx.UnsafePut(schema.Test, []byte("foo4"), []byte("bar4"))
+ tx.Unlock()
+
+ checkRangeResponseMatch(t, b.BatchTx(), b.ReadTx(), schema.Test, []byte("foo"), nil, 1)
+}
+
+func TestRangeAfterAlternatingBucketWriteMatch(t *testing.T) {
+ b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
+ defer betesting.Close(t, b)
+
+ tx := b.BatchTx()
+
+ tx.Lock()
+ tx.UnsafeCreateBucket(schema.Key)
+ tx.UnsafeCreateBucket(schema.Test)
+ tx.UnsafeSeqPut(schema.Key, []byte("key1"), []byte("val1"))
+ tx.Unlock()
+
+ tx.Lock()
+ tx.UnsafeSeqPut(schema.Key, []byte("key2"), []byte("val2"))
+ tx.Unlock()
+ tx.Commit()
+ // Only in the 2nd commit is the schema.Key bucket removed from readBuffer.buckets.
+ // This makes sure we test the case where an empty writeBuffer bucket
+ // is used to replace the read buffer bucket.
+ tx.Commit()
+
+ tx.Lock()
+ tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))
+ tx.Unlock()
+ checkRangeResponseMatch(t, b.BatchTx(), b.ReadTx(), schema.Key, []byte("key"), []byte("key5"), 100)
+ checkRangeResponseMatch(t, b.BatchTx(), b.ReadTx(), schema.Test, []byte("foo"), []byte("foo3"), 1)
+}
+
+func TestRangeAfterOverwriteMatch(t *testing.T) {
+ b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
+ defer betesting.Close(t, b)
+
+ tx := b.BatchTx()
+
+ tx.Lock()
+ tx.UnsafeCreateBucket(schema.Test)
+ tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar2"))
+ tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar0"))
+ tx.UnsafePut(schema.Test, []byte("foo1"), []byte("bar10"))
+ tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar1"))
+ tx.UnsafePut(schema.Test, []byte("foo1"), []byte("bar11"))
+ tx.Unlock()
+
+ checkRangeResponseMatch(t, b.BatchTx(), b.ReadTx(), schema.Test, []byte("foo"), []byte("foo3"), 1)
+ checkForEach(t, b.BatchTx(), b.ReadTx(), [][]byte{[]byte("foo"), []byte("foo1")}, [][]byte{[]byte("bar1"), []byte("bar11")})
+}
+
+func TestRangeAfterOverwriteAndDeleteMatch(t *testing.T) {
+ b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
+ defer betesting.Close(t, b)
+
+ tx := b.BatchTx()
+
+ tx.Lock()
+ tx.UnsafeCreateBucket(schema.Test)
+ tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar2"))
+ tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar0"))
+ tx.UnsafePut(schema.Test, []byte("foo1"), []byte("bar10"))
+ tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar1"))
+ tx.UnsafePut(schema.Test, []byte("foo1"), []byte("bar11"))
+ tx.Unlock()
+
+ checkRangeResponseMatch(t, b.BatchTx(), b.ReadTx(), schema.Test, []byte("foo"), nil, 0)
+ checkForEach(t, b.BatchTx(), b.ReadTx(), [][]byte{[]byte("foo"), []byte("foo1")}, [][]byte{[]byte("bar1"), []byte("bar11")})
+
+ tx.Lock()
+ tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar3"))
+ tx.UnsafeDelete(schema.Test, []byte("foo1"))
+ tx.Unlock()
+
+ checkRangeResponseMatch(t, b.BatchTx(), b.ReadTx(), schema.Test, []byte("foo"), nil, 0)
+ checkRangeResponseMatch(t, b.BatchTx(), b.ReadTx(), schema.Test, []byte("foo1"), nil, 0)
+ checkForEach(t, b.BatchTx(), b.ReadTx(), [][]byte{[]byte("foo")}, [][]byte{[]byte("bar3")})
+}
+
+func checkRangeResponseMatch(t *testing.T, tx backend.BatchTx, rtx backend.ReadTx, bucket backend.Bucket, key, endKey []byte, limit int64) {
+ tx.Lock()
+ ks1, vs1 := tx.UnsafeRange(bucket, key, endKey, limit)
+ tx.Unlock()
+
+ rtx.RLock()
+ ks2, vs2 := rtx.UnsafeRange(bucket, key, endKey, limit)
+ rtx.RUnlock()
+
+ if diff := cmp.Diff(ks1, ks2); diff != "" {
+ t.Errorf("keys on read and batch transaction doesn't match, diff: %s", diff)
+ }
+ if diff := cmp.Diff(vs1, vs2); diff != "" {
+ t.Errorf("values on read and batch transaction doesn't match, diff: %s", diff)
+ }
+}
+
+func checkForEach(t *testing.T, tx backend.BatchTx, rtx backend.ReadTx, expectedKeys, expectedValues [][]byte) {
+ tx.Lock()
+ checkUnsafeForEach(t, tx, expectedKeys, expectedValues)
+ tx.Unlock()
+
+ rtx.RLock()
+ checkUnsafeForEach(t, rtx, expectedKeys, expectedValues)
+ rtx.RUnlock()
+}
+
+func checkUnsafeForEach(t *testing.T, tx backend.UnsafeReader, expectedKeys, expectedValues [][]byte) {
+ var ks, vs [][]byte
+ tx.UnsafeForEach(schema.Test, func(k, v []byte) error {
+ ks = append(ks, k)
+ vs = append(vs, v)
+ return nil
+ })
+
+ if diff := cmp.Diff(ks, expectedKeys); diff != "" {
+ t.Errorf("keys on transaction doesn't match expected, diff: %s", diff)
+ }
+ if diff := cmp.Diff(vs, expectedValues); diff != "" {
+ t.Errorf("values on transaction doesn't match expected, diff: %s", diff)
+ }
+}
+
+// runWriteback is used to test the txWriteBuffer.writeback function, which is called inside tx.Unlock().
+// The parameters are chosen based on defaultBatchLimit = 10000
+func runWriteback(t testing.TB, kss, vss [][]string, isSeq bool) {
+ b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
+ defer betesting.Close(t, b)
+
+ tx := b.BatchTx()
+
+ tx.Lock()
+ tx.UnsafeCreateBucket(schema.Test)
+ tx.UnsafeCreateBucket(schema.Key)
+ tx.Unlock()
+
+ for i, ks := range kss {
+ vs := vss[i]
+ tx.Lock()
+ for j := 0; j < len(ks); j++ {
+ if isSeq {
+ tx.UnsafeSeqPut(schema.Key, []byte(ks[j]), []byte(vs[j]))
+ } else {
+ tx.UnsafePut(schema.Test, []byte(ks[j]), []byte(vs[j]))
+ }
+ }
+ tx.Unlock()
+ }
+}
+
+func BenchmarkWritebackSeqBatches1BatchSize10000(b *testing.B) { benchmarkWriteback(b, 1, 10000, true) }
+
+func BenchmarkWritebackSeqBatches10BatchSize1000(b *testing.B) { benchmarkWriteback(b, 10, 1000, true) }
+
+func BenchmarkWritebackSeqBatches100BatchSize100(b *testing.B) { benchmarkWriteback(b, 100, 100, true) }
+
+func BenchmarkWritebackSeqBatches1000BatchSize10(b *testing.B) { benchmarkWriteback(b, 1000, 10, true) }
+
+func BenchmarkWritebackNonSeqBatches1000BatchSize1(b *testing.B) {
+ // for non-sequential writes, the batch size is usually small: 1, or on the order of the cluster size.
+ benchmarkWriteback(b, 1000, 1, false)
+}
+
+func BenchmarkWritebackNonSeqBatches10000BatchSize1(b *testing.B) {
+ benchmarkWriteback(b, 10000, 1, false)
+}
+
+func BenchmarkWritebackNonSeqBatches100BatchSize10(b *testing.B) {
+ benchmarkWriteback(b, 100, 10, false)
+}
+
+func BenchmarkWritebackNonSeqBatches1000BatchSize10(b *testing.B) {
+ benchmarkWriteback(b, 1000, 10, false)
+}
+
+func benchmarkWriteback(b *testing.B, batches, batchSize int, isSeq bool) {
+ // kss and vss are key and value arrays to write with size batches*batchSize
+ var kss, vss [][]string
+ for i := 0; i < batches; i++ {
+ var ks, vs []string
+ for j := i * batchSize; j < (i+1)*batchSize; j++ {
+ k := fmt.Sprintf("key%d", j)
+ v := fmt.Sprintf("val%d", j)
+ ks = append(ks, k)
+ vs = append(vs, v)
+ }
+ if !isSeq {
+ // make sure each batch is shuffled differently, but stays the same across test runs.
+ shuffleList(ks, i*batchSize)
+ }
+ kss = append(kss, ks)
+ vss = append(vss, vs)
+ }
+ b.ResetTimer()
+ for n := 1; n < b.N; n++ {
+ runWriteback(b, kss, vss, isSeq)
+ }
+}
+
+func shuffleList(l []string, seed int) {
+ r := rand.New(rand.NewSource(int64(seed)))
+ for i := 0; i < len(l); i++ {
+ j := r.Intn(i + 1)
+ l[i], l[j] = l[j], l[i]
+ }
+}
diff --git a/server/mvcc/backend/config_default.go b/server/storage/backend/config_default.go
similarity index 96%
rename from server/mvcc/backend/config_default.go
rename to server/storage/backend/config_default.go
index 847bd10fd78..fd57c7ca84c 100644
--- a/server/mvcc/backend/config_default.go
+++ b/server/storage/backend/config_default.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build !linux && !windows
-// +build !linux,!windows
package backend
diff --git a/server/mvcc/backend/config_linux.go b/server/storage/backend/config_linux.go
similarity index 100%
rename from server/mvcc/backend/config_linux.go
rename to server/storage/backend/config_linux.go
diff --git a/server/mvcc/backend/config_windows.go b/server/storage/backend/config_windows.go
similarity index 98%
rename from server/mvcc/backend/config_windows.go
rename to server/storage/backend/config_windows.go
index ba6e5a1284c..7bb42f3a289 100644
--- a/server/mvcc/backend/config_windows.go
+++ b/server/storage/backend/config_windows.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build windows
-// +build windows
package backend
diff --git a/server/mvcc/backend/doc.go b/server/storage/backend/doc.go
similarity index 100%
rename from server/mvcc/backend/doc.go
rename to server/storage/backend/doc.go
diff --git a/server/storage/backend/export_test.go b/server/storage/backend/export_test.go
new file mode 100644
index 00000000000..e9f5ad38d6a
--- /dev/null
+++ b/server/storage/backend/export_test.go
@@ -0,0 +1,29 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import bolt "go.etcd.io/bbolt"
+
+func DbFromBackendForTest(b Backend) *bolt.DB {
+ return b.(*backend).db
+}
+
+func DefragLimitForTest() int {
+ return defragLimit
+}
+
+func CommitsForTest(b Backend) int64 {
+ return b.(*backend).Commits()
+}
diff --git a/server/storage/backend/hooks.go b/server/storage/backend/hooks.go
new file mode 100644
index 00000000000..817d0c5eb50
--- /dev/null
+++ b/server/storage/backend/hooks.go
@@ -0,0 +1,36 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+type HookFunc func(tx UnsafeReadWriter)
+
+// Hooks allow adding additional logic to be executed during the transaction lifetime.
+type Hooks interface {
+ // OnPreCommitUnsafe is executed before a transaction is committed.
+ // The given transaction is already locked.
+ OnPreCommitUnsafe(tx UnsafeReadWriter)
+}
+
+type hooks struct {
+ onPreCommitUnsafe HookFunc
+}
+
+func (h hooks) OnPreCommitUnsafe(tx UnsafeReadWriter) {
+ h.onPreCommitUnsafe(tx)
+}
+
+func NewHooks(onPreCommitUnsafe HookFunc) Hooks {
+ return hooks{onPreCommitUnsafe: onPreCommitUnsafe}
+}
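+
+// Illustrative hook registration (a sketch; the hook body and "someBucket" are
+// placeholders, not the real etcd hook -- see hooks_test.go for an actual example):
+//
+//	cfg := backend.DefaultBackendConfig(lg)
+//	cfg.Hooks = backend.NewHooks(func(tx backend.UnsafeReadWriter) {
+//		tx.UnsafePut(someBucket, []byte("k"), []byte("v"))
+//	})
+//	be := backend.New(cfg)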
diff --git a/server/mvcc/backend/hooks_test.go b/server/storage/backend/hooks_test.go
similarity index 83%
rename from server/mvcc/backend/hooks_test.go
rename to server/storage/backend/hooks_test.go
index dbf68f910be..9a1d33d8253 100644
--- a/server/mvcc/backend/hooks_test.go
+++ b/server/storage/backend/hooks_test.go
@@ -20,17 +20,20 @@ import (
"time"
"github.com/stretchr/testify/assert"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
- betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+ "go.etcd.io/etcd/server/v3/storage/schema"
)
var (
- bucket = []byte("bucket")
+ bucket = schema.Test
key = []byte("key")
)
func TestBackendPreCommitHook(t *testing.T) {
- be := newTestHooksBackend(t, backend.DefaultBackendConfig())
+ be := newTestHooksBackend(t, backend.DefaultBackendConfig(zaptest.NewLogger(t)))
tx := be.BatchTx()
prepareBuckenAndKey(tx)
@@ -39,15 +42,13 @@ func TestBackendPreCommitHook(t *testing.T) {
// Empty commit.
tx.Commit()
- write(tx, []byte("foo"), []byte("bar"))
-
- assert.Equal(t, ">cc", getCommitsKey(t, be), "expected 2 explict commits")
+ assert.Equalf(t, ">cc", getCommitsKey(t, be), "expected 2 explicit commits")
tx.Commit()
- assert.Equal(t, ">ccc", getCommitsKey(t, be), "expected 3 explict commits")
+ assert.Equalf(t, ">ccc", getCommitsKey(t, be), "expected 3 explicit commits")
}
func TestBackendAutoCommitLimitHook(t *testing.T) {
- cfg := backend.DefaultBackendConfig()
+ cfg := backend.DefaultBackendConfig(zaptest.NewLogger(t))
cfg.BatchLimit = 3
be := newTestHooksBackend(t, cfg)
@@ -71,7 +72,7 @@ func TestBackendAutoCommitBatchIntervalHook(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
- cfg := backend.DefaultBackendConfig()
+ cfg := backend.DefaultBackendConfig(zaptest.NewLogger(t))
cfg.BatchInterval = 10 * time.Millisecond
be := newTestHooksBackend(t, cfg)
tx := be.BatchTx()
@@ -113,7 +114,7 @@ func prepareBuckenAndKey(tx backend.BatchTx) {
func newTestHooksBackend(t testing.TB, baseConfig backend.BackendConfig) backend.Backend {
cfg := baseConfig
- cfg.Hooks = backend.NewHooks(func(tx backend.BatchTx) {
+ cfg.Hooks = backend.NewHooks(func(tx backend.UnsafeReadWriter) {
k, v := tx.UnsafeRange(bucket, key, nil, 1)
t.Logf("OnPreCommit executed: %v %v", string(k[0]), string(v[0]))
assert.Len(t, k, 1)
diff --git a/server/storage/backend/metrics.go b/server/storage/backend/metrics.go
new file mode 100644
index 00000000000..9d58c00638b
--- /dev/null
+++ b/server/storage/backend/metrics.go
@@ -0,0 +1,103 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ commitSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "backend_commit_duration_seconds",
+ Help: "The latency distributions of commit called by backend.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ rebalanceSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "disk",
+ Name: "backend_commit_rebalance_duration_seconds",
+ Help: "The latency distributions of commit.rebalance called by bboltdb backend.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ spillSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "disk",
+ Name: "backend_commit_spill_duration_seconds",
+ Help: "The latency distributions of commit.spill called by bboltdb backend.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ writeSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "disk",
+ Name: "backend_commit_write_duration_seconds",
+ Help: "The latency distributions of commit.write called by bboltdb backend.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ defragSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "backend_defrag_duration_seconds",
+ Help: "The latency distribution of backend defragmentation.",
+
+ // 100 MB usually takes 1 sec, so start with 10 MB of 100 ms
+ // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
+ // highest bucket start of 0.1 sec * 2^12 == 409.6 sec
+ Buckets: prometheus.ExponentialBuckets(.1, 2, 13),
+ })
+
+ snapshotTransferSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "backend_snapshot_duration_seconds",
+ Help: "The latency distribution of backend snapshots.",
+
+ // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
+ // highest bucket start of 0.01 sec * 2^16 == 655.36 sec
+ Buckets: prometheus.ExponentialBuckets(.01, 2, 17),
+ })
+
+ isDefragActive = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "defrag_inflight",
+ Help: "Whether or not defrag is active on the member. 1 means active, 0 means not.",
+ })
+)
+
+func init() {
+ prometheus.MustRegister(commitSec)
+ prometheus.MustRegister(rebalanceSec)
+ prometheus.MustRegister(spillSec)
+ prometheus.MustRegister(writeSec)
+ prometheus.MustRegister(defragSec)
+ prometheus.MustRegister(snapshotTransferSec)
+ prometheus.MustRegister(isDefragActive)
+}
diff --git a/server/storage/backend/read_tx.go b/server/storage/backend/read_tx.go
new file mode 100644
index 00000000000..4ca2621411c
--- /dev/null
+++ b/server/storage/backend/read_tx.go
@@ -0,0 +1,151 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "math"
+ "sync"
+
+ bolt "go.etcd.io/bbolt"
+)
+
+// IsSafeRangeBucket is a hack to avoid inadvertently reading duplicate keys:
+// overwrites on a bucket should only be fetched with limit=1, but a bucket
+// marked IsSafeRangeBucket is known to never overwrite any key, so ranging
+// over it is safe.
+
+type ReadTx interface {
+ RLock()
+ RUnlock()
+ UnsafeReader
+}
+
+type UnsafeReader interface {
+ UnsafeRange(bucket Bucket, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte)
+ UnsafeForEach(bucket Bucket, visitor func(k, v []byte) error) error
+}
+
+// baseReadTx is the base type for readTx and concurrentReadTx, eliminating duplicated functions between them.
+type baseReadTx struct {
+ // mu protects accesses to the txReadBuffer
+ mu sync.RWMutex
+ buf txReadBuffer
+
+ // TODO: group and encapsulate {txMu, tx, buckets, txWg}, as they share the same lifecycle.
+ // txMu protects accesses to buckets and tx on Range requests.
+ txMu *sync.RWMutex
+ tx *bolt.Tx
+ buckets map[BucketID]*bolt.Bucket
+ // txWg protects tx from being rolled back at the end of a batch interval until all reads using this tx are done.
+ txWg *sync.WaitGroup
+}
+
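+// UnsafeForEach visits each key in the bucket exactly once: keys that are
+// present in the read buffer (i.e. not yet committed to bolt) are skipped
+// while iterating the bolt bucket and are visited from the buffer afterwards,
+// so only the buffered entry is visited for a key that exists in both.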
+func (baseReadTx *baseReadTx) UnsafeForEach(bucket Bucket, visitor func(k, v []byte) error) error {
+ dups := make(map[string]struct{})
+ getDups := func(k, v []byte) error {
+ dups[string(k)] = struct{}{}
+ return nil
+ }
+ visitNoDup := func(k, v []byte) error {
+ if _, ok := dups[string(k)]; ok {
+ return nil
+ }
+ return visitor(k, v)
+ }
+ if err := baseReadTx.buf.ForEach(bucket, getDups); err != nil {
+ return err
+ }
+ baseReadTx.txMu.Lock()
+ err := unsafeForEach(baseReadTx.tx, bucket, visitNoDup)
+ baseReadTx.txMu.Unlock()
+ if err != nil {
+ return err
+ }
+ return baseReadTx.buf.ForEach(bucket, visitor)
+}
+
+func (baseReadTx *baseReadTx) UnsafeRange(bucketType Bucket, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+ if endKey == nil {
+ // forbid duplicates for single keys
+ limit = 1
+ }
+ if limit <= 0 {
+ limit = math.MaxInt64
+ }
+ if limit > 1 && !bucketType.IsSafeRangeBucket() {
+ panic("do not use unsafeRange on non-keys bucket")
+ }
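+ // Serve as much of the range as possible from the read buffer; only fall
+ // back to the bolt bucket when the buffer cannot satisfy the limit.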
+ keys, vals := baseReadTx.buf.Range(bucketType, key, endKey, limit)
+ if int64(len(keys)) == limit {
+ return keys, vals
+ }
+
+ // find/cache bucket
+ bn := bucketType.ID()
+ baseReadTx.txMu.RLock()
+ bucket, ok := baseReadTx.buckets[bn]
+ baseReadTx.txMu.RUnlock()
+ lockHeld := false
+ if !ok {
+ baseReadTx.txMu.Lock()
+ lockHeld = true
+ bucket = baseReadTx.tx.Bucket(bucketType.Name())
+ baseReadTx.buckets[bn] = bucket
+ }
+
+ // ignore a missing bucket since it may have been created in this batch
+ if bucket == nil {
+ if lockHeld {
+ baseReadTx.txMu.Unlock()
+ }
+ return keys, vals
+ }
+ if !lockHeld {
+ baseReadTx.txMu.Lock()
+ }
+ c := bucket.Cursor()
+ baseReadTx.txMu.Unlock()
+
+ k2, v2 := unsafeRange(c, key, endKey, limit-int64(len(keys)))
+ return append(k2, keys...), append(v2, vals...)
+}
+
+type readTx struct {
+ baseReadTx
+}
+
+func (rt *readTx) Lock() { rt.mu.Lock() }
+func (rt *readTx) Unlock() { rt.mu.Unlock() }
+func (rt *readTx) RLock() { rt.mu.RLock() }
+func (rt *readTx) RUnlock() { rt.mu.RUnlock() }
+
+func (rt *readTx) reset() {
+ rt.buf.reset()
+ rt.buckets = make(map[BucketID]*bolt.Bucket)
+ rt.tx = nil
+ rt.txWg = new(sync.WaitGroup)
+}
+
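+// concurrentReadTx is expected to operate on its own copy of the read buffer
+// (see txReadBuffer.unsafeCopy), so Lock/Unlock/RLock are no-ops; RUnlock only
+// signals txWg so the shared bolt read tx can be rolled back once all readers
+// using it are done.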
+type concurrentReadTx struct {
+ baseReadTx
+}
+
+func (rt *concurrentReadTx) Lock() {}
+func (rt *concurrentReadTx) Unlock() {}
+
+// RLock is no-op. concurrentReadTx does not need to be locked after it is created.
+func (rt *concurrentReadTx) RLock() {}
+
+// RUnlock signals the end of concurrentReadTx.
+func (rt *concurrentReadTx) RUnlock() { rt.txWg.Done() }
diff --git a/server/mvcc/backend/testing/betesting.go b/server/storage/backend/testing/betesting.go
similarity index 84%
rename from server/mvcc/backend/testing/betesting.go
rename to server/storage/backend/testing/betesting.go
index ddf27018e7d..e42908f9365 100644
--- a/server/mvcc/backend/testing/betesting.go
+++ b/server/storage/backend/testing/betesting.go
@@ -15,18 +15,19 @@
package betesting
import (
- "io/ioutil"
+ "os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
"go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/server/v3/storage/backend"
)
func NewTmpBackendFromCfg(t testing.TB, bcfg backend.BackendConfig) (backend.Backend, string) {
- dir, err := ioutil.TempDir(t.TempDir(), "etcd_backend_test")
+ dir, err := os.MkdirTemp(t.TempDir(), "etcd_backend_test")
if err != nil {
panic(err)
}
@@ -38,13 +39,13 @@ func NewTmpBackendFromCfg(t testing.TB, bcfg backend.BackendConfig) (backend.Bac
// NewTmpBackend creates a backend implementation for testing.
func NewTmpBackend(t testing.TB, batchInterval time.Duration, batchLimit int) (backend.Backend, string) {
- bcfg := backend.DefaultBackendConfig()
+ bcfg := backend.DefaultBackendConfig(zaptest.NewLogger(t))
bcfg.BatchInterval, bcfg.BatchLimit = batchInterval, batchLimit
return NewTmpBackendFromCfg(t, bcfg)
}
func NewDefaultTmpBackend(t testing.TB) (backend.Backend, string) {
- return NewTmpBackendFromCfg(t, backend.DefaultBackendConfig())
+ return NewTmpBackendFromCfg(t, backend.DefaultBackendConfig(zaptest.NewLogger(t)))
}
func Close(t testing.TB, b backend.Backend) {
diff --git a/server/storage/backend/tx_buffer.go b/server/storage/backend/tx_buffer.go
new file mode 100644
index 00000000000..821b300bfef
--- /dev/null
+++ b/server/storage/backend/tx_buffer.go
@@ -0,0 +1,258 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "sort"
+
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+)
+
+const bucketBufferInitialSize = 512
+
+// txBuffer handles functionality shared between txWriteBuffer and txReadBuffer.
+type txBuffer struct {
+ buckets map[BucketID]*bucketBuffer
+}
+
+func (txb *txBuffer) reset() {
+ for k, v := range txb.buckets {
+ if v.used == 0 {
+ // demote: drop buckets that have not been used since the last reset
+ delete(txb.buckets, k)
+ }
+ v.used = 0
+ }
+}
+
+// txWriteBuffer buffers writes of pending updates that have not yet committed.
+type txWriteBuffer struct {
+ txBuffer
+ // bucket2seq maps a bucket ID to whether that bucket is edited
+ // sequentially (i.e. its keys grow monotonically).
+ bucket2seq map[BucketID]bool
+}
+
+func (txw *txWriteBuffer) put(bucket Bucket, k, v []byte) {
+ txw.bucket2seq[bucket.ID()] = false
+ txw.putInternal(bucket, k, v)
+}
+
+func (txw *txWriteBuffer) putSeq(bucket Bucket, k, v []byte) {
+ // putSeq is only called for data in the Key bucket. The keys
+ // in the Key bucket should be monotonically increasing revisions.
+ verify.Verify(func() {
+ b, ok := txw.buckets[bucket.ID()]
+ if !ok || b.used == 0 {
+ return
+ }
+
+ existingMaxKey := b.buf[b.used-1].key
+ if bytes.Compare(k, existingMaxKey) <= 0 {
+ panic(fmt.Sprintf("Broke the rule of monotonically increasing, existingMaxKey: %s, currentKey: %s",
+ hex.EncodeToString(existingMaxKey), hex.EncodeToString(k)))
+ }
+ })
+ txw.putInternal(bucket, k, v)
+}
+
+func (txw *txWriteBuffer) putInternal(bucket Bucket, k, v []byte) {
+ b, ok := txw.buckets[bucket.ID()]
+ if !ok {
+ b = newBucketBuffer()
+ txw.buckets[bucket.ID()] = b
+ }
+ b.add(k, v)
+}
+
+func (txw *txWriteBuffer) reset() {
+ txw.txBuffer.reset()
+ for k := range txw.bucket2seq {
+ v, ok := txw.buckets[k]
+ if !ok {
+ delete(txw.bucket2seq, k)
+ } else if v.used == 0 {
+ txw.bucket2seq[k] = true
+ }
+ }
+}
+
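+// writeback moves all pending writes into the read buffer so that subsequent
+// reads observe them. Buckets written sequentially keep their order; buckets
+// written out of order are sorted (and deduplicated when they are new to the
+// read buffer) before being merged.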
+func (txw *txWriteBuffer) writeback(txr *txReadBuffer) {
+ for k, wb := range txw.buckets {
+ rb, ok := txr.buckets[k]
+ if !ok {
+ delete(txw.buckets, k)
+ if seq, ok := txw.bucket2seq[k]; ok && !seq {
+ wb.dedupe()
+ }
+ txr.buckets[k] = wb
+ continue
+ }
+ if seq, ok := txw.bucket2seq[k]; ok && !seq && wb.used > 1 {
+ // assume no duplicate keys
+ sort.Sort(wb)
+ }
+ rb.merge(wb)
+ }
+ txw.reset()
+ // increase the buffer version
+ txr.bufVersion++
+}
+
+// txReadBuffer accesses buffered updates.
+type txReadBuffer struct {
+ txBuffer
+ // bufVersion is used to check if the buffer has been modified recently
+ bufVersion uint64
+}
+
+func (txr *txReadBuffer) Range(bucket Bucket, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+ if b := txr.buckets[bucket.ID()]; b != nil {
+ return b.Range(key, endKey, limit)
+ }
+ return nil, nil
+}
+
+func (txr *txReadBuffer) ForEach(bucket Bucket, visitor func(k, v []byte) error) error {
+ if b := txr.buckets[bucket.ID()]; b != nil {
+ return b.ForEach(visitor)
+ }
+ return nil
+}
+
+// unsafeCopy returns a copy of txReadBuffer; the caller should hold backend.readTx.RLock()
+func (txr *txReadBuffer) unsafeCopy() txReadBuffer {
+ txrCopy := txReadBuffer{
+ txBuffer: txBuffer{
+ buckets: make(map[BucketID]*bucketBuffer, len(txr.txBuffer.buckets)),
+ },
+ bufVersion: 0,
+ }
+ for bucketName, bucket := range txr.txBuffer.buckets {
+ txrCopy.txBuffer.buckets[bucketName] = bucket.CopyUsed()
+ }
+ return txrCopy
+}
+
+type kv struct {
+ key []byte
+ val []byte
+}
+
+// bucketBuffer buffers key-value pairs that are pending commit.
+type bucketBuffer struct {
+ buf []kv
+ // used tracks the number of elements in use so buf can be reused without reallocation.
+ used int
+}
+
+func newBucketBuffer() *bucketBuffer {
+ return &bucketBuffer{buf: make([]kv, bucketBufferInitialSize), used: 0}
+}
+
+func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) {
+ f := func(i int) bool { return bytes.Compare(bb.buf[i].key, key) >= 0 }
+ idx := sort.Search(bb.used, f)
+ if idx < 0 || idx >= bb.used {
+ return nil, nil
+ }
+ if len(endKey) == 0 {
+ if bytes.Equal(key, bb.buf[idx].key) {
+ keys = append(keys, bb.buf[idx].key)
+ vals = append(vals, bb.buf[idx].val)
+ }
+ return keys, vals
+ }
+ if bytes.Compare(endKey, bb.buf[idx].key) <= 0 {
+ return nil, nil
+ }
+ for i := idx; i < bb.used && int64(len(keys)) < limit; i++ {
+ if bytes.Compare(endKey, bb.buf[i].key) <= 0 {
+ break
+ }
+ keys = append(keys, bb.buf[i].key)
+ vals = append(vals, bb.buf[i].val)
+ }
+ return keys, vals
+}
+
+func (bb *bucketBuffer) ForEach(visitor func(k, v []byte) error) error {
+ for i := 0; i < bb.used; i++ {
+ if err := visitor(bb.buf[i].key, bb.buf[i].val); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (bb *bucketBuffer) add(k, v []byte) {
+ bb.buf[bb.used].key, bb.buf[bb.used].val = k, v
+ bb.used++
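+ // grow the buffer by 1.5x once it is full so the next add always has a free slot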
+ if bb.used == len(bb.buf) {
+ buf := make([]kv, (3*len(bb.buf))/2)
+ copy(buf, bb.buf)
+ bb.buf = buf
+ }
+}
+
+// merge merges data from bbsrc into bb.
+func (bb *bucketBuffer) merge(bbsrc *bucketBuffer) {
+ for i := 0; i < bbsrc.used; i++ {
+ bb.add(bbsrc.buf[i].key, bbsrc.buf[i].val)
+ }
+ if bb.used == bbsrc.used {
+ return
+ }
+ if bytes.Compare(bb.buf[(bb.used-bbsrc.used)-1].key, bbsrc.buf[0].key) < 0 {
+ return
+ }
+ bb.dedupe()
+}
+
+// dedupe removes duplicate keys, keeping only the newest update for each key.
+func (bb *bucketBuffer) dedupe() {
+ if bb.used <= 1 {
+ return
+ }
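+ // Stable sort preserves insertion order among equal keys, so for duplicate
+ // keys the most recently added (newest) update survives the overwrite below.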
+ sort.Stable(bb)
+ widx := 0
+ for ridx := 1; ridx < bb.used; ridx++ {
+ if !bytes.Equal(bb.buf[ridx].key, bb.buf[widx].key) {
+ widx++
+ }
+ bb.buf[widx] = bb.buf[ridx]
+ }
+ bb.used = widx + 1
+}
+
+func (bb *bucketBuffer) Len() int { return bb.used }
+func (bb *bucketBuffer) Less(i, j int) bool {
+ return bytes.Compare(bb.buf[i].key, bb.buf[j].key) < 0
+}
+func (bb *bucketBuffer) Swap(i, j int) { bb.buf[i], bb.buf[j] = bb.buf[j], bb.buf[i] }
+
+func (bb *bucketBuffer) CopyUsed() *bucketBuffer {
+ verify.Assert(bb.used <= len(bb.buf),
+ "used (%d) should never be bigger than the length of buf (%d)", bb.used, len(bb.buf))
+ bbCopy := bucketBuffer{
+ buf: make([]kv, bb.used),
+ used: bb.used,
+ }
+ copy(bbCopy.buf, bb.buf[:bb.used])
+ return &bbCopy
+}
diff --git a/server/storage/backend/tx_buffer_test.go b/server/storage/backend/tx_buffer_test.go
new file mode 100644
index 00000000000..f194a3574eb
--- /dev/null
+++ b/server/storage/backend/tx_buffer_test.go
@@ -0,0 +1,142 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_bucketBuffer_CopyUsed_After_Add(t *testing.T) {
+ bb := &bucketBuffer{buf: make([]kv, 10), used: 0}
+ for i := 0; i < 20; i++ {
+ k := fmt.Sprintf("key%d", i)
+ v := fmt.Sprintf("val%d", i)
+ bb.add([]byte(k), []byte(v))
+ bbCopy := bb.CopyUsed()
+ assert.Equal(t, bb.used, bbCopy.used)
+ assert.Len(t, bbCopy.buf, bbCopy.used)
+ assert.GreaterOrEqual(t, len(bb.buf), len(bbCopy.buf))
+ }
+}
+
+func Test_bucketBuffer_CopyUsed(t *testing.T) {
+ tests := []struct {
+ name string
+ bufLen int
+ used int
+ wantPanic bool
+ wantUsed int
+ wantBufLen int
+ }{
+ {
+ name: "used is 0",
+ bufLen: 10,
+ used: 0,
+ wantPanic: false,
+ wantUsed: 0,
+ wantBufLen: 0,
+ },
+ {
+ name: "used is greater than 0 and less than len(buf)",
+ bufLen: 10,
+ used: 5,
+ wantPanic: false,
+ wantUsed: 5,
+ wantBufLen: 5,
+ },
+ {
+ name: "used is equal to len(buf)",
+ bufLen: 10,
+ used: 10,
+ wantPanic: false,
+ wantUsed: 10,
+ wantBufLen: 10,
+ },
+ {
+ name: "used is greater than len(buf)",
+ bufLen: 10,
+ used: 11,
+ wantPanic: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ bb := &bucketBuffer{buf: make([]kv, tt.bufLen), used: tt.used}
+ if tt.wantPanic {
+ assert.Panicsf(t, func() {
+ bb.CopyUsed()
+ }, "expected panic when used (%d) and the length of buf (%d)", tt.used, tt.bufLen)
+ } else {
+ bbCopy := bb.CopyUsed()
+ assert.Equal(t, tt.wantUsed, bbCopy.used)
+ assert.Len(t, bbCopy.buf, tt.wantBufLen)
+ }
+ })
+ }
+}
+
+func TestDedupe(t *testing.T) {
+ tests := []struct {
+ name string
+ keys, vals, expectedKeys, expectedVals []string
+ }{
+ {
+ name: "empty",
+ keys: []string{},
+ vals: []string{},
+ expectedKeys: []string{},
+ expectedVals: []string{},
+ },
+ {
+ name: "single kv",
+ keys: []string{"key1"},
+ vals: []string{"val1"},
+ expectedKeys: []string{"key1"},
+ expectedVals: []string{"val1"},
+ },
+ {
+ name: "duplicate key",
+ keys: []string{"key1", "key1"},
+ vals: []string{"val1", "val2"},
+ expectedKeys: []string{"key1"},
+ expectedVals: []string{"val2"},
+ },
+ {
+ name: "unordered keys",
+ keys: []string{"key3", "key1", "key4", "key2", "key1", "key4"},
+ vals: []string{"val1", "val5", "val3", "val4", "val2", "val6"},
+ expectedKeys: []string{"key1", "key2", "key3", "key4"},
+ expectedVals: []string{"val2", "val4", "val1", "val6"},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ bb := &bucketBuffer{buf: make([]kv, 10), used: 0}
+ for i := 0; i < len(tt.keys); i++ {
+ bb.add([]byte(tt.keys[i]), []byte(tt.vals[i]))
+ }
+ bb.dedupe()
+ assert.Len(t, tt.expectedKeys, bb.used)
+ for i := 0; i < bb.used; i++ {
+ assert.Equal(t, bb.buf[i].key, []byte(tt.expectedKeys[i]))
+ assert.Equal(t, bb.buf[i].val, []byte(tt.expectedVals[i]))
+ }
+ })
+ }
+}
diff --git a/server/storage/backend/verify.go b/server/storage/backend/verify.go
new file mode 100644
index 00000000000..a3aa165fe84
--- /dev/null
+++ b/server/storage/backend/verify.go
@@ -0,0 +1,109 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "fmt"
+ "runtime/debug"
+ "strings"
+
+ "github.com/google/go-cmp/cmp"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+)
+
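+// EnvVerifyValueLock is the verification mode under which the lock-usage
+// checks below are active; tests enable it via
+// verify.EnableVerifications(EnvVerifyValueLock) (see verify_test.go).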
+const (
+ EnvVerifyValueLock verify.VerificationType = "lock"
+)
+
+func ValidateCalledInsideApply(lg *zap.Logger) {
+ if !verifyLockEnabled() {
+ return
+ }
+ if !insideApply() {
+ lg.Panic("Called outside of APPLY!", zap.Stack("stacktrace"))
+ }
+}
+
+func ValidateCalledOutSideApply(lg *zap.Logger) {
+ if !verifyLockEnabled() {
+ return
+ }
+ if insideApply() {
+ lg.Panic("Called inside of APPLY!", zap.Stack("stacktrace"))
+ }
+}
+
+func ValidateCalledInsideUnittest(lg *zap.Logger) {
+ if !verifyLockEnabled() {
+ return
+ }
+ if !insideUnittest() {
+ lg.Fatal("Lock called outside of unit test!", zap.Stack("stacktrace"))
+ }
+}
+
+func verifyLockEnabled() bool {
+ return verify.IsVerificationEnabled(EnvVerifyValueLock)
+}
+
+func insideApply() bool {
+ stackTraceStr := string(debug.Stack())
+ return strings.Contains(stackTraceStr, ".applyEntries")
+}
+
+func insideUnittest() bool {
+ stackTraceStr := string(debug.Stack())
+ return strings.Contains(stackTraceStr, "_test.go") && !strings.Contains(stackTraceStr, "tests/")
+}
+
+// VerifyBackendConsistency verifies that the data in ReadTx and BatchTx are consistent.
+func VerifyBackendConsistency(b Backend, lg *zap.Logger, skipSafeRangeBucket bool, bucket ...Bucket) {
+ verify.Verify(func() {
+ if b == nil {
+ return
+ }
+ if lg != nil {
+ lg.Debug("verifyBackendConsistency", zap.Bool("skipSafeRangeBucket", skipSafeRangeBucket))
+ }
+ b.BatchTx().LockOutsideApply()
+ defer b.BatchTx().Unlock()
+ b.ReadTx().RLock()
+ defer b.ReadTx().RUnlock()
+ for _, bkt := range bucket {
+ if skipSafeRangeBucket && bkt.IsSafeRangeBucket() {
+ continue
+ }
+ unsafeVerifyTxConsistency(b, bkt)
+ }
+ })
+}
+
+func unsafeVerifyTxConsistency(b Backend, bucket Bucket) {
+ dataFromWriteTxn := map[string]string{}
+ b.BatchTx().UnsafeForEach(bucket, func(k, v []byte) error {
+ dataFromWriteTxn[string(k)] = string(v)
+ return nil
+ })
+ dataFromReadTxn := map[string]string{}
+ b.ReadTx().UnsafeForEach(bucket, func(k, v []byte) error {
+ dataFromReadTxn[string(k)] = string(v)
+ return nil
+ })
+ if diff := cmp.Diff(dataFromWriteTxn, dataFromReadTxn); diff != "" {
+ panic(fmt.Sprintf("bucket %s data mismatch\nwrite TXN: %v\nread TXN: %v\ndiff: %s", bucket.String(), dataFromWriteTxn, dataFromReadTxn, diff))
+ }
+}
diff --git a/server/storage/backend/verify_test.go b/server/storage/backend/verify_test.go
new file mode 100644
index 00000000000..1a0b571cc3f
--- /dev/null
+++ b/server/storage/backend/verify_test.go
@@ -0,0 +1,107 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend_test
+
+import (
+ "testing"
+ "time"
+
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+)
+
+func TestLockVerify(t *testing.T) {
+ tcs := []struct {
+ name string
+ insideApply bool
+ lock func(tx backend.BatchTx)
+ txPostLockInsideApplyHook func()
+ expectPanic bool
+ }{
+ {
+ name: "call lockInsideApply from inside apply",
+ insideApply: true,
+ lock: lockInsideApply,
+ expectPanic: false,
+ },
+ {
+ name: "call lockInsideApply from outside apply (without txPostLockInsideApplyHook)",
+ insideApply: false,
+ lock: lockInsideApply,
+ expectPanic: false,
+ },
+ {
+ name: "call lockInsideApply from outside apply (with txPostLockInsideApplyHook)",
+ insideApply: false,
+ lock: lockInsideApply,
+ txPostLockInsideApplyHook: func() {},
+ expectPanic: true,
+ },
+ {
+ name: "call lockOutsideApply from outside apply",
+ insideApply: false,
+ lock: lockOutsideApply,
+ expectPanic: false,
+ },
+ {
+ name: "call lockOutsideApply from inside apply",
+ insideApply: true,
+ lock: lockOutsideApply,
+ expectPanic: true,
+ },
+ {
+ name: "call Lock from unit test",
+ insideApply: false,
+ lock: lockFromUT,
+ expectPanic: false,
+ },
+ }
+ revertVerifyFunc := verify.EnableVerifications(backend.EnvVerifyValueLock)
+ defer revertVerifyFunc()
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ be, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
+ be.SetTxPostLockInsideApplyHook(tc.txPostLockInsideApplyHook)
+
+ hasPaniced := handlePanic(func() {
+ if tc.insideApply {
+ applyEntries(be, tc.lock)
+ } else {
+ tc.lock(be.BatchTx())
+ }
+ }) != nil
+ if hasPaniced != tc.expectPanic {
+ t.Errorf("%v != %v", hasPaniced, tc.expectPanic)
+ }
+ })
+ }
+}
+
+func handlePanic(f func()) (result any) {
+ defer func() {
+ result = recover()
+ }()
+ f()
+ return result
+}
+
+func applyEntries(be backend.Backend, f func(tx backend.BatchTx)) {
+ f(be.BatchTx())
+}
+
+func lockInsideApply(tx backend.BatchTx) { tx.LockInsideApply() }
+func lockOutsideApply(tx backend.BatchTx) { tx.LockOutsideApply() }
+func lockFromUT(tx backend.BatchTx) { tx.Lock() }
diff --git a/server/datadir/datadir.go b/server/storage/datadir/datadir.go
similarity index 84%
rename from server/datadir/datadir.go
rename to server/storage/datadir/datadir.go
index fa4c51ad1a2..ced7d767fe5 100644
--- a/server/datadir/datadir.go
+++ b/server/storage/datadir/datadir.go
@@ -31,7 +31,16 @@ func ToSnapDir(dataDir string) string {
return filepath.Join(ToMemberDir(dataDir), snapDirSegment)
}
+// ToWalDir returns the directory path for the member's WAL.
+//
+// Deprecated: use ToWALDir instead.
+//
+//revive:disable-next-line:var-naming
func ToWalDir(dataDir string) string {
+ return ToWALDir(dataDir)
+}
+
+func ToWALDir(dataDir string) string {
return filepath.Join(ToMemberDir(dataDir), walDirSegment)
}
diff --git a/server/storage/datadir/datadir_test.go b/server/storage/datadir/datadir_test.go
new file mode 100644
index 00000000000..5b8abeeb6f6
--- /dev/null
+++ b/server/storage/datadir/datadir_test.go
@@ -0,0 +1,48 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package datadir_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "go.etcd.io/etcd/server/v3/storage/datadir"
+)
+
+func TestToBackendFileName(t *testing.T) {
+ result := datadir.ToBackendFileName("/dir/data-dir")
+ assert.Equal(t, "/dir/data-dir/member/snap/db", result)
+}
+
+func TestToMemberDir(t *testing.T) {
+ result := datadir.ToMemberDir("/dir/data-dir")
+ assert.Equal(t, "/dir/data-dir/member", result)
+}
+
+func TestToSnapDir(t *testing.T) {
+ result := datadir.ToSnapDir("/dir/data-dir")
+ assert.Equal(t, "/dir/data-dir/member/snap", result)
+}
+
+func TestToWALDir(t *testing.T) {
+ result := datadir.ToWALDir("/dir/data-dir")
+ assert.Equal(t, "/dir/data-dir/member/wal", result)
+}
+
+func TestToWALDirSlash(t *testing.T) {
+ result := datadir.ToWALDir("/dir/data-dir/")
+ assert.Equal(t, "/dir/data-dir/member/wal", result)
+}
diff --git a/server/datadir/doc.go b/server/storage/datadir/doc.go
similarity index 100%
rename from server/datadir/doc.go
rename to server/storage/datadir/doc.go
diff --git a/server/storage/hooks.go b/server/storage/hooks.go
new file mode 100644
index 00000000000..ffec71c0bca
--- /dev/null
+++ b/server/storage/hooks.go
@@ -0,0 +1,60 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "sync"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/server/v3/etcdserver/cindex"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+type BackendHooks struct {
+ indexer cindex.ConsistentIndexer
+ lg *zap.Logger
+
+ // confState to be written in the next submitted backend transaction (if dirty)
+ confState raftpb.ConfState
+ // confStateDirty is false by default; the first SetConfState call marks it
+ // dirty, so an uninitialized `confState` is never written to the backend.
+ confStateDirty bool
+ confStateLock sync.Mutex
+}
+
+func NewBackendHooks(lg *zap.Logger, indexer cindex.ConsistentIndexer) *BackendHooks {
+ return &BackendHooks{lg: lg, indexer: indexer}
+}
+
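+// OnPreCommitUnsafe persists the consistent index on every backend commit
+// and, when it has been updated since the last commit, the raft ConfState as
+// well.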
+func (bh *BackendHooks) OnPreCommitUnsafe(tx backend.UnsafeReadWriter) {
+ bh.indexer.UnsafeSave(tx)
+ bh.confStateLock.Lock()
+ defer bh.confStateLock.Unlock()
+ if bh.confStateDirty {
+ schema.MustUnsafeSaveConfStateToBackend(bh.lg, tx, &bh.confState)
+ // save bh.confState
+ bh.confStateDirty = false
+ }
+}
+
+func (bh *BackendHooks) SetConfState(confState *raftpb.ConfState) {
+ bh.confStateLock.Lock()
+ defer bh.confStateLock.Unlock()
+ bh.confState = *confState
+ bh.confStateDirty = true
+}
diff --git a/server/storage/metrics.go b/server/storage/metrics.go
new file mode 100644
index 00000000000..cb7f87057f9
--- /dev/null
+++ b/server/storage/metrics.go
@@ -0,0 +1,30 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var quotaBackendBytes = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "quota_backend_bytes",
+ Help: "Current backend storage quota size in bytes.",
+})
+
+func init() {
+ prometheus.MustRegister(quotaBackendBytes)
+}
diff --git a/server/mvcc/doc.go b/server/storage/mvcc/doc.go
similarity index 100%
rename from server/mvcc/doc.go
rename to server/storage/mvcc/doc.go
diff --git a/server/storage/mvcc/hash.go b/server/storage/mvcc/hash.go
new file mode 100644
index 00000000000..21618535da4
--- /dev/null
+++ b/server/storage/mvcc/hash.go
@@ -0,0 +1,180 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "hash"
+ "hash/crc32"
+ "sort"
+ "sync"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+const (
+ hashStorageMaxSize = 10
+)
+
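+// unsafeHashByRev feeds every key/value in the Key bucket up to `revision`
+// into the hasher; when a non-empty `keep` set is given, revisions at or
+// below the compact revision that are not in `keep` are skipped.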
+func unsafeHashByRev(tx backend.UnsafeReader, compactRevision, revision int64, keep map[Revision]struct{}) (KeyValueHash, error) {
+ h := newKVHasher(compactRevision, revision, keep)
+ err := tx.UnsafeForEach(schema.Key, func(k, v []byte) error {
+ h.WriteKeyValue(k, v)
+ return nil
+ })
+ return h.Hash(), err
+}
+
+type kvHasher struct {
+ hash hash.Hash32
+ compactRevision int64
+ revision int64
+ keep map[Revision]struct{}
+}
+
+func newKVHasher(compactRev, rev int64, keep map[Revision]struct{}) kvHasher {
+ h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+ h.Write(schema.Key.Name())
+ return kvHasher{
+ hash: h,
+ compactRevision: compactRev,
+ revision: rev,
+ keep: keep,
+ }
+}
+
+func (h *kvHasher) WriteKeyValue(k, v []byte) {
+ kr := BytesToRev(k)
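+ // skip revisions newer than the revision being hashed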
+ upper := Revision{Main: h.revision + 1}
+ if !upper.GreaterThan(kr) {
+ return
+ }
+
+ isTombstone := BytesToBucketKey(k).tombstone
+
+ lower := Revision{Main: h.compactRevision + 1}
+ // Skip revisions at or below the compact revision that are scheduled for
+ // deletion by compaction; if there is no keep set, do not skip anything.
+ if lower.GreaterThan(kr) && len(h.keep) > 0 {
+ if _, ok := h.keep[kr]; !ok {
+ return
+ }
+ }
+
+ // When performing compaction, if the compacted revision is a
+ // tombstone, older versions (<= 3.5.15 or <= 3.4.33) will delete
+ // the tombstone. But newer versions (> 3.5.15 or > 3.4.33) won't
+ // delete it. So we should skip the tombstone in such cases when
+ // computing the hash to ensure that both older and newer versions
+ // can always generate the same hash values.
+ if kr.Main == h.compactRevision && isTombstone {
+ return
+ }
+
+ h.hash.Write(k)
+ h.hash.Write(v)
+}
+
+func (h *kvHasher) Hash() KeyValueHash {
+ return KeyValueHash{Hash: h.hash.Sum32(), CompactRevision: h.compactRevision, Revision: h.revision}
+}
+
+type KeyValueHash struct {
+ Hash uint32
+ CompactRevision int64
+ Revision int64
+}
+
+type HashStorage interface {
+ // Hash computes the hash of the whole backend keyspace,
+ // including key, lease, and other buckets in storage.
+ // This is designed for testing ONLY!
+ // Do not rely on this in production with ongoing transactions,
+ // since the Hash operation does not hold MVCC locks.
+ // Use the "HashByRev" method instead for "key" bucket consistency checks.
+ Hash() (hash uint32, revision int64, err error)
+
+ // HashByRev computes the hash of all MVCC revisions up to a given revision.
+ HashByRev(rev int64) (hash KeyValueHash, currentRev int64, err error)
+
+ // Store adds a hash value to the local cache, allowing it to be returned by HashByRev.
+ Store(valueHash KeyValueHash)
+
+ // Hashes returns a list of up to `hashStorageMaxSize` of the newest previously stored hashes.
+ Hashes() []KeyValueHash
+}
+
+type hashStorage struct {
+ store *store
+ hashMu sync.RWMutex
+ hashes []KeyValueHash
+ lg *zap.Logger
+}
+
+func NewHashStorage(lg *zap.Logger, s *store) HashStorage {
+ return &hashStorage{
+ store: s,
+ lg: lg,
+ }
+}
+
+func (s *hashStorage) Hash() (hash uint32, revision int64, err error) {
+ return s.store.hash()
+}
+
+func (s *hashStorage) HashByRev(rev int64) (KeyValueHash, int64, error) {
+ s.hashMu.RLock()
+ for _, h := range s.hashes {
+ if rev == h.Revision {
+ s.hashMu.RUnlock()
+
+ s.store.revMu.RLock()
+ currentRev := s.store.currentRev
+ s.store.revMu.RUnlock()
+ return h, currentRev, nil
+ }
+ }
+ s.hashMu.RUnlock()
+
+ return s.store.hashByRev(rev)
+}
+
+func (s *hashStorage) Store(hash KeyValueHash) {
+ s.lg.Info("storing new hash",
+ zap.Uint32("hash", hash.Hash),
+ zap.Int64("revision", hash.Revision),
+ zap.Int64("compact-revision", hash.CompactRevision),
+ )
+ s.hashMu.Lock()
+ defer s.hashMu.Unlock()
+ s.hashes = append(s.hashes, hash)
+ sort.Slice(s.hashes, func(i, j int) bool {
+ return s.hashes[i].Revision < s.hashes[j].Revision
+ })
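+ // keep only the newest hashStorageMaxSize hashes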
+ if len(s.hashes) > hashStorageMaxSize {
+ s.hashes = s.hashes[len(s.hashes)-hashStorageMaxSize:]
+ }
+}
+
+func (s *hashStorage) Hashes() []KeyValueHash {
+ s.hashMu.RLock()
+ // Copy out hashes under lock just to be safe
+ hashes := make([]KeyValueHash, 0, len(s.hashes))
+ hashes = append(hashes, s.hashes...)
+ s.hashMu.RUnlock()
+ return hashes
+}
diff --git a/server/storage/mvcc/hash_test.go b/server/storage/mvcc/hash_test.go
new file mode 100644
index 00000000000..d08b3ad1982
--- /dev/null
+++ b/server/storage/mvcc/hash_test.go
@@ -0,0 +1,234 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/lease"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+ "go.etcd.io/etcd/server/v3/storage/mvcc/testutil"
+)
+
+// TestHashByRevValue tests HashByRevValue values to ensure we don't change the
+// output, which would have catastrophic consequences. The expected output is
+// hardcoded, so please regenerate it every time you change the input parameters.
+func TestHashByRevValue(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ var totalRevisions int64 = 1210
+ assert.Less(t, int64(s.cfg.CompactionBatchLimit), totalRevisions)
+ assert.Less(t, int64(testutil.CompactionCycle*10), totalRevisions)
+ var rev int64
+ var got []KeyValueHash
+ for ; rev < totalRevisions; rev += testutil.CompactionCycle {
+ putKVs(s, rev, testutil.CompactionCycle)
+ hash := testHashByRev(t, s, rev+testutil.CompactionCycle/2)
+ got = append(got, hash)
+ }
+ putKVs(s, rev, totalRevisions)
+ hash := testHashByRev(t, s, rev+totalRevisions/2)
+ got = append(got, hash)
+ assert.Equal(t, []KeyValueHash{
+ {4082599214, -1, 35},
+ {2279933401, 35, 106},
+ {3284231217, 106, 177},
+ {126286495, 177, 248},
+ {900108730, 248, 319},
+ {2475485232, 319, 390},
+ {1226296507, 390, 461},
+ {2503661030, 461, 532},
+ {4155130747, 532, 603},
+ {106915399, 603, 674},
+ {406914006, 674, 745},
+ {1882211381, 745, 816},
+ {806177088, 816, 887},
+ {664311366, 887, 958},
+ {1496914449, 958, 1029},
+ {2434525091, 1029, 1100},
+ {3988652253, 1100, 1171},
+ {1122462288, 1171, 1242},
+ {724436716, 1242, 1883},
+ }, got)
+}
+
+func TestHashByRevValueLastRevision(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ var totalRevisions int64 = 1210
+ assert.Less(t, int64(s.cfg.CompactionBatchLimit), totalRevisions)
+ assert.Less(t, int64(testutil.CompactionCycle*10), totalRevisions)
+ var rev int64
+ var got []KeyValueHash
+ for ; rev < totalRevisions; rev += testutil.CompactionCycle {
+ putKVs(s, rev, testutil.CompactionCycle)
+ hash := testHashByRev(t, s, 0)
+ got = append(got, hash)
+ }
+ putKVs(s, rev, totalRevisions)
+ hash := testHashByRev(t, s, 0)
+ got = append(got, hash)
+ assert.Equal(t, []KeyValueHash{
+ {1913897190, -1, 73},
+ {224860069, 73, 145},
+ {1565167519, 145, 217},
+ {1566261620, 217, 289},
+ {2037173024, 289, 361},
+ {691659396, 361, 433},
+ {2713730748, 433, 505},
+ {3919322507, 505, 577},
+ {769967540, 577, 649},
+ {2909194793, 649, 721},
+ {1576921157, 721, 793},
+ {4067701532, 793, 865},
+ {2226384237, 865, 937},
+ {2923408134, 937, 1009},
+ {2680329256, 1009, 1081},
+ {1546717673, 1081, 1153},
+ {2713657846, 1153, 1225},
+ {1046575299, 1225, 1297},
+ {2017735779, 1297, 2508},
+ }, got)
+}
+
+func putKVs(s *store, rev, count int64) {
+ for i := rev; i <= rev+count; i++ {
+ s.Put([]byte(testutil.PickKey(i)), []byte(fmt.Sprint(i)), 0)
+ }
+}
+
+func testHashByRev(t *testing.T, s *store, rev int64) KeyValueHash {
+ if rev == 0 {
+ rev = s.Rev()
+ }
+ hash, _, err := s.hashByRev(rev)
+ require.NoErrorf(t, err, "error on rev %v", rev)
+ _, err = s.Compact(traceutil.TODO(), rev)
+ assert.NoErrorf(t, err, "error on compact %v", rev)
+ return hash
+}
+
+// TestCompactionHash tests the compaction hash.
+// TODO: Change this to a fuzz test.
+func TestCompactionHash(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ testutil.TestCompactionHash(context.Background(), t, hashTestCase{s}, s.cfg.CompactionBatchLimit)
+}
+
+type hashTestCase struct {
+ *store
+}
+
+func (tc hashTestCase) Put(ctx context.Context, key, value string) error {
+ tc.store.Put([]byte(key), []byte(value), 0)
+ return nil
+}
+
+func (tc hashTestCase) Delete(ctx context.Context, key string) error {
+ tc.store.DeleteRange([]byte(key), nil)
+ return nil
+}
+
+func (tc hashTestCase) HashByRev(ctx context.Context, rev int64) (testutil.KeyValueHash, error) {
+ hash, _, err := tc.store.HashStorage().HashByRev(rev)
+ return testutil.KeyValueHash{Hash: hash.Hash, CompactRevision: hash.CompactRevision, Revision: hash.Revision}, err
+}
+
+func (tc hashTestCase) Defrag(ctx context.Context) error {
+ return tc.store.b.Defrag()
+}
+
+func (tc hashTestCase) Compact(ctx context.Context, rev int64) error {
+ done, err := tc.store.Compact(traceutil.TODO(), rev)
+ if err != nil {
+ return err
+ }
+ select {
+ case <-done:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ return nil
+}
+
+func TestHasherStore(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ store := newFakeStore(lg)
+ s := NewHashStorage(lg, store)
+ defer store.Close()
+
+ var hashes []KeyValueHash
+ for i := 0; i < hashStorageMaxSize; i++ {
+ hash := KeyValueHash{Hash: uint32(i), Revision: int64(i) + 10, CompactRevision: int64(i) + 100}
+ hashes = append(hashes, hash)
+ s.Store(hash)
+ }
+
+ for _, want := range hashes {
+ got, _, err := s.HashByRev(want.Revision)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want.Hash != got.Hash {
+ t.Errorf("Expected stored hash to match, got: %d, expected: %d", want.Hash, got.Hash)
+ }
+ if want.Revision != got.Revision {
+ t.Errorf("Expected stored revision to match, got: %d, expected: %d", want.Revision, got.Revision)
+ }
+ if want.CompactRevision != got.CompactRevision {
+ t.Errorf("Expected stored compact revision to match, got: %d, expected: %d", want.CompactRevision, got.CompactRevision)
+ }
+ }
+}
+
+func TestHasherStoreFull(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ store := newFakeStore(lg)
+ s := NewHashStorage(lg, store)
+ defer store.Close()
+
+ var minRevision int64 = 100
+ maxRevision := minRevision + hashStorageMaxSize
+ for i := 0; i < hashStorageMaxSize; i++ {
+ s.Store(KeyValueHash{Revision: int64(i) + minRevision})
+ }
+
+ // Hash for old revision should be discarded as storage is already full
+ s.Store(KeyValueHash{Revision: minRevision - 1})
+ hash, _, err := s.HashByRev(minRevision - 1)
+ if err == nil {
+ t.Errorf("Expected an error as old revision should be discarded, got: %v", hash)
+ }
+ // Hash for new revision should be stored even when storage is full
+ s.Store(KeyValueHash{Revision: maxRevision + 1})
+ _, _, err = s.HashByRev(maxRevision + 1)
+ if err != nil {
+ t.Errorf("Didn't expect error for new revision, err: %v", err)
+ }
+}
diff --git a/server/storage/mvcc/index.go b/server/storage/mvcc/index.go
new file mode 100644
index 00000000000..f300831b293
--- /dev/null
+++ b/server/storage/mvcc/index.go
@@ -0,0 +1,253 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "sync"
+
+ "github.com/google/btree"
+ "go.uber.org/zap"
+)
+
+type index interface {
+ Get(key []byte, atRev int64) (rev, created Revision, ver int64, err error)
+ Range(key, end []byte, atRev int64) ([][]byte, []Revision)
+ Revisions(key, end []byte, atRev int64, limit int) ([]Revision, int)
+ CountRevisions(key, end []byte, atRev int64) int
+ Put(key []byte, rev Revision)
+ Tombstone(key []byte, rev Revision) error
+ Compact(rev int64) map[Revision]struct{}
+ Keep(rev int64) map[Revision]struct{}
+ Equal(b index) bool
+
+ Insert(ki *keyIndex)
+ KeyIndex(ki *keyIndex) *keyIndex
+}
+
+type treeIndex struct {
+ sync.RWMutex
+ tree *btree.BTreeG[*keyIndex]
+ lg *zap.Logger
+}
+
+func newTreeIndex(lg *zap.Logger) index {
+ return &treeIndex{
+ tree: btree.NewG(32, func(aki *keyIndex, bki *keyIndex) bool {
+ return aki.Less(bki)
+ }),
+ lg: lg,
+ }
+}
+
+func (ti *treeIndex) Put(key []byte, rev Revision) {
+ keyi := &keyIndex{key: key}
+
+ ti.Lock()
+ defer ti.Unlock()
+ okeyi, ok := ti.tree.Get(keyi)
+ if !ok {
+ keyi.put(ti.lg, rev.Main, rev.Sub)
+ ti.tree.ReplaceOrInsert(keyi)
+ return
+ }
+ okeyi.put(ti.lg, rev.Main, rev.Sub)
+}
+
+func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created Revision, ver int64, err error) {
+ ti.RLock()
+ defer ti.RUnlock()
+ return ti.unsafeGet(key, atRev)
+}
+
+func (ti *treeIndex) unsafeGet(key []byte, atRev int64) (modified, created Revision, ver int64, err error) {
+ keyi := &keyIndex{key: key}
+ if keyi = ti.keyIndex(keyi); keyi == nil {
+ return Revision{}, Revision{}, 0, ErrRevisionNotFound
+ }
+ return keyi.get(ti.lg, atRev)
+}
+
+func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex {
+ ti.RLock()
+ defer ti.RUnlock()
+ return ti.keyIndex(keyi)
+}
+
+func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex {
+ if ki, ok := ti.tree.Get(keyi); ok {
+ return ki
+ }
+ return nil
+}
+
+func (ti *treeIndex) unsafeVisit(key, end []byte, f func(ki *keyIndex) bool) {
+ keyi, endi := &keyIndex{key: key}, &keyIndex{key: end}
+
+ ti.tree.AscendGreaterOrEqual(keyi, func(item *keyIndex) bool {
+ if len(endi.key) > 0 && !item.Less(endi) {
+ return false
+ }
+ if !f(item) {
+ return false
+ }
+ return true
+ })
+}
+
+// Revisions returns a limited number of revisions from key (inclusive) to end (exclusive)
+// at the given rev. The returned slice is sorted by key. There is no limit if limit <= 0.
+// The second return value is not capped by the limit and reflects the total number of revisions.
+func (ti *treeIndex) Revisions(key, end []byte, atRev int64, limit int) (revs []Revision, total int) {
+ ti.RLock()
+ defer ti.RUnlock()
+
+ if end == nil {
+ rev, _, _, err := ti.unsafeGet(key, atRev)
+ if err != nil {
+ return nil, 0
+ }
+ return []Revision{rev}, 1
+ }
+ ti.unsafeVisit(key, end, func(ki *keyIndex) bool {
+ if rev, _, _, err := ki.get(ti.lg, atRev); err == nil {
+ if limit <= 0 || len(revs) < limit {
+ revs = append(revs, rev)
+ }
+ total++
+ }
+ return true
+ })
+ return revs, total
+}
+
+// CountRevisions returns the number of revisions
+// from key (inclusive) to end (exclusive) at the given rev.
+func (ti *treeIndex) CountRevisions(key, end []byte, atRev int64) int {
+ ti.RLock()
+ defer ti.RUnlock()
+
+ if end == nil {
+ _, _, _, err := ti.unsafeGet(key, atRev)
+ if err != nil {
+ return 0
+ }
+ return 1
+ }
+ total := 0
+ ti.unsafeVisit(key, end, func(ki *keyIndex) bool {
+ if _, _, _, err := ki.get(ti.lg, atRev); err == nil {
+ total++
+ }
+ return true
+ })
+ return total
+}
+
+func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []Revision) {
+ ti.RLock()
+ defer ti.RUnlock()
+
+ if end == nil {
+ rev, _, _, err := ti.unsafeGet(key, atRev)
+ if err != nil {
+ return nil, nil
+ }
+ return [][]byte{key}, []Revision{rev}
+ }
+ ti.unsafeVisit(key, end, func(ki *keyIndex) bool {
+ if rev, _, _, err := ki.get(ti.lg, atRev); err == nil {
+ revs = append(revs, rev)
+ keys = append(keys, ki.key)
+ }
+ return true
+ })
+ return keys, revs
+}
+
+func (ti *treeIndex) Tombstone(key []byte, rev Revision) error {
+ keyi := &keyIndex{key: key}
+
+ ti.Lock()
+ defer ti.Unlock()
+ ki, ok := ti.tree.Get(keyi)
+ if !ok {
+ return ErrRevisionNotFound
+ }
+
+ return ki.tombstone(ti.lg, rev.Main, rev.Sub)
+}
+
+func (ti *treeIndex) Compact(rev int64) map[Revision]struct{} {
+ available := make(map[Revision]struct{})
+ ti.lg.Info("compact tree index", zap.Int64("revision", rev))
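+ // Work on a clone so the potentially long walk below does not hold the
+ // index lock for its entire duration; individual keyIndex mutations still
+ // re-acquire the lock inside the walk.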
+ ti.Lock()
+ clone := ti.tree.Clone()
+ ti.Unlock()
+
+ clone.Ascend(func(keyi *keyIndex) bool {
+ // The lock is needed here to prevent the keyIndex from being modified while
+ // compaction is in progress, or a revision from being added to an empty
+ // keyIndex right before it is deleted.
+ ti.Lock()
+ keyi.compact(ti.lg, rev, available)
+ if keyi.isEmpty() {
+ _, ok := ti.tree.Delete(keyi)
+ if !ok {
+ ti.lg.Panic("failed to delete during compaction")
+ }
+ }
+ ti.Unlock()
+ return true
+ })
+ return available
+}
+
+// Keep finds all revisions to be kept for a Compaction at the given rev.
+func (ti *treeIndex) Keep(rev int64) map[Revision]struct{} {
+ available := make(map[Revision]struct{})
+ ti.RLock()
+ defer ti.RUnlock()
+ ti.tree.Ascend(func(keyi *keyIndex) bool {
+ keyi.keep(rev, available)
+ return true
+ })
+ return available
+}
+
+func (ti *treeIndex) Equal(bi index) bool {
+ b := bi.(*treeIndex)
+
+ if ti.tree.Len() != b.tree.Len() {
+ return false
+ }
+
+ equal := true
+
+ ti.tree.Ascend(func(aki *keyIndex) bool {
+ bki, _ := b.tree.Get(aki)
+ if !aki.equal(bki) {
+ equal = false
+ return false
+ }
+ return true
+ })
+
+ return equal
+}
+
+func (ti *treeIndex) Insert(ki *keyIndex) {
+ ti.Lock()
+ defer ti.Unlock()
+ ti.tree.ReplaceOrInsert(ki)
+}
diff --git a/server/storage/mvcc/index_bench_test.go b/server/storage/mvcc/index_bench_test.go
new file mode 100644
index 00000000000..493ac570f65
--- /dev/null
+++ b/server/storage/mvcc/index_bench_test.go
@@ -0,0 +1,69 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "testing"
+
+ "go.uber.org/zap"
+)
+
+func BenchmarkIndexCompact1(b *testing.B) { benchmarkIndexCompact(b, 1) }
+func BenchmarkIndexCompact100(b *testing.B) { benchmarkIndexCompact(b, 100) }
+func BenchmarkIndexCompact10000(b *testing.B) { benchmarkIndexCompact(b, 10000) }
+func BenchmarkIndexCompact100000(b *testing.B) { benchmarkIndexCompact(b, 100000) }
+func BenchmarkIndexCompact1000000(b *testing.B) { benchmarkIndexCompact(b, 1000000) }
+
+func benchmarkIndexCompact(b *testing.B, size int) {
+ log := zap.NewNop()
+ kvindex := newTreeIndex(log)
+
+ bytesN := 64
+ keys := createBytesSlice(bytesN, size)
+ for i := 1; i < size; i++ {
+ kvindex.Put(keys[i], Revision{Main: int64(i), Sub: int64(i)})
+ }
+ b.ResetTimer()
+ for i := 1; i < b.N; i++ {
+ kvindex.Compact(int64(i))
+ }
+}
+
+func BenchmarkIndexPut(b *testing.B) {
+ log := zap.NewNop()
+ kvindex := newTreeIndex(log)
+
+ bytesN := 64
+ keys := createBytesSlice(bytesN, b.N)
+ b.ResetTimer()
+ for i := 1; i < b.N; i++ {
+ kvindex.Put(keys[i], Revision{Main: int64(i), Sub: int64(i)})
+ }
+}
+
+func BenchmarkIndexGet(b *testing.B) {
+ log := zap.NewNop()
+ kvindex := newTreeIndex(log)
+
+ bytesN := 64
+ keys := createBytesSlice(bytesN, b.N)
+ for i := 1; i < b.N; i++ {
+ kvindex.Put(keys[i], Revision{Main: int64(i), Sub: int64(i)})
+ }
+ b.ResetTimer()
+ for i := 1; i < b.N; i++ {
+ kvindex.Get(keys[i], int64(i))
+ }
+}
diff --git a/server/storage/mvcc/index_test.go b/server/storage/mvcc/index_test.go
new file mode 100644
index 00000000000..d7f32fded2d
--- /dev/null
+++ b/server/storage/mvcc/index_test.go
@@ -0,0 +1,680 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "errors"
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
+)
+
+func TestIndexGet(t *testing.T) {
+ ti := newTreeIndex(zaptest.NewLogger(t))
+ ti.Put([]byte("foo"), Revision{Main: 2})
+ ti.Put([]byte("foo"), Revision{Main: 4})
+ ti.Tombstone([]byte("foo"), Revision{Main: 6})
+
+ tests := []struct {
+ rev int64
+
+ wrev Revision
+ wcreated Revision
+ wver int64
+ werr error
+ }{
+ {0, Revision{}, Revision{}, 0, ErrRevisionNotFound},
+ {1, Revision{}, Revision{}, 0, ErrRevisionNotFound},
+ {2, Revision{Main: 2}, Revision{Main: 2}, 1, nil},
+ {3, Revision{Main: 2}, Revision{Main: 2}, 1, nil},
+ {4, Revision{Main: 4}, Revision{Main: 2}, 2, nil},
+ {5, Revision{Main: 4}, Revision{Main: 2}, 2, nil},
+ {6, Revision{}, Revision{}, 0, ErrRevisionNotFound},
+ }
+ for i, tt := range tests {
+ rev, created, ver, err := ti.Get([]byte("foo"), tt.rev)
+ if !errors.Is(err, tt.werr) {
+ t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
+ }
+ if rev != tt.wrev {
+ t.Errorf("#%d: rev = %+v, want %+v", i, rev, tt.wrev)
+ }
+ if created != tt.wcreated {
+ t.Errorf("#%d: created = %+v, want %+v", i, created, tt.wcreated)
+ }
+ if ver != tt.wver {
+ t.Errorf("#%d: ver = %d, want %d", i, ver, tt.wver)
+ }
+ }
+}
+
+func TestIndexRange(t *testing.T) {
+ allKeys := [][]byte{[]byte("foo"), []byte("foo1"), []byte("foo2")}
+ allRevs := []Revision{{Main: 1}, {Main: 2}, {Main: 3}}
+
+ ti := newTreeIndex(zaptest.NewLogger(t))
+ for i := range allKeys {
+ ti.Put(allKeys[i], allRevs[i])
+ }
+
+ atRev := int64(3)
+ tests := []struct {
+ key, end []byte
+ wkeys [][]byte
+ wrevs []Revision
+ }{
+ // single key that not found
+ {
+ []byte("bar"), nil, nil, nil,
+ },
+ // single key that found
+ {
+ []byte("foo"), nil, allKeys[:1], allRevs[:1],
+ },
+ // range keys, return first member
+ {
+ []byte("foo"), []byte("foo1"), allKeys[:1], allRevs[:1],
+ },
+ // range keys, return first two members
+ {
+ []byte("foo"), []byte("foo2"), allKeys[:2], allRevs[:2],
+ },
+ // range keys, return all members
+ {
+ []byte("foo"), []byte("fop"), allKeys, allRevs,
+ },
+ // range keys, return last two members
+ {
+ []byte("foo1"), []byte("fop"), allKeys[1:], allRevs[1:],
+ },
+ // range keys, return last member
+ {
+ []byte("foo2"), []byte("fop"), allKeys[2:], allRevs[2:],
+ },
+ // range keys, return nothing
+ {
+ []byte("foo3"), []byte("fop"), nil, nil,
+ },
+ }
+ for i, tt := range tests {
+ keys, revs := ti.Range(tt.key, tt.end, atRev)
+ if !reflect.DeepEqual(keys, tt.wkeys) {
+ t.Errorf("#%d: keys = %+v, want %+v", i, keys, tt.wkeys)
+ }
+ if !reflect.DeepEqual(revs, tt.wrevs) {
+ t.Errorf("#%d: revs = %+v, want %+v", i, revs, tt.wrevs)
+ }
+ }
+}
+
+func TestIndexTombstone(t *testing.T) {
+ ti := newTreeIndex(zaptest.NewLogger(t))
+ ti.Put([]byte("foo"), Revision{Main: 1})
+
+ err := ti.Tombstone([]byte("foo"), Revision{Main: 2})
+ if err != nil {
+ t.Errorf("tombstone error = %v, want nil", err)
+ }
+
+ _, _, _, err = ti.Get([]byte("foo"), 2)
+ if !errors.Is(err, ErrRevisionNotFound) {
+ t.Errorf("get error = %v, want ErrRevisionNotFound", err)
+ }
+ err = ti.Tombstone([]byte("foo"), Revision{Main: 3})
+ if !errors.Is(err, ErrRevisionNotFound) {
+ t.Errorf("tombstone error = %v, want %v", err, ErrRevisionNotFound)
+ }
+}
+
+func TestIndexRevision(t *testing.T) {
+ allKeys := [][]byte{[]byte("foo"), []byte("foo1"), []byte("foo2"), []byte("foo2"), []byte("foo1"), []byte("foo")}
+ allRevs := []Revision{{Main: 1}, {Main: 2}, {Main: 3}, {Main: 4}, {Main: 5}, {Main: 6}}
+
+ ti := newTreeIndex(zaptest.NewLogger(t))
+ for i := range allKeys {
+ ti.Put(allKeys[i], allRevs[i])
+ }
+
+ tests := []struct {
+ key, end []byte
+ atRev int64
+ limit int
+ wrevs []Revision
+ wcounts int
+ }{
+ // single key that is not found
+ {
+ []byte("bar"), nil, 6, 0, nil, 0,
+ },
+ // single key that is found
+ {
+ []byte("foo"), nil, 6, 0, []Revision{{Main: 6}}, 1,
+ },
+ // various range keys, fixed atRev, unlimited
+ {
+ []byte("foo"), []byte("foo1"), 6, 0, []Revision{{Main: 6}}, 1,
+ },
+ {
+ []byte("foo"), []byte("foo2"), 6, 0, []Revision{{Main: 6}, {Main: 5}}, 2,
+ },
+ {
+ []byte("foo"), []byte("fop"), 6, 0, []Revision{{Main: 6}, {Main: 5}, {Main: 4}}, 3,
+ },
+ {
+ []byte("foo1"), []byte("fop"), 6, 0, []Revision{{Main: 5}, {Main: 4}}, 2,
+ },
+ {
+ []byte("foo2"), []byte("fop"), 6, 0, []Revision{{Main: 4}}, 1,
+ },
+ {
+ []byte("foo3"), []byte("fop"), 6, 0, nil, 0,
+ },
+ // fixed range keys, various atRev, unlimited
+ {
+ []byte("foo1"), []byte("fop"), 1, 0, nil, 0,
+ },
+ {
+ []byte("foo1"), []byte("fop"), 2, 0, []Revision{{Main: 2}}, 1,
+ },
+ {
+ []byte("foo1"), []byte("fop"), 3, 0, []Revision{{Main: 2}, {Main: 3}}, 2,
+ },
+ {
+ []byte("foo1"), []byte("fop"), 4, 0, []Revision{{Main: 2}, {Main: 4}}, 2,
+ },
+ {
+ []byte("foo1"), []byte("fop"), 5, 0, []Revision{{Main: 5}, {Main: 4}}, 2,
+ },
+ {
+ []byte("foo1"), []byte("fop"), 6, 0, []Revision{{Main: 5}, {Main: 4}}, 2,
+ },
+ // fixed range keys, fixed atRev, various limit
+ {
+ []byte("foo"), []byte("fop"), 6, 1, []Revision{{Main: 6}}, 3,
+ },
+ {
+ []byte("foo"), []byte("fop"), 6, 2, []Revision{{Main: 6}, {Main: 5}}, 3,
+ },
+ {
+ []byte("foo"), []byte("fop"), 6, 3, []Revision{{Main: 6}, {Main: 5}, {Main: 4}}, 3,
+ },
+ {
+ []byte("foo"), []byte("fop"), 3, 1, []Revision{{Main: 1}}, 3,
+ },
+ {
+ []byte("foo"), []byte("fop"), 3, 2, []Revision{{Main: 1}, {Main: 2}}, 3,
+ },
+ {
+ []byte("foo"), []byte("fop"), 3, 3, []Revision{{Main: 1}, {Main: 2}, {Main: 3}}, 3,
+ },
+ }
+ for i, tt := range tests {
+ revs, _ := ti.Revisions(tt.key, tt.end, tt.atRev, tt.limit)
+ if !reflect.DeepEqual(revs, tt.wrevs) {
+ t.Errorf("#%d limit %d: revs = %+v, want %+v", i, tt.limit, revs, tt.wrevs)
+ }
+ count := ti.CountRevisions(tt.key, tt.end, tt.atRev)
+ if count != tt.wcounts {
+ t.Errorf("#%d: count = %d, want %v", i, count, tt.wcounts)
+ }
+ }
+}
+
+func TestIndexCompactAndKeep(t *testing.T) {
+ maxRev := int64(20)
+
+ // key: "foo"
+ // modified: 10
+ // generations:
+ // {{10, 0}}
+ // {{1, 0}, {5, 0}, {9, 0}(t)}
+ //
+ // key: "foo1"
+ // modified: 10, 1
+ // generations:
+ // {{10, 1}}
+ // {{2, 0}, {6, 0}, {7, 0}(t)}
+ //
+ // key: "foo2"
+ // modified: 8
+ // generations:
+ // {empty}
+ // {{3, 0}, {4, 0}, {8, 0}(t)}
+ //
+ buildTreeIndex := func() index {
+ ti := newTreeIndex(zaptest.NewLogger(t))
+
+ ti.Put([]byte("foo"), Revision{Main: 1})
+ ti.Put([]byte("foo1"), Revision{Main: 2})
+ ti.Put([]byte("foo2"), Revision{Main: 3})
+ ti.Put([]byte("foo2"), Revision{Main: 4})
+ ti.Put([]byte("foo"), Revision{Main: 5})
+ ti.Put([]byte("foo1"), Revision{Main: 6})
+ require.NoError(t, ti.Tombstone([]byte("foo1"), Revision{Main: 7}))
+ require.NoError(t, ti.Tombstone([]byte("foo2"), Revision{Main: 8}))
+ require.NoError(t, ti.Tombstone([]byte("foo"), Revision{Main: 9}))
+ ti.Put([]byte("foo"), Revision{Main: 10})
+ ti.Put([]byte("foo1"), Revision{Main: 10, Sub: 1})
+ return ti
+ }
+
+ afterCompacts := []struct {
+ atRev int
+ keyIndexes []keyIndex
+ keep map[Revision]struct{}
+ compacted map[Revision]struct{}
+ }{
+ {
+ atRev: 1,
+ keyIndexes: []keyIndex{
+ {
+ key: []byte("foo"),
+ modified: Revision{Main: 10},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 1}, revs: []Revision{{Main: 1}, {Main: 5}, {Main: 9}}},
+ {ver: 1, created: Revision{Main: 10}, revs: []Revision{{Main: 10}}},
+ },
+ },
+ {
+ key: []byte("foo1"),
+ modified: Revision{Main: 10, Sub: 1},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 2}, revs: []Revision{{Main: 2}, {Main: 6}, {Main: 7}}},
+ {ver: 1, created: Revision{Main: 10, Sub: 1}, revs: []Revision{{Main: 10, Sub: 1}}},
+ },
+ },
+ {
+ key: []byte("foo2"),
+ modified: Revision{Main: 8},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 3}, revs: []Revision{{Main: 3}, {Main: 4}, {Main: 8}}},
+ {},
+ },
+ },
+ },
+ keep: map[Revision]struct{}{
+ {Main: 1}: {},
+ },
+ compacted: map[Revision]struct{}{
+ {Main: 1}: {},
+ },
+ },
+ {
+ atRev: 2,
+ keyIndexes: []keyIndex{
+ {
+ key: []byte("foo"),
+ modified: Revision{Main: 10},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 1}, revs: []Revision{{Main: 1}, {Main: 5}, {Main: 9}}},
+ {ver: 1, created: Revision{Main: 10}, revs: []Revision{{Main: 10}}},
+ },
+ },
+ {
+ key: []byte("foo1"),
+ modified: Revision{Main: 10, Sub: 1},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 2}, revs: []Revision{{Main: 2}, {Main: 6}, {Main: 7}}},
+ {ver: 1, created: Revision{Main: 10, Sub: 1}, revs: []Revision{{Main: 10, Sub: 1}}},
+ },
+ },
+ {
+ key: []byte("foo2"),
+ modified: Revision{Main: 8},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 3}, revs: []Revision{{Main: 3}, {Main: 4}, {Main: 8}}},
+ {},
+ },
+ },
+ },
+ keep: map[Revision]struct{}{
+ {Main: 1}: {},
+ {Main: 2}: {},
+ },
+ compacted: map[Revision]struct{}{
+ {Main: 1}: {},
+ {Main: 2}: {},
+ },
+ },
+ {
+ atRev: 3,
+ keyIndexes: []keyIndex{
+ {
+ key: []byte("foo"),
+ modified: Revision{Main: 10},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 1}, revs: []Revision{{Main: 1}, {Main: 5}, {Main: 9}}},
+ {ver: 1, created: Revision{Main: 10}, revs: []Revision{{Main: 10}}},
+ },
+ },
+ {
+ key: []byte("foo1"),
+ modified: Revision{Main: 10, Sub: 1},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 2}, revs: []Revision{{Main: 2}, {Main: 6}, {Main: 7}}},
+ {ver: 1, created: Revision{Main: 10, Sub: 1}, revs: []Revision{{Main: 10, Sub: 1}}},
+ },
+ },
+ {
+ key: []byte("foo2"),
+ modified: Revision{Main: 8},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 3}, revs: []Revision{{Main: 3}, {Main: 4}, {Main: 8}}},
+ {},
+ },
+ },
+ },
+ keep: map[Revision]struct{}{
+ {Main: 1}: {},
+ {Main: 2}: {},
+ {Main: 3}: {},
+ },
+ compacted: map[Revision]struct{}{
+ {Main: 1}: {},
+ {Main: 2}: {},
+ {Main: 3}: {},
+ },
+ },
+ {
+ atRev: 4,
+ keyIndexes: []keyIndex{
+ {
+ key: []byte("foo"),
+ modified: Revision{Main: 10},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 1}, revs: []Revision{{Main: 1}, {Main: 5}, {Main: 9}}},
+ {ver: 1, created: Revision{Main: 10}, revs: []Revision{{Main: 10}}},
+ },
+ },
+ {
+ key: []byte("foo1"),
+ modified: Revision{Main: 10, Sub: 1},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 2}, revs: []Revision{{Main: 2}, {Main: 6}, {Main: 7}}},
+ {ver: 1, created: Revision{Main: 10, Sub: 1}, revs: []Revision{{Main: 10, Sub: 1}}},
+ },
+ },
+ {
+ key: []byte("foo2"),
+ modified: Revision{Main: 8},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 3}, revs: []Revision{{Main: 4}, {Main: 8}}},
+ {},
+ },
+ },
+ },
+ keep: map[Revision]struct{}{
+ {Main: 1}: {},
+ {Main: 2}: {},
+ {Main: 4}: {},
+ },
+ compacted: map[Revision]struct{}{
+ {Main: 1}: {},
+ {Main: 2}: {},
+ {Main: 4}: {},
+ },
+ },
+ {
+ atRev: 5,
+ keyIndexes: []keyIndex{
+ {
+ key: []byte("foo"),
+ modified: Revision{Main: 10},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 1}, revs: []Revision{{Main: 5}, {Main: 9}}},
+ {ver: 1, created: Revision{Main: 10}, revs: []Revision{{Main: 10}}},
+ },
+ },
+ {
+ key: []byte("foo1"),
+ modified: Revision{Main: 10, Sub: 1},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 2}, revs: []Revision{{Main: 2}, {Main: 6}, {Main: 7}}},
+ {ver: 1, created: Revision{Main: 10, Sub: 1}, revs: []Revision{{Main: 10, Sub: 1}}},
+ },
+ },
+ {
+ key: []byte("foo2"),
+ modified: Revision{Main: 8},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 3}, revs: []Revision{{Main: 4}, {Main: 8}}},
+ {},
+ },
+ },
+ },
+ keep: map[Revision]struct{}{
+ {Main: 2}: {},
+ {Main: 4}: {},
+ {Main: 5}: {},
+ },
+ compacted: map[Revision]struct{}{
+ {Main: 2}: {},
+ {Main: 4}: {},
+ {Main: 5}: {},
+ },
+ },
+ {
+ atRev: 6,
+ keyIndexes: []keyIndex{
+ {
+ key: []byte("foo"),
+ modified: Revision{Main: 10},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 1}, revs: []Revision{{Main: 5}, {Main: 9}}},
+ {ver: 1, created: Revision{Main: 10}, revs: []Revision{{Main: 10}}},
+ },
+ },
+ {
+ key: []byte("foo1"),
+ modified: Revision{Main: 10, Sub: 1},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 2}, revs: []Revision{{Main: 6}, {Main: 7}}},
+ {ver: 1, created: Revision{Main: 10, Sub: 1}, revs: []Revision{{Main: 10, Sub: 1}}},
+ },
+ },
+ {
+ key: []byte("foo2"),
+ modified: Revision{Main: 8},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 3}, revs: []Revision{{Main: 4}, {Main: 8}}},
+ {},
+ },
+ },
+ },
+ keep: map[Revision]struct{}{
+ {Main: 6}: {},
+ {Main: 4}: {},
+ {Main: 5}: {},
+ },
+ compacted: map[Revision]struct{}{
+ {Main: 6}: {},
+ {Main: 4}: {},
+ {Main: 5}: {},
+ },
+ },
+ {
+ atRev: 7,
+ keyIndexes: []keyIndex{
+ {
+ key: []byte("foo"),
+ modified: Revision{Main: 10},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 1}, revs: []Revision{{Main: 5}, {Main: 9}}},
+ {ver: 1, created: Revision{Main: 10}, revs: []Revision{{Main: 10}}},
+ },
+ },
+ {
+ key: []byte("foo1"),
+ modified: Revision{Main: 10, Sub: 1},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 2}, revs: []Revision{{Main: 7}}},
+ {ver: 1, created: Revision{Main: 10, Sub: 1}, revs: []Revision{{Main: 10, Sub: 1}}},
+ },
+ },
+ {
+ key: []byte("foo2"),
+ modified: Revision{Main: 8},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 3}, revs: []Revision{{Main: 4}, {Main: 8}}},
+ {},
+ },
+ },
+ },
+ keep: map[Revision]struct{}{
+ {Main: 4}: {},
+ {Main: 5}: {},
+ },
+ compacted: map[Revision]struct{}{
+ {Main: 7}: {},
+ {Main: 4}: {},
+ {Main: 5}: {},
+ },
+ },
+ {
+ atRev: 8,
+ keyIndexes: []keyIndex{
+ {
+ key: []byte("foo"),
+ modified: Revision{Main: 10},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 1}, revs: []Revision{{Main: 5}, {Main: 9}}},
+ {ver: 1, created: Revision{Main: 10}, revs: []Revision{{Main: 10}}},
+ },
+ },
+ {
+ key: []byte("foo1"),
+ modified: Revision{Main: 10, Sub: 1},
+ generations: []generation{
+ {ver: 1, created: Revision{Main: 10, Sub: 1}, revs: []Revision{{Main: 10, Sub: 1}}},
+ },
+ },
+ {
+ key: []byte("foo2"),
+ modified: Revision{Main: 8},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 3}, revs: []Revision{{Main: 8}}},
+ {},
+ },
+ },
+ },
+ keep: map[Revision]struct{}{
+ {Main: 5}: {},
+ },
+ compacted: map[Revision]struct{}{
+ {Main: 8}: {},
+ {Main: 5}: {},
+ },
+ },
+ {
+ atRev: 9,
+ keyIndexes: []keyIndex{
+ {
+ key: []byte("foo"),
+ modified: Revision{Main: 10},
+ generations: []generation{
+ {ver: 3, created: Revision{Main: 1}, revs: []Revision{{Main: 9}}},
+ {ver: 1, created: Revision{Main: 10}, revs: []Revision{{Main: 10}}},
+ },
+ },
+ {
+ key: []byte("foo1"),
+ modified: Revision{Main: 10, Sub: 1},
+ generations: []generation{
+ {ver: 1, created: Revision{Main: 10, Sub: 1}, revs: []Revision{{Main: 10, Sub: 1}}},
+ },
+ },
+ },
+ keep: map[Revision]struct{}{},
+ compacted: map[Revision]struct{}{
+ {Main: 9}: {},
+ },
+ },
+ {
+ atRev: 10,
+ keyIndexes: []keyIndex{
+ {
+ key: []byte("foo"),
+ modified: Revision{Main: 10},
+ generations: []generation{
+ {ver: 1, created: Revision{Main: 10}, revs: []Revision{{Main: 10}}},
+ },
+ },
+ {
+ key: []byte("foo1"),
+ modified: Revision{Main: 10, Sub: 1},
+ generations: []generation{
+ {ver: 1, created: Revision{Main: 10, Sub: 1}, revs: []Revision{{Main: 10, Sub: 1}}},
+ },
+ },
+ },
+ keep: map[Revision]struct{}{
+ {Main: 10}: {},
+ {Main: 10, Sub: 1}: {},
+ },
+ compacted: map[Revision]struct{}{
+ {Main: 10}: {},
+ {Main: 10, Sub: 1}: {},
+ },
+ },
+ }
+
+ ti := buildTreeIndex()
+ // Continuous Compact and Keep
+ for i := int64(1); i < maxRev; i++ {
+ j := i - 1
+ if i >= int64(len(afterCompacts)) {
+ j = int64(len(afterCompacts)) - 1
+ }
+
+ am := ti.Compact(i)
+ require.Equalf(t, afterCompacts[j].compacted, am, "#%d: compact(%d) != expected", i, i)
+
+ keep := ti.Keep(i)
+ require.Equalf(t, afterCompacts[j].keep, keep, "#%d: keep(%d) != expected", i, i)
+
+ nti := newTreeIndex(zaptest.NewLogger(t)).(*treeIndex)
+ for k := range afterCompacts[j].keyIndexes {
+ ki := afterCompacts[j].keyIndexes[k]
+ nti.tree.ReplaceOrInsert(&ki)
+ }
+ require.Truef(t, ti.Equal(nti), "#%d: not equal ti", i)
+ }
+
+ // Once Compact and Keep
+ for i := int64(1); i < maxRev; i++ {
+ ti := buildTreeIndex()
+
+ j := i - 1
+ if i >= int64(len(afterCompacts)) {
+ j = int64(len(afterCompacts)) - 1
+ }
+
+ am := ti.Compact(i)
+ require.Equalf(t, afterCompacts[j].compacted, am, "#%d: compact(%d) != expected", i, i)
+
+ keep := ti.Keep(i)
+ require.Equalf(t, afterCompacts[j].keep, keep, "#%d: keep(%d) != expected", i, i)
+
+ nti := newTreeIndex(zaptest.NewLogger(t)).(*treeIndex)
+ for k := range afterCompacts[j].keyIndexes {
+ ki := afterCompacts[j].keyIndexes[k]
+ nti.tree.ReplaceOrInsert(&ki)
+ }
+
+ require.Truef(t, ti.Equal(nti), "#%d: not equal ti", i)
+ }
+}
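
The tests above pin down the treeIndex life cycle: Put appends revisions, Tombstone closes the current generation, Get resolves a key at a revision, and Compact reports which revisions survive. The sketch below condenses that life cycle into a single test-style walkthrough. It is illustrative only and assumes it lives inside the mvcc package, since newTreeIndex, Revision, and ErrRevisionNotFound are package-internal.

```go
package mvcc

import (
	"errors"
	"testing"

	"go.uber.org/zap/zaptest"
)

// TestTreeIndexLifecycleSketch is a minimal walkthrough of the behaviour
// exercised above: put, tombstone, read at a revision, then compact.
func TestTreeIndexLifecycleSketch(t *testing.T) {
	ti := newTreeIndex(zaptest.NewLogger(t))

	ti.Put([]byte("foo"), Revision{Main: 2}) // create "foo" at rev 2
	ti.Put([]byte("foo"), Revision{Main: 4}) // update it at rev 4

	// Delete it at rev 6; Tombstone closes the current generation.
	if err := ti.Tombstone([]byte("foo"), Revision{Main: 6}); err != nil {
		t.Fatalf("tombstone: %v", err)
	}

	// A read at rev 5 resolves to the put at rev 4, the latest revision <= 5.
	rev, created, ver, err := ti.Get([]byte("foo"), 5)
	if err != nil || rev.Main != 4 || created.Main != 2 || ver != 2 {
		t.Fatalf("get: rev=%+v created=%+v ver=%d err=%v", rev, created, ver, err)
	}

	// From the tombstone revision onwards the key is reported as not found.
	if _, _, _, err := ti.Get([]byte("foo"), 6); !errors.Is(err, ErrRevisionNotFound) {
		t.Fatalf("expected ErrRevisionNotFound, got %v", err)
	}

	// Compacting at rev 6 reports the tombstone itself as the kept revision,
	// matching the expectations encoded in TestIndexCompactAndKeep above.
	available := ti.Compact(6)
	if _, ok := available[Revision{Main: 6}]; !ok || len(available) != 1 {
		t.Fatalf("available = %+v, want only {Main: 6}", available)
	}
}
```
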
diff --git a/server/mvcc/key_index.go b/server/storage/mvcc/key_index.go
similarity index 78%
rename from server/mvcc/key_index.go
rename to server/storage/mvcc/key_index.go
index 58ad4832eba..27b22fd4899 100644
--- a/server/mvcc/key_index.go
+++ b/server/storage/mvcc/key_index.go
@@ -19,29 +19,27 @@ import (
"errors"
"fmt"
- "github.com/google/btree"
"go.uber.org/zap"
)
-var (
- ErrRevisionNotFound = errors.New("mvcc: revision not found")
-)
+var ErrRevisionNotFound = errors.New("mvcc: revision not found")
// keyIndex stores the revisions of a key in the backend.
// Each keyIndex has at least one key generation.
// Each generation might have several key versions.
-// Tombstone on a key appends an tombstone version at the end
+// Tombstone on a key appends a tombstone version at the end
// of the current generation and creates a new empty generation.
// Each version of a key has an index pointing to the backend.
//
// For example: put(1.0);put(2.0);tombstone(3.0);put(4.0);tombstone(5.0) on key "foo"
// generate a keyIndex:
// key: "foo"
-// rev: 5
+// modified: 5
// generations:
-// {empty}
-// {4.0, 5.0(t)}
-// {1.0, 2.0, 3.0(t)}
+//
+// {empty}
+// {4.0, 5.0(t)}
+// {1.0, 2.0, 3.0(t)}
//
// Compact a keyIndex removes the versions with smaller or equal to
// rev except the largest one. If the generation becomes empty
@@ -51,39 +49,44 @@ var (
// For example:
// compact(2) on the previous example
// generations:
-// {empty}
-// {4.0, 5.0(t)}
-// {2.0, 3.0(t)}
+//
+// {empty}
+// {4.0, 5.0(t)}
+// {2.0, 3.0(t)}
//
// compact(4)
// generations:
-// {empty}
-// {4.0, 5.0(t)}
+//
+// {empty}
+// {4.0, 5.0(t)}
//
// compact(5):
// generations:
-// {empty} -> key SHOULD be removed.
+//
+// {empty}
+// {5.0(t)}
//
// compact(6):
// generations:
-// {empty} -> key SHOULD be removed.
+//
+// {empty} -> key SHOULD be removed.
type keyIndex struct {
key []byte
- modified revision // the main rev of the last modification
+ modified Revision // the main rev of the last modification
generations []generation
}
// put puts a revision to the keyIndex.
func (ki *keyIndex) put(lg *zap.Logger, main int64, sub int64) {
- rev := revision{main: main, sub: sub}
+ rev := Revision{Main: main, Sub: sub}
if !rev.GreaterThan(ki.modified) {
lg.Panic(
"'put' with an unexpected smaller revision",
- zap.Int64("given-revision-main", rev.main),
- zap.Int64("given-revision-sub", rev.sub),
- zap.Int64("modified-revision-main", ki.modified.main),
- zap.Int64("modified-revision-sub", ki.modified.sub),
+ zap.Int64("given-revision-main", rev.Main),
+ zap.Int64("given-revision-sub", rev.Sub),
+ zap.Int64("modified-revision-main", ki.modified.Main),
+ zap.Int64("modified-revision-sub", ki.modified.Sub),
)
}
if len(ki.generations) == 0 {
@@ -99,7 +102,7 @@ func (ki *keyIndex) put(lg *zap.Logger, main int64, sub int64) {
ki.modified = rev
}
-func (ki *keyIndex) restore(lg *zap.Logger, created, modified revision, ver int64) {
+func (ki *keyIndex) restore(lg *zap.Logger, created, modified Revision, ver int64) {
if len(ki.generations) != 0 {
lg.Panic(
"'restore' got an unexpected non-empty generations",
@@ -108,7 +111,7 @@ func (ki *keyIndex) restore(lg *zap.Logger, created, modified revision, ver int6
}
ki.modified = modified
- g := generation{created: created, ver: ver, revs: []revision{modified}}
+ g := generation{created: created, ver: ver, revs: []Revision{modified}}
ki.generations = append(ki.generations, g)
keysGauge.Inc()
}
@@ -133,8 +136,8 @@ func (ki *keyIndex) tombstone(lg *zap.Logger, main int64, sub int64) error {
}
// get gets the modified, created revision and version of the key that satisfies the given atRev.
-// Rev must be higher than or equal to the given atRev.
-func (ki *keyIndex) get(lg *zap.Logger, atRev int64) (modified, created revision, ver int64, err error) {
+// Rev must be smaller than or equal to the given atRev.
+func (ki *keyIndex) get(lg *zap.Logger, atRev int64) (modified, created Revision, ver int64, err error) {
if ki.isEmpty() {
lg.Panic(
"'get' got an unexpected empty keyIndex",
@@ -143,28 +146,28 @@ func (ki *keyIndex) get(lg *zap.Logger, atRev int64) (modified, created revision
}
g := ki.findGeneration(atRev)
if g.isEmpty() {
- return revision{}, revision{}, 0, ErrRevisionNotFound
+ return Revision{}, Revision{}, 0, ErrRevisionNotFound
}
- n := g.walk(func(rev revision) bool { return rev.main > atRev })
+ n := g.walk(func(rev Revision) bool { return rev.Main > atRev })
if n != -1 {
return g.revs[n], g.created, g.ver - int64(len(g.revs)-n-1), nil
}
- return revision{}, revision{}, 0, ErrRevisionNotFound
+ return Revision{}, Revision{}, 0, ErrRevisionNotFound
}
// since returns revisions since the given rev. Only the revision with the
// largest sub revision will be returned if multiple revisions have the same
// main revision.
-func (ki *keyIndex) since(lg *zap.Logger, rev int64) []revision {
+func (ki *keyIndex) since(lg *zap.Logger, rev int64) []Revision {
if ki.isEmpty() {
lg.Panic(
"'since' got an unexpected empty keyIndex",
zap.String("key", string(ki.key)),
)
}
- since := revision{rev, 0}
+ since := Revision{Main: rev}
var gi int
// find the generations to start checking
for gi = len(ki.generations) - 1; gi > 0; gi-- {
@@ -177,31 +180,30 @@ func (ki *keyIndex) since(lg *zap.Logger, rev int64) []revision {
}
}
- var revs []revision
+ var revs []Revision
var last int64
for ; gi < len(ki.generations); gi++ {
for _, r := range ki.generations[gi].revs {
if since.GreaterThan(r) {
continue
}
- if r.main == last {
+ if r.Main == last {
// replace the revision with a new one that has higher sub value,
// because the original one should not be seen by external
revs[len(revs)-1] = r
continue
}
revs = append(revs, r)
- last = r.main
+ last = r.Main
}
}
return revs
}
// compact compacts a keyIndex by removing the versions with smaller or equal
-// revision than the given atRev except the largest one (If the largest one is
-// a tombstone, it will not be kept).
+// revision than the given atRev except the largest one.
// If a generation becomes empty during compaction, it will be removed.
-func (ki *keyIndex) compact(lg *zap.Logger, atRev int64, available map[revision]struct{}) {
+func (ki *keyIndex) compact(lg *zap.Logger, atRev int64, available map[Revision]struct{}) {
if ki.isEmpty() {
lg.Panic(
"'compact' got an unexpected empty keyIndex",
@@ -217,11 +219,6 @@ func (ki *keyIndex) compact(lg *zap.Logger, atRev int64, available map[revision]
if revIndex != -1 {
g.revs = g.revs[revIndex:]
}
- // remove any tombstone
- if len(g.revs) == 1 && genIdx != len(ki.generations)-1 {
- delete(available, g.revs[0])
- genIdx++
- }
}
// remove the previous generations.
@@ -229,7 +226,7 @@ func (ki *keyIndex) compact(lg *zap.Logger, atRev int64, available map[revision]
}
// keep finds the revision to be kept if compact is called at given atRev.
-func (ki *keyIndex) keep(atRev int64, available map[revision]struct{}) {
+func (ki *keyIndex) keep(atRev int64, available map[Revision]struct{}) {
if ki.isEmpty() {
return
}
@@ -237,18 +234,23 @@ func (ki *keyIndex) keep(atRev int64, available map[revision]struct{}) {
genIdx, revIndex := ki.doCompact(atRev, available)
g := &ki.generations[genIdx]
if !g.isEmpty() {
- // remove any tombstone
+ // If the given `atRev` is a tombstone, we need to skip it.
+ //
+ // Note that this is different from the `compact` function which
+ // keeps tombstone in such case. We need to stay consistent with
+ // existing versions, ensuring they always generate the same hash
+ // values.
if revIndex == len(g.revs)-1 && genIdx != len(ki.generations)-1 {
delete(available, g.revs[revIndex])
}
}
}
-func (ki *keyIndex) doCompact(atRev int64, available map[revision]struct{}) (genIdx int, revIndex int) {
+func (ki *keyIndex) doCompact(atRev int64, available map[Revision]struct{}) (genIdx int, revIndex int) {
// walk until reaching the first revision smaller or equal to "atRev",
// and add the revision to the available map
- f := func(rev revision) bool {
- if rev.main <= atRev {
+ f := func(rev Revision) bool {
+ if rev.Main <= atRev {
available[rev] = struct{}{}
return false
}
@@ -258,7 +260,7 @@ func (ki *keyIndex) doCompact(atRev int64, available map[revision]struct{}) (gen
genIdx, g := 0, &ki.generations[0]
// find first generation includes atRev or created after atRev
for genIdx < len(ki.generations)-1 {
- if tomb := g.revs[len(g.revs)-1].main; tomb > atRev {
+ if tomb := g.revs[len(g.revs)-1].Main; tomb >= atRev {
break
}
genIdx++
@@ -288,11 +290,11 @@ func (ki *keyIndex) findGeneration(rev int64) *generation {
}
g := ki.generations[cg]
if cg != lastg {
- if tomb := g.revs[len(g.revs)-1].main; tomb <= rev {
+ if tomb := g.revs[len(g.revs)-1].Main; tomb <= rev {
return nil
}
}
- if g.revs[0].main <= rev {
+ if g.revs[0].Main <= rev {
return &ki.generations[cg]
}
cg--
@@ -300,8 +302,8 @@ func (ki *keyIndex) findGeneration(rev int64) *generation {
return nil
}
-func (ki *keyIndex) Less(b btree.Item) bool {
- return bytes.Compare(ki.key, b.(*keyIndex).key) == -1
+func (ki *keyIndex) Less(bki *keyIndex) bool {
+ return bytes.Compare(ki.key, bki.key) == -1
}
func (ki *keyIndex) equal(b *keyIndex) bool {
@@ -334,8 +336,8 @@ func (ki *keyIndex) String() string {
// generation contains multiple revisions of a key.
type generation struct {
ver int64
- created revision // when the generation is created (put in first revision).
- revs []revision
+ created Revision // when the generation is created (put in first revision).
+ revs []Revision
}
func (g *generation) isEmpty() bool { return g == nil || len(g.revs) == 0 }
@@ -345,7 +347,7 @@ func (g *generation) isEmpty() bool { return g == nil || len(g.revs) == 0 }
// walk returns until: 1. it finishes walking all pairs 2. the function returns false.
// walk returns the position at where it stopped. If it stopped after
// finishing walking, -1 will be returned.
-func (g *generation) walk(f func(rev revision) bool) int {
+func (g *generation) walk(f func(rev Revision) bool) int {
l := len(g.revs)
for i := range g.revs {
ok := f(g.revs[l-i-1])
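
The keyIndex doc comment in this file walks through the sequence put(1.0); put(2.0); tombstone(3.0); put(4.0); tombstone(5.0) and the generation layout it produces. The test-style sketch below reproduces that exact sequence so the layout can be checked directly; it is a sketch against the package-internal keyIndex and generation types, not part of the patch itself.

```go
package mvcc

import (
	"reflect"
	"testing"

	"go.uber.org/zap/zaptest"
)

// TestKeyIndexDocExampleSketch mirrors the example from the keyIndex doc
// comment: put(1.0); put(2.0); tombstone(3.0); put(4.0); tombstone(5.0).
func TestKeyIndexDocExampleSketch(t *testing.T) {
	lg := zaptest.NewLogger(t)
	ki := &keyIndex{key: []byte("foo")}

	ki.put(lg, 1, 0)
	ki.put(lg, 2, 0)
	if err := ki.tombstone(lg, 3, 0); err != nil {
		t.Fatalf("tombstone: %v", err)
	}
	ki.put(lg, 4, 0)
	if err := ki.tombstone(lg, 5, 0); err != nil {
		t.Fatalf("tombstone: %v", err)
	}

	// The slice stores generations oldest-first, while the doc comment lists
	// them newest-first:
	//   {empty}
	//   {4.0, 5.0(t)}
	//   {1.0, 2.0, 3.0(t)}
	want := []generation{
		{ver: 3, created: Revision{Main: 1}, revs: []Revision{{Main: 1}, {Main: 2}, {Main: 3}}},
		{ver: 2, created: Revision{Main: 4}, revs: []Revision{{Main: 4}, {Main: 5}}},
		{},
	}
	if !reflect.DeepEqual(ki.generations, want) {
		t.Fatalf("generations = %+v, want %+v", ki.generations, want)
	}
}
```
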
diff --git a/server/storage/mvcc/key_index_test.go b/server/storage/mvcc/key_index_test.go
new file mode 100644
index 00000000000..f86e027e525
--- /dev/null
+++ b/server/storage/mvcc/key_index_test.go
@@ -0,0 +1,756 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "errors"
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
+)
+
+func TestKeyIndexGet(t *testing.T) {
+ // key: "foo"
+ // modified: 16
+ // generations:
+ // {empty}
+ // {{14, 0}[1], {15, 1}[2], {16, 0}(t)[3]}
+ // {{8, 0}[1], {10, 0}[2], {12, 0}(t)[3]}
+ // {{2, 0}[1], {4, 0}[2], {6, 0}(t)[3]}
+ ki := newTestKeyIndex(zaptest.NewLogger(t))
+ ki.compact(zaptest.NewLogger(t), 4, make(map[Revision]struct{}))
+
+ tests := []struct {
+ rev int64
+
+ wmod Revision
+ wcreat Revision
+ wver int64
+ werr error
+ }{
+ {17, Revision{}, Revision{}, 0, ErrRevisionNotFound},
+ {16, Revision{}, Revision{}, 0, ErrRevisionNotFound},
+
+ // get on generation 3
+ {15, Revision{Main: 15, Sub: 1}, Revision{Main: 14}, 2, nil},
+ {14, Revision{Main: 14}, Revision{Main: 14}, 1, nil},
+
+ {13, Revision{}, Revision{}, 0, ErrRevisionNotFound},
+ {12, Revision{}, Revision{}, 0, ErrRevisionNotFound},
+
+ // get on generation 2
+ {11, Revision{Main: 10}, Revision{Main: 8}, 2, nil},
+ {10, Revision{Main: 10}, Revision{Main: 8}, 2, nil},
+ {9, Revision{Main: 8}, Revision{Main: 8}, 1, nil},
+ {8, Revision{Main: 8}, Revision{Main: 8}, 1, nil},
+
+ {7, Revision{}, Revision{}, 0, ErrRevisionNotFound},
+ {6, Revision{}, Revision{}, 0, ErrRevisionNotFound},
+
+ // get on generation 1
+ {5, Revision{Main: 4}, Revision{Main: 2}, 2, nil},
+ {4, Revision{Main: 4}, Revision{Main: 2}, 2, nil},
+
+ {3, Revision{}, Revision{}, 0, ErrRevisionNotFound},
+ {2, Revision{}, Revision{}, 0, ErrRevisionNotFound},
+ {1, Revision{}, Revision{}, 0, ErrRevisionNotFound},
+ {0, Revision{}, Revision{}, 0, ErrRevisionNotFound},
+ }
+
+ for i, tt := range tests {
+ mod, creat, ver, err := ki.get(zaptest.NewLogger(t), tt.rev)
+ if !errors.Is(err, tt.werr) {
+ t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
+ }
+ if mod != tt.wmod {
+ t.Errorf("#%d: modified = %+v, want %+v", i, mod, tt.wmod)
+ }
+ if creat != tt.wcreat {
+ t.Errorf("#%d: created = %+v, want %+v", i, creat, tt.wcreat)
+ }
+ if ver != tt.wver {
+ t.Errorf("#%d: version = %d, want %d", i, ver, tt.wver)
+ }
+ }
+}
+
+func TestKeyIndexSince(t *testing.T) {
+ ki := newTestKeyIndex(zaptest.NewLogger(t))
+ ki.compact(zaptest.NewLogger(t), 4, make(map[Revision]struct{}))
+
+ allRevs := []Revision{
+ {Main: 4},
+ {Main: 6},
+ {Main: 8},
+ {Main: 10},
+ {Main: 12},
+ {Main: 14},
+ {Main: 15, Sub: 1},
+ {Main: 16},
+ }
+ tests := []struct {
+ rev int64
+
+ wrevs []Revision
+ }{
+ {17, nil},
+ {16, allRevs[7:]},
+ {15, allRevs[6:]},
+ {14, allRevs[5:]},
+ {13, allRevs[5:]},
+ {12, allRevs[4:]},
+ {11, allRevs[4:]},
+ {10, allRevs[3:]},
+ {9, allRevs[3:]},
+ {8, allRevs[2:]},
+ {7, allRevs[2:]},
+ {6, allRevs[1:]},
+ {5, allRevs[1:]},
+ {4, allRevs},
+ {3, allRevs},
+ {2, allRevs},
+ {1, allRevs},
+ {0, allRevs},
+ }
+
+ for i, tt := range tests {
+ revs := ki.since(zaptest.NewLogger(t), tt.rev)
+ if !reflect.DeepEqual(revs, tt.wrevs) {
+ t.Errorf("#%d: revs = %+v, want %+v", i, revs, tt.wrevs)
+ }
+ }
+}
+
+func TestKeyIndexPut(t *testing.T) {
+ ki := &keyIndex{key: []byte("foo")}
+ ki.put(zaptest.NewLogger(t), 5, 0)
+
+ wki := &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 5},
+ generations: []generation{{created: Revision{Main: 5}, ver: 1, revs: []Revision{{Main: 5}}}},
+ }
+ if !reflect.DeepEqual(ki, wki) {
+ t.Errorf("ki = %+v, want %+v", ki, wki)
+ }
+
+ ki.put(zaptest.NewLogger(t), 7, 0)
+
+ wki = &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 7},
+ generations: []generation{{created: Revision{Main: 5}, ver: 2, revs: []Revision{{Main: 5}, {Main: 7}}}},
+ }
+ if !reflect.DeepEqual(ki, wki) {
+ t.Errorf("ki = %+v, want %+v", ki, wki)
+ }
+}
+
+func TestKeyIndexRestore(t *testing.T) {
+ ki := &keyIndex{key: []byte("foo")}
+ ki.restore(zaptest.NewLogger(t), Revision{Main: 5}, Revision{Main: 7}, 2)
+
+ wki := &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 7},
+ generations: []generation{{created: Revision{Main: 5}, ver: 2, revs: []Revision{{Main: 7}}}},
+ }
+ if !reflect.DeepEqual(ki, wki) {
+ t.Errorf("ki = %+v, want %+v", ki, wki)
+ }
+}
+
+func TestKeyIndexTombstone(t *testing.T) {
+ ki := &keyIndex{key: []byte("foo")}
+ ki.put(zaptest.NewLogger(t), 5, 0)
+
+ err := ki.tombstone(zaptest.NewLogger(t), 7, 0)
+ if err != nil {
+ t.Errorf("unexpected tombstone error: %v", err)
+ }
+
+ wki := &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 7},
+ generations: []generation{{created: Revision{Main: 5}, ver: 2, revs: []Revision{{Main: 5}, {Main: 7}}}, {}},
+ }
+ if !reflect.DeepEqual(ki, wki) {
+ t.Errorf("ki = %+v, want %+v", ki, wki)
+ }
+
+ ki.put(zaptest.NewLogger(t), 8, 0)
+ ki.put(zaptest.NewLogger(t), 9, 0)
+ err = ki.tombstone(zaptest.NewLogger(t), 15, 0)
+ if err != nil {
+ t.Errorf("unexpected tombstone error: %v", err)
+ }
+
+ wki = &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 15},
+ generations: []generation{
+ {created: Revision{Main: 5}, ver: 2, revs: []Revision{{Main: 5}, {Main: 7}}},
+ {created: Revision{Main: 8}, ver: 3, revs: []Revision{{Main: 8}, {Main: 9}, {Main: 15}}},
+ {},
+ },
+ }
+ if !reflect.DeepEqual(ki, wki) {
+ t.Errorf("ki = %+v, want %+v", ki, wki)
+ }
+
+ err = ki.tombstone(zaptest.NewLogger(t), 16, 0)
+ if !errors.Is(err, ErrRevisionNotFound) {
+ t.Errorf("tombstone error = %v, want %v", err, ErrRevisionNotFound)
+ }
+}
+
+func TestKeyIndexCompactAndKeep(t *testing.T) {
+ tests := []struct {
+ compact int64
+
+ wki *keyIndex
+ wam map[Revision]struct{}
+ }{
+ {
+ 1,
+ &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 16},
+ generations: []generation{
+ {created: Revision{Main: 2}, ver: 3, revs: []Revision{{Main: 2}, {Main: 4}, {Main: 6}}},
+ {created: Revision{Main: 8}, ver: 3, revs: []Revision{{Main: 8}, {Main: 10}, {Main: 12}}},
+ {created: Revision{Main: 14}, ver: 3, revs: []Revision{{Main: 14}, {Main: 15, Sub: 1}, {Main: 16}}},
+ {},
+ },
+ },
+ map[Revision]struct{}{},
+ },
+ {
+ 2,
+ &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 16},
+ generations: []generation{
+ {created: Revision{Main: 2}, ver: 3, revs: []Revision{{Main: 2}, {Main: 4}, {Main: 6}}},
+ {created: Revision{Main: 8}, ver: 3, revs: []Revision{{Main: 8}, {Main: 10}, {Main: 12}}},
+ {created: Revision{Main: 14}, ver: 3, revs: []Revision{{Main: 14}, {Main: 15, Sub: 1}, {Main: 16}}},
+ {},
+ },
+ },
+ map[Revision]struct{}{
+ {Main: 2}: {},
+ },
+ },
+ {
+ 3,
+ &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 16},
+ generations: []generation{
+ {created: Revision{Main: 2}, ver: 3, revs: []Revision{{Main: 2}, {Main: 4}, {Main: 6}}},
+ {created: Revision{Main: 8}, ver: 3, revs: []Revision{{Main: 8}, {Main: 10}, {Main: 12}}},
+ {created: Revision{Main: 14}, ver: 3, revs: []Revision{{Main: 14}, {Main: 15, Sub: 1}, {Main: 16}}},
+ {},
+ },
+ },
+ map[Revision]struct{}{
+ {Main: 2}: {},
+ },
+ },
+ {
+ 4,
+ &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 16},
+ generations: []generation{
+ {created: Revision{Main: 2}, ver: 3, revs: []Revision{{Main: 4}, {Main: 6}}},
+ {created: Revision{Main: 8}, ver: 3, revs: []Revision{{Main: 8}, {Main: 10}, {Main: 12}}},
+ {created: Revision{Main: 14}, ver: 3, revs: []Revision{{Main: 14}, {Main: 15, Sub: 1}, {Main: 16}}},
+ {},
+ },
+ },
+ map[Revision]struct{}{
+ {Main: 4}: {},
+ },
+ },
+ {
+ 5,
+ &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 16},
+ generations: []generation{
+ {created: Revision{Main: 2}, ver: 3, revs: []Revision{{Main: 4}, {Main: 6}}},
+ {created: Revision{Main: 8}, ver: 3, revs: []Revision{{Main: 8}, {Main: 10}, {Main: 12}}},
+ {created: Revision{Main: 14}, ver: 3, revs: []Revision{{Main: 14}, {Main: 15, Sub: 1}, {Main: 16}}},
+ {},
+ },
+ },
+ map[Revision]struct{}{
+ {Main: 4}: {},
+ },
+ },
+ {
+ 6,
+ &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 16},
+ generations: []generation{
+ {created: Revision{Main: 2}, ver: 3, revs: []Revision{{Main: 6}}},
+ {created: Revision{Main: 8}, ver: 3, revs: []Revision{{Main: 8}, {Main: 10}, {Main: 12}}},
+ {created: Revision{Main: 14}, ver: 3, revs: []Revision{{Main: 14}, {Main: 15, Sub: 1}, {Main: 16}}},
+ {},
+ },
+ },
+ map[Revision]struct{}{
+ {Main: 6}: {},
+ },
+ },
+ {
+ 7,
+ &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 16},
+ generations: []generation{
+ {created: Revision{Main: 8}, ver: 3, revs: []Revision{{Main: 8}, {Main: 10}, {Main: 12}}},
+ {created: Revision{Main: 14}, ver: 3, revs: []Revision{{Main: 14}, {Main: 15, Sub: 1}, {Main: 16}}},
+ {},
+ },
+ },
+ map[Revision]struct{}{},
+ },
+ {
+ 8,
+ &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 16},
+ generations: []generation{
+ {created: Revision{Main: 8}, ver: 3, revs: []Revision{{Main: 8}, {Main: 10}, {Main: 12}}},
+ {created: Revision{Main: 14}, ver: 3, revs: []Revision{{Main: 14}, {Main: 15, Sub: 1}, {Main: 16}}},
+ {},
+ },
+ },
+ map[Revision]struct{}{
+ {Main: 8}: {},
+ },
+ },
+ {
+ 9,
+ &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 16},
+ generations: []generation{
+ {created: Revision{Main: 8}, ver: 3, revs: []Revision{{Main: 8}, {Main: 10}, {Main: 12}}},
+ {created: Revision{Main: 14}, ver: 3, revs: []Revision{{Main: 14}, {Main: 15, Sub: 1}, {Main: 16}}},
+ {},
+ },
+ },
+ map[Revision]struct{}{
+ {Main: 8}: {},
+ },
+ },
+ {
+ 10,
+ &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 16},
+ generations: []generation{
+ {created: Revision{Main: 8}, ver: 3, revs: []Revision{{Main: 10}, {Main: 12}}},
+ {created: Revision{Main: 14}, ver: 3, revs: []Revision{{Main: 14}, {Main: 15, Sub: 1}, {Main: 16}}},
+ {},
+ },
+ },
+ map[Revision]struct{}{
+ {Main: 10}: {},
+ },
+ },
+ {
+ 11,
+ &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 16},
+ generations: []generation{
+ {created: Revision{Main: 8}, ver: 3, revs: []Revision{{Main: 10}, {Main: 12}}},
+ {created: Revision{Main: 14}, ver: 3, revs: []Revision{{Main: 14}, {Main: 15, Sub: 1}, {Main: 16}}},
+ {},
+ },
+ },
+ map[Revision]struct{}{
+ {Main: 10}: {},
+ },
+ },
+ {
+ 12,
+ &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 16},
+ generations: []generation{
+ {created: Revision{Main: 8}, ver: 3, revs: []Revision{{Main: 12}}},
+ {created: Revision{Main: 14}, ver: 3, revs: []Revision{{Main: 14}, {Main: 15, Sub: 1}, {Main: 16}}},
+ {},
+ },
+ },
+ map[Revision]struct{}{
+ {Main: 12}: {},
+ },
+ },
+ {
+ 13,
+ &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 16},
+ generations: []generation{
+ {created: Revision{Main: 14}, ver: 3, revs: []Revision{{Main: 14}, {Main: 15, Sub: 1}, {Main: 16}}},
+ {},
+ },
+ },
+ map[Revision]struct{}{},
+ },
+ {
+ 14,
+ &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 16},
+ generations: []generation{
+ {created: Revision{Main: 14}, ver: 3, revs: []Revision{{Main: 14}, {Main: 15, Sub: 1}, {Main: 16}}},
+ {},
+ },
+ },
+ map[Revision]struct{}{
+ {Main: 14}: {},
+ },
+ },
+ {
+ 15,
+ &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 16},
+ generations: []generation{
+ {created: Revision{Main: 14}, ver: 3, revs: []Revision{{Main: 15, Sub: 1}, {Main: 16}}},
+ {},
+ },
+ },
+ map[Revision]struct{}{
+ {Main: 15, Sub: 1}: {},
+ },
+ },
+ {
+ 16,
+ &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 16},
+ generations: []generation{
+ {created: Revision{Main: 14}, ver: 3, revs: []Revision{{Main: 16}}},
+ {},
+ },
+ },
+ map[Revision]struct{}{
+ {Main: 16}: {},
+ },
+ },
+ {
+ 17,
+ &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 16},
+ generations: []generation{
+ {},
+ },
+ },
+ map[Revision]struct{}{},
+ },
+ }
+
+ isTombstoneRevFn := func(ki *keyIndex, rev int64) bool {
+ for i := 0; i < len(ki.generations)-1; i++ {
+ g := ki.generations[i]
+
+ if l := len(g.revs); l > 0 && g.revs[l-1].Main == rev {
+ return true
+ }
+ }
+ return false
+ }
+
+ // Continuous Compaction and finding Keep
+ ki := newTestKeyIndex(zaptest.NewLogger(t))
+ for i, tt := range tests {
+ isTombstone := isTombstoneRevFn(ki, tt.compact)
+
+ am := make(map[Revision]struct{})
+ kiclone := cloneKeyIndex(ki)
+ ki.keep(tt.compact, am)
+ if !reflect.DeepEqual(ki, kiclone) {
+ t.Errorf("#%d: ki = %+v, want %+v", i, ki, kiclone)
+ }
+
+ if isTombstone {
+ assert.Emptyf(t, am, "#%d: ki = %+v, keep result should be empty because the compaction revision is a tombstone", i, ki)
+ } else {
+ assert.Equalf(t, tt.wam, am,
+ "#%d: ki = %+v, keep result should match the compaction result when the revision is not a tombstone", i, ki)
+ }
+
+ am = make(map[Revision]struct{})
+ ki.compact(zaptest.NewLogger(t), tt.compact, am)
+ if !reflect.DeepEqual(ki, tt.wki) {
+ t.Errorf("#%d: ki = %+v, want %+v", i, ki, tt.wki)
+ }
+ if !reflect.DeepEqual(am, tt.wam) {
+ t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam)
+ }
+ }
+
+ // Jump Compaction and finding Keep
+ ki = newTestKeyIndex(zaptest.NewLogger(t))
+ for i, tt := range tests {
+ if !isTombstoneRevFn(ki, tt.compact) {
+ am := make(map[Revision]struct{})
+ kiclone := cloneKeyIndex(ki)
+ ki.keep(tt.compact, am)
+ if !reflect.DeepEqual(ki, kiclone) {
+ t.Errorf("#%d: ki = %+v, want %+v", i, ki, kiclone)
+ }
+ if !reflect.DeepEqual(am, tt.wam) {
+ t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam)
+ }
+ am = make(map[Revision]struct{})
+ ki.compact(zaptest.NewLogger(t), tt.compact, am)
+ if !reflect.DeepEqual(ki, tt.wki) {
+ t.Errorf("#%d: ki = %+v, want %+v", i, ki, tt.wki)
+ }
+ if !reflect.DeepEqual(am, tt.wam) {
+ t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam)
+ }
+ }
+ }
+
+ kiClone := newTestKeyIndex(zaptest.NewLogger(t))
+ // Once Compaction and finding Keep
+ for i, tt := range tests {
+ ki := newTestKeyIndex(zaptest.NewLogger(t))
+ am := make(map[Revision]struct{})
+ ki.keep(tt.compact, am)
+ if !reflect.DeepEqual(ki, kiClone) {
+ t.Errorf("#%d: ki = %+v, want %+v", i, ki, kiClone)
+ }
+
+ if isTombstoneRevFn(ki, tt.compact) {
+ assert.Emptyf(t, am, "#%d: ki = %+v, keep result should be empty because the compaction revision is a tombstone", i, ki)
+ } else {
+ assert.Equalf(t, tt.wam, am,
+ "#%d: ki = %+v, keep result should match the compaction result when the revision is not a tombstone", i, ki)
+ }
+
+ am = make(map[Revision]struct{})
+ ki.compact(zaptest.NewLogger(t), tt.compact, am)
+ if !reflect.DeepEqual(ki, tt.wki) {
+ t.Errorf("#%d: ki = %+v, want %+v", i, ki, tt.wki)
+ }
+ if !reflect.DeepEqual(am, tt.wam) {
+ t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam)
+ }
+ }
+}
+
+func cloneKeyIndex(ki *keyIndex) *keyIndex {
+ generations := make([]generation, len(ki.generations))
+ for i, gen := range ki.generations {
+ generations[i] = *cloneGeneration(&gen)
+ }
+ return &keyIndex{ki.key, ki.modified, generations}
+}
+
+func cloneGeneration(g *generation) *generation {
+ if g.revs == nil {
+ return &generation{g.ver, g.created, nil}
+ }
+ tmp := make([]Revision, len(g.revs))
+ copy(tmp, g.revs)
+ return &generation{g.ver, g.created, tmp}
+}
+
+// TestKeyIndexCompactOnFurtherRev tests that compacting at a revision
+// higher than the last modified revision works as expected.
+func TestKeyIndexCompactOnFurtherRev(t *testing.T) {
+ ki := &keyIndex{key: []byte("foo")}
+ ki.put(zaptest.NewLogger(t), 1, 0)
+ ki.put(zaptest.NewLogger(t), 2, 0)
+ am := make(map[Revision]struct{})
+ ki.compact(zaptest.NewLogger(t), 3, am)
+
+ wki := &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 2},
+ generations: []generation{
+ {created: Revision{Main: 1}, ver: 2, revs: []Revision{{Main: 2}}},
+ },
+ }
+ wam := map[Revision]struct{}{
+ {Main: 2}: {},
+ }
+ if !reflect.DeepEqual(ki, wki) {
+ t.Errorf("ki = %+v, want %+v", ki, wki)
+ }
+ if !reflect.DeepEqual(am, wam) {
+ t.Errorf("am = %+v, want %+v", am, wam)
+ }
+}
+
+func TestKeyIndexIsEmpty(t *testing.T) {
+ tests := []struct {
+ ki *keyIndex
+ w bool
+ }{
+ {
+ &keyIndex{
+ key: []byte("foo"),
+ generations: []generation{{}},
+ },
+ true,
+ },
+ {
+ &keyIndex{
+ key: []byte("foo"),
+ modified: Revision{Main: 2},
+ generations: []generation{
+ {created: Revision{Main: 1}, ver: 2, revs: []Revision{{Main: 2}}},
+ },
+ },
+ false,
+ },
+ }
+ for i, tt := range tests {
+ g := tt.ki.isEmpty()
+ if g != tt.w {
+ t.Errorf("#%d: isEmpty = %v, want %v", i, g, tt.w)
+ }
+ }
+}
+
+func TestKeyIndexFindGeneration(t *testing.T) {
+ ki := newTestKeyIndex(zaptest.NewLogger(t))
+
+ tests := []struct {
+ rev int64
+ wg *generation
+ }{
+ {0, nil},
+ {1, nil},
+ {2, &ki.generations[0]},
+ {3, &ki.generations[0]},
+ {4, &ki.generations[0]},
+ {5, &ki.generations[0]},
+ {6, nil},
+ {7, nil},
+ {8, &ki.generations[1]},
+ {9, &ki.generations[1]},
+ {10, &ki.generations[1]},
+ {11, &ki.generations[1]},
+ {12, nil},
+ {13, nil},
+ }
+ for i, tt := range tests {
+ g := ki.findGeneration(tt.rev)
+ if g != tt.wg {
+ t.Errorf("#%d: generation = %+v, want %+v", i, g, tt.wg)
+ }
+ }
+}
+
+func TestKeyIndexLess(t *testing.T) {
+ ki := &keyIndex{key: []byte("foo")}
+
+ tests := []struct {
+ ki *keyIndex
+ w bool
+ }{
+ {&keyIndex{key: []byte("doo")}, false},
+ {&keyIndex{key: []byte("foo")}, false},
+ {&keyIndex{key: []byte("goo")}, true},
+ }
+ for i, tt := range tests {
+ g := ki.Less(tt.ki)
+ if g != tt.w {
+ t.Errorf("#%d: Less = %v, want %v", i, g, tt.w)
+ }
+ }
+}
+
+func TestGenerationIsEmpty(t *testing.T) {
+ tests := []struct {
+ g *generation
+ w bool
+ }{
+ {nil, true},
+ {&generation{}, true},
+ {&generation{revs: []Revision{{Main: 1}}}, false},
+ }
+ for i, tt := range tests {
+ g := tt.g.isEmpty()
+ if g != tt.w {
+ t.Errorf("#%d: isEmpty = %v, want %v", i, g, tt.w)
+ }
+ }
+}
+
+func TestGenerationWalk(t *testing.T) {
+ g := &generation{
+ ver: 3,
+ created: Revision{Main: 2},
+ revs: []Revision{{Main: 2}, {Main: 4}, {Main: 6}},
+ }
+ tests := []struct {
+ f func(rev Revision) bool
+ wi int
+ }{
+ {func(rev Revision) bool { return rev.Main >= 7 }, 2},
+ {func(rev Revision) bool { return rev.Main >= 6 }, 1},
+ {func(rev Revision) bool { return rev.Main >= 5 }, 1},
+ {func(rev Revision) bool { return rev.Main >= 4 }, 0},
+ {func(rev Revision) bool { return rev.Main >= 3 }, 0},
+ {func(rev Revision) bool { return rev.Main >= 2 }, -1},
+ }
+ for i, tt := range tests {
+ idx := g.walk(tt.f)
+ if idx != tt.wi {
+ t.Errorf("#%d: index = %d, want %d", i, idx, tt.wi)
+ }
+ }
+}
+
+func newTestKeyIndex(lg *zap.Logger) *keyIndex {
+ // key: "foo"
+ // modified: 16
+ // generations:
+ // {empty}
+ // {{14, 0}[1], {15, 1}[2], {16, 0}(t)[3]}
+ // {{8, 0}[1], {10, 0}[2], {12, 0}(t)[3]}
+ // {{2, 0}[1], {4, 0}[2], {6, 0}(t)[3]}
+
+ ki := &keyIndex{key: []byte("foo")}
+ ki.put(lg, 2, 0)
+ ki.put(lg, 4, 0)
+ ki.tombstone(lg, 6, 0)
+ ki.put(lg, 8, 0)
+ ki.put(lg, 10, 0)
+ ki.tombstone(lg, 12, 0)
+ ki.put(lg, 14, 0)
+ ki.put(lg, 15, 1)
+ ki.tombstone(lg, 16, 0)
+ return ki
+}
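
One detail that TestKeyIndexCompactAndKeep above relies on is that keep only reports the revisions a compaction at atRev would retain, without touching the index, whereas compact actually trims the generations. The sketch below isolates that contrast on a small keyIndex with no tombstones; like the tests, it uses the package-internal API (including the cloneKeyIndex helper defined above) and is illustrative only.

```go
package mvcc

import (
	"reflect"
	"testing"

	"go.uber.org/zap/zaptest"
)

// TestKeyIndexKeepIsReadOnlySketch shows that keep() leaves the keyIndex
// untouched while compact() drops the superseded revisions.
func TestKeyIndexKeepIsReadOnlySketch(t *testing.T) {
	lg := zaptest.NewLogger(t)
	ki := &keyIndex{key: []byte("foo")}
	ki.put(lg, 2, 0)
	ki.put(lg, 4, 0)
	ki.put(lg, 6, 0)

	before := cloneKeyIndex(ki)

	// keep() only records {Main: 4} as the revision a compaction at 4 would
	// retain; the index itself is unchanged.
	kept := make(map[Revision]struct{})
	ki.keep(4, kept)
	if !reflect.DeepEqual(ki, before) {
		t.Fatalf("keep mutated the keyIndex: %+v", ki)
	}
	if _, ok := kept[Revision{Main: 4}]; !ok || len(kept) != 1 {
		t.Fatalf("kept = %+v, want only {Main: 4}", kept)
	}

	// compact() performs the same walk but also trims revision 2 away.
	compacted := make(map[Revision]struct{})
	ki.compact(lg, 4, compacted)
	if !reflect.DeepEqual(kept, compacted) {
		t.Fatalf("compacted = %+v, want %+v", compacted, kept)
	}
	wantRevs := []Revision{{Main: 4}, {Main: 6}}
	if !reflect.DeepEqual(ki.generations[0].revs, wantRevs) {
		t.Fatalf("revs = %+v, want %+v", ki.generations[0].revs, wantRevs)
	}
}
```
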
diff --git a/server/mvcc/kv.go b/server/storage/mvcc/kv.go
similarity index 94%
rename from server/mvcc/kv.go
rename to server/storage/mvcc/kv.go
index 79c2e687005..6250bb91198 100644
--- a/server/mvcc/kv.go
+++ b/server/storage/mvcc/kv.go
@@ -20,7 +20,7 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
+ "go.etcd.io/etcd/server/v3/storage/backend"
)
type RangeOptions struct {
@@ -119,11 +119,8 @@ type KV interface {
// Write creates a write transaction.
Write(trace *traceutil.Trace) TxnWrite
- // Hash computes the hash of the KV's backend.
- Hash() (hash uint32, revision int64, err error)
-
- // HashByRev computes the hash of all MVCC revisions up to a given revision.
- HashByRev(rev int64) (hash uint32, revision int64, compactRev int64, err error)
+ // HashStorage returns HashStorage interface for KV storage.
+ HashStorage() HashStorage
// Compact frees all superseded keys with revisions less than rev.
Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error)
diff --git a/server/storage/mvcc/kv_test.go b/server/storage/mvcc/kv_test.go
new file mode 100644
index 00000000000..c727b444af7
--- /dev/null
+++ b/server/storage/mvcc/kv_test.go
@@ -0,0 +1,878 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/lease"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+)
+
+// Functional tests for features implemented in the v3 store. They treat the
+// v3 store as a black box, feeding it input and validating the output.
+
+// TODO: add similar tests on operations in one txn/rev
+
+type (
+ rangeFunc func(kv KV, key, end []byte, ro RangeOptions) (*RangeResult, error)
+ putFunc func(kv KV, key, value []byte, lease lease.LeaseID) int64
+ deleteRangeFunc func(kv KV, key, end []byte) (n, rev int64)
+)
+
+var (
+ normalRangeFunc = func(kv KV, key, end []byte, ro RangeOptions) (*RangeResult, error) {
+ return kv.Range(context.TODO(), key, end, ro)
+ }
+ txnRangeFunc = func(kv KV, key, end []byte, ro RangeOptions) (*RangeResult, error) {
+ txn := kv.Read(ConcurrentReadTxMode, traceutil.TODO())
+ defer txn.End()
+ return txn.Range(context.TODO(), key, end, ro)
+ }
+
+ normalPutFunc = func(kv KV, key, value []byte, lease lease.LeaseID) int64 {
+ return kv.Put(key, value, lease)
+ }
+ txnPutFunc = func(kv KV, key, value []byte, lease lease.LeaseID) int64 {
+ txn := kv.Write(traceutil.TODO())
+ defer txn.End()
+ return txn.Put(key, value, lease)
+ }
+
+ normalDeleteRangeFunc = func(kv KV, key, end []byte) (n, rev int64) {
+ return kv.DeleteRange(key, end)
+ }
+ txnDeleteRangeFunc = func(kv KV, key, end []byte) (n, rev int64) {
+ txn := kv.Write(traceutil.TODO())
+ defer txn.End()
+ return txn.DeleteRange(key, end)
+ }
+)
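
Each helper pair above runs the same operation either directly on the KV or through an explicit transaction, which is how the table-driven tests below cover both paths. For orientation, here is a condensed sketch of the black-box flow those tests exercise (put, range, then delete) against a throwaway store; it reuses the package-internal NewStore, cleanup, and betesting helpers and is illustrative only.

```go
package mvcc

import (
	"context"
	"testing"

	"go.uber.org/zap/zaptest"

	"go.etcd.io/etcd/server/v3/lease"
	betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
)

// TestKVBlackBoxFlowSketch strings together the basic operations the
// functional tests below exercise individually.
func TestKVBlackBoxFlowSketch(t *testing.T) {
	b, _ := betesting.NewDefaultTmpBackend(t)
	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
	defer cleanup(s, b)

	// An empty store starts at revision 1; the first put moves it to 2.
	if rev := s.Put([]byte("foo"), []byte("bar"), lease.NoLease); rev != 2 {
		t.Fatalf("put rev = %d, want 2", rev)
	}

	// Range on a single key returns the value at the current revision.
	r, err := s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{})
	if err != nil || len(r.KVs) != 1 || string(r.KVs[0].Value) != "bar" {
		t.Fatalf("range = %+v, err = %v", r, err)
	}

	// Deleting bumps the revision again and removes the key.
	if n, rev := s.DeleteRange([]byte("foo"), nil); n != 1 || rev != 3 {
		t.Fatalf("delete n = %d, rev = %d, want 1, 3", n, rev)
	}
}
```
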
+
+func TestKVRange(t *testing.T) { testKVRange(t, normalRangeFunc) }
+func TestKVTxnRange(t *testing.T) { testKVRange(t, txnRangeFunc) }
+
+func testKVRange(t *testing.T, f rangeFunc) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ kvs := put3TestKVs(s)
+
+ wrev := int64(4)
+ tests := []struct {
+ key, end []byte
+ wkvs []mvccpb.KeyValue
+ }{
+ // get no keys
+ {
+ []byte("doo"), []byte("foo"),
+ nil,
+ },
+ // get no keys when key == end
+ {
+ []byte("foo"), []byte("foo"),
+ nil,
+ },
+ // get no keys when ranging single key
+ {
+ []byte("doo"), nil,
+ nil,
+ },
+ // get all keys
+ {
+ []byte("foo"), []byte("foo3"),
+ kvs,
+ },
+ // get partial keys
+ {
+ []byte("foo"), []byte("foo1"),
+ kvs[:1],
+ },
+ // get single key
+ {
+ []byte("foo"), nil,
+ kvs[:1],
+ },
+ // get entire keyspace
+ {
+ []byte(""), []byte(""),
+ kvs,
+ },
+ }
+
+ for i, tt := range tests {
+ r, err := f(s, tt.key, tt.end, RangeOptions{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if r.Rev != wrev {
+ t.Errorf("#%d: rev = %d, want %d", i, r.Rev, wrev)
+ }
+ if !reflect.DeepEqual(r.KVs, tt.wkvs) {
+ t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, tt.wkvs)
+ }
+ }
+}
+
+func TestKVRangeRev(t *testing.T) { testKVRangeRev(t, normalRangeFunc) }
+func TestKVTxnRangeRev(t *testing.T) { testKVRangeRev(t, txnRangeFunc) }
+
+func testKVRangeRev(t *testing.T, f rangeFunc) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ kvs := put3TestKVs(s)
+
+ tests := []struct {
+ rev int64
+ wrev int64
+ wkvs []mvccpb.KeyValue
+ }{
+ {-1, 4, kvs},
+ {0, 4, kvs},
+ {2, 4, kvs[:1]},
+ {3, 4, kvs[:2]},
+ {4, 4, kvs},
+ }
+
+ for i, tt := range tests {
+ r, err := f(s, []byte("foo"), []byte("foo3"), RangeOptions{Rev: tt.rev})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if r.Rev != tt.wrev {
+ t.Errorf("#%d: rev = %d, want %d", i, r.Rev, tt.wrev)
+ }
+ if !reflect.DeepEqual(r.KVs, tt.wkvs) {
+ t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, tt.wkvs)
+ }
+ }
+}
+
+func TestKVRangeBadRev(t *testing.T) { testKVRangeBadRev(t, normalRangeFunc) }
+func TestKVTxnRangeBadRev(t *testing.T) { testKVRangeBadRev(t, txnRangeFunc) }
+
+func testKVRangeBadRev(t *testing.T, f rangeFunc) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ put3TestKVs(s)
+ if _, err := s.Compact(traceutil.TODO(), 4); err != nil {
+ t.Fatalf("compact error (%v)", err)
+ }
+
+ tests := []struct {
+ rev int64
+ werr error
+ }{
+ {-1, nil}, // <= 0 is most recent store
+ {0, nil},
+ {1, ErrCompacted},
+ {2, ErrCompacted},
+ {4, nil},
+ {5, ErrFutureRev},
+ {100, ErrFutureRev},
+ }
+ for i, tt := range tests {
+ _, err := f(s, []byte("foo"), []byte("foo3"), RangeOptions{Rev: tt.rev})
+ if !errors.Is(err, tt.werr) {
+ t.Errorf("#%d: error = %v, want %v", i, err, tt.werr)
+ }
+ }
+}
+
+func TestKVRangeLimit(t *testing.T) { testKVRangeLimit(t, normalRangeFunc) }
+func TestKVTxnRangeLimit(t *testing.T) { testKVRangeLimit(t, txnRangeFunc) }
+
+func testKVRangeLimit(t *testing.T, f rangeFunc) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ kvs := put3TestKVs(s)
+
+ wrev := int64(4)
+ tests := []struct {
+ limit int64
+ wcounts int64
+ wkvs []mvccpb.KeyValue
+ }{
+ // no limit
+ {-1, 3, kvs},
+ // no limit
+ {0, 3, kvs},
+ {1, 3, kvs[:1]},
+ {2, 3, kvs[:2]},
+ {3, 3, kvs},
+ {100, 3, kvs},
+ }
+ for i, tt := range tests {
+ r, err := f(s, []byte("foo"), []byte("foo3"), RangeOptions{Limit: tt.limit})
+ if err != nil {
+ t.Fatalf("#%d: range error (%v)", i, err)
+ }
+ if !reflect.DeepEqual(r.KVs, tt.wkvs) {
+ t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, tt.wkvs)
+ }
+ if r.Rev != wrev {
+ t.Errorf("#%d: rev = %d, want %d", i, r.Rev, wrev)
+ }
+ if tt.limit <= 0 || int(tt.limit) > len(kvs) {
+ if r.Count != len(kvs) {
+ t.Errorf("#%d: count = %d, want %d", i, r.Count, len(kvs))
+ }
+ } else if r.Count != int(tt.wcounts) {
+ t.Errorf("#%d: count = %d, want %d", i, r.Count, tt.limit)
+ }
+ }
+}
+
+func TestKVPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, normalPutFunc) }
+func TestKVTxnPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, txnPutFunc) }
+
+func testKVPutMultipleTimes(t *testing.T, f putFunc) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ for i := 0; i < 10; i++ {
+ base := int64(i + 1)
+
+ rev := f(s, []byte("foo"), []byte("bar"), lease.LeaseID(base))
+ if rev != base+1 {
+ t.Errorf("#%d: rev = %d, want %d", i, rev, base+1)
+ }
+
+ r, err := s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ wkvs := []mvccpb.KeyValue{
+ {Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: base + 1, Version: base, Lease: base},
+ }
+ if !reflect.DeepEqual(r.KVs, wkvs) {
+ t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, wkvs)
+ }
+ }
+}
+
+func TestKVDeleteRange(t *testing.T) { testKVDeleteRange(t, normalDeleteRangeFunc) }
+func TestKVTxnDeleteRange(t *testing.T) { testKVDeleteRange(t, txnDeleteRangeFunc) }
+
+func testKVDeleteRange(t *testing.T, f deleteRangeFunc) {
+ tests := []struct {
+ key, end []byte
+
+ wrev int64
+ wN int64
+ }{
+ {
+ []byte("foo"), nil,
+ 5, 1,
+ },
+ {
+ []byte("foo"), []byte("foo1"),
+ 5, 1,
+ },
+ {
+ []byte("foo"), []byte("foo2"),
+ 5, 2,
+ },
+ {
+ []byte("foo"), []byte("foo3"),
+ 5, 3,
+ },
+ {
+ []byte("foo3"), []byte("foo8"),
+ 4, 0,
+ },
+ {
+ []byte("foo3"), nil,
+ 4, 0,
+ },
+ }
+
+ for i, tt := range tests {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+
+ s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
+ s.Put([]byte("foo1"), []byte("bar1"), lease.NoLease)
+ s.Put([]byte("foo2"), []byte("bar2"), lease.NoLease)
+
+ n, rev := f(s, tt.key, tt.end)
+ if n != tt.wN || rev != tt.wrev {
+ t.Errorf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, tt.wN, tt.wrev)
+ }
+
+ cleanup(s, b)
+ }
+}
+
+func TestKVDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, normalDeleteRangeFunc) }
+func TestKVTxnDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, txnDeleteRangeFunc) }
+
+func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
+
+ n, rev := f(s, []byte("foo"), nil)
+ if n != 1 || rev != 3 {
+ t.Fatalf("n = %d, rev = %d, want (%d, %d)", n, rev, 1, 3)
+ }
+
+ for i := 0; i < 10; i++ {
+ n, rev := f(s, []byte("foo"), nil)
+ if n != 0 || rev != 3 {
+ t.Fatalf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, 0, 3)
+ }
+ }
+}
+
+func TestKVPutWithSameLease(t *testing.T) { testKVPutWithSameLease(t, normalPutFunc) }
+func TestKVTxnPutWithSameLease(t *testing.T) { testKVPutWithSameLease(t, txnPutFunc) }
+
+func testKVPutWithSameLease(t *testing.T, f putFunc) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+ leaseID := int64(1)
+
+ // put foo
+ rev := f(s, []byte("foo"), []byte("bar"), lease.LeaseID(leaseID))
+ if rev != 2 {
+ t.Errorf("rev = %d, want %d", 2, rev)
+ }
+
+ // put foo with same lease again
+ rev2 := f(s, []byte("foo"), []byte("bar"), lease.LeaseID(leaseID))
+ if rev2 != 3 {
+ t.Errorf("rev = %d, want %d", 3, rev2)
+ }
+
+ // check leaseID
+ r, err := s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ wkvs := []mvccpb.KeyValue{
+ {Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2, Lease: leaseID},
+ }
+ if !reflect.DeepEqual(r.KVs, wkvs) {
+ t.Errorf("kvs = %+v, want %+v", r.KVs, wkvs)
+ }
+}
+
+// TestKVOperationInSequence tests that range, put, delete on single key in
+// sequence repeatedly works correctly.
+func TestKVOperationInSequence(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ for i := 0; i < 10; i++ {
+ base := int64(i*2 + 1)
+
+ // put foo
+ rev := s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
+ if rev != base+1 {
+ t.Errorf("#%d: put rev = %d, want %d", i, rev, base+1)
+ }
+
+ r, err := s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{Rev: base + 1})
+ if err != nil {
+ t.Fatal(err)
+ }
+ wkvs := []mvccpb.KeyValue{
+ {Key: []byte("foo"), Value: []byte("bar"), CreateRevision: base + 1, ModRevision: base + 1, Version: 1, Lease: int64(lease.NoLease)},
+ }
+ if !reflect.DeepEqual(r.KVs, wkvs) {
+ t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, wkvs)
+ }
+ if r.Rev != base+1 {
+ t.Errorf("#%d: range rev = %d, want %d", i, rev, base+1)
+ }
+
+ // delete foo
+ n, rev := s.DeleteRange([]byte("foo"), nil)
+ if n != 1 || rev != base+2 {
+ t.Errorf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, 1, base+2)
+ }
+
+ r, err = s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{Rev: base + 2})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if r.KVs != nil {
+ t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, nil)
+ }
+ if r.Rev != base+2 {
+ t.Errorf("#%d: range rev = %d, want %d", i, r.Rev, base+2)
+ }
+ }
+}
+
+func TestKVTxnBlockWriteOperations(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+
+ tests := []func(){
+ func() { s.Put([]byte("foo"), nil, lease.NoLease) },
+ func() { s.DeleteRange([]byte("foo"), nil) },
+ }
+ for i, tt := range tests {
+ tf := tt
+ txn := s.Write(traceutil.TODO())
+ done := make(chan struct{}, 1)
+ go func() {
+ tf()
+ done <- struct{}{}
+ }()
+ select {
+ case <-done:
+ t.Fatalf("#%d: operation failed to be blocked", i)
+ case <-time.After(10 * time.Millisecond):
+ }
+
+ txn.End()
+ select {
+ case <-done:
+ case <-time.After(10 * time.Second):
+ testutil.FatalStack(t, fmt.Sprintf("#%d: operation failed to be unblocked", i))
+ }
+ }
+
+ // only close the backend once we know all the txns have finished
+ cleanup(s, b)
+}
+
+func TestKVTxnNonBlockRange(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ txn := s.Write(traceutil.TODO())
+ defer txn.End()
+
+ donec := make(chan struct{})
+ go func() {
+ defer close(donec)
+ s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{})
+ }()
+ select {
+ case <-donec:
+ case <-time.After(100 * time.Millisecond):
+ t.Fatalf("range operation blocked on write txn")
+ }
+}
+
+// TestKVTxnOperationInSequence tests that running txn range, put, and delete on a
+// single key repeatedly, in sequence, works correctly.
+func TestKVTxnOperationInSequence(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ for i := 0; i < 10; i++ {
+ txn := s.Write(traceutil.TODO())
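+ // all operations within a single write txn share one main revision, so the store
+ // revision advances by one per iteration; base is the revision at the start of iteration i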
+ base := int64(i + 1)
+
+ // put foo
+ rev := txn.Put([]byte("foo"), []byte("bar"), lease.NoLease)
+ if rev != base+1 {
+ t.Errorf("#%d: put rev = %d, want %d", i, rev, base+1)
+ }
+
+ r, err := txn.Range(context.TODO(), []byte("foo"), nil, RangeOptions{Rev: base + 1})
+ if err != nil {
+ t.Fatal(err)
+ }
+ wkvs := []mvccpb.KeyValue{
+ {Key: []byte("foo"), Value: []byte("bar"), CreateRevision: base + 1, ModRevision: base + 1, Version: 1, Lease: int64(lease.NoLease)},
+ }
+ if !reflect.DeepEqual(r.KVs, wkvs) {
+ t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, wkvs)
+ }
+ if r.Rev != base+1 {
+ t.Errorf("#%d: range rev = %d, want %d", i, r.Rev, base+1)
+ }
+
+ // delete foo
+ n, rev := txn.DeleteRange([]byte("foo"), nil)
+ if n != 1 || rev != base+1 {
+ t.Errorf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, 1, base+1)
+ }
+
+ r, err = txn.Range(context.TODO(), []byte("foo"), nil, RangeOptions{Rev: base + 1})
+ if err != nil {
+ t.Errorf("#%d: range error (%v)", i, err)
+ }
+ if r.KVs != nil {
+ t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, nil)
+ }
+ if r.Rev != base+1 {
+ t.Errorf("#%d: range rev = %d, want %d", i, r.Rev, base+1)
+ }
+
+ txn.End()
+ }
+}
+
+func TestKVCompactReserveLastValue(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ s.Put([]byte("foo"), []byte("bar0"), 1)
+ s.Put([]byte("foo"), []byte("bar1"), 2)
+ s.DeleteRange([]byte("foo"), nil)
+ s.Put([]byte("foo"), []byte("bar2"), 3)
+
+ // the revisions below are passed to Compact() one by one on the same store
+ tests := []struct {
+ rev int64
+ // wanted kvs right after the compacted rev
+ wkvs []mvccpb.KeyValue
+ }{
+ {
+ 1,
+ []mvccpb.KeyValue{
+ {Key: []byte("foo"), Value: []byte("bar0"), CreateRevision: 2, ModRevision: 2, Version: 1, Lease: 1},
+ },
+ },
+ {
+ 2,
+ []mvccpb.KeyValue{
+ {Key: []byte("foo"), Value: []byte("bar1"), CreateRevision: 2, ModRevision: 3, Version: 2, Lease: 2},
+ },
+ },
+ {
+ 3,
+ nil,
+ },
+ {
+ 4,
+ []mvccpb.KeyValue{
+ {Key: []byte("foo"), Value: []byte("bar2"), CreateRevision: 5, ModRevision: 5, Version: 1, Lease: 3},
+ },
+ },
+ }
+ for i, tt := range tests {
+ _, err := s.Compact(traceutil.TODO(), tt.rev)
+ if err != nil {
+ t.Errorf("#%d: unexpect compact error %v", i, err)
+ }
+ r, err := s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{Rev: tt.rev + 1})
+ if err != nil {
+ t.Errorf("#%d: unexpect range error %v", i, err)
+ }
+ if !reflect.DeepEqual(r.KVs, tt.wkvs) {
+ t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, tt.wkvs)
+ }
+ }
+}
+
+func TestKVCompactBad(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ s.Put([]byte("foo"), []byte("bar0"), lease.NoLease)
+ s.Put([]byte("foo"), []byte("bar1"), lease.NoLease)
+ s.Put([]byte("foo"), []byte("bar2"), lease.NoLease)
+
+ // the revisions below are passed to Compact() one by one on the same store
+ tests := []struct {
+ rev int64
+ werr error
+ }{
+ {0, nil},
+ {1, nil},
+ {1, ErrCompacted},
+ {4, nil},
+ {5, ErrFutureRev},
+ {100, ErrFutureRev},
+ }
+ for i, tt := range tests {
+ _, err := s.Compact(traceutil.TODO(), tt.rev)
+ if !errors.Is(err, tt.werr) {
+ t.Errorf("#%d: compact error = %v, want %v", i, err, tt.werr)
+ }
+ }
+}
+
+func TestKVHash(t *testing.T) {
+ hashes := make([]uint32, 3)
+
+ for i := 0; i < len(hashes); i++ {
+ var err error
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ kv := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ kv.Put([]byte("foo0"), []byte("bar0"), lease.NoLease)
+ kv.Put([]byte("foo1"), []byte("bar0"), lease.NoLease)
+ hashes[i], _, err = kv.hash()
+ if err != nil {
+ t.Fatalf("failed to get hash: %v", err)
+ }
+ cleanup(kv, b)
+ }
+
+ for i := 1; i < len(hashes); i++ {
+ if hashes[i-1] != hashes[i] {
+ t.Errorf("hash[%d](%d) != hash[%d](%d)", i-1, hashes[i-1], i, hashes[i])
+ }
+ }
+}
+
+func TestKVRestore(t *testing.T) {
+ tests := []func(kv KV){
+ func(kv KV) {
+ kv.Put([]byte("foo"), []byte("bar0"), 1)
+ kv.Put([]byte("foo"), []byte("bar1"), 2)
+ kv.Put([]byte("foo"), []byte("bar2"), 3)
+ kv.Put([]byte("foo2"), []byte("bar0"), 1)
+ },
+ func(kv KV) {
+ kv.Put([]byte("foo"), []byte("bar0"), 1)
+ kv.DeleteRange([]byte("foo"), nil)
+ kv.Put([]byte("foo"), []byte("bar1"), 2)
+ },
+ func(kv KV) {
+ kv.Put([]byte("foo"), []byte("bar0"), 1)
+ kv.Put([]byte("foo"), []byte("bar1"), 2)
+ kv.Compact(traceutil.TODO(), 1)
+ },
+ }
+ for i, tt := range tests {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ tt(s)
+ var kvss [][]mvccpb.KeyValue
+ for k := int64(0); k < 10; k++ {
+ r, _ := s.Range(context.TODO(), []byte("a"), []byte("z"), RangeOptions{Rev: k})
+ kvss = append(kvss, r.KVs)
+ }
+
+ keysBefore := readGaugeInt(keysGauge)
+ s.Close()
+
+ // ns should recover the previous state from backend.
+ ns := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+
+ if keysRestore := readGaugeInt(keysGauge); keysBefore != keysRestore {
+ t.Errorf("#%d: got %d key count, expected %d", i, keysRestore, keysBefore)
+ }
+
+ // wait for possible compaction to finish
+ testutil.WaitSchedule()
+ var nkvss [][]mvccpb.KeyValue
+ for k := int64(0); k < 10; k++ {
+ r, _ := ns.Range(context.TODO(), []byte("a"), []byte("z"), RangeOptions{Rev: k})
+ nkvss = append(nkvss, r.KVs)
+ }
+ cleanup(ns, b)
+
+ if !reflect.DeepEqual(nkvss, kvss) {
+ t.Errorf("#%d: kvs history = %+v, want %+v", i, nkvss, kvss)
+ }
+ }
+}
+
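+// readGaugeInt collects the current value of a Prometheus gauge and returns it as an int.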
+func readGaugeInt(g prometheus.Gauge) int {
+ ch := make(chan prometheus.Metric, 1)
+ g.Collect(ch)
+ m := <-ch
+ mm := &dto.Metric{}
+ m.Write(mm)
+ return int(mm.GetGauge().GetValue())
+}
+
+func TestKVSnapshot(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ wkvs := put3TestKVs(s)
+
+ newPath := "new_test"
+ f, err := os.Create(newPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(newPath)
+
+ snap := s.b.Snapshot()
+ defer snap.Close()
+ _, err = snap.WriteTo(f)
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.Close()
+
+ ns := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer ns.Close()
+ r, err := ns.Range(context.TODO(), []byte("a"), []byte("z"), RangeOptions{})
+ if err != nil {
+ t.Errorf("unexpect range error (%v)", err)
+ }
+ if !reflect.DeepEqual(r.KVs, wkvs) {
+ t.Errorf("kvs = %+v, want %+v", r.KVs, wkvs)
+ }
+ if r.Rev != 4 {
+ t.Errorf("rev = %d, want %d", r.Rev, 4)
+ }
+}
+
+func TestWatchableKVWatch(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := New(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ w := s.NewWatchStream()
+ defer w.Close()
+
+ wid, _ := w.Watch(0, []byte("foo"), []byte("fop"), 0)
+
+ wev := []mvccpb.Event{
+ {
+ Type: mvccpb.PUT,
+ Kv: &mvccpb.KeyValue{
+ Key: []byte("foo"),
+ Value: []byte("bar"),
+ CreateRevision: 2,
+ ModRevision: 2,
+ Version: 1,
+ Lease: 1,
+ },
+ },
+ {
+ Type: mvccpb.PUT,
+ Kv: &mvccpb.KeyValue{
+ Key: []byte("foo1"),
+ Value: []byte("bar1"),
+ CreateRevision: 3,
+ ModRevision: 3,
+ Version: 1,
+ Lease: 2,
+ },
+ },
+ {
+ Type: mvccpb.PUT,
+ Kv: &mvccpb.KeyValue{
+ Key: []byte("foo1"),
+ Value: []byte("bar11"),
+ CreateRevision: 3,
+ ModRevision: 4,
+ Version: 2,
+ Lease: 3,
+ },
+ },
+ }
+
+ s.Put([]byte("foo"), []byte("bar"), 1)
+ select {
+ case resp := <-w.Chan():
+ if resp.WatchID != wid {
+ t.Errorf("resp.WatchID got = %d, want = %d", resp.WatchID, wid)
+ }
+ ev := resp.Events[0]
+ if !reflect.DeepEqual(ev, wev[0]) {
+ t.Errorf("watched event = %+v, want %+v", ev, wev[0])
+ }
+ case <-time.After(5 * time.Second):
+ // the CPU might be too slow for the goroutine to get scheduled in time
+ testutil.FatalStack(t, "failed to watch the event")
+ }
+
+ s.Put([]byte("foo1"), []byte("bar1"), 2)
+ select {
+ case resp := <-w.Chan():
+ if resp.WatchID != wid {
+ t.Errorf("resp.WatchID got = %d, want = %d", resp.WatchID, wid)
+ }
+ ev := resp.Events[0]
+ if !reflect.DeepEqual(ev, wev[1]) {
+ t.Errorf("watched event = %+v, want %+v", ev, wev[1])
+ }
+ case <-time.After(5 * time.Second):
+ testutil.FatalStack(t, "failed to watch the event")
+ }
+
+ w = s.NewWatchStream()
+ wid, _ = w.Watch(0, []byte("foo1"), []byte("foo2"), 3)
+
+ select {
+ case resp := <-w.Chan():
+ if resp.WatchID != wid {
+ t.Errorf("resp.WatchID got = %d, want = %d", resp.WatchID, wid)
+ }
+ ev := resp.Events[0]
+ if !reflect.DeepEqual(ev, wev[1]) {
+ t.Errorf("watched event = %+v, want %+v", ev, wev[1])
+ }
+ case <-time.After(5 * time.Second):
+ testutil.FatalStack(t, "failed to watch the event")
+ }
+
+ s.Put([]byte("foo1"), []byte("bar11"), 3)
+ select {
+ case resp := <-w.Chan():
+ if resp.WatchID != wid {
+ t.Errorf("resp.WatchID got = %d, want = %d", resp.WatchID, wid)
+ }
+ ev := resp.Events[0]
+ if !reflect.DeepEqual(ev, wev[2]) {
+ t.Errorf("watched event = %+v, want %+v", ev, wev[2])
+ }
+ case <-time.After(5 * time.Second):
+ testutil.FatalStack(t, "failed to watch the event")
+ }
+}
+
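+// cleanup closes the store and then its backend.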
+func cleanup(s KV, b backend.Backend) {
+ s.Close()
+ b.Close()
+}
+
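+// put3TestKVs writes three keys, each with a distinct lease, and returns the KeyValues the store is expected to report for them.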
+func put3TestKVs(s KV) []mvccpb.KeyValue {
+ s.Put([]byte("foo"), []byte("bar"), 1)
+ s.Put([]byte("foo1"), []byte("bar1"), 2)
+ s.Put([]byte("foo2"), []byte("bar2"), 3)
+ return []mvccpb.KeyValue{
+ {Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1, Lease: 1},
+ {Key: []byte("foo1"), Value: []byte("bar1"), CreateRevision: 3, ModRevision: 3, Version: 1, Lease: 2},
+ {Key: []byte("foo2"), Value: []byte("bar2"), CreateRevision: 4, ModRevision: 4, Version: 1, Lease: 3},
+ }
+}
diff --git a/server/mvcc/kv_view.go b/server/storage/mvcc/kv_view.go
similarity index 100%
rename from server/mvcc/kv_view.go
rename to server/storage/mvcc/kv_view.go
diff --git a/server/storage/mvcc/kvstore.go b/server/storage/mvcc/kvstore.go
new file mode 100644
index 00000000000..3e1226c9174
--- /dev/null
+++ b/server/storage/mvcc/kvstore.go
@@ -0,0 +1,539 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+ "go.etcd.io/etcd/pkg/v3/schedule"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/lease"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+var (
+ ErrCompacted = errors.New("mvcc: required revision has been compacted")
+ ErrFutureRev = errors.New("mvcc: required revision is a future revision")
+)
+
+var (
+ restoreChunkKeys = 10000 // non-const for testing
+ defaultCompactionBatchLimit = 1000
+ defaultCompactionSleepInterval = 10 * time.Millisecond
+)
+
+type StoreConfig struct {
+ CompactionBatchLimit int
+ CompactionSleepInterval time.Duration
+}
+
+type store struct {
+ ReadView
+ WriteView
+
+ cfg StoreConfig
+
+ // mu is read-locked for txns and write-locked for non-txn store changes.
+ mu sync.RWMutex
+
+ b backend.Backend
+ kvindex index
+
+ le lease.Lessor
+
+ // revMu protects currentRev and compactMainRev.
+ // It is locked at the end of a write txn and released after the write txn's lock is unlocked.
+ // It is locked before locking a read txn and released once the read txn is locked.
+ revMu sync.RWMutex
+ // currentRev is the revision of the last completed transaction.
+ currentRev int64
+ // compactMainRev is the main revision of the last compaction.
+ compactMainRev int64
+
+ fifoSched schedule.Scheduler
+
+ stopc chan struct{}
+
+ lg *zap.Logger
+ hashes HashStorage
+}
+
+// NewStore returns a new store. It is useful for creating a store inside the
+// mvcc package. Externally it should only be used for testing.
+func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *store {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ if cfg.CompactionBatchLimit == 0 {
+ cfg.CompactionBatchLimit = defaultCompactionBatchLimit
+ }
+ if cfg.CompactionSleepInterval == 0 {
+ cfg.CompactionSleepInterval = defaultCompactionSleepInterval
+ }
+ s := &store{
+ cfg: cfg,
+ b: b,
+ kvindex: newTreeIndex(lg),
+
+ le: le,
+
+ currentRev: 1,
+ compactMainRev: -1,
+
+ fifoSched: schedule.NewFIFOScheduler(lg),
+
+ stopc: make(chan struct{}),
+
+ lg: lg,
+ }
+ s.hashes = NewHashStorage(lg, s)
+ s.ReadView = &readView{s}
+ s.WriteView = &writeView{s}
+ if s.le != nil {
+ s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write(traceutil.TODO()) })
+ }
+
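+ // ensure the key and meta buckets exist and are committed before restoring from the backend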
+ tx := s.b.BatchTx()
+ tx.LockOutsideApply()
+ tx.UnsafeCreateBucket(schema.Key)
+ schema.UnsafeCreateMetaBucket(tx)
+ tx.Unlock()
+ s.b.ForceCommit()
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if err := s.restore(); err != nil {
+ // TODO: return the error instead of panic here?
+ panic("failed to recover store from backend")
+ }
+
+ return s
+}
+
+func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) {
+ if ctx == nil || ctx.Err() != nil {
+ select {
+ case <-s.stopc:
+ default:
+ // Avoid a deadlock in mvcc; for more information, refer to PR 11817.
+ // s.stopc is only updated by the restore operation, which is invoked when
+ // applying a snapshot. Compaction and snapshot-apply requests are serialized
+ // by raft, so they do not happen at the same time.
+ s.mu.Lock()
+ f := schedule.NewJob("kvstore_compactBarrier", func(ctx context.Context) { s.compactBarrier(ctx, ch) })
+ s.fifoSched.Schedule(f)
+ s.mu.Unlock()
+ }
+ return
+ }
+ close(ch)
+}
+
+func (s *store) hash() (hash uint32, revision int64, err error) {
+ // TODO: hash and revision could be inconsistent; one possible fix is to take s.revMu.RLock() at the beginning of the function, but that is costly
+ start := time.Now()
+
+ s.b.ForceCommit()
+ h, err := s.b.Hash(schema.DefaultIgnores)
+
+ hashSec.Observe(time.Since(start).Seconds())
+ return h, s.currentRev, err
+}
+
+func (s *store) hashByRev(rev int64) (hash KeyValueHash, currentRev int64, err error) {
+ var compactRev int64
+ start := time.Now()
+
+ s.mu.RLock()
+ s.revMu.RLock()
+ compactRev, currentRev = s.compactMainRev, s.currentRev
+ s.revMu.RUnlock()
+
+ if rev > 0 && rev < compactRev {
+ s.mu.RUnlock()
+ return KeyValueHash{}, 0, ErrCompacted
+ } else if rev > 0 && rev > currentRev {
+ s.mu.RUnlock()
+ return KeyValueHash{}, currentRev, ErrFutureRev
+ }
+ if rev == 0 {
+ rev = currentRev
+ }
+ keep := s.kvindex.Keep(rev)
+
+ tx := s.b.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ s.mu.RUnlock()
+ hash, err = unsafeHashByRev(tx, compactRev, rev, keep)
+ hashRevSec.Observe(time.Since(start).Seconds())
+ return hash, currentRev, err
+}
+
+func (s *store) updateCompactRev(rev int64) (<-chan struct{}, int64, error) {
+ s.revMu.Lock()
+ if rev <= s.compactMainRev {
+ ch := make(chan struct{})
+ f := schedule.NewJob("kvstore_updateCompactRev_compactBarrier", func(ctx context.Context) { s.compactBarrier(ctx, ch) })
+ s.fifoSched.Schedule(f)
+ s.revMu.Unlock()
+ return ch, 0, ErrCompacted
+ }
+ if rev > s.currentRev {
+ s.revMu.Unlock()
+ return nil, 0, ErrFutureRev
+ }
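+ // remember the previous compaction revision; it is returned so the compaction hash can cover every revision since the last compaction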
+ compactMainRev := s.compactMainRev
+ s.compactMainRev = rev
+
+ SetScheduledCompact(s.b.BatchTx(), rev)
+ // ensure that desired compaction is persisted
+ // gofail: var compactBeforeCommitScheduledCompact struct{}
+ s.b.ForceCommit()
+ // gofail: var compactAfterCommitScheduledCompact struct{}
+
+ s.revMu.Unlock()
+
+ return nil, compactMainRev, nil
+}
+
+// checkPrevCompactionCompleted checks whether the previous scheduled compaction is completed.
+func (s *store) checkPrevCompactionCompleted() bool {
+ tx := s.b.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ scheduledCompact, scheduledCompactFound := UnsafeReadScheduledCompact(tx)
+ finishedCompact, finishedCompactFound := UnsafeReadFinishedCompact(tx)
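+ // the previous compaction completed only if the scheduled and finished compact revisions (and their presence flags) match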
+ return scheduledCompact == finishedCompact && scheduledCompactFound == finishedCompactFound
+}
+
+func (s *store) compact(trace *traceutil.Trace, rev, prevCompactRev int64, prevCompactionCompleted bool) <-chan struct{} {
+ ch := make(chan struct{})
+ j := schedule.NewJob("kvstore_compact", func(ctx context.Context) {
+ if ctx.Err() != nil {
+ s.compactBarrier(ctx, ch)
+ return
+ }
+ hash, err := s.scheduleCompaction(rev, prevCompactRev)
+ if err != nil {
+ s.lg.Warn("Failed compaction", zap.Error(err))
+ s.compactBarrier(context.TODO(), ch)
+ return
+ }
+ // Only store the hash value if the previous compaction completed, i.e. this
+ // compaction has hashed every revision since the last compaction. For more details, see #15919.
+ if prevCompactionCompleted {
+ s.hashes.Store(hash)
+ } else {
+ s.lg.Info("previous compaction was interrupted, skip storing compaction hash value")
+ }
+ close(ch)
+ })
+
+ s.fifoSched.Schedule(j)
+ trace.Step("schedule compaction")
+ return ch
+}
+
+func (s *store) compactLockfree(rev int64) (<-chan struct{}, error) {
+ prevCompactionCompleted := s.checkPrevCompactionCompleted()
+ ch, prevCompactRev, err := s.updateCompactRev(rev)
+ if err != nil {
+ return ch, err
+ }
+
+ return s.compact(traceutil.TODO(), rev, prevCompactRev, prevCompactionCompleted), nil
+}
+
+func (s *store) Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) {
+ s.mu.Lock()
+ prevCompactionCompleted := s.checkPrevCompactionCompleted()
+ ch, prevCompactRev, err := s.updateCompactRev(rev)
+ trace.Step("check and update compact revision")
+ if err != nil {
+ s.mu.Unlock()
+ return ch, err
+ }
+ s.mu.Unlock()
+
+ return s.compact(trace, rev, prevCompactRev, prevCompactionCompleted), nil
+}
+
+func (s *store) Commit() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.b.ForceCommit()
+}
+
+func (s *store) Restore(b backend.Backend) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ close(s.stopc)
+ s.fifoSched.Stop()
+
+ s.b = b
+ s.kvindex = newTreeIndex(s.lg)
+
+ {
+ // During restore the metrics might report 'special' values
+ s.revMu.Lock()
+ s.currentRev = 1
+ s.compactMainRev = -1
+ s.revMu.Unlock()
+ }
+
+ s.fifoSched = schedule.NewFIFOScheduler(s.lg)
+ s.stopc = make(chan struct{})
+
+ return s.restore()
+}
+
+//nolint:unparam
+func (s *store) restore() error {
+ s.setupMetricsReporter()
+
+ min, max := NewRevBytes(), NewRevBytes()
+ min = RevToBytes(Revision{Main: 1}, min)
+ max = RevToBytes(Revision{Main: math.MaxInt64, Sub: math.MaxInt64}, max)
+
+ keyToLease := make(map[string]lease.LeaseID)
+
+ // restore index
+ tx := s.b.ReadTx()
+ tx.RLock()
+
+ finishedCompact, found := UnsafeReadFinishedCompact(tx)
+ if found {
+ s.revMu.Lock()
+ s.compactMainRev = finishedCompact
+
+ s.lg.Info(
+ "restored last compact revision",
+ zap.String("meta-bucket-name-key", string(schema.FinishedCompactKeyName)),
+ zap.Int64("restored-compact-revision", s.compactMainRev),
+ )
+ s.revMu.Unlock()
+ }
+ scheduledCompact, _ := UnsafeReadScheduledCompact(tx)
+ // index keys concurrently as they're loaded in from tx
+ keysGauge.Set(0)
+ rkvc, revc := restoreIntoIndex(s.lg, s.kvindex)
+ for {
+ keys, vals := tx.UnsafeRange(schema.Key, min, max, int64(restoreChunkKeys))
+ if len(keys) == 0 {
+ break
+ }
+ // rkvc blocks if the total pending keys exceeds the restore
+ // chunk size to keep keys from consuming too much memory.
+ restoreChunk(s.lg, rkvc, keys, vals, keyToLease)
+ if len(keys) < restoreChunkKeys {
+ // a partial set means there are no more keys to read, so this is the final set
+ break
+ }
+ // next set begins after where this one ended
+ newMin := BytesToRev(keys[len(keys)-1][:revBytesLen])
+ newMin.Sub++
+ min = RevToBytes(newMin, min)
+ }
+ close(rkvc)
+
+ {
+ s.revMu.Lock()
+ s.currentRev = <-revc
+
+ // Keys in the range [compacted revision - N, compaction revision] might all have been
+ // deleted due to compaction. In that case the correct revision is the compaction
+ // revision, not the largest revision we have seen.
+ if s.currentRev < s.compactMainRev {
+ s.currentRev = s.compactMainRev
+ }
+
+ // If the latest revision was a tombstone revision and etcd just compacted
+ // it, but crashed right before persisting the FinishedCompactRevision,
+ // then it would lead to revision decreasing in bbolt db file. In such
+ // a scenario, we should adjust the current revision using the scheduled
+ // compact revision on bootstrap when etcd gets started again.
+ //
+ // See https://github.com/etcd-io/etcd/issues/17780#issuecomment-2061900231
+ if s.currentRev < scheduledCompact {
+ s.currentRev = scheduledCompact
+ }
+ s.revMu.Unlock()
+ }
+
+ if scheduledCompact <= s.compactMainRev {
+ scheduledCompact = 0
+ }
+
+ for key, lid := range keyToLease {
+ if s.le == nil {
+ tx.RUnlock()
+ panic("no lessor to attach lease")
+ }
+ err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}})
+ if err != nil {
+ s.lg.Error(
+ "failed to attach a lease",
+ zap.String("lease-id", fmt.Sprintf("%016x", lid)),
+ zap.Error(err),
+ )
+ }
+ }
+ tx.RUnlock()
+
+ s.lg.Info("kvstore restored", zap.Int64("current-rev", s.currentRev))
+
+ if scheduledCompact != 0 {
+ if _, err := s.compactLockfree(scheduledCompact); err != nil {
+ s.lg.Warn("compaction encountered error",
+ zap.Int64("scheduled-compact-revision", scheduledCompact),
+ zap.Error(err),
+ )
+ } else {
+ s.lg.Info(
+ "resume scheduled compaction",
+ zap.Int64("scheduled-compact-revision", scheduledCompact),
+ )
+ }
+ }
+
+ return nil
+}
+
+type revKeyValue struct {
+ key []byte
+ kv mvccpb.KeyValue
+ kstr string
+}
+
+func restoreIntoIndex(lg *zap.Logger, idx index) (chan<- revKeyValue, <-chan int64) {
+ rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1)
+ go func() {
+ currentRev := int64(1)
+ defer func() { revc <- currentRev }()
+ // restore the tree index from the streamed records, which arrive in revision order rather than key order.
+ kiCache := make(map[string]*keyIndex, restoreChunkKeys)
+ for rkv := range rkvc {
+ ki, ok := kiCache[rkv.kstr]
+ // purge kiCache if it has grown large but the key is still missing from the cache
+ if !ok && len(kiCache) >= restoreChunkKeys {
+ i := 10
+ for k := range kiCache {
+ delete(kiCache, k)
+ if i--; i == 0 {
+ break
+ }
+ }
+ }
+ // cache miss: fetch from the tree index if the key is already there
+ if !ok {
+ ki = &keyIndex{key: rkv.kv.Key}
+ if idxKey := idx.KeyIndex(ki); idxKey != nil {
+ kiCache[rkv.kstr], ki = idxKey, idxKey
+ ok = true
+ }
+ }
+
+ rev := BytesToRev(rkv.key)
+ verify.Verify(func() {
+ if rev.Main < currentRev {
+ panic(fmt.Errorf("revision %d shouldn't be less than the previous revision %d", rev.Main, currentRev))
+ }
+ })
+ currentRev = rev.Main
+
+ if ok {
+ if isTombstone(rkv.key) {
+ if err := ki.tombstone(lg, rev.Main, rev.Sub); err != nil {
+ lg.Warn("tombstone encountered error", zap.Error(err))
+ }
+ continue
+ }
+ ki.put(lg, rev.Main, rev.Sub)
+ } else if !isTombstone(rkv.key) {
+ ki.restore(lg, Revision{Main: rkv.kv.CreateRevision}, rev, rkv.kv.Version)
+ idx.Insert(ki)
+ kiCache[rkv.kstr] = ki
+ }
+ }
+ }()
+ return rkvc, revc
+}
+
+func restoreChunk(lg *zap.Logger, kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) {
+ for i, key := range keys {
+ rkv := revKeyValue{key: key}
+ if err := rkv.kv.Unmarshal(vals[i]); err != nil {
+ lg.Fatal("failed to unmarshal mvccpb.KeyValue", zap.Error(err))
+ }
+ rkv.kstr = string(rkv.kv.Key)
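+ // a tombstone, or a key without a lease, clears any recorded lease; otherwise
+ // remember the lease so it can be re-attached after the index is restored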
+ if isTombstone(key) {
+ delete(keyToLease, rkv.kstr)
+ } else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease {
+ keyToLease[rkv.kstr] = lid
+ } else {
+ delete(keyToLease, rkv.kstr)
+ }
+ kvc <- rkv
+ }
+}
+
+func (s *store) Close() error {
+ close(s.stopc)
+ s.fifoSched.Stop()
+ return nil
+}
+
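+// setupMetricsReporter registers closures that report this store's backend size, open read txn count, and revision values to the package-level metrics.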
+func (s *store) setupMetricsReporter() {
+ b := s.b
+ reportDbTotalSizeInBytesMu.Lock()
+ reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) }
+ reportDbTotalSizeInBytesMu.Unlock()
+ reportDbTotalSizeInUseInBytesMu.Lock()
+ reportDbTotalSizeInUseInBytes = func() float64 { return float64(b.SizeInUse()) }
+ reportDbTotalSizeInUseInBytesMu.Unlock()
+ reportDbOpenReadTxNMu.Lock()
+ reportDbOpenReadTxN = func() float64 { return float64(b.OpenReadTxN()) }
+ reportDbOpenReadTxNMu.Unlock()
+ reportCurrentRevMu.Lock()
+ reportCurrentRev = func() float64 {
+ s.revMu.RLock()
+ defer s.revMu.RUnlock()
+ return float64(s.currentRev)
+ }
+ reportCurrentRevMu.Unlock()
+ reportCompactRevMu.Lock()
+ reportCompactRev = func() float64 {
+ s.revMu.RLock()
+ defer s.revMu.RUnlock()
+ return float64(s.compactMainRev)
+ }
+ reportCompactRevMu.Unlock()
+}
+
+func (s *store) HashStorage() HashStorage {
+ return s.hashes
+}
diff --git a/server/mvcc/kvstore_bench_test.go b/server/storage/mvcc/kvstore_bench_test.go
similarity index 76%
rename from server/mvcc/kvstore_bench_test.go
rename to server/storage/mvcc/kvstore_bench_test.go
index 918cecacca2..f7a9bd7296c 100644
--- a/server/mvcc/kvstore_bench_test.go
+++ b/server/storage/mvcc/kvstore_bench_test.go
@@ -18,19 +18,20 @@ import (
"context"
"testing"
- "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
+
"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/etcdserver/cindex"
"go.etcd.io/etcd/server/v3/lease"
- betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
-
- "go.uber.org/zap"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+ "go.etcd.io/etcd/server/v3/storage/schema"
)
func BenchmarkStorePut(b *testing.B) {
- be, tmpPath := betesting.NewDefaultTmpBackend(b)
- s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, be, tmpPath)
+ be, _ := betesting.NewDefaultTmpBackend(b)
+ s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, be)
// arbitrary number of bytes
bytesN := 64
@@ -47,9 +48,9 @@ func BenchmarkStoreRangeKey1(b *testing.B) { benchmarkStoreRange(b, 1) }
func BenchmarkStoreRangeKey100(b *testing.B) { benchmarkStoreRange(b, 100) }
func benchmarkStoreRange(b *testing.B, n int) {
- be, tmpPath := betesting.NewDefaultTmpBackend(b)
- s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, be, tmpPath)
+ be, _ := betesting.NewDefaultTmpBackend(b)
+ s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, be)
// 64 byte key/val
keys, val := createBytesSlice(64, n), createBytesSlice(64, 1)
@@ -83,7 +84,7 @@ func BenchmarkConsistentIndex(b *testing.B) {
tx := be.BatchTx()
tx.Lock()
- cindex.UnsafeCreateMetaBucket(tx)
+ schema.UnsafeCreateMetaBucket(tx)
ci.UnsafeSave(tx)
tx.Unlock()
@@ -94,11 +95,11 @@ func BenchmarkConsistentIndex(b *testing.B) {
}
}
-// BenchmarkStoreTxnPutUpdate is same as above, but instead updates single key
+// BenchmarkStorePutUpdate is the same as above, but updates a single key instead
func BenchmarkStorePutUpdate(b *testing.B) {
- be, tmpPath := betesting.NewDefaultTmpBackend(b)
- s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, be, tmpPath)
+ be, _ := betesting.NewDefaultTmpBackend(b)
+ s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, be)
// arbitrary number of bytes
keys := createBytesSlice(64, 1)
@@ -114,9 +115,9 @@ func BenchmarkStorePutUpdate(b *testing.B) {
// with transaction begin and end, where transaction involves
// some synchronization operations, such as mutex locking.
func BenchmarkStoreTxnPut(b *testing.B) {
- be, tmpPath := betesting.NewDefaultTmpBackend(b)
- s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(s, be, tmpPath)
+ be, _ := betesting.NewDefaultTmpBackend(b)
+ s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, be)
// arbitrary number of bytes
bytesN := 64
@@ -134,10 +135,10 @@ func BenchmarkStoreTxnPut(b *testing.B) {
// benchmarkStoreRestore benchmarks the restore operation
func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
- be, tmpPath := betesting.NewDefaultTmpBackend(b)
- s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
+ be, _ := betesting.NewDefaultTmpBackend(b)
+ s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
// use closure to capture 's' to pick up the reassignment
- defer func() { cleanup(s, be, tmpPath) }()
+ defer func() { cleanup(s, be) }()
// arbitrary number of bytes
bytesN := 64
@@ -151,11 +152,11 @@ func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
txn.End()
}
}
- assert.NoError(b, s.Close())
+ require.NoError(b, s.Close())
b.ReportAllocs()
b.ResetTimer()
- s = NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
+ s = NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
}
func BenchmarkStoreRestoreRevs1(b *testing.B) {
diff --git a/server/storage/mvcc/kvstore_compaction.go b/server/storage/mvcc/kvstore_compaction.go
new file mode 100644
index 00000000000..45409114e39
--- /dev/null
+++ b/server/storage/mvcc/kvstore_compaction.go
@@ -0,0 +1,99 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "encoding/binary"
+ "fmt"
+ "time"
+
+ humanize "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+func (s *store) scheduleCompaction(compactMainRev, prevCompactRev int64) (KeyValueHash, error) {
+ totalStart := time.Now()
+ keep := s.kvindex.Compact(compactMainRev)
+ indexCompactionPauseMs.Observe(float64(time.Since(totalStart) / time.Millisecond))
+
+ totalStart = time.Now()
+ defer func() { dbCompactionTotalMs.Observe(float64(time.Since(totalStart) / time.Millisecond)) }()
+ keyCompactions := 0
+ defer func() { dbCompactionKeysCounter.Add(float64(keyCompactions)) }()
+ defer func() { dbCompactionLast.Set(float64(time.Now().Unix())) }()
+
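+ // end is an 8-byte exclusive upper bound: every revision key whose main revision is <= compactMainRev sorts below it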
+ end := make([]byte, 8)
+ binary.BigEndian.PutUint64(end, uint64(compactMainRev+1))
+
+ batchNum := s.cfg.CompactionBatchLimit
+ batchTicker := time.NewTicker(s.cfg.CompactionSleepInterval)
+ defer batchTicker.Stop()
+ h := newKVHasher(prevCompactRev, compactMainRev, keep)
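+ // last tracks where the next batch resumes; a revision key is 17 bytes
+ // (8-byte main revision, 1 separator byte, 8-byte sub revision)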
+ last := make([]byte, 8+1+8)
+ for {
+ var rev Revision
+
+ start := time.Now()
+
+ tx := s.b.BatchTx()
+ tx.LockOutsideApply()
+ keys, values := tx.UnsafeRange(schema.Key, last, end, int64(batchNum))
+ for i := range keys {
+ rev = BytesToRev(keys[i])
+ if _, ok := keep[rev]; !ok {
+ tx.UnsafeDelete(schema.Key, keys[i])
+ keyCompactions++
+ }
+ h.WriteKeyValue(keys[i], values[i])
+ }
+
+ if len(keys) < batchNum {
+ // gofail: var compactBeforeSetFinishedCompact struct{}
+ UnsafeSetFinishedCompact(tx, compactMainRev)
+ tx.Unlock()
+ // gofail: var compactAfterSetFinishedCompact struct{}
+ hash := h.Hash()
+ size, sizeInUse := s.b.Size(), s.b.SizeInUse()
+ s.lg.Info(
+ "finished scheduled compaction",
+ zap.Int64("compact-revision", compactMainRev),
+ zap.Duration("took", time.Since(totalStart)),
+ zap.Uint32("hash", hash.Hash),
+ zap.Int64("current-db-size-bytes", size),
+ zap.String("current-db-size", humanize.Bytes(uint64(size))),
+ zap.Int64("current-db-size-in-use-bytes", sizeInUse),
+ zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse))),
+ )
+ return hash, nil
+ }
+
+ tx.Unlock()
+ // update last
+ last = RevToBytes(Revision{Main: rev.Main, Sub: rev.Sub + 1}, last)
+ // Immediately commit the compaction deletes instead of letting them accumulate in the write buffer
+ // gofail: var compactBeforeCommitBatch struct{}
+ s.b.ForceCommit()
+ // gofail: var compactAfterCommitBatch struct{}
+ dbCompactionPauseMs.Observe(float64(time.Since(start) / time.Millisecond))
+
+ select {
+ case <-batchTicker.C:
+ case <-s.stopc:
+ return KeyValueHash{}, fmt.Errorf("interrupted due to stop signal")
+ }
+ }
+}
diff --git a/server/storage/mvcc/kvstore_compaction_test.go b/server/storage/mvcc/kvstore_compaction_test.go
new file mode 100644
index 00000000000..b4a7f41a367
--- /dev/null
+++ b/server/storage/mvcc/kvstore_compaction_test.go
@@ -0,0 +1,150 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "context"
+ "reflect"
+ "testing"
+ "time"
+
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/lease"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+func TestScheduleCompaction(t *testing.T) {
+ revs := []Revision{{Main: 1}, {Main: 2}, {Main: 3}}
+
+ tests := []struct {
+ rev int64
+ keep map[Revision]struct{}
+ wrevs []Revision
+ }{
+ // compact at 1 and discard all history
+ {
+ 1,
+ nil,
+ revs[1:],
+ },
+ // compact at 3 and discard all history
+ {
+ 3,
+ nil,
+ nil,
+ },
+ // compact at 1 but keep revision 1, so nothing is discarded
+ {
+ 1,
+ map[Revision]struct{}{
+ {Main: 1}: {},
+ },
+ revs,
+ },
+ // compact at 3 but keep revisions 2 and 3, so only revision 1 is discarded
+ {
+ 3,
+ map[Revision]struct{}{
+ {Main: 2}: {},
+ {Main: 3}: {},
+ },
+ revs[1:],
+ },
+ }
+ for i, tt := range tests {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ fi := newFakeIndex()
+ fi.indexCompactRespc <- tt.keep
+ s.kvindex = fi
+
+ tx := s.b.BatchTx()
+
+ tx.Lock()
+ for _, rev := range revs {
+ ibytes := NewRevBytes()
+ ibytes = RevToBytes(rev, ibytes)
+ tx.UnsafePut(schema.Key, ibytes, []byte("bar"))
+ }
+ tx.Unlock()
+
+ _, err := s.scheduleCompaction(tt.rev, 0)
+ if err != nil {
+ t.Error(err)
+ }
+
+ tx.Lock()
+ for _, rev := range tt.wrevs {
+ ibytes := NewRevBytes()
+ ibytes = RevToBytes(rev, ibytes)
+ keys, _ := tx.UnsafeRange(schema.Key, ibytes, nil, 0)
+ if len(keys) != 1 {
+ t.Errorf("#%d: range on %v = %d, want 1", i, rev, len(keys))
+ }
+ }
+ vals, _ := UnsafeReadFinishedCompact(tx)
+ if !reflect.DeepEqual(vals, tt.rev) {
+ t.Errorf("#%d: finished compact equal %+v, want %+v", i, vals, tt.rev)
+ }
+ tx.Unlock()
+
+ cleanup(s, b)
+ }
+}
+
+func TestCompactAllAndRestore(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s0 := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer b.Close()
+
+ s0.Put([]byte("foo"), []byte("bar"), lease.NoLease)
+ s0.Put([]byte("foo"), []byte("bar1"), lease.NoLease)
+ s0.Put([]byte("foo"), []byte("bar2"), lease.NoLease)
+ s0.DeleteRange([]byte("foo"), nil)
+
+ rev := s0.Rev()
+ // compact all keys
+ done, err := s0.Compact(traceutil.TODO(), rev)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ select {
+ case <-done:
+ case <-time.After(10 * time.Second):
+ t.Fatal("timeout waiting for compaction to finish")
+ }
+
+ err = s0.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ s1 := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ if s1.Rev() != rev {
+ t.Errorf("rev = %v, want %v", s1.Rev(), rev)
+ }
+ _, err = s1.Range(context.TODO(), []byte("foo"), nil, RangeOptions{})
+ if err != nil {
+ t.Errorf("unexpect range error %v", err)
+ }
+ err = s1.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/server/storage/mvcc/kvstore_test.go b/server/storage/mvcc/kvstore_test.go
new file mode 100644
index 00000000000..65ad4f240ec
--- /dev/null
+++ b/server/storage/mvcc/kvstore_test.go
@@ -0,0 +1,1101 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ mrand "math/rand"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+ "go.etcd.io/etcd/pkg/v3/schedule"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/lease"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+func TestStoreRev(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer s.Close()
+
+ for i := 1; i <= 3; i++ {
+ s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
+ if r := s.Rev(); r != int64(i+1) {
+ t.Errorf("#%d: rev = %d, want %d", i, r, i+1)
+ }
+ }
+}
+
+func TestStorePut(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ kv := mvccpb.KeyValue{
+ Key: []byte("foo"),
+ Value: []byte("bar"),
+ CreateRevision: 1,
+ ModRevision: 2,
+ Version: 1,
+ }
+ kvb, err := kv.Marshal()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tests := []struct {
+ rev Revision
+ r indexGetResp
+ rr *rangeResp
+
+ wrev Revision
+ wkey []byte
+ wkv mvccpb.KeyValue
+ wputrev Revision
+ }{
+ {
+ Revision{Main: 1},
+ indexGetResp{Revision{}, Revision{}, 0, ErrRevisionNotFound},
+ nil,
+
+ Revision{Main: 2},
+ newTestRevBytes(Revision{Main: 2}),
+ mvccpb.KeyValue{
+ Key: []byte("foo"),
+ Value: []byte("bar"),
+ CreateRevision: 2,
+ ModRevision: 2,
+ Version: 1,
+ Lease: 1,
+ },
+ Revision{Main: 2},
+ },
+ {
+ Revision{Main: 1, Sub: 1},
+ indexGetResp{Revision{Main: 2}, Revision{Main: 2}, 1, nil},
+ &rangeResp{[][]byte{newTestRevBytes(Revision{Main: 2, Sub: 1})}, [][]byte{kvb}},
+
+ Revision{Main: 2},
+ newTestRevBytes(Revision{Main: 2}),
+ mvccpb.KeyValue{
+ Key: []byte("foo"),
+ Value: []byte("bar"),
+ CreateRevision: 2,
+ ModRevision: 2,
+ Version: 2,
+ Lease: 2,
+ },
+ Revision{Main: 2},
+ },
+ {
+ Revision{Main: 2},
+ indexGetResp{Revision{Main: 2, Sub: 1}, Revision{Main: 2}, 2, nil},
+ &rangeResp{[][]byte{newTestRevBytes(Revision{Main: 2, Sub: 1})}, [][]byte{kvb}},
+
+ Revision{Main: 3},
+ newTestRevBytes(Revision{Main: 3}),
+ mvccpb.KeyValue{
+ Key: []byte("foo"),
+ Value: []byte("bar"),
+ CreateRevision: 2,
+ ModRevision: 3,
+ Version: 3,
+ Lease: 3,
+ },
+ Revision{Main: 3},
+ },
+ }
+ for i, tt := range tests {
+ s := newFakeStore(lg)
+ b := s.b.(*fakeBackend)
+ fi := s.kvindex.(*fakeIndex)
+
+ s.currentRev = tt.rev.Main
+ fi.indexGetRespc <- tt.r
+ if tt.rr != nil {
+ b.tx.rangeRespc <- *tt.rr
+ }
+
+ s.Put([]byte("foo"), []byte("bar"), lease.LeaseID(i+1))
+
+ data, err := tt.wkv.Marshal()
+ if err != nil {
+ t.Errorf("#%d: marshal err = %v, want nil", i, err)
+ }
+
+ wact := []testutil.Action{
+ {Name: "seqput", Params: []any{schema.Key, tt.wkey, data}},
+ }
+
+ if tt.rr != nil {
+ wact = []testutil.Action{
+ {Name: "seqput", Params: []any{schema.Key, tt.wkey, data}},
+ }
+ }
+
+ if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
+ t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact)
+ }
+ wact = []testutil.Action{
+ {Name: "get", Params: []any{[]byte("foo"), tt.wputrev.Main}},
+ {Name: "put", Params: []any{[]byte("foo"), tt.wputrev}},
+ }
+ if g := fi.Action(); !reflect.DeepEqual(g, wact) {
+ t.Errorf("#%d: index action = %+v, want %+v", i, g, wact)
+ }
+ if s.currentRev != tt.wrev.Main {
+ t.Errorf("#%d: rev = %+v, want %+v", i, s.currentRev, tt.wrev)
+ }
+
+ s.Close()
+ }
+}
+
+func TestStoreRange(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ key := newTestRevBytes(Revision{Main: 2})
+ kv := mvccpb.KeyValue{
+ Key: []byte("foo"),
+ Value: []byte("bar"),
+ CreateRevision: 1,
+ ModRevision: 2,
+ Version: 1,
+ }
+ kvb, err := kv.Marshal()
+ if err != nil {
+ t.Fatal(err)
+ }
+ wrev := int64(2)
+
+ tests := []struct {
+ idxr indexRangeResp
+ r rangeResp
+ }{
+ {
+ indexRangeResp{[][]byte{[]byte("foo")}, []Revision{{Main: 2}}},
+ rangeResp{[][]byte{key}, [][]byte{kvb}},
+ },
+ {
+ indexRangeResp{[][]byte{[]byte("foo"), []byte("foo1")}, []Revision{{Main: 2}, {Main: 3}}},
+ rangeResp{[][]byte{key}, [][]byte{kvb}},
+ },
+ }
+
+ ro := RangeOptions{Limit: 1, Rev: 0, Count: false}
+ for i, tt := range tests {
+ s := newFakeStore(lg)
+ b := s.b.(*fakeBackend)
+ fi := s.kvindex.(*fakeIndex)
+
+ s.currentRev = 2
+ b.tx.rangeRespc <- tt.r
+ fi.indexRangeRespc <- tt.idxr
+
+ ret, err := s.Range(context.TODO(), []byte("foo"), []byte("goo"), ro)
+ if err != nil {
+ t.Errorf("#%d: err = %v, want nil", i, err)
+ }
+ if w := []mvccpb.KeyValue{kv}; !reflect.DeepEqual(ret.KVs, w) {
+ t.Errorf("#%d: kvs = %+v, want %+v", i, ret.KVs, w)
+ }
+ if ret.Rev != wrev {
+ t.Errorf("#%d: rev = %d, want %d", i, ret.Rev, wrev)
+ }
+
+ wstart := NewRevBytes()
+ wstart = RevToBytes(tt.idxr.revs[0], wstart)
+ wact := []testutil.Action{
+ {Name: "range", Params: []any{schema.Key, wstart, []byte(nil), int64(0)}},
+ }
+ if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
+ t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact)
+ }
+ wact = []testutil.Action{
+ {Name: "range", Params: []any{[]byte("foo"), []byte("goo"), wrev}},
+ }
+ if g := fi.Action(); !reflect.DeepEqual(g, wact) {
+ t.Errorf("#%d: index action = %+v, want %+v", i, g, wact)
+ }
+ if s.currentRev != 2 {
+ t.Errorf("#%d: current rev = %+v, want %+v", i, s.currentRev, 2)
+ }
+
+ s.Close()
+ }
+}
+
+func TestStoreDeleteRange(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ key := newTestRevBytes(Revision{Main: 2})
+ kv := mvccpb.KeyValue{
+ Key: []byte("foo"),
+ Value: []byte("bar"),
+ CreateRevision: 1,
+ ModRevision: 2,
+ Version: 1,
+ }
+ kvb, err := kv.Marshal()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tests := []struct {
+ rev Revision
+ r indexRangeResp
+ rr rangeResp
+
+ wkey []byte
+ wrev Revision
+ wrrev int64
+ wdelrev Revision
+ }{
+ {
+ Revision{Main: 2},
+ indexRangeResp{[][]byte{[]byte("foo")}, []Revision{{Main: 2}}},
+ rangeResp{[][]byte{key}, [][]byte{kvb}},
+
+ newTestBucketKeyBytes(newBucketKey(3, 0, true)),
+ Revision{Main: 3},
+ 2,
+ Revision{Main: 3},
+ },
+ }
+ for i, tt := range tests {
+ s := newFakeStore(lg)
+ b := s.b.(*fakeBackend)
+ fi := s.kvindex.(*fakeIndex)
+
+ s.currentRev = tt.rev.Main
+ fi.indexRangeRespc <- tt.r
+ b.tx.rangeRespc <- tt.rr
+
+ n, _ := s.DeleteRange([]byte("foo"), []byte("goo"))
+ if n != 1 {
+ t.Errorf("#%d: n = %d, want 1", i, n)
+ }
+
+ data, err := (&mvccpb.KeyValue{
+ Key: []byte("foo"),
+ }).Marshal()
+ if err != nil {
+ t.Errorf("#%d: marshal err = %v, want nil", i, err)
+ }
+ wact := []testutil.Action{
+ {Name: "seqput", Params: []any{schema.Key, tt.wkey, data}},
+ }
+ if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
+ t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact)
+ }
+ wact = []testutil.Action{
+ {Name: "range", Params: []any{[]byte("foo"), []byte("goo"), tt.wrrev}},
+ {Name: "tombstone", Params: []any{[]byte("foo"), tt.wdelrev}},
+ }
+ if g := fi.Action(); !reflect.DeepEqual(g, wact) {
+ t.Errorf("#%d: index action = %+v, want %+v", i, g, wact)
+ }
+ if s.currentRev != tt.wrev.Main {
+ t.Errorf("#%d: rev = %+v, want %+v", i, s.currentRev, tt.wrev)
+ }
+ s.Close()
+ }
+}
+
+func TestStoreCompact(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ s := newFakeStore(lg)
+ defer s.Close()
+ b := s.b.(*fakeBackend)
+ fi := s.kvindex.(*fakeIndex)
+
+ s.currentRev = 3
+ fi.indexCompactRespc <- map[Revision]struct{}{{Main: 1}: {}}
+ key1 := newTestRevBytes(Revision{Main: 1})
+ key2 := newTestRevBytes(Revision{Main: 2})
+ b.tx.rangeRespc <- rangeResp{[][]byte{}, [][]byte{}}
+ b.tx.rangeRespc <- rangeResp{[][]byte{}, [][]byte{}}
+ b.tx.rangeRespc <- rangeResp{[][]byte{key1, key2}, [][]byte{[]byte("alice"), []byte("bob")}}
+
+ s.Compact(traceutil.TODO(), 3)
+ s.fifoSched.WaitFinish(1)
+
+ if s.compactMainRev != 3 {
+ t.Errorf("compact main rev = %d, want 3", s.compactMainRev)
+ }
+ end := make([]byte, 8)
+ binary.BigEndian.PutUint64(end, uint64(4))
+ wact := []testutil.Action{
+ {Name: "range", Params: []any{schema.Meta, schema.ScheduledCompactKeyName, []uint8(nil), int64(0)}},
+ {Name: "range", Params: []any{schema.Meta, schema.FinishedCompactKeyName, []uint8(nil), int64(0)}},
+ {Name: "put", Params: []any{schema.Meta, schema.ScheduledCompactKeyName, newTestRevBytes(Revision{Main: 3})}},
+ {Name: "range", Params: []any{schema.Key, make([]byte, 17), end, int64(10000)}},
+ {Name: "delete", Params: []any{schema.Key, key2}},
+ {Name: "put", Params: []any{schema.Meta, schema.FinishedCompactKeyName, newTestRevBytes(Revision{Main: 3})}},
+ }
+ if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
+ t.Errorf("tx actions = %+v, want %+v", g, wact)
+ }
+ wact = []testutil.Action{
+ {Name: "compact", Params: []any{int64(3)}},
+ }
+ if g := fi.Action(); !reflect.DeepEqual(g, wact) {
+ t.Errorf("index action = %+v, want %+v", g, wact)
+ }
+}
+
+func TestStoreRestore(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ s := newFakeStore(lg)
+ b := s.b.(*fakeBackend)
+ fi := s.kvindex.(*fakeIndex)
+ defer s.Close()
+
+ putkey := newTestRevBytes(Revision{Main: 3})
+ putkv := mvccpb.KeyValue{
+ Key: []byte("foo"),
+ Value: []byte("bar"),
+ CreateRevision: 4,
+ ModRevision: 4,
+ Version: 1,
+ }
+ putkvb, err := putkv.Marshal()
+ if err != nil {
+ t.Fatal(err)
+ }
+ delkey := newTestBucketKeyBytes(newBucketKey(5, 0, true))
+ delkv := mvccpb.KeyValue{
+ Key: []byte("foo"),
+ }
+ delkvb, err := delkv.Marshal()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b.tx.rangeRespc <- rangeResp{[][]byte{schema.FinishedCompactKeyName}, [][]byte{newTestRevBytes(Revision{Main: 3})}}
+ b.tx.rangeRespc <- rangeResp{[][]byte{schema.ScheduledCompactKeyName}, [][]byte{newTestRevBytes(Revision{Main: 3})}}
+
+ b.tx.rangeRespc <- rangeResp{[][]byte{putkey, delkey}, [][]byte{putkvb, delkvb}}
+ b.tx.rangeRespc <- rangeResp{nil, nil}
+
+ s.restore()
+
+ if s.compactMainRev != 3 {
+ t.Errorf("compact rev = %d, want 3", s.compactMainRev)
+ }
+ if s.currentRev != 5 {
+ t.Errorf("current rev = %v, want 5", s.currentRev)
+ }
+ wact := []testutil.Action{
+ {Name: "range", Params: []any{schema.Meta, schema.FinishedCompactKeyName, []byte(nil), int64(0)}},
+ {Name: "range", Params: []any{schema.Meta, schema.ScheduledCompactKeyName, []byte(nil), int64(0)}},
+ {Name: "range", Params: []any{schema.Key, newTestRevBytes(Revision{Main: 1}), newTestRevBytes(Revision{Main: math.MaxInt64, Sub: math.MaxInt64}), int64(restoreChunkKeys)}},
+ }
+ if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
+ t.Errorf("tx actions = %+v, want %+v", g, wact)
+ }
+
+ gens := []generation{
+ {created: Revision{Main: 4}, ver: 2, revs: []Revision{{Main: 3}, {Main: 5}}},
+ {created: Revision{Main: 0}, ver: 0, revs: nil},
+ }
+ ki := &keyIndex{key: []byte("foo"), modified: Revision{Main: 5}, generations: gens}
+ wact = []testutil.Action{
+ {Name: "keyIndex", Params: []any{ki}},
+ {Name: "insert", Params: []any{ki}},
+ }
+ if g := fi.Action(); !reflect.DeepEqual(g, wact) {
+ t.Errorf("index action = %+v, want %+v", g, wact)
+ }
+}
+
+func TestRestoreDelete(t *testing.T) {
+ oldChunk := restoreChunkKeys
+ restoreChunkKeys = mrand.Intn(3) + 2
+ defer func() { restoreChunkKeys = oldChunk }()
+
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer b.Close()
+
+ keys := make(map[string]struct{})
+ for i := 0; i < 20; i++ {
+ ks := fmt.Sprintf("foo-%d", i)
+ k := []byte(ks)
+ s.Put(k, []byte("bar"), lease.NoLease)
+ keys[ks] = struct{}{}
+ switch mrand.Intn(3) {
+ case 0:
+ // overwrite a previously written key chosen at random
+ ks = fmt.Sprintf("foo-%d", mrand.Intn(i+1))
+ s.Put([]byte(ks), []byte("baz"), lease.NoLease)
+ keys[ks] = struct{}{}
+ case 1:
+ // delete a random key, relying on random map iteration order
+ for k := range keys {
+ s.DeleteRange([]byte(k), nil)
+ delete(keys, k)
+ break
+ }
+ }
+ }
+ s.Close()
+
+ s = NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer s.Close()
+ for i := 0; i < 20; i++ {
+ ks := fmt.Sprintf("foo-%d", i)
+ r, err := s.Range(context.TODO(), []byte(ks), nil, RangeOptions{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, ok := keys[ks]; ok {
+ if len(r.KVs) == 0 {
+ t.Errorf("#%d: expected %q, got deleted", i, ks)
+ }
+ } else if len(r.KVs) != 0 {
+ t.Errorf("#%d: expected deleted, got %q", i, ks)
+ }
+ }
+}
+
+func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
+ tests := []string{"recreate", "restore"}
+ for _, test := range tests {
+ test := test
+
+ t.Run(test, func(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s0 := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+
+ s0.Put([]byte("foo"), []byte("bar"), lease.NoLease)
+ s0.Put([]byte("foo"), []byte("bar1"), lease.NoLease)
+ s0.Put([]byte("foo"), []byte("bar2"), lease.NoLease)
+
+ // record a scheduled compaction, but do not actually perform it
+ tx := s0.b.BatchTx()
+ tx.Lock()
+ UnsafeSetScheduledCompact(tx, 2)
+ tx.Unlock()
+
+ var s *store
+ switch test {
+ case "recreate":
+ s0.Close()
+ s = NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ case "restore":
+ // TODO(fuweid): the store doesn't support restoring from a
+ // closed state because there is no lock around `Close` and
+ // nothing marks the store as closed.
+ s0.Restore(b)
+ s = s0
+ }
+ defer cleanup(s, b)
+
+ // wait for scheduled compaction to be finished
+ time.Sleep(100 * time.Millisecond)
+
+ if _, err := s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{Rev: 1}); !errors.Is(err, ErrCompacted) {
+ t.Errorf("range on compacted rev error = %v, want %v", err, ErrCompacted)
+ }
+ // check the key in backend is deleted
+ revbytes := NewRevBytes()
+ revbytes = BucketKeyToBytes(newBucketKey(1, 0, false), revbytes)
+
+ // The disk compaction is done asynchronously and requires more time on slow disk.
+ // try 5 times for CI with slow IO.
+ for i := 0; i < 5; i++ {
+ tx := s.b.BatchTx()
+ tx.Lock()
+ ks, _ := tx.UnsafeRange(schema.Key, revbytes, nil, 0)
+ tx.Unlock()
+ if len(ks) != 0 {
+ time.Sleep(100 * time.Millisecond)
+ continue
+ }
+ return
+ }
+ t.Errorf("key for rev %+v still exists, want deleted", BytesToBucketKey(revbytes))
+ })
+ }
+}
+
+type hashKVResult struct {
+ hash uint32
+ compactRev int64
+}
+
+// TestHashKVWhenCompacting ensures that HashKV returns a correct hash while compaction is in progress.
+func TestHashKVWhenCompacting(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ rev := 10000
+ for i := 2; i <= rev; i++ {
+ s.Put([]byte("foo"), []byte(fmt.Sprintf("bar%d", i)), lease.NoLease)
+ }
+
+ hashCompactc := make(chan hashKVResult, 1)
+ var wg sync.WaitGroup
+ donec := make(chan struct{})
+ stopc := make(chan struct{})
+
+ // Call HashByRev(10000) in multiple goroutines until donec is closed
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ hash, _, err := s.HashStorage().HashByRev(int64(rev))
+ if err != nil {
+ t.Error(err)
+ }
+ select {
+ case <-stopc:
+ return
+ case <-donec:
+ return
+ case hashCompactc <- hashKVResult{hash.Hash, hash.CompactRevision}:
+ }
+ }
+ }()
+ }
+
+ // Check computed hashes by HashByRev are correct in a goroutine, until donec is closed
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ revHash := make(map[int64]uint32)
+ for {
+ select {
+ case r := <-hashCompactc:
+ if revHash[r.compactRev] == 0 {
+ revHash[r.compactRev] = r.hash
+ }
+
+ if r.hash != revHash[r.compactRev] {
+ t.Errorf("Hashes differ (current %v) != (saved %v)", r.hash, revHash[r.compactRev])
+ }
+ case <-stopc:
+ return
+ case <-donec:
+ return
+ }
+ }
+ }()
+
+ // Compact the store in a goroutine, compacting revisions 9900 through 10000, and close donec when finished
+ wg.Add(1)
+ go func() {
+ defer func() {
+ close(donec)
+ wg.Done()
+ }()
+
+ for i := 100; i >= 0; i-- {
+ select {
+ case <-stopc:
+ return
+ default:
+ }
+
+ _, err := s.Compact(traceutil.TODO(), int64(rev-i))
+ if err != nil {
+ t.Error(err)
+ }
+ // Wait for the compaction job to finish
+ s.fifoSched.WaitFinish(1)
+ // Leave time for calls to HashByRev to take place after each compaction
+ time.Sleep(10 * time.Millisecond)
+ }
+ }()
+
+ select {
+ case <-donec:
+ case <-time.After(20 * time.Second):
+ close(stopc)
+ wg.Wait()
+ testutil.FatalStack(t, "timeout")
+ }
+
+ close(stopc)
+ wg.Wait()
+}
+
+// TestHashKVWithCompactedAndFutureRevisions ensures that HashKV returns a correct hash when called
+// with a past revision (lower than the compacted one), a future revision, and the exact compacted revision
+func TestHashKVWithCompactedAndFutureRevisions(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ rev := 10000
+ compactRev := rev / 2
+
+ for i := 2; i <= rev; i++ {
+ s.Put([]byte("foo"), []byte(fmt.Sprintf("bar%d", i)), lease.NoLease)
+ }
+ if _, err := s.Compact(traceutil.TODO(), int64(compactRev)); err != nil {
+ t.Fatal(err)
+ }
+
+ _, _, errFutureRev := s.HashStorage().HashByRev(int64(rev + 1))
+ if !errors.Is(errFutureRev, ErrFutureRev) {
+ t.Error(errFutureRev)
+ }
+
+ _, _, errPastRev := s.HashStorage().HashByRev(int64(compactRev - 1))
+ if !errors.Is(errPastRev, ErrCompacted) {
+ t.Error(errPastRev)
+ }
+
+ _, _, errCompactRev := s.HashStorage().HashByRev(int64(compactRev))
+ if errCompactRev != nil {
+ t.Error(errCompactRev)
+ }
+}
+
+// TestHashKVZeroRevision ensures that "HashByRev(0)" computes
+// a correct hash value against the latest revision.
+func TestHashKVZeroRevision(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ rev := 10000
+ for i := 2; i <= rev; i++ {
+ s.Put([]byte("foo"), []byte(fmt.Sprintf("bar%d", i)), lease.NoLease)
+ }
+ if _, err := s.Compact(traceutil.TODO(), int64(rev/2)); err != nil {
+ t.Fatal(err)
+ }
+
+ hash1, _, err := s.HashStorage().HashByRev(int64(rev))
+ if err != nil {
+ t.Fatal(err)
+ }
+ var hash2 KeyValueHash
+ hash2, _, err = s.HashStorage().HashByRev(0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if hash1 != hash2 {
+ t.Errorf("hash %d (rev %d) != hash %d (rev 0)", hash1, rev, hash2)
+ }
+}
+
+func TestTxnPut(t *testing.T) {
+ // assign arbitrary size
+ bytesN := 30
+ sliceN := 100
+ keys := createBytesSlice(bytesN, sliceN)
+ vals := createBytesSlice(bytesN, sliceN)
+
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ for i := 0; i < sliceN; i++ {
+ txn := s.Write(traceutil.TODO())
+ base := int64(i + 2)
+ if rev := txn.Put(keys[i], vals[i], lease.NoLease); rev != base {
+ t.Errorf("#%d: rev = %d, want %d", i, rev, base)
+ }
+ txn.End()
+ }
+}
+
+// TestConcurrentReadNotBlockingWrite ensures a concurrent Read does not block Writes created after it
+func TestConcurrentReadNotBlockingWrite(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ // write something to read later
+ s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
+
+ // readTx simulates a long read request
+ readTx1 := s.Read(ConcurrentReadTxMode, traceutil.TODO())
+
+ // write should not be blocked by reads
+ done := make(chan struct{}, 1)
+ go func() {
+ s.Put([]byte("foo"), []byte("newBar"), lease.NoLease) // this is a write Txn
+ done <- struct{}{}
+ }()
+ select {
+ case <-done:
+ case <-time.After(1 * time.Second):
+ t.Fatalf("write should not be blocked by read")
+ }
+
+ // readTx2 simulates a short read request
+ readTx2 := s.Read(ConcurrentReadTxMode, traceutil.TODO())
+ ro := RangeOptions{Limit: 1, Rev: 0, Count: false}
+ ret, err := readTx2.Range(context.TODO(), []byte("foo"), nil, ro)
+ if err != nil {
+ t.Fatalf("failed to range: %v", err)
+ }
+ // readTx2 should see the result of new write
+ w := mvccpb.KeyValue{
+ Key: []byte("foo"),
+ Value: []byte("newBar"),
+ CreateRevision: 2,
+ ModRevision: 3,
+ Version: 2,
+ }
+ if !reflect.DeepEqual(ret.KVs[0], w) {
+ t.Fatalf("range result = %+v, want = %+v", ret.KVs[0], w)
+ }
+ readTx2.End()
+
+ ret, err = readTx1.Range(context.TODO(), []byte("foo"), nil, ro)
+ if err != nil {
+ t.Fatalf("failed to range: %v", err)
+ }
+ // readTx1 should not see the result of new write
+ w = mvccpb.KeyValue{
+ Key: []byte("foo"),
+ Value: []byte("bar"),
+ CreateRevision: 2,
+ ModRevision: 2,
+ Version: 1,
+ }
+ if !reflect.DeepEqual(ret.KVs[0], w) {
+ t.Fatalf("range result = %+v, want = %+v", ret.KVs[0], w)
+ }
+ readTx1.End()
+}
+
+// TestConcurrentReadTxAndWrite creates random concurrent Reads and Writes, and ensures Reads always see latest Writes
+func TestConcurrentReadTxAndWrite(t *testing.T) {
+ var (
+ numOfReads = 100
+ numOfWrites = 100
+ maxNumOfPutsPerWrite = 10
+ committedKVs kvs // committedKVs records the key-value pairs written by the finished Write Txns
+ mu sync.Mutex // mu protects committedKVs
+ )
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ var wg sync.WaitGroup
+ wg.Add(numOfWrites)
+ for i := 0; i < numOfWrites; i++ {
+ go func() {
+ defer wg.Done()
+ time.Sleep(time.Duration(mrand.Intn(100)) * time.Millisecond) // random starting time
+
+ tx := s.Write(traceutil.TODO())
+ numOfPuts := mrand.Intn(maxNumOfPutsPerWrite) + 1
+ var pendingKvs kvs
+ for j := 0; j < numOfPuts; j++ {
+ k := []byte(strconv.Itoa(mrand.Int()))
+ v := []byte(strconv.Itoa(mrand.Int()))
+ tx.Put(k, v, lease.NoLease)
+ pendingKvs = append(pendingKvs, kv{k, v})
+ }
+ // reads should not see above Puts until write is finished
+ mu.Lock()
+ committedKVs = merge(committedKVs, pendingKvs) // update shared data structure
+ tx.End()
+ mu.Unlock()
+ }()
+ }
+
+ wg.Add(numOfReads)
+ for i := 0; i < numOfReads; i++ {
+ go func() {
+ defer wg.Done()
+ time.Sleep(time.Duration(mrand.Intn(100)) * time.Millisecond) // random starting time
+
+ mu.Lock()
+ wKVs := make(kvs, len(committedKVs))
+ copy(wKVs, committedKVs)
+ tx := s.Read(ConcurrentReadTxMode, traceutil.TODO())
+ mu.Unlock()
+ // get all keys in backend store, and compare with wKVs
+ ret, err := tx.Range(context.TODO(), []byte("\x00000000"), []byte("\xffffffff"), RangeOptions{})
+ tx.End()
+ if err != nil {
+ t.Errorf("failed to range keys: %v", err)
+ return
+ }
+ if len(wKVs) == 0 && len(ret.KVs) == 0 { // no committed KVs yet
+ return
+ }
+ var result kvs
+ for _, keyValue := range ret.KVs {
+ result = append(result, kv{keyValue.Key, keyValue.Value})
+ }
+ if !reflect.DeepEqual(wKVs, result) {
+ t.Errorf("unexpected range result") // too many key value pairs, skip printing them
+ }
+ }()
+ }
+
+ // wait until goroutines finish or timeout
+ doneC := make(chan struct{})
+ go func() {
+ wg.Wait()
+ close(doneC)
+ }()
+ select {
+ case <-doneC:
+ case <-time.After(5 * time.Minute):
+ testutil.FatalStack(t, "timeout")
+ }
+}
+
+type kv struct {
+ key []byte
+ val []byte
+}
+
+type kvs []kv
+
+func (kvs kvs) Len() int { return len(kvs) }
+func (kvs kvs) Less(i, j int) bool { return bytes.Compare(kvs[i].key, kvs[j].key) < 0 }
+func (kvs kvs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] }
+
+func merge(dst, src kvs) kvs {
+ dst = append(dst, src...)
+ sort.Stable(dst)
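+ // sort.Stable keeps equal keys in their appended order, so the newest duplicate
+ // sorts last and wins in the dedup loop below.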
+ // remove duplicates, using only the newest value
+ // ref: tx_buffer.go
+ widx := 0
+ for ridx := 1; ridx < len(dst); ridx++ {
+ if !bytes.Equal(dst[widx].key, dst[ridx].key) {
+ widx++
+ }
+ dst[widx] = dst[ridx]
+ }
+ return dst[:widx+1]
+}
+
+// TODO: test attach key to lessor
+
+func newTestRevBytes(rev Revision) []byte {
+ bytes := NewRevBytes()
+ return RevToBytes(rev, bytes)
+}
+
+func newTestBucketKeyBytes(rev BucketKey) []byte {
+ bytes := NewRevBytes()
+ return BucketKeyToBytes(rev, bytes)
+}
+
+func newFakeStore(lg *zap.Logger) *store {
+ b := &fakeBackend{&fakeBatchTx{
+ Recorder: &testutil.RecorderBuffered{},
+ rangeRespc: make(chan rangeResp, 5),
+ }}
+ s := &store{
+ cfg: StoreConfig{
+ CompactionBatchLimit: 10000,
+ CompactionSleepInterval: defaultCompactionSleepInterval,
+ },
+ b: b,
+ le: &lease.FakeLessor{},
+ kvindex: newFakeIndex(),
+ currentRev: 0,
+ compactMainRev: -1,
+ fifoSched: schedule.NewFIFOScheduler(lg),
+ stopc: make(chan struct{}),
+ lg: lg,
+ }
+ s.ReadView, s.WriteView = &readView{s}, &writeView{s}
+ s.hashes = NewHashStorage(lg, s)
+ return s
+}
+
+func newFakeIndex() *fakeIndex {
+ return &fakeIndex{
+ Recorder: &testutil.RecorderBuffered{},
+ indexGetRespc: make(chan indexGetResp, 1),
+ indexRangeRespc: make(chan indexRangeResp, 1),
+ indexRangeEventsRespc: make(chan indexRangeEventsResp, 1),
+ indexCompactRespc: make(chan map[Revision]struct{}, 1),
+ }
+}
+
+type rangeResp struct {
+ keys [][]byte
+ vals [][]byte
+}
+
+type fakeBatchTx struct {
+ testutil.Recorder
+ rangeRespc chan rangeResp
+}
+
+func (b *fakeBatchTx) LockInsideApply() {}
+func (b *fakeBatchTx) LockOutsideApply() {}
+func (b *fakeBatchTx) Lock() {}
+func (b *fakeBatchTx) Unlock() {}
+func (b *fakeBatchTx) RLock() {}
+func (b *fakeBatchTx) RUnlock() {}
+func (b *fakeBatchTx) UnsafeCreateBucket(bucket backend.Bucket) {}
+func (b *fakeBatchTx) UnsafeDeleteBucket(bucket backend.Bucket) {}
+func (b *fakeBatchTx) UnsafePut(bucket backend.Bucket, key []byte, value []byte) {
+ b.Recorder.Record(testutil.Action{Name: "put", Params: []any{bucket, key, value}})
+}
+
+func (b *fakeBatchTx) UnsafeSeqPut(bucket backend.Bucket, key []byte, value []byte) {
+ b.Recorder.Record(testutil.Action{Name: "seqput", Params: []any{bucket, key, value}})
+}
+
+func (b *fakeBatchTx) UnsafeRange(bucket backend.Bucket, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) {
+ b.Recorder.Record(testutil.Action{Name: "range", Params: []any{bucket, key, endKey, limit}})
+ r := <-b.rangeRespc
+ return r.keys, r.vals
+}
+
+func (b *fakeBatchTx) UnsafeDelete(bucket backend.Bucket, key []byte) {
+ b.Recorder.Record(testutil.Action{Name: "delete", Params: []any{bucket, key}})
+}
+
+func (b *fakeBatchTx) UnsafeForEach(bucket backend.Bucket, visitor func(k, v []byte) error) error {
+ return nil
+}
+func (b *fakeBatchTx) Commit() {}
+func (b *fakeBatchTx) CommitAndStop() {}
+
+type fakeBackend struct {
+ tx *fakeBatchTx
+}
+
+func (b *fakeBackend) BatchTx() backend.BatchTx { return b.tx }
+func (b *fakeBackend) ReadTx() backend.ReadTx { return b.tx }
+func (b *fakeBackend) ConcurrentReadTx() backend.ReadTx { return b.tx }
+func (b *fakeBackend) Hash(func(bucketName, keyName []byte) bool) (uint32, error) { return 0, nil }
+func (b *fakeBackend) Size() int64 { return 0 }
+func (b *fakeBackend) SizeInUse() int64 { return 0 }
+func (b *fakeBackend) OpenReadTxN() int64 { return 0 }
+func (b *fakeBackend) Snapshot() backend.Snapshot { return nil }
+func (b *fakeBackend) ForceCommit() {}
+func (b *fakeBackend) Defrag() error { return nil }
+func (b *fakeBackend) Close() error { return nil }
+func (b *fakeBackend) SetTxPostLockInsideApplyHook(func()) {}
+
+type indexGetResp struct {
+ rev Revision
+ created Revision
+ ver int64
+ err error
+}
+
+type indexRangeResp struct {
+ keys [][]byte
+ revs []Revision
+}
+
+type indexRangeEventsResp struct {
+ revs []Revision
+}
+
+type fakeIndex struct {
+ testutil.Recorder
+ indexGetRespc chan indexGetResp
+ indexRangeRespc chan indexRangeResp
+ indexRangeEventsRespc chan indexRangeEventsResp
+ indexCompactRespc chan map[Revision]struct{}
+}
+
+func (i *fakeIndex) Revisions(key, end []byte, atRev int64, limit int) ([]Revision, int) {
+ _, rev := i.Range(key, end, atRev)
+ if len(rev) >= limit {
+ rev = rev[:limit]
+ }
+ return rev, len(rev)
+}
+
+func (i *fakeIndex) CountRevisions(key, end []byte, atRev int64) int {
+ _, rev := i.Range(key, end, atRev)
+ return len(rev)
+}
+
+func (i *fakeIndex) Get(key []byte, atRev int64) (rev, created Revision, ver int64, err error) {
+ i.Recorder.Record(testutil.Action{Name: "get", Params: []any{key, atRev}})
+ r := <-i.indexGetRespc
+ return r.rev, r.created, r.ver, r.err
+}
+
+func (i *fakeIndex) Range(key, end []byte, atRev int64) ([][]byte, []Revision) {
+ i.Recorder.Record(testutil.Action{Name: "range", Params: []any{key, end, atRev}})
+ r := <-i.indexRangeRespc
+ return r.keys, r.revs
+}
+
+func (i *fakeIndex) Put(key []byte, rev Revision) {
+ i.Recorder.Record(testutil.Action{Name: "put", Params: []any{key, rev}})
+}
+
+func (i *fakeIndex) Tombstone(key []byte, rev Revision) error {
+ i.Recorder.Record(testutil.Action{Name: "tombstone", Params: []any{key, rev}})
+ return nil
+}
+
+func (i *fakeIndex) RangeSince(key, end []byte, rev int64) []Revision {
+ i.Recorder.Record(testutil.Action{Name: "rangeEvents", Params: []any{key, end, rev}})
+ r := <-i.indexRangeEventsRespc
+ return r.revs
+}
+
+func (i *fakeIndex) Compact(rev int64) map[Revision]struct{} {
+ i.Recorder.Record(testutil.Action{Name: "compact", Params: []any{rev}})
+ return <-i.indexCompactRespc
+}
+
+func (i *fakeIndex) Keep(rev int64) map[Revision]struct{} {
+ i.Recorder.Record(testutil.Action{Name: "keep", Params: []any{rev}})
+ return <-i.indexCompactRespc
+}
+func (i *fakeIndex) Equal(b index) bool { return false }
+
+func (i *fakeIndex) Insert(ki *keyIndex) {
+ i.Recorder.Record(testutil.Action{Name: "insert", Params: []any{ki}})
+}
+
+func (i *fakeIndex) KeyIndex(ki *keyIndex) *keyIndex {
+ i.Recorder.Record(testutil.Action{Name: "keyIndex", Params: []any{ki}})
+ return nil
+}
+
+func createBytesSlice(bytesN, sliceN int) [][]byte {
+ var rs [][]byte
+ for len(rs) != sliceN {
+ v := make([]byte, bytesN)
+ if _, err := rand.Read(v); err != nil {
+ panic(err)
+ }
+ rs = append(rs, v)
+ }
+ return rs
+}
diff --git a/server/storage/mvcc/kvstore_txn.go b/server/storage/mvcc/kvstore_txn.go
new file mode 100644
index 00000000000..c44a2cb3d91
--- /dev/null
+++ b/server/storage/mvcc/kvstore_txn.go
@@ -0,0 +1,321 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "context"
+ "fmt"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/lease"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+type storeTxnRead struct {
+ storeTxnCommon
+ tx backend.ReadTx
+}
+
+type storeTxnCommon struct {
+ s *store
+ tx backend.UnsafeReader
+
+ firstRev int64
+ rev int64
+
+ trace *traceutil.Trace
+}
+
+func (s *store) Read(mode ReadTxMode, trace *traceutil.Trace) TxnRead {
+ s.mu.RLock()
+ s.revMu.RLock()
+ // For read-only workloads we copy the transaction read buffer (ConcurrentReadTx)
+ // so reads can run concurrently with ongoing blocking writes.
+ // For write and write-read transactions we use the shared read buffer directly,
+ // avoiding the copy to keep transaction overhead low.
+ var tx backend.ReadTx
+ if mode == ConcurrentReadTxMode {
+ tx = s.b.ConcurrentReadTx()
+ } else {
+ tx = s.b.ReadTx()
+ }
+
+ tx.RLock() // RLock is a no-op; a concurrentReadTx does not need to be locked after it is created.
+ firstRev, rev := s.compactMainRev, s.currentRev
+ s.revMu.RUnlock()
+ return newMetricsTxnRead(&storeTxnRead{storeTxnCommon{s, tx, firstRev, rev, trace}, tx})
+}
+
+func (tr *storeTxnCommon) FirstRev() int64 { return tr.firstRev }
+func (tr *storeTxnCommon) Rev() int64 { return tr.rev }
+
+func (tr *storeTxnCommon) Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
+ return tr.rangeKeys(ctx, key, end, tr.Rev(), ro)
+}
+
+func (tr *storeTxnCommon) rangeKeys(ctx context.Context, key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) {
+ rev := ro.Rev
+ if rev > curRev {
+ return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev
+ }
+ if rev <= 0 {
+ rev = curRev
+ }
+ if rev < tr.s.compactMainRev {
+ return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted
+ }
+ if ro.Count {
+ total := tr.s.kvindex.CountRevisions(key, end, rev)
+ tr.trace.Step("count revisions from in-memory index tree")
+ return &RangeResult{KVs: nil, Count: total, Rev: curRev}, nil
+ }
+ revpairs, total := tr.s.kvindex.Revisions(key, end, rev, int(ro.Limit))
+ tr.trace.Step("range keys from in-memory index tree")
+ if len(revpairs) == 0 {
+ return &RangeResult{KVs: nil, Count: total, Rev: curRev}, nil
+ }
+
+ limit := int(ro.Limit)
+ if limit <= 0 || limit > len(revpairs) {
+ limit = len(revpairs)
+ }
+
+ kvs := make([]mvccpb.KeyValue, limit)
+ revBytes := NewRevBytes()
+ for i, revpair := range revpairs[:len(kvs)] {
+ select {
+ case <-ctx.Done():
+ return nil, fmt.Errorf("rangeKeys: context cancelled: %w", ctx.Err())
+ default:
+ }
+ revBytes = RevToBytes(revpair, revBytes)
+ _, vs := tr.tx.UnsafeRange(schema.Key, revBytes, nil, 0)
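+ // Each revision key in the key bucket maps to exactly one KeyValue; any other
+ // count means the in-memory index and the backend are inconsistent.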
+ if len(vs) != 1 {
+ tr.s.lg.Fatal(
+ "range failed to find revision pair",
+ zap.Int64("revision-main", revpair.Main),
+ zap.Int64("revision-sub", revpair.Sub),
+ zap.Int64("revision-current", curRev),
+ zap.Int64("range-option-rev", ro.Rev),
+ zap.Int64("range-option-limit", ro.Limit),
+ zap.Binary("key", key),
+ zap.Binary("end", end),
+ zap.Int("len-revpairs", len(revpairs)),
+ zap.Int("len-values", len(vs)),
+ )
+ }
+ if err := kvs[i].Unmarshal(vs[0]); err != nil {
+ tr.s.lg.Fatal(
+ "failed to unmarshal mvccpb.KeyValue",
+ zap.Error(err),
+ )
+ }
+ }
+ tr.trace.Step("range keys from bolt db")
+ return &RangeResult{KVs: kvs, Count: total, Rev: curRev}, nil
+}
+
+func (tr *storeTxnRead) End() {
+ tr.tx.RUnlock() // RUnlock signals the end of concurrentReadTx.
+ tr.s.mu.RUnlock()
+}
+
+type storeTxnWrite struct {
+ storeTxnCommon
+ tx backend.BatchTx
+ // beginRev is the revision where the txn begins; it will write to the next revision.
+ beginRev int64
+ changes []mvccpb.KeyValue
+}
+
+func (s *store) Write(trace *traceutil.Trace) TxnWrite {
+ s.mu.RLock()
+ tx := s.b.BatchTx()
+ tx.LockInsideApply()
+ tw := &storeTxnWrite{
+ storeTxnCommon: storeTxnCommon{s, tx, 0, 0, trace},
+ tx: tx,
+ beginRev: s.currentRev,
+ changes: make([]mvccpb.KeyValue, 0, 4),
+ }
+ return newMetricsTxnWrite(tw)
+}
+
+func (tw *storeTxnWrite) Rev() int64 { return tw.beginRev }
+
+func (tw *storeTxnWrite) Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
+ rev := tw.beginRev
+ if len(tw.changes) > 0 {
+ rev++
+ }
+ return tw.rangeKeys(ctx, key, end, rev, ro)
+}
+
+func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) {
+ if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 {
+ return n, tw.beginRev + 1
+ }
+ return 0, tw.beginRev
+}
+
+func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 {
+ tw.put(key, value, lease)
+ return tw.beginRev + 1
+}
+
+func (tw *storeTxnWrite) End() {
+ // only update index if the txn modifies the mvcc state.
+ if len(tw.changes) != 0 {
+ // hold revMu lock to prevent new read txns from opening until writeback.
+ tw.s.revMu.Lock()
+ tw.s.currentRev++
+ }
+ tw.tx.Unlock()
+ if len(tw.changes) != 0 {
+ tw.s.revMu.Unlock()
+ }
+ tw.s.mu.RUnlock()
+}
+
+func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) {
+ rev := tw.beginRev + 1
+ c := rev
+ oldLease := lease.NoLease
+
+ // if the key exists before, use its previous created and
+ // get its previous leaseID
+ _, created, ver, err := tw.s.kvindex.Get(key, rev)
+ if err == nil {
+ c = created.Main
+ oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)})
+ tw.trace.Step("get key's previous created_revision and leaseID")
+ }
+ ibytes := NewRevBytes()
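+ // Sub is the index of this change within the txn, so multiple puts in one txn
+ // share the same main revision but still get distinct keys in the backend.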
+ idxRev := Revision{Main: rev, Sub: int64(len(tw.changes))}
+ ibytes = RevToBytes(idxRev, ibytes)
+
+ ver = ver + 1
+ kv := mvccpb.KeyValue{
+ Key: key,
+ Value: value,
+ CreateRevision: c,
+ ModRevision: rev,
+ Version: ver,
+ Lease: int64(leaseID),
+ }
+
+ d, err := kv.Marshal()
+ if err != nil {
+ tw.storeTxnCommon.s.lg.Fatal(
+ "failed to marshal mvccpb.KeyValue",
+ zap.Error(err),
+ )
+ }
+
+ tw.trace.Step("marshal mvccpb.KeyValue")
+ tw.tx.UnsafeSeqPut(schema.Key, ibytes, d)
+ tw.s.kvindex.Put(key, idxRev)
+ tw.changes = append(tw.changes, kv)
+ tw.trace.Step("store kv pair into bolt db")
+
+ if oldLease == leaseID {
+ tw.trace.Step("attach lease to kv pair")
+ return
+ }
+
+ if oldLease != lease.NoLease {
+ if tw.s.le == nil {
+ panic("no lessor to detach lease")
+ }
+ err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}})
+ if err != nil {
+ tw.storeTxnCommon.s.lg.Error(
+ "failed to detach old lease from a key",
+ zap.Error(err),
+ )
+ }
+ }
+ if leaseID != lease.NoLease {
+ if tw.s.le == nil {
+ panic("no lessor to attach lease")
+ }
+ err = tw.s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}})
+ if err != nil {
+ panic("unexpected error from lease Attach")
+ }
+ }
+ tw.trace.Step("attach lease to kv pair")
+}
+
+func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 {
+ rrev := tw.beginRev
+ if len(tw.changes) > 0 {
+ rrev++
+ }
+ keys, _ := tw.s.kvindex.Range(key, end, rrev)
+ if len(keys) == 0 {
+ return 0
+ }
+ for _, key := range keys {
+ tw.delete(key)
+ }
+ return int64(len(keys))
+}
+
+func (tw *storeTxnWrite) delete(key []byte) {
+ ibytes := NewRevBytes()
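+ // Mark the bucket key as a tombstone ('t' suffix byte) so reads, watchers, and
+ // compaction recognize this revision as a deletion.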
+ idxRev := newBucketKey(tw.beginRev+1, int64(len(tw.changes)), true)
+ ibytes = BucketKeyToBytes(idxRev, ibytes)
+
+ kv := mvccpb.KeyValue{Key: key}
+
+ d, err := kv.Marshal()
+ if err != nil {
+ tw.storeTxnCommon.s.lg.Fatal(
+ "failed to marshal mvccpb.KeyValue",
+ zap.Error(err),
+ )
+ }
+
+ tw.tx.UnsafeSeqPut(schema.Key, ibytes, d)
+ err = tw.s.kvindex.Tombstone(key, idxRev.Revision)
+ if err != nil {
+ tw.storeTxnCommon.s.lg.Fatal(
+ "failed to tombstone an existing key",
+ zap.String("key", string(key)),
+ zap.Error(err),
+ )
+ }
+ tw.changes = append(tw.changes, kv)
+
+ item := lease.LeaseItem{Key: string(key)}
+ leaseID := tw.s.le.GetLease(item)
+
+ if leaseID != lease.NoLease {
+ err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item})
+ if err != nil {
+ tw.storeTxnCommon.s.lg.Error(
+ "failed to detach old lease from a key",
+ zap.Error(err),
+ )
+ }
+ }
+}
+
+func (tw *storeTxnWrite) Changes() []mvccpb.KeyValue { return tw.changes }
diff --git a/server/storage/mvcc/metrics.go b/server/storage/mvcc/metrics.go
new file mode 100644
index 00000000000..b75abbcc089
--- /dev/null
+++ b/server/storage/mvcc/metrics.go
@@ -0,0 +1,302 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "sync"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ rangeCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "range_total",
+ Help: "Total number of ranges seen by this member.",
+ })
+
+ putCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "put_total",
+ Help: "Total number of puts seen by this member.",
+ })
+
+ deleteCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "delete_total",
+ Help: "Total number of deletes seen by this member.",
+ })
+
+ txnCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "txn_total",
+ Help: "Total number of txns seen by this member.",
+ })
+
+ keysGauge = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "keys_total",
+ Help: "Total number of keys.",
+ })
+
+ watchStreamGauge = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "watch_stream_total",
+ Help: "Total number of watch streams.",
+ })
+
+ watcherGauge = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "watcher_total",
+ Help: "Total number of watchers.",
+ })
+
+ slowWatcherGauge = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "slow_watcher_total",
+ Help: "Total number of unsynced slow watchers.",
+ })
+
+ totalEventsCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "events_total",
+ Help: "Total number of events sent by this member.",
+ })
+
+ pendingEventsGauge = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "pending_events_total",
+ Help: "Total number of pending events to be sent.",
+ })
+
+ indexCompactionPauseMs = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "index_compaction_pause_duration_milliseconds",
+ Help: "Bucketed histogram of index compaction pause duration.",
+
+ // lowest bucket start of upper bound 0.5 ms with factor 2
+ // highest bucket start of 0.5 ms * 2^13 == 4.096 sec
+ Buckets: prometheus.ExponentialBuckets(0.5, 2, 14),
+ })
+
+ dbCompactionPauseMs = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "db_compaction_pause_duration_milliseconds",
+ Help: "Bucketed histogram of db compaction pause duration.",
+
+ // lowest bucket start of upper bound 1 ms with factor 2
+ // highest bucket start of 1 ms * 2^12 == 4.096 sec
+ Buckets: prometheus.ExponentialBuckets(1, 2, 13),
+ })
+
+ dbCompactionTotalMs = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "db_compaction_total_duration_milliseconds",
+ Help: "Bucketed histogram of db compaction total duration.",
+
+ // lowest bucket start of upper bound 100 ms with factor 2
+ // highest bucket start of 100 ms * 2^13 == 819.2 sec
+ Buckets: prometheus.ExponentialBuckets(100, 2, 14),
+ })
+
+ dbCompactionLast = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "db_compaction_last",
+ Help: "The unix time of the last db compaction. Resets to 0 on start.",
+ })
+
+ dbCompactionKeysCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "db_compaction_keys_total",
+ Help: "Total number of db keys compacted.",
+ })
+
+ dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "db_total_size_in_bytes",
+ Help: "Total size of the underlying database physically allocated in bytes.",
+ },
+ func() float64 {
+ reportDbTotalSizeInBytesMu.RLock()
+ defer reportDbTotalSizeInBytesMu.RUnlock()
+ return reportDbTotalSizeInBytes()
+ },
+ )
+ // overridden by mvcc initialization
+ reportDbTotalSizeInBytesMu sync.RWMutex
+ reportDbTotalSizeInBytes = func() float64 { return 0 }
+
+ dbTotalSizeInUse = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "db_total_size_in_use_in_bytes",
+ Help: "Total size of the underlying database logically in use in bytes.",
+ },
+ func() float64 {
+ reportDbTotalSizeInUseInBytesMu.RLock()
+ defer reportDbTotalSizeInUseInBytesMu.RUnlock()
+ return reportDbTotalSizeInUseInBytes()
+ },
+ )
+ // overridden by mvcc initialization
+ reportDbTotalSizeInUseInBytesMu sync.RWMutex
+ reportDbTotalSizeInUseInBytes = func() float64 { return 0 }
+
+ dbOpenReadTxN = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "db_open_read_transactions",
+ Help: "The number of currently open read transactions",
+ },
+
+ func() float64 {
+ reportDbOpenReadTxNMu.RLock()
+ defer reportDbOpenReadTxNMu.RUnlock()
+ return reportDbOpenReadTxN()
+ },
+ )
+ // overridden by mvcc initialization
+ reportDbOpenReadTxNMu sync.RWMutex
+ reportDbOpenReadTxN = func() float64 { return 0 }
+
+ hashSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "hash_duration_seconds",
+ Help: "The latency distribution of storage hash operation.",
+
+ // 100 MB usually takes 100 ms, so start with 10 MB of 10 ms
+ // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
+ // highest bucket start of 0.01 sec * 2^14 == 163.84 sec
+ Buckets: prometheus.ExponentialBuckets(.01, 2, 15),
+ })
+
+ hashRevSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "hash_rev_duration_seconds",
+ Help: "The latency distribution of storage hash by revision operation.",
+
+ // 100 MB usually takes 100 ms, so start with 10 MB of 10 ms
+ // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
+ // highest bucket start of 0.01 sec * 2^14 == 163.84 sec
+ Buckets: prometheus.ExponentialBuckets(.01, 2, 15),
+ })
+
+ currentRev = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "current_revision",
+ Help: "The current revision of store.",
+ },
+ func() float64 {
+ reportCurrentRevMu.RLock()
+ defer reportCurrentRevMu.RUnlock()
+ return reportCurrentRev()
+ },
+ )
+ // overridden by mvcc initialization
+ reportCurrentRevMu sync.RWMutex
+ reportCurrentRev = func() float64 { return 0 }
+
+ compactRev = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "compact_revision",
+ Help: "The revision of the last compaction in store.",
+ },
+ func() float64 {
+ reportCompactRevMu.RLock()
+ defer reportCompactRevMu.RUnlock()
+ return reportCompactRev()
+ },
+ )
+ // overridden by mvcc initialization
+ reportCompactRevMu sync.RWMutex
+ reportCompactRev = func() float64 { return 0 }
+
+ totalPutSizeGauge = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "total_put_size_in_bytes",
+ Help: "The total size of put kv pairs seen by this member.",
+ })
+)
+
+func init() {
+ prometheus.MustRegister(rangeCounter)
+ prometheus.MustRegister(putCounter)
+ prometheus.MustRegister(deleteCounter)
+ prometheus.MustRegister(txnCounter)
+ prometheus.MustRegister(keysGauge)
+ prometheus.MustRegister(watchStreamGauge)
+ prometheus.MustRegister(watcherGauge)
+ prometheus.MustRegister(slowWatcherGauge)
+ prometheus.MustRegister(totalEventsCounter)
+ prometheus.MustRegister(pendingEventsGauge)
+ prometheus.MustRegister(indexCompactionPauseMs)
+ prometheus.MustRegister(dbCompactionPauseMs)
+ prometheus.MustRegister(dbCompactionTotalMs)
+ prometheus.MustRegister(dbCompactionLast)
+ prometheus.MustRegister(dbCompactionKeysCounter)
+ prometheus.MustRegister(dbTotalSize)
+ prometheus.MustRegister(dbTotalSizeInUse)
+ prometheus.MustRegister(dbOpenReadTxN)
+ prometheus.MustRegister(hashSec)
+ prometheus.MustRegister(hashRevSec)
+ prometheus.MustRegister(currentRev)
+ prometheus.MustRegister(compactRev)
+ prometheus.MustRegister(totalPutSizeGauge)
+}
+
+// ReportEventReceived reports that an event is received.
+// This function should be called when the external systems received an
+// event from mvcc.Watcher.
+func ReportEventReceived(n int) {
+ pendingEventsGauge.Sub(float64(n))
+ totalEventsCounter.Add(float64(n))
+}
diff --git a/server/mvcc/metrics_txn.go b/server/storage/mvcc/metrics_txn.go
similarity index 96%
rename from server/mvcc/metrics_txn.go
rename to server/storage/mvcc/metrics_txn.go
index af844f8468b..aef877a1c15 100644
--- a/server/mvcc/metrics_txn.go
+++ b/server/storage/mvcc/metrics_txn.go
@@ -61,7 +61,6 @@ func (tw *metricsTxnWrite) End() {
ranges := float64(tw.ranges)
rangeCounter.Add(ranges)
- rangeCounterDebug.Add(ranges) // TODO: remove in 3.5 release
puts := float64(tw.puts)
putCounter.Add(puts)
diff --git a/server/storage/mvcc/revision.go b/server/storage/mvcc/revision.go
new file mode 100644
index 00000000000..db44da29343
--- /dev/null
+++ b/server/storage/mvcc/revision.go
@@ -0,0 +1,122 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "encoding/binary"
+ "fmt"
+)
+
+const (
+ // revBytesLen is the byte length of a normal revision.
+ // First 8 bytes is the revision.main in big-endian format. The 9th byte
+ // is a '_'. The last 8 bytes is the revision.sub in big-endian format.
+ revBytesLen = 8 + 1 + 8
+ // markedRevBytesLen is the byte length of marked revision.
+ // The first `revBytesLen` bytes represents a normal revision. The last
+ // one byte is the mark.
+ markedRevBytesLen = revBytesLen + 1
+ markBytePosition = markedRevBytesLen - 1
+ markTombstone byte = 't'
+)
+
+type Revision struct {
+ // Main is the main revision of a set of changes that happen atomically.
+ Main int64
+ // Sub is the sub revision of a change in a set of changes that happen
+ // atomically. Each change has different increasing sub revision in that
+ // set.
+ Sub int64
+}
+
+func (a Revision) GreaterThan(b Revision) bool {
+ if a.Main > b.Main {
+ return true
+ }
+ if a.Main < b.Main {
+ return false
+ }
+ return a.Sub > b.Sub
+}
+
+func RevToBytes(rev Revision, bytes []byte) []byte {
+ return BucketKeyToBytes(newBucketKey(rev.Main, rev.Sub, false), bytes)
+}
+
+func BytesToRev(bytes []byte) Revision {
+ return BytesToBucketKey(bytes).Revision
+}
+
+// BucketKey indicates modification of the key-value space.
+// The set of changes that share same main revision changes the key-value space atomically.
+type BucketKey struct {
+ Revision
+ tombstone bool
+}
+
+func newBucketKey(main, sub int64, isTombstone bool) BucketKey {
+ return BucketKey{
+ Revision: Revision{
+ Main: main,
+ Sub: sub,
+ },
+ tombstone: isTombstone,
+ }
+}
+
+func NewRevBytes() []byte {
+ return make([]byte, revBytesLen, markedRevBytesLen)
+}
+
+func BucketKeyToBytes(rev BucketKey, bytes []byte) []byte {
+ binary.BigEndian.PutUint64(bytes, uint64(rev.Main))
+ bytes[8] = '_'
+ binary.BigEndian.PutUint64(bytes[9:], uint64(rev.Sub))
+ if rev.tombstone {
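+ // bytes may be an unmarked revision buffer (append the mark) or an already
+ // marked one (overwrite the mark byte in place).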
+ switch len(bytes) {
+ case revBytesLen:
+ bytes = append(bytes, markTombstone)
+ case markedRevBytesLen:
+ bytes[markBytePosition] = markTombstone
+ }
+ }
+ return bytes
+}
+
+func BytesToBucketKey(bytes []byte) BucketKey {
+ if (len(bytes) != revBytesLen) && (len(bytes) != markedRevBytesLen) {
+ panic(fmt.Sprintf("invalid revision length: %d", len(bytes)))
+ }
+ if bytes[8] != '_' {
+ panic(fmt.Sprintf("invalid separator in bucket key: %q", bytes[8]))
+ }
+ main := int64(binary.BigEndian.Uint64(bytes[0:8]))
+ sub := int64(binary.BigEndian.Uint64(bytes[9:]))
+ if main < 0 || sub < 0 {
+ panic(fmt.Sprintf("negative revision: main=%d sub=%d", main, sub))
+ }
+ return BucketKey{
+ Revision: Revision{
+ Main: main,
+ Sub: sub,
+ },
+ tombstone: isTombstone(bytes),
+ }
+}
+
+// isTombstone checks whether the revision bytes is a tombstone.
+func isTombstone(b []byte) bool {
+ return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone
+}
diff --git a/server/storage/mvcc/store.go b/server/storage/mvcc/store.go
new file mode 100644
index 00000000000..523b8cd8f2c
--- /dev/null
+++ b/server/storage/mvcc/store.go
@@ -0,0 +1,60 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+func UnsafeReadFinishedCompact(tx backend.UnsafeReader) (int64, bool) {
+ _, finishedCompactBytes := tx.UnsafeRange(schema.Meta, schema.FinishedCompactKeyName, nil, 0)
+ if len(finishedCompactBytes) != 0 {
+ return BytesToRev(finishedCompactBytes[0]).Main, true
+ }
+ return 0, false
+}
+
+func UnsafeReadScheduledCompact(tx backend.UnsafeReader) (int64, bool) {
+ _, scheduledCompactBytes := tx.UnsafeRange(schema.Meta, schema.ScheduledCompactKeyName, nil, 0)
+ if len(scheduledCompactBytes) != 0 {
+ return BytesToRev(scheduledCompactBytes[0]).Main, true
+ }
+ return 0, false
+}
+
+func SetScheduledCompact(tx backend.BatchTx, value int64) {
+ tx.LockInsideApply()
+ defer tx.Unlock()
+ UnsafeSetScheduledCompact(tx, value)
+}
+
+func UnsafeSetScheduledCompact(tx backend.UnsafeWriter, value int64) {
+ rbytes := NewRevBytes()
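+ // Compaction markers reuse the standard revision encoding; only Main is
+ // meaningful and Sub stays 0.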
+ rbytes = RevToBytes(Revision{Main: value}, rbytes)
+ tx.UnsafePut(schema.Meta, schema.ScheduledCompactKeyName, rbytes)
+}
+
+func SetFinishedCompact(tx backend.BatchTx, value int64) {
+ tx.LockInsideApply()
+ defer tx.Unlock()
+ UnsafeSetFinishedCompact(tx, value)
+}
+
+func UnsafeSetFinishedCompact(tx backend.UnsafeWriter, value int64) {
+ rbytes := NewRevBytes()
+ rbytes = RevToBytes(Revision{Main: value}, rbytes)
+ tx.UnsafePut(schema.Meta, schema.FinishedCompactKeyName, rbytes)
+}
diff --git a/server/storage/mvcc/store_test.go b/server/storage/mvcc/store_test.go
new file mode 100644
index 00000000000..df7607a54ef
--- /dev/null
+++ b/server/storage/mvcc/store_test.go
@@ -0,0 +1,107 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "fmt"
+ "math"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+// TestScheduledCompact ensures that UnsafeSetScheduledCompact and UnsafeReadScheduledCompact work well together.
+func TestScheduledCompact(t *testing.T) {
+ tcs := []struct {
+ value int64
+ }{
+ {
+ value: 1,
+ },
+ {
+ value: 0,
+ },
+ {
+ value: math.MaxInt64,
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(fmt.Sprint(tc.value), func(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10)
+ tx := be.BatchTx()
+ if tx == nil {
+ t.Fatal("batch tx is nil")
+ }
+ tx.Lock()
+ tx.UnsafeCreateBucket(schema.Meta)
+ UnsafeSetScheduledCompact(tx, tc.value)
+ tx.Unlock()
+ be.ForceCommit()
+ be.Close()
+
+ b := backend.NewDefaultBackend(lg, tmpPath)
+ defer b.Close()
+ v, found := UnsafeReadScheduledCompact(b.BatchTx())
+ assert.True(t, found)
+ assert.Equal(t, tc.value, v)
+ })
+ }
+}
+
+// TestFinishedCompact ensures that UnsafeSetFinishedCompact and UnsafeReadFinishedCompact work well together.
+func TestFinishedCompact(t *testing.T) {
+ tcs := []struct {
+ value int64
+ }{
+ {
+ value: 1,
+ },
+ {
+ value: 0,
+ },
+ {
+ value: math.MaxInt64,
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(fmt.Sprint(tc.value), func(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10)
+ tx := be.BatchTx()
+ if tx == nil {
+ t.Fatal("batch tx is nil")
+ }
+ tx.Lock()
+ tx.UnsafeCreateBucket(schema.Meta)
+ UnsafeSetFinishedCompact(tx, tc.value)
+ tx.Unlock()
+ be.ForceCommit()
+ be.Close()
+
+ b := backend.NewDefaultBackend(lg, tmpPath)
+ defer b.Close()
+ v, found := UnsafeReadFinishedCompact(b.BatchTx())
+ assert.True(t, found)
+ assert.Equal(t, tc.value, v)
+ })
+ }
+}
diff --git a/server/storage/mvcc/testutil/hash.go b/server/storage/mvcc/testutil/hash.go
new file mode 100644
index 00000000000..01771bb5c54
--- /dev/null
+++ b/server/storage/mvcc/testutil/hash.go
@@ -0,0 +1,149 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testutil
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/bbolt"
+ "go.etcd.io/etcd/api/v3/mvccpb"
+)
+
+const (
+ // CompactionCycle is a high prime used to test hash calculation between compactions.
+ CompactionCycle = 71
+)
+
+func TestCompactionHash(ctx context.Context, t *testing.T, h CompactionHashTestCase, compactionBatchLimit int) {
+ var totalRevisions int64 = 1210
+ assert.Less(t, int64(compactionBatchLimit), totalRevisions)
+ assert.Less(t, int64(CompactionCycle*10), totalRevisions)
+ var rev int64
+ for ; rev < totalRevisions; rev += CompactionCycle {
+ testCompactionHash(ctx, t, h, rev, rev+CompactionCycle)
+ }
+ testCompactionHash(ctx, t, h, rev, rev+totalRevisions)
+}
+
+type CompactionHashTestCase interface {
+ Put(ctx context.Context, key, value string) error
+ Delete(ctx context.Context, key string) error
+ HashByRev(ctx context.Context, rev int64) (KeyValueHash, error)
+ Defrag(ctx context.Context) error
+ Compact(ctx context.Context, rev int64) error
+}
+
+type KeyValueHash struct {
+ Hash uint32
+ CompactRevision int64
+ Revision int64
+}
+
+func testCompactionHash(ctx context.Context, t *testing.T, h CompactionHashTestCase, start, stop int64) {
+ for i := start; i <= stop; i++ {
+ if i%67 == 0 {
+ err := h.Delete(ctx, PickKey(i+83))
+ require.NoErrorf(t, err, "error on delete")
+ } else {
+ err := h.Put(ctx, PickKey(i), fmt.Sprint(i))
+ require.NoErrorf(t, err, "error on put")
+ }
+ }
+ hash1, err := h.HashByRev(ctx, stop)
+ require.NoErrorf(t, err, "error on hash (rev %v)", stop)
+
+ err = h.Compact(ctx, stop)
+ require.NoErrorf(t, err, "error on compact (rev %v)", stop)
+
+ err = h.Defrag(ctx)
+ require.NoErrorf(t, err, "error on defrag")
+
+ hash2, err := h.HashByRev(ctx, stop)
+ require.NoErrorf(t, err, "error on hash (rev %v)", stop)
+ assert.Equalf(t, hash1, hash2, "hashes do not match on rev %v", stop)
+}
+
+func PickKey(i int64) string {
+ if i%(CompactionCycle*2) == 30 {
+ return "zenek"
+ }
+ if i%CompactionCycle == 30 {
+ return "xavery"
+ }
+ // Use low prime number to ensure repeats without alignment
+ switch i % 7 {
+ case 0:
+ return "alice"
+ case 1:
+ return "bob"
+ case 2:
+ return "celine"
+ case 3:
+ return "dominik"
+ case 4:
+ return "eve"
+ case 5:
+ return "frederica"
+ case 6:
+ return "gorge"
+ default:
+ panic("Can't count")
+ }
+}
+
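+// CorruptBBolt flips the first byte of every KeyValue's key and value stored in
+// the "key" bucket, so hash checks over the bucket detect the corruption.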
+func CorruptBBolt(fpath string) error {
+ db, derr := bbolt.Open(fpath, os.ModePerm, &bbolt.Options{})
+ if derr != nil {
+ return derr
+ }
+ defer db.Close()
+
+ return db.Update(func(tx *bbolt.Tx) error {
+ b := tx.Bucket([]byte("key"))
+ if b == nil {
+ return errors.New("got nil bucket for 'key'")
+ }
+ var vals [][]byte
+ var keys [][]byte
+ c := b.Cursor()
+ for k, v := c.First(); k != nil; k, v = c.Next() {
+ keys = append(keys, k)
+ var kv mvccpb.KeyValue
+ if uerr := kv.Unmarshal(v); uerr != nil {
+ return uerr
+ }
+ kv.Key[0]++
+ kv.Value[0]++
+ v2, v2err := kv.Marshal()
+ if v2err != nil {
+ return v2err
+ }
+ vals = append(vals, v2)
+ }
+ for i := range keys {
+ if perr := b.Put(keys[i], vals[i]); perr != nil {
+ return perr
+ }
+ }
+ return nil
+ })
+}
diff --git a/server/storage/mvcc/watchable_store.go b/server/storage/mvcc/watchable_store.go
new file mode 100644
index 00000000000..ee47c2c6d72
--- /dev/null
+++ b/server/storage/mvcc/watchable_store.go
@@ -0,0 +1,616 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/lease"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+// non-const so modifiable by tests
+var (
+ // chanBufLen is the length of the buffered chan
+ // for sending out watched events.
+ // See https://github.com/etcd-io/etcd/issues/11906 for more detail.
+ chanBufLen = 128
+
+ // maxWatchersPerSync is the number of watchers to sync in a single batch
+ maxWatchersPerSync = 512
+)
+
+func ChanBufLen() int { return chanBufLen }
+
+type watchable interface {
+ watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc)
+ progress(w *watcher)
+ progressAll(watchers map[WatchID]*watcher) bool
+ rev() int64
+}
+
+type watchableStore struct {
+ *store
+
+ // mu protects watcher groups and batches. It should never be locked
+ // before locking store.mu to avoid deadlock.
+ mu sync.RWMutex
+
+ // victims are watcher batches that were blocked on the watch channel
+ victims []watcherBatch
+ victimc chan struct{}
+
+ // contains all unsynced watchers that need to sync with events that have happened
+ unsynced watcherGroup
+
+ // contains all synced watchers that are in sync with the progress of the store.
+ // The key of the map is the key that the watcher watches on.
+ synced watcherGroup
+
+ stopc chan struct{}
+ wg sync.WaitGroup
+}
+
+var _ WatchableKV = (*watchableStore)(nil)
+
+// cancelFunc updates unsynced and synced maps when running
+// cancel operations.
+type cancelFunc func()
+
+func New(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *watchableStore {
+ s := newWatchableStore(lg, b, le, cfg)
+ s.wg.Add(2)
+ go s.syncWatchersLoop()
+ go s.syncVictimsLoop()
+ return s
+}
+
+func newWatchableStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *watchableStore {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ s := &watchableStore{
+ store: NewStore(lg, b, le, cfg),
+ victimc: make(chan struct{}, 1),
+ unsynced: newWatcherGroup(),
+ synced: newWatcherGroup(),
+ stopc: make(chan struct{}),
+ }
+ s.store.ReadView = &readView{s}
+ s.store.WriteView = &writeView{s}
+ if s.le != nil {
+ // use this store as the deleter so revokes trigger watch events
+ s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write(traceutil.TODO()) })
+ }
+ return s
+}
+
+func (s *watchableStore) Close() error {
+ close(s.stopc)
+ s.wg.Wait()
+ return s.store.Close()
+}
+
+func (s *watchableStore) NewWatchStream() WatchStream {
+ watchStreamGauge.Inc()
+ return &watchStream{
+ watchable: s,
+ ch: make(chan WatchResponse, chanBufLen),
+ cancels: make(map[WatchID]cancelFunc),
+ watchers: make(map[WatchID]*watcher),
+ }
+}
+
+func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) {
+ wa := &watcher{
+ key: key,
+ end: end,
+ minRev: startRev,
+ id: id,
+ ch: ch,
+ fcs: fcs,
+ }
+
+ s.mu.Lock()
+ s.revMu.RLock()
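+ // A watcher that only asks for future events (startRev of 0 or greater than
+ // currentRev) is synced immediately; otherwise it joins the unsynced group and
+ // catches up first.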
+ synced := startRev > s.store.currentRev || startRev == 0
+ if synced {
+ wa.minRev = s.store.currentRev + 1
+ if startRev > wa.minRev {
+ wa.minRev = startRev
+ }
+ s.synced.add(wa)
+ } else {
+ slowWatcherGauge.Inc()
+ s.unsynced.add(wa)
+ }
+ s.revMu.RUnlock()
+ s.mu.Unlock()
+
+ watcherGauge.Inc()
+
+ return wa, func() { s.cancelWatcher(wa) }
+}
+
+// cancelWatcher removes references of the watcher from the watchableStore
+func (s *watchableStore) cancelWatcher(wa *watcher) {
+ for {
+ s.mu.Lock()
+ if s.unsynced.delete(wa) {
+ slowWatcherGauge.Dec()
+ watcherGauge.Dec()
+ break
+ } else if s.synced.delete(wa) {
+ watcherGauge.Dec()
+ break
+ } else if wa.compacted {
+ watcherGauge.Dec()
+ break
+ } else if wa.ch == nil {
+ // already canceled (e.g., cancel/close race)
+ break
+ }
+
+ if !wa.victim {
+ s.mu.Unlock()
+ panic("watcher not victim but not in watch groups")
+ }
+
+ var victimBatch watcherBatch
+ for _, wb := range s.victims {
+ if wb[wa] != nil {
+ victimBatch = wb
+ break
+ }
+ }
+ if victimBatch != nil {
+ slowWatcherGauge.Dec()
+ watcherGauge.Dec()
+ delete(victimBatch, wa)
+ break
+ }
+
+ // victim being processed so not accessible; retry
+ s.mu.Unlock()
+ time.Sleep(time.Millisecond)
+ }
+
+ wa.ch = nil
+ s.mu.Unlock()
+}
+
+func (s *watchableStore) Restore(b backend.Backend) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ err := s.store.Restore(b)
+ if err != nil {
+ return err
+ }
+
+ for wa := range s.synced.watchers {
+ wa.restore = true
+ s.unsynced.add(wa)
+ }
+ s.synced = newWatcherGroup()
+ return nil
+}
+
+// syncWatchersLoop syncs the watchers in the unsynced map every 100ms.
+func (s *watchableStore) syncWatchersLoop() {
+ defer s.wg.Done()
+
+ waitDuration := 100 * time.Millisecond
+ delayTicker := time.NewTicker(waitDuration)
+ defer delayTicker.Stop()
+ var evs []mvccpb.Event
+
+ for {
+ s.mu.RLock()
+ st := time.Now()
+ lastUnsyncedWatchers := s.unsynced.size()
+ s.mu.RUnlock()
+
+ unsyncedWatchers := 0
+ if lastUnsyncedWatchers > 0 {
+ unsyncedWatchers, evs = s.syncWatchers(evs)
+ }
+ syncDuration := time.Since(st)
+
+ delayTicker.Reset(waitDuration)
+ // more work pending?
+ if unsyncedWatchers != 0 && lastUnsyncedWatchers > unsyncedWatchers {
+ // be fair to other store operations by yielding time taken
+ delayTicker.Reset(syncDuration)
+ }
+
+ select {
+ case <-delayTicker.C:
+ case <-s.stopc:
+ return
+ }
+ }
+}
+
+// syncVictimsLoop tries to write precomputed watcher responses to
+// watchers that had a blocked watcher channel
+func (s *watchableStore) syncVictimsLoop() {
+ defer s.wg.Done()
+
+ for {
+ for s.moveVictims() != 0 {
+ // try to update all victim watchers
+ }
+ s.mu.RLock()
+ isEmpty := len(s.victims) == 0
+ s.mu.RUnlock()
+
+ var tickc <-chan time.Time
+ if !isEmpty {
+ tickc = time.After(10 * time.Millisecond)
+ }
+
+ select {
+ case <-tickc:
+ case <-s.victimc:
+ case <-s.stopc:
+ return
+ }
+ }
+}
+
+// moveVictims tries to update watches with already pending event data
+func (s *watchableStore) moveVictims() (moved int) {
+ s.mu.Lock()
+ victims := s.victims
+ s.victims = nil
+ s.mu.Unlock()
+
+ var newVictim watcherBatch
+ for _, wb := range victims {
+ // try to send responses again
+ for w, eb := range wb {
+ // watcher has observed the store up to, but not including, w.minRev
+ rev := w.minRev - 1
+ if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
+ pendingEventsGauge.Add(float64(len(eb.evs)))
+ } else {
+ if newVictim == nil {
+ newVictim = make(watcherBatch)
+ }
+ newVictim[w] = eb
+ continue
+ }
+ moved++
+ }
+
+ // assign completed victim watchers to unsync/sync
+ s.mu.Lock()
+ s.store.revMu.RLock()
+ curRev := s.store.currentRev
+ for w, eb := range wb {
+ if newVictim != nil && newVictim[w] != nil {
+ // couldn't send watch response; stays victim
+ continue
+ }
+ w.victim = false
+ if eb.moreRev != 0 {
+ w.minRev = eb.moreRev
+ }
+ if w.minRev <= curRev {
+ s.unsynced.add(w)
+ } else {
+ slowWatcherGauge.Dec()
+ s.synced.add(w)
+ }
+ }
+ s.store.revMu.RUnlock()
+ s.mu.Unlock()
+ }
+
+ if len(newVictim) > 0 {
+ s.mu.Lock()
+ s.victims = append(s.victims, newVictim)
+ s.mu.Unlock()
+ }
+
+ return moved
+}
+
+// syncWatchers syncs unsynced watchers by:
+// 1. choose a set of watchers from the unsynced watcher group
+// 2. iterate over the set to get the minimum revision and remove compacted watchers
+// 3. use minimum revision to get all key-value pairs and send those events to watchers
+// 4. remove synced watchers in set from unsynced group and move to synced group
+func (s *watchableStore) syncWatchers(evs []mvccpb.Event) (int, []mvccpb.Event) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if s.unsynced.size() == 0 {
+ return 0, []mvccpb.Event{}
+ }
+
+ s.store.revMu.RLock()
+ defer s.store.revMu.RUnlock()
+
+ // in order to find key-value pairs from unsynced watchers, we need to
+ // find min revision index, and these revisions can be used to
+ // query the backend store of key-value pairs
+ curRev := s.store.currentRev
+ compactionRev := s.store.compactMainRev
+
+ wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev)
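+ // evs may still hold events from a previous syncWatchers call;
+ // rangeEventsWithReuse fetches only the revision ranges missing from it.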
+ evs = rangeEventsWithReuse(s.store.lg, s.store.b, evs, minRev, curRev+1)
+
+ victims := make(watcherBatch)
+ wb := newWatcherBatch(wg, evs)
+ for w := range wg.watchers {
+ if w.minRev < compactionRev {
+ // Skip the watcher that failed to send a compacted watch response because w.ch was full.
+ // The next retry of syncWatchers will try to resend the compacted watch response to w.ch
+ continue
+ }
+ w.minRev = curRev + 1
+
+ eb, ok := wb[w]
+ if !ok {
+ // bring un-notified watcher to synced
+ s.synced.add(w)
+ s.unsynced.delete(w)
+ continue
+ }
+
+ if eb.moreRev != 0 {
+ w.minRev = eb.moreRev
+ }
+
+ if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) {
+ pendingEventsGauge.Add(float64(len(eb.evs)))
+ } else {
+ w.victim = true
+ }
+
+ if w.victim {
+ victims[w] = eb
+ } else {
+ if eb.moreRev != 0 {
+ // stay unsynced; more to read
+ continue
+ }
+ s.synced.add(w)
+ }
+ s.unsynced.delete(w)
+ }
+ s.addVictim(victims)
+
+ vsz := 0
+ for _, v := range s.victims {
+ vsz += len(v)
+ }
+ slowWatcherGauge.Set(float64(s.unsynced.size() + vsz))
+
+ return s.unsynced.size(), evs
+}
+
+// rangeEventsWithReuse returns events in range [minRev, maxRev), while reusing already provided events.
+func rangeEventsWithReuse(lg *zap.Logger, b backend.Backend, evs []mvccpb.Event, minRev, maxRev int64) []mvccpb.Event {
+ if len(evs) == 0 {
+ return rangeEvents(lg, b, minRev, maxRev)
+ }
+ // append from left
+ if evs[0].Kv.ModRevision > minRev {
+ evs = append(rangeEvents(lg, b, minRev, evs[0].Kv.ModRevision), evs...)
+ }
+ // cut from left
+ prefixIndex := 0
+ for prefixIndex < len(evs) && evs[prefixIndex].Kv.ModRevision < minRev {
+ prefixIndex++
+ }
+ evs = evs[prefixIndex:]
+
+ if len(evs) == 0 {
+ return rangeEvents(lg, b, minRev, maxRev)
+ }
+ // append from right
+ if evs[len(evs)-1].Kv.ModRevision+1 < maxRev {
+ evs = append(evs, rangeEvents(lg, b, evs[len(evs)-1].Kv.ModRevision+1, maxRev)...)
+ }
+ // cut from right
+ suffixIndex := len(evs) - 1
+ for suffixIndex >= 0 && evs[suffixIndex].Kv.ModRevision >= maxRev {
+ suffixIndex--
+ }
+ evs = evs[:suffixIndex+1]
+ return evs
+}
+
+// rangeEvents returns events in range [minRev, maxRev).
+func rangeEvents(lg *zap.Logger, b backend.Backend, minRev, maxRev int64) []mvccpb.Event {
+ minBytes, maxBytes := NewRevBytes(), NewRevBytes()
+ minBytes = RevToBytes(Revision{Main: minRev}, minBytes)
+ maxBytes = RevToBytes(Revision{Main: maxRev}, maxBytes)
+
+ // UnsafeRange returns keys and values. In boltdb, keys are revisions and
+ // values are the actual key-value pairs stored in the backend.
+ tx := b.ReadTx()
+ tx.RLock()
+ revs, vs := tx.UnsafeRange(schema.Key, minBytes, maxBytes, 0)
+ evs := kvsToEvents(lg, revs, vs)
+ // Must unlock after kvsToEvents, because vs (come from boltdb memory) is not deep copy.
+ // We can only unlock after Unmarshal, which will do deep copy.
+ // Otherwise we will trigger SIGSEGV during boltdb re-mmap.
+ tx.RUnlock()
+ return evs
+}
+
+// kvsToEvents gets all events for the watchers from all key-value pairs
+func kvsToEvents(lg *zap.Logger, revs, vals [][]byte) (evs []mvccpb.Event) {
+ for i, v := range vals {
+ var kv mvccpb.KeyValue
+ if err := kv.Unmarshal(v); err != nil {
+ lg.Panic("failed to unmarshal mvccpb.KeyValue", zap.Error(err))
+ }
+
+ ty := mvccpb.PUT
+ if isTombstone(revs[i]) {
+ ty = mvccpb.DELETE
+ // patch in mod revision so watchers won't skip
+ kv.ModRevision = BytesToRev(revs[i]).Main
+ }
+ evs = append(evs, mvccpb.Event{Kv: &kv, Type: ty})
+ }
+ return evs
+}
+
+// notify sends the given events, which happened at the given rev, to the
+// watchers that watch on the keys of those events.
+func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
+ victim := make(watcherBatch)
+ for w, eb := range newWatcherBatch(&s.synced, evs) {
+ if eb.revs != 1 {
+ s.store.lg.Panic(
+ "unexpected multiple revisions in watch notification",
+ zap.Int("number-of-revisions", eb.revs),
+ )
+ }
+ if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
+ pendingEventsGauge.Add(float64(len(eb.evs)))
+ } else {
+ // move slow watcher to victims
+ w.victim = true
+ victim[w] = eb
+ s.synced.delete(w)
+ slowWatcherGauge.Inc()
+ }
+ // always update minRev
+ // if 'send' returned true and the watcher stays synced, this is needed by Restore when all watchers become unsynced
+ // if 'send' returned false, this is needed by syncWatchers
+ w.minRev = rev + 1
+ }
+ s.addVictim(victim)
+}
+
+func (s *watchableStore) addVictim(victim watcherBatch) {
+ if len(victim) == 0 {
+ return
+ }
+ s.victims = append(s.victims, victim)
+ select {
+ case s.victimc <- struct{}{}:
+ default:
+ }
+}
+
+func (s *watchableStore) rev() int64 { return s.store.Rev() }
+
+func (s *watchableStore) progress(w *watcher) {
+ s.progressIfSync(map[WatchID]*watcher{w.id: w}, w.id)
+}
+
+func (s *watchableStore) progressAll(watchers map[WatchID]*watcher) bool {
+ return s.progressIfSync(watchers, clientv3.InvalidWatchID)
+}
+
+func (s *watchableStore) progressIfSync(watchers map[WatchID]*watcher, responseWatchID WatchID) bool {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ // Any watcher unsynced?
+ for _, w := range watchers {
+ if _, ok := s.synced.watchers[w]; !ok {
+ return false
+ }
+ }
+
+ // If all watchers are synchronised, send out a progress
+ // notification on the first watcher. Note that all watchers
+ // should have the same underlying stream, and the progress
+ // notification will be broadcast client-side if required
+ // (see dispatchEvent in client/v3/watch.go).
+ for _, w := range watchers {
+ w.send(WatchResponse{WatchID: responseWatchID, Revision: s.rev()})
+ return true
+ }
+ return true
+}
+
+type watcher struct {
+ // the watcher key
+ key []byte
+ // end indicates the end of the range to watch.
+ // If end is set, the watcher is on a range.
+ end []byte
+
+ // victim is set when ch is blocked and undergoing victim processing
+ victim bool
+
+ // compacted is set when the watcher is removed because of compaction
+ compacted bool
+
+ // restore is true when the watcher is being restored from a leader snapshot,
+ // which means that this watcher has just been moved from the "synced" to the
+ // "unsynced" watcher group, possibly with a future revision from when it was
+ // first added to the synced watcher group.
+ // An "unsynced" watcher revision must always be <= the current revision,
+ // except when the watcher was moved from the "synced" watcher group.
+ restore bool
+
+ // minRev is the minimum revision update the watcher will accept
+ minRev int64
+ id WatchID
+
+ fcs []FilterFunc
+ // a chan to send out the watch response.
+ // The chan might be shared with other watchers.
+ ch chan<- WatchResponse
+}
+
+func (w *watcher) send(wr WatchResponse) bool {
+ progressEvent := len(wr.Events) == 0
+
+ if len(w.fcs) != 0 {
+ ne := make([]mvccpb.Event, 0, len(wr.Events))
+ for i := range wr.Events {
+ filtered := false
+ for _, filter := range w.fcs {
+ if filter(wr.Events[i]) {
+ filtered = true
+ break
+ }
+ }
+ if !filtered {
+ ne = append(ne, wr.Events[i])
+ }
+ }
+ wr.Events = ne
+ }
+
+ // if all events are filtered out, we should send nothing.
+ if !progressEvent && len(wr.Events) == 0 {
+ return true
+ }
+ select {
+ case w.ch <- wr:
+ return true
+ default:
+ return false
+ }
+}
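A minimal standalone sketch (illustrative only, not part of this patch; fetch and reuseWindow are hypothetical stand-ins for rangeEvents and event slices) of the same window-reuse steps used by rangeEventsWithReuse: extend on the left, trim on the left, extend on the right, trim on the right.

// Illustrative sketch only: the window-reuse idea of rangeEventsWithReuse,
// applied to plain revision numbers instead of mvccpb.Event values.
package main

import "fmt"

// fetch stands in for rangeEvents and returns the half-open range [minRev, maxRev).
func fetch(minRev, maxRev int64) []int64 {
	var out []int64
	for r := minRev; r < maxRev; r++ {
		out = append(out, r)
	}
	return out
}

// reuseWindow mirrors the four steps of rangeEventsWithReuse:
// extend on the left, trim on the left, extend on the right, trim on the right.
func reuseWindow(cached []int64, minRev, maxRev int64) []int64 {
	if len(cached) == 0 {
		return fetch(minRev, maxRev)
	}
	if cached[0] > minRev {
		cached = append(fetch(minRev, cached[0]), cached...)
	}
	i := 0
	for i < len(cached) && cached[i] < minRev {
		i++
	}
	cached = cached[i:]
	if len(cached) == 0 {
		return fetch(minRev, maxRev)
	}
	if cached[len(cached)-1]+1 < maxRev {
		cached = append(cached, fetch(cached[len(cached)-1]+1, maxRev)...)
	}
	j := len(cached) - 1
	for j >= 0 && cached[j] >= maxRev {
		j--
	}
	return cached[:j+1]
}

func main() {
	w := reuseWindow(nil, 2, 6) // first call fetches [2,6)
	fmt.Println(w)              // [2 3 4 5]
	w = reuseWindow(w, 4, 8)    // reuses 4 and 5, only fetches [6,8)
	fmt.Println(w)              // [4 5 6 7]
}

The reuse matters when syncWatchers scans overlapping revision ranges across successive watcher batches: only the uncovered edges of the window hit the backend.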
diff --git a/server/storage/mvcc/watchable_store_bench_test.go b/server/storage/mvcc/watchable_store_bench_test.go
new file mode 100644
index 00000000000..c8990576b30
--- /dev/null
+++ b/server/storage/mvcc/watchable_store_bench_test.go
@@ -0,0 +1,200 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "math/rand"
+ "testing"
+
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/lease"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+)
+
+func BenchmarkWatchableStorePut(b *testing.B) {
+ be, _ := betesting.NewDefaultTmpBackend(b)
+ s := New(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, be)
+
+ // arbitrary number of bytes
+ bytesN := 64
+ keys := createBytesSlice(bytesN, b.N)
+ vals := createBytesSlice(bytesN, b.N)
+
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ s.Put(keys[i], vals[i], lease.NoLease)
+ }
+}
+
+// BenchmarkWatchableStoreTxnPut benchmarks the Put operation
+// with transaction begin and end, where transaction involves
+// some synchronization operations, such as mutex locking.
+func BenchmarkWatchableStoreTxnPut(b *testing.B) {
+ be, _ := betesting.NewDefaultTmpBackend(b)
+ s := New(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, be)
+
+ // arbitrary number of bytes
+ bytesN := 64
+ keys := createBytesSlice(bytesN, b.N)
+ vals := createBytesSlice(bytesN, b.N)
+
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ txn := s.Write(traceutil.TODO())
+ txn.Put(keys[i], vals[i], lease.NoLease)
+ txn.End()
+ }
+}
+
+// BenchmarkWatchableStoreWatchPutSync benchmarks the case of
+// many synced watchers receiving a Put notification.
+func BenchmarkWatchableStoreWatchPutSync(b *testing.B) {
+ benchmarkWatchableStoreWatchPut(b, true)
+}
+
+// BenchmarkWatchableStoreWatchPutUnsync benchmarks the case of
+// many unsynced watchers receiving a Put notification.
+func BenchmarkWatchableStoreWatchPutUnsync(b *testing.B) {
+ benchmarkWatchableStoreWatchPut(b, false)
+}
+
+func benchmarkWatchableStoreWatchPut(b *testing.B, synced bool) {
+ be, _ := betesting.NewDefaultTmpBackend(b)
+ s := New(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, be)
+
+ k := []byte("testkey")
+ v := []byte("testval")
+
+ rev := int64(0)
+ if !synced {
+ // non-0 value to keep watchers in unsynced
+ rev = 1
+ }
+
+ w := s.NewWatchStream()
+ defer w.Close()
+ watchIDs := make([]WatchID, b.N)
+ for i := range watchIDs {
+ watchIDs[i], _ = w.Watch(0, k, nil, rev)
+ }
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ // trigger watchers
+ s.Put(k, v, lease.NoLease)
+ for range watchIDs {
+ <-w.Chan()
+ }
+ select {
+ case wc := <-w.Chan():
+ b.Fatalf("unexpected data %v", wc)
+ default:
+ }
+}
+
+// BenchmarkWatchableStoreUnsyncedCancel benchmarks cancel-function
+// performance for unsynced watchers in a WatchableStore. It creates
+// k*N watchers to populate unsynced with a reasonably large number of
+// watchers, then measures the time it takes to cancel N watchers out
+// of k*N watchers. The performance is expected to differ depending on
+// the unsynced member implementation.
+// TODO: k is an arbitrary constant. We need to figure out what factor
+// we should use to simulate real-world use cases.
+func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
+ be, _ := betesting.NewDefaultTmpBackend(b)
+ ws := newWatchableStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
+
+ defer cleanup(ws, be)
+
+ // Put a key so that we can spawn watchers on that key
+ // (testKey in this test). This increases the rev to 1,
+ // and later we can set the watcher's startRev to 1,
+ // and force watchers to be in unsynced.
+ testKey := []byte("foo")
+ testValue := []byte("bar")
+ ws.Put(testKey, testValue, lease.NoLease)
+
+ w := ws.NewWatchStream()
+ defer w.Close()
+
+ const k int = 2
+ benchSampleN := b.N
+ watcherN := k * benchSampleN
+
+ watchIDs := make([]WatchID, watcherN)
+ for i := 0; i < watcherN; i++ {
+ // non-0 value to keep watchers in unsynced
+ watchIDs[i], _ = w.Watch(0, testKey, nil, 1)
+ }
+
+ // random-cancel N watchers to make it not biased towards
+ // data structures with an order, such as slice.
+ ix := rand.Perm(watcherN)
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ // cancel N watchers
+ for _, idx := range ix[:benchSampleN] {
+ if err := w.Cancel(watchIDs[idx]); err != nil {
+ b.Error(err)
+ }
+ }
+}
+
+func BenchmarkWatchableStoreSyncedCancel(b *testing.B) {
+ be, _ := betesting.NewDefaultTmpBackend(b)
+ s := New(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
+
+ defer cleanup(s, be)
+
+ // Put a key so that we can spawn watchers on that key
+ testKey := []byte("foo")
+ testValue := []byte("bar")
+ s.Put(testKey, testValue, lease.NoLease)
+
+ w := s.NewWatchStream()
+ defer w.Close()
+
+ // put 1 million watchers on the same key
+ const watcherN = 1000000
+
+ watchIDs := make([]WatchID, watcherN)
+ for i := 0; i < watcherN; i++ {
+ // 0 for startRev to keep watchers in synced
+ watchIDs[i], _ = w.Watch(0, testKey, nil, 0)
+ }
+
+ // randomly cancel watchers to make it not biased towards
+ // data structures with an order, such as slice.
+ ix := rand.Perm(watcherN)
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for _, idx := range ix {
+ if err := w.Cancel(watchIDs[idx]); err != nil {
+ b.Error(err)
+ }
+ }
+}
diff --git a/server/storage/mvcc/watchable_store_test.go b/server/storage/mvcc/watchable_store_test.go
new file mode 100644
index 00000000000..a418c6c78fe
--- /dev/null
+++ b/server/storage/mvcc/watchable_store_test.go
@@ -0,0 +1,793 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/lease"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+)
+
+func TestWatch(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := New(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ testKey := []byte("foo")
+ testValue := []byte("bar")
+ s.Put(testKey, testValue, lease.NoLease)
+
+ w := s.NewWatchStream()
+ defer w.Close()
+
+ w.Watch(0, testKey, nil, 0)
+ if !s.synced.contains(string(testKey)) {
+ // the key must have had an entry in synced
+ t.Errorf("existence = false, want true")
+ }
+}
+
+func TestNewWatcherCancel(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := New(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ testKey := []byte("foo")
+ testValue := []byte("bar")
+ s.Put(testKey, testValue, lease.NoLease)
+
+ w := s.NewWatchStream()
+ defer w.Close()
+
+ wt, _ := w.Watch(0, testKey, nil, 0)
+ if err := w.Cancel(wt); err != nil {
+ t.Error(err)
+ }
+
+ if s.synced.contains(string(testKey)) {
+ // the key should have been deleted
+ t.Errorf("existence = true, want false")
+ }
+}
+
+// TestCancelUnsynced tests if running CancelFunc removes watchers from unsynced.
+func TestCancelUnsynced(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+
+ // manually create watchableStore via newWatchableStore instead of New,
+ // because New starts the syncWatchers loop that would sync watchers in
+ // the unsynced map. We want to keep watchers in unsynced to test whether
+ // Cancel correctly removes them.
+ s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ // Put a key so that we can spawn watchers on that key
+ // (testKey in this test). This increases the rev to 1,
+ // and later we can set the watcher's startRev to 1,
+ // and force watchers to be in unsynced.
+ testKey := []byte("foo")
+ testValue := []byte("bar")
+ s.Put(testKey, testValue, lease.NoLease)
+
+ w := s.NewWatchStream()
+ defer w.Close()
+
+ // arbitrary number for watchers
+ watcherN := 100
+
+ // create watcherN of watch ids to cancel
+ watchIDs := make([]WatchID, watcherN)
+ for i := 0; i < watcherN; i++ {
+ // use 1 to keep watchers in unsynced
+ watchIDs[i], _ = w.Watch(0, testKey, nil, 1)
+ }
+
+ for _, idx := range watchIDs {
+ if err := w.Cancel(idx); err != nil {
+ t.Error(err)
+ }
+ }
+
+ // After running CancelFunc
+ //
+ // unsynced should be empty
+ // because cancel removes watcher from unsynced
+ if size := s.unsynced.size(); size != 0 {
+ t.Errorf("unsynced size = %d, want 0", size)
+ }
+}
+
+// TestSyncWatchers populates unsynced watcher map and tests syncWatchers
+// method to see if it correctly sends events to channel of unsynced watchers
+// and moves these watchers to synced.
+func TestSyncWatchers(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ testKey := []byte("foo")
+ testValue := []byte("bar")
+ s.Put(testKey, testValue, lease.NoLease)
+ w := s.NewWatchStream()
+ defer w.Close()
+ watcherN := 100
+ for i := 0; i < watcherN; i++ {
+ _, err := w.Watch(0, testKey, nil, 1)
+ require.NoError(t, err)
+ }
+
+ assert.Empty(t, s.synced.watcherSetByKey(string(testKey)))
+ assert.Len(t, s.unsynced.watcherSetByKey(string(testKey)), watcherN)
+ s.syncWatchers([]mvccpb.Event{})
+ assert.Len(t, s.synced.watcherSetByKey(string(testKey)), watcherN)
+ assert.Empty(t, s.unsynced.watcherSetByKey(string(testKey)))
+
+ require.Len(t, w.(*watchStream).ch, watcherN)
+ for i := 0; i < watcherN; i++ {
+ events := (<-w.(*watchStream).ch).Events
+ assert.Len(t, events, 1)
+ assert.Equal(t, []mvccpb.Event{
+ {
+ Type: mvccpb.PUT,
+ Kv: &mvccpb.KeyValue{
+ Key: testKey,
+ CreateRevision: 2,
+ ModRevision: 2,
+ Version: 1,
+ Value: testValue,
+ },
+ },
+ }, events)
+ }
+}
+
+func TestRangeEvents(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ lg := zaptest.NewLogger(t)
+ s := NewStore(lg, b, &lease.FakeLessor{}, StoreConfig{})
+
+ defer cleanup(s, b)
+
+ foo1 := []byte("foo1")
+ foo2 := []byte("foo2")
+ foo3 := []byte("foo3")
+ value := []byte("bar")
+ s.Put(foo1, value, lease.NoLease)
+ s.Put(foo2, value, lease.NoLease)
+ s.Put(foo3, value, lease.NoLease)
+ s.DeleteRange(foo1, foo3) // Deletes "foo1" and "foo2" generating 2 events
+
+ expectEvents := []mvccpb.Event{
+ {
+ Type: mvccpb.PUT,
+ Kv: &mvccpb.KeyValue{
+ Key: foo1,
+ CreateRevision: 2,
+ ModRevision: 2,
+ Version: 1,
+ Value: value,
+ },
+ },
+ {
+ Type: mvccpb.PUT,
+ Kv: &mvccpb.KeyValue{
+ Key: foo2,
+ CreateRevision: 3,
+ ModRevision: 3,
+ Version: 1,
+ Value: value,
+ },
+ },
+ {
+ Type: mvccpb.PUT,
+ Kv: &mvccpb.KeyValue{
+ Key: foo3,
+ CreateRevision: 4,
+ ModRevision: 4,
+ Version: 1,
+ Value: value,
+ },
+ },
+ {
+ Type: mvccpb.DELETE,
+ Kv: &mvccpb.KeyValue{
+ Key: foo1,
+ ModRevision: 5,
+ },
+ },
+ {
+ Type: mvccpb.DELETE,
+ Kv: &mvccpb.KeyValue{
+ Key: foo2,
+ ModRevision: 5,
+ },
+ },
+ }
+
+ tcs := []struct {
+ minRev int64
+ maxRev int64
+ expectEvents []mvccpb.Event
+ }{
+ // maxRev, top to bottom
+ {minRev: 2, maxRev: 6, expectEvents: expectEvents[0:5]},
+ {minRev: 2, maxRev: 5, expectEvents: expectEvents[0:3]},
+ {minRev: 2, maxRev: 4, expectEvents: expectEvents[0:2]},
+ {minRev: 2, maxRev: 3, expectEvents: expectEvents[0:1]},
+ {minRev: 2, maxRev: 2, expectEvents: expectEvents[0:0]},
+
+ // minRev, bottom to top
+ {minRev: 2, maxRev: 6, expectEvents: expectEvents[0:5]},
+ {minRev: 3, maxRev: 6, expectEvents: expectEvents[1:5]},
+ {minRev: 4, maxRev: 6, expectEvents: expectEvents[2:5]},
+ {minRev: 5, maxRev: 6, expectEvents: expectEvents[3:5]},
+ {minRev: 6, maxRev: 6, expectEvents: expectEvents[0:0]},
+
+ // Moving window algorithm, first increase maxRev, then increase minRev, repeat.
+ {minRev: 2, maxRev: 2, expectEvents: expectEvents[0:0]},
+ {minRev: 2, maxRev: 3, expectEvents: expectEvents[0:1]},
+ {minRev: 2, maxRev: 4, expectEvents: expectEvents[0:2]},
+ {minRev: 3, maxRev: 4, expectEvents: expectEvents[1:2]},
+ {minRev: 3, maxRev: 5, expectEvents: expectEvents[1:3]},
+ {minRev: 4, maxRev: 5, expectEvents: expectEvents[2:3]},
+ {minRev: 4, maxRev: 6, expectEvents: expectEvents[2:5]},
+ {minRev: 5, maxRev: 6, expectEvents: expectEvents[3:5]},
+ {minRev: 6, maxRev: 6, expectEvents: expectEvents[5:5]},
+ }
+ // reuse the evs to test rangeEventsWithReuse
+ var evs []mvccpb.Event
+ for i, tc := range tcs {
+ t.Run(fmt.Sprintf("%d rangeEvents(%d, %d)", i, tc.minRev, tc.maxRev), func(t *testing.T) {
+ assert.ElementsMatch(t, tc.expectEvents, rangeEvents(lg, b, tc.minRev, tc.maxRev))
+ evs = rangeEventsWithReuse(lg, b, evs, tc.minRev, tc.maxRev)
+ assert.ElementsMatch(t, tc.expectEvents, evs)
+ })
+ }
+}
+
+// TestWatchCompacted tests a watcher that watches on a compacted revision.
+func TestWatchCompacted(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := New(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ testKey := []byte("foo")
+ testValue := []byte("bar")
+
+ maxRev := 10
+ compactRev := int64(5)
+ for i := 0; i < maxRev; i++ {
+ s.Put(testKey, testValue, lease.NoLease)
+ }
+ _, err := s.Compact(traceutil.TODO(), compactRev)
+ if err != nil {
+ t.Fatalf("failed to compact kv (%v)", err)
+ }
+
+ w := s.NewWatchStream()
+ defer w.Close()
+
+ wt, _ := w.Watch(0, testKey, nil, compactRev-1)
+ select {
+ case resp := <-w.Chan():
+ if resp.WatchID != wt {
+ t.Errorf("resp.WatchID = %x, want %x", resp.WatchID, wt)
+ }
+ if resp.CompactRevision == 0 {
+ t.Errorf("resp.Compacted = %v, want %v", resp.CompactRevision, compactRev)
+ }
+ case <-time.After(1 * time.Second):
+ t.Fatalf("failed to receive response (timeout)")
+ }
+}
+
+func TestWatchNoEventLossOnCompact(t *testing.T) {
+ oldChanBufLen, oldMaxWatchersPerSync := chanBufLen, maxWatchersPerSync
+
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ lg := zaptest.NewLogger(t)
+ s := New(lg, b, &lease.FakeLessor{}, StoreConfig{})
+
+ defer func() {
+ cleanup(s, b)
+ chanBufLen, maxWatchersPerSync = oldChanBufLen, oldMaxWatchersPerSync
+ }()
+
+ chanBufLen, maxWatchersPerSync = 1, 4
+ testKey, testValue := []byte("foo"), []byte("bar")
+
+ maxRev := 10
+ compactRev := int64(5)
+ for i := 0; i < maxRev; i++ {
+ s.Put(testKey, testValue, lease.NoLease)
+ }
+ _, err := s.Compact(traceutil.TODO(), compactRev)
+ require.NoErrorf(t, err, "failed to compact kv (%v)", err)
+
+ w := s.NewWatchStream()
+ defer w.Close()
+
+ watchers := map[WatchID]int64{
+ 0: 1,
+ 1: 1, // create unsynced watchers with startRev < compactRev
+ 2: 6, // create unsynced watchers with compactRev < startRev < currentRev
+ }
+ for id, startRev := range watchers {
+ _, err := w.Watch(id, testKey, nil, startRev)
+ require.NoError(t, err)
+ }
+ // fill up w.Chan(), which has a buffer of 1, via 2 compacted watch responses
+ s.syncWatchers([]mvccpb.Event{})
+
+ for len(watchers) > 0 {
+ resp := <-w.Chan()
+ if resp.CompactRevision != 0 {
+ require.Equal(t, resp.CompactRevision, compactRev)
+ require.Contains(t, watchers, resp.WatchID)
+ delete(watchers, resp.WatchID)
+ continue
+ }
+ nextRev := watchers[resp.WatchID]
+ for _, ev := range resp.Events {
+ require.Equalf(t, nextRev, ev.Kv.ModRevision, "got event revision %d but want %d for watcher with watch ID %d", ev.Kv.ModRevision, nextRev, resp.WatchID)
+ nextRev++
+ }
+ if nextRev == s.rev()+1 {
+ delete(watchers, resp.WatchID)
+ }
+ }
+}
+
+func TestWatchFutureRev(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := New(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ testKey := []byte("foo")
+ testValue := []byte("bar")
+
+ w := s.NewWatchStream()
+ defer w.Close()
+
+ wrev := int64(10)
+ w.Watch(0, testKey, nil, wrev)
+
+ for i := 0; i < 10; i++ {
+ rev := s.Put(testKey, testValue, lease.NoLease)
+ if rev >= wrev {
+ break
+ }
+ }
+
+ select {
+ case resp := <-w.Chan():
+ if resp.Revision != wrev {
+ t.Fatalf("rev = %d, want %d", resp.Revision, wrev)
+ }
+ if len(resp.Events) != 1 {
+ t.Fatalf("failed to get events from the response")
+ }
+ if resp.Events[0].Kv.ModRevision != wrev {
+ t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, wrev)
+ }
+ case <-time.After(time.Second):
+ t.Fatal("failed to receive event in 1 second.")
+ }
+}
+
+func TestWatchRestore(t *testing.T) {
+ test := func(delay time.Duration) func(t *testing.T) {
+ return func(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := New(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ testKey := []byte("foo")
+ testValue := []byte("bar")
+ w := s.NewWatchStream()
+ defer w.Close()
+ w.Watch(0, testKey, nil, 1)
+
+ time.Sleep(delay)
+ wantRev := s.Put(testKey, testValue, lease.NoLease)
+
+ s.Restore(b)
+ events := readEventsForSecond(w.Chan())
+ if len(events) != 1 {
+ t.Errorf("Expected only one event, got %d", len(events))
+ }
+ if events[0].Kv.ModRevision != wantRev {
+ t.Errorf("Expected revision to match, got %d, want %d", events[0].Kv.ModRevision, wantRev)
+ }
+ }
+ }
+
+ t.Run("Normal", test(0))
+ t.Run("RunSyncWatchLoopBeforeRestore", test(time.Millisecond*120)) // longer than default waitDuration
+}
+
+func readEventsForSecond(ws <-chan WatchResponse) (events []mvccpb.Event) {
+ for {
+ select {
+ case resp := <-ws:
+ events = append(events, resp.Events...)
+ case <-time.After(time.Second):
+ return events
+ }
+ }
+}
+
+// TestWatchRestoreSyncedWatcher tests such a case that:
+// 1. watcher is created with a future revision (current revision + 2)
+// 2. watcher with a future revision is added to "synced" watcher group
+// 3. restore/overwrite storage with a snapshot of a higher last revision
+// 4. restore operation moves "synced" to "unsynced" watcher group
+// 5. choose the watcher from step 1, without panic
+func TestWatchRestoreSyncedWatcher(t *testing.T) {
+ b1, _ := betesting.NewDefaultTmpBackend(t)
+ s1 := New(zaptest.NewLogger(t), b1, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s1, b1)
+
+ b2, _ := betesting.NewDefaultTmpBackend(t)
+ s2 := New(zaptest.NewLogger(t), b2, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s2, b2)
+
+ testKey, testValue := []byte("foo"), []byte("bar")
+ rev := s1.Put(testKey, testValue, lease.NoLease)
+ startRev := rev + 2
+
+ // create a watcher with a future revision
+ // add to "synced" watcher group (startRev > s.store.currentRev)
+ w1 := s1.NewWatchStream()
+ defer w1.Close()
+
+ w1.Watch(0, testKey, nil, startRev)
+
+ // make "s2" ends up with a higher last revision
+ s2.Put(testKey, testValue, lease.NoLease)
+ s2.Put(testKey, testValue, lease.NoLease)
+
+ // overwrite storage with higher revisions
+ if err := s1.Restore(b2); err != nil {
+ t.Fatal(err)
+ }
+
+ // wait for the next "syncWatchersLoop" iteration
+ // so that the unsynced watcher is chosen
+ time.Sleep(2 * time.Second)
+
+ // trigger events for "startRev"
+ s1.Put(testKey, testValue, lease.NoLease)
+
+ select {
+ case resp := <-w1.Chan():
+ if resp.Revision != startRev {
+ t.Fatalf("resp.Revision expect %d, got %d", startRev, resp.Revision)
+ }
+ if len(resp.Events) != 1 {
+ t.Fatalf("len(resp.Events) expect 1, got %d", len(resp.Events))
+ }
+ if resp.Events[0].Kv.ModRevision != startRev {
+ t.Fatalf("resp.Events[0].Kv.ModRevision expect %d, got %d", startRev, resp.Events[0].Kv.ModRevision)
+ }
+ case <-time.After(time.Second):
+ t.Fatal("failed to receive event in 1 second")
+ }
+}
+
+// TestWatchBatchUnsynced tests batching on unsynced watchers
+func TestWatchBatchUnsynced(t *testing.T) {
+ tcs := []struct {
+ name string
+ revisions int
+ watchBatchMaxRevs int
+ eventsPerRevision int
+ expectRevisionBatches [][]int64
+ }{
+ {
+ name: "3 revisions, 4 revs per batch, 1 events per revision",
+ revisions: 12,
+ watchBatchMaxRevs: 4,
+ eventsPerRevision: 1,
+ expectRevisionBatches: [][]int64{
+ {2, 3, 4, 5},
+ {6, 7, 8, 9},
+ {10, 11, 12, 13},
+ },
+ },
+ {
+ name: "3 revisions, 4 revs per batch, 3 events per revision",
+ revisions: 12,
+ watchBatchMaxRevs: 4,
+ eventsPerRevision: 3,
+ expectRevisionBatches: [][]int64{
+ {2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5},
+ {6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9},
+ {10, 10, 10, 11, 11, 11, 12, 12, 12, 13, 13, 13},
+ },
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := New(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ oldMaxRevs := watchBatchMaxRevs
+ defer func() {
+ watchBatchMaxRevs = oldMaxRevs
+ cleanup(s, b)
+ }()
+ watchBatchMaxRevs = tc.watchBatchMaxRevs
+
+ v := []byte("foo")
+ for i := 0; i < tc.revisions; i++ {
+ txn := s.Write(traceutil.TODO())
+ for j := 0; j < tc.eventsPerRevision; j++ {
+ txn.Put(v, v, lease.NoLease)
+ }
+ txn.End()
+ }
+
+ w := s.NewWatchStream()
+ defer w.Close()
+
+ w.Watch(0, v, nil, 1)
+ var revisionBatches [][]int64
+ eventCount := 0
+ for eventCount < tc.revisions*tc.eventsPerRevision {
+ var revisions []int64
+ for _, e := range (<-w.Chan()).Events {
+ revisions = append(revisions, e.Kv.ModRevision)
+ eventCount++
+ }
+ revisionBatches = append(revisionBatches, revisions)
+ }
+ assert.Equal(t, tc.expectRevisionBatches, revisionBatches)
+
+ s.store.revMu.Lock()
+ defer s.store.revMu.Unlock()
+ assert.Equal(t, 1, s.synced.size())
+ assert.Equal(t, 0, s.unsynced.size())
+ })
+ }
+}
+
+func TestNewMapwatcherToEventMap(t *testing.T) {
+ k0, k1, k2 := []byte("foo0"), []byte("foo1"), []byte("foo2")
+ v0, v1, v2 := []byte("bar0"), []byte("bar1"), []byte("bar2")
+
+ ws := []*watcher{{key: k0}, {key: k1}, {key: k2}}
+
+ evs := []mvccpb.Event{
+ {
+ Type: mvccpb.PUT,
+ Kv: &mvccpb.KeyValue{Key: k0, Value: v0},
+ },
+ {
+ Type: mvccpb.PUT,
+ Kv: &mvccpb.KeyValue{Key: k1, Value: v1},
+ },
+ {
+ Type: mvccpb.PUT,
+ Kv: &mvccpb.KeyValue{Key: k2, Value: v2},
+ },
+ }
+
+ tests := []struct {
+ sync []*watcher
+ evs []mvccpb.Event
+
+ wwe map[*watcher][]mvccpb.Event
+ }{
+ // no watcher in sync, some events should return empty wwe
+ {
+ nil,
+ evs,
+ map[*watcher][]mvccpb.Event{},
+ },
+
+ // one watcher in sync, one event that does not match the key of that
+ // watcher should return empty wwe
+ {
+ []*watcher{ws[2]},
+ evs[:1],
+ map[*watcher][]mvccpb.Event{},
+ },
+
+ // one watcher in sync, one event that matches the key of that
+ // watcher should return wwe with that matching watcher
+ {
+ []*watcher{ws[1]},
+ evs[1:2],
+ map[*watcher][]mvccpb.Event{
+ ws[1]: evs[1:2],
+ },
+ },
+
+ // two watchers in sync that watch two different keys, one event
+ // that matches the key of only one of the watchers should return wwe
+ // with the matching watcher
+ {
+ []*watcher{ws[0], ws[2]},
+ evs[2:],
+ map[*watcher][]mvccpb.Event{
+ ws[2]: evs[2:],
+ },
+ },
+
+ // two watchers in sync, each watching a different key, and two events
+ // that match their keys should return wwe with both watchers
+ {
+ []*watcher{ws[0], ws[1]},
+ evs[:2],
+ map[*watcher][]mvccpb.Event{
+ ws[0]: evs[:1],
+ ws[1]: evs[1:2],
+ },
+ },
+ }
+
+ for i, tt := range tests {
+ wg := newWatcherGroup()
+ for _, w := range tt.sync {
+ wg.add(w)
+ }
+
+ gwe := newWatcherBatch(&wg, tt.evs)
+ if len(gwe) != len(tt.wwe) {
+ t.Errorf("#%d: len(gwe) got = %d, want = %d", i, len(gwe), len(tt.wwe))
+ }
+ // compare gwe and tt.wwe
+ for w, eb := range gwe {
+ if len(eb.evs) != len(tt.wwe[w]) {
+ t.Errorf("#%d: len(eb.evs) got = %d, want = %d", i, len(eb.evs), len(tt.wwe[w]))
+ }
+ if !reflect.DeepEqual(eb.evs, tt.wwe[w]) {
+ t.Errorf("#%d: reflect.DeepEqual events got = %v, want = true", i, false)
+ }
+ }
+ }
+}
+
+// TestWatchVictims tests that watchable store delivers watch events
+// when the watch channel is temporarily clogged with too many events.
+func TestWatchVictims(t *testing.T) {
+ oldChanBufLen, oldMaxWatchersPerSync := chanBufLen, maxWatchersPerSync
+
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := New(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+
+ defer func() {
+ cleanup(s, b)
+ chanBufLen, maxWatchersPerSync = oldChanBufLen, oldMaxWatchersPerSync
+ }()
+
+ chanBufLen, maxWatchersPerSync = 1, 2
+ numPuts := chanBufLen * 64
+ testKey, testValue := []byte("foo"), []byte("bar")
+
+ var wg sync.WaitGroup
+ numWatches := maxWatchersPerSync * 128
+ errc := make(chan error, numWatches)
+ wg.Add(numWatches)
+ for i := 0; i < numWatches; i++ {
+ go func() {
+ w := s.NewWatchStream()
+ w.Watch(0, testKey, nil, 1)
+ defer func() {
+ w.Close()
+ wg.Done()
+ }()
+ tc := time.After(10 * time.Second)
+ evs, nextRev := 0, int64(2)
+ for evs < numPuts {
+ select {
+ case <-tc:
+ errc <- fmt.Errorf("time out")
+ return
+ case wr := <-w.Chan():
+ evs += len(wr.Events)
+ for _, ev := range wr.Events {
+ if ev.Kv.ModRevision != nextRev {
+ errc <- fmt.Errorf("expected rev=%d, got %d", nextRev, ev.Kv.ModRevision)
+ return
+ }
+ nextRev++
+ }
+ time.Sleep(time.Millisecond)
+ }
+ }
+ if evs != numPuts {
+ errc <- fmt.Errorf("expected %d events, got %d", numPuts, evs)
+ return
+ }
+ select {
+ case <-w.Chan():
+ errc <- fmt.Errorf("unexpected response")
+ default:
+ }
+ }()
+ time.Sleep(time.Millisecond)
+ }
+
+ var wgPut sync.WaitGroup
+ wgPut.Add(numPuts)
+ for i := 0; i < numPuts; i++ {
+ go func() {
+ defer wgPut.Done()
+ s.Put(testKey, testValue, lease.NoLease)
+ }()
+ }
+ wgPut.Wait()
+
+ wg.Wait()
+ select {
+ case err := <-errc:
+ t.Fatal(err)
+ default:
+ }
+}
+
+// TestStressWatchCancelClose tests closing a watch stream while
+// canceling its watches.
+func TestStressWatchCancelClose(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := New(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
+
+ testKey, testValue := []byte("foo"), []byte("bar")
+ var wg sync.WaitGroup
+ readyc := make(chan struct{})
+ wg.Add(100)
+ for i := 0; i < 100; i++ {
+ go func() {
+ defer wg.Done()
+ w := s.NewWatchStream()
+ ids := make([]WatchID, 10)
+ for i := range ids {
+ ids[i], _ = w.Watch(0, testKey, nil, 0)
+ }
+ <-readyc
+ wg.Add(1 + len(ids)/2)
+ for i := range ids[:len(ids)/2] {
+ go func(n int) {
+ defer wg.Done()
+ w.Cancel(ids[n])
+ }(i)
+ }
+ go func() {
+ defer wg.Done()
+ w.Close()
+ }()
+ }()
+ }
+
+ close(readyc)
+ for i := 0; i < 100; i++ {
+ s.Put(testKey, testValue, lease.NoLease)
+ }
+
+ wg.Wait()
+}
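A minimal standalone sketch (illustrative only; response, trySend and victims are hypothetical names, not the store's types) of the pattern TestWatchVictims exercises: sends to the buffered watch channel never block the store, and batches that do not fit are parked for a later retry, mirroring watcher.send and s.victims.

// Illustrative sketch only: non-blocking delivery with a retry ("victim") queue.
package main

import "fmt"

type response struct{ rev int64 }

// trySend mirrors watcher.send's select/default: it never blocks the caller.
func trySend(ch chan response, r response) bool {
	select {
	case ch <- r:
		return true
	default:
		return false
	}
}

func main() {
	ch := make(chan response, 1) // small buffer, like chanBufLen = 1 in the test
	var victims []response       // batches to retry later, like s.victims

	for rev := int64(2); rev <= 4; rev++ {
		if !trySend(ch, response{rev: rev}) {
			victims = append(victims, response{rev: rev})
		}
	}
	fmt.Println("delivered:", <-ch)           // rev 2 fit in the buffer
	fmt.Println("parked for retry:", victims) // revs 3 and 4 wait for the victim loop
}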
diff --git a/server/mvcc/watchable_store_txn.go b/server/storage/mvcc/watchable_store_txn.go
similarity index 100%
rename from server/mvcc/watchable_store_txn.go
rename to server/storage/mvcc/watchable_store_txn.go
diff --git a/server/storage/mvcc/watcher.go b/server/storage/mvcc/watcher.go
new file mode 100644
index 00000000000..c67c21d6139
--- /dev/null
+++ b/server/storage/mvcc/watcher.go
@@ -0,0 +1,203 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "bytes"
+ "errors"
+ "sync"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+)
+
+var (
+ ErrWatcherNotExist = errors.New("mvcc: watcher does not exist")
+ ErrEmptyWatcherRange = errors.New("mvcc: watcher range is empty")
+ ErrWatcherDuplicateID = errors.New("mvcc: duplicate watch ID provided on the WatchStream")
+)
+
+type WatchID int64
+
+// FilterFunc returns true if the given event should be filtered out.
+type FilterFunc func(e mvccpb.Event) bool
+
+type WatchStream interface {
+ // Watch creates a watcher. The watcher watches the events that happen or
+ // have happened on the given key or range [key, end) from the given startRev.
+ //
+ // The whole event history can be watched unless compacted.
+ // If "startRev" <= 0, watch observes events after currentRev.
+ //
+ // The returned "id" is the ID of this watcher. It appears as WatchID
+ // in events that are sent to the created watcher through stream channel.
+ // The watch ID is used when it's not equal to AutoWatchID. Otherwise,
+ // an auto-generated watch ID is returned.
+ Watch(id WatchID, key, end []byte, startRev int64, fcs ...FilterFunc) (WatchID, error)
+
+ // Chan returns a chan. All watch response will be sent to the returned chan.
+ Chan() <-chan WatchResponse
+
+ // RequestProgress requests the progress of the watcher with the given ID. The response
+ // will only be sent if the watcher is currently synced.
+ // The response will be sent through the WatchResponse Chan attached
+ // to this stream to ensure correct ordering.
+ // The response contains no events. The revision in the response is the progress
+ // of the watcher since it is currently synced.
+ RequestProgress(id WatchID)
+
+ // RequestProgressAll requests a progress notification for all
+ // watchers sharing the stream. If all watchers are synced, a
+ // progress notification with watch ID -1 will be sent to an
+ // arbitrary watcher of this stream, and the function returns
+ // true.
+ RequestProgressAll() bool
+
+ // Cancel cancels a watcher by its ID. If the watcher does not exist, an error
+ // will be returned.
+ Cancel(id WatchID) error
+
+ // Close closes Chan and releases all related resources.
+ Close()
+
+ // Rev returns the current revision of the KV the stream watches on.
+ Rev() int64
+}
+
+type WatchResponse struct {
+ // WatchID is the WatchID of the watcher this response sent to.
+ WatchID WatchID
+
+ // Events contains all the events that need to be sent.
+ Events []mvccpb.Event
+
+ // Revision is the revision of the KV when the watchResponse is created.
+ // For a normal response, the revision should be the same as the last
+ // modified revision inside Events. For a delayed response to an unsynced
+ // watcher, the revision is greater than the last modified revision
+ // inside Events.
+ Revision int64
+
+ // CompactRevision is set when the watcher is cancelled due to compaction.
+ CompactRevision int64
+}
+
+// watchStream contains a collection of watchers that share
+// one streaming chan to send out watched events and other control events.
+type watchStream struct {
+ watchable watchable
+ ch chan WatchResponse
+
+ mu sync.Mutex // guards fields below it
+ // nextID is the ID pre-allocated for next new watcher in this stream
+ nextID WatchID
+ closed bool
+ cancels map[WatchID]cancelFunc
+ watchers map[WatchID]*watcher
+}
+
+// Watch creates a new watcher in the stream and returns its WatchID.
+func (ws *watchStream) Watch(id WatchID, key, end []byte, startRev int64, fcs ...FilterFunc) (WatchID, error) {
+ // prevent wrong range where key >= end lexicographically
+ // watch request with 'WithFromKey' has empty-byte range end
+ if len(end) != 0 && bytes.Compare(key, end) != -1 {
+ return -1, ErrEmptyWatcherRange
+ }
+
+ ws.mu.Lock()
+ defer ws.mu.Unlock()
+ if ws.closed {
+ return -1, ErrEmptyWatcherRange
+ }
+
+ if id == clientv3.AutoWatchID {
+ for ws.watchers[ws.nextID] != nil {
+ ws.nextID++
+ }
+ id = ws.nextID
+ ws.nextID++
+ } else if _, ok := ws.watchers[id]; ok {
+ return -1, ErrWatcherDuplicateID
+ }
+
+ w, c := ws.watchable.watch(key, end, startRev, id, ws.ch, fcs...)
+
+ ws.cancels[id] = c
+ ws.watchers[id] = w
+ return id, nil
+}
+
+func (ws *watchStream) Chan() <-chan WatchResponse {
+ return ws.ch
+}
+
+func (ws *watchStream) Cancel(id WatchID) error {
+ ws.mu.Lock()
+ cancel, ok := ws.cancels[id]
+ w := ws.watchers[id]
+ ok = ok && !ws.closed
+ ws.mu.Unlock()
+
+ if !ok {
+ return ErrWatcherNotExist
+ }
+ cancel()
+
+ ws.mu.Lock()
+ // The watch isn't removed until cancel so that if Close() is called,
+ // it will wait for the cancel. Otherwise, Close() could close the
+ // watch channel while the store is still posting events.
+ if ww := ws.watchers[id]; ww == w {
+ delete(ws.cancels, id)
+ delete(ws.watchers, id)
+ }
+ ws.mu.Unlock()
+
+ return nil
+}
+
+func (ws *watchStream) Close() {
+ ws.mu.Lock()
+ defer ws.mu.Unlock()
+
+ for _, cancel := range ws.cancels {
+ cancel()
+ }
+ ws.closed = true
+ close(ws.ch)
+ watchStreamGauge.Dec()
+}
+
+func (ws *watchStream) Rev() int64 {
+ ws.mu.Lock()
+ defer ws.mu.Unlock()
+ return ws.watchable.rev()
+}
+
+func (ws *watchStream) RequestProgress(id WatchID) {
+ ws.mu.Lock()
+ w, ok := ws.watchers[id]
+ ws.mu.Unlock()
+ if !ok {
+ return
+ }
+ ws.watchable.progress(w)
+}
+
+func (ws *watchStream) RequestProgressAll() bool {
+ ws.mu.Lock()
+ defer ws.mu.Unlock()
+ return ws.watchable.progressAll(ws.watchers)
+}
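A brief in-package sketch (hypothetical usage, not part of the patch) of how a WatchStream is typically driven, mirroring the tests earlier in this series; WatchableKV is the package's watchable KV interface referenced elsewhere in this patch, and error handling is trimmed.

// In-package sketch (hypothetical usage, not part of the patch).
package mvcc

func exampleWatchStreamUsage(s WatchableKV) {
	w := s.NewWatchStream()
	defer w.Close() // Close cancels any remaining watchers and closes Chan

	// Watch key "foo" from revision 1; passing AutoWatchID (0) asks the
	// stream to allocate a watch ID.
	id, err := w.Watch(0, []byte("foo"), nil, 1)
	if err != nil {
		return
	}

	// All watchers on this stream share one response channel.
	resp := <-w.Chan()
	for _, ev := range resp.Events {
		_ = ev.Kv.ModRevision // events for a watcher arrive in revision order
	}

	// A synced watcher can be asked for a progress notification.
	w.RequestProgress(id)

	// Cancel removes just this watcher from the stream.
	_ = w.Cancel(id)
}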
diff --git a/server/mvcc/watcher_bench_test.go b/server/storage/mvcc/watcher_bench_test.go
similarity index 77%
rename from server/mvcc/watcher_bench_test.go
rename to server/storage/mvcc/watcher_bench_test.go
index 5a90a46ebb6..3d0dccea342 100644
--- a/server/mvcc/watcher_bench_test.go
+++ b/server/storage/mvcc/watcher_bench_test.go
@@ -18,19 +18,20 @@ import (
"fmt"
"testing"
- "go.etcd.io/etcd/server/v3/lease"
- betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
+ "go.uber.org/zap/zaptest"
- "go.uber.org/zap"
+ "go.etcd.io/etcd/server/v3/lease"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
)
func BenchmarkKVWatcherMemoryUsage(b *testing.B) {
- be, tmpPath := betesting.NewDefaultTmpBackend(b)
- watchable := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
+ be, _ := betesting.NewDefaultTmpBackend(b)
+ watchable := New(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
- defer cleanup(watchable, be, tmpPath)
+ defer cleanup(watchable, be)
w := watchable.NewWatchStream()
+ defer w.Close()
b.ReportAllocs()
b.StartTimer()
diff --git a/server/mvcc/watcher_group.go b/server/storage/mvcc/watcher_group.go
similarity index 97%
rename from server/mvcc/watcher_group.go
rename to server/storage/mvcc/watcher_group.go
index 356b49e6413..c9db0e2bd9b 100644
--- a/server/mvcc/watcher_group.go
+++ b/server/storage/mvcc/watcher_group.go
@@ -22,12 +22,10 @@ import (
"go.etcd.io/etcd/pkg/v3/adt"
)
-var (
- // watchBatchMaxRevs is the maximum distinct revisions that
- // may be sent to an unsynced watcher at a time. Declared as
- // var instead of const for testing purposes.
- watchBatchMaxRevs = 1000
-)
+// watchBatchMaxRevs is the maximum distinct revisions that
+// may be sent to an unsynced watcher at a time. Declared as
+// var instead of const for testing purposes.
+var watchBatchMaxRevs = 1000
type eventBatch struct {
// evs is a batch of revision-ordered events
diff --git a/server/mvcc/watcher_test.go b/server/storage/mvcc/watcher_test.go
similarity index 76%
rename from server/mvcc/watcher_test.go
rename to server/storage/mvcc/watcher_test.go
index bbada4ed5dc..e774c70cfac 100644
--- a/server/mvcc/watcher_test.go
+++ b/server/storage/mvcc/watcher_test.go
@@ -16,24 +16,27 @@ package mvcc
import (
"bytes"
+ "errors"
"fmt"
"os"
"reflect"
"testing"
"time"
+ "go.uber.org/zap/zaptest"
+
"go.etcd.io/etcd/api/v3/mvccpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/lease"
- betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
- "go.uber.org/zap"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
)
// TestWatcherWatchID tests that each watcher provides unique watchID,
// and the watched event attaches the correct watchID.
func TestWatcherWatchID(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
- defer cleanup(s, b, tmpPath)
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := New(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
w := s.NewWatchStream()
defer w.Close()
@@ -81,9 +84,9 @@ func TestWatcherWatchID(t *testing.T) {
}
func TestWatcherRequestsCustomID(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
- defer cleanup(s, b, tmpPath)
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := New(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
w := s.NewWatchStream()
defer w.Close()
@@ -106,7 +109,7 @@ func TestWatcherRequestsCustomID(t *testing.T) {
for i, tcase := range tt {
id, err := w.Watch(tcase.givenID, []byte("foo"), nil, 0)
if tcase.expectedErr != nil || err != nil {
- if err != tcase.expectedErr {
+ if !errors.Is(err, tcase.expectedErr) {
t.Errorf("expected get error %q in test case %q, got %q", tcase.expectedErr, i, err)
}
} else if tcase.expectedID != id {
@@ -118,9 +121,9 @@ func TestWatcherRequestsCustomID(t *testing.T) {
// TestWatcherWatchPrefix tests if Watch operation correctly watches
// and returns events with matching prefixes.
func TestWatcherWatchPrefix(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
- defer cleanup(s, b, tmpPath)
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := New(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
w := s.NewWatchStream()
defer w.Close()
@@ -192,17 +195,17 @@ func TestWatcherWatchPrefix(t *testing.T) {
// TestWatcherWatchWrongRange ensures that watcher with wrong 'end' range
// does not create watcher, which panics when canceling in range tree.
func TestWatcherWatchWrongRange(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
- defer cleanup(s, b, tmpPath)
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := New(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
w := s.NewWatchStream()
defer w.Close()
- if _, err := w.Watch(0, []byte("foa"), []byte("foa"), 1); err != ErrEmptyWatcherRange {
+ if _, err := w.Watch(0, []byte("foa"), []byte("foa"), 1); !errors.Is(err, ErrEmptyWatcherRange) {
t.Fatalf("key == end range given; expected ErrEmptyWatcherRange, got %+v", err)
}
- if _, err := w.Watch(0, []byte("fob"), []byte("foa"), 1); err != ErrEmptyWatcherRange {
+ if _, err := w.Watch(0, []byte("fob"), []byte("foa"), 1); !errors.Is(err, ErrEmptyWatcherRange) {
t.Fatalf("key > end range given; expected ErrEmptyWatcherRange, got %+v", err)
}
// watch request with 'WithFromKey' has empty-byte range end
@@ -213,10 +216,11 @@ func TestWatcherWatchWrongRange(t *testing.T) {
func TestWatchDeleteRange(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
+ s := New(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer func() {
- s.store.Close()
+ b.Close()
+ s.Close()
os.Remove(tmpPath)
}()
@@ -251,9 +255,9 @@ func TestWatchDeleteRange(t *testing.T) {
// TestWatchStreamCancelWatcherByID ensures cancel calls the cancel func of the watcher
// with given id inside watchStream.
func TestWatchStreamCancelWatcherByID(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
- defer cleanup(s, b, tmpPath)
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := New(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
w := s.NewWatchStream()
defer w.Close()
@@ -275,7 +279,7 @@ func TestWatchStreamCancelWatcherByID(t *testing.T) {
for i, tt := range tests {
gerr := w.Cancel(tt.cancelID)
- if gerr != tt.werr {
+ if !errors.Is(gerr, tt.werr) {
t.Errorf("#%d: err = %v, want %v", i, gerr, tt.werr)
}
}
@@ -288,22 +292,10 @@ func TestWatchStreamCancelWatcherByID(t *testing.T) {
// TestWatcherRequestProgress ensures synced watcher can correctly
// report its correct progress.
func TestWatcherRequestProgress(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
- // manually create watchableStore instead of newWatchableStore
- // because newWatchableStore automatically calls syncWatchers
- // method to sync watchers in unsynced map. We want to keep watchers
- // in unsynced to test if syncWatchers works as expected.
- s := &watchableStore{
- store: NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}),
- unsynced: newWatcherGroup(),
- synced: newWatcherGroup(),
- }
-
- defer func() {
- s.store.Close()
- os.Remove(tmpPath)
- }()
+ defer cleanup(s, b)
testKey := []byte("foo")
notTestKey := []byte("bad")
@@ -328,7 +320,7 @@ func TestWatcherRequestProgress(t *testing.T) {
default:
}
- s.syncWatchers()
+ s.syncWatchers([]mvccpb.Event{})
w.RequestProgress(id)
wrs := WatchResponse{WatchID: id, Revision: 2}
@@ -342,10 +334,49 @@ func TestWatcherRequestProgress(t *testing.T) {
}
}
+func TestWatcherRequestProgressAll(t *testing.T) {
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+
+ defer cleanup(s, b)
+
+ testKey := []byte("foo")
+ notTestKey := []byte("bad")
+ testValue := []byte("bar")
+ s.Put(testKey, testValue, lease.NoLease)
+
+ // Create watch stream with watcher. We will not actually get
+ // any notifications on it specifically, but there needs to be
+ // at least one Watch for progress notifications to get
+ // generated.
+ w := s.NewWatchStream()
+ w.Watch(0, notTestKey, nil, 1)
+
+ w.RequestProgressAll()
+ select {
+ case resp := <-w.Chan():
+ t.Fatalf("unexpected %+v", resp)
+ default:
+ }
+
+ s.syncWatchers([]mvccpb.Event{})
+
+ w.RequestProgressAll()
+ wrs := WatchResponse{WatchID: clientv3.InvalidWatchID, Revision: 2}
+ select {
+ case resp := <-w.Chan():
+ if !reflect.DeepEqual(resp, wrs) {
+ t.Fatalf("got %+v, expect %+v", resp, wrs)
+ }
+ case <-time.After(time.Second):
+ t.Fatal("failed to receive progress")
+ }
+}
+
func TestWatcherWatchWithFilter(t *testing.T) {
- b, tmpPath := betesting.NewDefaultTmpBackend(t)
- s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
- defer cleanup(s, b, tmpPath)
+ b, _ := betesting.NewDefaultTmpBackend(t)
+ s := New(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
+ defer cleanup(s, b)
w := s.NewWatchStream()
defer w.Close()
diff --git a/server/storage/quota.go b/server/storage/quota.go
new file mode 100644
index 00000000000..f9ff72d7e82
--- /dev/null
+++ b/server/storage/quota.go
@@ -0,0 +1,176 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "sync"
+
+ humanize "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+const (
+ // DefaultQuotaBytes is the number of bytes the backend Size may
+ // consume before exceeding the space quota.
+ DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB
+ // MaxQuotaBytes is the maximum number of bytes suggested for a backend
+ // quota. A larger quota may lead to degraded performance.
+ MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB
+)
+
+// Quota represents an arbitrary quota against arbitrary requests. Each request
+// costs some charge; if there is not enough remaining charge, then there are
+// too few resources available within the quota to apply the request.
+type Quota interface {
+ // Available judges whether the given request fits within the quota.
+ Available(req any) bool
+ // Cost computes the charge against the quota for a given request.
+ Cost(req any) int
+ // Remaining is the amount of charge left for the quota.
+ Remaining() int64
+}
+
+type passthroughQuota struct{}
+
+func (*passthroughQuota) Available(any) bool { return true }
+func (*passthroughQuota) Cost(any) int { return 0 }
+func (*passthroughQuota) Remaining() int64 { return 1 }
+
+type BackendQuota struct {
+ be backend.Backend
+ maxBackendBytes int64
+}
+
+const (
+ // leaseOverhead is an estimate for the cost of storing a lease
+ leaseOverhead = 64
+ // kvOverhead is an estimate for the cost of storing a key's metadata
+ kvOverhead = 256
+)
+
+var (
+ // only log once
+ quotaLogOnce sync.Once
+
+ DefaultQuotaSize = humanize.Bytes(uint64(DefaultQuotaBytes))
+ maxQuotaSize = humanize.Bytes(uint64(MaxQuotaBytes))
+)
+
+// NewBackendQuota creates a quota layer with the given storage limit.
+func NewBackendQuota(lg *zap.Logger, quotaBackendBytesCfg int64, be backend.Backend, name string) Quota {
+ quotaBackendBytes.Set(float64(quotaBackendBytesCfg))
+ if quotaBackendBytesCfg < 0 {
+ // disable quotas if negative
+ quotaLogOnce.Do(func() {
+ lg.Info(
+ "disabled backend quota",
+ zap.String("quota-name", name),
+ zap.Int64("quota-size-bytes", quotaBackendBytesCfg),
+ )
+ })
+ return &passthroughQuota{}
+ }
+
+ if quotaBackendBytesCfg == 0 {
+ // use default size if no quota size given
+ quotaLogOnce.Do(func() {
+ if lg != nil {
+ lg.Info(
+ "enabled backend quota with default value",
+ zap.String("quota-name", name),
+ zap.Int64("quota-size-bytes", DefaultQuotaBytes),
+ zap.String("quota-size", DefaultQuotaSize),
+ )
+ }
+ })
+ quotaBackendBytes.Set(float64(DefaultQuotaBytes))
+ return &BackendQuota{be, DefaultQuotaBytes}
+ }
+
+ quotaLogOnce.Do(func() {
+ if quotaBackendBytesCfg > MaxQuotaBytes {
+ lg.Warn(
+ "quota exceeds the maximum value",
+ zap.String("quota-name", name),
+ zap.Int64("quota-size-bytes", quotaBackendBytesCfg),
+ zap.String("quota-size", humanize.Bytes(uint64(quotaBackendBytesCfg))),
+ zap.Int64("quota-maximum-size-bytes", MaxQuotaBytes),
+ zap.String("quota-maximum-size", maxQuotaSize),
+ )
+ }
+ lg.Info(
+ "enabled backend quota",
+ zap.String("quota-name", name),
+ zap.Int64("quota-size-bytes", quotaBackendBytesCfg),
+ zap.String("quota-size", humanize.Bytes(uint64(quotaBackendBytesCfg))),
+ )
+ })
+ return &BackendQuota{be, quotaBackendBytesCfg}
+}
+
+func (b *BackendQuota) Available(v any) bool {
+ cost := b.Cost(v)
+ // if there are no mutating requests, it's safe to pass through
+ if cost == 0 {
+ return true
+ }
+ // TODO: maybe optimize Backend.Size()
+ return b.be.Size()+int64(cost) < b.maxBackendBytes
+}
+
+func (b *BackendQuota) Cost(v any) int {
+ switch r := v.(type) {
+ case *pb.PutRequest:
+ return costPut(r)
+ case *pb.TxnRequest:
+ return costTxn(r)
+ case *pb.LeaseGrantRequest:
+ return leaseOverhead
+ default:
+ panic("unexpected cost")
+ }
+}
+
+func costPut(r *pb.PutRequest) int { return kvOverhead + len(r.Key) + len(r.Value) }
+
+func costTxnReq(u *pb.RequestOp) int {
+ r := u.GetRequestPut()
+ if r == nil {
+ return 0
+ }
+ return costPut(r)
+}
+
+func costTxn(r *pb.TxnRequest) int {
+ sizeSuccess := 0
+ for _, u := range r.Success {
+ sizeSuccess += costTxnReq(u)
+ }
+ sizeFailure := 0
+ for _, u := range r.Failure {
+ sizeFailure += costTxnReq(u)
+ }
+ if sizeFailure > sizeSuccess {
+ return sizeFailure
+ }
+ return sizeSuccess
+}
+
+func (b *BackendQuota) Remaining() int64 {
+ return b.maxBackendBytes - b.be.Size()
+}
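As a worked example of the cost model above: a PutRequest with a 3-byte key and a 5-byte value is charged kvOverhead + 3 + 5 = 264 bytes, and a TxnRequest is charged the more expensive of its success/failure branches. The standalone sketch below is illustrative only; put, costPut and costTxn here are simplified mirrors of this file's helpers operating on plain byte slices, not the etcdserverpb types.

// Standalone sketch: the same arithmetic as costPut and costTxn above.
package main

import "fmt"

const kvOverhead = 256 // copied from quota.go: estimated metadata cost per key

type put struct{ key, value []byte }

func costPut(p put) int { return kvOverhead + len(p.key) + len(p.value) }

// costTxn charges the more expensive branch, since the quota check happens
// before we know which branch will execute.
func costTxn(success, failure []put) int {
	s, f := 0, 0
	for _, p := range success {
		s += costPut(p)
	}
	for _, p := range failure {
		f += costPut(p)
	}
	if f > s {
		return f
	}
	return s
}

func main() {
	p := put{key: []byte("foo"), value: []byte("hello")}
	fmt.Println(costPut(p)) // 256 + 3 + 5 = 264

	txn := costTxn([]put{p}, []put{p, p})
	fmt.Println(txn) // failure branch is larger: 2 * 264 = 528
}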
diff --git a/server/storage/schema/actions.go b/server/storage/schema/actions.go
new file mode 100644
index 00000000000..8d18cee8638
--- /dev/null
+++ b/server/storage/schema/actions.go
@@ -0,0 +1,93 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+type action interface {
+ // unsafeDo executes the action and returns a revert action that, when
+ // executed, restores the state from before.
+ unsafeDo(tx backend.UnsafeReadWriter) (revert action, err error)
+}
+
+type setKeyAction struct {
+ Bucket backend.Bucket
+ FieldName []byte
+ FieldValue []byte
+}
+
+func (a setKeyAction) unsafeDo(tx backend.UnsafeReadWriter) (action, error) {
+ revert := restoreFieldValueAction(tx, a.Bucket, a.FieldName)
+ tx.UnsafePut(a.Bucket, a.FieldName, a.FieldValue)
+ return revert, nil
+}
+
+type deleteKeyAction struct {
+ Bucket backend.Bucket
+ FieldName []byte
+}
+
+func (a deleteKeyAction) unsafeDo(tx backend.UnsafeReadWriter) (action, error) {
+ revert := restoreFieldValueAction(tx, a.Bucket, a.FieldName)
+ tx.UnsafeDelete(a.Bucket, a.FieldName)
+ return revert, nil
+}
+
+func restoreFieldValueAction(tx backend.UnsafeReader, bucket backend.Bucket, fieldName []byte) action {
+ _, vs := tx.UnsafeRange(bucket, fieldName, nil, 1)
+ if len(vs) == 1 {
+ return &setKeyAction{
+ Bucket: bucket,
+ FieldName: fieldName,
+ FieldValue: vs[0],
+ }
+ }
+ return &deleteKeyAction{
+ Bucket: bucket,
+ FieldName: fieldName,
+ }
+}
+
+type ActionList []action
+
+// unsafeExecute executes actions one by one. If one of the actions returns an
+// error, the already executed actions are reverted.
+func (as ActionList) unsafeExecute(lg *zap.Logger, tx backend.UnsafeReadWriter) error {
+ revertActions := make(ActionList, 0, len(as))
+ for _, a := range as {
+ revert, err := a.unsafeDo(tx)
+ if err != nil {
+ revertActions.unsafeExecuteInReversedOrder(lg, tx)
+ return err
+ }
+ revertActions = append(revertActions, revert)
+ }
+ return nil
+}
+
+// unsafeExecuteInReversedOrder executes actions in reversed order. It panics
+// on an action error. Should be used when reverting.
+func (as ActionList) unsafeExecuteInReversedOrder(lg *zap.Logger, tx backend.UnsafeReadWriter) {
+ for j := len(as) - 1; j >= 0; j-- {
+ _, err := as[j].unsafeDo(tx)
+ if err != nil {
+ lg.Panic("Cannot recover from revert error", zap.Error(err))
+ }
+ }
+}
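The reverse order in unsafeExecuteInReversedOrder matters when several actions touch the same key: each revert must restore the value its own action overwrote, newest first. Below is a standalone sketch of that invariant on a plain map (illustrative only; set and the revert closures are hypothetical stand-ins for setKeyAction and restoreFieldValueAction).

// Standalone sketch: why reverts must run newest-first.
package main

import "fmt"

type kv = map[string]string

// set writes a value and returns a revert closure that restores the previous
// state, mirroring setKeyAction/restoreFieldValueAction.
func set(m kv, k, v string) func() {
	old, existed := m[k]
	m[k] = v
	return func() {
		if existed {
			m[k] = old
		} else {
			delete(m, k)
		}
	}
}

func main() {
	m := kv{}
	var reverts []func()

	reverts = append(reverts, set(m, "/test", "1"))
	reverts = append(reverts, set(m, "/test", "2"))
	fmt.Println(m) // map[/test:2]

	// Apply reverts in reversed order, like unsafeExecuteInReversedOrder:
	// "2" -> "1" first, then "1" -> deleted, recovering the empty state.
	for i := len(reverts) - 1; i >= 0; i-- {
		reverts[i]()
	}
	fmt.Println(m) // map[]
}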
diff --git a/server/storage/schema/actions_test.go b/server/storage/schema/actions_test.go
new file mode 100644
index 00000000000..d3cb812c11e
--- /dev/null
+++ b/server/storage/schema/actions_test.go
@@ -0,0 +1,172 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "errors"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+)
+
+func TestActionIsReversible(t *testing.T) {
+ tcs := []struct {
+ name string
+ action action
+ state map[string]string
+ }{
+ {
+ name: "setKeyAction empty state",
+ action: setKeyAction{
+ Bucket: Meta,
+ FieldName: []byte("/test"),
+ FieldValue: []byte("1"),
+ },
+ },
+ {
+ name: "setKeyAction with key",
+ action: setKeyAction{
+ Bucket: Meta,
+ FieldName: []byte("/test"),
+ FieldValue: []byte("1"),
+ },
+ state: map[string]string{"/test": "2"},
+ },
+ {
+ name: "deleteKeyAction empty state",
+ action: deleteKeyAction{
+ Bucket: Meta,
+ FieldName: []byte("/test"),
+ },
+ },
+ {
+ name: "deleteKeyAction with key",
+ action: deleteKeyAction{
+ Bucket: Meta,
+ FieldName: []byte("/test"),
+ },
+ state: map[string]string{"/test": "2"},
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ be, _ := betesting.NewTmpBackend(t, time.Microsecond, 10)
+ defer be.Close()
+ tx := be.BatchTx()
+ if tx == nil {
+ t.Fatal("batch tx is nil")
+ }
+ tx.Lock()
+ defer tx.Unlock()
+ UnsafeCreateMetaBucket(tx)
+ putKeyValues(tx, Meta, tc.state)
+
+ assertBucketState(t, tx, Meta, tc.state)
+ reverse, err := tc.action.unsafeDo(tx)
+ if err != nil {
+ t.Errorf("Failed to upgrade, err: %v", err)
+ }
+ _, err = reverse.unsafeDo(tx)
+ if err != nil {
+ t.Errorf("Failed to downgrade, err: %v", err)
+ }
+ assertBucketState(t, tx, Meta, tc.state)
+ })
+ }
+}
+
+func TestActionListRevert(t *testing.T) {
+ tcs := []struct {
+ name string
+
+ actions ActionList
+ expectState map[string]string
+ expectError error
+ }{
+ {
+ name: "Apply multiple actions",
+ actions: ActionList{
+ setKeyAction{Meta, []byte("/testKey1"), []byte("testValue1")},
+ setKeyAction{Meta, []byte("/testKey2"), []byte("testValue2")},
+ },
+ expectState: map[string]string{"/testKey1": "testValue1", "/testKey2": "testValue2"},
+ },
+ {
+ name: "Broken action should result in changes reverted",
+ actions: ActionList{
+ setKeyAction{Meta, []byte("/testKey1"), []byte("testValue1")},
+ brokenAction{},
+ setKeyAction{Meta, []byte("/testKey2"), []byte("testValue2")},
+ },
+ expectState: map[string]string{},
+ expectError: errBrokenAction,
+ },
+ }
+
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+
+ be, _ := betesting.NewTmpBackend(t, time.Microsecond, 10)
+ defer be.Close()
+ tx := be.BatchTx()
+ if tx == nil {
+ t.Fatal("batch tx is nil")
+ }
+ tx.Lock()
+ defer tx.Unlock()
+
+ UnsafeCreateMetaBucket(tx)
+ err := tc.actions.unsafeExecute(lg, tx)
+ if !errors.Is(err, tc.expectError) {
+ t.Errorf("Unexpected error or lack thereof, expected: %v, got: %v", tc.expectError, err)
+ }
+ assertBucketState(t, tx, Meta, tc.expectState)
+ })
+ }
+}
+
+type brokenAction struct{}
+
+var errBrokenAction = fmt.Errorf("broken action error")
+
+func (c brokenAction) unsafeDo(tx backend.UnsafeReadWriter) (action, error) {
+ return nil, errBrokenAction
+}
+
+func putKeyValues(tx backend.UnsafeWriter, bucket backend.Bucket, kvs map[string]string) {
+ for k, v := range kvs {
+ tx.UnsafePut(bucket, []byte(k), []byte(v))
+ }
+}
+
+func assertBucketState(t *testing.T, tx backend.UnsafeReadWriter, bucket backend.Bucket, expect map[string]string) {
+ t.Helper()
+ got := map[string]string{}
+ ks, vs := tx.UnsafeRange(bucket, []byte("\x00"), []byte("\xff"), 0)
+ for i := 0; i < len(ks); i++ {
+ got[string(ks[i])] = string(vs[i])
+ }
+ if expect == nil {
+ expect = map[string]string{}
+ }
+ assert.Equal(t, expect, got)
+}
diff --git a/server/storage/schema/alarm.go b/server/storage/schema/alarm.go
new file mode 100644
index 00000000000..6e81d0f4671
--- /dev/null
+++ b/server/storage/schema/alarm.go
@@ -0,0 +1,97 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+type alarmBackend struct {
+ lg *zap.Logger
+ be backend.Backend
+}
+
+func NewAlarmBackend(lg *zap.Logger, be backend.Backend) *alarmBackend {
+ return &alarmBackend{
+ lg: lg,
+ be: be,
+ }
+}
+
+func (s *alarmBackend) CreateAlarmBucket() {
+ tx := s.be.BatchTx()
+ tx.LockOutsideApply()
+ defer tx.Unlock()
+ tx.UnsafeCreateBucket(Alarm)
+}
+
+func (s *alarmBackend) MustPutAlarm(alarm *etcdserverpb.AlarmMember) {
+ tx := s.be.BatchTx()
+ tx.LockInsideApply()
+ defer tx.Unlock()
+ s.mustUnsafePutAlarm(tx, alarm)
+}
+
+func (s *alarmBackend) mustUnsafePutAlarm(tx backend.UnsafeWriter, alarm *etcdserverpb.AlarmMember) {
+ v, err := alarm.Marshal()
+ if err != nil {
+ s.lg.Panic("failed to marshal alarm member", zap.Error(err))
+ }
+
+ tx.UnsafePut(Alarm, v, nil)
+}
+
+func (s *alarmBackend) MustDeleteAlarm(alarm *etcdserverpb.AlarmMember) {
+ tx := s.be.BatchTx()
+ tx.LockInsideApply()
+ defer tx.Unlock()
+ s.mustUnsafeDeleteAlarm(tx, alarm)
+}
+
+func (s *alarmBackend) mustUnsafeDeleteAlarm(tx backend.UnsafeWriter, alarm *etcdserverpb.AlarmMember) {
+ v, err := alarm.Marshal()
+ if err != nil {
+ s.lg.Panic("failed to marshal alarm member", zap.Error(err))
+ }
+
+ tx.UnsafeDelete(Alarm, v)
+}
+
+func (s *alarmBackend) GetAllAlarms() ([]*etcdserverpb.AlarmMember, error) {
+ tx := s.be.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ return s.unsafeGetAllAlarms(tx)
+}
+
+func (s *alarmBackend) unsafeGetAllAlarms(tx backend.UnsafeReader) ([]*etcdserverpb.AlarmMember, error) {
+ var ms []*etcdserverpb.AlarmMember
+ err := tx.UnsafeForEach(Alarm, func(k, v []byte) error {
+ var m etcdserverpb.AlarmMember
+ if err := m.Unmarshal(k); err != nil {
+ return err
+ }
+ ms = append(ms, &m)
+ return nil
+ })
+ return ms, err
+}
+
+func (s alarmBackend) ForceCommit() {
+ s.be.ForceCommit()
+}
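+
+// Illustrative usage sketch (assuming an existing *zap.Logger `lg` and
+// backend.Backend `be`). Note that the marshaled AlarmMember is stored as the
+// bucket key with a nil value, which is why unsafeGetAllAlarms unmarshals keys
+// rather than values.
+//
+//    ab := NewAlarmBackend(lg, be)
+//    ab.CreateAlarmBucket()
+//    ab.MustPutAlarm(&etcdserverpb.AlarmMember{MemberID: 1, Alarm: etcdserverpb.AlarmType_NOSPACE})
+//    alarms, err := ab.GetAllAlarms()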
diff --git a/server/storage/schema/auth.go b/server/storage/schema/auth.go
new file mode 100644
index 00000000000..96ca881c5c8
--- /dev/null
+++ b/server/storage/schema/auth.go
@@ -0,0 +1,152 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+const (
+ revBytesLen = 8
+)
+
+var (
+ authEnabled = []byte{1}
+ authDisabled = []byte{0}
+)
+
+type authBackend struct {
+ be backend.Backend
+ lg *zap.Logger
+}
+
+var _ auth.AuthBackend = (*authBackend)(nil)
+
+func NewAuthBackend(lg *zap.Logger, be backend.Backend) *authBackend {
+ return &authBackend{
+ be: be,
+ lg: lg,
+ }
+}
+
+func (abe *authBackend) CreateAuthBuckets() {
+ tx := abe.be.BatchTx()
+ tx.LockOutsideApply()
+ defer tx.Unlock()
+ tx.UnsafeCreateBucket(Auth)
+ tx.UnsafeCreateBucket(AuthUsers)
+ tx.UnsafeCreateBucket(AuthRoles)
+}
+
+func (abe *authBackend) ForceCommit() {
+ abe.be.ForceCommit()
+}
+
+func (abe *authBackend) ReadTx() auth.AuthReadTx {
+ return &authReadTx{tx: abe.be.ReadTx(), lg: abe.lg}
+}
+
+func (abe *authBackend) BatchTx() auth.AuthBatchTx {
+ return &authBatchTx{tx: abe.be.BatchTx(), lg: abe.lg}
+}
+
+type authReadTx struct {
+ tx backend.ReadTx
+ lg *zap.Logger
+}
+
+type authBatchTx struct {
+ tx backend.BatchTx
+ lg *zap.Logger
+}
+
+var (
+ _ auth.AuthReadTx = (*authReadTx)(nil)
+ _ auth.AuthBatchTx = (*authBatchTx)(nil)
+)
+
+func (atx *authBatchTx) UnsafeSaveAuthEnabled(enabled bool) {
+ if enabled {
+ atx.tx.UnsafePut(Auth, AuthEnabledKeyName, authEnabled)
+ } else {
+ atx.tx.UnsafePut(Auth, AuthEnabledKeyName, authDisabled)
+ }
+}
+
+func (atx *authBatchTx) UnsafeSaveAuthRevision(rev uint64) {
+ revBytes := make([]byte, revBytesLen)
+ binary.BigEndian.PutUint64(revBytes, rev)
+ atx.tx.UnsafePut(Auth, AuthRevisionKeyName, revBytes)
+}
+
+func (atx *authBatchTx) UnsafeReadAuthEnabled() bool {
+ return unsafeReadAuthEnabled(atx.tx)
+}
+
+func (atx *authBatchTx) UnsafeReadAuthRevision() uint64 {
+ return unsafeReadAuthRevision(atx.tx)
+}
+
+func (atx *authBatchTx) Lock() {
+ atx.tx.LockInsideApply()
+}
+
+func (atx *authBatchTx) Unlock() {
+ atx.tx.Unlock()
+ // Commit() is called defensively. If the number of pending writes doesn't exceed batchLimit,
+ // a ReadTx could otherwise miss writes issued by its predecessor BatchTx.
+ atx.tx.Commit()
+}
+
+func (atx *authReadTx) UnsafeReadAuthEnabled() bool {
+ return unsafeReadAuthEnabled(atx.tx)
+}
+
+func unsafeReadAuthEnabled(tx backend.UnsafeReader) bool {
+ _, vs := tx.UnsafeRange(Auth, AuthEnabledKeyName, nil, 0)
+ if len(vs) == 1 {
+ if bytes.Equal(vs[0], authEnabled) {
+ return true
+ }
+ }
+ return false
+}
+
+func (atx *authReadTx) UnsafeReadAuthRevision() uint64 {
+ return unsafeReadAuthRevision(atx.tx)
+}
+
+func unsafeReadAuthRevision(tx backend.UnsafeReader) uint64 {
+ _, vs := tx.UnsafeRange(Auth, AuthRevisionKeyName, nil, 0)
+ if len(vs) != 1 {
+ // this can happen in the initialization phase
+ return 0
+ }
+ return binary.BigEndian.Uint64(vs[0])
+}
+
+func (atx *authReadTx) RLock() {
+ atx.tx.RLock()
+}
+
+func (atx *authReadTx) RUnlock() {
+ atx.tx.RUnlock()
+}
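+
+// Illustrative usage sketch (assuming an existing *zap.Logger `lg` and
+// backend.Backend `be`): enabling auth and bumping the auth revision. The
+// revision is stored as an 8-byte big-endian integer under the Auth bucket.
+//
+//    abe := NewAuthBackend(lg, be)
+//    abe.CreateAuthBuckets()
+//    tx := abe.BatchTx()
+//    tx.Lock()
+//    tx.UnsafeSaveAuthEnabled(true)
+//    tx.UnsafeSaveAuthRevision(1)
+//    tx.Unlock()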
diff --git a/server/storage/schema/auth_roles.go b/server/storage/schema/auth_roles.go
new file mode 100644
index 00000000000..6161a0885a9
--- /dev/null
+++ b/server/storage/schema/auth_roles.go
@@ -0,0 +1,105 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/authpb"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+func UnsafeCreateAuthRolesBucket(tx backend.UnsafeWriter) {
+ tx.UnsafeCreateBucket(AuthRoles)
+}
+
+func (abe *authBackend) GetRole(roleName string) *authpb.Role {
+ tx := abe.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ return tx.UnsafeGetRole(roleName)
+}
+
+func (atx *authBatchTx) UnsafeGetRole(roleName string) *authpb.Role {
+ return unsafeGetRole(atx.lg, atx.tx, roleName)
+}
+
+func (abe *authBackend) GetAllRoles() []*authpb.Role {
+ tx := abe.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+ return tx.UnsafeGetAllRoles()
+}
+
+func (atx *authBatchTx) UnsafeGetAllRoles() []*authpb.Role {
+ return unsafeGetAllRoles(atx.lg, atx.tx)
+}
+
+func (atx *authBatchTx) UnsafePutRole(role *authpb.Role) {
+ b, err := role.Marshal()
+ if err != nil {
+ atx.lg.Panic(
+ "failed to marshal 'authpb.Role'",
+ zap.String("role-name", string(role.Name)),
+ zap.Error(err),
+ )
+ }
+
+ atx.tx.UnsafePut(AuthRoles, role.Name, b)
+}
+
+func (atx *authBatchTx) UnsafeDeleteRole(rolename string) {
+ atx.tx.UnsafeDelete(AuthRoles, []byte(rolename))
+}
+
+func (atx *authReadTx) UnsafeGetRole(roleName string) *authpb.Role {
+ return unsafeGetRole(atx.lg, atx.tx, roleName)
+}
+
+func unsafeGetRole(lg *zap.Logger, tx backend.UnsafeReader, roleName string) *authpb.Role {
+ _, vs := tx.UnsafeRange(AuthRoles, []byte(roleName), nil, 0)
+ if len(vs) == 0 {
+ return nil
+ }
+
+ role := &authpb.Role{}
+ err := role.Unmarshal(vs[0])
+ if err != nil {
+ lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err))
+ }
+ return role
+}
+
+func (atx *authReadTx) UnsafeGetAllRoles() []*authpb.Role {
+ return unsafeGetAllRoles(atx.lg, atx.tx)
+}
+
+func unsafeGetAllRoles(lg *zap.Logger, tx backend.UnsafeReader) []*authpb.Role {
+ _, vs := tx.UnsafeRange(AuthRoles, []byte{0}, []byte{0xff}, -1)
+ if len(vs) == 0 {
+ return nil
+ }
+
+ roles := make([]*authpb.Role, len(vs))
+ for i := range vs {
+ role := &authpb.Role{}
+ err := role.Unmarshal(vs[i])
+ if err != nil {
+ lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err))
+ }
+ roles[i] = role
+ }
+ return roles
+}
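+
+// Illustrative usage sketch (assuming an authBackend `abe`): role names are used
+// directly as keys in the AuthRoles bucket, so a later put with the same name
+// overrides the previous record.
+//
+//    tx := abe.BatchTx()
+//    tx.Lock()
+//    tx.UnsafePutRole(&authpb.Role{
+//        Name:          []byte("reader"),
+//        KeyPermission: []*authpb.Permission{{PermType: authpb.READ, Key: []byte("foo")}},
+//    })
+//    tx.Unlock()
+//    role := abe.GetRole("reader")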
diff --git a/server/storage/schema/auth_roles_test.go b/server/storage/schema/auth_roles_test.go
new file mode 100644
index 00000000000..7f90663d854
--- /dev/null
+++ b/server/storage/schema/auth_roles_test.go
@@ -0,0 +1,230 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/api/v3/authpb"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+)
+
+func TestGetAllRoles(t *testing.T) {
+ tcs := []struct {
+ name string
+ setup func(tx auth.UnsafeAuthWriter)
+ want []*authpb.Role
+ }{
+ {
+ name: "Empty by default",
+ setup: func(tx auth.UnsafeAuthWriter) {},
+ want: nil,
+ },
+ {
+ name: "Returns data put before",
+ setup: func(tx auth.UnsafeAuthWriter) {
+ tx.UnsafePutRole(&authpb.Role{
+ Name: []byte("readKey"),
+ KeyPermission: []*authpb.Permission{
+ {
+ PermType: authpb.READ,
+ Key: []byte("key"),
+ RangeEnd: []byte("end"),
+ },
+ },
+ })
+ },
+ want: []*authpb.Role{
+ {
+ Name: []byte("readKey"),
+ KeyPermission: []*authpb.Permission{
+ {
+ PermType: authpb.READ,
+ Key: []byte("key"),
+ RangeEnd: []byte("end"),
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Skips deleted",
+ setup: func(tx auth.UnsafeAuthWriter) {
+ tx.UnsafePutRole(&authpb.Role{
+ Name: []byte("role1"),
+ })
+ tx.UnsafePutRole(&authpb.Role{
+ Name: []byte("role2"),
+ })
+ tx.UnsafeDeleteRole("role1")
+ },
+ want: []*authpb.Role{{Name: []byte("role2")}},
+ },
+ {
+ name: "Returns data overridden by put",
+ setup: func(tx auth.UnsafeAuthWriter) {
+ tx.UnsafePutRole(&authpb.Role{
+ Name: []byte("role1"),
+ KeyPermission: []*authpb.Permission{
+ {
+ PermType: authpb.READ,
+ },
+ },
+ })
+ tx.UnsafePutRole(&authpb.Role{
+ Name: []byte("role2"),
+ })
+ tx.UnsafePutRole(&authpb.Role{
+ Name: []byte("role1"),
+ KeyPermission: []*authpb.Permission{
+ {
+ PermType: authpb.READWRITE,
+ },
+ },
+ })
+ },
+ want: []*authpb.Role{
+ {Name: []byte("role1"), KeyPermission: []*authpb.Permission{{PermType: authpb.READWRITE}}},
+ {Name: []byte("role2")},
+ },
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10)
+ abe := NewAuthBackend(lg, be)
+ abe.CreateAuthBuckets()
+
+ tx := abe.BatchTx()
+ tx.Lock()
+ tc.setup(tx)
+ tx.Unlock()
+
+ abe.ForceCommit()
+ be.Close()
+
+ be2 := backend.NewDefaultBackend(lg, tmpPath)
+ defer be2.Close()
+ abe2 := NewAuthBackend(lg, be2)
+ users := abe2.GetAllRoles()
+
+ assert.Equal(t, tc.want, users)
+ })
+ }
+}
+
+func TestGetRole(t *testing.T) {
+ tcs := []struct {
+ name string
+ setup func(tx auth.UnsafeAuthWriter)
+ want *authpb.Role
+ }{
+ {
+ name: "Returns nil for missing",
+ setup: func(tx auth.UnsafeAuthWriter) {},
+ want: nil,
+ },
+ {
+ name: "Returns data put before",
+ setup: func(tx auth.UnsafeAuthWriter) {
+ tx.UnsafePutRole(&authpb.Role{
+ Name: []byte("role1"),
+ KeyPermission: []*authpb.Permission{
+ {
+ PermType: authpb.READ,
+ Key: []byte("key"),
+ RangeEnd: []byte("end"),
+ },
+ },
+ })
+ },
+ want: &authpb.Role{
+ Name: []byte("role1"),
+ KeyPermission: []*authpb.Permission{
+ {
+ PermType: authpb.READ,
+ Key: []byte("key"),
+ RangeEnd: []byte("end"),
+ },
+ },
+ },
+ },
+ {
+ name: "Return nil for deleted",
+ setup: func(tx auth.UnsafeAuthWriter) {
+ tx.UnsafePutRole(&authpb.Role{
+ Name: []byte("role1"),
+ })
+ tx.UnsafeDeleteRole("role1")
+ },
+ want: nil,
+ },
+ {
+ name: "Returns data overridden by put",
+ setup: func(tx auth.UnsafeAuthWriter) {
+ tx.UnsafePutRole(&authpb.Role{
+ Name: []byte("role1"),
+ KeyPermission: []*authpb.Permission{
+ {
+ PermType: authpb.READ,
+ },
+ },
+ })
+ tx.UnsafePutRole(&authpb.Role{
+ Name: []byte("role1"),
+ KeyPermission: []*authpb.Permission{
+ {
+ PermType: authpb.READWRITE,
+ },
+ },
+ })
+ },
+ want: &authpb.Role{
+ Name: []byte("role1"),
+ KeyPermission: []*authpb.Permission{{PermType: authpb.READWRITE}},
+ },
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10)
+ abe := NewAuthBackend(lg, be)
+ abe.CreateAuthBuckets()
+
+ tx := abe.BatchTx()
+ tx.Lock()
+ tc.setup(tx)
+ tx.Unlock()
+
+ abe.ForceCommit()
+ be.Close()
+
+ be2 := backend.NewDefaultBackend(lg, tmpPath)
+ defer be2.Close()
+ abe2 := NewAuthBackend(lg, be2)
+ users := abe2.GetRole("role1")
+
+ assert.Equal(t, tc.want, users)
+ })
+ }
+}
diff --git a/server/storage/schema/auth_test.go b/server/storage/schema/auth_test.go
new file mode 100644
index 00000000000..96174e50ffc
--- /dev/null
+++ b/server/storage/schema/auth_test.go
@@ -0,0 +1,131 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "math"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+)
+
+// TestAuthEnabled ensures that UnsafeSaveAuthEnabled&UnsafeReadAuthEnabled work well together.
+func TestAuthEnabled(t *testing.T) {
+ tcs := []struct {
+ name string
+ skipSetting bool
+ setEnabled bool
+ wantEnabled bool
+ }{
+ {
+ name: "Returns true after setting true",
+ setEnabled: true,
+ wantEnabled: true,
+ },
+ {
+ name: "Returns false after setting false",
+ setEnabled: false,
+ wantEnabled: false,
+ },
+ {
+ name: "Returns false by default",
+ skipSetting: true,
+ wantEnabled: false,
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10)
+ abe := NewAuthBackend(lg, be)
+ tx := abe.BatchTx()
+ abe.CreateAuthBuckets()
+
+ tx.Lock()
+ if !tc.skipSetting {
+ tx.UnsafeSaveAuthEnabled(tc.setEnabled)
+ }
+ tx.Unlock()
+ abe.ForceCommit()
+ be.Close()
+
+ be2 := backend.NewDefaultBackend(lg, tmpPath)
+ defer be2.Close()
+ abe2 := NewAuthBackend(lg, be2)
+ tx = abe2.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+ v := tx.UnsafeReadAuthEnabled()
+
+ assert.Equal(t, tc.wantEnabled, v)
+ })
+ }
+}
+
+// TestAuthRevision ensures that UnsafeSaveAuthRevision&UnsafeReadAuthRevision work well together.
+func TestAuthRevision(t *testing.T) {
+ tcs := []struct {
+ name string
+ setRevision uint64
+ wantRevision uint64
+ }{
+ {
+ name: "Returns 0 by default",
+ wantRevision: 0,
+ },
+ {
+ name: "Returns 1 after setting 1",
+ setRevision: 1,
+ wantRevision: 1,
+ },
+ {
+ name: "Returns max int after setting max int",
+ setRevision: math.MaxUint64,
+ wantRevision: math.MaxUint64,
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10)
+ abe := NewAuthBackend(lg, be)
+ abe.CreateAuthBuckets()
+
+ if tc.setRevision != 0 {
+ tx := abe.BatchTx()
+ tx.Lock()
+ tx.UnsafeSaveAuthRevision(tc.setRevision)
+ tx.Unlock()
+ }
+ abe.ForceCommit()
+ be.Close()
+
+ be2 := backend.NewDefaultBackend(lg, tmpPath)
+ defer be2.Close()
+ abe2 := NewAuthBackend(lg, be2)
+ tx := abe2.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+ v := tx.UnsafeReadAuthRevision()
+
+ assert.Equal(t, tc.wantRevision, v)
+ })
+ }
+}
diff --git a/server/storage/schema/auth_users.go b/server/storage/schema/auth_users.go
new file mode 100644
index 00000000000..c21fa7c16d8
--- /dev/null
+++ b/server/storage/schema/auth_users.go
@@ -0,0 +1,108 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/authpb"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+func (abe *authBackend) GetUser(username string) *authpb.User {
+ tx := abe.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ return tx.UnsafeGetUser(username)
+}
+
+func (atx *authBatchTx) UnsafeGetUser(username string) *authpb.User {
+ return unsafeGetUser(atx.lg, atx.tx, username)
+}
+
+func (atx *authBatchTx) UnsafeGetAllUsers() []*authpb.User {
+ return unsafeGetAllUsers(atx.lg, atx.tx)
+}
+
+func (atx *authBatchTx) UnsafePutUser(user *authpb.User) {
+ b, err := user.Marshal()
+ if err != nil {
+ atx.lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err))
+ }
+ atx.tx.UnsafePut(AuthUsers, user.Name, b)
+}
+
+func (atx *authBatchTx) UnsafeDeleteUser(username string) {
+ atx.tx.UnsafeDelete(AuthUsers, []byte(username))
+}
+
+func (atx *authReadTx) UnsafeGetUser(username string) *authpb.User {
+ return unsafeGetUser(atx.lg, atx.tx, username)
+}
+
+func unsafeGetUser(lg *zap.Logger, tx backend.UnsafeReader, username string) *authpb.User {
+ _, vs := tx.UnsafeRange(AuthUsers, []byte(username), nil, 0)
+ if len(vs) == 0 {
+ return nil
+ }
+
+ user := &authpb.User{}
+ err := user.Unmarshal(vs[0])
+ if err != nil {
+ lg.Panic(
+ "failed to unmarshal 'authpb.User'",
+ zap.String("user-name", username),
+ zap.Error(err),
+ )
+ }
+ return user
+}
+
+func (abe *authBackend) GetAllUsers() []*authpb.User {
+ tx := abe.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ return tx.UnsafeGetAllUsers()
+}
+
+func (atx *authReadTx) UnsafeGetAllUsers() []*authpb.User {
+ return unsafeGetAllUsers(atx.lg, atx.tx)
+}
+
+func unsafeGetAllUsers(lg *zap.Logger, tx backend.UnsafeReader) []*authpb.User {
+ var vs [][]byte
+ err := tx.UnsafeForEach(AuthUsers, func(k []byte, v []byte) error {
+ vs = append(vs, v)
+ return nil
+ })
+ if err != nil {
+ lg.Panic("failed to get users",
+ zap.Error(err))
+ }
+ if len(vs) == 0 {
+ return nil
+ }
+
+ users := make([]*authpb.User, len(vs))
+ for i := range vs {
+ user := &authpb.User{}
+ err := user.Unmarshal(vs[i])
+ if err != nil {
+ lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err))
+ }
+ users[i] = user
+ }
+ return users
+}
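+
+// Illustrative usage sketch (assuming an authBackend `abe`): user names are used
+// directly as keys in the AuthUsers bucket.
+//
+//    tx := abe.BatchTx()
+//    tx.Lock()
+//    tx.UnsafePutUser(&authpb.User{Name: []byte("alice"), Roles: []string{"reader"}})
+//    tx.Unlock()
+//    user := abe.GetUser("alice")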
diff --git a/server/storage/schema/auth_users_test.go b/server/storage/schema/auth_users_test.go
new file mode 100644
index 00000000000..f109697d0af
--- /dev/null
+++ b/server/storage/schema/auth_users_test.go
@@ -0,0 +1,206 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/api/v3/authpb"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+)
+
+func TestGetAllUsers(t *testing.T) {
+ tcs := []struct {
+ name string
+ setup func(tx auth.UnsafeAuthWriter)
+ want []*authpb.User
+ }{
+ {
+ name: "Empty by default",
+ setup: func(tx auth.UnsafeAuthWriter) {},
+ want: nil,
+ },
+ {
+ name: "Returns user put before",
+ setup: func(tx auth.UnsafeAuthWriter) {
+ tx.UnsafePutUser(&authpb.User{
+ Name: []byte("alice"),
+ Password: []byte("alicePassword"),
+ Roles: []string{"aliceRole1", "aliceRole2"},
+ Options: &authpb.UserAddOptions{
+ NoPassword: true,
+ },
+ })
+ },
+ want: []*authpb.User{
+ {
+ Name: []byte("alice"),
+ Password: []byte("alicePassword"),
+ Roles: []string{"aliceRole1", "aliceRole2"},
+ Options: &authpb.UserAddOptions{
+ NoPassword: true,
+ },
+ },
+ },
+ },
+ {
+ name: "Skips deleted user",
+ setup: func(tx auth.UnsafeAuthWriter) {
+ tx.UnsafePutUser(&authpb.User{
+ Name: []byte("alice"),
+ })
+ tx.UnsafePutUser(&authpb.User{
+ Name: []byte("bob"),
+ })
+ tx.UnsafeDeleteUser("alice")
+ },
+ want: []*authpb.User{{Name: []byte("bob")}},
+ },
+ {
+ name: "Returns data overridden by put",
+ setup: func(tx auth.UnsafeAuthWriter) {
+ tx.UnsafePutUser(&authpb.User{
+ Name: []byte("alice"),
+ Password: []byte("oldPassword"),
+ })
+ tx.UnsafePutUser(&authpb.User{
+ Name: []byte("bob"),
+ })
+ tx.UnsafePutUser(&authpb.User{
+ Name: []byte("alice"),
+ Password: []byte("newPassword"),
+ })
+ },
+ want: []*authpb.User{
+ {Name: []byte("alice"), Password: []byte("newPassword")},
+ {Name: []byte("bob")},
+ },
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10)
+ abe := NewAuthBackend(lg, be)
+ abe.CreateAuthBuckets()
+
+ tx := abe.BatchTx()
+ tx.Lock()
+ tc.setup(tx)
+ tx.Unlock()
+
+ abe.ForceCommit()
+ be.Close()
+
+ be2 := backend.NewDefaultBackend(lg, tmpPath)
+ defer be2.Close()
+ abe2 := NewAuthBackend(lg, be2)
+ users := abe2.ReadTx().UnsafeGetAllUsers()
+
+ assert.Equal(t, tc.want, users)
+ })
+ }
+}
+
+func TestGetUser(t *testing.T) {
+ tcs := []struct {
+ name string
+ setup func(tx auth.UnsafeAuthWriter)
+ want *authpb.User
+ }{
+ {
+ name: "Returns nil for missing user",
+ setup: func(tx auth.UnsafeAuthWriter) {},
+ want: nil,
+ },
+ {
+ name: "Returns data put before",
+ setup: func(tx auth.UnsafeAuthWriter) {
+ tx.UnsafePutUser(&authpb.User{
+ Name: []byte("alice"),
+ Password: []byte("alicePassword"),
+ Roles: []string{"aliceRole1", "aliceRole2"},
+ Options: &authpb.UserAddOptions{
+ NoPassword: true,
+ },
+ })
+ },
+ want: &authpb.User{
+ Name: []byte("alice"),
+ Password: []byte("alicePassword"),
+ Roles: []string{"aliceRole1", "aliceRole2"},
+ Options: &authpb.UserAddOptions{
+ NoPassword: true,
+ },
+ },
+ },
+ {
+ name: "Skips deleted",
+ setup: func(tx auth.UnsafeAuthWriter) {
+ tx.UnsafePutUser(&authpb.User{
+ Name: []byte("alice"),
+ })
+ tx.UnsafeDeleteUser("alice")
+ },
+ want: nil,
+ },
+ {
+ name: "Returns data overridden by put",
+ setup: func(tx auth.UnsafeAuthWriter) {
+ tx.UnsafePutUser(&authpb.User{
+ Name: []byte("alice"),
+ Password: []byte("oldPassword"),
+ })
+ tx.UnsafePutUser(&authpb.User{
+ Name: []byte("alice"),
+ Password: []byte("newPassword"),
+ })
+ },
+ want: &authpb.User{
+ Name: []byte("alice"),
+ Password: []byte("newPassword"),
+ },
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10)
+ abe := NewAuthBackend(lg, be)
+ abe.CreateAuthBuckets()
+
+ tx := abe.BatchTx()
+ tx.Lock()
+ tc.setup(tx)
+ tx.Unlock()
+
+ abe.ForceCommit()
+ be.Close()
+
+ be2 := backend.NewDefaultBackend(lg, tmpPath)
+ defer be2.Close()
+ abe2 := NewAuthBackend(lg, be2)
+ users := abe2.GetUser("alice")
+
+ assert.Equal(t, tc.want, users)
+ })
+ }
+}
diff --git a/server/storage/schema/bucket.go b/server/storage/schema/bucket.go
new file mode 100644
index 00000000000..06da660df5e
--- /dev/null
+++ b/server/storage/schema/bucket.go
@@ -0,0 +1,100 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "bytes"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+var (
+ keyBucketName = []byte("key")
+ metaBucketName = []byte("meta")
+ leaseBucketName = []byte("lease")
+ alarmBucketName = []byte("alarm")
+
+ clusterBucketName = []byte("cluster")
+
+ membersBucketName = []byte("members")
+ membersRemovedBucketName = []byte("members_removed")
+
+ authBucketName = []byte("auth")
+ authUsersBucketName = []byte("authUsers")
+ authRolesBucketName = []byte("authRoles")
+
+ testBucketName = []byte("test")
+)
+
+var (
+ Key = backend.Bucket(bucket{id: 1, name: keyBucketName, safeRangeBucket: true})
+ Meta = backend.Bucket(bucket{id: 2, name: metaBucketName, safeRangeBucket: false})
+ Lease = backend.Bucket(bucket{id: 3, name: leaseBucketName, safeRangeBucket: false})
+ Alarm = backend.Bucket(bucket{id: 4, name: alarmBucketName, safeRangeBucket: false})
+ Cluster = backend.Bucket(bucket{id: 5, name: clusterBucketName, safeRangeBucket: false})
+
+ Members = backend.Bucket(bucket{id: 10, name: membersBucketName, safeRangeBucket: false})
+ MembersRemoved = backend.Bucket(bucket{id: 11, name: membersRemovedBucketName, safeRangeBucket: false})
+
+ Auth = backend.Bucket(bucket{id: 20, name: authBucketName, safeRangeBucket: false})
+ AuthUsers = backend.Bucket(bucket{id: 21, name: authUsersBucketName, safeRangeBucket: false})
+ AuthRoles = backend.Bucket(bucket{id: 22, name: authRolesBucketName, safeRangeBucket: false})
+
+ Test = backend.Bucket(bucket{id: 100, name: testBucketName, safeRangeBucket: false})
+
+ AllBuckets = []backend.Bucket{Key, Meta, Lease, Alarm, Cluster, Members, MembersRemoved, Auth, AuthUsers, AuthRoles}
+)
+
+type bucket struct {
+ id backend.BucketID
+ name []byte
+ safeRangeBucket bool
+}
+
+func (b bucket) ID() backend.BucketID { return b.id }
+func (b bucket) Name() []byte { return b.name }
+func (b bucket) String() string { return string(b.Name()) }
+func (b bucket) IsSafeRangeBucket() bool { return b.safeRangeBucket }
+
+var (
+ // Pre v3.5
+ ScheduledCompactKeyName = []byte("scheduledCompactRev")
+ FinishedCompactKeyName = []byte("finishedCompactRev")
+ MetaConsistentIndexKeyName = []byte("consistent_index")
+ AuthEnabledKeyName = []byte("authEnabled")
+ AuthRevisionKeyName = []byte("authRevision")
+ // Since v3.5
+ MetaTermKeyName = []byte("term")
+ MetaConfStateName = []byte("confState")
+ ClusterClusterVersionKeyName = []byte("clusterVersion")
+ ClusterDowngradeKeyName = []byte("downgrade")
+ // Since v3.6
+ MetaStorageVersionName = []byte("storageVersion")
+ // Before adding new meta key please update server/etcdserver/version
+)
+
+// DefaultIgnores defines buckets & keys to ignore in hash checking.
+func DefaultIgnores(bucket, key []byte) bool {
+ // consistent index & term might be changed due to v2 internal sync, which
+ // is not controllable by the user.
+ // storage version might change after a WAL snapshot and is not controllable by the user.
+ return bytes.Equal(bucket, Meta.Name()) &&
+ (bytes.Equal(key, MetaTermKeyName) || bytes.Equal(key, MetaConsistentIndexKeyName) || bytes.Equal(key, MetaStorageVersionName))
+}
+
+func BackendMemberKey(id types.ID) []byte {
+ return []byte(id.String())
+}
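+
+// Illustrative usage sketch: DefaultIgnores is meant for hash comparison across
+// members, skipping keys that may legitimately diverge. `h` is assumed to be a
+// hash.Hash being fed with every other key/value pair.
+//
+//    if !DefaultIgnores(bucketName, key) {
+//        h.Write(key)
+//        h.Write(value)
+//    }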
diff --git a/server/storage/schema/changes.go b/server/storage/schema/changes.go
new file mode 100644
index 00000000000..6eb0b751209
--- /dev/null
+++ b/server/storage/schema/changes.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import "go.etcd.io/etcd/server/v3/storage/backend"
+
+type schemaChange interface {
+ upgradeAction() action
+ downgradeAction() action
+}
+
+// addNewField represents adding a new field when upgrading. Downgrading will remove the field.
+func addNewField(bucket backend.Bucket, fieldName []byte, fieldValue []byte) schemaChange {
+ return simpleSchemaChange{
+ upgrade: setKeyAction{
+ Bucket: bucket,
+ FieldName: fieldName,
+ FieldValue: fieldValue,
+ },
+ downgrade: deleteKeyAction{
+ Bucket: bucket,
+ FieldName: fieldName,
+ },
+ }
+}
+
+type simpleSchemaChange struct {
+ upgrade action
+ downgrade action
+}
+
+func (c simpleSchemaChange) upgradeAction() action {
+ return c.upgrade
+}
+
+func (c simpleSchemaChange) downgradeAction() action {
+ return c.downgrade
+}
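+
+// Illustrative sketch (assuming a locked backend.UnsafeReadWriter `tx`): a schema
+// change is a pair of actions; the migrator applies upgradeAction() when moving
+// up and downgradeAction() when moving back down.
+//
+//    change := addNewField(Meta, []byte("newField"), []byte("defaultValue"))
+//    if _, err := change.upgradeAction().unsafeDo(tx); err != nil {
+//        return err
+//    }
+//    // Downgrading applies the opposite action and removes the field again:
+//    if _, err := change.downgradeAction().unsafeDo(tx); err != nil {
+//        return err
+//    }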
diff --git a/server/storage/schema/changes_test.go b/server/storage/schema/changes_test.go
new file mode 100644
index 00000000000..05b8d49cf44
--- /dev/null
+++ b/server/storage/schema/changes_test.go
@@ -0,0 +1,61 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "testing"
+ "time"
+
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+)
+
+func TestUpgradeDowngrade(t *testing.T) {
+ tcs := []struct {
+ name string
+ change schemaChange
+ expectStateAfterUpgrade map[string]string
+ expectStateAfterDowngrade map[string]string
+ }{
+ {
+ name: "addNewField empty",
+ change: addNewField(Meta, []byte("/test"), []byte("1")),
+ expectStateAfterUpgrade: map[string]string{"/test": "1"},
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ be, _ := betesting.NewTmpBackend(t, time.Microsecond, 10)
+ defer be.Close()
+ tx := be.BatchTx()
+ if tx == nil {
+ t.Fatal("batch tx is nil")
+ }
+ tx.Lock()
+ defer tx.Unlock()
+ UnsafeCreateMetaBucket(tx)
+
+ _, err := tc.change.upgradeAction().unsafeDo(tx)
+ if err != nil {
+ t.Errorf("Failed to upgrade, err: %v", err)
+ }
+ assertBucketState(t, tx, Meta, tc.expectStateAfterUpgrade)
+ _, err = tc.change.downgradeAction().unsafeDo(tx)
+ if err != nil {
+ t.Errorf("Failed to downgrade, err: %v", err)
+ }
+ assertBucketState(t, tx, Meta, tc.expectStateAfterDowngrade)
+ })
+ }
+}
diff --git a/server/storage/schema/cindex.go b/server/storage/schema/cindex.go
new file mode 100644
index 00000000000..cdf938d346c
--- /dev/null
+++ b/server/storage/schema/cindex.go
@@ -0,0 +1,95 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+// UnsafeCreateMetaBucket creates the `meta` bucket (if it does not exist yet).
+func UnsafeCreateMetaBucket(tx backend.UnsafeWriter) {
+ tx.UnsafeCreateBucket(Meta)
+}
+
+// CreateMetaBucket creates the `meta` bucket (if it does not exist yet).
+func CreateMetaBucket(tx backend.BatchTx) {
+ tx.LockOutsideApply()
+ defer tx.Unlock()
+ tx.UnsafeCreateBucket(Meta)
+}
+
+// UnsafeReadConsistentIndex loads consistent index & term from given transaction.
+// Returns 0,0 if the data is not found.
+// Term is persisted since v3.5.
+func UnsafeReadConsistentIndex(tx backend.UnsafeReader) (uint64, uint64) {
+ _, vs := tx.UnsafeRange(Meta, MetaConsistentIndexKeyName, nil, 0)
+ if len(vs) == 0 {
+ return 0, 0
+ }
+ v := binary.BigEndian.Uint64(vs[0])
+ _, ts := tx.UnsafeRange(Meta, MetaTermKeyName, nil, 0)
+ if len(ts) == 0 {
+ return v, 0
+ }
+ t := binary.BigEndian.Uint64(ts[0])
+ return v, t
+}
+
+// ReadConsistentIndex loads consistent index and term from given transaction.
+// Returns 0,0 if the data is not found.
+func ReadConsistentIndex(tx backend.ReadTx) (uint64, uint64) {
+ tx.RLock()
+ defer tx.RUnlock()
+ return UnsafeReadConsistentIndex(tx)
+}
+
+func UnsafeUpdateConsistentIndexForce(tx backend.UnsafeReadWriter, index uint64, term uint64) {
+ unsafeUpdateConsistentIndex(tx, index, term, true)
+}
+
+func UnsafeUpdateConsistentIndex(tx backend.UnsafeReadWriter, index uint64, term uint64) {
+ unsafeUpdateConsistentIndex(tx, index, term, false)
+}
+
+func unsafeUpdateConsistentIndex(tx backend.UnsafeReadWriter, index uint64, term uint64, allowDecreasing bool) {
+ if index == 0 {
+ // Never save 0 as it means that we didn't load the real index yet.
+ return
+ }
+ bs1 := make([]byte, 8)
+ binary.BigEndian.PutUint64(bs1, index)
+
+ if !allowDecreasing {
+ verify.Verify(func() {
+ previousIndex, _ := UnsafeReadConsistentIndex(tx)
+ if index < previousIndex {
+ panic(fmt.Errorf("update of consistent index not advancing: previous: %v new: %v", previousIndex, index))
+ }
+ })
+ }
+
+ // put the index into the underlying backend
+ // tx has been locked in TxnBegin, so there is no need to lock it again
+ tx.UnsafePut(Meta, MetaConsistentIndexKeyName, bs1)
+ if term > 0 {
+ bs2 := make([]byte, 8)
+ binary.BigEndian.PutUint64(bs2, term)
+ tx.UnsafePut(Meta, MetaTermKeyName, bs2)
+ }
+}
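+
+// Illustrative sketch (assuming a locked backend.UnsafeReadWriter `tx` and the
+// applied raft index/term in `appliedIndex`/`appliedTerm`): both values are
+// stored as 8-byte big-endian integers under the meta bucket.
+//
+//    UnsafeUpdateConsistentIndex(tx, appliedIndex, appliedTerm)
+//    index, term := UnsafeReadConsistentIndex(tx)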
diff --git a/server/storage/schema/confstate.go b/server/storage/schema/confstate.go
new file mode 100644
index 00000000000..c2bcb540130
--- /dev/null
+++ b/server/storage/schema/confstate.go
@@ -0,0 +1,59 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "encoding/json"
+ "log"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+// MustUnsafeSaveConfStateToBackend persists confState using given transaction (tx).
+// confState in backend is persisted since etcd v3.5.
+func MustUnsafeSaveConfStateToBackend(lg *zap.Logger, tx backend.UnsafeWriter, confState *raftpb.ConfState) {
+ confStateBytes, err := json.Marshal(confState)
+ if err != nil {
+ lg.Panic("Cannot marshal raftpb.ConfState", zap.Stringer("conf-state", confState), zap.Error(err))
+ }
+
+ tx.UnsafePut(Meta, MetaConfStateName, confStateBytes)
+}
+
+// UnsafeConfStateFromBackend retrieves ConfState from the backend.
+// Returns nil if confState in backend is not persisted (e.g. backend written by <v3.5).
+ // The storage version is >= 3.6, so we don't need to
+ // use any other fields to identify the etcd's storage version.
+ confstate := UnsafeConfStateFromBackend(lg, tx)
+ if confstate == nil {
+ return v, fmt.Errorf("missing confstate information")
+ }
+ _, term := UnsafeReadConsistentIndex(tx)
+ if term == 0 {
+ return v, fmt.Errorf("missing term information")
+ }
+ return version.V3_5, nil
+}
+
+func schemaChangesForVersion(v semver.Version, isUpgrade bool) ([]schemaChange, error) {
+ // changes should be taken from the higher version
+ higherV := v
+ if isUpgrade {
+ higherV = semver.Version{Major: v.Major, Minor: v.Minor + 1}
+ }
+
+ actions, found := schemaChanges[higherV]
+ if !found {
+ if isUpgrade {
+ return nil, fmt.Errorf("version %q is not supported", higherV.String())
+ }
+ return nil, fmt.Errorf("version %q is not supported", v.String())
+ }
+ return actions, nil
+}
+
+var (
+ // schemaChanges lists changes that were introduced in a particular version.
+ // The schema was introduced in v3.6, so changes made before then were not tracked.
+ schemaChanges = map[semver.Version][]schemaChange{
+ version.V3_6: {
+ addNewField(Meta, MetaStorageVersionName, emptyStorageVersion),
+ },
+ }
+ // emptyStorageVersion is used when stepping to v3.6 for the first time; in all other versions StorageVersion should be set by the migrator.
+ // By adding an addNewField change for StorageVersion we can reuse the same logic to remove it when downgrading to v3.5.
+ emptyStorageVersion = []byte("")
+)
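+
+// Illustrative sketch: when upgrading, the changes are taken from the next
+// (higher) version, so moving from v3.5 to v3.6 applies the upgrade actions of
+// the v3.6 entry; downgrading from v3.6 applies the downgrade actions of the
+// same entry.
+//
+//    changes, err := schemaChangesForVersion(version.V3_5, true)
+//    // on success, changes == schemaChanges[version.V3_6]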
diff --git a/server/storage/schema/schema_test.go b/server/storage/schema/schema_test.go
new file mode 100644
index 00000000000..e87f0ff5b14
--- /dev/null
+++ b/server/storage/schema/schema_test.go
@@ -0,0 +1,332 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/membershippb"
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+ "go.etcd.io/etcd/server/v3/storage/wal"
+ waltesting "go.etcd.io/etcd/server/v3/storage/wal/testing"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+func TestValidate(t *testing.T) {
+ tcs := []struct {
+ name string
+ version semver.Version
+ // Overrides which keys should be set (default based on version)
+ overrideKeys func(tx backend.UnsafeReadWriter)
+ expectError bool
+ expectErrorMsg string
+ }{
+ // As the storage version field was added in v3.6, for v3.5 we will not set it.
+ // For storage to be considered v3.5 it must have both the confstate and term keys set.
+ {
+ name: `V3.4 schema is correct`,
+ version: version.V3_4,
+ },
+ {
+ name: `V3.5 schema without confstate and term fields is correct`,
+ version: version.V3_5,
+ overrideKeys: func(tx backend.UnsafeReadWriter) {},
+ },
+ {
+ name: `V3.5 schema without term field is correct`,
+ version: version.V3_5,
+ overrideKeys: func(tx backend.UnsafeReadWriter) {
+ MustUnsafeSaveConfStateToBackend(zap.NewNop(), tx, &raftpb.ConfState{})
+ },
+ },
+ {
+ name: `V3.5 schema with all fields is correct`,
+ version: version.V3_5,
+ overrideKeys: func(tx backend.UnsafeReadWriter) {
+ MustUnsafeSaveConfStateToBackend(zap.NewNop(), tx, &raftpb.ConfState{})
+ UnsafeUpdateConsistentIndex(tx, 1, 1)
+ },
+ },
+ {
+ name: `V3.6 schema is correct`,
+ version: version.V3_6,
+ },
+ {
+ name: `V3.7 schema is unknown and should return error`,
+ version: version.V3_7,
+ expectError: true,
+ expectErrorMsg: `version "3.7.0" is not supported`,
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ lg := zap.NewNop()
+ dataPath := setupBackendData(t, tc.version, tc.overrideKeys)
+
+ b := backend.NewDefaultBackend(lg, dataPath)
+ defer b.Close()
+ err := Validate(lg, b.ReadTx())
+ if (err != nil) != tc.expectError {
+ t.Errorf("Validate(lg, tx) = %+v, expected error: %v", err, tc.expectError)
+ }
+ if err != nil && err.Error() != tc.expectErrorMsg {
+ t.Errorf("Validate(lg, tx) = %q, expected error message: %q", err, tc.expectErrorMsg)
+ }
+ })
+ }
+}
+
+func TestMigrate(t *testing.T) {
+ tcs := []struct {
+ name string
+ version semver.Version
+ // Overrides which keys should be set (default based on version)
+ overrideKeys func(tx backend.UnsafeReadWriter)
+ targetVersion semver.Version
+ walEntries []etcdserverpb.InternalRaftRequest
+
+ expectVersion *semver.Version
+ expectError bool
+ expectErrorMsg string
+ }{
+ // As the storage version field was added in v3.6, for v3.5 we will not set it.
+ // For storage to be considered v3.5 it must have both the confstate and term keys set.
+ {
+ name: `Upgrading v3.5 to v3.6 should be rejected if confstate is not set`,
+ version: version.V3_5,
+ overrideKeys: func(tx backend.UnsafeReadWriter) {},
+ targetVersion: version.V3_6,
+ expectVersion: nil,
+ expectError: true,
+ expectErrorMsg: `cannot detect storage schema version: missing confstate information`,
+ },
+ {
+ name: `Upgrading v3.5 to v3.6 should be rejected if term is not set`,
+ version: version.V3_5,
+ overrideKeys: func(tx backend.UnsafeReadWriter) {
+ MustUnsafeSaveConfStateToBackend(zap.NewNop(), tx, &raftpb.ConfState{})
+ },
+ targetVersion: version.V3_6,
+ expectVersion: nil,
+ expectError: true,
+ expectErrorMsg: `cannot detect storage schema version: missing term information`,
+ },
+ {
+ name: `Upgrading v3.5 to v3.6 should succeed; all required fields are set`,
+ version: version.V3_5,
+ targetVersion: version.V3_6,
+ expectVersion: &version.V3_6,
+ },
+ {
+ name: `Migrate on same v3.5 version passes and doesn't set storage version`,
+ version: version.V3_5,
+ targetVersion: version.V3_5,
+ expectVersion: nil,
+ },
+ {
+ name: `Migrate on same v3.6 version passes`,
+ version: version.V3_6,
+ targetVersion: version.V3_6,
+ expectVersion: &version.V3_6,
+ },
+ {
+ name: `Migrate on same v3.7 version passes`,
+ version: version.V3_7,
+ targetVersion: version.V3_7,
+ expectVersion: &version.V3_7,
+ },
+ {
+ name: "Upgrading 3.6 to v3.7 is not supported",
+ version: version.V3_6,
+ targetVersion: version.V3_7,
+ expectVersion: &version.V3_6,
+ expectError: true,
+ expectErrorMsg: `cannot create migration plan: version "3.7.0" is not supported`,
+ },
+ {
+ name: "Downgrading v3.7 to v3.6 is not supported",
+ version: version.V3_7,
+ targetVersion: version.V3_6,
+ expectVersion: &version.V3_7,
+ expectError: true,
+ expectErrorMsg: `cannot create migration plan: version "3.7.0" is not supported`,
+ },
+ {
+ name: "Downgrading v3.6 to v3.5 works as there are no v3.6 wal entries",
+ version: version.V3_6,
+ targetVersion: version.V3_5,
+ walEntries: []etcdserverpb.InternalRaftRequest{
+ {Range: &etcdserverpb.RangeRequest{Key: []byte("\x00"), RangeEnd: []byte("\xff")}},
+ },
+ expectVersion: nil,
+ },
+ {
+ name: "Downgrading v3.6 to v3.5 fails if there are newer WAL entries",
+ version: version.V3_6,
+ targetVersion: version.V3_5,
+ walEntries: []etcdserverpb.InternalRaftRequest{
+ {ClusterVersionSet: &membershippb.ClusterVersionSetRequest{Ver: "3.6.0"}},
+ },
+ expectVersion: &version.V3_6,
+ expectError: true,
+ expectErrorMsg: "cannot downgrade storage, WAL contains newer entries",
+ },
+ {
+ name: "Downgrading v3.5 to v3.4 is not supported as schema was introduced in v3.6",
+ version: version.V3_5,
+ targetVersion: version.V3_4,
+ expectVersion: nil,
+ expectError: true,
+ expectErrorMsg: `cannot create migration plan: version "3.5.0" is not supported`,
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ lg := zap.NewNop()
+ dataPath := setupBackendData(t, tc.version, tc.overrideKeys)
+ w, _ := waltesting.NewTmpWAL(t, tc.walEntries)
+ defer w.Close()
+ walVersion, err := wal.ReadWALVersion(w)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ b := backend.NewDefaultBackend(lg, dataPath)
+ defer b.Close()
+
+ err = Migrate(lg, b.BatchTx(), walVersion, tc.targetVersion)
+ if (err != nil) != tc.expectError {
+ t.Errorf("Migrate(lg, tx, %q) = %+v, expected error: %v", tc.targetVersion, err, tc.expectError)
+ }
+ if err != nil && !strings.Contains(err.Error(), tc.expectErrorMsg) {
+ t.Errorf("Migrate(lg, tx, %q) = %q, expected error message: %q", tc.targetVersion, err, tc.expectErrorMsg)
+ }
+ v := UnsafeReadStorageVersion(b.BatchTx())
+ assert.Equal(t, tc.expectVersion, v)
+ })
+ }
+}
+
+func TestMigrateIsReversible(t *testing.T) {
+ tcs := []struct {
+ initialVersion semver.Version
+ state map[string]string
+ }{
+ {
+ initialVersion: version.V3_5,
+ state: map[string]string{
+ "confState": `{"auto_leave":false}`,
+ "consistent_index": "\x00\x00\x00\x00\x00\x00\x00\x01",
+ "term": "\x00\x00\x00\x00\x00\x00\x00\x01",
+ },
+ },
+ {
+ initialVersion: version.V3_6,
+ state: map[string]string{
+ "confState": `{"auto_leave":false}`,
+ "consistent_index": "\x00\x00\x00\x00\x00\x00\x00\x01",
+ "term": "\x00\x00\x00\x00\x00\x00\x00\x01",
+ "storageVersion": "3.6.0",
+ },
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.initialVersion.String(), func(t *testing.T) {
+ lg := zap.NewNop()
+ dataPath := setupBackendData(t, tc.initialVersion, nil)
+
+ be := backend.NewDefaultBackend(lg, dataPath)
+ defer be.Close()
+ tx := be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+ assertBucketState(t, tx, Meta, tc.state)
+ w, walPath := waltesting.NewTmpWAL(t, nil)
+ walVersion, err := wal.ReadWALVersion(w)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Upgrade to current version
+ ver := localBinaryVersion()
+ err = UnsafeMigrate(lg, tx, walVersion, ver)
+ if err != nil {
+ t.Errorf("Migrate(lg, tx, %q) returned error %+v", ver, err)
+ }
+ assert.Equal(t, &ver, UnsafeReadStorageVersion(tx))
+
+ // Downgrade back to initial version
+ w.Close()
+ w = waltesting.Reopen(t, walPath)
+ defer w.Close()
+ walVersion, err = wal.ReadWALVersion(w)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = UnsafeMigrate(lg, tx, walVersion, tc.initialVersion)
+ if err != nil {
+ t.Errorf("Migrate(lg, tx, %q) returned error %+v", tc.initialVersion, err)
+ }
+
+ // Assert that all changes were reverted
+ assertBucketState(t, tx, Meta, tc.state)
+ })
+ }
+}
+
+func setupBackendData(t *testing.T, ver semver.Version, overrideKeys func(tx backend.UnsafeReadWriter)) string {
+ t.Helper()
+ be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10)
+ tx := be.BatchTx()
+ if tx == nil {
+ t.Fatal("batch tx is nil")
+ }
+ tx.Lock()
+ UnsafeCreateMetaBucket(tx)
+ if overrideKeys != nil {
+ overrideKeys(tx)
+ } else {
+ switch ver {
+ case version.V3_4:
+ case version.V3_5:
+ MustUnsafeSaveConfStateToBackend(zap.NewNop(), tx, &raftpb.ConfState{})
+ UnsafeUpdateConsistentIndex(tx, 1, 1)
+ case version.V3_6:
+ MustUnsafeSaveConfStateToBackend(zap.NewNop(), tx, &raftpb.ConfState{})
+ UnsafeUpdateConsistentIndex(tx, 1, 1)
+ UnsafeSetStorageVersion(tx, &version.V3_6)
+ case version.V3_7:
+ MustUnsafeSaveConfStateToBackend(zap.NewNop(), tx, &raftpb.ConfState{})
+ UnsafeUpdateConsistentIndex(tx, 1, 1)
+ UnsafeSetStorageVersion(tx, &version.V3_7)
+ tx.UnsafePut(Meta, []byte("future-key"), []byte(""))
+ default:
+ t.Fatalf("Unsupported storage version")
+ }
+ }
+ tx.Unlock()
+ be.ForceCommit()
+ be.Close()
+ return tmpPath
+}
diff --git a/server/storage/schema/version.go b/server/storage/schema/version.go
new file mode 100644
index 00000000000..83984e6b5ff
--- /dev/null
+++ b/server/storage/schema/version.go
@@ -0,0 +1,67 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "github.com/coreos/go-semver/semver"
+
+ "go.etcd.io/bbolt"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+// ReadStorageVersion loads storage version from given backend transaction.
+// Populated since v3.6
+func ReadStorageVersion(tx backend.ReadTx) *semver.Version {
+ tx.RLock()
+ defer tx.RUnlock()
+ return UnsafeReadStorageVersion(tx)
+}
+
+// UnsafeReadStorageVersion loads storage version from given backend transaction.
+// Populated since v3.6
+func UnsafeReadStorageVersion(tx backend.UnsafeReader) *semver.Version {
+ _, vs := tx.UnsafeRange(Meta, MetaStorageVersionName, nil, 1)
+ if len(vs) == 0 {
+ return nil
+ }
+ v, err := semver.NewVersion(string(vs[0]))
+ if err != nil {
+ return nil
+ }
+ return v
+}
+
+// ReadStorageVersionFromSnapshot loads storage version from given bbolt transaction.
+// Populated since v3.6
+func ReadStorageVersionFromSnapshot(tx *bbolt.Tx) *semver.Version {
+ v := tx.Bucket(Meta.Name()).Get(MetaStorageVersionName)
+ version, err := semver.NewVersion(string(v))
+ if err != nil {
+ return nil
+ }
+ return version
+}
+
+// UnsafeSetStorageVersion updates etcd storage version in backend.
+// Populated since v3.6
+func UnsafeSetStorageVersion(tx backend.UnsafeWriter, v *semver.Version) {
+ sv := semver.Version{Major: v.Major, Minor: v.Minor}
+ tx.UnsafePut(Meta, MetaStorageVersionName, []byte(sv.String()))
+}
+
+// UnsafeClearStorageVersion removes etcd storage version in backend.
+func UnsafeClearStorageVersion(tx backend.UnsafeWriter) {
+ tx.UnsafeDelete(Meta, MetaStorageVersionName)
+}
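+
+// Illustrative sketch (assuming a locked batch transaction `tx` with the meta
+// bucket created): only the major and minor components are persisted, so any
+// 3.5.x or pre-release version is stored as "3.5.0".
+//
+//    UnsafeSetStorageVersion(tx, semver.New("3.5.1"))
+//    v := UnsafeReadStorageVersion(tx) // v.String() == "3.5.0"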
diff --git a/server/storage/schema/version_test.go b/server/storage/schema/version_test.go
new file mode 100644
index 00000000000..63442fc24d5
--- /dev/null
+++ b/server/storage/schema/version_test.go
@@ -0,0 +1,134 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "testing"
+ "time"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/bbolt"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+)
+
+// TestVersion ensures that UnsafeSetStorageVersion/UnsafeReadStorageVersion work well together.
+func TestVersion(t *testing.T) {
+ tcs := []struct {
+ version string
+ expectVersion string
+ }{
+ {
+ version: "3.5.0",
+ expectVersion: "3.5.0",
+ },
+ {
+ version: "3.5.0-alpha",
+ expectVersion: "3.5.0",
+ },
+ {
+ version: "3.5.0-beta.0",
+ expectVersion: "3.5.0",
+ },
+ {
+ version: "3.5.0-rc.1",
+ expectVersion: "3.5.0",
+ },
+ {
+ version: "3.5.1",
+ expectVersion: "3.5.0",
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.version, func(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10)
+ tx := be.BatchTx()
+ if tx == nil {
+ t.Fatal("batch tx is nil")
+ }
+ tx.Lock()
+ tx.UnsafeCreateBucket(Meta)
+ UnsafeSetStorageVersion(tx, semver.New(tc.version))
+ tx.Unlock()
+ be.ForceCommit()
+ be.Close()
+
+ b := backend.NewDefaultBackend(lg, tmpPath)
+ defer b.Close()
+ v := UnsafeReadStorageVersion(b.BatchTx())
+
+ assert.Equal(t, tc.expectVersion, v.String())
+ })
+ }
+}
+
+// TestVersionSnapshot ensures that UnsafeSetStorageVersion/ReadStorageVersionFromSnapshot work well together.
+func TestVersionSnapshot(t *testing.T) {
+ tcs := []struct {
+ version string
+ expectVersion string
+ }{
+ {
+ version: "3.5.0",
+ expectVersion: "3.5.0",
+ },
+ {
+ version: "3.5.0-alpha",
+ expectVersion: "3.5.0",
+ },
+ {
+ version: "3.5.0-beta.0",
+ expectVersion: "3.5.0",
+ },
+ {
+ version: "3.5.0-rc.1",
+ expectVersion: "3.5.0",
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.version, func(t *testing.T) {
+ be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10)
+ tx := be.BatchTx()
+ if tx == nil {
+ t.Fatal("batch tx is nil")
+ }
+ tx.Lock()
+ tx.UnsafeCreateBucket(Meta)
+ UnsafeSetStorageVersion(tx, semver.New(tc.version))
+ tx.Unlock()
+ be.ForceCommit()
+ be.Close()
+ db, err := bbolt.Open(tmpPath, 0o400, &bbolt.Options{ReadOnly: true})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer db.Close()
+
+ var ver *semver.Version
+ if err = db.View(func(tx *bbolt.Tx) error {
+ ver = ReadStorageVersionFromSnapshot(tx)
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ assert.Equal(t, tc.expectVersion, ver.String())
+ })
+ }
+}
diff --git a/server/storage/storage.go b/server/storage/storage.go
new file mode 100644
index 00000000000..99a37a23d07
--- /dev/null
+++ b/server/storage/storage.go
@@ -0,0 +1,135 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "errors"
+ "sync"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
+ "go.etcd.io/etcd/server/v3/storage/wal"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+type Storage interface {
+ // Save function saves ents and state to the underlying stable storage.
+ // Save MUST block until st and ents are on stable storage.
+ Save(st raftpb.HardState, ents []raftpb.Entry) error
+ // SaveSnap function saves snapshot to the underlying stable storage.
+ SaveSnap(snap raftpb.Snapshot) error
+ // Close closes the Storage and performs finalization.
+ Close() error
+ // Release releases the locked wal files older than the provided snapshot.
+ Release(snap raftpb.Snapshot) error
+ // Sync flushes the WAL to stable storage.
+ Sync() error
+ // MinimalEtcdVersion returns the minimal etcd version able to interpret the WAL log.
+ MinimalEtcdVersion() *semver.Version
+}
+
+type storage struct {
+ lg *zap.Logger
+ s *snap.Snapshotter
+
+ // Mutex protected variables
+ mux sync.RWMutex
+ w *wal.WAL
+}
+
+func NewStorage(lg *zap.Logger, w *wal.WAL, s *snap.Snapshotter) Storage {
+ return &storage{lg: lg, w: w, s: s}
+}
+
+// SaveSnap saves the snapshot file to disk and writes the WAL snapshot entry.
+func (st *storage) SaveSnap(snap raftpb.Snapshot) error {
+ st.mux.RLock()
+ defer st.mux.RUnlock()
+ walsnap := walpb.Snapshot{
+ Index: snap.Metadata.Index,
+ Term: snap.Metadata.Term,
+ ConfState: &snap.Metadata.ConfState,
+ }
+ // save the snapshot file before writing the snapshot to the wal.
+ // This makes it possible for the snapshot file to become orphaned, but prevents
+ // a WAL snapshot entry from having no corresponding snapshot file.
+ err := st.s.SaveSnap(snap)
+ if err != nil {
+ return err
+ }
+ // gofail: var raftBeforeWALSaveSnaphot struct{}
+
+ return st.w.SaveSnapshot(walsnap)
+}
+
+// Release releases resources that are older than the given snap and no longer needed:
+// - releases the locks on the WAL files that are older than the given snap.
+// - deletes any .snap.db files that are older than the given snap.
+func (st *storage) Release(snap raftpb.Snapshot) error {
+ st.mux.RLock()
+ defer st.mux.RUnlock()
+ if err := st.w.ReleaseLockTo(snap.Metadata.Index); err != nil {
+ return err
+ }
+ return st.s.ReleaseSnapDBs(snap)
+}
+
+func (st *storage) Save(s raftpb.HardState, ents []raftpb.Entry) error {
+ st.mux.RLock()
+ defer st.mux.RUnlock()
+ return st.w.Save(s, ents)
+}
+
+func (st *storage) Close() error {
+ st.mux.Lock()
+ defer st.mux.Unlock()
+ return st.w.Close()
+}
+
+func (st *storage) Sync() error {
+ st.mux.RLock()
+ defer st.mux.RUnlock()
+ return st.w.Sync()
+}
+
+func (st *storage) MinimalEtcdVersion() *semver.Version {
+ st.mux.Lock()
+ defer st.mux.Unlock()
+ walsnap := walpb.Snapshot{}
+
+ sn, err := st.s.Load()
+ if err != nil && !errors.Is(err, snap.ErrNoSnapshot) {
+ panic(err)
+ }
+ if sn != nil {
+ walsnap.Index = sn.Metadata.Index
+ walsnap.Term = sn.Metadata.Term
+ walsnap.ConfState = &sn.Metadata.ConfState
+ }
+ w, err := st.w.Reopen(st.lg, walsnap)
+ if err != nil {
+ panic(err)
+ }
+ _, _, ents, err := w.ReadAll()
+ if err != nil {
+ panic(err)
+ }
+ v := wal.MinimalEtcdVersion(ents)
+ st.w = w
+ return v
+}
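
The Storage interface above ties the WAL and the snapshotter together. For orientation, a minimal hedged sketch of wiring it up follows; the directory paths and the single entry are invented for illustration, and error handling is reduced to logging.

	package main

	import (
		"go.uber.org/zap"

		"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
		"go.etcd.io/etcd/server/v3/storage"
		"go.etcd.io/etcd/server/v3/storage/wal"
		"go.etcd.io/raft/v3/raftpb"
	)

	func main() {
		lg := zap.NewExample()
		// Create a fresh WAL directory and a snapshotter (paths are hypothetical).
		w, err := wal.Create(lg, "/tmp/etcd-wal", nil)
		if err != nil {
			lg.Fatal("failed to create WAL", zap.Error(err))
		}
		ss := snap.New(lg, "/tmp/etcd-snap")

		st := storage.NewStorage(lg, w, ss)
		defer st.Close()

		// Persist one HardState and one normal entry; Save blocks until both
		// are on stable storage.
		ents := []raftpb.Entry{{Term: 1, Index: 1, Type: raftpb.EntryNormal}}
		if err := st.Save(raftpb.HardState{Term: 1, Commit: 1}, ents); err != nil {
			lg.Fatal("failed to save", zap.Error(err))
		}
	}
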
diff --git a/server/storage/util.go b/server/storage/util.go
new file mode 100644
index 00000000000..0dc7f1c6d30
--- /dev/null
+++ b/server/storage/util.go
@@ -0,0 +1,160 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ "go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+// AssertNoV2StoreContent warns or returns an error, depending on the deprecation stage,
+// if the v2store contains custom content.
+func AssertNoV2StoreContent(lg *zap.Logger, st v2store.Store, deprecationStage config.V2DeprecationEnum) error {
+ metaOnly, err := membership.IsMetaStoreOnly(st)
+ if err != nil {
+ return err
+ }
+ if metaOnly {
+ return nil
+ }
+ if deprecationStage.IsAtLeast(config.V2Depr1WriteOnly) {
+ return fmt.Errorf("detected disallowed custom content in v2store for stage --v2-deprecation=%s", deprecationStage)
+ }
+ lg.Warn("detected custom v2store content. Etcd v3.5 is the last version that allows accessing it via the v2 API. Please remove the content.")
+ return nil
+}
+
+// CreateConfigChangeEnts creates a series of Raft entries (i.e.
+// EntryConfChange) to remove the set of given IDs from the cluster. The ID
+// `self` is _not_ removed, even if present in the set.
+// If `self` is not inside the given ids, it creates a Raft entry to add a
+// default member with the given `self`.
+func CreateConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
+ found := false
+ for _, id := range ids {
+ if id == self {
+ found = true
+ }
+ }
+
+ var ents []raftpb.Entry
+ next := index + 1
+
+ // NB: always add self first, then remove other nodes. Raft will panic if the
+ // set of voters ever becomes empty.
+ if !found {
+ m := membership.Member{
+ ID: types.ID(self),
+ RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
+ }
+ ctx, err := json.Marshal(m)
+ if err != nil {
+ lg.Panic("failed to marshal member", zap.Error(err))
+ }
+ cc := &raftpb.ConfChange{
+ Type: raftpb.ConfChangeAddNode,
+ NodeID: self,
+ Context: ctx,
+ }
+ e := raftpb.Entry{
+ Type: raftpb.EntryConfChange,
+ Data: pbutil.MustMarshal(cc),
+ Term: term,
+ Index: next,
+ }
+ ents = append(ents, e)
+ next++
+ }
+
+ for _, id := range ids {
+ if id == self {
+ continue
+ }
+ cc := &raftpb.ConfChange{
+ Type: raftpb.ConfChangeRemoveNode,
+ NodeID: id,
+ }
+ e := raftpb.Entry{
+ Type: raftpb.EntryConfChange,
+ Data: pbutil.MustMarshal(cc),
+ Term: term,
+ Index: next,
+ }
+ ents = append(ents, e)
+ next++
+ }
+
+ return ents
+}
+
+// GetEffectiveNodeIdsFromWalEntries returns an ordered set of IDs included in the given snapshot and
+// the entries.
+//
+// Deprecated: use GetEffectiveNodeIDsFromWALEntries instead.
+//
+//revive:disable-next-line:var-naming
+func GetEffectiveNodeIdsFromWalEntries(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
+ return GetEffectiveNodeIDsFromWALEntries(lg, snap, ents)
+}
+
+// GetEffectiveNodeIDsFromWALEntries returns an ordered set of IDs included in the given snapshot and
+// the entries. The given snapshot/entries can contain three kinds of
+// ID-related entry:
+// - ConfChangeAddNode, in which case the contained ID will be added into the set.
+// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
+// - ConfChangeAddLearnerNode, in which case the contained ID will be added into the set.
+func GetEffectiveNodeIDsFromWALEntries(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
+ ids := make(map[uint64]bool)
+ if snap != nil {
+ for _, id := range snap.Metadata.ConfState.Voters {
+ ids[id] = true
+ }
+ }
+ for _, e := range ents {
+ if e.Type != raftpb.EntryConfChange {
+ continue
+ }
+ var cc raftpb.ConfChange
+ pbutil.MustUnmarshal(&cc, e.Data)
+ switch cc.Type {
+ case raftpb.ConfChangeAddLearnerNode:
+ ids[cc.NodeID] = true
+ case raftpb.ConfChangeAddNode:
+ ids[cc.NodeID] = true
+ case raftpb.ConfChangeRemoveNode:
+ delete(ids, cc.NodeID)
+ case raftpb.ConfChangeUpdateNode:
+ // do nothing
+ default:
+ lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String()))
+ }
+ }
+ sids := make(types.Uint64Slice, 0, len(ids))
+ for id := range ids {
+ sids = append(sids, id)
+ }
+ sort.Sort(sids)
+ return sids
+}
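
To make the folding in GetEffectiveNodeIDsFromWALEntries concrete, here is a small hedged example; the snapshot voters and conf-change entries are invented. Starting from voters {1, 2}, adding node 3 and removing node 1 yields the sorted set [2, 3].

	package main

	import (
		"fmt"

		"go.uber.org/zap"

		"go.etcd.io/etcd/pkg/v3/pbutil"
		"go.etcd.io/etcd/server/v3/storage"
		"go.etcd.io/raft/v3/raftpb"
	)

	func main() {
		// The snapshot says voters {1, 2}; the entries then add node 3 and remove node 1.
		snapshot := &raftpb.Snapshot{}
		snapshot.Metadata.ConfState.Voters = []uint64{1, 2}
		ents := []raftpb.Entry{
			{Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(&raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 3})},
			{Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(&raftpb.ConfChange{Type: raftpb.ConfChangeRemoveNode, NodeID: 1})},
		}
		ids := storage.GetEffectiveNodeIDsFromWALEntries(zap.NewExample(), snapshot, ents)
		fmt.Println(ids) // [2 3]
	}
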
diff --git a/server/storage/wal/decoder.go b/server/storage/wal/decoder.go
new file mode 100644
index 00000000000..bdd4962e97f
--- /dev/null
+++ b/server/storage/wal/decoder.go
@@ -0,0 +1,229 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "sync"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/pkg/v3/crc"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+const minSectorSize = 512
+
+// frameSizeBytes is frame size in bytes, including record size and padding size.
+const frameSizeBytes = 8
+
+type Decoder interface {
+ Decode(rec *walpb.Record) error
+ LastOffset() int64
+ LastCRC() uint32
+ UpdateCRC(prevCrc uint32)
+}
+
+type decoder struct {
+ mu sync.Mutex
+ brs []*fileutil.FileBufReader
+
+ // lastValidOff file offset following the last valid decoded record
+ lastValidOff int64
+ crc hash.Hash32
+
+ // continueOnCrcError causes the decoder to continue working even in case of a CRC mismatch.
+ // This mode is intended for tools that inspect corrupted WAL logs.
+ // See the comments on the 'Decode' method for the exact semantics.
+ continueOnCrcError bool
+}
+
+func NewDecoderAdvanced(continueOnCrcError bool, r ...fileutil.FileReader) Decoder {
+ readers := make([]*fileutil.FileBufReader, len(r))
+ for i := range r {
+ readers[i] = fileutil.NewFileBufReader(r[i])
+ }
+ return &decoder{
+ brs: readers,
+ crc: crc.New(0, crcTable),
+ continueOnCrcError: continueOnCrcError,
+ }
+}
+
+func NewDecoder(r ...fileutil.FileReader) Decoder {
+ return NewDecoderAdvanced(false, r...)
+}
+
+// Decode reads the next record out of the file.
+// On success, it fills 'rec' and returns nil.
+// On failure, it returns an error and usually resets 'rec' to its defaults.
+// When continueOnCrcError is set, the method may return ErrUnexpectedEOF or ErrCRCMismatch but preserves the read
+// (potentially corrupted) record content.
+func (d *decoder) Decode(rec *walpb.Record) error {
+ rec.Reset()
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ return d.decodeRecord(rec)
+}
+
+func (d *decoder) decodeRecord(rec *walpb.Record) error {
+ if len(d.brs) == 0 {
+ return io.EOF
+ }
+
+ fileBufReader := d.brs[0]
+ l, err := readInt64(fileBufReader)
+ if errors.Is(err, io.EOF) || (err == nil && l == 0) {
+ // hit end of file or preallocated space
+ d.brs = d.brs[1:]
+ if len(d.brs) == 0 {
+ return io.EOF
+ }
+ d.lastValidOff = 0
+ return d.decodeRecord(rec)
+ }
+ if err != nil {
+ return err
+ }
+
+ recBytes, padBytes := decodeFrameSize(l)
+ // The length of current WAL entry must be less than the remaining file size.
+ maxEntryLimit := fileBufReader.FileInfo().Size() - d.lastValidOff - padBytes
+ if recBytes > maxEntryLimit {
+ return fmt.Errorf("%w: [wal] max entry size limit exceeded when reading %q, recBytes: %d, fileSize(%d) - offset(%d) - padBytes(%d) = entryLimit(%d)",
+ io.ErrUnexpectedEOF, fileBufReader.FileInfo().Name(), recBytes, fileBufReader.FileInfo().Size(), d.lastValidOff, padBytes, maxEntryLimit)
+ }
+
+ data := make([]byte, recBytes+padBytes)
+ if _, err = io.ReadFull(fileBufReader, data); err != nil {
+ // ReadFull returns io.EOF only if no bytes were read
+ // the decoder should treat this as an ErrUnexpectedEOF instead.
+ if errors.Is(err, io.EOF) {
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+ }
+ if err := rec.Unmarshal(data[:recBytes]); err != nil {
+ if d.isTornEntry(data) {
+ return io.ErrUnexpectedEOF
+ }
+ return err
+ }
+
+ // skip crc checking if the record type is CrcType
+ if rec.Type != CrcType {
+ _, err := d.crc.Write(rec.Data)
+ if err != nil {
+ return err
+ }
+ if err := rec.Validate(d.crc.Sum32()); err != nil {
+ if !d.continueOnCrcError {
+ rec.Reset()
+ } else {
+ // If we continue, we want to update lastValidOff, such that following errors are consistent
+ defer func() { d.lastValidOff += frameSizeBytes + recBytes + padBytes }()
+ }
+
+ if d.isTornEntry(data) {
+ return fmt.Errorf("%w: in file '%s' at position: %d", io.ErrUnexpectedEOF, fileBufReader.FileInfo().Name(), d.lastValidOff)
+ }
+ return fmt.Errorf("%w: in file '%s' at position: %d", err, fileBufReader.FileInfo().Name(), d.lastValidOff)
+ }
+ }
+ // record decoded as valid; point last valid offset to end of record
+ d.lastValidOff += frameSizeBytes + recBytes + padBytes
+ return nil
+}
+
+func decodeFrameSize(lenField int64) (recBytes int64, padBytes int64) {
+ // the record size is stored in the lower 56 bits of the 64-bit length
+ recBytes = int64(uint64(lenField) & ^(uint64(0xff) << 56))
+ // non-zero padding is indicated by set MSb / a negative length
+ if lenField < 0 {
+ // padding is stored in lower 3 bits of length MSB
+ padBytes = int64((uint64(lenField) >> 56) & 0x7)
+ }
+ return recBytes, padBytes
+}
+
+// isTornEntry determines whether the last entry of the WAL was partially written
+// and corrupted because of a torn write.
+func (d *decoder) isTornEntry(data []byte) bool {
+ if len(d.brs) != 1 {
+ return false
+ }
+
+ fileOff := d.lastValidOff + frameSizeBytes
+ curOff := 0
+ var chunks [][]byte
+ // split data on sector boundaries
+ for curOff < len(data) {
+ chunkLen := int(minSectorSize - (fileOff % minSectorSize))
+ if chunkLen > len(data)-curOff {
+ chunkLen = len(data) - curOff
+ }
+ chunks = append(chunks, data[curOff:curOff+chunkLen])
+ fileOff += int64(chunkLen)
+ curOff += chunkLen
+ }
+
+ // if any data for a sector chunk is all 0, it's a torn write
+ for _, sect := range chunks {
+ isZero := true
+ for _, v := range sect {
+ if v != 0 {
+ isZero = false
+ break
+ }
+ }
+ if isZero {
+ return true
+ }
+ }
+ return false
+}
+
+func (d *decoder) UpdateCRC(prevCrc uint32) {
+ d.crc = crc.New(prevCrc, crcTable)
+}
+
+func (d *decoder) LastCRC() uint32 {
+ return d.crc.Sum32()
+}
+
+func (d *decoder) LastOffset() int64 { return d.lastValidOff }
+
+func MustUnmarshalEntry(d []byte) raftpb.Entry {
+ var e raftpb.Entry
+ pbutil.MustUnmarshal(&e, d)
+ return e
+}
+
+func MustUnmarshalState(d []byte) raftpb.HardState {
+ var s raftpb.HardState
+ pbutil.MustUnmarshal(&s, d)
+ return s
+}
+
+func readInt64(r io.Reader) (int64, error) {
+ var n int64
+ err := binary.Read(r, binary.LittleEndian, &n)
+ return n, err
+}
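
The 64-bit length field handled by decodeFrameSize packs the record size into the lower 56 bits and the padding count into the low 3 bits of the most significant byte, with the top bit set whenever padding is present. Below is a self-contained sketch of the round trip; the encoder side here is written to be consistent with decodeFrameSize above rather than copied from encoder.go.

	package main

	import "fmt"

	// encodeFrameSize packs a record length and its 8-byte-alignment padding
	// into one uint64, mirroring what decodeFrameSize expects.
	func encodeFrameSize(dataBytes int) (lenField uint64, padBytes int) {
		lenField = uint64(dataBytes)
		padBytes = (8 - (dataBytes % 8)) % 8
		if padBytes != 0 {
			lenField |= uint64(0x80|padBytes) << 56
		}
		return lenField, padBytes
	}

	// decodeFrameSize is the same logic as in decoder.go.
	func decodeFrameSize(lenField int64) (recBytes int64, padBytes int64) {
		recBytes = int64(uint64(lenField) & ^(uint64(0xff) << 56))
		if lenField < 0 {
			padBytes = int64((uint64(lenField) >> 56) & 0x7)
		}
		return recBytes, padBytes
	}

	func main() {
		// A 13-byte record needs 3 bytes of padding to stay 8-byte aligned.
		lenField, pad := encodeFrameSize(13)
		fmt.Printf("lenField=%#x pad=%d\n", lenField, pad) // lenField=0x830000000000000d pad=3

		rec, pad2 := decodeFrameSize(int64(lenField))
		fmt.Println(rec, pad2) // 13 3
	}
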
diff --git a/server/storage/wal/doc.go b/server/storage/wal/doc.go
new file mode 100644
index 00000000000..0f7ef8527be
--- /dev/null
+++ b/server/storage/wal/doc.go
@@ -0,0 +1,74 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package wal provides an implementation of write ahead log that is used by
+etcd.
+
+A WAL is created at a particular directory and is made up of a number of
+segmented WAL files. Inside each file the raft state and entries are appended
+to it with the Save method:
+
+ metadata := []byte{}
+ w, err := wal.Create(zap.NewExample(), "/var/lib/etcd", metadata)
+ ...
+ err := w.Save(s, ents)
+
+After saving a raft snapshot to disk, the SaveSnapshot method should be called to
+record it, so that the WAL can be matched with the saved snapshot when restarting.
+
+ err := w.SaveSnapshot(walpb.Snapshot{Index: 10, Term: 2})
+
+When a user has finished using a WAL it must be closed:
+
+ w.Close()
+
+Each WAL file is a stream of WAL records. A WAL record is a length field and a wal record
+protobuf. The record protobuf contains a CRC, a type, and a data payload. The length field is a
+64-bit packed structure holding the length of the remaining logical record data in its lower
+56 bits and its physical padding in the first three bits of the most significant byte. Each
+record is 8-byte aligned so that the length field is never torn. The CRC contains the CRC32
+value of all record protobufs preceding the current record.
+
+WAL files are placed inside the directory in the following format:
+$seq-$index.wal
+
+The first WAL file to be created will be 0000000000000000-0000000000000000.wal
+indicating an initial sequence of 0 and an initial raft index of 0. The first
+entry written to WAL MUST have raft index 0.
+
+WAL will cut its current tail wal file if its size exceeds 64 MB. This will increment an internal
+sequence number and cause a new file to be created. If the last raft index saved
+was 0x20 and this is the first time cut has been called on this WAL then the sequence will
+increment from 0x0 to 0x1. The new file will be: 0000000000000001-0000000000000021.wal.
+If a second cut happens after 0x10 more entries with sequentially increasing indexes are written, the
+new file will be called 0000000000000002-0000000000000031.wal.
+
+At a later time a WAL can be opened at a particular snapshot. If there is no
+snapshot, an empty snapshot should be passed in.
+
+ w, err := wal.Open(zap.NewExample(), "/var/lib/etcd", walpb.Snapshot{Index: 10, Term: 2})
+ ...
+
+The snapshot must have been written to the WAL.
+
+Additional items cannot be Saved to this WAL until all the items from the given
+snapshot to the end of the WAL are read first:
+
+ metadata, state, ents, err := w.ReadAll()
+
+This will give you the metadata, the last raft.State and the slice of
+raft.Entry items in the log.
+*/
+package wal
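
The $seq-$index.wal naming scheme described above can be reproduced with a single format string. The helper below is a hedged sketch that mirrors the unexported walName function added later in util.go and reproduces the file names used in the cut example.

	package main

	import "fmt"

	// walName mirrors the unexported helper in the wal package: both the
	// sequence number and the raft index are zero-padded 16-digit hex.
	func walName(seq, index uint64) string {
		return fmt.Sprintf("%016x-%016x.wal", seq, index)
	}

	func main() {
		fmt.Println(walName(0, 0))    // 0000000000000000-0000000000000000.wal
		fmt.Println(walName(1, 0x21)) // 0000000000000001-0000000000000021.wal
		fmt.Println(walName(2, 0x31)) // 0000000000000002-0000000000000031.wal
	}
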
diff --git a/server/wal/encoder.go b/server/storage/wal/encoder.go
similarity index 81%
rename from server/wal/encoder.go
rename to server/storage/wal/encoder.go
index 61b4c20efb1..5944ba7b120 100644
--- a/server/wal/encoder.go
+++ b/server/storage/wal/encoder.go
@@ -20,10 +20,11 @@ import (
"io"
"os"
"sync"
+ "time"
"go.etcd.io/etcd/pkg/v3/crc"
"go.etcd.io/etcd/pkg/v3/ioutil"
- "go.etcd.io/etcd/server/v3/wal/walpb"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
)
// walPageBytes is the alignment for flushing records to the backing Writer.
@@ -84,17 +85,9 @@ func (e *encoder) encode(rec *walpb.Record) error {
data = e.buf[:n]
}
- lenField, padBytes := encodeFrameSize(len(data))
- if err = writeUint64(e.bw, lenField, e.uint64buf); err != nil {
- return err
- }
+ data, lenField := prepareDataWithPadding(data)
- if padBytes != 0 {
- data = append(data, make([]byte, padBytes)...)
- }
- n, err = e.bw.Write(data)
- walWriteBytes.Add(float64(n))
- return err
+ return write(e.bw, e.uint64buf, data, lenField)
}
func encodeFrameSize(dataBytes int) (lenField uint64, padBytes int) {
@@ -109,16 +102,32 @@ func encodeFrameSize(dataBytes int) (lenField uint64, padBytes int) {
func (e *encoder) flush() error {
e.mu.Lock()
- n, err := e.bw.FlushN()
- e.mu.Unlock()
- walWriteBytes.Add(float64(n))
- return err
+ defer e.mu.Unlock()
+ return e.bw.Flush()
}
-func writeUint64(w io.Writer, n uint64, buf []byte) error {
- // http://golang.org/src/encoding/binary/binary.go
- binary.LittleEndian.PutUint64(buf, n)
- nv, err := w.Write(buf)
+func prepareDataWithPadding(data []byte) ([]byte, uint64) {
+ lenField, padBytes := encodeFrameSize(len(data))
+ if padBytes != 0 {
+ data = append(data, make([]byte, padBytes)...)
+ }
+ return data, lenField
+}
+
+func write(w io.Writer, uint64buf, data []byte, lenField uint64) error {
+ // write padding info
+ binary.LittleEndian.PutUint64(uint64buf, lenField)
+
+ start := time.Now()
+ nv, err := w.Write(uint64buf)
walWriteBytes.Add(float64(nv))
+ if err != nil {
+ return err
+ }
+
+ // write the record with padding
+ n, err := w.Write(data)
+ walWriteSec.Observe(time.Since(start).Seconds())
+ walWriteBytes.Add(float64(n))
return err
}
diff --git a/server/wal/file_pipeline.go b/server/storage/wal/file_pipeline.go
similarity index 94%
rename from server/wal/file_pipeline.go
rename to server/storage/wal/file_pipeline.go
index c2feba6c1dc..c8ee4cce429 100644
--- a/server/wal/file_pipeline.go
+++ b/server/storage/wal/file_pipeline.go
@@ -19,9 +19,9 @@ import (
"os"
"path/filepath"
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
-
"go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
)
// filePipeline pipelines allocating disk space
@@ -58,6 +58,7 @@ func newFilePipeline(lg *zap.Logger, dir string, fileSize int64) *filePipeline {
// Open returns a fresh file for writing. Rename the file before calling
// Open again or there will be file collisions.
+// It will block if the tmp file lock is already taken.
func (fp *filePipeline) Open() (f *fileutil.LockedFile, err error) {
select {
case f = <-fp.filec:
@@ -74,7 +75,7 @@ func (fp *filePipeline) Close() error {
func (fp *filePipeline) alloc() (f *fileutil.LockedFile, err error) {
// count % 2 so this file isn't the same as the one last published
fpath := filepath.Join(fp.dir, fmt.Sprintf("%d.tmp", fp.count%2))
- if f, err = fileutil.LockFile(fpath, os.O_CREATE|os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
+ if f, err = createNewWALFile[*fileutil.LockedFile](fpath, false); err != nil {
return nil, err
}
if err = fileutil.Preallocate(f.File, fp.size, true); err != nil {
diff --git a/server/storage/wal/file_pipeline_test.go b/server/storage/wal/file_pipeline_test.go
new file mode 100644
index 00000000000..bb59270bf3a
--- /dev/null
+++ b/server/storage/wal/file_pipeline_test.go
@@ -0,0 +1,47 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "math"
+ "testing"
+
+ "go.uber.org/zap/zaptest"
+)
+
+func TestFilePipeline(t *testing.T) {
+ tdir := t.TempDir()
+
+ fp := newFilePipeline(zaptest.NewLogger(t), tdir, SegmentSizeBytes)
+ defer fp.Close()
+
+ f, ferr := fp.Open()
+ if ferr != nil {
+ t.Fatal(ferr)
+ }
+ f.Close()
+}
+
+func TestFilePipelineFailPreallocate(t *testing.T) {
+ tdir := t.TempDir()
+
+ fp := newFilePipeline(zaptest.NewLogger(t), tdir, math.MaxInt64)
+ defer fp.Close()
+
+ f, ferr := fp.Open()
+ if f != nil || ferr == nil { // no space left on device
+ t.Fatal("expected error on invalid pre-allocate size, but no error")
+ }
+}
diff --git a/server/storage/wal/metrics.go b/server/storage/wal/metrics.go
new file mode 100644
index 00000000000..6f09deb51f6
--- /dev/null
+++ b/server/storage/wal/metrics.go
@@ -0,0 +1,52 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ walFsyncSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "wal_fsync_duration_seconds",
+ Help: "The latency distributions of fsync called by WAL.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ walWriteSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "wal_write_duration_seconds",
+ Help: "The latency distributions of write called by WAL.",
+
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ walWriteBytes = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "wal_write_bytes_total",
+ Help: "Total number of bytes written in WAL.",
+ })
+)
+
+func init() {
+ prometheus.MustRegister(walFsyncSec)
+ prometheus.MustRegister(walWriteSec)
+ prometheus.MustRegister(walWriteBytes)
+}
diff --git a/server/storage/wal/record_test.go b/server/storage/wal/record_test.go
new file mode 100644
index 00000000000..85ceebed9c1
--- /dev/null
+++ b/server/storage/wal/record_test.go
@@ -0,0 +1,107 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "bytes"
+ "errors"
+ "hash/crc32"
+ "io"
+ "os"
+ "reflect"
+ "testing"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+)
+
+var (
+ infoData = []byte("\b\xef\xfd\x02")
+ infoRecord = append([]byte("\x0e\x00\x00\x00\x00\x00\x00\x00\b\x01\x10\x99\xb5\xe4\xd0\x03\x1a\x04"), infoData...)
+)
+
+func TestReadRecord(t *testing.T) {
+ badInfoRecord := make([]byte, len(infoRecord))
+ copy(badInfoRecord, infoRecord)
+ badInfoRecord[len(badInfoRecord)-1] = 'a'
+
+ tests := []struct {
+ data []byte
+ wr *walpb.Record
+ we error
+ }{
+ {infoRecord, &walpb.Record{Type: 1, Crc: crc32.Checksum(infoData, crcTable), Data: infoData}, nil},
+ {[]byte(""), &walpb.Record{}, io.EOF},
+ {infoRecord[:14], &walpb.Record{}, io.ErrUnexpectedEOF},
+ {infoRecord[:len(infoRecord)-len(infoData)], &walpb.Record{}, io.ErrUnexpectedEOF},
+ {infoRecord[:len(infoRecord)-8], &walpb.Record{}, io.ErrUnexpectedEOF},
+ {badInfoRecord, &walpb.Record{}, walpb.ErrCRCMismatch},
+ }
+
+ rec := &walpb.Record{}
+ for i, tt := range tests {
+ buf := bytes.NewBuffer(tt.data)
+ f, err := createFileWithData(t, buf)
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ decoder := NewDecoder(fileutil.NewFileReader(f))
+ e := decoder.Decode(rec)
+ if !reflect.DeepEqual(rec, tt.wr) {
+ t.Errorf("#%d: block = %v, want %v", i, rec, tt.wr)
+ }
+ if !errors.Is(e, tt.we) {
+ t.Errorf("#%d: err = %v, want %v", i, e, tt.we)
+ }
+ rec = &walpb.Record{}
+ }
+}
+
+func TestWriteRecord(t *testing.T) {
+ b := &walpb.Record{}
+ typ := int64(0xABCD)
+ d := []byte("Hello world!")
+ buf := new(bytes.Buffer)
+ e := newEncoder(buf, 0, 0)
+ e.encode(&walpb.Record{Type: typ, Data: d})
+ e.flush()
+ f, err := createFileWithData(t, buf)
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ decoder := NewDecoder(fileutil.NewFileReader(f))
+ err = decoder.Decode(b)
+ if err != nil {
+ t.Errorf("err = %v, want nil", err)
+ }
+ if b.Type != typ {
+ t.Errorf("type = %d, want %d", b.Type, typ)
+ }
+ if !reflect.DeepEqual(b.Data, d) {
+ t.Errorf("data = %v, want %v", b.Data, d)
+ }
+}
+
+func createFileWithData(t *testing.T, bf *bytes.Buffer) (*os.File, error) {
+ f, err := os.CreateTemp(t.TempDir(), "wal")
+ if err != nil {
+ return nil, err
+ }
+ if _, err := f.Write(bf.Bytes()); err != nil {
+ return nil, err
+ }
+ f.Seek(0, 0)
+ return f, nil
+}
diff --git a/server/wal/repair.go b/server/storage/wal/repair.go
similarity index 80%
rename from server/wal/repair.go
rename to server/storage/wal/repair.go
index 122ee49a6a4..16277540f34 100644
--- a/server/wal/repair.go
+++ b/server/storage/wal/repair.go
@@ -15,14 +15,16 @@
package wal
import (
+ "errors"
"io"
"os"
"path/filepath"
"time"
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
- "go.etcd.io/etcd/server/v3/wal/walpb"
"go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
)
// Repair tries to repair ErrUnexpectedEOF in the
@@ -40,33 +42,34 @@ func Repair(lg *zap.Logger, dirpath string) bool {
lg.Info("repairing", zap.String("path", f.Name()))
rec := &walpb.Record{}
- decoder := newDecoder(f)
+ decoder := NewDecoder(fileutil.NewFileReader(f.File))
for {
- lastOffset := decoder.lastOffset()
- err := decoder.decode(rec)
- switch err {
- case nil:
+ lastOffset := decoder.LastOffset()
+ err := decoder.Decode(rec)
+ switch {
+ case err == nil:
// update crc of the decoder when necessary
switch rec.Type {
- case crcType:
- crc := decoder.crc.Sum32()
+ case CrcType:
+ crc := decoder.LastCRC()
// current crc of decoder must match the crc of the record.
// do no need to match 0 crc, since the decoder is a new one at this case.
if crc != 0 && rec.Validate(crc) != nil {
return false
}
- decoder.updateCRC(rec.Crc)
+ decoder.UpdateCRC(rec.Crc)
}
continue
- case io.EOF:
+ case errors.Is(err, io.EOF):
lg.Info("repaired", zap.String("path", f.Name()), zap.Error(io.EOF))
return true
- case io.ErrUnexpectedEOF:
- bf, bferr := os.Create(f.Name() + ".broken")
+ case errors.Is(err, io.ErrUnexpectedEOF):
+ brokenName := f.Name() + ".broken"
+ bf, bferr := createNewWALFile[*os.File](brokenName, true)
if bferr != nil {
- lg.Warn("failed to create backup file", zap.String("path", f.Name()+".broken"), zap.Error(bferr))
+ lg.Warn("failed to create backup file", zap.String("path", brokenName), zap.Error(bferr))
return false
}
defer bf.Close()
@@ -77,7 +80,7 @@ func Repair(lg *zap.Logger, dirpath string) bool {
}
if _, err = io.Copy(bf, f); err != nil {
- lg.Warn("failed to copy", zap.String("from", f.Name()+".broken"), zap.String("to", f.Name()), zap.Error(err))
+ lg.Warn("failed to copy", zap.String("from", f.Name()), zap.String("to", brokenName), zap.Error(err))
return false
}
diff --git a/server/storage/wal/repair_test.go b/server/storage/wal/repair_test.go
new file mode 100644
index 00000000000..585a7c3bd66
--- /dev/null
+++ b/server/storage/wal/repair_test.go
@@ -0,0 +1,215 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+type corruptFunc func(string, int64) error
+
+// TestRepairTruncate ensures a truncated file can be repaired
+func TestRepairTruncate(t *testing.T) {
+ corruptf := func(p string, offset int64) error {
+ f, err := openLast(zaptest.NewLogger(t), p)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ return f.Truncate(offset - 4)
+ }
+
+ testRepair(t, makeEnts(10), corruptf, 9)
+}
+
+func testRepair(t *testing.T, ents [][]raftpb.Entry, corrupt corruptFunc, expectedEnts int) {
+ lg := zaptest.NewLogger(t)
+ p := t.TempDir()
+
+ // create WAL
+ w, err := Create(lg, p, nil)
+ defer func() {
+ // The Close might fail.
+ _ = w.Close()
+ }()
+ require.NoError(t, err)
+
+ for _, es := range ents {
+ require.NoError(t, w.Save(raftpb.HardState{}, es))
+ }
+
+ offset, err := w.tail().Seek(0, io.SeekCurrent)
+ require.NoError(t, err)
+ require.NoError(t, w.Close())
+
+ require.NoError(t, corrupt(p, offset))
+
+ // verify we broke the wal
+ w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{})
+ require.NoError(t, err)
+
+ _, _, _, err = w.ReadAll()
+ require.ErrorIs(t, err, io.ErrUnexpectedEOF)
+ require.NoError(t, w.Close())
+
+ // repair the wal
+ require.True(t, Repair(lg, p))
+
+ // verify the broken wal has correct permissions
+ bf := filepath.Join(p, filepath.Base(w.tail().Name())+".broken")
+ fi, err := os.Stat(bf)
+ require.NoError(t, err)
+ expectedPerms := fmt.Sprintf("%o", os.FileMode(fileutil.PrivateFileMode))
+ actualPerms := fmt.Sprintf("%o", fi.Mode().Perm())
+ require.Equalf(t, expectedPerms, actualPerms, "unexpected file permissions on .broken wal")
+
+ // read it back
+ w, err = Open(lg, p, walpb.Snapshot{})
+ require.NoError(t, err)
+
+ _, _, walEnts, err := w.ReadAll()
+ require.NoError(t, err)
+ assert.Len(t, walEnts, expectedEnts)
+
+ // write some more entries to repaired log
+ for i := 1; i <= 10; i++ {
+ es := []raftpb.Entry{{Index: uint64(expectedEnts + i)}}
+ require.NoError(t, w.Save(raftpb.HardState{}, es))
+ }
+ require.NoError(t, w.Close())
+
+ // read back entries following repair, ensure it's all there
+ w, err = Open(lg, p, walpb.Snapshot{})
+ require.NoError(t, err)
+ _, _, walEnts, err = w.ReadAll()
+ require.NoError(t, err)
+ assert.Len(t, walEnts, expectedEnts+10)
+}
+
+func makeEnts(ents int) (ret [][]raftpb.Entry) {
+ for i := 1; i <= ents; i++ {
+ ret = append(ret, []raftpb.Entry{{Index: uint64(i)}})
+ }
+ return ret
+}
+
+// TestRepairWriteTearLast repairs the WAL in case the last record is a torn write
+// that straddled two sectors.
+func TestRepairWriteTearLast(t *testing.T) {
+ corruptf := func(p string, offset int64) error {
+ f, err := openLast(zaptest.NewLogger(t), p)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ // 512 bytes perfectly aligns the last record, so use 1024
+ if offset < 1024 {
+ return fmt.Errorf("got offset %d, expected >1024", offset)
+ }
+ if terr := f.Truncate(1024); terr != nil {
+ return terr
+ }
+ return f.Truncate(offset)
+ }
+ testRepair(t, makeEnts(50), corruptf, 40)
+}
+
+// TestRepairWriteTearMiddle repairs the WAL when there is write tearing
+// in the middle of a record.
+func TestRepairWriteTearMiddle(t *testing.T) {
+ corruptf := func(p string, offset int64) error {
+ f, err := openLast(zaptest.NewLogger(t), p)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ // corrupt middle of 2nd record
+ _, werr := f.WriteAt(make([]byte, 512), 4096+512)
+ return werr
+ }
+ ents := makeEnts(5)
+ // 4096 bytes of data so a middle sector is easy to corrupt
+ dat := make([]byte, 4096)
+ for i := range dat {
+ dat[i] = byte(i)
+ }
+ for i := range ents {
+ ents[i][0].Data = dat
+ }
+ testRepair(t, ents, corruptf, 1)
+}
+
+func TestRepairFailDeleteDir(t *testing.T) {
+ p := t.TempDir()
+
+ w, err := Create(zaptest.NewLogger(t), p, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ oldSegmentSizeBytes := SegmentSizeBytes
+ SegmentSizeBytes = 64
+ defer func() {
+ SegmentSizeBytes = oldSegmentSizeBytes
+ }()
+ for _, es := range makeEnts(50) {
+ if err = w.Save(raftpb.HardState{}, es); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ _, serr := w.tail().Seek(0, io.SeekCurrent)
+ if serr != nil {
+ t.Fatal(serr)
+ }
+ w.Close()
+
+ f, err := openLast(zaptest.NewLogger(t), p)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if terr := f.Truncate(20); terr != nil {
+ t.Fatal(err)
+ }
+ f.Close()
+
+ w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, _, _, err = w.ReadAll()
+ if !errors.Is(err, io.ErrUnexpectedEOF) {
+ t.Fatalf("err = %v, want error %v", err, io.ErrUnexpectedEOF)
+ }
+ w.Close()
+
+ os.RemoveAll(p)
+ if Repair(zaptest.NewLogger(t), p) {
+ t.Fatal("expect 'Repair' fail on unexpected directory deletion")
+ }
+}
diff --git a/server/storage/wal/testing/waltesting.go b/server/storage/wal/testing/waltesting.go
new file mode 100644
index 00000000000..bd1dbaade63
--- /dev/null
+++ b/server/storage/wal/testing/waltesting.go
@@ -0,0 +1,92 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testing
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ "go.etcd.io/etcd/server/v3/storage/wal"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+func NewTmpWAL(t testing.TB, reqs []etcdserverpb.InternalRaftRequest) (*wal.WAL, string) {
+ t.Helper()
+ dir, err := os.MkdirTemp(t.TempDir(), "etcd_wal_test")
+ if err != nil {
+ panic(err)
+ }
+ tmpPath := filepath.Join(dir, "wal")
+ lg := zaptest.NewLogger(t)
+ w, err := wal.Create(lg, tmpPath, nil)
+ if err != nil {
+ t.Fatalf("Failed to create WAL: %v", err)
+ }
+ err = w.Close()
+ if err != nil {
+ t.Fatalf("Failed to close WAL: %v", err)
+ }
+ if len(reqs) != 0 {
+ w, err = wal.Open(lg, tmpPath, walpb.Snapshot{})
+ if err != nil {
+ t.Fatalf("Failed to open WAL: %v", err)
+ }
+
+ var state raftpb.HardState
+ _, state, _, err = w.ReadAll()
+ if err != nil {
+ t.Fatalf("Failed to read WAL: %v", err)
+ }
+ var entries []raftpb.Entry
+ for _, req := range reqs {
+ entries = append(entries, raftpb.Entry{
+ Term: 1,
+ Index: 1,
+ Type: raftpb.EntryNormal,
+ Data: pbutil.MustMarshal(&req),
+ })
+ }
+ err = w.Save(state, entries)
+ if err != nil {
+ t.Fatalf("Failed to save WAL: %v", err)
+ }
+ err = w.Close()
+ if err != nil {
+ t.Fatalf("Failed to close WAL: %v", err)
+ }
+ }
+
+ w, err = wal.OpenForRead(lg, tmpPath, walpb.Snapshot{})
+ if err != nil {
+ t.Fatalf("Failed to open WAL: %v", err)
+ }
+ return w, tmpPath
+}
+
+func Reopen(t testing.TB, walPath string) *wal.WAL {
+ t.Helper()
+ lg := zaptest.NewLogger(t)
+ w, err := wal.OpenForRead(lg, walPath, walpb.Snapshot{})
+ if err != nil {
+ t.Fatalf("Failed to open WAL: %v", err)
+ }
+ return w
+}
diff --git a/server/storage/wal/util.go b/server/storage/wal/util.go
new file mode 100644
index 00000000000..8dec85c15d5
--- /dev/null
+++ b/server/storage/wal/util.go
@@ -0,0 +1,112 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+)
+
+var errBadWALName = errors.New("bad wal name")
+
+// Exist returns true if there are any files in a given directory.
+func Exist(dir string) bool {
+ names, err := fileutil.ReadDir(dir, fileutil.WithExt(".wal"))
+ if err != nil {
+ return false
+ }
+ return len(names) != 0
+}
+
+// searchIndex returns the last array index of names whose raft index section is
+// equal to or smaller than the given index.
+// The given names MUST be sorted.
+func searchIndex(lg *zap.Logger, names []string, index uint64) (int, bool) {
+ for i := len(names) - 1; i >= 0; i-- {
+ name := names[i]
+ _, curIndex, err := parseWALName(name)
+ if err != nil {
+ lg.Panic("failed to parse WAL file name", zap.String("path", name), zap.Error(err))
+ }
+ if index >= curIndex {
+ return i, true
+ }
+ }
+ return -1, false
+}
+
+// names should have been sorted based on sequence number.
+// isValidSeq checks whether seq increases continuously.
+func isValidSeq(lg *zap.Logger, names []string) bool {
+ var lastSeq uint64
+ for _, name := range names {
+ curSeq, _, err := parseWALName(name)
+ if err != nil {
+ lg.Panic("failed to parse WAL file name", zap.String("path", name), zap.Error(err))
+ }
+ if lastSeq != 0 && lastSeq != curSeq-1 {
+ return false
+ }
+ lastSeq = curSeq
+ }
+ return true
+}
+
+func readWALNames(lg *zap.Logger, dirpath string) ([]string, error) {
+ names, err := fileutil.ReadDir(dirpath)
+ if err != nil {
+ return nil, fmt.Errorf("[readWALNames] fileutil.ReadDir failed: %w", err)
+ }
+ wnames := checkWALNames(lg, names)
+ if len(wnames) == 0 {
+ return nil, ErrFileNotFound
+ }
+ return wnames, nil
+}
+
+func checkWALNames(lg *zap.Logger, names []string) []string {
+ wnames := make([]string, 0)
+ for _, name := range names {
+ if _, _, err := parseWALName(name); err != nil {
+ // don't complain about left over tmp files
+ if !strings.HasSuffix(name, ".tmp") {
+ lg.Warn(
+ "ignored file in WAL directory",
+ zap.String("path", name),
+ )
+ }
+ continue
+ }
+ wnames = append(wnames, name)
+ }
+ return wnames
+}
+
+func parseWALName(str string) (seq, index uint64, err error) {
+ if !strings.HasSuffix(str, ".wal") {
+ return 0, 0, errBadWALName
+ }
+ _, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index)
+ return seq, index, err
+}
+
+func walName(seq, index uint64) string {
+ return fmt.Sprintf("%016x-%016x.wal", seq, index)
+}
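
searchIndex walks the sorted file names backwards and picks the last file whose starting raft index is not greater than the requested index. A hedged standalone illustration follows; the file names are invented and the parsing mirrors parseWALName above. A snapshot at index 0x15 selects the second file, since 0x10 <= 0x15 < 0x20.

	package main

	import "fmt"

	func main() {
		names := []string{
			"0000000000000000-0000000000000000.wal",
			"0000000000000001-0000000000000010.wal",
			"0000000000000002-0000000000000020.wal",
		}
		target := uint64(0x15)

		pick := -1
		// Walk backwards, exactly like searchIndex, and stop at the first file
		// whose starting index is <= target.
		for i := len(names) - 1; i >= 0; i-- {
			var seq, index uint64
			if _, err := fmt.Sscanf(names[i], "%016x-%016x.wal", &seq, &index); err != nil {
				panic(err)
			}
			if target >= index {
				pick = i
				break
			}
		}
		fmt.Println(pick) // 1
	}
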
diff --git a/server/storage/wal/version.go b/server/storage/wal/version.go
new file mode 100644
index 00000000000..98592650303
--- /dev/null
+++ b/server/storage/wal/version.go
@@ -0,0 +1,282 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/types/descriptorpb"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+// ReadWALVersion reads the remaining entries from the opened WAL and returns a struct
+// that implements the schema.WAL interface.
+func ReadWALVersion(w *WAL) (*walVersion, error) {
+ _, _, ents, err := w.ReadAll()
+ if err != nil {
+ return nil, err
+ }
+ return &walVersion{entries: ents}, nil
+}
+
+type walVersion struct {
+ entries []raftpb.Entry
+}
+
+// MinimalEtcdVersion returns the minimal etcd version able to interpret the entries from the WAL log.
+func (w *walVersion) MinimalEtcdVersion() *semver.Version {
+ return MinimalEtcdVersion(w.entries)
+}
+
+// MinimalEtcdVersion returns the minimal etcd version able to interpret the given entries,
+// determined by looking at the entries since the last snapshot and returning the highest
+// etcd version annotation among the messages, fields, enums and enum values used.
+func MinimalEtcdVersion(ents []raftpb.Entry) *semver.Version {
+ var maxVer *semver.Version
+ for _, ent := range ents {
+ err := visitEntry(ent, func(path protoreflect.FullName, ver *semver.Version) error {
+ maxVer = maxVersion(maxVer, ver)
+ return nil
+ })
+ if err != nil {
+ panic(err)
+ }
+ }
+ return maxVer
+}
+
+type Visitor func(path protoreflect.FullName, ver *semver.Version) error
+
+// VisitFileDescriptor calls the visitor on each field and enum value, passing the etcd version read from the proto definition.
+// If a field or enum value is not annotated, the visitor is called with a nil version.
+// Upon encountering an invalid annotation, it immediately returns an error.
+func VisitFileDescriptor(file protoreflect.FileDescriptor, visitor Visitor) error {
+ msgs := file.Messages()
+ for i := 0; i < msgs.Len(); i++ {
+ err := visitMessageDescriptor(msgs.Get(i), visitor)
+ if err != nil {
+ return err
+ }
+ }
+ enums := file.Enums()
+ for i := 0; i < enums.Len(); i++ {
+ err := visitEnumDescriptor(enums.Get(i), visitor)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func visitEntry(ent raftpb.Entry, visitor Visitor) error {
+ err := visitMessage(proto.MessageReflect(&ent), visitor)
+ if err != nil {
+ return err
+ }
+ return visitEntryData(ent.Type, ent.Data, visitor)
+}
+
+func visitEntryData(entryType raftpb.EntryType, data []byte, visitor Visitor) error {
+ var msg protoreflect.Message
+ switch entryType {
+ case raftpb.EntryNormal:
+ var raftReq etcdserverpb.InternalRaftRequest
+ if err := pbutil.Unmarshaler(&raftReq).Unmarshal(data); err != nil {
+ // try V2 Request
+ var r etcdserverpb.Request
+ if pbutil.Unmarshaler(&r).Unmarshal(data) != nil {
+ // return original error
+ return err
+ }
+ msg = proto.MessageReflect(&r)
+ break
+ }
+ msg = proto.MessageReflect(&raftReq)
+ if raftReq.ClusterVersionSet != nil {
+ ver, err := semver.NewVersion(raftReq.ClusterVersionSet.Ver)
+ if err != nil {
+ return err
+ }
+ err = visitor(msg.Descriptor().FullName(), ver)
+ if err != nil {
+ return err
+ }
+ }
+ case raftpb.EntryConfChange:
+ var confChange raftpb.ConfChange
+ err := pbutil.Unmarshaler(&confChange).Unmarshal(data)
+ if err != nil {
+ return nil
+ }
+ msg = proto.MessageReflect(&confChange)
+ return visitor(msg.Descriptor().FullName(), &version.V3_0)
+ case raftpb.EntryConfChangeV2:
+ var confChange raftpb.ConfChangeV2
+ err := pbutil.Unmarshaler(&confChange).Unmarshal(data)
+ if err != nil {
+ return nil
+ }
+ msg = proto.MessageReflect(&confChange)
+ return visitor(msg.Descriptor().FullName(), &version.V3_4)
+ default:
+ panic("unhandled")
+ }
+ return visitMessage(msg, visitor)
+}
+
+func visitMessageDescriptor(md protoreflect.MessageDescriptor, visitor Visitor) error {
+ err := visitDescriptor(md, visitor)
+ if err != nil {
+ return err
+ }
+ fields := md.Fields()
+ for i := 0; i < fields.Len(); i++ {
+ fd := fields.Get(i)
+ err = visitDescriptor(fd, visitor)
+ if err != nil {
+ return err
+ }
+ }
+
+ enums := md.Enums()
+ for i := 0; i < enums.Len(); i++ {
+ err = visitEnumDescriptor(enums.Get(i), visitor)
+ if err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+func visitMessage(m protoreflect.Message, visitor Visitor) error {
+ md := m.Descriptor()
+ err := visitDescriptor(md, visitor)
+ if err != nil {
+ return err
+ }
+ m.Range(func(field protoreflect.FieldDescriptor, value protoreflect.Value) bool {
+ fd := md.Fields().Get(field.Index())
+ err = visitDescriptor(fd, visitor)
+ if err != nil {
+ return false
+ }
+
+ switch m := value.Interface().(type) {
+ case protoreflect.Message:
+ err = visitMessage(m, visitor)
+ case protoreflect.EnumNumber:
+ err = visitEnumNumber(fd.Enum(), m, visitor)
+ }
+ return err == nil
+ })
+ return err
+}
+
+func visitEnumDescriptor(enum protoreflect.EnumDescriptor, visitor Visitor) error {
+ err := visitDescriptor(enum, visitor)
+ if err != nil {
+ return err
+ }
+ fields := enum.Values()
+ for i := 0; i < fields.Len(); i++ {
+ fd := fields.Get(i)
+ err = visitDescriptor(fd, visitor)
+ if err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+func visitEnumNumber(enum protoreflect.EnumDescriptor, number protoreflect.EnumNumber, visitor Visitor) error {
+ err := visitDescriptor(enum, visitor)
+ if err != nil {
+ return err
+ }
+ intNumber := int(number)
+ fields := enum.Values()
+ if intNumber >= fields.Len() || intNumber < 0 {
+ return fmt.Errorf("could not visit EnumNumber [%d]", intNumber)
+ }
+ return visitEnumValue(fields.Get(intNumber), visitor)
+}
+
+func visitEnumValue(enum protoreflect.EnumValueDescriptor, visitor Visitor) error {
+ valueOpts := enum.Options().(*descriptorpb.EnumValueOptions)
+ if valueOpts != nil {
+ ver, _ := etcdVersionFromOptionsString(valueOpts.String())
+ err := visitor(enum.FullName(), ver)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func visitDescriptor(md protoreflect.Descriptor, visitor Visitor) error {
+ opts, ok := md.Options().(fmt.Stringer)
+ if !ok {
+ return nil
+ }
+ ver, err := etcdVersionFromOptionsString(opts.String())
+ if err != nil {
+ return fmt.Errorf("%s: %w", md.FullName(), err)
+ }
+ return visitor(md.FullName(), ver)
+}
+
+func maxVersion(a *semver.Version, b *semver.Version) *semver.Version {
+ if a != nil && (b == nil || b.LessThan(*a)) {
+ return a
+ }
+ return b
+}
+
+func etcdVersionFromOptionsString(opts string) (*semver.Version, error) {
+ // TODO: Use proto.GetExtension when gogo/protobuf is usable with protoreflect
+ msgs := []string{"[versionpb.etcd_version_msg]:", "[versionpb.etcd_version_field]:", "[versionpb.etcd_version_enum]:", "[versionpb.etcd_version_enum_value]:"}
+ var end, index int
+ for _, msg := range msgs {
+ index = strings.Index(opts, msg)
+ end = index + len(msg)
+ if index != -1 {
+ break
+ }
+ }
+ if index == -1 {
+ return nil, nil
+ }
+ var verStr string
+ _, err := fmt.Sscanf(opts[end:], "%q", &verStr)
+ if err != nil {
+ return nil, err
+ }
+ if strings.Count(verStr, ".") == 1 {
+ verStr = verStr + ".0"
+ }
+ ver, err := semver.NewVersion(verStr)
+ if err != nil {
+ return nil, err
+ }
+ return ver, nil
+}
diff --git a/server/storage/wal/version_test.go b/server/storage/wal/version_test.go
new file mode 100644
index 00000000000..8e7e83897ec
--- /dev/null
+++ b/server/storage/wal/version_test.go
@@ -0,0 +1,288 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/golang/protobuf/proto"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/membershippb"
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+func TestEtcdVersionFromEntry(t *testing.T) {
+ raftReq := etcdserverpb.InternalRaftRequest{Header: &etcdserverpb.RequestHeader{AuthRevision: 1}}
+ normalRequestData := pbutil.MustMarshal(&raftReq)
+
+ clusterVersionV3_6Req := etcdserverpb.InternalRaftRequest{ClusterVersionSet: &membershippb.ClusterVersionSetRequest{Ver: "3.6.0"}}
+ clusterVersionV3_6Data := pbutil.MustMarshal(&clusterVersionV3_6Req)
+
+ confChange := raftpb.ConfChange{Type: raftpb.ConfChangeAddLearnerNode}
+ confChangeData := pbutil.MustMarshal(&confChange)
+
+ confChangeV2 := raftpb.ConfChangeV2{Transition: raftpb.ConfChangeTransitionJointExplicit}
+ confChangeV2Data := pbutil.MustMarshal(&confChangeV2)
+
+ tcs := []struct {
+ name string
+ input raftpb.Entry
+ expect *semver.Version
+ }{
+ {
+ name: "Using RequestHeader AuthRevision in NormalEntry implies v3.1",
+ input: raftpb.Entry{
+ Term: 1,
+ Index: 2,
+ Type: raftpb.EntryNormal,
+ Data: normalRequestData,
+ },
+ expect: &version.V3_1,
+ },
+ {
+ name: "Setting cluster version implies version within",
+ input: raftpb.Entry{
+ Term: 1,
+ Index: 2,
+ Type: raftpb.EntryNormal,
+ Data: clusterVersionV3_6Data,
+ },
+ expect: &version.V3_6,
+ },
+ {
+ name: "Using ConfigChange implies v3.0",
+ input: raftpb.Entry{
+ Term: 1,
+ Index: 2,
+ Type: raftpb.EntryConfChange,
+ Data: confChangeData,
+ },
+ expect: &version.V3_0,
+ },
+ {
+ name: "Using ConfigChangeV2 implies v3.4",
+ input: raftpb.Entry{
+ Term: 1,
+ Index: 2,
+ Type: raftpb.EntryConfChangeV2,
+ Data: confChangeV2Data,
+ },
+ expect: &version.V3_4,
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ var maxVer *semver.Version
+ err := visitEntry(tc.input, func(path protoreflect.FullName, ver *semver.Version) error {
+ maxVer = maxVersion(maxVer, ver)
+ return nil
+ })
+ require.NoError(t, err)
+ assert.Equal(t, tc.expect, maxVer)
+ })
+ }
+}
+
+func TestEtcdVersionFromMessage(t *testing.T) {
+ tcs := []struct {
+ name string
+ input proto.Message
+ expect *semver.Version
+ }{
+ {
+ name: "Empty RequestHeader implies v3.0",
+ input: &etcdserverpb.RequestHeader{},
+ expect: &version.V3_0,
+ },
+ {
+ name: "RequestHeader AuthRevision field set implies v3.1",
+ input: &etcdserverpb.RequestHeader{AuthRevision: 1},
+ expect: &version.V3_1,
+ },
+ {
+ name: "RequestHeader Username set implies v3.0",
+ input: &etcdserverpb.RequestHeader{Username: "Alice"},
+ expect: &version.V3_0,
+ },
+ {
+ name: "When two fields are set take higher version",
+ input: &etcdserverpb.RequestHeader{AuthRevision: 1, Username: "Alice"},
+ expect: &version.V3_1,
+ },
+ {
+ name: "Setting a RequestHeader AuthRevision in subfield implies v3.1",
+ input: &etcdserverpb.InternalRaftRequest{Header: &etcdserverpb.RequestHeader{AuthRevision: 1}},
+ expect: &version.V3_1,
+ },
+ {
+ name: "Setting a DowngradeInfoSetRequest implies v3.5",
+ input: &etcdserverpb.InternalRaftRequest{DowngradeInfoSet: &membershippb.DowngradeInfoSetRequest{}},
+ expect: &version.V3_5,
+ },
+ {
+ name: "Enum CompareResult set to EQUAL implies v3.0",
+ input: &etcdserverpb.Compare{Result: etcdserverpb.Compare_EQUAL},
+ expect: &version.V3_0,
+ },
+ {
+ name: "Enum CompareResult set to NOT_EQUAL implies v3.1",
+ input: &etcdserverpb.Compare{Result: etcdserverpb.Compare_NOT_EQUAL},
+ expect: &version.V3_1,
+ },
+ {
+ name: "Oneof Compare version set implies v3.1",
+ input: &etcdserverpb.Compare{TargetUnion: &etcdserverpb.Compare_Version{}},
+ expect: &version.V3_0,
+ },
+ {
+ name: "Oneof Compare lease set implies v3.3",
+ input: &etcdserverpb.Compare{TargetUnion: &etcdserverpb.Compare_Lease{}},
+ expect: &version.V3_3,
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ var maxVer *semver.Version
+ err := visitMessage(proto.MessageReflect(tc.input), func(path protoreflect.FullName, ver *semver.Version) error {
+ maxVer = maxVersion(maxVer, ver)
+ return nil
+ })
+ require.NoError(t, err)
+ assert.Equal(t, tc.expect, maxVer)
+ })
+ }
+}
+
+func TestEtcdVersionFromFieldOptionsString(t *testing.T) {
+ tcs := []struct {
+ input string
+ expect *semver.Version
+ }{
+ {
+ input: "65001:0",
+ },
+ {
+ input: `65001:0 65004:"NodeID"`,
+ },
+ {
+ input: `[versionpb.XXX]:"3.5"`,
+ },
+ {
+ input: `[versionpb.etcd_version_msg]:"3.5"`,
+ expect: &version.V3_5,
+ },
+ {
+ input: `[versionpb.etcd_version_enum]:"3.5"`,
+ expect: &version.V3_5,
+ },
+ {
+ input: `[versionpb.etcd_version_field]:"3.5"`,
+ expect: &version.V3_5,
+ },
+ {
+ input: `[versionpb.etcd_version_enum_value]:"3.5"`,
+ expect: &version.V3_5,
+ },
+ {
+ input: `65001:0 [versionpb.etcd_version_msg]:"3.5"`,
+ expect: &version.V3_5,
+ },
+ {
+ input: `65004:"NodeID" [versionpb.etcd_version_msg]:"3.5"`,
+ expect: &version.V3_5,
+ },
+ {
+ input: `65004:"NodeID" [versionpb.etcd_version_enum]:"3.5"`,
+ expect: &version.V3_5,
+ },
+ {
+ input: `[versionpb.other_field]:"NodeID" [versionpb.etcd_version_msg]:"3.5"`,
+ expect: &version.V3_5,
+ },
+ {
+ input: `[versionpb.etcd_version_msg]:"3.5" 65001:0`,
+ expect: &version.V3_5,
+ },
+ {
+ input: `[versionpb.etcd_version_msg]:"3.5" 65004:"NodeID"`,
+ expect: &version.V3_5,
+ },
+ {
+ input: `[versionpb.etcd_version_msg]:"3.5" [versionpb.other_field]:"NodeID"`,
+ expect: &version.V3_5,
+ },
+ {
+ input: `[versionpb.other_field]:"NodeID" [versionpb.etcd_version_msg]:"3.5" [versionpb.another_field]:"NodeID"`,
+ expect: &version.V3_5,
+ },
+ {
+ input: `65001:0 [versionpb.etcd_version_msg]:"3.5" 65001:0"`,
+ expect: &version.V3_5,
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.input, func(t *testing.T) {
+ ver, err := etcdVersionFromOptionsString(tc.input)
+ require.NoError(t, err)
+ assert.Equal(t, tc.expect, ver)
+ })
+ }
+}
+
+func TestMaxVersion(t *testing.T) {
+ tcs := []struct {
+ a, b, expect *semver.Version
+ }{
+ {
+ a: nil,
+ b: nil,
+ expect: nil,
+ },
+ {
+ a: &version.V3_5,
+ b: nil,
+ expect: &version.V3_5,
+ },
+ {
+ a: nil,
+ b: &version.V3_5,
+ expect: &version.V3_5,
+ },
+ {
+ a: &version.V3_6,
+ b: &version.V3_5,
+ expect: &version.V3_6,
+ },
+ {
+ a: &version.V3_5,
+ b: &version.V3_6,
+ expect: &version.V3_6,
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(fmt.Sprintf("%v %v %v", tc.a, tc.b, tc.expect), func(t *testing.T) {
+ got := maxVersion(tc.a, tc.b)
+ assert.Equal(t, got, tc.expect)
+ })
+ }
+}
diff --git a/server/storage/wal/wal.go b/server/storage/wal/wal.go
new file mode 100644
index 00000000000..b364cab63e8
--- /dev/null
+++ b/server/storage/wal/wal.go
@@ -0,0 +1,1047 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+const (
+ MetadataType int64 = iota + 1
+ EntryType
+ StateType
+ CrcType
+ SnapshotType
+
+ // warnSyncDuration is the amount of time allotted to an fsync before
+ // logging a warning
+ warnSyncDuration = time.Second
+)
+
+var (
+ // SegmentSizeBytes is the preallocated size of each wal segment file.
+ // The actual size might be larger than this. In general, the default
+ // value should be used, but this is defined as an exported variable
+ // so that tests can set a different segment size.
+ SegmentSizeBytes int64 = 64 * 1000 * 1000 // 64MB
+
+ ErrMetadataConflict = errors.New("wal: conflicting metadata found")
+ ErrFileNotFound = errors.New("wal: file not found")
+ ErrCRCMismatch = walpb.ErrCRCMismatch
+ ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
+ ErrSnapshotNotFound = errors.New("wal: snapshot not found")
+ ErrSliceOutOfRange = errors.New("wal: slice bounds out of range")
+ ErrDecoderNotFound = errors.New("wal: decoder not found")
+ crcTable = crc32.MakeTable(crc32.Castagnoli)
+)
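+
+// Illustrative sketch (not part of the original change): because SegmentSizeBytes
+// is exported, tests that want small segment files can override it and restore
+// the default afterwards, e.g.
+//
+//	restore := SegmentSizeBytes
+//	SegmentSizeBytes = 2 * 1024
+//	defer func() { SegmentSizeBytes = restore }()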
+
+// WAL is a logical representation of the stable storage.
+// WAL is either in read mode or append mode but not both.
+// A newly created WAL is in append mode, and ready for appending records.
+// A just opened WAL is in read mode, and ready for reading records.
+// The WAL will be ready for appending after reading out all the previous records.
+type WAL struct {
+ lg *zap.Logger
+
+ dir string // the living directory of the underlay files
+
+ // dirFile is a fd for the wal directory for syncing on Rename
+ dirFile *os.File
+
+ metadata []byte // metadata recorded at the head of each WAL
+ state raftpb.HardState // hardstate recorded at the head of WAL
+
+ start walpb.Snapshot // snapshot to start reading
+ decoder Decoder // decoder to Decode records
+ readClose func() error // closer for Decode reader
+
+ unsafeNoSync bool // if set, do not fsync
+
+ mu sync.Mutex
+ enti uint64 // index of the last entry saved to the wal
+ encoder *encoder // encoder to encode records
+
+ locks []*fileutil.LockedFile // the locked files the WAL holds (the name is increasing)
+ fp *filePipeline
+}
+
+// Create creates a WAL ready for appending records. The given metadata is
+// recorded at the head of each WAL file, and can be retrieved with ReadAll
+// after the file is Open.
+func Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error) {
+ if Exist(dirpath) {
+ return nil, os.ErrExist
+ }
+
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+
+ // keep temporary wal directory so WAL initialization appears atomic
+ tmpdirpath := filepath.Clean(dirpath) + ".tmp"
+ if fileutil.Exist(tmpdirpath) {
+ if err := os.RemoveAll(tmpdirpath); err != nil {
+ return nil, err
+ }
+ }
+ defer os.RemoveAll(tmpdirpath)
+
+ if err := fileutil.CreateDirAll(lg, tmpdirpath); err != nil {
+ lg.Warn(
+ "failed to create a temporary WAL directory",
+ zap.String("tmp-dir-path", tmpdirpath),
+ zap.String("dir-path", dirpath),
+ zap.Error(err),
+ )
+ return nil, err
+ }
+
+ p := filepath.Join(tmpdirpath, walName(0, 0))
+ f, err := createNewWALFile[*fileutil.LockedFile](p, false)
+ if err != nil {
+ lg.Warn(
+ "failed to flock an initial WAL file",
+ zap.String("path", p),
+ zap.Error(err),
+ )
+ return nil, err
+ }
+ if _, err = f.Seek(0, io.SeekEnd); err != nil {
+ lg.Warn(
+ "failed to seek an initial WAL file",
+ zap.String("path", p),
+ zap.Error(err),
+ )
+ return nil, err
+ }
+ if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil {
+ lg.Warn(
+ "failed to preallocate an initial WAL file",
+ zap.String("path", p),
+ zap.Int64("segment-bytes", SegmentSizeBytes),
+ zap.Error(err),
+ )
+ return nil, err
+ }
+
+ w := &WAL{
+ lg: lg,
+ dir: dirpath,
+ metadata: metadata,
+ }
+ w.encoder, err = newFileEncoder(f.File, 0)
+ if err != nil {
+ return nil, err
+ }
+ w.locks = append(w.locks, f)
+ if err = w.saveCrc(0); err != nil {
+ return nil, err
+ }
+ if err = w.encoder.encode(&walpb.Record{Type: MetadataType, Data: metadata}); err != nil {
+ return nil, err
+ }
+ if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil {
+ return nil, err
+ }
+
+ logDirPath := w.dir
+ if w, err = w.renameWAL(tmpdirpath); err != nil {
+ lg.Warn(
+ "failed to rename the temporary WAL directory",
+ zap.String("tmp-dir-path", tmpdirpath),
+ zap.String("dir-path", logDirPath),
+ zap.Error(err),
+ )
+ return nil, err
+ }
+
+ var perr error
+ defer func() {
+ if perr != nil {
+ w.cleanupWAL(lg)
+ }
+ }()
+
+ // directory was renamed; sync parent dir to persist rename
+ pdir, perr := fileutil.OpenDir(filepath.Dir(w.dir))
+ if perr != nil {
+ lg.Warn(
+ "failed to open the parent data directory",
+ zap.String("parent-dir-path", filepath.Dir(w.dir)),
+ zap.String("dir-path", w.dir),
+ zap.Error(perr),
+ )
+ return nil, perr
+ }
+ dirCloser := func() error {
+ if perr = pdir.Close(); perr != nil {
+ lg.Warn(
+ "failed to close the parent data directory file",
+ zap.String("parent-dir-path", filepath.Dir(w.dir)),
+ zap.String("dir-path", w.dir),
+ zap.Error(perr),
+ )
+ return perr
+ }
+ return nil
+ }
+ start := time.Now()
+ if perr = fileutil.Fsync(pdir); perr != nil {
+ dirCloser()
+ lg.Warn(
+ "failed to fsync the parent data directory file",
+ zap.String("parent-dir-path", filepath.Dir(w.dir)),
+ zap.String("dir-path", w.dir),
+ zap.Error(perr),
+ )
+ return nil, perr
+ }
+ walFsyncSec.Observe(time.Since(start).Seconds())
+ if err = dirCloser(); err != nil {
+ return nil, err
+ }
+
+ return w, nil
+}
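+
+// Illustrative usage sketch (not part of the original change; lg and dirpath are
+// placeholder names). A typical append-side lifecycle after Create looks like:
+//
+//	w, err := Create(lg, dirpath, []byte("metadata"))
+//	if err != nil {
+//		// handle err
+//	}
+//	defer w.Close()
+//	err = w.Save(raftpb.HardState{Term: 1}, []raftpb.Entry{{Index: 1, Term: 1}})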
+
+// createNewWALFile creates a WAL file.
+// To create a locked file, use the *fileutil.LockedFile type parameter.
+// To create a standard file, use the *os.File type parameter.
+// If forceNew is true, the file will be truncated if it already exists.
+func createNewWALFile[T *os.File | *fileutil.LockedFile](path string, forceNew bool) (T, error) {
+ flag := os.O_WRONLY | os.O_CREATE
+ if forceNew {
+ flag |= os.O_TRUNC
+ }
+
+ if _, isLockedFile := any(T(nil)).(*fileutil.LockedFile); isLockedFile {
+ lockedFile, err := fileutil.LockFile(path, flag, fileutil.PrivateFileMode)
+ if err != nil {
+ return nil, err
+ }
+ return any(lockedFile).(T), nil
+ }
+
+ file, err := os.OpenFile(path, flag, fileutil.PrivateFileMode)
+ if err != nil {
+ return nil, err
+ }
+ return any(file).(T), nil
+}
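+
+// Illustrative sketch (not part of the original change; p is a placeholder path):
+// the generic helper is instantiated with the desired return type, e.g.
+//
+//	lf, err := createNewWALFile[*fileutil.LockedFile](p, false) // flocked WAL file
+//	sf, err := createNewWALFile[*os.File](p, true)              // plain file, truncated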
+
+func (w *WAL) Reopen(lg *zap.Logger, snap walpb.Snapshot) (*WAL, error) {
+ err := w.Close()
+ if err != nil {
+ lg.Panic("failed to close WAL during reopen", zap.Error(err))
+ }
+ return Open(lg, w.dir, snap)
+}
+
+func (w *WAL) SetUnsafeNoFsync() {
+ w.unsafeNoSync = true
+}
+
+func (w *WAL) cleanupWAL(lg *zap.Logger) {
+ var err error
+ if err = w.Close(); err != nil {
+ lg.Panic("failed to close WAL during cleanup", zap.Error(err))
+ }
+ brokenDirName := fmt.Sprintf("%s.broken.%v", w.dir, time.Now().Format("20060102.150405.999999"))
+ if err = os.Rename(w.dir, brokenDirName); err != nil {
+ lg.Panic(
+ "failed to rename WAL during cleanup",
+ zap.Error(err),
+ zap.String("source-path", w.dir),
+ zap.String("rename-path", brokenDirName),
+ )
+ }
+}
+
+func (w *WAL) renameWAL(tmpdirpath string) (*WAL, error) {
+ if err := os.RemoveAll(w.dir); err != nil {
+ return nil, err
+ }
+ // On non-Windows platforms, hold the lock while renaming. Releasing
+ // the lock and trying to reacquire it quickly can be flaky because
+ // it's possible the process will fork to spawn a process while this is
+ // happening. The fds are set up as close-on-exec by the Go runtime,
+ // but there is a window between the fork and the exec where another
+ // process holds the lock.
+ if err := os.Rename(tmpdirpath, w.dir); err != nil {
+ var linkErr *os.LinkError
+ if errors.As(err, &linkErr) {
+ return w.renameWALUnlock(tmpdirpath)
+ }
+ return nil, err
+ }
+ w.fp = newFilePipeline(w.lg, w.dir, SegmentSizeBytes)
+ df, err := fileutil.OpenDir(w.dir)
+ w.dirFile = df
+ return w, err
+}
+
+func (w *WAL) renameWALUnlock(tmpdirpath string) (*WAL, error) {
+ // rename of directory with locked files doesn't work on windows/cifs;
+ // close the WAL to release the locks so the directory can be renamed.
+ w.lg.Info(
+ "closing WAL to release flock and retry directory renaming",
+ zap.String("from", tmpdirpath),
+ zap.String("to", w.dir),
+ )
+ w.Close()
+
+ if err := os.Rename(tmpdirpath, w.dir); err != nil {
+ return nil, err
+ }
+
+ // reopen and relock
+ newWAL, oerr := Open(w.lg, w.dir, walpb.Snapshot{})
+ if oerr != nil {
+ return nil, oerr
+ }
+ if _, _, _, err := newWAL.ReadAll(); err != nil {
+ newWAL.Close()
+ return nil, err
+ }
+ return newWAL, nil
+}
+
+// Open opens the WAL at the given snap.
+// The snap SHOULD have been previously saved to the WAL, or the following
+// ReadAll will fail.
+// The returned WAL is ready to read and the first record will be the one after
+// the given snap. The WAL cannot be appended to before reading out all of its
+// previous records.
+func Open(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error) {
+ w, err := openAtIndex(lg, dirpath, snap, true)
+ if err != nil {
+ return nil, fmt.Errorf("openAtIndex failed: %w", err)
+ }
+ if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil {
+ return nil, fmt.Errorf("fileutil.OpenDir failed: %w", err)
+ }
+ return w, nil
+}
+
+// OpenForRead only opens the WAL files for reading.
+// Writing to a read-only WAL panics.
+func OpenForRead(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error) {
+ return openAtIndex(lg, dirpath, snap, false)
+}
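+
+// Illustrative usage sketch (not part of the original change; lg and dirpath are
+// placeholder names): read-only inspection of an existing WAL without taking the
+// write locks, assuming the given snapshot was previously saved to that WAL:
+//
+//	w, err := OpenForRead(lg, dirpath, walpb.Snapshot{})
+//	if err != nil {
+//		// handle err
+//	}
+//	defer w.Close()
+//	metadata, state, ents, err := w.ReadAll()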
+
+func openAtIndex(lg *zap.Logger, dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ names, nameIndex, err := selectWALFiles(lg, dirpath, snap)
+ if err != nil {
+ return nil, fmt.Errorf("[openAtIndex] selectWALFiles failed: %w", err)
+ }
+
+ rs, ls, closer, err := openWALFiles(lg, dirpath, names, nameIndex, write)
+ if err != nil {
+ return nil, fmt.Errorf("[openAtIndex] openWALFiles failed: %w", err)
+ }
+
+ // create a WAL ready for reading
+ w := &WAL{
+ lg: lg,
+ dir: dirpath,
+ start: snap,
+ decoder: NewDecoder(rs...),
+ readClose: closer,
+ locks: ls,
+ }
+
+ if write {
+ // write reuses the file descriptors from read; don't close so
+ // WAL can append without dropping the file lock
+ w.readClose = nil
+ if _, _, err := parseWALName(filepath.Base(w.tail().Name())); err != nil {
+ closer()
+ return nil, fmt.Errorf("[openAtIndex] parseWALName failed: %w", err)
+ }
+ w.fp = newFilePipeline(lg, w.dir, SegmentSizeBytes)
+ }
+
+ return w, nil
+}
+
+func selectWALFiles(lg *zap.Logger, dirpath string, snap walpb.Snapshot) ([]string, int, error) {
+ names, err := readWALNames(lg, dirpath)
+ if err != nil {
+ return nil, -1, fmt.Errorf("readWALNames failed: %w", err)
+ }
+
+ nameIndex, ok := searchIndex(lg, names, snap.Index)
+ if !ok {
+ return nil, -1, fmt.Errorf("wal: file not found which matches the snapshot index '%d'", snap.Index)
+ }
+
+ if !isValidSeq(lg, names[nameIndex:]) {
+ return nil, -1, fmt.Errorf("wal: file sequence numbers (starting from %d) do not increase continuously", nameIndex)
+ }
+
+ return names, nameIndex, nil
+}
+
+func openWALFiles(lg *zap.Logger, dirpath string, names []string, nameIndex int, write bool) ([]fileutil.FileReader, []*fileutil.LockedFile, func() error, error) {
+ rcs := make([]io.ReadCloser, 0)
+ rs := make([]fileutil.FileReader, 0)
+ ls := make([]*fileutil.LockedFile, 0)
+ for _, name := range names[nameIndex:] {
+ p := filepath.Join(dirpath, name)
+ var f *os.File
+ if write {
+ l, err := fileutil.TryLockFile(p, os.O_RDWR, fileutil.PrivateFileMode)
+ if err != nil {
+ closeAll(lg, rcs...)
+ return nil, nil, nil, fmt.Errorf("[openWALFiles] fileutil.TryLockFile failed: %w", err)
+ }
+ ls = append(ls, l)
+ rcs = append(rcs, l)
+ f = l.File
+ } else {
+ rf, err := os.OpenFile(p, os.O_RDONLY, fileutil.PrivateFileMode)
+ if err != nil {
+ closeAll(lg, rcs...)
+ return nil, nil, nil, fmt.Errorf("[openWALFiles] os.OpenFile failed (%q): %w", p, err)
+ }
+ ls = append(ls, nil)
+ rcs = append(rcs, rf)
+ f = rf
+ }
+ fileReader := fileutil.NewFileReader(f)
+ rs = append(rs, fileReader)
+ }
+
+ closer := func() error { return closeAll(lg, rcs...) }
+
+ return rs, ls, closer, nil
+}
+
+// ReadAll reads out records of the current WAL.
+// If opened in write mode, it must read out all records until EOF,
+// otherwise an error will be returned.
+// If opened in read mode, it will try to read all records if possible.
+// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
+// If the loaded snap doesn't match the expected one, it will return
+// all the records along with the error ErrSnapshotMismatch.
+// TODO: detect not-last-snap error.
+// TODO: maybe loosen the snapshot matching check.
+// After ReadAll, the WAL will be ready for appending new records.
+//
+// ReadAll suppresses WAL entries that got overridden (i.e. a newer entry with the same index
+// exists in the log). Such a situation can happen in cases described in figure 7. of the
+// RAFT paper (http://web.stanford.edu/~ouster/cgi-bin/papers/raft-atc14.pdf).
+//
+// ReadAll may return entries that are not yet committed and are subject to being overridden.
+// Do not apply entries with index > state.commit, as they are subject to change.
+func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ rec := &walpb.Record{}
+
+ if w.decoder == nil {
+ return nil, state, nil, ErrDecoderNotFound
+ }
+ decoder := w.decoder
+
+ var match bool
+ for err = decoder.Decode(rec); err == nil; err = decoder.Decode(rec) {
+ switch rec.Type {
+ case EntryType:
+ e := MustUnmarshalEntry(rec.Data)
+ // 0 <= e.Index-w.start.Index - 1 < len(ents)
+ if e.Index > w.start.Index {
+ // prevent "panic: runtime error: slice bounds out of range [:13038096702221461992] with capacity 0"
+ offset := e.Index - w.start.Index - 1
+ if offset > uint64(len(ents)) {
+ // return error before append call causes runtime panic
+ return nil, state, nil, fmt.Errorf("%w, snapshot[Index: %d, Term: %d], current entry[Index: %d, Term: %d], len(ents): %d",
+ ErrSliceOutOfRange, w.start.Index, w.start.Term, e.Index, e.Term, len(ents))
+ }
+ // The line below is potentially overriding some 'uncommitted' entries.
+ ents = append(ents[:offset], e)
+ }
+ w.enti = e.Index
+
+ case StateType:
+ state = MustUnmarshalState(rec.Data)
+
+ case MetadataType:
+ if metadata != nil && !bytes.Equal(metadata, rec.Data) {
+ state.Reset()
+ return nil, state, nil, ErrMetadataConflict
+ }
+ metadata = rec.Data
+
+ case CrcType:
+ crc := decoder.LastCRC()
+ // The current crc of the decoder must match the crc of the record.
+ // There is no need to match a zero crc, since the decoder is new in this case.
+ if crc != 0 && rec.Validate(crc) != nil {
+ state.Reset()
+ return nil, state, nil, ErrCRCMismatch
+ }
+ decoder.UpdateCRC(rec.Crc)
+
+ case SnapshotType:
+ var snap walpb.Snapshot
+ pbutil.MustUnmarshal(&snap, rec.Data)
+ if snap.Index == w.start.Index {
+ if snap.Term != w.start.Term {
+ state.Reset()
+ return nil, state, nil, ErrSnapshotMismatch
+ }
+ match = true
+ }
+
+ default:
+ state.Reset()
+ return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type)
+ }
+ }
+
+ switch w.tail() {
+ case nil:
+ // We do not have to read out all entries in read mode.
+ // The last record may be a partially written one, so
+ // `io.ErrUnexpectedEOF` might be returned.
+ if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
+ state.Reset()
+ return nil, state, nil, err
+ }
+ default:
+ // We must read all the entries if WAL is opened in write mode.
+ if !errors.Is(err, io.EOF) {
+ state.Reset()
+ return nil, state, nil, err
+ }
+ // decodeRecord() will return io.EOF if it detects a zero record,
+ // but this zero record may be followed by non-zero records from
+ // a torn write. Overwriting some of these non-zero records, but
+ // not all, will cause CRC errors on WAL open. Since the records
+ // were never fully synced to disk in the first place, it's safe
+ // to zero them out to avoid any CRC errors from new writes.
+ if _, err = w.tail().Seek(w.decoder.LastOffset(), io.SeekStart); err != nil {
+ return nil, state, nil, err
+ }
+ if err = fileutil.ZeroToEnd(w.tail().File); err != nil {
+ return nil, state, nil, err
+ }
+ }
+
+ err = nil
+ if !match {
+ err = ErrSnapshotNotFound
+ }
+
+ // close decoder, disable reading
+ if w.readClose != nil {
+ w.readClose()
+ w.readClose = nil
+ }
+ w.start = walpb.Snapshot{}
+
+ w.metadata = metadata
+
+ if w.tail() != nil {
+ // create encoder (chain crc with the decoder), enable appending
+ w.encoder, err = newFileEncoder(w.tail().File, w.decoder.LastCRC())
+ if err != nil {
+ return nil, state, nil, err
+ }
+ }
+ w.decoder = nil
+
+ return metadata, state, ents, err
+}
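+
+// Illustrative sketch (not part of the original change): as the ReadAll doc comment
+// notes, entries with Index > state.Commit may still be overridden, so a caller that
+// only wants committed entries could filter the result, e.g.
+//
+//	committed := ents[:0]
+//	for _, e := range ents {
+//		if e.Index <= state.Commit {
+//			committed = append(committed, e)
+//		}
+//	}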
+
+// ValidSnapshotEntries returns all the valid snapshot entries in the wal logs in the given directory.
+// Snapshot entries are valid if their index is less than or equal to the commit index of the most recent hard state.
+func ValidSnapshotEntries(lg *zap.Logger, walDir string) ([]walpb.Snapshot, error) {
+ var snaps []walpb.Snapshot
+ var state raftpb.HardState
+ var err error
+
+ rec := &walpb.Record{}
+ names, err := readWALNames(lg, walDir)
+ if err != nil {
+ return nil, err
+ }
+
+ // open wal files in read mode, so that there is no conflict
+ // when the same WAL is opened elsewhere in write mode
+ rs, _, closer, err := openWALFiles(lg, walDir, names, 0, false)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if closer != nil {
+ closer()
+ }
+ }()
+
+ // create a new decoder from the readers on the WAL files
+ decoder := NewDecoder(rs...)
+
+ for err = decoder.Decode(rec); err == nil; err = decoder.Decode(rec) {
+ switch rec.Type {
+ case SnapshotType:
+ var loadedSnap walpb.Snapshot
+ pbutil.MustUnmarshal(&loadedSnap, rec.Data)
+ snaps = append(snaps, loadedSnap)
+ case StateType:
+ state = MustUnmarshalState(rec.Data)
+ case CrcType:
+ crc := decoder.LastCRC()
+ // The current crc of the decoder must match the crc of the record.
+ // There is no need to match a zero crc, since the decoder is new in this case.
+ if crc != 0 && rec.Validate(crc) != nil {
+ return nil, ErrCRCMismatch
+ }
+ decoder.UpdateCRC(rec.Crc)
+ }
+ }
+ // We do not have to read out all the WAL entries
+ // as the decoder is opened in read mode.
+ if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
+ return nil, err
+ }
+
+ // filter out any snaps that are newer than the committed hardstate
+ n := 0
+ for _, s := range snaps {
+ if s.Index <= state.Commit {
+ snaps[n] = s
+ n++
+ }
+ }
+ snaps = snaps[:n:n]
+ return snaps, nil
+}
+
+// Verify reads through the given WAL and verifies that it is not corrupted.
+// It creates a new decoder to read through the records of the given WAL.
+// It does not conflict with any open WAL, but it is recommended not to
+// call this function after opening the WAL for writing.
+// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
+// If the loaded snap doesn't match the expected one, it will
+// return the error ErrSnapshotMismatch.
+func Verify(lg *zap.Logger, walDir string, snap walpb.Snapshot) (*raftpb.HardState, error) {
+ var metadata []byte
+ var err error
+ var match bool
+ var state raftpb.HardState
+
+ rec := &walpb.Record{}
+
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ names, nameIndex, err := selectWALFiles(lg, walDir, snap)
+ if err != nil {
+ return nil, err
+ }
+
+ // open wal files in read mode, so that there is no conflict
+ // when the same WAL is opened elsewhere in write mode
+ rs, _, closer, err := openWALFiles(lg, walDir, names, nameIndex, false)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if closer != nil {
+ closer()
+ }
+ }()
+
+ // create a new decoder from the readers on the WAL files
+ decoder := NewDecoder(rs...)
+
+ for err = decoder.Decode(rec); err == nil; err = decoder.Decode(rec) {
+ switch rec.Type {
+ case MetadataType:
+ if metadata != nil && !bytes.Equal(metadata, rec.Data) {
+ return nil, ErrMetadataConflict
+ }
+ metadata = rec.Data
+ case CrcType:
+ crc := decoder.LastCRC()
+ // Current crc of decoder must match the crc of the record.
+ // We need not match 0 crc, since the decoder is a new one at this point.
+ if crc != 0 && rec.Validate(crc) != nil {
+ return nil, ErrCRCMismatch
+ }
+ decoder.UpdateCRC(rec.Crc)
+ case SnapshotType:
+ var loadedSnap walpb.Snapshot
+ pbutil.MustUnmarshal(&loadedSnap, rec.Data)
+ if loadedSnap.Index == snap.Index {
+ if loadedSnap.Term != snap.Term {
+ return nil, ErrSnapshotMismatch
+ }
+ match = true
+ }
+ // We ignore all entry and state type records as these
+ // are not necessary for validating the WAL contents
+ case EntryType:
+ case StateType:
+ pbutil.MustUnmarshal(&state, rec.Data)
+ default:
+ return nil, fmt.Errorf("unexpected block type %d", rec.Type)
+ }
+ }
+
+ // We do not have to read out all the WAL entries
+ // as the decoder is opened in read mode.
+ if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
+ return nil, err
+ }
+
+ if !match {
+ return nil, ErrSnapshotNotFound
+ }
+
+ return &state, nil
+}
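+
+// Illustrative usage sketch (not part of the original change; lg and walDir are
+// placeholder names): checking an existing WAL directory before reusing it,
+// assuming the empty snapshot was recorded when the WAL was created:
+//
+//	hs, err := Verify(lg, walDir, walpb.Snapshot{})
+//	if err != nil {
+//		// WAL is corrupted, or the snapshot is missing or mismatched
+//	}
+//	_ = hs // last hard state observed while scanning the WAL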
+
+// cut closes the currently written file and creates a new one ready for appending.
+// cut first creates a temp wal file and writes the necessary headers into it.
+// Then cut atomically renames the temp wal file to a wal file.
+func (w *WAL) cut() error {
+ // close the old wal file; truncate it to avoid wasting space if cut is called early
+ off, serr := w.tail().Seek(0, io.SeekCurrent)
+ if serr != nil {
+ return serr
+ }
+
+ if err := w.tail().Truncate(off); err != nil {
+ return err
+ }
+
+ if err := w.sync(); err != nil {
+ return err
+ }
+
+ fpath := filepath.Join(w.dir, walName(w.seq()+1, w.enti+1))
+
+ // create a temp wal file with name sequence + 1, or truncate the existing one
+ newTail, err := w.fp.Open()
+ if err != nil {
+ return err
+ }
+
+ // update writer and save the previous crc
+ w.locks = append(w.locks, newTail)
+ prevCrc := w.encoder.crc.Sum32()
+ w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
+ if err != nil {
+ return err
+ }
+
+ if err = w.saveCrc(prevCrc); err != nil {
+ return err
+ }
+
+ if err = w.encoder.encode(&walpb.Record{Type: MetadataType, Data: w.metadata}); err != nil {
+ return err
+ }
+
+ if err = w.saveState(&w.state); err != nil {
+ return err
+ }
+
+ // atomically move temp wal file to wal file
+ if err = w.sync(); err != nil {
+ return err
+ }
+
+ off, err = w.tail().Seek(0, io.SeekCurrent)
+ if err != nil {
+ return err
+ }
+
+ if err = os.Rename(newTail.Name(), fpath); err != nil {
+ return err
+ }
+ start := time.Now()
+ if err = fileutil.Fsync(w.dirFile); err != nil {
+ return err
+ }
+ walFsyncSec.Observe(time.Since(start).Seconds())
+
+ // reopen newTail with its new path so calls to Name() match the wal filename format
+ newTail.Close()
+
+ if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
+ return err
+ }
+ if _, err = newTail.Seek(off, io.SeekStart); err != nil {
+ return err
+ }
+
+ w.locks[len(w.locks)-1] = newTail
+
+ prevCrc = w.encoder.crc.Sum32()
+ w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
+ if err != nil {
+ return err
+ }
+
+ w.lg.Info("created a new WAL segment", zap.String("path", fpath))
+ return nil
+}
+
+func (w *WAL) sync() error {
+ if w.encoder != nil {
+ if err := w.encoder.flush(); err != nil {
+ return err
+ }
+ }
+
+ if w.unsafeNoSync {
+ return nil
+ }
+
+ start := time.Now()
+ err := fileutil.Fdatasync(w.tail().File)
+
+ took := time.Since(start)
+ if took > warnSyncDuration {
+ w.lg.Warn(
+ "slow fdatasync",
+ zap.Duration("took", took),
+ zap.Duration("expected-duration", warnSyncDuration),
+ )
+ }
+ walFsyncSec.Observe(took.Seconds())
+
+ return err
+}
+
+func (w *WAL) Sync() error {
+ return w.sync()
+}
+
+// ReleaseLockTo releases the locks whose index is smaller than the given index,
+// except the largest one among them.
+// For example, if the WAL is holding locks 1,2,3,4,5,6, ReleaseLockTo(4) will release
+// locks 1,2 but keep 3. ReleaseLockTo(5) will release 1,2,3 but keep 4.
+func (w *WAL) ReleaseLockTo(index uint64) error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if len(w.locks) == 0 {
+ return nil
+ }
+
+ var smaller int
+ found := false
+ for i, l := range w.locks {
+ _, lockIndex, err := parseWALName(filepath.Base(l.Name()))
+ if err != nil {
+ return err
+ }
+ if lockIndex >= index {
+ smaller = i - 1
+ found = true
+ break
+ }
+ }
+
+ // if no lock index is greater than the release index, we can
+ // release locks up to the last one (exclusive).
+ if !found {
+ smaller = len(w.locks) - 1
+ }
+
+ if smaller <= 0 {
+ return nil
+ }
+
+ for i := 0; i < smaller; i++ {
+ if w.locks[i] == nil {
+ continue
+ }
+ w.locks[i].Close()
+ }
+ w.locks = w.locks[smaller:]
+
+ return nil
+}
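+
+// Illustrative sketch (not part of the original change; snapIndex is a placeholder):
+// once a snapshot at snapIndex has been persisted, locks on older segments can be
+// released so those files become eligible for cleanup, e.g.
+//
+//	if err := w.ReleaseLockTo(snapIndex); err != nil {
+//		// handle err
+//	}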
+
+// Close closes the current WAL file and directory.
+func (w *WAL) Close() error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if w.fp != nil {
+ w.fp.Close()
+ w.fp = nil
+ }
+
+ if w.tail() != nil {
+ if err := w.sync(); err != nil {
+ return err
+ }
+ }
+ for _, l := range w.locks {
+ if l == nil {
+ continue
+ }
+ if err := l.Close(); err != nil {
+ w.lg.Error("failed to close WAL", zap.Error(err))
+ }
+ }
+
+ return w.dirFile.Close()
+}
+
+func (w *WAL) saveEntry(e *raftpb.Entry) error {
+ // TODO: add MustMarshalTo to reduce one allocation.
+ b := pbutil.MustMarshal(e)
+ rec := &walpb.Record{Type: EntryType, Data: b}
+ if err := w.encoder.encode(rec); err != nil {
+ return err
+ }
+ w.enti = e.Index
+ return nil
+}
+
+func (w *WAL) saveState(s *raftpb.HardState) error {
+ if raft.IsEmptyHardState(*s) {
+ return nil
+ }
+ w.state = *s
+ b := pbutil.MustMarshal(s)
+ rec := &walpb.Record{Type: StateType, Data: b}
+ return w.encoder.encode(rec)
+}
+
+func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ // short cut, do not call sync
+ if raft.IsEmptyHardState(st) && len(ents) == 0 {
+ return nil
+ }
+
+ mustSync := raft.MustSync(st, w.state, len(ents))
+
+ // TODO(xiangli): no more reference operator
+ for i := range ents {
+ if err := w.saveEntry(&ents[i]); err != nil {
+ return err
+ }
+ }
+ if err := w.saveState(&st); err != nil {
+ return err
+ }
+
+ curOff, err := w.tail().Seek(0, io.SeekCurrent)
+ if err != nil {
+ return err
+ }
+ if curOff < SegmentSizeBytes {
+ if mustSync {
+ // gofail: var walBeforeSync struct{}
+ err = w.sync()
+ // gofail: var walAfterSync struct{}
+ return err
+ }
+ return nil
+ }
+
+ return w.cut()
+}
+
+func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
+ if err := walpb.ValidateSnapshotForWrite(&e); err != nil {
+ return err
+ }
+
+ b := pbutil.MustMarshal(&e)
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ rec := &walpb.Record{Type: SnapshotType, Data: b}
+ if err := w.encoder.encode(rec); err != nil {
+ return err
+ }
+ // update enti only when snapshot is ahead of last index
+ if w.enti < e.Index {
+ w.enti = e.Index
+ }
+ return w.sync()
+}
+
+func (w *WAL) saveCrc(prevCrc uint32) error {
+ return w.encoder.encode(&walpb.Record{Type: CrcType, Crc: prevCrc})
+}
+
+func (w *WAL) tail() *fileutil.LockedFile {
+ if len(w.locks) > 0 {
+ return w.locks[len(w.locks)-1]
+ }
+ return nil
+}
+
+func (w *WAL) seq() uint64 {
+ t := w.tail()
+ if t == nil {
+ return 0
+ }
+ seq, _, err := parseWALName(filepath.Base(t.Name()))
+ if err != nil {
+ w.lg.Fatal("failed to parse WAL name", zap.String("name", t.Name()), zap.Error(err))
+ }
+ return seq
+}
+
+func closeAll(lg *zap.Logger, rcs ...io.ReadCloser) error {
+ stringArr := make([]string, 0)
+ for _, f := range rcs {
+ if err := f.Close(); err != nil {
+ lg.Warn("failed to close: ", zap.Error(err))
+ stringArr = append(stringArr, err.Error())
+ }
+ }
+ if len(stringArr) == 0 {
+ return nil
+ }
+ return errors.New(strings.Join(stringArr, ", "))
+}
diff --git a/server/wal/wal_bench_test.go b/server/storage/wal/wal_bench_test.go
similarity index 89%
rename from server/wal/wal_bench_test.go
rename to server/storage/wal/wal_bench_test.go
index 3d12d2f62a2..c8996dc1275 100644
--- a/server/wal/wal_bench_test.go
+++ b/server/storage/wal/wal_bench_test.go
@@ -15,13 +15,11 @@
package wal
import (
- "io/ioutil"
- "os"
"testing"
- "go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
- "go.etcd.io/etcd/raft/v3/raftpb"
+ "go.etcd.io/raft/v3/raftpb"
)
func BenchmarkWrite100EntryWithoutBatch(b *testing.B) { benchmarkWriteEntry(b, 100, 0) }
@@ -37,13 +35,9 @@ func BenchmarkWrite1000EntryBatch500(b *testing.B) { benchmarkWriteEntry(b,
func BenchmarkWrite1000EntryBatch1000(b *testing.B) { benchmarkWriteEntry(b, 1000, 1000) }
func benchmarkWriteEntry(b *testing.B, size int, batch int) {
- p, err := ioutil.TempDir(os.TempDir(), "waltest")
- if err != nil {
- b.Fatal(err)
- }
- defer os.RemoveAll(p)
+ p := b.TempDir()
- w, err := Create(zap.NewExample(), p, []byte("somedata"))
+ w, err := Create(zaptest.NewLogger(b), p, []byte("somedata"))
if err != nil {
b.Fatalf("err = %v, want nil", err)
}
diff --git a/server/storage/wal/wal_test.go b/server/storage/wal/wal_test.go
new file mode 100644
index 00000000000..a9e6dc84910
--- /dev/null
+++ b/server/storage/wal/wal_test.go
@@ -0,0 +1,1213 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "bytes"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+var confState = raftpb.ConfState{
+ Voters: []uint64{0x00ffca74},
+ AutoLeave: false,
+}
+
+func TestNew(t *testing.T) {
+ p := t.TempDir()
+
+ w, err := Create(zaptest.NewLogger(t), p, []byte("somedata"))
+ if err != nil {
+ t.Fatalf("err = %v, want nil", err)
+ }
+ if g := filepath.Base(w.tail().Name()); g != walName(0, 0) {
+ t.Errorf("name = %+v, want %+v", g, walName(0, 0))
+ }
+ defer w.Close()
+
+ // file is preallocated to segment size; only read data written by wal
+ off, err := w.tail().Seek(0, io.SeekCurrent)
+ if err != nil {
+ t.Fatal(err)
+ }
+ gd := make([]byte, off)
+ f, err := os.Open(filepath.Join(p, filepath.Base(w.tail().Name())))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ if _, err = io.ReadFull(f, gd); err != nil {
+ t.Fatalf("err = %v, want nil", err)
+ }
+
+ var wb bytes.Buffer
+ e := newEncoder(&wb, 0, 0)
+ err = e.encode(&walpb.Record{Type: CrcType, Crc: 0})
+ if err != nil {
+ t.Fatalf("err = %v, want nil", err)
+ }
+ err = e.encode(&walpb.Record{Type: MetadataType, Data: []byte("somedata")})
+ if err != nil {
+ t.Fatalf("err = %v, want nil", err)
+ }
+ r := &walpb.Record{
+ Type: SnapshotType,
+ Data: pbutil.MustMarshal(&walpb.Snapshot{}),
+ }
+ if err = e.encode(r); err != nil {
+ t.Fatalf("err = %v, want nil", err)
+ }
+ e.flush()
+ if !bytes.Equal(gd, wb.Bytes()) {
+ t.Errorf("data = %v, want %v", gd, wb.Bytes())
+ }
+}
+
+func TestCreateNewWALFile(t *testing.T) {
+ tests := []struct {
+ name string
+ fileType interface{}
+ forceNew bool
+ }{
+ {
+ name: "creating standard file should succeed and not truncate file",
+ fileType: &os.File{},
+ forceNew: false,
+ },
+ {
+ name: "creating locked file should succeed and not truncate file",
+ fileType: &fileutil.LockedFile{},
+ forceNew: false,
+ },
+ {
+ name: "creating standard file with forceNew should truncate file",
+ fileType: &os.File{},
+ forceNew: true,
+ },
+ {
+ name: "creating locked file with forceNew should truncate file",
+ fileType: &fileutil.LockedFile{},
+ forceNew: true,
+ },
+ }
+
+ for i, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ p := filepath.Join(t.TempDir(), walName(0, uint64(i)))
+
+ // create initial file with some data to verify truncate behavior
+ err := os.WriteFile(p, []byte("test data"), fileutil.PrivateFileMode)
+ require.NoError(t, err)
+
+ var f interface{}
+ switch tt.fileType.(type) {
+ case *os.File:
+ f, err = createNewWALFile[*os.File](p, tt.forceNew)
+ require.IsType(t, &os.File{}, f)
+ case *fileutil.LockedFile:
+ f, err = createNewWALFile[*fileutil.LockedFile](p, tt.forceNew)
+ require.IsType(t, &fileutil.LockedFile{}, f)
+ default:
+ panic("unknown file type")
+ }
+
+ require.NoError(t, err)
+
+ // validate the file permissions
+ fi, err := os.Stat(p)
+ require.NoError(t, err)
+ expectedPerms := fmt.Sprintf("%o", os.FileMode(fileutil.PrivateFileMode))
+ actualPerms := fmt.Sprintf("%o", fi.Mode().Perm())
+ require.Equalf(t, expectedPerms, actualPerms, "unexpected file permissions on %q", p)
+
+ content, err := os.ReadFile(p)
+ require.NoError(t, err)
+
+ if tt.forceNew {
+ require.Emptyf(t, string(content), "file content should be truncated but it wasn't")
+ } else {
+ require.Equalf(t, "test data", string(content), "file content should not be truncated but it was")
+ }
+ })
+ }
+}
+
+func TestCreateFailFromPollutedDir(t *testing.T) {
+ p := t.TempDir()
+ os.WriteFile(filepath.Join(p, "test.wal"), []byte("data"), os.ModeTemporary)
+
+ _, err := Create(zaptest.NewLogger(t), p, []byte("data"))
+ if !errors.Is(err, os.ErrExist) {
+ t.Fatalf("expected %v, got %v", os.ErrExist, err)
+ }
+}
+
+func TestWalCleanup(t *testing.T) {
+ testRoot := t.TempDir()
+ p, err := os.MkdirTemp(testRoot, "waltest")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ logger := zaptest.NewLogger(t)
+ w, err := Create(logger, p, []byte(""))
+ if err != nil {
+ t.Fatalf("err = %v, want nil", err)
+ }
+ w.cleanupWAL(logger)
+ fnames, err := fileutil.ReadDir(testRoot)
+ if err != nil {
+ t.Fatalf("err = %v, want nil", err)
+ }
+ if len(fnames) != 1 {
+ t.Fatalf("expected 1 file under %v, got %v", testRoot, len(fnames))
+ }
+ pattern := fmt.Sprintf(`%s.broken\.[\d]{8}\.[\d]{6}\.[\d]{1,6}?`, filepath.Base(p))
+ match, _ := regexp.MatchString(pattern, fnames[0])
+ if !match {
+ t.Errorf("match = false, expected true for %v with pattern %v", fnames[0], pattern)
+ }
+}
+
+func TestCreateFailFromNoSpaceLeft(t *testing.T) {
+ p := t.TempDir()
+
+ oldSegmentSizeBytes := SegmentSizeBytes
+ defer func() {
+ SegmentSizeBytes = oldSegmentSizeBytes
+ }()
+ SegmentSizeBytes = math.MaxInt64
+
+ _, err := Create(zaptest.NewLogger(t), p, []byte("data"))
+ if err == nil { // no space left on device
+ t.Fatalf("expected error 'no space left on device', got nil")
+ }
+}
+
+func TestNewForInitedDir(t *testing.T) {
+ p := t.TempDir()
+
+ os.Create(filepath.Join(p, walName(0, 0)))
+ if _, err := Create(zaptest.NewLogger(t), p, nil); err == nil || !errors.Is(err, os.ErrExist) {
+ t.Errorf("err = %v, want %v", err, os.ErrExist)
+ }
+}
+
+func TestOpenAtIndex(t *testing.T) {
+ dir := t.TempDir()
+
+ f, err := os.Create(filepath.Join(dir, walName(0, 0)))
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.Close()
+
+ w, err := Open(zaptest.NewLogger(t), dir, walpb.Snapshot{})
+ if err != nil {
+ t.Fatalf("err = %v, want nil", err)
+ }
+ if g := filepath.Base(w.tail().Name()); g != walName(0, 0) {
+ t.Errorf("name = %+v, want %+v", g, walName(0, 0))
+ }
+ if w.seq() != 0 {
+ t.Errorf("seq = %d, want %d", w.seq(), 0)
+ }
+ w.Close()
+
+ wname := walName(2, 10)
+ f, err = os.Create(filepath.Join(dir, wname))
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.Close()
+
+ w, err = Open(zaptest.NewLogger(t), dir, walpb.Snapshot{Index: 5})
+ if err != nil {
+ t.Fatalf("err = %v, want nil", err)
+ }
+ if g := filepath.Base(w.tail().Name()); g != wname {
+ t.Errorf("name = %+v, want %+v", g, wname)
+ }
+ if w.seq() != 2 {
+ t.Errorf("seq = %d, want %d", w.seq(), 2)
+ }
+ w.Close()
+
+ emptydir := t.TempDir()
+ if _, err = Open(zaptest.NewLogger(t), emptydir, walpb.Snapshot{}); !errors.Is(err, ErrFileNotFound) {
+ t.Errorf("err = %v, want %v", err, ErrFileNotFound)
+ }
+}
+
+// TestVerify tests that Verify returns a non-nil error when the WAL is corrupted.
+// The test creates a WAL directory and cuts out multiple WAL files. Then
+// it corrupts one of the files by completely truncating it.
+func TestVerify(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ walDir := t.TempDir()
+
+ // create WAL
+ w, err := Create(lg, walDir, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer w.Close()
+
+ // make 5 separate files
+ for i := 0; i < 5; i++ {
+ es := []raftpb.Entry{{Index: uint64(i), Data: []byte(fmt.Sprintf("waldata%d", i+1))}}
+ if err = w.Save(raftpb.HardState{}, es); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.cut(); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ hs := raftpb.HardState{Term: 1, Vote: 3, Commit: 5}
+ require.NoError(t, w.Save(hs, nil))
+
+ // to verify the WAL is not corrupted at this point
+ hardstate, err := Verify(lg, walDir, walpb.Snapshot{})
+ if err != nil {
+ t.Errorf("expected a nil error, got %v", err)
+ }
+ assert.Equal(t, hs, *hardstate)
+
+ walFiles, err := os.ReadDir(walDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // corrupt the WAL by truncating one of the WAL files completely
+ err = os.Truncate(path.Join(walDir, walFiles[2].Name()), 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = Verify(lg, walDir, walpb.Snapshot{})
+ if err == nil {
+ t.Error("expected a non-nil error, got nil")
+ }
+}
+
+// TestCut tests cut
+// TODO: split it into smaller tests for better readability
+func TestCut(t *testing.T) {
+ p := t.TempDir()
+
+ w, err := Create(zaptest.NewLogger(t), p, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer w.Close()
+
+ state := raftpb.HardState{Term: 1}
+ if err = w.Save(state, nil); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.cut(); err != nil {
+ t.Fatal(err)
+ }
+ wname := walName(1, 1)
+ if g := filepath.Base(w.tail().Name()); g != wname {
+ t.Errorf("name = %s, want %s", g, wname)
+ }
+
+ es := []raftpb.Entry{{Index: 1, Term: 1, Data: []byte{1}}}
+ if err = w.Save(raftpb.HardState{}, es); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.cut(); err != nil {
+ t.Fatal(err)
+ }
+ snap := walpb.Snapshot{Index: 2, Term: 1, ConfState: &confState}
+ if err = w.SaveSnapshot(snap); err != nil {
+ t.Fatal(err)
+ }
+ wname = walName(2, 2)
+ if g := filepath.Base(w.tail().Name()); g != wname {
+ t.Errorf("name = %s, want %s", g, wname)
+ }
+
+ // check the state in the last WAL
+ // We do check before closing the WAL to ensure that Cut syncs the data
+ // into the disk.
+ f, err := os.Open(filepath.Join(p, wname))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ nw := &WAL{
+ decoder: NewDecoder(fileutil.NewFileReader(f)),
+ start: snap,
+ }
+ _, gst, _, err := nw.ReadAll()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(gst, state) {
+ t.Errorf("state = %+v, want %+v", gst, state)
+ }
+}
+
+func TestSaveWithCut(t *testing.T) {
+ p := t.TempDir()
+
+ w, err := Create(zaptest.NewLogger(t), p, []byte("metadata"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ state := raftpb.HardState{Term: 1}
+ if err = w.Save(state, nil); err != nil {
+ t.Fatal(err)
+ }
+ bigData := make([]byte, 500)
+ strdata := "Hello World!!"
+ copy(bigData, strdata)
+ // set a lower value for SegmentSizeBytes, else the test takes too long to complete
+ restoreLater := SegmentSizeBytes
+ const EntrySize int = 500
+ SegmentSizeBytes = 2 * 1024
+ defer func() { SegmentSizeBytes = restoreLater }()
+ index := uint64(0)
+ for totalSize := 0; totalSize < int(SegmentSizeBytes); totalSize += EntrySize {
+ ents := []raftpb.Entry{{Index: index, Term: 1, Data: bigData}}
+ if err = w.Save(state, ents); err != nil {
+ t.Fatal(err)
+ }
+ index++
+ }
+
+ w.Close()
+
+ neww, err := Open(zaptest.NewLogger(t), p, walpb.Snapshot{})
+ if err != nil {
+ t.Fatalf("err = %v, want nil", err)
+ }
+ defer neww.Close()
+ wname := walName(1, index)
+ if g := filepath.Base(neww.tail().Name()); g != wname {
+ t.Errorf("name = %s, want %s", g, wname)
+ }
+
+ _, newhardstate, entries, err := neww.ReadAll()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(newhardstate, state) {
+ t.Errorf("Hard State = %+v, want %+v", newhardstate, state)
+ }
+ if len(entries) != int(SegmentSizeBytes/int64(EntrySize)) {
+ t.Errorf("Number of entries = %d, expected = %d", len(entries), int(SegmentSizeBytes/int64(EntrySize)))
+ }
+ for _, oneent := range entries {
+ if !bytes.Equal(oneent.Data, bigData) {
+ t.Errorf("the saved data does not match at Index %d : found: %s , want :%s", oneent.Index, oneent.Data, bigData)
+ }
+ }
+}
+
+func TestRecover(t *testing.T) {
+ cases := []struct {
+ name string
+ size int
+ }{
+ {
+ name: "10MB",
+ size: 10 * 1024 * 1024,
+ },
+ {
+ name: "20MB",
+ size: 20 * 1024 * 1024,
+ },
+ {
+ name: "40MB",
+ size: 40 * 1024 * 1024,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ p := t.TempDir()
+
+ w, err := Create(zaptest.NewLogger(t), p, []byte("metadata"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil {
+ t.Fatal(err)
+ }
+
+ data := make([]byte, tc.size)
+ n, err := rand.Read(data)
+ assert.Equal(t, tc.size, n)
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ ents := []raftpb.Entry{{Index: 1, Term: 1, Data: data}, {Index: 2, Term: 2, Data: data}}
+ if err = w.Save(raftpb.HardState{}, ents); err != nil {
+ t.Fatal(err)
+ }
+ sts := []raftpb.HardState{{Term: 1, Vote: 1, Commit: 1}, {Term: 2, Vote: 2, Commit: 2}}
+ for _, s := range sts {
+ if err = w.Save(s, nil); err != nil {
+ t.Fatal(err)
+ }
+ }
+ w.Close()
+
+ if w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{}); err != nil {
+ t.Fatal(err)
+ }
+ metadata, state, entries, err := w.ReadAll()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(metadata, []byte("metadata")) {
+ t.Errorf("metadata = %s, want %s", metadata, "metadata")
+ }
+ if !reflect.DeepEqual(entries, ents) {
+ t.Errorf("ents = %+v, want %+v", entries, ents)
+ }
+ // only the latest state is recorded
+ s := sts[len(sts)-1]
+ if !reflect.DeepEqual(state, s) {
+ t.Errorf("state = %+v, want %+v", state, s)
+ }
+ w.Close()
+ })
+ }
+}
+
+func TestSearchIndex(t *testing.T) {
+ tests := []struct {
+ names []string
+ index uint64
+ widx int
+ wok bool
+ }{
+ {
+ []string{
+ "0000000000000000-0000000000000000.wal",
+ "0000000000000001-0000000000001000.wal",
+ "0000000000000002-0000000000002000.wal",
+ },
+ 0x1000, 1, true,
+ },
+ {
+ []string{
+ "0000000000000001-0000000000004000.wal",
+ "0000000000000002-0000000000003000.wal",
+ "0000000000000003-0000000000005000.wal",
+ },
+ 0x4000, 1, true,
+ },
+ {
+ []string{
+ "0000000000000001-0000000000002000.wal",
+ "0000000000000002-0000000000003000.wal",
+ "0000000000000003-0000000000005000.wal",
+ },
+ 0x1000, -1, false,
+ },
+ }
+ for i, tt := range tests {
+ idx, ok := searchIndex(zaptest.NewLogger(t), tt.names, tt.index)
+ if idx != tt.widx {
+ t.Errorf("#%d: idx = %d, want %d", i, idx, tt.widx)
+ }
+ if ok != tt.wok {
+ t.Errorf("#%d: ok = %v, want %v", i, ok, tt.wok)
+ }
+ }
+}
+
+func TestScanWalName(t *testing.T) {
+ tests := []struct {
+ str string
+ wseq, windex uint64
+ wok bool
+ }{
+ {"0000000000000000-0000000000000000.wal", 0, 0, true},
+ {"0000000000000000.wal", 0, 0, false},
+ {"0000000000000000-0000000000000000.snap", 0, 0, false},
+ }
+ for i, tt := range tests {
+ s, index, err := parseWALName(tt.str)
+ if g := err == nil; g != tt.wok {
+ t.Errorf("#%d: ok = %v, want %v", i, g, tt.wok)
+ }
+ if s != tt.wseq {
+ t.Errorf("#%d: seq = %d, want %d", i, s, tt.wseq)
+ }
+ if index != tt.windex {
+ t.Errorf("#%d: index = %d, want %d", i, index, tt.windex)
+ }
+ }
+}
+
+func TestRecoverAfterCut(t *testing.T) {
+ p := t.TempDir()
+
+ md, err := Create(zaptest.NewLogger(t), p, []byte("metadata"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i := 0; i < 10; i++ {
+ if err = md.SaveSnapshot(walpb.Snapshot{Index: uint64(i), Term: 1, ConfState: &confState}); err != nil {
+ t.Fatal(err)
+ }
+ es := []raftpb.Entry{{Index: uint64(i)}}
+ if err = md.Save(raftpb.HardState{}, es); err != nil {
+ t.Fatal(err)
+ }
+ if err = md.cut(); err != nil {
+ t.Fatal(err)
+ }
+ }
+ md.Close()
+
+ if err := os.Remove(filepath.Join(p, walName(4, 4))); err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < 10; i++ {
+ w, err := Open(zaptest.NewLogger(t), p, walpb.Snapshot{Index: uint64(i), Term: 1})
+ if err != nil {
+ if i <= 4 {
+ if !strings.Contains(err.Error(), "do not increase continuously") {
+ t.Errorf("#%d: err = %v isn't expected, want: '* do not increase continuously'", i, err)
+ }
+ } else {
+ t.Errorf("#%d: err = %v, want nil", i, err)
+ }
+ continue
+ }
+ metadata, _, entries, err := w.ReadAll()
+ if err != nil {
+ t.Errorf("#%d: err = %v, want nil", i, err)
+ continue
+ }
+ if !bytes.Equal(metadata, []byte("metadata")) {
+ t.Errorf("#%d: metadata = %s, want %s", i, metadata, "metadata")
+ }
+ for j, e := range entries {
+ if e.Index != uint64(j+i+1) {
+ t.Errorf("#%d: ents[%d].Index = %+v, want %+v", i, j, e.Index, j+i+1)
+ }
+ }
+ w.Close()
+ }
+}
+
+func TestOpenAtUncommittedIndex(t *testing.T) {
+ p := t.TempDir()
+
+ w, err := Create(zaptest.NewLogger(t), p, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.Save(raftpb.HardState{}, []raftpb.Entry{{Index: 0}}); err != nil {
+ t.Fatal(err)
+ }
+ w.Close()
+
+ w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ // commit up to index 0, try to read index 1
+ if _, _, _, err = w.ReadAll(); err != nil {
+ t.Errorf("err = %v, want nil", err)
+ }
+ w.Close()
+}
+
+// TestOpenForRead tests that OpenForRead can load all files.
+// The test creates a WAL directory and cuts out multiple WAL files. Then
+// it releases the locks on part of the data and expects that OpenForRead
+// can read out all files even if some are locked for write.
+func TestOpenForRead(t *testing.T) {
+ p := t.TempDir()
+ // create WAL
+ w, err := Create(zaptest.NewLogger(t), p, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer w.Close()
+ // make 10 separate files
+ for i := 0; i < 10; i++ {
+ es := []raftpb.Entry{{Index: uint64(i)}}
+ if err = w.Save(raftpb.HardState{}, es); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.cut(); err != nil {
+ t.Fatal(err)
+ }
+ }
+ // release the lock to 5
+ unlockIndex := uint64(5)
+ w.ReleaseLockTo(unlockIndex)
+
+ // All are available for read
+ w2, err := OpenForRead(zaptest.NewLogger(t), p, walpb.Snapshot{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer w2.Close()
+ _, _, ents, err := w2.ReadAll()
+ if err != nil {
+ t.Fatalf("err = %v, want nil", err)
+ }
+ if g := ents[len(ents)-1].Index; g != 9 {
+ t.Errorf("last index read = %d, want %d", g, 9)
+ }
+}
+
+func TestOpenWithMaxIndex(t *testing.T) {
+ p := t.TempDir()
+ // create WAL
+ w1, err := Create(zaptest.NewLogger(t), p, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if w1 != nil {
+ w1.Close()
+ }
+ }()
+
+ es := []raftpb.Entry{{Index: uint64(math.MaxInt64)}}
+ if err = w1.Save(raftpb.HardState{}, es); err != nil {
+ t.Fatal(err)
+ }
+ w1.Close()
+ w1 = nil
+
+ w2, err := Open(zaptest.NewLogger(t), p, walpb.Snapshot{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer w2.Close()
+
+ _, _, _, err = w2.ReadAll()
+ if !errors.Is(err, ErrSliceOutOfRange) {
+ t.Fatalf("err = %v, want ErrSliceOutOfRange", err)
+ }
+}
+
+func TestSaveEmpty(t *testing.T) {
+ var buf bytes.Buffer
+ var est raftpb.HardState
+ w := WAL{
+ encoder: newEncoder(&buf, 0, 0),
+ }
+ if err := w.saveState(&est); err != nil {
+ t.Errorf("err = %v, want nil", err)
+ }
+ if len(buf.Bytes()) != 0 {
+ t.Errorf("buf.Bytes = %d, want 0", len(buf.Bytes()))
+ }
+}
+
+func TestReleaseLockTo(t *testing.T) {
+ p := t.TempDir()
+ // create WAL
+ w, err := Create(zaptest.NewLogger(t), p, nil)
+ defer func() {
+ if err = w.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // release nothing if no files
+ err = w.ReleaseLockTo(10)
+ if err != nil {
+ t.Errorf("err = %v, want nil", err)
+ }
+
+ // make 10 separate files
+ for i := 0; i < 10; i++ {
+ es := []raftpb.Entry{{Index: uint64(i)}}
+ if err = w.Save(raftpb.HardState{}, es); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.cut(); err != nil {
+ t.Fatal(err)
+ }
+ }
+ // release the lock to 5
+ unlockIndex := uint64(5)
+ w.ReleaseLockTo(unlockIndex)
+
+ // expected remaining are 4,5,6,7,8,9,10
+ if len(w.locks) != 7 {
+ t.Errorf("len(w.locks) = %d, want %d", len(w.locks), 7)
+ }
+ for i, l := range w.locks {
+ var lockIndex uint64
+ _, lockIndex, err = parseWALName(filepath.Base(l.Name()))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if lockIndex != uint64(i+4) {
+ t.Errorf("#%d: lockindex = %d, want %d", i, lockIndex, uint64(i+4))
+ }
+ }
+
+ // release the lock to 15
+ unlockIndex = uint64(15)
+ w.ReleaseLockTo(unlockIndex)
+
+ // expected remaining is 10
+ if len(w.locks) != 1 {
+ t.Errorf("len(w.locks) = %d, want %d", len(w.locks), 1)
+ }
+ _, lockIndex, err := parseWALName(filepath.Base(w.locks[0].Name()))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if lockIndex != uint64(10) {
+ t.Errorf("lockindex = %d, want %d", lockIndex, 10)
+ }
+}
+
+// TestTailWriteNoSlackSpace ensures that tail writes append if there's no preallocated space.
+func TestTailWriteNoSlackSpace(t *testing.T) {
+ p := t.TempDir()
+
+ // create initial WAL
+ w, err := Create(zaptest.NewLogger(t), p, []byte("metadata"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // write some entries
+ for i := 1; i <= 5; i++ {
+ es := []raftpb.Entry{{Index: uint64(i), Term: 1, Data: []byte{byte(i)}}}
+ if err = w.Save(raftpb.HardState{Term: 1}, es); err != nil {
+ t.Fatal(err)
+ }
+ }
+ // get rid of slack space by truncating file
+ off, serr := w.tail().Seek(0, io.SeekCurrent)
+ if serr != nil {
+ t.Fatal(serr)
+ }
+ if terr := w.tail().Truncate(off); terr != nil {
+ t.Fatal(terr)
+ }
+ w.Close()
+
+ // open, write more
+ w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, _, ents, rerr := w.ReadAll()
+ if rerr != nil {
+ t.Fatal(rerr)
+ }
+ if len(ents) != 5 {
+ t.Fatalf("got entries %+v, expected 5 entries", ents)
+ }
+ // write more entries
+ for i := 6; i <= 10; i++ {
+ es := []raftpb.Entry{{Index: uint64(i), Term: 1, Data: []byte{byte(i)}}}
+ if err = w.Save(raftpb.HardState{Term: 1}, es); err != nil {
+ t.Fatal(err)
+ }
+ }
+ w.Close()
+
+ // confirm all writes
+ w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, _, ents, rerr = w.ReadAll()
+ if rerr != nil {
+ t.Fatal(rerr)
+ }
+ if len(ents) != 10 {
+ t.Fatalf("got entries %+v, expected 10 entries", ents)
+ }
+ w.Close()
+}
+
+// TestRestartCreateWal ensures that an interrupted WAL initialization is clobbered on restart
+func TestRestartCreateWal(t *testing.T) {
+ p := t.TempDir()
+ var err error
+
+ // make temporary directory so it looks like initialization is interrupted
+ tmpdir := filepath.Clean(p) + ".tmp"
+ if err = os.Mkdir(tmpdir, fileutil.PrivateDirMode); err != nil {
+ t.Fatal(err)
+ }
+ if _, err = os.OpenFile(filepath.Join(tmpdir, "test"), os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode); err != nil {
+ t.Fatal(err)
+ }
+
+ w, werr := Create(zaptest.NewLogger(t), p, []byte("abc"))
+ if werr != nil {
+ t.Fatal(werr)
+ }
+ w.Close()
+ if Exist(tmpdir) {
+ t.Fatalf("got %q exists, expected it to not exist", tmpdir)
+ }
+
+ if w, err = OpenForRead(zaptest.NewLogger(t), p, walpb.Snapshot{}); err != nil {
+ t.Fatal(err)
+ }
+ defer w.Close()
+
+ if meta, _, _, rerr := w.ReadAll(); rerr != nil || string(meta) != "abc" {
+ t.Fatalf("got error %v and meta %q, expected nil and %q", rerr, meta, "abc")
+ }
+}
+
+// TestOpenOnTornWrite ensures that entries past the torn write are truncated.
+func TestOpenOnTornWrite(t *testing.T) {
+ maxEntries := 40
+ clobberIdx := 20
+ overwriteEntries := 5
+
+ p := t.TempDir()
+ w, err := Create(zaptest.NewLogger(t), p, nil)
+ defer func() {
+ if err = w.Close(); err != nil && !errors.Is(err, os.ErrInvalid) {
+ t.Fatal(err)
+ }
+ }()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // get offset of end of each saved entry
+ offsets := make([]int64, maxEntries)
+ for i := range offsets {
+ es := []raftpb.Entry{{Index: uint64(i)}}
+ if err = w.Save(raftpb.HardState{}, es); err != nil {
+ t.Fatal(err)
+ }
+ if offsets[i], err = w.tail().Seek(0, io.SeekCurrent); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ fn := filepath.Join(p, filepath.Base(w.tail().Name()))
+ w.Close()
+
+ // clobber some entry with 0's to simulate a torn write
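+ // the decoder stops at a zero-filled record, so everything after the zeroed region becomes unreadable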
+ f, ferr := os.OpenFile(fn, os.O_WRONLY, fileutil.PrivateFileMode)
+ if ferr != nil {
+ t.Fatal(ferr)
+ }
+ defer f.Close()
+ _, err = f.Seek(offsets[clobberIdx], io.SeekStart)
+ if err != nil {
+ t.Fatal(err)
+ }
+ zeros := make([]byte, offsets[clobberIdx+1]-offsets[clobberIdx])
+ _, err = f.Write(zeros)
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.Close()
+
+ w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ // seek up to clobbered entry
+ _, _, _, err = w.ReadAll()
+ if err != nil {
+ t.Fatal(err)
+ }
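+ // ReadAll in write mode zeroes the tail past the last valid record, so the Saves below append from there.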
+
+ // write a few entries past the clobbered entry
+ for i := 0; i < overwriteEntries; i++ {
+ // Index is different from old, truncated entries
+ es := []raftpb.Entry{{Index: uint64(i + clobberIdx), Data: []byte("new")}}
+ if err = w.Save(raftpb.HardState{}, es); err != nil {
+ t.Fatal(err)
+ }
+ }
+ w.Close()
+
+ // read back the entries, confirm number of entries matches expectation
+ w, err = OpenForRead(zaptest.NewLogger(t), p, walpb.Snapshot{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, _, ents, rerr := w.ReadAll()
+ if rerr != nil {
+ // a CRC error here would mean the old entries were never truncated away
+ t.Fatal(rerr)
+ }
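+ // ReadAll skips the entry with raft index 0, and the rewritten entries override the old
+ // entries at the same indexes: clobberIdx-1 old entries plus overwriteEntries new ones remain.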
+ wEntries := (clobberIdx - 1) + overwriteEntries
+ if len(ents) != wEntries {
+ t.Fatalf("expected len(ents) = %d, got %d", wEntries, len(ents))
+ }
+}
+
+func TestRenameFail(t *testing.T) {
+ p := t.TempDir()
+
+ oldSegmentSizeBytes := SegmentSizeBytes
+ defer func() {
+ SegmentSizeBytes = oldSegmentSizeBytes
+ }()
+ SegmentSizeBytes = math.MaxInt64
+
+ tp := t.TempDir()
+ os.RemoveAll(tp)
+
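+ // renameWAL renames the given tmp directory onto w.dir; tp has already been removed, so the rename must fail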
+ w := &WAL{
+ lg: zaptest.NewLogger(t),
+ dir: p,
+ }
+ w2, werr := w.renameWAL(tp)
+ if w2 != nil || werr == nil { // os.Rename should fail from 'no such file or directory'
+ t.Fatalf("expected error, got %v", werr)
+ }
+}
+
+// TestReadAllFail ensures that ReadAll returns an error when the WAL has not been opened for reading.
+func TestReadAllFail(t *testing.T) {
+ dir := t.TempDir()
+
+ // create initial WAL
+ f, err := Create(zaptest.NewLogger(t), dir, []byte("metadata"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.Close()
+ // try to read without opening the WAL
+ _, _, _, err = f.ReadAll()
+ if err == nil || !errors.Is(err, ErrDecoderNotFound) {
+ t.Fatalf("err = %v, want ErrDecoderNotFound", err)
+ }
+}
+
+// TestValidSnapshotEntries ensures ValidSnapshotEntries returns all valid wal snapshot entries, accounting
+// for hardstate
+func TestValidSnapshotEntries(t *testing.T) {
+ p := t.TempDir()
+ snap0 := walpb.Snapshot{}
+ snap1 := walpb.Snapshot{Index: 1, Term: 1, ConfState: &confState}
+ state1 := raftpb.HardState{Commit: 1, Term: 1}
+ snap2 := walpb.Snapshot{Index: 2, Term: 1, ConfState: &confState}
+ snap3 := walpb.Snapshot{Index: 3, Term: 2, ConfState: &confState}
+ state2 := raftpb.HardState{Commit: 3, Term: 2}
+ snap4 := walpb.Snapshot{Index: 4, Term: 2, ConfState: &confState} // will be orphaned since the last committed entry will be snap3
+ func() {
+ w, err := Create(zaptest.NewLogger(t), p, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer w.Close()
+
+ // snap0 is implicitly created at index 0, term 0
+ if err = w.SaveSnapshot(snap1); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.Save(state1, nil); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.SaveSnapshot(snap2); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.SaveSnapshot(snap3); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.Save(state2, nil); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.SaveSnapshot(snap4); err != nil {
+ t.Fatal(err)
+ }
+ }()
+ walSnaps, err := ValidSnapshotEntries(zaptest.NewLogger(t), p)
+ if err != nil {
+ t.Fatal(err)
+ }
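+ // snap4 is excluded because its index (4) is greater than the last committed index (3) in state2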
+ expected := []walpb.Snapshot{snap0, snap1, snap2, snap3}
+ if !reflect.DeepEqual(walSnaps, expected) {
+ t.Errorf("expected walSnaps %+v, got %+v", expected, walSnaps)
+ }
+}
+
+// TestValidSnapshotEntriesAfterPurgeWal ensures that ValidSnapshotEntries still works
+// correctly after the first of several WAL files has been removed.
+func TestValidSnapshotEntriesAfterPurgeWal(t *testing.T) {
+ oldSegmentSizeBytes := SegmentSizeBytes
+ SegmentSizeBytes = 64
+ defer func() {
+ SegmentSizeBytes = oldSegmentSizeBytes
+ }()
+ p := t.TempDir()
+ snap0 := walpb.Snapshot{}
+ snap1 := walpb.Snapshot{Index: 1, Term: 1, ConfState: &confState}
+ state1 := raftpb.HardState{Commit: 1, Term: 1}
+ snap2 := walpb.Snapshot{Index: 2, Term: 1, ConfState: &confState}
+ snap3 := walpb.Snapshot{Index: 3, Term: 2, ConfState: &confState}
+ state2 := raftpb.HardState{Commit: 3, Term: 2}
+ func() {
+ w, err := Create(zaptest.NewLogger(t), p, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer w.Close()
+
+ // snap0 is implicitly created at index 0, term 0
+ if err = w.SaveSnapshot(snap1); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.Save(state1, nil); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.SaveSnapshot(snap2); err != nil {
+ t.Fatal(err)
+ }
+ if err = w.SaveSnapshot(snap3); err != nil {
+ t.Fatal(err)
+ }
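+ // with SegmentSizeBytes lowered to 64, these saves force the WAL to cut into many segment files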
+ for i := 0; i < 128; i++ {
+ if err = w.Save(state2, nil); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }()
+ files, _, err := selectWALFiles(nil, p, snap0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ os.Remove(filepath.Join(p, files[0]))
+ _, err = ValidSnapshotEntries(zaptest.NewLogger(t), p)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestLastRecordLengthExceedFileEnd(t *testing.T) {
+ /* The data below was generated by code similar to the snippet below. The length
+ * of the last record was intentionally changed to 1000 in order to make
+ * sure it exceeds the end of the file.
+ *
+ * for i := 0; i < 3; i++ {
+ * es := []raftpb.Entry{{Index: uint64(i + 1), Data: []byte(fmt.Sprintf("waldata%d", i+1))}}
+ * if err = w.Save(raftpb.HardState{}, es); err != nil {
+ * t.Fatal(err)
+ * }
+ * }
+ * ......
+ * var sb strings.Builder
+ * for _, ch := range buf {
+ * sb.WriteString(fmt.Sprintf("\\x%02x", ch))
+ * }
+ */
+ // Generate WAL file
+ t.Log("Generate a WAL file with the last record's length modified.")
+ data := []byte("\x04\x00\x00\x00\x00\x00\x00\x84\x08\x04\x10\x00\x00" +
+ "\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x84\x08\x01\x10\x00\x00" +
+ "\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x82\x08\x05\x10\xa0\xb3" +
+ "\x9b\x8f\x08\x1a\x04\x08\x00\x10\x00\x00\x00\x1a\x00\x00\x00\x00" +
+ "\x00\x00\x86\x08\x02\x10\xba\x8b\xdc\x85\x0f\x1a\x10\x08\x00\x10" +
+ "\x00\x18\x01\x22\x08\x77\x61\x6c\x64\x61\x74\x61\x31\x00\x00\x00" +
+ "\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x86\x08\x02\x10\xa1\xe8" +
+ "\xff\x9c\x02\x1a\x10\x08\x00\x10\x00\x18\x02\x22\x08\x77\x61\x6c" +
+ "\x64\x61\x74\x61\x32\x00\x00\x00\x00\x00\x00\xe8\x03\x00\x00\x00" +
+ "\x00\x00\x86\x08\x02\x10\xa1\x9c\xa1\xaa\x04\x1a\x10\x08\x00\x10" +
+ "\x00\x18\x03\x22\x08\x77\x61\x6c\x64\x61\x74\x61\x33\x00\x00\x00" +
+ "\x00\x00\x00")
+
+ buf := bytes.NewBuffer(data)
+ f, err := createFileWithData(t, buf)
+ require.NoError(t, err)
+ fileName := f.Name()
+ t.Logf("fileName: %v", fileName)
+
+ // Verify low-level decoder directly
+ t.Log("Verify all records can be parsed correctly.")
+ rec := &walpb.Record{}
+ decoder := NewDecoder(fileutil.NewFileReader(f))
+ for {
+ if err = decoder.Decode(rec); err != nil {
+ require.ErrorIs(t, err, io.ErrUnexpectedEOF)
+ break
+ }
+ if rec.Type == EntryType {
+ e := MustUnmarshalEntry(rec.Data)
+ t.Logf("Validating normal entry: %v", e)
+ recData := fmt.Sprintf("waldata%d", e.Index)
+ require.Equal(t, raftpb.EntryNormal, e.Type)
+ require.Equal(t, recData, string(e.Data))
+ }
+ rec = &walpb.Record{}
+ }
+ require.NoError(t, f.Close())
+
+ // Verify w.ReadAll() returns io.ErrUnexpectedEOF in the error chain.
+ t.Log("Verify the w.ReadAll returns io.ErrUnexpectedEOF in the error chain")
+ newFileName := filepath.Join(filepath.Dir(fileName), "0000000000000000-0000000000000000.wal")
+ require.NoError(t, os.Rename(fileName, newFileName))
+
+ w, err := Open(zaptest.NewLogger(t), filepath.Dir(fileName), walpb.Snapshot{
+ Index: 0,
+ Term: 0,
+ })
+ require.NoError(t, err)
+ defer w.Close()
+
+ _, _, _, err = w.ReadAll()
+ // Note: In a production environment the WAL file would be repaired
+ // automatically, but only once.
+ require.ErrorIs(t, err, io.ErrUnexpectedEOF)
+}
diff --git a/server/wal/walpb/record.go b/server/storage/wal/walpb/record.go
similarity index 87%
rename from server/wal/walpb/record.go
rename to server/storage/wal/walpb/record.go
index e2070fbba3b..30e6c061366 100644
--- a/server/wal/walpb/record.go
+++ b/server/storage/wal/walpb/record.go
@@ -14,18 +14,18 @@
package walpb
-import "errors"
-
-var (
- ErrCRCMismatch = errors.New("walpb: crc mismatch")
+import (
+ "errors"
+ "fmt"
)
+var ErrCRCMismatch = errors.New("walpb: crc mismatch")
+
func (rec *Record) Validate(crc uint32) error {
if rec.Crc == crc {
return nil
}
- rec.Reset()
- return ErrCRCMismatch
+ return fmt.Errorf("%w: expected: %x computed: %x", ErrCRCMismatch, rec.Crc, crc)
}
// ValidateSnapshotForWrite ensures that the newly written snapshot is valid.
diff --git a/server/wal/walpb/record.pb.go b/server/storage/wal/walpb/record.pb.go
similarity index 89%
rename from server/wal/walpb/record.pb.go
rename to server/storage/wal/walpb/record.pb.go
index 654d82636c0..5605fbdaa31 100644
--- a/server/wal/walpb/record.pb.go
+++ b/server/storage/wal/walpb/record.pb.go
@@ -11,7 +11,7 @@ import (
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/golang/protobuf/proto"
- raftpb "go.etcd.io/etcd/raft/v3/raftpb"
+ raftpb "go.etcd.io/raft/v3/raftpb"
)
// Reference imports to suppress errors if they are not otherwise used.
@@ -119,22 +119,24 @@ func init() {
func init() { proto.RegisterFile("record.proto", fileDescriptor_bf94fd919e302a1d) }
var fileDescriptor_bf94fd919e302a1d = []byte{
- // 234 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x8e, 0x41, 0x4e, 0xc3, 0x30,
- 0x10, 0x45, 0x63, 0xe2, 0x22, 0x18, 0xca, 0x02, 0xab, 0xaa, 0xa2, 0x2c, 0x4c, 0xd4, 0x55, 0x56,
- 0x29, 0xe2, 0x08, 0x65, 0xcf, 0x22, 0x3d, 0x00, 0x72, 0x1d, 0xa7, 0x20, 0xd1, 0x8c, 0x35, 0xb5,
- 0x04, 0xdc, 0x84, 0x23, 0x65, 0xc9, 0x09, 0x10, 0x84, 0x8b, 0xa0, 0x8c, 0x03, 0x1b, 0xfb, 0xeb,
- 0x7d, 0xf9, 0x7d, 0xc3, 0x9c, 0x9c, 0x45, 0x6a, 0x2a, 0x4f, 0x18, 0x50, 0xcd, 0x5e, 0xcc, 0xb3,
- 0xdf, 0xe5, 0x8b, 0x3d, 0xee, 0x91, 0xc9, 0x7a, 0x4c, 0xb1, 0xcc, 0x97, 0x64, 0xda, 0xb0, 0x1e,
- 0x0f, 0xbf, 0xe3, 0x2b, 0xf2, 0xd5, 0x3d, 0x9c, 0xd6, 0x2c, 0x51, 0x19, 0xc8, 0xf0, 0xe6, 0x5d,
- 0x26, 0x0a, 0x51, 0xa6, 0x1b, 0xd9, 0x7f, 0x5e, 0x27, 0x35, 0x13, 0xb5, 0x84, 0xd4, 0x92, 0xcd,
- 0x4e, 0x0a, 0x51, 0x5e, 0x4e, 0xc5, 0x08, 0x94, 0x02, 0xd9, 0x98, 0x60, 0xb2, 0xb4, 0x10, 0xe5,
- 0xbc, 0xe6, 0xbc, 0x22, 0x38, 0xdb, 0x76, 0xc6, 0x1f, 0x1f, 0x31, 0xa8, 0x1c, 0x66, 0x4f, 0x5d,
- 0xe3, 0x5e, 0x59, 0x29, 0xa7, 0x97, 0x11, 0xf1, 0x9a, 0xa3, 0x03, 0x4b, 0xe5, 0xff, 0x9a, 0xa3,
- 0x83, 0xba, 0x01, 0xb0, 0xd8, 0xb5, 0x0f, 0xc7, 0x60, 0x82, 0x63, 0xf7, 0xc5, 0xed, 0x55, 0x15,
- 0x7f, 0x5e, 0xdd, 0x61, 0xd7, 0x6e, 0xc7, 0xa2, 0x3e, 0xb7, 0x7f, 0x71, 0xb3, 0xe8, 0xbf, 0x75,
- 0xd2, 0x0f, 0x5a, 0x7c, 0x0c, 0x5a, 0x7c, 0x0d, 0x5a, 0xbc, 0xff, 0xe8, 0xe4, 0x37, 0x00, 0x00,
- 0xff, 0xff, 0xc3, 0x36, 0x0c, 0xad, 0x1d, 0x01, 0x00, 0x00,
+ // 266 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x90, 0x41, 0x4e, 0xc3, 0x30,
+ 0x14, 0x44, 0x63, 0x92, 0x22, 0x30, 0x65, 0x51, 0x0b, 0xa1, 0x28, 0x8b, 0x10, 0x75, 0x15, 0x09,
+ 0x29, 0x46, 0x70, 0x02, 0xca, 0x9e, 0x45, 0xba, 0x63, 0x83, 0x5c, 0xe7, 0x27, 0x54, 0x6a, 0xf3,
+ 0xad, 0x1f, 0xab, 0x85, 0x9b, 0x70, 0xa4, 0x2c, 0x39, 0x01, 0x82, 0x70, 0x11, 0x64, 0xa7, 0xb0,
+ 0xfa, 0xa3, 0x37, 0x9a, 0x19, 0xcb, 0x7c, 0x4a, 0xa0, 0x91, 0xaa, 0xc2, 0x10, 0x5a, 0x14, 0x93,
+ 0xbd, 0xda, 0x98, 0x55, 0x72, 0xd1, 0x60, 0x83, 0x9e, 0x48, 0xa7, 0x46, 0x33, 0x99, 0x91, 0xaa,
+ 0xad, 0x59, 0x49, 0x77, 0x46, 0x34, 0x7f, 0xe4, 0xc7, 0xa5, 0xcf, 0x8b, 0x98, 0x47, 0xf6, 0xcd,
+ 0x40, 0xcc, 0x32, 0x96, 0x87, 0x8b, 0xa8, 0xff, 0xbc, 0x0a, 0x4a, 0x4f, 0xc4, 0x25, 0x0f, 0x35,
+ 0xe9, 0xf8, 0x28, 0x63, 0xf9, 0xf9, 0xc1, 0x70, 0x40, 0x08, 0x1e, 0x55, 0xca, 0xaa, 0x38, 0xcc,
+ 0x58, 0x3e, 0x2d, 0xbd, 0x9e, 0x13, 0x3f, 0x59, 0xb6, 0xca, 0x74, 0x2f, 0x68, 0x45, 0xc2, 0x27,
+ 0xeb, 0xb6, 0x82, 0x57, 0x5f, 0x19, 0x1d, 0x92, 0x23, 0xf2, 0x6b, 0x40, 0x5b, 0x5f, 0x1a, 0xfd,
+ 0xaf, 0x01, 0x6d, 0xc5, 0x0d, 0xe7, 0x1a, 0xdb, 0xfa, 0xb9, 0xb3, 0xca, 0x82, 0xef, 0x3e, 0xbb,
+ 0x9d, 0x15, 0xe3, 0xcb, 0x8b, 0x07, 0x6c, 0xeb, 0xa5, 0x33, 0xca, 0x53, 0xfd, 0x27, 0x17, 0xf7,
+ 0xfd, 0x77, 0x1a, 0xf4, 0x43, 0xca, 0x3e, 0x86, 0x94, 0x7d, 0x0d, 0x29, 0x7b, 0xff, 0x49, 0x83,
+ 0xa7, 0xeb, 0x06, 0x0b, 0xb0, 0xba, 0x2a, 0xd6, 0x28, 0xdd, 0x95, 0x1d, 0xd0, 0x0e, 0x48, 0xee,
+ 0xee, 0x64, 0x67, 0x91, 0x54, 0x03, 0x72, 0xaf, 0x36, 0xd2, 0xff, 0xd7, 0x6f, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xcf, 0xa9, 0xf0, 0x02, 0x45, 0x01, 0x00, 0x00,
}
func (m *Record) Marshal() (dAtA []byte, err error) {
diff --git a/server/wal/walpb/record.proto b/server/storage/wal/walpb/record.proto
similarity index 87%
rename from server/wal/walpb/record.proto
rename to server/storage/wal/walpb/record.proto
index 536fa6c19c1..e1050fde481 100644
--- a/server/wal/walpb/record.proto
+++ b/server/storage/wal/walpb/record.proto
@@ -2,7 +2,9 @@ syntax = "proto2";
package walpb;
import "gogoproto/gogo.proto";
-import "raft/raftpb/raft.proto";
+import "raftpb/raft.proto";
+
+option go_package = "go.etcd.io/etcd/server/v3/storage/wal/walpb";
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
diff --git a/server/storage/wal/walpb/record_test.go b/server/storage/wal/walpb/record_test.go
new file mode 100644
index 00000000000..cdacb3d03f2
--- /dev/null
+++ b/server/storage/wal/walpb/record_test.go
@@ -0,0 +1,51 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package walpb
+
+import (
+ "testing"
+
+ "github.com/golang/protobuf/descriptor"
+
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+func TestSnapshotMetadataCompatibility(t *testing.T) {
+ _, snapshotMetadataMd := descriptor.ForMessage(&raftpb.SnapshotMetadata{})
+ _, snapshotMd := descriptor.ForMessage(&Snapshot{})
+ if len(snapshotMetadataMd.GetField()) != len(snapshotMd.GetField()) {
+ t.Errorf("Different number of fields in raftpb.SnapshotMetadata vs. walpb.Snapshot. " +
+ "They are supposed to be in sync.")
+ }
+}
+
+func TestValidateSnapshot(t *testing.T) {
+ tests := []struct {
+ name string
+ snap *Snapshot
+ wantErr bool
+ }{
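+ // a snapshot with a non-zero index is expected to be rejected unless it also carries a ConfState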
+ {name: "empty", snap: &Snapshot{}, wantErr: false},
+ {name: "invalid", snap: &Snapshot{Index: 5, Term: 3}, wantErr: true},
+ {name: "valid", snap: &Snapshot{Index: 5, Term: 3, ConfState: &raftpb.ConfState{Voters: []uint64{0x00cad1}}}, wantErr: false},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := ValidateSnapshotForWrite(tt.snap); (err != nil) != tt.wantErr {
+ t.Errorf("ValidateSnapshotForWrite() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
diff --git a/server/verify/verify.go b/server/verify/verify.go
index f727201ce8a..0dc99afc819 100644
--- a/server/verify/verify.go
+++ b/server/verify/verify.go
@@ -16,19 +16,20 @@ package verify
import (
"fmt"
- "os"
-
- "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/server/v3/datadir"
- "go.etcd.io/etcd/server/v3/etcdserver/cindex"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
- wal2 "go.etcd.io/etcd/server/v3/wal"
- "go.etcd.io/etcd/server/v3/wal/walpb"
+
"go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/datadir"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ wal2 "go.etcd.io/etcd/server/v3/storage/wal"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/raft/v3/raftpb"
)
-const ENV_VERIFY = "ETCD_VERIFY"
-const ENV_VERIFY_ALL_VALUE = "all"
+const envVerifyValueStorageWAL verify.VerificationType = "storage_wal"
type Config struct {
// DataDir is a root directory where the data being verified are stored.
@@ -47,19 +48,23 @@ type Config struct {
// the function can also panic.
// The function is expected to work on not-in-use data model, i.e.
// no file-locks should be taken. Verify does not modify the data.
-func Verify(cfg Config) error {
+func Verify(cfg Config) (retErr error) {
lg := cfg.Logger
if lg == nil {
lg = zap.NewNop()
}
- var err error
+ if !fileutil.Exist(datadir.ToBackendFileName(cfg.DataDir)) {
+ lg.Info("verification skipped due to non exist db file")
+ return nil
+ }
+
lg.Info("verification of persisted state", zap.String("data-dir", cfg.DataDir))
defer func() {
- if err != nil {
+ if retErr != nil {
lg.Error("verification of persisted state failed",
zap.String("data-dir", cfg.DataDir),
- zap.Error(err))
+ zap.Error(retErr))
} else if r := recover(); r != nil {
lg.Error("verification of persisted state failed",
zap.String("data-dir", cfg.DataDir))
@@ -69,14 +74,10 @@ func Verify(cfg Config) error {
}
}()
- beConfig := backend.DefaultBackendConfig()
- beConfig.Path = datadir.ToBackendFileName(cfg.DataDir)
- beConfig.Logger = cfg.Logger
-
- be := backend.New(beConfig)
+ be := backend.NewDefaultBackend(lg, datadir.ToBackendFileName(cfg.DataDir))
defer be.Close()
- snapshot, hardstate, err := validateWal(cfg)
+ snapshot, hardstate, err := validateWAL(cfg)
if err != nil {
return err
}
@@ -90,7 +91,7 @@ func Verify(cfg Config) error {
// VerifyIfEnabled performs verification according to ETCD_VERIFY env settings.
// See Verify for more information.
func VerifyIfEnabled(cfg Config) error {
- if os.Getenv(ENV_VERIFY) == ENV_VERIFY_ALL_VALUE {
+ if verify.IsVerificationEnabled(envVerifyValueStorageWAL) {
return Verify(cfg)
}
return nil
@@ -108,8 +109,7 @@ func MustVerifyIfEnabled(cfg Config) {
}
func validateConsistentIndex(cfg Config, hardstate *raftpb.HardState, snapshot *walpb.Snapshot, be backend.Backend) error {
- tx := be.BatchTx()
- index, term := cindex.ReadConsistentIndex(tx)
+ index, term := schema.ReadConsistentIndex(be.ReadTx())
if cfg.ExactIndex && index != hardstate.Commit {
return fmt.Errorf("backend.ConsistentIndex (%v) expected == WAL.HardState.commit (%v)", index, hardstate.Commit)
}
@@ -131,8 +131,8 @@ func validateConsistentIndex(cfg Config, hardstate *raftpb.HardState, snapshot *
return nil
}
-func validateWal(cfg Config) (*walpb.Snapshot, *raftpb.HardState, error) {
- walDir := datadir.ToWalDir(cfg.DataDir)
+func validateWAL(cfg Config) (*walpb.Snapshot, *raftpb.HardState, error) {
+ walDir := datadir.ToWALDir(cfg.DataDir)
walSnaps, err := wal2.ValidSnapshotEntries(cfg.Logger, walDir)
if err != nil {
diff --git a/server/wal/decoder.go b/server/wal/decoder.go
deleted file mode 100644
index 0251a72133d..00000000000
--- a/server/wal/decoder.go
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import (
- "bufio"
- "encoding/binary"
- "hash"
- "io"
- "sync"
-
- "go.etcd.io/etcd/pkg/v3/crc"
- "go.etcd.io/etcd/pkg/v3/pbutil"
- "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/server/v3/wal/walpb"
-)
-
-const minSectorSize = 512
-
-// frameSizeBytes is frame size in bytes, including record size and padding size.
-const frameSizeBytes = 8
-
-type decoder struct {
- mu sync.Mutex
- brs []*bufio.Reader
-
- // lastValidOff file offset following the last valid decoded record
- lastValidOff int64
- crc hash.Hash32
-}
-
-func newDecoder(r ...io.Reader) *decoder {
- readers := make([]*bufio.Reader, len(r))
- for i := range r {
- readers[i] = bufio.NewReader(r[i])
- }
- return &decoder{
- brs: readers,
- crc: crc.New(0, crcTable),
- }
-}
-
-func (d *decoder) decode(rec *walpb.Record) error {
- rec.Reset()
- d.mu.Lock()
- defer d.mu.Unlock()
- return d.decodeRecord(rec)
-}
-
-// raft max message size is set to 1 MB in etcd server
-// assume projects set reasonable message size limit,
-// thus entry size should never exceed 10 MB
-const maxWALEntrySizeLimit = int64(10 * 1024 * 1024)
-
-func (d *decoder) decodeRecord(rec *walpb.Record) error {
- if len(d.brs) == 0 {
- return io.EOF
- }
-
- l, err := readInt64(d.brs[0])
- if err == io.EOF || (err == nil && l == 0) {
- // hit end of file or preallocated space
- d.brs = d.brs[1:]
- if len(d.brs) == 0 {
- return io.EOF
- }
- d.lastValidOff = 0
- return d.decodeRecord(rec)
- }
- if err != nil {
- return err
- }
-
- recBytes, padBytes := decodeFrameSize(l)
- if recBytes >= maxWALEntrySizeLimit-padBytes {
- return ErrMaxWALEntrySizeLimitExceeded
- }
-
- data := make([]byte, recBytes+padBytes)
- if _, err = io.ReadFull(d.brs[0], data); err != nil {
- // ReadFull returns io.EOF only if no bytes were read
- // the decoder should treat this as an ErrUnexpectedEOF instead.
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return err
- }
- if err := rec.Unmarshal(data[:recBytes]); err != nil {
- if d.isTornEntry(data) {
- return io.ErrUnexpectedEOF
- }
- return err
- }
-
- // skip crc checking if the record type is crcType
- if rec.Type != crcType {
- d.crc.Write(rec.Data)
- if err := rec.Validate(d.crc.Sum32()); err != nil {
- if d.isTornEntry(data) {
- return io.ErrUnexpectedEOF
- }
- return err
- }
- }
- // record decoded as valid; point last valid offset to end of record
- d.lastValidOff += frameSizeBytes + recBytes + padBytes
- return nil
-}
-
-func decodeFrameSize(lenField int64) (recBytes int64, padBytes int64) {
- // the record size is stored in the lower 56 bits of the 64-bit length
- recBytes = int64(uint64(lenField) & ^(uint64(0xff) << 56))
- // non-zero padding is indicated by set MSb / a negative length
- if lenField < 0 {
- // padding is stored in lower 3 bits of length MSB
- padBytes = int64((uint64(lenField) >> 56) & 0x7)
- }
- return recBytes, padBytes
-}
-
-// isTornEntry determines whether the last entry of the WAL was partially written
-// and corrupted because of a torn write.
-func (d *decoder) isTornEntry(data []byte) bool {
- if len(d.brs) != 1 {
- return false
- }
-
- fileOff := d.lastValidOff + frameSizeBytes
- curOff := 0
- chunks := [][]byte{}
- // split data on sector boundaries
- for curOff < len(data) {
- chunkLen := int(minSectorSize - (fileOff % minSectorSize))
- if chunkLen > len(data)-curOff {
- chunkLen = len(data) - curOff
- }
- chunks = append(chunks, data[curOff:curOff+chunkLen])
- fileOff += int64(chunkLen)
- curOff += chunkLen
- }
-
- // if any data for a sector chunk is all 0, it's a torn write
- for _, sect := range chunks {
- isZero := true
- for _, v := range sect {
- if v != 0 {
- isZero = false
- break
- }
- }
- if isZero {
- return true
- }
- }
- return false
-}
-
-func (d *decoder) updateCRC(prevCrc uint32) {
- d.crc = crc.New(prevCrc, crcTable)
-}
-
-func (d *decoder) lastCRC() uint32 {
- return d.crc.Sum32()
-}
-
-func (d *decoder) lastOffset() int64 { return d.lastValidOff }
-
-func mustUnmarshalEntry(d []byte) raftpb.Entry {
- var e raftpb.Entry
- pbutil.MustUnmarshal(&e, d)
- return e
-}
-
-func mustUnmarshalState(d []byte) raftpb.HardState {
- var s raftpb.HardState
- pbutil.MustUnmarshal(&s, d)
- return s
-}
-
-func readInt64(r io.Reader) (int64, error) {
- var n int64
- err := binary.Read(r, binary.LittleEndian, &n)
- return n, err
-}
diff --git a/server/wal/doc.go b/server/wal/doc.go
deleted file mode 100644
index 7ea348e4a96..00000000000
--- a/server/wal/doc.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package wal provides an implementation of a write ahead log that is used by
-etcd.
-
-A WAL is created at a particular directory and is made up of a number of
-segmented WAL files. Inside of each file the raft state and entries are appended
-to it with the Save method:
-
- metadata := []byte{}
- w, err := wal.Create(zap.NewExample(), "/var/lib/etcd", metadata)
- ...
- err := w.Save(s, ents)
-
-After saving a raft snapshot to disk, SaveSnapshot method should be called to
-record it. So WAL can match with the saved snapshot when restarting.
-
- err := w.SaveSnapshot(walpb.Snapshot{Index: 10, Term: 2})
-
-When a user has finished using a WAL it must be closed:
-
- w.Close()
-
-Each WAL file is a stream of WAL records. A WAL record is a length field and a wal record
-protobuf. The record protobuf contains a CRC, a type, and a data payload. The length field is a
-64-bit packed structure holding the length of the remaining logical record data in its lower
-56 bits and its physical padding in the first three bits of the most significant byte. Each
-record is 8-byte aligned so that the length field is never torn. The CRC contains the CRC32
-value of all record protobufs preceding the current record.
-
-WAL files are placed inside of the directory in the following format:
-$seq-$index.wal
-
-The first WAL file to be created will be 0000000000000000-0000000000000000.wal
-indicating an initial sequence of 0 and an initial raft index of 0. The first
-entry written to WAL MUST have raft index 0.
-
-WAL will cut its current tail wal file if its size exceeds 64MB. This will increment an internal
-sequence number and cause a new file to be created. If the last raft index saved
-was 0x20 and this is the first time cut has been called on this WAL then the sequence will
-increment from 0x0 to 0x1. The new file will be: 0000000000000001-0000000000000021.wal.
-If a second cut issues 0x10 entries with incremental index later then the file will be called:
-0000000000000002-0000000000000031.wal.
-
-At a later time a WAL can be opened at a particular snapshot. If there is no
-snapshot, an empty snapshot should be passed in.
-
- w, err := wal.Open("/var/lib/etcd", walpb.Snapshot{Index: 10, Term: 2})
- ...
-
-The snapshot must have been written to the WAL.
-
-Additional items cannot be Saved to this WAL until all of the items from the given
-snapshot to the end of the WAL are read first:
-
- metadata, state, ents, err := w.ReadAll()
-
-This will give you the metadata, the last raft.State and the slice of
-raft.Entry items in the log.
-
-*/
-package wal
diff --git a/server/wal/file_pipeline_test.go b/server/wal/file_pipeline_test.go
deleted file mode 100644
index 01560570e8b..00000000000
--- a/server/wal/file_pipeline_test.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import (
- "io/ioutil"
- "math"
- "os"
- "testing"
-
- "go.uber.org/zap"
-)
-
-func TestFilePipeline(t *testing.T) {
- tdir, err := ioutil.TempDir(os.TempDir(), "wal-test")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(tdir)
-
- fp := newFilePipeline(zap.NewExample(), tdir, SegmentSizeBytes)
- defer fp.Close()
-
- f, ferr := fp.Open()
- if ferr != nil {
- t.Fatal(ferr)
- }
- f.Close()
-}
-
-func TestFilePipelineFailPreallocate(t *testing.T) {
- tdir, err := ioutil.TempDir(os.TempDir(), "wal-test")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(tdir)
-
- fp := newFilePipeline(zap.NewExample(), tdir, math.MaxInt64)
- defer fp.Close()
-
- f, ferr := fp.Open()
- if f != nil || ferr == nil { // no space left on device
- t.Fatal("expected error on invalid pre-allocate size, but no error")
- }
-}
-
-func TestFilePipelineFailLockFile(t *testing.T) {
- tdir, err := ioutil.TempDir(os.TempDir(), "wal-test")
- if err != nil {
- t.Fatal(err)
- }
- os.RemoveAll(tdir)
-
- fp := newFilePipeline(zap.NewExample(), tdir, math.MaxInt64)
- defer fp.Close()
-
- f, ferr := fp.Open()
- if f != nil || ferr == nil { // no such file or directory
- t.Fatal("expected error on invalid pre-allocate size, but no error")
- }
-}
diff --git a/server/wal/metrics.go b/server/wal/metrics.go
deleted file mode 100644
index 814d654cdd3..00000000000
--- a/server/wal/metrics.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import "github.com/prometheus/client_golang/prometheus"
-
-var (
- walFsyncSec = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "disk",
- Name: "wal_fsync_duration_seconds",
- Help: "The latency distributions of fsync called by WAL.",
-
- // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
- // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
- Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
- })
-
- walWriteBytes = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "disk",
- Name: "wal_write_bytes_total",
- Help: "Total number of bytes written in WAL.",
- })
-)
-
-func init() {
- prometheus.MustRegister(walFsyncSec)
- prometheus.MustRegister(walWriteBytes)
-}
diff --git a/server/wal/record_test.go b/server/wal/record_test.go
deleted file mode 100644
index d28807ebb8c..00000000000
--- a/server/wal/record_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import (
- "bytes"
- "errors"
- "hash/crc32"
- "io"
- "io/ioutil"
- "reflect"
- "testing"
-
- "go.etcd.io/etcd/server/v3/wal/walpb"
-)
-
-var (
- infoData = []byte("\b\xef\xfd\x02")
- infoRecord = append([]byte("\x0e\x00\x00\x00\x00\x00\x00\x00\b\x01\x10\x99\xb5\xe4\xd0\x03\x1a\x04"), infoData...)
-)
-
-func TestReadRecord(t *testing.T) {
- badInfoRecord := make([]byte, len(infoRecord))
- copy(badInfoRecord, infoRecord)
- badInfoRecord[len(badInfoRecord)-1] = 'a'
-
- tests := []struct {
- data []byte
- wr *walpb.Record
- we error
- }{
- {infoRecord, &walpb.Record{Type: 1, Crc: crc32.Checksum(infoData, crcTable), Data: infoData}, nil},
- {[]byte(""), &walpb.Record{}, io.EOF},
- {infoRecord[:8], &walpb.Record{}, io.ErrUnexpectedEOF},
- {infoRecord[:len(infoRecord)-len(infoData)-8], &walpb.Record{}, io.ErrUnexpectedEOF},
- {infoRecord[:len(infoRecord)-len(infoData)], &walpb.Record{}, io.ErrUnexpectedEOF},
- {infoRecord[:len(infoRecord)-8], &walpb.Record{}, io.ErrUnexpectedEOF},
- {badInfoRecord, &walpb.Record{}, walpb.ErrCRCMismatch},
- }
-
- rec := &walpb.Record{}
- for i, tt := range tests {
- buf := bytes.NewBuffer(tt.data)
- decoder := newDecoder(ioutil.NopCloser(buf))
- e := decoder.decode(rec)
- if !reflect.DeepEqual(rec, tt.wr) {
- t.Errorf("#%d: block = %v, want %v", i, rec, tt.wr)
- }
- if !errors.Is(e, tt.we) {
- t.Errorf("#%d: err = %v, want %v", i, e, tt.we)
- }
- rec = &walpb.Record{}
- }
-}
-
-func TestWriteRecord(t *testing.T) {
- b := &walpb.Record{}
- typ := int64(0xABCD)
- d := []byte("Hello world!")
- buf := new(bytes.Buffer)
- e := newEncoder(buf, 0, 0)
- e.encode(&walpb.Record{Type: typ, Data: d})
- e.flush()
- decoder := newDecoder(ioutil.NopCloser(buf))
- err := decoder.decode(b)
- if err != nil {
- t.Errorf("err = %v, want nil", err)
- }
- if b.Type != typ {
- t.Errorf("type = %d, want %d", b.Type, typ)
- }
- if !reflect.DeepEqual(b.Data, d) {
- t.Errorf("data = %v, want %v", b.Data, d)
- }
-}
diff --git a/server/wal/repair_test.go b/server/wal/repair_test.go
deleted file mode 100644
index e2e3294acda..00000000000
--- a/server/wal/repair_test.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "testing"
-
- "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/server/v3/wal/walpb"
-
- "go.uber.org/zap"
-)
-
-type corruptFunc func(string, int64) error
-
-// TestRepairTruncate ensures a truncated file can be repaired
-func TestRepairTruncate(t *testing.T) {
- corruptf := func(p string, offset int64) error {
- f, err := openLast(zap.NewExample(), p)
- if err != nil {
- return err
- }
- defer f.Close()
- return f.Truncate(offset - 4)
- }
-
- testRepair(t, makeEnts(10), corruptf, 9)
-}
-
-func testRepair(t *testing.T, ents [][]raftpb.Entry, corrupt corruptFunc, expectedEnts int) {
- p, err := ioutil.TempDir(os.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
-
- // create WAL
- w, err := Create(zap.NewExample(), p, nil)
- defer func() {
- if err = w.Close(); err != nil {
- t.Fatal(err)
- }
- }()
- if err != nil {
- t.Fatal(err)
- }
-
- for _, es := range ents {
- if err = w.Save(raftpb.HardState{}, es); err != nil {
- t.Fatal(err)
- }
- }
-
- offset, err := w.tail().Seek(0, io.SeekCurrent)
- if err != nil {
- t.Fatal(err)
- }
- w.Close()
-
- err = corrupt(p, offset)
- if err != nil {
- t.Fatal(err)
- }
-
- // verify we broke the wal
- w, err = Open(zap.NewExample(), p, walpb.Snapshot{})
- if err != nil {
- t.Fatal(err)
- }
- _, _, _, err = w.ReadAll()
- if err != io.ErrUnexpectedEOF {
- t.Fatalf("err = %v, want error %v", err, io.ErrUnexpectedEOF)
- }
- w.Close()
-
- // repair the wal
- if ok := Repair(zap.NewExample(), p); !ok {
- t.Fatalf("'Repair' returned '%v', want 'true'", ok)
- }
-
- // read it back
- w, err = Open(zap.NewExample(), p, walpb.Snapshot{})
- if err != nil {
- t.Fatal(err)
- }
- _, _, walEnts, err := w.ReadAll()
- if err != nil {
- t.Fatal(err)
- }
- if len(walEnts) != expectedEnts {
- t.Fatalf("len(ents) = %d, want %d", len(walEnts), expectedEnts)
- }
-
- // write some more entries to repaired log
- for i := 1; i <= 10; i++ {
- es := []raftpb.Entry{{Index: uint64(expectedEnts + i)}}
- if err = w.Save(raftpb.HardState{}, es); err != nil {
- t.Fatal(err)
- }
- }
- w.Close()
-
- // read back entries following repair, ensure it's all there
- w, err = Open(zap.NewExample(), p, walpb.Snapshot{})
- if err != nil {
- t.Fatal(err)
- }
- _, _, walEnts, err = w.ReadAll()
- if err != nil {
- t.Fatal(err)
- }
- if len(walEnts) != expectedEnts+10 {
- t.Fatalf("len(ents) = %d, want %d", len(walEnts), expectedEnts+10)
- }
-}
-
-func makeEnts(ents int) (ret [][]raftpb.Entry) {
- for i := 1; i <= ents; i++ {
- ret = append(ret, []raftpb.Entry{{Index: uint64(i)}})
- }
- return ret
-}
-
-// TestRepairWriteTearLast repairs the WAL in case the last record is a torn write
-// that straddled two sectors.
-func TestRepairWriteTearLast(t *testing.T) {
- corruptf := func(p string, offset int64) error {
- f, err := openLast(zap.NewExample(), p)
- if err != nil {
- return err
- }
- defer f.Close()
- // 512 bytes perfectly aligns the last record, so use 1024
- if offset < 1024 {
- return fmt.Errorf("got offset %d, expected >1024", offset)
- }
- if terr := f.Truncate(1024); terr != nil {
- return terr
- }
- return f.Truncate(offset)
- }
- testRepair(t, makeEnts(50), corruptf, 40)
-}
-
-// TestRepairWriteTearMiddle repairs the WAL when there is write tearing
-// in the middle of a record.
-func TestRepairWriteTearMiddle(t *testing.T) {
- corruptf := func(p string, offset int64) error {
- f, err := openLast(zap.NewExample(), p)
- if err != nil {
- return err
- }
- defer f.Close()
- // corrupt middle of 2nd record
- _, werr := f.WriteAt(make([]byte, 512), 4096+512)
- return werr
- }
- ents := makeEnts(5)
- // 4096 bytes of data so a middle sector is easy to corrupt
- dat := make([]byte, 4096)
- for i := range dat {
- dat[i] = byte(i)
- }
- for i := range ents {
- ents[i][0].Data = dat
- }
- testRepair(t, ents, corruptf, 1)
-}
-
-func TestRepairFailDeleteDir(t *testing.T) {
- p, err := ioutil.TempDir(os.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
-
- w, err := Create(zap.NewExample(), p, nil)
- if err != nil {
- t.Fatal(err)
- }
-
- oldSegmentSizeBytes := SegmentSizeBytes
- SegmentSizeBytes = 64
- defer func() {
- SegmentSizeBytes = oldSegmentSizeBytes
- }()
- for _, es := range makeEnts(50) {
- if err = w.Save(raftpb.HardState{}, es); err != nil {
- t.Fatal(err)
- }
- }
-
- _, serr := w.tail().Seek(0, io.SeekCurrent)
- if serr != nil {
- t.Fatal(serr)
- }
- w.Close()
-
- f, err := openLast(zap.NewExample(), p)
- if err != nil {
- t.Fatal(err)
- }
- if terr := f.Truncate(20); terr != nil {
- t.Fatal(err)
- }
- f.Close()
-
- w, err = Open(zap.NewExample(), p, walpb.Snapshot{})
- if err != nil {
- t.Fatal(err)
- }
- _, _, _, err = w.ReadAll()
- if err != io.ErrUnexpectedEOF {
- t.Fatalf("err = %v, want error %v", err, io.ErrUnexpectedEOF)
- }
- w.Close()
-
- os.RemoveAll(p)
- if Repair(zap.NewExample(), p) {
- t.Fatal("expect 'Repair' fail on unexpected directory deletion")
- }
-}
diff --git a/server/wal/util.go b/server/wal/util.go
deleted file mode 100644
index 4a21ae6182f..00000000000
--- a/server/wal/util.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
-
- "go.uber.org/zap"
-)
-
-var errBadWALName = errors.New("bad wal name")
-
-// Exist returns true if there are any files in a given directory.
-func Exist(dir string) bool {
- names, err := fileutil.ReadDir(dir, fileutil.WithExt(".wal"))
- if err != nil {
- return false
- }
- return len(names) != 0
-}
-
-// searchIndex returns the last array index of names whose raft index section is
-// equal to or smaller than the given index.
-// The given names MUST be sorted.
-func searchIndex(lg *zap.Logger, names []string, index uint64) (int, bool) {
- for i := len(names) - 1; i >= 0; i-- {
- name := names[i]
- _, curIndex, err := parseWALName(name)
- if err != nil {
- lg.Panic("failed to parse WAL file name", zap.String("path", name), zap.Error(err))
- }
- if index >= curIndex {
- return i, true
- }
- }
- return -1, false
-}
-
-// names should have been sorted based on sequence number.
-// isValidSeq checks whether seq increases continuously.
-func isValidSeq(lg *zap.Logger, names []string) bool {
- var lastSeq uint64
- for _, name := range names {
- curSeq, _, err := parseWALName(name)
- if err != nil {
- lg.Panic("failed to parse WAL file name", zap.String("path", name), zap.Error(err))
- }
- if lastSeq != 0 && lastSeq != curSeq-1 {
- return false
- }
- lastSeq = curSeq
- }
- return true
-}
-
-func readWALNames(lg *zap.Logger, dirpath string) ([]string, error) {
- names, err := fileutil.ReadDir(dirpath)
- if err != nil {
- return nil, err
- }
- wnames := checkWalNames(lg, names)
- if len(wnames) == 0 {
- return nil, ErrFileNotFound
- }
- return wnames, nil
-}
-
-func checkWalNames(lg *zap.Logger, names []string) []string {
- wnames := make([]string, 0)
- for _, name := range names {
- if _, _, err := parseWALName(name); err != nil {
- // don't complain about left over tmp files
- if !strings.HasSuffix(name, ".tmp") {
- lg.Warn(
- "ignored file in WAL directory",
- zap.String("path", name),
- )
- }
- continue
- }
- wnames = append(wnames, name)
- }
- return wnames
-}
-
-func parseWALName(str string) (seq, index uint64, err error) {
- if !strings.HasSuffix(str, ".wal") {
- return 0, 0, errBadWALName
- }
- _, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index)
- return seq, index, err
-}
-
-func walName(seq, index uint64) string {
- return fmt.Sprintf("%016x-%016x.wal", seq, index)
-}
diff --git a/server/wal/wal.go b/server/wal/wal.go
deleted file mode 100644
index 3c940e0cdeb..00000000000
--- a/server/wal/wal.go
+++ /dev/null
@@ -1,1003 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import (
- "bytes"
- "errors"
- "fmt"
- "hash/crc32"
- "io"
- "os"
- "path/filepath"
- "strings"
- "sync"
- "time"
-
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
- "go.etcd.io/etcd/pkg/v3/pbutil"
- "go.etcd.io/etcd/raft/v3"
- "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/server/v3/wal/walpb"
-
- "go.uber.org/zap"
-)
-
-const (
- metadataType int64 = iota + 1
- entryType
- stateType
- crcType
- snapshotType
-
- // warnSyncDuration is the amount of time allotted to an fsync before
- // logging a warning
- warnSyncDuration = time.Second
-)
-
-var (
- // SegmentSizeBytes is the preallocated size of each wal segment file.
- // The actual size might be larger than this. In general, the default
- // value should be used, but this is defined as an exported variable
- // so that tests can set a different segment size.
- SegmentSizeBytes int64 = 64 * 1000 * 1000 // 64MB
-
- ErrMetadataConflict = errors.New("wal: conflicting metadata found")
- ErrFileNotFound = errors.New("wal: file not found")
- ErrCRCMismatch = errors.New("wal: crc mismatch")
- ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
- ErrSnapshotNotFound = errors.New("wal: snapshot not found")
- ErrSliceOutOfRange = errors.New("wal: slice bounds out of range")
- ErrMaxWALEntrySizeLimitExceeded = errors.New("wal: max entry size limit exceeded")
- ErrDecoderNotFound = errors.New("wal: decoder not found")
- crcTable = crc32.MakeTable(crc32.Castagnoli)
-)
-
-// WAL is a logical representation of the stable storage.
-// WAL is either in read mode or append mode but not both.
-// A newly created WAL is in append mode, and ready for appending records.
-// A just opened WAL is in read mode, and ready for reading records.
-// The WAL will be ready for appending after reading out all the previous records.
-type WAL struct {
- lg *zap.Logger
-
- dir string // the living directory of the underlay files
-
- // dirFile is a fd for the wal directory for syncing on Rename
- dirFile *os.File
-
- metadata []byte // metadata recorded at the head of each WAL
- state raftpb.HardState // hardstate recorded at the head of WAL
-
- start walpb.Snapshot // snapshot to start reading
- decoder *decoder // decoder to decode records
- readClose func() error // closer for decode reader
-
- unsafeNoSync bool // if set, do not fsync
-
- mu sync.Mutex
- enti uint64 // index of the last entry saved to the wal
- encoder *encoder // encoder to encode records
-
- locks []*fileutil.LockedFile // the locked files the WAL holds (the name is increasing)
- fp *filePipeline
-}
-
-// Create creates a WAL ready for appending records. The given metadata is
-// recorded at the head of each WAL file, and can be retrieved with ReadAll
-// after the file is Open.
-func Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error) {
- if Exist(dirpath) {
- return nil, os.ErrExist
- }
-
- if lg == nil {
- lg = zap.NewNop()
- }
-
- // keep temporary wal directory so WAL initialization appears atomic
- tmpdirpath := filepath.Clean(dirpath) + ".tmp"
- if fileutil.Exist(tmpdirpath) {
- if err := os.RemoveAll(tmpdirpath); err != nil {
- return nil, err
- }
- }
- defer os.RemoveAll(tmpdirpath)
-
- if err := fileutil.CreateDirAll(tmpdirpath); err != nil {
- lg.Warn(
- "failed to create a temporary WAL directory",
- zap.String("tmp-dir-path", tmpdirpath),
- zap.String("dir-path", dirpath),
- zap.Error(err),
- )
- return nil, err
- }
-
- p := filepath.Join(tmpdirpath, walName(0, 0))
- f, err := fileutil.LockFile(p, os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
- if err != nil {
- lg.Warn(
- "failed to flock an initial WAL file",
- zap.String("path", p),
- zap.Error(err),
- )
- return nil, err
- }
- if _, err = f.Seek(0, io.SeekEnd); err != nil {
- lg.Warn(
- "failed to seek an initial WAL file",
- zap.String("path", p),
- zap.Error(err),
- )
- return nil, err
- }
- if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil {
- lg.Warn(
- "failed to preallocate an initial WAL file",
- zap.String("path", p),
- zap.Int64("segment-bytes", SegmentSizeBytes),
- zap.Error(err),
- )
- return nil, err
- }
-
- w := &WAL{
- lg: lg,
- dir: dirpath,
- metadata: metadata,
- }
- w.encoder, err = newFileEncoder(f.File, 0)
- if err != nil {
- return nil, err
- }
- w.locks = append(w.locks, f)
- if err = w.saveCrc(0); err != nil {
- return nil, err
- }
- if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil {
- return nil, err
- }
- if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil {
- return nil, err
- }
-
- logDirPath := w.dir
- if w, err = w.renameWAL(tmpdirpath); err != nil {
- lg.Warn(
- "failed to rename the temporary WAL directory",
- zap.String("tmp-dir-path", tmpdirpath),
- zap.String("dir-path", logDirPath),
- zap.Error(err),
- )
- return nil, err
- }
-
- var perr error
- defer func() {
- if perr != nil {
- w.cleanupWAL(lg)
- }
- }()
-
- // directory was renamed; sync parent dir to persist rename
- pdir, perr := fileutil.OpenDir(filepath.Dir(w.dir))
- if perr != nil {
- lg.Warn(
- "failed to open the parent data directory",
- zap.String("parent-dir-path", filepath.Dir(w.dir)),
- zap.String("dir-path", w.dir),
- zap.Error(perr),
- )
- return nil, perr
- }
- dirCloser := func() error {
- if perr = pdir.Close(); perr != nil {
- lg.Warn(
- "failed to close the parent data directory file",
- zap.String("parent-dir-path", filepath.Dir(w.dir)),
- zap.String("dir-path", w.dir),
- zap.Error(perr),
- )
- return perr
- }
- return nil
- }
- start := time.Now()
- if perr = fileutil.Fsync(pdir); perr != nil {
- dirCloser()
- lg.Warn(
- "failed to fsync the parent data directory file",
- zap.String("parent-dir-path", filepath.Dir(w.dir)),
- zap.String("dir-path", w.dir),
- zap.Error(perr),
- )
- return nil, perr
- }
- walFsyncSec.Observe(time.Since(start).Seconds())
- if err = dirCloser(); err != nil {
- return nil, err
- }
-
- return w, nil
-}
-
-func (w *WAL) SetUnsafeNoFsync() {
- w.unsafeNoSync = true
-}
-
-func (w *WAL) cleanupWAL(lg *zap.Logger) {
- var err error
- if err = w.Close(); err != nil {
- lg.Panic("failed to close WAL during cleanup", zap.Error(err))
- }
- brokenDirName := fmt.Sprintf("%s.broken.%v", w.dir, time.Now().Format("20060102.150405.999999"))
- if err = os.Rename(w.dir, brokenDirName); err != nil {
- lg.Panic(
- "failed to rename WAL during cleanup",
- zap.Error(err),
- zap.String("source-path", w.dir),
- zap.String("rename-path", brokenDirName),
- )
- }
-}
-
-func (w *WAL) renameWAL(tmpdirpath string) (*WAL, error) {
- if err := os.RemoveAll(w.dir); err != nil {
- return nil, err
- }
- // On non-Windows platforms, hold the lock while renaming. Releasing
- // the lock and trying to reacquire it quickly can be flaky because
- // it's possible the process will fork to spawn a process while this is
- // happening. The fds are set up as close-on-exec by the Go runtime,
- // but there is a window between the fork and the exec where another
- // process holds the lock.
- if err := os.Rename(tmpdirpath, w.dir); err != nil {
- if _, ok := err.(*os.LinkError); ok {
- return w.renameWALUnlock(tmpdirpath)
- }
- return nil, err
- }
- w.fp = newFilePipeline(w.lg, w.dir, SegmentSizeBytes)
- df, err := fileutil.OpenDir(w.dir)
- w.dirFile = df
- return w, err
-}
-
-func (w *WAL) renameWALUnlock(tmpdirpath string) (*WAL, error) {
- // rename of directory with locked files doesn't work on windows/cifs;
- // close the WAL to release the locks so the directory can be renamed.
- w.lg.Info(
- "closing WAL to release flock and retry directory renaming",
- zap.String("from", tmpdirpath),
- zap.String("to", w.dir),
- )
- w.Close()
-
- if err := os.Rename(tmpdirpath, w.dir); err != nil {
- return nil, err
- }
-
- // reopen and relock
- newWAL, oerr := Open(w.lg, w.dir, walpb.Snapshot{})
- if oerr != nil {
- return nil, oerr
- }
- if _, _, _, err := newWAL.ReadAll(); err != nil {
- newWAL.Close()
- return nil, err
- }
- return newWAL, nil
-}
-
-// Open opens the WAL at the given snap.
-// The snap SHOULD have been previously saved to the WAL, or the following
-// ReadAll will fail.
-// The returned WAL is ready to read and the first record will be the one after
-// the given snap. The WAL cannot be appended to before reading out all of its
-// previous records.
-func Open(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error) {
- w, err := openAtIndex(lg, dirpath, snap, true)
- if err != nil {
- return nil, err
- }
- if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil {
- return nil, err
- }
- return w, nil
-}
-
-// OpenForRead only opens the wal files for read.
-// Write on a read only wal panics.
-func OpenForRead(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error) {
- return openAtIndex(lg, dirpath, snap, false)
-}
-
-func openAtIndex(lg *zap.Logger, dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) {
- if lg == nil {
- lg = zap.NewNop()
- }
- names, nameIndex, err := selectWALFiles(lg, dirpath, snap)
- if err != nil {
- return nil, err
- }
-
- rs, ls, closer, err := openWALFiles(lg, dirpath, names, nameIndex, write)
- if err != nil {
- return nil, err
- }
-
- // create a WAL ready for reading
- w := &WAL{
- lg: lg,
- dir: dirpath,
- start: snap,
- decoder: newDecoder(rs...),
- readClose: closer,
- locks: ls,
- }
-
- if write {
- // write reuses the file descriptors from read; don't close so
- // WAL can append without dropping the file lock
- w.readClose = nil
- if _, _, err := parseWALName(filepath.Base(w.tail().Name())); err != nil {
- closer()
- return nil, err
- }
- w.fp = newFilePipeline(lg, w.dir, SegmentSizeBytes)
- }
-
- return w, nil
-}
-
-func selectWALFiles(lg *zap.Logger, dirpath string, snap walpb.Snapshot) ([]string, int, error) {
- names, err := readWALNames(lg, dirpath)
- if err != nil {
- return nil, -1, err
- }
-
- nameIndex, ok := searchIndex(lg, names, snap.Index)
- if !ok || !isValidSeq(lg, names[nameIndex:]) {
- err = ErrFileNotFound
- return nil, -1, err
- }
-
- return names, nameIndex, nil
-}
-
-func openWALFiles(lg *zap.Logger, dirpath string, names []string, nameIndex int, write bool) ([]io.Reader, []*fileutil.LockedFile, func() error, error) {
- rcs := make([]io.ReadCloser, 0)
- rs := make([]io.Reader, 0)
- ls := make([]*fileutil.LockedFile, 0)
- for _, name := range names[nameIndex:] {
- p := filepath.Join(dirpath, name)
- if write {
- l, err := fileutil.TryLockFile(p, os.O_RDWR, fileutil.PrivateFileMode)
- if err != nil {
- closeAll(lg, rcs...)
- return nil, nil, nil, err
- }
- ls = append(ls, l)
- rcs = append(rcs, l)
- } else {
- rf, err := os.OpenFile(p, os.O_RDONLY, fileutil.PrivateFileMode)
- if err != nil {
- closeAll(lg, rcs...)
- return nil, nil, nil, err
- }
- ls = append(ls, nil)
- rcs = append(rcs, rf)
- }
- rs = append(rs, rcs[len(rcs)-1])
- }
-
- closer := func() error { return closeAll(lg, rcs...) }
-
- return rs, ls, closer, nil
-}
-
-// ReadAll reads out records of the current WAL.
-// If opened in write mode, it must read out all records until EOF. Or an error
-// will be returned.
-// If opened in read mode, it will try to read all records if possible.
-// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
-// If loaded snap doesn't match with the expected one, it will return
-// all the records and error ErrSnapshotMismatch.
-// TODO: detect not-last-snap error.
-// TODO: maybe loose the checking of match.
-// After ReadAll, the WAL will be ready for appending new records.
-//
-// ReadAll suppresses WAL entries that got overridden (i.e. a newer entry with the same index
-// exists in the log). Such a situation can happen in cases described in figure 7. of the
-// RAFT paper (http://web.stanford.edu/~ouster/cgi-bin/papers/raft-atc14.pdf).
-//
-// ReadAll may return uncommitted yet entries, that are subject to be overriden.
-// Do not apply entries that have index > state.commit, as they are subject to change.
-func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- rec := &walpb.Record{}
-
- if w.decoder == nil {
- return nil, state, nil, ErrDecoderNotFound
- }
- decoder := w.decoder
-
- var match bool
- for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
- switch rec.Type {
- case entryType:
- e := mustUnmarshalEntry(rec.Data)
- // 0 <= e.Index-w.start.Index - 1 < len(ents)
- if e.Index > w.start.Index {
- // prevent "panic: runtime error: slice bounds out of range [:13038096702221461992] with capacity 0"
- up := e.Index - w.start.Index - 1
- if up > uint64(len(ents)) {
- // return error before append call causes runtime panic
- return nil, state, nil, ErrSliceOutOfRange
- }
- // The line below is potentially overriding some 'uncommitted' entries.
- ents = append(ents[:up], e)
- }
- w.enti = e.Index
-
- case stateType:
- state = mustUnmarshalState(rec.Data)
-
- case metadataType:
- if metadata != nil && !bytes.Equal(metadata, rec.Data) {
- state.Reset()
- return nil, state, nil, ErrMetadataConflict
- }
- metadata = rec.Data
-
- case crcType:
- crc := decoder.crc.Sum32()
-			// the current crc of the decoder must match the crc of the record.
-			// no need to match a 0 crc, since the decoder is a new one in this case.
- if crc != 0 && rec.Validate(crc) != nil {
- state.Reset()
- return nil, state, nil, ErrCRCMismatch
- }
- decoder.updateCRC(rec.Crc)
-
- case snapshotType:
- var snap walpb.Snapshot
- pbutil.MustUnmarshal(&snap, rec.Data)
- if snap.Index == w.start.Index {
- if snap.Term != w.start.Term {
- state.Reset()
- return nil, state, nil, ErrSnapshotMismatch
- }
- match = true
- }
-
- default:
- state.Reset()
- return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type)
- }
- }
-
- switch w.tail() {
- case nil:
- // We do not have to read out all entries in read mode.
-		// The last record may be a partially written one, so
-		// io.ErrUnexpectedEOF might be returned.
- if err != io.EOF && err != io.ErrUnexpectedEOF {
- state.Reset()
- return nil, state, nil, err
- }
- default:
- // We must read all of the entries if WAL is opened in write mode.
- if err != io.EOF {
- state.Reset()
- return nil, state, nil, err
- }
- // decodeRecord() will return io.EOF if it detects a zero record,
- // but this zero record may be followed by non-zero records from
- // a torn write. Overwriting some of these non-zero records, but
- // not all, will cause CRC errors on WAL open. Since the records
- // were never fully synced to disk in the first place, it's safe
- // to zero them out to avoid any CRC errors from new writes.
- if _, err = w.tail().Seek(w.decoder.lastOffset(), io.SeekStart); err != nil {
- return nil, state, nil, err
- }
- if err = fileutil.ZeroToEnd(w.tail().File); err != nil {
- return nil, state, nil, err
- }
- }
-
- err = nil
- if !match {
- err = ErrSnapshotNotFound
- }
-
- // close decoder, disable reading
- if w.readClose != nil {
- w.readClose()
- w.readClose = nil
- }
- w.start = walpb.Snapshot{}
-
- w.metadata = metadata
-
- if w.tail() != nil {
- // create encoder (chain crc with the decoder), enable appending
- w.encoder, err = newFileEncoder(w.tail().File, w.decoder.lastCRC())
- if err != nil {
- return
- }
- }
- w.decoder = nil
-
- return metadata, state, ents, err
-}
-
-// ValidSnapshotEntries returns all the valid snapshot entries in the wal logs in the given directory.
-// Snapshot entries are valid if their index is less than or equal to the commit index of the most recent hardstate.
-func ValidSnapshotEntries(lg *zap.Logger, walDir string) ([]walpb.Snapshot, error) {
- var snaps []walpb.Snapshot
- var state raftpb.HardState
- var err error
-
- rec := &walpb.Record{}
- names, err := readWALNames(lg, walDir)
- if err != nil {
- return nil, err
- }
-
- // open wal files in read mode, so that there is no conflict
- // when the same WAL is opened elsewhere in write mode
- rs, _, closer, err := openWALFiles(lg, walDir, names, 0, false)
- if err != nil {
- return nil, err
- }
- defer func() {
- if closer != nil {
- closer()
- }
- }()
-
- // create a new decoder from the readers on the WAL files
- decoder := newDecoder(rs...)
-
- for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
- switch rec.Type {
- case snapshotType:
- var loadedSnap walpb.Snapshot
- pbutil.MustUnmarshal(&loadedSnap, rec.Data)
- snaps = append(snaps, loadedSnap)
- case stateType:
- state = mustUnmarshalState(rec.Data)
- case crcType:
- crc := decoder.crc.Sum32()
-			// the current crc of the decoder must match the crc of the record.
-			// no need to match a 0 crc, since the decoder is a new one in this case.
- if crc != 0 && rec.Validate(crc) != nil {
- return nil, ErrCRCMismatch
- }
- decoder.updateCRC(rec.Crc)
- }
- }
- // We do not have to read out all the WAL entries
- // as the decoder is opened in read mode.
- if err != io.EOF && err != io.ErrUnexpectedEOF {
- return nil, err
- }
-
- // filter out any snaps that are newer than the committed hardstate
- n := 0
- for _, s := range snaps {
- if s.Index <= state.Commit {
- snaps[n] = s
- n++
- }
- }
- snaps = snaps[:n:n]
- return snaps, nil
-}
-
-// Verify reads through the given WAL and verifies that it is not corrupted.
-// It creates a new decoder to read through the records of the given WAL.
-// It does not conflict with any open WAL, but it is recommended not to
-// call this function after opening the WAL for writing.
-// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
-// If the loaded snapshot doesn't match the expected one, it will
-// return error ErrSnapshotMismatch.
-func Verify(lg *zap.Logger, walDir string, snap walpb.Snapshot) (*raftpb.HardState, error) {
- var metadata []byte
- var err error
- var match bool
- var state raftpb.HardState
-
- rec := &walpb.Record{}
-
- if lg == nil {
- lg = zap.NewNop()
- }
- names, nameIndex, err := selectWALFiles(lg, walDir, snap)
- if err != nil {
- return nil, err
- }
-
- // open wal files in read mode, so that there is no conflict
- // when the same WAL is opened elsewhere in write mode
- rs, _, closer, err := openWALFiles(lg, walDir, names, nameIndex, false)
- if err != nil {
- return nil, err
- }
- defer func() {
- if closer != nil {
- closer()
- }
- }()
-
- // create a new decoder from the readers on the WAL files
- decoder := newDecoder(rs...)
-
- for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
- switch rec.Type {
- case metadataType:
- if metadata != nil && !bytes.Equal(metadata, rec.Data) {
- return nil, ErrMetadataConflict
- }
- metadata = rec.Data
- case crcType:
- crc := decoder.crc.Sum32()
- // Current crc of decoder must match the crc of the record.
- // We need not match 0 crc, since the decoder is a new one at this point.
- if crc != 0 && rec.Validate(crc) != nil {
- return nil, ErrCRCMismatch
- }
- decoder.updateCRC(rec.Crc)
- case snapshotType:
- var loadedSnap walpb.Snapshot
- pbutil.MustUnmarshal(&loadedSnap, rec.Data)
- if loadedSnap.Index == snap.Index {
- if loadedSnap.Term != snap.Term {
- return nil, ErrSnapshotMismatch
- }
- match = true
- }
-		// Entry records are skipped as they are not needed for validating the
-		// WAL contents; state records are unmarshaled so that the latest hard
-		// state can be returned.
- case entryType:
- case stateType:
- pbutil.MustUnmarshal(&state, rec.Data)
- default:
- return nil, fmt.Errorf("unexpected block type %d", rec.Type)
- }
- }
-
- // We do not have to read out all the WAL entries
- // as the decoder is opened in read mode.
- if err != io.EOF && err != io.ErrUnexpectedEOF {
- return nil, err
- }
-
- if !match {
- return nil, ErrSnapshotNotFound
- }
-
- return &state, nil
-}
-
-// cut closes the currently written file and creates a new one ready for appending.
-// cut first creates a temp wal file and writes the necessary headers into it.
-// Then cut atomically renames the temp wal file to a wal file.
-func (w *WAL) cut() error {
-	// close the old wal file; truncate to avoid wasting space in case of an early cut
- off, serr := w.tail().Seek(0, io.SeekCurrent)
- if serr != nil {
- return serr
- }
-
- if err := w.tail().Truncate(off); err != nil {
- return err
- }
-
- if err := w.sync(); err != nil {
- return err
- }
-
- fpath := filepath.Join(w.dir, walName(w.seq()+1, w.enti+1))
-
- // create a temp wal file with name sequence + 1, or truncate the existing one
- newTail, err := w.fp.Open()
- if err != nil {
- return err
- }
-
- // update writer and save the previous crc
- w.locks = append(w.locks, newTail)
- prevCrc := w.encoder.crc.Sum32()
- w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
- if err != nil {
- return err
- }
-
- if err = w.saveCrc(prevCrc); err != nil {
- return err
- }
-
- if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil {
- return err
- }
-
- if err = w.saveState(&w.state); err != nil {
- return err
- }
-
- // atomically move temp wal file to wal file
- if err = w.sync(); err != nil {
- return err
- }
-
- off, err = w.tail().Seek(0, io.SeekCurrent)
- if err != nil {
- return err
- }
-
- if err = os.Rename(newTail.Name(), fpath); err != nil {
- return err
- }
- start := time.Now()
- if err = fileutil.Fsync(w.dirFile); err != nil {
- return err
- }
- walFsyncSec.Observe(time.Since(start).Seconds())
-
- // reopen newTail with its new path so calls to Name() match the wal filename format
- newTail.Close()
-
- if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
- return err
- }
- if _, err = newTail.Seek(off, io.SeekStart); err != nil {
- return err
- }
-
- w.locks[len(w.locks)-1] = newTail
-
- prevCrc = w.encoder.crc.Sum32()
- w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
- if err != nil {
- return err
- }
-
- w.lg.Info("created a new WAL segment", zap.String("path", fpath))
- return nil
-}
-
-func (w *WAL) sync() error {
- if w.encoder != nil {
- if err := w.encoder.flush(); err != nil {
- return err
- }
- }
-
- if w.unsafeNoSync {
- return nil
- }
-
- start := time.Now()
- err := fileutil.Fdatasync(w.tail().File)
-
- took := time.Since(start)
- if took > warnSyncDuration {
- w.lg.Warn(
- "slow fdatasync",
- zap.Duration("took", took),
- zap.Duration("expected-duration", warnSyncDuration),
- )
- }
- walFsyncSec.Observe(took.Seconds())
-
- return err
-}
-
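-// Sync flushes any buffered records and fdatasyncs the tail WAL file to stable
-// storage, unless syncing has been explicitly disabled.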
-func (w *WAL) Sync() error {
- return w.sync()
-}
-
-// ReleaseLockTo releases the locks whose index is smaller than the given index,
-// except for the largest one among them.
-// For example, if the WAL is holding locks 1,2,3,4,5,6, ReleaseLockTo(4) will release
-// locks 1,2 but keep 3. ReleaseLockTo(5) will release 1,2,3 but keep 4.
-func (w *WAL) ReleaseLockTo(index uint64) error {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- if len(w.locks) == 0 {
- return nil
- }
-
- var smaller int
- found := false
- for i, l := range w.locks {
- _, lockIndex, err := parseWALName(filepath.Base(l.Name()))
- if err != nil {
- return err
- }
- if lockIndex >= index {
- smaller = i - 1
- found = true
- break
- }
- }
-
-	// if no lock index is greater than the release index, we can
-	// release locks up to the last one (exclusive).
- if !found {
- smaller = len(w.locks) - 1
- }
-
- if smaller <= 0 {
- return nil
- }
-
- for i := 0; i < smaller; i++ {
- if w.locks[i] == nil {
- continue
- }
- w.locks[i].Close()
- }
- w.locks = w.locks[smaller:]
-
- return nil
-}
-
-// Close closes the current WAL file and directory.
-func (w *WAL) Close() error {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- if w.fp != nil {
- w.fp.Close()
- w.fp = nil
- }
-
- if w.tail() != nil {
- if err := w.sync(); err != nil {
- return err
- }
- }
- for _, l := range w.locks {
- if l == nil {
- continue
- }
- if err := l.Close(); err != nil {
- w.lg.Error("failed to close WAL", zap.Error(err))
- }
- }
-
- return w.dirFile.Close()
-}
-
-func (w *WAL) saveEntry(e *raftpb.Entry) error {
- // TODO: add MustMarshalTo to reduce one allocation.
- b := pbutil.MustMarshal(e)
- rec := &walpb.Record{Type: entryType, Data: b}
- if err := w.encoder.encode(rec); err != nil {
- return err
- }
- w.enti = e.Index
- return nil
-}
-
-func (w *WAL) saveState(s *raftpb.HardState) error {
- if raft.IsEmptyHardState(*s) {
- return nil
- }
- w.state = *s
- b := pbutil.MustMarshal(s)
- rec := &walpb.Record{Type: stateType, Data: b}
- return w.encoder.encode(rec)
-}
-
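-// Save persists the given hard state and entries to the WAL. It fsyncs when
-// required by the raft state machine and cuts a new segment once the current
-// file grows beyond SegmentSizeBytes.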
-func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error {
- w.mu.Lock()
- defer w.mu.Unlock()
-
-	// shortcut, do not call sync
- if raft.IsEmptyHardState(st) && len(ents) == 0 {
- return nil
- }
-
- mustSync := raft.MustSync(st, w.state, len(ents))
-
- // TODO(xiangli): no more reference operator
- for i := range ents {
- if err := w.saveEntry(&ents[i]); err != nil {
- return err
- }
- }
- if err := w.saveState(&st); err != nil {
- return err
- }
-
- curOff, err := w.tail().Seek(0, io.SeekCurrent)
- if err != nil {
- return err
- }
- if curOff < SegmentSizeBytes {
- if mustSync {
- return w.sync()
- }
- return nil
- }
-
- return w.cut()
-}
-
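-// SaveSnapshot appends a snapshot record to the WAL and fsyncs it to disk.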
-func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
- if err := walpb.ValidateSnapshotForWrite(&e); err != nil {
- return err
- }
-
- b := pbutil.MustMarshal(&e)
-
- w.mu.Lock()
- defer w.mu.Unlock()
-
- rec := &walpb.Record{Type: snapshotType, Data: b}
- if err := w.encoder.encode(rec); err != nil {
- return err
- }
- // update enti only when snapshot is ahead of last index
- if w.enti < e.Index {
- w.enti = e.Index
- }
- return w.sync()
-}
-
-func (w *WAL) saveCrc(prevCrc uint32) error {
- return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc})
-}
-
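-// tail returns the most recently opened (last locked) WAL segment, or nil if none.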
-func (w *WAL) tail() *fileutil.LockedFile {
- if len(w.locks) > 0 {
- return w.locks[len(w.locks)-1]
- }
- return nil
-}
-
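-// seq returns the sequence number parsed from the tail WAL file name, or 0 if
-// there is no tail.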
-func (w *WAL) seq() uint64 {
- t := w.tail()
- if t == nil {
- return 0
- }
- seq, _, err := parseWALName(filepath.Base(t.Name()))
- if err != nil {
- w.lg.Fatal("failed to parse WAL name", zap.String("name", t.Name()), zap.Error(err))
- }
- return seq
-}
-
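-// closeAll closes every given ReadCloser and returns an aggregated error if any
-// of the close calls fail.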
-func closeAll(lg *zap.Logger, rcs ...io.ReadCloser) error {
- stringArr := make([]string, 0)
- for _, f := range rcs {
- if err := f.Close(); err != nil {
-			lg.Warn("failed to close", zap.Error(err))
- stringArr = append(stringArr, err.Error())
- }
- }
- if len(stringArr) == 0 {
- return nil
- }
- return errors.New(strings.Join(stringArr, ", "))
-}
diff --git a/server/wal/wal_test.go b/server/wal/wal_test.go
deleted file mode 100644
index 05014086c26..00000000000
--- a/server/wal/wal_test.go
+++ /dev/null
@@ -1,1124 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "math"
- "os"
- "path"
- "path/filepath"
- "reflect"
- "regexp"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
- "go.etcd.io/etcd/pkg/v3/pbutil"
- "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/server/v3/wal/walpb"
- "go.uber.org/zap/zaptest"
-
- "go.uber.org/zap"
-)
-
-var (
- confState = raftpb.ConfState{
- Voters: []uint64{0x00ffca74},
- AutoLeave: false,
- }
-)
-
-func TestNew(t *testing.T) {
- p, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
-
- w, err := Create(zap.NewExample(), p, []byte("somedata"))
- if err != nil {
- t.Fatalf("err = %v, want nil", err)
- }
- if g := filepath.Base(w.tail().Name()); g != walName(0, 0) {
- t.Errorf("name = %+v, want %+v", g, walName(0, 0))
- }
- defer w.Close()
-
- // file is preallocated to segment size; only read data written by wal
- off, err := w.tail().Seek(0, io.SeekCurrent)
- if err != nil {
- t.Fatal(err)
- }
- gd := make([]byte, off)
- f, err := os.Open(filepath.Join(p, filepath.Base(w.tail().Name())))
- if err != nil {
- t.Fatal(err)
- }
- defer f.Close()
- if _, err = io.ReadFull(f, gd); err != nil {
- t.Fatalf("err = %v, want nil", err)
- }
-
- var wb bytes.Buffer
- e := newEncoder(&wb, 0, 0)
- err = e.encode(&walpb.Record{Type: crcType, Crc: 0})
- if err != nil {
- t.Fatalf("err = %v, want nil", err)
- }
- err = e.encode(&walpb.Record{Type: metadataType, Data: []byte("somedata")})
- if err != nil {
- t.Fatalf("err = %v, want nil", err)
- }
- r := &walpb.Record{
- Type: snapshotType,
- Data: pbutil.MustMarshal(&walpb.Snapshot{}),
- }
- if err = e.encode(r); err != nil {
- t.Fatalf("err = %v, want nil", err)
- }
- e.flush()
- if !bytes.Equal(gd, wb.Bytes()) {
- t.Errorf("data = %v, want %v", gd, wb.Bytes())
- }
-}
-
-func TestCreateFailFromPollutedDir(t *testing.T) {
- p, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
- ioutil.WriteFile(filepath.Join(p, "test.wal"), []byte("data"), os.ModeTemporary)
-
- _, err = Create(zap.NewExample(), p, []byte("data"))
- if err != os.ErrExist {
- t.Fatalf("expected %v, got %v", os.ErrExist, err)
- }
-}
-
-func TestWalCleanup(t *testing.T) {
- testRoot, err := ioutil.TempDir(t.TempDir(), "waltestroot")
- if err != nil {
- t.Fatal(err)
- }
- p, err := ioutil.TempDir(testRoot, "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testRoot)
-
- logger := zap.NewExample()
- w, err := Create(logger, p, []byte(""))
- if err != nil {
- t.Fatalf("err = %v, want nil", err)
- }
- w.cleanupWAL(logger)
- fnames, err := fileutil.ReadDir(testRoot)
- if err != nil {
- t.Fatalf("err = %v, want nil", err)
- }
- if len(fnames) != 1 {
- t.Fatalf("expected 1 file under %v, got %v", testRoot, len(fnames))
- }
- pattern := fmt.Sprintf(`%s.broken\.[\d]{8}\.[\d]{6}\.[\d]{1,6}?`, filepath.Base(p))
- match, _ := regexp.MatchString(pattern, fnames[0])
- if !match {
- t.Errorf("match = false, expected true for %v with pattern %v", fnames[0], pattern)
- }
-}
-
-func TestCreateFailFromNoSpaceLeft(t *testing.T) {
- p, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
-
- oldSegmentSizeBytes := SegmentSizeBytes
- defer func() {
- SegmentSizeBytes = oldSegmentSizeBytes
- }()
- SegmentSizeBytes = math.MaxInt64
-
- _, err = Create(zap.NewExample(), p, []byte("data"))
- if err == nil { // no space left on device
- t.Fatalf("expected error 'no space left on device', got nil")
- }
-}
-
-func TestNewForInitedDir(t *testing.T) {
- p, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
-
- os.Create(filepath.Join(p, walName(0, 0)))
- if _, err = Create(zap.NewExample(), p, nil); err == nil || err != os.ErrExist {
- t.Errorf("err = %v, want %v", err, os.ErrExist)
- }
-}
-
-func TestOpenAtIndex(t *testing.T) {
- dir, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dir)
-
- f, err := os.Create(filepath.Join(dir, walName(0, 0)))
- if err != nil {
- t.Fatal(err)
- }
- f.Close()
-
- w, err := Open(zap.NewExample(), dir, walpb.Snapshot{})
- if err != nil {
- t.Fatalf("err = %v, want nil", err)
- }
- if g := filepath.Base(w.tail().Name()); g != walName(0, 0) {
- t.Errorf("name = %+v, want %+v", g, walName(0, 0))
- }
- if w.seq() != 0 {
- t.Errorf("seq = %d, want %d", w.seq(), 0)
- }
- w.Close()
-
- wname := walName(2, 10)
- f, err = os.Create(filepath.Join(dir, wname))
- if err != nil {
- t.Fatal(err)
- }
- f.Close()
-
- w, err = Open(zap.NewExample(), dir, walpb.Snapshot{Index: 5})
- if err != nil {
- t.Fatalf("err = %v, want nil", err)
- }
- if g := filepath.Base(w.tail().Name()); g != wname {
- t.Errorf("name = %+v, want %+v", g, wname)
- }
- if w.seq() != 2 {
- t.Errorf("seq = %d, want %d", w.seq(), 2)
- }
- w.Close()
-
- emptydir, err := ioutil.TempDir(t.TempDir(), "waltestempty")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(emptydir)
- if _, err = Open(zap.NewExample(), emptydir, walpb.Snapshot{}); err != ErrFileNotFound {
- t.Errorf("err = %v, want %v", err, ErrFileNotFound)
- }
-}
-
-// TestVerify tests that Verify returns a non-nil error when the WAL is corrupted.
-// The test creates a WAL directory and cuts out multiple WAL files. Then
-// it corrupts one of the files by completely truncating it.
-func TestVerify(t *testing.T) {
- lg := zaptest.NewLogger(t)
- walDir, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
-
- // create WAL
- w, err := Create(lg, walDir, nil)
- if err != nil {
- t.Fatal(err)
- }
- defer w.Close()
-
- // make 5 separate files
- for i := 0; i < 5; i++ {
- es := []raftpb.Entry{{Index: uint64(i), Data: []byte(fmt.Sprintf("waldata%d", i+1))}}
- if err = w.Save(raftpb.HardState{}, es); err != nil {
- t.Fatal(err)
- }
- if err = w.cut(); err != nil {
- t.Fatal(err)
- }
- }
-
- hs := raftpb.HardState{Term: 1, Vote: 3, Commit: 5}
- assert.NoError(t, w.Save(hs, nil))
-
-	// verify the WAL is not corrupted at this point
- hardstate, err := Verify(lg, walDir, walpb.Snapshot{})
- if err != nil {
- t.Errorf("expected a nil error, got %v", err)
- }
- assert.Equal(t, hs, *hardstate)
-
- walFiles, err := ioutil.ReadDir(walDir)
- if err != nil {
- t.Fatal(err)
- }
-
- // corrupt the WAL by truncating one of the WAL files completely
- err = os.Truncate(path.Join(walDir, walFiles[2].Name()), 0)
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = Verify(lg, walDir, walpb.Snapshot{})
- if err == nil {
- t.Error("expected a non-nil error, got nil")
- }
-}
-
-// TODO: split it into smaller tests for better readability
-func TestCut(t *testing.T) {
- p, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
-
- w, err := Create(zap.NewExample(), p, nil)
- if err != nil {
- t.Fatal(err)
- }
- defer w.Close()
-
- state := raftpb.HardState{Term: 1}
- if err = w.Save(state, nil); err != nil {
- t.Fatal(err)
- }
- if err = w.cut(); err != nil {
- t.Fatal(err)
- }
- wname := walName(1, 1)
- if g := filepath.Base(w.tail().Name()); g != wname {
- t.Errorf("name = %s, want %s", g, wname)
- }
-
- es := []raftpb.Entry{{Index: 1, Term: 1, Data: []byte{1}}}
- if err = w.Save(raftpb.HardState{}, es); err != nil {
- t.Fatal(err)
- }
- if err = w.cut(); err != nil {
- t.Fatal(err)
- }
- snap := walpb.Snapshot{Index: 2, Term: 1, ConfState: &confState}
- if err = w.SaveSnapshot(snap); err != nil {
- t.Fatal(err)
- }
- wname = walName(2, 2)
- if g := filepath.Base(w.tail().Name()); g != wname {
- t.Errorf("name = %s, want %s", g, wname)
- }
-
-	// check the state in the last WAL file.
-	// We check before closing the WAL to ensure that cut syncs the data
-	// to disk.
- f, err := os.Open(filepath.Join(p, wname))
- if err != nil {
- t.Fatal(err)
- }
- defer f.Close()
- nw := &WAL{
- decoder: newDecoder(f),
- start: snap,
- }
- _, gst, _, err := nw.ReadAll()
- if err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(gst, state) {
- t.Errorf("state = %+v, want %+v", gst, state)
- }
-}
-
-func TestSaveWithCut(t *testing.T) {
- p, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
-
- w, err := Create(zap.NewExample(), p, []byte("metadata"))
- if err != nil {
- t.Fatal(err)
- }
-
- state := raftpb.HardState{Term: 1}
- if err = w.Save(state, nil); err != nil {
- t.Fatal(err)
- }
- bigData := make([]byte, 500)
- strdata := "Hello World!!"
- copy(bigData, strdata)
- // set a lower value for SegmentSizeBytes, else the test takes too long to complete
- restoreLater := SegmentSizeBytes
- const EntrySize int = 500
- SegmentSizeBytes = 2 * 1024
- defer func() { SegmentSizeBytes = restoreLater }()
- index := uint64(0)
- for totalSize := 0; totalSize < int(SegmentSizeBytes); totalSize += EntrySize {
- ents := []raftpb.Entry{{Index: index, Term: 1, Data: bigData}}
- if err = w.Save(state, ents); err != nil {
- t.Fatal(err)
- }
- index++
- }
-
- w.Close()
-
- neww, err := Open(zap.NewExample(), p, walpb.Snapshot{})
- if err != nil {
- t.Fatalf("err = %v, want nil", err)
- }
- defer neww.Close()
- wname := walName(1, index)
- if g := filepath.Base(neww.tail().Name()); g != wname {
- t.Errorf("name = %s, want %s", g, wname)
- }
-
- _, newhardstate, entries, err := neww.ReadAll()
- if err != nil {
- t.Fatal(err)
- }
-
- if !reflect.DeepEqual(newhardstate, state) {
- t.Errorf("Hard State = %+v, want %+v", newhardstate, state)
- }
- if len(entries) != int(SegmentSizeBytes/int64(EntrySize)) {
- t.Errorf("Number of entries = %d, expected = %d", len(entries), int(SegmentSizeBytes/int64(EntrySize)))
- }
- for _, oneent := range entries {
- if !bytes.Equal(oneent.Data, bigData) {
- t.Errorf("the saved data does not match at Index %d : found: %s , want :%s", oneent.Index, oneent.Data, bigData)
- }
- }
-}
-
-func TestRecover(t *testing.T) {
- p, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
-
- w, err := Create(zap.NewExample(), p, []byte("metadata"))
- if err != nil {
- t.Fatal(err)
- }
- if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil {
- t.Fatal(err)
- }
- ents := []raftpb.Entry{{Index: 1, Term: 1, Data: []byte{1}}, {Index: 2, Term: 2, Data: []byte{2}}}
- if err = w.Save(raftpb.HardState{}, ents); err != nil {
- t.Fatal(err)
- }
- sts := []raftpb.HardState{{Term: 1, Vote: 1, Commit: 1}, {Term: 2, Vote: 2, Commit: 2}}
- for _, s := range sts {
- if err = w.Save(s, nil); err != nil {
- t.Fatal(err)
- }
- }
- w.Close()
-
- if w, err = Open(zap.NewExample(), p, walpb.Snapshot{}); err != nil {
- t.Fatal(err)
- }
- metadata, state, entries, err := w.ReadAll()
- if err != nil {
- t.Fatal(err)
- }
-
- if !bytes.Equal(metadata, []byte("metadata")) {
- t.Errorf("metadata = %s, want %s", metadata, "metadata")
- }
- if !reflect.DeepEqual(entries, ents) {
- t.Errorf("ents = %+v, want %+v", entries, ents)
- }
- // only the latest state is recorded
- s := sts[len(sts)-1]
- if !reflect.DeepEqual(state, s) {
- t.Errorf("state = %+v, want %+v", state, s)
- }
- w.Close()
-}
-
-func TestSearchIndex(t *testing.T) {
- tests := []struct {
- names []string
- index uint64
- widx int
- wok bool
- }{
- {
- []string{
- "0000000000000000-0000000000000000.wal",
- "0000000000000001-0000000000001000.wal",
- "0000000000000002-0000000000002000.wal",
- },
- 0x1000, 1, true,
- },
- {
- []string{
- "0000000000000001-0000000000004000.wal",
- "0000000000000002-0000000000003000.wal",
- "0000000000000003-0000000000005000.wal",
- },
- 0x4000, 1, true,
- },
- {
- []string{
- "0000000000000001-0000000000002000.wal",
- "0000000000000002-0000000000003000.wal",
- "0000000000000003-0000000000005000.wal",
- },
- 0x1000, -1, false,
- },
- }
- for i, tt := range tests {
- idx, ok := searchIndex(zap.NewExample(), tt.names, tt.index)
- if idx != tt.widx {
- t.Errorf("#%d: idx = %d, want %d", i, idx, tt.widx)
- }
- if ok != tt.wok {
- t.Errorf("#%d: ok = %v, want %v", i, ok, tt.wok)
- }
- }
-}
-
-func TestScanWalName(t *testing.T) {
- tests := []struct {
- str string
- wseq, windex uint64
- wok bool
- }{
- {"0000000000000000-0000000000000000.wal", 0, 0, true},
- {"0000000000000000.wal", 0, 0, false},
- {"0000000000000000-0000000000000000.snap", 0, 0, false},
- }
- for i, tt := range tests {
- s, index, err := parseWALName(tt.str)
- if g := err == nil; g != tt.wok {
- t.Errorf("#%d: ok = %v, want %v", i, g, tt.wok)
- }
- if s != tt.wseq {
- t.Errorf("#%d: seq = %d, want %d", i, s, tt.wseq)
- }
- if index != tt.windex {
- t.Errorf("#%d: index = %d, want %d", i, index, tt.windex)
- }
- }
-}
-
-func TestRecoverAfterCut(t *testing.T) {
- p, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
-
- md, err := Create(zap.NewExample(), p, []byte("metadata"))
- if err != nil {
- t.Fatal(err)
- }
- for i := 0; i < 10; i++ {
- if err = md.SaveSnapshot(walpb.Snapshot{Index: uint64(i), Term: 1, ConfState: &confState}); err != nil {
- t.Fatal(err)
- }
- es := []raftpb.Entry{{Index: uint64(i)}}
- if err = md.Save(raftpb.HardState{}, es); err != nil {
- t.Fatal(err)
- }
- if err = md.cut(); err != nil {
- t.Fatal(err)
- }
- }
- md.Close()
-
- if err := os.Remove(filepath.Join(p, walName(4, 4))); err != nil {
- t.Fatal(err)
- }
-
- for i := 0; i < 10; i++ {
- w, err := Open(zap.NewExample(), p, walpb.Snapshot{Index: uint64(i), Term: 1})
- if err != nil {
- if i <= 4 {
- if err != ErrFileNotFound {
- t.Errorf("#%d: err = %v, want %v", i, err, ErrFileNotFound)
- }
- } else {
- t.Errorf("#%d: err = %v, want nil", i, err)
- }
- continue
- }
- metadata, _, entries, err := w.ReadAll()
- if err != nil {
- t.Errorf("#%d: err = %v, want nil", i, err)
- continue
- }
- if !bytes.Equal(metadata, []byte("metadata")) {
- t.Errorf("#%d: metadata = %s, want %s", i, metadata, "metadata")
- }
- for j, e := range entries {
- if e.Index != uint64(j+i+1) {
- t.Errorf("#%d: ents[%d].Index = %+v, want %+v", i, j, e.Index, j+i+1)
- }
- }
- w.Close()
- }
-}
-
-func TestOpenAtUncommittedIndex(t *testing.T) {
- p, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
-
- w, err := Create(zap.NewExample(), p, nil)
- if err != nil {
- t.Fatal(err)
- }
- if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil {
- t.Fatal(err)
- }
- if err = w.Save(raftpb.HardState{}, []raftpb.Entry{{Index: 0}}); err != nil {
- t.Fatal(err)
- }
- w.Close()
-
- w, err = Open(zap.NewExample(), p, walpb.Snapshot{})
- if err != nil {
- t.Fatal(err)
- }
- // commit up to index 0, try to read index 1
- if _, _, _, err = w.ReadAll(); err != nil {
- t.Errorf("err = %v, want nil", err)
- }
- w.Close()
-}
-
-// TestOpenForRead tests that OpenForRead can load all files.
-// The test creates a WAL directory and cuts out multiple WAL files. Then
-// it releases the locks on part of the data and expects that OpenForRead
-// can read out all files even if some are locked for write.
-func TestOpenForRead(t *testing.T) {
- p, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
- // create WAL
- w, err := Create(zap.NewExample(), p, nil)
- if err != nil {
- t.Fatal(err)
- }
- defer w.Close()
- // make 10 separate files
- for i := 0; i < 10; i++ {
- es := []raftpb.Entry{{Index: uint64(i)}}
- if err = w.Save(raftpb.HardState{}, es); err != nil {
- t.Fatal(err)
- }
- if err = w.cut(); err != nil {
- t.Fatal(err)
- }
- }
- // release the lock to 5
- unlockIndex := uint64(5)
- w.ReleaseLockTo(unlockIndex)
-
- // All are available for read
- w2, err := OpenForRead(zap.NewExample(), p, walpb.Snapshot{})
- if err != nil {
- t.Fatal(err)
- }
- defer w2.Close()
- _, _, ents, err := w2.ReadAll()
- if err != nil {
- t.Fatalf("err = %v, want nil", err)
- }
- if g := ents[len(ents)-1].Index; g != 9 {
- t.Errorf("last index read = %d, want %d", g, 9)
- }
-}
-
-func TestOpenWithMaxIndex(t *testing.T) {
- p, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
- // create WAL
- w, err := Create(zap.NewExample(), p, nil)
- if err != nil {
- t.Fatal(err)
- }
- defer w.Close()
-
- es := []raftpb.Entry{{Index: uint64(math.MaxInt64)}}
- if err = w.Save(raftpb.HardState{}, es); err != nil {
- t.Fatal(err)
- }
- w.Close()
-
- w, err = Open(zap.NewExample(), p, walpb.Snapshot{})
- if err != nil {
- t.Fatal(err)
- }
- _, _, _, err = w.ReadAll()
- if err == nil || err != ErrSliceOutOfRange {
- t.Fatalf("err = %v, want ErrSliceOutOfRange", err)
- }
-}
-
-func TestSaveEmpty(t *testing.T) {
- var buf bytes.Buffer
- var est raftpb.HardState
- w := WAL{
- encoder: newEncoder(&buf, 0, 0),
- }
- if err := w.saveState(&est); err != nil {
- t.Errorf("err = %v, want nil", err)
- }
- if len(buf.Bytes()) != 0 {
- t.Errorf("buf.Bytes = %d, want 0", len(buf.Bytes()))
- }
-}
-
-func TestReleaseLockTo(t *testing.T) {
- p, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
- // create WAL
- w, err := Create(zap.NewExample(), p, nil)
- defer func() {
- if err = w.Close(); err != nil {
- t.Fatal(err)
- }
- }()
- if err != nil {
- t.Fatal(err)
- }
-
- // release nothing if no files
- err = w.ReleaseLockTo(10)
- if err != nil {
- t.Errorf("err = %v, want nil", err)
- }
-
- // make 10 separate files
- for i := 0; i < 10; i++ {
- es := []raftpb.Entry{{Index: uint64(i)}}
- if err = w.Save(raftpb.HardState{}, es); err != nil {
- t.Fatal(err)
- }
- if err = w.cut(); err != nil {
- t.Fatal(err)
- }
- }
- // release the lock to 5
- unlockIndex := uint64(5)
- w.ReleaseLockTo(unlockIndex)
-
- // expected remaining are 4,5,6,7,8,9,10
- if len(w.locks) != 7 {
- t.Errorf("len(w.locks) = %d, want %d", len(w.locks), 7)
- }
- for i, l := range w.locks {
- var lockIndex uint64
- _, lockIndex, err = parseWALName(filepath.Base(l.Name()))
- if err != nil {
- t.Fatal(err)
- }
-
- if lockIndex != uint64(i+4) {
- t.Errorf("#%d: lockindex = %d, want %d", i, lockIndex, uint64(i+4))
- }
- }
-
- // release the lock to 15
- unlockIndex = uint64(15)
- w.ReleaseLockTo(unlockIndex)
-
- // expected remaining is 10
- if len(w.locks) != 1 {
- t.Errorf("len(w.locks) = %d, want %d", len(w.locks), 1)
- }
- _, lockIndex, err := parseWALName(filepath.Base(w.locks[0].Name()))
- if err != nil {
- t.Fatal(err)
- }
-
- if lockIndex != uint64(10) {
- t.Errorf("lockindex = %d, want %d", lockIndex, 10)
- }
-}
-
-// TestTailWriteNoSlackSpace ensures that tail writes append if there's no preallocated space.
-func TestTailWriteNoSlackSpace(t *testing.T) {
- p, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
-
- // create initial WAL
- w, err := Create(zap.NewExample(), p, []byte("metadata"))
- if err != nil {
- t.Fatal(err)
- }
- // write some entries
- for i := 1; i <= 5; i++ {
- es := []raftpb.Entry{{Index: uint64(i), Term: 1, Data: []byte{byte(i)}}}
- if err = w.Save(raftpb.HardState{Term: 1}, es); err != nil {
- t.Fatal(err)
- }
- }
- // get rid of slack space by truncating file
- off, serr := w.tail().Seek(0, io.SeekCurrent)
- if serr != nil {
- t.Fatal(serr)
- }
- if terr := w.tail().Truncate(off); terr != nil {
- t.Fatal(terr)
- }
- w.Close()
-
- // open, write more
- w, err = Open(zap.NewExample(), p, walpb.Snapshot{})
- if err != nil {
- t.Fatal(err)
- }
- _, _, ents, rerr := w.ReadAll()
- if rerr != nil {
- t.Fatal(rerr)
- }
- if len(ents) != 5 {
- t.Fatalf("got entries %+v, expected 5 entries", ents)
- }
- // write more entries
- for i := 6; i <= 10; i++ {
- es := []raftpb.Entry{{Index: uint64(i), Term: 1, Data: []byte{byte(i)}}}
- if err = w.Save(raftpb.HardState{Term: 1}, es); err != nil {
- t.Fatal(err)
- }
- }
- w.Close()
-
- // confirm all writes
- w, err = Open(zap.NewExample(), p, walpb.Snapshot{})
- if err != nil {
- t.Fatal(err)
- }
- _, _, ents, rerr = w.ReadAll()
- if rerr != nil {
- t.Fatal(rerr)
- }
- if len(ents) != 10 {
- t.Fatalf("got entries %+v, expected 10 entries", ents)
- }
- w.Close()
-}
-
-// TestRestartCreateWal ensures that an interrupted WAL initialization is clobbered on restart
-func TestRestartCreateWal(t *testing.T) {
- p, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
-
-	// make a temporary directory so it looks like initialization was interrupted
- tmpdir := filepath.Clean(p) + ".tmp"
- if err = os.Mkdir(tmpdir, fileutil.PrivateDirMode); err != nil {
- t.Fatal(err)
- }
- if _, err = os.OpenFile(filepath.Join(tmpdir, "test"), os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode); err != nil {
- t.Fatal(err)
- }
-
- w, werr := Create(zap.NewExample(), p, []byte("abc"))
- if werr != nil {
- t.Fatal(werr)
- }
- w.Close()
- if Exist(tmpdir) {
- t.Fatalf("got %q exists, expected it to not exist", tmpdir)
- }
-
- if w, err = OpenForRead(zap.NewExample(), p, walpb.Snapshot{}); err != nil {
- t.Fatal(err)
- }
- defer w.Close()
-
- if meta, _, _, rerr := w.ReadAll(); rerr != nil || string(meta) != "abc" {
- t.Fatalf("got error %v and meta %q, expected nil and %q", rerr, meta, "abc")
- }
-}
-
-// TestOpenOnTornWrite ensures that entries past the torn write are truncated.
-func TestOpenOnTornWrite(t *testing.T) {
- maxEntries := 40
- clobberIdx := 20
- overwriteEntries := 5
-
- p, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
- w, err := Create(zap.NewExample(), p, nil)
- defer func() {
- if err = w.Close(); err != nil && err != os.ErrInvalid {
- t.Fatal(err)
- }
- }()
- if err != nil {
- t.Fatal(err)
- }
-
- // get offset of end of each saved entry
- offsets := make([]int64, maxEntries)
- for i := range offsets {
- es := []raftpb.Entry{{Index: uint64(i)}}
- if err = w.Save(raftpb.HardState{}, es); err != nil {
- t.Fatal(err)
- }
- if offsets[i], err = w.tail().Seek(0, io.SeekCurrent); err != nil {
- t.Fatal(err)
- }
- }
-
- fn := filepath.Join(p, filepath.Base(w.tail().Name()))
- w.Close()
-
- // clobber some entry with 0's to simulate a torn write
- f, ferr := os.OpenFile(fn, os.O_WRONLY, fileutil.PrivateFileMode)
- if ferr != nil {
- t.Fatal(ferr)
- }
- defer f.Close()
- _, err = f.Seek(offsets[clobberIdx], io.SeekStart)
- if err != nil {
- t.Fatal(err)
- }
- zeros := make([]byte, offsets[clobberIdx+1]-offsets[clobberIdx])
- _, err = f.Write(zeros)
- if err != nil {
- t.Fatal(err)
- }
- f.Close()
-
- w, err = Open(zap.NewExample(), p, walpb.Snapshot{})
- if err != nil {
- t.Fatal(err)
- }
- // seek up to clobbered entry
- _, _, _, err = w.ReadAll()
- if err != nil {
- t.Fatal(err)
- }
-
- // write a few entries past the clobbered entry
- for i := 0; i < overwriteEntries; i++ {
- // Index is different from old, truncated entries
- es := []raftpb.Entry{{Index: uint64(i + clobberIdx), Data: []byte("new")}}
- if err = w.Save(raftpb.HardState{}, es); err != nil {
- t.Fatal(err)
- }
- }
- w.Close()
-
- // read back the entries, confirm number of entries matches expectation
- w, err = OpenForRead(zap.NewExample(), p, walpb.Snapshot{})
- if err != nil {
- t.Fatal(err)
- }
-
- _, _, ents, rerr := w.ReadAll()
- if rerr != nil {
- // CRC error? the old entries were likely never truncated away
- t.Fatal(rerr)
- }
- wEntries := (clobberIdx - 1) + overwriteEntries
- if len(ents) != wEntries {
- t.Fatalf("expected len(ents) = %d, got %d", wEntries, len(ents))
- }
-}
-
-func TestRenameFail(t *testing.T) {
- p, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
-
- oldSegmentSizeBytes := SegmentSizeBytes
- defer func() {
- SegmentSizeBytes = oldSegmentSizeBytes
- }()
- SegmentSizeBytes = math.MaxInt64
-
- tp, terr := ioutil.TempDir(t.TempDir(), "waltest")
- if terr != nil {
- t.Fatal(terr)
- }
- os.RemoveAll(tp)
-
- w := &WAL{
- lg: zap.NewExample(),
- dir: p,
- }
- w2, werr := w.renameWAL(tp)
- if w2 != nil || werr == nil { // os.Rename should fail from 'no such file or directory'
- t.Fatalf("expected error, got %v", werr)
- }
-}
-
-// TestReadAllFail ensures ReadAll returns an error if used without opening the WAL
-func TestReadAllFail(t *testing.T) {
- dir, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dir)
-
- // create initial WAL
- f, err := Create(zap.NewExample(), dir, []byte("metadata"))
- if err != nil {
- t.Fatal(err)
- }
- f.Close()
- // try to read without opening the WAL
- _, _, _, err = f.ReadAll()
- if err == nil || err != ErrDecoderNotFound {
- t.Fatalf("err = %v, want ErrDecoderNotFound", err)
- }
-}
-
-// TestValidSnapshotEntries ensures ValidSnapshotEntries returns all valid wal snapshot entries, accounting
-// for hardstate
-func TestValidSnapshotEntries(t *testing.T) {
- p, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
- snap0 := walpb.Snapshot{}
- snap1 := walpb.Snapshot{Index: 1, Term: 1, ConfState: &confState}
- state1 := raftpb.HardState{Commit: 1, Term: 1}
- snap2 := walpb.Snapshot{Index: 2, Term: 1, ConfState: &confState}
- snap3 := walpb.Snapshot{Index: 3, Term: 2, ConfState: &confState}
- state2 := raftpb.HardState{Commit: 3, Term: 2}
- snap4 := walpb.Snapshot{Index: 4, Term: 2, ConfState: &confState} // will be orphaned since the last committed entry will be snap3
- func() {
- w, err := Create(zap.NewExample(), p, nil)
- if err != nil {
- t.Fatal(err)
- }
- defer w.Close()
-
- // snap0 is implicitly created at index 0, term 0
- if err = w.SaveSnapshot(snap1); err != nil {
- t.Fatal(err)
- }
- if err = w.Save(state1, nil); err != nil {
- t.Fatal(err)
- }
- if err = w.SaveSnapshot(snap2); err != nil {
- t.Fatal(err)
- }
- if err = w.SaveSnapshot(snap3); err != nil {
- t.Fatal(err)
- }
- if err = w.Save(state2, nil); err != nil {
- t.Fatal(err)
- }
- if err = w.SaveSnapshot(snap4); err != nil {
- t.Fatal(err)
- }
- }()
- walSnaps, err := ValidSnapshotEntries(zap.NewExample(), p)
- if err != nil {
- t.Fatal(err)
- }
- expected := []walpb.Snapshot{snap0, snap1, snap2, snap3}
- if !reflect.DeepEqual(walSnaps, expected) {
- t.Errorf("expected walSnaps %+v, got %+v", expected, walSnaps)
- }
-}
-
-// TestValidSnapshotEntriesAfterPurgeWal ensures that, with many wal files present and the first wal file purged,
-// ValidSnapshotEntries still works well.
-func TestValidSnapshotEntriesAfterPurgeWal(t *testing.T) {
- oldSegmentSizeBytes := SegmentSizeBytes
- SegmentSizeBytes = 64
- defer func() {
- SegmentSizeBytes = oldSegmentSizeBytes
- }()
- p, err := ioutil.TempDir(t.TempDir(), "waltest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
- snap0 := walpb.Snapshot{}
- snap1 := walpb.Snapshot{Index: 1, Term: 1, ConfState: &confState}
- state1 := raftpb.HardState{Commit: 1, Term: 1}
- snap2 := walpb.Snapshot{Index: 2, Term: 1, ConfState: &confState}
- snap3 := walpb.Snapshot{Index: 3, Term: 2, ConfState: &confState}
- state2 := raftpb.HardState{Commit: 3, Term: 2}
- func() {
- w, err := Create(zap.NewExample(), p, nil)
- if err != nil {
- t.Fatal(err)
- }
- defer w.Close()
-
- // snap0 is implicitly created at index 0, term 0
- if err = w.SaveSnapshot(snap1); err != nil {
- t.Fatal(err)
- }
- if err = w.Save(state1, nil); err != nil {
- t.Fatal(err)
- }
- if err = w.SaveSnapshot(snap2); err != nil {
- t.Fatal(err)
- }
- if err = w.SaveSnapshot(snap3); err != nil {
- t.Fatal(err)
- }
- for i := 0; i < 128; i++ {
- if err = w.Save(state2, nil); err != nil {
- t.Fatal(err)
- }
- }
-
- }()
- files, _, err := selectWALFiles(nil, p, snap0)
- if err != nil {
- t.Fatal(err)
- }
- os.Remove(p + "/" + files[0])
- _, err = ValidSnapshotEntries(zap.NewExample(), p)
- if err != nil {
- t.Fatal(err)
- }
-}
diff --git a/server/wal/walpb/record_test.go b/server/wal/walpb/record_test.go
deleted file mode 100644
index 82941363557..00000000000
--- a/server/wal/walpb/record_test.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package walpb
-
-import (
- "testing"
-
- "github.com/golang/protobuf/descriptor"
- "go.etcd.io/etcd/raft/v3/raftpb"
-)
-
-func TestSnapshotMetadataCompatibility(t *testing.T) {
- _, snapshotMetadataMd := descriptor.ForMessage(&raftpb.SnapshotMetadata{})
- _, snapshotMd := descriptor.ForMessage(&Snapshot{})
- if len(snapshotMetadataMd.GetField()) != len(snapshotMd.GetField()) {
- t.Errorf("Different number of fields in raftpb.SnapshotMetadata vs. walpb.Snapshot. " +
- "They are supposed to be in sync.")
- }
-}
-
-func TestValidateSnapshot(t *testing.T) {
- tests := []struct {
- name string
- snap *Snapshot
- wantErr bool
- }{
- {name: "empty", snap: &Snapshot{}, wantErr: false},
- {name: "invalid", snap: &Snapshot{Index: 5, Term: 3}, wantErr: true},
- {name: "valid", snap: &Snapshot{Index: 5, Term: 3, ConfState: &raftpb.ConfState{Voters: []uint64{0x00cad1}}}, wantErr: false},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if err := ValidateSnapshotForWrite(tt.snap); (err != nil) != tt.wantErr {
- t.Errorf("ValidateSnapshotForWrite() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
diff --git a/test b/test
deleted file mode 100755
index a14782bc3c1..00000000000
--- a/test
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/env bash
-
-echo -e "\\e[91mDEPRECATED!!! Use test.sh script instead.\\e[0m\\n"
-sleep 1
-
-source ./test.sh
diff --git a/test.sh b/test.sh
deleted file mode 100755
index 308cde40215..00000000000
--- a/test.sh
+++ /dev/null
@@ -1,696 +0,0 @@
-#!/usr/bin/env bash
-#
-# Run all etcd tests
-# ./test
-# ./test -v
-#
-#
-# Run specified test pass
-#
-# $ PASSES=unit ./test
-# $ PASSES=integration ./test
-#
-#
-# Run tests for one package
-# Each pass has a different default timeout; if you run tests in just one package or a single test case, you can set the
-# TIMEOUT flag to a different value.
-#
-# $ PASSES=unit PKG=./wal TIMEOUT=1m ./test
-# $ PASSES=integration PKG=./clientv3 TIMEOUT=1m ./test
-#
-# Run specified unit tests in one package
-# To run all the tests with the prefix "TestNew", set "TESTCASE=TestNew ";
-# to run only "TestNew", set "TESTCASE="\bTestNew\b""
-#
-# $ PASSES=unit PKG=./wal TESTCASE=TestNew TIMEOUT=1m ./test
-# $ PASSES=unit PKG=./wal TESTCASE="\bTestNew\b" TIMEOUT=1m ./test
-# $ PASSES=integration PKG=./client/integration TESTCASE="\bTestV2NoRetryEOF\b" TIMEOUT=1m ./test
-#
-#
-# Run code coverage
-# COVERDIR must either be an absolute path or a relative path to the etcd root
-# $ COVERDIR=coverage PASSES="build build_cov cov" ./test
-# $ go tool cover -html ./coverage/cover.out
-set -e
-set -o pipefail
-
-
-# Consider command as failed when any component of the pipe fails:
-# https://stackoverflow.com/questions/1221833/pipe-output-and-capture-exit-status-in-bash
-set -o pipefail
-
-# The test script is not supposed to make any changes to the files
-# e.g. add/update missing dependencies. Such divergences should be
-# detected and trigger a failure that needs explicit developer action.
-export GOFLAGS=-mod=readonly
-
-source ./scripts/test_lib.sh
-source ./build.sh
-
-PASSES=${PASSES:-"fmt bom dep build unit"}
-PKG=${PKG:-}
-
-if [ -z "$GOARCH" ]; then
- GOARCH=$(go env GOARCH);
-fi
-
-# determine whether target supports race detection
-if [ -z "${RACE}" ] ; then
- if [ "$GOARCH" == "amd64" ]; then
- RACE="--race"
- else
- RACE="--race=false"
- fi
-else
- RACE="--race=${RACE:-true}"
-fi
-
-# These options make sense for cases where the SUT (System Under Test) is compiled by the test.
-COMMON_TEST_FLAGS=("${RACE}")
-if [[ -n "${CPU}" ]]; then
- COMMON_TEST_FLAGS+=("--cpu=${CPU}")
-fi
-
-log_callout "Running with ${COMMON_TEST_FLAGS[*]}"
-
-RUN_ARG=()
-if [ -n "${TESTCASE}" ]; then
- RUN_ARG=("-run=${TESTCASE}")
-fi
-
-function build_pass {
- log_callout "Building etcd"
- run_for_modules run go build "${@}" || return 2
- GO_BUILD_FLAGS="-v" etcd_build "${@}"
- GO_BUILD_FLAGS="-v" tools_build "${@}"
-}
-
-################# REGULAR TESTS ################################################
-
-# run_unit_tests [pkgs] runs unit tests for the current module and the given set of [pkgs]
-function run_unit_tests {
- local pkgs="${1:-./...}"
- shift 1
- # shellcheck disable=SC2086
- GOLANG_TEST_SHORT=true go_test "${pkgs}" "parallel" : -short -timeout="${TIMEOUT:-3m}" "${COMMON_TEST_FLAGS[@]}" "${RUN_ARG[@]}" "$@"
-}
-
-function unit_pass {
- run_for_modules run_unit_tests "$@"
-}
-
-function integration_extra {
- if [ -z "${PKG}" ] ; then
- run_for_module "." go_test "./contrib/raftexample" "keep_going" : -timeout="${TIMEOUT:-5m}" "${RUN_ARG[@]}" "${COMMON_TEST_FLAGS[@]}" "$@" || return $?
- run_for_module "tests" go_test "./integration/v2store/..." "keep_going" : -tags v2v3 -timeout="${TIMEOUT:-5m}" "${RUN_ARG[@]}" "${COMMON_TEST_FLAGS[@]}" "$@" || return $?
- else
- log_warning "integration_extra ignored when PKG is specified"
- fi
-}
-
-function integration_pass {
- local pkgs=${USERPKG:-"./integration/..."}
- run_for_module "tests" go_test "${pkgs}" "parallel" : -timeout="${TIMEOUT:-15m}" "${COMMON_TEST_FLAGS[@]}" "${RUN_ARG[@]}" "$@" || return $?
- integration_extra "$@"
-}
-
-function e2e_pass {
-  # e2e tests run a pre-built binary. Settings like --race, -cover, -cpu do not have any impact.
- run_for_module "tests" go_test "./e2e/..." "keep_going" : -timeout="${TIMEOUT:-30m}" "${RUN_ARG[@]}" "$@"
-}
-
-function integration_e2e_pass {
- run_pass "integration" "${@}"
- run_pass "e2e" "${@}"
-}
-
-# generic_checker [cmd...]
-# executes the given command in the current module, and clearly fails if it
-# failed or printed output.
-function generic_checker {
- local cmd=("$@")
- if ! output=$("${cmd[@]}"); then
- echo "${output}"
- log_error -e "FAIL: '${cmd[*]}' checking failed (!=0 return code)"
- return 255
- fi
- if [ -n "${output}" ]; then
- echo "${output}"
- log_error -e "FAIL: '${cmd[*]}' checking failed (printed output)"
- return 255
- fi
-}
-
-function functional_pass {
- run ./tests/functional/build
-
- # Clean up any data and logs from previous runs
- rm -rf /tmp/etcd-functional-* /tmp/etcd-functional-*.backup
-
- # TODO: These ports should be dynamically allocated instead of hard-coded.
- for a in 1 2 3; do
- ./bin/etcd-agent --network tcp --address 127.0.0.1:${a}9027 < /dev/null &
- pid="$!"
- agent_pids="${agent_pids} $pid"
- done
-
- for a in 1 2 3; do
- log_callout "Waiting for 'etcd-agent' on ${a}9027..."
- while ! nc -z localhost ${a}9027; do
- sleep 1
- done
- done
-
- log_callout "functional test START!"
- run ./bin/etcd-tester --config ./tests/functional/functional.yaml && log_success "'etcd-tester' succeeded"
- local etcd_tester_exit_code=$?
-
- if [[ "${etcd_tester_exit_code}" -ne "0" ]]; then
- log_error "ETCD_TESTER_EXIT_CODE:" ${etcd_tester_exit_code}
- fi
-
- # shellcheck disable=SC2206
- agent_pids=($agent_pids)
- kill -s TERM "${agent_pids[@]}" || true
-
- if [[ "${etcd_tester_exit_code}" -ne "0" ]]; then
- log_error -e "\\nFAILED! 'tail -1000 /tmp/etcd-functional-1/etcd.log'"
- tail -1000 /tmp/etcd-functional-1/etcd.log
-
- log_error -e "\\nFAILED! 'tail -1000 /tmp/etcd-functional-2/etcd.log'"
- tail -1000 /tmp/etcd-functional-2/etcd.log
-
- log_error -e "\\nFAILED! 'tail -1000 /tmp/etcd-functional-3/etcd.log'"
- tail -1000 /tmp/etcd-functional-3/etcd.log
-
- log_error "--- FAIL: exit code" ${etcd_tester_exit_code}
- return ${etcd_tester_exit_code}
- fi
- log_success "functional test PASS!"
-}
-
-function grpcproxy_pass {
- run_for_module "tests" go_test "./integration/... ./e2e" "fail_fast" : \
- -timeout=30m -tags cluster_proxy "${COMMON_TEST_FLAGS[@]}" "$@"
-}
-
-################# COVERAGE #####################################################
-
-# Builds artifacts used by tests/e2e in coverage mode.
-function build_cov_pass {
- run_for_module "server" run go test -tags cov -c -covermode=set -coverpkg="./..." -o "../bin/etcd_test"
- run_for_module "etcdctl" run go test -tags cov -c -covermode=set -coverpkg="./..." -o "../bin/etcdctl_test"
- run_for_module "etcdutl" run go test -tags cov -c -covermode=set -coverpkg="./..." -o "../bin/etcdutl_test"
-}
-
-# pkg_to_coverprofileflag [prefix] [pkgs]
-# produces the name of the .coverprofile file to be used for tests of this package
-function pkg_to_coverprofileflag {
- local prefix="${1}"
- local pkgs="${2}"
- local pkgs_normalized
- prefix_normalized=$(echo "${prefix}" | tr "./ " "__+")
- if [ "${pkgs}" == "./..." ]; then
- pkgs_normalized="all"
- else
- pkgs_normalized=$(echo "${pkgs}" | tr "./ " "__+")
- fi
- mkdir -p "${coverdir}/${prefix_normalized}"
- echo -n "-coverprofile=${coverdir}/${prefix_normalized}/${pkgs_normalized}.coverprofile"
-}
-
-function not_test_packages {
- for m in $(modules); do
- if [[ $m =~ .*/etcd/tests/v3 ]]; then continue; fi
- if [[ $m =~ .*/etcd/v3 ]]; then continue; fi
- echo "${m}/..."
- done
-}
-
-# split_dir [dir] [num]
-function split_dir {
- local d="${1}"
- local num="${2}"
- local i=0
- for f in "${d}/"*; do
- local g=$(( "${i}" % "${num}" ))
- mkdir -p "${d}_${g}"
- mv "${f}" "${d}_${g}/"
- (( i++ ))
- done
-}
-
-function split_dir_pass {
- split_dir ./covdir/integration 4
-}
-
-
-# merge_cov_files [coverdir] [outfile]
-# merges all coverprofile files into a single file in the given directory.
-function merge_cov_files {
- local coverdir="${1}"
- local cover_out_file="${2}"
- log_callout "Merging coverage results in: ${coverdir}"
-  # gocovmerge requires a non-empty profile to start with:
- echo "mode: set" > "${cover_out_file}"
-
- local i=0
- local count
- count=$(find "${coverdir}"/*.coverprofile | wc -l)
- for f in "${coverdir}"/*.coverprofile; do
- # print once per 20 files
- if ! (( "${i}" % 20 )); then
- log_callout "${i} of ${count}: Merging file: ${f}"
- fi
- run_go_tool "github.com/gyuho/gocovmerge" "${f}" "${cover_out_file}" > "${coverdir}/cover.tmp" 2>/dev/null
- if [ -s "${coverdir}"/cover.tmp ]; then
- mv "${coverdir}/cover.tmp" "${cover_out_file}"
- fi
- (( i++ ))
- done
-}
-
-# merge_cov [coverdir]
-function merge_cov {
- log_callout "[$(date)] Merging coverage files ..."
- coverdir="${1}"
- for d in "${coverdir}"/*/; do
- d=${d%*/} # remove the trailing "/"
- merge_cov_files "${d}" "${d}.coverprofile" &
- done
- wait
- merge_cov_files "${coverdir}" "${coverdir}/all.coverprofile"
-}
-
-function cov_pass {
- # shellcheck disable=SC2153
- if [ -z "$COVERDIR" ]; then
- log_error "COVERDIR undeclared"
- return 255
- fi
-
- if [ ! -f "bin/etcd_test" ]; then
- log_error "etcd_test binary not found. Call: PASSES='build_cov' ./test"
- return 255
- fi
-
- local coverdir
- coverdir=$(readlink -f "${COVERDIR}")
- mkdir -p "${coverdir}"
- find "${coverdir}" -print0 -name '*.coverprofile' | xargs -0 rm
-
- local covpkgs
- covpkgs=$(not_test_packages)
- local coverpkg_comma
- coverpkg_comma=$(echo "${covpkgs[@]}" | xargs | tr ' ' ',')
- local gocov_build_flags=("-covermode=set" "-coverpkg=$coverpkg_comma")
-
- local failed=""
-
- log_callout "[$(date)] Collecting coverage from unit tests ..."
- for m in $(module_dirs); do
- GOLANG_TEST_SHORT=true run_for_module "${m}" go_test "./..." "parallel" "pkg_to_coverprofileflag unit_${m}" -short -timeout=30m \
- "${gocov_build_flags[@]}" "$@" || failed="$failed unit"
- done
-
- log_callout "[$(date)] Collecting coverage from integration tests ..."
- run_for_module "tests" go_test "./integration/..." "parallel" "pkg_to_coverprofileflag integration" \
- -timeout=30m "${gocov_build_flags[@]}" "$@" || failed="$failed integration"
- # integration-store-v2
- run_for_module "tests" go_test "./integration/v2store/..." "keep_going" "pkg_to_coverprofileflag store_v2" \
- -tags v2v3 -timeout=5m "${gocov_build_flags[@]}" "$@" || failed="$failed integration_v2v3"
- # integration_cluster_proxy
- run_for_module "tests" go_test "./integration/..." "parallel" "pkg_to_coverprofileflag integration_cluster_proxy" \
- -tags cluster_proxy -timeout=5m "${gocov_build_flags[@]}" || failed="$failed integration_cluster_proxy"
-
- log_callout "[$(date)] Collecting coverage from e2e tests ..."
-  # We don't pass 'gocov_build_flags' nor 'pkg_to_coverprofileflag' here,
-  # as the coverage is collected from the internally spawned ./bin/etcd_test & ./bin/etcdctl_test.
- mkdir -p "${coverdir}/e2e"
- COVERDIR="${coverdir}/e2e" run_for_module "tests" go_test "./e2e/..." "keep_going" : -tags=cov -timeout 30m "$@" || failed="$failed tests_e2e"
- split_dir "${coverdir}/e2e" 10
-
- log_callout "[$(date)] Collecting coverage from e2e tests with proxy ..."
- mkdir -p "${coverdir}/e2e_proxy"
- COVERDIR="${coverdir}/e2e_proxy" run_for_module "tests" go_test "./e2e/..." "keep_going" : -tags="cov cluster_proxy" -timeout 30m "$@" || failed="$failed tests_e2e_proxy"
- split_dir "${coverdir}/e2e_proxy" 10
-
- local cover_out_file="${coverdir}/all.coverprofile"
- merge_cov "${coverdir}"
-
- # strip out generated files (using GNU-style sed)
- sed --in-place -E "/[.]pb[.](gw[.])?go/d" "${cover_out_file}" || true
-
- sed --in-place -E "s|go.etcd.io/etcd/api/v3/|api/|g" "${cover_out_file}" || true
- sed --in-place -E "s|go.etcd.io/etcd/client/v3/|client/v3/|g" "${cover_out_file}" || true
- sed --in-place -E "s|go.etcd.io/etcd/client/v2/|client/v2/|g" "${cover_out_file}" || true
- sed --in-place -E "s|go.etcd.io/etcd/client/pkg/v3|client/pkg/v3/|g" "${cover_out_file}" || true
- sed --in-place -E "s|go.etcd.io/etcd/etcdctl/v3/|etcdctl/|g" "${cover_out_file}" || true
- sed --in-place -E "s|go.etcd.io/etcd/etcdutl/v3/|etcdutl/|g" "${cover_out_file}" || true
- sed --in-place -E "s|go.etcd.io/etcd/pkg/v3/|pkg/|g" "${cover_out_file}" || true
- sed --in-place -E "s|go.etcd.io/etcd/raft/v3/|raft/|g" "${cover_out_file}" || true
- sed --in-place -E "s|go.etcd.io/etcd/server/v3/|server/|g" "${cover_out_file}" || true
-
-  # failures were held so the full coverage file could be generated; fail now
- if [ -n "$failed" ]; then
- for f in $failed; do
- log_error "--- FAIL:" "$f"
- done
- log_warning "Despite failures, you can see partial report:"
- log_warning " go tool cover -html ${cover_out_file}"
- return 255
- fi
-
- log_success "done :) [see report: go tool cover -html ${cover_out_file}]"
-}
-
-######### Code formatting checkers #############################################
-
-function fmt_pass {
- toggle_failpoints disable
-
-  # TODO: add "unparam", "staticcheck", "unconvert", "ineffassign", "nakedret"
-  # after resolving pre-existing errors.
-  # markdown_you - the check was too sensitive and was temporarily disabled.
- for p in shellcheck \
- goword \
- gofmt \
- govet \
- revive \
- license_header \
- receiver_name \
- mod_tidy \
- dep \
- shellcheck \
- shellws \
- ; do
- run_pass "${p}" "${@}"
- done
-}
-
-function shellcheck_pass {
- if tool_exists "shellcheck" "https://github.com/koalaman/shellcheck#installing"; then
- generic_checker run shellcheck -fgcc build test scripts/*.sh ./*.sh
- fi
-}
-
-function shellws_pass {
- TAB=$'\t'
-  log_callout "Ensuring no tab-based indentation in shell scripts"
- local files
- files=$(find ./ -name '*.sh' -print0 | xargs -0 )
- # shellcheck disable=SC2206
- files=( ${files[@]} "./scripts/build-binary" "./scripts/build-docker" "./scripts/release" )
- log_cmd "grep -E -n $'^ *${TAB}' ${files[*]}"
- # shellcheck disable=SC2086
- if grep -E -n $'^ *${TAB}' "${files[@]}" | sed $'s|${TAB}|[\\\\tab]|g'; then
-    log_error "FAIL: found tab-based indentation in bash scripts. Use ' ' (double space)."
- local files_with_tabs
- files_with_tabs=$(grep -E -l $'^ *\\t' "${files[@]}")
- log_warning "Try: sed -i 's|\\t| |g' $files_with_tabs"
- return 1
- else
- log_success "SUCCESS: no tabulators found."
- return 0
- fi
-}
-
-function markdown_you_find_eschew_you {
- local find_you_cmd="find . -name \\*.md ! -path '*/vendor/*' ! -path './Documentation/*' ! -path './gopath.proto/*' ! -path './release/*' -exec grep -E --color '[Yy]ou[r]?[ '\\''.,;]' {} + || true"
- run eval "${find_you_cmd}"
-}
-
-function markdown_you_pass {
- generic_checker markdown_you_find_eschew_you
-}
-
-function markdown_marker_pass {
- # TODO: check other markdown files when marker handles headers with '[]'
- if tool_exists "marker" "https://crates.io/crates/marker"; then
- generic_checker run marker --skip-http --root ./Documentation 2>&1
- fi
-}
-
-function govet_pass {
- run_for_modules generic_checker run go vet
-}
-
-function govet_shadow_pass {
- local shadow
- shadow=$(tool_get_bin "golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow")
- run_for_modules generic_checker run go vet -all -vettool="${shadow}"
-}
-
-function unparam_pass {
- run_for_modules generic_checker run_go_tool "mvdan.cc/unparam"
-}
-
-function staticcheck_pass {
- run_for_modules generic_checker run_go_tool "honnef.co/go/tools/cmd/staticcheck"
-}
-
-function revive_pass {
- run_for_modules generic_checker run_go_tool "github.com/mgechev/revive" -config "${ETCD_ROOT_DIR}/tests/revive.toml" -exclude "vendor/..."
-}
-
-function unconvert_pass {
- run_for_modules generic_checker run_go_tool "github.com/mdempsky/unconvert" unconvert -v
-}
-
-function ineffassign_per_package {
- # bash 3.x compatible replacement of: mapfile -t gofiles < <(go_srcs_in_module "$1")
- local gofiles=()
- while IFS= read -r line; do gofiles+=("$line"); done < <(go_srcs_in_module "$1")
- run_go_tool github.com/gordonklaus/ineffassign "${gofiles[@]}"
-}
-
-function ineffassign_pass {
- run_for_modules generic_checker ineffassign_per_package
-}
-
-function nakedret_pass {
- run_for_modules generic_checker run_go_tool "github.com/alexkohler/nakedret"
-}
-
-function license_header_pass {
- # bash 3.x compatible replacement of: mapfile -t gofiles < <(go_srcs_in_module "$1")
- local gofiles=()
- while IFS= read -r line; do gofiles+=("$line"); done < <(go_srcs_in_module "$1")
-
- for file in "${gofiles[@]}"; do
- if ! head -n3 "${file}" | grep -Eq "(Copyright|generated|GENERATED)" ; then
- licRes="${licRes}"$(echo -e " ${file}")
- fi
- done
- if [ -n "${licRes}" ]; then
- log_error -e "license header checking failed:\\n${licRes}"
- return 255
- fi
-}
-
-function receiver_name_for_package {
- # bash 3.x compatible replacement of: mapfile -t gofiles < <(go_srcs_in_module "$1")
- local gofiles=()
- while IFS= read -r line; do gofiles+=("$line"); done < <(go_srcs_in_module "$1")
-
- recvs=$(grep 'func ([^*]' "${gofiles[@]}" | tr ':' ' ' | \
- awk ' { print $2" "$3" "$4" "$1 }' | sed "s/[a-zA-Z\\.]*go//g" | sort | uniq | \
- grep -Ev "(Descriptor|Proto|_)" | awk ' { print $3" "$4 } ' | sort | uniq -c | grep -v ' 1 ' | awk ' { print $2 } ')
- if [ -n "${recvs}" ]; then
- # shellcheck disable=SC2206
- recvs=($recvs)
- for recv in "${recvs[@]}"; do
- log_error "Mismatched receiver for $recv..."
- grep "$recv" "${gofiles[@]}" | grep 'func ('
- done
- return 255
- fi
-}
-
-function receiver_name_pass {
- run_for_modules receiver_name_for_package
-}
-
-# goword_for_package package
-# checks spelling and comments in the 'package' in the current module
-#
-function goword_for_package {
- # bash 3.x compatible replacement of: mapfile -t gofiles < <(go_srcs_in_module "$1")
- local gofiles=()
- while IFS= read -r line; do gofiles+=("$line"); done < <(go_srcs_in_module "$1")
-
- local gowordRes
-
- # spellchecking can be enabled with GOBINARGS="--tags=spell"
- # but it requires heavy dependencies installation, like:
- # apt-get install libaspell-dev libhunspell-dev hunspell-en-us aspell-en
-
-  # only check for broken exported godocs
- if gowordRes=$(run_go_tool "github.com/chzchzchz/goword" -use-spell=false "${gofiles[@]}" | grep godoc-export | sort); then
- log_error -e "goword checking failed:\\n${gowordRes}"
- return 255
- fi
- if [ -n "$gowordRes" ]; then
- log_error -e "goword checking returned output:\\n${gowordRes}"
- return 255
- fi
-}
-
-
-function goword_pass {
- run_for_modules goword_for_package || return 255
-}
-
-function go_fmt_for_package {
- # We utilize 'go fmt' to find all files suitable for formatting,
-  # but reuse the full power of gofmt to perform a read-only (diff) check.
- go fmt -n "$1" | sed 's| -w | -d |g' | sh
-}
-
-function gofmt_pass {
- run_for_modules generic_checker go_fmt_for_package
-}
-
-function bom_pass {
- log_callout "Checking bill of materials..."
- # https://github.com/golang/go/commit/7c388cc89c76bc7167287fb488afcaf5a4aa12bf
- # shellcheck disable=SC2207
- modules=($(modules_exp))
-
- # Internally license-bill-of-materials tends to modify go.sum
- run cp go.sum go.sum.tmp || return 2
- run cp go.mod go.mod.tmp || return 2
-
- output=$(GOFLAGS=-mod=mod run_go_tool github.com/coreos/license-bill-of-materials \
- --override-file ./bill-of-materials.override.json \
- "${modules[@]}")
- code="$?"
-
- run cp go.sum.tmp go.sum || return 2
- run cp go.mod.tmp go.mod || return 2
-
- if [ "${code}" -ne 0 ] ; then
- log_error -e "license-bill-of-materials (code: ${code}) failed with:\\n${output}"
- return 255
- else
- echo "${output}" > "bom-now.json.tmp"
- fi
- if ! diff ./bill-of-materials.json bom-now.json.tmp; then
- log_error "modularized licenses do not match given bill of materials"
- return 255
- fi
- rm bom-now.json.tmp
-}
-
-######## VARIOUS CHECKERS ######################################################
-
-function dump_deps_of_module() {
- local module
- if ! module=$(run go list -m); then
- return 255
- fi
- run go list -f "{{if not .Indirect}}{{if .Version}}{{.Path}},{{.Version}},${module}{{end}}{{end}}" -m all
-}
-
-# Checks whether dependencies are consistent across modules
-function dep_pass {
- local all_dependencies
- all_dependencies=$(run_for_modules dump_deps_of_module | sort) || return 2
-
- local duplicates
- duplicates=$(echo "${all_dependencies}" | cut -d ',' -f 1,2 | sort | uniq | cut -d ',' -f 1 | sort | uniq -d) || return 2
-
- for dup in ${duplicates}; do
-    log_error "FAIL: inconsistent versions for dependency: ${dup}"
- echo "${all_dependencies}" | grep "${dup}" | sed "s|\\([^,]*\\),\\([^,]*\\),\\([^,]*\\)| - \\1@\\2 from: \\3|g"
- done
- if [[ -n "${duplicates}" ]]; then
- log_error "FAIL: inconsistent dependencies"
- return 2
- else
- log_success "SUCCESS: dependencies are consistent across modules"
- fi
-}
-
-function release_pass {
- rm -f ./bin/etcd-last-release
- # to grab latest patch release; bump this up for every minor release
- UPGRADE_VER=$(git tag -l --sort=-version:refname "v3.4.*" | head -1)
- if [ -n "$MANUAL_VER" ]; then
-    # in case we need to test against a different version
- UPGRADE_VER=$MANUAL_VER
- fi
- if [[ -z ${UPGRADE_VER} ]]; then
- UPGRADE_VER="v3.3.0"
- log_warning "fallback to" ${UPGRADE_VER}
- fi
-
- local file="etcd-$UPGRADE_VER-linux-$GOARCH.tar.gz"
- log_callout "Downloading $file"
-
- set +e
- curl --fail -L "https://github.com/etcd-io/etcd/releases/download/$UPGRADE_VER/$file" -o "/tmp/$file"
- local result=$?
- set -e
- case $result in
- 0) ;;
- *) log_error "--- FAIL:" ${result}
- return $result
- ;;
- esac
-
- tar xzvf "/tmp/$file" -C /tmp/ --strip-components=1
- mkdir -p ./bin
- mv /tmp/etcd ./bin/etcd-last-release
-}
-
-function mod_tidy_for_module {
- # Watch for upstream solution: https://github.com/golang/go/issues/27005
- local tmpModDir
- tmpModDir=$(mktemp -d -t 'tmpModDir.XXXXXX')
- run cp "./go.mod" "${tmpModDir}" || return 2
-
- # Guarantees keeping go.sum minimal
-  # If this is causing too many problems, we should
- # stop controlling go.sum at all.
- rm go.sum
- run go mod tidy || return 2
-
- set +e
- local tmpFileGoModInSync
- diff -C 5 "${tmpModDir}/go.mod" "./go.mod"
- tmpFileGoModInSync="$?"
-
- # Bring back initial state
- mv "${tmpModDir}/go.mod" "./go.mod"
-
- if [ "${tmpFileGoModInSync}" -ne 0 ]; then
- log_error "${PWD}/go.mod is not in sync with 'go mod tidy'"
- return 255
- fi
-}
-
-function mod_tidy_pass {
- run_for_modules mod_tidy_for_module
-}
-
-########### MAIN ###############################################################
-
-function run_pass {
- local pass="${1}"
- shift 1
- log_callout -e "\\n'${pass}' started at $(date)"
- if "${pass}_pass" "$@" ; then
- log_success "'${pass}' completed at $(date)"
- else
- log_error "FAIL: '${pass}' failed at $(date)"
- exit 255
- fi
-}
-
-log_callout "Starting at: $(date)"
-for pass in $PASSES; do
- run_pass "${pass}" "${@}"
-done
-
-log_success "SUCCESS"
diff --git a/tests/Dockerfile b/tests/Dockerfile
deleted file mode 100644
index 8774811f368..00000000000
--- a/tests/Dockerfile
+++ /dev/null
@@ -1,51 +0,0 @@
-FROM ubuntu:20.10
-
-RUN rm /bin/sh && ln -s /bin/bash /bin/sh
-RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
-
-RUN apt-get -y update \
- && apt-get -y install \
- build-essential \
- gcc \
- apt-utils \
- pkg-config \
- software-properties-common \
- apt-transport-https \
- libssl-dev \
- sudo \
- bash \
- curl \
- wget \
- tar \
- git \
- netcat \
- libaspell-dev \
- libhunspell-dev \
- hunspell-en-us \
- aspell-en \
- shellcheck \
- && apt-get -y update \
- && apt-get -y upgrade \
- && apt-get -y autoremove \
- && apt-get -y autoclean
-
-ENV GOROOT /usr/local/go
-ENV GOPATH /go
-ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH}
-ENV GO_VERSION REPLACE_ME_GO_VERSION
-ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang
-RUN rm -rf ${GOROOT} \
- && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \
- && mkdir -p ${GOPATH}/src ${GOPATH}/bin \
- && go version
-
-RUN mkdir -p ${GOPATH}/src/go.etcd.io/etcd
-WORKDIR ${GOPATH}/src/go.etcd.io/etcd
-
-ADD ./scripts/install-marker.sh /tmp/install-marker.sh
-
-RUN GO111MODULE=off go get github.com/myitcv/gobin
-RUN /tmp/install-marker.sh amd64 \
- && rm -f /tmp/install-marker.sh \
- && curl -s https://codecov.io/bash >/codecov \
- && chmod 700 /codecov
diff --git a/tests/OWNERS b/tests/OWNERS
new file mode 100644
index 00000000000..365ae7b38a9
--- /dev/null
+++ b/tests/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+ - area/testing
diff --git a/tests/common/alarm_test.go b/tests/common/alarm_test.go
new file mode 100644
index 00000000000..17a677b4003
--- /dev/null
+++ b/tests/common/alarm_test.go
@@ -0,0 +1,120 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+func TestAlarm(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
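+	// use a deliberately tiny backend quota (13 pages) so that a handful of
+	// page-sized writes is enough to trip the NOSPACE alarm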
+ clus := testRunner.NewCluster(ctx, t,
+ config.WithClusterSize(1),
+ config.WithQuotaBackendBytes(int64(13*os.Getpagesize())),
+ )
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ // test small put still works
+ smallbuf := strings.Repeat("a", 64)
+ require.NoErrorf(t, cc.Put(ctx, "1st_test", smallbuf, config.PutOptions{}), "alarmTest: put kv error")
+
+ // write some chunks to fill up the database
+ buf := strings.Repeat("b", os.Getpagesize())
+ for {
+ if err := cc.Put(ctx, "2nd_test", buf, config.PutOptions{}); err != nil {
+ require.ErrorContains(t, err, "etcdserver: mvcc: database space exceeded")
+ break
+ }
+ }
+
+ // quota alarm should now be on
+ alarmResp, err := cc.AlarmList(ctx)
+ require.NoErrorf(t, err, "alarmTest: Alarm error")
+
+ // check that Put is rejected when alarm is on
+ if err = cc.Put(ctx, "3rd_test", smallbuf, config.PutOptions{}); err != nil {
+ require.ErrorContains(t, err, "etcdserver: mvcc: database space exceeded")
+ }
+
+ // get latest revision to compact
+ sresp, err := cc.Status(ctx)
+ require.NoErrorf(t, err, "get endpoint status error")
+ var rvs int64
+ for _, resp := range sresp {
+ if resp != nil && resp.Header != nil {
+ rvs = resp.Header.Revision
+ break
+ }
+ }
+
+ // make some space
+ _, err = cc.Compact(ctx, rvs, config.CompactOption{Physical: true, Timeout: 10 * time.Second})
+ require.NoErrorf(t, err, "alarmTest: Compact error")
+
+ err = cc.Defragment(ctx, config.DefragOption{Timeout: 10 * time.Second})
+ require.NoErrorf(t, err, "alarmTest: defrag error")
+
+ // turn off alarm
+ for _, alarm := range alarmResp.Alarms {
+ alarmMember := &clientv3.AlarmMember{
+ MemberID: alarm.MemberID,
+ Alarm: alarm.Alarm,
+ }
+ _, err = cc.AlarmDisarm(ctx, alarmMember)
+ require.NoErrorf(t, err, "alarmTest: Alarm error")
+ }
+
+ // put one more key below quota
+ err = cc.Put(ctx, "4th_test", smallbuf, config.PutOptions{})
+ require.NoError(t, err)
+ })
+}
+
+func TestAlarmlistOnMemberRestart(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t,
+ config.WithClusterSize(1),
+ config.WithQuotaBackendBytes(int64(13*os.Getpagesize())),
+ config.WithSnapshotCount(5),
+ )
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+
+ testutils.ExecuteUntil(ctx, t, func() {
+ for i := 0; i < 6; i++ {
+ _, err := cc.AlarmList(ctx)
+ require.NoErrorf(t, err, "Unexpected error")
+ }
+
+ clus.Members()[0].Stop()
+ err := clus.Members()[0].Start(ctx)
+ require.NoErrorf(t, err, "failed to start etcdserver")
+ })
+}
diff --git a/tests/common/auth_test.go b/tests/common/auth_test.go
new file mode 100644
index 00000000000..c9544b9076d
--- /dev/null
+++ b/tests/common/auth_test.go
@@ -0,0 +1,861 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+var (
+ tokenTTL = time.Second * 3
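+	// defaultAuthToken configures JWT token auth signed with RS256 using the
+	// test fixture key pair; the short TTL lets tests exercise token expiry.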
+ defaultAuthToken = fmt.Sprintf("jwt,pub-key=%s,priv-key=%s,sign-method=RS256,ttl=%s",
+ mustAbsPath("../fixtures/server.crt"), mustAbsPath("../fixtures/server.key.insecure"), tokenTTL)
+)
+
+const (
+ PermissionDenied = "etcdserver: permission denied"
+ AuthenticationFailed = "etcdserver: authentication failed, invalid user ID or password"
+ InvalidAuthManagement = "etcdserver: invalid auth management"
+
+ testPeerURL = "http://localhost:20011"
+)
+
+func TestAuthEnable(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth")
+ })
+}
+
+func TestAuthDisable(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoError(t, cc.Put(ctx, "hoo", "a", config.PutOptions{}))
+ require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth")
+
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+ testUserAuthClient := testutils.MustClient(clus.Client(WithAuth(testUserName, testPassword)))
+
+		// test-user doesn't have the permission, so this put must fail
+ require.Error(t, testUserAuthClient.Put(ctx, "hoo", "bar", config.PutOptions{}))
+ require.NoErrorf(t, rootAuthClient.AuthDisable(ctx), "failed to disable auth")
+ // now ErrAuthNotEnabled of Authenticate() is simply ignored
+ require.NoError(t, testUserAuthClient.Put(ctx, "hoo", "bar", config.PutOptions{}))
+ // now the key can be accessed
+ require.NoError(t, cc.Put(ctx, "hoo", "bar", config.PutOptions{}))
+ // confirm put succeeded
+ resp, err := cc.Get(ctx, "hoo", config.GetOptions{})
+ require.NoError(t, err)
+ if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "hoo" || string(resp.Kvs[0].Value) != "bar" {
+ t.Fatalf("want key value pair 'hoo', 'bar' but got %+v", resp.Kvs)
+ }
+ })
+}
+
+func TestAuthGracefulDisable(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth")
+ donec := make(chan struct{})
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+
+ go func() {
+ defer close(donec)
+			// sleep a bit to let the watcher connect while auth is still enabled
+ time.Sleep(time.Second)
+ // now disable auth...
+ if err := rootAuthClient.AuthDisable(ctx); err != nil {
+ t.Errorf("failed to auth disable %v", err)
+ return
+ }
+ // ...and restart the node
+ clus.Members()[0].Stop()
+ if err := clus.Members()[0].Start(ctx); err != nil {
+ t.Errorf("failed to restart member %v", err)
+ return
+ }
+ // the watcher should still work after reconnecting
+ assert.NoErrorf(t, rootAuthClient.Put(ctx, "key", "value", config.PutOptions{}), "failed to put key value")
+ }()
+
+ wCtx, wCancel := context.WithCancel(ctx)
+ defer wCancel()
+
+ watchCh := rootAuthClient.Watch(wCtx, "key", config.WatchOptions{Revision: 1})
+ wantedLen := 1
+ watchTimeout := 10 * time.Second
+ wanted := []testutils.KV{{Key: "key", Val: "value"}}
+ kvs, err := testutils.KeyValuesFromWatchChan(watchCh, wantedLen, watchTimeout)
+ require.NoErrorf(t, err, "failed to get key-values from watch channel %s", err)
+ require.Equal(t, wanted, kvs)
+ <-donec
+ })
+}
+
+func TestAuthStatus(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ resp, err := cc.AuthStatus(ctx)
+ require.NoError(t, err)
+ require.Falsef(t, resp.Enabled, "want auth not enabled but enabled")
+
+ require.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth")
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+ resp, err = rootAuthClient.AuthStatus(ctx)
+ require.NoError(t, err)
+ require.Truef(t, resp.Enabled, "want enabled but got not enabled")
+ })
+}
+
+func TestAuthRoleUpdate(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoError(t, cc.Put(ctx, "foo", "bar", config.PutOptions{}))
+ require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth")
+
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+ testUserAuthClient := testutils.MustClient(clus.Client(WithAuth(testUserName, testPassword)))
+
+ require.ErrorContains(t, testUserAuthClient.Put(ctx, "hoo", "bar", config.PutOptions{}), PermissionDenied)
+ // grant a new key
+ _, err := rootAuthClient.RoleGrantPermission(ctx, testRoleName, "hoo", "", clientv3.PermissionType(clientv3.PermReadWrite))
+ require.NoError(t, err)
+ // try a newly granted key
+ require.NoError(t, testUserAuthClient.Put(ctx, "hoo", "bar", config.PutOptions{}))
+ // confirm put succeeded
+ resp, err := testUserAuthClient.Get(ctx, "hoo", config.GetOptions{})
+ require.NoError(t, err)
+ if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "hoo" || string(resp.Kvs[0].Value) != "bar" {
+ t.Fatalf("want key value pair 'hoo' 'bar' but got %+v", resp.Kvs)
+ }
+ // revoke the newly granted key
+ _, err = rootAuthClient.RoleRevokePermission(ctx, testRoleName, "hoo", "")
+ require.NoError(t, err)
+ // try put to the revoked key
+ require.ErrorContains(t, testUserAuthClient.Put(ctx, "hoo", "bar", config.PutOptions{}), PermissionDenied)
+ // confirm a key still granted can be accessed
+ resp, err = testUserAuthClient.Get(ctx, "foo", config.GetOptions{})
+ require.NoError(t, err)
+ if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "foo" || string(resp.Kvs[0].Value) != "bar" {
+ t.Fatalf("want key value pair 'foo' 'bar' but got %+v", resp.Kvs)
+ }
+ })
+}
+
+func TestAuthUserDeleteDuringOps(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoError(t, cc.Put(ctx, "foo", "bar", config.PutOptions{}))
+ require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth")
+
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+ testUserAuthClient := testutils.MustClient(clus.Client(WithAuth(testUserName, testPassword)))
+
+ // create a key
+ require.NoError(t, testUserAuthClient.Put(ctx, "foo", "bar", config.PutOptions{}))
+ // confirm put succeeded
+ resp, err := testUserAuthClient.Get(ctx, "foo", config.GetOptions{})
+ require.NoError(t, err)
+ if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "foo" || string(resp.Kvs[0].Value) != "bar" {
+ t.Fatalf("want key value pair 'foo' 'bar' but got %+v", resp.Kvs)
+ }
+ // delete the user
+ _, err = rootAuthClient.UserDelete(ctx, testUserName)
+ require.NoError(t, err)
+ // check the user is deleted
+ err = testUserAuthClient.Put(ctx, "foo", "baz", config.PutOptions{})
+ require.ErrorContains(t, err, AuthenticationFailed)
+ })
+}
+
+func TestAuthRoleRevokeDuringOps(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoError(t, cc.Put(ctx, "foo", "bar", config.PutOptions{}))
+ require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth")
+
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+ testUserAuthClient := testutils.MustClient(clus.Client(WithAuth(testUserName, testPassword)))
+
+ // create a key
+ require.NoError(t, testUserAuthClient.Put(ctx, "foo", "bar", config.PutOptions{}))
+ // confirm put succeeded
+ resp, err := testUserAuthClient.Get(ctx, "foo", config.GetOptions{})
+ require.NoError(t, err)
+ if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "foo" || string(resp.Kvs[0].Value) != "bar" {
+ t.Fatalf("want key value pair 'foo' 'bar' but got %+v", resp.Kvs)
+ }
+ // create a new role
+ _, err = rootAuthClient.RoleAdd(ctx, "test-role2")
+ require.NoError(t, err)
+ // grant a new key to the new role
+ _, err = rootAuthClient.RoleGrantPermission(ctx, "test-role2", "hoo", "", clientv3.PermissionType(clientv3.PermReadWrite))
+ require.NoError(t, err)
+ // grant the new role to the user
+ _, err = rootAuthClient.UserGrantRole(ctx, testUserName, "test-role2")
+ require.NoError(t, err)
+
+ // try a newly granted key
+ require.NoError(t, testUserAuthClient.Put(ctx, "hoo", "bar", config.PutOptions{}))
+ // confirm put succeeded
+ resp, err = testUserAuthClient.Get(ctx, "hoo", config.GetOptions{})
+ require.NoError(t, err)
+ if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "hoo" || string(resp.Kvs[0].Value) != "bar" {
+ t.Fatalf("want key value pair 'hoo' 'bar' but got %+v", resp.Kvs)
+ }
+ // revoke a role from the user
+ _, err = rootAuthClient.UserRevokeRole(ctx, testUserName, testRoleName)
+ require.NoError(t, err)
+ // check the role is revoked and permission is lost from the user
+ require.ErrorContains(t, testUserAuthClient.Put(ctx, "foo", "baz", config.PutOptions{}), PermissionDenied)
+
+ // try a key that can be accessed from the remaining role
+ require.NoError(t, testUserAuthClient.Put(ctx, "hoo", "bar2", config.PutOptions{}))
+ // confirm put succeeded
+ resp, err = testUserAuthClient.Get(ctx, "hoo", config.GetOptions{})
+ require.NoError(t, err)
+ if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "hoo" || string(resp.Kvs[0].Value) != "bar2" {
+ t.Fatalf("want key value pair 'hoo' 'bar2' but got %+v", resp.Kvs)
+ }
+ })
+}
+
+func TestAuthWriteKey(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoError(t, cc.Put(ctx, "foo", "a", config.PutOptions{}))
+ require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth")
+
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+ testUserAuthClient := testutils.MustClient(clus.Client(WithAuth(testUserName, testPassword)))
+
+		// confirm the root role can access all keys
+ require.NoError(t, rootAuthClient.Put(ctx, "foo", "bar", config.PutOptions{}))
+ resp, err := rootAuthClient.Get(ctx, "foo", config.GetOptions{})
+ require.NoError(t, err)
+ if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "foo" || string(resp.Kvs[0].Value) != "bar" {
+ t.Fatalf("want key value pair 'foo' 'bar' but got %+v", resp.Kvs)
+ }
+ // try invalid user
+ _, err = clus.Client(WithAuth("a", "b"))
+ require.ErrorContains(t, err, AuthenticationFailed)
+
+ // try good user
+ require.NoError(t, testUserAuthClient.Put(ctx, "foo", "bar2", config.PutOptions{}))
+ // confirm put succeeded
+ resp, err = testUserAuthClient.Get(ctx, "foo", config.GetOptions{})
+ require.NoError(t, err)
+ if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "foo" || string(resp.Kvs[0].Value) != "bar2" {
+ t.Fatalf("want key value pair 'foo' 'bar2' but got %+v", resp.Kvs)
+ }
+
+ // try bad password
+ _, err = clus.Client(WithAuth(testUserName, "badpass"))
+ require.ErrorContains(t, err, AuthenticationFailed)
+ })
+}
+
+func TestAuthTxn(t *testing.T) {
+ tcs := []struct {
+ name string
+ cfg config.ClusterConfig
+ }{
+ {
+ "NoJWT",
+ config.ClusterConfig{ClusterSize: 1},
+ },
+ {
+ "JWT",
+ config.ClusterConfig{ClusterSize: 1, AuthToken: defaultAuthToken},
+ },
+ }
+
+ reqs := []txnReq{
+ {
+ compare: []string{`version("c2") = "1"`},
+ ifSuccess: []string{"get s2"},
+ ifFail: []string{"get f2"},
+ expectResults: []string{"SUCCESS", "s2", "v"},
+ expectError: false,
+ },
+		// the key used in the compare case isn't granted
+ {
+ compare: []string{`version("c1") = "1"`},
+ ifSuccess: []string{"get s2"},
+ ifFail: []string{"get f2"},
+ expectResults: []string{PermissionDenied},
+ expectError: true,
+ },
+		// the key used in the success case isn't granted
+ {
+ compare: []string{`version("c2") = "1"`},
+ ifSuccess: []string{"get s1"},
+ ifFail: []string{"get f2"},
+ expectResults: []string{PermissionDenied},
+ expectError: true,
+ },
+		// the key used in the failure case isn't granted
+ {
+ compare: []string{`version("c2") = "1"`},
+ ifSuccess: []string{"get s2"},
+ ifFail: []string{"get f1"},
+ expectResults: []string{PermissionDenied},
+ expectError: true,
+ },
+ }
+
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.cfg))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+				// keys with the "1" suffix aren't granted to test-user
+ keys := []string{"c1", "s1", "f1"}
+				// keys with the "2" suffix are granted to test-user by the role grants below
+ grantedKeys := []string{"c2", "s2", "f2"}
+ for _, key := range keys {
+ err := cc.Put(ctx, key, "v", config.PutOptions{})
+ require.NoError(t, err)
+ }
+ for _, key := range grantedKeys {
+ err := cc.Put(ctx, key, "v", config.PutOptions{})
+ require.NoError(t, err)
+ }
+
+ require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth")
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+ testUserAuthClient := testutils.MustClient(clus.Client(WithAuth(testUserName, testPassword)))
+
+ // grant keys to test-user
+ for _, key := range grantedKeys {
+ _, err := rootAuthClient.RoleGrantPermission(ctx, testRoleName, key, "", clientv3.PermissionType(clientv3.PermReadWrite))
+ require.NoError(t, err)
+ }
+ for _, req := range reqs {
+ resp, err := testUserAuthClient.Txn(ctx, req.compare, req.ifSuccess, req.ifFail, config.TxnOptions{
+ Interactive: true,
+ })
+ if req.expectError {
+ require.Contains(t, err.Error(), req.expectResults[0])
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, req.expectResults, getRespValues(resp))
+ }
+ }
+ })
+ })
+ }
+}
+
+func TestAuthPrefixPerm(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth")
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+ testUserAuthClient := testutils.MustClient(clus.Client(WithAuth(testUserName, testPassword)))
+ prefix := "/prefix/" // directory like prefix
+ // grant keys to test-user
+ _, err := rootAuthClient.RoleGrantPermission(ctx, "test-role", prefix, clientv3.GetPrefixRangeEnd(prefix), clientv3.PermissionType(clientv3.PermReadWrite))
+ require.NoError(t, err)
+ // try a prefix granted permission
+ for i := 0; i < 10; i++ {
+ key := fmt.Sprintf("%s%d", prefix, i)
+ require.NoError(t, testUserAuthClient.Put(ctx, key, "val", config.PutOptions{}))
+ }
+		// putting to the prefix range end key ("/prefix0") is outside the granted range and must fail
+ require.ErrorContains(t, testUserAuthClient.Put(ctx, clientv3.GetPrefixRangeEnd(prefix), "baz", config.PutOptions{}), PermissionDenied)
+
+ // grant the prefix2 keys to test-user
+ prefix2 := "/prefix2/"
+ _, err = rootAuthClient.RoleGrantPermission(ctx, "test-role", prefix2, clientv3.GetPrefixRangeEnd(prefix2), clientv3.PermissionType(clientv3.PermReadWrite))
+ require.NoError(t, err)
+ for i := 0; i < 10; i++ {
+ key := fmt.Sprintf("%s%d", prefix2, i)
+ require.NoError(t, testUserAuthClient.Put(ctx, key, "val", config.PutOptions{}))
+ }
+ })
+}
+
+func TestAuthLeaseKeepAlive(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth")
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+
+ resp, err := rootAuthClient.Grant(ctx, 10)
+ require.NoError(t, err)
+ leaseID := resp.ID
+ require.NoError(t, rootAuthClient.Put(ctx, "key", "value", config.PutOptions{LeaseID: leaseID}))
+ _, err = rootAuthClient.KeepAliveOnce(ctx, leaseID)
+ require.NoError(t, err)
+
+ gresp, err := rootAuthClient.Get(ctx, "key", config.GetOptions{})
+ require.NoError(t, err)
+ if len(gresp.Kvs) != 1 || string(gresp.Kvs[0].Key) != "key" || string(gresp.Kvs[0].Value) != "value" {
+ t.Fatalf("want kv pair ('key', 'value') but got %v", gresp.Kvs)
+ }
+ })
+}
+
+func TestAuthRevokeWithDelete(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth")
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+ // create a new role
+ newTestRoleName := "test-role2"
+ _, err := rootAuthClient.RoleAdd(ctx, newTestRoleName)
+ require.NoError(t, err)
+ // grant the new role to the user
+ _, err = rootAuthClient.UserGrantRole(ctx, testUserName, newTestRoleName)
+ require.NoError(t, err)
+ // check the result
+ resp, err := rootAuthClient.UserGet(ctx, testUserName)
+ require.NoError(t, err)
+ require.ElementsMatch(t, resp.Roles, []string{testRoleName, newTestRoleName})
+ // delete the role, test-role2 must be revoked from test-user
+ _, err = rootAuthClient.RoleDelete(ctx, newTestRoleName)
+ require.NoError(t, err)
+ // check the result
+ resp, err = rootAuthClient.UserGet(ctx, testUserName)
+ require.NoError(t, err)
+ require.ElementsMatch(t, resp.Roles, []string{testRoleName})
+ })
+}
+
+func TestAuthLeaseTimeToLiveExpired(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth")
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+ resp, err := rootAuthClient.Grant(ctx, 2)
+ require.NoError(t, err)
+ leaseID := resp.ID
+ require.NoError(t, rootAuthClient.Put(ctx, "key", "val", config.PutOptions{LeaseID: leaseID}))
+		// wait for the 2-second lease to expire (with margin to avoid a false positive)
+ time.Sleep(3 * time.Second)
+ tresp, err := rootAuthClient.TimeToLive(ctx, leaseID, config.LeaseOption{})
+ require.NoError(t, err)
+ require.Equal(t, int64(-1), tresp.TTL)
+
+ gresp, err := rootAuthClient.Get(ctx, "key", config.GetOptions{})
+ require.NoError(t, err)
+ require.Empty(t, gresp.Kvs)
+ })
+}
+
+func TestAuthLeaseGrantLeases(t *testing.T) {
+ testRunner.BeforeTest(t)
+ tcs := []testCase{
+ {
+ name: "NoJWT",
+ config: config.ClusterConfig{ClusterSize: 1},
+ },
+ {
+ name: "JWT",
+ config: config.ClusterConfig{ClusterSize: 1, AuthToken: defaultAuthToken},
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth")
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+
+ resp, err := rootAuthClient.Grant(ctx, 10)
+ require.NoError(t, err)
+
+ leaseID := resp.ID
+ lresp, err := rootAuthClient.Leases(ctx)
+ require.NoError(t, err)
+ if len(lresp.Leases) != 1 || lresp.Leases[0].ID != leaseID {
+ t.Fatalf("want %v leaseID but got %v leases", leaseID, lresp.Leases)
+ }
+ })
+ })
+ }
+}
+
+func TestAuthMemberAdd(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth")
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+ testUserAuthClient := testutils.MustClient(clus.Client(WithAuth(testUserName, testPassword)))
+ _, err := testUserAuthClient.MemberAdd(ctx, "newmember", []string{testPeerURL})
+ require.ErrorContains(t, err, PermissionDenied)
+ _, err = rootAuthClient.MemberAdd(ctx, "newmember", []string{testPeerURL})
+ require.NoError(t, err)
+ })
+}
+
+func TestAuthMemberRemove(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clusterSize := 3
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: clusterSize}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ memberIDToEndpoints := getMemberIDToEndpoints(ctx, t, clus)
+ require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth")
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+
+ memberID, clusterID := memberToRemove(ctx, t, rootAuthClient, clusterSize)
+ delete(memberIDToEndpoints, memberID)
+ endpoints := make([]string, 0, len(memberIDToEndpoints))
+ for _, ep := range memberIDToEndpoints {
+ endpoints = append(endpoints, ep)
+ }
+ testUserAuthClient := testutils.MustClient(clus.Client(WithAuth(testUserName, testPassword)))
+ // ordinary user cannot remove a member
+ _, err := testUserAuthClient.MemberRemove(ctx, memberID)
+ require.ErrorContains(t, err, PermissionDenied)
+
+		// root can remove a member; build a client that excludes the removed member's endpoint
+ rootAuthClient2 := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword), WithEndpoints(endpoints)))
+ resp, err := rootAuthClient2.MemberRemove(ctx, memberID)
+ require.NoError(t, err)
+ require.Equal(t, resp.Header.ClusterId, clusterID)
+ found := false
+ for _, member := range resp.Members {
+ if member.ID == memberID {
+ found = true
+ break
+ }
+ }
+ require.Falsef(t, found, "expect removed member not found in member remove response")
+ })
+}
+
+func TestAuthTestInvalidMgmt(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth")
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+ _, err := rootAuthClient.UserDelete(ctx, rootUserName)
+ require.ErrorContains(t, err, InvalidAuthManagement)
+ _, err = rootAuthClient.UserRevokeRole(ctx, rootUserName, rootRoleName)
+ require.ErrorContains(t, err, InvalidAuthManagement)
+ })
+}
+
+func TestAuthLeaseRevoke(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth")
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+
+ lresp, err := rootAuthClient.Grant(ctx, 10)
+ require.NoError(t, err)
+ err = rootAuthClient.Put(ctx, "key", "value", config.PutOptions{LeaseID: lresp.ID})
+ require.NoError(t, err)
+
+ _, err = rootAuthClient.Revoke(ctx, lresp.ID)
+ require.NoError(t, err)
+
+ _, err = rootAuthClient.Get(ctx, "key", config.GetOptions{})
+ require.NoError(t, err)
+ })
+}
+
+func TestAuthRoleGet(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth")
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+ testUserAuthClient := testutils.MustClient(clus.Client(WithAuth(testUserName, testPassword)))
+
+ resp, err := rootAuthClient.RoleGet(ctx, testRoleName)
+ require.NoError(t, err)
+ requireRolePermissionEqual(t, testRole, resp.Perm)
+
+ // test-user can get the information of test-role because it belongs to the role
+ resp, err = testUserAuthClient.RoleGet(ctx, testRoleName)
+ require.NoError(t, err)
+ requireRolePermissionEqual(t, testRole, resp.Perm)
+ // test-user cannot get the information of root because it doesn't belong to the role
+ _, err = testUserAuthClient.RoleGet(ctx, rootRoleName)
+ require.ErrorContains(t, err, PermissionDenied)
+ })
+}
+
+func TestAuthUserGet(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth")
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+ testUserAuthClient := testutils.MustClient(clus.Client(WithAuth(testUserName, testPassword)))
+
+ resp, err := rootAuthClient.UserGet(ctx, testUserName)
+ require.NoError(t, err)
+ requireUserRolesEqual(t, testUser, resp.Roles)
+
+ // test-user can get the information of test-user itself
+ resp, err = testUserAuthClient.UserGet(ctx, testUserName)
+ require.NoError(t, err)
+ requireUserRolesEqual(t, testUser, resp.Roles)
+ // test-user cannot get the information of root
+ _, err = testUserAuthClient.UserGet(ctx, rootUserName)
+ require.ErrorContains(t, err, PermissionDenied)
+ })
+}
+
+func TestAuthRoleList(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth")
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+
+ resp, err := rootAuthClient.RoleList(ctx)
+ require.NoError(t, err)
+ requireUserRolesEqual(t, testUser, resp.Roles)
+ })
+}
+
+func TestAuthJWTExpire(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1, AuthToken: defaultAuthToken}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth")
+ testUserAuthClient := testutils.MustClient(clus.Client(WithAuth(testUserName, testPassword)))
+
+ require.NoError(t, testUserAuthClient.Put(ctx, "foo", "bar", config.PutOptions{}))
+		// wait for the JWT token to expire
+ <-time.After(3 * tokenTTL)
+
+		// the e2e test will generate a new token, while the integration test,
+		// which re-uses the same etcd client, will refresh the token on server failure.
+ require.NoError(t, testUserAuthClient.Put(ctx, "foo", "bar", config.PutOptions{}))
+ })
+}
+
+// TestAuthRevisionConsistency ensures auth revision is the same after member restarts
+func TestAuthRevisionConsistency(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1, AuthToken: defaultAuthToken}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth")
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+
+ // add user
+ _, err := rootAuthClient.UserAdd(ctx, testUserName, testPassword, config.UserAddOptions{})
+ require.NoError(t, err)
+ // delete the same user
+ _, err = rootAuthClient.UserDelete(ctx, testUserName)
+ require.NoError(t, err)
+
+ // get node0 auth revision
+ aresp, err := rootAuthClient.AuthStatus(ctx)
+ require.NoError(t, err)
+ oldAuthRevision := aresp.AuthRevision
+
+ // restart the node
+ clus.Members()[0].Stop()
+ require.NoError(t, clus.Members()[0].Start(ctx))
+
+ aresp2, err := rootAuthClient.AuthStatus(ctx)
+ require.NoError(t, err)
+ newAuthRevision := aresp2.AuthRevision
+
+ require.Equal(t, oldAuthRevision, newAuthRevision)
+ })
+}
+
+// TestAuthTestCacheReload ensures permissions are persisted and will be reloaded after member restarts
+func TestAuthTestCacheReload(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1, AuthToken: defaultAuthToken}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth")
+ testUserAuthClient := testutils.MustClient(clus.Client(WithAuth(testUserName, testPassword)))
+
+ // create foo since that is within the permission set
+ // expectation is to succeed
+ require.NoError(t, testUserAuthClient.Put(ctx, "foo", "bar", config.PutOptions{}))
+
+ // restart the node
+ clus.Members()[0].Stop()
+ require.NoError(t, clus.Members()[0].Start(ctx))
+
+		// nothing has changed, but the put would fail here if permissions were not reloaded after the restart
+ require.NoError(t, testUserAuthClient.Put(ctx, "foo", "bar2", config.PutOptions{}))
+ })
+}
+
+// TestAuthLeaseTimeToLive ensures lease time-to-live requests are gated by RBAC control
+func TestAuthLeaseTimeToLive(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1, AuthToken: defaultAuthToken}))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth")
+ testUserAuthClient := testutils.MustClient(clus.Client(WithAuth(testUserName, testPassword)))
+
+ gresp, err := testUserAuthClient.Grant(ctx, 10)
+ require.NoError(t, err)
+ leaseID := gresp.ID
+
+ require.NoError(t, testUserAuthClient.Put(ctx, "foo", "bar", config.PutOptions{LeaseID: leaseID}))
+ _, err = testUserAuthClient.TimeToLive(ctx, leaseID, config.LeaseOption{WithAttachedKeys: true})
+ require.NoError(t, err)
+
+ rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword)))
+ require.NoError(t, rootAuthClient.Put(ctx, "bar", "foo", config.PutOptions{LeaseID: leaseID}))
+
+ // the lease is attached to bar, which test-user cannot access
+ _, err = testUserAuthClient.TimeToLive(ctx, leaseID, config.LeaseOption{WithAttachedKeys: true})
+		require.Errorf(t, err, "test-user must not be able to access the lease, because it's attached to the key bar")
+
+ // without --keys, access should be allowed
+ _, err = testUserAuthClient.TimeToLive(ctx, leaseID, config.LeaseOption{WithAttachedKeys: false})
+ require.NoError(t, err)
+ })
+}
+
+func mustAbsPath(path string) string {
+ abs, err := filepath.Abs(path)
+ if err != nil {
+ panic(err)
+ }
+ return abs
+}
diff --git a/tests/common/auth_util.go b/tests/common/auth_util.go
new file mode 100644
index 00000000000..b157ef4fc83
--- /dev/null
+++ b/tests/common/auth_util.go
@@ -0,0 +1,120 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/api/v3/authpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/interfaces"
+)
+
+const (
+ rootUserName = "root"
+ rootRoleName = "root"
+ rootPassword = "rootPassword"
+ testUserName = "test-user"
+ testRoleName = "test-role"
+ testPassword = "pass"
+)
+
+var (
+ rootUser = authUser{user: rootUserName, pass: rootPassword, role: rootRoleName}
+ testUser = authUser{user: testUserName, pass: testPassword, role: testRoleName}
+
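+	// testRole grants read-write permission on the single key "foo";
+	// an empty keyEnd means the permission covers only that exact key.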
+ testRole = authRole{
+ role: testRoleName,
+ permission: clientv3.PermissionType(clientv3.PermReadWrite),
+ key: "foo",
+ keyEnd: "",
+ }
+)
+
+type authRole struct {
+ role string
+ permission clientv3.PermissionType
+ key string
+ keyEnd string
+}
+
+type authUser struct {
+ user string
+ pass string
+ role string
+}
+
+func createRoles(c interfaces.Client, roles []authRole) error {
+ for _, r := range roles {
+ // add role
+ if _, err := c.RoleAdd(context.TODO(), r.role); err != nil {
+ return fmt.Errorf("RoleAdd failed: %w", err)
+ }
+
+ // grant permission to role
+ if _, err := c.RoleGrantPermission(context.TODO(), r.role, r.key, r.keyEnd, r.permission); err != nil {
+ return fmt.Errorf("RoleGrantPermission failed: %w", err)
+ }
+ }
+
+ return nil
+}
+
+func createUsers(c interfaces.Client, users []authUser) error {
+ for _, u := range users {
+ // add user
+ if _, err := c.UserAdd(context.TODO(), u.user, u.pass, config.UserAddOptions{}); err != nil {
+ return fmt.Errorf("UserAdd failed: %w", err)
+ }
+
+ // grant role to user
+ if _, err := c.UserGrantRole(context.TODO(), u.user, u.role); err != nil {
+ return fmt.Errorf("UserGrantRole failed: %w", err)
+ }
+ }
+
+ return nil
+}
+
+func setupAuth(c interfaces.Client, roles []authRole, users []authUser) error {
+ // create roles
+ if err := createRoles(c, roles); err != nil {
+ return err
+ }
+
+ if err := createUsers(c, users); err != nil {
+ return err
+ }
+
+ // enable auth
+ return c.AuthEnable(context.TODO())
+}
+
+func requireRolePermissionEqual(t *testing.T, expectRole authRole, actual []*authpb.Permission) {
+ require.Len(t, actual, 1)
+ require.Equal(t, expectRole.permission, clientv3.PermissionType(actual[0].PermType))
+ require.Equal(t, expectRole.key, string(actual[0].Key))
+ require.Equal(t, expectRole.keyEnd, string(actual[0].RangeEnd))
+}
+
+func requireUserRolesEqual(t *testing.T, expectUser authUser, actual []string) {
+ require.Len(t, actual, 1)
+ require.Equal(t, expectUser.role, actual[0])
+}
diff --git a/tests/common/compact_test.go b/tests/common/compact_test.go
new file mode 100644
index 00000000000..a1ded59b0b2
--- /dev/null
+++ b/tests/common/compact_test.go
@@ -0,0 +1,87 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+func TestCompact(t *testing.T) {
+ testRunner.BeforeTest(t)
+ tcs := []struct {
+ name string
+ options config.CompactOption
+ }{
+ {
+ name: "NoPhysical",
+ options: config.CompactOption{Physical: false, Timeout: 10 * time.Second},
+ },
+ {
+ name: "Physical",
+ options: config.CompactOption{Physical: true, Timeout: 10 * time.Second},
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t)
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ kvs := []testutils.KV{{Key: "key", Val: "val1"}, {Key: "key", Val: "val2"}, {Key: "key", Val: "val3"}}
+ for i := range kvs {
+ err := cc.Put(ctx, kvs[i].Key, kvs[i].Val, config.PutOptions{})
+ require.NoErrorf(t, err, "compactTest #%d: put kv error", i)
+ }
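+				// the three puts above create revisions 2, 3 and 4, so reading at
+				// revision 3 should return the second value ("val2")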
+ get, err := cc.Get(ctx, "key", config.GetOptions{Revision: 3})
+ require.NoErrorf(t, err, "compactTest: Get kv by revision error")
+
+ getkvs := testutils.KeyValuesFromGetResponse(get)
+ assert.Equal(t, kvs[1:2], getkvs)
+
+ _, err = cc.Compact(ctx, 4, tc.options)
+ require.NoErrorf(t, err, "compactTest: Compact error")
+
+ _, err = cc.Get(ctx, "key", config.GetOptions{Revision: 3})
+ if err != nil {
+ if !strings.Contains(err.Error(), "required revision has been compacted") {
+ t.Fatalf("compactTest: Get compact key error (%v)", err)
+ }
+ } else {
+ t.Fatalf("expected '...has been compacted' error, got ")
+ }
+
+ _, err = cc.Compact(ctx, 2, tc.options)
+ if err != nil {
+ if !strings.Contains(err.Error(), "required revision has been compacted") {
+ t.Fatal(err)
+ }
+ } else {
+ t.Fatalf("expected '...has been compacted' error, got ")
+ }
+ })
+ })
+ }
+}
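
The framework client wraps the regular clientv3 API, so the compacted-revision behaviour checked above can be reproduced with a plain client roughly as follows. This is a sketch: cli (*clientv3.Client), ctx and t are assumed to exist, and matching on rpctypes.ErrCompacted follows the documented clientv3 behaviour.

// Sketch: reads at a revision older than the compaction point fail with ErrCompacted.
// rpctypes is go.etcd.io/etcd/api/v3/v3rpc/rpctypes.
if _, err := cli.Compact(ctx, 4, clientv3.WithCompactPhysical()); err != nil {
	t.Fatal(err)
}
if _, err := cli.Get(ctx, "key", clientv3.WithRev(3)); !errors.Is(err, rpctypes.ErrCompacted) {
	t.Fatalf("expected %v, got %v", rpctypes.ErrCompacted, err)
}
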
diff --git a/tests/common/defrag_test.go b/tests/common/defrag_test.go
new file mode 100644
index 00000000000..4509ad51d97
--- /dev/null
+++ b/tests/common/defrag_test.go
@@ -0,0 +1,50 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+func TestDefragOnline(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ options := config.DefragOption{Timeout: 10 * time.Second}
+ clus := testRunner.NewCluster(ctx, t)
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ defer clus.Close()
+ kvs := []testutils.KV{{Key: "key", Val: "val1"}, {Key: "key", Val: "val2"}, {Key: "key", Val: "val3"}}
+ for i := range kvs {
+ if err := cc.Put(ctx, kvs[i].Key, kvs[i].Val, config.PutOptions{}); err != nil {
+ t.Fatalf("compactTest #%d: put kv error (%v)", i, err)
+ }
+ }
+ _, err := cc.Compact(ctx, 4, config.CompactOption{Physical: true, Timeout: 10 * time.Second})
+ if err != nil {
+ t.Fatalf("defrag_test: compact with revision error (%v)", err)
+ }
+
+ if err = cc.Defragment(ctx, options); err != nil {
+ t.Fatalf("defrag_test: defrag error (%v)", err)
+ }
+ })
+}
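
For orientation, online defragmentation via the raw client is a per-endpoint maintenance RPC; a rough sketch, assuming cli is a connected *clientv3.Client:

// Sketch: defragment every endpoint of the cluster, one at a time.
for _, ep := range cli.Endpoints() {
	if _, err := cli.Defragment(ctx, ep); err != nil {
		t.Fatalf("defragment of %s failed: %v", ep, err)
	}
}
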
diff --git a/tests/common/e2e_test.go b/tests/common/e2e_test.go
new file mode 100644
index 00000000000..11c4f94a335
--- /dev/null
+++ b/tests/common/e2e_test.go
@@ -0,0 +1,83 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build e2e
+
+package common
+
+import (
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/tests/v3/framework"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+func init() {
+ testRunner = framework.E2eTestRunner
+ clusterTestCases = e2eClusterTestCases
+}
+
+func e2eClusterTestCases() []testCase {
+ tcs := []testCase{
+ {
+ name: "NoTLS",
+ config: config.ClusterConfig{ClusterSize: 1},
+ },
+ {
+ name: "PeerTLS",
+ config: config.ClusterConfig{ClusterSize: 3, PeerTLS: config.ManualTLS},
+ },
+ {
+ name: "PeerAutoTLS",
+ config: config.ClusterConfig{ClusterSize: 3, PeerTLS: config.AutoTLS},
+ },
+ {
+ name: "ClientTLS",
+ config: config.ClusterConfig{ClusterSize: 1, ClientTLS: config.ManualTLS},
+ },
+ {
+ name: "ClientAutoTLS",
+ config: config.ClusterConfig{ClusterSize: 1, ClientTLS: config.AutoTLS},
+ },
+ }
+
+ if fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
+ tcs = append(tcs, testCase{
+ name: "MinorityLastVersion",
+ config: config.ClusterConfig{
+ ClusterSize: 3,
+ ClusterContext: &e2e.ClusterContext{
+ Version: e2e.MinorityLastVersion,
+ },
+ },
+ }, testCase{
+ name: "QuorumLastVersion",
+ config: config.ClusterConfig{
+ ClusterSize: 3,
+ ClusterContext: &e2e.ClusterContext{
+ Version: e2e.QuorumLastVersion,
+ },
+ },
+ })
+ }
+ return tcs
+}
+
+func WithAuth(userName, password string) config.ClientOption {
+ return e2e.WithAuth(userName, password)
+}
+
+func WithEndpoints(endpoints []string) config.ClientOption {
+ return e2e.WithEndpoints(endpoints)
+}
diff --git a/tests/common/endpoint_test.go b/tests/common/endpoint_test.go
new file mode 100644
index 00000000000..eeef011f3cc
--- /dev/null
+++ b/tests/common/endpoint_test.go
@@ -0,0 +1,89 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+func TestEndpointStatus(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t)
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ _, err := cc.Status(ctx)
+ if err != nil {
+ t.Fatalf("get endpoint status error: %v", err)
+ }
+ })
+}
+
+func TestEndpointHashKV(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t)
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+
+ t.Log("Add some entries")
+ for i := 0; i < 10; i++ {
+ key := fmt.Sprintf("key-%d", i)
+ value := fmt.Sprintf("value-%d", i)
+ if err := cc.Put(ctx, key, value, config.PutOptions{}); err != nil {
+ t.Fatalf("count not put key %q, err: %s", key, err)
+ }
+ }
+
+ t.Log("Check all members' Hash and HashRevision")
+ require.Eventually(t, func() bool {
+ resp, err := cc.HashKV(ctx, 0)
+ require.NoErrorf(t, err, "failed to get endpoint hashkv")
+
+ require.Len(t, resp, 3)
+ if resp[0].HashRevision == resp[1].HashRevision && resp[0].HashRevision == resp[2].HashRevision {
+ require.Equal(t, resp[0].Hash, resp[1].Hash)
+ require.Equal(t, resp[0].Hash, resp[2].Hash)
+ return true
+ }
+ t.Logf("HashRevisions are not equal: [%d, %d, %d], retry...", resp[0].HashRevision, resp[1].HashRevision, resp[2].HashRevision)
+ return false
+ }, 5*time.Second, 200*time.Millisecond)
+}
+
+func TestEndpointHealth(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t)
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ if err := cc.Health(ctx); err != nil {
+ t.Fatalf("get endpoint health error: %v", err)
+ }
+ })
+}
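
The HashKV check above compares hashes only once all members report the same HashRevision. With the raw client the per-endpoint call looks roughly like this (cli is an assumed *clientv3.Client; revision 0 means "latest"):

// Sketch: collect per-endpoint hashes and the revision each hash was computed at.
for _, ep := range cli.Endpoints() {
	resp, err := cli.HashKV(ctx, ep, 0)
	if err != nil {
		t.Fatalf("HashKV(%s) failed: %v", ep, err)
	}
	t.Logf("%s: hash=%d hashRevision=%d", ep, resp.Hash, resp.HashRevision)
}
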
diff --git a/tests/common/integration_test.go b/tests/common/integration_test.go
new file mode 100644
index 00000000000..c4cabeeb1f9
--- /dev/null
+++ b/tests/common/integration_test.go
@@ -0,0 +1,61 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build integration
+
+package common
+
+import (
+ "go.etcd.io/etcd/tests/v3/framework"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
+)
+
+func init() {
+ testRunner = framework.IntegrationTestRunner
+ clusterTestCases = integrationClusterTestCases
+}
+
+func integrationClusterTestCases() []testCase {
+ return []testCase{
+ {
+ name: "NoTLS",
+ config: config.ClusterConfig{ClusterSize: 1},
+ },
+ {
+ name: "PeerTLS",
+ config: config.ClusterConfig{ClusterSize: 3, PeerTLS: config.ManualTLS},
+ },
+ {
+ name: "PeerAutoTLS",
+ config: config.ClusterConfig{ClusterSize: 3, PeerTLS: config.AutoTLS},
+ },
+ {
+ name: "ClientTLS",
+ config: config.ClusterConfig{ClusterSize: 1, ClientTLS: config.ManualTLS},
+ },
+ {
+ name: "ClientAutoTLS",
+ config: config.ClusterConfig{ClusterSize: 1, ClientTLS: config.AutoTLS},
+ },
+ }
+}
+
+func WithAuth(userName, password string) config.ClientOption {
+ return integration.WithAuth(userName, password)
+}
+
+func WithEndpoints(endpoints []string) config.ClientOption {
+ return integration.WithEndpoints(endpoints)
+}
diff --git a/tests/common/kv_test.go b/tests/common/kv_test.go
new file mode 100644
index 00000000000..988a15f17cd
--- /dev/null
+++ b/tests/common/kv_test.go
@@ -0,0 +1,238 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+func TestKVPut(t *testing.T) {
+ testRunner.BeforeTest(t)
+ for _, tc := range clusterTestCases() {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+
+ testutils.ExecuteUntil(ctx, t, func() {
+ key, value := "foo", "bar"
+
+ if err := cc.Put(ctx, key, value, config.PutOptions{}); err != nil {
+ t.Fatalf("count not put key %q, err: %s", key, err)
+ }
+ resp, err := cc.Get(ctx, key, config.GetOptions{})
+ if err != nil {
+ t.Fatalf("count not get key %q, err: %s", key, err)
+ }
+ if len(resp.Kvs) != 1 {
+ t.Errorf("Unexpected length of response, got %d", len(resp.Kvs))
+ }
+ if string(resp.Kvs[0].Key) != key {
+ t.Errorf("Unexpected key, want %q, got %q", key, resp.Kvs[0].Key)
+ }
+ if string(resp.Kvs[0].Value) != value {
+ t.Errorf("Unexpected value, want %q, got %q", value, resp.Kvs[0].Value)
+ }
+ })
+ })
+ }
+}
+
+func TestKVGet(t *testing.T) {
+ testRunner.BeforeTest(t)
+ for _, tc := range clusterTestCases() {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+
+ testutils.ExecuteUntil(ctx, t, func() {
+ var (
+ kvs = []string{"a", "b", "c", "c", "c", "foo", "foo/abc", "fop"}
+ wantKvs = []string{"a", "b", "c", "foo", "foo/abc", "fop"}
+ kvsByVersion = []string{"a", "b", "foo", "foo/abc", "fop", "c"}
+ reversedKvs = []string{"fop", "foo/abc", "foo", "c", "b", "a"}
+ )
+
+ for i := range kvs {
+ if err := cc.Put(ctx, kvs[i], "bar", config.PutOptions{}); err != nil {
+ t.Fatalf("count not put key %q, err: %s", kvs[i], err)
+ }
+ }
+ tests := []struct {
+ begin string
+ end string
+ options config.GetOptions
+
+ wkv []string
+ }{
+ {begin: "a", wkv: wantKvs[:1]},
+ {begin: "a", options: config.GetOptions{Serializable: true}, wkv: wantKvs[:1]},
+ {begin: "a", options: config.GetOptions{End: "c"}, wkv: wantKvs[:2]},
+ {begin: "", options: config.GetOptions{Prefix: true}, wkv: wantKvs},
+ {begin: "", options: config.GetOptions{FromKey: true}, wkv: wantKvs},
+ {begin: "a", options: config.GetOptions{End: "x"}, wkv: wantKvs},
+ {begin: "", options: config.GetOptions{Prefix: true, Revision: 4}, wkv: kvs[:3]},
+ {begin: "a", options: config.GetOptions{CountOnly: true}, wkv: nil},
+ {begin: "foo", options: config.GetOptions{Prefix: true}, wkv: []string{"foo", "foo/abc"}},
+ {begin: "foo", options: config.GetOptions{FromKey: true}, wkv: []string{"foo", "foo/abc", "fop"}},
+ {begin: "", options: config.GetOptions{Prefix: true, Limit: 2}, wkv: wantKvs[:2]},
+ {begin: "", options: config.GetOptions{Prefix: true, Order: clientv3.SortAscend, SortBy: clientv3.SortByModRevision}, wkv: wantKvs},
+ {begin: "", options: config.GetOptions{Prefix: true, Order: clientv3.SortAscend, SortBy: clientv3.SortByVersion}, wkv: kvsByVersion},
+ {begin: "", options: config.GetOptions{Prefix: true, Order: clientv3.SortNone, SortBy: clientv3.SortByCreateRevision}, wkv: wantKvs},
+ {begin: "", options: config.GetOptions{Prefix: true, Order: clientv3.SortDescend, SortBy: clientv3.SortByCreateRevision}, wkv: reversedKvs},
+ {begin: "", options: config.GetOptions{Prefix: true, Order: clientv3.SortDescend, SortBy: clientv3.SortByKey}, wkv: reversedKvs},
+ }
+ for _, tt := range tests {
+ resp, err := cc.Get(ctx, tt.begin, tt.options)
+ if err != nil {
+ t.Fatalf("count not get key %q, err: %s", tt.begin, err)
+ }
+ kvs := testutils.KeysFromGetResponse(resp)
+ assert.Equal(t, tt.wkv, kvs)
+ }
+ })
+ })
+ }
+}
+
+func TestKVDelete(t *testing.T) {
+ testRunner.BeforeTest(t)
+ for _, tc := range clusterTestCases() {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ kvs := []string{"a", "b", "c", "c/abc", "d"}
+ tests := []struct {
+ deleteKey string
+ options config.DeleteOptions
+
+ wantDeleted int
+ wantKeys []string
+ }{
+ { // delete all keys
+ deleteKey: "",
+ options: config.DeleteOptions{Prefix: true},
+ wantDeleted: 5,
+ },
+ { // delete all keys
+ deleteKey: "",
+ options: config.DeleteOptions{FromKey: true},
+ wantDeleted: 5,
+ },
+ {
+ deleteKey: "a",
+ options: config.DeleteOptions{End: "c"},
+ wantDeleted: 2,
+ wantKeys: []string{"c", "c/abc", "d"},
+ },
+ {
+ deleteKey: "c",
+ wantDeleted: 1,
+ wantKeys: []string{"a", "b", "c/abc", "d"},
+ },
+ {
+ deleteKey: "c",
+ options: config.DeleteOptions{Prefix: true},
+ wantDeleted: 2,
+ wantKeys: []string{"a", "b", "d"},
+ },
+ {
+ deleteKey: "c",
+ options: config.DeleteOptions{FromKey: true},
+ wantDeleted: 3,
+ wantKeys: []string{"a", "b"},
+ },
+ {
+ deleteKey: "e",
+ wantDeleted: 0,
+ wantKeys: kvs,
+ },
+ }
+ for _, tt := range tests {
+ for i := range kvs {
+ err := cc.Put(ctx, kvs[i], "bar", config.PutOptions{})
+ require.NoErrorf(t, err, "count not put key %q", kvs[i])
+ }
+ del, err := cc.Delete(ctx, tt.deleteKey, tt.options)
+ require.NoErrorf(t, err, "count not get key %q, err", tt.deleteKey)
+ assert.Equal(t, tt.wantDeleted, int(del.Deleted))
+ get, err := cc.Get(ctx, "", config.GetOptions{Prefix: true})
+ require.NoErrorf(t, err, "count not get key")
+ kvs := testutils.KeysFromGetResponse(get)
+ assert.Equal(t, tt.wantKeys, kvs)
+ }
+ })
+ })
+ }
+}
+
+func TestKVGetNoQuorum(t *testing.T) {
+ testRunner.BeforeTest(t)
+ tcs := []struct {
+ name string
+ options config.GetOptions
+
+ wantError bool
+ }{
+ {
+ name: "Serializable",
+ options: config.GetOptions{Serializable: true},
+ },
+ {
+ name: "Linearizable",
+ options: config.GetOptions{Serializable: false, Timeout: time.Second},
+ wantError: true,
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t)
+ defer clus.Close()
+
+ clus.Members()[0].Stop()
+ clus.Members()[1].Stop()
+
+ cc := clus.Members()[2].Client()
+ testutils.ExecuteUntil(ctx, t, func() {
+ key := "foo"
+ _, err := cc.Get(ctx, key, tc.options)
+ gotError := err != nil
+ if gotError != tc.wantError {
+ t.Fatalf("Unexpected result, wantError: %v, gotErr: %v, err: %s", tc.wantError, gotError, err)
+ }
+ })
+ })
+ }
+}
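
The config.GetOptions/DeleteOptions values above are thin wrappers over clientv3 OpOptions. The exact mapping lives in the test framework, but as an assumed illustration, the prefix query sorted by mod revision and the historical read correspond roughly to:

// Assumed clientv3 equivalent of config.GetOptions{Prefix: true, Order: SortAscend, SortBy: SortByModRevision}.
resp, err := cli.Get(ctx, "",
	clientv3.WithPrefix(),
	clientv3.WithSort(clientv3.SortByModRevision, clientv3.SortAscend))
if err != nil {
	t.Fatal(err)
}
t.Logf("got %d keys", len(resp.Kvs))

// Read at a historical revision, as in the Revision: 4 case above.
if _, err := cli.Get(ctx, "", clientv3.WithPrefix(), clientv3.WithRev(4)); err != nil {
	t.Fatal(err)
}
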
diff --git a/tests/common/lease_test.go b/tests/common/lease_test.go
new file mode 100644
index 00000000000..afb27c330f9
--- /dev/null
+++ b/tests/common/lease_test.go
@@ -0,0 +1,222 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+func TestLeaseGrantTimeToLive(t *testing.T) {
+ testRunner.BeforeTest(t)
+
+ for _, tc := range clusterTestCases() {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+
+ testutils.ExecuteUntil(ctx, t, func() {
+ ttl := int64(10)
+ leaseResp, err := cc.Grant(ctx, ttl)
+ require.NoError(t, err)
+
+ ttlResp, err := cc.TimeToLive(ctx, leaseResp.ID, config.LeaseOption{})
+ require.NoError(t, err)
+ require.Equal(t, ttl, ttlResp.GrantedTTL)
+ })
+ })
+ }
+}
+
+func TestLeaseGrantAndList(t *testing.T) {
+ testRunner.BeforeTest(t)
+
+ for _, tc := range clusterTestCases() {
+ nestedCases := []struct {
+ name string
+ leaseCount int
+ }{
+ {
+ name: "no_leases",
+ leaseCount: 0,
+ },
+ {
+ name: "one_lease",
+ leaseCount: 1,
+ },
+ {
+ name: "many_leases",
+ leaseCount: 3,
+ },
+ }
+
+ for _, nc := range nestedCases {
+ t.Run(tc.name+"/"+nc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ t.Logf("Creating cluster...")
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ t.Logf("Created cluster and client")
+ testutils.ExecuteUntil(ctx, t, func() {
+ var createdLeases []clientv3.LeaseID
+ for i := 0; i < nc.leaseCount; i++ {
+ leaseResp, err := cc.Grant(ctx, 10)
+ t.Logf("Grant returned: resp:%s err:%v", leaseResp.String(), err)
+ require.NoError(t, err)
+ createdLeases = append(createdLeases, leaseResp.ID)
+ }
+
+ // Because we're not guaranteed to talk to the same member, wait for
+ // listing to eventually return true, either by the result propagating
+ // or by hitting an up to date member.
+ var leases []clientv3.LeaseStatus
+ require.Eventually(t, func() bool {
+ resp, err := cc.Leases(ctx)
+ if err != nil {
+ return false
+ }
+ leases = resp.Leases
+ // TODO: update this to use last Revision from leaseResp
+ // after https://github.com/etcd-io/etcd/issues/13989 is fixed
+ return len(leases) == len(createdLeases)
+ }, 2*time.Second, 10*time.Millisecond)
+
+ returnedLeases := make([]clientv3.LeaseID, 0, nc.leaseCount)
+ for _, status := range leases {
+ returnedLeases = append(returnedLeases, status.ID)
+ }
+
+ require.ElementsMatch(t, createdLeases, returnedLeases)
+ })
+ })
+ }
+ }
+}
+
+func TestLeaseGrantTimeToLiveExpired(t *testing.T) {
+ testRunner.BeforeTest(t)
+
+ for _, tc := range clusterTestCases() {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+
+ testutils.ExecuteUntil(ctx, t, func() {
+ leaseResp, err := cc.Grant(ctx, 2)
+ require.NoError(t, err)
+
+ err = cc.Put(ctx, "foo", "bar", config.PutOptions{LeaseID: leaseResp.ID})
+ require.NoError(t, err)
+
+ getResp, err := cc.Get(ctx, "foo", config.GetOptions{})
+ require.NoError(t, err)
+ require.Equal(t, int64(1), getResp.Count)
+
+ time.Sleep(3 * time.Second)
+
+ ttlResp, err := cc.TimeToLive(ctx, leaseResp.ID, config.LeaseOption{})
+ require.NoError(t, err)
+ require.Equal(t, int64(-1), ttlResp.TTL)
+
+ getResp, err = cc.Get(ctx, "foo", config.GetOptions{})
+ require.NoError(t, err)
+ // Value should expire with the lease
+ require.Equal(t, int64(0), getResp.Count)
+ })
+ })
+ }
+}
+
+func TestLeaseGrantKeepAliveOnce(t *testing.T) {
+ testRunner.BeforeTest(t)
+
+ for _, tc := range clusterTestCases() {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+
+ testutils.ExecuteUntil(ctx, t, func() {
+ leaseResp, err := cc.Grant(ctx, 2)
+ require.NoError(t, err)
+
+ _, err = cc.KeepAliveOnce(ctx, leaseResp.ID)
+ require.NoError(t, err)
+
+ time.Sleep(2 * time.Second) // Wait for the original lease to expire
+
+ ttlResp, err := cc.TimeToLive(ctx, leaseResp.ID, config.LeaseOption{})
+ require.NoError(t, err)
+ // We still have a lease!
+ require.Greater(t, int64(2), ttlResp.TTL)
+ })
+ })
+ }
+}
+
+func TestLeaseGrantRevoke(t *testing.T) {
+ testRunner.BeforeTest(t)
+
+ for _, tc := range clusterTestCases() {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+
+ testutils.ExecuteUntil(ctx, t, func() {
+ leaseResp, err := cc.Grant(ctx, 20)
+ require.NoError(t, err)
+
+ err = cc.Put(ctx, "foo", "bar", config.PutOptions{LeaseID: leaseResp.ID})
+ require.NoError(t, err)
+
+ getResp, err := cc.Get(ctx, "foo", config.GetOptions{})
+ require.NoError(t, err)
+ require.Equal(t, int64(1), getResp.Count)
+
+ _, err = cc.Revoke(ctx, leaseResp.ID)
+ require.NoError(t, err)
+
+ ttlResp, err := cc.TimeToLive(ctx, leaseResp.ID, config.LeaseOption{})
+ require.NoError(t, err)
+ require.Equal(t, int64(-1), ttlResp.TTL)
+
+ getResp, err = cc.Get(ctx, "foo", config.GetOptions{})
+ require.NoError(t, err)
+ // Value should expire with the lease
+ require.Equal(t, int64(0), getResp.Count)
+ })
+ })
+ }
+}
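
For orientation, the grant/attach/expiry flow exercised above maps onto the raw clientv3 API roughly as follows (cli and ctx are assumed to exist):

// Sketch: grant a lease, attach a key to it, and inspect the remaining TTL.
lease, err := cli.Grant(ctx, 10) // 10 second TTL
if err != nil {
	t.Fatal(err)
}
if _, err := cli.Put(ctx, "foo", "bar", clientv3.WithLease(lease.ID)); err != nil {
	t.Fatal(err)
}
ttl, err := cli.TimeToLive(ctx, lease.ID)
if err != nil {
	t.Fatal(err)
}
t.Logf("remaining TTL: %ds (-1 means the lease no longer exists)", ttl.TTL)
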
diff --git a/tests/common/main_test.go b/tests/common/main_test.go
new file mode 100644
index 00000000000..be5e5a17d4d
--- /dev/null
+++ b/tests/common/main_test.go
@@ -0,0 +1,36 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "testing"
+
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ intf "go.etcd.io/etcd/tests/v3/framework/interfaces"
+)
+
+var (
+ testRunner intf.TestRunner
+ clusterTestCases func() []testCase
+)
+
+func TestMain(m *testing.M) {
+ testRunner.TestMain(m)
+}
+
+type testCase struct {
+ name string
+ config config.ClusterConfig
+}
diff --git a/tests/common/maintenance_auth_test.go b/tests/common/maintenance_auth_test.go
new file mode 100644
index 00000000000..21a72580ede
--- /dev/null
+++ b/tests/common/maintenance_auth_test.go
@@ -0,0 +1,241 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ intf "go.etcd.io/etcd/tests/v3/framework/interfaces"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+/*
+Test Defragment
+*/
+func TestDefragmentWithNoAuth(t *testing.T) {
+ testDefragmentWithAuth(t, false, true)
+}
+
+func TestDefragmentWithInvalidAuth(t *testing.T) {
+ testDefragmentWithAuth(t, true, true, WithAuth("invalid", "invalid"))
+}
+
+func TestDefragmentWithRootAuth(t *testing.T) {
+ testDefragmentWithAuth(t, false, false, WithAuth("root", "rootPass"))
+}
+
+func TestDefragmentWithUserAuth(t *testing.T) {
+ testDefragmentWithAuth(t, false, true, WithAuth("user0", "user0Pass"))
+}
+
+func testDefragmentWithAuth(t *testing.T, expectConnectionError, expectOperationError bool, opts ...config.ClientOption) {
+ testMaintenanceOperationWithAuth(t, expectConnectionError, expectOperationError, func(ctx context.Context, cc intf.Client) error {
+ return cc.Defragment(ctx, config.DefragOption{Timeout: 10 * time.Second})
+ }, opts...)
+}
+
+/*
+Test Downgrade
+*/
+func TestDowngradeWithNoAuth(t *testing.T) {
+ testDowngradeWithAuth(t, false, true)
+}
+
+func TestDowngradeWithInvalidAuth(t *testing.T) {
+ testDowngradeWithAuth(t, true, true, WithAuth("invalid", "invalid"))
+}
+
+func TestDowngradeWithRootAuth(t *testing.T) {
+ testDowngradeWithAuth(t, false, false, WithAuth("root", "rootPass"))
+}
+
+func TestDowngradeWithUserAuth(t *testing.T) {
+ testDowngradeWithAuth(t, false, true, WithAuth("user0", "user0Pass"))
+}
+
+func testDowngradeWithAuth(t *testing.T, _expectConnectionError, _expectOperationError bool, _opts ...config.ClientOption) {
+ // TODO(ahrtr): finish this after we added interface methods `Downgrade` into `Client`
+ t.Skip()
+}
+
+/*
+Test HashKV
+*/
+func TestHashKVWithNoAuth(t *testing.T) {
+ testHashKVWithAuth(t, false, true)
+}
+
+func TestHashKVWithInvalidAuth(t *testing.T) {
+ testHashKVWithAuth(t, true, true, WithAuth("invalid", "invalid"))
+}
+
+func TestHashKVWithRootAuth(t *testing.T) {
+ testHashKVWithAuth(t, false, false, WithAuth("root", "rootPass"))
+}
+
+func TestHashKVWithUserAuth(t *testing.T) {
+ testHashKVWithAuth(t, false, true, WithAuth("user0", "user0Pass"))
+}
+
+func testHashKVWithAuth(t *testing.T, expectConnectionError, expectOperationError bool, opts ...config.ClientOption) {
+ testMaintenanceOperationWithAuth(t, expectConnectionError, expectOperationError, func(ctx context.Context, cc intf.Client) error {
+ _, err := cc.HashKV(ctx, 0)
+ return err
+ }, opts...)
+}
+
+/*
+Test MoveLeader
+*/
+func TestMoveLeaderWithNoAuth(t *testing.T) {
+ testMoveLeaderWithAuth(t, false, true)
+}
+
+func TestMoveLeaderWithInvalidAuth(t *testing.T) {
+ testMoveLeaderWithAuth(t, true, true, WithAuth("invalid", "invalid"))
+}
+
+func TestMoveLeaderWithRootAuth(t *testing.T) {
+ testMoveLeaderWithAuth(t, false, false, WithAuth("root", "rootPass"))
+}
+
+func TestMoveLeaderWithUserAuth(t *testing.T) {
+ testMoveLeaderWithAuth(t, false, true, WithAuth("user0", "user0Pass"))
+}
+
+func testMoveLeaderWithAuth(t *testing.T, _expectConnectionError, _expectOperationError bool, _opts ...config.ClientOption) {
+ // TODO(ahrtr): finish this after we added interface methods `MoveLeader` into `Client`
+ t.Skip()
+}
+
+/*
+Test Snapshot
+*/
+func TestSnapshotWithNoAuth(t *testing.T) {
+ testSnapshotWithAuth(t, false, true)
+}
+
+func TestSnapshotWithInvalidAuth(t *testing.T) {
+ testSnapshotWithAuth(t, true, true, WithAuth("invalid", "invalid"))
+}
+
+func TestSnapshotWithRootAuth(t *testing.T) {
+ testSnapshotWithAuth(t, false, false, WithAuth("root", "rootPass"))
+}
+
+func TestSnapshotWithUserAuth(t *testing.T) {
+ testSnapshotWithAuth(t, false, true, WithAuth("user0", "user0Pass"))
+}
+
+func testSnapshotWithAuth(t *testing.T, _expectConnectionError, _expectOperationError bool, _opts ...config.ClientOption) {
+ // TODO(ahrtr): finish this after we added interface methods `Snapshot` into `Client`
+ t.Skip()
+}
+
+/*
+Test Status
+*/
+func TestStatusWithNoAuth(t *testing.T) {
+ testStatusWithAuth(t, false, true)
+}
+
+func TestStatusWithInvalidAuth(t *testing.T) {
+ testStatusWithAuth(t, true, true, WithAuth("invalid", "invalid"))
+}
+
+func TestStatusWithRootAuth(t *testing.T) {
+ testStatusWithAuth(t, false, false, WithAuth("root", "rootPass"))
+}
+
+func TestStatusWithUserAuth(t *testing.T) {
+ testStatusWithAuth(t, false, true, WithAuth("user0", "user0Pass"))
+}
+
+func testStatusWithAuth(t *testing.T, expectConnectionError, expectOperationError bool, opts ...config.ClientOption) {
+ testMaintenanceOperationWithAuth(t, expectConnectionError, expectOperationError, func(ctx context.Context, cc intf.Client) error {
+ _, err := cc.Status(ctx)
+ return err
+ }, opts...)
+}
+
+func setupAuthForMaintenanceTest(c intf.Client) error {
+ roles := []authRole{
+ {
+ role: "role0",
+ permission: clientv3.PermissionType(clientv3.PermReadWrite),
+ key: "foo",
+ },
+ }
+
+ users := []authUser{
+ {
+ user: "root",
+ pass: "rootPass",
+ role: "root",
+ },
+ {
+ user: "user0",
+ pass: "user0Pass",
+ role: "role0",
+ },
+ }
+
+ return setupAuth(c, roles, users)
+}
+
+func testMaintenanceOperationWithAuth(t *testing.T, expectConnectError, expectOperationError bool, f func(context.Context, intf.Client) error, opts ...config.ClientOption) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ clus := testRunner.NewCluster(ctx, t)
+ defer clus.Close()
+
+ cc := testutils.MustClient(clus.Client())
+ err := setupAuthForMaintenanceTest(cc)
+ require.NoError(t, err)
+
+ ccWithAuth, err := clus.Client(opts...)
+ if expectConnectError {
+ require.Errorf(t, err, "%s: expected connection error, but got successful response", t.Name())
+ t.Logf("%s: connection error: %v", t.Name(), err)
+ return
+ }
+ require.NoErrorf(t, err, "%s: unexpected connection error", t.Name())
+
+ // sleep 1 second to wait for etcd cluster to finish the authentication process.
+ // TODO(ahrtr): find a better way to do it.
+ time.Sleep(1 * time.Second)
+ testutils.ExecuteUntil(ctx, t, func() {
+ err := f(ctx, ccWithAuth)
+
+ if expectOperationError {
+ require.Errorf(t, err, "%s: expected error, but got successful response", t.Name())
+ t.Logf("%s: operation error: %v", t.Name(), err)
+ return
+ }
+
+ require.NoErrorf(t, err, "%s: unexpected operation error", t.Name())
+ })
+}
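
New maintenance RPCs can reuse the same scaffold once they are exposed on the Client interface. A hypothetical example (AlarmList here is an assumption for illustration, not something added by this change):

// Hypothetical: hook an assumed AlarmList client method into the shared auth scaffold.
func testAlarmListWithAuth(t *testing.T, expectConnectionError, expectOperationError bool, opts ...config.ClientOption) {
	testMaintenanceOperationWithAuth(t, expectConnectionError, expectOperationError, func(ctx context.Context, cc intf.Client) error {
		_, err := cc.AlarmList(ctx) // assumed method on intf.Client
		return err
	}, opts...)
}
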
diff --git a/tests/common/member_test.go b/tests/common/member_test.go
new file mode 100644
index 00000000000..1efb95039a6
--- /dev/null
+++ b/tests/common/member_test.go
@@ -0,0 +1,287 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ intf "go.etcd.io/etcd/tests/v3/framework/interfaces"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+func TestMemberList(t *testing.T) {
+ testRunner.BeforeTest(t)
+
+ for _, tc := range clusterTestCases() {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+
+ testutils.ExecuteUntil(ctx, t, func() {
+ resp, err := cc.MemberList(ctx, false)
+ require.NoErrorf(t, err, "could not get member list")
+ expectNum := len(clus.Members())
+ gotNum := len(resp.Members)
+ if expectNum != gotNum {
+ t.Fatalf("number of members not equal, expect: %d, got: %d", expectNum, gotNum)
+ }
+ assert.Eventually(t, func() bool {
+ resp, err := cc.MemberList(ctx, false)
+ if err != nil {
+ t.Logf("Failed to get member list, err: %v", err)
+ return false
+ }
+ for _, m := range resp.Members {
+ if len(m.ClientURLs) == 0 {
+ t.Logf("member is not started, memberID:%d, memberName:%s", m.ID, m.Name)
+ return false
+ }
+ }
+ return true
+ }, time.Second*5, time.Millisecond*100)
+ })
+ })
+ }
+}
+
+func TestMemberAdd(t *testing.T) {
+ testRunner.BeforeTest(t)
+
+ learnerTcs := []struct {
+ name string
+ learner bool
+ }{
+ {
+ name: "NotLearner",
+ learner: false,
+ },
+ {
+ name: "Learner",
+ learner: true,
+ },
+ }
+
+ quorumTcs := []struct {
+ name string
+ strictReconfigCheck bool
+ waitForQuorum bool
+ expectError bool
+ }{
+ {
+ name: "StrictReconfigCheck/WaitForQuorum",
+ strictReconfigCheck: true,
+ waitForQuorum: true,
+ },
+ {
+ name: "StrictReconfigCheck/NoWaitForQuorum",
+ strictReconfigCheck: true,
+ expectError: true,
+ },
+ {
+ name: "DisableStrictReconfigCheck/WaitForQuorum",
+ waitForQuorum: true,
+ },
+ {
+ name: "DisableStrictReconfigCheck/NoWaitForQuorum",
+ },
+ }
+
+ for _, learnerTc := range learnerTcs {
+ for _, quorumTc := range quorumTcs {
+ for _, clusterTc := range clusterTestCases() {
+ t.Run(learnerTc.name+"/"+quorumTc.name+"/"+clusterTc.name, func(t *testing.T) {
+ ctxTimeout := 10 * time.Second
+ if quorumTc.waitForQuorum {
+ ctxTimeout += etcdserver.HealthInterval
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), ctxTimeout)
+ defer cancel()
+ c := clusterTc.config
+ c.StrictReconfigCheck = quorumTc.strictReconfigCheck
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(c))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+
+ testutils.ExecuteUntil(ctx, t, func() {
+ var addResp *clientv3.MemberAddResponse
+ var err error
+ if quorumTc.waitForQuorum {
+ time.Sleep(etcdserver.HealthInterval)
+ }
+ if learnerTc.learner {
+ addResp, err = cc.MemberAddAsLearner(ctx, "newmember", []string{"http://localhost:123"})
+ } else {
+ addResp, err = cc.MemberAdd(ctx, "newmember", []string{"http://localhost:123"})
+ }
+ if quorumTc.expectError && c.ClusterSize > 1 {
+ // calling MemberAdd/MemberAddAsLearner on a single-node cluster never fails,
+ // regardless of strictReconfigCheck or waitForQuorum
+ require.ErrorContains(t, err, "etcdserver: unhealthy cluster")
+ } else {
+ require.NoErrorf(t, err, "MemberAdd failed")
+ if addResp.Member == nil {
+ t.Fatalf("MemberAdd failed, expected: member != nil, got: member == nil")
+ }
+ if addResp.Member.ID == 0 {
+ t.Fatalf("MemberAdd failed, expected: ID != 0, got: ID == 0")
+ }
+ if len(addResp.Member.PeerURLs) == 0 {
+ t.Fatalf("MemberAdd failed, expected: non-empty PeerURLs, got: empty PeerURLs")
+ }
+ }
+ })
+ })
+ }
+ }
+ }
+}
+
+func TestMemberRemove(t *testing.T) {
+ testRunner.BeforeTest(t)
+
+ tcs := []struct {
+ name string
+ strictReconfigCheck bool
+ waitForQuorum bool
+ expectSingleNodeError bool
+ expectClusterError bool
+ }{
+ {
+ name: "StrictReconfigCheck/WaitForQuorum",
+ strictReconfigCheck: true,
+ waitForQuorum: true,
+ expectSingleNodeError: true,
+ },
+ {
+ name: "StrictReconfigCheck/NoWaitForQuorum",
+ strictReconfigCheck: true,
+ expectSingleNodeError: true,
+ expectClusterError: true,
+ },
+ {
+ name: "DisableStrictReconfigCheck/WaitForQuorum",
+ waitForQuorum: true,
+ },
+ {
+ name: "DisableStrictReconfigCheck/NoWaitForQuorum",
+ },
+ }
+
+ for _, quorumTc := range tcs {
+ for _, clusterTc := range clusterTestCases() {
+ if !quorumTc.strictReconfigCheck && clusterTc.config.ClusterSize == 1 {
+ // skip these test cases
+ // when strictReconfigCheck is disabled, calling MemberRemove will cause the single node to panic
+ continue
+ }
+ t.Run(quorumTc.name+"/"+clusterTc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 14*time.Second)
+ defer cancel()
+ c := clusterTc.config
+ c.StrictReconfigCheck = quorumTc.strictReconfigCheck
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(c))
+ defer clus.Close()
+ // client connects to a specific member which won't be removed from cluster
+ cc := clus.Members()[0].Client()
+
+ testutils.ExecuteUntil(ctx, t, func() {
+ if quorumTc.waitForQuorum {
+ // wait for health interval + leader election
+ time.Sleep(etcdserver.HealthInterval + 2*time.Second)
+ }
+
+ memberID, clusterID := memberToRemove(ctx, t, cc, c.ClusterSize)
+ removeResp, err := cc.MemberRemove(ctx, memberID)
+
+ if c.ClusterSize == 1 && quorumTc.expectSingleNodeError {
+ require.ErrorContains(t, err, "etcdserver: re-configuration failed due to not enough started members")
+ return
+ }
+
+ if c.ClusterSize > 1 && quorumTc.expectClusterError {
+ require.ErrorContains(t, err, "etcdserver: unhealthy cluster")
+ return
+ }
+
+ require.NoErrorf(t, err, "MemberRemove failed")
+ t.Logf("removeResp.Members:%v", removeResp.Members)
+ if removeResp.Header.ClusterId != clusterID {
+ t.Fatalf("MemberRemove failed, expected ClusterID: %d, got: %d", clusterID, removeResp.Header.ClusterId)
+ }
+ if len(removeResp.Members) != c.ClusterSize-1 {
+ t.Fatalf("MemberRemove failed, expected length of members: %d, got: %d", c.ClusterSize-1, len(removeResp.Members))
+ }
+ for _, m := range removeResp.Members {
+ if m.ID == memberID {
+ t.Fatalf("MemberRemove failed, member(id=%d) is still in cluster", memberID)
+ }
+ }
+ })
+ })
+ }
+ }
+}
+
+// memberToRemove chooses a member to remove.
+// If clusterSize == 1, return the only member.
+// Otherwise, return a member that the client has not connected to.
+// This ensures that the `MemberRemove` call does not return an "etcdserver: server stopped" error.
+func memberToRemove(ctx context.Context, t *testing.T, client intf.Client, clusterSize int) (memberID uint64, clusterID uint64) {
+ listResp, err := client.MemberList(ctx, false)
+ require.NoError(t, err)
+
+ clusterID = listResp.Header.ClusterId
+ if clusterSize == 1 {
+ memberID = listResp.Members[0].ID
+ } else {
+ // get status of the specific member that client has connected to
+ statusResp, err := client.Status(ctx)
+ require.NoError(t, err)
+
+ // choose a member that client has not connected to
+ for _, m := range listResp.Members {
+ if m.ID != statusResp[0].Header.MemberId {
+ memberID = m.ID
+ break
+ }
+ }
+ if memberID == 0 {
+ t.Fatalf("memberToRemove failed. listResp:%v, statusResp:%v", listResp, statusResp)
+ }
+ }
+ return memberID, clusterID
+}
+
+func getMemberIDToEndpoints(ctx context.Context, t *testing.T, clus intf.Cluster) (memberIDToEndpoints map[uint64]string) {
+ memberIDToEndpoints = make(map[uint64]string, len(clus.Endpoints()))
+ for _, ep := range clus.Endpoints() {
+ cc := testutils.MustClient(clus.Client(WithEndpoints([]string{ep})))
+ gresp, err := cc.Get(ctx, "health", config.GetOptions{})
+ require.NoError(t, err)
+ memberIDToEndpoints[gresp.Header.MemberId] = ep
+ }
+ return memberIDToEndpoints
+}
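
As an illustrative sketch (variable names are assumptions), getMemberIDToEndpoints can be combined with WithEndpoints to pin a client to one specific member:

// Sketch: resolve a member ID to its endpoint and build a client pinned to it.
idToEp := getMemberIDToEndpoints(ctx, t, clus)
ep := idToEp[memberID] // memberID obtained e.g. via memberToRemove above
cc := testutils.MustClient(clus.Client(WithEndpoints([]string{ep})))
_, err := cc.Status(ctx)
require.NoError(t, err)
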
diff --git a/tests/common/role_test.go b/tests/common/role_test.go
new file mode 100644
index 00000000000..e196902fd1b
--- /dev/null
+++ b/tests/common/role_test.go
@@ -0,0 +1,130 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+func TestRoleAdd_Simple(t *testing.T) {
+ testRunner.BeforeTest(t)
+ for _, tc := range clusterTestCases() {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+
+ testutils.ExecuteUntil(ctx, t, func() {
+ _, err := cc.RoleAdd(ctx, "root")
+ require.NoErrorf(t, err, "want no error, but got")
+ })
+ })
+ }
+}
+
+func TestRoleAdd_Error(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterSize(1))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ _, err := cc.RoleAdd(ctx, "test-role")
+ require.NoErrorf(t, err, "want no error, but got")
+ _, err = cc.RoleAdd(ctx, "test-role")
+ if err == nil || !strings.Contains(err.Error(), rpctypes.ErrRoleAlreadyExist.Error()) {
+ t.Fatalf("want (%v) error, but got (%v)", rpctypes.ErrRoleAlreadyExist, err)
+ }
+ _, err = cc.RoleAdd(ctx, "")
+ if err == nil || !strings.Contains(err.Error(), rpctypes.ErrRoleEmpty.Error()) {
+ t.Fatalf("want (%v) error, but got (%v)", rpctypes.ErrRoleEmpty, err)
+ }
+ })
+}
+
+func TestRootRole(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterSize(1))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ _, err := cc.RoleAdd(ctx, "root")
+ require.NoErrorf(t, err, "want no error, but got")
+ resp, err := cc.RoleGet(ctx, "root")
+ require.NoErrorf(t, err, "want no error, but got")
+ t.Logf("get role resp %+v", resp)
+ // granting to root should be refused by the server and be a no-op
+ _, err = cc.RoleGrantPermission(ctx, "root", "foo", "", clientv3.PermissionType(clientv3.PermReadWrite))
+ require.NoErrorf(t, err, "want no error, but got")
+ resp2, err := cc.RoleGet(ctx, "root")
+ require.NoErrorf(t, err, "want no error, but got")
+ t.Logf("get role resp %+v", resp2)
+ })
+}
+
+func TestRoleGrantRevokePermission(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterSize(1))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ _, err := cc.RoleAdd(ctx, "role1")
+ require.NoErrorf(t, err, "want no error, but got")
+ _, err = cc.RoleGrantPermission(ctx, "role1", "bar", "", clientv3.PermissionType(clientv3.PermRead))
+ require.NoErrorf(t, err, "want no error, but got")
+ _, err = cc.RoleGrantPermission(ctx, "role1", "bar", "", clientv3.PermissionType(clientv3.PermWrite))
+ require.NoErrorf(t, err, "want no error, but got")
+ _, err = cc.RoleGrantPermission(ctx, "role1", "bar", "foo", clientv3.PermissionType(clientv3.PermReadWrite))
+ require.NoErrorf(t, err, "want no error, but got")
+ _, err = cc.RoleRevokePermission(ctx, "role1", "foo", "")
+ if err == nil || !strings.Contains(err.Error(), rpctypes.ErrPermissionNotGranted.Error()) {
+ t.Fatalf("want error (%v), but got (%v)", rpctypes.ErrPermissionNotGranted, err)
+ }
+ _, err = cc.RoleRevokePermission(ctx, "role1", "bar", "foo")
+ require.NoErrorf(t, err, "want no error, but got")
+ })
+}
+
+func TestRoleDelete(t *testing.T) {
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterSize(1))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ _, err := cc.RoleAdd(ctx, "role1")
+ require.NoErrorf(t, err, "want no error, but got")
+ _, err = cc.RoleDelete(ctx, "role1")
+ require.NoErrorf(t, err, "want no error, but got")
+ })
+}
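
Permissions are granted on key ranges; to cover a whole prefix, the range end can be derived with clientv3.GetPrefixRangeEnd. A sketch, using the same cc and ctx as in the tests above:

// Sketch: grant read access on every key under the "foo/" prefix.
rangeEnd := clientv3.GetPrefixRangeEnd("foo/")
_, err := cc.RoleGrantPermission(ctx, "role1", "foo/", rangeEnd, clientv3.PermissionType(clientv3.PermRead))
require.NoError(t, err)
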
diff --git a/tests/common/status_test.go b/tests/common/status_test.go
new file mode 100644
index 00000000000..b3e317530be
--- /dev/null
+++ b/tests/common/status_test.go
@@ -0,0 +1,58 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+func TestStatus(t *testing.T) {
+ testRunner.BeforeTest(t)
+
+ for _, tc := range clusterTestCases() {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+
+ testutils.ExecuteUntil(ctx, t, func() {
+ rs, err := cc.Status(ctx)
+ require.NoErrorf(t, err, "could not get status")
+ if len(rs) != tc.config.ClusterSize {
+ t.Fatalf("wrong number of status responses. expected:%d, got:%d ", tc.config.ClusterSize, len(rs))
+ }
+ memberIDs := make(map[uint64]struct{})
+ for _, r := range rs {
+ if r == nil {
+ t.Fatalf("status response is nil")
+ }
+ memberIDs[r.Header.MemberId] = struct{}{}
+ }
+ if len(rs) != len(memberIDs) {
+ t.Fatalf("found duplicated members")
+ }
+ })
+ })
+ }
+}
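
With the raw client, Status is likewise one maintenance call per endpoint; roughly (cli is an assumed *clientv3.Client):

// Sketch: query every endpoint and log a few fields from each StatusResponse.
for _, ep := range cli.Endpoints() {
	st, err := cli.Status(ctx, ep)
	if err != nil {
		t.Fatalf("status of %s failed: %v", ep, err)
	}
	t.Logf("%s: member=%x raftTerm=%d dbSize=%d", ep, st.Header.MemberId, st.RaftTerm, st.DbSize)
}
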
diff --git a/tests/common/txn_test.go b/tests/common/txn_test.go
new file mode 100644
index 00000000000..975248fc80e
--- /dev/null
+++ b/tests/common/txn_test.go
@@ -0,0 +1,154 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+type txnReq struct {
+ compare []string
+ ifSuccess []string
+ ifFail []string
+ expectResults []string
+ expectError bool
+}
+
+func TestTxnSucc(t *testing.T) {
+ testRunner.BeforeTest(t)
+ reqs := []txnReq{
+ {
+ compare: []string{`value("key1") != "value2"`, `value("key2") != "value1"`},
+ ifSuccess: []string{"get key1", "get key2"},
+ expectResults: []string{"SUCCESS", "key1", "value1", "key2", "value2"},
+ },
+ {
+ compare: []string{`version("key1") = "1"`, `version("key2") = "1"`},
+ ifSuccess: []string{"get key1", "get key2", `put "key \"with\" space" "value \x23"`},
+ ifFail: []string{`put key1 "fail"`, `put key2 "fail"`},
+ expectResults: []string{"SUCCESS", "key1", "value1", "key2", "value2", "OK"},
+ },
+ {
+ compare: []string{`version("key \"with\" space") = "1"`},
+ ifSuccess: []string{`get "key \"with\" space"`},
+ expectResults: []string{"SUCCESS", `key "with" space`, "value \x23"},
+ },
+ }
+ for _, cfg := range clusterTestCases() {
+ t.Run(cfg.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(cfg.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ err := cc.Put(ctx, "key1", "value1", config.PutOptions{})
+ require.NoErrorf(t, err, "could not create key:%s, value:%s", "key1", "value1")
+ err = cc.Put(ctx, "key2", "value2", config.PutOptions{})
+ require.NoErrorf(t, err, "could not create key:%s, value:%s", "key2", "value2")
+ for _, req := range reqs {
+ resp, err := cc.Txn(ctx, req.compare, req.ifSuccess, req.ifFail, config.TxnOptions{
+ Interactive: true,
+ })
+ if err != nil {
+ t.Errorf("Txn returned error: %s", err)
+ }
+ assert.Equal(t, req.expectResults, getRespValues(resp))
+ }
+ })
+ })
+ }
+}
+
+func TestTxnFail(t *testing.T) {
+ testRunner.BeforeTest(t)
+ reqs := []txnReq{
+ {
+ compare: []string{`version("key") < "0"`},
+ ifSuccess: []string{`put key "success"`},
+ ifFail: []string{`put key "fail"`},
+ expectResults: []string{"FAILURE", "OK"},
+ },
+ {
+ compare: []string{`value("key1") != "value1"`},
+ ifSuccess: []string{`put key1 "success"`},
+ ifFail: []string{`put key1 "fail"`},
+ expectResults: []string{"FAILURE", "OK"},
+ },
+ }
+ for _, cfg := range clusterTestCases() {
+ t.Run(cfg.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(cfg.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ err := cc.Put(ctx, "key1", "value1", config.PutOptions{})
+ require.NoErrorf(t, err, "could not create key:%s, value:%s", "key1", "value1")
+ for _, req := range reqs {
+ resp, err := cc.Txn(ctx, req.compare, req.ifSuccess, req.ifFail, config.TxnOptions{
+ Interactive: true,
+ })
+ if err != nil {
+ t.Errorf("Txn returned error: %s", err)
+ }
+ assert.Equal(t, req.expectResults, getRespValues(resp))
+ }
+ })
+ })
+ }
+}
+
+func getRespValues(r *clientv3.TxnResponse) []string {
+ var ss []string
+ if r.Succeeded {
+ ss = append(ss, "SUCCESS")
+ } else {
+ ss = append(ss, "FAILURE")
+ }
+ for _, resp := range r.Responses {
+ switch v := resp.Response.(type) {
+ case *pb.ResponseOp_ResponseDeleteRange:
+ r := (clientv3.DeleteResponse)(*v.ResponseDeleteRange)
+ ss = append(ss, fmt.Sprintf("%d", r.Deleted))
+ case *pb.ResponseOp_ResponsePut:
+ r := (clientv3.PutResponse)(*v.ResponsePut)
+ ss = append(ss, "OK")
+ if r.PrevKv != nil {
+ ss = append(ss, string(r.PrevKv.Key), string(r.PrevKv.Value))
+ }
+ case *pb.ResponseOp_ResponseRange:
+ r := (clientv3.GetResponse)(*v.ResponseRange)
+ for _, kv := range r.Kvs {
+ ss = append(ss, string(kv.Key), string(kv.Value))
+ }
+ default:
+ ss = append(ss, fmt.Sprintf("\"Unknown\" : %q\n", fmt.Sprintf("%+v", v)))
+ }
+ }
+ return ss
+}
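
The interactive txn strings above map onto the fluent clientv3 Txn API. The first TestTxnSucc case, with an Else branch added for illustration in the style of TestTxnFail, corresponds roughly to (cli is an assumed *clientv3.Client):

// Rough clientv3 equivalent of the first TestTxnSucc case.
resp, err := cli.Txn(ctx).
	If(
		clientv3.Compare(clientv3.Value("key1"), "!=", "value2"),
		clientv3.Compare(clientv3.Value("key2"), "!=", "value1"),
	).
	Then(clientv3.OpGet("key1"), clientv3.OpGet("key2")).
	Else(clientv3.OpPut("key1", "fail"), clientv3.OpPut("key2", "fail")).
	Commit()
if err != nil {
	t.Fatal(err)
}
t.Logf("succeeded=%v responses=%d", resp.Succeeded, len(resp.Responses))
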
diff --git a/tests/common/unit_test.go b/tests/common/unit_test.go
new file mode 100644
index 00000000000..4b172e7a3cb
--- /dev/null
+++ b/tests/common/unit_test.go
@@ -0,0 +1,42 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !(e2e || integration)
+
+package common
+
+import (
+ "go.etcd.io/etcd/tests/v3/framework"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+)
+
+func init() {
+ testRunner = framework.UnitTestRunner
+ clusterTestCases = unitClusterTestCases
+}
+
+func unitClusterTestCases() []testCase {
+ return nil
+}
+
+// WithAuth is a no-op stub. When no build tag (e.g. e2e or integration) is
+// configured in the IDE, the IDE may complain "Unresolved reference 'WithAuth'",
+// so we define a default WithAuth to resolve such cases.
+func WithAuth(userName, password string) config.ClientOption {
+ return func(any) {}
+}
+
+func WithEndpoints(endpoints []string) config.ClientOption {
+ return func(any) {}
+}
diff --git a/tests/common/user_test.go b/tests/common/user_test.go
new file mode 100644
index 00000000000..d36118f5d0e
--- /dev/null
+++ b/tests/common/user_test.go
@@ -0,0 +1,226 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+func TestUserAdd_Simple(t *testing.T) {
+ testRunner.BeforeTest(t)
+ tcs := []struct {
+ name string
+ username string
+ password string
+ noPassword bool
+ expectedError string
+ }{
+ {
+ name: "empty_username_not_allowed",
+ username: "",
+ password: "foobar",
+ // Deliberately vague error expectation because the CLI and the API
+ // return very different error structures.
+ expectedError: "user name",
+ },
+ {
+ // Can create a user with no password, restricted to CN auth
+ name: "no_password_with_noPassword_set",
+ username: "foo",
+ password: "",
+ noPassword: true,
+ },
+ {
+ // Can create a user with no password, but not restricted to CN auth
+ name: "no_password_without_noPassword_set",
+ username: "foo",
+ password: "",
+ noPassword: false,
+ },
+ {
+ name: "regular_user_with_password",
+ username: "foo",
+ password: "bar",
+ },
+ }
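+ // Run every user-creation case against every cluster configuration
+ // supplied by the build-tag specific test runner.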
+ for _, tc := range clusterTestCases() {
+ for _, nc := range tcs {
+ t.Run(tc.name+"/"+nc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+
+ testutils.ExecuteUntil(ctx, t, func() {
+ resp, err := cc.UserAdd(ctx, nc.username, nc.password, config.UserAddOptions{NoPassword: nc.noPassword})
+ if nc.expectedError != "" {
+ if err != nil {
+ assert.ErrorContains(t, err, nc.expectedError)
+ return
+ }
+
+ t.Fatalf("expected user creation to fail")
+ }
+
+ require.NoErrorf(t, err, "expected no error")
+
+ if resp == nil {
+ t.Fatalf("unexpected nil response to successful user creation")
+ }
+ })
+ })
+ }
+ }
+}
+
+func TestUserAdd_DuplicateUserNotAllowed(t *testing.T) {
+ testRunner.BeforeTest(t)
+ for _, tc := range clusterTestCases() {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+
+ testutils.ExecuteUntil(ctx, t, func() {
+ user := "barb"
+ password := "rhubarb"
+
+ _, err := cc.UserAdd(ctx, user, password, config.UserAddOptions{})
+ require.NoErrorf(t, err, "first user creation should succeed")
+
+ _, err = cc.UserAdd(ctx, user, password, config.UserAddOptions{})
+ require.Errorf(t, err, "duplicate user creation should fail")
+ assert.Contains(t, err.Error(), "etcdserver: user name already exists")
+ })
+ })
+ }
+}
+
+func TestUserList(t *testing.T) {
+ testRunner.BeforeTest(t)
+ for _, tc := range clusterTestCases() {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+
+ testutils.ExecuteUntil(ctx, t, func() {
+ // No Users Yet
+ resp, err := cc.UserList(ctx)
+ require.NoErrorf(t, err, "user listing should succeed")
+ if len(resp.Users) != 0 {
+ t.Fatalf("expected no pre-existing users, found: %q", resp.Users)
+ }
+
+ user := "barb"
+ password := "rhubarb"
+
+ _, err = cc.UserAdd(ctx, user, password, config.UserAddOptions{})
+ require.NoErrorf(t, err, "user creation should succeed")
+
+ // Users!
+ resp, err = cc.UserList(ctx)
+ require.NoErrorf(t, err, "user listing should succeed")
+ if len(resp.Users) != 1 {
+ t.Fatalf("expected one user, found: %q", resp.Users)
+ }
+ })
+ })
+ }
+}
+
+func TestUserDelete(t *testing.T) {
+ testRunner.BeforeTest(t)
+ for _, tc := range clusterTestCases() {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+
+ testutils.ExecuteUntil(ctx, t, func() {
+ user := "barb"
+ password := "rhubarb"
+
+ _, err := cc.UserAdd(ctx, user, password, config.UserAddOptions{})
+ require.NoErrorf(t, err, "user creation should succeed")
+
+ resp, err := cc.UserList(ctx)
+ require.NoErrorf(t, err, "user listing should succeed")
+ if len(resp.Users) != 1 {
+ t.Fatalf("expected one user, found: %q", resp.Users)
+ }
+
+ // Delete barb, sorry barb!
+ _, err = cc.UserDelete(ctx, user)
+ require.NoErrorf(t, err, "user deletion should succeed at first")
+
+ resp, err = cc.UserList(ctx)
+ require.NoErrorf(t, err, "user listing should succeed")
+ if len(resp.Users) != 0 {
+ t.Fatalf("expected no users after deletion, found: %q", resp.Users)
+ }
+
+ // Try to delete barb again
+ _, err = cc.UserDelete(ctx, user)
+ require.Errorf(t, err, "deleting a non-existent user should fail")
+ assert.Contains(t, err.Error(), "user name not found")
+ })
+ })
+ }
+}
+
+func TestUserChangePassword(t *testing.T) {
+ testRunner.BeforeTest(t)
+ for _, tc := range clusterTestCases() {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+
+ testutils.ExecuteUntil(ctx, t, func() {
+ user := "barb"
+ password := "rhubarb"
+ newPassword := "potato"
+
+ _, err := cc.UserAdd(ctx, user, password, config.UserAddOptions{})
+ require.NoErrorf(t, err, "user creation should succeed")
+
+ err = cc.UserChangePass(ctx, user, newPassword)
+ require.NoErrorf(t, err, "user password change should succeed")
+
+ err = cc.UserChangePass(ctx, "non-existent-user", newPassword)
+ require.Errorf(t, err, "user password change for non-existent user should fail")
+ assert.Contains(t, err.Error(), "user name not found")
+ })
+ })
+ }
+}
diff --git a/tests/common/wait_leader_test.go b/tests/common/wait_leader_test.go
new file mode 100644
index 00000000000..faa1f716cd7
--- /dev/null
+++ b/tests/common/wait_leader_test.go
@@ -0,0 +1,79 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "go.etcd.io/etcd/tests/v3/framework/config"
+)
+
+func TestWaitLeader(t *testing.T) {
+ testRunner.BeforeTest(t)
+
+ for _, tc := range clusterTestCases() {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+
+ leader := clus.WaitLeader(t)
+ if leader < 0 || leader >= len(clus.Members()) {
+ t.Fatalf("WaitLeader failed for the leader index (%d) is out of range, cluster member count: %d", leader, len(clus.Members()))
+ }
+ })
+ }
+}
+
+func TestWaitLeader_MemberStop(t *testing.T) {
+ testRunner.BeforeTest(t)
+ tcs := []testCase{
+ {
+ name: "PeerTLS",
+ config: config.NewClusterConfig(config.WithPeerTLS(config.ManualTLS)),
+ },
+ {
+ name: "PeerAutoTLS",
+ config: config.NewClusterConfig(config.WithPeerTLS(config.AutoTLS)),
+ },
+ }
+
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+ defer clus.Close()
+
+ lead1 := clus.WaitLeader(t)
+ if lead1 < 0 || lead1 >= len(clus.Members()) {
+ t.Fatalf("WaitLeader failed for the leader index (%d) is out of range, cluster member count: %d", lead1, len(clus.Members()))
+ }
+
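+ // Stopping the current leader forces a new election; WaitLeader should
+ // then report a different member as the leader.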
+ clus.Members()[lead1].Stop()
+ lead2 := clus.WaitLeader(t)
+ if lead2 < 0 || lead2 >= len(clus.Members()) {
+ t.Fatalf("WaitLeader failed for the leader index (%d) is out of range, cluster member count: %d", lead2, len(clus.Members()))
+ }
+
+ if lead1 == lead2 {
+ t.Fatalf("WaitLeader failed for the leader(index=%d) did not change as expected after a member stopped", lead1)
+ }
+ })
+ }
+}
diff --git a/tests/common/watch_test.go b/tests/common/watch_test.go
new file mode 100644
index 00000000000..67187d59698
--- /dev/null
+++ b/tests/common/watch_test.go
@@ -0,0 +1,97 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+func TestWatch(t *testing.T) {
+ testRunner.BeforeTest(t)
+ watchTimeout := 1 * time.Second
+ for _, tc := range clusterTestCases() {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
+
+ defer clus.Close()
+ cc := testutils.MustClient(clus.Client())
+ testutils.ExecuteUntil(ctx, t, func() {
+ tests := []struct {
+ puts []testutils.KV
+ watchKey string
+ opts config.WatchOptions
+ wanted []testutils.KV
+ }{
+ { // watch by revision
+ puts: []testutils.KV{{Key: "bar", Val: "revision_1"}, {Key: "bar", Val: "revision_2"}, {Key: "bar", Val: "revision_3"}},
+ watchKey: "bar",
+ opts: config.WatchOptions{Revision: 3},
+ wanted: []testutils.KV{{Key: "bar", Val: "revision_2"}, {Key: "bar", Val: "revision_3"}},
+ },
+ { // watch 1 key
+ puts: []testutils.KV{{Key: "sample", Val: "value"}},
+ watchKey: "sample",
+ opts: config.WatchOptions{Revision: 1},
+ wanted: []testutils.KV{{Key: "sample", Val: "value"}},
+ },
+ { // watch 3 keys by prefix
+ puts: []testutils.KV{{Key: "foo1", Val: "val1"}, {Key: "foo2", Val: "val2"}, {Key: "foo3", Val: "val3"}},
+ watchKey: "foo",
+ opts: config.WatchOptions{Revision: 1, Prefix: true},
+ wanted: []testutils.KV{{Key: "foo1", Val: "val1"}, {Key: "foo2", Val: "val2"}, {Key: "foo3", Val: "val3"}},
+ },
+ { // watch 3 keys by range
+ puts: []testutils.KV{{Key: "key1", Val: "val1"}, {Key: "key3", Val: "val3"}, {Key: "key2", Val: "val2"}},
+ watchKey: "key",
+ opts: config.WatchOptions{Revision: 1, RangeEnd: "key3"},
+ wanted: []testutils.KV{{Key: "key1", Val: "val1"}, {Key: "key2", Val: "val2"}},
+ },
+ }
+
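+ // For each case: open the watch first, then issue the puts, and expect
+ // the watch channel to deliver the wanted events within watchTimeout.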
+ for _, tt := range tests {
+ wCtx, wCancel := context.WithCancel(ctx)
+ wch := cc.Watch(wCtx, tt.watchKey, tt.opts)
+ if wch == nil {
+ t.Fatalf("failed to watch %s", tt.watchKey)
+ }
+
+ for j := range tt.puts {
+ err := cc.Put(ctx, tt.puts[j].Key, tt.puts[j].Val, config.PutOptions{})
+ require.NoErrorf(t, err, "can't not put key %q, err: %s", tt.puts[j].Key, err)
+ }
+
+ kvs, err := testutils.KeyValuesFromWatchChan(wch, len(tt.wanted), watchTimeout)
+ if err != nil {
+ wCancel()
+ require.NoErrorf(t, err, "failed to get key-values from watch channel %s", err)
+ }
+
+ wCancel()
+ assert.Equal(t, tt.wanted, kvs)
+ }
+ })
+ })
+ }
+}
diff --git a/tests/docker-dns-srv/Dockerfile b/tests/docker-dns-srv/Dockerfile
deleted file mode 100644
index dbc3f4bdc69..00000000000
--- a/tests/docker-dns-srv/Dockerfile
+++ /dev/null
@@ -1,44 +0,0 @@
-FROM ubuntu:18.04
-
-RUN rm /bin/sh && ln -s /bin/bash /bin/sh
-RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
-
-RUN apt-get -y update \
- && apt-get -y install \
- build-essential \
- gcc \
- apt-utils \
- pkg-config \
- software-properties-common \
- apt-transport-https \
- libssl-dev \
- sudo \
- bash \
- curl \
- tar \
- git \
- netcat \
- bind9 \
- dnsutils \
- && apt-get -y update \
- && apt-get -y upgrade \
- && apt-get -y autoremove \
- && apt-get -y autoclean
-
-ENV GOROOT /usr/local/go
-ENV GOPATH /go
-ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH}
-ENV GO_VERSION REPLACE_ME_GO_VERSION
-ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang
-RUN rm -rf ${GOROOT} \
- && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \
- && mkdir -p ${GOPATH}/src ${GOPATH}/bin \
- && go version \
- && go get -v -u github.com/mattn/goreman
-
-RUN mkdir -p /var/bind /etc/bind
-RUN chown root:bind /var/bind /etc/bind
-
-ADD named.conf etcd.zone rdns.zone /etc/bind/
-RUN chown root:bind /etc/bind/named.conf /etc/bind/etcd.zone /etc/bind/rdns.zone
-ADD resolv.conf /etc/resolv.conf
diff --git a/tests/docker-dns-srv/certs-gateway/Procfile b/tests/docker-dns-srv/certs-gateway/Procfile
deleted file mode 100644
index 7e3c3d9368a..00000000000
--- a/tests/docker-dns-srv/certs-gateway/Procfile
+++ /dev/null
@@ -1,7 +0,0 @@
-etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs-gateway/server.crt --peer-key-file=/certs-gateway/server.key.insecure --peer-trusted-ca-file=/certs-gateway/ca.crt --peer-client-cert-auth --cert-file=/certs-gateway/server.crt --key-file=/certs-gateway/server.key.insecure --trusted-ca-file=/certs-gateway/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs-gateway/server.crt --peer-key-file=/certs-gateway/server.key.insecure --peer-trusted-ca-file=/certs-gateway/ca.crt --peer-client-cert-auth --cert-file=/certs-gateway/server.crt --key-file=/certs-gateway/server.key.insecure --trusted-ca-file=/certs-gateway/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs-gateway/server.crt --peer-key-file=/certs-gateway/server.key.insecure --peer-trusted-ca-file=/certs-gateway/ca.crt --peer-client-cert-auth --cert-file=/certs-gateway/server.crt --key-file=/certs-gateway/server.key.insecure --trusted-ca-file=/certs-gateway/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-gateway: ./etcd gateway start --discovery-srv etcd.local --trusted-ca-file /certs-gateway/ca.crt --listen-addr 127.0.0.1:23790
diff --git a/tests/docker-dns-srv/certs-gateway/ca-csr.json b/tests/docker-dns-srv/certs-gateway/ca-csr.json
deleted file mode 100644
index ecafabaadd3..00000000000
--- a/tests/docker-dns-srv/certs-gateway/ca-csr.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "CN": "ca",
- "ca": {
- "expiry": "87600h"
- }
-}
diff --git a/tests/docker-dns-srv/certs-gateway/ca.crt b/tests/docker-dns-srv/certs-gateway/ca.crt
deleted file mode 100644
index 19b26c45551..00000000000
--- a/tests/docker-dns-srv/certs-gateway/ca.crt
+++ /dev/null
@@ -1,22 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDsTCCApmgAwIBAgIUbQA3lX1hcR1W8D5wmmAwaLp4AWQwDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzEyMDExOTI5MDBaFw0yNzExMjkxOTI5
-MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
-AoIBAQDdZjG+dJixdUuZLIlPVE/qvqNqbgIQy3Hrgq9OlPevLu3FAKIgTHoSKugq
-jOuBjzAtmbGTky3PPmkjWrOUWKEUYMuJJzXA1fO2NALXle47NVyVVfuwCmDnaAAL
-Sw4QTZKREoe3EwswbeYguQinCqazRwbXMzzfypIfaHAyGrqFCq12IvarrjfDcamm
-egtPkxNNdj1QHbkeYXcp76LOSBRjD2B3bzZvyVv/wPORaGTFXQ0feGz/93/Y/E0z
-BL5TdZ84qmgKxW04hxkhhuuxsL5zDNpbXcGm//Zw9qzO/AvtEux6ag9t0JziiEtj
-zLz5M7yXivfG4oxEeLKTieS/1ZkbAgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS
-BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBR7XtZP3fc6ElgHl6hdSHLmrFWj
-MzANBgkqhkiG9w0BAQsFAAOCAQEAPy3ol3CPyFxuWD0IGKde26p1mT8cdoaeRbOa
-2Z3GMuRrY2ojaKMfXuroOi+5ZbR9RSvVXhVX5tEMOSy81tb5OGPZP24Eroh4CUfK
-bw7dOeBNCm9tcmHkV+5frJwOgjN2ja8W8jBlV1flLx+Jpyk2PSGun5tQPsDlqzor
-E8QQ2FzCzxoGiEpB53t5gKeX+mH6gS1c5igJ5WfsEGXBC4xJm/u8/sg30uCGP6kT
-tCoQ8gnvGen2OqYJEfCIEk28/AZJvJ90TJFS3ExXJpyfImK9j5VcTohW+KvcX5xF
-W7M6KCGVBQtophobt3v/Zs4f11lWck9xVFCPGn9+LI1dbJUIIQ==
------END CERTIFICATE-----
diff --git a/tests/docker-dns-srv/certs-gateway/gencert.json b/tests/docker-dns-srv/certs-gateway/gencert.json
deleted file mode 100644
index 09b67267bb2..00000000000
--- a/tests/docker-dns-srv/certs-gateway/gencert.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "signing": {
- "default": {
- "usages": [
- "signing",
- "key encipherment",
- "server auth",
- "client auth"
- ],
- "expiry": "87600h"
- }
- }
-}
diff --git a/tests/docker-dns-srv/certs-gateway/gencerts.sh b/tests/docker-dns-srv/certs-gateway/gencerts.sh
deleted file mode 100755
index af8663e09eb..00000000000
--- a/tests/docker-dns-srv/certs-gateway/gencerts.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-if ! [[ "$0" =~ "./gencerts.sh" ]]; then
- echo "must be run from 'fixtures'"
- exit 255
-fi
-
-if ! which cfssl; then
- echo "cfssl is not installed"
- exit 255
-fi
-
-cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca
-mv ca.pem ca.crt
-openssl x509 -in ca.crt -noout -text
-
-# generate wildcard certificates DNS: *.etcd.local
-cfssl gencert \
- --ca ./ca.crt \
- --ca-key ./ca-key.pem \
- --config ./gencert.json \
- ./server-ca-csr.json | cfssljson --bare ./server
-mv server.pem server.crt
-mv server-key.pem server.key.insecure
-
-rm -f *.csr *.pem *.stderr *.txt
diff --git a/tests/docker-dns-srv/certs-gateway/run.sh b/tests/docker-dns-srv/certs-gateway/run.sh
deleted file mode 100755
index ef4c1667cf9..00000000000
--- a/tests/docker-dns-srv/certs-gateway/run.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/sh
-rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data
-
-/etc/init.d/bind9 start
-
-# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost
-cat /dev/null >/etc/hosts
-
-goreman -f /certs-gateway/Procfile start &
-
-# TODO: remove random sleeps
-sleep 7s
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-gateway/ca.crt \
- --cert=/certs-gateway/server.crt \
- --key=/certs-gateway/server.key.insecure \
- --discovery-srv etcd.local \
- endpoint health --cluster
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-gateway/ca.crt \
- --cert=/certs-gateway/server.crt \
- --key=/certs-gateway/server.key.insecure \
- --discovery-srv etcd.local \
- put abc def
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-gateway/ca.crt \
- --cert=/certs-gateway/server.crt \
- --key=/certs-gateway/server.key.insecure \
- --discovery-srv etcd.local \
- get abc
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-gateway/ca.crt \
- --cert=/certs-gateway/server.crt \
- --key=/certs-gateway/server.key.insecure \
- --endpoints=127.0.0.1:23790 \
- put ghi jkl
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-gateway/ca.crt \
- --cert=/certs-gateway/server.crt \
- --key=/certs-gateway/server.key.insecure \
- --endpoints=127.0.0.1:23790 \
- get ghi
diff --git a/tests/docker-dns-srv/certs-gateway/server-ca-csr.json b/tests/docker-dns-srv/certs-gateway/server-ca-csr.json
deleted file mode 100644
index 72bd3808288..00000000000
--- a/tests/docker-dns-srv/certs-gateway/server-ca-csr.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "hosts": [
- "m1.etcd.local",
- "m2.etcd.local",
- "m3.etcd.local",
- "etcd.local",
- "127.0.0.1",
- "localhost"
- ]
-}
diff --git a/tests/docker-dns-srv/certs-gateway/server.crt b/tests/docker-dns-srv/certs-gateway/server.crt
deleted file mode 100644
index ef591cc7cc9..00000000000
--- a/tests/docker-dns-srv/certs-gateway/server.crt
+++ /dev/null
@@ -1,25 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIENTCCAx2gAwIBAgIUcviGEkA57QgUUFUIuB23kO/jHWIwDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzEyMDExOTI5MDBaFw0yNzExMjkxOTI5
-MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL6rB1Kh08Fo
-FieWqzB4WvKxSFjLWlNfAXbSC1IEPEc/2JOSTF/VfsEX7Xf4eDlTUIZ/TpMS4nUE
-Jn0rOIxDJWieQgF99a88CKCwVeqyiQ1iGlI/Ls78P7712QJ1QvcYPBRCvAFo2VLg
-TSNhq4taRtAnP690TJVKMSxHg7qtMIpiBLc8ryNbtNUkQHl7/puiBZVVFwHQZm6d
-ZRkfMqXWs4+VKLTx0pqJaM0oWVISQlLWQV83buVsuDVyLAZu2MjRYZwBj9gQwZDO
-15VGvacjMU+l1+nLRuODrpGeGlxwfT57jqipbUtTsoZFsGxPdIWn14M6Pzw/mML4
-guYLKv3UqkkCAwEAAaOB1TCB0jAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI
-KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFKYKYVPu
-XPnZ2j0NORiNPUJpBnhkMB8GA1UdIwQYMBaAFHte1k/d9zoSWAeXqF1IcuasVaMz
-MFMGA1UdEQRMMEqCDW0xLmV0Y2QubG9jYWyCDW0yLmV0Y2QubG9jYWyCDW0zLmV0
-Y2QubG9jYWyCCmV0Y2QubG9jYWyCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0B
-AQsFAAOCAQEAK40lD6Nx/V6CaShL95fQal7mFp/LXiyrlFTqCqrCruVnntwpukSx
-I864bNMxVSTStEA3NM5V4mGuYjRvdjS65LBhaS1MQDPb4ofPj0vnxDOx6fryRIsB
-wYKDuT4LSQ7pV/hBfL/bPb+itvb24G4/ECbduOprrywxmZskeEm/m0WqUb1A08Hv
-6vDleyt382Wnxahq8txhMU+gNLTGVne60hhfLR+ePK7MJ4oyk3yeUxsmsnBkYaOu
-gYOak5nWzRa09dLq6/vHQLt6n0AB0VurMAjshzO2rsbdOkD233sdkvKiYpayAyEf
-Iu7S5vNjP9jiUgmws6G95wgJOd2xv54D4Q==
------END CERTIFICATE-----
diff --git a/tests/docker-dns-srv/certs-gateway/server.key.insecure b/tests/docker-dns-srv/certs-gateway/server.key.insecure
deleted file mode 100644
index 623457b5dab..00000000000
--- a/tests/docker-dns-srv/certs-gateway/server.key.insecure
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAvqsHUqHTwWgWJ5arMHha8rFIWMtaU18BdtILUgQ8Rz/Yk5JM
-X9V+wRftd/h4OVNQhn9OkxLidQQmfSs4jEMlaJ5CAX31rzwIoLBV6rKJDWIaUj8u
-zvw/vvXZAnVC9xg8FEK8AWjZUuBNI2Gri1pG0Cc/r3RMlUoxLEeDuq0wimIEtzyv
-I1u01SRAeXv+m6IFlVUXAdBmbp1lGR8ypdazj5UotPHSmolozShZUhJCUtZBXzdu
-5Wy4NXIsBm7YyNFhnAGP2BDBkM7XlUa9pyMxT6XX6ctG44OukZ4aXHB9PnuOqKlt
-S1OyhkWwbE90hafXgzo/PD+YwviC5gsq/dSqSQIDAQABAoIBAEAOsb0fRUdbMuZG
-BmmYZeXXjdjXKReNea5zzv3VEnNVjeu2YRZpYdZ5tXxy6+FGjm1BZCKhW5e4tz2i
-QbNN88l8MezSZrJi1vs1gwgAx27JoNI1DALaWIhNjIT45HCjobuk2AkZMrpXRVM3
-wyxkPho8tXa6+efGL1MTC7yx5vb2dbhnEsjrPdUO0GLVP56bgrz7vRk+hE772uq2
-QDenZg+PcH+hOhptbY1h9CYotGWYXCpi0+yoHhsh5PTcEpyPmLWSkACsHovm3MIn
-a5oU0uh28nVBfYE0Sk6I9XBERHVO/OrCvz4Y3ZbVyGpCdLcaMB5wI1P4a5ULV52+
-VPrALQkCgYEA+w85KYuL+eUjHeMqa8V8A9xgcl1+dvB8SXgfRRm5QTqxgetzurD9
-G7vgMex42nqgoW1XUx6i9roRk3Qn3D2NKvBJcpMohYcY3HcGkCsBwtNUCyOWKasS
-Oj2q9LzPjVqTFII0zzarQ85XuuZyTRieFAMoYmsS8O/GcapKqYhPIDMCgYEAwmuR
-ctnCNgoEj1NaLBSAcq7njONvYUFvbXO8BCyd1WeLZyz/krgXxuhQh9oXIccWAKX2
-uxIDaoWV8F5c8bNOkeebHzVHfaLpwl4IlLa/i5WTIc+IZmpBR0aiS021k/M3KkDg
-KnQXAer6jEymT3lUL0AqZd+GX6DjFw61zPOFH5MCgYAnCiv6YN/IYTA/woZjMddi
-Bk/dGNrEhgrdpdc++IwNL6JQsJtTaZhCSsnHGZ2FY9I8p/MPUtFGipKXGlXkcpHU
-Hn9dWLLRaLud9MhJfNaORCxqewMrwZVZByPhYMbplS8P3lt16WtiZODRiGo3wN87
-/221OC8+1hpGrJNln3OmbwKBgDV8voEoY4PWcba0qcQix8vFTrK2B3hsNimYg4tq
-cum5GOMDwDQvLWttkmotl9uVF/qJrj19ES+HHN8KNuvP9rexTj3hvI9V+JWepSG0
-vTG7rsTIgbAbX2Yqio/JC0Fu0ihvvLwxP/spGFDs7XxD1uNA9ekc+6znaFJ5m46N
-GHy9AoGBAJmGEv5+rM3cucRyYYhE7vumXeCLXyAxxaf0f7+1mqRVO6uNGNGbNY6U
-Heq6De4yc1VeAXUpkGQi/afPJNMU+fy8paCjFyzID1yLvdtFOG38KDbgMmj4t+cH
-xTp2RT3MkcCWPq2+kXZeQjPdesPkzdB+nA8ckaSursV908n6AHcM
------END RSA PRIVATE KEY-----
diff --git a/tests/docker-dns-srv/certs-wildcard/Procfile b/tests/docker-dns-srv/certs-wildcard/Procfile
deleted file mode 100644
index 3d5dc6eaee3..00000000000
--- a/tests/docker-dns-srv/certs-wildcard/Procfile
+++ /dev/null
@@ -1,5 +0,0 @@
-etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs-wildcard/server.crt --peer-key-file=/certs-wildcard/server.key.insecure --peer-trusted-ca-file=/certs-wildcard/ca.crt --peer-client-cert-auth --cert-file=/certs-wildcard/server.crt --key-file=/certs-wildcard/server.key.insecure --trusted-ca-file=/certs-wildcard/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs-wildcard/server.crt --peer-key-file=/certs-wildcard/server.key.insecure --peer-trusted-ca-file=/certs-wildcard/ca.crt --peer-client-cert-auth --cert-file=/certs-wildcard/server.crt --key-file=/certs-wildcard/server.key.insecure --trusted-ca-file=/certs-wildcard/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs-wildcard/server.crt --peer-key-file=/certs-wildcard/server.key.insecure --peer-trusted-ca-file=/certs-wildcard/ca.crt --peer-client-cert-auth --cert-file=/certs-wildcard/server.crt --key-file=/certs-wildcard/server.key.insecure --trusted-ca-file=/certs-wildcard/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
diff --git a/tests/docker-dns-srv/certs-wildcard/ca-csr.json b/tests/docker-dns-srv/certs-wildcard/ca-csr.json
deleted file mode 100644
index ecafabaadd3..00000000000
--- a/tests/docker-dns-srv/certs-wildcard/ca-csr.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "CN": "ca",
- "ca": {
- "expiry": "87600h"
- }
-}
diff --git a/tests/docker-dns-srv/certs-wildcard/ca.crt b/tests/docker-dns-srv/certs-wildcard/ca.crt
deleted file mode 100644
index c89d6531c94..00000000000
--- a/tests/docker-dns-srv/certs-wildcard/ca.crt
+++ /dev/null
@@ -1,22 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDsTCCApmgAwIBAgIUWzsBehxAkgLLYBUZEUpSjHkIaMowDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTUxODAyMDBaFw0yNzExMTMxODAy
-MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
-AoIBAQCxjHVNtcCSCz1w9AiN7zAql0ZsPN6MNQWJ2j3iPCvmy9oi0wqSfYXTs+xw
-Y4Q+j0dfA54+PcyIOSBQCZBeLLIwCaXN+gLkMxYEWCCVgWYUa6UY+NzPKRCfkbwG
-oE2Ilv3R1FWIpMqDVE2rLmTb3YxSiw460Ruv4l16kodEzfs4BRcqrEiobBwaIMLd
-0rDJju7Q2TcioNji+HFoXV2aLN58LDgKO9AqszXxW88IKwUspfGBcsA4Zti/OHr+
-W+i/VxsxnQSJiAoKYbv9SkS8fUWw2hQ9SBBCKqE3jLzI71HzKgjS5TiQVZJaD6oK
-cw8FjexOELZd4r1+/p+nQdKqwnb5AgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS
-BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBRLfPxmhlZix1eTdBMAzMVlAnOV
-gTANBgkqhkiG9w0BAQsFAAOCAQEAeT2NfOt3WsBLUVcnyGMeVRQ0gXazxJXD/Z+3
-2RF3KClqBLuGmPUZVl0FU841J6hLlwNjS33mye7k2OHrjJcouElbV3Olxsgh/EV0
-J7b7Wf4zWYHFNZz/VxwGHunsEZ+SCXUzU8OiMrEcHkOVzhtbC2veVPJzrESqd88z
-m1MseGW636VIcrg4fYRS9EebRPFvlwfymMd+bqLky9KsUbjNupYd/TlhpAudrIzA
-wO9ZUDb/0P44iOo+xURCoodxDTM0vvfZ8eJ6VZ/17HIf/a71kvk1oMqEhf060nmF
-IxnbK6iUqqhV8DLE1869vpFvgbDdOxP7BeabN5FXEnZFDTLDqg==
------END CERTIFICATE-----
diff --git a/tests/docker-dns-srv/certs-wildcard/gencert.json b/tests/docker-dns-srv/certs-wildcard/gencert.json
deleted file mode 100644
index 09b67267bb2..00000000000
--- a/tests/docker-dns-srv/certs-wildcard/gencert.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "signing": {
- "default": {
- "usages": [
- "signing",
- "key encipherment",
- "server auth",
- "client auth"
- ],
- "expiry": "87600h"
- }
- }
-}
diff --git a/tests/docker-dns-srv/certs-wildcard/gencerts.sh b/tests/docker-dns-srv/certs-wildcard/gencerts.sh
deleted file mode 100755
index af8663e09eb..00000000000
--- a/tests/docker-dns-srv/certs-wildcard/gencerts.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-if ! [[ "$0" =~ "./gencerts.sh" ]]; then
- echo "must be run from 'fixtures'"
- exit 255
-fi
-
-if ! which cfssl; then
- echo "cfssl is not installed"
- exit 255
-fi
-
-cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca
-mv ca.pem ca.crt
-openssl x509 -in ca.crt -noout -text
-
-# generate wildcard certificates DNS: *.etcd.local
-cfssl gencert \
- --ca ./ca.crt \
- --ca-key ./ca-key.pem \
- --config ./gencert.json \
- ./server-ca-csr.json | cfssljson --bare ./server
-mv server.pem server.crt
-mv server-key.pem server.key.insecure
-
-rm -f *.csr *.pem *.stderr *.txt
diff --git a/tests/docker-dns-srv/certs-wildcard/run.sh b/tests/docker-dns-srv/certs-wildcard/run.sh
deleted file mode 100755
index 13e16bda995..00000000000
--- a/tests/docker-dns-srv/certs-wildcard/run.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/sh
-rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data
-
-/etc/init.d/bind9 start
-
-# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost
-cat /dev/null >/etc/hosts
-
-goreman -f /certs-wildcard/Procfile start &
-
-# TODO: remove random sleeps
-sleep 7s
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-wildcard/ca.crt \
- --cert=/certs-wildcard/server.crt \
- --key=/certs-wildcard/server.key.insecure \
- --discovery-srv etcd.local \
- endpoint health --cluster
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-wildcard/ca.crt \
- --cert=/certs-wildcard/server.crt \
- --key=/certs-wildcard/server.key.insecure \
- --discovery-srv etcd.local \
- put abc def
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-wildcard/ca.crt \
- --cert=/certs-wildcard/server.crt \
- --key=/certs-wildcard/server.key.insecure \
- --discovery-srv etcd.local \
- get abc
diff --git a/tests/docker-dns-srv/certs-wildcard/server-ca-csr.json b/tests/docker-dns-srv/certs-wildcard/server-ca-csr.json
deleted file mode 100644
index fd9adae03eb..00000000000
--- a/tests/docker-dns-srv/certs-wildcard/server-ca-csr.json
+++ /dev/null
@@ -1,21 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "hosts": [
- "*.etcd.local",
- "etcd.local",
- "127.0.0.1",
- "localhost"
- ]
-}
diff --git a/tests/docker-dns-srv/certs-wildcard/server.crt b/tests/docker-dns-srv/certs-wildcard/server.crt
deleted file mode 100644
index 385f0321ca8..00000000000
--- a/tests/docker-dns-srv/certs-wildcard/server.crt
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEFjCCAv6gAwIBAgIUCIUuNuEPRjp/EeDBNHipRI/qoAcwDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTUxODAyMDBaFw0yNzExMTMxODAy
-MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMzoOebyKdXF
-5QiVs0mB3cVqMRgRoRGWt9emIOsYCX89SBaRNOIAByop98Vb1GmUDNDv1qR4Oq+m
-4JlWhgZniABWpekFw8mpN8wMIT86DoNnTe64ouLkDQRZDYOBO9I2+r4EuschRxNs
-+Hh5W9JzX/eOomnOhaZfTp6EaxczRHnVmgkWuFUnacfUf7W2FE/HAYfjYpvXw5/+
-eT9AW+Jg/b9SkyU9XKEpWZT7NMqF9OXDXYdxHtRNTGxasLEqPZnG58mqR2QFU2me
-/motY24faZpHo8i9ASb03Vy6xee2/FlS6cj2POCGQx3oLZsiQdgIOva7JrQtRsCn
-e5P0Wk4qk+cCAwEAAaOBtjCBszAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI
-KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFCI+fP2T
-xgvJG68Xdgamg4lzGRX1MB8GA1UdIwQYMBaAFEt8/GaGVmLHV5N0EwDMxWUCc5WB
-MDQGA1UdEQQtMCuCDCouZXRjZC5sb2NhbIIKZXRjZC5sb2NhbIIJbG9jYWxob3N0
-hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQASub3+YZAXJ8x8b55Hl7FkkIt+rML1
-LdgPHsolNntNXeSqVJ4oi4KvuaM0ueFf/+AlTusTAbXWbi/qiG5Tw24xyzY6NGgV
-/vCs56YqNlFyr3bNp1QJlnV3JQ4d3KqosulJ5jk+InhjAKJKomMH01pYhhStRAKg
-1fNwSyD34oyZpSQL0Z7X7wdaMGdOmzxwE99EG6jmYl/P7MiP6rC0WP1elIF4sCGM
-jY6oewvIMj0zWloBf/NlzrcY7VKpPqvBnV65Tllyo5n4y1sc8y2uzgJO/QnVKqhp
-Sdd/74mU8dSh3ALSOqkbmIBhqig21jP7GBgNCNdmsaR2LvPI97n1PYE7
------END CERTIFICATE-----
diff --git a/tests/docker-dns-srv/certs-wildcard/server.key.insecure b/tests/docker-dns-srv/certs-wildcard/server.key.insecure
deleted file mode 100644
index 2b6595fa880..00000000000
--- a/tests/docker-dns-srv/certs-wildcard/server.key.insecure
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAzOg55vIp1cXlCJWzSYHdxWoxGBGhEZa316Yg6xgJfz1IFpE0
-4gAHKin3xVvUaZQM0O/WpHg6r6bgmVaGBmeIAFal6QXDyak3zAwhPzoOg2dN7rii
-4uQNBFkNg4E70jb6vgS6xyFHE2z4eHlb0nNf946iac6Fpl9OnoRrFzNEedWaCRa4
-VSdpx9R/tbYUT8cBh+Nim9fDn/55P0Bb4mD9v1KTJT1coSlZlPs0yoX05cNdh3Ee
-1E1MbFqwsSo9mcbnyapHZAVTaZ7+ai1jbh9pmkejyL0BJvTdXLrF57b8WVLpyPY8
-4IZDHegtmyJB2Ag69rsmtC1GwKd7k/RaTiqT5wIDAQABAoIBAF0nTfuCKCa5WtA2
-TlWippGzHzKUASef32A4dEqsmNSxpW4tAV+lJ5yxi6S7hKui1Ni/0FLhHbzxHrZX
-MYMD2j5dJfvz1Ph+55DqCstVt3dhpXpbkiGYD5rkaVJZlDqTKBbuy4LvzAI2zhbn
-BSl9rik7PPbhHr1uIq3KAW2Arya7dlpPZiEX04Dg9xqZvxZkxt9IM25E+uzTWKSR
-v5BRmijWiGJ6atujgmP7KcYtgBC5EDR9yZf2uK+hnsKEcH94TUkTnJriTcOCKVbb
-isAuzsxStLpmyibfiLXD55aYjzr7KRVzQpoVXGJ4vJfs7lTxqxXBjUIsBJMPBcck
-ATabIcECgYEA8C8JeKPmcA4KaGFSusF5OsXt4SM9jz5Kr7larA+ozuuR/z0m4pnx
-AdjwQiGlhXaMtyziZ7Uwx+tmfnJDijpE/hUnkcAIKheDLXB/r1VpJdj/mqXtK49Y
-mnOxV66TcWAmXav31TgmLVSj0SYLGEnvV4MPbgJroMg3VO7LnNbNL7cCgYEA2maB
-Edbn4pJqUjVCZG68m0wQHmFZFOaoYZLeR3FgH+PQYIzUj96TP9XFpOwBmYAl2jiM
-kQZ3Q6VQY37rwu0M+2BVFkQFnFbelH5jXbHDLdoeFDGCRnJkH2VG1kE/rPfzVsiz
-NFDJD+17kPw3tTdHwDYGHwxyNuEoBQw3q6hfXVECgYBEUfzttiGMalIHkveHbBVh
-5H9f9ThDkMKJ7b2fB+1KvrOO2QRAnO1zSxQ8m3mL10b7q+bS/TVdCNbkzPftT9nk
-NHxG90rbPkjwGfoYE8GPJITApsYqB+J6PMKLYHtMWr9PEeWzXv9tEZBvo9SwGgfc
-6sjuz/1xhMJIhIyilm9TTQKBgHRsYDGaVlK5qmPYcGQJhBFlItKPImW579jT6ho7
-nfph/xr49/cZt3U4B/w6sz+YyJTjwEsvHzS4U3o2lod6xojaeYE9EaCdzllqZp3z
-vRAcThyFp+TV5fm2i2R7s+4I33dL1fv1dLlA57YKPcgkh+M26Vxzzg7jR+oo8SRY
-xT2BAoGBAKNR60zpSQZ2SuqEoWcj1Nf+KloZv2tZcnsHhqhiugbYhZOQVyTCNipa
-Ib3/BGERCyI7oWMk0yTTQK4wg3+0EsxQX10hYJ5+rd4btWac7G/tjo2+BSaTnWSW
-0vWM/nu33Pq0JHYIo0q0Jee0evTgizqH9UJ3wI5LG29LKwurXxPW
------END RSA PRIVATE KEY-----
diff --git a/tests/docker-dns-srv/certs/Procfile b/tests/docker-dns-srv/certs/Procfile
deleted file mode 100644
index 9be48cb8718..00000000000
--- a/tests/docker-dns-srv/certs/Procfile
+++ /dev/null
@@ -1,11 +0,0 @@
-etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd4: ./etcd --name m4 --data-dir /tmp/m4.data --listen-client-urls https://127.0.0.1:13791 --advertise-client-urls https://m4.etcd.local:13791 --listen-peer-urls https://127.0.0.1:13880 --initial-advertise-peer-urls=https://m1.etcd.local:13880 --initial-cluster-token tkn --discovery-srv=etcd.local --discovery-srv-name=c1 --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd5: ./etcd --name m5 --data-dir /tmp/m5.data --listen-client-urls https://127.0.0.1:23791 --advertise-client-urls https://m5.etcd.local:23791 --listen-peer-urls https://127.0.0.1:23880 --initial-advertise-peer-urls=https://m5.etcd.local:23880 --initial-cluster-token tkn --discovery-srv=etcd.local --discovery-srv-name=c1 --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd6: ./etcd --name m6 --data-dir /tmp/m6.data --listen-client-urls https://127.0.0.1:33791 --advertise-client-urls https://m6.etcd.local:33791 --listen-peer-urls https://127.0.0.1:33880 --initial-advertise-peer-urls=https://m6.etcd.local:33880 --initial-cluster-token tkn --discovery-srv=etcd.local --discovery-srv-name=c1 --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
diff --git a/tests/docker-dns-srv/certs/ca-csr.json b/tests/docker-dns-srv/certs/ca-csr.json
deleted file mode 100644
index ecafabaadd3..00000000000
--- a/tests/docker-dns-srv/certs/ca-csr.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "CN": "ca",
- "ca": {
- "expiry": "87600h"
- }
-}
diff --git a/tests/docker-dns-srv/certs/ca.crt b/tests/docker-dns-srv/certs/ca.crt
deleted file mode 100644
index ebe259d0bfc..00000000000
--- a/tests/docker-dns-srv/certs/ca.crt
+++ /dev/null
@@ -1,22 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDrjCCApagAwIBAgIUb8ICEcp5me1o5zF4mh4GKnf57hUwDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xODExMDkxNzQ2MDBaFw0yODExMDYxNzQ2
-MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
-AoIBAQDEBKTfgg0MFy62Sslp8nJPLknl+qTO8ohan80CealThTMuRoGMYpXha0sx
-d+mv13sm+vRwEMaRU0FTmxtE9nrM/DNfRoeDd+ZW+Q/hNRuQ0mf0xvmY/h25M+It
-uaDbAD3m+UhmOCC1nzdwyBOxm4DQONMwMGtfCOZ8OkIVsKkubx3/pgRB/LdJZRdL
-1KWGucjMFxEaTGdwAIxdRyPS9pIX9g+B3zC7T3sYk7YbCGyvi1KLVR45Lm1MPcFY
-Gy3hU+CVHiljT6+87N+c98lv8wjnTFJXDkouLm6CxyxGgfGop8fHzpMpGcNmcN5t
-Yb3exRWn9u9BfNVH1YEOfiRVB+ylAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
-BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQe5E9CeqoDpGgJ1u++mp72Ajvt6DAN
-BgkqhkiG9w0BAQsFAAOCAQEAUCj9oKV43RyjvcqKSs00mFKctHZih4Kf0HWGC47M
-ny8c/FzCcC66q9TZx1vuf2PHkLsY8Z8f7Rjig2G6hbPKwU05JSFzKCwJhnRSxX4f
-ELDqQXbidlQ6wOcj2zoLSVC6WIjVmLyXCu0Zrcp+YwHyGb5x7SQcA1wNmJKOba+h
-ooXl5Ea4R1bxK+43lB2bsFovJVhS+6iyBih6oMlLycaSu6c5X38i0mcxQu6Ul/Ua
-I8nW1cAXnQC53VzQGkhfxnvWsc98XU/NzF778EaLwLECE7R4zkHWKSUktge1x+co
-bRXtQ/C7BoEVaTmQnl211O3rA8gnZ0cmmNBO1S0hIiZIBQ==
------END CERTIFICATE-----
diff --git a/tests/docker-dns-srv/certs/gencert.json b/tests/docker-dns-srv/certs/gencert.json
deleted file mode 100644
index 09b67267bb2..00000000000
--- a/tests/docker-dns-srv/certs/gencert.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "signing": {
- "default": {
- "usages": [
- "signing",
- "key encipherment",
- "server auth",
- "client auth"
- ],
- "expiry": "87600h"
- }
- }
-}
diff --git a/tests/docker-dns-srv/certs/gencerts.sh b/tests/docker-dns-srv/certs/gencerts.sh
deleted file mode 100755
index af8663e09eb..00000000000
--- a/tests/docker-dns-srv/certs/gencerts.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-if ! [[ "$0" =~ "./gencerts.sh" ]]; then
- echo "must be run from 'fixtures'"
- exit 255
-fi
-
-if ! which cfssl; then
- echo "cfssl is not installed"
- exit 255
-fi
-
-cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca
-mv ca.pem ca.crt
-openssl x509 -in ca.crt -noout -text
-
-# generate wildcard certificates DNS: *.etcd.local
-cfssl gencert \
- --ca ./ca.crt \
- --ca-key ./ca-key.pem \
- --config ./gencert.json \
- ./server-ca-csr.json | cfssljson --bare ./server
-mv server.pem server.crt
-mv server-key.pem server.key.insecure
-
-rm -f *.csr *.pem *.stderr *.txt
diff --git a/tests/docker-dns-srv/certs/run.sh b/tests/docker-dns-srv/certs/run.sh
deleted file mode 100755
index 44d5920b611..00000000000
--- a/tests/docker-dns-srv/certs/run.sh
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/sh
-rm -rf /tmp/m{1,2,3,4,5,6}.data
-
-/etc/init.d/bind9 start
-
-# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost
-cat /dev/null >/etc/hosts
-
-goreman -f /certs/Procfile start &
-
-# TODO: remove random sleeps
-sleep 7s
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs/ca.crt \
- --cert=/certs/server.crt \
- --key=/certs/server.key.insecure \
- --discovery-srv etcd.local \
- endpoint health --cluster
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs/ca.crt \
- --cert=/certs/server.crt \
- --key=/certs/server.key.insecure \
- --discovery-srv etcd.local \
- put abc def
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs/ca.crt \
- --cert=/certs/server.crt \
- --key=/certs/server.key.insecure \
- --discovery-srv etcd.local \
- get abc
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs/ca.crt \
- --cert=/certs/server.crt \
- --key=/certs/server.key.insecure \
- --discovery-srv etcd.local \
- --discovery-srv-name c1 \
- endpoint health --cluster
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs/ca.crt \
- --cert=/certs/server.crt \
- --key=/certs/server.key.insecure \
- --discovery-srv etcd.local \
- --discovery-srv-name c1 \
- put ghi jkl
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs/ca.crt \
- --cert=/certs/server.crt \
- --key=/certs/server.key.insecure \
- --discovery-srv etcd.local \
- --discovery-srv-name c1 \
- get ghi
diff --git a/tests/docker-dns-srv/certs/server-ca-csr.json b/tests/docker-dns-srv/certs/server-ca-csr.json
deleted file mode 100644
index 661de379991..00000000000
--- a/tests/docker-dns-srv/certs/server-ca-csr.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "hosts": [
- "m1.etcd.local",
- "m2.etcd.local",
- "m3.etcd.local",
- "m4.etcd.local",
- "m5.etcd.local",
- "m6.etcd.local",
- "etcd.local",
- "127.0.0.1",
- "localhost"
- ]
-}
diff --git a/tests/docker-dns-srv/certs/server.crt b/tests/docker-dns-srv/certs/server.crt
deleted file mode 100644
index 83c2fd9d42c..00000000000
--- a/tests/docker-dns-srv/certs/server.crt
+++ /dev/null
@@ -1,26 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEZTCCA02gAwIBAgIULBrfr3JYYypJkYr+LK0oWAqHsCowDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xODExMDkxNzQ3MDBaFw0yODExMDYxNzQ3
-MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALBFVoY2gbx/
-z9ciHrH6LzxIwIDmeVbOIMyooTun3iCtM8OjSkw15fl6WvM0KLKb6D2B+N7MLGa8
-T+KqKHIrzCudK21WGV8g5Pwc56fjRT796zQsyMjcjMlf9AEtP4ZdY4aap4r0d28m
-ZiUx9hccUtC6b0AFVgBuHjGNw4Ym6zmz38ZWEfnJ/R71uccmQpB5CoOZ7dN1bCJa
-gZqaWwRCYNG5XAQD2GMcn6r7oFijhlVO99auT04Et2lpoOzg2P4a8pPGgzsUCFOP
-WnuqNh78p61AHnEpUM0eLzzENFAmSSzwMr9jFkNF4gMgLrn0t3M1JUrbzXWIk9EX
-5G6pafkxXlkCAwEAAaOCAQQwggEAMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAU
-BggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUtaRC
-6qucn6KvF+/u/esMahneemswHwYDVR0jBBgwFoAUHuRPQnqqA6RoCdbvvpqe9gI7
-7egwgYAGA1UdEQR5MHeCDW0xLmV0Y2QubG9jYWyCDW0yLmV0Y2QubG9jYWyCDW0z
-LmV0Y2QubG9jYWyCDW00LmV0Y2QubG9jYWyCDW01LmV0Y2QubG9jYWyCDW02LmV0
-Y2QubG9jYWyCCmV0Y2QubG9jYWyCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0B
-AQsFAAOCAQEAFfdWclzi+J7vI0p/7F0UaJ54JkRx4zR9+qcmDHqRbLdiGOYsTiDq
-AudryZjbCsl8koj9k1f7MvDGSIQpCp3jyAJpv/NE9NxuSagDO3vIvAuKfox2HcHV
-RPyoo6igp9FY6F8af0h7CyCXgX0+4PFaLnyJgpQ3tV4jCKduyjCYkAiC1QwoNB8H
-wZEw0zlyFml/5GlQoqtjJyZ7JFIJhrFIUbRIFO7gZZSIipsON7teOjA2HvYme33Y
-uvx/FWr7GBXqpHUamQqWS6ixWBM/rj0lEViYtuWkitek41YHJuktxKs1+peXPjpb
-rYCK5H6Bn/zLKOo2zikqfq41+g/mui3/jQ==
------END CERTIFICATE-----
diff --git a/tests/docker-dns-srv/certs/server.key.insecure b/tests/docker-dns-srv/certs/server.key.insecure
deleted file mode 100644
index 5030a91dc42..00000000000
--- a/tests/docker-dns-srv/certs/server.key.insecure
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAsEVWhjaBvH/P1yIesfovPEjAgOZ5Vs4gzKihO6feIK0zw6NK
-TDXl+Xpa8zQospvoPYH43swsZrxP4qoocivMK50rbVYZXyDk/Bznp+NFPv3rNCzI
-yNyMyV/0AS0/hl1jhpqnivR3byZmJTH2FxxS0LpvQAVWAG4eMY3DhibrObPfxlYR
-+cn9HvW5xyZCkHkKg5nt03VsIlqBmppbBEJg0blcBAPYYxyfqvugWKOGVU731q5P
-TgS3aWmg7ODY/hryk8aDOxQIU49ae6o2HvynrUAecSlQzR4vPMQ0UCZJLPAyv2MW
-Q0XiAyAuufS3czUlStvNdYiT0Rfkbqlp+TFeWQIDAQABAoIBAHj6LacRc7secVPy
-a+S0k4SpXc1Z4L9N2z77ISVjUdVVaiiEQnLJrxuo+RDfpGrpC9xi/p5SvMqJxb4I
-EJhDLO5mAS8aH3GljuLlJ6yXE6hm9u0pK2iHzexLeZjxKB8cqzjvnbuFiw7y6Lnw
-bzhvTPtKaR4kS2EiMoDKDf5daaWAhaJSlLpVnSW7COrVd12vF8YhKkGeyoXEVrAH
-GjdHmpZKI3qzvNJNe8ZQq8VXxMmQs8bryKFO1k7rN6ypMFILYuze7+x+DQ1/Kbee
-UoCN6HIja5GGF77ZggDdyDMrcWv0t1ib/6mFV03m+Iv6n2GBOOkrDSi+rRACdRtF
-5YRXSQECgYEA1sxrz6w0Etg6VrYsOZGSHKh8b/9agGSCtV2T3Jo4LSndFvzy78s0
-lVSuR6irflnaXdngSl8OyZ3s90egWzVpKTq9VCV/Mwk5cscUYaMSBNw75H1Yxfrq
-wyiygL38m/gKZ6L1kjGEdFH80ODr26tQM/npruYRyd33R7/YO6QRJrkCgYEA0hUI
-uEiZDdudtpifAODzVvuxptCH1V1sXJLpdy2XSk2PWBKJ+ePHVhkx9sOnIX1m7DuE
-RsdFVunUz1jVEy4UYxMwpQK7KxZZB05daiwhUI5qMSuwwOKqQbyjCOBIAlgcRWow
-fVbIWbOsza/a9bjgZ3QQFWAbCVxrAuG9Re2mJKECgYBUnZjG6YZl+goZSJBpaUAO
-zAyhLg2f0HhxK9joqVQB7qDqwmCNOBaR0RcKoZZVIt5T5FVn1sSDhhPoYa344DR6
-Cmq08ESIfVTFM0mDIPMjOQLbAsnqy+qZULno327YnkCzDM4CdkFAdV/LhR9EnNru
-br+wp29Qf4E/IYL0E7Cx+QKBgHpbGdsLHWl+0Zp5xZHjcpbkvRFlPte8M9KvFh79
-hLIX/jbThVzvlzfEMN+CEKNmwD0yZNY8VVxLkFC7ck5bdjBGCvzwXEa6G1wv/iRK
-U5TxfVPqGGYfHf5veZ0/03DaFI0xTdCSbNoh1bFujN60sK5QYNWyRczr8L+a7nv9
-79hBAoGBAMytiRzt0hj06ww3oJSQjxwotJ19pnV52p84BQfsGEAgfVRqqADMyn5U
-dkpT9q+IADivb1ELNWUl4af2levage/rBnaDzer0ywnl50J0TRu+DJppIGJIi3r4
-IufVehZ6F+pntM+UbMcBxNXr3cLzAaEHoIhyKq0UG4P4Ef3v6DeI
------END RSA PRIVATE KEY-----
diff --git a/tests/docker-dns-srv/etcd.zone b/tests/docker-dns-srv/etcd.zone
deleted file mode 100644
index c80a07f0be8..00000000000
--- a/tests/docker-dns-srv/etcd.zone
+++ /dev/null
@@ -1,32 +0,0 @@
-$TTL 86400
-@ IN SOA etcdns.local. root.etcdns.local. (
- 100500 ; Serial
- 604800 ; Refresh
- 86400 ; Retry
- 2419200 ; Expire
- 86400 ) ; Negative Cache TTL
- IN NS ns.etcdns.local.
- IN A 127.0.0.1
-
-ns IN A 127.0.0.1
-m1 IN A 127.0.0.1
-m2 IN A 127.0.0.1
-m3 IN A 127.0.0.1
-m4 IN A 127.0.0.1
-m5 IN A 127.0.0.1
-m6 IN A 127.0.0.1
-
-_etcd-client-ssl._tcp IN SRV 0 0 2379 m1.etcd.local.
-_etcd-server-ssl._tcp IN SRV 0 0 2380 m1.etcd.local.
-_etcd-client-ssl._tcp IN SRV 0 0 22379 m2.etcd.local.
-_etcd-server-ssl._tcp IN SRV 0 0 22380 m2.etcd.local.
-_etcd-client-ssl._tcp IN SRV 0 0 32379 m3.etcd.local.
-_etcd-server-ssl._tcp IN SRV 0 0 32380 m3.etcd.local.
-
-; discovery-srv-name=c1
-_etcd-client-ssl-c1._tcp IN SRV 0 0 13791 m4.etcd.local.
-_etcd-server-ssl-c1._tcp IN SRV 0 0 13880 m4.etcd.local.
-_etcd-client-ssl-c1._tcp IN SRV 0 0 23791 m5.etcd.local.
-_etcd-server-ssl-c1._tcp IN SRV 0 0 23880 m5.etcd.local.
-_etcd-client-ssl-c1._tcp IN SRV 0 0 33791 m6.etcd.local.
-_etcd-server-ssl-c1._tcp IN SRV 0 0 33880 m6.etcd.local.
diff --git a/tests/docker-dns-srv/named.conf b/tests/docker-dns-srv/named.conf
deleted file mode 100644
index 76ce0caa165..00000000000
--- a/tests/docker-dns-srv/named.conf
+++ /dev/null
@@ -1,23 +0,0 @@
-options {
- directory "/var/bind";
- listen-on { 127.0.0.1; };
- listen-on-v6 { none; };
- allow-transfer {
- none;
- };
- // If you have problems and are behind a firewall:
- query-source address * port 53;
- pid-file "/var/run/named/named.pid";
- allow-recursion { none; };
- recursion no;
-};
-
-zone "etcd.local" IN {
- type main;
- file "/etc/bind/etcd.zone";
-};
-
-zone "0.0.127.in-addr.arpa" {
- type main;
- file "/etc/bind/rdns.zone";
-};
diff --git a/tests/docker-dns-srv/rdns.zone b/tests/docker-dns-srv/rdns.zone
deleted file mode 100644
index d129188e400..00000000000
--- a/tests/docker-dns-srv/rdns.zone
+++ /dev/null
@@ -1,17 +0,0 @@
-$TTL 86400
-@ IN SOA etcdns.local. root.etcdns.local. (
- 100500 ; Serial
- 604800 ; Refresh
- 86400 ; Retry
- 2419200 ; Expire
- 86400 ) ; Negative Cache TTL
- IN NS ns.etcdns.local.
- IN A 127.0.0.1
-
-1 IN PTR m1.etcd.local.
-1 IN PTR m2.etcd.local.
-1 IN PTR m3.etcd.local.
-1 IN PTR m4.etcd.local.
-1 IN PTR m5.etcd.local.
-1 IN PTR m6.etcd.local.
-
diff --git a/tests/docker-dns-srv/resolv.conf b/tests/docker-dns-srv/resolv.conf
deleted file mode 100644
index bbc8559cd54..00000000000
--- a/tests/docker-dns-srv/resolv.conf
+++ /dev/null
@@ -1 +0,0 @@
-nameserver 127.0.0.1
diff --git a/tests/docker-dns/Dockerfile b/tests/docker-dns/Dockerfile
deleted file mode 100644
index 76dfe60b2c0..00000000000
--- a/tests/docker-dns/Dockerfile
+++ /dev/null
@@ -1,45 +0,0 @@
-FROM ubuntu:18.04
-
-RUN rm /bin/sh && ln -s /bin/bash /bin/sh
-RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
-
-RUN apt-get -y update \
- && apt-get -y install \
- build-essential \
- gcc \
- apt-utils \
- pkg-config \
- software-properties-common \
- apt-transport-https \
- libssl-dev \
- sudo \
- bash \
- curl \
- tar \
- git \
- netcat \
- bind9 \
- dnsutils \
- lsof \
- && apt-get -y update \
- && apt-get -y upgrade \
- && apt-get -y autoremove \
- && apt-get -y autoclean
-
-ENV GOROOT /usr/local/go
-ENV GOPATH /go
-ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH}
-ENV GO_VERSION REPLACE_ME_GO_VERSION
-ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang
-RUN rm -rf ${GOROOT} \
- && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \
- && mkdir -p ${GOPATH}/src ${GOPATH}/bin \
- && go version \
- && go get -v -u github.com/mattn/goreman
-
-RUN mkdir -p /var/bind /etc/bind
-RUN chown root:bind /var/bind /etc/bind
-
-ADD named.conf etcd.zone rdns.zone /etc/bind/
-RUN chown root:bind /etc/bind/named.conf /etc/bind/etcd.zone /etc/bind/rdns.zone
-ADD resolv.conf /etc/resolv.conf
diff --git a/tests/docker-dns/certs-common-name-auth/Procfile b/tests/docker-dns/certs-common-name-auth/Procfile
deleted file mode 100644
index 2fb95f5fefa..00000000000
--- a/tests/docker-dns/certs-common-name-auth/Procfile
+++ /dev/null
@@ -1,6 +0,0 @@
-# Use goreman to run `go get github.com/mattn/goreman`
-etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-common-name-auth/server.crt --peer-key-file=/certs-common-name-auth/server.key.insecure --peer-trusted-ca-file=/certs-common-name-auth/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn test-common-name --cert-file=/certs-common-name-auth/server.crt --key-file=/certs-common-name-auth/server.key.insecure --trusted-ca-file=/certs-common-name-auth/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-common-name-auth/server.crt --peer-key-file=/certs-common-name-auth/server.key.insecure --peer-trusted-ca-file=/certs-common-name-auth/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn test-common-name --cert-file=/certs-common-name-auth/server.crt --key-file=/certs-common-name-auth/server.key.insecure --trusted-ca-file=/certs-common-name-auth/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-common-name-auth/server.crt --peer-key-file=/certs-common-name-auth/server.key.insecure --peer-trusted-ca-file=/certs-common-name-auth/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn test-common-name --cert-file=/certs-common-name-auth/server.crt --key-file=/certs-common-name-auth/server.key.insecure --trusted-ca-file=/certs-common-name-auth/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
\ No newline at end of file
diff --git a/tests/docker-dns/certs-common-name-auth/ca-csr.json b/tests/docker-dns/certs-common-name-auth/ca-csr.json
deleted file mode 100644
index ecafabaadd3..00000000000
--- a/tests/docker-dns/certs-common-name-auth/ca-csr.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "CN": "ca",
- "ca": {
- "expiry": "87600h"
- }
-}
diff --git a/tests/docker-dns/certs-common-name-auth/ca.crt b/tests/docker-dns/certs-common-name-auth/ca.crt
deleted file mode 100644
index 00faeca22a5..00000000000
--- a/tests/docker-dns/certs-common-name-auth/ca.crt
+++ /dev/null
@@ -1,22 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDsTCCApmgAwIBAgIUdASu5zT1US/6LPyKmczbC3NgdY4wDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTQwNjIzMDBaFw0yNzExMTIwNjIz
-MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
-AoIBAQDBbE44RP/Tk9l7KShzxQAypatoqDJQL32hyw8plZIfni5XFIlG2GwyjNvX
-wiP6u0YcsApZKc58ytqcHQqMyk68OTTxcM+HVWvKHMKOBPBYgXeeVnD+7Ixuinq/
-X6RK3n2jEipFgE9FiAXDNICF3ZQz+HVNBSbzwCjBtIcYkinWHX+kgnQkFT1NnmuZ
-uloz6Uh7/Ngn/XPNSsoMyLrh4TwDsx/fQEpVcrXMbxWux1xEHmfDzRKvE7VhSo39
-/mcpKBOwTg4jwh9tDjxWX4Yat+/cX0cGxQ7JSrdy14ESV5AGBmesGHd2SoWhZK9l
-tWm1Eq0JYWD+Cd5yNrODTUxWRNs9AgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS
-BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBSZMjlLnc7Vv2mxRMebo5ezJ7gt
-pzANBgkqhkiG9w0BAQsFAAOCAQEAA2d2nV4CXjp7xpTQrh8sHzSBDYUNr9DY5hej
-52X6q8WV0N3QC7Utvv2Soz6Ol72/xoGajIJvqorsIBB5Ms3dgCzPMy3R01Eb3MzI
-7KG/4AGVEiAKUBkNSD8PWD7bREnnv1g9tUftE7jWsgMaPIpi6KhzhyJsClT4UsKQ
-6Lp+Be80S293LrlmUSdZ/v7FAvMzDGOLd2iTlTr1fXK6YJJEXpk3+HIi8nbUPvYQ
-6O8iOtf5QoCm1yMLJQMFvNr51Z1EeF935HRj8U2MJP5jXPW4/UY2TAUBcWEhlNsK
-6od+f1B8xGe/6KHvF0C8bg23kj8QphM/E7HCZiVgdm6FNf54AQ==
------END CERTIFICATE-----
diff --git a/tests/docker-dns/certs-common-name-auth/gencert.json b/tests/docker-dns/certs-common-name-auth/gencert.json
deleted file mode 100644
index 09b67267bb2..00000000000
--- a/tests/docker-dns/certs-common-name-auth/gencert.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "signing": {
- "default": {
- "usages": [
- "signing",
- "key encipherment",
- "server auth",
- "client auth"
- ],
- "expiry": "87600h"
- }
- }
-}
diff --git a/tests/docker-dns/certs-common-name-auth/gencerts.sh b/tests/docker-dns/certs-common-name-auth/gencerts.sh
deleted file mode 100755
index 09819cf2239..00000000000
--- a/tests/docker-dns/certs-common-name-auth/gencerts.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-if ! [[ "$0" =~ "./gencerts.sh" ]]; then
- echo "must be run from 'fixtures'"
- exit 255
-fi
-
-if ! which cfssl; then
- echo "cfssl is not installed"
- exit 255
-fi
-
-cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca
-mv ca.pem ca.crt
-openssl x509 -in ca.crt -noout -text
-
-# generate a certificate with SANs m1.etcd.local, m2.etcd.local, m3.etcd.local
-cfssl gencert \
- --ca ./ca.crt \
- --ca-key ./ca-key.pem \
- --config ./gencert.json \
- ./server-ca-csr.json | cfssljson --bare ./server
-mv server.pem server.crt
-mv server-key.pem server.key.insecure
-
-rm -f *.csr *.pem *.stderr *.txt
diff --git a/tests/docker-dns/certs-common-name-auth/run.sh b/tests/docker-dns/certs-common-name-auth/run.sh
deleted file mode 100755
index d4aaaecf25f..00000000000
--- a/tests/docker-dns/certs-common-name-auth/run.sh
+++ /dev/null
@@ -1,255 +0,0 @@
-#!/bin/sh
-rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data
-
-/etc/init.d/bind9 start
-
-# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost
-cat /dev/null >/etc/hosts
-
-goreman -f /certs-common-name-auth/Procfile start &
-
-# TODO: remove random sleeps
-sleep 7s
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379 \
- endpoint health --cluster
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- put abc def
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- get abc
-
-sleep 1s && printf "\n"
-echo "Step 1. creating root role"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- role add root
-
-sleep 1s && printf "\n"
-echo "Step 2. granting readwrite 'foo' permission to role 'root'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- role grant-permission root readwrite foo
-
-sleep 1s && printf "\n"
-echo "Step 3. getting role 'root'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- role get root
-
-sleep 1s && printf "\n"
-echo "Step 4. creating user 'root'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- --interactive=false \
- user add root:123
-
-sleep 1s && printf "\n"
-echo "Step 5. granting role 'root' to user 'root'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- user grant-role root root
-
-sleep 1s && printf "\n"
-echo "Step 6. getting user 'root'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- user get root
-
-sleep 1s && printf "\n"
-echo "Step 7. enabling auth"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- auth enable
-
-sleep 1s && printf "\n"
-echo "Step 8. writing 'foo' with 'root:123'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- --user=root:123 \
- put foo bar
-
-sleep 1s && printf "\n"
-echo "Step 9. writing 'aaa' with 'root:123'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- --user=root:123 \
- put aaa bbb
-
-sleep 1s && printf "\n"
-echo "Step 10. writing 'foo' without 'root:123'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- put foo bar
-
-sleep 1s && printf "\n"
-echo "Step 11. reading 'foo' with 'root:123'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- --user=root:123 \
- get foo
-
-sleep 1s && printf "\n"
-echo "Step 12. reading 'aaa' with 'root:123'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- --user=root:123 \
- get aaa
-
-sleep 1s && printf "\n"
-echo "Step 13. creating a new user 'test-common-name:test-pass'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- --user=root:123 \
- --interactive=false \
- user add test-common-name:test-pass
-
-sleep 1s && printf "\n"
-echo "Step 14. creating a role 'test-role'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- --user=root:123 \
- role add test-role
-
-sleep 1s && printf "\n"
-echo "Step 15. granting readwrite 'aaa' --prefix permission to role 'test-role'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- --user=root:123 \
- role grant-permission test-role readwrite aaa --prefix
-
-sleep 1s && printf "\n"
-echo "Step 16. getting role 'test-role'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- --user=root:123 \
- role get test-role
-
-sleep 1s && printf "\n"
-echo "Step 17. granting role 'test-role' to user 'test-common-name'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- --user=root:123 \
- user grant-role test-common-name test-role
-
-sleep 1s && printf "\n"
-echo "Step 18. writing 'aaa' with 'test-common-name:test-pass'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- --user=test-common-name:test-pass \
- put aaa bbb
-
-sleep 1s && printf "\n"
-echo "Step 19. writing 'bbb' with 'test-common-name:test-pass'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- --user=test-common-name:test-pass \
- put bbb bbb
-
-sleep 1s && printf "\n"
-echo "Step 20. reading 'aaa' with 'test-common-name:test-pass'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- --user=test-common-name:test-pass \
- get aaa
-
-sleep 1s && printf "\n"
-echo "Step 21. reading 'bbb' with 'test-common-name:test-pass'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- --user=test-common-name:test-pass \
- get bbb
-
-sleep 1s && printf "\n"
-echo "Step 22. writing 'aaa' with CommonName 'test-common-name'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- put aaa ccc
-
-sleep 1s && printf "\n"
-echo "Step 23. reading 'aaa' with CommonName 'test-common-name'"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-auth/ca.crt \
- --cert=/certs-common-name-auth/server.crt \
- --key=/certs-common-name-auth/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- get aaa
diff --git a/tests/docker-dns/certs-common-name-auth/server-ca-csr.json b/tests/docker-dns/certs-common-name-auth/server-ca-csr.json
deleted file mode 100644
index 6a57789b1ab..00000000000
--- a/tests/docker-dns/certs-common-name-auth/server-ca-csr.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "CN": "test-common-name",
- "hosts": [
- "m1.etcd.local",
- "m2.etcd.local",
- "m3.etcd.local",
- "127.0.0.1",
- "localhost"
- ]
-}
diff --git a/tests/docker-dns/certs-common-name-auth/server.crt b/tests/docker-dns/certs-common-name-auth/server.crt
deleted file mode 100644
index b9719b2f013..00000000000
--- a/tests/docker-dns/certs-common-name-auth/server.crt
+++ /dev/null
@@ -1,25 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIERDCCAyygAwIBAgIUO500NxhwBHJsodbGKbo5NsW9/p8wDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTQwNjIzMDBaFw0yNzExMTIwNjIz
-MDBaMH0xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTEZMBcGA1UEAxMQdGVzdC1jb21tb24tbmFtZTCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBAMRvVMj3+5jAhRng4izVm4zrvMBnHNMh2MOFVTp7
-wdhEF2en7pFsKzWgczewil6v4d6QzJpgB9yQzPT2q0SOvetpbqP950y6MdPHAF9D
-qZd0+wC+RLdSmK5oQKzgZER/vH3eSbTa1UdwaLBHlT6PiTzGm+gEYL43gr3kle+A
-9c7aT9pkJWQFTCSdqwcQopyHEwgrfPHC8Bdn804soG4HtR9Gg/R4xtlu7ir6LTHn
-vpPBScaMZDUQ5UNrEMh8TM8/sXG6oxqo86r5wpVQt6vscnTMrTTUqq+Mo/OJnDAf
-plaqkWX5NfIJ9tmE2V06hq1/ptQkl714Wb+ske+aJ2Poc/UCAwEAAaOByTCBxjAO
-BgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwG
-A1UdEwEB/wQCMAAwHQYDVR0OBBYEFEG2hXyVTpxLXTse3fXe0U/g0F8kMB8GA1Ud
-IwQYMBaAFJkyOUudztW/abFEx5ujl7MnuC2nMEcGA1UdEQRAMD6CDW0xLmV0Y2Qu
-bG9jYWyCDW0yLmV0Y2QubG9jYWyCDW0zLmV0Y2QubG9jYWyCCWxvY2FsaG9zdIcE
-fwAAATANBgkqhkiG9w0BAQsFAAOCAQEADtH0NZBrWfXTUvTa3WDsa/JPBhiPu/kH
-+gRxOD5UNeDX9+QAx/gxGHrCh4j51OUx55KylUe0qAPHHZ4vhgD2lCRBqFLYx69m
-xRIzVnt5NCruriskxId1aFTZ5pln5KK5tTVkAp04MBHZOgv8giXdRWn+7TtMyJxj
-wVGf8R7/bwJGPPJFrLNtN4EWwXv/a2/SEoZd8fkTxzw12TeJ8w1PnkH4Zer+nzNb
-dH5f+OIBGGZ2fIWANX5g9JEJvvsxBBL8uoCrFE/YdnD0fLyhoplSOVEIvncQLHd8
-3QoIVQ5GXnreMF9vuuEU5LlSsqd/Zv5mAQNrbEAfAL+QZQsnHY12qQ==
------END CERTIFICATE-----
diff --git a/tests/docker-dns/certs-common-name-auth/server.key.insecure b/tests/docker-dns/certs-common-name-auth/server.key.insecure
deleted file mode 100644
index 07417b2552c..00000000000
--- a/tests/docker-dns/certs-common-name-auth/server.key.insecure
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEAxG9UyPf7mMCFGeDiLNWbjOu8wGcc0yHYw4VVOnvB2EQXZ6fu
-kWwrNaBzN7CKXq/h3pDMmmAH3JDM9ParRI6962luo/3nTLox08cAX0Opl3T7AL5E
-t1KYrmhArOBkRH+8fd5JtNrVR3BosEeVPo+JPMab6ARgvjeCveSV74D1ztpP2mQl
-ZAVMJJ2rBxCinIcTCCt88cLwF2fzTiygbge1H0aD9HjG2W7uKvotMee+k8FJxoxk
-NRDlQ2sQyHxMzz+xcbqjGqjzqvnClVC3q+xydMytNNSqr4yj84mcMB+mVqqRZfk1
-8gn22YTZXTqGrX+m1CSXvXhZv6yR75onY+hz9QIDAQABAoIBABiq+nS6X4gRNSXI
-zd5ffMc3m152FHKXH4d+KPPNMsyb0Gyd9CGi+dIkMhPeQaIeaDjw6iDAynvyWyqw
-B1X2rvbvKIvDiNZj03oK1YshDh0M/bBcNHjpEG9mfCi5jR3lBKCx14O0r2/nN95b
-Puy6TbuqHU4HrrZ0diCuof2Prk6pd0EhQC+C3bZCcoWXOaRTqrMBTT6DdSMQrVKD
-eGTXYqCzs/AlGKkOiErKtKWouNpkPpPiba1qp7YWXUasrXqPgPi4d97TmOShGIfc
-zXNJT+e2rDX4OEVAJtOt6U2l9QG+PIhpH4P/ZYsvindm4VZBs+Vysrj4xkLgGBBP
-ygOfBIECgYEA0IfP9Z9mzvCXiGrkrx2tN/k31cX674P/KwxPgSWM/AdXenYYzsmj
-rVcoFx2eCFnBFdPz4BAqEfH70gtsG7OoTmoJSwN6wurIdGcFQwItrghgt9Qp46Dq
-AIT9RXSpcB9AjM6p2reCjWcNeBVMrrHU3eaQitCxZbzuxvMMhMs/zzECgYEA8Sak
-UhXFtNjxBW6EMNmTpjhShIZmxtPNzTJ5DtmARr8F+SMELp3JGJj/9Bm4TsvqJmGs
-j9g/MVvSTjJlOuYPGJ5DBl3egZ5ZlRJx3I2qA4lFFCb71OJzuoR8YdHRlHnhJOu9
-2Jyrki1wrAefby8Fe/+5vswxq2u+Qurjya716AUCgYB+E06ZGzmmLfH/6Vi/wzqC
-F+w5FAzGGNECbtv2ogReL/YktRgElgaee45ig2aTd+h0UQQmWL+Gv/3XHU7MZM+C
-MTvTHZRwGlD9h3e37q49hRUsr1pwJE6157HU91al0k9NknlBIigNY9vR2VbWW+/u
-BUMomkpWz2ax5CqScuvuUQKBgQCE+zYqPe9kpy1iPWuQNKuDQhPfGO6cPjiDK44u
-biqa2MRGetTXkBNRCS48QeKtMS3SNJKgUDOo2GXE0W2ZaTxx6vQzEpidCeGEn0NC
-yKw0fwIk9spwvt/qvxyIJNhZ9Ev/vDBYvyyt03kKpLl66ocvtfmMCbZqPWQSKs2q
-bl0UsQKBgQDDrsPnuVQiv6l0J9VrZc0f5DYZIJmQij1Rcg/fL1Dv2mEpADrH2hkY
-HI27Q15dfgvccAGbGXbZt3xi7TCLDDm+Kl9V9bR2e2EhqA84tFryiBZ5XSDRAWPU
-UIjejblTgtzrTqUd75XUkNoKvJIGrLApmQiBJRQbcbwtmt2pWbziyQ==
------END RSA PRIVATE KEY-----
diff --git a/tests/docker-dns/certs-common-name-multi/Procfile b/tests/docker-dns/certs-common-name-multi/Procfile
deleted file mode 100644
index ac1094c0897..00000000000
--- a/tests/docker-dns/certs-common-name-multi/Procfile
+++ /dev/null
@@ -1,6 +0,0 @@
-# Use goreman to run (install with `go get github.com/mattn/goreman`)
-etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-common-name-multi/server-1.crt --peer-key-file=/certs-common-name-multi/server-1.key.insecure --peer-trusted-ca-file=/certs-common-name-multi/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn etcd.local --cert-file=/certs-common-name-multi/server-1.crt --key-file=/certs-common-name-multi/server-1.key.insecure --trusted-ca-file=/certs-common-name-multi/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-common-name-multi/server-2.crt --peer-key-file=/certs-common-name-multi/server-2.key.insecure --peer-trusted-ca-file=/certs-common-name-multi/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn etcd.local --cert-file=/certs-common-name-multi/server-2.crt --key-file=/certs-common-name-multi/server-2.key.insecure --trusted-ca-file=/certs-common-name-multi/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-common-name-multi/server-3.crt --peer-key-file=/certs-common-name-multi/server-3.key.insecure --peer-trusted-ca-file=/certs-common-name-multi/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn etcd.local --cert-file=/certs-common-name-multi/server-3.crt --key-file=/certs-common-name-multi/server-3.key.insecure --trusted-ca-file=/certs-common-name-multi/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
\ No newline at end of file
diff --git a/tests/docker-dns/certs-common-name-multi/ca-csr.json b/tests/docker-dns/certs-common-name-multi/ca-csr.json
deleted file mode 100644
index ecafabaadd3..00000000000
--- a/tests/docker-dns/certs-common-name-multi/ca-csr.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "CN": "ca",
- "ca": {
- "expiry": "87600h"
- }
-}
diff --git a/tests/docker-dns/certs-common-name-multi/ca.crt b/tests/docker-dns/certs-common-name-multi/ca.crt
deleted file mode 100644
index 2e9b32003df..00000000000
--- a/tests/docker-dns/certs-common-name-multi/ca.crt
+++ /dev/null
@@ -1,23 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID0jCCArqgAwIBAgIUd3UZnVmZFo8x9MWWhUrYQvZHLrQwDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xODAxMjAwNjAwMDBaFw0yODAxMTgwNjAw
-MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
-AoIBAQCqgFTgSFl+ugXkZuiN5PXp84Zv05crwI5x2ePMnc2/3u1s7cQBvXQGCJcq
-OwWD7tjcy4K2PDC0DLRa4Mkd8JpwADmf6ojbMH/3a1pXY2B3BJQwmNPFnxRJbDZL
-Iti6syWKwyfLVb1KFCU08G+ZrWmGIXPWDiE+rTn/ArD/6WbQI1LYBFJm25NLpttM
-mA3HnWoErNGY4Z/AR54ROdQSPL7RSUZBa0Kn1riXeOJ40/05qosR2O/hBSAGkD+m
-5Rj+A6oek44zZqVzCSEncLsRJAKqgZIqsBrErAho72irEgTwv4OM0MyOCsY/9erf
-hNYRSoQeX+zUvEvgToalfWGt6kT3AgMBAAGjZjBkMA4GA1UdDwEB/wQEAwIBBjAS
-BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBRDePNja5CK4zUfO5x1vzGvdmUF
-CzAfBgNVHSMEGDAWgBRDePNja5CK4zUfO5x1vzGvdmUFCzANBgkqhkiG9w0BAQsF
-AAOCAQEAZu0a3B7Ef/z5Ct99xgzPy4z9RwglqPuxk446hBWR5TYT9fzm+voHCAwb
-MJEaQK3hvAz47qAjyR9/b+nBw4LRTMxg0WqB+UEEVwBGJxtfcOHx4mJHc3lgVJnR
-LiEWtIND7lu5Ql0eOjSehQzkJZhUb4SnXD7yk64zukQQv9zlZYZCHPDAQ9LzR2vI
-ii4yhwdWl7iiZ0lOyR4xqPB3Cx/2kjtuRiSkbpHGwWBJLng2ZqgO4K+gL3naNgqN
-TRtdOSK3j/E5WtAeFUUT68Gjsg7yXxqyjUFq+piunFfQHhPB+6sPPy56OtIogOk4
-dFCfFAygYNrFKz366KY+7CbpB+4WKA==
------END CERTIFICATE-----
diff --git a/tests/docker-dns/certs-common-name-multi/gencert.json b/tests/docker-dns/certs-common-name-multi/gencert.json
deleted file mode 100644
index 09b67267bb2..00000000000
--- a/tests/docker-dns/certs-common-name-multi/gencert.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "signing": {
- "default": {
- "usages": [
- "signing",
- "key encipherment",
- "server auth",
- "client auth"
- ],
- "expiry": "87600h"
- }
- }
-}
diff --git a/tests/docker-dns/certs-common-name-multi/gencerts.sh b/tests/docker-dns/certs-common-name-multi/gencerts.sh
deleted file mode 100755
index b2318fd0865..00000000000
--- a/tests/docker-dns/certs-common-name-multi/gencerts.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-
-if ! [[ "$0" =~ "./gencerts.sh" ]]; then
- echo "must be run from 'fixtures'"
- exit 255
-fi
-
-if ! which cfssl; then
- echo "cfssl is not installed"
- exit 255
-fi
-
-cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca
-mv ca.pem ca.crt
-openssl x509 -in ca.crt -noout -text
-
-# generate one certificate per member (m1/m2/m3.etcd.local), each with CN etcd.local
-cfssl gencert \
- --ca ./ca.crt \
- --ca-key ./ca-key.pem \
- --config ./gencert.json \
- ./server-ca-csr-1.json | cfssljson --bare ./server-1
-mv server-1.pem server-1.crt
-mv server-1-key.pem server-1.key.insecure
-
-cfssl gencert \
- --ca ./ca.crt \
- --ca-key ./ca-key.pem \
- --config ./gencert.json \
- ./server-ca-csr-2.json | cfssljson --bare ./server-2
-mv server-2.pem server-2.crt
-mv server-2-key.pem server-2.key.insecure
-
-cfssl gencert \
- --ca ./ca.crt \
- --ca-key ./ca-key.pem \
- --config ./gencert.json \
- ./server-ca-csr-3.json | cfssljson --bare ./server-3
-mv server-3.pem server-3.crt
-mv server-3-key.pem server-3.key.insecure
-
-rm -f *.csr *.pem *.stderr *.txt
diff --git a/tests/docker-dns/certs-common-name-multi/run.sh b/tests/docker-dns/certs-common-name-multi/run.sh
deleted file mode 100755
index 2ccb6b67849..00000000000
--- a/tests/docker-dns/certs-common-name-multi/run.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/sh
-rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data
-
-/etc/init.d/bind9 start
-
-# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost
-cat /dev/null >/etc/hosts
-
-goreman -f /certs-common-name-multi/Procfile start &
-
-# TODO: remove random sleeps
-sleep 7s
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-multi/ca.crt \
- --cert=/certs-common-name-multi/server-1.crt \
- --key=/certs-common-name-multi/server-1.key.insecure \
- --endpoints=https://m1.etcd.local:2379 \
- endpoint health --cluster
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-multi/ca.crt \
- --cert=/certs-common-name-multi/server-2.crt \
- --key=/certs-common-name-multi/server-2.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- put abc def
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-common-name-multi/ca.crt \
- --cert=/certs-common-name-multi/server-3.crt \
- --key=/certs-common-name-multi/server-3.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- get abc
diff --git a/tests/docker-dns/certs-common-name-multi/server-1.crt b/tests/docker-dns/certs-common-name-multi/server-1.crt
deleted file mode 100644
index f10b2727753..00000000000
--- a/tests/docker-dns/certs-common-name-multi/server-1.crt
+++ /dev/null
@@ -1,25 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEIDCCAwigAwIBAgIUaDLXBmJpHrElwENdnVk9hvAvlKcwDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xODAxMjAwNjAwMDBaFw0yODAxMTgwNjAw
-MDBaMHcxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTETMBEGA1UEAxMKZXRjZC5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQAD
-ggEPADCCAQoCggEBAOb5CdovL9QCdgsxnCBikTbJko6r5mrF+eA47gDLcVbWrRW5
-d8eZYV1Fyn5qe80O6LB6LKPrRftxyAGABKqIBCHR57E97UsICC4lGycBWaav6cJ+
-7Spkpf8cSSDjjgb4KC6VVPf9MCsHxBYSTfme8JEFE+6KjlG8Mqt2yv/5aIyRYITN
-WzXvV7wxS9aOgDdXLbojW9FJQCuzttOPfvINTyhtvUvCM8S61La5ymCdAdPpx1U9
-m5KC23k6ZbkAC8/jcOV+68adTUuMWLefPf9Ww3qMT8382k86gJgQjZuJDGUl3Xi5
-GXmO0GfrMh+v91yiaiqjsJCDp3uVcUSeH7qSkb0CAwEAAaOBqzCBqDAOBgNVHQ8B
-Af8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB
-/wQCMAAwHQYDVR0OBBYEFEwLLCuIHilzynJ7DlTrikyhy2TAMB8GA1UdIwQYMBaA
-FEN482NrkIrjNR87nHW/Ma92ZQULMCkGA1UdEQQiMCCCDW0xLmV0Y2QubG9jYWyC
-CWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAkERnrIIvkZHWsyih
-mFNf/JmFHC+0/UAG9Ti9msRlr9j1fh+vBIid3FAIShX0zFXf+AtN/+Bz5SVvQHUT
-tm71AK/vER1Ue059SIty+Uz5mNAjwtXy0WaUgSuF4uju7MkYD5yUnSGv1iBfm88a
-q+q1Vd5m6PkOCfuyNQQm5RKUiJiO4OS+2F9/JOpyr0qqdQthOWr266CqXuvVhd+Z
-oZZn5TLq5GHCaTxfngSqS3TXl55QEGl65SUgYdGqpIfaQt3QKq2dqVg/syLPkTJt
-GNJVLxJuUIu0PLrfuWynUm+1mOOfwXd8NZVZITUxC7Tl5ecFbTaOzU/4a7Cyssny
-Wr3dUg==
------END CERTIFICATE-----
diff --git a/tests/docker-dns/certs-common-name-multi/server-1.key.insecure b/tests/docker-dns/certs-common-name-multi/server-1.key.insecure
deleted file mode 100644
index 61f2da4dfa4..00000000000
--- a/tests/docker-dns/certs-common-name-multi/server-1.key.insecure
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEA5vkJ2i8v1AJ2CzGcIGKRNsmSjqvmasX54DjuAMtxVtatFbl3
-x5lhXUXKfmp7zQ7osHoso+tF+3HIAYAEqogEIdHnsT3tSwgILiUbJwFZpq/pwn7t
-KmSl/xxJIOOOBvgoLpVU9/0wKwfEFhJN+Z7wkQUT7oqOUbwyq3bK//lojJFghM1b
-Ne9XvDFL1o6AN1ctuiNb0UlAK7O2049+8g1PKG29S8IzxLrUtrnKYJ0B0+nHVT2b
-koLbeTpluQALz+Nw5X7rxp1NS4xYt589/1bDeoxPzfzaTzqAmBCNm4kMZSXdeLkZ
-eY7QZ+syH6/3XKJqKqOwkIOne5VxRJ4fupKRvQIDAQABAoIBAQCYQsXm6kJqTbEJ
-kgutIa0+48TUfqen7Zja4kyrg3HU4DI75wb6MreHqFFj4sh4FoL4i6HP8XIx3wEN
-VBo/XOj0bo6BPiSm2MWjvdxXa0Fxa/f6uneYAb+YHEps/vWKzJ6YjuLzlBnj0/vE
-3Q5AJzHJOAK6tuY5JYp1lBsggYcVWiQSW6wGQRReU/B/GdFgglL1chqL33Dt11Uv
-Y6+oJz/PyqzPLPHcPbhqyQRMOZXnhx+8/+ooq5IojqOHfpa9JQURcHY7isBnpI/G
-ZAa8tZctgTqtL4hB1rxDhdq1fS2YC12lxkBZse4jszcm0tYzy2gWmNTH480uo/0J
-GOxX7eP1AoGBAO7O+aLhQWrspWQ//8YFbPWNhyscQub+t6WYjc0wn9j0dz8vkhMw
-rh5O8uMcZBMDQdq185BcB3aHInw9COWZEcWNIen4ZyNJa5VCN4FY0a2GtFSSGG3f
-ilKmQ7cjB950q2jl1AR3t2H7yah+i1ZChzPx+GEe+51LcJZX8mMjGvwjAoGBAPeZ
-qJ2W4O2dOyupAfnKpZZclrEBqlyg7Xj85u20eBMUqtaIEcI/u2kaotQPeuaekUH0
-b1ybr3sJBTp3qzHUaNV3iMfgrnbWEOkIV2TCReWQb1Fk93o3gilMIkhGLIhxwWpM
-UpQy3JTjGG/Y6gIOs7YnOBGVMA0o+RvouwooU6ifAoGAH6D6H0CGUYsWPLjdP3To
-gX1FMciEc+O4nw4dede+1BVM1emPB0ujRBBgywOvnXUI+9atc6k8s84iGyJaU056
-tBeFLl/gCSRoQ1SJ1W/WFY2JxMm0wpig0WGEBnV1TVlWeoY2FoFkoG2gv9hCzCHz
-lkWuB+76lFKxjrgHOmoj4NECgYB+COmbzkGQsoh8IPuwe0bu0xKh54cgv4oiHBow
-xbyZedu8eGcRyf9L8RMRfw/AdNbcC+Dj8xvQNTdEG8Y5BzaV8tLda7FjLHRPKr/R
-ulJ6GJuRgyO2Qqsu+mI5B/+DNOSPh2pBpeJCp5a42GHFylYQUsZnrNlY2ZJ0cnND
-KGPtYQKBgQDL30+BB95FtRUvFoJIWwASCp7TIqW7N7RGWgqmsXU0EZ0Mya4dquqG
-rJ1QuXQIJ+xV060ehwJR+iDUAY2xUg3/LCoDD0rwBzSdh+NEKjOmRNFRtn7WT03Q
-264E80r6VTRSN4sWQwAAbd1VF1uGO5tkzZdJGWGhQhvTUZ498dE+9Q==
------END RSA PRIVATE KEY-----
diff --git a/tests/docker-dns/certs-common-name-multi/server-2.crt b/tests/docker-dns/certs-common-name-multi/server-2.crt
deleted file mode 100644
index e319fade463..00000000000
--- a/tests/docker-dns/certs-common-name-multi/server-2.crt
+++ /dev/null
@@ -1,25 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEIDCCAwigAwIBAgIUHXDUS+Vry/Tquc6S6OoaeuGozrEwDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xODAxMjAwNjAwMDBaFw0yODAxMTgwNjAw
-MDBaMHcxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTETMBEGA1UEAxMKZXRjZC5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQAD
-ggEPADCCAQoCggEBAOO+FsO+6pwpv+5K+VQTYQb0lT0BjnM7Y2qSZIiTGCDp/M0P
-yHSed4oTzxBeA9hEytczH/oddAUuSZNgag5sGFVgjFNdiZli4wQqJaMQRodivuUl
-ZscqnWwtP3GYVAfg+t/4YdGB+dQRDQvHBl9BRYmUh2ixOA98OXKfNMr+u+3sh5Gy
-dwx5ZEBRvgBcRrgCaIMsvVeIzHQBMHrNySAD1bGgm3xGdLeVPhAp24yUKZ5IbN6/
-+5hyCRARtGwLH/1Q/h10Sr5jxQi00eEXH+CNOvcerH6b2II/BxHIcqKd0u36pUfG
-0KsY+ia0fvYi510V6Q0FAn45luEjHEk5ITN/LnMCAwEAAaOBqzCBqDAOBgNVHQ8B
-Af8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB
-/wQCMAAwHQYDVR0OBBYEFE69SZun6mXZe6cd3Cb2HWrK281MMB8GA1UdIwQYMBaA
-FEN482NrkIrjNR87nHW/Ma92ZQULMCkGA1UdEQQiMCCCDW0yLmV0Y2QubG9jYWyC
-CWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAI5nHHULV7eUJMsvv
-zk1shv826kOwXbMX10iRaf49/r7TWBq0pbPapvf5VXRsZ5wlDrDzjaNstpsaow/j
-fhZ1zpU0h1bdifxE+omFSWZjpVM8kQD/yzT34VdyA+P2HuxG8ZTa8r7wTGrooD60
-TjBBM5gFV4nGVe+KbApQ26KWr+P8biKaWe6MM/jAv6TNeXiWReHqyM5v404PZQXK
-cIN+fBb8bQfuaKaN1dkOUI3uSHmVmeYc5OGNJ2QKL9Uzm1VGbbM+1BOLhmF53QSm
-5m2B64lPKy+vpTcRLN7oW1FHZOKts+1OEaLMCyjWFKFbdcrmJI+AP2IB+V6ODECn
-RwJDtA==
------END CERTIFICATE-----
diff --git a/tests/docker-dns/certs-common-name-multi/server-2.key.insecure b/tests/docker-dns/certs-common-name-multi/server-2.key.insecure
deleted file mode 100644
index 57c3e78cb32..00000000000
--- a/tests/docker-dns/certs-common-name-multi/server-2.key.insecure
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpQIBAAKCAQEA474Ww77qnCm/7kr5VBNhBvSVPQGOcztjapJkiJMYIOn8zQ/I
-dJ53ihPPEF4D2ETK1zMf+h10BS5Jk2BqDmwYVWCMU12JmWLjBColoxBGh2K+5SVm
-xyqdbC0/cZhUB+D63/hh0YH51BENC8cGX0FFiZSHaLE4D3w5cp80yv677eyHkbJ3
-DHlkQFG+AFxGuAJogyy9V4jMdAEwes3JIAPVsaCbfEZ0t5U+ECnbjJQpnkhs3r/7
-mHIJEBG0bAsf/VD+HXRKvmPFCLTR4Rcf4I069x6sfpvYgj8HEchyop3S7fqlR8bQ
-qxj6JrR+9iLnXRXpDQUCfjmW4SMcSTkhM38ucwIDAQABAoIBAQCHYF6N2zYAwDyL
-/Ns65A4gIVF5Iyy3SM0u83h5St7j6dNRXhltYSlz1ZSXiRtF+paM16IhflKSJdKs
-nXpNumm4jpy7jXWWzRZfSmJ3DNyv673H3rS6nZVYUYlOEBubV1wpuK8E5/tG2R/l
-KVibVORuBPF9BSNq6RAJF6Q9KrExmvH4MmG/3Y+iYbZgn0OK1WHxzbeMzdI8OO4z
-eg4gTKuMoRFt5B4rZmC5QiXGHdnUXRWfy+yPLTH3hfTek4JT98akFNS01Q4UAi9p
-5cC3TOqDNiZdAkN83UKhW9TNAc/vJlq6d5oXW5R+yPt+d8yMvEch4KfpYo33j0oz
-qB40pdJRAoGBAP8ZXnWXxhzLhZ4o+aKefnsUUJjaiVhhSRH/kGAAg65lc4IEnt+N
-nzyNIwz/2vPv2Gq2BpStrTsTNKVSZCKgZhoBTavP60FaszDSM0bKHTWHW7zaQwc0
-bQG6YvvCiP0iwEzXw7S4BhdAl+x/5C30dUZgKMSDFzuBI187h6dQQNZpAoGBAOSL
-/MBuRYBgrHIL9V1v9JGDBeawGc3j2D5c56TeDtGGv8WGeCuE/y9tn+LcKQ+bCGyi
-qkW+hobro/iaXODwUZqSKaAVbxC7uBLBTRB716weMzrnD8zSTOiMWg/gh+FOnr/4
-ZfcBco2Pmm5qQ3ZKwVk2jsfLhz6ZKwMrjSaO1Zp7AoGBAJZsajPjRHI0XN0vgkyv
-Mxv2lbQcoYKZE1JmpcbGZt/OePdBLEHcq/ozq2h98qmHU9FQ9r5zT0QXhiK6W8vD
-U5GgFSHsH+hQyHtQZ+YlRmYLJEBPX9j+xAyR0M5uHwNNm6F0VbXaEdViRHOz0mR6
-0zClgUSnnGp9MtN0MgCqJSGJAoGAJYba3Jn+rYKyLhPKmSoN5Wq3KFbYFdeIpUzJ
-+GdB1aOjj4Jx7utqn1YHv89YqqhRLM1U2hjbrAG7LdHi2Eh9jbzcOt3qG7xHEEVP
-Kxq6ohdfYBean44UdMa+7wZ2KUeoh2r5CyLgtV/UArdOFnlV4Bk2PpYrwdqSlnWr
-Op6PcksCgYEA6HmIHLRTGyOUzS82BEcs5an2mzhQ8XCNdYS6sDaYSiDu2qlPukyZ
-jons6P4qpOxlP9Cr6DW7px2fUZrEuPUV8fRJOc+a5AtZ5TmV6N1uH/G1rKmmAMCc
-jGAmTJW87QguauTpuUto5u6IhyO2CRsYEy8K1A/1HUQKl721faZBIMA=
------END RSA PRIVATE KEY-----
diff --git a/tests/docker-dns/certs-common-name-multi/server-3.crt b/tests/docker-dns/certs-common-name-multi/server-3.crt
deleted file mode 100644
index 294de533239..00000000000
--- a/tests/docker-dns/certs-common-name-multi/server-3.crt
+++ /dev/null
@@ -1,25 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEIDCCAwigAwIBAgIURfpNMXGb1/oZVwEWyc0Ofn7IItQwDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xODAxMjAwNjAwMDBaFw0yODAxMTgwNjAw
-MDBaMHcxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTETMBEGA1UEAxMKZXRjZC5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQAD
-ggEPADCCAQoCggEBALgCDkDM4qayF6CFt1ZScKR8B+/7qrn1iQ/qYnzRHQ1hlkuS
-b3TkQtt7amGAuoD42d8jLYYvHn2Pbmdhn0mtgYZpFfLFCg4O67ZbX54lBHi+yDEh
-QhneM9Ovsc42A0EVvabINYtKR6B2YRN00QRXS5R1t+QmclpshFgY0+ITsxlJeygs
-wojXthPEfjTQK04JUi5LTHP15rLVzDEd7MguCWdEWRnOu/mSfPHlyz2noUcKuy0M
-awsnSMwf+KBwQMLbJhTXtA4MG2FYsm/2en3/oAc8/0Z8sMOX05F+b0MgHl+a31aQ
-UHM5ykfDNm3hGQfzjQCx4y4hjDoFxbuXvsey6GMCAwEAAaOBqzCBqDAOBgNVHQ8B
-Af8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB
-/wQCMAAwHQYDVR0OBBYEFDMydqyg/s43/dJTMt25zJubI/CUMB8GA1UdIwQYMBaA
-FEN482NrkIrjNR87nHW/Ma92ZQULMCkGA1UdEQQiMCCCDW0zLmV0Y2QubG9jYWyC
-CWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAVs3VQjgx9CycaWKS
-P6EvMtlqOkanJEe3zr69sI66cc2ZhfJ5xK38ox4oYpMOA131WRvwq0hjKhhZoVQ8
-aQ4yALi1XBltuIyEyrTX9GWAMeDzY95MdWKhyI8ps6/OOoXN596g9ZdOdIbZAMT4
-XAXm43WccM2W2jiKCEKcE4afIF8RiMIaFwG8YU8oHtnnNvxTVa0wrpcObtEtIzC5
-RJxzX9bkHCTHTgJog4OPChU4zffn18U/AVJ7MZ8gweVwhc4gGe0kwOJE+mLHcC5G
-uoFSuVmAhYrH/OPpZhSDOaCED4dsF5jN25CbR3NufEBFRXBH20ZHNkNvbbBnYCBU
-4+Rx5w==
------END CERTIFICATE-----
diff --git a/tests/docker-dns/certs-common-name-multi/server-3.key.insecure b/tests/docker-dns/certs-common-name-multi/server-3.key.insecure
deleted file mode 100644
index f931adb3881..00000000000
--- a/tests/docker-dns/certs-common-name-multi/server-3.key.insecure
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpQIBAAKCAQEAuAIOQMziprIXoIW3VlJwpHwH7/uqufWJD+pifNEdDWGWS5Jv
-dORC23tqYYC6gPjZ3yMthi8efY9uZ2GfSa2BhmkV8sUKDg7rtltfniUEeL7IMSFC
-Gd4z06+xzjYDQRW9psg1i0pHoHZhE3TRBFdLlHW35CZyWmyEWBjT4hOzGUl7KCzC
-iNe2E8R+NNArTglSLktMc/XmstXMMR3syC4JZ0RZGc67+ZJ88eXLPaehRwq7LQxr
-CydIzB/4oHBAwtsmFNe0DgwbYViyb/Z6ff+gBzz/Rnyww5fTkX5vQyAeX5rfVpBQ
-cznKR8M2beEZB/ONALHjLiGMOgXFu5e+x7LoYwIDAQABAoIBAQCY54RmjprNAHKn
-vlXCEpFt7W8/GXcePg2ePxuGMtKcevpEZDPgA4oXDnAxA6J3Z9LMHFRJC8Cff9+z
-YqjVtatLQOmvKdMYKYfvqfBD3ujfWVHLmaJvEnkor/flrnZ30BQfkoED9T6d9aDn
-ZQwHOm8gt82OdfBSeZhkCIWReOM73622qJhmLWUUY3xEucRAFF6XffOLvJAT87Vu
-pXKtCnQxhzxkUsCYNIOeH/pTX+XoLkysFBKxnrlbTeM0cEgWpYMICt/vsUrp6DHs
-jygxR1EnT2/4ufe81aFSO4SzUZKJrz8zj4yIyDOR0Mp6FW+xMp8S0fDOywHhLlXn
-xQOevmGBAoGBAOMQaWWs2FcxWvLfX95RyWPtkQ+XvmWlL5FR427TlLhtU6EPs0xZ
-eeanMtQqSRHlDkatwc0XQk+s30/UJ+5i1iz3shLwtnZort/pbnyWrxkE9pcR0fgr
-IklujJ8e8kQHpY75gOLmEiADrUITqvfbvSMsaG3h1VydPNU3JYTUuYmjAoGBAM91
-Atnri0PH3UKonAcMPSdwQ5NexqAD1JUk6KUoX2poXBXO3zXBFLgbMeJaWthbe+dG
-Raw/zjBET/oRfDOssh+QTD8TutI9LA2+EN7TG7Kr6NFciz4Q2pioaimv9KUhJx+8
-HH2wCANYgkv69IWUFskF0uDCW9FQVvpepcctCJJBAoGAMlWxB5kJXErUnoJl/iKj
-QkOnpI0+58l2ggBlKmw8y6VwpIOWe5ZaL4dg/Sdii1T7lS9vhsdhK8hmuIuPToka
-cV13XDuANz99hKV6mKPOrP0srNCGez0UnLKk+aEik3IegVNN/v6BhhdKkRtLCybr
-BqERhUpKwf0ZPyq6ZnfBqYECgYEAsiD2YcctvPVPtnyv/B02JTbvzwoB4kNntOgM
-GkOgKe2Ro+gNIEq5T5uKKaELf9qNePeNu2jN0gPV6BI7YuNVzmRIE6ENOJfty573
-PVxm2/Nf5ORhatlt2MZC4aiDl4Xv4f/TNth/COBmgHbqngeZyOGHQBWiYQdqp2+9
-SFgSlAECgYEA1zLhxj6f+psM5Gpx56JJIEraHfyuyR1Oxii5mo7I3PLsbF/s6YDR
-q9E64GoR5PdgCQlMm09f6wfT61NVwsYrbLlLET6tAiG0eNxXe71k1hUb6aa4DpNQ
-IcS3E3hb5KREXUH5d+PKeD2qrf52mtakjn9b2aH2rQw2e2YNkIDV+XA=
------END RSA PRIVATE KEY-----
diff --git a/tests/docker-dns/certs-common-name-multi/server-ca-csr-1.json b/tests/docker-dns/certs-common-name-multi/server-ca-csr-1.json
deleted file mode 100644
index ae9fe36e980..00000000000
--- a/tests/docker-dns/certs-common-name-multi/server-ca-csr-1.json
+++ /dev/null
@@ -1,21 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "CN": "etcd.local",
- "hosts": [
- "m1.etcd.local",
- "127.0.0.1",
- "localhost"
- ]
-}
diff --git a/tests/docker-dns/certs-common-name-multi/server-ca-csr-2.json b/tests/docker-dns/certs-common-name-multi/server-ca-csr-2.json
deleted file mode 100644
index 5d938fb8a45..00000000000
--- a/tests/docker-dns/certs-common-name-multi/server-ca-csr-2.json
+++ /dev/null
@@ -1,21 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "CN": "etcd.local",
- "hosts": [
- "m2.etcd.local",
- "127.0.0.1",
- "localhost"
- ]
- }
diff --git a/tests/docker-dns/certs-common-name-multi/server-ca-csr-3.json b/tests/docker-dns/certs-common-name-multi/server-ca-csr-3.json
deleted file mode 100644
index 7b8ffcfae9f..00000000000
--- a/tests/docker-dns/certs-common-name-multi/server-ca-csr-3.json
+++ /dev/null
@@ -1,21 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "CN": "etcd.local",
- "hosts": [
- "m3.etcd.local",
- "127.0.0.1",
- "localhost"
- ]
- }
diff --git a/tests/docker-dns/certs-gateway/Procfile b/tests/docker-dns/certs-gateway/Procfile
deleted file mode 100644
index 47b2aeba263..00000000000
--- a/tests/docker-dns/certs-gateway/Procfile
+++ /dev/null
@@ -1,8 +0,0 @@
-# Use goreman to run (install with `go get github.com/mattn/goreman`)
-etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-gateway/server.crt --peer-key-file=/certs-gateway/server.key.insecure --peer-trusted-ca-file=/certs-gateway/ca.crt --peer-client-cert-auth --cert-file=/certs-gateway/server.crt --key-file=/certs-gateway/server.key.insecure --trusted-ca-file=/certs-gateway/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-gateway/server.crt --peer-key-file=/certs-gateway/server.key.insecure --peer-trusted-ca-file=/certs-gateway/ca.crt --peer-client-cert-auth --cert-file=/certs-gateway/server.crt --key-file=/certs-gateway/server.key.insecure --trusted-ca-file=/certs-gateway/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-gateway/server.crt --peer-key-file=/certs-gateway/server.key.insecure --peer-trusted-ca-file=/certs-gateway/ca.crt --peer-client-cert-auth --cert-file=/certs-gateway/server.crt --key-file=/certs-gateway/server.key.insecure --trusted-ca-file=/certs-gateway/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-gateway: ./etcd gateway start --endpoints https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 --trusted-ca-file /certs-gateway/ca.crt --listen-addr 127.0.0.1:23790
diff --git a/tests/docker-dns/certs-gateway/ca-csr.json b/tests/docker-dns/certs-gateway/ca-csr.json
deleted file mode 100644
index ecafabaadd3..00000000000
--- a/tests/docker-dns/certs-gateway/ca-csr.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "CN": "ca",
- "ca": {
- "expiry": "87600h"
- }
-}
diff --git a/tests/docker-dns/certs-gateway/ca.crt b/tests/docker-dns/certs-gateway/ca.crt
deleted file mode 100644
index 7e3814e92d6..00000000000
--- a/tests/docker-dns/certs-gateway/ca.crt
+++ /dev/null
@@ -1,22 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDsTCCApmgAwIBAgIUClliB9ECLPuQpOrlqLkeI1ib7zYwDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzEyMDExOTE3MDBaFw0yNzExMjkxOTE3
-MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
-AoIBAQCjClF0TCk2qrHUTjFgFv2jmV0yUqnP3SG/7eVCptcFKE7kcGAx+j06GfEP
-UXmCV13cgE0dYYLtz7/g29BiZzlBLlLsmpBMM+S4nfVH9BGLbKCSnwp5ba816AuS
-rc8+qmJ0fAo56snLQWoAlnZxZ1tVjAtj5ZrQP9QDK2djgyviPS4kqWQ7Ulbeqgs7
-rGz56xAsyMTWYlotgZTnnZ3Pckr1FHXhwkO1rFK5+oMZPh2HhvXL9wv0/TMAypUv
-oQqDzUfUvYeaKr6qy1ADc53SQjqeTXg0jOShmnWM2zC7MwX+VPh+6ZApk3NLXwgv
-6wT0U1tNfvctp8JvC7FqqCEny9hdAgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS
-BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBQWI6eUGqKWkCjOKGAYd+5K6eh5
-GTANBgkqhkiG9w0BAQsFAAOCAQEAS3nIyLoGMsioLb89T1KMq+0NDDCx7R20EguT
-qUvFUYKjzdxDA1RlZ2HzPxBJRwBc0Vf98pNtkWCkwUl5hxthndNQo7F9lLs/zNzp
-bL4agho6kadIbcb4v/3g9XPSzqJ/ysfrwxZoBd7D+0PVGJjRTIJiN83Kt68IMx2b
-8mFEBiMZiSJW+sRuKXMSJsubJE3QRn862y2ktq/lEJyYR6zC0MOeYR6BPIs/B6vU
-8/iUbyk5ULc7NzWGytC+QKC3O9RTuA8MGF1aFaNSK7wDyrAlBZdxjWi52Mz3lJCK
-ffBaVfvG55WKjwAqgNU17jK/Rxw1ev9mp4aCkXkD0KUTGLcoZw==
------END CERTIFICATE-----
diff --git a/tests/docker-dns/certs-gateway/gencert.json b/tests/docker-dns/certs-gateway/gencert.json
deleted file mode 100644
index 09b67267bb2..00000000000
--- a/tests/docker-dns/certs-gateway/gencert.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "signing": {
- "default": {
- "usages": [
- "signing",
- "key encipherment",
- "server auth",
- "client auth"
- ],
- "expiry": "87600h"
- }
- }
-}
diff --git a/tests/docker-dns/certs-gateway/gencerts.sh b/tests/docker-dns/certs-gateway/gencerts.sh
deleted file mode 100755
index af8663e09eb..00000000000
--- a/tests/docker-dns/certs-gateway/gencerts.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-if ! [[ "$0" =~ "./gencerts.sh" ]]; then
- echo "must be run from 'fixtures'"
- exit 255
-fi
-
-if ! which cfssl; then
- echo "cfssl is not installed"
- exit 255
-fi
-
-cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca
-mv ca.pem ca.crt
-openssl x509 -in ca.crt -noout -text
-
-# generate a certificate with SANs m1.etcd.local, m2.etcd.local, m3.etcd.local
-cfssl gencert \
- --ca ./ca.crt \
- --ca-key ./ca-key.pem \
- --config ./gencert.json \
- ./server-ca-csr.json | cfssljson --bare ./server
-mv server.pem server.crt
-mv server-key.pem server.key.insecure
-
-rm -f *.csr *.pem *.stderr *.txt
diff --git a/tests/docker-dns/certs-gateway/run.sh b/tests/docker-dns/certs-gateway/run.sh
deleted file mode 100755
index 94fdc32eefd..00000000000
--- a/tests/docker-dns/certs-gateway/run.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/sh
-rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data
-
-/etc/init.d/bind9 start
-
-# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost
-cat /dev/null >/etc/hosts
-
-goreman -f /certs-gateway/Procfile start &
-
-# TODO: remove random sleeps
-sleep 7s
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-gateway/ca.crt \
- --cert=/certs-gateway/server.crt \
- --key=/certs-gateway/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379 \
- endpoint health --cluster
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-gateway/ca.crt \
- --cert=/certs-gateway/server.crt \
- --key=/certs-gateway/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- put abc def
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-gateway/ca.crt \
- --cert=/certs-gateway/server.crt \
- --key=/certs-gateway/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- get abc
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-gateway/ca.crt \
- --cert=/certs-gateway/server.crt \
- --key=/certs-gateway/server.key.insecure \
- --endpoints=127.0.0.1:23790 \
- put ghi jkl
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-gateway/ca.crt \
- --cert=/certs-gateway/server.crt \
- --key=/certs-gateway/server.key.insecure \
- --endpoints=127.0.0.1:23790 \
- get ghi
diff --git a/tests/docker-dns/certs-gateway/server-ca-csr.json b/tests/docker-dns/certs-gateway/server-ca-csr.json
deleted file mode 100644
index 77cdb408cf0..00000000000
--- a/tests/docker-dns/certs-gateway/server-ca-csr.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "hosts": [
- "m1.etcd.local",
- "m2.etcd.local",
- "m3.etcd.local",
- "127.0.0.1",
- "localhost"
- ]
-}
diff --git a/tests/docker-dns/certs-gateway/server.crt b/tests/docker-dns/certs-gateway/server.crt
deleted file mode 100644
index 688a5afe641..00000000000
--- a/tests/docker-dns/certs-gateway/server.crt
+++ /dev/null
@@ -1,25 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEKTCCAxGgAwIBAgIUDOkW+H3KLeHEwsovqOUMKKfEuqQwDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzEyMDExOTE3MDBaFw0yNzExMjkxOTE3
-MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANfu298kCxFY
-KXAmdG5BeqnFoezAJQCtgv+ZRS0+OB4hVsahnNSsztEfIJnVSvYJTr1u+TGSbzBZ
-q85ua3S92Mzo/71yoDlFjj1JfBmPdL1Ij1256LAwUYoPXgcACyiKpI1DnTlhwTvU
-G41teQBo+u4sxr9beuNpLlehVbknH9JkTNaTbF9/B5hy5hQPomGvzPzzBNAfrb2B
-EyqabnzoX4qv6cMsQSJrcOYQ8znnTPWa5WFP8rWujsvxOUjxikQn8d7lkzy+PHwq
-zx69L9VzdoWyJgQ3m73SIMTgP+HL+OsxDfmbu++Ds+2i2Dgf/vdJku/rP+Wka7vn
-yCM807xi96kCAwEAAaOByTCBxjAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI
-KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFAH+dsuv
-L6qvUmB/w9eKl83+MGTtMB8GA1UdIwQYMBaAFBYjp5QaopaQKM4oYBh37krp6HkZ
-MEcGA1UdEQRAMD6CDW0xLmV0Y2QubG9jYWyCDW0yLmV0Y2QubG9jYWyCDW0zLmV0
-Y2QubG9jYWyCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAh049
-srxFkiH9Lp8le3fJkuY25T/MUrmfa10RdNSKgj3qcpCMnf9nQjIWtaQsjoZJ5MQc
-VIT3gWDWK8SWlpx+O2cVEQDG0ccv7gc38YGywVhMoQ5HthTAjLCbNk4TdKJOIk7D
-hmfs7BHDvjRPi38CFklLzdUQaVCcvB43TNA3Y9M75oP/UGOSe3lJz1KKXOI/t+vA
-5U3yxwXlVNJVsZgeWAbXN9F6WbCZDsz+4Obpk/LV1NLqgLd/hHXzoOOWNw977S2b
-+dOd95OJ/cq09OzKn/g26NgtHOl0xqol7wIwqJhweEEiVueyFxXD04jcsxdAFZSJ
-9H6q3inNQaLyJHSYWQ==
------END CERTIFICATE-----
diff --git a/tests/docker-dns/certs-gateway/server.key.insecure b/tests/docker-dns/certs-gateway/server.key.insecure
deleted file mode 100644
index 6c0c16c0ba7..00000000000
--- a/tests/docker-dns/certs-gateway/server.key.insecure
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEA1+7b3yQLEVgpcCZ0bkF6qcWh7MAlAK2C/5lFLT44HiFWxqGc
-1KzO0R8gmdVK9glOvW75MZJvMFmrzm5rdL3YzOj/vXKgOUWOPUl8GY90vUiPXbno
-sDBRig9eBwALKIqkjUOdOWHBO9QbjW15AGj67izGv1t642kuV6FVuScf0mRM1pNs
-X38HmHLmFA+iYa/M/PME0B+tvYETKppufOhfiq/pwyxBImtw5hDzOedM9ZrlYU/y
-ta6Oy/E5SPGKRCfx3uWTPL48fCrPHr0v1XN2hbImBDebvdIgxOA/4cv46zEN+Zu7
-74Oz7aLYOB/+90mS7+s/5aRru+fIIzzTvGL3qQIDAQABAoIBABO8azA79R8Ctdbg
-TOf+6B04SRKAhWFIep6t/ZqjAzINzgadot31ZXnLpIkq640NULsTt4cGYU9EAuX9
-RakH6RbhfO5t2aMiblu/qa4UZJEgXqosYc4ovGsn+GofYOW1tlCLC4XBH44+Vr5Y
-cSTOc5DtWsUGsXazmF6+Cj3AC7KI+VWegHexGezyO0not8Q5L55TuH2lCW4sx9th
-W4Q7jg2lrCvz4x8ZRIAXOGmBaDTZmMtVlEjezu+7xr8QDQsvUwj7a87HPjgXFesj
-CbbCr8kaqEdZ23AVDZuLAKS4hWQlbacRhRAxMkomZkg5U6J/PC3ikIqfOda1zu1D
-MTIOuwECgYEA8hFkISWVEzbaIZgO1BZl36wNaOLYIpX0CzlycptcEssbefLy7Nxo
-TZ+m9AjF6TBPl4fO4edo00iiJMy6ZdhItduNWLO+usJEY9UdzHex7fCUeG8usUXQ
-g4VGEvPGg88VEM45pkAgbga7kzkG2Ihfu6La5apbXeOpNpuC58DdlzkCgYEA5Fxl
-/qGzLlTwioaaE+qpEX46MfbJl38nkeSf9B7J1ISc/fnDPcBPvcHaYELqyHM+7OFa
-Gt9oBDrLgyP4ZgOTaHKHdofXjAMC97b9oa/Lrors5dMrf/fxTTe2X+Kab94E1Wbo
-39kA3qzV/CT7EZWuqbHO3Bqkv/qe6ks0Tbahc/ECgYBuB2OpAWkyc6NQ08ohsxCZ
-S55Ix5uQlPJ5y6Hu4BlI3ZNeqgSrjz/F0MTVdctnxDLZYLyzyDjImOJCseAj/NyH
-9QTZhdIzF6x4aF2EG///dHQ4Del+YIp3zbNdV/sq3Izpt6NSoyFagarvL2OiNtK0
-+kBfVkDze1Dl5mfpKaxPWQKBgQC+gXqxJxKE92VIGyxUqzHqHwTLg9b/ZJuNMU5j
-aH/1o8AYfJFtZY7gfeUA4zJckRAQq5rwyilLRgVbXNmvuRHzU4BA2OhvrF+Aag9D
-IJXqAYnJ3RXwBtcuFOk3KqKt6mjb4qMpgy4flc5aMDunmtiARo6MvklswtZqHN0A
-a/ha8QKBgQCqF/xCf5ORzVkikYYGsO910QXlzsyPdRJbhrBCRTsdhz/paT5GQQXr
-y3ToUuKEoHfjFudUeGNOstjchWw+WgT9iqMJhtwV1nU1lkPyjmCQ2ONIP+13dZ+i
-I/LDyMngtOKzvD5qpswY1Er+84+RVrtseQjXDC2NlrvDr5LnZDtGag==
------END RSA PRIVATE KEY-----
diff --git a/tests/docker-dns/certs-san-dns/Procfile b/tests/docker-dns/certs-san-dns/Procfile
deleted file mode 100644
index 32298f8cbb9..00000000000
--- a/tests/docker-dns/certs-san-dns/Procfile
+++ /dev/null
@@ -1,6 +0,0 @@
-# Use goreman to run (install with `go get github.com/mattn/goreman`)
-etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-san-dns/server-1.crt --peer-key-file=/certs-san-dns/server-1.key.insecure --peer-trusted-ca-file=/certs-san-dns/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn etcd.local --cert-file=/certs-san-dns/server-1.crt --key-file=/certs-san-dns/server-1.key.insecure --trusted-ca-file=/certs-san-dns/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-san-dns/server-2.crt --peer-key-file=/certs-san-dns/server-2.key.insecure --peer-trusted-ca-file=/certs-san-dns/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn etcd.local --cert-file=/certs-san-dns/server-2.crt --key-file=/certs-san-dns/server-2.key.insecure --trusted-ca-file=/certs-san-dns/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-san-dns/server-3.crt --peer-key-file=/certs-san-dns/server-3.key.insecure --peer-trusted-ca-file=/certs-san-dns/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn etcd.local --cert-file=/certs-san-dns/server-3.crt --key-file=/certs-san-dns/server-3.key.insecure --trusted-ca-file=/certs-san-dns/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
\ No newline at end of file
diff --git a/tests/docker-dns/certs-san-dns/ca-csr.json b/tests/docker-dns/certs-san-dns/ca-csr.json
deleted file mode 100644
index ecafabaadd3..00000000000
--- a/tests/docker-dns/certs-san-dns/ca-csr.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "CN": "ca",
- "ca": {
- "expiry": "87600h"
- }
-}
diff --git a/tests/docker-dns/certs-san-dns/ca.crt b/tests/docker-dns/certs-san-dns/ca.crt
deleted file mode 100644
index 2eaf8172cf4..00000000000
--- a/tests/docker-dns/certs-san-dns/ca.crt
+++ /dev/null
@@ -1,22 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDrjCCApagAwIBAgIUV77P/m6U+QIMz7Ql0Q6xC3GO/fAwDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTEwMDcyMjIyMDBaFw0yOTEwMDQyMjIy
-MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
-AoIBAQDEN9lZnkS16bi42zl+iGlYSHGJn0uxiqhff1KRJlwbEBXr3ywllJLgnAA3
-XEQsBMYk0yEB82380flVJd6UMt+0n6bo5Mp2Z+X8eXZgVgB4uLz0APRhozO89I2D
-wk74aTrV3wseCmN9ZOvG+2b1AzM6rwwnozhnoC2qlZ5yNZRSKMTRX+ZcDQ6FQopk
-Kg+ACGyiU94bLJkd4Vj7oSOiParjtj1laGE88QAL8clkcT6enHlwVJDs7BF3SRBI
-sBKlUnyC47mjR4v9KKkeZ7LHBcW9D7FZZYNg85mubVHfj8rZb1EAF+Kqskd6YpYz
-ZezQVdJOyUrp8/+mSBaS2HpF4HjpAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
-BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTr390x+ChxCV+AkCnxh+5vgtoiyTAN
-BgkqhkiG9w0BAQsFAAOCAQEAq+o4uF9xkJ/SzGgBePb3r/F0aNcBIY3XmCsGE4gd
-0U/tqkGP10BKlermi87ADLxjBux+2n6eAHycac9mDynOr1d5GUVHK8BrAzKeabuP
-Q8J2NQyVXpRF9z2EolLpw7J1n5CYJqsVMBjov33AKk9SmCFg3O4wD6oladWXT/Ie
-ld2+EUS6TLzPNsU+AoPx64L0Aru05ynpPnlUB+DSXCBUckffmGgv0HEd5bU3QOl4
-9SUx35lk8nh7x+sHQblijuNNLi7bTIhzQTolJTCo3rd8YgSdnof0z5bROVTwymD5
-tWshIE4BP+ri+1NPKCe2KlcP3MIynKtx+obr5cLZjDHWoA==
------END CERTIFICATE-----
diff --git a/tests/docker-dns/certs-san-dns/gencert.json b/tests/docker-dns/certs-san-dns/gencert.json
deleted file mode 100644
index 09b67267bb2..00000000000
--- a/tests/docker-dns/certs-san-dns/gencert.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "signing": {
- "default": {
- "usages": [
- "signing",
- "key encipherment",
- "server auth",
- "client auth"
- ],
- "expiry": "87600h"
- }
- }
-}
diff --git a/tests/docker-dns/certs-san-dns/gencerts.sh b/tests/docker-dns/certs-san-dns/gencerts.sh
deleted file mode 100755
index b2318fd0865..00000000000
--- a/tests/docker-dns/certs-san-dns/gencerts.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-
-if ! [[ "$0" =~ "./gencerts.sh" ]]; then
- echo "must be run from 'fixtures'"
- exit 255
-fi
-
-if ! which cfssl; then
- echo "cfssl is not installed"
- exit 255
-fi
-
-cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca
-mv ca.pem ca.crt
-openssl x509 -in ca.crt -noout -text
-
-# generate one certificate per member (m1/m2/m3.etcd.local)
-cfssl gencert \
- --ca ./ca.crt \
- --ca-key ./ca-key.pem \
- --config ./gencert.json \
- ./server-ca-csr-1.json | cfssljson --bare ./server-1
-mv server-1.pem server-1.crt
-mv server-1-key.pem server-1.key.insecure
-
-cfssl gencert \
- --ca ./ca.crt \
- --ca-key ./ca-key.pem \
- --config ./gencert.json \
- ./server-ca-csr-2.json | cfssljson --bare ./server-2
-mv server-2.pem server-2.crt
-mv server-2-key.pem server-2.key.insecure
-
-cfssl gencert \
- --ca ./ca.crt \
- --ca-key ./ca-key.pem \
- --config ./gencert.json \
- ./server-ca-csr-3.json | cfssljson --bare ./server-3
-mv server-3.pem server-3.crt
-mv server-3-key.pem server-3.key.insecure
-
-rm -f *.csr *.pem *.stderr *.txt
diff --git a/tests/docker-dns/certs-san-dns/run.sh b/tests/docker-dns/certs-san-dns/run.sh
deleted file mode 100755
index 5d0a3d47d4f..00000000000
--- a/tests/docker-dns/certs-san-dns/run.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/sh
-rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data
-
-/etc/init.d/bind9 start
-
-# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost
-cat /dev/null >/etc/hosts
-echo "127.0.0.1 m1.etcd.local" >> /etc/hosts
-echo "127.0.0.1 m2.etcd.local" >> /etc/hosts
-echo "127.0.0.1 m3.etcd.local" >> /etc/hosts
-
-goreman -f /certs-san-dns/Procfile start &
-# TODO: remove random sleeps
-sleep 7s
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-san-dns/ca.crt \
- --cert=/certs-san-dns/server-1.crt \
- --key=/certs-san-dns/server-1.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- endpoint health --cluster
-
-printf "\nPut abc \n"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-san-dns/ca.crt \
- --cert=/certs-san-dns/server-2.crt \
- --key=/certs-san-dns/server-2.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- put abc def
-
-printf "\nGet abc \n"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-san-dns/ca.crt \
- --cert=/certs-san-dns/server-3.crt \
- --key=/certs-san-dns/server-3.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- get abc
-
-printf "\nKill etcd server 1...\n"
-kill $(lsof -t -i:2379)
-sleep 7s
-
-printf "\nGet abc after killing server 1\n"
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-san-dns/ca.crt \
- --cert=/certs-san-dns/server-2.crt \
- --key=/certs-san-dns/server-2.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- get abc
-printf "\n\nDone!!!\n\n"
-
diff --git a/tests/docker-dns/certs-san-dns/server-1.crt b/tests/docker-dns/certs-san-dns/server-1.crt
deleted file mode 100644
index c99fef83403..00000000000
--- a/tests/docker-dns/certs-san-dns/server-1.crt
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEDzCCAvegAwIBAgIUYSODFGYUNAEskvyamAAxpZ8/86swDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTEwMDcyMjIyMDBaFw0yOTEwMDQyMjIy
-MDBaMHcxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTETMBEGA1UEAxMKZXRjZC5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQAD
-ggEPADCCAQoCggEBAMNEq66ZcntXibYne3W9L53JyMmGrJJi2FbVAEv76OraVnO5
-7qJNXjXZ3bOhQ3WDawbWBA5lNi1mwZcKVxM41PQXpez/6/ZkZliwNQFsDZ3WgPIx
-mfcWWnoVPEKFrJTnKZm5/o+50w07yMGZLCgIS66oIcOGJ3G35/NKm+T94yKnRV2m
-M1YvkmgU69MwQwbvGh1fypKB734wVp9Yz46FTuAoY8I63feYrSHKHXZf70rm3Kqm
-iTU3jixWq86aI1dIRbAqObc5pgSoBwAczLjWvhhcO7n9KRkyzxjg+ZFPwRHiBWi1
-ZU70D4XHZMdcAgu+2/IBXfGBZbKOyq9WN65N9tUCAwEAAaOBmjCBlzAOBgNVHQ8B
-Af8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB
-/wQCMAAwHQYDVR0OBBYEFJzBC8YE22RmOwKyxnH0SPC08zE4MB8GA1UdIwQYMBaA
-FOvf3TH4KHEJX4CQKfGH7m+C2iLJMBgGA1UdEQQRMA+CDW0xLmV0Y2QubG9jYWww
-DQYJKoZIhvcNAQELBQADggEBAKvIARZDTNcGAcu5SkrjB/mWlq7GaLqgnGARvMQ0
-O5IC6hPsOcIsTnGKzert2xkc6y7msYMOl4ddP5PgSIfpCtkmL6bACoros4ViWwl5
-Lg0YF3PQvwSL+h2StTE2pGrNp/eQL8HJD2Lhyac2vTAq01Vbh3ySrfQP9zjoH8U7
-+mJJk9VWAagU+ww17kq5VZL9iJnlFSxVLNo6dcNo/dU6eWqKWoZjAHl+/zhoSOuZ
-tBRshTcFuLbBe59ULFoZ+Mt5Sa4+OuN5Jir4hQH6DS1ETd7hwsSvHf6KcIw9fIXz
-h+PZ0ssNDq4Yr7i3dQS5xAQO1aO35Ru9q2ABt20E1dQGIyY=
------END CERTIFICATE-----
diff --git a/tests/docker-dns/certs-san-dns/server-1.key.insecure b/tests/docker-dns/certs-san-dns/server-1.key.insecure
deleted file mode 100644
index 575ee5e82f7..00000000000
--- a/tests/docker-dns/certs-san-dns/server-1.key.insecure
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpQIBAAKCAQEAw0Srrplye1eJtid7db0vncnIyYaskmLYVtUAS/vo6tpWc7nu
-ok1eNdnds6FDdYNrBtYEDmU2LWbBlwpXEzjU9Bel7P/r9mRmWLA1AWwNndaA8jGZ
-9xZaehU8QoWslOcpmbn+j7nTDTvIwZksKAhLrqghw4Yncbfn80qb5P3jIqdFXaYz
-Vi+SaBTr0zBDBu8aHV/KkoHvfjBWn1jPjoVO4Chjwjrd95itIcoddl/vSubcqqaJ
-NTeOLFarzpojV0hFsCo5tzmmBKgHABzMuNa+GFw7uf0pGTLPGOD5kU/BEeIFaLVl
-TvQPhcdkx1wCC77b8gFd8YFlso7Kr1Y3rk321QIDAQABAoIBAQCl3c4LqNKDDQ+w
-SAdqMsKgwIerD5fFXOsxjwsKgDgQTljDQrv+58NP8PmOnTxFNNWT3/VgGP8VP8TP
-vPvMGylhEjligN151TzOtxa/V36VhWDQ2etT5IwEScd/Jjc74MQIjeI7SfiJtC/K
-q4bDlpBbEvxjLrCQu0vu8IBN2o+2nWx8l7Jy0VrDuw5LQM90ZA7OcU7H2kE1ehbp
-M5waHE0tdgHzlLqrVl0RlXh/FlIG7/cfQRL1rpD5T8llD7XshF2BhtXerk+QtC9b
-It8xGnhd6e9Yk96KIN/3U/W5DORYwtq1r54r1OxZkUX3C0RqU2P3EcNvBHbbZydm
-6xq6EfDBAoGBAM3LIHo4v96YPNHtj+GI9ZRr+o9UMrl3dcRTMkbEjFIqBdON8GrS
-fdLSvZms+wqU8v7gNEZhhk7U9Y71pHUZsN5WAGHMCC6Q6/5lY2ObEEitrV7btrUe
-75JNlSq52JT7L9NZRhD5ACqw9qrdUq0mNyPtrSV/J2DfubuBWcSLf58lAoGBAPLo
-MGLyzuG5WTwPAkcB/T3Z5kNFlr8po9tuso5WDuXws7nIPR8yb6UIvP7JqWOgaHyh
-YBA4aKC1T8gpAwVxZxJ9bbntxt13sxyuMZgA/CGn6FXCPbhAztnQDle81QcsMGXK
-y2YbeMUVuMrowcjK6g8J9E9AkB4SDvme+xhEQgHxAoGBAIxtzRa5/Ov3dKFH+8PK
-QtJqMIt3yDlZNEqo/wjdfGdg96LaG7G5O1UOq4TfTlt1MrAL7IAOcqj+lyZbp0Kl
-KlU92Hrj0L199RwesYi5uo3tvf2Z7n5/wrlSKbUDJrDbC1Kse6x/TcbUBS6pYo53
-Im9o85s/vm5TnJk/9jKxgn/lAoGAVUbutc5IkzZe/ZbHVeZ84Zn+HN/xbGtR+1eB
-mDbeRBuc/TwvOSSbzXSj5U8nCLLn+9krwIYNNV5yA/Nh/Ccz6Gnge8XeayH637bH
-8nVmDurDxlfLE0StWgqQ/nxszXfWBeaMQeyjGY3mslXEspmKUn1MKAaikewFFd2a
-iYptIgECgYEAr81jSoXyHSKpEEHzy5hyH+JOsUeWZVFduqkTTHNZe7MlXSSSZdhW
-6TCjnA9HpzBlgTI8PwXXKEa2G7OCr4dHFBJSWCgzQTfd1hf5xiE7ca2bxiEC7SKF
-H3TvfLCi9Dky9uFAXsp6SlI/x6Abm6CpqTlR19KyCo64LztaAmRkmNU=
------END RSA PRIVATE KEY-----
diff --git a/tests/docker-dns/certs-san-dns/server-2.crt b/tests/docker-dns/certs-san-dns/server-2.crt
deleted file mode 100644
index 9c15aa05bb7..00000000000
--- a/tests/docker-dns/certs-san-dns/server-2.crt
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEDzCCAvegAwIBAgIUDrW+8pB5rh4jfT8GQ3R9EqRLuzkwDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTEwMDcyMjIyMDBaFw0yOTEwMDQyMjIy
-MDBaMHcxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTETMBEGA1UEAxMKZXRjZC5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQAD
-ggEPADCCAQoCggEBANHtpC3LDlC1MqHx/LT6vWA9DJApziy9Vh0f0SC1hFjRiFGp
-yA8d4uWHg7ebEVj/hWyJPrYpNMSDXhmJVa8UtE6G3B2ZS4WZsjfKMYs0ydu8mjjV
-FlfC6vuDGX3gUdI7XhW1KCmnFI0XfRaskS/khY31SMyblAZ0hDpRz/nQ3vyMSS7+
-xYgPn7SHNrJFz8+K3NB35lbvkBvYZvVJ0mONeIMB1BffHILzexiaXyHXeKTPw9yI
-FSRTDlXQqY9afNpAAv12xW2Xa9chuQ5Q+5P8syRqePgjR+TVJkeUCpLunNHcxZTD
-DoXqJjOlqy6OzdFGnGzvtDh/1/QL880/e6jOCcUCAwEAAaOBmjCBlzAOBgNVHQ8B
-Af8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB
-/wQCMAAwHQYDVR0OBBYEFNoiUFY7gFUJUtJpBXFVIFipiFo/MB8GA1UdIwQYMBaA
-FOvf3TH4KHEJX4CQKfGH7m+C2iLJMBgGA1UdEQQRMA+CDW0yLmV0Y2QubG9jYWww
-DQYJKoZIhvcNAQELBQADggEBAGUisaOqg4ps1XTqRnqnk/zajC0MeyayE4X2VLtO
-gq04wT7N9nUmFAiL2uUBzK4IKrb68ZIGQbm/3NNJrKxauWvK79J9xbeOAnOQWIAx
-VFA7uGw0JpiYFk6W9YzTCpNlIWEOEw5RaNIj8F5dAFqgqNDqd1zw1+04jIGlBTpD
-v3LQjr8IvB/cmvnugwAnb8cKDlr1GO322/1otrJi2BpmjAi4FQmuxdyQTmgkQU7T
-k2whauuwDrwVmc+LyoObbiiaJPi60lSABIttbUmFqWo9U+mBcbAtFE6EW6Wo1gFR
-q7uKqwYjARW/h/amHhyiHkNnu+TjY1SL2+kk+EBAt0SSmq8=
------END CERTIFICATE-----
diff --git a/tests/docker-dns/certs-san-dns/server-2.key.insecure b/tests/docker-dns/certs-san-dns/server-2.key.insecure
deleted file mode 100644
index 131ea6f4bf3..00000000000
--- a/tests/docker-dns/certs-san-dns/server-2.key.insecure
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEA0e2kLcsOULUyofH8tPq9YD0MkCnOLL1WHR/RILWEWNGIUanI
-Dx3i5YeDt5sRWP+FbIk+tik0xINeGYlVrxS0TobcHZlLhZmyN8oxizTJ27yaONUW
-V8Lq+4MZfeBR0jteFbUoKacUjRd9FqyRL+SFjfVIzJuUBnSEOlHP+dDe/IxJLv7F
-iA+ftIc2skXPz4rc0HfmVu+QG9hm9UnSY414gwHUF98cgvN7GJpfIdd4pM/D3IgV
-JFMOVdCpj1p82kAC/XbFbZdr1yG5DlD7k/yzJGp4+CNH5NUmR5QKku6c0dzFlMMO
-heomM6WrLo7N0UacbO+0OH/X9AvzzT97qM4JxQIDAQABAoIBAQCYEZ9mlLsv97JP
-4a1/pErelhqtq7rwac8hldS17weKF266SVTkrm+YeYwOysPMRnzuXJUS+9J/r/HQ
-ac2p4EOkxshYoJ02kFmrVEqDXqADDyJgnOtsc4Qo4ZTrvD1JHzxOWUZYtfGLK0Kv
-1B3wJYghh1dO8DxQWMMYQ/92JboCEoVmO/vAcUH5V4qhZMEGvFm8AiaXnVi13myt
-OAlfyQQ1CsnOoxxQhomzqNVrMjPelv5yLAq1Z5gXSeylc6y8NVWKsLbWJUj5IhqH
-bmCw2V/1snJCJews/S/4wgDBibjldlUEPfjNwBoeRTl9DB6uCHzUiF98PB8MoDx5
-VaJiRHZZAoGBAOqVcgB+3gJ9Pf+6bUdL4NhKdr4wje2IAbeidQMXOsbp455b7NLj
-/Z92tKOGJ2HBdGBzGkA4JbHcy/HBxDm6DXKWIIqYcOubDDDiBAYtEJhLG3Mqz4p8
-sp1QUICQoskCAP4gHc8/AeXKp1CQoU1dJksC4mZ66KQMdYaJ1f7gNxJ7AoGBAOUX
-9mLDFjqpJ7IPt02I4yn/tlFI3GLwuO/yxEuCGt8T2CAXkc/cp+ojEI29ckwYpqv6
-D+FRPYqNN+c6OJWAR4U4OiuRQlShGZmBvn11BIn7ILZ3KnxvFXKkOzzFNU5oYczE
-/L/z2SSKQfGlgDWmKWIoWt5D3TjMA7xysTgQIcC/AoGAFgyV+pXyKCm9ehv7yYfI
-Sow1PQszS/BMuQX8GZ5FWA0D6A6b4/aqECMIN5aUfQvB9I7dGMwuPtmSEdc0qnhi
-azLRPDW3521bZ/zWg/4YYTguDFUpzMqLv12dM3hk1J/rl/dM1f4GH6M8tsXhY3Qt
-9T8AKMHEvCavpUWvZ5WLl6ECgYAgxmzZdE+Z1Nl5AAaZcRwOxiavOl1NSmMq8PBk
-XRi7EXu6G6Ugt9DODnYv0QqpGF2//OaItba4O7vjuNCfktqolIK9+OokcWfYLley
-WytrEiJ7+FB7vOi0ngpbh1s4/HYBda0zSQ+nyp/kkmjlRABnqp5VbiAYIBfovf/c
-pXIuwQKBgQCGJBX7vmFcsL1qdG5d8jQr2K/dbTcU8sXQzUIXGQcCxePYOrO8Rcn2
-EMXAGIdOn6i2x0/rNn+EnPHhT6XC0hSOu52srL8BB9tbDYk3i+3ghUG5QI4dp+GQ
-D1+HZD3SVrqjWlTU0aBB/NYMldIo9e3LU1ZUXTm2Rmg6Mre9ann6/w==
------END RSA PRIVATE KEY-----
diff --git a/tests/docker-dns/certs-san-dns/server-3.crt b/tests/docker-dns/certs-san-dns/server-3.crt
deleted file mode 100644
index 3fb516db993..00000000000
--- a/tests/docker-dns/certs-san-dns/server-3.crt
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEDzCCAvegAwIBAgIUNPjXxMAkrxdr1sZA7Gw+gYbVeLAwDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTEwMDcyMjIyMDBaFw0yOTEwMDQyMjIy
-MDBaMHcxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTETMBEGA1UEAxMKZXRjZC5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQAD
-ggEPADCCAQoCggEBALwQOtWoCcO13D/7i96Bkb376WvoqYJw+yN9kYwVkpM1+EQd
-3hzSNT0byRGeNtlXAd8tY/SpjTM7mnq5yIhNjhJ2eo5GO1YuJyDJe9WnfQ30rVfv
-WzCV/BiwloaqX/tlgCJ3PVNAZdyCZ+ouRIggBUHCQo88LuKwpM9QrUmBCGFLD/M2
-PYKewGv+h9JwMRLxp5mARBS+bkUsQy9F7U/GZs/9xULXIo9l3Bj8Zqz6UMmtW+Y2
-lkK5wawG04bZwkr8lUzMC2AVKFidTuZsda9GP4OxKclW0ro0HtlYaiI7+a0xONZ6
-yuj4cYrs1KZ9z3uYji1Li8XFUb4g/v9dar0oK70CAwEAAaOBmjCBlzAOBgNVHQ8B
-Af8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB
-/wQCMAAwHQYDVR0OBBYEFATpeRk6Bxgf8LHU/wlw0iLQltEoMB8GA1UdIwQYMBaA
-FOvf3TH4KHEJX4CQKfGH7m+C2iLJMBgGA1UdEQQRMA+CDW0zLmV0Y2QubG9jYWww
-DQYJKoZIhvcNAQELBQADggEBADjH3ytTogX2BqnhYaVia31Zjy240iViU6BNCARq
-PdBB5WCtti7yzonfS9Uytc9YLB4ln4Z0wZpRk3O0QGehHX5CDT5EL5zKwDQdoYG3
-oKx9qOu2VyxDA/1hYdPvMW3aq4g/oE8nFjNbrFEVCuGLbJdfDnyJJFsvNRNqs8hS
-xpfYLNH9lD4sD13vul7RJQJrvCjbaqQp9oLe9NZ9f+cBPGqATkicMWbABq4xbpCE
-IY19SHk0WHRSem5jlbfF3O58Ow+LRR/Bn2/IYKpyidEixxu9VX06BDRH5GmG7wBd
-5Y9YhmeyPCXiHHPar7m/Rmel82RLI+/qomKh9pii3u357yY=
------END CERTIFICATE-----
diff --git a/tests/docker-dns/certs-san-dns/server-3.key.insecure b/tests/docker-dns/certs-san-dns/server-3.key.insecure
deleted file mode 100644
index b64e3bad369..00000000000
--- a/tests/docker-dns/certs-san-dns/server-3.key.insecure
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAvBA61agJw7XcP/uL3oGRvfvpa+ipgnD7I32RjBWSkzX4RB3e
-HNI1PRvJEZ422VcB3y1j9KmNMzuaernIiE2OEnZ6jkY7Vi4nIMl71ad9DfStV+9b
-MJX8GLCWhqpf+2WAInc9U0Bl3IJn6i5EiCAFQcJCjzwu4rCkz1CtSYEIYUsP8zY9
-gp7Aa/6H0nAxEvGnmYBEFL5uRSxDL0XtT8Zmz/3FQtcij2XcGPxmrPpQya1b5jaW
-QrnBrAbThtnCSvyVTMwLYBUoWJ1O5mx1r0Y/g7EpyVbSujQe2VhqIjv5rTE41nrK
-6PhxiuzUpn3Pe5iOLUuLxcVRviD+/11qvSgrvQIDAQABAoIBAG1ny7JsFXIjpEQc
-pJwHKLArkvnR2nsmGxPkgv3JtwGpDgsijQqbR5mLRofXUPVTZqVdFJ9K2/gIHrBy
-0DRrWdFn15hZRz+1jdHHJSGAVIH/67AScSxstMHwSUGCcGAiBk8Gq0h5WEjWHHnh
-/MBsUGKXDn2hd20tclOhDY6LYEKolRPFjfBmPRdhdR5A6RS+U+jx1yFsWa6cUjv6
-kInlE5yMdhEOuA/QnVvcaAsKb5CKAuCtAkmFH3fjDp3nkhYFXJy4DTsVRMAfsr5s
-SpsKt272URd5fLeZ5QlOb82QCvJr9GushkkKk7N5TMh5C/r74zpROdLTRlXD4I2q
-yvnSv8kCgYEA+HRjeRRxujVWo7YSnHYJ/xConrCSekfRMvIXvSq43E+I/t5SlPl8
-YoJYhGWzZ7A/szqTvTW/v2blScd+X4KiK0TX8tTQFvWEBBcZhLILUB/ZiIfi/6ZG
-fxe+BAmTMSBThknnRsvAA4jkTvErdpBhhRltyjdLunEEjnfSzJJORHMCgYEAwcZU
-TpAfo4ni1Am9Nskk/5LjmPX5u+qfPNJfe6dfO+BoMA51XuAagqZhdsSwTGoxs5xQ
-cKmNFA6QmAQnPZK7+QYwmDUXb8/Dtz/d5jylsZdYRHYr4hx3DcKFFEyhlPqrj44k
-HxparrkDIq7nVz1t3YMVXYJM/5k2cx/VHlTD8w8CgYEA6Ypl0nNwL4thpENKHT4r
-SVG8XmY1WbHWKCA+Rjc5SwWMDZ6nW5dj3ykM0W7Tg5y9U9i09L7oPZ8X2hEmbdra
-Wve8UWrPKzWe4UVhXEULs0Ys8VRiANKoI2EK4LqrXBs5x9oCBp8RH4F2semqZCl1
-MWpktBbkHR2NHenuARNpdJcCgYBzlY3sXuPAdRssR7Lp3wmGuWOxdefFQ6pAaWwz
-Ih8YZD9Bix5PvXWSwRQZ+DEBI8cJ0A/bZAeXEykExFVz0Pb3D84kvGaCd3fS8vG1
-yC89w30POT3r3fbV6lXfSeaIKw3yz2KUeu/kkM9h/NpZm3bRTsOLx5GOVSG5gh9p
-vD412QKBgFxq4rsxJC6+QZvRZaJDcmTHSytbAw3B5Lyv6G+xLBUqc27KjQzCved1
-9Ofzy7KEC3AtKiq3Y0q5q01Rzk5ZYCh6lVe2tw36Muw1bvZjqblGm9X2VRO8Ui2Q
-4WOdvIP4z5ZTJQXdIahKAYOyxiYFIvCkvS5SYoKkgWNSzFNKvQtH
------END RSA PRIVATE KEY-----
diff --git a/tests/docker-dns/certs-san-dns/server-ca-csr-1.json b/tests/docker-dns/certs-san-dns/server-ca-csr-1.json
deleted file mode 100644
index 6927354930b..00000000000
--- a/tests/docker-dns/certs-san-dns/server-ca-csr-1.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "CN": "etcd.local",
- "hosts": [
- "m1.etcd.local"
- ]
-}
diff --git a/tests/docker-dns/certs-san-dns/server-ca-csr-2.json b/tests/docker-dns/certs-san-dns/server-ca-csr-2.json
deleted file mode 100644
index 4e6debef1ed..00000000000
--- a/tests/docker-dns/certs-san-dns/server-ca-csr-2.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "CN": "etcd.local",
- "hosts": [
- "m2.etcd.local"
- ]
- }
diff --git a/tests/docker-dns/certs-san-dns/server-ca-csr-3.json b/tests/docker-dns/certs-san-dns/server-ca-csr-3.json
deleted file mode 100644
index af67a615b8e..00000000000
--- a/tests/docker-dns/certs-san-dns/server-ca-csr-3.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "CN": "etcd.local",
- "hosts": [
- "m3.etcd.local"
- ]
- }
diff --git a/tests/docker-dns/certs-wildcard/Procfile b/tests/docker-dns/certs-wildcard/Procfile
deleted file mode 100644
index d53a22a799e..00000000000
--- a/tests/docker-dns/certs-wildcard/Procfile
+++ /dev/null
@@ -1,6 +0,0 @@
-# Use goreman to run `go get github.com/mattn/goreman`
-etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-wildcard/server.crt --peer-key-file=/certs-wildcard/server.key.insecure --peer-trusted-ca-file=/certs-wildcard/ca.crt --peer-client-cert-auth --cert-file=/certs-wildcard/server.crt --key-file=/certs-wildcard/server.key.insecure --trusted-ca-file=/certs-wildcard/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-wildcard/server.crt --peer-key-file=/certs-wildcard/server.key.insecure --peer-trusted-ca-file=/certs-wildcard/ca.crt --peer-client-cert-auth --cert-file=/certs-wildcard/server.crt --key-file=/certs-wildcard/server.key.insecure --trusted-ca-file=/certs-wildcard/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-wildcard/server.crt --peer-key-file=/certs-wildcard/server.key.insecure --peer-trusted-ca-file=/certs-wildcard/ca.crt --peer-client-cert-auth --cert-file=/certs-wildcard/server.crt --key-file=/certs-wildcard/server.key.insecure --trusted-ca-file=/certs-wildcard/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
\ No newline at end of file
diff --git a/tests/docker-dns/certs-wildcard/ca-csr.json b/tests/docker-dns/certs-wildcard/ca-csr.json
deleted file mode 100644
index ecafabaadd3..00000000000
--- a/tests/docker-dns/certs-wildcard/ca-csr.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "CN": "ca",
- "ca": {
- "expiry": "87600h"
- }
-}
diff --git a/tests/docker-dns/certs-wildcard/ca.crt b/tests/docker-dns/certs-wildcard/ca.crt
deleted file mode 100644
index 23ee34f4a4d..00000000000
--- a/tests/docker-dns/certs-wildcard/ca.crt
+++ /dev/null
@@ -1,22 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDsTCCApmgAwIBAgIUanA77pXfEz2idrPSlIoPrSo6MmcwDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTMwNDA5MDBaFw0yNzExMTEwNDA5
-MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
-AoIBAQDqtw5G6XZ4N2uuc7TAoiXI+IXA/H+IJIbHrVFQ3LIzLDaS6AmVWw4yT4o2
-X/1IbR5TU6dCnGxuHPutnfnG87is5Oxk1HfIy5cfpf75St3uQycJRcr3Bui/fEZ0
-IZaoRyklcYGI8Y+VfaSADl++EP7UU0X7cc263rZulJXkqp4HihDTPixBgVDruNWf
-Yfa2K/Zhiq+zj3hE6s/cBn2pIdY6SMlQ1P0uT/Y5oBTTJFBxeqw+Sz/NXgKgErQg
-Za/gNHQWzyRoYHiOGQylvsiXr6tgdk29f0Z6gTQy8FQpwOXYERJr45zh8KvE+FJK
-MaWUhGW7hkv85JDZSsmDZ6lVYIfhAgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS
-BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBS+p7B3RLjI8HOOPvVhqtBQNRmH
-ZTANBgkqhkiG9w0BAQsFAAOCAQEAFWHLvzzTRQJYjVDxBuXrNZkhFsGAoCYoXhAK
-1nXmqLb9/dPMxjkB4ptkQNuP8cMCMPMlapoLkHxEihN1sWZwJRfWShRTK2cQ2kd6
-IKH/M3/ido1PqN/CxhfqvMj3ap3ZkV81nvwn3XhciCGca1CyLzij9RroO0Ee+R3h
-mK5A38I1YeRMNOnNAJAW+5scaVtPe6famG2p/OcswobF+ojeZIQJcuk7/FP5iXGA
-UfG5WaW3bVfSr5aUGtf/RYZvYu3kWZlAzGaey5iLutRc7f63Ma4jjEEauiGLqQ+6
-F17Feafs2ibRr1wes11O0B/9Ivx9qM/CFgEYhJfp/nBgY/UZXw==
------END CERTIFICATE-----
diff --git a/tests/docker-dns/certs-wildcard/gencert.json b/tests/docker-dns/certs-wildcard/gencert.json
deleted file mode 100644
index 09b67267bb2..00000000000
--- a/tests/docker-dns/certs-wildcard/gencert.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "signing": {
- "default": {
- "usages": [
- "signing",
- "key encipherment",
- "server auth",
- "client auth"
- ],
- "expiry": "87600h"
- }
- }
-}
diff --git a/tests/docker-dns/certs-wildcard/gencerts.sh b/tests/docker-dns/certs-wildcard/gencerts.sh
deleted file mode 100755
index af8663e09eb..00000000000
--- a/tests/docker-dns/certs-wildcard/gencerts.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-if ! [[ "$0" =~ "./gencerts.sh" ]]; then
- echo "must be run from 'fixtures'"
- exit 255
-fi
-
-if ! which cfssl; then
- echo "cfssl is not installed"
- exit 255
-fi
-
-cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca
-mv ca.pem ca.crt
-openssl x509 -in ca.crt -noout -text
-
-# generate wildcard certificates DNS: *.etcd.local
-cfssl gencert \
- --ca ./ca.crt \
- --ca-key ./ca-key.pem \
- --config ./gencert.json \
- ./server-ca-csr.json | cfssljson --bare ./server
-mv server.pem server.crt
-mv server-key.pem server.key.insecure
-
-rm -f *.csr *.pem *.stderr *.txt
diff --git a/tests/docker-dns/certs-wildcard/run.sh b/tests/docker-dns/certs-wildcard/run.sh
deleted file mode 100755
index 683a4d28af9..00000000000
--- a/tests/docker-dns/certs-wildcard/run.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/sh
-rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data
-
-/etc/init.d/bind9 start
-
-# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost
-cat /dev/null >/etc/hosts
-
-goreman -f /certs-wildcard/Procfile start &
-
-# TODO: remove random sleeps
-sleep 7s
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-wildcard/ca.crt \
- --cert=/certs-wildcard/server.crt \
- --key=/certs-wildcard/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379 \
- endpoint health --cluster
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-wildcard/ca.crt \
- --cert=/certs-wildcard/server.crt \
- --key=/certs-wildcard/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- put abc def
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-wildcard/ca.crt \
- --cert=/certs-wildcard/server.crt \
- --key=/certs-wildcard/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- get abc
diff --git a/tests/docker-dns/certs-wildcard/server-ca-csr.json b/tests/docker-dns/certs-wildcard/server-ca-csr.json
deleted file mode 100644
index 616bf11f8f1..00000000000
--- a/tests/docker-dns/certs-wildcard/server-ca-csr.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "hosts": [
- "*.etcd.local",
- "127.0.0.1",
- "localhost"
- ]
-}
diff --git a/tests/docker-dns/certs-wildcard/server.crt b/tests/docker-dns/certs-wildcard/server.crt
deleted file mode 100644
index a51cd0b9492..00000000000
--- a/tests/docker-dns/certs-wildcard/server.crt
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIECjCCAvKgAwIBAgIUQ0AgAKntDzHW4JxYheDkVMow5ykwDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTMwNDA5MDBaFw0yNzExMTEwNDA5
-MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANMqNEozhdLm
-K5ATSkgIOyQmBmoUCgiWB+P52YWzfmwaWwQP2FFs3qih2c3DHHH7s2zdceXKT2ZN
-lvSO8yj08slLPYSC4LQ3su8njGJlasJ28JMjRqshnH3umxFXf9+aPcZ5yYkoXE9V
-fzsnBMJz8hI6K2j4Q6sJe+v/0pdz8MpbdIPnmL9qfVpuD6JqmDCZiQOJ8lpMuqqD
-60uLjtLv/JKjgdqe5C4psERVm09fg3vOZckv9CC6a4MupeXo2il6femZnPrxC8LX
-u2KT3njEjoyzEu2NSdy+BUJDVLgKSh8s2TC8ViNfiFONQo6L1y78ZAyCDrRbTgN9
-Nu1Ou/yzqHkCAwEAAaOBqjCBpzAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI
-KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFC83cRfE
-/EKcz7GJKmgDLUBi3kRSMB8GA1UdIwQYMBaAFL6nsHdEuMjwc44+9WGq0FA1GYdl
-MCgGA1UdEQQhMB+CDCouZXRjZC5sb2NhbIIJbG9jYWxob3N0hwR/AAABMA0GCSqG
-SIb3DQEBCwUAA4IBAQCI7estG86E9IEGREfYul1ej8hltpiAxucmsI0i0lyRHOGa
-dss3CKs6TWe5LWXThCIJ2WldI/VgPe63Ezz7WuP3EJxt9QclYArIklS/WN+Bjbn7
-6b8KAtGQkFh7hhjoyilBixpGjECcc7lbriXoEpmUZj9DYQymXWtjKeUJCfQjseNS
-V/fmsPph8QveN+pGCypdQ9EA4LGXErg4DQMIo40maYf9/uGBMIrddi930llB0wAh
-lsGNUDkrKKJVs2PiVsy8p8sF1h7zAQ+gSqk3ZuWjrTqIIMHtRfIaNICimc7wEy1t
-u5fbySMusy1PRAwHVdl5yPxx++KlHyBNowh/9OJh
------END CERTIFICATE-----
diff --git a/tests/docker-dns/certs-wildcard/server.key.insecure b/tests/docker-dns/certs-wildcard/server.key.insecure
deleted file mode 100644
index ac56ed4ea32..00000000000
--- a/tests/docker-dns/certs-wildcard/server.key.insecure
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEA0yo0SjOF0uYrkBNKSAg7JCYGahQKCJYH4/nZhbN+bBpbBA/Y
-UWzeqKHZzcMccfuzbN1x5cpPZk2W9I7zKPTyyUs9hILgtDey7yeMYmVqwnbwkyNG
-qyGcfe6bEVd/35o9xnnJiShcT1V/OycEwnPyEjoraPhDqwl76//Sl3Pwylt0g+eY
-v2p9Wm4PomqYMJmJA4nyWky6qoPrS4uO0u/8kqOB2p7kLimwRFWbT1+De85lyS/0
-ILprgy6l5ejaKXp96Zmc+vELwte7YpPeeMSOjLMS7Y1J3L4FQkNUuApKHyzZMLxW
-I1+IU41CjovXLvxkDIIOtFtOA3027U67/LOoeQIDAQABAoIBAH/sM104NTv8QCu5
-4+gbRGizuHMOzL1C1mjfdU0v3chzduvRBYTeZUzXL/Ec3+CVUK8Ev/krREp/epGQ
-//Gx4lrbf9sExkem7nk/Biadtb00/KzGVAtcA0evArXQwiCdegsAwHycvL861ibp
-jlKWlvE/2AhxTd0Rk8b2ZYdmr1qGTesIy7S4ilj1B8aYWnZglhSyyU7TqLhYmsWo
-3B1ufNpkPCzo97bJmc1/bqXCIQXi/HkkDxJRFa/vESebiy2wdgkWflybW37vLaN0
-mox44uXpVYtZuuGyxdKjX6T2EOglZztXlC8gdxrnFS5leyBEu+7ABS5OvHgnlOX5
-80MyUpkCgYEA/4xpEBltbeJPH52Lla8VrcW3nGWPnfY8xUSnjKBspswTQPu389EO
-ayM3DewcpIfxFu/BlMzKz0lm77QQZIu3gIJoEu8IXzUa3fJ2IavRKPSvbNFj5Icl
-kVX+mE4BtF+tnAjDWiX9qaNXZcU7b0/q0yXzL35WB4H7Op4axqBir/sCgYEA04m3
-4LtRKWgObQXqNaw+8yEvznWdqVlJngyKoJkSVnqwWRuin9eZDfc84genxxT0rGI9
-/3Fw8enfBVIYGLR5V5aYmGfYyRCkN4aeRc0zDlInm0x2UcZShT8D0LktufwRYZh8
-Ui6+iiIBELwxyyWfuybH5hhstbdFazfu1yNA+xsCgYB47tORYNceVyimh4HU9iRG
-NfjsNEvArxSXLiQ0Mn74eD7sU7L72QT/wox9NC1h10tKVW/AoSGg8tWZvha73jqa
-wBvMSf4mQBVUzzcEPDEhNpoF3xlsvmAS5SU0okXAPD8GRkdcU/o02y2y5aF4zdMM
-1Tq+UQUZTHO9i7CUKrZJHQKBgQC+FueRn0ITv1oXRlVs3dfDi3L2SGLhJ0csK4D3
-SBZed+m4aUj98jOrhRzE0LRIBeDId4/W2A3ylYK/uUHGEYdo2f9OFSONqtKmwuW/
-O+JBYDoPJ+q7GUhWTIYVLhKVKppD5U7yWucGIgBrFXJ5Ztnex76iWhh2Qray3pRV
-52whOQKBgHVBI4F7pkn6id9W4sx2LqrVjpjw6vTDepIRK0SXBIQp34WnCL5CERDJ
-pks203i42Ww7IadufepkGQOfwuik9wVRNWrNp4oKle6oNK9oK3ihuyb+5DtyKwDm
-5sQUYUXc5E3qDQhHCGDzbT7wP+bCDnWKgvV6smshuQSW8M+tFIOQ
------END RSA PRIVATE KEY-----
diff --git a/tests/docker-dns/certs/Procfile b/tests/docker-dns/certs/Procfile
deleted file mode 100644
index a7f68a6c989..00000000000
--- a/tests/docker-dns/certs/Procfile
+++ /dev/null
@@ -1,6 +0,0 @@
-# Use goreman to run `go get github.com/mattn/goreman`
-etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
-
-etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
\ No newline at end of file
diff --git a/tests/docker-dns/certs/ca-csr.json b/tests/docker-dns/certs/ca-csr.json
deleted file mode 100644
index ecafabaadd3..00000000000
--- a/tests/docker-dns/certs/ca-csr.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "CN": "ca",
- "ca": {
- "expiry": "87600h"
- }
-}
diff --git a/tests/docker-dns/certs/ca.crt b/tests/docker-dns/certs/ca.crt
deleted file mode 100644
index 4a17292de68..00000000000
--- a/tests/docker-dns/certs/ca.crt
+++ /dev/null
@@ -1,22 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDsTCCApmgAwIBAgIUCeu/ww6+XbCM3m8m6fp17t8bjOcwDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTMwNDA2MDBaFw0yNzExMTEwNDA2
-MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
-AoIBAQCgH8KMvldAoQjWA5YQoEOQgRyjZ3hkKdTQcFBj3OR8OyhiNJ+4oEJ/AqyJ
-b41G9NGd+88hRSrcCeUBrUY3nWVhqzclCe7mQ1IyordmuKxekmPD/uvzcbySzpJT
-qGEwNEiiBcr4mSQiGA5yMgBLKLpKw27t0ncVn/Qt0rKtqwLUYYWGEfADLw7+6iDK
-xzCxLeXV/cB1VtFZa62j3KRJR4XQ/QosqwZw2dRGF/jUZgmsRYYK8noOvqY/uRPV
-sqwGAKq0B0zOMp185dFrzJVD+LHZgSS9GLGmvRgttwayDuYSOny7WXugQ28fCaRX
-p+53s1eBb5cHCGSko48f2329cnlFAgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS
-BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBSgglhjDWaAJm9ju5x1YMArtH7c
-yjANBgkqhkiG9w0BAQsFAAOCAQEAK6IGimbnP9oFSvwNGmXjEtn/vE82dDhQJv8k
-oiAsx0JurXBYybvu/MLaBJVQ6bF77hW/fzvhMOzLNEMGY1ql80TmfaTqyPpTN85I
-6YhXOViZEQJvH17lVA8d57aSve0WPZqBqS3xI0dGpn/Ji6JPrjKCrgjeukXXHR+L
-MScK1lpxaCjD45SMJCzANsMnIKTiKN8RnIcSmnrr/gGl7bC6Y7P84xUGgYu2hvNG
-1DZBcelmzbZYk2DtbrR0Ed6IFD1Tz4RAEuKJfInjgAP2da41j4smoecXOsJMGVl5
-5RX7ba3Hohys6la8jSS3opCPKkwEN9mQaB++iN1qoZFY4qB9gg==
------END CERTIFICATE-----
diff --git a/tests/docker-dns/certs/gencert.json b/tests/docker-dns/certs/gencert.json
deleted file mode 100644
index 09b67267bb2..00000000000
--- a/tests/docker-dns/certs/gencert.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "signing": {
- "default": {
- "usages": [
- "signing",
- "key encipherment",
- "server auth",
- "client auth"
- ],
- "expiry": "87600h"
- }
- }
-}
diff --git a/tests/docker-dns/certs/gencerts.sh b/tests/docker-dns/certs/gencerts.sh
deleted file mode 100755
index af8663e09eb..00000000000
--- a/tests/docker-dns/certs/gencerts.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-if ! [[ "$0" =~ "./gencerts.sh" ]]; then
- echo "must be run from 'fixtures'"
- exit 255
-fi
-
-if ! which cfssl; then
- echo "cfssl is not installed"
- exit 255
-fi
-
-cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca
-mv ca.pem ca.crt
-openssl x509 -in ca.crt -noout -text
-
-# generate wildcard certificates DNS: *.etcd.local
-cfssl gencert \
- --ca ./ca.crt \
- --ca-key ./ca-key.pem \
- --config ./gencert.json \
- ./server-ca-csr.json | cfssljson --bare ./server
-mv server.pem server.crt
-mv server-key.pem server.key.insecure
-
-rm -f *.csr *.pem *.stderr *.txt
diff --git a/tests/docker-dns/certs/run.sh b/tests/docker-dns/certs/run.sh
deleted file mode 100755
index 8faec3458e5..00000000000
--- a/tests/docker-dns/certs/run.sh
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/bin/sh
-rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data
-
-/etc/init.d/bind9 start
-
-# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost
-cat /dev/null >/etc/hosts
-
-goreman -f /certs/Procfile start &
-
-# TODO: remove random sleeps
-sleep 7s
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs/ca.crt \
- --cert=/certs/server.crt \
- --key=/certs/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379 \
- endpoint health --cluster
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs/ca.crt \
- --cert=/certs/server.crt \
- --key=/certs/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- put abc def
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs/ca.crt \
- --cert=/certs/server.crt \
- --key=/certs/server.key.insecure \
- --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \
- get abc
-
-printf "\nWriting v2 key...\n"
-curl -L https://127.0.0.1:2379/v2/keys/queue \
- --cacert /certs/ca.crt \
- --cert /certs/server.crt \
- --key /certs/server.key.insecure \
- -X POST \
- -d value=data
-
-printf "\nWriting v2 key...\n"
-curl -L https://m1.etcd.local:2379/v2/keys/queue \
- --cacert /certs/ca.crt \
- --cert /certs/server.crt \
- --key /certs/server.key.insecure \
- -X POST \
- -d value=data
-
-printf "\nWriting v3 key...\n"
-curl -L https://127.0.0.1:2379/v3/kv/put \
- --cacert /certs/ca.crt \
- --cert /certs/server.crt \
- --key /certs/server.key.insecure \
- -X POST \
- -d '{"key": "Zm9v", "value": "YmFy"}'
-
-printf "\n\nWriting v3 key...\n"
-curl -L https://m1.etcd.local:2379/v3/kv/put \
- --cacert /certs/ca.crt \
- --cert /certs/server.crt \
- --key /certs/server.key.insecure \
- -X POST \
- -d '{"key": "Zm9v", "value": "YmFy"}'
-
-printf "\n\nReading v3 key...\n"
-curl -L https://m1.etcd.local:2379/v3/kv/range \
- --cacert /certs/ca.crt \
- --cert /certs/server.crt \
- --key /certs/server.key.insecure \
- -X POST \
- -d '{"key": "Zm9v"}'
-
-printf "\n\nFetching 'curl https://m1.etcd.local:2379/metrics'...\n"
-curl \
- --cacert /certs/ca.crt \
- --cert /certs/server.crt \
- --key /certs/server.key.insecure \
- -L https://m1.etcd.local:2379/metrics | grep Put | tail -3
-
-printf "\n\nDone!!!\n\n"
diff --git a/tests/docker-dns/certs/server-ca-csr.json b/tests/docker-dns/certs/server-ca-csr.json
deleted file mode 100644
index 77cdb408cf0..00000000000
--- a/tests/docker-dns/certs/server-ca-csr.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "hosts": [
- "m1.etcd.local",
- "m2.etcd.local",
- "m3.etcd.local",
- "127.0.0.1",
- "localhost"
- ]
-}
diff --git a/tests/docker-dns/certs/server.crt b/tests/docker-dns/certs/server.crt
deleted file mode 100644
index 928e3cf5db6..00000000000
--- a/tests/docker-dns/certs/server.crt
+++ /dev/null
@@ -1,25 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEKTCCAxGgAwIBAgIUUwtQlOqMccWY8MOaSaWutEjlMrgwDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTMwNDA2MDBaFw0yNzExMTEwNDA2
-MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALyYH7bL79If
-75AezzSpkuTOPAGBzPlGFLM5QS4jrt6fJBpElAUV2VmZm+isVsTs2X63md1t4s3Y
-44soYK02HONUxUXxbeW7S8yJYSplG5hCJpFiSVP0GyVojQ04OLO1yI5m82fWJNi6
-9PgTmb3+/YD08TKbjjJ4FB0kqoFJE4qoUNNpbkpQxHW4cx9iyWbE9gwyGoC76ftr
-DC4J5HavmZ/y51rq1VWrO/d9rmCEUN++M8FcGt6D4WVQ54sWafl4Q1HafBq3FAT5
-swpqi6aDDFKYYTdvjFEmJ2uWacak8NO+vjTt8fTfSFBUYcxweVWIDm6xU8kR8Lwy
-aNxD26jQ9GMCAwEAAaOByTCBxjAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI
-KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFELi+Ig+
-uxXrOvjoacXjcCjtfHcsMB8GA1UdIwQYMBaAFKCCWGMNZoAmb2O7nHVgwCu0ftzK
-MEcGA1UdEQRAMD6CDW0xLmV0Y2QubG9jYWyCDW0yLmV0Y2QubG9jYWyCDW0zLmV0
-Y2QubG9jYWyCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAn6e8
-LPd53xQGiicDHN8+WkUS7crr+A+bIfY0nbWUf1H7zwNxpHHnKgVRHc4MKpRY4f+E
-M2bEYdNJZDrjFYIWWlFDteVKZevH2dB3weiCAYWPYuiR9dGH6NvVbPcEMwarPBW4
-mLsm9Nl/r7YBxXx73rhfxyBbhTuDwKtY/BAMi+ZO4msnuWiiSiQEUrEmzm9PWhAD
-CgNjxCL3xoGyIJGj1xev0PYo+iFrAd9Pkfg2+FaSYXtNPbZX229yHmxU7GbOJumx
-5vGQMRtzByq7wqw1dZpITlgbDPJc5jdIRKGnusQ96GXLORSQcP+tfG4NhreYYpI1
-69Y78gNCTl0uGmI21g==
------END CERTIFICATE-----
diff --git a/tests/docker-dns/certs/server.key.insecure b/tests/docker-dns/certs/server.key.insecure
deleted file mode 100644
index 08784a7c65d..00000000000
--- a/tests/docker-dns/certs/server.key.insecure
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEAvJgftsvv0h/vkB7PNKmS5M48AYHM+UYUszlBLiOu3p8kGkSU
-BRXZWZmb6KxWxOzZfreZ3W3izdjjiyhgrTYc41TFRfFt5btLzIlhKmUbmEImkWJJ
-U/QbJWiNDTg4s7XIjmbzZ9Yk2Lr0+BOZvf79gPTxMpuOMngUHSSqgUkTiqhQ02lu
-SlDEdbhzH2LJZsT2DDIagLvp+2sMLgnkdq+Zn/LnWurVVas7932uYIRQ374zwVwa
-3oPhZVDnixZp+XhDUdp8GrcUBPmzCmqLpoMMUphhN2+MUSYna5ZpxqTw076+NO3x
-9N9IUFRhzHB5VYgObrFTyRHwvDJo3EPbqND0YwIDAQABAoIBAQC0YCbM9YZ9CRBe
-Xik9rAYTknBv3I6Hx5BaziLaF0TUJY8pFHgh2QDVooYsZlBi7kki2kVuNAAdcxhG
-ayrz33KHtvcq6zt54sYfbTGik6tt1679k+ygQDOKdtGZWDFbKD0Wfb7FjFoXc9CC
-SHTd9DjPkvXxujepa5GJQh1Vo+ftz2I+8e6LeoiBZJM1IosfrpxKg02UnWrLia7o
-i8eoXIyMAJHuNUGpGl33WnckyMGDsVKMc2DVG2exfVBZ37lAemYOLRKmd4AwUk2l
-ztd71sXQodLk++1BqaS9cc9yvsNiBjGL3Ehm7uUcLH1k3VHd4ArcGhiqffKzQuSE
-Dhm8GXNZAoGBAMrXOAdnfLlxYKCqOaj0JwN0RusWBP3cC7jluS5UmeTROPnBa0Fb
-219YtiXkDrWtoiwLvvPXobem0/juPkiGnprGcOsPUGa6pV3TPJ40BiIfh9/vt7fr
-Bko2SqEA9U0FxredcOFoCPxX9k9EDWxhF/nD20amvRHKK/wv995iXKxHAoGBAO4F
-GILNxBHlH5F++dbSSSTcZUTXvuBr7JQkbMK+luSikEtaSW9IO2bf65LtqjaWp4Ds
-rENCQAB3PmI111Rjwrk7925W0JCHw/+UArlVoM3K2q1zhYUWAn9L3v+qUTN2TLu1
-Tso3OkCrQ5aa812tffW3hZHOWJ+aZp2nnBnruDEFAoGAGJDCD1uAJnFNs4eKHiUb
-iHaPlC8BgcEhhk4EBFFopeaU0OKU28CFK+HxhVs+UNBrgIwXny5xPm2s5EvuLRho
-ovP/fuhG43odRuSrRbmlOIK7EOrWRCbphxlWJnOYQbC+ZURjBFl2JSF+ChGC0qpb
-nfsTVlYhNcNXWl5w1XTyJkcCgYEAp07XquJeh0GqTgiWL8XC+nEdkiWhG3lhY8Sy
-2rVDtdT7XqxJYDrC3o5Ztf7vnc2KUpqKgACqomkvZbN49+3j63bWdy35Dw8P27A7
-tfEVxnJoAnJokWMmQDqhts8OowDt8SgCCSyG+vwn10518QxJtRXaguIr84yBwyIV
-HTdPUs0CgYBIAxoPD9/6R2swClvln15sjaIXDp5rYLbm6mWU8fBURU2fdUw3VBlJ
-7YVgQ4GnKiCI7NueBBNRhjXA3KDkFyZw0/oKe2uc/4Gdyx1/L40WbYOaxJD2vIAf
-FZ4pK9Yq5Rp3XiCNm0eURBlNM+fwXOQin2XdzDRoEq1B5JalQO87lA==
------END RSA PRIVATE KEY-----
diff --git a/tests/docker-dns/etcd.zone b/tests/docker-dns/etcd.zone
deleted file mode 100644
index 03c15fe8e66..00000000000
--- a/tests/docker-dns/etcd.zone
+++ /dev/null
@@ -1,14 +0,0 @@
-$TTL 86400
-@ IN SOA etcdns.local. root.etcdns.local. (
- 100500 ; Serial
- 604800 ; Refresh
- 86400 ; Retry
- 2419200 ; Expire
- 86400 ) ; Negative Cache TTL
- IN NS ns.etcdns.local.
- IN A 127.0.0.1
-
-ns IN A 127.0.0.1
-m1 IN A 127.0.0.1
-m2 IN A 127.0.0.1
-m3 IN A 127.0.0.1
diff --git a/tests/docker-dns/insecure/Procfile b/tests/docker-dns/insecure/Procfile
deleted file mode 100644
index 46d15367bd6..00000000000
--- a/tests/docker-dns/insecure/Procfile
+++ /dev/null
@@ -1,6 +0,0 @@
-# Use goreman to run `go get github.com/mattn/goreman`
-etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://m1.etcd.local:2379 --listen-peer-urls http://127.0.0.1:2380 --initial-advertise-peer-urls=http://m1.etcd.local:2380 --initial-cluster-token tkn --initial-cluster=m1=http://m1.etcd.local:2380,m2=http://m2.etcd.local:22380,m3=http://m3.etcd.local:32380 --host-whitelist "localhost,127.0.0.1,m1.etcd.local" --logger=zap --log-outputs=stderr
-
-etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://m2.etcd.local:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls=http://m2.etcd.local:22380 --initial-cluster-token tkn --initial-cluster=m1=http://m1.etcd.local:2380,m2=http://m2.etcd.local:22380,m3=http://m3.etcd.local:32380 --host-whitelist "localhost,127.0.0.1,m1.etcd.local" --logger=zap --log-outputs=stderr
-
-etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://m3.etcd.local:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls=http://m3.etcd.local:32380 --initial-cluster-token tkn --initial-cluster=m1=http://m1.etcd.local:2380,m2=http://m2.etcd.local:22380,m3=http://m3.etcd.local:32380 --host-whitelist "localhost,127.0.0.1,m1.etcd.local" --logger=zap --log-outputs=stderr
\ No newline at end of file
diff --git a/tests/docker-dns/insecure/run.sh b/tests/docker-dns/insecure/run.sh
deleted file mode 100755
index 6b247622821..00000000000
--- a/tests/docker-dns/insecure/run.sh
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/bin/sh
-rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data
-
-/etc/init.d/bind9 start
-
-# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost
-cat /dev/null >/etc/hosts
-
-goreman -f /insecure/Procfile start &
-
-# TODO: remove random sleeps
-sleep 7s
-
-ETCDCTL_API=3 ./etcdctl \
- --endpoints=http://m1.etcd.local:2379 \
- endpoint health --cluster
-
-ETCDCTL_API=3 ./etcdctl \
- --endpoints=http://m1.etcd.local:2379,http://m2.etcd.local:22379,http://m3.etcd.local:32379 \
- put abc def
-
-ETCDCTL_API=3 ./etcdctl \
- --endpoints=http://m1.etcd.local:2379,http://m2.etcd.local:22379,http://m3.etcd.local:32379 \
- get abc
-
-printf "\nWriting v2 key...\n"
-curl \
- -L http://127.0.0.1:2379/v2/keys/queue \
- -X POST \
- -d value=data
-
-printf "\nWriting v2 key...\n"
-curl \
- -L http://m1.etcd.local:2379/v2/keys/queue \
- -X POST \
- -d value=data
-
-printf "\nWriting v3 key...\n"
-curl \
- -L http://127.0.0.1:2379/v3/kv/put \
- -X POST \
- -d '{"key": "Zm9v", "value": "YmFy"}'
-
-printf "\n\nWriting v3 key...\n"
-curl \
- -L http://m1.etcd.local:2379/v3/kv/put \
- -X POST \
- -d '{"key": "Zm9v", "value": "YmFy"}'
-
-printf "\n\nReading v3 key...\n"
-curl \
- -L http://m1.etcd.local:2379/v3/kv/range \
- -X POST \
- -d '{"key": "Zm9v"}'
-
-printf "\n\nFetching 'curl http://m1.etcd.local:2379/metrics'...\n"
-curl \
- -L http://m1.etcd.local:2379/metrics | grep Put | tail -3
-
-name1=$(base64 <<< "/election-prefix")
-val1=$(base64 <<< "v1")
-data1="{\"name\":\"${name1}\", \"value\":\"${val1}\"}"
-
-printf "\n\nCampaign: ${data1}\n"
-result1=$(curl -L http://m1.etcd.local:2379/v3/election/campaign -X POST -d "${data1}")
-echo ${result1}
-
-# should not panic servers
-val2=$(base64 <<< "v2")
-data2="{\"value\": \"${val2}\"}"
-printf "\n\nProclaim (wrong-format): ${data2}\n"
-curl \
- -L http://m1.etcd.local:2379/v3/election/proclaim \
- -X POST \
- -d "${data2}"
-
-printf "\n\nProclaim (wrong-format)...\n"
-curl \
- -L http://m1.etcd.local:2379/v3/election/proclaim \
- -X POST \
- -d '}'
-
-printf "\n\nProclaim (wrong-format)...\n"
-curl \
- -L http://m1.etcd.local:2379/v3/election/proclaim \
- -X POST \
- -d '{"value": "Zm9v"}'
-
-printf "\n\nDone!!!\n\n"
diff --git a/tests/docker-dns/named.conf b/tests/docker-dns/named.conf
deleted file mode 100644
index 76ce0caa165..00000000000
--- a/tests/docker-dns/named.conf
+++ /dev/null
@@ -1,23 +0,0 @@
-options {
- directory "/var/bind";
- listen-on { 127.0.0.1; };
- listen-on-v6 { none; };
- allow-transfer {
- none;
- };
- // If you have problems and are behind a firewall:
- query-source address * port 53;
- pid-file "/var/run/named/named.pid";
- allow-recursion { none; };
- recursion no;
-};
-
-zone "etcd.local" IN {
- type main;
- file "/etc/bind/etcd.zone";
-};
-
-zone "0.0.127.in-addr.arpa" {
- type main;
- file "/etc/bind/rdns.zone";
-};
diff --git a/tests/docker-dns/rdns.zone b/tests/docker-dns/rdns.zone
deleted file mode 100644
index fb71b30b1fa..00000000000
--- a/tests/docker-dns/rdns.zone
+++ /dev/null
@@ -1,13 +0,0 @@
-$TTL 86400
-@ IN SOA etcdns.local. root.etcdns.local. (
- 100500 ; Serial
- 604800 ; Refresh
- 86400 ; Retry
- 2419200 ; Expire
- 86400 ) ; Negative Cache TTL
- IN NS ns.etcdns.local.
- IN A 127.0.0.1
-
-1 IN PTR m1.etcd.local.
-1 IN PTR m2.etcd.local.
-1 IN PTR m3.etcd.local.
diff --git a/tests/docker-dns/resolv.conf b/tests/docker-dns/resolv.conf
deleted file mode 100644
index bbc8559cd54..00000000000
--- a/tests/docker-dns/resolv.conf
+++ /dev/null
@@ -1 +0,0 @@
-nameserver 127.0.0.1
diff --git a/tests/docker-static-ip/Dockerfile b/tests/docker-static-ip/Dockerfile
deleted file mode 100644
index d5f7913be7b..00000000000
--- a/tests/docker-static-ip/Dockerfile
+++ /dev/null
@@ -1,37 +0,0 @@
-FROM ubuntu:18.04
-
-RUN rm /bin/sh && ln -s /bin/bash /bin/sh
-RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
-
-RUN apt-get -y update \
- && apt-get -y install \
- build-essential \
- gcc \
- apt-utils \
- pkg-config \
- software-properties-common \
- apt-transport-https \
- libssl-dev \
- sudo \
- bash \
- curl \
- tar \
- git \
- netcat \
- bind9 \
- dnsutils \
- && apt-get -y update \
- && apt-get -y upgrade \
- && apt-get -y autoremove \
- && apt-get -y autoclean
-
-ENV GOROOT /usr/local/go
-ENV GOPATH /go
-ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH}
-ENV GO_VERSION REPLACE_ME_GO_VERSION
-ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang
-RUN rm -rf ${GOROOT} \
- && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \
- && mkdir -p ${GOPATH}/src ${GOPATH}/bin \
- && go version \
- && go get -v -u github.com/mattn/goreman
diff --git a/tests/docker-static-ip/certs-metrics-proxy/Procfile b/tests/docker-static-ip/certs-metrics-proxy/Procfile
deleted file mode 100644
index 4cf163c174b..00000000000
--- a/tests/docker-static-ip/certs-metrics-proxy/Procfile
+++ /dev/null
@@ -1,8 +0,0 @@
-# Use goreman to run `go get github.com/mattn/goreman`
-etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://localhost:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://localhost:2380 --initial-cluster-token tkn --initial-cluster=m1=https://localhost:2380,m2=https://localhost:22380,m3=https://localhost:32380 --initial-cluster-state new --peer-cert-file=/certs-metrics-proxy/server.crt --peer-key-file=/certs-metrics-proxy/server.key.insecure --peer-trusted-ca-file=/certs-metrics-proxy/ca.crt --peer-client-cert-auth --cert-file=/certs-metrics-proxy/server.crt --key-file=/certs-metrics-proxy/server.key.insecure --trusted-ca-file=/certs-metrics-proxy/ca.crt --client-cert-auth --listen-metrics-urls=https://localhost:2378,http://localhost:9379 --logger=zap --log-outputs=stderr
-
-etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://localhost:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://localhost:22380 --initial-cluster-token tkn --initial-cluster=m1=https://localhost:2380,m2=https://localhost:22380,m3=https://localhost:32380 --initial-cluster-state new --peer-cert-file=/certs-metrics-proxy/server.crt --peer-key-file=/certs-metrics-proxy/server.key.insecure --peer-trusted-ca-file=/certs-metrics-proxy/ca.crt --peer-client-cert-auth --cert-file=/certs-metrics-proxy/server.crt --key-file=/certs-metrics-proxy/server.key.insecure --trusted-ca-file=/certs-metrics-proxy/ca.crt --client-cert-auth --listen-metrics-urls=https://localhost:22378,http://localhost:29379 --logger=zap --log-outputs=stderr
-
-etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://localhost:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://localhost:32380 --initial-cluster-token tkn --initial-cluster=m1=https://localhost:2380,m2=https://localhost:22380,m3=https://localhost:32380 --initial-cluster-state new --peer-cert-file=/certs-metrics-proxy/server.crt --peer-key-file=/certs-metrics-proxy/server.key.insecure --peer-trusted-ca-file=/certs-metrics-proxy/ca.crt --peer-client-cert-auth --cert-file=/certs-metrics-proxy/server.crt --key-file=/certs-metrics-proxy/server.key.insecure --trusted-ca-file=/certs-metrics-proxy/ca.crt --client-cert-auth --listen-metrics-urls=https://localhost:32378,http://localhost:39379 --logger=zap --log-outputs=stderr
-
-proxy: ./etcd grpc-proxy start --advertise-client-url=localhost:23790 --listen-addr=localhost:23790 --endpoints=https://localhost:2379,https://localhost:22379,https://localhost:32379 --data-dir=/tmp/proxy.data --cacert=/certs-metrics-proxy/ca.crt --cert=/certs-metrics-proxy/server.crt --key=/certs-metrics-proxy/server.key.insecure --trusted-ca-file=/certs-metrics-proxy/ca.crt --cert-file=/certs-metrics-proxy/server.crt --key-file=/certs-metrics-proxy/server.key.insecure --metrics-addr=http://localhost:9378
diff --git a/tests/docker-static-ip/certs-metrics-proxy/ca-csr.json b/tests/docker-static-ip/certs-metrics-proxy/ca-csr.json
deleted file mode 100644
index ecafabaadd3..00000000000
--- a/tests/docker-static-ip/certs-metrics-proxy/ca-csr.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "key": {
- "algo": "rsa",
- "size": 2048
- },
- "names": [
- {
- "O": "etcd",
- "OU": "etcd Security",
- "L": "San Francisco",
- "ST": "California",
- "C": "USA"
- }
- ],
- "CN": "ca",
- "ca": {
- "expiry": "87600h"
- }
-}
diff --git a/tests/docker-static-ip/certs-metrics-proxy/ca.crt b/tests/docker-static-ip/certs-metrics-proxy/ca.crt
deleted file mode 100644
index 0d8dc386b38..00000000000
--- a/tests/docker-static-ip/certs-metrics-proxy/ca.crt
+++ /dev/null
@@ -1,22 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDsTCCApmgAwIBAgIUYWIIesEznr7VfYawvmttxxmOfeUwDQYJKoZIhvcNAQEL
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
-Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzEyMDYyMTUzMDBaFw0yNzEyMDQyMTUz
-MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
-ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
-AoIBAQDDN/cW7rl/qz59gF3csnDhp5BAxVY7n0+inzZO+MZIdkCFuus6Klc6mWMY
-/ZGvpWxVDgQvYBs310eq4BrM2BjwWNfgqIn6bHVwwGfngojcDEHlZHw1e9sdBlO5
-e/rNONpNtMUjUeukhzFwPOdsUfweAGsqj4VYJV+kkS3uGmCGIj+3wIF411FliiQP
-WiyLG16BwR1Vem2qOotCRgCawKSb4/wKfF8dvv00IjP5Jcy+aXLQ4ULW1fvj3cRR
-JLdZmZ/PF0Cqm75qw2IqzIhRB5b1e8HyRPeNtEZ7frNLZyFhLgHJbRFF5WooFX79
-q9py8dERBXOxCKrSdqEOre0OU/4pAgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS
-BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBS+CaA8UIkIJT9xhXff4p143UuW
-7TANBgkqhkiG9w0BAQsFAAOCAQEAK7lScAUi+R68oxxmgZ/pdEr9wsMj4xtss+GO
-UDgzxudpT1nYQ2iBudC3LIuTiaUHUSseVleXEKeNbKhKhVhlIwhmPxiOgbbFu9hr
-e2Z87SjtdlbE/KcYFw0W/ukWYxYrq08BB19w2Mqd8J5CnLcj4/0iiH1uARo1swFy
-GUYAJ2I147sHIDbbmLKuxbdf4dcrkf3D4inBOLcRhS/MzaXfdMFntzJDQAo5YwFI
-zZ4TRGOhj8IcU1Cn5SVufryWy3qJ+sKHDYsGQQ/ArBXwQnO3NAFCpEN9rDDuQVmH
-+ATHDFBQZcGfN4GDh74FGnliRjip2sO4oWTfImmgJGGAn+P2CA==
------END CERTIFICATE-----
diff --git a/tests/docker-static-ip/certs-metrics-proxy/gencert.json b/tests/docker-static-ip/certs-metrics-proxy/gencert.json
deleted file mode 100644
index 09b67267bb2..00000000000
--- a/tests/docker-static-ip/certs-metrics-proxy/gencert.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "signing": {
- "default": {
- "usages": [
- "signing",
- "key encipherment",
- "server auth",
- "client auth"
- ],
- "expiry": "87600h"
- }
- }
-}
diff --git a/tests/docker-static-ip/certs-metrics-proxy/gencerts.sh b/tests/docker-static-ip/certs-metrics-proxy/gencerts.sh
deleted file mode 100755
index af8663e09eb..00000000000
--- a/tests/docker-static-ip/certs-metrics-proxy/gencerts.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-if ! [[ "$0" =~ "./gencerts.sh" ]]; then
- echo "must be run from 'fixtures'"
- exit 255
-fi
-
-if ! which cfssl; then
- echo "cfssl is not installed"
- exit 255
-fi
-
-cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca
-mv ca.pem ca.crt
-openssl x509 -in ca.crt -noout -text
-
-# generate wildcard certificates DNS: *.etcd.local
-cfssl gencert \
- --ca ./ca.crt \
- --ca-key ./ca-key.pem \
- --config ./gencert.json \
- ./server-ca-csr.json | cfssljson --bare ./server
-mv server.pem server.crt
-mv server-key.pem server.key.insecure
-
-rm -f *.csr *.pem *.stderr *.txt
diff --git a/tests/docker-static-ip/certs-metrics-proxy/run.sh b/tests/docker-static-ip/certs-metrics-proxy/run.sh
deleted file mode 100755
index 6089f3ed94b..00000000000
--- a/tests/docker-static-ip/certs-metrics-proxy/run.sh
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/bin/sh
-rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data /tmp/proxy.data
-
-goreman -f /certs-metrics-proxy/Procfile start &
-
-# TODO: remove random sleeps
-sleep 7s
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-metrics-proxy/ca.crt \
- --cert=/certs-metrics-proxy/server.crt \
- --key=/certs-metrics-proxy/server.key.insecure \
- --endpoints=https://localhost:2379 \
- endpoint health --cluster
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-metrics-proxy/ca.crt \
- --cert=/certs-metrics-proxy/server.crt \
- --key=/certs-metrics-proxy/server.key.insecure \
- --endpoints=https://localhost:2379,https://localhost:22379,https://localhost:32379 \
- put abc def
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert=/certs-metrics-proxy/ca.crt \
- --cert=/certs-metrics-proxy/server.crt \
- --key=/certs-metrics-proxy/server.key.insecure \
- --endpoints=https://localhost:2379,https://localhost:22379,https://localhost:32379 \
- get abc
-
-#################
-sleep 3s && printf "\n\n" && echo "curl https://localhost:2378/metrics"
-curl \
- --cacert /certs-metrics-proxy/ca.crt \
- --cert /certs-metrics-proxy/server.crt \
- --key /certs-metrics-proxy/server.key.insecure \
- -L https://localhost:2378/metrics | grep Put | tail -3
-
-sleep 3s && printf "\n" && echo "curl https://localhost:2379/metrics"
-curl \
- --cacert /certs-metrics-proxy/ca.crt \
- --cert /certs-metrics-proxy/server.crt \
- --key /certs-metrics-proxy/server.key.insecure \
- -L https://localhost:2379/metrics | grep Put | tail -3
-
-sleep 3s && printf "\n" && echo "curl http://localhost:9379/metrics"
-curl -L http://localhost:9379/metrics | grep Put | tail -3
-#################
-
-#################
-sleep 3s && printf "\n\n" && echo "curl https://localhost:22378/metrics"
-curl \
- --cacert /certs-metrics-proxy/ca.crt \
- --cert /certs-metrics-proxy/server.crt \
- --key /certs-metrics-proxy/server.key.insecure \
- -L https://localhost:22378/metrics | grep Put | tail -3
-
-sleep 3s && printf "\n" && echo "curl https://localhost:22379/metrics"
-curl \
- --cacert /certs-metrics-proxy/ca.crt \
- --cert /certs-metrics-proxy/server.crt \
- --key /certs-metrics-proxy/server.key.insecure \
- -L https://localhost:22379/metrics | grep Put | tail -3
-
-sleep 3s && printf "\n" && echo "curl http://localhost:29379/metrics"
-curl -L http://localhost:29379/metrics | grep Put | tail -3
-#################
-
-#################
-sleep 3s && printf "\n\n" && echo "curl https://localhost:32378/metrics"
-curl \
- --cacert /certs-metrics-proxy/ca.crt \
- --cert /certs-metrics-proxy/server.crt \
- --key /certs-metrics-proxy/server.key.insecure \
- -L https://localhost:32378/metrics | grep Put | tail -3
-
-sleep 3s && printf "\n" && echo "curl https://localhost:32379/metrics"
-curl \
- --cacert /certs-metrics-proxy/ca.crt \
- --cert /certs-metrics-proxy/server.crt \
- --key /certs-metrics-proxy/server.key.insecure \
- -L https://localhost:32379/metrics | grep Put | tail -3
-
-sleep 3s && printf "\n" && echo "curl http://localhost:39379/metrics"
-curl -L http://localhost:39379/metrics | grep Put | tail -3
-#################
-
-#################
-sleep 3s && printf "\n\n" && echo "Requests to gRPC proxy localhost:23790"
-ETCDCTL_API=3 ./etcdctl \
- --cacert /certs-metrics-proxy/ca.crt \
- --cert /certs-metrics-proxy/server.crt \
- --key /certs-metrics-proxy/server.key.insecure \
- --endpoints=localhost:23790 \
- put ghi jkl
-
-ETCDCTL_API=3 ./etcdctl \
- --cacert /certs-metrics-proxy/ca.crt \
- --cert /certs-metrics-proxy/server.crt \
- --key /certs-metrics-proxy/server.key.insecure \
- --endpoints=localhost:23790 \
- get ghi
-
-sleep 3s && printf "\n" && echo "Requests to gRPC proxy https://localhost:23790/metrics"
-curl \
- --cacert /certs-metrics-proxy/ca.crt \
- --cert /certs-metrics-proxy/server.crt \
- --key /certs-metrics-proxy/server.key.insecure \
- -L https://localhost:23790/metrics | grep Put | tail -3
-
-sleep 3s && printf "\n" && echo "Requests to gRPC proxy http://localhost:9378/metrics"
-curl -L http://localhost:9378/metrics | grep Put | tail -3
+ if len(epc.Procs) > 1 {
+ t.Log("Waiting health interval required to make membership changes")
+ time.Sleep(etcdserver.HealthInterval)
+ }
+
+ t.Log("Adding member to test membership, but a learner avoid breaking quorum")
+ resp, err := cc.MemberAddAsLearner(context.Background(), "fake1", []string{"http://127.0.0.1:1001"})
+ require.NoError(t, err)
+ if triggerSnapshot {
+ t.Logf("Generating snapshot")
+ generateSnapshot(t, snapshotCount, cc)
+ verifySnapshot(t, epc)
+ }
+ t.Log("Removing learner to test membership")
+ _, err = cc.MemberRemove(context.Background(), resp.Member.ID)
+ require.NoError(t, err)
+ beforeMembers, beforeKV := getMembersAndKeys(t, cc)
+
+ t.Logf("etcdctl downgrade enable %s", lastVersionStr)
+ downgradeEnable(t, epc, lastVersion)
+
+ t.Log("Downgrade enabled, validating if cluster is ready for downgrade")
+ for i := 0; i < len(epc.Procs); i++ {
+ validateVersion(t, epc.Cfg, epc.Procs[i], version.Versions{
+ Cluster: lastClusterVersionStr,
+ Server: version.Version,
+ Storage: lastClusterVersionStr,
+ })
+ e2e.AssertProcessLogs(t, epc.Procs[i], "The server is ready to downgrade")
+ }
+
+ t.Log("Cluster is ready for downgrade")
+ t.Logf("Starting downgrade process to %q", lastVersionStr)
+ for i := 0; i < len(epc.Procs); i++ {
+ t.Logf("Downgrading member %d by running %s binary", i, lastReleaseBinary)
+ stopEtcd(t, epc.Procs[i])
+ startEtcd(t, epc.Procs[i], lastReleaseBinary)
+ }
+
+ t.Log("All members downgraded, validating downgrade")
+ e2e.AssertProcessLogs(t, leader(t, epc), "the cluster has been downgraded")
+ for i := 0; i < len(epc.Procs); i++ {
+ validateVersion(t, epc.Cfg, epc.Procs[i], version.Versions{
+ Cluster: lastClusterVersionStr,
+ Server: lastVersionStr,
+ })
+ }
+
+ t.Log("Downgrade complete")
+ afterMembers, afterKV := getMembersAndKeys(t, cc)
+ assert.Equal(t, beforeKV.Kvs, afterKV.Kvs)
+ assert.Equal(t, beforeMembers.Members, afterMembers.Members)
+
+ if len(epc.Procs) > 1 {
+ t.Log("Waiting health interval to required to make membership changes")
+ time.Sleep(etcdserver.HealthInterval)
+ }
+ t.Log("Adding learner to test membership, but avoid breaking quorum")
+ resp, err = cc.MemberAddAsLearner(context.Background(), "fake2", []string{"http://127.0.0.1:1002"})
+ require.NoError(t, err)
+ if triggerSnapshot {
+ t.Logf("Generating snapshot")
+ generateSnapshot(t, snapshotCount, cc)
+ verifySnapshot(t, epc)
+ }
+ t.Log("Removing learner to test membership")
+ _, err = cc.MemberRemove(context.Background(), resp.Member.ID)
+ require.NoError(t, err)
+ beforeMembers, beforeKV = getMembersAndKeys(t, cc)
+
+ t.Logf("Starting upgrade process to %q", currentVersionStr)
+ for i := 0; i < len(epc.Procs); i++ {
+ t.Logf("Upgrading member %d", i)
+ stopEtcd(t, epc.Procs[i])
+ startEtcd(t, epc.Procs[i], currentEtcdBinary)
+ // NOTE: The leader runs a monitor that updates the cluster version
+ // automatically, so we skip checking the transient cluster version
+ // here to avoid flakiness.
+ }
+
+ t.Log("All members upgraded, validating upgrade")
+ for i := 0; i < len(epc.Procs); i++ {
+ validateVersion(t, epc.Cfg, epc.Procs[i], version.Versions{
+ Cluster: currentVersionStr,
+ Server: version.Version,
+ Storage: currentVersionStr,
+ })
+ }
+ t.Log("Upgrade complete")
+
+ afterMembers, afterKV = getMembersAndKeys(t, cc)
+ assert.Equal(t, beforeKV.Kvs, afterKV.Kvs)
+ assert.Equal(t, beforeMembers.Members, afterMembers.Members)
+}
+
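+// newCluster starts an etcd process cluster of the given size and snapshot
+// count, keeping the data dir, and registers a cleanup that closes it.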
+func newCluster(t *testing.T, clusterSize int, snapshotCount uint64) *e2e.EtcdProcessCluster {
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+ e2e.WithClusterSize(clusterSize),
+ e2e.WithSnapshotCount(snapshotCount),
+ e2e.WithKeepDataDir(true),
+ )
+ if err != nil {
+ t.Fatalf("could not start etcd process cluster (%v)", err)
+ }
+ t.Cleanup(func() {
+ if errC := epc.Close(); errC != nil {
+ t.Fatalf("error closing etcd processes (%v)", errC)
+ }
+ })
+ return epc
+}
+
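+// startEtcd points the member at the given binary and restarts it.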
+func startEtcd(t *testing.T, ep e2e.EtcdProcess, execPath string) {
+ ep.Config().ExecPath = execPath
+ err := ep.Restart(context.TODO())
+ if err != nil {
+ t.Fatalf("could not start etcd process cluster (%v)", err)
+ }
+}
+
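+// downgradeEnable issues a downgrade-enable request for the target version and
+// fails the test if it does not succeed within the timeout.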
+func downgradeEnable(t *testing.T, epc *e2e.EtcdProcessCluster, ver *semver.Version) {
+ c := epc.Etcdctl()
+ testutils.ExecuteWithTimeout(t, 20*time.Second, func() {
+ err := c.DowngradeEnable(context.TODO(), ver.String())
+ require.NoError(t, err)
+ })
+}
+
+func stopEtcd(t *testing.T, ep e2e.EtcdProcess) {
+ err := ep.Stop()
+ require.NoError(t, err)
+}
+
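+// validateVersion polls the member's /version endpoint until the reported
+// server, cluster and storage versions match the expectation or the timeout expires.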
+func validateVersion(t *testing.T, cfg *e2e.EtcdProcessClusterConfig, member e2e.EtcdProcess, expect version.Versions) {
+ testutils.ExecuteWithTimeout(t, 30*time.Second, func() {
+ for {
+ result, err := getMemberVersionByCurl(cfg, member)
+ if err != nil {
+ cfg.Logger.Warn("failed to get member version and retrying", zap.Error(err), zap.String("member", member.Config().Name))
+ time.Sleep(time.Second)
+ continue
+ }
+ cfg.Logger.Info("Comparing versions", zap.String("member", member.Config().Name), zap.Any("got", result), zap.Any("want", expect))
+ if err := compareMemberVersion(expect, result); err != nil {
+ cfg.Logger.Warn("Versions didn't match retrying", zap.Error(err), zap.String("member", member.Config().Name))
+ time.Sleep(time.Second)
+ continue
+ }
+ cfg.Logger.Info("Versions match", zap.String("member", member.Config().Name))
+ break
+ }
+ })
+}
+
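+// leader returns the member whose own member ID matches the leader ID reported
+// by its Status response.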
+func leader(t *testing.T, epc *e2e.EtcdProcessCluster) e2e.EtcdProcess {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+ defer cancel()
+ for i := 0; i < len(epc.Procs); i++ {
+ endpoints := epc.Procs[i].EndpointsGRPC()
+ cli, err := clientv3.New(clientv3.Config{
+ Endpoints: endpoints,
+ DialTimeout: 3 * time.Second,
+ })
+ require.NoError(t, err)
+ defer cli.Close()
+ resp, err := cli.Status(ctx, endpoints[0])
+ require.NoError(t, err)
+ if resp.Header.GetMemberId() == resp.Leader {
+ return epc.Procs[i]
+ }
+ }
+ t.Fatal("Leader not found")
+ return nil
+}
+
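+// compareMemberVersion only compares the fields set in expect; empty fields are ignored.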
+func compareMemberVersion(expect version.Versions, target version.Versions) error {
+ if expect.Server != "" && expect.Server != target.Server {
+ return fmt.Errorf("expect etcdserver version %v, but got %v", expect.Server, target.Server)
+ }
+
+ if expect.Cluster != "" && expect.Cluster != target.Cluster {
+ return fmt.Errorf("expect etcdcluster version %v, but got %v", expect.Cluster, target.Cluster)
+ }
+
+ if expect.Storage != "" && expect.Storage != target.Storage {
+ return fmt.Errorf("expect storage version %v, but got %v", expect.Storage, target.Storage)
+ }
+ return nil
+}
+
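+// getMemberVersionByCurl fetches the member's /version endpoint via curl and unmarshals the JSON response.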
+func getMemberVersionByCurl(cfg *e2e.EtcdProcessClusterConfig, member e2e.EtcdProcess) (version.Versions, error) {
+ args := e2e.CURLPrefixArgsCluster(cfg, member, "GET", e2e.CURLReq{Endpoint: "/version"})
+ lines, err := e2e.RunUtilCompletion(args, nil)
+ if err != nil {
+ return version.Versions{}, err
+ }
+
+ data := strings.Join(lines, "\n")
+ result := version.Versions{}
+ if err := json.Unmarshal([]byte(data), &result); err != nil {
+ return version.Versions{}, fmt.Errorf("failed to unmarshal (%v): %w", data, err)
+ }
+ return result, nil
+}
+
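+// generateSnapshot writes three times the configured snapshot count worth of
+// keys so the server is expected to trigger at least one snapshot.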
+func generateSnapshot(t *testing.T, snapshotCount uint64, cc *e2e.EtcdctlV3) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ var i uint64
+ t.Logf("Adding keys")
+ for i = 0; i < snapshotCount*3; i++ {
+ err := cc.Put(ctx, fmt.Sprintf("%d", i), "1", config.PutOptions{})
+ assert.NoError(t, err)
+ }
+}
+
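+// verifySnapshot checks that every member has a loadable snapshot in its data directory.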
+func verifySnapshot(t *testing.T, epc *e2e.EtcdProcessCluster) {
+ for i := range epc.Procs {
+ t.Logf("Verifying snapshot for member %d", i)
+ ss := snap.New(epc.Cfg.Logger, datadir.ToSnapDir(epc.Procs[i].Config().DataDirPath))
+ _, err := ss.Load()
+ require.NoError(t, err)
+ }
+ t.Logf("All members have a valid snapshot")
+}
+
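+// verifySnapshotMembers loads each member's snapshot, recovers the v2 store
+// from it and checks that every expected member is present.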
+func verifySnapshotMembers(t *testing.T, epc *e2e.EtcdProcessCluster, expectedMembers *clientv3.MemberListResponse) {
+ for i := range epc.Procs {
+ t.Logf("Verifying snapshot for member %d", i)
+ ss := snap.New(epc.Cfg.Logger, datadir.ToSnapDir(epc.Procs[i].Config().DataDirPath))
+ snap, err := ss.Load()
+ require.NoError(t, err)
+ st := v2store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix)
+ err = st.Recovery(snap.Data)
+ require.NoError(t, err)
+ for _, m := range expectedMembers.Members {
+ _, err := st.Get(membership.MemberStoreKey(types.ID(m.ID)), true, true)
+ require.NoError(t, err)
+ }
+ t.Logf("Verifed snapshot for member %d", i)
+ }
+ t.Log("All members have a valid snapshot")
+}
+
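+// getMembersAndKeys returns the current member list and all keys, used to
+// compare cluster state before and after a downgrade or upgrade.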
+func getMembersAndKeys(t *testing.T, cc *e2e.EtcdctlV3) (*clientv3.MemberListResponse, *clientv3.GetResponse) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ kvs, err := cc.Get(ctx, "", config.GetOptions{Prefix: true})
+ require.NoError(t, err)
+
+ members, err := cc.MemberList(ctx, false)
+ assert.NoError(t, err)
+
+ return members, kvs
+}
diff --git a/tests/e2e/cluster_proxy_test.go b/tests/e2e/cluster_proxy_test.go
deleted file mode 100644
index b96a10037fd..00000000000
--- a/tests/e2e/cluster_proxy_test.go
+++ /dev/null
@@ -1,307 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build cluster_proxy
-// +build cluster_proxy
-
-package e2e
-
-import (
- "fmt"
- "net"
- "net/url"
- "os"
- "path"
- "strconv"
- "strings"
-
- "go.etcd.io/etcd/pkg/v3/expect"
- "go.uber.org/zap"
-)
-
-type proxyEtcdProcess struct {
- etcdProc etcdProcess
- proxyV2 *proxyV2Proc
- proxyV3 *proxyV3Proc
-}
-
-func newEtcdProcess(cfg *etcdServerProcessConfig) (etcdProcess, error) {
- return newProxyEtcdProcess(cfg)
-}
-
-func newProxyEtcdProcess(cfg *etcdServerProcessConfig) (*proxyEtcdProcess, error) {
- ep, err := newEtcdServerProcess(cfg)
- if err != nil {
- return nil, err
- }
- pep := &proxyEtcdProcess{
- etcdProc: ep,
- proxyV2: newProxyV2Proc(cfg),
- proxyV3: newProxyV3Proc(cfg),
- }
- return pep, nil
-}
-
-func (p *proxyEtcdProcess) Config() *etcdServerProcessConfig { return p.etcdProc.Config() }
-
-func (p *proxyEtcdProcess) EndpointsV2() []string { return p.proxyV2.endpoints() }
-func (p *proxyEtcdProcess) EndpointsV3() []string { return p.proxyV3.endpoints() }
-func (p *proxyEtcdProcess) EndpointsMetrics() []string {
- panic("not implemented; proxy doesn't provide health information")
-}
-
-func (p *proxyEtcdProcess) Start() error {
- if err := p.etcdProc.Start(); err != nil {
- return err
- }
- if err := p.proxyV2.Start(); err != nil {
- return err
- }
- return p.proxyV3.Start()
-}
-
-func (p *proxyEtcdProcess) Restart() error {
- if err := p.etcdProc.Restart(); err != nil {
- return err
- }
- if err := p.proxyV2.Restart(); err != nil {
- return err
- }
- return p.proxyV3.Restart()
-}
-
-func (p *proxyEtcdProcess) Stop() error {
- err := p.proxyV2.Stop()
- if v3err := p.proxyV3.Stop(); err == nil {
- err = v3err
- }
- if eerr := p.etcdProc.Stop(); eerr != nil && err == nil {
- // fails on go-grpc issue #1384
- if !strings.Contains(eerr.Error(), "exit status 2") {
- err = eerr
- }
- }
- return err
-}
-
-func (p *proxyEtcdProcess) Close() error {
- err := p.proxyV2.Close()
- if v3err := p.proxyV3.Close(); err == nil {
- err = v3err
- }
- if eerr := p.etcdProc.Close(); eerr != nil && err == nil {
- // fails on go-grpc issue #1384
- if !strings.Contains(eerr.Error(), "exit status 2") {
- err = eerr
- }
- }
- return err
-}
-
-func (p *proxyEtcdProcess) WithStopSignal(sig os.Signal) os.Signal {
- p.proxyV3.WithStopSignal(sig)
- p.proxyV3.WithStopSignal(sig)
- return p.etcdProc.WithStopSignal(sig)
-}
-
-type proxyProc struct {
- lg *zap.Logger
- execPath string
- args []string
- ep string
- murl string
- donec chan struct{}
-
- proc *expect.ExpectProcess
-}
-
-func (pp *proxyProc) endpoints() []string { return []string{pp.ep} }
-
-func (pp *proxyProc) start() error {
- if pp.proc != nil {
- panic("already started")
- }
- proc, err := spawnCmdWithLogger(pp.lg, append([]string{pp.execPath}, pp.args...))
- if err != nil {
- return err
- }
- pp.proc = proc
- return nil
-}
-
-func (pp *proxyProc) waitReady(readyStr string) error {
- defer close(pp.donec)
- return waitReadyExpectProc(pp.proc, []string{readyStr})
-}
-
-func (pp *proxyProc) Stop() error {
- if pp.proc == nil {
- return nil
- }
- if err := pp.proc.Stop(); err != nil && !strings.Contains(err.Error(), "exit status 1") {
- // v2proxy exits with status 1 on auto tls; not sure why
- return err
- }
- pp.proc = nil
- <-pp.donec
- pp.donec = make(chan struct{})
- return nil
-}
-
-func (pp *proxyProc) WithStopSignal(sig os.Signal) os.Signal {
- ret := pp.proc.StopSignal
- pp.proc.StopSignal = sig
- return ret
-}
-
-func (pp *proxyProc) Close() error { return pp.Stop() }
-
-type proxyV2Proc struct {
- proxyProc
- dataDir string
-}
-
-func proxyListenURL(cfg *etcdServerProcessConfig, portOffset int) string {
- u, err := url.Parse(cfg.acurl)
- if err != nil {
- panic(err)
- }
- host, port, _ := net.SplitHostPort(u.Host)
- p, _ := strconv.ParseInt(port, 10, 16)
- u.Host = fmt.Sprintf("%s:%d", host, int(p)+portOffset)
- return u.String()
-}
-
-func newProxyV2Proc(cfg *etcdServerProcessConfig) *proxyV2Proc {
- listenAddr := proxyListenURL(cfg, 2)
- name := fmt.Sprintf("testname-proxy-%p", cfg)
- dataDir := path.Join(cfg.dataDirPath, name+".etcd")
- args := []string{
- "--name", name,
- "--proxy", "on",
- "--listen-client-urls", listenAddr,
- "--initial-cluster", cfg.name + "=" + cfg.purl.String(),
- "--data-dir", dataDir,
- }
- return &proxyV2Proc{
- proxyProc: proxyProc{
- lg: cfg.lg,
- execPath: cfg.execPath,
- args: append(args, cfg.tlsArgs...),
- ep: listenAddr,
- donec: make(chan struct{}),
- },
- dataDir: dataDir,
- }
-}
-
-func (v2p *proxyV2Proc) Start() error {
- os.RemoveAll(v2p.dataDir)
- if err := v2p.start(); err != nil {
- return err
- }
- // The full line we are expecting in the logs:
- // "caller":"httpproxy/director.go:65","msg":"endpoints found","endpoints":["http://localhost:20000"]}
- return v2p.waitReady("endpoints found")
-}
-
-func (v2p *proxyV2Proc) Restart() error {
- if err := v2p.Stop(); err != nil {
- return err
- }
- return v2p.Start()
-}
-
-func (v2p *proxyV2Proc) Stop() error {
- if err := v2p.proxyProc.Stop(); err != nil {
- return err
- }
- // v2 proxy caches members; avoid reuse of directory
- return os.RemoveAll(v2p.dataDir)
-}
-
-type proxyV3Proc struct {
- proxyProc
-}
-
-func newProxyV3Proc(cfg *etcdServerProcessConfig) *proxyV3Proc {
- listenAddr := proxyListenURL(cfg, 3)
- args := []string{
- "grpc-proxy",
- "start",
- "--listen-addr", strings.Split(listenAddr, "/")[2],
- "--endpoints", cfg.acurl,
- // pass-through member RPCs
- "--advertise-client-url", "",
- "--data-dir", cfg.dataDirPath,
- }
- murl := ""
- if cfg.murl != "" {
- murl = proxyListenURL(cfg, 4)
- args = append(args, "--metrics-addr", murl)
- }
- tlsArgs := []string{}
- for i := 0; i < len(cfg.tlsArgs); i++ {
- switch cfg.tlsArgs[i] {
- case "--cert-file":
- tlsArgs = append(tlsArgs, "--cert-file", cfg.tlsArgs[i+1])
- i++
- case "--key-file":
- tlsArgs = append(tlsArgs, "--key-file", cfg.tlsArgs[i+1])
- i++
- case "--trusted-ca-file":
- tlsArgs = append(tlsArgs, "--trusted-ca-file", cfg.tlsArgs[i+1])
- i++
- case "--auto-tls":
- tlsArgs = append(tlsArgs, "--auto-tls", "--insecure-skip-tls-verify")
- case "--peer-trusted-ca-file", "--peer-cert-file", "--peer-key-file":
- i++ // skip arg
- case "--client-cert-auth", "--peer-auto-tls":
- default:
- tlsArgs = append(tlsArgs, cfg.tlsArgs[i])
- }
-
- // Configure certificates for connection proxy ---> server.
- // This certificate must NOT have CN set.
- tlsArgs = append(tlsArgs,
- "--cert", path.Join(fixturesDir, "client-nocn.crt"),
- "--key", path.Join(fixturesDir, "client-nocn.key.insecure"),
- "--cacert", path.Join(fixturesDir, "ca.crt"),
- "--client-crl-file", path.Join(fixturesDir, "revoke.crl"))
- }
- return &proxyV3Proc{
- proxyProc{
- lg: cfg.lg,
- execPath: cfg.execPath,
- args: append(args, tlsArgs...),
- ep: listenAddr,
- murl: murl,
- donec: make(chan struct{}),
- },
- }
-}
-
-func (v3p *proxyV3Proc) Restart() error {
- if err := v3p.Stop(); err != nil {
- return err
- }
- return v3p.Start()
-}
-
-func (v3p *proxyV3Proc) Start() error {
- if err := v3p.start(); err != nil {
- return err
- }
- return v3p.waitReady("started gRPC proxy")
-}
diff --git a/tests/e2e/cluster_test.go b/tests/e2e/cluster_test.go
deleted file mode 100644
index 4b3993d5c61..00000000000
--- a/tests/e2e/cluster_test.go
+++ /dev/null
@@ -1,478 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package e2e
-
-import (
- "fmt"
- "net/url"
- "os"
- "path"
- "strings"
- "testing"
- "time"
-
- "go.etcd.io/etcd/server/v3/etcdserver"
- "go.etcd.io/etcd/tests/v3/integration"
- "go.uber.org/zap"
- "go.uber.org/zap/zaptest"
-)
-
-const etcdProcessBasePort = 20000
-
-type clientConnType int
-
-var (
- fixturesDir = integration.MustAbsPath("../fixtures")
-)
-
-const (
- clientNonTLS clientConnType = iota
- clientTLS
- clientTLSAndNonTLS
-)
-
-func newConfigNoTLS() *etcdProcessClusterConfig {
- return &etcdProcessClusterConfig{clusterSize: 3,
- initialToken: "new",
- }
-}
-
-func newConfigAutoTLS() *etcdProcessClusterConfig {
- return &etcdProcessClusterConfig{
- clusterSize: 3,
- isPeerTLS: true,
- isPeerAutoTLS: true,
- initialToken: "new",
- }
-}
-
-func newConfigTLS() *etcdProcessClusterConfig {
- return &etcdProcessClusterConfig{
- clusterSize: 3,
- clientTLS: clientTLS,
- isPeerTLS: true,
- initialToken: "new",
- }
-}
-
-func newConfigClientTLS() *etcdProcessClusterConfig {
- return &etcdProcessClusterConfig{
- clusterSize: 3,
- clientTLS: clientTLS,
- initialToken: "new",
- }
-}
-
-func newConfigClientBoth() *etcdProcessClusterConfig {
- return &etcdProcessClusterConfig{
- clusterSize: 1,
- clientTLS: clientTLSAndNonTLS,
- initialToken: "new",
- }
-}
-
-func newConfigClientAutoTLS() *etcdProcessClusterConfig {
- return &etcdProcessClusterConfig{
- clusterSize: 1,
- isClientAutoTLS: true,
- clientTLS: clientTLS,
- initialToken: "new",
- }
-}
-
-func newConfigPeerTLS() *etcdProcessClusterConfig {
- return &etcdProcessClusterConfig{
- clusterSize: 3,
- isPeerTLS: true,
- initialToken: "new",
- }
-}
-
-func newConfigClientTLSCertAuth() *etcdProcessClusterConfig {
- return &etcdProcessClusterConfig{
- clusterSize: 1,
- clientTLS: clientTLS,
- initialToken: "new",
- clientCertAuthEnabled: true,
- }
-}
-
-func newConfigClientTLSCertAuthWithNoCN() *etcdProcessClusterConfig {
- return &etcdProcessClusterConfig{
- clusterSize: 1,
- clientTLS: clientTLS,
- initialToken: "new",
- clientCertAuthEnabled: true,
- noCN: true,
- }
-}
-
-func newConfigJWT() *etcdProcessClusterConfig {
- return &etcdProcessClusterConfig{
- clusterSize: 1,
- initialToken: "new",
- authTokenOpts: "jwt,pub-key=" + path.Join(fixturesDir, "server.crt") +
- ",priv-key=" + path.Join(fixturesDir, "server.key.insecure") + ",sign-method=RS256,ttl=1s",
- }
-}
-
-func configStandalone(cfg etcdProcessClusterConfig) *etcdProcessClusterConfig {
- ret := cfg
- ret.clusterSize = 1
- return &ret
-}
-
-type etcdProcessCluster struct {
- lg *zap.Logger
- cfg *etcdProcessClusterConfig
- procs []etcdProcess
-}
-
-type etcdProcessClusterConfig struct {
- execPath string
- dataDirPath string
- keepDataDir bool
-
- clusterSize int
-
- baseScheme string
- basePort int
-
- metricsURLScheme string
-
- snapshotCount int // default is 10000
-
- clientTLS clientConnType
- clientCertAuthEnabled bool
- isPeerTLS bool
- isPeerAutoTLS bool
- isClientAutoTLS bool
- isClientCRL bool
- noCN bool
-
- cipherSuites []string
-
- forceNewCluster bool
- initialToken string
- quotaBackendBytes int64
- noStrictReconfig bool
- enableV2 bool
- initialCorruptCheck bool
- authTokenOpts string
- v2deprecation string
-
- rollingStart bool
-}
-
-// newEtcdProcessCluster launches a new cluster from etcd processes, returning
-// a new etcdProcessCluster once all nodes are ready to accept client requests.
-func newEtcdProcessCluster(t testing.TB, cfg *etcdProcessClusterConfig) (*etcdProcessCluster, error) {
- skipInShortMode(t)
-
- etcdCfgs := cfg.etcdServerProcessConfigs(t)
- epc := &etcdProcessCluster{
- cfg: cfg,
- lg: zaptest.NewLogger(t),
- procs: make([]etcdProcess, cfg.clusterSize),
- }
-
- // launch etcd processes
- for i := range etcdCfgs {
- proc, err := newEtcdProcess(etcdCfgs[i])
- if err != nil {
- epc.Close()
- return nil, fmt.Errorf("Cannot configure: %v", err)
- }
- epc.procs[i] = proc
- }
-
- if cfg.rollingStart {
- if err := epc.RollingStart(); err != nil {
- return nil, fmt.Errorf("Cannot rolling-start: %v", err)
- }
- } else {
- if err := epc.Start(); err != nil {
- return nil, fmt.Errorf("Cannot start: %v", err)
- }
- }
- return epc, nil
-}
-
-func (cfg *etcdProcessClusterConfig) clientScheme() string {
- if cfg.clientTLS == clientTLS {
- return "https"
- }
- return "http"
-}
-
-func (cfg *etcdProcessClusterConfig) peerScheme() string {
- peerScheme := cfg.baseScheme
- if peerScheme == "" {
- peerScheme = "http"
- }
- if cfg.isPeerTLS {
- peerScheme += "s"
- }
- return peerScheme
-}
-
-func (cfg *etcdProcessClusterConfig) etcdServerProcessConfigs(tb testing.TB) []*etcdServerProcessConfig {
- lg := zaptest.NewLogger(tb)
-
- if cfg.basePort == 0 {
- cfg.basePort = etcdProcessBasePort
- }
- if cfg.execPath == "" {
- cfg.execPath = binPath
- }
- if cfg.snapshotCount == 0 {
- cfg.snapshotCount = etcdserver.DefaultSnapshotCount
- }
-
- etcdCfgs := make([]*etcdServerProcessConfig, cfg.clusterSize)
- initialCluster := make([]string, cfg.clusterSize)
- for i := 0; i < cfg.clusterSize; i++ {
- var curls []string
- var curl, curltls string
- port := cfg.basePort + 5*i
- curlHost := fmt.Sprintf("localhost:%d", port)
-
- switch cfg.clientTLS {
- case clientNonTLS, clientTLS:
- curl = (&url.URL{Scheme: cfg.clientScheme(), Host: curlHost}).String()
- curls = []string{curl}
- case clientTLSAndNonTLS:
- curl = (&url.URL{Scheme: "http", Host: curlHost}).String()
- curltls = (&url.URL{Scheme: "https", Host: curlHost}).String()
- curls = []string{curl, curltls}
- }
-
- purl := url.URL{Scheme: cfg.peerScheme(), Host: fmt.Sprintf("localhost:%d", port+1)}
- name := fmt.Sprintf("test-%d", i)
- dataDirPath := cfg.dataDirPath
- if cfg.dataDirPath == "" {
- dataDirPath = tb.TempDir()
- }
- initialCluster[i] = fmt.Sprintf("%s=%s", name, purl.String())
-
- args := []string{
- "--name", name,
- "--listen-client-urls", strings.Join(curls, ","),
- "--advertise-client-urls", strings.Join(curls, ","),
- "--listen-peer-urls", purl.String(),
- "--initial-advertise-peer-urls", purl.String(),
- "--initial-cluster-token", cfg.initialToken,
- "--data-dir", dataDirPath,
- "--snapshot-count", fmt.Sprintf("%d", cfg.snapshotCount),
- }
- args = addV2Args(args)
- if cfg.forceNewCluster {
- args = append(args, "--force-new-cluster")
- }
- if cfg.quotaBackendBytes > 0 {
- args = append(args,
- "--quota-backend-bytes", fmt.Sprintf("%d", cfg.quotaBackendBytes),
- )
- }
- if cfg.noStrictReconfig {
- args = append(args, "--strict-reconfig-check=false")
- }
- if cfg.enableV2 {
- args = append(args, "--enable-v2")
- }
- if cfg.initialCorruptCheck {
- args = append(args, "--experimental-initial-corrupt-check")
- }
- var murl string
- if cfg.metricsURLScheme != "" {
- murl = (&url.URL{
- Scheme: cfg.metricsURLScheme,
- Host: fmt.Sprintf("localhost:%d", port+2),
- }).String()
- args = append(args, "--listen-metrics-urls", murl)
- }
-
- args = append(args, cfg.tlsArgs()...)
-
- if cfg.authTokenOpts != "" {
- args = append(args, "--auth-token", cfg.authTokenOpts)
- }
-
- if cfg.v2deprecation != "" {
- args = append(args, "--v2-deprecation", cfg.v2deprecation)
- }
-
- etcdCfgs[i] = &etcdServerProcessConfig{
- lg: lg,
- execPath: cfg.execPath,
- args: args,
- tlsArgs: cfg.tlsArgs(),
- dataDirPath: dataDirPath,
- keepDataDir: cfg.keepDataDir,
- name: name,
- purl: purl,
- acurl: curl,
- murl: murl,
- initialToken: cfg.initialToken,
- }
- }
-
- initialClusterArgs := []string{"--initial-cluster", strings.Join(initialCluster, ",")}
- for i := range etcdCfgs {
- etcdCfgs[i].initialCluster = strings.Join(initialCluster, ",")
- etcdCfgs[i].args = append(etcdCfgs[i].args, initialClusterArgs...)
- }
-
- return etcdCfgs
-}
-
-func (cfg *etcdProcessClusterConfig) tlsArgs() (args []string) {
- if cfg.clientTLS != clientNonTLS {
- if cfg.isClientAutoTLS {
- args = append(args, "--auto-tls")
- } else {
- tlsClientArgs := []string{
- "--cert-file", certPath,
- "--key-file", privateKeyPath,
- "--trusted-ca-file", caPath,
- }
- args = append(args, tlsClientArgs...)
-
- if cfg.clientCertAuthEnabled {
- args = append(args, "--client-cert-auth")
- }
- }
- }
-
- if cfg.isPeerTLS {
- if cfg.isPeerAutoTLS {
- args = append(args, "--peer-auto-tls")
- } else {
- tlsPeerArgs := []string{
- "--peer-cert-file", certPath,
- "--peer-key-file", privateKeyPath,
- "--peer-trusted-ca-file", caPath,
- }
- args = append(args, tlsPeerArgs...)
- }
- }
-
- if cfg.isClientCRL {
- args = append(args, "--client-crl-file", crlPath, "--client-cert-auth")
- }
-
- if len(cfg.cipherSuites) > 0 {
- args = append(args, "--cipher-suites", strings.Join(cfg.cipherSuites, ","))
- }
-
- return args
-}
-
-func (epc *etcdProcessCluster) EndpointsV2() []string {
- return epc.endpoints(func(ep etcdProcess) []string { return ep.EndpointsV2() })
-}
-
-func (epc *etcdProcessCluster) EndpointsV3() []string {
- return epc.endpoints(func(ep etcdProcess) []string { return ep.EndpointsV3() })
-}
-
-func (epc *etcdProcessCluster) endpoints(f func(ep etcdProcess) []string) (ret []string) {
- for _, p := range epc.procs {
- ret = append(ret, f(p)...)
- }
- return ret
-}
-
-func (epc *etcdProcessCluster) Start() error {
- return epc.start(func(ep etcdProcess) error { return ep.Start() })
-}
-
-func (epc *etcdProcessCluster) RollingStart() error {
- return epc.rollingStart(func(ep etcdProcess) error { return ep.Start() })
-}
-
-func (epc *etcdProcessCluster) Restart() error {
- return epc.start(func(ep etcdProcess) error { return ep.Restart() })
-}
-
-func (epc *etcdProcessCluster) start(f func(ep etcdProcess) error) error {
- readyC := make(chan error, len(epc.procs))
- for i := range epc.procs {
- go func(n int) { readyC <- f(epc.procs[n]) }(i)
- }
- for range epc.procs {
- if err := <-readyC; err != nil {
- epc.Close()
- return err
- }
- }
- return nil
-}
-
-func (epc *etcdProcessCluster) rollingStart(f func(ep etcdProcess) error) error {
- readyC := make(chan error, len(epc.procs))
- for i := range epc.procs {
- go func(n int) { readyC <- f(epc.procs[n]) }(i)
- // make sure the servers do not start at the same time
- time.Sleep(time.Second)
- }
- for range epc.procs {
- if err := <-readyC; err != nil {
- epc.Close()
- return err
- }
- }
- return nil
-}
-
-func (epc *etcdProcessCluster) Stop() (err error) {
- for _, p := range epc.procs {
- if p == nil {
- continue
- }
- if curErr := p.Stop(); curErr != nil {
- if err != nil {
- err = fmt.Errorf("%v; %v", err, curErr)
- } else {
- err = curErr
- }
- }
- }
- return err
-}
-
-func (epc *etcdProcessCluster) Close() error {
- epc.lg.Info("closing test cluster...")
- err := epc.Stop()
- for _, p := range epc.procs {
- // p is nil when newEtcdProcess fails in the middle
- // Close still gets called to clean up test data
- if p == nil {
- continue
- }
- if cerr := p.Close(); cerr != nil {
- err = cerr
- }
- }
- epc.lg.Info("closed test cluster.")
- return err
-}
-
-func (epc *etcdProcessCluster) WithStopSignal(sig os.Signal) (ret os.Signal) {
- for _, p := range epc.procs {
- ret = p.WithStopSignal(sig)
- }
- return ret
-}
diff --git a/tests/e2e/cmux_test.go b/tests/e2e/cmux_test.go
new file mode 100644
index 00000000000..9281705ee04
--- /dev/null
+++ b/tests/e2e/cmux_test.go
@@ -0,0 +1,225 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// These tests directly validate etcd connection multiplexing.
+//go:build !cluster_proxy
+
+package e2e
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/prometheus/common/expfmt"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+func TestConnectionMultiplexing(t *testing.T) {
+ e2e.BeforeTest(t)
+ for _, tc := range []struct {
+ name string
+ serverTLS e2e.ClientConnType
+ separateHTTPPort bool
+ }{
+ {
+ name: "ServerTLS",
+ serverTLS: e2e.ClientTLS,
+ },
+ {
+ name: "ServerNonTLS",
+ serverTLS: e2e.ClientNonTLS,
+ },
+ {
+ name: "ServerTLSAndNonTLS",
+ serverTLS: e2e.ClientTLSAndNonTLS,
+ },
+ {
+ name: "SeparateHTTP/ServerTLS",
+ serverTLS: e2e.ClientTLS,
+ separateHTTPPort: true,
+ },
+ {
+ name: "SeparateHTTP/ServerNonTLS",
+ serverTLS: e2e.ClientNonTLS,
+ separateHTTPPort: true,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx := context.Background()
+ cfg := e2e.NewConfig(e2e.WithClusterSize(1))
+ cfg.Client.ConnectionType = tc.serverTLS
+ cfg.ClientHTTPSeparate = tc.separateHTTPPort
+ clus, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithConfig(cfg))
+ require.NoError(t, err)
+ defer clus.Close()
+
+ var clientScenarios []e2e.ClientConnType
+ switch tc.serverTLS {
+ case e2e.ClientTLS:
+ clientScenarios = []e2e.ClientConnType{e2e.ClientTLS}
+ case e2e.ClientNonTLS:
+ clientScenarios = []e2e.ClientConnType{e2e.ClientNonTLS}
+ case e2e.ClientTLSAndNonTLS:
+ clientScenarios = []e2e.ClientConnType{e2e.ClientTLS, e2e.ClientNonTLS}
+ }
+
+ for _, clientTLS := range clientScenarios {
+ name := "ClientNonTLS"
+ if clientTLS == e2e.ClientTLS {
+ name = "ClientTLS"
+ }
+ t.Run(name, func(t *testing.T) {
+ testConnectionMultiplexing(ctx, t, clus.Procs[0], clientTLS)
+ })
+ }
+ })
+ }
+}
+
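+// testConnectionMultiplexing exercises etcdctl, clientv3 and curl (gRPC
+// gateway, /metrics, /version, /health and /debug/vars) against a single
+// member to confirm gRPC and HTTP traffic are both served.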
+func testConnectionMultiplexing(ctx context.Context, t *testing.T, member e2e.EtcdProcess, connType e2e.ClientConnType) {
+ httpEndpoint := member.EndpointsHTTP()[0]
+ grpcEndpoint := member.EndpointsGRPC()[0]
+ switch connType {
+ case e2e.ClientTLS:
+ httpEndpoint = e2e.ToTLS(httpEndpoint)
+ grpcEndpoint = e2e.ToTLS(grpcEndpoint)
+ case e2e.ClientNonTLS:
+ default:
+ panic(fmt.Sprintf("Unsupported conn type %v", connType))
+ }
+ t.Run("etcdctl", func(t *testing.T) {
+ etcdctl, err := e2e.NewEtcdctl(e2e.ClientConfig{ConnectionType: connType}, []string{grpcEndpoint})
+ require.NoError(t, err)
+ _, err = etcdctl.Get(ctx, "a", config.GetOptions{})
+ assert.NoError(t, err)
+ })
+ t.Run("clientv3", func(t *testing.T) {
+ c := newClient(t, []string{grpcEndpoint}, e2e.ClientConfig{ConnectionType: connType})
+ _, err := c.Get(ctx, "a")
+ assert.NoError(t, err)
+ })
+ t.Run("curl", func(t *testing.T) {
+ for _, httpVersion := range []string{"2", "1.1", ""} {
+ tname := "http" + httpVersion
+ if httpVersion == "" {
+ tname = "default"
+ }
+ t.Run(tname, func(t *testing.T) {
+ assert.NoError(t, fetchGRPCGateway(httpEndpoint, httpVersion, connType))
+ assert.NoError(t, fetchMetrics(t, httpEndpoint, httpVersion, connType))
+ assert.NoError(t, fetchVersion(httpEndpoint, httpVersion, connType))
+ assert.NoError(t, fetchHealth(httpEndpoint, httpVersion, connType))
+ assert.NoError(t, fetchDebugVars(httpEndpoint, httpVersion, connType))
+ })
+ }
+ })
+}
+
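+// fetchGRPCGateway sends a JSON range request through the gRPC gateway and validates the response shape.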
+func fetchGRPCGateway(endpoint string, httpVersion string, connType e2e.ClientConnType) error {
+ rangeData, err := json.Marshal(&pb.RangeRequest{
+ Key: []byte("a"),
+ })
+ if err != nil {
+ return err
+ }
+ req := e2e.CURLReq{Endpoint: "/v3/kv/range", Value: string(rangeData), Timeout: 5, HTTPVersion: httpVersion}
+ respData, err := curl(endpoint, "POST", req, connType)
+ if err != nil {
+ return err
+ }
+ return validateGrpcgatewayRangeResponse([]byte(respData))
+}
+
+func validateGrpcgatewayRangeResponse(respData []byte) error {
+ // JSON annotations are adjusted so numeric ResponseHeader fields are decoded from their string encoding.
+ type responseHeader struct {
+ //revive:disable:var-naming
+ ClusterId uint64 `json:"cluster_id,string,omitempty"`
+ MemberId uint64 `json:"member_id,string,omitempty"`
+ //revive:enable:var-naming
+ Revision int64 `json:"revision,string,omitempty"`
+ RaftTerm uint64 `json:"raft_term,string,omitempty"`
+ }
+ type rangeResponse struct {
+ Header *responseHeader `json:"header,omitempty"`
+ Kvs []*mvccpb.KeyValue `json:"kvs,omitempty"`
+ More bool `json:"more,omitempty"`
+ Count int64 `json:"count,omitempty"`
+ }
+ var resp rangeResponse
+ return json.Unmarshal(respData, &resp)
+}
+
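+// fetchMetrics downloads /metrics to a temporary file and parses it with the
+// Prometheus text parser to ensure the output is well formed.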
+func fetchMetrics(t *testing.T, endpoint string, httpVersion string, connType e2e.ClientConnType) error {
+ tmpDir := t.TempDir()
+ metricFile := filepath.Join(tmpDir, "metrics")
+
+ req := e2e.CURLReq{Endpoint: "/metrics", Timeout: 5, HTTPVersion: httpVersion, OutputFile: metricFile}
+ if _, err := curl(endpoint, "GET", req, connType); err != nil {
+ return err
+ }
+
+ rawData, err := os.ReadFile(metricFile)
+ if err != nil {
+ return fmt.Errorf("failed to read the metric: %w", err)
+ }
+ respData := string(rawData)
+
+ var parser expfmt.TextParser
+ _, err = parser.TextToMetricFamilies(strings.NewReader(strings.ReplaceAll(respData, "\r\n", "\n")))
+ return err
+}
+
+func fetchVersion(endpoint string, httpVersion string, connType e2e.ClientConnType) error {
+ req := e2e.CURLReq{Endpoint: "/version", Timeout: 5, HTTPVersion: httpVersion}
+ respData, err := curl(endpoint, "GET", req, connType)
+ if err != nil {
+ return err
+ }
+ var resp version.Versions
+ return json.Unmarshal([]byte(respData), &resp)
+}
+
+func fetchHealth(endpoint string, httpVersion string, connType e2e.ClientConnType) error {
+ req := e2e.CURLReq{Endpoint: "/health", Timeout: 5, HTTPVersion: httpVersion}
+ respData, err := curl(endpoint, "GET", req, connType)
+ if err != nil {
+ return err
+ }
+ var resp etcdhttp.Health
+ return json.Unmarshal([]byte(respData), &resp)
+}
+
+func fetchDebugVars(endpoint string, httpVersion string, connType e2e.ClientConnType) error {
+ req := e2e.CURLReq{Endpoint: "/debug/vars", Timeout: 5, HTTPVersion: httpVersion}
+ respData, err := curl(endpoint, "GET", req, connType)
+ if err != nil {
+ return err
+ }
+ var resp map[string]any
+ return json.Unmarshal([]byte(respData), &resp)
+}
diff --git a/tests/e2e/corrupt_test.go b/tests/e2e/corrupt_test.go
new file mode 100644
index 00000000000..9b71c8dc609
--- /dev/null
+++ b/tests/e2e/corrupt_test.go
@@ -0,0 +1,465 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !cluster_proxy
+
+package e2e
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/server/v3/storage/datadir"
+ "go.etcd.io/etcd/server/v3/storage/mvcc/testutil"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+func TestEtcdCorruptHash(t *testing.T) {
+ // oldenv := os.Getenv("EXPECT_DEBUG")
+ // defer os.Setenv("EXPECT_DEBUG", oldenv)
+ // os.Setenv("EXPECT_DEBUG", "1")
+
+ cfg := e2e.NewConfigNoTLS()
+
+ // trigger a snapshot so that the restarted member can load peers from disk
+ cfg.ServerConfig.SnapshotCount = 3
+
+ testCtl(t, corruptTest, withQuorum(),
+ withCfg(*cfg),
+ withInitialCorruptCheck(),
+ withCorruptFunc(testutil.CorruptBBolt),
+ )
+}
+
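+// corruptTest writes a batch of keys, corrupts the first member's backend
+// while it is stopped, and expects the restarted member to report a data
+// inconsistency with its peers.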
+func corruptTest(cx ctlCtx) {
+ cx.t.Log("putting 10 keys...")
+ for i := 0; i < 10; i++ {
+ if err := ctlV3Put(cx, fmt.Sprintf("foo%05d", i), fmt.Sprintf("v%05d", i), ""); err != nil {
+ if cx.dialTimeout > 0 && !isGRPCTimedout(err) {
+ cx.t.Fatalf("putTest ctlV3Put error (%v)", err)
+ }
+ }
+ }
+ // give all nodes enough time to sync to the same data
+ cx.t.Log("sleeping 3sec to let nodes sync...")
+ time.Sleep(3 * time.Second)
+
+ cx.t.Log("connecting clientv3...")
+ eps := cx.epc.EndpointsGRPC()
+ cli1, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[1]}, DialTimeout: 3 * time.Second})
+ require.NoError(cx.t, err)
+ defer cli1.Close()
+
+ sresp, err := cli1.Status(context.TODO(), eps[0])
+ cx.t.Logf("checked status sresp:%v err:%v", sresp, err)
+ require.NoError(cx.t, err)
+ id0 := sresp.Header.GetMemberId()
+
+ cx.t.Log("stopping etcd[0]...")
+ cx.epc.Procs[0].Stop()
+
+ // corrupting first member by modifying backend offline.
+ fp := datadir.ToBackendFileName(cx.epc.Procs[0].Config().DataDirPath)
+ cx.t.Logf("corrupting backend: %v", fp)
+ err = cx.corruptFunc(fp)
+ require.NoError(cx.t, err)
+
+ cx.t.Log("restarting etcd[0]")
+ ep := cx.epc.Procs[0]
+ proc, err := e2e.SpawnCmd(append([]string{ep.Config().ExecPath}, ep.Config().Args...), cx.envMap)
+ require.NoError(cx.t, err)
+ defer proc.Stop()
+
+ cx.t.Log("waiting for etcd[0] failure...")
+ // restarting corrupted member should fail
+ e2e.WaitReadyExpectProc(context.TODO(), proc, []string{fmt.Sprintf("etcdmain: %016x found data inconsistency with peers", id0)})
+}
+
+func TestInPlaceRecovery(t *testing.T) {
+ basePort := 20000
+ e2e.BeforeTest(t)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // Initialize the cluster.
+ epcOld, err := e2e.NewEtcdProcessCluster(ctx, t,
+ e2e.WithInitialClusterToken("old"),
+ e2e.WithKeepDataDir(false),
+ e2e.WithCorruptCheckTime(time.Second),
+ e2e.WithBasePort(basePort),
+ )
+ if err != nil {
+ t.Fatalf("could not start etcd process cluster (%v)", err)
+ }
+ t.Cleanup(func() {
+ if errC := epcOld.Close(); errC != nil {
+ t.Fatalf("error closing etcd processes (%v)", errC)
+ }
+ })
+ t.Log("old cluster started.")
+
+ // Put some data into the old cluster, so that after recovering from a blank db, the hash diverges.
+ t.Log("putting 10 keys...")
+ oldCc, err := e2e.NewEtcdctl(epcOld.Cfg.Client, epcOld.EndpointsGRPC())
+ require.NoError(t, err)
+ for i := 0; i < 10; i++ {
+ err = oldCc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i), config.PutOptions{})
+ require.NoErrorf(t, err, "error on put")
+ }
+
+ // Create a new cluster config, but with the same port numbers. In this way the new servers can stay in
+ // contact with the old ones.
+ epcNewConfig := e2e.NewConfig(
+ e2e.WithInitialClusterToken("new"),
+ e2e.WithKeepDataDir(false),
+ e2e.WithCorruptCheckTime(time.Second),
+ e2e.WithBasePort(basePort),
+ e2e.WithInitialCorruptCheck(true),
+ )
+ epcNew, err := e2e.InitEtcdProcessCluster(t, epcNewConfig)
+ if err != nil {
+ t.Fatalf("could not init etcd process cluster (%v)", err)
+ }
+ t.Cleanup(func() {
+ if errC := epcNew.Close(); errC != nil {
+ t.Fatalf("error closing etcd processes (%v)", errC)
+ }
+ })
+
+ newCc, err := e2e.NewEtcdctl(epcNew.Cfg.Client, epcNew.EndpointsGRPC())
+ require.NoError(t, err)
+
+ // Rolling recovery of the servers.
+ wg := sync.WaitGroup{}
+ t.Log("rolling updating servers in place...")
+ for i := range epcNew.Procs {
+ oldProc := epcOld.Procs[i]
+ err = oldProc.Close()
+ if err != nil {
+ t.Fatalf("could not stop etcd process (%v)", err)
+ }
+ t.Logf("old cluster server %d: %s stopped.", i, oldProc.Config().Name)
+ wg.Add(1)
+ // Start servers in background to avoid blocking on server start.
+ // EtcdProcess.Start waits until etcd becomes healthy, which will not happen here until we restart at least 2 members.
+ go func(proc e2e.EtcdProcess) {
+ defer wg.Done()
+ err = proc.Start(ctx)
+ if err != nil {
+ t.Errorf("could not start etcd process (%v)", err)
+ }
+ t.Logf("new cluster server: %s started in-place with blank db.", proc.Config().Name)
+ }(epcNew.Procs[i])
+ t.Log("sleeping 5 sec to let nodes do periodical check...")
+ time.Sleep(5 * time.Second)
+ }
+ wg.Wait()
+ t.Log("new cluster started.")
+
+ alarmResponse, err := newCc.AlarmList(ctx)
+ require.NoErrorf(t, err, "error on alarm list")
+ for _, alarm := range alarmResponse.Alarms {
+ if alarm.Alarm == etcdserverpb.AlarmType_CORRUPT {
+ t.Fatalf("there is no corruption after in-place recovery, but corruption reported.")
+ }
+ }
+ t.Log("no corruption detected.")
+}
+
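+// TestPeriodicCheckDetectsCorruption corrupts one member's backend and expects
+// the periodic corruption check to raise a CORRUPT alarm for that member.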
+func TestPeriodicCheckDetectsCorruption(t *testing.T) {
+ checkTime := time.Second
+ e2e.BeforeTest(t)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ epc, err := e2e.NewEtcdProcessCluster(ctx, t,
+ e2e.WithKeepDataDir(true),
+ e2e.WithCorruptCheckTime(time.Second),
+ )
+ if err != nil {
+ t.Fatalf("could not start etcd process cluster (%v)", err)
+ }
+ t.Cleanup(func() {
+ if errC := epc.Close(); errC != nil {
+ t.Fatalf("error closing etcd processes (%v)", errC)
+ }
+ })
+
+ cc := epc.Etcdctl()
+ for i := 0; i < 10; i++ {
+ err = cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i), config.PutOptions{})
+ require.NoErrorf(t, err, "error on put")
+ }
+
+ memberID, found, err := getMemberIDByName(ctx, cc, epc.Procs[0].Config().Name)
+ require.NoErrorf(t, err, "error on member list")
+ assert.Truef(t, found, "member not found")
+
+ epc.Procs[0].Stop()
+ err = testutil.CorruptBBolt(datadir.ToBackendFileName(epc.Procs[0].Config().DataDirPath))
+ require.NoError(t, err)
+
+ err = epc.Procs[0].Restart(context.TODO())
+ require.NoError(t, err)
+ time.Sleep(checkTime * 11 / 10)
+ alarmResponse, err := cc.AlarmList(ctx)
+ require.NoErrorf(t, err, "error on alarm list")
+ assert.Equal(t, []*etcdserverpb.AlarmMember{{Alarm: etcdserverpb.AlarmType_CORRUPT, MemberID: memberID}}, alarmResponse.Alarms)
+}
+
+func TestCompactHashCheckDetectCorruption(t *testing.T) {
+ testCompactHashCheckDetectCorruption(t, false)
+}
+
+func TestCompactHashCheckDetectCorruptionWithFeatureGate(t *testing.T) {
+ testCompactHashCheckDetectCorruption(t, true)
+}
+
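+// testCompactHashCheckDetectCorruption corrupts one member's backend, triggers
+// a compaction and expects the compact hash check to raise a CORRUPT alarm for
+// that member.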
+func testCompactHashCheckDetectCorruption(t *testing.T, useFeatureGate bool) {
+ checkTime := time.Second
+ e2e.BeforeTest(t)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ opts := []e2e.EPClusterOption{e2e.WithKeepDataDir(true), e2e.WithCompactHashCheckTime(checkTime)}
+ if useFeatureGate {
+ opts = append(opts, e2e.WithServerFeatureGate("CompactHashCheck", true))
+ } else {
+ opts = append(opts, e2e.WithCompactHashCheckEnabled(true))
+ }
+ epc, err := e2e.NewEtcdProcessCluster(ctx, t, opts...)
+ if err != nil {
+ t.Fatalf("could not start etcd process cluster (%v)", err)
+ }
+ t.Cleanup(func() {
+ if errC := epc.Close(); errC != nil {
+ t.Fatalf("error closing etcd processes (%v)", errC)
+ }
+ })
+
+ cc := epc.Etcdctl()
+ for i := 0; i < 10; i++ {
+ err = cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i), config.PutOptions{})
+ require.NoErrorf(t, err, "error on put")
+ }
+ memberID, found, err := getMemberIDByName(ctx, cc, epc.Procs[0].Config().Name)
+ require.NoErrorf(t, err, "error on member list")
+ assert.Truef(t, found, "member not found")
+
+ epc.Procs[0].Stop()
+ err = testutil.CorruptBBolt(datadir.ToBackendFileName(epc.Procs[0].Config().DataDirPath))
+ require.NoError(t, err)
+
+ err = epc.Procs[0].Restart(ctx)
+ require.NoError(t, err)
+ _, err = cc.Compact(ctx, 5, config.CompactOption{})
+ require.NoError(t, err)
+ time.Sleep(checkTime * 11 / 10)
+ alarmResponse, err := cc.AlarmList(ctx)
+ require.NoErrorf(t, err, "error on alarm list")
+ assert.Equal(t, []*etcdserverpb.AlarmMember{{Alarm: etcdserverpb.AlarmType_CORRUPT, MemberID: memberID}}, alarmResponse.Alarms)
+}
+
+func TestCompactHashCheckDetectCorruptionInterrupt(t *testing.T) {
+ testCompactHashCheckDetectCorruptionInterrupt(t, false)
+}
+
+func TestCompactHashCheckDetectCorruptionInterruptWithFeatureGate(t *testing.T) {
+ testCompactHashCheckDetectCorruptionInterrupt(t, true)
+}
+
+func testCompactHashCheckDetectCorruptionInterrupt(t *testing.T, useFeatureGate bool) {
+ checkTime := time.Second
+ e2e.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+ defer cancel()
+
+ slowCompactionNodeIndex := 1
+
+ // Start a new cluster, with compact hash check enabled.
+ t.Log("creating a new cluster with 3 nodes...")
+
+ dataDirPath := t.TempDir()
+ opts := []e2e.EPClusterOption{
+ e2e.WithKeepDataDir(true),
+ e2e.WithCompactHashCheckTime(checkTime),
+ e2e.WithClusterSize(3),
+ e2e.WithDataDirPath(dataDirPath),
+ e2e.WithLogLevel("info"),
+ }
+ if useFeatureGate {
+ opts = append(opts, e2e.WithServerFeatureGate("CompactHashCheck", true))
+ } else {
+ opts = append(opts, e2e.WithCompactHashCheckEnabled(true))
+ }
+
+ cfg := e2e.NewConfig(opts...)
+ epc, err := e2e.InitEtcdProcessCluster(t, cfg)
+ require.NoError(t, err)
+
+ // Assign a node a very slow compaction speed, so that its compaction can be interrupted.
+ err = epc.UpdateProcOptions(slowCompactionNodeIndex, t,
+ e2e.WithCompactionBatchLimit(1),
+ e2e.WithCompactionSleepInterval(1*time.Hour),
+ )
+ require.NoError(t, err)
+
+ epc, err = e2e.StartEtcdProcessCluster(ctx, t, epc, cfg)
+ require.NoError(t, err)
+
+ t.Cleanup(func() {
+ if errC := epc.Close(); errC != nil {
+ t.Fatalf("error closing etcd processes (%v)", errC)
+ }
+ })
+
+ // Put 10 values to the same key so that the compaction will drop some stale revisions.
+ t.Log("putting 10 values to the identical key...")
+ cc := epc.Etcdctl()
+ for i := 0; i < 10; i++ {
+ err = cc.Put(ctx, "key", fmt.Sprint(i), config.PutOptions{})
+ require.NoErrorf(t, err, "error on put")
+ }
+
+ t.Log("compaction started...")
+ _, err = cc.Compact(ctx, 5, config.CompactOption{})
+ require.NoError(t, err)
+
+ err = epc.Procs[slowCompactionNodeIndex].Close()
+ require.NoError(t, err)
+
+ err = epc.UpdateProcOptions(slowCompactionNodeIndex, t)
+ require.NoError(t, err)
+
+ t.Logf("restart proc %d to interrupt its compaction...", slowCompactionNodeIndex)
+ err = epc.Procs[slowCompactionNodeIndex].Restart(ctx)
+ require.NoError(t, err)
+
+ // Wait until the node finishes its compaction and the leader finishes the compaction hash check
+ _, err = epc.Procs[slowCompactionNodeIndex].Logs().ExpectWithContext(ctx, expect.ExpectedResponse{Value: "finished scheduled compaction"})
+ require.NoErrorf(t, err, "can't get log indicating finished scheduled compaction")
+
+ leaderIndex := epc.WaitLeader(t)
+ _, err = epc.Procs[leaderIndex].Logs().ExpectWithContext(ctx, expect.ExpectedResponse{Value: "finished compaction hash check"})
+ require.NoErrorf(t, err, "can't get log indicating finished compaction hash check")
+
+ alarmResponse, err := cc.AlarmList(ctx)
+ require.NoErrorf(t, err, "error on alarm list")
+ for _, alarm := range alarmResponse.Alarms {
+ if alarm.Alarm == etcdserverpb.AlarmType_CORRUPT {
+ t.Fatal("there should be no corruption after resuming the compaction, but corruption detected")
+ }
+ }
+ t.Log("no corruption detected.")
+}
+
+func TestCtlV3SerializableRead(t *testing.T) {
+ testCtlV3ReadAfterWrite(t, clientv3.WithSerializable())
+}
+
+func TestCtlV3LinearizableRead(t *testing.T) {
+ testCtlV3ReadAfterWrite(t)
+}
+
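+// testCtlV3ReadAfterWrite verifies that a value written just before the member
+// is killed is still returned by reads after the member restarts.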
+func testCtlV3ReadAfterWrite(t *testing.T, ops ...clientv3.OpOption) {
+ e2e.BeforeTest(t)
+
+ ctx := context.Background()
+
+ epc, err := e2e.NewEtcdProcessCluster(ctx, t,
+ e2e.WithClusterSize(1),
+ e2e.WithEnvVars(map[string]string{"GOFAIL_FAILPOINTS": `raftBeforeSave=sleep("200ms");beforeCommit=sleep("200ms")`}),
+ )
+ require.NoErrorf(t, err, "failed to start etcd cluster")
+ defer func() {
+ derr := epc.Close()
+ require.NoErrorf(t, derr, "failed to close etcd cluster")
+ }()
+
+ cc, err := clientv3.New(clientv3.Config{
+ Endpoints: epc.EndpointsGRPC(),
+ DialKeepAliveTime: 5 * time.Second,
+ DialKeepAliveTimeout: 1 * time.Second,
+ })
+ require.NoError(t, err)
+ defer func() {
+ derr := cc.Close()
+ require.NoError(t, derr)
+ }()
+
+ _, err = cc.Put(ctx, "foo", "bar")
+ require.NoError(t, err)
+
+ // Refer to https://github.com/etcd-io/etcd/pull/16658#discussion_r1341346778
+ t.Log("Restarting the etcd process to ensure all data is persisted")
+ err = epc.Procs[0].Restart(ctx)
+ require.NoError(t, err)
+ epc.WaitLeader(t)
+
+ _, err = cc.Put(ctx, "foo", "bar2")
+ require.NoError(t, err)
+
+ t.Log("Killing the etcd process right after successfully writing a new key/value")
+ err = epc.Procs[0].Kill()
+ require.NoError(t, err)
+ err = epc.Procs[0].Wait(ctx)
+ require.NoError(t, err)
+
+ stopc := make(chan struct{}, 1)
+ donec := make(chan struct{}, 1)
+
+ t.Log("Starting a goroutine to repeatedly read the key/value")
+ count := 0
+ go func() {
+ defer func() {
+ donec <- struct{}{}
+ }()
+ for {
+ select {
+ case <-stopc:
+ return
+ default:
+ }
+
+ rctx, cancel := context.WithTimeout(ctx, 2*time.Second)
+ resp, rerr := cc.Get(rctx, "foo", ops...)
+ cancel()
+ if rerr != nil {
+ continue
+ }
+
+ count++
+ assert.Equal(t, "bar2", string(resp.Kvs[0].Value))
+ }
+ }()
+
+ t.Log("Starting the etcd process again")
+ err = epc.Procs[0].Start(ctx)
+ require.NoError(t, err)
+
+ time.Sleep(3 * time.Second)
+ stopc <- struct{}{}
+
+ <-donec
+ assert.Positive(t, count)
+ t.Logf("Checked the key/value %d times", count)
+}
diff --git a/tests/e2e/ctl_v2_test.go b/tests/e2e/ctl_v2_test.go
deleted file mode 100644
index 107f1c2f79c..00000000000
--- a/tests/e2e/ctl_v2_test.go
+++ /dev/null
@@ -1,534 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package e2e
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "strings"
- "testing"
- "time"
-)
-
-func BeforeTestV2(t testing.TB) {
- BeforeTest(t)
- os.Setenv("ETCDCTL_API", "2")
- t.Cleanup(func() {
- os.Unsetenv("ETCDCTL_API")
- })
-}
-
-func TestCtlV2Set(t *testing.T) { testCtlV2Set(t, newConfigNoTLS(), false) }
-func TestCtlV2SetQuorum(t *testing.T) { testCtlV2Set(t, newConfigNoTLS(), true) }
-func TestCtlV2SetClientTLS(t *testing.T) { testCtlV2Set(t, newConfigClientTLS(), false) }
-func TestCtlV2SetPeerTLS(t *testing.T) { testCtlV2Set(t, newConfigPeerTLS(), false) }
-func TestCtlV2SetTLS(t *testing.T) { testCtlV2Set(t, newConfigTLS(), false) }
-func testCtlV2Set(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
- BeforeTestV2(t)
-
- cfg.enableV2 = true
- epc := setupEtcdctlTest(t, cfg, quorum)
- defer cleanupEtcdProcessCluster(epc, t)
-
- key, value := "foo", "bar"
-
- if err := etcdctlSet(epc, key, value); err != nil {
- t.Fatalf("failed set (%v)", err)
- }
-
- if err := etcdctlGet(epc, key, value, quorum); err != nil {
- t.Fatalf("failed get (%v)", err)
- }
-}
-
-func TestCtlV2Mk(t *testing.T) { testCtlV2Mk(t, newConfigNoTLS(), false) }
-func TestCtlV2MkQuorum(t *testing.T) { testCtlV2Mk(t, newConfigNoTLS(), true) }
-func TestCtlV2MkTLS(t *testing.T) { testCtlV2Mk(t, newConfigTLS(), false) }
-func testCtlV2Mk(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
- BeforeTestV2(t)
-
- cfg.enableV2 = true
- epc := setupEtcdctlTest(t, cfg, quorum)
- defer cleanupEtcdProcessCluster(epc, t)
-
- key, value := "foo", "bar"
-
- if err := etcdctlMk(epc, key, value, true); err != nil {
- t.Fatalf("failed mk (%v)", err)
- }
- if err := etcdctlMk(epc, key, value, false); err != nil {
- t.Fatalf("failed mk (%v)", err)
- }
-
- if err := etcdctlGet(epc, key, value, quorum); err != nil {
- t.Fatalf("failed get (%v)", err)
- }
-}
-
-func TestCtlV2Rm(t *testing.T) { testCtlV2Rm(t, newConfigNoTLS()) }
-func TestCtlV2RmTLS(t *testing.T) { testCtlV2Rm(t, newConfigTLS()) }
-func testCtlV2Rm(t *testing.T, cfg *etcdProcessClusterConfig) {
- BeforeTestV2(t)
-
- cfg.enableV2 = true
- epc := setupEtcdctlTest(t, cfg, true)
- defer cleanupEtcdProcessCluster(epc, t)
-
- key, value := "foo", "bar"
-
- if err := etcdctlSet(epc, key, value); err != nil {
- t.Fatalf("failed set (%v)", err)
- }
-
- if err := etcdctlRm(epc, key, value, true); err != nil {
- t.Fatalf("failed rm (%v)", err)
- }
- if err := etcdctlRm(epc, key, value, false); err != nil {
- t.Fatalf("failed rm (%v)", err)
- }
-}
-
-func TestCtlV2Ls(t *testing.T) { testCtlV2Ls(t, newConfigNoTLS(), false) }
-func TestCtlV2LsQuorum(t *testing.T) { testCtlV2Ls(t, newConfigNoTLS(), true) }
-func TestCtlV2LsTLS(t *testing.T) { testCtlV2Ls(t, newConfigTLS(), false) }
-func testCtlV2Ls(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) {
- BeforeTestV2(t)
-
- cfg.enableV2 = true
- epc := setupEtcdctlTest(t, cfg, quorum)
- defer cleanupEtcdProcessCluster(epc, t)
-
- key, value := "foo", "bar"
-
- if err := etcdctlSet(epc, key, value); err != nil {
- t.Fatalf("failed set (%v)", err)
- }
-
- if err := etcdctlLs(epc, key, quorum); err != nil {
- t.Fatalf("failed ls (%v)", err)
- }
-}
-
-func TestCtlV2Watch(t *testing.T) { testCtlV2Watch(t, newConfigNoTLS(), false) }
-func TestCtlV2WatchTLS(t *testing.T) { testCtlV2Watch(t, newConfigTLS(), false) }
-
-func testCtlV2Watch(t *testing.T, cfg *etcdProcessClusterConfig, noSync bool) {
- BeforeTestV2(t)
-
- cfg.enableV2 = true
- epc := setupEtcdctlTest(t, cfg, true)
- defer cleanupEtcdProcessCluster(epc, t)
-
- key, value := "foo", "bar"
- errc := etcdctlWatch(epc, key, value, noSync)
- if err := etcdctlSet(epc, key, value); err != nil {
- t.Fatalf("failed set (%v)", err)
- }
-
- select {
- case err := <-errc:
- if err != nil {
- t.Fatalf("failed watch (%v)", err)
- }
- case <-time.After(5 * time.Second):
- t.Fatalf("watch timed out")
- }
-}
-
-func TestCtlV2GetRoleUser(t *testing.T) {
- BeforeTestV2(t)
-
- copied := newConfigNoTLS()
- copied.enableV2 = true
- epc := setupEtcdctlTest(t, copied, false)
- defer cleanupEtcdProcessCluster(epc, t)
-
- if err := etcdctlRoleAdd(epc, "foo"); err != nil {
- t.Fatalf("failed to add role (%v)", err)
- }
- if err := etcdctlUserAdd(epc, "username", "password"); err != nil {
- t.Fatalf("failed to add user (%v)", err)
- }
- if err := etcdctlUserGrant(epc, "username", "foo"); err != nil {
- t.Fatalf("failed to grant role (%v)", err)
- }
- if err := etcdctlUserGet(epc, "username"); err != nil {
- t.Fatalf("failed to get user (%v)", err)
- }
-
- // ensure double grant gives an error; was crashing in 2.3.1
- regrantArgs := etcdctlPrefixArgs(epc)
- regrantArgs = append(regrantArgs, "user", "grant", "--roles", "foo", "username")
- if err := spawnWithExpect(regrantArgs, "duplicate"); err != nil {
- t.Fatalf("missing duplicate error on double grant role (%v)", err)
- }
-}
-
-func TestCtlV2UserListUsername(t *testing.T) { testCtlV2UserList(t, "username") }
-func TestCtlV2UserListRoot(t *testing.T) { testCtlV2UserList(t, "root") }
-func testCtlV2UserList(t *testing.T, username string) {
- BeforeTestV2(t)
-
- copied := newConfigNoTLS()
- copied.enableV2 = true
- epc := setupEtcdctlTest(t, copied, false)
- defer cleanupEtcdProcessCluster(epc, t)
-
- if err := etcdctlUserAdd(epc, username, "password"); err != nil {
- t.Fatalf("failed to add user (%v)", err)
- }
- if err := etcdctlUserList(epc, username); err != nil {
- t.Fatalf("failed to list users (%v)", err)
- }
-}
-
-func TestCtlV2RoleList(t *testing.T) {
- BeforeTestV2(t)
-
- copied := newConfigNoTLS()
- copied.enableV2 = true
- epc := setupEtcdctlTest(t, copied, false)
- defer cleanupEtcdProcessCluster(epc, t)
-
- if err := etcdctlRoleAdd(epc, "foo"); err != nil {
- t.Fatalf("failed to add role (%v)", err)
- }
- if err := etcdctlRoleList(epc, "foo"); err != nil {
- t.Fatalf("failed to list roles (%v)", err)
- }
-}
-
-func TestUtlCtlV2Backup(t *testing.T) {
- for snap := range []int{0, 1} {
- for _, v3 := range []bool{true, false} {
- for _, utl := range []bool{true, false} {
- t.Run(fmt.Sprintf("etcdutl:%v;snap:%v;v3:%v", utl, snap, v3),
- func(t *testing.T) {
- testUtlCtlV2Backup(t, snap, v3, utl)
- })
- }
- }
- }
-}
-
-func testUtlCtlV2Backup(t *testing.T, snapCount int, v3 bool, utl bool) {
- BeforeTestV2(t)
-
- backupDir, err := ioutil.TempDir(t.TempDir(), "testbackup0.etcd")
- if err != nil {
- t.Fatal(err)
- }
-
- etcdCfg := newConfigNoTLS()
- etcdCfg.snapshotCount = snapCount
- etcdCfg.enableV2 = true
- t.Log("Starting etcd-1")
- epc1 := setupEtcdctlTest(t, etcdCfg, false)
-
- // v3 put before v2 set so snapshot happens after v3 operations to confirm
- // v3 data is preserved after snapshot.
- os.Setenv("ETCDCTL_API", "3")
- if err := ctlV3Put(ctlCtx{t: t, epc: epc1}, "v3key", "123", ""); err != nil {
- t.Fatal(err)
- }
- os.Setenv("ETCDCTL_API", "2")
-
- t.Log("Setting key in etcd-1")
- if err := etcdctlSet(epc1, "foo1", "bar1"); err != nil {
- t.Fatal(err)
- }
-
- if v3 {
- t.Log("Stopping etcd-1")
- // v3 must lock the db to backup, so stop process
- if err := epc1.Stop(); err != nil {
- t.Fatal(err)
- }
- }
- t.Log("Triggering etcd backup")
- if err := etcdctlBackup(t, epc1, epc1.procs[0].Config().dataDirPath, backupDir, v3, utl); err != nil {
- t.Fatal(err)
- }
- t.Log("Closing etcd-1 backup")
- if err := epc1.Close(); err != nil {
- t.Fatalf("error closing etcd processes (%v)", err)
- }
-
- t.Logf("Backup directory: %s", backupDir)
-
- t.Log("Starting etcd-2 (post backup)")
- // restart from the backup directory
- cfg2 := newConfigNoTLS()
- cfg2.dataDirPath = backupDir
- cfg2.keepDataDir = true
- cfg2.forceNewCluster = true
- cfg2.enableV2 = true
- epc2 := setupEtcdctlTest(t, cfg2, false)
- // Make sure a failing test is not leaking resources (running server).
- defer epc2.Close()
-
- t.Log("Getting examplar key")
- // check if backup went through correctly
- if err := etcdctlGet(epc2, "foo1", "bar1", false); err != nil {
- t.Fatal(err)
- }
-
- os.Setenv("ETCDCTL_API", "3")
- ctx2 := ctlCtx{t: t, epc: epc2}
- if v3 {
- t.Log("Getting v3 examplar key")
- if err := ctlV3Get(ctx2, []string{"v3key"}, kv{"v3key", "123"}); err != nil {
- t.Fatal(err)
- }
- } else {
- if err := ctlV3Get(ctx2, []string{"v3key"}); err != nil {
- t.Fatal(err)
- }
- }
- os.Setenv("ETCDCTL_API", "2")
-
- t.Log("Getting examplar key foo2")
- // check if it can serve client requests
- if err := etcdctlSet(epc2, "foo2", "bar2"); err != nil {
- t.Fatal(err)
- }
- if err := etcdctlGet(epc2, "foo2", "bar2", false); err != nil {
- t.Fatal(err)
- }
-
- t.Log("Closing etcd-2")
- if err := epc2.Close(); err != nil {
- t.Fatalf("error closing etcd processes (%v)", err)
- }
-}
-
-func TestCtlV2AuthWithCommonName(t *testing.T) {
- BeforeTestV2(t)
-
- copiedCfg := newConfigClientTLS()
- copiedCfg.clientCertAuthEnabled = true
- copiedCfg.enableV2 = true
- epc := setupEtcdctlTest(t, copiedCfg, false)
- defer cleanupEtcdProcessCluster(epc, t)
-
- if err := etcdctlRoleAdd(epc, "testrole"); err != nil {
- t.Fatalf("failed to add role (%v)", err)
- }
- if err := etcdctlRoleGrant(epc, "testrole", "--rw", "--path=/foo"); err != nil {
- t.Fatalf("failed to grant role (%v)", err)
- }
- if err := etcdctlUserAdd(epc, "root", "123"); err != nil {
- t.Fatalf("failed to add user (%v)", err)
- }
- if err := etcdctlUserAdd(epc, "Autogenerated CA", "123"); err != nil {
- t.Fatalf("failed to add user (%v)", err)
- }
- if err := etcdctlUserGrant(epc, "Autogenerated CA", "testrole"); err != nil {
- t.Fatalf("failed to grant role (%v)", err)
- }
- if err := etcdctlAuthEnable(epc); err != nil {
- t.Fatalf("failed to enable auth (%v)", err)
- }
- if err := etcdctlSet(epc, "foo", "bar"); err != nil {
- t.Fatalf("failed to write (%v)", err)
- }
-}
-
-func TestCtlV2ClusterHealth(t *testing.T) {
- BeforeTestV2(t)
-
- copied := newConfigNoTLS()
- copied.enableV2 = true
- epc := setupEtcdctlTest(t, copied, true)
- defer cleanupEtcdProcessCluster(epc, t)
-
- // all members available
- if err := etcdctlClusterHealth(epc, "cluster is healthy"); err != nil {
- t.Fatalf("cluster-health expected to be healthy (%v)", err)
- }
-
- // missing members, has quorum
- epc.procs[0].Stop()
-
- for i := 0; i < 3; i++ {
- err := etcdctlClusterHealth(epc, "cluster is degraded")
- if err == nil {
- break
- } else if i == 2 {
- t.Fatalf("cluster-health expected to be degraded (%v)", err)
- }
- // possibly no leader yet; retry
- time.Sleep(time.Second)
- }
-
- // no quorum
- epc.procs[1].Stop()
- if err := etcdctlClusterHealth(epc, "cluster is unavailable"); err != nil {
- t.Fatalf("cluster-health expected to be unavailable (%v)", err)
- }
-
- epc.procs[0], epc.procs[1] = nil, nil
-}
-
-func etcdctlPrefixArgs(clus *etcdProcessCluster) []string {
- endpoints := strings.Join(clus.EndpointsV2(), ",")
- cmdArgs := []string{ctlBinPath}
-
- cmdArgs = append(cmdArgs, "--endpoints", endpoints)
- if clus.cfg.clientTLS == clientTLS {
- cmdArgs = append(cmdArgs, "--ca-file", caPath, "--cert-file", certPath, "--key-file", privateKeyPath)
- }
- return cmdArgs
-}
-
-func etcductlPrefixArgs(utl bool) []string {
- if utl {
- return []string{utlBinPath}
- }
- return []string{ctlBinPath}
-}
-
-func etcdctlClusterHealth(clus *etcdProcessCluster, val string) error {
- cmdArgs := append(etcdctlPrefixArgs(clus), "cluster-health")
- return spawnWithExpect(cmdArgs, val)
-}
-
-func etcdctlSet(clus *etcdProcessCluster, key, value string) error {
- cmdArgs := append(etcdctlPrefixArgs(clus), "set", key, value)
- return spawnWithExpect(cmdArgs, value)
-}
-
-func etcdctlMk(clus *etcdProcessCluster, key, value string, first bool) error {
- cmdArgs := append(etcdctlPrefixArgs(clus), "mk", key, value)
- if first {
- return spawnWithExpect(cmdArgs, value)
- }
- return spawnWithExpect(cmdArgs, "Error: 105: Key already exists")
-}
-
-func etcdctlGet(clus *etcdProcessCluster, key, value string, quorum bool) error {
- cmdArgs := append(etcdctlPrefixArgs(clus), "get", key)
- if quorum {
- cmdArgs = append(cmdArgs, "--quorum")
- }
- return spawnWithExpect(cmdArgs, value)
-}
-
-func etcdctlRm(clus *etcdProcessCluster, key, value string, first bool) error {
- cmdArgs := append(etcdctlPrefixArgs(clus), "rm", key)
- if first {
- return spawnWithExpect(cmdArgs, "PrevNode.Value: "+value)
- }
- return spawnWithExpect(cmdArgs, "Error: 100: Key not found")
-}
-
-func etcdctlLs(clus *etcdProcessCluster, key string, quorum bool) error {
- cmdArgs := append(etcdctlPrefixArgs(clus), "ls")
- if quorum {
- cmdArgs = append(cmdArgs, "--quorum")
- }
- return spawnWithExpect(cmdArgs, key)
-}
-
-func etcdctlWatch(clus *etcdProcessCluster, key, value string, noSync bool) <-chan error {
- cmdArgs := append(etcdctlPrefixArgs(clus), "watch", "--after-index=1", key)
- if noSync {
- cmdArgs = append(cmdArgs, "--no-sync")
- }
- errc := make(chan error, 1)
- go func() {
- errc <- spawnWithExpect(cmdArgs, value)
- }()
- return errc
-}
-
-func etcdctlRoleAdd(clus *etcdProcessCluster, role string) error {
- cmdArgs := append(etcdctlPrefixArgs(clus), "role", "add", role)
- return spawnWithExpect(cmdArgs, role)
-}
-
-func etcdctlRoleGrant(clus *etcdProcessCluster, role string, perms ...string) error {
- cmdArgs := append(etcdctlPrefixArgs(clus), "role", "grant")
- cmdArgs = append(cmdArgs, perms...)
- cmdArgs = append(cmdArgs, role)
- return spawnWithExpect(cmdArgs, role)
-}
-
-func etcdctlRoleList(clus *etcdProcessCluster, expectedRole string) error {
- cmdArgs := append(etcdctlPrefixArgs(clus), "role", "list")
- return spawnWithExpect(cmdArgs, expectedRole)
-}
-
-func etcdctlUserAdd(clus *etcdProcessCluster, user, pass string) error {
- cmdArgs := append(etcdctlPrefixArgs(clus), "user", "add", user+":"+pass)
- return spawnWithExpect(cmdArgs, "User "+user+" created")
-}
-
-func etcdctlUserGrant(clus *etcdProcessCluster, user, role string) error {
- cmdArgs := append(etcdctlPrefixArgs(clus), "user", "grant", "--roles", role, user)
- return spawnWithExpect(cmdArgs, "User "+user+" updated")
-}
-
-func etcdctlUserGet(clus *etcdProcessCluster, user string) error {
- cmdArgs := append(etcdctlPrefixArgs(clus), "user", "get", user)
- return spawnWithExpect(cmdArgs, "User: "+user)
-}
-
-func etcdctlUserList(clus *etcdProcessCluster, expectedUser string) error {
- cmdArgs := append(etcdctlPrefixArgs(clus), "user", "list")
- return spawnWithExpect(cmdArgs, expectedUser)
-}
-
-func etcdctlAuthEnable(clus *etcdProcessCluster) error {
- cmdArgs := append(etcdctlPrefixArgs(clus), "auth", "enable")
- return spawnWithExpect(cmdArgs, "Authentication Enabled")
-}
-
-func etcdctlBackup(t testing.TB, clus *etcdProcessCluster, dataDir, backupDir string, v3 bool, utl bool) error {
- cmdArgs := append(etcductlPrefixArgs(utl), "backup", "--data-dir", dataDir, "--backup-dir", backupDir)
- if v3 {
- cmdArgs = append(cmdArgs, "--with-v3")
- } else if utl {
- cmdArgs = append(cmdArgs, "--with-v3=false")
- }
- t.Logf("Running: %v", cmdArgs)
- proc, err := spawnCmd(cmdArgs)
- if err != nil {
- return err
- }
- err = proc.Close()
- if err != nil {
- return err
- }
- return proc.ProcessError()
-}
-
-func setupEtcdctlTest(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) *etcdProcessCluster {
- if !quorum {
- cfg = configStandalone(*cfg)
- }
- epc, err := newEtcdProcessCluster(t, cfg)
- if err != nil {
- t.Fatalf("could not start etcd process cluster (%v)", err)
- }
- return epc
-}
-
-func cleanupEtcdProcessCluster(epc *etcdProcessCluster, t *testing.T) {
- if errC := epc.Close(); errC != nil {
- t.Fatalf("error closing etcd processes (%v)", errC)
- }
-}
diff --git a/tests/e2e/ctl_v3_alarm_test.go b/tests/e2e/ctl_v3_alarm_test.go
deleted file mode 100644
index 7b9b445b09b..00000000000
--- a/tests/e2e/ctl_v3_alarm_test.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package e2e
-
-import (
- "context"
- "os"
- "strings"
- "testing"
- "time"
-
- "go.etcd.io/etcd/client/v3"
-)
-
-func TestCtlV3Alarm(t *testing.T) {
- // The boltdb minimum working set is six pages.
- testCtl(t, alarmTest, withQuota(int64(13*os.Getpagesize())))
-}
-
-func alarmTest(cx ctlCtx) {
- // test small put still works
- smallbuf := strings.Repeat("a", 64)
- if err := ctlV3Put(cx, "1st_test", smallbuf, ""); err != nil {
- cx.t.Fatal(err)
- }
-
- // write some chunks to fill up the database
- buf := strings.Repeat("b", os.Getpagesize())
- for {
- if err := ctlV3Put(cx, "2nd_test", buf, ""); err != nil {
- if !strings.Contains(err.Error(), "etcdserver: mvcc: database space exceeded") {
- cx.t.Fatal(err)
- }
- break
- }
- }
-
- // quota alarm should now be on
- if err := ctlV3Alarm(cx, "list", "alarm:NOSPACE"); err != nil {
- cx.t.Fatal(err)
- }
-
- // '/health' handler should return 'false'
- if err := cURLGet(cx.epc, cURLReq{endpoint: "/health", expected: `{"health":"false","reason":"ALARM NOSPACE"}`}); err != nil {
- cx.t.Fatalf("failed get with curl (%v)", err)
- }
-
- // check that Put is rejected when alarm is on
- if err := ctlV3Put(cx, "3rd_test", smallbuf, ""); err != nil {
- if !strings.Contains(err.Error(), "etcdserver: mvcc: database space exceeded") {
- cx.t.Fatal(err)
- }
- }
-
- eps := cx.epc.EndpointsV3()
-
- // get latest revision to compact
- cli, err := clientv3.New(clientv3.Config{
- Endpoints: eps,
- DialTimeout: 3 * time.Second,
- })
- if err != nil {
- cx.t.Fatal(err)
- }
- defer cli.Close()
- sresp, err := cli.Status(context.TODO(), eps[0])
- if err != nil {
- cx.t.Fatal(err)
- }
-
- // make some space
- if err := ctlV3Compact(cx, sresp.Header.Revision, true); err != nil {
- cx.t.Fatal(err)
- }
- if err := ctlV3OnlineDefrag(cx); err != nil {
- cx.t.Fatal(err)
- }
-
- // turn off alarm
- if err := ctlV3Alarm(cx, "disarm", "alarm:NOSPACE"); err != nil {
- cx.t.Fatal(err)
- }
-
- // put one more key below quota
- if err := ctlV3Put(cx, "4th_test", smallbuf, ""); err != nil {
- cx.t.Fatal(err)
- }
-}
-
-func ctlV3Alarm(cx ctlCtx, cmd string, as ...string) error {
- cmdArgs := append(cx.PrefixArgs(), "alarm", cmd)
- return spawnWithExpects(cmdArgs, as...)
-}
diff --git a/tests/e2e/ctl_v3_auth_cluster_test.go b/tests/e2e/ctl_v3_auth_cluster_test.go
new file mode 100644
index 00000000000..35b7cd289be
--- /dev/null
+++ b/tests/e2e/ctl_v3_auth_cluster_test.go
@@ -0,0 +1,145 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
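+// TestAuthCluster verifies that a second member added to an auth-enabled,
+// single-node cluster (SnapshotCount is set low, so the new member is expected
+// to catch up from a snapshot) serves authenticated writes on both endpoints
+// and converges to the same revision and hash as the first member.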
+func TestAuthCluster(t *testing.T) {
+ e2e.BeforeTest(t)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ epc, err := e2e.NewEtcdProcessCluster(ctx, t,
+ e2e.WithClusterSize(1),
+ e2e.WithSnapshotCount(2),
+ )
+ if err != nil {
+ t.Fatalf("could not start etcd process cluster (%v)", err)
+ }
+ defer func() {
+ if err := epc.Close(); err != nil {
+ t.Fatalf("could not close test cluster (%v)", err)
+ }
+ }()
+
+ epcClient := epc.Etcdctl()
+ createUsers(ctx, t, epcClient)
+
+ if err := epcClient.AuthEnable(ctx); err != nil {
+ t.Fatalf("could not enable Auth: (%v)", err)
+ }
+
+ testUserClientOpts := e2e.WithAuth("test", "testPassword")
+ rootUserClientOpts := e2e.WithAuth("root", "rootPassword")
+
+ // write more than SnapshotCount keys to the single leader to make sure a snapshot is created
+ for i := 0; i <= 10; i++ {
+ if err := epc.Etcdctl(testUserClientOpts).Put(ctx, fmt.Sprintf("/test/%d", i), "test", config.PutOptions{}); err != nil {
+ t.Fatalf("failed to Put (%v)", err)
+ }
+ }
+
+ // start second process
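+ // (root credentials are passed here since, with auth enabled, membership
+ // changes are presumably restricted to the root user)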
+ if _, err := epc.StartNewProc(ctx, nil, t, false /* addAsLearner */, rootUserClientOpts); err != nil {
+ t.Fatalf("could not start second etcd process (%v)", err)
+ }
+
+ // make sure writes to both endpoints are successful
+ endpoints := epc.EndpointsGRPC()
+ assert.Len(t, endpoints, 2)
+ for _, endpoint := range epc.EndpointsGRPC() {
+ if err := epc.Etcdctl(testUserClientOpts, e2e.WithEndpoints([]string{endpoint})).Put(ctx, "/test/key", endpoint, config.PutOptions{}); err != nil {
+ t.Fatalf("failed to Put to %q (%v)", endpoint, err)
+ }
+ }
+
+ // verify all nodes have exact same revision and hash
+ assert.Eventually(t, func() bool {
+ hashKvs, err := epc.Etcdctl(rootUserClientOpts).HashKV(ctx, 0)
+ if err != nil {
+ t.Logf("failed to get HashKV: %v", err)
+ return false
+ }
+ if len(hashKvs) != 2 {
+ t.Logf("not exactly 2 hashkv responses returned: %d", len(hashKvs))
+ return false
+ }
+ if hashKvs[0].Header.Revision != hashKvs[1].Header.Revision {
+ t.Logf("The two members' revisions (%d, %d) are not equal", hashKvs[0].Header.Revision, hashKvs[1].Header.Revision)
+ return false
+ }
+ assert.Equal(t, hashKvs[0].Hash, hashKvs[1].Hash)
+ return true
+ }, time.Second*5, time.Millisecond*100)
+}
+
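+// applyTLSWithRootCommonName swaps the shared e2e fixture cert/key/CA paths
+// for a certificate whose CommonName is "root" (the same .crt file serves as
+// both client cert and CA here) and returns a function that restores the
+// original paths.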
+func applyTLSWithRootCommonName() func() {
+ var (
+ oldCertPath = e2e.CertPath
+ oldPrivateKeyPath = e2e.PrivateKeyPath
+ oldCaPath = e2e.CaPath
+
+ newCertPath = filepath.Join(e2e.FixturesDir, "CommonName-root.crt")
+ newPrivateKeyPath = filepath.Join(e2e.FixturesDir, "CommonName-root.key")
+ newCaPath = filepath.Join(e2e.FixturesDir, "CommonName-root.crt")
+ )
+
+ e2e.CertPath = newCertPath
+ e2e.PrivateKeyPath = newPrivateKeyPath
+ e2e.CaPath = newCaPath
+
+ return func() {
+ e2e.CertPath = oldCertPath
+ e2e.PrivateKeyPath = oldPrivateKeyPath
+ e2e.CaPath = oldCaPath
+ }
+}
+
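+// createUsers provisions the accounts the auth tests rely on: a "root" user
+// bound to the built-in root role, and a "test" user bound to a "test" role
+// that is only granted read/write on the ["/test/", "/test0") key range.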
+func createUsers(ctx context.Context, t *testing.T, client *e2e.EtcdctlV3) {
+ if _, err := client.UserAdd(ctx, "root", "rootPassword", config.UserAddOptions{}); err != nil {
+ t.Fatalf("could not add root user (%v)", err)
+ }
+ if _, err := client.RoleAdd(ctx, "root"); err != nil {
+ t.Fatalf("could not create 'root' role (%v)", err)
+ }
+ if _, err := client.UserGrantRole(ctx, "root", "root"); err != nil {
+ t.Fatalf("could not grant root role to root user (%v)", err)
+ }
+
+ if _, err := client.RoleAdd(ctx, "test"); err != nil {
+ t.Fatalf("could not create 'test' role (%v)", err)
+ }
+ if _, err := client.RoleGrantPermission(ctx, "test", "/test/", "/test0", clientv3.PermissionType(clientv3.PermReadWrite)); err != nil {
+ t.Fatalf("could not RoleGrantPermission (%v)", err)
+ }
+ if _, err := client.UserAdd(ctx, "test", "testPassword", config.UserAddOptions{}); err != nil {
+ t.Fatalf("could not add user test (%v)", err)
+ }
+ if _, err := client.UserGrantRole(ctx, "test", "test"); err != nil {
+ t.Fatalf("could not grant test role to test user (%v)", err)
+ }
+}
diff --git a/tests/e2e/ctl_v3_auth_no_proxy_test.go b/tests/e2e/ctl_v3_auth_no_proxy_test.go
index 0b4807c8304..8529ff38dc9 100644
--- a/tests/e2e/ctl_v3_auth_no_proxy_test.go
+++ b/tests/e2e/ctl_v3_auth_no_proxy_test.go
@@ -12,23 +12,133 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// These tests depends on certificate-based authentication that is NOT supported
+// These tests depend on certificate-based authentication that is NOT supported
// by gRPC proxy.
//go:build !cluster_proxy
-// +build !cluster_proxy
package e2e
import (
+ "context"
+ "fmt"
+ "sync"
"testing"
+ "time"
+
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
)
func TestCtlV3AuthCertCN(t *testing.T) {
- testCtl(t, authTestCertCN, withCfg(*newConfigClientTLSCertAuth()))
+ testCtl(t, authTestCertCN, withCfg(*e2e.NewConfigClientTLSCertAuth()))
}
+
func TestCtlV3AuthCertCNAndUsername(t *testing.T) {
- testCtl(t, authTestCertCNAndUsername, withCfg(*newConfigClientTLSCertAuth()))
+ testCtl(t, authTestCertCNAndUsername, withCfg(*e2e.NewConfigClientTLSCertAuth()))
}
+
func TestCtlV3AuthCertCNAndUsernameNoPassword(t *testing.T) {
- testCtl(t, authTestCertCNAndUsernameNoPassword, withCfg(*newConfigClientTLSCertAuth()))
+ testCtl(t, authTestCertCNAndUsernameNoPassword, withCfg(*e2e.NewConfigClientTLSCertAuth()))
+}
+
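+// This test drives user management and ordinary K/V traffic concurrently
+// against a single-node cluster that authenticates clients by certificate
+// CommonName ("root"); presumably a regression check that auth mutations do
+// not race with regular requests under CN-based authentication.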
+func TestCtlV3AuthCertCNWithConcurrentOperation(t *testing.T) {
+ e2e.BeforeTest(t)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // Apply a certificate whose CommonName is `root`,
+ // and reset the setting when the test case finishes.
+ // TODO(ahrtr): enhance the e2e test framework to support
+ // certificates with CommonName.
+ t.Log("Apply certificate with root CommonName")
+ resetCert := applyTLSWithRootCommonName()
+ defer resetCert()
+
+ t.Log("Create etcd cluster")
+ epc, err := e2e.NewEtcdProcessCluster(ctx, t,
+ e2e.WithClusterSize(1),
+ e2e.WithClientConnType(e2e.ClientTLS),
+ e2e.WithClientCertAuthority(true),
+ )
+ if err != nil {
+ t.Fatalf("could not start etcd process cluster (%v)", err)
+ }
+ defer func() {
+ if err := epc.Close(); err != nil {
+ t.Fatalf("could not close test cluster (%v)", err)
+ }
+ }()
+
+ epcClient := epc.Etcdctl()
+ t.Log("Create users")
+ createUsers(ctx, t, epcClient)
+
+ t.Log("Enable auth")
+ if err := epcClient.AuthEnable(ctx); err != nil {
+ t.Fatalf("could not enable Auth: (%v)", err)
+ }
+
+ // Create two goroutines: one keeps creating & deleting users,
+ // and the other keeps writing & deleting K/V entries.
+ var wg sync.WaitGroup
+ wg.Add(2)
+ errs := make(chan error, 2)
+ donec := make(chan struct{})
+
+ // Create the first goroutine to create & delete users
+ t.Log("Create the first goroutine to create & delete users")
+ go func() {
+ defer wg.Done()
+ for i := 0; i < 100; i++ {
+ user := fmt.Sprintf("testuser-%d", i)
+ pass := fmt.Sprintf("testpass-%d", i)
+ if _, err := epcClient.UserAdd(ctx, user, pass, config.UserAddOptions{}); err != nil {
+ errs <- fmt.Errorf("failed to create user %q: %w", user, err)
+ break
+ }
+
+ if _, err := epcClient.UserDelete(ctx, user); err != nil {
+ errs <- fmt.Errorf("failed to delete user %q: %w", user, err)
+ break
+ }
+ }
+ t.Log("The first goroutine finished")
+ }()
+
+ // Create the second goroutine to write & delete K/V entries
+ t.Log("Create the second goroutine to write & delete K/V entries")
+ go func() {
+ defer wg.Done()
+ for i := 0; i < 100; i++ {
+ key := fmt.Sprintf("key-%d", i)
+ value := fmt.Sprintf("value-%d", i)
+
+ if err := epcClient.Put(ctx, key, value, config.PutOptions{}); err != nil {
+ errs <- fmt.Errorf("failed to put key %q: %w", key, err)
+ break
+ }
+
+ if _, err := epcClient.Delete(ctx, key, config.DeleteOptions{}); err != nil {
+ errs <- fmt.Errorf("failed to delete key %q: %w", key, err)
+ break
+ }
+ }
+ t.Log("The second goroutine finished")
+ }()
+
+ t.Log("Waiting for the two goroutines to complete")
+ go func() {
+ wg.Wait()
+ close(donec)
+ }()
+
+ t.Log("Waiting for test result")
+ select {
+ case err := <-errs:
+ t.Fatalf("Unexpected error: %v", err)
+ case <-donec:
+ t.Log("All done!")
+ case <-time.After(40 * time.Second):
+ t.Fatal("Test case timeout after 40 seconds")
+ }
}
diff --git a/tests/e2e/ctl_v3_auth_security_test.go b/tests/e2e/ctl_v3_auth_security_test.go
new file mode 100644
index 00000000000..fb7a66f9b89
--- /dev/null
+++ b/tests/e2e/ctl_v3_auth_security_test.go
@@ -0,0 +1,57 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !cluster_proxy
+
+package e2e
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+// TestAuth_CVE_2021_28235 verifies https://nvd.nist.gov/vuln/detail/CVE-2021-28235
+func TestAuth_CVE_2021_28235(t *testing.T) {
+ testCtl(t, authTestCVE2021_28235, withCfg(*e2e.NewConfigNoTLS()), withLogLevel("debug"))
+}
+
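+// authTestCVE2021_28235 enables auth, performs an authenticated put, then
+// scrapes the gRPC trace page (/debug/requests) and asserts that the root
+// password does not appear in it, which appears to be the credential leak
+// addressed by CVE-2021-28235.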
+func authTestCVE2021_28235(cx ctlCtx) {
+ // create root user with root role
+ rootPass := "changeme123"
+ err := ctlV3User(cx, []string{"add", "root", "--interactive=false"}, "User root created", []string{rootPass})
+ require.NoError(cx.t, err)
+ err = ctlV3User(cx, []string{"grant-role", "root", "root"}, "Role root is granted to user root", nil)
+ require.NoError(cx.t, err)
+ err = ctlV3AuthEnable(cx)
+ require.NoError(cx.t, err)
+
+ // issue a put request
+ cx.user, cx.pass = "root", rootPass
+ err = ctlV3Put(cx, "foo", "bar", "")
+ require.NoError(cx.t, err)
+
+ // GET /debug/requests
+ httpEndpoint := cx.epc.Procs[0].EndpointsHTTP()[0]
+ req := e2e.CURLReq{Endpoint: "/debug/requests?fam=grpc.Recv.etcdserverpb.Auth&b=0&exp=1", Timeout: 5}
+ respData, err := curl(httpEndpoint, "GET", req, e2e.ClientNonTLS)
+ require.NoError(cx.t, err)
+
+ if strings.Contains(respData, rootPass) {
+ cx.t.Errorf("The root password is included in the request.\n %s", respData)
+ }
+}
diff --git a/tests/e2e/ctl_v3_auth_test.go b/tests/e2e/ctl_v3_auth_test.go
index 58a3b61e037..40a879f1e9f 100644
--- a/tests/e2e/ctl_v3_auth_test.go
+++ b/tests/e2e/ctl_v3_auth_test.go
@@ -15,581 +15,82 @@
package e2e
import (
- "context"
"fmt"
"os"
- "syscall"
"testing"
- "time"
- "go.etcd.io/etcd/client/v3"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
)
-func TestCtlV3AuthEnable(t *testing.T) {
- testCtl(t, authEnableTest)
-}
-func TestCtlV3AuthDisable(t *testing.T) { testCtl(t, authDisableTest) }
-func TestCtlV3AuthStatus(t *testing.T) { testCtl(t, authStatusTest) }
-func TestCtlV3AuthWriteKey(t *testing.T) { testCtl(t, authCredWriteKeyTest) }
-func TestCtlV3AuthRoleUpdate(t *testing.T) { testCtl(t, authRoleUpdateTest) }
-func TestCtlV3AuthUserDeleteDuringOps(t *testing.T) { testCtl(t, authUserDeleteDuringOpsTest) }
-func TestCtlV3AuthRoleRevokeDuringOps(t *testing.T) { testCtl(t, authRoleRevokeDuringOpsTest) }
-func TestCtlV3AuthTxn(t *testing.T) { testCtl(t, authTestTxn) }
-func TestCtlV3AuthTxnJWT(t *testing.T) { testCtl(t, authTestTxn, withCfg(*newConfigJWT())) }
-func TestCtlV3AuthPrefixPerm(t *testing.T) { testCtl(t, authTestPrefixPerm) }
-func TestCtlV3AuthMemberAdd(t *testing.T) { testCtl(t, authTestMemberAdd) }
-func TestCtlV3AuthMemberRemove(t *testing.T) {
- testCtl(t, authTestMemberRemove, withQuorum(), withNoStrictReconfig())
-}
-func TestCtlV3AuthMemberUpdate(t *testing.T) { testCtl(t, authTestMemberUpdate) }
-func TestCtlV3AuthRevokeWithDelete(t *testing.T) { testCtl(t, authTestRevokeWithDelete) }
-func TestCtlV3AuthInvalidMgmt(t *testing.T) { testCtl(t, authTestInvalidMgmt) }
-func TestCtlV3AuthFromKeyPerm(t *testing.T) { testCtl(t, authTestFromKeyPerm) }
-func TestCtlV3AuthAndWatch(t *testing.T) { testCtl(t, authTestWatch) }
-func TestCtlV3AuthAndWatchJWT(t *testing.T) { testCtl(t, authTestWatch, withCfg(*newConfigJWT())) }
-
-func TestCtlV3AuthLeaseTestKeepAlive(t *testing.T) { testCtl(t, authLeaseTestKeepAlive) }
-func TestCtlV3AuthLeaseTestTimeToLiveExpired(t *testing.T) {
- testCtl(t, authLeaseTestTimeToLiveExpired)
-}
-func TestCtlV3AuthLeaseGrantLeases(t *testing.T) { testCtl(t, authLeaseTestLeaseGrantLeases) }
-func TestCtlV3AuthLeaseGrantLeasesJWT(t *testing.T) {
- testCtl(t, authLeaseTestLeaseGrantLeases, withCfg(*newConfigJWT()))
-}
-func TestCtlV3AuthLeaseRevoke(t *testing.T) { testCtl(t, authLeaseTestLeaseRevoke) }
+func TestCtlV3AuthMemberUpdate(t *testing.T) { testCtl(t, authTestMemberUpdate) }
+func TestCtlV3AuthFromKeyPerm(t *testing.T) { testCtl(t, authTestFromKeyPerm) }
-func TestCtlV3AuthRoleGet(t *testing.T) { testCtl(t, authTestRoleGet) }
-func TestCtlV3AuthUserGet(t *testing.T) { testCtl(t, authTestUserGet) }
-func TestCtlV3AuthRoleList(t *testing.T) { testCtl(t, authTestRoleList) }
+// TestCtlV3AuthAndWatch TODO: https://github.com/etcd-io/etcd/issues/7988 blocks its migration to common/auth_test.go
+func TestCtlV3AuthAndWatch(t *testing.T) { testCtl(t, authTestWatch) }
+func TestCtlV3AuthAndWatchJWT(t *testing.T) { testCtl(t, authTestWatch, withCfg(*e2e.NewConfigJWT())) }
-func TestCtlV3AuthDefrag(t *testing.T) { testCtl(t, authTestDefrag) }
+// TestCtlV3AuthEndpointHealth stays here because https://github.com/etcd-io/etcd/pull/13774#discussion_r1189118815 blocks its migration to common/auth_test.go
func TestCtlV3AuthEndpointHealth(t *testing.T) {
testCtl(t, authTestEndpointHealth, withQuorum())
}
-func TestCtlV3AuthSnapshot(t *testing.T) { testCtl(t, authTestSnapshot) }
-func TestCtlV3AuthSnapshotJWT(t *testing.T) { testCtl(t, authTestSnapshot, withCfg(*newConfigJWT())) }
-func TestCtlV3AuthJWTExpire(t *testing.T) { testCtl(t, authTestJWTExpire, withCfg(*newConfigJWT())) }
-func TestCtlV3AuthRevisionConsistency(t *testing.T) { testCtl(t, authTestRevisionConsistency) }
-
-func authEnableTest(cx ctlCtx) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
+
+// TestCtlV3AuthSnapshot TODO: fill up common/maintenance_auth_test.go once the Snapshot API is added to interfaces.Client
+func TestCtlV3AuthSnapshot(t *testing.T) { testCtl(t, authTestSnapshot) }
+
+func TestCtlV3AuthSnapshotJWT(t *testing.T) {
+ testCtl(t, authTestSnapshot, withCfg(*e2e.NewConfigJWT()))
}
func authEnable(cx ctlCtx) error {
// create root user with root role
if err := ctlV3User(cx, []string{"add", "root", "--interactive=false"}, "User root created", []string{"root"}); err != nil {
- return fmt.Errorf("failed to create root user %v", err)
+ return fmt.Errorf("failed to create root user %w", err)
}
if err := ctlV3User(cx, []string{"grant-role", "root", "root"}, "Role root is granted to user root", nil); err != nil {
- return fmt.Errorf("failed to grant root user root role %v", err)
+ return fmt.Errorf("failed to grant root user root role %w", err)
}
if err := ctlV3AuthEnable(cx); err != nil {
- return fmt.Errorf("authEnableTest ctlV3AuthEnable error (%v)", err)
+ return fmt.Errorf("authEnable: ctlV3AuthEnable error (%w)", err)
}
return nil
}
func ctlV3AuthEnable(cx ctlCtx) error {
cmdArgs := append(cx.PrefixArgs(), "auth", "enable")
- return spawnWithExpect(cmdArgs, "Authentication Enabled")
-}
-
-func authDisableTest(cx ctlCtx) {
- // a key that isn't granted to test-user
- if err := ctlV3Put(cx, "hoo", "a", ""); err != nil {
- cx.t.Fatal(err)
- }
-
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
-
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
-
- // test-user doesn't have the permission, it must fail
- cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3PutFailPerm(cx, "hoo", "bar"); err != nil {
- cx.t.Fatal(err)
- }
-
- cx.user, cx.pass = "root", "root"
- if err := ctlV3AuthDisable(cx); err != nil {
- cx.t.Fatalf("authDisableTest ctlV3AuthDisable error (%v)", err)
- }
-
- // now ErrAuthNotEnabled of Authenticate() is simply ignored
- cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3Put(cx, "hoo", "bar", ""); err != nil {
- cx.t.Fatal(err)
- }
-
- // now the key can be accessed
- cx.user, cx.pass = "", ""
- if err := ctlV3Put(cx, "hoo", "bar", ""); err != nil {
- cx.t.Fatal(err)
- }
- // confirm put succeeded
- if err := ctlV3Get(cx, []string{"hoo"}, []kv{{"hoo", "bar"}}...); err != nil {
- cx.t.Fatal(err)
- }
-}
-
-func ctlV3AuthDisable(cx ctlCtx) error {
- cmdArgs := append(cx.PrefixArgs(), "auth", "disable")
- return spawnWithExpect(cmdArgs, "Authentication Disabled")
-}
-
-func authStatusTest(cx ctlCtx) {
- cmdArgs := append(cx.PrefixArgs(), "auth", "status")
- if err := spawnWithExpects(cmdArgs, "Authentication Status: false", "AuthRevision:"); err != nil {
- cx.t.Fatal(err)
- }
-
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
-
- cx.user, cx.pass = "root", "root"
- cmdArgs = append(cx.PrefixArgs(), "auth", "status")
-
- if err := spawnWithExpects(cmdArgs, "Authentication Status: true", "AuthRevision:"); err != nil {
- cx.t.Fatal(err)
- }
-
- cmdArgs = append(cx.PrefixArgs(), "auth", "status", "--write-out", "json")
- if err := spawnWithExpect(cmdArgs, "enabled"); err != nil {
- cx.t.Fatal(err)
- }
- if err := spawnWithExpect(cmdArgs, "authRevision"); err != nil {
- cx.t.Fatal(err)
- }
-}
-
-func authCredWriteKeyTest(cx ctlCtx) {
- // baseline key to check for failed puts
- if err := ctlV3Put(cx, "foo", "a", ""); err != nil {
- cx.t.Fatal(err)
- }
-
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
-
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
-
- // confirm root role can access to all keys
- if err := ctlV3Put(cx, "foo", "bar", ""); err != nil {
- cx.t.Fatal(err)
- }
- if err := ctlV3Get(cx, []string{"foo"}, []kv{{"foo", "bar"}}...); err != nil {
- cx.t.Fatal(err)
- }
-
- // try invalid user
- cx.user, cx.pass = "a", "b"
- if err := ctlV3PutFailAuth(cx, "foo", "bar"); err != nil {
- cx.t.Fatal(err)
- }
- // confirm put failed
- cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3Get(cx, []string{"foo"}, []kv{{"foo", "bar"}}...); err != nil {
- cx.t.Fatal(err)
- }
-
- // try good user
- cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3Put(cx, "foo", "bar2", ""); err != nil {
- cx.t.Fatal(err)
- }
- // confirm put succeeded
- if err := ctlV3Get(cx, []string{"foo"}, []kv{{"foo", "bar2"}}...); err != nil {
- cx.t.Fatal(err)
- }
-
- // try bad password
- cx.user, cx.pass = "test-user", "badpass"
- if err := ctlV3PutFailAuth(cx, "foo", "baz"); err != nil {
- cx.t.Fatal(err)
- }
- // confirm put failed
- cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3Get(cx, []string{"foo"}, []kv{{"foo", "bar2"}}...); err != nil {
- cx.t.Fatal(err)
- }
-}
-
-func authRoleUpdateTest(cx ctlCtx) {
- if err := ctlV3Put(cx, "foo", "bar", ""); err != nil {
- cx.t.Fatal(err)
- }
-
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
-
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
-
- // try put to not granted key
- cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3PutFailPerm(cx, "hoo", "bar"); err != nil {
- cx.t.Fatal(err)
- }
-
- // grant a new key
- cx.user, cx.pass = "root", "root"
- if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "hoo", "", false}); err != nil {
- cx.t.Fatal(err)
- }
-
- // try a newly granted key
- cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3Put(cx, "hoo", "bar", ""); err != nil {
- cx.t.Fatal(err)
- }
- // confirm put succeeded
- if err := ctlV3Get(cx, []string{"hoo"}, []kv{{"hoo", "bar"}}...); err != nil {
- cx.t.Fatal(err)
- }
-
- // revoke the newly granted key
- cx.user, cx.pass = "root", "root"
- if err := ctlV3RoleRevokePermission(cx, "test-role", "hoo", "", false); err != nil {
- cx.t.Fatal(err)
- }
-
- // try put to the revoked key
- cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3PutFailPerm(cx, "hoo", "bar"); err != nil {
- cx.t.Fatal(err)
- }
-
- // confirm a key still granted can be accessed
- if err := ctlV3Get(cx, []string{"foo"}, []kv{{"foo", "bar"}}...); err != nil {
- cx.t.Fatal(err)
- }
-}
-
-func authUserDeleteDuringOpsTest(cx ctlCtx) {
- if err := ctlV3Put(cx, "foo", "bar", ""); err != nil {
- cx.t.Fatal(err)
- }
-
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
-
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
-
- // create a key
- cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3Put(cx, "foo", "bar", ""); err != nil {
- cx.t.Fatal(err)
- }
- // confirm put succeeded
- if err := ctlV3Get(cx, []string{"foo"}, []kv{{"foo", "bar"}}...); err != nil {
- cx.t.Fatal(err)
- }
-
- // delete the user
- cx.user, cx.pass = "root", "root"
- err := ctlV3User(cx, []string{"delete", "test-user"}, "User test-user deleted", []string{})
- if err != nil {
- cx.t.Fatal(err)
- }
-
- // check the user is deleted
- cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3PutFailAuth(cx, "foo", "baz"); err != nil {
- cx.t.Fatal(err)
- }
-}
-
-func authRoleRevokeDuringOpsTest(cx ctlCtx) {
- if err := ctlV3Put(cx, "foo", "bar", ""); err != nil {
- cx.t.Fatal(err)
- }
-
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
-
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
-
- // create a key
- cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3Put(cx, "foo", "bar", ""); err != nil {
- cx.t.Fatal(err)
- }
- // confirm put succeeded
- if err := ctlV3Get(cx, []string{"foo"}, []kv{{"foo", "bar"}}...); err != nil {
- cx.t.Fatal(err)
- }
-
- // create a new role
- cx.user, cx.pass = "root", "root"
- if err := ctlV3Role(cx, []string{"add", "test-role2"}, "Role test-role2 created"); err != nil {
- cx.t.Fatal(err)
- }
- // grant a new key to the new role
- if err := ctlV3RoleGrantPermission(cx, "test-role2", grantingPerm{true, true, "hoo", "", false}); err != nil {
- cx.t.Fatal(err)
- }
- // grant the new role to the user
- if err := ctlV3User(cx, []string{"grant-role", "test-user", "test-role2"}, "Role test-role2 is granted to user test-user", nil); err != nil {
- cx.t.Fatal(err)
- }
-
- // try a newly granted key
- cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3Put(cx, "hoo", "bar", ""); err != nil {
- cx.t.Fatal(err)
- }
- // confirm put succeeded
- if err := ctlV3Get(cx, []string{"hoo"}, []kv{{"hoo", "bar"}}...); err != nil {
- cx.t.Fatal(err)
- }
-
- // revoke a role from the user
- cx.user, cx.pass = "root", "root"
- err := ctlV3User(cx, []string{"revoke-role", "test-user", "test-role"}, "Role test-role is revoked from user test-user", []string{})
- if err != nil {
- cx.t.Fatal(err)
- }
-
- // check the role is revoked and permission is lost from the user
- cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3PutFailPerm(cx, "foo", "baz"); err != nil {
- cx.t.Fatal(err)
- }
-
- // try a key that can be accessed from the remaining role
- cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3Put(cx, "hoo", "bar2", ""); err != nil {
- cx.t.Fatal(err)
- }
- // confirm put succeeded
- if err := ctlV3Get(cx, []string{"hoo"}, []kv{{"hoo", "bar2"}}...); err != nil {
- cx.t.Fatal(err)
- }
-}
-
-func ctlV3PutFailAuth(cx ctlCtx, key, val string) error {
- return spawnWithExpect(append(cx.PrefixArgs(), "put", key, val), "authentication failed")
+ return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, expect.ExpectedResponse{Value: "Authentication Enabled"})
}
func ctlV3PutFailPerm(cx ctlCtx, key, val string) error {
- return spawnWithExpect(append(cx.PrefixArgs(), "put", key, val), "permission denied")
+ return e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "put", key, val), cx.envMap, expect.ExpectedResponse{Value: "permission denied"})
}
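+// authSetupTestUser creates test-user and test-role, grants the role to the
+// user, and gives the role readwrite permission on key "foo".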
func authSetupTestUser(cx ctlCtx) {
- if err := ctlV3User(cx, []string{"add", "test-user", "--interactive=false"}, "User test-user created", []string{"pass"}); err != nil {
- cx.t.Fatal(err)
- }
- if err := spawnWithExpect(append(cx.PrefixArgs(), "role", "add", "test-role"), "Role test-role created"); err != nil {
- cx.t.Fatal(err)
- }
- if err := ctlV3User(cx, []string{"grant-role", "test-user", "test-role"}, "Role test-role is granted to user test-user", nil); err != nil {
- cx.t.Fatal(err)
- }
+ err := ctlV3User(cx, []string{"add", "test-user", "--interactive=false"}, "User test-user created", []string{"pass"})
+ require.NoError(cx.t, err)
+ err = e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role"), cx.envMap, expect.ExpectedResponse{Value: "Role test-role created"})
+ require.NoError(cx.t, err)
+ err = ctlV3User(cx, []string{"grant-role", "test-user", "test-role"}, "Role test-role is granted to user test-user", nil)
+ require.NoError(cx.t, err)
cmd := append(cx.PrefixArgs(), "role", "grant-permission", "test-role", "readwrite", "foo")
- if err := spawnWithExpect(cmd, "Role test-role updated"); err != nil {
- cx.t.Fatal(err)
- }
-}
-
-func authTestTxn(cx ctlCtx) {
- // keys with 1 suffix aren't granted to test-user
- // keys with 2 suffix are granted to test-user
-
- keys := []string{"c1", "s1", "f1"}
- grantedKeys := []string{"c2", "s2", "f2"}
- for _, key := range keys {
- if err := ctlV3Put(cx, key, "v", ""); err != nil {
- cx.t.Fatal(err)
- }
- }
-
- for _, key := range grantedKeys {
- if err := ctlV3Put(cx, key, "v", ""); err != nil {
- cx.t.Fatal(err)
- }
- }
-
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
-
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
-
- // grant keys to test-user
- cx.user, cx.pass = "root", "root"
- for _, key := range grantedKeys {
- if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, key, "", false}); err != nil {
- cx.t.Fatal(err)
- }
- }
-
- // now test txn
- cx.interactive = true
- cx.user, cx.pass = "test-user", "pass"
-
- rqs := txnRequests{
- compare: []string{`version("c2") = "1"`},
- ifSucess: []string{"get s2"},
- ifFail: []string{"get f2"},
- results: []string{"SUCCESS", "s2", "v"},
- }
- if err := ctlV3Txn(cx, rqs); err != nil {
- cx.t.Fatal(err)
- }
-
- // a key of compare case isn't granted
- rqs = txnRequests{
- compare: []string{`version("c1") = "1"`},
- ifSucess: []string{"get s2"},
- ifFail: []string{"get f2"},
- results: []string{"Error: etcdserver: permission denied"},
- }
- if err := ctlV3Txn(cx, rqs); err != nil {
- cx.t.Fatal(err)
- }
-
- // a key of success case isn't granted
- rqs = txnRequests{
- compare: []string{`version("c2") = "1"`},
- ifSucess: []string{"get s1"},
- ifFail: []string{"get f2"},
- results: []string{"Error: etcdserver: permission denied"},
- }
- if err := ctlV3Txn(cx, rqs); err != nil {
- cx.t.Fatal(err)
- }
-
- // a key of failure case isn't granted
- rqs = txnRequests{
- compare: []string{`version("c2") = "1"`},
- ifSucess: []string{"get s2"},
- ifFail: []string{"get f1"},
- results: []string{"Error: etcdserver: permission denied"},
- }
- if err := ctlV3Txn(cx, rqs); err != nil {
- cx.t.Fatal(err)
- }
-}
-
-func authTestPrefixPerm(cx ctlCtx) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
-
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
-
- prefix := "/prefix/" // directory like prefix
- // grant keys to test-user
- cx.user, cx.pass = "root", "root"
- if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, prefix, "", true}); err != nil {
- cx.t.Fatal(err)
- }
-
- // try a prefix granted permission
- cx.user, cx.pass = "test-user", "pass"
- for i := 0; i < 10; i++ {
- key := fmt.Sprintf("%s%d", prefix, i)
- if err := ctlV3Put(cx, key, "val", ""); err != nil {
- cx.t.Fatal(err)
- }
- }
-
- if err := ctlV3PutFailPerm(cx, clientv3.GetPrefixRangeEnd(prefix), "baz"); err != nil {
- cx.t.Fatal(err)
- }
-
- // grant the entire keys to test-user
- cx.user, cx.pass = "root", "root"
- if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "", "", true}); err != nil {
- cx.t.Fatal(err)
- }
-
- prefix2 := "/prefix2/"
- cx.user, cx.pass = "test-user", "pass"
- for i := 0; i < 10; i++ {
- key := fmt.Sprintf("%s%d", prefix2, i)
- if err := ctlV3Put(cx, key, "val", ""); err != nil {
- cx.t.Fatal(err)
- }
- }
-}
-
-func authTestMemberAdd(cx ctlCtx) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
-
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
-
- peerURL := fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+11)
- // ordinary user cannot add a new member
- cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3MemberAdd(cx, peerURL, false); err == nil {
- cx.t.Fatalf("ordinary user must not be allowed to add a member")
- }
-
- // root can add a new member
- cx.user, cx.pass = "root", "root"
- if err := ctlV3MemberAdd(cx, peerURL, false); err != nil {
- cx.t.Fatal(err)
- }
-}
-
-func authTestMemberRemove(cx ctlCtx) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
-
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
-
- ep, memIDToRemove, clusterID := cx.memberToRemove()
-
- // ordinary user cannot remove a member
- cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3MemberRemove(cx, ep, memIDToRemove, clusterID); err == nil {
- cx.t.Fatalf("ordinary user must not be allowed to remove a member")
- }
-
- // root can remove a member
- cx.user, cx.pass = "root", "root"
- if err := ctlV3MemberRemove(cx, ep, memIDToRemove, clusterID); err != nil {
- cx.t.Fatal(err)
- }
+ err = e2e.SpawnWithExpectWithEnv(cmd, cx.envMap, expect.ExpectedResponse{Value: "Role test-role updated"})
+ require.NoError(cx.t, err)
}
func authTestMemberUpdate(cx ctlCtx) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, authEnable(cx))
cx.user, cx.pass = "root", "root"
authSetupTestUser(cx)
- mr, err := getMemberList(cx)
- if err != nil {
- cx.t.Fatal(err)
- }
+ mr, err := getMemberList(cx, false)
+ require.NoError(cx.t, err)
// ordinary user cannot update a member
cx.user, cx.pass = "test-user", "pass"
- peerURL := fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+11)
+ peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11)
memberID := fmt.Sprintf("%x", mr.Members[0].ID)
if err = ctlV3MemberUpdate(cx, memberID, peerURL); err == nil {
cx.t.Fatalf("ordinary user must not be allowed to update a member")
@@ -597,31 +98,20 @@ func authTestMemberUpdate(cx ctlCtx) {
// root can update a member
cx.user, cx.pass = "root", "root"
- if err = ctlV3MemberUpdate(cx, memberID, peerURL); err != nil {
- cx.t.Fatal(err)
- }
+ err = ctlV3MemberUpdate(cx, memberID, peerURL)
+ require.NoError(cx.t, err)
}
func authTestCertCN(cx ctlCtx) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, authEnable(cx))
cx.user, cx.pass = "root", "root"
- if err := ctlV3User(cx, []string{"add", "example.com", "--interactive=false"}, "User example.com created", []string{""}); err != nil {
- cx.t.Fatal(err)
- }
- if err := spawnWithExpect(append(cx.PrefixArgs(), "role", "add", "test-role"), "Role test-role created"); err != nil {
- cx.t.Fatal(err)
- }
- if err := ctlV3User(cx, []string{"grant-role", "example.com", "test-role"}, "Role test-role is granted to user example.com", nil); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3User(cx, []string{"add", "example.com", "--interactive=false"}, "User example.com created", []string{""}))
+ require.NoError(cx.t, e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role"), cx.envMap, expect.ExpectedResponse{Value: "Role test-role created"}))
+ require.NoError(cx.t, ctlV3User(cx, []string{"grant-role", "example.com", "test-role"}, "Role test-role is granted to user example.com", nil))
// grant a new key
- if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "hoo", "", false}); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "hoo", "", false}))
// try a granted key
cx.user, cx.pass = "", ""
@@ -629,221 +119,79 @@ func authTestCertCN(cx ctlCtx) {
cx.t.Error(err)
}
- // try a non granted key
+ // try a non-granted key
cx.user, cx.pass = "", ""
- if err := ctlV3PutFailPerm(cx, "baz", "bar"); err != nil {
- cx.t.Error(err)
- }
-}
-
-func authTestRevokeWithDelete(cx ctlCtx) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
-
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
-
- // create a new role
- cx.user, cx.pass = "root", "root"
- if err := ctlV3Role(cx, []string{"add", "test-role2"}, "Role test-role2 created"); err != nil {
- cx.t.Fatal(err)
- }
-
- // grant the new role to the user
- if err := ctlV3User(cx, []string{"grant-role", "test-user", "test-role2"}, "Role test-role2 is granted to user test-user", nil); err != nil {
- cx.t.Fatal(err)
- }
-
- // check the result
- if err := ctlV3User(cx, []string{"get", "test-user"}, "Roles: test-role test-role2", nil); err != nil {
- cx.t.Fatal(err)
- }
-
- // delete the role, test-role2 must be revoked from test-user
- if err := ctlV3Role(cx, []string{"delete", "test-role2"}, "Role test-role2 deleted"); err != nil {
- cx.t.Fatal(err)
- }
-
- // check the result
- if err := ctlV3User(cx, []string{"get", "test-user"}, "Roles: test-role", nil); err != nil {
- cx.t.Fatal(err)
- }
-}
-
-func authTestInvalidMgmt(cx ctlCtx) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
-
- if err := ctlV3Role(cx, []string{"delete", "root"}, "Error: etcdserver: invalid auth management"); err == nil {
- cx.t.Fatal("deleting the role root must not be allowed")
- }
-
- if err := ctlV3User(cx, []string{"revoke-role", "root", "root"}, "Error: etcdserver: invalid auth management", []string{}); err == nil {
- cx.t.Fatal("revoking the role root from the user root must not be allowed")
- }
+ require.ErrorContains(cx.t, ctlV3PutFailPerm(cx, "baz", "bar"), "permission denied")
}
func authTestFromKeyPerm(cx ctlCtx) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, authEnable(cx))
cx.user, cx.pass = "root", "root"
authSetupTestUser(cx)
// grant keys after z to test-user
cx.user, cx.pass = "root", "root"
- if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "z", "\x00", false}); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "z", "\x00", false}))
// try the granted open ended permission
cx.user, cx.pass = "test-user", "pass"
for i := 0; i < 10; i++ {
key := fmt.Sprintf("z%d", i)
- if err := ctlV3Put(cx, key, "val", ""); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3Put(cx, key, "val", ""))
}
largeKey := ""
for i := 0; i < 10; i++ {
largeKey += "\xff"
- if err := ctlV3Put(cx, largeKey, "val", ""); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3Put(cx, largeKey, "val", ""))
}
// try a non granted key
- if err := ctlV3PutFailPerm(cx, "x", "baz"); err != nil {
- cx.t.Fatal(err)
- }
+ require.ErrorContains(cx.t, ctlV3PutFailPerm(cx, "x", "baz"), "permission denied")
// revoke the open ended permission
cx.user, cx.pass = "root", "root"
- if err := ctlV3RoleRevokePermission(cx, "test-role", "z", "", true); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3RoleRevokePermission(cx, "test-role", "z", "", true))
// try the revoked open ended permission
cx.user, cx.pass = "test-user", "pass"
for i := 0; i < 10; i++ {
key := fmt.Sprintf("z%d", i)
- if err := ctlV3PutFailPerm(cx, key, "val"); err != nil {
- cx.t.Fatal(err)
- }
+ require.ErrorContains(cx.t, ctlV3PutFailPerm(cx, key, "val"), "permission denied")
}
// grant the entire keys
cx.user, cx.pass = "root", "root"
- if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "", "\x00", false}); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "", "\x00", false}))
// try keys, of course it must be allowed because test-role has a permission of the entire keys
cx.user, cx.pass = "test-user", "pass"
for i := 0; i < 10; i++ {
key := fmt.Sprintf("z%d", i)
- if err := ctlV3Put(cx, key, "val", ""); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3Put(cx, key, "val", ""))
}
// revoke the entire keys
cx.user, cx.pass = "root", "root"
- if err := ctlV3RoleRevokePermission(cx, "test-role", "", "", true); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3RoleRevokePermission(cx, "test-role", "", "", true))
// try the revoked entire key permission
cx.user, cx.pass = "test-user", "pass"
for i := 0; i < 10; i++ {
key := fmt.Sprintf("z%d", i)
- if err := ctlV3PutFailPerm(cx, key, "val"); err != nil {
- cx.t.Fatal(err)
- }
- }
-}
-
-func authLeaseTestKeepAlive(cx ctlCtx) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
-
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
- // put with TTL 10 seconds and keep-alive
- leaseID, err := ctlV3LeaseGrant(cx, 10)
- if err != nil {
- cx.t.Fatalf("leaseTestKeepAlive: ctlV3LeaseGrant error (%v)", err)
- }
- if err := ctlV3Put(cx, "key", "val", leaseID); err != nil {
- cx.t.Fatalf("leaseTestKeepAlive: ctlV3Put error (%v)", err)
- }
- if err := ctlV3LeaseKeepAlive(cx, leaseID); err != nil {
- cx.t.Fatalf("leaseTestKeepAlive: ctlV3LeaseKeepAlive error (%v)", err)
- }
- if err := ctlV3Get(cx, []string{"key"}, kv{"key", "val"}); err != nil {
- cx.t.Fatalf("leaseTestKeepAlive: ctlV3Get error (%v)", err)
- }
-}
-
-func authLeaseTestTimeToLiveExpired(cx ctlCtx) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
-
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
-
- ttl := 3
- if err := leaseTestTimeToLiveExpire(cx, ttl); err != nil {
- cx.t.Fatalf("leaseTestTimeToLiveExpire: error (%v)", err)
- }
-}
-
-func authLeaseTestLeaseGrantLeases(cx ctlCtx) {
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
-
- if err := leaseTestGrantLeasesList(cx); err != nil {
- cx.t.Fatalf("authLeaseTestLeaseGrantLeases: error (%v)", err)
- }
-}
-
-func authLeaseTestLeaseRevoke(cx ctlCtx) {
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
-
- // put with TTL 10 seconds and revoke
- leaseID, err := ctlV3LeaseGrant(cx, 10)
- if err != nil {
- cx.t.Fatalf("ctlV3LeaseGrant error (%v)", err)
- }
- if err := ctlV3Put(cx, "key", "val", leaseID); err != nil {
- cx.t.Fatalf("ctlV3Put error (%v)", err)
- }
- if err := ctlV3LeaseRevoke(cx, leaseID); err != nil {
- cx.t.Fatalf("ctlV3LeaseRevoke error (%v)", err)
- }
- if err := ctlV3GetWithErr(cx, []string{"key"}, []string{"retrying of unary invoker failed"}); err != nil { // expect errors
- cx.t.Fatalf("ctlV3GetWithErr error (%v)", err)
+ err := ctlV3PutFailPerm(cx, key, "val")
+ require.ErrorContains(cx.t, err, "permission denied")
}
}
func authTestWatch(cx ctlCtx) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, authEnable(cx))
cx.user, cx.pass = "root", "root"
authSetupTestUser(cx)
// grant a key range
- if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "key", "key4", false}); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "key", "key4", false}))
tests := []struct {
puts []kv
@@ -894,123 +242,23 @@ func authTestWatch(cx ctlCtx) {
var err error
if tt.want {
err = ctlV3Watch(cx, tt.args, tt.wkv...)
- } else {
- err = ctlV3WatchFailPerm(cx, tt.args)
- }
-
- if err != nil {
- if cx.dialTimeout > 0 && !isGRPCTimedout(err) {
+ if err != nil && cx.dialTimeout > 0 && !isGRPCTimedout(err) {
cx.t.Errorf("watchTest #%d: ctlV3Watch error (%v)", i, err)
}
+ } else {
+ err = ctlV3WatchFailPerm(cx, tt.args)
+ // this will not have any meaningful error output, but the process fails due to the cancellation
+ require.ErrorContains(cx.t, err, "unexpected exit code")
}
<-donec
}
-
-}
-
-func authTestRoleGet(cx ctlCtx) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
-
- expected := []string{
- "Role test-role",
- "KV Read:", "foo",
- "KV Write:", "foo",
- }
- if err := spawnWithExpects(append(cx.PrefixArgs(), "role", "get", "test-role"), expected...); err != nil {
- cx.t.Fatal(err)
- }
-
- // test-user can get the information of test-role because it belongs to the role
- cx.user, cx.pass = "test-user", "pass"
- if err := spawnWithExpects(append(cx.PrefixArgs(), "role", "get", "test-role"), expected...); err != nil {
- cx.t.Fatal(err)
- }
-
- // test-user cannot get the information of root because it doesn't belong to the role
- expected = []string{
- "Error: etcdserver: permission denied",
- }
- if err := spawnWithExpects(append(cx.PrefixArgs(), "role", "get", "root"), expected...); err != nil {
- cx.t.Fatal(err)
- }
-}
-
-func authTestUserGet(cx ctlCtx) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
-
- expected := []string{
- "User: test-user",
- "Roles: test-role",
- }
-
- if err := spawnWithExpects(append(cx.PrefixArgs(), "user", "get", "test-user"), expected...); err != nil {
- cx.t.Fatal(err)
- }
-
- // test-user can get the information of test-user itself
- cx.user, cx.pass = "test-user", "pass"
- if err := spawnWithExpects(append(cx.PrefixArgs(), "user", "get", "test-user"), expected...); err != nil {
- cx.t.Fatal(err)
- }
-
- // test-user cannot get the information of root
- expected = []string{
- "Error: etcdserver: permission denied",
- }
- if err := spawnWithExpects(append(cx.PrefixArgs(), "user", "get", "root"), expected...); err != nil {
- cx.t.Fatal(err)
- }
-}
-
-func authTestRoleList(cx ctlCtx) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
- if err := spawnWithExpect(append(cx.PrefixArgs(), "role", "list"), "test-role"); err != nil {
- cx.t.Fatal(err)
- }
-}
-
-func authTestDefrag(cx ctlCtx) {
- maintenanceInitKeys(cx)
-
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
-
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
-
- // ordinary user cannot defrag
- cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3OnlineDefrag(cx); err == nil {
- cx.t.Fatal("ordinary user should not be able to issue a defrag request")
- }
-
- // root can defrag
- cx.user, cx.pass = "root", "root"
- if err := ctlV3OnlineDefrag(cx); err != nil {
- cx.t.Fatal(err)
- }
}
func authTestSnapshot(cx ctlCtx) {
maintenanceInitKeys(cx)
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, authEnable(cx))
cx.user, cx.pass = "root", "root"
authSetupTestUser(cx)
@@ -1020,20 +268,14 @@ func authTestSnapshot(cx ctlCtx) {
// ordinary user cannot save a snapshot
cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3SnapshotSave(cx, fpath); err == nil {
- cx.t.Fatal("ordinary user should not be able to save a snapshot")
- }
+ require.Errorf(cx.t, ctlV3SnapshotSave(cx, fpath), "ordinary user should not be able to save a snapshot")
// root can save a snapshot
cx.user, cx.pass = "root", "root"
- if err := ctlV3SnapshotSave(cx, fpath); err != nil {
- cx.t.Fatalf("snapshotTest ctlV3SnapshotSave error (%v)", err)
- }
+ require.NoErrorf(cx.t, ctlV3SnapshotSave(cx, fpath), "snapshotTest ctlV3SnapshotSave error")
st, err := getSnapshotStatus(cx, fpath)
- if err != nil {
- cx.t.Fatalf("snapshotTest getSnapshotStatus error (%v)", err)
- }
+ require.NoErrorf(cx.t, err, "snapshotTest getSnapshotStatus error")
if st.Revision != 4 {
cx.t.Fatalf("expected 4, got %d", st.Revision)
}
@@ -1043,28 +285,20 @@ func authTestSnapshot(cx ctlCtx) {
}
func authTestEndpointHealth(cx ctlCtx) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, authEnable(cx))
cx.user, cx.pass = "root", "root"
authSetupTestUser(cx)
- if err := ctlV3EndpointHealth(cx); err != nil {
- cx.t.Fatalf("endpointStatusTest ctlV3EndpointHealth error (%v)", err)
- }
+ require.NoErrorf(cx.t, ctlV3EndpointHealth(cx), "endpointStatusTest ctlV3EndpointHealth error")
// health checking with an ordinary user "succeeds" since permission denial goes through consensus
cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3EndpointHealth(cx); err != nil {
- cx.t.Fatalf("endpointStatusTest ctlV3EndpointHealth error (%v)", err)
- }
+ require.NoErrorf(cx.t, ctlV3EndpointHealth(cx), "endpointStatusTest ctlV3EndpointHealth error")
// succeed if permissions granted for ordinary user
cx.user, cx.pass = "root", "root"
- if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "health", "", false}); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "health", "", false}))
cx.user, cx.pass = "test-user", "pass"
if err := ctlV3EndpointHealth(cx); err != nil {
cx.t.Fatalf("endpointStatusTest ctlV3EndpointHealth error (%v)", err)
@@ -1072,61 +306,39 @@ func authTestEndpointHealth(cx ctlCtx) {
}
func certCNAndUsername(cx ctlCtx, noPassword bool) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, authEnable(cx))
cx.user, cx.pass = "root", "root"
authSetupTestUser(cx)
if noPassword {
- if err := ctlV3User(cx, []string{"add", "example.com", "--no-password"}, "User example.com created", []string{""}); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3User(cx, []string{"add", "example.com", "--no-password"}, "User example.com created", []string{""}))
} else {
- if err := ctlV3User(cx, []string{"add", "example.com", "--interactive=false"}, "User example.com created", []string{""}); err != nil {
- cx.t.Fatal(err)
- }
- }
- if err := spawnWithExpect(append(cx.PrefixArgs(), "role", "add", "test-role-cn"), "Role test-role-cn created"); err != nil {
- cx.t.Fatal(err)
- }
- if err := ctlV3User(cx, []string{"grant-role", "example.com", "test-role-cn"}, "Role test-role-cn is granted to user example.com", nil); err != nil {
- cx.t.Fatal(err)
+ require.NoError(cx.t, ctlV3User(cx, []string{"add", "example.com", "--interactive=false"}, "User example.com created", []string{""}))
}
+ require.NoError(cx.t, e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role-cn"), cx.envMap, expect.ExpectedResponse{Value: "Role test-role-cn created"}))
+ require.NoError(cx.t, ctlV3User(cx, []string{"grant-role", "example.com", "test-role-cn"}, "Role test-role-cn is granted to user example.com", nil))
// grant a new key for CN based user
- if err := ctlV3RoleGrantPermission(cx, "test-role-cn", grantingPerm{true, true, "hoo", "", false}); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3RoleGrantPermission(cx, "test-role-cn", grantingPerm{true, true, "hoo", "", false}))
// grant a new key for username based user
- if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "bar", "", false}); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "bar", "", false}))
// try a granted key for CN based user
cx.user, cx.pass = "", ""
- if err := ctlV3Put(cx, "hoo", "bar", ""); err != nil {
- cx.t.Error(err)
- }
+ require.NoError(cx.t, ctlV3Put(cx, "hoo", "bar", ""))
// try a granted key for username based user
cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3Put(cx, "bar", "bar", ""); err != nil {
- cx.t.Error(err)
- }
+ require.NoError(cx.t, ctlV3Put(cx, "bar", "bar", ""))
- // try a non granted key for both of them
+ // try a non-granted key for both of them
cx.user, cx.pass = "", ""
- if err := ctlV3PutFailPerm(cx, "baz", "bar"); err != nil {
- cx.t.Error(err)
- }
+ require.ErrorContains(cx.t, ctlV3PutFailPerm(cx, "baz", "bar"), "permission denied")
cx.user, cx.pass = "test-user", "pass"
- if err := ctlV3PutFailPerm(cx, "baz", "bar"); err != nil {
- cx.t.Error(err)
- }
+ require.ErrorContains(cx.t, ctlV3PutFailPerm(cx, "baz", "bar"), "permission denied")
}
func authTestCertCNAndUsername(cx ctlCtx) {
@@ -1137,72 +349,32 @@ func authTestCertCNAndUsernameNoPassword(cx ctlCtx) {
certCNAndUsername(cx, true)
}
-func authTestJWTExpire(cx ctlCtx) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
-
- cx.user, cx.pass = "root", "root"
- authSetupTestUser(cx)
-
- // try a granted key
- if err := ctlV3Put(cx, "hoo", "bar", ""); err != nil {
- cx.t.Error(err)
- }
-
- // wait an expiration of my JWT token
- <-time.After(3 * time.Second)
-
- if err := ctlV3Put(cx, "hoo", "bar", ""); err != nil {
- cx.t.Error(err)
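+// ctlV3EndpointHealth runs `etcdctl endpoint health` and expects one "is healthy" line per cluster member.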
+func ctlV3EndpointHealth(cx ctlCtx) error {
+ cmdArgs := append(cx.PrefixArgs(), "endpoint", "health")
+ lines := make([]expect.ExpectedResponse, cx.epc.Cfg.ClusterSize)
+ for i := range lines {
+ lines[i] = expect.ExpectedResponse{Value: "is healthy"}
}
+ return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...)
}
-func authTestRevisionConsistency(cx ctlCtx) {
- if err := authEnable(cx); err != nil {
- cx.t.Fatal(err)
- }
- cx.user, cx.pass = "root", "root"
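+// ctlV3User runs `etcdctl user <args>`, feeds each stdIn string to the interactive prompt, and waits for expStr in the output.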
+func ctlV3User(cx ctlCtx, args []string, expStr string, stdIn []string) error {
+ cmdArgs := append(cx.PrefixArgs(), "user")
+ cmdArgs = append(cmdArgs, args...)
- // add user
- if err := ctlV3User(cx, []string{"add", "test-user", "--interactive=false"}, "User test-user created", []string{"pass"}); err != nil {
- cx.t.Fatal(err)
- }
- // delete the same user
- if err := ctlV3User(cx, []string{"delete", "test-user"}, "User test-user deleted", []string{}); err != nil {
- cx.t.Fatal(err)
- }
-
- // get node0 auth revision
- node0 := cx.epc.procs[0]
- endpoint := node0.EndpointsV3()[0]
- cli, err := clientv3.New(clientv3.Config{Endpoints: []string{endpoint}, Username: cx.user, Password: cx.pass, DialTimeout: 3 * time.Second})
+ proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
if err != nil {
- cx.t.Fatal(err)
+ return err
}
- defer cli.Close()
+ defer proc.Close()
- sresp, err := cli.AuthStatus(context.TODO())
- if err != nil {
- cx.t.Fatal(err)
- }
- oldAuthRevision := sresp.AuthRevision
-
- // restart the node
- node0.WithStopSignal(syscall.SIGINT)
- if err := node0.Restart(); err != nil {
- cx.t.Fatal(err)
- }
-
- // get node0 auth revision again
- sresp, err = cli.AuthStatus(context.TODO())
- if err != nil {
- cx.t.Fatal(err)
+ // Send 'stdIn' strings as input.
+ for _, s := range stdIn {
+ if err = proc.Send(s + "\r"); err != nil {
+ return err
+ }
}
- newAuthRevision := sresp.AuthRevision
- // assert AuthRevision equal
- if newAuthRevision != oldAuthRevision {
- cx.t.Fatalf("auth revison shouldn't change when restarting etcd, expected: %d, got: %d", oldAuthRevision, newAuthRevision)
- }
+ _, err = proc.Expect(expStr)
+ return err
}
diff --git a/tests/e2e/ctl_v3_compact_test.go b/tests/e2e/ctl_v3_compact_test.go
deleted file mode 100644
index 5b0c51eb426..00000000000
--- a/tests/e2e/ctl_v3_compact_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package e2e
-
-import (
- "strconv"
- "strings"
- "testing"
-)
-
-func TestCtlV3Compact(t *testing.T) { testCtl(t, compactTest) }
-func TestCtlV3CompactPhysical(t *testing.T) { testCtl(t, compactTest, withCompactPhysical()) }
-
-func compactTest(cx ctlCtx) {
- compactPhysical := cx.compactPhysical
- if err := ctlV3Compact(cx, 2, compactPhysical); err != nil {
- if !strings.Contains(err.Error(), "required revision is a future revision") {
- cx.t.Fatal(err)
- }
- } else {
- cx.t.Fatalf("expected '...future revision' error, got ")
- }
-
- var kvs = []kv{{"key", "val1"}, {"key", "val2"}, {"key", "val3"}}
- for i := range kvs {
- if err := ctlV3Put(cx, kvs[i].key, kvs[i].val, ""); err != nil {
- cx.t.Fatalf("compactTest #%d: ctlV3Put error (%v)", i, err)
- }
- }
-
- if err := ctlV3Get(cx, []string{"key", "--rev", "3"}, kvs[1:2]...); err != nil {
- cx.t.Errorf("compactTest: ctlV3Get error (%v)", err)
- }
-
- if err := ctlV3Compact(cx, 4, compactPhysical); err != nil {
- cx.t.Fatal(err)
- }
-
- if err := ctlV3Get(cx, []string{"key", "--rev", "3"}, kvs[1:2]...); err != nil {
- if !strings.Contains(err.Error(), "required revision has been compacted") {
- cx.t.Errorf("compactTest: ctlV3Get error (%v)", err)
- }
- } else {
- cx.t.Fatalf("expected '...has been compacted' error, got ")
- }
-
- if err := ctlV3Compact(cx, 2, compactPhysical); err != nil {
- if !strings.Contains(err.Error(), "required revision has been compacted") {
- cx.t.Fatal(err)
- }
- } else {
- cx.t.Fatalf("expected '...has been compacted' error, got ")
- }
-}
-
-func ctlV3Compact(cx ctlCtx, rev int64, physical bool) error {
- rs := strconv.FormatInt(rev, 10)
- cmdArgs := append(cx.PrefixArgs(), "compact", rs)
- if physical {
- cmdArgs = append(cmdArgs, "--physical")
- }
- return spawnWithExpect(cmdArgs, "compacted revision "+rs)
-}
diff --git a/tests/e2e/ctl_v3_completion_test.go b/tests/e2e/ctl_v3_completion_test.go
new file mode 100644
index 00000000000..8057c0c1e21
--- /dev/null
+++ b/tests/e2e/ctl_v3_completion_test.go
@@ -0,0 +1,59 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "os/exec"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+func TestCtlV3CompletionBash(t *testing.T) {
+ testShellCompletion(t, e2e.BinPath.Etcdctl, "bash")
+}
+
+func TestUtlV3CompletionBash(t *testing.T) {
+ testShellCompletion(t, e2e.BinPath.Etcdutl, "bash")
+}
+
+// testShellCompletion can only run in non-coverage mode. The etcdctl and etcdutl
+// binaries built with `-tags cov` print a go-test result after each execution, like
+//
+// PASS
+// coverage: 0.0% of statements in ./...
+//
+// Since PASS is not a real command, the `source completion` step fails with a
+// command-not-found error.
+func testShellCompletion(t *testing.T, binPath, shellName string) {
+ e2e.BeforeTest(t)
+
+ stdout := new(bytes.Buffer)
+ completionCmd := exec.Command(binPath, "completion", shellName)
+ completionCmd.Stdout = stdout
+ completionCmd.Stderr = os.Stderr
+ require.NoError(t, completionCmd.Run())
+
+ filename := fmt.Sprintf("etcdctl-%s.completion", shellName)
+ require.NoError(t, os.WriteFile(filename, stdout.Bytes(), 0o644))
+
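+ // sourcing the generated script in the target shell verifies that the completion output is valid shell syntax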
+ shellCmd := exec.Command(shellName, "-c", "source "+filename)
+ require.NoError(t, shellCmd.Run())
+}
diff --git a/tests/e2e/ctl_v3_defrag_test.go b/tests/e2e/ctl_v3_defrag_test.go
index 8fbe476f093..d8ceb7426b7 100644
--- a/tests/e2e/ctl_v3_defrag_test.go
+++ b/tests/e2e/ctl_v3_defrag_test.go
@@ -14,51 +14,30 @@
package e2e
-import "testing"
+import (
+ "testing"
-func TestCtlV3DefragOnline(t *testing.T) { testCtl(t, defragOnlineTest) }
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
func TestCtlV3DefragOffline(t *testing.T) {
testCtlWithOffline(t, maintenanceInitKeys, defragOfflineTest)
}
-func TestCtlV3DefragOfflineEtcdutl(t *testing.T) {
- testCtlWithOffline(t, maintenanceInitKeys, defragOfflineTest, withEtcdutl())
-}
func maintenanceInitKeys(cx ctlCtx) {
- var kvs = []kv{{"key", "val1"}, {"key", "val2"}, {"key", "val3"}}
+ kvs := []kv{{"key", "val1"}, {"key", "val2"}, {"key", "val3"}}
for i := range kvs {
- if err := ctlV3Put(cx, kvs[i].key, kvs[i].val, ""); err != nil {
- cx.t.Fatal(err)
- }
- }
-}
-
-func defragOnlineTest(cx ctlCtx) {
- maintenanceInitKeys(cx)
-
- if err := ctlV3Compact(cx, 4, cx.compactPhysical); err != nil {
- cx.t.Fatal(err)
- }
-
- if err := ctlV3OnlineDefrag(cx); err != nil {
- cx.t.Fatalf("defragTest ctlV3Defrag error (%v)", err)
- }
-}
-
-func ctlV3OnlineDefrag(cx ctlCtx) error {
- cmdArgs := append(cx.PrefixArgs(), "defrag")
- lines := make([]string, cx.epc.cfg.clusterSize)
- for i := range lines {
- lines[i] = "Finished defragmenting etcd member"
+ require.NoError(cx.t, ctlV3Put(cx, kvs[i].key, kvs[i].val, ""))
}
- return spawnWithExpects(cmdArgs, lines...)
}
func ctlV3OfflineDefrag(cx ctlCtx) error {
cmdArgs := append(cx.PrefixArgsUtl(), "defrag", "--data-dir", cx.dataDir)
- lines := []string{"finished defragmenting directory"}
- return spawnWithExpects(cmdArgs, lines...)
+ lines := []expect.ExpectedResponse{{Value: "finished defragmenting directory"}}
+ return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...)
}
func defragOfflineTest(cx ctlCtx) {
diff --git a/tests/e2e/ctl_v3_elect_test.go b/tests/e2e/ctl_v3_elect_test.go
index 332ce9708c0..c8a2cb151fd 100644
--- a/tests/e2e/ctl_v3_elect_test.go
+++ b/tests/e2e/ctl_v3_elect_test.go
@@ -15,12 +15,16 @@
package e2e
import (
+ "context"
"os"
"strings"
"testing"
"time"
+ "github.com/stretchr/testify/require"
+
"go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
)
func TestCtlV3Elect(t *testing.T) {
@@ -30,10 +34,8 @@ func TestCtlV3Elect(t *testing.T) {
func testElect(cx ctlCtx) {
name := "a"
- holder, ch, err := ctlV3Elect(cx, name, "p1")
- if err != nil {
- cx.t.Fatal(err)
- }
+ holder, ch, err := ctlV3Elect(cx, name, "p1", false)
+ require.NoError(cx.t, err)
l1 := ""
select {
@@ -46,10 +48,8 @@ func testElect(cx ctlCtx) {
}
// blocked process that won't win the election
- blocked, ch, err := ctlV3Elect(cx, name, "p2")
- if err != nil {
- cx.t.Fatal(err)
- }
+ blocked, ch, err := ctlV3Elect(cx, name, "p2", true)
+ require.NoError(cx.t, err)
select {
case <-time.After(100 * time.Millisecond):
case <-ch:
@@ -57,11 +57,14 @@ func testElect(cx ctlCtx) {
}
// overlap with a blocker that will win the election
- blockAcquire, ch, err := ctlV3Elect(cx, name, "p2")
- if err != nil {
- cx.t.Fatal(err)
- }
- defer blockAcquire.Stop()
+ blockAcquire, ch, err := ctlV3Elect(cx, name, "p2", false)
+ require.NoError(cx.t, err)
+ defer func(blockAcquire *expect.ExpectProcess) {
+ err = blockAcquire.Stop()
+ require.NoError(cx.t, err)
+ blockAcquire.Wait()
+ }(blockAcquire)
+
select {
case <-time.After(100 * time.Millisecond):
case <-ch:
@@ -69,20 +72,16 @@ func testElect(cx ctlCtx) {
}
// kill blocked process with clean shutdown
- if err = blocked.Signal(os.Interrupt); err != nil {
- cx.t.Fatal(err)
- }
- if err = closeWithTimeout(blocked, time.Second); err != nil {
- cx.t.Fatal(err)
+ require.NoError(cx.t, blocked.Signal(os.Interrupt))
+ err = e2e.CloseWithTimeout(blocked, time.Second)
+ if err != nil {
+ // because it is blocked, the process may be killed instead of shutting down cleanly and exit non-zero
+ require.ErrorContains(cx.t, err, "unexpected exit code")
}
// kill the holder with clean shutdown
- if err = holder.Signal(os.Interrupt); err != nil {
- cx.t.Fatal(err)
- }
- if err = closeWithTimeout(holder, time.Second); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, holder.Signal(os.Interrupt))
+ require.NoError(cx.t, e2e.CloseWithTimeout(holder, time.Second))
// blockAcquire should win the election
select {
@@ -96,18 +95,23 @@ func testElect(cx ctlCtx) {
}
// ctlV3Elect creates an elect process with a channel listening for when it wins the election.
-func ctlV3Elect(cx ctlCtx, name, proposal string) (*expect.ExpectProcess, <-chan string, error) {
+func ctlV3Elect(cx ctlCtx, name, proposal string, expectFailure bool) (*expect.ExpectProcess, <-chan string, error) {
cmdArgs := append(cx.PrefixArgs(), "elect", name, proposal)
- proc, err := spawnCmd(cmdArgs)
+ proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
outc := make(chan string, 1)
if err != nil {
close(outc)
return proc, outc, err
}
go func() {
- s, xerr := proc.ExpectFunc(func(string) bool { return true })
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
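+ // any output means this candidate won; callers that set expectFailure (the blocked candidate) tolerate a timeout here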
+ s, xerr := proc.ExpectFunc(ctx, func(string) bool { return true })
if xerr != nil {
- cx.t.Errorf("expect failed (%v)", xerr)
+ if !expectFailure {
+ cx.t.Errorf("expect failed (%v)", xerr)
+ }
}
outc <- s
}()
diff --git a/tests/e2e/ctl_v3_endpoint_test.go b/tests/e2e/ctl_v3_endpoint_test.go
deleted file mode 100644
index 3c4e7d72b57..00000000000
--- a/tests/e2e/ctl_v3_endpoint_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package e2e
-
-import (
- "context"
- "fmt"
- "net/url"
- "testing"
- "time"
-
- "go.etcd.io/etcd/client/v3"
-)
-
-func TestCtlV3EndpointHealth(t *testing.T) { testCtl(t, endpointHealthTest, withQuorum()) }
-func TestCtlV3EndpointStatus(t *testing.T) { testCtl(t, endpointStatusTest, withQuorum()) }
-func TestCtlV3EndpointHashKV(t *testing.T) { testCtl(t, endpointHashKVTest, withQuorum()) }
-
-func endpointHealthTest(cx ctlCtx) {
- if err := ctlV3EndpointHealth(cx); err != nil {
- cx.t.Fatalf("endpointStatusTest ctlV3EndpointHealth error (%v)", err)
- }
-}
-
-func ctlV3EndpointHealth(cx ctlCtx) error {
- cmdArgs := append(cx.PrefixArgs(), "endpoint", "health")
- lines := make([]string, cx.epc.cfg.clusterSize)
- for i := range lines {
- lines[i] = "is healthy"
- }
- return spawnWithExpects(cmdArgs, lines...)
-}
-
-func endpointStatusTest(cx ctlCtx) {
- if err := ctlV3EndpointStatus(cx); err != nil {
- cx.t.Fatalf("endpointStatusTest ctlV3EndpointStatus error (%v)", err)
- }
-}
-
-func ctlV3EndpointStatus(cx ctlCtx) error {
- cmdArgs := append(cx.PrefixArgs(), "endpoint", "status")
- var eps []string
- for _, ep := range cx.epc.EndpointsV3() {
- u, _ := url.Parse(ep)
- eps = append(eps, u.Host)
- }
- return spawnWithExpects(cmdArgs, eps...)
-}
-
-func endpointHashKVTest(cx ctlCtx) {
- if err := ctlV3EndpointHashKV(cx); err != nil {
- cx.t.Fatalf("endpointHashKVTest ctlV3EndpointHashKV error (%v)", err)
- }
-}
-
-func ctlV3EndpointHashKV(cx ctlCtx) error {
- eps := cx.epc.EndpointsV3()
-
- // get latest hash to compare
- cli, err := clientv3.New(clientv3.Config{
- Endpoints: eps,
- DialTimeout: 3 * time.Second,
- })
- if err != nil {
- cx.t.Fatal(err)
- }
- defer cli.Close()
- hresp, err := cli.HashKV(context.TODO(), eps[0], 0)
- if err != nil {
- cx.t.Fatal(err)
- }
-
- cmdArgs := append(cx.PrefixArgs(), "endpoint", "hashkv")
- var ss []string
- for _, ep := range cx.epc.EndpointsV3() {
- u, _ := url.Parse(ep)
- ss = append(ss, fmt.Sprintf("%s, %d", u.Host, hresp.Hash))
- }
- return spawnWithExpects(cmdArgs, ss...)
-}
diff --git a/tests/e2e/ctl_v3_grpc_test.go b/tests/e2e/ctl_v3_grpc_test.go
new file mode 100644
index 00000000000..78881f3ed05
--- /dev/null
+++ b/tests/e2e/ctl_v3_grpc_test.go
@@ -0,0 +1,172 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !cluster_proxy
+
+package e2e
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+func TestAuthority(t *testing.T) {
+ tcs := []struct {
+ name string
+ useUnix bool
+ useTLS bool
+ useInsecureTLS bool
+ clientURLPattern string
+ expectAuthorityPattern string
+ }{
+ {
+ name: "unix:path",
+ useUnix: true,
+ clientURLPattern: "unix:localhost:${MEMBER_PORT}",
+ expectAuthorityPattern: "localhost:${MEMBER_PORT}",
+ },
+ {
+ name: "unix://absolute_path",
+ useUnix: true,
+ clientURLPattern: "unix://localhost:${MEMBER_PORT}",
+ expectAuthorityPattern: "localhost:${MEMBER_PORT}",
+ },
+ // "unixs" is not standard schema supported by etcd
+ {
+ name: "unixs:absolute_path",
+ useUnix: true,
+ useTLS: true,
+ clientURLPattern: "unixs:localhost:${MEMBER_PORT}",
+ expectAuthorityPattern: "localhost:${MEMBER_PORT}",
+ },
+ {
+ name: "unixs://absolute_path",
+ useUnix: true,
+ useTLS: true,
+ clientURLPattern: "unixs://localhost:${MEMBER_PORT}",
+ expectAuthorityPattern: "localhost:${MEMBER_PORT}",
+ },
+ {
+ name: "http://domain[:port]",
+ clientURLPattern: "http://localhost:${MEMBER_PORT}",
+ expectAuthorityPattern: "localhost:${MEMBER_PORT}",
+ },
+ {
+ name: "http://address[:port]",
+ clientURLPattern: "http://127.0.0.1:${MEMBER_PORT}",
+ expectAuthorityPattern: "127.0.0.1:${MEMBER_PORT}",
+ },
+ {
+ name: "https://domain[:port] insecure",
+ useTLS: true,
+ useInsecureTLS: true,
+ clientURLPattern: "https://localhost:${MEMBER_PORT}",
+ expectAuthorityPattern: "localhost:${MEMBER_PORT}",
+ },
+ {
+ name: "https://address[:port] insecure",
+ useTLS: true,
+ useInsecureTLS: true,
+ clientURLPattern: "https://127.0.0.1:${MEMBER_PORT}",
+ expectAuthorityPattern: "127.0.0.1:${MEMBER_PORT}",
+ },
+ {
+ name: "https://domain[:port]",
+ useTLS: true,
+ clientURLPattern: "https://localhost:${MEMBER_PORT}",
+ expectAuthorityPattern: "localhost:${MEMBER_PORT}",
+ },
+ {
+ name: "https://address[:port]",
+ useTLS: true,
+ clientURLPattern: "https://127.0.0.1:${MEMBER_PORT}",
+ expectAuthorityPattern: "127.0.0.1:${MEMBER_PORT}",
+ },
+ }
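+ // every scenario is exercised against both a single-member and a three-member cluster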
+ for _, tc := range tcs {
+ for _, clusterSize := range []int{1, 3} {
+ t.Run(fmt.Sprintf("Size: %d, Scenario: %q", clusterSize, tc.name), func(t *testing.T) {
+ e2e.BeforeTest(t)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ cfg := e2e.NewConfigNoTLS()
+ cfg.ClusterSize = clusterSize
+ if tc.useTLS {
+ cfg.Client.ConnectionType = e2e.ClientTLS
+ }
+ cfg.Client.AutoTLS = tc.useInsecureTLS
+ // Enable debug mode to get logs with http2 headers (including authority)
+ cfg.EnvVars = map[string]string{"GODEBUG": "http2debug=2"}
+ if tc.useUnix {
+ cfg.BaseClientScheme = "unix"
+ }
+
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg))
+ if err != nil {
+ t.Fatalf("could not start etcd process cluster (%v)", err)
+ }
+ defer epc.Close()
+
+ endpoints := templateEndpoints(t, tc.clientURLPattern, epc)
+ client, err := e2e.NewEtcdctl(cfg.Client, endpoints)
+ require.NoError(t, err)
+ for i := 0; i < 100; i++ {
+ require.NoError(t, client.Put(ctx, "foo", "bar", config.PutOptions{}))
+ }
+
+ testutils.ExecuteWithTimeout(t, 5*time.Second, func() {
+ assertAuthority(t, tc.expectAuthorityPattern, epc)
+ })
+ })
+ }
+ }
+}
+
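+// templateEndpoints substitutes ${MEMBER_PORT} in the URL pattern with each member's port (e2e.EtcdProcessBasePort + 5*i).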
+func templateEndpoints(t *testing.T, pattern string, clus *e2e.EtcdProcessCluster) []string {
+ t.Helper()
+ var endpoints []string
+ for i := 0; i < clus.Cfg.ClusterSize; i++ {
+ ent := pattern
+ ent = strings.ReplaceAll(ent, "${MEMBER_PORT}", fmt.Sprintf("%d", e2e.EtcdProcessBasePort+i*5))
+ endpoints = append(endpoints, ent)
+ }
+ return endpoints
+}
+
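+// assertAuthority scans each member's log for the hpack-decoded ":authority" header and checks it ends with the expected host:port.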
+func assertAuthority(t *testing.T, expectAuthorityPattern string, clus *e2e.EtcdProcessCluster) {
+ for i := range clus.Procs {
+ line, _ := clus.Procs[i].Logs().ExpectWithContext(context.TODO(), expect.ExpectedResponse{Value: `http2: decoded hpack field header field ":authority"`})
+ line = strings.TrimSuffix(line, "\n")
+ line = strings.TrimSuffix(line, "\r")
+
+ u, err := url.Parse(clus.Procs[i].EndpointsGRPC()[0])
+ require.NoError(t, err)
+ expectAuthority := strings.ReplaceAll(expectAuthorityPattern, "${MEMBER_PORT}", u.Port())
+ expectLine := fmt.Sprintf(`http2: decoded hpack field header field ":authority" = %q`, expectAuthority)
+ assert.Truef(t, strings.HasSuffix(line, expectLine), "Got %q expected suffix %q", line, expectLine)
+ }
+}
diff --git a/tests/e2e/ctl_v3_kv_test.go b/tests/e2e/ctl_v3_kv_test.go
index 1952ddd2280..82211946c35 100644
--- a/tests/e2e/ctl_v3_kv_test.go
+++ b/tests/e2e/ctl_v3_kv_test.go
@@ -15,64 +15,53 @@
package e2e
import (
+ "context"
"fmt"
"strings"
"testing"
"time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
)
-func TestCtlV3Put(t *testing.T) { testCtl(t, putTest, withDialTimeout(7*time.Second)) }
-func TestCtlV3PutNoTLS(t *testing.T) { testCtl(t, putTest, withCfg(*newConfigNoTLS())) }
-func TestCtlV3PutClientTLS(t *testing.T) { testCtl(t, putTest, withCfg(*newConfigClientTLS())) }
-func TestCtlV3PutClientAutoTLS(t *testing.T) { testCtl(t, putTest, withCfg(*newConfigClientAutoTLS())) }
-func TestCtlV3PutPeerTLS(t *testing.T) { testCtl(t, putTest, withCfg(*newConfigPeerTLS())) }
-func TestCtlV3PutTimeout(t *testing.T) { testCtl(t, putTest, withDialTimeout(0)) }
+func TestCtlV3PutTimeout(t *testing.T) { testCtl(t, putTest, withDefaultDialTimeout()) }
func TestCtlV3PutClientTLSFlagByEnv(t *testing.T) {
- testCtl(t, putTest, withCfg(*newConfigClientTLS()), withFlagByEnv())
+ testCtl(t, putTest, withCfg(*e2e.NewConfigClientTLS()), withFlagByEnv())
}
func TestCtlV3PutIgnoreValue(t *testing.T) { testCtl(t, putTestIgnoreValue) }
func TestCtlV3PutIgnoreLease(t *testing.T) { testCtl(t, putTestIgnoreLease) }
-func TestCtlV3Get(t *testing.T) { testCtl(t, getTest) }
-func TestCtlV3GetNoTLS(t *testing.T) { testCtl(t, getTest, withCfg(*newConfigNoTLS())) }
-func TestCtlV3GetClientTLS(t *testing.T) { testCtl(t, getTest, withCfg(*newConfigClientTLS())) }
-func TestCtlV3GetClientAutoTLS(t *testing.T) { testCtl(t, getTest, withCfg(*newConfigClientAutoTLS())) }
-func TestCtlV3GetPeerTLS(t *testing.T) { testCtl(t, getTest, withCfg(*newConfigPeerTLS())) }
-func TestCtlV3GetTimeout(t *testing.T) { testCtl(t, getTest, withDialTimeout(0)) }
-func TestCtlV3GetQuorum(t *testing.T) { testCtl(t, getTest, withQuorum()) }
-
-func TestCtlV3GetFormat(t *testing.T) { testCtl(t, getFormatTest) }
-func TestCtlV3GetRev(t *testing.T) { testCtl(t, getRevTest) }
-func TestCtlV3GetKeysOnly(t *testing.T) { testCtl(t, getKeysOnlyTest) }
-func TestCtlV3GetCountOnly(t *testing.T) { testCtl(t, getCountOnlyTest) }
-
-func TestCtlV3Del(t *testing.T) { testCtl(t, delTest) }
-func TestCtlV3DelNoTLS(t *testing.T) { testCtl(t, delTest, withCfg(*newConfigNoTLS())) }
-func TestCtlV3DelClientTLS(t *testing.T) { testCtl(t, delTest, withCfg(*newConfigClientTLS())) }
-func TestCtlV3DelPeerTLS(t *testing.T) { testCtl(t, delTest, withCfg(*newConfigPeerTLS())) }
-func TestCtlV3DelTimeout(t *testing.T) { testCtl(t, delTest, withDialTimeout(0)) }
+func TestCtlV3GetTimeout(t *testing.T) { testCtl(t, getTest, withDefaultDialTimeout()) }
+
+func TestCtlV3GetFormat(t *testing.T) { testCtl(t, getFormatTest) }
+func TestCtlV3GetRev(t *testing.T) { testCtl(t, getRevTest) }
+func TestCtlV3GetMinMaxCreateModRev(t *testing.T) { testCtl(t, getMinMaxCreateModRevTest) }
+func TestCtlV3GetKeysOnly(t *testing.T) { testCtl(t, getKeysOnlyTest) }
+func TestCtlV3GetCountOnly(t *testing.T) { testCtl(t, getCountOnlyTest) }
+
+func TestCtlV3DelTimeout(t *testing.T) { testCtl(t, delTest, withDefaultDialTimeout()) }
func TestCtlV3GetRevokedCRL(t *testing.T) {
- cfg := etcdProcessClusterConfig{
- clusterSize: 1,
- initialToken: "new",
- clientTLS: clientTLS,
- isClientCRL: true,
- clientCertAuthEnabled: true,
- }
- testCtl(t, testGetRevokedCRL, withCfg(cfg))
+ cfg := e2e.NewConfig(
+ e2e.WithClusterSize(1),
+ e2e.WithClientConnType(e2e.ClientTLS),
+ e2e.WithClientRevokeCerts(true),
+ e2e.WithClientCertAuthority(true),
+ )
+ testCtl(t, testGetRevokedCRL, withCfg(*cfg))
}
func testGetRevokedCRL(cx ctlCtx) {
// test reject
- if err := ctlV3Put(cx, "k", "v", ""); err == nil || !strings.Contains(err.Error(), "Error:") {
- cx.t.Fatalf("expected reset connection on put, got %v", err)
- }
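+ // the client certificate is listed in the CRL, so the connection is rejected and the put times out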
+ require.ErrorContains(cx.t, ctlV3Put(cx, "k", "v", ""), "context deadline exceeded")
+
// test accept
- cx.epc.cfg.isClientCRL = false
- if err := ctlV3Put(cx, "k", "v", ""); err != nil {
- cx.t.Fatal(err)
- }
+ cx.epc.Cfg.Client.RevokeCerts = false
+ require.NoError(cx.t, ctlV3Put(cx, "k", "v", ""))
}
func putTest(cx ctlCtx) {
@@ -91,18 +80,10 @@ func putTest(cx ctlCtx) {
}
func putTestIgnoreValue(cx ctlCtx) {
- if err := ctlV3Put(cx, "foo", "bar", ""); err != nil {
- cx.t.Fatal(err)
- }
- if err := ctlV3Get(cx, []string{"foo"}, kv{"foo", "bar"}); err != nil {
- cx.t.Fatal(err)
- }
- if err := ctlV3Put(cx, "foo", "", "", "--ignore-value"); err != nil {
- cx.t.Fatal(err)
- }
- if err := ctlV3Get(cx, []string{"foo"}, kv{"foo", "bar"}); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3Put(cx, "foo", "bar", ""))
+ require.NoError(cx.t, ctlV3Get(cx, []string{"foo"}, kv{"foo", "bar"}))
+ require.NoError(cx.t, ctlV3Put(cx, "foo", "", "", "--ignore-value"))
+ require.NoError(cx.t, ctlV3Get(cx, []string{"foo"}, kv{"foo", "bar"}))
}
func putTestIgnoreLease(cx ctlCtx) {
@@ -167,9 +148,7 @@ func getTest(cx ctlCtx) {
}
func getFormatTest(cx ctlCtx) {
- if err := ctlV3Put(cx, "abc", "123", ""); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3Put(cx, "abc", "123", ""))
tests := []struct {
format string
@@ -190,16 +169,16 @@ func getFormatTest(cx ctlCtx) {
cmdArgs = append(cmdArgs, "--print-value-only")
}
cmdArgs = append(cmdArgs, "abc")
- if err := spawnWithExpect(cmdArgs, tt.wstr); err != nil {
- cx.t.Errorf("#%d: error (%v), wanted %v", i, err, tt.wstr)
+ lines, err := e2e.RunUtilCompletion(cmdArgs, cx.envMap)
+ if err != nil {
+ cx.t.Errorf("#%d: error (%v)", i, err)
}
+ assert.Contains(cx.t, strings.Join(lines, "\n"), tt.wstr)
}
}
func getRevTest(cx ctlCtx) {
- var (
- kvs = []kv{{"key", "val1"}, {"key", "val2"}, {"key", "val3"}}
- )
+ kvs := []kv{{"key", "val1"}, {"key", "val2"}, {"key", "val3"}}
for i := range kvs {
if err := ctlV3Put(cx, kvs[i].key, kvs[i].val, ""); err != nil {
cx.t.Fatalf("getRevTest #%d: ctlV3Put error (%v)", i, err)
@@ -223,55 +202,71 @@ func getRevTest(cx ctlCtx) {
}
}
-func getKeysOnlyTest(cx ctlCtx) {
- if err := ctlV3Put(cx, "key", "val", ""); err != nil {
- cx.t.Fatal(err)
+func getMinMaxCreateModRevTest(cx ctlCtx) {
+ kvs := []kv{ // revision: store | key create | key modify
+ {"key1", "val1"}, // 2 2 2
+ {"key2", "val2"}, // 3 3 3
+ {"key1", "val3"}, // 4 2 4
+ {"key4", "val4"}, // 5 5 5
}
- cmdArgs := append(cx.PrefixArgs(), []string{"get", "--keys-only", "key"}...)
- if err := spawnWithExpect(cmdArgs, "key"); err != nil {
- cx.t.Fatal(err)
+ for i := range kvs {
+ if err := ctlV3Put(cx, kvs[i].key, kvs[i].val, ""); err != nil {
+ cx.t.Fatalf("getRevTest #%d: ctlV3Put error (%v)", i, err)
+ }
+ }
+
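+ // each case filters the "key" prefix by create/mod revision bounds and expects the matching subset from the table above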
+ tests := []struct {
+ args []string
+
+ wkv []kv
+ }{
+ {[]string{"key", "--prefix", "--max-create-rev", "3"}, []kv{kvs[1], kvs[2]}},
+ {[]string{"key", "--prefix", "--min-create-rev", "3"}, []kv{kvs[1], kvs[3]}},
+ {[]string{"key", "--prefix", "--max-mod-rev", "3"}, []kv{kvs[1]}},
+ {[]string{"key", "--prefix", "--min-mod-rev", "4"}, kvs[2:]},
}
- if err := spawnWithExpects(cmdArgs, "val"); err == nil {
- cx.t.Fatalf("got value but passed --keys-only")
+
+ for i, tt := range tests {
+ if err := ctlV3Get(cx, tt.args, tt.wkv...); err != nil {
+ cx.t.Errorf("getMinModRevTest #%d: ctlV3Get error (%v)", i, err)
+ }
}
}
+func getKeysOnlyTest(cx ctlCtx) {
+ require.NoError(cx.t, ctlV3Put(cx, "key", "val", ""))
+ cmdArgs := append(cx.PrefixArgs(), []string{"get", "--keys-only", "key"}...)
+ require.NoError(cx.t, e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, expect.ExpectedResponse{Value: "key"}))
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ lines, err := e2e.SpawnWithExpectLines(ctx, cmdArgs, cx.envMap, expect.ExpectedResponse{Value: "key"})
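+ // capture the full output and verify the value never appears when --keys-only is set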
+ require.NoError(cx.t, err)
+ require.NotContainsf(cx.t, lines, "val", "got value but passed --keys-only")
+}
+
func getCountOnlyTest(cx ctlCtx) {
cmdArgs := append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...)
- if err := spawnWithExpects(cmdArgs, "\"Count\" : 0"); err != nil {
- cx.t.Fatal(err)
- }
- if err := ctlV3Put(cx, "key", "val", ""); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, e2e.SpawnWithExpects(cmdArgs, cx.envMap, expect.ExpectedResponse{Value: "\"Count\" : 0"}))
+ require.NoError(cx.t, ctlV3Put(cx, "key", "val", ""))
cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...)
- if err := spawnWithExpects(cmdArgs, "\"Count\" : 1"); err != nil {
- cx.t.Fatal(err)
- }
- if err := ctlV3Put(cx, "key1", "val", ""); err != nil {
- cx.t.Fatal(err)
- }
- if err := ctlV3Put(cx, "key1", "val", ""); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, e2e.SpawnWithExpects(cmdArgs, cx.envMap, expect.ExpectedResponse{Value: "\"Count\" : 1"}))
+ require.NoError(cx.t, ctlV3Put(cx, "key1", "val", ""))
+ require.NoError(cx.t, ctlV3Put(cx, "key1", "val", ""))
cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...)
- if err := spawnWithExpects(cmdArgs, "\"Count\" : 2"); err != nil {
- cx.t.Fatal(err)
- }
- if err := ctlV3Put(cx, "key2", "val", ""); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, e2e.SpawnWithExpects(cmdArgs, cx.envMap, expect.ExpectedResponse{Value: "\"Count\" : 2"}))
+ require.NoError(cx.t, ctlV3Put(cx, "key2", "val", ""))
cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...)
- if err := spawnWithExpects(cmdArgs, "\"Count\" : 3"); err != nil {
- cx.t.Fatal(err)
- }
- expected := []string{
- "\"Count\" : 3",
- }
+ require.NoError(cx.t, e2e.SpawnWithExpects(cmdArgs, cx.envMap, expect.ExpectedResponse{Value: "\"Count\" : 3"}))
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
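+ // no key matches the "key3" prefix, so the reported count must not be 3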
cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key3", "--prefix", "--write-out=fields"}...)
- if err := spawnWithExpects(cmdArgs, expected...); err == nil {
- cx.t.Fatal(err)
- }
+ lines, err := e2e.SpawnWithExpectLines(ctx, cmdArgs, cx.envMap, expect.ExpectedResponse{Value: "\"Count\""})
+ require.NoError(cx.t, err)
+ require.NotContains(cx.t, lines, "\"Count\" : 3")
}
func delTest(cx ctlCtx) {
@@ -348,7 +343,7 @@ func ctlV3Put(cx ctlCtx, key, value, leaseID string, flags ...string) error {
if len(flags) != 0 {
cmdArgs = append(cmdArgs, flags...)
}
- return spawnWithExpect(cmdArgs, "OK")
+ return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, expect.ExpectedResponse{Value: "OK"})
}
type kv struct {
@@ -361,25 +356,15 @@ func ctlV3Get(cx ctlCtx, args []string, kvs ...kv) error {
if !cx.quorum {
cmdArgs = append(cmdArgs, "--consistency", "s")
}
- var lines []string
+ var lines []expect.ExpectedResponse
for _, elem := range kvs {
- lines = append(lines, elem.key, elem.val)
- }
- return spawnWithExpects(cmdArgs, lines...)
-}
-
-// ctlV3GetWithErr runs "get" command expecting no output but error
-func ctlV3GetWithErr(cx ctlCtx, args []string, errs []string) error {
- cmdArgs := append(cx.PrefixArgs(), "get")
- cmdArgs = append(cmdArgs, args...)
- if !cx.quorum {
- cmdArgs = append(cmdArgs, "--consistency", "s")
+ lines = append(lines, expect.ExpectedResponse{Value: elem.key}, expect.ExpectedResponse{Value: elem.val})
}
- return spawnWithExpects(cmdArgs, errs...)
+ return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...)
}
func ctlV3Del(cx ctlCtx, args []string, num int) error {
cmdArgs := append(cx.PrefixArgs(), "del")
cmdArgs = append(cmdArgs, args...)
- return spawnWithExpects(cmdArgs, fmt.Sprintf("%d", num))
+ return e2e.SpawnWithExpects(cmdArgs, cx.envMap, expect.ExpectedResponse{Value: fmt.Sprintf("%d", num)})
}
diff --git a/tests/e2e/ctl_v3_lease_test.go b/tests/e2e/ctl_v3_lease_test.go
index 59e47bc1faa..0ac6d1b32bd 100644
--- a/tests/e2e/ctl_v3_lease_test.go
+++ b/tests/e2e/ctl_v3_lease_test.go
@@ -19,171 +19,26 @@ import (
"strconv"
"strings"
"testing"
- "time"
-)
-
-func TestCtlV3LeaseGrantTimeToLive(t *testing.T) { testCtl(t, leaseTestGrantTimeToLive) }
-func TestCtlV3LeaseGrantTimeToLiveNoTLS(t *testing.T) {
- testCtl(t, leaseTestGrantTimeToLive, withCfg(*newConfigNoTLS()))
-}
-func TestCtlV3LeaseGrantTimeToLiveClientTLS(t *testing.T) {
- testCtl(t, leaseTestGrantTimeToLive, withCfg(*newConfigClientTLS()))
-}
-func TestCtlV3LeaseGrantTimeToLiveClientAutoTLS(t *testing.T) {
- testCtl(t, leaseTestGrantTimeToLive, withCfg(*newConfigClientAutoTLS()))
-}
-func TestCtlV3LeaseGrantTimeToLivePeerTLS(t *testing.T) {
- testCtl(t, leaseTestGrantTimeToLive, withCfg(*newConfigPeerTLS()))
-}
-
-func TestCtlV3LeaseGrantLeases(t *testing.T) { testCtl(t, leaseTestGrantLeaseListed) }
-func TestCtlV3LeaseGrantLeasesNoTLS(t *testing.T) {
- testCtl(t, leaseTestGrantLeaseListed, withCfg(*newConfigNoTLS()))
-}
-func TestCtlV3LeaseGrantLeasesClientTLS(t *testing.T) {
- testCtl(t, leaseTestGrantLeaseListed, withCfg(*newConfigClientTLS()))
-}
-func TestCtlV3LeaseGrantLeasesClientAutoTLS(t *testing.T) {
- testCtl(t, leaseTestGrantLeaseListed, withCfg(*newConfigClientAutoTLS()))
-}
-func TestCtlV3LeaseGrantLeasesPeerTLS(t *testing.T) {
- testCtl(t, leaseTestGrantLeaseListed, withCfg(*newConfigPeerTLS()))
-}
-func TestCtlV3LeaseTestTimeToLiveExpired(t *testing.T) { testCtl(t, leaseTestTimeToLiveExpired) }
-func TestCtlV3LeaseTestTimeToLiveExpiredNoTLS(t *testing.T) {
- testCtl(t, leaseTestTimeToLiveExpired, withCfg(*newConfigNoTLS()))
-}
-func TestCtlV3LeaseTestTimeToLiveExpiredClientTLS(t *testing.T) {
- testCtl(t, leaseTestTimeToLiveExpired, withCfg(*newConfigClientTLS()))
-}
-func TestCtlV3LeaseTestTimeToLiveExpiredClientAutoTLS(t *testing.T) {
- testCtl(t, leaseTestTimeToLiveExpired, withCfg(*newConfigClientAutoTLS()))
-}
-func TestCtlV3LeaseTestTimeToLiveExpiredPeerTLS(t *testing.T) {
- testCtl(t, leaseTestTimeToLiveExpired, withCfg(*newConfigPeerTLS()))
-}
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
func TestCtlV3LeaseKeepAlive(t *testing.T) { testCtl(t, leaseTestKeepAlive) }
func TestCtlV3LeaseKeepAliveNoTLS(t *testing.T) {
- testCtl(t, leaseTestKeepAlive, withCfg(*newConfigNoTLS()))
-}
-func TestCtlV3LeaseKeepAliveClientTLS(t *testing.T) {
- testCtl(t, leaseTestKeepAlive, withCfg(*newConfigClientTLS()))
-}
-func TestCtlV3LeaseKeepAliveClientAutoTLS(t *testing.T) {
- testCtl(t, leaseTestKeepAlive, withCfg(*newConfigClientAutoTLS()))
-}
-func TestCtlV3LeaseKeepAlivePeerTLS(t *testing.T) {
- testCtl(t, leaseTestKeepAlive, withCfg(*newConfigPeerTLS()))
-}
-
-func TestCtlV3LeaseKeepAliveOnce(t *testing.T) { testCtl(t, leaseTestKeepAliveOnce) }
-func TestCtlV3LeaseKeepAliveOnceNoTLS(t *testing.T) {
- testCtl(t, leaseTestKeepAliveOnce, withCfg(*newConfigNoTLS()))
-}
-func TestCtlV3LeaseKeepAliveOnceClientTLS(t *testing.T) {
- testCtl(t, leaseTestKeepAliveOnce, withCfg(*newConfigClientTLS()))
-}
-func TestCtlV3LeaseKeepAliveOnceClientAutoTLS(t *testing.T) {
- testCtl(t, leaseTestKeepAliveOnce, withCfg(*newConfigClientAutoTLS()))
-}
-func TestCtlV3LeaseKeepAliveOncePeerTLS(t *testing.T) {
- testCtl(t, leaseTestKeepAliveOnce, withCfg(*newConfigPeerTLS()))
+ testCtl(t, leaseTestKeepAlive, withCfg(*e2e.NewConfigNoTLS()))
}
-func TestCtlV3LeaseRevoke(t *testing.T) { testCtl(t, leaseTestRevoked) }
-func TestCtlV3LeaseRevokeNoTLS(t *testing.T) {
- testCtl(t, leaseTestRevoked, withCfg(*newConfigNoTLS()))
-}
-func TestCtlV3LeaseRevokeClientTLS(t *testing.T) {
- testCtl(t, leaseTestRevoked, withCfg(*newConfigClientTLS()))
-}
-func TestCtlV3LeaseRevokeClientAutoTLS(t *testing.T) {
- testCtl(t, leaseTestRevoked, withCfg(*newConfigClientAutoTLS()))
-}
-func TestCtlV3LeaseRevokePeerTLS(t *testing.T) {
- testCtl(t, leaseTestRevoked, withCfg(*newConfigPeerTLS()))
-}
-
-func leaseTestGrantTimeToLive(cx ctlCtx) {
- id, err := ctlV3LeaseGrant(cx, 10)
- if err != nil {
- cx.t.Fatalf("leaseTestGrantTimeToLive: ctlV3LeaseGrant error (%v)", err)
- }
-
- cmdArgs := append(cx.PrefixArgs(), "lease", "timetolive", id, "--keys")
- proc, err := spawnCmd(cmdArgs)
- if err != nil {
- cx.t.Fatalf("leaseTestGrantTimeToLive: error (%v)", err)
- }
- line, err := proc.Expect(" granted with TTL(")
- if err != nil {
- cx.t.Fatalf("leaseTestGrantTimeToLive: error (%v)", err)
- }
- if err = proc.Close(); err != nil {
- cx.t.Fatalf("leaseTestGrantTimeToLive: error (%v)", err)
- }
- if !strings.Contains(line, ", attached keys") {
- cx.t.Fatalf("leaseTestGrantTimeToLive: expected 'attached keys', got %q", line)
- }
- if !strings.Contains(line, id) {
- cx.t.Fatalf("leaseTestGrantTimeToLive: expected leaseID %q, got %q", id, line)
- }
-}
-
-func leaseTestGrantLeaseListed(cx ctlCtx) {
- err := leaseTestGrantLeasesList(cx)
- if err != nil {
- cx.t.Fatalf("leaseTestGrantLeasesList: (%v)", err)
- }
-}
-
-func leaseTestGrantLeasesList(cx ctlCtx) error {
- id, err := ctlV3LeaseGrant(cx, 10)
- if err != nil {
- return fmt.Errorf("ctlV3LeaseGrant error (%v)", err)
- }
-
- cmdArgs := append(cx.PrefixArgs(), "lease", "list")
- proc, err := spawnCmd(cmdArgs)
- if err != nil {
- return fmt.Errorf("lease list failed (%v)", err)
- }
- _, err = proc.Expect(id)
- if err != nil {
- return fmt.Errorf("lease id not in returned list (%v)", err)
- }
- return proc.Close()
+func TestCtlV3LeaseKeepAliveClientTLS(t *testing.T) {
+ testCtl(t, leaseTestKeepAlive, withCfg(*e2e.NewConfigClientTLS()))
}
-func leaseTestTimeToLiveExpired(cx ctlCtx) {
- err := leaseTestTimeToLiveExpire(cx, 3)
- if err != nil {
- cx.t.Fatalf("leaseTestTimeToLiveExpire: (%v)", err)
- }
+func TestCtlV3LeaseKeepAliveClientAutoTLS(t *testing.T) {
+ testCtl(t, leaseTestKeepAlive, withCfg(*e2e.NewConfigClientAutoTLS()))
}
-func leaseTestTimeToLiveExpire(cx ctlCtx, ttl int) error {
- leaseID, err := ctlV3LeaseGrant(cx, ttl)
- if err != nil {
- return fmt.Errorf("ctlV3LeaseGrant error (%v)", err)
- }
-
- if err = ctlV3Put(cx, "key", "val", leaseID); err != nil {
- return fmt.Errorf("ctlV3Put error (%v)", err)
- }
- // eliminate false positive
- time.Sleep(time.Duration(ttl+1) * time.Second)
- cmdArgs := append(cx.PrefixArgs(), "lease", "timetolive", leaseID)
- exp := fmt.Sprintf("lease %s already expired", leaseID)
- if err = spawnWithExpect(cmdArgs, exp); err != nil {
- return fmt.Errorf("lease not properly expired: (%v)", err)
- }
- if err := ctlV3Get(cx, []string{"key"}); err != nil {
- return fmt.Errorf("ctlV3Get error (%v)", err)
- }
- return nil
+func TestCtlV3LeaseKeepAlivePeerTLS(t *testing.T) {
+ testCtl(t, leaseTestKeepAlive, withCfg(*e2e.NewConfigPeerTLS()))
}
func leaseTestKeepAlive(cx ctlCtx) {
@@ -203,51 +58,9 @@ func leaseTestKeepAlive(cx ctlCtx) {
}
}
-func leaseTestKeepAliveOnce(cx ctlCtx) {
- // put with TTL 10 seconds and keep-alive once
- leaseID, err := ctlV3LeaseGrant(cx, 10)
- if err != nil {
- cx.t.Fatalf("leaseTestKeepAlive: ctlV3LeaseGrant error (%v)", err)
- }
- if err := ctlV3Put(cx, "key", "val", leaseID); err != nil {
- cx.t.Fatalf("leaseTestKeepAlive: ctlV3Put error (%v)", err)
- }
- if err := ctlV3LeaseKeepAliveOnce(cx, leaseID); err != nil {
- cx.t.Fatalf("leaseTestKeepAlive: ctlV3LeaseKeepAliveOnce error (%v)", err)
- }
- if err := ctlV3Get(cx, []string{"key"}, kv{"key", "val"}); err != nil {
- cx.t.Fatalf("leaseTestKeepAlive: ctlV3Get error (%v)", err)
- }
-}
-
-func leaseTestRevoked(cx ctlCtx) {
- err := leaseTestRevoke(cx)
- if err != nil {
- cx.t.Fatalf("leaseTestRevoke: (%v)", err)
- }
-}
-
-func leaseTestRevoke(cx ctlCtx) error {
- // put with TTL 10 seconds and revoke
- leaseID, err := ctlV3LeaseGrant(cx, 10)
- if err != nil {
- return fmt.Errorf("ctlV3LeaseGrant error (%v)", err)
- }
- if err := ctlV3Put(cx, "key", "val", leaseID); err != nil {
- return fmt.Errorf("ctlV3Put error (%v)", err)
- }
- if err := ctlV3LeaseRevoke(cx, leaseID); err != nil {
- return fmt.Errorf("ctlV3LeaseRevoke error (%v)", err)
- }
- if err := ctlV3Get(cx, []string{"key"}); err != nil { // expect no output
- return fmt.Errorf("ctlV3Get error (%v)", err)
- }
- return nil
-}
-
func ctlV3LeaseGrant(cx ctlCtx, ttl int) (string, error) {
cmdArgs := append(cx.PrefixArgs(), "lease", "grant", strconv.Itoa(ttl))
- proc, err := spawnCmd(cmdArgs)
+ proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
if err != nil {
return "", err
}
@@ -271,21 +84,7 @@ func ctlV3LeaseGrant(cx ctlCtx, ttl int) (string, error) {
func ctlV3LeaseKeepAlive(cx ctlCtx, leaseID string) error {
cmdArgs := append(cx.PrefixArgs(), "lease", "keep-alive", leaseID)
- proc, err := spawnCmd(cmdArgs)
- if err != nil {
- return err
- }
-
- if _, err = proc.Expect(fmt.Sprintf("lease %s keepalived with TTL(", leaseID)); err != nil {
- return err
- }
- return proc.Stop()
-}
-
-func ctlV3LeaseKeepAliveOnce(cx ctlCtx, leaseID string) error {
- cmdArgs := append(cx.PrefixArgs(), "lease", "keep-alive", "--once", leaseID)
-
- proc, err := spawnCmd(cmdArgs)
+ proc, err := e2e.SpawnCmd(cmdArgs, nil)
if err != nil {
return err
}
@@ -298,5 +97,5 @@ func ctlV3LeaseKeepAliveOnce(cx ctlCtx, leaseID string) error {
func ctlV3LeaseRevoke(cx ctlCtx, leaseID string) error {
cmdArgs := append(cx.PrefixArgs(), "lease", "revoke", leaseID)
- return spawnWithExpect(cmdArgs, fmt.Sprintf("lease %s revoked", leaseID))
+ return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, expect.ExpectedResponse{Value: fmt.Sprintf("lease %s revoked", leaseID)})
}
diff --git a/tests/e2e/ctl_v3_lock_test.go b/tests/e2e/ctl_v3_lock_test.go
index e88000a1516..b96dba6c1df 100644
--- a/tests/e2e/ctl_v3_lock_test.go
+++ b/tests/e2e/ctl_v3_lock_test.go
@@ -15,13 +15,17 @@
package e2e
import (
+ "context"
"fmt"
"os"
"strings"
"testing"
"time"
+ "github.com/stretchr/testify/require"
+
"go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
)
func TestCtlV3Lock(t *testing.T) {
@@ -36,9 +40,7 @@ func testLock(cx ctlCtx) {
name := "a"
holder, ch, err := ctlV3Lock(cx, name)
- if err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, err)
l1 := ""
select {
@@ -52,9 +54,7 @@ func testLock(cx ctlCtx) {
// blocked process that won't acquire the lock
blocked, ch, err := ctlV3Lock(cx, name)
- if err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, err)
select {
case <-time.After(100 * time.Millisecond):
case <-ch:
@@ -63,10 +63,13 @@ func testLock(cx ctlCtx) {
// overlap with a blocker that will acquire the lock
blockAcquire, ch, err := ctlV3Lock(cx, name)
- if err != nil {
- cx.t.Fatal(err)
- }
- defer blockAcquire.Stop()
+ require.NoError(cx.t, err)
+ defer func(blockAcquire *expect.ExpectProcess) {
+ err = blockAcquire.Stop()
+ require.NoError(cx.t, err)
+ blockAcquire.Wait()
+ }(blockAcquire)
+
select {
case <-time.After(100 * time.Millisecond):
case <-ch:
@@ -74,20 +77,16 @@ func testLock(cx ctlCtx) {
}
// kill blocked process with clean shutdown
- if err = blocked.Signal(os.Interrupt); err != nil {
- cx.t.Fatal(err)
- }
- if err = closeWithTimeout(blocked, time.Second); err != nil {
- cx.t.Fatal(err)
+ require.NoError(cx.t, blocked.Signal(os.Interrupt))
+ err = e2e.CloseWithTimeout(blocked, time.Second)
+ if err != nil {
+ // because it is blocked, the process may be killed instead of shutting down cleanly and exit non-zero
+ require.ErrorContains(cx.t, err, "unexpected exit code")
}
// kill the holder with clean shutdown
- if err = holder.Signal(os.Interrupt); err != nil {
- cx.t.Fatal(err)
- }
- if err = closeWithTimeout(holder, 200*time.Millisecond+time.Second); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, holder.Signal(os.Interrupt))
+ require.NoError(cx.t, e2e.CloseWithTimeout(holder, 200*time.Millisecond+time.Second))
// blockAcquire should acquire the lock
select {
@@ -103,32 +102,31 @@ func testLock(cx ctlCtx) {
func testLockWithCmd(cx ctlCtx) {
// exec command with zero exit code
echoCmd := []string{"echo"}
- if err := ctlV3LockWithCmd(cx, echoCmd, ""); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3LockWithCmd(cx, echoCmd, expect.ExpectedResponse{Value: ""}))
// exec command with non-zero exit code
code := 3
awkCmd := []string{"awk", fmt.Sprintf("BEGIN{exit %d}", code)}
- expect := fmt.Sprintf("Error: exit status %d", code)
- if err := ctlV3LockWithCmd(cx, awkCmd, expect); err != nil {
- cx.t.Fatal(err)
- }
+ expect := expect.ExpectedResponse{Value: fmt.Sprintf("Error: exit status %d", code)}
+ require.ErrorContains(cx.t, ctlV3LockWithCmd(cx, awkCmd, expect), expect.Value)
}
// ctlV3Lock creates a lock process with a channel listening for when it acquires the lock.
func ctlV3Lock(cx ctlCtx, name string) (*expect.ExpectProcess, <-chan string, error) {
cmdArgs := append(cx.PrefixArgs(), "lock", name)
- proc, err := spawnCmd(cmdArgs)
+ proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
outc := make(chan string, 1)
if err != nil {
close(outc)
return proc, outc, err
}
go func() {
- s, xerr := proc.ExpectFunc(func(string) bool { return true })
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
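+ // if the expect fails, it must be because the lock process was canceled during shutdown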
+ s, xerr := proc.ExpectFunc(ctx, func(string) bool { return true })
if xerr != nil {
- cx.t.Errorf("expect failed (%v)", xerr)
+ require.ErrorContains(cx.t, xerr, "Error: context canceled")
}
outc <- s
}()
@@ -136,9 +134,11 @@ func ctlV3Lock(cx ctlCtx, name string) (*expect.ExpectProcess, <-chan string, er
}
// ctlV3LockWithCmd creates a lock process to exec command.
-func ctlV3LockWithCmd(cx ctlCtx, execCmd []string, as ...string) error {
+func ctlV3LockWithCmd(cx ctlCtx, execCmd []string, as ...expect.ExpectedResponse) error {
// use command as lock name
cmdArgs := append(cx.PrefixArgs(), "lock", execCmd[0])
cmdArgs = append(cmdArgs, execCmd...)
- return spawnWithExpects(cmdArgs, as...)
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ return e2e.SpawnWithExpectsContext(ctx, cmdArgs, cx.envMap, as...)
}
diff --git a/tests/e2e/ctl_v3_make_mirror_test.go b/tests/e2e/ctl_v3_make_mirror_test.go
index f15340670e3..720c4f22de4 100644
--- a/tests/e2e/ctl_v3_make_mirror_test.go
+++ b/tests/e2e/ctl_v3_make_mirror_test.go
@@ -15,18 +15,24 @@
package e2e
import (
+ "context"
"fmt"
"testing"
"time"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
)
func TestCtlV3MakeMirror(t *testing.T) { testCtl(t, makeMirrorTest) }
func TestCtlV3MakeMirrorModifyDestPrefix(t *testing.T) { testCtl(t, makeMirrorModifyDestPrefixTest) }
func TestCtlV3MakeMirrorNoDestPrefix(t *testing.T) { testCtl(t, makeMirrorNoDestPrefixTest) }
+func TestCtlV3MakeMirrorWithWatchRev(t *testing.T) { testCtl(t, makeMirrorWithWatchRev) }
func makeMirrorTest(cx ctlCtx) {
var (
- flags = []string{}
+ flags []string
kvs = []kv{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}}
kvs2 = []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}, {key: "key3", val: "val3"}}
prefix = "key"
@@ -57,18 +63,30 @@ func makeMirrorNoDestPrefixTest(cx ctlCtx) {
testMirrorCommand(cx, flags, kvs, kvs2, srcprefix, destprefix)
}
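+// makeMirrorWithWatchRev starts mirroring at revision 4, so only o_key3 and o_key4 are expected on the destination (with the "o_" prefix stripped).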
+func makeMirrorWithWatchRev(cx ctlCtx) {
+ var (
+ flags = []string{"--prefix", "o_", "--no-dest-prefix", "--rev", "4"}
+ kvs = []kv{{"o_key1", "val1"}, {"o_key2", "val2"}, {"o_key3", "val3"}, {"o_key4", "val4"}}
+ kvs2 = []kvExec{{key: "key3", val: "val3"}, {key: "key4", val: "val4"}}
+ srcprefix = "o_"
+ destprefix = "key"
+ )
+
+ testMirrorCommand(cx, flags, kvs, kvs2, srcprefix, destprefix)
+}
+
func testMirrorCommand(cx ctlCtx, flags []string, sourcekvs []kv, destkvs []kvExec, srcprefix, destprefix string) {
// set up another cluster to mirror with
- mirrorcfg := newConfigAutoTLS()
- mirrorcfg.clusterSize = 1
- mirrorcfg.basePort = 10000
+ mirrorcfg := e2e.NewConfigAutoTLS()
+ mirrorcfg.ClusterSize = 1
+ mirrorcfg.BasePort = 10000
mirrorctx := ctlCtx{
t: cx.t,
cfg: *mirrorcfg,
dialTimeout: 7 * time.Second,
}
- mirrorepc, err := newEtcdProcessCluster(cx.t, &mirrorctx.cfg)
+ mirrorepc, err := e2e.NewEtcdProcessCluster(context.TODO(), cx.t, e2e.WithConfig(&mirrorctx.cfg))
if err != nil {
cx.t.Fatalf("could not start etcd process cluster (%v)", err)
}
@@ -82,28 +100,16 @@ func testMirrorCommand(cx ctlCtx, flags []string, sourcekvs []kv, destkvs []kvEx
cmdArgs := append(cx.PrefixArgs(), "make-mirror")
cmdArgs = append(cmdArgs, flags...)
- cmdArgs = append(cmdArgs, fmt.Sprintf("localhost:%d", mirrorcfg.basePort))
- proc, err := spawnCmd(cmdArgs)
- if err != nil {
- cx.t.Fatal(err)
- }
+ cmdArgs = append(cmdArgs, fmt.Sprintf("localhost:%d", mirrorcfg.BasePort))
+ proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
+ require.NoError(cx.t, err)
defer func() {
- err = proc.Stop()
- if err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, proc.Stop())
}()
for i := range sourcekvs {
- if err = ctlV3Put(cx, sourcekvs[i].key, sourcekvs[i].val, ""); err != nil {
- cx.t.Fatal(err)
- }
- }
- if err = ctlV3Get(cx, []string{srcprefix, "--prefix"}, sourcekvs...); err != nil {
- cx.t.Fatal(err)
- }
-
- if err = ctlV3Watch(mirrorctx, []string{destprefix, "--rev", "1", "--prefix"}, destkvs...); err != nil {
- cx.t.Fatal(err)
+ require.NoError(cx.t, ctlV3Put(cx, sourcekvs[i].key, sourcekvs[i].val, ""))
}
+ require.NoError(cx.t, ctlV3Get(cx, []string{srcprefix, "--prefix"}, sourcekvs...))
+ require.NoError(cx.t, ctlV3Watch(mirrorctx, []string{destprefix, "--rev", "1", "--prefix"}, destkvs...))
}
diff --git a/tests/e2e/ctl_v3_member_no_proxy_test.go b/tests/e2e/ctl_v3_member_no_proxy_test.go
new file mode 100644
index 00000000000..be492f6a4c8
--- /dev/null
+++ b/tests/e2e/ctl_v3_member_no_proxy_test.go
@@ -0,0 +1,176 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !cluster_proxy
+
+package e2e
+
+import (
+ "context"
+ "math/rand"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+func TestMemberReplace(t *testing.T) {
+ e2e.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+ defer cancel()
+
+ epc, err := e2e.NewEtcdProcessCluster(ctx, t)
+ require.NoError(t, err)
+ defer epc.Close()
+
+ memberIdx := rand.Int() % len(epc.Procs)
+ member := epc.Procs[memberIdx]
+ memberName := member.Config().Name
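+ // build client endpoints only from the remaining members, so requests skip the member being replaced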
+ var endpoints []string
+ for i := 1; i < len(epc.Procs); i++ {
+ endpoints = append(endpoints, epc.Procs[(memberIdx+i)%len(epc.Procs)].EndpointsGRPC()...)
+ }
+ cc, err := e2e.NewEtcdctl(epc.Cfg.Client, endpoints)
+ require.NoError(t, err)
+
+ memberID, found, err := getMemberIDByName(ctx, cc, memberName)
+ require.NoError(t, err)
+ require.Truef(t, found, "Member not found")
+
+ // Need to wait health interval for cluster to accept member changes
+ time.Sleep(etcdserver.HealthInterval)
+
+ t.Logf("Removing member %s", memberName)
+ _, err = cc.MemberRemove(ctx, memberID)
+ require.NoError(t, err)
+ _, found, err = getMemberIDByName(ctx, cc, memberName)
+ require.NoError(t, err)
+ require.Falsef(t, found, "Expected member to be removed")
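+ // wait for the removed member's process to exit; a non-zero exit code is expected here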
+ for member.IsRunning() {
+ err = member.Wait(ctx)
+ if err != nil && !strings.Contains(err.Error(), "unexpected exit code") {
+ t.Fatalf("member didn't exit as expected: %v", err)
+ }
+ }
+
+ t.Logf("Removing member %s data", memberName)
+ err = os.RemoveAll(member.Config().DataDirPath)
+ require.NoError(t, err)
+
+ t.Logf("Adding member %s back", memberName)
+ removedMemberPeerURL := member.Config().PeerURL.String()
+ _, err = cc.MemberAdd(ctx, memberName, []string{removedMemberPeerURL})
+ require.NoError(t, err)
+ err = patchArgs(member.Config().Args, "initial-cluster-state", "existing")
+ require.NoError(t, err)
+
+ // Sleep 100ms to bypass the known issue https://github.com/etcd-io/etcd/issues/16687.
+ time.Sleep(100 * time.Millisecond)
+ t.Logf("Starting member %s", memberName)
+ err = member.Start(ctx)
+ require.NoError(t, err)
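+ // Poll until the re-added member appears in the member list again.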
+ testutils.ExecuteUntil(ctx, t, func() {
+ for {
+ _, found, err := getMemberIDByName(ctx, cc, memberName)
+ if err != nil || !found {
+ time.Sleep(10 * time.Millisecond)
+ continue
+ }
+ break
+ }
+ })
+}
+
+func TestMemberReplaceWithLearner(t *testing.T) {
+ e2e.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+ defer cancel()
+
+ epc, err := e2e.NewEtcdProcessCluster(ctx, t)
+ require.NoError(t, err)
+ defer epc.Close()
+
+ memberIdx := rand.Int() % len(epc.Procs)
+ member := epc.Procs[memberIdx]
+ memberName := member.Config().Name
+ var endpoints []string
+ for i := 1; i < len(epc.Procs); i++ {
+ endpoints = append(endpoints, epc.Procs[(memberIdx+i)%len(epc.Procs)].EndpointsGRPC()...)
+ }
+ cc, err := e2e.NewEtcdctl(epc.Cfg.Client, endpoints)
+ require.NoError(t, err)
+
+ memberID, found, err := getMemberIDByName(ctx, cc, memberName)
+ require.NoError(t, err)
+ require.Truef(t, found, "Member not found")
+
+ // Need to wait for the health interval to pass before the cluster accepts member changes
+ time.Sleep(etcdserver.HealthInterval)
+
+ t.Logf("Removing member %s", memberName)
+ _, err = cc.MemberRemove(ctx, memberID)
+ require.NoError(t, err)
+ _, found, err = getMemberIDByName(ctx, cc, memberName)
+ require.NoError(t, err)
+ require.Falsef(t, found, "Expected member to be removed")
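+ // Wait for the removed member to shut itself down; an "unexpected exit code" error is tolerated here.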
+ for member.IsRunning() {
+ err = member.Wait(ctx)
+ if err != nil && !strings.Contains(err.Error(), "unexpected exit code") {
+ t.Fatalf("member didn't exit as expected: %v", err)
+ }
+ }
+
+ t.Logf("Removing member %s data", memberName)
+ err = os.RemoveAll(member.Config().DataDirPath)
+ require.NoError(t, err)
+
+ t.Logf("Adding member %s back as Learner", memberName)
+ removedMemberPeerURL := member.Config().PeerURL.String()
+ _, err = cc.MemberAddAsLearner(ctx, memberName, []string{removedMemberPeerURL})
+ require.NoError(t, err)
+
+ err = patchArgs(member.Config().Args, "initial-cluster-state", "existing")
+ require.NoError(t, err)
+
+ // Sleep 100ms to bypass the known issue https://github.com/etcd-io/etcd/issues/16687.
+ time.Sleep(100 * time.Millisecond)
+
+ t.Logf("Starting member %s", memberName)
+ err = member.Start(ctx)
+ require.NoError(t, err)
+ var learnMemberID uint64
+ testutils.ExecuteUntil(ctx, t, func() {
+ for {
+ learnMemberID, found, err = getMemberIDByName(ctx, cc, memberName)
+ if err != nil || !found {
+ time.Sleep(10 * time.Millisecond)
+ continue
+ }
+ break
+ }
+ })
+
+ learnMemberID, found, err = getMemberIDByName(ctx, cc, memberName)
+ require.NoError(t, err)
+ require.Truef(t, found, "Member not found")
+
+ _, err = cc.MemberPromote(ctx, learnMemberID)
+ require.NoError(t, err)
+}
diff --git a/tests/e2e/ctl_v3_member_test.go b/tests/e2e/ctl_v3_member_test.go
index 76198569c19..7eb2a046be0 100644
--- a/tests/e2e/ctl_v3_member_test.go
+++ b/tests/e2e/ctl_v3_member_test.go
@@ -15,72 +15,136 @@
package e2e
import (
+ "context"
"encoding/json"
+ "errors"
"fmt"
"io"
"reflect"
"strings"
+ "sync"
"testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
)
func TestCtlV3MemberList(t *testing.T) { testCtl(t, memberListTest) }
func TestCtlV3MemberListWithHex(t *testing.T) { testCtl(t, memberListWithHexTest) }
-func TestCtlV3MemberListNoTLS(t *testing.T) { testCtl(t, memberListTest, withCfg(*newConfigNoTLS())) }
-func TestCtlV3MemberListClientTLS(t *testing.T) {
- testCtl(t, memberListTest, withCfg(*newConfigClientTLS()))
-}
-func TestCtlV3MemberListClientAutoTLS(t *testing.T) {
- testCtl(t, memberListTest, withCfg(*newConfigClientAutoTLS()))
-}
-func TestCtlV3MemberListPeerTLS(t *testing.T) {
- testCtl(t, memberListTest, withCfg(*newConfigPeerTLS()))
-}
-func TestCtlV3MemberRemove(t *testing.T) {
- testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig())
-}
-func TestCtlV3MemberRemoveNoTLS(t *testing.T) {
- testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(*newConfigNoTLS()))
-}
-func TestCtlV3MemberRemoveClientTLS(t *testing.T) {
- testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(*newConfigClientTLS()))
-}
-func TestCtlV3MemberRemoveClientAutoTLS(t *testing.T) {
- testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(
- // default clusterSize is 1
- etcdProcessClusterConfig{
- clusterSize: 3,
- isClientAutoTLS: true,
- clientTLS: clientTLS,
- initialToken: "new",
- }))
+func TestCtlV3MemberListSerializable(t *testing.T) {
+ cfg := e2e.NewConfig(
+ e2e.WithClusterSize(1),
+ )
+ testCtl(t, memberListSerializableTest, withCfg(*cfg))
}
-func TestCtlV3MemberRemovePeerTLS(t *testing.T) {
- testCtl(t, memberRemoveTest, withQuorum(), withNoStrictReconfig(), withCfg(*newConfigPeerTLS()))
-}
-func TestCtlV3MemberAdd(t *testing.T) { testCtl(t, memberAddTest) }
-func TestCtlV3MemberAddNoTLS(t *testing.T) { testCtl(t, memberAddTest, withCfg(*newConfigNoTLS())) }
-func TestCtlV3MemberAddClientTLS(t *testing.T) {
- testCtl(t, memberAddTest, withCfg(*newConfigClientTLS()))
-}
-func TestCtlV3MemberAddClientAutoTLS(t *testing.T) {
- testCtl(t, memberAddTest, withCfg(*newConfigClientAutoTLS()))
-}
-func TestCtlV3MemberAddPeerTLS(t *testing.T) { testCtl(t, memberAddTest, withCfg(*newConfigPeerTLS())) }
-func TestCtlV3MemberAddForLearner(t *testing.T) { testCtl(t, memberAddForLearnerTest) }
-func TestCtlV3MemberUpdate(t *testing.T) { testCtl(t, memberUpdateTest) }
+
+func TestCtlV3MemberAdd(t *testing.T) { testCtl(t, memberAddTest) }
+func TestCtlV3MemberAddAsLearner(t *testing.T) { testCtl(t, memberAddAsLearnerTest) }
+
+func TestCtlV3MemberUpdate(t *testing.T) { testCtl(t, memberUpdateTest) }
func TestCtlV3MemberUpdateNoTLS(t *testing.T) {
- testCtl(t, memberUpdateTest, withCfg(*newConfigNoTLS()))
+ testCtl(t, memberUpdateTest, withCfg(*e2e.NewConfigNoTLS()))
}
+
func TestCtlV3MemberUpdateClientTLS(t *testing.T) {
- testCtl(t, memberUpdateTest, withCfg(*newConfigClientTLS()))
+ testCtl(t, memberUpdateTest, withCfg(*e2e.NewConfigClientTLS()))
}
+
func TestCtlV3MemberUpdateClientAutoTLS(t *testing.T) {
- testCtl(t, memberUpdateTest, withCfg(*newConfigClientAutoTLS()))
+ testCtl(t, memberUpdateTest, withCfg(*e2e.NewConfigClientAutoTLS()))
}
+
func TestCtlV3MemberUpdatePeerTLS(t *testing.T) {
- testCtl(t, memberUpdateTest, withCfg(*newConfigPeerTLS()))
+ testCtl(t, memberUpdateTest, withCfg(*e2e.NewConfigPeerTLS()))
+}
+
+// TestCtlV3ConsistentMemberList requires the gofail failpoints to be enabled.
+// When running this test locally, remember to run
+// `make gofail-enable` first.
+func TestCtlV3ConsistentMemberList(t *testing.T) {
+ e2e.BeforeTest(t)
+
+ ctx := context.Background()
+
+ epc, err := e2e.NewEtcdProcessCluster(ctx, t,
+ e2e.WithClusterSize(1),
+ e2e.WithEnvVars(map[string]string{"GOFAIL_FAILPOINTS": `beforeApplyOneConfChange=sleep("2s")`}),
+ )
+ require.NoErrorf(t, err, "failed to start etcd cluster")
+ defer func() {
+ derr := epc.Close()
+ require.NoErrorf(t, derr, "failed to close etcd cluster")
+ }()
+
+ t.Log("Adding and then removing a learner")
+ resp, err := epc.Etcdctl().MemberAddAsLearner(ctx, "newLearner", []string{fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11)})
+ require.NoError(t, err)
+ _, err = epc.Etcdctl().MemberRemove(ctx, resp.Member.ID)
+ require.NoError(t, err)
+ t.Logf("Added and then removed a learner with ID: %x", resp.Member.ID)
+
+ t.Log("Restarting the etcd process to ensure all data is persisted")
+ err = epc.Procs[0].Restart(ctx)
+ require.NoError(t, err)
+
+ var wg sync.WaitGroup
+ wg.Add(2)
+ stopc := make(chan struct{}, 2)
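+ // stopc is buffered so each goroutine can signal the other to stop without blocking on exit.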
+
+ t.Log("Starting a goroutine to repeatedly restart etcdserver")
+ go func() {
+ defer func() {
+ stopc <- struct{}{}
+ wg.Done()
+ }()
+ for i := 0; i < 3; i++ {
+ select {
+ case <-stopc:
+ return
+ default:
+ }
+
+ merr := epc.Procs[0].Restart(ctx)
+ assert.NoError(t, merr)
+ epc.WaitLeader(t)
+
+ time.Sleep(100 * time.Millisecond)
+ }
+ }()
+
+ t.Log("Starting a goroutine to repeated check the member list")
+ count := 0
+ go func() {
+ defer func() {
+ stopc <- struct{}{}
+ wg.Done()
+ }()
+
+ for {
+ select {
+ case <-stopc:
+ return
+ default:
+ }
+
+ mresp, merr := epc.Etcdctl().MemberList(ctx, true)
+ if merr != nil {
+ continue
+ }
+
+ count++
+ assert.Len(t, mresp.Members, 1)
+ }
+ }()
+
+ wg.Wait()
+ assert.Positive(t, count)
+ t.Logf("Checked the member list %d times", count)
}
func memberListTest(cx ctlCtx) {
@@ -89,19 +153,36 @@ func memberListTest(cx ctlCtx) {
}
}
+func memberListSerializableTest(cx ctlCtx) {
+ resp, err := getMemberList(cx, false)
+ require.NoError(cx.t, err)
+ require.Len(cx.t, resp.Members, 1)
+
+ peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11)
+ err = ctlV3MemberAdd(cx, peerURL, false)
+ require.NoError(cx.t, err)
+
+ resp, err = getMemberList(cx, true)
+ require.NoError(cx.t, err)
+ require.Len(cx.t, resp.Members, 2)
+}
+
func ctlV3MemberList(cx ctlCtx) error {
cmdArgs := append(cx.PrefixArgs(), "member", "list")
- lines := make([]string, cx.cfg.clusterSize)
+ lines := make([]expect.ExpectedResponse, cx.cfg.ClusterSize)
for i := range lines {
- lines[i] = "started"
+ lines[i] = expect.ExpectedResponse{Value: "started"}
}
- return spawnWithExpects(cmdArgs, lines...)
+ return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...)
}
-func getMemberList(cx ctlCtx) (etcdserverpb.MemberListResponse, error) {
+func getMemberList(cx ctlCtx, serializable bool) (etcdserverpb.MemberListResponse, error) {
cmdArgs := append(cx.PrefixArgs(), "--write-out", "json", "member", "list")
+ if serializable {
+ cmdArgs = append(cmdArgs, "--consistency", "s")
+ }
- proc, err := spawnCmd(cmdArgs)
+ proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
if err != nil {
return etcdserverpb.MemberListResponse{}, err
}
@@ -116,21 +197,21 @@ func getMemberList(cx ctlCtx) (etcdserverpb.MemberListResponse, error) {
resp := etcdserverpb.MemberListResponse{}
dec := json.NewDecoder(strings.NewReader(txt))
- if err := dec.Decode(&resp); err == io.EOF {
+ if err := dec.Decode(&resp); errors.Is(err, io.EOF) {
return etcdserverpb.MemberListResponse{}, err
}
return resp, nil
}
func memberListWithHexTest(cx ctlCtx) {
- resp, err := getMemberList(cx)
+ resp, err := getMemberList(cx, false)
if err != nil {
cx.t.Fatalf("getMemberList error (%v)", err)
}
cmdArgs := append(cx.PrefixArgs(), "--write-out", "json", "--hex", "member", "list")
- proc, err := spawnCmd(cmdArgs)
+ proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
if err != nil {
cx.t.Fatalf("memberListWithHexTest error (%v)", err)
}
@@ -144,7 +225,7 @@ func memberListWithHexTest(cx ctlCtx) {
}
hexResp := etcdserverpb.MemberListResponse{}
dec := json.NewDecoder(strings.NewReader(txt))
- if err := dec.Decode(&hexResp); err == io.EOF {
+ if err := dec.Decode(&hexResp); errors.Is(err, io.EOF) {
cx.t.Fatalf("memberListWithHexTest error (%v)", err)
}
num := len(resp.Members)
@@ -155,65 +236,71 @@ func memberListWithHexTest(cx ctlCtx) {
if num == 0 {
cx.t.Fatal("member number is 0")
}
+
+ if resp.Header.RaftTerm != hexResp.Header.RaftTerm {
+ cx.t.Fatalf("Unexpected raft_term, expected %d, got %d", resp.Header.RaftTerm, hexResp.Header.RaftTerm)
+ }
+
for i := 0; i < num; i++ {
if resp.Members[i].Name != hexResp.Members[i].Name {
- cx.t.Fatalf("member name,expected %v,got %v", resp.Members[i].Name, hexResp.Members[i].Name)
+ cx.t.Fatalf("Unexpected member name,expected %v, got %v", resp.Members[i].Name, hexResp.Members[i].Name)
}
if !reflect.DeepEqual(resp.Members[i].PeerURLs, hexResp.Members[i].PeerURLs) {
- cx.t.Fatalf("member peerURLs,expected %v,got %v", resp.Members[i].PeerURLs, hexResp.Members[i].PeerURLs)
+ cx.t.Fatalf("Unexpected member peerURLs, expected %v, got %v", resp.Members[i].PeerURLs, hexResp.Members[i].PeerURLs)
}
if !reflect.DeepEqual(resp.Members[i].ClientURLs, hexResp.Members[i].ClientURLs) {
- cx.t.Fatalf("member clientURLS,expected %v,got %v", resp.Members[i].ClientURLs, hexResp.Members[i].ClientURLs)
+ cx.t.Fatalf("Unexpected member clientURLs, expected %v, got %v", resp.Members[i].ClientURLs, hexResp.Members[i].ClientURLs)
}
}
}
-func memberRemoveTest(cx ctlCtx) {
- ep, memIDToRemove, clusterID := cx.memberToRemove()
- if err := ctlV3MemberRemove(cx, ep, memIDToRemove, clusterID); err != nil {
- cx.t.Fatal(err)
- }
-}
-
-func ctlV3MemberRemove(cx ctlCtx, ep, memberID, clusterID string) error {
- cmdArgs := append(cx.prefixArgs([]string{ep}), "member", "remove", memberID)
- return spawnWithExpect(cmdArgs, fmt.Sprintf("%s removed from cluster %s", memberID, clusterID))
-}
-
func memberAddTest(cx ctlCtx) {
- if err := ctlV3MemberAdd(cx, fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+11), false); err != nil {
- cx.t.Fatal(err)
- }
+ peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11)
+ require.NoError(cx.t, ctlV3MemberAdd(cx, peerURL, false))
}
-func memberAddForLearnerTest(cx ctlCtx) {
- if err := ctlV3MemberAdd(cx, fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+11), true); err != nil {
- cx.t.Fatal(err)
- }
+func memberAddAsLearnerTest(cx ctlCtx) {
+ peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11)
+ require.NoError(cx.t, ctlV3MemberAdd(cx, peerURL, true))
}
func ctlV3MemberAdd(cx ctlCtx, peerURL string, isLearner bool) error {
cmdArgs := append(cx.PrefixArgs(), "member", "add", "newmember", fmt.Sprintf("--peer-urls=%s", peerURL))
+ asLearner := " "
if isLearner {
cmdArgs = append(cmdArgs, "--learner")
+ asLearner = " as learner "
}
- return spawnWithExpect(cmdArgs, " added to cluster ")
+ return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, expect.ExpectedResponse{Value: fmt.Sprintf(" added%sto cluster ", asLearner)})
}
func memberUpdateTest(cx ctlCtx) {
- mr, err := getMemberList(cx)
- if err != nil {
- cx.t.Fatal(err)
- }
+ mr, err := getMemberList(cx, false)
+ require.NoError(cx.t, err)
- peerURL := fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+11)
+ peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11)
memberID := fmt.Sprintf("%x", mr.Members[0].ID)
- if err = ctlV3MemberUpdate(cx, memberID, peerURL); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3MemberUpdate(cx, memberID, peerURL))
}
func ctlV3MemberUpdate(cx ctlCtx, memberID, peerURL string) error {
cmdArgs := append(cx.PrefixArgs(), "member", "update", memberID, fmt.Sprintf("--peer-urls=%s", peerURL))
- return spawnWithExpect(cmdArgs, " updated in cluster ")
+ return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, expect.ExpectedResponse{Value: " updated in cluster "})
+}
+
+func TestRemoveNonExistingMember(t *testing.T) {
+ e2e.BeforeTest(t)
+ ctx := context.Background()
+
+ cfg := e2e.ConfigStandalone(*e2e.NewConfig())
+ epc, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithConfig(cfg))
+ require.NoError(t, err)
+ defer epc.Close()
+ c := epc.Etcdctl()
+
+ _, err = c.MemberRemove(ctx, 1)
+ require.Error(t, err)
+
+ // Ensure that membership is properly bootstrapped.
+ assert.NoError(t, epc.Restart(ctx))
}
diff --git a/tests/e2e/ctl_v3_move_leader_test.go b/tests/e2e/ctl_v3_move_leader_test.go
index 08abc37e26b..cc3e8077443 100644
--- a/tests/e2e/ctl_v3_move_leader_test.go
+++ b/tests/e2e/ctl_v3_move_leader_test.go
@@ -18,26 +18,43 @@ import (
"context"
"crypto/tls"
"fmt"
- "os"
"testing"
"time"
+ "github.com/stretchr/testify/require"
+
"go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
)
-func TestCtlV3MoveLeaderSecure(t *testing.T) {
- testCtlV3MoveLeader(t, *newConfigTLS())
-}
+func TestCtlV3MoveLeaderScenarios(t *testing.T) {
+ securityParent := map[string]struct {
+ cfg e2e.EtcdProcessClusterConfig
+ }{
+ "Secure": {cfg: *e2e.NewConfigTLS()},
+ "Insecure": {cfg: *e2e.NewConfigNoTLS()},
+ }
-func TestCtlV3MoveLeaderInsecure(t *testing.T) {
- testCtlV3MoveLeader(t, *newConfigNoTLS())
-}
+ tests := map[string]struct {
+ env map[string]string
+ }{
+ "happy path": {env: map[string]string{}},
+ "with env": {env: map[string]string{"ETCDCTL_ENDPOINTS": "something-else-is-set"}},
+ }
-func testCtlV3MoveLeader(t *testing.T, cfg etcdProcessClusterConfig) {
- BeforeTest(t)
+ for testName, tc := range securityParent {
+ for subTestName, tx := range tests {
+ t.Run(testName+" "+subTestName, func(t *testing.T) {
+ testCtlV3MoveLeader(t, tc.cfg, tx.env)
+ })
+ }
+ }
+}
+func testCtlV3MoveLeader(t *testing.T, cfg e2e.EtcdProcessClusterConfig, envVars map[string]string) {
epc := setupEtcdctlTest(t, &cfg, true)
defer func() {
if errC := epc.Close(); errC != nil {
@@ -46,31 +63,27 @@ func testCtlV3MoveLeader(t *testing.T, cfg etcdProcessClusterConfig) {
}()
var tcfg *tls.Config
- if cfg.clientTLS == clientTLS {
+ if cfg.Client.ConnectionType == e2e.ClientTLS {
tinfo := transport.TLSInfo{
- CertFile: certPath,
- KeyFile: privateKeyPath,
- TrustedCAFile: caPath,
+ CertFile: e2e.CertPath,
+ KeyFile: e2e.PrivateKeyPath,
+ TrustedCAFile: e2e.CaPath,
}
var err error
tcfg, err = tinfo.ClientConfig()
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
}
var leadIdx int
var leaderID uint64
var transferee uint64
- for i, ep := range epc.EndpointsV3() {
+ for i, ep := range epc.EndpointsGRPC() {
cli, err := clientv3.New(clientv3.Config{
Endpoints: []string{ep},
DialTimeout: 3 * time.Second,
TLS: tcfg,
})
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
resp, err := cli.Status(ctx, ep)
if err != nil {
@@ -87,32 +100,54 @@ func testCtlV3MoveLeader(t *testing.T, cfg etcdProcessClusterConfig) {
}
}
- os.Setenv("ETCDCTL_API", "3")
- defer os.Unsetenv("ETCDCTL_API")
cx := ctlCtx{
t: t,
- cfg: *newConfigNoTLS(),
+ cfg: *e2e.NewConfigNoTLS(),
dialTimeout: 7 * time.Second,
epc: epc,
+ envMap: envVars,
}
tests := []struct {
- prefixes []string
- expect string
+ eps []string
+ expect string
+ expectErr bool
}{
{ // request to non-leader
- cx.prefixArgs([]string{cx.epc.EndpointsV3()[(leadIdx+1)%3]}),
+ []string{cx.epc.EndpointsGRPC()[(leadIdx+1)%3]},
"no leader endpoint given at ",
+ true,
},
{ // request to leader
- cx.prefixArgs([]string{cx.epc.EndpointsV3()[leadIdx]}),
+ []string{cx.epc.EndpointsGRPC()[leadIdx]},
fmt.Sprintf("Leadership transferred from %s to %s", types.ID(leaderID), types.ID(transferee)),
+ false,
+ },
+ { // request to all endpoints
+ cx.epc.EndpointsGRPC(),
+ "Leadership transferred",
+ false,
},
}
for i, tc := range tests {
- cmdArgs := append(tc.prefixes, "move-leader", types.ID(transferee).String())
- if err := spawnWithExpect(cmdArgs, tc.expect); err != nil {
- t.Fatalf("#%d: %v", i, err)
+ prefix := cx.prefixArgs(tc.eps)
+ cmdArgs := append(prefix, "move-leader", types.ID(transferee).String())
+ err := e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, expect.ExpectedResponse{Value: tc.expect})
+ if tc.expectErr {
+ require.ErrorContains(t, err, tc.expect)
+ } else {
+ require.NoErrorf(t, err, "#%d: %v", i, err)
}
}
}
+
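+// setupEtcdctlTest starts a new etcd process cluster, standalone when quorum is false.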
+func setupEtcdctlTest(t *testing.T, cfg *e2e.EtcdProcessClusterConfig, quorum bool) *e2e.EtcdProcessCluster {
+ if !quorum {
+ cfg = e2e.ConfigStandalone(*cfg)
+ }
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg))
+ if err != nil {
+ t.Fatalf("could not start etcd process cluster (%v)", err)
+ }
+ return epc
+}
diff --git a/tests/e2e/ctl_v3_role_test.go b/tests/e2e/ctl_v3_role_test.go
index fb4e5de606b..a007bd59903 100644
--- a/tests/e2e/ctl_v3_role_test.go
+++ b/tests/e2e/ctl_v3_role_test.go
@@ -17,15 +17,14 @@ package e2e
import (
"fmt"
"testing"
-)
-func TestCtlV3RoleAdd(t *testing.T) { testCtl(t, roleAddTest) }
-func TestCtlV3RoleAddNoTLS(t *testing.T) { testCtl(t, roleAddTest, withCfg(*newConfigNoTLS())) }
-func TestCtlV3RoleAddClientTLS(t *testing.T) { testCtl(t, roleAddTest, withCfg(*newConfigClientTLS())) }
-func TestCtlV3RoleAddPeerTLS(t *testing.T) { testCtl(t, roleAddTest, withCfg(*newConfigPeerTLS())) }
-func TestCtlV3RoleAddTimeout(t *testing.T) { testCtl(t, roleAddTest, withDialTimeout(0)) }
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
-func TestCtlV3RoleGrant(t *testing.T) { testCtl(t, roleGrantTest) }
+// TestCtlV3RoleAddTimeout tests adding a role with a 0 gRPC dial timeout while tolerating dial timeout errors.
+// This scenario is unique to the e2e tests.
+func TestCtlV3RoleAddTimeout(t *testing.T) { testCtl(t, roleAddTest, withDefaultDialTimeout()) }
func roleAddTest(cx ctlCtx) {
cmdSet := []struct {
@@ -53,50 +52,11 @@ func roleAddTest(cx ctlCtx) {
}
}
-func roleGrantTest(cx ctlCtx) {
- cmdSet := []struct {
- args []string
- expectedStr string
- }{
- // Add a role.
- {
- args: []string{"add", "root"},
- expectedStr: "Role root created",
- },
- // Grant read permission to the role.
- {
- args: []string{"grant", "root", "read", "foo"},
- expectedStr: "Role root updated",
- },
- // Grant write permission to the role.
- {
- args: []string{"grant", "root", "write", "foo"},
- expectedStr: "Role root updated",
- },
- // Grant rw permission to the role.
- {
- args: []string{"grant", "root", "readwrite", "foo"},
- expectedStr: "Role root updated",
- },
- // Try granting invalid permission to the role.
- {
- args: []string{"grant", "root", "123", "foo"},
- expectedStr: "invalid permission type",
- },
- }
-
- for i, cmd := range cmdSet {
- if err := ctlV3Role(cx, cmd.args, cmd.expectedStr); err != nil {
- cx.t.Fatalf("roleGrantTest #%d: ctlV3Role error (%v)", i, err)
- }
- }
-}
-
func ctlV3Role(cx ctlCtx, args []string, expStr string) error {
cmdArgs := append(cx.PrefixArgs(), "role")
cmdArgs = append(cmdArgs, args...)
- return spawnWithExpect(cmdArgs, expStr)
+ return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, expect.ExpectedResponse{Value: expStr})
}
func ctlV3RoleGrantPermission(cx ctlCtx, rolename string, perm grantingPerm) error {
@@ -110,7 +70,7 @@ func ctlV3RoleGrantPermission(cx ctlCtx, rolename string, perm grantingPerm) err
cmdArgs = append(cmdArgs, rolename)
cmdArgs = append(cmdArgs, grantingPermToArgs(perm)...)
- proc, err := spawnCmd(cmdArgs)
+ proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
if err != nil {
return err
}
@@ -136,7 +96,7 @@ func ctlV3RoleRevokePermission(cx ctlCtx, rolename string, key, rangeEnd string,
expStr = fmt.Sprintf("Permission of key %s is revoked from role %s", key, rolename)
}
- proc, err := spawnCmd(cmdArgs)
+ proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
if err != nil {
return err
}
diff --git a/tests/e2e/ctl_v3_snapshot_test.go b/tests/e2e/ctl_v3_snapshot_test.go
index ce172f4c18e..5f145abab2c 100644
--- a/tests/e2e/ctl_v3_snapshot_test.go
+++ b/tests/e2e/ctl_v3_snapshot_test.go
@@ -15,21 +15,30 @@
package e2e
import (
+ "context"
"encoding/json"
+ "errors"
"fmt"
"io"
"os"
+ "path"
"path/filepath"
"strings"
"testing"
"time"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ v3rpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/etcdutl/v3/snapshot"
"go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
)
-func TestCtlV3Snapshot(t *testing.T) { testCtl(t, snapshotTest) }
-func TestCtlV3SnapshotEtcdutl(t *testing.T) { testCtl(t, snapshotTest, withEtcdutl()) }
+func TestCtlV3Snapshot(t *testing.T) { testCtl(t, snapshotTest) }
func snapshotTest(cx ctlCtx) {
maintenanceInitKeys(cx)
@@ -61,8 +70,7 @@ func snapshotTest(cx ctlCtx) {
}
}
-func TestCtlV3SnapshotCorrupt(t *testing.T) { testCtl(t, snapshotCorruptTest) }
-func TestCtlV3SnapshotCorruptEtcdutl(t *testing.T) { testCtl(t, snapshotCorruptTest, withEtcdutl()) }
+func TestCtlV3SnapshotCorrupt(t *testing.T) { testCtl(t, snapshotCorruptTest) }
func snapshotCorruptTest(cx ctlCtx) {
fpath := filepath.Join(cx.t.TempDir(), "snapshot")
@@ -74,31 +82,26 @@ func snapshotCorruptTest(cx ctlCtx) {
// corrupt file
f, oerr := os.OpenFile(fpath, os.O_WRONLY, 0)
- if oerr != nil {
- cx.t.Fatal(oerr)
- }
- if _, err := f.Write(make([]byte, 512)); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, oerr)
+ _, err := f.Write(make([]byte, 512))
+ require.NoError(cx.t, err)
f.Close()
datadir := cx.t.TempDir()
- serr := spawnWithExpect(
+ serr := e2e.SpawnWithExpectWithEnv(
append(cx.PrefixArgsUtl(), "snapshot", "restore",
"--data-dir", datadir,
fpath),
- "expected sha256")
-
- if serr != nil {
- cx.t.Fatal(serr)
- }
+ cx.envMap,
+ expect.ExpectedResponse{Value: "expected sha256"})
+ require.ErrorContains(cx.t, serr, "Error: expected sha256")
}
-// This test ensures that the snapshot status does not modify the snapshot file
-func TestCtlV3SnapshotStatusBeforeRestore(t *testing.T) { testCtl(t, snapshotStatusBeforeRestoreTest) }
-func TestCtlV3SnapshotStatusBeforeRestoreEtcdutl(t *testing.T) {
- testCtl(t, snapshotStatusBeforeRestoreTest, withEtcdutl())
+// TestCtlV3SnapshotStatusBeforeRestore ensures that the snapshot
+// status does not modify the snapshot file
+func TestCtlV3SnapshotStatusBeforeRestore(t *testing.T) {
+ testCtl(t, snapshotStatusBeforeRestoreTest)
}
func snapshotStatusBeforeRestoreTest(cx ctlCtx) {
@@ -117,25 +120,24 @@ func snapshotStatusBeforeRestoreTest(cx ctlCtx) {
dataDir := cx.t.TempDir()
defer os.RemoveAll(dataDir)
- serr := spawnWithExpect(
+ serr := e2e.SpawnWithExpectWithEnv(
append(cx.PrefixArgsUtl(), "snapshot", "restore",
"--data-dir", dataDir,
fpath),
- "added member")
- if serr != nil {
- cx.t.Fatal(serr)
- }
+ cx.envMap,
+ expect.ExpectedResponse{Value: "added member"})
+ require.NoError(cx.t, serr)
}
func ctlV3SnapshotSave(cx ctlCtx, fpath string) error {
cmdArgs := append(cx.PrefixArgs(), "snapshot", "save", fpath)
- return spawnWithExpect(cmdArgs, fmt.Sprintf("Snapshot saved at %s", fpath))
+ return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, expect.ExpectedResponse{Value: fmt.Sprintf("Snapshot saved at %s", fpath)})
}
func getSnapshotStatus(cx ctlCtx, fpath string) (snapshot.Status, error) {
cmdArgs := append(cx.PrefixArgsUtl(), "--write-out", "json", "snapshot", "status", fpath)
- proc, err := spawnCmd(cmdArgs)
+ proc, err := e2e.SpawnCmd(cmdArgs, nil)
if err != nil {
return snapshot.Status{}, err
}
@@ -150,18 +152,17 @@ func getSnapshotStatus(cx ctlCtx, fpath string) (snapshot.Status, error) {
resp := snapshot.Status{}
dec := json.NewDecoder(strings.NewReader(txt))
- if err := dec.Decode(&resp); err == io.EOF {
+ if err := dec.Decode(&resp); errors.Is(err, io.EOF) {
return snapshot.Status{}, err
}
return resp, nil
}
-func TestIssue6361(t *testing.T) { testIssue6361(t, false) }
-func TestIssue6361etcdutl(t *testing.T) { testIssue6361(t, true) }
+func TestIssue6361(t *testing.T) { testIssue6361(t) }
// TestIssue6361 ensures that a new member that starts from a snapshot correctly
// syncs up with other members and serves correct data.
-func testIssue6361(t *testing.T, etcdutl bool) {
+func testIssue6361(t *testing.T) {
{
// This test is pretty flaky on semaphoreci as of 2021-01-10.
// TODO: Remove when the flakiness source is identified.
@@ -170,15 +171,12 @@ func testIssue6361(t *testing.T, etcdutl bool) {
os.Setenv("EXPECT_DEBUG", "1")
}
- BeforeTest(t)
- os.Setenv("ETCDCTL_API", "3")
- defer os.Unsetenv("ETCDCTL_API")
+ e2e.BeforeTest(t)
- epc, err := newEtcdProcessCluster(t, &etcdProcessClusterConfig{
- clusterSize: 1,
- initialToken: "new",
- keepDataDir: true,
- })
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+ e2e.WithClusterSize(1),
+ e2e.WithKeepDataDir(true),
+ )
if err != nil {
t.Fatalf("could not start etcd process cluster (%v)", err)
}
@@ -189,99 +187,304 @@ func testIssue6361(t *testing.T, etcdutl bool) {
}()
dialTimeout := 10 * time.Second
- prefixArgs := []string{ctlBinPath, "--endpoints", strings.Join(epc.EndpointsV3(), ","), "--dial-timeout", dialTimeout.String()}
+ prefixArgs := []string{e2e.BinPath.Etcdctl, "--endpoints", strings.Join(epc.EndpointsGRPC(), ","), "--dial-timeout", dialTimeout.String()}
t.Log("Writing some keys...")
kvs := []kv{{"foo1", "val1"}, {"foo2", "val2"}, {"foo3", "val3"}}
for i := range kvs {
- if err = spawnWithExpect(append(prefixArgs, "put", kvs[i].key, kvs[i].val), "OK"); err != nil {
- t.Fatal(err)
- }
+ err = e2e.SpawnWithExpect(append(prefixArgs, "put", kvs[i].key, kvs[i].val), expect.ExpectedResponse{Value: "OK"})
+ require.NoError(t, err)
}
fpath := filepath.Join(t.TempDir(), "test.snapshot")
t.Log("etcdctl saving snapshot...")
- if err = spawnWithExpect(append(prefixArgs, "snapshot", "save", fpath), fmt.Sprintf("Snapshot saved at %s", fpath)); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, e2e.SpawnWithExpects(append(prefixArgs, "snapshot", "save", fpath),
+ nil,
+ expect.ExpectedResponse{Value: fmt.Sprintf("Snapshot saved at %s", fpath)},
+ ))
t.Log("Stopping the original server...")
- if err = epc.procs[0].Stop(); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, epc.Procs[0].Stop())
newDataDir := filepath.Join(t.TempDir(), "test.data")
-
- uctlBinPath := ctlBinPath
- if etcdutl {
- uctlBinPath = utlBinPath
- }
-
t.Log("etcdctl restoring the snapshot...")
- err = spawnWithExpect([]string{uctlBinPath, "snapshot", "restore", fpath, "--name", epc.procs[0].Config().name, "--initial-cluster", epc.procs[0].Config().initialCluster, "--initial-cluster-token", epc.procs[0].Config().initialToken, "--initial-advertise-peer-urls", epc.procs[0].Config().purl.String(), "--data-dir", newDataDir}, "added member")
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, e2e.SpawnWithExpect([]string{
+ e2e.BinPath.Etcdutl, "snapshot", "restore", fpath,
+ "--name", epc.Procs[0].Config().Name,
+ "--initial-cluster", epc.Procs[0].Config().InitialCluster,
+ "--initial-cluster-token", epc.Procs[0].Config().InitialToken,
+ "--initial-advertise-peer-urls", epc.Procs[0].Config().PeerURL.String(),
+ "--data-dir", newDataDir,
+ },
+ expect.ExpectedResponse{Value: "added member"}))
t.Log("(Re)starting the etcd member using the restored snapshot...")
- epc.procs[0].Config().dataDirPath = newDataDir
- for i := range epc.procs[0].Config().args {
- if epc.procs[0].Config().args[i] == "--data-dir" {
- epc.procs[0].Config().args[i+1] = newDataDir
+ epc.Procs[0].Config().DataDirPath = newDataDir
+ for i := range epc.Procs[0].Config().Args {
+ if epc.Procs[0].Config().Args[i] == "--data-dir" {
+ epc.Procs[0].Config().Args[i+1] = newDataDir
}
}
- if err = epc.procs[0].Restart(); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, epc.Procs[0].Restart(context.TODO()))
t.Log("Ensuring the restored member has the correct data...")
for i := range kvs {
- if err = spawnWithExpect(append(prefixArgs, "get", kvs[i].key), kvs[i].val); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, e2e.SpawnWithExpect(append(prefixArgs, "get", kvs[i].key), expect.ExpectedResponse{Value: kvs[i].val}))
}
t.Log("Adding new member into the cluster")
- clientURL := fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+30)
- peerURL := fmt.Sprintf("http://localhost:%d", etcdProcessBasePort+31)
- err = spawnWithExpect(append(prefixArgs, "member", "add", "newmember", fmt.Sprintf("--peer-urls=%s", peerURL)), " added to cluster ")
- if err != nil {
- t.Fatal(err)
- }
+ clientURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+30)
+ peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+31)
+ require.NoError(t, e2e.SpawnWithExpect(append(prefixArgs, "member", "add", "newmember", fmt.Sprintf("--peer-urls=%s", peerURL)), expect.ExpectedResponse{Value: " added to cluster "}))
newDataDir2 := t.TempDir()
defer os.RemoveAll(newDataDir2)
name2 := "infra2"
- initialCluster2 := epc.procs[0].Config().initialCluster + fmt.Sprintf(",%s=%s", name2, peerURL)
+ initialCluster2 := epc.Procs[0].Config().InitialCluster + fmt.Sprintf(",%s=%s", name2, peerURL)
t.Log("Starting the new member")
// start the new member
var nepc *expect.ExpectProcess
- nepc, err = spawnCmd([]string{epc.procs[0].Config().execPath, "--name", name2,
+ nepc, err = e2e.SpawnCmd([]string{
+ epc.Procs[0].Config().ExecPath, "--name", name2,
"--listen-client-urls", clientURL, "--advertise-client-urls", clientURL,
"--listen-peer-urls", peerURL, "--initial-advertise-peer-urls", peerURL,
- "--initial-cluster", initialCluster2, "--initial-cluster-state", "existing", "--data-dir", newDataDir2})
+ "--initial-cluster", initialCluster2, "--initial-cluster-state", "existing", "--data-dir", newDataDir2,
+ }, nil)
+ require.NoError(t, err)
+ _, err = nepc.Expect("ready to serve client requests")
+ require.NoError(t, err)
+
+ prefixArgs = []string{e2e.BinPath.Etcdctl, "--endpoints", clientURL, "--dial-timeout", dialTimeout.String()}
+
+ t.Log("Ensuring added member has data from incoming snapshot...")
+ for i := range kvs {
+ require.NoError(t, e2e.SpawnWithExpect(append(prefixArgs, "get", kvs[i].key), expect.ExpectedResponse{Value: kvs[i].val}))
+ }
+
+ t.Log("Stopping the second member")
+ require.NoError(t, nepc.Stop())
+ t.Log("Test logic done")
+}
+
+// TestCtlV3SnapshotVersion verifies that the storage version is persisted. For it to be
+// stored, all fields expected in 3.6 need to be set, which only happens after the first
+// WAL snapshot. The test lowers SnapshotCount to 1 to ensure a WAL snapshot is triggered.
+func TestCtlV3SnapshotVersion(t *testing.T) {
+ testCtl(t, snapshotVersionTest, withCfg(*e2e.NewConfig(e2e.WithSnapshotCount(1))))
+}
+
+func snapshotVersionTest(cx ctlCtx) {
+ maintenanceInitKeys(cx)
+
+ fpath := filepath.Join(cx.t.TempDir(), "snapshot")
+ defer os.RemoveAll(fpath)
+
+ if err := ctlV3SnapshotSave(cx, fpath); err != nil {
+ cx.t.Fatalf("snapshotVersionTest ctlV3SnapshotSave error (%v)", err)
+ }
+
+ st, err := getSnapshotStatus(cx, fpath)
if err != nil {
- t.Fatal(err)
+ cx.t.Fatalf("snapshotVersionTest getSnapshotStatus error (%v)", err)
}
- if _, err = nepc.Expect("ready to serve client requests"); err != nil {
- t.Fatal(err)
+ if st.Version != "3.6.0" {
+ cx.t.Fatalf("expected %q, got %q", "3.6.0", st.Version)
}
+}
- prefixArgs = []string{ctlBinPath, "--endpoints", clientURL, "--dial-timeout", dialTimeout.String()}
+func TestRestoreCompactionRevBump(t *testing.T) {
+ e2e.BeforeTest(t)
- t.Log("Ensuring added member has data from incoming snapshot...")
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+ e2e.WithClusterSize(1),
+ e2e.WithKeepDataDir(true),
+ )
+ if err != nil {
+ t.Fatalf("could not start etcd process cluster (%v)", err)
+ }
+ defer func() {
+ if errC := epc.Close(); errC != nil {
+ t.Fatalf("error closing etcd processes (%v)", errC)
+ }
+ }()
+
+ ctl := epc.Etcdctl()
+
+ watchCh := ctl.Watch(context.Background(), "foo", config.WatchOptions{Prefix: true})
+ // flake-fix: the watch can sometimes miss the first put below, causing a test failure
+ time.Sleep(100 * time.Millisecond)
+
+ kvs := []testutils.KV{{Key: "foo1", Val: "val1"}, {Key: "foo2", Val: "val2"}, {Key: "foo3", Val: "val3"}}
for i := range kvs {
- if err = spawnWithExpect(append(prefixArgs, "get", kvs[i].key), kvs[i].val); err != nil {
- t.Fatal(err)
+ require.NoError(t, ctl.Put(context.Background(), kvs[i].Key, kvs[i].Val, config.PutOptions{}))
+ }
+
+ watchTimeout := 1 * time.Second
+ watchRes, err := testutils.KeyValuesFromWatchChan(watchCh, len(kvs), watchTimeout)
+ require.NoErrorf(t, err, "failed to get key-values from watch channel %s", err)
+ require.Equal(t, kvs, watchRes)
+
+ // ensure we get the right revision back for each of the keys
+ currentRev := 4
+ baseRev := 2
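+ // The three puts above land at revisions 2, 3 and 4, hence baseRev 2 and currentRev 4.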
+ hasKVs(t, ctl, kvs, currentRev, baseRev)
+
+ fpath := filepath.Join(t.TempDir(), "test.snapshot")
+
+ t.Log("etcdctl saving snapshot...")
+ cmdPrefix := []string{e2e.BinPath.Etcdctl, "--endpoints", strings.Join(epc.EndpointsGRPC(), ",")}
+ require.NoError(t, e2e.SpawnWithExpects(append(cmdPrefix, "snapshot", "save", fpath), nil, expect.ExpectedResponse{Value: fmt.Sprintf("Snapshot saved at %s", fpath)}))
+
+ // add some more kvs that are not in the snapshot and will be lost after the restore
+ unsnappedKVs := []testutils.KV{{Key: "unsnapped1", Val: "one"}, {Key: "unsnapped2", Val: "two"}, {Key: "unsnapped3", Val: "three"}}
+ for i := range unsnappedKVs {
+ require.NoError(t, ctl.Put(context.Background(), unsnappedKVs[i].Key, unsnappedKVs[i].Val, config.PutOptions{}))
+ }
+
+ membersBefore, err := ctl.MemberList(context.Background(), false)
+ require.NoError(t, err)
+
+ t.Log("Stopping the original server...")
+ require.NoError(t, epc.Stop())
+
+ newDataDir := filepath.Join(t.TempDir(), "test.data")
+ t.Log("etcdctl restoring the snapshot...")
+ bumpAmount := 10000
+ require.NoError(t, e2e.SpawnWithExpect([]string{
+ e2e.BinPath.Etcdutl,
+ "snapshot",
+ "restore", fpath,
+ "--name", epc.Procs[0].Config().Name,
+ "--initial-cluster", epc.Procs[0].Config().InitialCluster,
+ "--initial-cluster-token", epc.Procs[0].Config().InitialToken,
+ "--initial-advertise-peer-urls", epc.Procs[0].Config().PeerURL.String(),
+ "--bump-revision", fmt.Sprintf("%d", bumpAmount),
+ "--mark-compacted",
+ "--data-dir", newDataDir,
+ }, expect.ExpectedResponse{Value: "added member"}))
+
+ t.Log("(Re)starting the etcd member using the restored snapshot...")
+ epc.Procs[0].Config().DataDirPath = newDataDir
+
+ for i := range epc.Procs[0].Config().Args {
+ if epc.Procs[0].Config().Args[i] == "--data-dir" {
+ epc.Procs[0].Config().Args[i+1] = newDataDir
}
}
- t.Log("Stopping the second member")
- if err = nepc.Stop(); err != nil {
- t.Fatal(err)
+ // Verify that the initial snapshot is created by the restore operation
+ verifySnapshotMembers(t, epc, membersBefore)
+
+ require.NoError(t, epc.Restart(context.Background()))
+
+ t.Log("Ensuring the restored member has the correct data...")
+ hasKVs(t, ctl, kvs, currentRev, baseRev)
+ for i := range unsnappedKVs {
+ v, gerr := ctl.Get(context.Background(), unsnappedKVs[i].Key, config.GetOptions{})
+ require.NoError(t, gerr)
+ require.Equal(t, int64(0), v.Count)
+ }
+
+ cancelResult, ok := <-watchCh
+ require.Truef(t, ok, "watchChannel should be open")
+ require.Equal(t, v3rpc.ErrCompacted, cancelResult.Err())
+ require.Truef(t, cancelResult.Canceled, "expected ongoing watch to be cancelled after restoring with --mark-compacted")
+ require.Equal(t, int64(bumpAmount+currentRev), cancelResult.CompactRevision)
+ _, ok = <-watchCh
+ require.Falsef(t, ok, "watchChannel should be closed after restoring with --mark-compacted")
+
+ // clients might restart the watch at the old base revision; that should not yield any new data:
+ // everything up until bumpAmount+currentRev should return "already compacted"
+ for i := bumpAmount - 2; i < bumpAmount+currentRev; i++ {
+ watchCh = ctl.Watch(context.Background(), "foo", config.WatchOptions{Prefix: true, Revision: int64(i)})
+ cancelResult := <-watchCh
+ require.Equal(t, v3rpc.ErrCompacted, cancelResult.Err())
+ require.Truef(t, cancelResult.Canceled, "expected ongoing watch to be cancelled after restoring with --mark-compacted")
+ require.Equal(t, int64(bumpAmount+currentRev), cancelResult.CompactRevision)
+ }
+
+ // a watch after that revision should yield successful results when a new put arrives
+ ctx, cancel := context.WithTimeout(context.Background(), watchTimeout*5)
+ defer cancel()
+ watchCh = ctl.Watch(ctx, "foo", config.WatchOptions{Prefix: true, Revision: int64(bumpAmount + currentRev + 1)})
+ require.NoError(t, ctl.Put(context.Background(), "foo4", "val4", config.PutOptions{}))
+ watchRes, err = testutils.KeyValuesFromWatchChan(watchCh, 1, watchTimeout)
+ require.NoErrorf(t, err, "failed to get key-values from watch channel %s", err)
+ require.Equal(t, []testutils.KV{{Key: "foo4", Val: "val4"}}, watchRes)
+}
+
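+// hasKVs asserts that each key is present exactly once with the expected value, version and revisions.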
+func hasKVs(t *testing.T, ctl *e2e.EtcdctlV3, kvs []testutils.KV, currentRev int, baseRev int) {
+ for i := range kvs {
+ v, err := ctl.Get(context.Background(), kvs[i].Key, config.GetOptions{})
+ require.NoError(t, err)
+ require.Equal(t, int64(1), v.Count)
+ require.Equal(t, kvs[i].Val, string(v.Kvs[0].Value))
+ require.Equal(t, int64(baseRev+i), v.Kvs[0].CreateRevision)
+ require.Equal(t, int64(baseRev+i), v.Kvs[0].ModRevision)
+ require.Equal(t, int64(1), v.Kvs[0].Version)
+ require.GreaterOrEqual(t, int64(currentRev), v.Kvs[0].ModRevision)
}
- t.Log("Test logic done")
+}
+
+func TestBreakConsistentIndexNewerThanSnapshot(t *testing.T) {
+ e2e.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
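+ // Keep the snapshot count low so the server takes a WAL snapshot after only a few writes.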
+ var snapshotCount uint64 = 50
+ epc, err := e2e.NewEtcdProcessCluster(ctx, t,
+ e2e.WithClusterSize(1),
+ e2e.WithKeepDataDir(true),
+ e2e.WithSnapshotCount(snapshotCount),
+ )
+ require.NoError(t, err)
+ defer epc.Close()
+ member := epc.Procs[0]
+
+ t.Log("Stop member and copy out the db file to tmp directory")
+ err = member.Stop()
+ require.NoError(t, err)
+ dbPath := path.Join(member.Config().DataDirPath, "member", "snap", "db")
+ tmpFile := path.Join(t.TempDir(), "db")
+ err = copyFile(dbPath, tmpFile)
+ require.NoError(t, err)
+
+ t.Log("Ensure snapshot there is a newer snapshot")
+ err = member.Start(ctx)
+ require.NoError(t, err)
+ generateSnapshot(t, snapshotCount, member.Etcdctl())
+ _, err = member.Logs().ExpectWithContext(ctx, expect.ExpectedResponse{Value: "saved snapshot"})
+ require.NoError(t, err)
+ err = member.Stop()
+ require.NoError(t, err)
+
+ t.Log("Start etcd with older db file")
+ err = copyFile(tmpFile, dbPath)
+ require.NoError(t, err)
+ err = member.Start(ctx)
+ require.Error(t, err)
+ _, err = member.Logs().ExpectWithContext(ctx, expect.ExpectedResponse{Value: "failed to find database snapshot file (snap: snapshot file doesn't exist)"})
+ assert.NoError(t, err)
+}
+
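+// copyFile copies src to dst and syncs the destination file to disk.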
+func copyFile(src, dst string) error {
+ f, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ w, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer w.Close()
+
+ if _, err = io.Copy(w, f); err != nil {
+ return err
+ }
+ return w.Sync()
}
diff --git a/tests/e2e/ctl_v3_test.go b/tests/e2e/ctl_v3_test.go
index 5c8bb2fe95a..2cd112dc604 100644
--- a/tests/e2e/ctl_v3_test.go
+++ b/tests/e2e/ctl_v3_test.go
@@ -15,6 +15,7 @@
package e2e
import (
+ "context"
"fmt"
"os"
"strings"
@@ -22,16 +23,19 @@ import (
"time"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
"go.etcd.io/etcd/api/v3/version"
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/client/pkg/v3/testutil"
+ "go.etcd.io/etcd/pkg/v3/expect"
"go.etcd.io/etcd/pkg/v3/flags"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
)
func TestCtlV3Version(t *testing.T) { testCtl(t, versionTest) }
func TestClusterVersion(t *testing.T) {
- BeforeTest(t)
+ e2e.BeforeTest(t)
tests := []struct {
name string
@@ -49,18 +53,14 @@ func TestClusterVersion(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- binary := binDir + "/etcd"
- if !fileutil.Exist(binary) {
- t.Skipf("%q does not exist", binary)
- }
- BeforeTest(t)
- cfg := newConfigNoTLS()
- cfg.execPath = binary
- cfg.snapshotCount = 3
- cfg.baseScheme = "unix" // to avoid port conflict
- cfg.rollingStart = tt.rollingStart
-
- epc, err := newEtcdProcessCluster(t, cfg)
+ e2e.BeforeTest(t)
+ cfg := e2e.NewConfig(
+ e2e.WithSnapshotCount(3),
+ e2e.WithBasePeerScheme("unix"), // to avoid port conflict
+ e2e.WithRollingStart(tt.rollingStart),
+ )
+
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg))
if err != nil {
t.Fatalf("could not start etcd process cluster (%v)", err)
}
@@ -90,7 +90,7 @@ func versionTest(cx ctlCtx) {
func clusterVersionTest(cx ctlCtx, expected string) {
var err error
for i := 0; i < 35; i++ {
- if err = cURLGet(cx.epc, cURLReq{endpoint: "/version", expected: expected}); err != nil {
+ if err = e2e.CURLGet(cx.epc, e2e.CURLReq{Endpoint: "/version", Expected: expect.ExpectedResponse{Value: expected}}); err != nil {
cx.t.Logf("#%d: v3 is not ready yet (%v)", i, err)
time.Sleep(200 * time.Millisecond)
continue
@@ -104,34 +104,32 @@ func clusterVersionTest(cx ctlCtx, expected string) {
func ctlV3Version(cx ctlCtx) error {
cmdArgs := append(cx.PrefixArgs(), "version")
- return spawnWithExpect(cmdArgs, version.Version)
+ return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, expect.ExpectedResponse{Value: version.Version})
}
-// TestCtlV3DialWithHTTPScheme ensures that client handles endpoints with HTTPS scheme.
+// TestCtlV3DialWithHTTPScheme ensures that the client handles endpoints with the HTTPS scheme.
func TestCtlV3DialWithHTTPScheme(t *testing.T) {
- testCtl(t, dialWithSchemeTest, withCfg(*newConfigClientTLS()))
+ testCtl(t, dialWithSchemeTest, withCfg(*e2e.NewConfigClientTLS()))
}
func dialWithSchemeTest(cx ctlCtx) {
- cmdArgs := append(cx.prefixArgs(cx.epc.EndpointsV3()), "put", "foo", "bar")
- if err := spawnWithExpect(cmdArgs, "OK"); err != nil {
- cx.t.Fatal(err)
- }
+ cmdArgs := append(cx.prefixArgs(cx.epc.EndpointsGRPC()), "put", "foo", "bar")
+ require.NoError(cx.t, e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, expect.ExpectedResponse{Value: "OK"}))
}
type ctlCtx struct {
- t *testing.T
- apiPrefix string
- cfg etcdProcessClusterConfig
- quotaBackendBytes int64
- corruptFunc func(string) error
- noStrictReconfig bool
+ t *testing.T
+ cfg e2e.EtcdProcessClusterConfig
- epc *etcdProcessCluster
+ corruptFunc func(string) error
+ disableStrictReconfigCheck bool
- envMap map[string]struct{}
+ epc *e2e.EtcdProcessCluster
+
+ envMap map[string]string
dialTimeout time.Duration
+ testTimeout time.Duration
quorum bool // if true, set up 3-node cluster and linearizable read
interactive bool
@@ -141,12 +139,6 @@ type ctlCtx struct {
initialCorruptCheck bool
- // for compaction
- compactPhysical bool
-
- // to run etcdutl instead of etcdctl for suitable commands.
- etcdutl bool
-
// dir that was used during the test
dataDir string
}
@@ -157,17 +149,26 @@ func (cx *ctlCtx) applyOpts(opts []ctlOption) {
for _, opt := range opts {
opt(cx)
}
+
cx.initialCorruptCheck = true
}
-func withCfg(cfg etcdProcessClusterConfig) ctlOption {
+func withCfg(cfg e2e.EtcdProcessClusterConfig) ctlOption {
return func(cx *ctlCtx) { cx.cfg = cfg }
}
+func withDefaultDialTimeout() ctlOption {
+ return withDialTimeout(0)
+}
+
func withDialTimeout(timeout time.Duration) ctlOption {
return func(cx *ctlCtx) { cx.dialTimeout = timeout }
}
+func withTestTimeout(timeout time.Duration) ctlOption {
+ return func(cx *ctlCtx) { cx.testTimeout = timeout }
+}
+
func withQuorum() ctlOption {
return func(cx *ctlCtx) { cx.quorum = true }
}
@@ -176,14 +177,6 @@ func withInteractive() ctlOption {
return func(cx *ctlCtx) { cx.interactive = true }
}
-func withQuota(b int64) ctlOption {
- return func(cx *ctlCtx) { cx.quotaBackendBytes = b }
-}
-
-func withCompactPhysical() ctlOption {
- return func(cx *ctlCtx) { cx.compactPhysical = true }
-}
-
func withInitialCorruptCheck() ctlOption {
return func(cx *ctlCtx) { cx.initialCorruptCheck = true }
}
@@ -192,81 +185,86 @@ func withCorruptFunc(f func(string) error) ctlOption {
return func(cx *ctlCtx) { cx.corruptFunc = f }
}
-func withNoStrictReconfig() ctlOption {
- return func(cx *ctlCtx) { cx.noStrictReconfig = true }
-}
-
-func withApiPrefix(p string) ctlOption {
- return func(cx *ctlCtx) { cx.apiPrefix = p }
+func withFlagByEnv() ctlOption {
+ return func(cx *ctlCtx) { cx.envMap = make(map[string]string) }
}
-func withFlagByEnv() ctlOption {
- return func(cx *ctlCtx) { cx.envMap = make(map[string]struct{}) }
+// This function must be called after `withCfg`; otherwise its value
+// may be overwritten by `withCfg`.
+func withMaxConcurrentStreams(streams uint32) ctlOption {
+ return func(cx *ctlCtx) {
+ cx.cfg.ServerConfig.MaxConcurrentStreams = streams
+ }
}
-func withEtcdutl() ctlOption {
- return func(cx *ctlCtx) { cx.etcdutl = true }
+func withLogLevel(logLevel string) ctlOption {
+ return func(cx *ctlCtx) {
+ cx.cfg.ServerConfig.LogLevel = logLevel
+ }
}
func testCtl(t *testing.T, testFunc func(ctlCtx), opts ...ctlOption) {
testCtlWithOffline(t, testFunc, nil, opts...)
}
-func testCtlWithOffline(t *testing.T, testFunc func(ctlCtx), testOfflineFunc func(ctlCtx), opts ...ctlOption) {
- BeforeTest(t)
-
- ret := ctlCtx{
+func getDefaultCtlCtx(t *testing.T) ctlCtx {
+ return ctlCtx{
t: t,
- cfg: *newConfigAutoTLS(),
+ cfg: *e2e.NewConfigAutoTLS(),
dialTimeout: 7 * time.Second,
}
+}
+
+func testCtlWithOffline(t *testing.T, testFunc func(ctlCtx), testOfflineFunc func(ctlCtx), opts ...ctlOption) {
+ e2e.BeforeTest(t)
+
+ ret := getDefaultCtlCtx(t)
ret.applyOpts(opts)
if !ret.quorum {
- ret.cfg = *configStandalone(ret.cfg)
+ ret.cfg = *e2e.ConfigStandalone(ret.cfg)
}
- if ret.quotaBackendBytes > 0 {
- ret.cfg.quotaBackendBytes = ret.quotaBackendBytes
- }
- ret.cfg.noStrictReconfig = ret.noStrictReconfig
+ ret.cfg.ServerConfig.StrictReconfigCheck = !ret.disableStrictReconfigCheck
if ret.initialCorruptCheck {
- ret.cfg.initialCorruptCheck = ret.initialCorruptCheck
+ ret.cfg.ServerConfig.ExperimentalInitialCorruptCheck = ret.initialCorruptCheck
}
if testOfflineFunc != nil {
- ret.cfg.keepDataDir = true
+ ret.cfg.KeepDataDir = true
}
- epc, err := newEtcdProcessCluster(t, &ret.cfg)
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(&ret.cfg))
if err != nil {
t.Fatalf("could not start etcd process cluster (%v)", err)
}
ret.epc = epc
- ret.dataDir = epc.procs[0].Config().dataDirPath
+ ret.dataDir = epc.Procs[0].Config().DataDirPath
+
+ runCtlTest(t, testFunc, testOfflineFunc, ret)
+}
+func runCtlTest(t *testing.T, testFunc func(ctlCtx), testOfflineFunc func(ctlCtx), cx ctlCtx) {
defer func() {
- if ret.envMap != nil {
- for k := range ret.envMap {
+ if cx.envMap != nil {
+ for k := range cx.envMap {
os.Unsetenv(k)
}
+ cx.envMap = make(map[string]string)
}
- if ret.epc != nil {
- if errC := ret.epc.Close(); errC != nil {
- t.Fatalf("error closing etcd processes (%v)", errC)
- }
+ if cx.epc != nil {
+ cx.epc.Stop()
+ cx.epc.Close()
}
}()
donec := make(chan struct{})
go func() {
defer close(donec)
- testFunc(ret)
+ testFunc(cx)
t.Log("---testFunc logic DONE")
}()
- timeout := 2*ret.dialTimeout + time.Second
- if ret.dialTimeout == 0 {
- timeout = 30 * time.Second
- }
+ timeout := cx.getTestTimeout()
+
select {
case <-time.After(timeout):
testutil.FatalStack(t, fmt.Sprintf("test timed out after %v", timeout))
@@ -274,31 +272,43 @@ func testCtlWithOffline(t *testing.T, testFunc func(ctlCtx), testOfflineFunc fun
}
t.Log("closing test cluster...")
- assert.NoError(t, epc.Close())
- epc = nil
+ assert.NoError(t, cx.epc.Stop())
+ assert.NoError(t, cx.epc.Close())
+ cx.epc = nil
t.Log("closed test cluster...")
if testOfflineFunc != nil {
- testOfflineFunc(ret)
+ testOfflineFunc(cx)
}
}
+func (cx *ctlCtx) getTestTimeout() time.Duration {
+ timeout := cx.testTimeout
+ if timeout == 0 {
+ timeout = 2*cx.dialTimeout + time.Second
+ if cx.dialTimeout == 0 {
+ timeout = 30 * time.Second
+ }
+ }
+ return timeout
+}
+
func (cx *ctlCtx) prefixArgs(eps []string) []string {
fmap := make(map[string]string)
fmap["endpoints"] = strings.Join(eps, ",")
fmap["dial-timeout"] = cx.dialTimeout.String()
- if cx.epc.cfg.clientTLS == clientTLS {
- if cx.epc.cfg.isClientAutoTLS {
+ if cx.epc.Cfg.Client.ConnectionType == e2e.ClientTLS {
+ if cx.epc.Cfg.Client.AutoTLS {
fmap["insecure-transport"] = "false"
fmap["insecure-skip-tls-verify"] = "true"
- } else if cx.epc.cfg.isClientCRL {
- fmap["cacert"] = caPath
- fmap["cert"] = revokedCertPath
- fmap["key"] = revokedPrivateKeyPath
+ } else if cx.epc.Cfg.Client.RevokeCerts {
+ fmap["cacert"] = e2e.CaPath
+ fmap["cert"] = e2e.RevokedCertPath
+ fmap["key"] = e2e.RevokedPrivateKeyPath
} else {
- fmap["cacert"] = caPath
- fmap["cert"] = certPath
- fmap["key"] = privateKeyPath
+ fmap["cacert"] = e2e.CaPath
+ fmap["cert"] = e2e.CertPath
+ fmap["key"] = e2e.PrivateKeyPath
}
}
if cx.user != "" {
@@ -307,12 +317,11 @@ func (cx *ctlCtx) prefixArgs(eps []string) []string {
useEnv := cx.envMap != nil
- cmdArgs := []string{ctlBinPath + "3"}
+ cmdArgs := []string{e2e.BinPath.Etcdctl}
for k, v := range fmap {
if useEnv {
ek := flags.FlagToEnv("ETCDCTL", k)
- os.Setenv(ek, v)
- cx.envMap[ek] = struct{}{}
+ cx.envMap[ek] = v
} else {
cmdArgs = append(cmdArgs, fmt.Sprintf("--%s=%s", k, v))
}
@@ -323,40 +332,15 @@ func (cx *ctlCtx) prefixArgs(eps []string) []string {
// PrefixArgs prefixes etcdctl command.
// Make sure to unset environment variables after tests.
func (cx *ctlCtx) PrefixArgs() []string {
- return cx.prefixArgs(cx.epc.EndpointsV3())
+ return cx.prefixArgs(cx.epc.EndpointsGRPC())
}
-// PrefixArgsUtl returns prefix of the command that is either etcdctl or etcdutl
-// depending on cx configuration.
+// PrefixArgsUtl returns the etcdutl command prefix.
// Please note that 'utl'-compatible commands do not consume the --endpoints flag.
func (cx *ctlCtx) PrefixArgsUtl() []string {
- if cx.etcdutl {
- return []string{utlBinPath}
- }
- return []string{ctlBinPath}
+ return []string{e2e.BinPath.Etcdutl}
}
func isGRPCTimedout(err error) bool {
return strings.Contains(err.Error(), "grpc: timed out trying to connect")
}
-
-func (cx *ctlCtx) memberToRemove() (ep string, memberID string, clusterID string) {
- n1 := cx.cfg.clusterSize
- if n1 < 2 {
- cx.t.Fatalf("%d-node is too small to test 'member remove'", n1)
- }
-
- resp, err := getMemberList(*cx)
- if err != nil {
- cx.t.Fatal(err)
- }
- if n1 != len(resp.Members) {
- cx.t.Fatalf("expected %d, got %d", n1, len(resp.Members))
- }
-
- ep = resp.Members[0].ClientURLs[0]
- clusterID = fmt.Sprintf("%x", resp.Header.ClusterId)
- memberID = fmt.Sprintf("%x", resp.Members[1].ID)
-
- return ep, memberID, clusterID
-}
diff --git a/tests/e2e/ctl_v3_txn_test.go b/tests/e2e/ctl_v3_txn_test.go
deleted file mode 100644
index bbcec5db6d9..00000000000
--- a/tests/e2e/ctl_v3_txn_test.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package e2e
-
-import "testing"
-
-func TestCtlV3TxnInteractiveSuccess(t *testing.T) {
- testCtl(t, txnTestSuccess, withInteractive())
-}
-func TestCtlV3TxnInteractiveSuccessNoTLS(t *testing.T) {
- testCtl(t, txnTestSuccess, withInteractive(), withCfg(*newConfigNoTLS()))
-}
-func TestCtlV3TxnInteractiveSuccessClientTLS(t *testing.T) {
- testCtl(t, txnTestSuccess, withInteractive(), withCfg(*newConfigClientTLS()))
-}
-func TestCtlV3TxnInteractiveSuccessPeerTLS(t *testing.T) {
- testCtl(t, txnTestSuccess, withInteractive(), withCfg(*newConfigPeerTLS()))
-}
-func TestCtlV3TxnInteractiveFail(t *testing.T) {
- testCtl(t, txnTestFail, withInteractive())
-}
-
-func txnTestSuccess(cx ctlCtx) {
- if err := ctlV3Put(cx, "key1", "value1", ""); err != nil {
- cx.t.Fatalf("txnTestSuccess ctlV3Put error (%v)", err)
- }
- if err := ctlV3Put(cx, "key2", "value2", ""); err != nil {
- cx.t.Fatalf("txnTestSuccess ctlV3Put error (%v)", err)
- }
- rqs := []txnRequests{
- {
- compare: []string{`value("key1") != "value2"`, `value("key2") != "value1"`},
- ifSucess: []string{"get key1", "get key2"},
- results: []string{"SUCCESS", "key1", "value1", "key2", "value2"},
- },
- {
- compare: []string{`version("key1") = "1"`, `version("key2") = "1"`},
- ifSucess: []string{"get key1", "get key2", `put "key \"with\" space" "value \x23"`},
- ifFail: []string{`put key1 "fail"`, `put key2 "fail"`},
- results: []string{"SUCCESS", "key1", "value1", "key2", "value2"},
- },
- {
- compare: []string{`version("key \"with\" space") = "1"`},
- ifSucess: []string{`get "key \"with\" space"`},
- results: []string{"SUCCESS", `key "with" space`, "value \x23"},
- },
- }
- for _, rq := range rqs {
- if err := ctlV3Txn(cx, rq); err != nil {
- cx.t.Fatal(err)
- }
- }
-}
-
-func txnTestFail(cx ctlCtx) {
- if err := ctlV3Put(cx, "key1", "value1", ""); err != nil {
- cx.t.Fatalf("txnTestSuccess ctlV3Put error (%v)", err)
- }
- rqs := []txnRequests{
- {
- compare: []string{`version("key") < "0"`},
- ifSucess: []string{`put key "success"`},
- ifFail: []string{`put key "fail"`},
- results: []string{"FAILURE", "OK"},
- },
- {
- compare: []string{`value("key1") != "value1"`},
- ifSucess: []string{`put key1 "success"`},
- ifFail: []string{`put key1 "fail"`},
- results: []string{"FAILURE", "OK"},
- },
- }
- for _, rq := range rqs {
- if err := ctlV3Txn(cx, rq); err != nil {
- cx.t.Fatal(err)
- }
- }
-}
-
-type txnRequests struct {
- compare []string
- ifSucess []string
- ifFail []string
- results []string
-}
-
-func ctlV3Txn(cx ctlCtx, rqs txnRequests) error {
- // TODO: support non-interactive mode
- cmdArgs := append(cx.PrefixArgs(), "txn")
- if cx.interactive {
- cmdArgs = append(cmdArgs, "--interactive")
- }
- proc, err := spawnCmd(cmdArgs)
- if err != nil {
- return err
- }
- _, err = proc.Expect("compares:")
- if err != nil {
- return err
- }
- for _, req := range rqs.compare {
- if err = proc.Send(req + "\r"); err != nil {
- return err
- }
- }
- if err = proc.Send("\r"); err != nil {
- return err
- }
-
- _, err = proc.Expect("success requests (get, put, del):")
- if err != nil {
- return err
- }
- for _, req := range rqs.ifSucess {
- if err = proc.Send(req + "\r"); err != nil {
- return err
- }
- }
- if err = proc.Send("\r"); err != nil {
- return err
- }
-
- _, err = proc.Expect("failure requests (get, put, del):")
- if err != nil {
- return err
- }
- for _, req := range rqs.ifFail {
- if err = proc.Send(req + "\r"); err != nil {
- return err
- }
- }
- if err = proc.Send("\r"); err != nil {
- return err
- }
-
- for _, line := range rqs.results {
- _, err = proc.Expect(line)
- if err != nil {
- return err
- }
- }
- return proc.Close()
-}
diff --git a/tests/e2e/ctl_v3_user_test.go b/tests/e2e/ctl_v3_user_test.go
deleted file mode 100644
index 8672ae7b67b..00000000000
--- a/tests/e2e/ctl_v3_user_test.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package e2e
-
-import "testing"
-
-func TestCtlV3UserAdd(t *testing.T) { testCtl(t, userAddTest) }
-func TestCtlV3UserAddNoTLS(t *testing.T) { testCtl(t, userAddTest, withCfg(*newConfigNoTLS())) }
-func TestCtlV3UserAddClientTLS(t *testing.T) { testCtl(t, userAddTest, withCfg(*newConfigClientTLS())) }
-func TestCtlV3UserAddPeerTLS(t *testing.T) { testCtl(t, userAddTest, withCfg(*newConfigPeerTLS())) }
-func TestCtlV3UserAddTimeout(t *testing.T) { testCtl(t, userAddTest, withDialTimeout(0)) }
-func TestCtlV3UserAddClientAutoTLS(t *testing.T) {
- testCtl(t, userAddTest, withCfg(*newConfigClientAutoTLS()))
-}
-func TestCtlV3UserList(t *testing.T) { testCtl(t, userListTest) }
-func TestCtlV3UserListNoTLS(t *testing.T) { testCtl(t, userListTest, withCfg(*newConfigNoTLS())) }
-func TestCtlV3UserListClientTLS(t *testing.T) {
- testCtl(t, userListTest, withCfg(*newConfigClientTLS()))
-}
-func TestCtlV3UserListPeerTLS(t *testing.T) { testCtl(t, userListTest, withCfg(*newConfigPeerTLS())) }
-func TestCtlV3UserListClientAutoTLS(t *testing.T) {
- testCtl(t, userListTest, withCfg(*newConfigClientAutoTLS()))
-}
-func TestCtlV3UserDelete(t *testing.T) { testCtl(t, userDelTest) }
-func TestCtlV3UserDeleteNoTLS(t *testing.T) { testCtl(t, userDelTest, withCfg(*newConfigNoTLS())) }
-func TestCtlV3UserDeleteClientTLS(t *testing.T) {
- testCtl(t, userDelTest, withCfg(*newConfigClientTLS()))
-}
-func TestCtlV3UserDeletePeerTLS(t *testing.T) { testCtl(t, userDelTest, withCfg(*newConfigPeerTLS())) }
-func TestCtlV3UserDeleteClientAutoTLS(t *testing.T) {
- testCtl(t, userDelTest, withCfg(*newConfigClientAutoTLS()))
-}
-func TestCtlV3UserPasswd(t *testing.T) { testCtl(t, userPasswdTest) }
-func TestCtlV3UserPasswdNoTLS(t *testing.T) { testCtl(t, userPasswdTest, withCfg(*newConfigNoTLS())) }
-func TestCtlV3UserPasswdClientTLS(t *testing.T) {
- testCtl(t, userPasswdTest, withCfg(*newConfigClientTLS()))
-}
-func TestCtlV3UserPasswdPeerTLS(t *testing.T) {
- testCtl(t, userPasswdTest, withCfg(*newConfigPeerTLS()))
-}
-func TestCtlV3UserPasswdClientAutoTLS(t *testing.T) {
- testCtl(t, userPasswdTest, withCfg(*newConfigClientAutoTLS()))
-}
-
-type userCmdDesc struct {
- args []string
- expectedStr string
- stdIn []string
-}
-
-func userAddTest(cx ctlCtx) {
- cmdSet := []userCmdDesc{
- // Adds a user name.
- {
- args: []string{"add", "username", "--interactive=false"},
- expectedStr: "User username created",
- stdIn: []string{"password"},
- },
- // Adds a user name using the usertest:password syntax.
- {
- args: []string{"add", "usertest:password"},
- expectedStr: "User usertest created",
- stdIn: []string{},
- },
- // Tries to add a user with empty username.
- {
- args: []string{"add", ":password"},
- expectedStr: "empty user name is not allowed",
- stdIn: []string{},
- },
- // Tries to add a user name that already exists.
- {
- args: []string{"add", "username", "--interactive=false"},
- expectedStr: "user name already exists",
- stdIn: []string{"password"},
- },
- // Adds a user without password.
- {
- args: []string{"add", "userwopasswd", "--no-password"},
- expectedStr: "User userwopasswd created",
- stdIn: []string{},
- },
- }
-
- for i, cmd := range cmdSet {
- if err := ctlV3User(cx, cmd.args, cmd.expectedStr, cmd.stdIn); err != nil {
- if cx.dialTimeout > 0 && !isGRPCTimedout(err) {
- cx.t.Fatalf("userAddTest #%d: ctlV3User error (%v)", i, err)
- }
- }
- }
-}
-
-func userListTest(cx ctlCtx) {
- cmdSet := []userCmdDesc{
- // Adds a user name.
- {
- args: []string{"add", "username", "--interactive=false"},
- expectedStr: "User username created",
- stdIn: []string{"password"},
- },
- // List user name
- {
- args: []string{"list"},
- expectedStr: "username",
- },
- }
-
- for i, cmd := range cmdSet {
- if err := ctlV3User(cx, cmd.args, cmd.expectedStr, cmd.stdIn); err != nil {
- cx.t.Fatalf("userListTest #%d: ctlV3User error (%v)", i, err)
- }
- }
-}
-
-func userDelTest(cx ctlCtx) {
- cmdSet := []userCmdDesc{
- // Adds a user name.
- {
- args: []string{"add", "username", "--interactive=false"},
- expectedStr: "User username created",
- stdIn: []string{"password"},
- },
- // Deletes the user name just added.
- {
- args: []string{"delete", "username"},
- expectedStr: "User username deleted",
- },
- // Deletes a user name that is not present.
- {
- args: []string{"delete", "username"},
- expectedStr: "user name not found",
- },
- }
-
- for i, cmd := range cmdSet {
- if err := ctlV3User(cx, cmd.args, cmd.expectedStr, cmd.stdIn); err != nil {
- cx.t.Fatalf("userDelTest #%d: ctlV3User error (%v)", i, err)
- }
- }
-}
-
-func userPasswdTest(cx ctlCtx) {
- cmdSet := []userCmdDesc{
- // Adds a user name.
- {
- args: []string{"add", "username", "--interactive=false"},
- expectedStr: "User username created",
- stdIn: []string{"password"},
- },
- // Changes the password.
- {
- args: []string{"passwd", "username", "--interactive=false"},
- expectedStr: "Password updated",
- stdIn: []string{"password1"},
- },
- }
-
- for i, cmd := range cmdSet {
- if err := ctlV3User(cx, cmd.args, cmd.expectedStr, cmd.stdIn); err != nil {
- cx.t.Fatalf("userPasswdTest #%d: ctlV3User error (%v)", i, err)
- }
- }
-}
-
-func ctlV3User(cx ctlCtx, args []string, expStr string, stdIn []string) error {
- cmdArgs := append(cx.PrefixArgs(), "user")
- cmdArgs = append(cmdArgs, args...)
-
- proc, err := spawnCmd(cmdArgs)
- if err != nil {
- return err
- }
- defer proc.Close()
-
- // Send 'stdIn' strings as input.
- for _, s := range stdIn {
- if err = proc.Send(s + "\r"); err != nil {
- return err
- }
- }
-
- _, err = proc.Expect(expStr)
- return err
-}
diff --git a/tests/e2e/ctl_v3_watch_cov_test.go b/tests/e2e/ctl_v3_watch_cov_test.go
deleted file mode 100644
index 8d2fd04d607..00000000000
--- a/tests/e2e/ctl_v3_watch_cov_test.go
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build cov
-// +build cov
-
-package e2e
-
-import (
- "os"
- "testing"
-)
-
-func TestCtlV3Watch(t *testing.T) { testCtl(t, watchTest) }
-func TestCtlV3WatchNoTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*newConfigNoTLS())) }
-func TestCtlV3WatchClientTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*newConfigClientTLS())) }
-func TestCtlV3WatchPeerTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*newConfigPeerTLS())) }
-func TestCtlV3WatchTimeout(t *testing.T) { testCtl(t, watchTest, withDialTimeout(0)) }
-
-func TestCtlV3WatchInteractive(t *testing.T) {
- testCtl(t, watchTest, withInteractive())
-}
-func TestCtlV3WatchInteractiveNoTLS(t *testing.T) {
- testCtl(t, watchTest, withInteractive(), withCfg(*newConfigNoTLS()))
-}
-func TestCtlV3WatchInteractiveClientTLS(t *testing.T) {
- testCtl(t, watchTest, withInteractive(), withCfg(*newConfigClientTLS()))
-}
-func TestCtlV3WatchInteractivePeerTLS(t *testing.T) {
- testCtl(t, watchTest, withInteractive(), withCfg(*newConfigPeerTLS()))
-}
-
-func watchTest(cx ctlCtx) {
- tests := []struct {
- puts []kv
- envKey string
- envRange string
- args []string
-
- wkv []kvExec
- }{
- { // watch 1 key
- puts: []kv{{"sample", "value"}},
- args: []string{"sample", "--rev", "1"},
- wkv: []kvExec{{key: "sample", val: "value"}},
- },
- { // watch 1 key with env
- puts: []kv{{"sample", "value"}},
- envKey: "sample",
- args: []string{"--rev", "1"},
- wkv: []kvExec{{key: "sample", val: "value"}},
- },
-
- // coverage tests get extra arguments:
- // ./bin/etcdctl_test -test.coverprofile=e2e.1525392462795198897.coverprofile -test.outputdir=../..
- // do not test watch exec commands
-
- { // watch 3 keys by prefix
- puts: []kv{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}},
- args: []string{"key", "--rev", "1", "--prefix"},
- wkv: []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}, {key: "key3", val: "val3"}},
- },
- { // watch 3 keys by prefix, with env
- puts: []kv{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}},
- envKey: "key",
- args: []string{"--rev", "1", "--prefix"},
- wkv: []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}, {key: "key3", val: "val3"}},
- },
- { // watch by revision
- puts: []kv{{"etcd", "revision_1"}, {"etcd", "revision_2"}, {"etcd", "revision_3"}},
- args: []string{"etcd", "--rev", "2"},
- wkv: []kvExec{{key: "etcd", val: "revision_2"}, {key: "etcd", val: "revision_3"}},
- },
- { // watch 3 keys by range
- puts: []kv{{"key1", "val1"}, {"key3", "val3"}, {"key2", "val2"}},
- args: []string{"key", "key3", "--rev", "1"},
- wkv: []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}},
- },
- { // watch 3 keys by range, with env
- puts: []kv{{"key1", "val1"}, {"key3", "val3"}, {"key2", "val2"}},
- envKey: "key",
- envRange: "key3",
- args: []string{"--rev", "1"},
- wkv: []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}},
- },
- }
-
- for i, tt := range tests {
- donec := make(chan struct{})
- go func(i int, puts []kv) {
- for j := range puts {
- if err := ctlV3Put(cx, puts[j].key, puts[j].val, ""); err != nil {
- cx.t.Fatalf("watchTest #%d-%d: ctlV3Put error (%v)", i, j, err)
- }
- }
- close(donec)
- }(i, tt.puts)
-
- unsetEnv := func() {}
- if tt.envKey != "" || tt.envRange != "" {
- if tt.envKey != "" {
- os.Setenv("ETCDCTL_WATCH_KEY", tt.envKey)
- unsetEnv = func() { os.Unsetenv("ETCDCTL_WATCH_KEY") }
- }
- if tt.envRange != "" {
- os.Setenv("ETCDCTL_WATCH_RANGE_END", tt.envRange)
- unsetEnv = func() { os.Unsetenv("ETCDCTL_WATCH_RANGE_END") }
- }
- if tt.envKey != "" && tt.envRange != "" {
- unsetEnv = func() {
- os.Unsetenv("ETCDCTL_WATCH_KEY")
- os.Unsetenv("ETCDCTL_WATCH_RANGE_END")
- }
- }
- }
- if err := ctlV3Watch(cx, tt.args, tt.wkv...); err != nil {
- if cx.dialTimeout > 0 && !isGRPCTimedout(err) {
- cx.t.Errorf("watchTest #%d: ctlV3Watch error (%v)", i, err)
- }
- }
- unsetEnv()
- <-donec
- }
-}
diff --git a/tests/e2e/ctl_v3_watch_no_cov_test.go b/tests/e2e/ctl_v3_watch_no_cov_test.go
index a952aa4a419..9df7cc60078 100644
--- a/tests/e2e/ctl_v3_watch_no_cov_test.go
+++ b/tests/e2e/ctl_v3_watch_no_cov_test.go
@@ -12,33 +12,35 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build !cov
-// +build !cov
-
package e2e
import (
"os"
"testing"
+
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
)
func TestCtlV3Watch(t *testing.T) { testCtl(t, watchTest) }
-func TestCtlV3WatchNoTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*newConfigNoTLS())) }
-func TestCtlV3WatchClientTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*newConfigClientTLS())) }
-func TestCtlV3WatchPeerTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*newConfigPeerTLS())) }
-func TestCtlV3WatchTimeout(t *testing.T) { testCtl(t, watchTest, withDialTimeout(0)) }
+func TestCtlV3WatchNoTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigNoTLS())) }
+func TestCtlV3WatchClientTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigClientTLS())) }
+func TestCtlV3WatchPeerTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigPeerTLS())) }
+func TestCtlV3WatchTimeout(t *testing.T) { testCtl(t, watchTest, withDefaultDialTimeout()) }
func TestCtlV3WatchInteractive(t *testing.T) {
testCtl(t, watchTest, withInteractive())
}
+
func TestCtlV3WatchInteractiveNoTLS(t *testing.T) {
- testCtl(t, watchTest, withInteractive(), withCfg(*newConfigNoTLS()))
+ testCtl(t, watchTest, withInteractive(), withCfg(*e2e.NewConfigNoTLS()))
}
+
func TestCtlV3WatchInteractiveClientTLS(t *testing.T) {
- testCtl(t, watchTest, withInteractive(), withCfg(*newConfigClientTLS()))
+ testCtl(t, watchTest, withInteractive(), withCfg(*e2e.NewConfigClientTLS()))
}
+
func TestCtlV3WatchInteractivePeerTLS(t *testing.T) {
- testCtl(t, watchTest, withInteractive(), withCfg(*newConfigPeerTLS()))
+ testCtl(t, watchTest, withInteractive(), withCfg(*e2e.NewConfigPeerTLS()))
}
func watchTest(cx ctlCtx) {
@@ -50,11 +52,6 @@ func watchTest(cx ctlCtx) {
wkv []kvExec
}{
- { // watch 1 key
- puts: []kv{{"sample", "value"}},
- args: []string{"sample", "--rev", "1"},
- wkv: []kvExec{{key: "sample", val: "value"}},
- },
{ // watch 1 key with env
puts: []kv{{"sample", "value"}},
envKey: "sample",
@@ -99,27 +96,12 @@ func watchTest(cx ctlCtx) {
args: []string{"sample", "--rev", "1", "samplx", "--", "echo", "watch event received"},
wkv: []kvExec{{key: "sample", val: "value", execOutput: "watch event received"}},
},
- { // watch 3 keys by prefix
- puts: []kv{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}},
- args: []string{"key", "--rev", "1", "--prefix"},
- wkv: []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}, {key: "key3", val: "val3"}},
- },
{ // watch 3 keys by prefix, with env
puts: []kv{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}},
envKey: "key",
args: []string{"--rev", "1", "--prefix"},
wkv: []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}, {key: "key3", val: "val3"}},
},
- { // watch by revision
- puts: []kv{{"etcd", "revision_1"}, {"etcd", "revision_2"}, {"etcd", "revision_3"}},
- args: []string{"etcd", "--rev", "2"},
- wkv: []kvExec{{key: "etcd", val: "revision_2"}, {key: "etcd", val: "revision_3"}},
- },
- { // watch 3 keys by range
- puts: []kv{{"key1", "val1"}, {"key3", "val3"}, {"key2", "val2"}},
- args: []string{"key", "key3", "--rev", "1"},
- wkv: []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}},
- },
{ // watch 3 keys by range, with env
puts: []kv{{"key1", "val1"}, {"key3", "val3"}, {"key2", "val2"}},
envKey: "key",
diff --git a/tests/e2e/ctl_v3_watch_test.go b/tests/e2e/ctl_v3_watch_test.go
index fe25da1b765..bec43224e4c 100644
--- a/tests/e2e/ctl_v3_watch_test.go
+++ b/tests/e2e/ctl_v3_watch_test.go
@@ -14,7 +14,11 @@
package e2e
-import "strings"
+import (
+ "strings"
+
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
type kvExec struct {
key, val string
@@ -35,7 +39,7 @@ func setupWatchArgs(cx ctlCtx, args []string) []string {
func ctlV3Watch(cx ctlCtx, args []string, kvs ...kvExec) error {
cmdArgs := setupWatchArgs(cx, args)
- proc, err := spawnCmd(cmdArgs)
+ proc, err := e2e.SpawnCmd(cmdArgs, nil)
if err != nil {
return err
}
@@ -66,7 +70,7 @@ func ctlV3Watch(cx ctlCtx, args []string, kvs ...kvExec) error {
func ctlV3WatchFailPerm(cx ctlCtx, args []string) error {
cmdArgs := setupWatchArgs(cx, args)
- proc, err := spawnCmd(cmdArgs)
+ proc, err := e2e.SpawnCmd(cmdArgs, nil)
if err != nil {
return err
}
diff --git a/tests/e2e/defrag_no_space_test.go b/tests/e2e/defrag_no_space_test.go
new file mode 100644
index 00000000000..f6ceabe667b
--- /dev/null
+++ b/tests/e2e/defrag_no_space_test.go
@@ -0,0 +1,71 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+func TestDefragNoSpace(t *testing.T) {
+ tests := []struct {
+ name string
+ failpoint string
+ err string
+ }{
+ {
+ name: "no space (#18810) - can't open/create new bbolt db",
+ failpoint: "defragOpenFileError",
+ err: "no space",
+ },
+ {
+ name: "defragdb failure",
+ failpoint: "defragdbFail",
+ err: "some random error",
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ e2e.BeforeTest(t)
+
+ clus, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+ e2e.WithClusterSize(1),
+ e2e.WithGoFailEnabled(true),
+ )
+ require.NoError(t, err)
+ t.Cleanup(func() { clus.Stop() })
+
+ member := clus.Procs[0]
+
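+ // Arm the failpoint so the defrag call fails with the injected error, then verify the error is surfaced to the client.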
+ require.NoError(t, member.Failpoints().SetupHTTP(context.Background(), tc.failpoint, fmt.Sprintf(`return("%s")`, tc.err)))
+ require.ErrorContains(t, member.Etcdctl().Defragment(context.Background(), config.DefragOption{Timeout: time.Minute}), tc.err)
+
+ // Make sure etcd continues to run even after the failed defrag attempt
+ require.NoError(t, member.Etcdctl().Put(context.Background(), "foo", "bar", config.PutOptions{}))
+ value, err := member.Etcdctl().Get(context.Background(), "foo", config.GetOptions{})
+ require.NoError(t, err)
+ require.Len(t, value.Kvs, 1)
+ require.Equal(t, "bar", string(value.Kvs[0].Value))
+ })
+ }
+}
diff --git a/tests/e2e/discovery_test.go b/tests/e2e/discovery_test.go
new file mode 100644
index 00000000000..15f51a9572d
--- /dev/null
+++ b/tests/e2e/discovery_test.go
@@ -0,0 +1,100 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !cluster_proxy
+
+package e2e
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+ "go.etcd.io/etcd/client/v2"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
+)
+
+func TestClusterOf1UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 1, false) }
+func TestClusterOf3UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 3, false) }
+func TestTLSClusterOf3UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 3, true) }
+
+func testClusterUsingDiscovery(t *testing.T, size int, peerTLS bool) {
+ e2e.BeforeTest(t)
+
+ if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
+ t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease)
+ }
+
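+ // Start a single-member cluster running the last release (with the v2 API enabled) to act as the v2 discovery service.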
+ dc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+ e2e.WithBasePort(2000),
+ e2e.WithVersion(e2e.LastVersion),
+ e2e.WithClusterSize(1),
+ e2e.WithEnableV2(true),
+ )
+ if err != nil {
+ t.Fatalf("could not start etcd process cluster (%v)", err)
+ }
+ defer dc.Close()
+
+ dcc := MustNewHTTPClient(t, dc.EndpointsHTTP(), nil)
+ dkapi := client.NewKeysAPI(dcc)
+ ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
+ _, err = dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", size))
+ require.NoError(t, err)
+ cancel()
+
+ c, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+ e2e.WithBasePort(3000),
+ e2e.WithClusterSize(size),
+ e2e.WithIsPeerTLS(peerTLS),
+ e2e.WithDiscovery(dc.EndpointsHTTP()[0]+"/v2/keys"),
+ )
+ if err != nil {
+ t.Fatalf("could not start etcd process cluster (%v)", err)
+ }
+ defer c.Close()
+
+ etcdctl := []string{e2e.BinPath.Etcdctl, "--endpoints", strings.Join(c.EndpointsGRPC(), ",")}
+ require.NoError(t, e2e.SpawnWithExpect(append(etcdctl, "put", "key", "value"), expect.ExpectedResponse{Value: "OK"}))
+ require.NoError(t, e2e.SpawnWithExpect(append(etcdctl, "get", "key"), expect.ExpectedResponse{Value: "value"}))
+}
+
+func MustNewHTTPClient(t testutil.TB, eps []string, tls *transport.TLSInfo) client.Client {
+ cfgtls := transport.TLSInfo{}
+ if tls != nil {
+ cfgtls = *tls
+ }
+ cfg := client.Config{Transport: mustNewTransport(t, cfgtls), Endpoints: eps}
+ c, err := client.New(cfg)
+ require.NoError(t, err)
+ return c
+}
+
+func mustNewTransport(t testutil.TB, tlsInfo transport.TLSInfo) *http.Transport {
+ // the tick in integration tests is short, so a 1s dial timeout works well.
+ tr, err := transport.NewTimeoutTransport(tlsInfo, time.Second, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
+ require.NoError(t, err)
+ return tr
+}
diff --git a/tests/e2e/discovery_v3_test.go b/tests/e2e/discovery_v3_test.go
new file mode 100644
index 00000000000..f3c47dd34e5
--- /dev/null
+++ b/tests/e2e/discovery_v3_test.go
@@ -0,0 +1,126 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+func TestClusterOf1UsingV3Discovery_1endpoint(t *testing.T) {
+ testClusterUsingV3Discovery(t, 1, 1, e2e.ClientNonTLS, false)
+}
+
+func TestClusterOf3UsingV3Discovery_1endpoint(t *testing.T) {
+ testClusterUsingV3Discovery(t, 1, 3, e2e.ClientTLS, true)
+}
+
+func TestTLSClusterOf5UsingV3Discovery_1endpoint(t *testing.T) {
+ testClusterUsingV3Discovery(t, 1, 5, e2e.ClientTLS, false)
+}
+
+func TestClusterOf1UsingV3Discovery_3endpoints(t *testing.T) {
+ testClusterUsingV3Discovery(t, 3, 1, e2e.ClientNonTLS, false)
+}
+
+func TestClusterOf3UsingV3Discovery_3endpoints(t *testing.T) {
+ testClusterUsingV3Discovery(t, 3, 3, e2e.ClientTLS, true)
+}
+
+func TestTLSClusterOf5UsingV3Discovery_3endpoints(t *testing.T) {
+ testClusterUsingV3Discovery(t, 3, 5, e2e.ClientTLS, false)
+}
+
+func testClusterUsingV3Discovery(t *testing.T, discoveryClusterSize, targetClusterSize int, clientTLSType e2e.ClientConnType, isClientAutoTLS bool) {
+ e2e.BeforeTest(t)
+
+ // step 1: start the discovery service
+ ds, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+ e2e.WithBasePort(2000),
+ e2e.WithClusterSize(discoveryClusterSize),
+ e2e.WithClientConnType(clientTLSType),
+ e2e.WithClientAutoTLS(isClientAutoTLS),
+ )
+ if err != nil {
+ t.Fatalf("could not start discovery etcd cluster (%v)", err)
+ }
+ defer ds.Close()
+
+ // step 2: configure the cluster size
+ discoveryToken := "8A591FAB-1D72-41FA-BDF2-A27162FDA1E0"
+ configSizeKey := fmt.Sprintf("/_etcd/registry/%s/_config/size", discoveryToken)
+ configSizeValStr := strconv.Itoa(targetClusterSize)
+ if err = ctlV3Put(ctlCtx{epc: ds}, configSizeKey, configSizeValStr, ""); err != nil {
+ t.Errorf("failed to configure cluster size to discovery serivce, error: %v", err)
+ }
+
+ // step 3: start the etcd cluster
+ epc, err := bootstrapEtcdClusterUsingV3Discovery(t, ds.EndpointsGRPC(), discoveryToken, targetClusterSize, clientTLSType, isClientAutoTLS)
+ if err != nil {
+ t.Fatalf("could not start etcd process cluster (%v)", err)
+ }
+ defer epc.Close()
+
+ // step 4: sanity test on the etcd cluster
+ etcdctl := []string{e2e.BinPath.Etcdctl, "--endpoints", strings.Join(epc.EndpointsGRPC(), ",")}
+ require.NoError(t, e2e.SpawnWithExpect(append(etcdctl, "put", "key", "value"), expect.ExpectedResponse{Value: "OK"}))
+ require.NoError(t, e2e.SpawnWithExpect(append(etcdctl, "get", "key"), expect.ExpectedResponse{Value: "value"}))
+}
+
+func bootstrapEtcdClusterUsingV3Discovery(t *testing.T, discoveryEndpoints []string, discoveryToken string, clusterSize int, clientTLSType e2e.ClientConnType, isClientAutoTLS bool) (*e2e.EtcdProcessCluster, error) {
+ // cluster configuration
+ cfg := e2e.NewConfig(
+ e2e.WithBasePort(3000),
+ e2e.WithClusterSize(clusterSize),
+ e2e.WithIsPeerTLS(true),
+ e2e.WithIsPeerAutoTLS(true),
+ e2e.WithDiscoveryToken(discoveryToken),
+ e2e.WithDiscoveryEndpoints(discoveryEndpoints),
+ )
+
+ // initialize the cluster
+ epc, err := e2e.InitEtcdProcessCluster(t, cfg)
+ if err != nil {
+ t.Fatalf("could not initialize etcd cluster (%v)", err)
+ return epc, err
+ }
+
+ // populate discovery related security configuration
+ for _, ep := range epc.Procs {
+ epCfg := ep.Config()
+
+ if clientTLSType == e2e.ClientTLS {
+ if isClientAutoTLS {
+ epCfg.Args = append(epCfg.Args, "--discovery-insecure-transport=false")
+ epCfg.Args = append(epCfg.Args, "--discovery-insecure-skip-tls-verify=true")
+ } else {
+ epCfg.Args = append(epCfg.Args, "--discovery-cacert="+e2e.CaPath)
+ epCfg.Args = append(epCfg.Args, "--discovery-cert="+e2e.CertPath)
+ epCfg.Args = append(epCfg.Args, "--discovery-key="+e2e.PrivateKeyPath)
+ }
+ }
+ }
+
+ // start the cluster
+ return e2e.StartEtcdProcessCluster(context.TODO(), t, epc, cfg)
+}
diff --git a/tests/e2e/etcd_config_test.go b/tests/e2e/etcd_config_test.go
index ef39a52e588..21d587623da 100644
--- a/tests/e2e/etcd_config_test.go
+++ b/tests/e2e/etcd_config_test.go
@@ -15,43 +15,42 @@
package e2e
import (
+ "context"
"fmt"
- "io/ioutil"
+ "net"
"os"
"strings"
"testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
"go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/server/v3/embed"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
)
const exampleConfigFile = "../../etcd.conf.yml.sample"
func TestEtcdExampleConfig(t *testing.T) {
- skipInShortMode(t)
+ e2e.SkipInShortMode(t)
- proc, err := spawnCmd([]string{binDir + "/etcd", "--config-file", exampleConfigFile})
- if err != nil {
- t.Fatal(err)
- }
- if err = waitReadyExpectProc(proc, etcdServerReadyLines); err != nil {
- t.Fatal(err)
- }
- if err = proc.Stop(); err != nil {
- t.Fatal(err)
- }
+ proc, err := e2e.SpawnCmd([]string{e2e.BinPath.Etcd, "--config-file", exampleConfigFile}, nil)
+ require.NoError(t, err)
+ require.NoError(t, e2e.WaitReadyExpectProc(context.TODO(), proc, e2e.EtcdServerReadyLines))
+ require.NoError(t, proc.Stop())
}
func TestEtcdMultiPeer(t *testing.T) {
- skipInShortMode(t)
+ e2e.SkipInShortMode(t)
peers, tmpdirs := make([]string, 3), make([]string, 3)
for i := range peers {
- peers[i] = fmt.Sprintf("e%d=http://127.0.0.1:%d", i, etcdProcessBasePort+i)
- d, err := ioutil.TempDir("", fmt.Sprintf("e%d.etcd", i))
- if err != nil {
- t.Fatal(err)
- }
- tmpdirs[i] = d
+ peers[i] = fmt.Sprintf("e%d=http://127.0.0.1:%d", i, e2e.EtcdProcessBasePort+i)
+ tmpdirs[i] = t.TempDir()
}
ic := strings.Join(peers, ",")
@@ -60,78 +59,108 @@ func TestEtcdMultiPeer(t *testing.T) {
for i := range procs {
if procs[i] != nil {
procs[i].Stop()
+ procs[i].Close()
}
- os.RemoveAll(tmpdirs[i])
}
}()
for i := range procs {
args := []string{
- binDir + "/etcd",
+ e2e.BinPath.Etcd,
"--name", fmt.Sprintf("e%d", i),
"--listen-client-urls", "http://0.0.0.0:0",
"--data-dir", tmpdirs[i],
"--advertise-client-urls", "http://0.0.0.0:0",
- "--listen-peer-urls", fmt.Sprintf("http://127.0.0.1:%d,http://127.0.0.1:%d", etcdProcessBasePort+i, etcdProcessBasePort+len(peers)+i),
- "--initial-advertise-peer-urls", fmt.Sprintf("http://127.0.0.1:%d", etcdProcessBasePort+i),
+ "--listen-peer-urls", fmt.Sprintf("http://127.0.0.1:%d,http://127.0.0.1:%d", e2e.EtcdProcessBasePort+i, e2e.EtcdProcessBasePort+len(peers)+i),
+ "--initial-advertise-peer-urls", fmt.Sprintf("http://127.0.0.1:%d", e2e.EtcdProcessBasePort+i),
"--initial-cluster", ic,
}
- p, err := spawnCmd(args)
- if err != nil {
- t.Fatal(err)
- }
+ p, err := e2e.SpawnCmd(args, nil)
+ require.NoError(t, err)
procs[i] = p
}
for _, p := range procs {
- if err := waitReadyExpectProc(p, etcdServerReadyLines); err != nil {
- t.Fatal(err)
- }
+ err := e2e.WaitReadyExpectProc(context.TODO(), p, e2e.EtcdServerReadyLines)
+ require.NoError(t, err)
}
}
// TestEtcdUnixPeers checks that etcd will boot with unix socket peers.
func TestEtcdUnixPeers(t *testing.T) {
- skipInShortMode(t)
+ e2e.SkipInShortMode(t)
- d, err := ioutil.TempDir("", "e1.etcd")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(d)
- proc, err := spawnCmd(
+ d := t.TempDir()
+ proc, err := e2e.SpawnCmd(
[]string{
- binDir + "/etcd",
+ e2e.BinPath.Etcd,
"--data-dir", d,
"--name", "e1",
"--listen-peer-urls", "unix://etcd.unix:1",
"--initial-advertise-peer-urls", "unix://etcd.unix:1",
"--initial-cluster", "e1=unix://etcd.unix:1",
- },
+ }, nil,
)
defer os.Remove("etcd.unix:1")
- if err != nil {
- t.Fatal(err)
- }
- if err = waitReadyExpectProc(proc, etcdServerReadyLines); err != nil {
- t.Fatal(err)
- }
- if err = proc.Stop(); err != nil {
- t.Fatal(err)
+ require.NoError(t, err)
+ require.NoError(t, e2e.WaitReadyExpectProc(context.TODO(), proc, e2e.EtcdServerReadyLines))
+ require.NoError(t, proc.Stop())
+}
+
+// TestEtcdListenMetricsURLsWithMissingClientTLSInfo checks that an HTTPS listen metrics URL
+// fails its verification when the client TLS info is not provided.
+func TestEtcdListenMetricsURLsWithMissingClientTLSInfo(t *testing.T) {
+ e2e.SkipInShortMode(t)
+
+ tempDir := t.TempDir()
+ defer os.RemoveAll(tempDir)
+
+ caFile, certFiles, keyFiles, err := generateCertsForIPs(tempDir, []net.IP{net.ParseIP("127.0.0.1")})
+ require.NoError(t, err)
+
+ // the client URL is plain HTTP while the metrics URL is HTTPS, which is invalid when the client TLS info is not provided
+ clientURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort)
+ peerURL := fmt.Sprintf("https://localhost:%d", e2e.EtcdProcessBasePort+1)
+ listenMetricsURL := fmt.Sprintf("https://localhost:%d", e2e.EtcdProcessBasePort+2)
+
+ commonArgs := []string{
+ e2e.BinPath.Etcd,
+ "--name", "e0",
+ "--data-dir", tempDir,
+
+ "--listen-client-urls", clientURL,
+ "--advertise-client-urls", clientURL,
+
+ "--initial-advertise-peer-urls", peerURL,
+ "--listen-peer-urls", peerURL,
+
+ "--initial-cluster", "e0=" + peerURL,
+
+ "--listen-metrics-urls", listenMetricsURL,
+
+ "--peer-cert-file", certFiles[0],
+ "--peer-key-file", keyFiles[0],
+ "--peer-trusted-ca-file", caFile,
+ "--peer-client-cert-auth",
}
+
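+ // etcd should reject this configuration and log the missing client TLS info error for the HTTPS metrics URL.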
+ proc, err := e2e.SpawnCmd(commonArgs, nil)
+ require.NoError(t, err)
+ defer func() {
+ require.NoError(t, proc.Stop())
+ _ = proc.Close()
+ }()
+
+ require.NoError(t, e2e.WaitReadyExpectProc(context.TODO(), proc, []string{embed.ErrMissingClientTLSInfoForMetricsURL.Error()}))
}
// TestEtcdPeerCNAuth checks that the inter peer auth based on CN of cert is working correctly.
func TestEtcdPeerCNAuth(t *testing.T) {
- skipInShortMode(t)
+ e2e.SkipInShortMode(t)
peers, tmpdirs := make([]string, 3), make([]string, 3)
for i := range peers {
- peers[i] = fmt.Sprintf("e%d=https://127.0.0.1:%d", i, etcdProcessBasePort+i)
- d, err := ioutil.TempDir("", fmt.Sprintf("e%d.etcd", i))
- if err != nil {
- t.Fatal(err)
- }
- tmpdirs[i] = d
+ peers[i] = fmt.Sprintf("e%d=https://127.0.0.1:%d", i, e2e.EtcdProcessBasePort+i)
+ tmpdirs[i] = t.TempDir()
}
ic := strings.Join(peers, ",")
@@ -140,42 +169,42 @@ func TestEtcdPeerCNAuth(t *testing.T) {
for i := range procs {
if procs[i] != nil {
procs[i].Stop()
+ procs[i].Close()
}
- os.RemoveAll(tmpdirs[i])
}
}()
// node 0 and 1 have a cert with the correct CN, node 2 doesn't
for i := range procs {
commonArgs := []string{
- binDir + "/etcd",
+ e2e.BinPath.Etcd,
"--name", fmt.Sprintf("e%d", i),
"--listen-client-urls", "http://0.0.0.0:0",
"--data-dir", tmpdirs[i],
"--advertise-client-urls", "http://0.0.0.0:0",
- "--listen-peer-urls", fmt.Sprintf("https://127.0.0.1:%d,https://127.0.0.1:%d", etcdProcessBasePort+i, etcdProcessBasePort+len(peers)+i),
- "--initial-advertise-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", etcdProcessBasePort+i),
+ "--listen-peer-urls", fmt.Sprintf("https://127.0.0.1:%d,https://127.0.0.1:%d", e2e.EtcdProcessBasePort+i, e2e.EtcdProcessBasePort+len(peers)+i),
+ "--initial-advertise-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", e2e.EtcdProcessBasePort+i),
"--initial-cluster", ic,
}
var args []string
if i <= 1 {
args = []string{
- "--peer-cert-file", certPath,
- "--peer-key-file", privateKeyPath,
- "--peer-client-cert-file", certPath,
- "--peer-client-key-file", privateKeyPath,
- "--peer-trusted-ca-file", caPath,
+ "--peer-cert-file", e2e.CertPath,
+ "--peer-key-file", e2e.PrivateKeyPath,
+ "--peer-client-cert-file", e2e.CertPath,
+ "--peer-client-key-file", e2e.PrivateKeyPath,
+ "--peer-trusted-ca-file", e2e.CaPath,
"--peer-client-cert-auth",
"--peer-cert-allowed-cn", "example.com",
}
} else {
args = []string{
- "--peer-cert-file", certPath2,
- "--peer-key-file", privateKeyPath2,
- "--peer-client-cert-file", certPath2,
- "--peer-client-key-file", privateKeyPath2,
- "--peer-trusted-ca-file", caPath,
+ "--peer-cert-file", e2e.CertPath2,
+ "--peer-key-file", e2e.PrivateKeyPath2,
+ "--peer-client-cert-file", e2e.CertPath2,
+ "--peer-client-key-file", e2e.PrivateKeyPath2,
+ "--peer-trusted-ca-file", e2e.CaPath,
"--peer-client-cert-auth",
"--peer-cert-allowed-cn", "example2.com",
}
@@ -183,38 +212,118 @@ func TestEtcdPeerCNAuth(t *testing.T) {
commonArgs = append(commonArgs, args...)
- p, err := spawnCmd(commonArgs)
- if err != nil {
- t.Fatal(err)
- }
+ p, err := e2e.SpawnCmd(commonArgs, nil)
+ require.NoError(t, err)
procs[i] = p
}
for i, p := range procs {
var expect []string
if i <= 1 {
- expect = etcdServerReadyLines
+ expect = e2e.EtcdServerReadyLines
} else {
expect = []string{"remote error: tls: bad certificate"}
}
- if err := waitReadyExpectProc(p, expect); err != nil {
- t.Fatal(err)
+ err := e2e.WaitReadyExpectProc(context.TODO(), p, expect)
+ require.NoError(t, err)
+ }
+}
+
+// TestEtcdPeerMultiCNAuth checks that the inter peer auth based on CN of cert is working correctly
+// when there are multiple allowed values for the CN.
+func TestEtcdPeerMultiCNAuth(t *testing.T) {
+ e2e.SkipInShortMode(t)
+
+ peers, tmpdirs := make([]string, 3), make([]string, 3)
+ for i := range peers {
+ peers[i] = fmt.Sprintf("e%d=https://127.0.0.1:%d", i, e2e.EtcdProcessBasePort+i)
+ tmpdirs[i] = t.TempDir()
+ }
+ ic := strings.Join(peers, ",")
+ procs := make([]*expect.ExpectProcess, len(peers))
+ defer func() {
+ for i := range procs {
+ if procs[i] != nil {
+ procs[i].Stop()
+ procs[i].Close()
+ }
}
+ }()
+
+ // all nodes have unique certs with different CNs
+ // node 0 and 1 have a cert with one of the correct CNs, node 2 doesn't
+ for i := range procs {
+ commonArgs := []string{
+ e2e.BinPath.Etcd,
+ "--name", fmt.Sprintf("e%d", i),
+ "--listen-client-urls", "http://0.0.0.0:0",
+ "--data-dir", tmpdirs[i],
+ "--advertise-client-urls", "http://0.0.0.0:0",
+ "--listen-peer-urls", fmt.Sprintf("https://127.0.0.1:%d,https://127.0.0.1:%d", e2e.EtcdProcessBasePort+i, e2e.EtcdProcessBasePort+len(peers)+i),
+ "--initial-advertise-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", e2e.EtcdProcessBasePort+i),
+ "--initial-cluster", ic,
+ }
+
+ var args []string
+ switch i {
+ case 0:
+ args = []string{
+ "--peer-cert-file", e2e.CertPath, // server.crt has CN "example.com".
+ "--peer-key-file", e2e.PrivateKeyPath,
+ "--peer-client-cert-file", e2e.CertPath,
+ "--peer-client-key-file", e2e.PrivateKeyPath,
+ "--peer-trusted-ca-file", e2e.CaPath,
+ "--peer-client-cert-auth",
+ "--peer-cert-allowed-cn", "example.com,example2.com",
+ }
+ case 1:
+ args = []string{
+ "--peer-cert-file", e2e.CertPath2, // server2.crt has CN "example2.com".
+ "--peer-key-file", e2e.PrivateKeyPath2,
+ "--peer-client-cert-file", e2e.CertPath2,
+ "--peer-client-key-file", e2e.PrivateKeyPath2,
+ "--peer-trusted-ca-file", e2e.CaPath,
+ "--peer-client-cert-auth",
+ "--peer-cert-allowed-cn", "example.com,example2.com",
+ }
+ default:
+ args = []string{
+ "--peer-cert-file", e2e.CertPath3, // server3.crt has CN "ca".
+ "--peer-key-file", e2e.PrivateKeyPath3,
+ "--peer-client-cert-file", e2e.CertPath3,
+ "--peer-client-key-file", e2e.PrivateKeyPath3,
+ "--peer-trusted-ca-file", e2e.CaPath,
+ "--peer-client-cert-auth",
+ "--peer-cert-allowed-cn", "example.com,example2.com",
+ }
+ }
+
+ commonArgs = append(commonArgs, args...)
+ p, err := e2e.SpawnCmd(commonArgs, nil)
+ require.NoError(t, err)
+ procs[i] = p
+ }
+
+ for i, p := range procs {
+ var expect []string
+ if i <= 1 {
+ expect = e2e.EtcdServerReadyLines
+ } else {
+ expect = []string{"remote error: tls: bad certificate"}
+ }
+ err := e2e.WaitReadyExpectProc(context.TODO(), p, expect)
+ require.NoError(t, err)
}
}
// TestEtcdPeerNameAuth checks that the inter peer auth based on cert name validation is working correctly.
func TestEtcdPeerNameAuth(t *testing.T) {
- skipInShortMode(t)
+ e2e.SkipInShortMode(t)
peers, tmpdirs := make([]string, 3), make([]string, 3)
for i := range peers {
- peers[i] = fmt.Sprintf("e%d=https://127.0.0.1:%d", i, etcdProcessBasePort+i)
- d, err := ioutil.TempDir("", fmt.Sprintf("e%d.etcd", i))
- if err != nil {
- t.Fatal(err)
- }
- tmpdirs[i] = d
+ peers[i] = fmt.Sprintf("e%d=https://127.0.0.1:%d", i, e2e.EtcdProcessBasePort+i)
+ tmpdirs[i] = t.TempDir()
}
ic := strings.Join(peers, ",")
@@ -223,6 +332,7 @@ func TestEtcdPeerNameAuth(t *testing.T) {
for i := range procs {
if procs[i] != nil {
procs[i].Stop()
+ procs[i].Close()
}
os.RemoveAll(tmpdirs[i])
}
@@ -231,30 +341,30 @@ func TestEtcdPeerNameAuth(t *testing.T) {
// node 0 and 1 have a cert with the correct certificate name, node 2 doesn't
for i := range procs {
commonArgs := []string{
- binDir + "/etcd",
+ e2e.BinPath.Etcd,
"--name", fmt.Sprintf("e%d", i),
"--listen-client-urls", "http://0.0.0.0:0",
"--data-dir", tmpdirs[i],
"--advertise-client-urls", "http://0.0.0.0:0",
- "--listen-peer-urls", fmt.Sprintf("https://127.0.0.1:%d,https://127.0.0.1:%d", etcdProcessBasePort+i, etcdProcessBasePort+len(peers)+i),
- "--initial-advertise-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", etcdProcessBasePort+i),
+ "--listen-peer-urls", fmt.Sprintf("https://127.0.0.1:%d,https://127.0.0.1:%d", e2e.EtcdProcessBasePort+i, e2e.EtcdProcessBasePort+len(peers)+i),
+ "--initial-advertise-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", e2e.EtcdProcessBasePort+i),
"--initial-cluster", ic,
}
var args []string
if i <= 1 {
args = []string{
- "--peer-cert-file", certPath,
- "--peer-key-file", privateKeyPath,
- "--peer-trusted-ca-file", caPath,
+ "--peer-cert-file", e2e.CertPath,
+ "--peer-key-file", e2e.PrivateKeyPath,
+ "--peer-trusted-ca-file", e2e.CaPath,
"--peer-client-cert-auth",
"--peer-cert-allowed-hostname", "localhost",
}
} else {
args = []string{
- "--peer-cert-file", certPath2,
- "--peer-key-file", privateKeyPath2,
- "--peer-trusted-ca-file", caPath,
+ "--peer-cert-file", e2e.CertPath2,
+ "--peer-key-file", e2e.PrivateKeyPath2,
+ "--peer-trusted-ca-file", e2e.CaPath,
"--peer-client-cert-auth",
"--peer-cert-allowed-hostname", "example2.com",
}
@@ -262,75 +372,366 @@ func TestEtcdPeerNameAuth(t *testing.T) {
commonArgs = append(commonArgs, args...)
- p, err := spawnCmd(commonArgs)
- if err != nil {
- t.Fatal(err)
- }
+ p, err := e2e.SpawnCmd(commonArgs, nil)
+ require.NoError(t, err)
procs[i] = p
}
for i, p := range procs {
var expect []string
if i <= 1 {
- expect = etcdServerReadyLines
+ expect = e2e.EtcdServerReadyLines
} else {
expect = []string{"client certificate authentication failed"}
}
- if err := waitReadyExpectProc(p, expect); err != nil {
- t.Fatal(err)
+ err := e2e.WaitReadyExpectProc(context.TODO(), p, expect)
+ require.NoError(t, err)
+ }
+}
+
+// TestEtcdPeerLocalAddr checks that the inter peer auth works when
+// the member LocalAddr is set.
+func TestEtcdPeerLocalAddr(t *testing.T) {
+ e2e.SkipInShortMode(t)
+
+ nodeIP, err := getLocalIP()
+ t.Log("Using node IP", nodeIP)
+ require.NoError(t, err)
+
+ peers, tmpdirs := make([]string, 3), make([]string, 3)
+
+ for i := range peers {
+ peerIP := nodeIP
+ if i == 0 {
+ peerIP = "127.0.0.1"
+ }
+ peers[i] = fmt.Sprintf("e%d=https://%s:%d", i, peerIP, e2e.EtcdProcessBasePort+i)
+ tmpdirs[i] = t.TempDir()
+ }
+ procs := make([]*expect.ExpectProcess, len(peers))
+ defer func() {
+ for i := range procs {
+ if procs[i] != nil {
+ procs[i].Stop()
+ procs[i].Close()
+ }
+ os.RemoveAll(tmpdirs[i])
+ }
+ }()
+
+ tempDir := t.TempDir()
+ caFile, certFiles, keyFiles, err := generateCertsForIPs(tempDir, []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP(nodeIP)})
+ require.NoError(t, err)
+
+ defer func() {
+ os.RemoveAll(tempDir)
+ }()
+
+ // node 0 (127.0.0.1) does not set `--experimental-set-member-localaddr`,
+ // while nodes 1 and 2 do.
+ //
+ // node 0's peer certificate is signed for 127.0.0.1, but it uses the host
+ // IP (by default) to communicate with peers, so they don't match.
+ // Accordingly, other peers will reject connections from node 0.
+ //
+ // Both node 1 and node 2's peer certificates are signed for the host IP,
+ // and they also communicate with peers using the host IP (explicitly set
+ // with --initial-advertise-peer-urls and
+ // --experimental-set-member-localaddr), so node 0 has no issue connecting
+ // to them.
+ //
+ // Refer to https://github.com/etcd-io/etcd/issues/17068.
+ for i := range procs {
+ peerIP := nodeIP
+ if i == 0 {
+ peerIP = "127.0.0.1"
+ }
+ ic := strings.Join(peers, ",")
+ commonArgs := []string{
+ e2e.BinPath.Etcd,
+ "--name", fmt.Sprintf("e%d", i),
+ "--listen-client-urls", "http://0.0.0.0:0",
+ "--data-dir", tmpdirs[i],
+ "--advertise-client-urls", "http://0.0.0.0:0",
+ "--initial-advertise-peer-urls", fmt.Sprintf("https://%s:%d", peerIP, e2e.EtcdProcessBasePort+i),
+ "--listen-peer-urls", fmt.Sprintf("https://%s:%d,https://%s:%d", peerIP, e2e.EtcdProcessBasePort+i, peerIP, e2e.EtcdProcessBasePort+len(peers)+i),
+ "--initial-cluster", ic,
+ }
+
+ var args []string
+ if i == 0 {
+ args = []string{
+ "--peer-cert-file", certFiles[0],
+ "--peer-key-file", keyFiles[0],
+ "--peer-trusted-ca-file", caFile,
+ "--peer-client-cert-auth",
+ }
+ } else {
+ args = []string{
+ "--peer-cert-file", certFiles[1],
+ "--peer-key-file", keyFiles[1],
+ "--peer-trusted-ca-file", caFile,
+ "--peer-client-cert-auth",
+ "--experimental-set-member-localaddr",
+ }
+ }
+
+ commonArgs = append(commonArgs, args...)
+
+ p, err := e2e.SpawnCmd(commonArgs, nil)
+ require.NoError(t, err)
+ procs[i] = p
+ }
+
+ for i, p := range procs {
+ var expect []string
+ if i == 0 {
+ expect = e2e.EtcdServerReadyLines
+ } else {
+ expect = []string{"x509: certificate is valid for 127.0.0.1, not "}
}
+ err := e2e.WaitReadyExpectProc(context.TODO(), p, expect)
+ require.NoError(t, err)
}
}
func TestGrpcproxyAndCommonName(t *testing.T) {
- skipInShortMode(t)
+ e2e.SkipInShortMode(t)
argsWithNonEmptyCN := []string{
- binDir + "/etcd",
+ e2e.BinPath.Etcd,
"grpc-proxy",
"start",
- "--cert", certPath2,
- "--key", privateKeyPath2,
- "--cacert", caPath,
+ "--cert", e2e.CertPath2,
+ "--key", e2e.PrivateKeyPath2,
+ "--cacert", e2e.CaPath,
}
argsWithEmptyCN := []string{
- binDir + "/etcd",
+ e2e.BinPath.Etcd,
"grpc-proxy",
"start",
- "--cert", certPath3,
- "--key", privateKeyPath3,
- "--cacert", caPath,
+ "--cert", e2e.CertPath3,
+ "--key", e2e.PrivateKeyPath3,
+ "--cacert", e2e.CaPath,
}
- err := spawnWithExpect(argsWithNonEmptyCN, "cert has non empty Common Name")
- if err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
+ err := e2e.SpawnWithExpect(argsWithNonEmptyCN, expect.ExpectedResponse{Value: "cert has non empty Common Name"})
+ require.ErrorContains(t, err, "cert has non empty Common Name")
- p, err := spawnCmd(argsWithEmptyCN)
+ p, err := e2e.SpawnCmd(argsWithEmptyCN, nil)
defer func() {
if p != nil {
p.Stop()
}
}()
- if err != nil {
- t.Fatal(err)
+ require.NoError(t, err)
+}
+
+func TestGrpcproxyAndListenCipherSuite(t *testing.T) {
+ e2e.SkipInShortMode(t)
+
+ cases := []struct {
+ name string
+ args []string
+ }{
+ {
+ name: "ArgsWithCipherSuites",
+ args: []string{
+ e2e.BinPath.Etcd,
+ "grpc-proxy",
+ "start",
+ "--listen-cipher-suites", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256",
+ },
+ },
+ {
+ name: "ArgsWithoutCipherSuites",
+ args: []string{
+ e2e.BinPath.Etcd,
+ "grpc-proxy",
+ "start",
+ "--listen-cipher-suites", "",
+ },
+ },
+ }
+
+ for _, test := range cases {
+ t.Run(test.name, func(t *testing.T) {
+ pw, err := e2e.SpawnCmd(test.args, nil)
+ require.NoError(t, err)
+ require.NoError(t, pw.Stop())
+ })
}
}
func TestBootstrapDefragFlag(t *testing.T) {
- skipInShortMode(t)
+ e2e.SkipInShortMode(t)
+
+ proc, err := e2e.SpawnCmd([]string{e2e.BinPath.Etcd, "--experimental-bootstrap-defrag-threshold-megabytes", "1000"}, nil)
+ require.NoError(t, err)
+ require.NoError(t, e2e.WaitReadyExpectProc(context.TODO(), proc, []string{"Skipping defragmentation"}))
+ require.NoError(t, proc.Stop())
+
+ // wait for the process to exit, otherwise the test will leak a goroutine
+ if err := proc.Close(); err != nil {
+ t.Logf("etcd process closed with error %v", err)
+ }
+}
- proc, err := spawnCmd([]string{binDir + "/etcd", "--experimental-bootstrap-defrag-threshold-megabytes", "1000"})
- if err != nil {
- t.Fatal(err)
+func TestSnapshotCatchupEntriesFlag(t *testing.T) {
+ e2e.SkipInShortMode(t)
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ proc, err := e2e.SpawnCmd([]string{e2e.BinPath.Etcd, "--experimental-snapshot-catchup-entries", "1000"}, nil)
+ require.NoError(t, err)
+ require.NoError(t, e2e.WaitReadyExpectProc(ctx, proc, []string{"\"snapshot-catchup-entries\":1000"}))
+ require.NoError(t, e2e.WaitReadyExpectProc(ctx, proc, []string{"serving client traffic"}))
+ require.NoError(t, proc.Stop())
+
+ // wait for the process to exit, otherwise the test will leak a goroutine
+ if err := proc.Close(); err != nil {
+ t.Logf("etcd process closed with error %v", err)
}
- if err = waitReadyExpectProc(proc, []string{"Skipping defragmentation"}); err != nil {
- t.Fatal(err)
+}
+
+// TestEtcdHealthyWithTinySnapshotCatchupEntries ensures a multi-node etcd cluster remains healthy with a snapshot catch-up entries setting of 1
+func TestEtcdHealthyWithTinySnapshotCatchupEntries(t *testing.T) {
+ e2e.BeforeTest(t)
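+ // A snapshot count of 1 and a catch-up entry budget of 1 force frequent snapshots, so lagging followers must recover via snapshot transfers while writes are in flight.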
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+ e2e.WithClusterSize(3),
+ e2e.WithSnapshotCount(1),
+ e2e.WithSnapshotCatchUpEntries(1),
+ )
+ require.NoErrorf(t, err, "could not start etcd process cluster (%v)", err)
+ t.Cleanup(func() {
+ if errC := epc.Close(); errC != nil {
+ t.Fatalf("error closing etcd processes (%v)", errC)
+ }
+ })
+
+ // simulate 10 clients writing to etcd in parallel, expecting no errors
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ g, ctx := errgroup.WithContext(ctx)
+ for i := 0; i < 10; i++ {
+ clientID := i
+ g.Go(func() error {
+ cc := epc.Etcdctl()
+ for j := 0; j < 100; j++ {
+ if err := cc.Put(ctx, "foo", fmt.Sprintf("bar%d", clientID), config.PutOptions{}); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
}
- if err = proc.Stop(); err != nil {
- t.Fatal(err)
+ require.NoError(t, g.Wait())
+}
+
+func TestEtcdTLSVersion(t *testing.T) {
+ e2e.SkipInShortMode(t)
+
+ d := t.TempDir()
+ proc, err := e2e.SpawnCmd(
+ []string{
+ e2e.BinPath.Etcd,
+ "--data-dir", d,
+ "--name", "e1",
+ "--listen-client-urls", "https://0.0.0.0:0",
+ "--advertise-client-urls", "https://0.0.0.0:0",
+ "--listen-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", e2e.EtcdProcessBasePort),
+ "--initial-advertise-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", e2e.EtcdProcessBasePort),
+ "--initial-cluster", fmt.Sprintf("e1=https://127.0.0.1:%d", e2e.EtcdProcessBasePort),
+ "--peer-cert-file", e2e.CertPath,
+ "--peer-key-file", e2e.PrivateKeyPath,
+ "--cert-file", e2e.CertPath2,
+ "--key-file", e2e.PrivateKeyPath2,
+
+ "--tls-min-version", "TLS1.2",
+ "--tls-max-version", "TLS1.3",
+ }, nil,
+ )
+ assert.NoError(t, err)
+ assert.NoErrorf(t, e2e.WaitReadyExpectProc(context.TODO(), proc, e2e.EtcdServerReadyLines), "did not receive expected output from etcd process")
+ assert.NoError(t, proc.Stop())
+
+ proc.Wait() // ensure the port has been released
+ proc.Close()
+}
+
+// TestEtcdDeprecatedFlags checks that etcd will print warning messages if deprecated flags are set.
+func TestEtcdDeprecatedFlags(t *testing.T) {
+ e2e.SkipInShortMode(t)
+
+ commonArgs := []string{
+ e2e.BinPath.Etcd,
+ "--name", "e1",
+ }
+
+ testCases := []struct {
+ name string
+ args []string
+ expectedMsg string
+ }{
+ {
+ name: "snapshot-count",
+ args: append(commonArgs, "--snapshot-count=100"),
+ expectedMsg: "--snapshot-count is deprecated in 3.6 and will be decommissioned in 3.7",
+ },
+ {
+ name: "max-snapshots",
+ args: append(commonArgs, "--max-snapshots=10"),
+ expectedMsg: "--max-snapshots is deprecated in 3.6 and will be decommissioned in 3.7",
+ },
+ {
+ name: "v2-deprecation",
+ args: append(commonArgs, "--v2-deprecation", "write-only-drop-data"),
+ expectedMsg: "--v2-deprecation is deprecated and scheduled for removal in v3.8. The default value is enforced, ignoring user input",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ proc, err := e2e.SpawnCmd(
+ tc.args, nil,
+ )
+ require.NoError(t, err)
+ require.NoError(t, e2e.WaitReadyExpectProc(context.TODO(), proc, []string{tc.expectedMsg}))
+ require.NoError(t, proc.Stop())
+
+ proc.Wait() // ensure the port has been released
+ proc.Close()
+ })
+ }
+}
+
+// TestV2DeprecationEnforceDefaultValue verifies that etcd enforces the default V2Deprecation level
+// and ignores user input.
+func TestV2DeprecationEnforceDefaultValue(t *testing.T) {
+ e2e.SkipInShortMode(t)
+
+ commonArgs := []string{
+ e2e.BinPath.Etcd,
+ "--name", "e1",
+ }
+
+ validV2DeprecationLevels := []string{"write-only", "write-only-drop-data", "gone"}
+ expectedDeprecationLevelMsg := `"v2-deprecation":"write-only"`
+
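+ // Whatever level is requested, etcd should log the enforced default level "write-only".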
+ for _, optionLevel := range validV2DeprecationLevels {
+ t.Run(optionLevel, func(t *testing.T) {
+ proc, err := e2e.SpawnCmd(
+ append(commonArgs, "--v2-deprecation", optionLevel), nil,
+ )
+ require.NoError(t, err)
+ require.NoError(t, e2e.WaitReadyExpectProc(context.TODO(), proc, []string{expectedDeprecationLevelMsg}))
+ require.NoError(t, proc.Stop())
+
+ proc.Wait() // ensure the port has been released
+ proc.Close()
+ })
}
}
diff --git a/tests/e2e/etcd_corrupt_test.go b/tests/e2e/etcd_corrupt_test.go
deleted file mode 100644
index edc95c0101b..00000000000
--- a/tests/e2e/etcd_corrupt_test.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package e2e
-
-import (
- "context"
- "errors"
- "fmt"
- "os"
- "testing"
- "time"
-
- bolt "go.etcd.io/bbolt"
- "go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/server/v3/datadir"
-)
-
-// TODO: test with embedded etcd in integration package
-
-func TestEtcdCorruptHash(t *testing.T) {
- // oldenv := os.Getenv("EXPECT_DEBUG")
- // defer os.Setenv("EXPECT_DEBUG", oldenv)
- // os.Setenv("EXPECT_DEBUG", "1")
-
- cfg := newConfigNoTLS()
-
- // trigger snapshot so that restart member can load peers from disk
- cfg.snapshotCount = 3
-
- testCtl(t, corruptTest, withQuorum(),
- withCfg(*cfg),
- withInitialCorruptCheck(),
- withCorruptFunc(corruptHash),
- )
-}
-
-func corruptTest(cx ctlCtx) {
- cx.t.Log("putting 10 keys...")
- for i := 0; i < 10; i++ {
- if err := ctlV3Put(cx, fmt.Sprintf("foo%05d", i), fmt.Sprintf("v%05d", i), ""); err != nil {
- if cx.dialTimeout > 0 && !isGRPCTimedout(err) {
- cx.t.Fatalf("putTest ctlV3Put error (%v)", err)
- }
- }
- }
- // enough time for all nodes sync on the same data
- cx.t.Log("sleeping 3sec to let nodes sync...")
- time.Sleep(3 * time.Second)
-
- cx.t.Log("connecting clientv3...")
- eps := cx.epc.EndpointsV3()
- cli1, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[1]}, DialTimeout: 3 * time.Second})
- if err != nil {
- cx.t.Fatal(err)
- }
- defer cli1.Close()
-
- sresp, err := cli1.Status(context.TODO(), eps[0])
- cx.t.Logf("checked status sresp:%v err:%v", sresp, err)
- if err != nil {
- cx.t.Fatal(err)
- }
- id0 := sresp.Header.GetMemberId()
-
- cx.t.Log("stopping etcd[0]...")
- cx.epc.procs[0].Stop()
-
- // corrupting first member by modifying backend offline.
- fp := datadir.ToBackendFileName(cx.epc.procs[0].Config().dataDirPath)
- cx.t.Logf("corrupting backend: %v", fp)
- if err = cx.corruptFunc(fp); err != nil {
- cx.t.Fatal(err)
- }
-
- cx.t.Log("restarting etcd[0]")
- ep := cx.epc.procs[0]
- proc, err := spawnCmd(append([]string{ep.Config().execPath}, ep.Config().args...))
- if err != nil {
- cx.t.Fatal(err)
- }
- defer proc.Stop()
-
- cx.t.Log("waiting for etcd[0] failure...")
- // restarting corrupted member should fail
- waitReadyExpectProc(proc, []string{fmt.Sprintf("etcdmain: %016x found data inconsistency with peers", id0)})
-}
-
-func corruptHash(fpath string) error {
- db, derr := bolt.Open(fpath, os.ModePerm, &bolt.Options{})
- if derr != nil {
- return derr
- }
- defer db.Close()
-
- return db.Update(func(tx *bolt.Tx) error {
- b := tx.Bucket([]byte("key"))
- if b == nil {
- return errors.New("got nil bucket for 'key'")
- }
- keys, vals := [][]byte{}, [][]byte{}
- c := b.Cursor()
- for k, v := c.First(); k != nil; k, v = c.Next() {
- keys = append(keys, k)
- var kv mvccpb.KeyValue
- if uerr := kv.Unmarshal(v); uerr != nil {
- return uerr
- }
- kv.Key[0]++
- kv.Value[0]++
- v2, v2err := kv.Marshal()
- if v2err != nil {
- return v2err
- }
- vals = append(vals, v2)
- }
- for i := range keys {
- if perr := b.Put(keys[i], vals[i]); perr != nil {
- return perr
- }
- }
- return nil
- })
-}
diff --git a/tests/e2e/etcd_grpcproxy_test.go b/tests/e2e/etcd_grpcproxy_test.go
new file mode 100644
index 00000000000..02174e89f62
--- /dev/null
+++ b/tests/e2e/etcd_grpcproxy_test.go
@@ -0,0 +1,144 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+func TestGrpcProxyAutoSync(t *testing.T) {
+ e2e.SkipInShortMode(t)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ epc, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithClusterSize(1))
+ require.NoError(t, err)
+ defer func() {
+ assert.NoError(t, epc.Close())
+ }()
+
+ var (
+ node1ClientURL = epc.Procs[0].Config().ClientURL
+ proxyClientURL = "127.0.0.1:32379"
+ )
+
+ // Run independent grpc-proxy instance
+ proxyProc, err := e2e.SpawnCmd([]string{
+ e2e.BinPath.Etcd, "grpc-proxy", "start",
+ "--advertise-client-url", proxyClientURL, "--listen-addr", proxyClientURL,
+ "--endpoints", node1ClientURL,
+ "--endpoints-auto-sync-interval", "1s",
+ }, nil)
+ require.NoError(t, err)
+ defer func() {
+ assert.NoError(t, proxyProc.Stop())
+ }()
+
+ proxyCtl, err := e2e.NewEtcdctl(e2e.ClientConfig{}, []string{proxyClientURL})
+ require.NoError(t, err)
+ err = proxyCtl.Put(ctx, "k1", "v1", config.PutOptions{})
+ require.NoError(t, err)
+
+ // Add and start second member
+ _, err = epc.StartNewProc(ctx, nil, t, false /* addAsLearner */)
+ require.NoError(t, err)
+
+ // Wait for auto sync of endpoints
+ err = waitForEndpointInLog(ctx, proxyProc, epc.Procs[1].Config().ClientURL)
+ require.NoError(t, err)
+
+ err = epc.CloseProc(ctx, func(proc e2e.EtcdProcess) bool {
+ return proc.Config().ClientURL == node1ClientURL
+ })
+ require.NoError(t, err)
+
+ var resp *clientv3.GetResponse
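+ // Right after the original endpoint is shut down, the proxy may transiently return
+ // ErrGRPCLeaderChanged; retry until the request is served via the auto-synced member.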
+ for i := 0; i < 10; i++ {
+ resp, err = proxyCtl.Get(ctx, "k1", config.GetOptions{})
+ if err != nil && strings.Contains(err.Error(), rpctypes.ErrGRPCLeaderChanged.Error()) {
+ time.Sleep(500 * time.Millisecond)
+ continue
+ }
+ break
+ }
+ require.NoError(t, err)
+
+ kvs := testutils.KeyValuesFromGetResponse(resp)
+ assert.Equal(t, []testutils.KV{{Key: "k1", Val: "v1"}}, kvs)
+}
+
+func TestGrpcProxyTLSVersions(t *testing.T) {
+ e2e.SkipInShortMode(t)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ epc, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithClusterSize(1))
+ require.NoError(t, err)
+ defer func() {
+ assert.NoError(t, epc.Close())
+ }()
+
+ var (
+ node1ClientURL = epc.Procs[0].Config().ClientURL
+ proxyClientURL = "127.0.0.1:42379"
+ )
+
+ // Run independent grpc-proxy instance
+ proxyProc, err := e2e.SpawnCmd([]string{
+ e2e.BinPath.Etcd, "grpc-proxy", "start",
+ "--advertise-client-url", proxyClientURL,
+ "--listen-addr", proxyClientURL,
+ "--endpoints", node1ClientURL,
+ "--endpoints-auto-sync-interval", "1s",
+ "--cert-file", e2e.CertPath2,
+ "--key-file", e2e.PrivateKeyPath2,
+ "--tls-min-version", "TLS1.2",
+ "--tls-max-version", "TLS1.3",
+ }, nil)
+ require.NoError(t, err)
+ defer func() {
+ assert.NoError(t, proxyProc.Stop())
+ }()
+
+ _, err = proxyProc.ExpectFunc(ctx, func(s string) bool {
+ return strings.Contains(s, "started gRPC proxy")
+ })
+ require.NoError(t, err)
+}
+
+func waitForEndpointInLog(ctx context.Context, proxyProc *expect.ExpectProcess, endpoint string) error {
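+ // Match on host:port only, since the endpoint may appear in the proxy log without the URL scheme.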
+ endpoint = strings.Replace(endpoint, "http://", "", 1)
+
+ ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
+ defer cancel()
+
+ _, err := proxyProc.ExpectFunc(ctx, func(s string) bool {
+ return strings.Contains(s, endpoint)
+ })
+
+ return err
+}
diff --git a/tests/e2e/etcd_mix_versions_test.go b/tests/e2e/etcd_mix_versions_test.go
new file mode 100644
index 00000000000..0ff55f67877
--- /dev/null
+++ b/tests/e2e/etcd_mix_versions_test.go
@@ -0,0 +1,204 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+type clusterTestCase struct {
+ name string
+ config *e2e.EtcdProcessClusterConfig
+}
+
+func clusterTestCases(size int) []clusterTestCase {
+ tcs := []clusterTestCase{
+ {
+ name: "CurrentVersion",
+ config: e2e.NewConfig(e2e.WithClusterSize(size)),
+ },
+ }
+ if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
+ return tcs
+ }
+
+ tcs = append(tcs,
+ clusterTestCase{
+ name: "LastVersion",
+ config: e2e.NewConfig(e2e.WithClusterSize(size), e2e.WithVersion(e2e.LastVersion)),
+ },
+ )
+ if size > 2 {
+ tcs = append(tcs,
+ clusterTestCase{
+ name: "MinorityLastVersion",
+ config: e2e.NewConfig(e2e.WithClusterSize(size), e2e.WithVersion(e2e.MinorityLastVersion)),
+ }, clusterTestCase{
+ name: "QuorumLastVersion",
+ config: e2e.NewConfig(e2e.WithClusterSize(size), e2e.WithVersion(e2e.QuorumLastVersion)),
+ },
+ )
+ }
+ return tcs
+}
+
+// TestMixVersionsSnapshotByAddingMember verifies that a mixed-version cluster sends snapshots correctly when a new member is added.
+func TestMixVersionsSnapshotByAddingMember(t *testing.T) {
+ for _, tc := range clusterTestCases(1) {
+ t.Run(tc.name+"-adding-new-member-of-current-version", func(t *testing.T) {
+ mixVersionsSnapshotTestByAddingMember(t, tc.config, e2e.CurrentVersion)
+ })
+ // etcd doesn't support adding a new member of an older version to
+ // a cluster running a higher version. For example, if the cluster
+ // version is 3.6.x, a new 3.5.x member can't join it. See:
+ // https://github.com/etcd-io/etcd/blob/3e903d0b12e399519a4013c52d4635ec8bdd6863/server/etcdserver/cluster_util.go#L222-L230
+ /*t.Run(tc.name+"-adding-new-member-of-last-version", func(t *testing.T) {
+ mixVersionsSnapshotTestByAddingMember(t, tc.config, e2e.LastVersion)
+ })*/
+ }
+}
+
+func mixVersionsSnapshotTestByAddingMember(t *testing.T, cfg *e2e.EtcdProcessClusterConfig, newInstanceVersion e2e.ClusterVersion) {
+ e2e.BeforeTest(t)
+
+ if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
+ t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease)
+ }
+
+ t.Logf("Create an etcd cluster with %d member", cfg.ClusterSize)
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+ e2e.WithConfig(cfg),
+ e2e.WithSnapshotCount(10),
+ )
+ require.NoErrorf(t, err, "failed to start etcd cluster")
+ defer func() {
+ derr := epc.Close()
+ require.NoErrorf(t, derr, "failed to close etcd cluster")
+ }()
+
+ t.Log("Writing 20 keys to the cluster (more than SnapshotCount entries to trigger at least a snapshot)")
+ writeKVs(t, epc.Etcdctl(), 0, 20)
+
+ t.Log("start a new etcd instance, which will receive a snapshot from the leader.")
+ newCfg := *epc.Cfg
+ newCfg.Version = newInstanceVersion
+ newCfg.ServerConfig.SnapshotCatchUpEntries = 10
+ t.Log("Starting a new etcd instance")
+ _, err = epc.StartNewProc(context.TODO(), &newCfg, t, false /* addAsLearner */)
+ require.NoErrorf(t, err, "failed to start the new etcd instance")
+ defer epc.CloseProc(context.TODO(), nil)
+
+ assertKVHash(t, epc)
+}
+
+func TestMixVersionsSnapshotByMockingPartition(t *testing.T) {
+ mockPartitionNodeIndex := 2
+ for _, tc := range clusterTestCases(3) {
+ t.Run(tc.name, func(t *testing.T) {
+ mixVersionsSnapshotTestByMockPartition(t, tc.config, mockPartitionNodeIndex)
+ })
+ }
+}
+
+func mixVersionsSnapshotTestByMockPartition(t *testing.T, cfg *e2e.EtcdProcessClusterConfig, mockPartitionNodeIndex int) {
+ e2e.BeforeTest(t)
+
+ if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
+ t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease)
+ }
+
+ clusterOptions := []e2e.EPClusterOption{
+ e2e.WithConfig(cfg),
+ e2e.WithSnapshotCount(10),
+ e2e.WithSnapshotCatchUpEntries(10),
+ }
+ t.Logf("Create an etcd cluster with %d member", cfg.ClusterSize)
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, clusterOptions...)
+ require.NoErrorf(t, err, "failed to start etcd cluster")
+ defer func() {
+ derr := epc.Close()
+ require.NoErrorf(t, derr, "failed to close etcd cluster")
+ }()
+ toPartitionedMember := epc.Procs[mockPartitionNodeIndex]
+
+ t.Log("Stop and restart the partitioned member")
+ err = toPartitionedMember.Stop()
+ require.NoError(t, err)
+
+ t.Log("Writing 20 keys to the cluster (more than SnapshotCount entries to trigger at least a snapshot)")
+ writeKVs(t, epc.Etcdctl(), 0, 20)
+
+ t.Log("Verify logs to check leader has saved snapshot")
+ leaderEPC := epc.Procs[epc.WaitLeader(t)]
+ e2e.AssertProcessLogs(t, leaderEPC, "saved snapshot")
+
+ t.Log("Restart the partitioned member")
+ err = toPartitionedMember.Restart(context.TODO())
+ require.NoError(t, err)
+
+ assertKVHash(t, epc)
+
+ leaderEPC = epc.Procs[epc.WaitLeader(t)]
+ t.Log("Verify logs to check snapshot be sent from leader to follower")
+ e2e.AssertProcessLogs(t, leaderEPC, "sent database snapshot")
+}
+
+func writeKVs(t *testing.T, etcdctl *e2e.EtcdctlV3, startIdx, endIdx int) {
+ for i := startIdx; i < endIdx; i++ {
+ key := fmt.Sprintf("key-%d", i)
+ value := fmt.Sprintf("value-%d", i)
+ err := etcdctl.Put(context.TODO(), key, value, config.PutOptions{})
+ require.NoErrorf(t, err, "failed to put %q", key)
+ }
+}
+
+func assertKVHash(t *testing.T, epc *e2e.EtcdProcessCluster) {
+ clusterSize := len(epc.Procs)
+ if clusterSize < 2 {
+ return
+ }
+ t.Log("Verify all nodes have exact same revision and hash")
+ assert.Eventually(t, func() bool {
+ hashKvs, err := epc.Etcdctl().HashKV(context.TODO(), 0)
+ if err != nil {
+ t.Logf("failed to get HashKV: %v", err)
+ return false
+ }
+ if len(hashKvs) != clusterSize {
+ t.Logf("expected %d hashkv responses, but got: %d", clusterSize, len(hashKvs))
+ return false
+ }
+ for i := 1; i < clusterSize; i++ {
+ if hashKvs[0].Header.Revision != hashKvs[i].Header.Revision {
+ t.Logf("Got different revisions, [%d, %d]", hashKvs[0].Header.Revision, hashKvs[1].Header.Revision)
+ return false
+ }
+
+ assert.Equal(t, hashKvs[0].Hash, hashKvs[i].Hash)
+ }
+ return true
+ }, 10*time.Second, 500*time.Millisecond)
+}
diff --git a/tests/e2e/etcd_process.go b/tests/e2e/etcd_process.go
deleted file mode 100644
index f744fa81cd9..00000000000
--- a/tests/e2e/etcd_process.go
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package e2e
-
-import (
- "fmt"
- "net/url"
- "os"
-
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
- "go.etcd.io/etcd/pkg/v3/expect"
- "go.uber.org/zap"
-)
-
-var (
- etcdServerReadyLines = []string{"ready to serve client requests"}
- binPath string
- ctlBinPath string
- utlBinPath string
-)
-
-// etcdProcess is a process that serves etcd requests.
-type etcdProcess interface {
- EndpointsV2() []string
- EndpointsV3() []string
- EndpointsMetrics() []string
-
- Start() error
- Restart() error
- Stop() error
- Close() error
- WithStopSignal(sig os.Signal) os.Signal
- Config() *etcdServerProcessConfig
-}
-
-type etcdServerProcess struct {
- cfg *etcdServerProcessConfig
- proc *expect.ExpectProcess
- donec chan struct{} // closed when Interact() terminates
-}
-
-type etcdServerProcessConfig struct {
- lg *zap.Logger
- execPath string
- args []string
- tlsArgs []string
-
- dataDirPath string
- keepDataDir bool
-
- name string
-
- purl url.URL
-
- acurl string
- murl string
-
- initialToken string
- initialCluster string
-}
-
-func newEtcdServerProcess(cfg *etcdServerProcessConfig) (*etcdServerProcess, error) {
- if !fileutil.Exist(cfg.execPath) {
- return nil, fmt.Errorf("could not find etcd binary: %s", cfg.execPath)
- }
- if !cfg.keepDataDir {
- if err := os.RemoveAll(cfg.dataDirPath); err != nil {
- return nil, err
- }
- }
- return &etcdServerProcess{cfg: cfg, donec: make(chan struct{})}, nil
-}
-
-func (ep *etcdServerProcess) EndpointsV2() []string { return []string{ep.cfg.acurl} }
-func (ep *etcdServerProcess) EndpointsV3() []string { return ep.EndpointsV2() }
-func (ep *etcdServerProcess) EndpointsMetrics() []string { return []string{ep.cfg.murl} }
-
-func (ep *etcdServerProcess) Start() error {
- if ep.proc != nil {
- panic("already started")
- }
- ep.cfg.lg.Info("starting server...", zap.String("name", ep.cfg.name))
- proc, err := spawnCmdWithLogger(ep.cfg.lg, append([]string{ep.cfg.execPath}, ep.cfg.args...))
- if err != nil {
- return err
- }
- ep.proc = proc
- err = ep.waitReady()
- if err == nil {
- ep.cfg.lg.Info("started server.", zap.String("name", ep.cfg.name))
- }
- return err
-}
-
-func (ep *etcdServerProcess) Restart() error {
- ep.cfg.lg.Info("restaring server...", zap.String("name", ep.cfg.name))
- if err := ep.Stop(); err != nil {
- return err
- }
- ep.donec = make(chan struct{})
- err := ep.Start()
- if err == nil {
- ep.cfg.lg.Info("restared server", zap.String("name", ep.cfg.name))
- }
- return err
-}
-
-func (ep *etcdServerProcess) Stop() (err error) {
- ep.cfg.lg.Info("stoping server...", zap.String("name", ep.cfg.name))
- if ep == nil || ep.proc == nil {
- return nil
- }
- err = ep.proc.Stop()
- if err != nil {
- return err
- }
- ep.proc = nil
- <-ep.donec
- ep.donec = make(chan struct{})
- if ep.cfg.purl.Scheme == "unix" || ep.cfg.purl.Scheme == "unixs" {
- err = os.Remove(ep.cfg.purl.Host + ep.cfg.purl.Path)
- if err != nil && !os.IsNotExist(err) {
- return err
- }
- }
- ep.cfg.lg.Info("stopped server.", zap.String("name", ep.cfg.name))
- return nil
-}
-
-func (ep *etcdServerProcess) Close() error {
- ep.cfg.lg.Info("closing server...", zap.String("name", ep.cfg.name))
- if err := ep.Stop(); err != nil {
- return err
- }
- if !ep.cfg.keepDataDir {
- ep.cfg.lg.Info("removing directory", zap.String("data-dir", ep.cfg.dataDirPath))
- return os.RemoveAll(ep.cfg.dataDirPath)
- }
- return nil
-}
-
-func (ep *etcdServerProcess) WithStopSignal(sig os.Signal) os.Signal {
- ret := ep.proc.StopSignal
- ep.proc.StopSignal = sig
- return ret
-}
-
-func (ep *etcdServerProcess) waitReady() error {
- defer close(ep.donec)
- return waitReadyExpectProc(ep.proc, etcdServerReadyLines)
-}
-
-func (ep *etcdServerProcess) Config() *etcdServerProcessConfig { return ep.cfg }
diff --git a/tests/e2e/etcd_release_upgrade_test.go b/tests/e2e/etcd_release_upgrade_test.go
index 78caef96fac..2722cacdb65 100644
--- a/tests/e2e/etcd_release_upgrade_test.go
+++ b/tests/e2e/etcd_release_upgrade_test.go
@@ -15,32 +15,35 @@
package e2e
import (
+ "context"
"fmt"
- "os"
"sync"
"testing"
"time"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
)
// TestReleaseUpgrade ensures that changes to master branch does not affect
// upgrade from latest etcd releases.
func TestReleaseUpgrade(t *testing.T) {
- lastReleaseBinary := binDir + "/etcd-last-release"
- if !fileutil.Exist(lastReleaseBinary) {
- t.Skipf("%q does not exist", lastReleaseBinary)
+ if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
+ t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease)
}
- BeforeTest(t)
-
- copiedCfg := newConfigNoTLS()
- copiedCfg.execPath = lastReleaseBinary
- copiedCfg.snapshotCount = 3
- copiedCfg.baseScheme = "unix" // to avoid port conflict
+ e2e.BeforeTest(t)
- epc, err := newEtcdProcessCluster(t, copiedCfg)
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+ e2e.WithVersion(e2e.LastVersion),
+ e2e.WithSnapshotCount(3),
+ e2e.WithBasePeerScheme("unix"), // to avoid port conflict
+ )
if err != nil {
t.Fatalf("could not start etcd process cluster (%v)", err)
}
@@ -50,11 +53,9 @@ func TestReleaseUpgrade(t *testing.T) {
}
}()
- os.Setenv("ETCDCTL_API", "3")
- defer os.Unsetenv("ETCDCTL_API")
cx := ctlCtx{
t: t,
- cfg: *newConfigNoTLS(),
+ cfg: *e2e.NewConfigNoTLS(),
dialTimeout: 7 * time.Second,
quorum: true,
epc: epc,
@@ -64,32 +65,30 @@ func TestReleaseUpgrade(t *testing.T) {
kvs = append(kvs, kv{key: fmt.Sprintf("foo%d", i), val: "bar"})
}
for i := range kvs {
- if err := ctlV3Put(cx, kvs[i].key, kvs[i].val, ""); err != nil {
+ if err = ctlV3Put(cx, kvs[i].key, kvs[i].val, ""); err != nil {
cx.t.Fatalf("#%d: ctlV3Put error (%v)", i, err)
}
}
t.Log("Cluster of etcd in old version running")
- for i := range epc.procs {
+ for i := range epc.Procs {
t.Logf("Stopping node: %v", i)
- if err := epc.procs[i].Stop(); err != nil {
+ if err = epc.Procs[i].Stop(); err != nil {
t.Fatalf("#%d: error closing etcd process (%v)", i, err)
}
t.Logf("Stopped node: %v", i)
- epc.procs[i].Config().execPath = binDir + "/etcd"
- epc.procs[i].Config().keepDataDir = true
+ epc.Procs[i].Config().ExecPath = e2e.BinPath.Etcd
+ epc.Procs[i].Config().KeepDataDir = true
t.Logf("Restarting node in the new version: %v", i)
- if err := epc.procs[i].Restart(); err != nil {
+ if err = epc.Procs[i].Restart(context.TODO()); err != nil {
t.Fatalf("error restarting etcd process (%v)", err)
}
t.Logf("Testing reads after node restarts: %v", i)
for j := range kvs {
- if err := ctlV3Get(cx, []string{kvs[j].key}, []kv{kvs[j]}...); err != nil {
- cx.t.Fatalf("#%d-%d: ctlV3Get error (%v)", i, j, err)
- }
+ require.NoErrorf(cx.t, ctlV3Get(cx, []string{kvs[j].key}, []kv{kvs[j]}...), "#%d-%d: ctlV3Get error", i, j)
}
t.Logf("Tested reads after node restarts: %v", i)
}
@@ -100,7 +99,7 @@ func TestReleaseUpgrade(t *testing.T) {
// new cluster version needs more time to upgrade
ver := version.Cluster(version.Version)
for i := 0; i < 7; i++ {
- if err = cURLGet(epc, cURLReq{endpoint: "/version", expected: `"etcdcluster":"` + ver}); err != nil {
+ if err = e2e.CURLGet(epc, e2e.CURLReq{Endpoint: "/version", Expected: expect.ExpectedResponse{Value: `"etcdcluster":"` + ver}}); err != nil {
t.Logf("#%d: %v is not ready yet (%v)", i, ver, err)
time.Sleep(time.Second)
continue
@@ -114,19 +113,17 @@ func TestReleaseUpgrade(t *testing.T) {
}
func TestReleaseUpgradeWithRestart(t *testing.T) {
- lastReleaseBinary := binDir + "/etcd-last-release"
- if !fileutil.Exist(lastReleaseBinary) {
- t.Skipf("%q does not exist", lastReleaseBinary)
+ if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
+ t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease)
}
- BeforeTest(t)
+ e2e.BeforeTest(t)
- copiedCfg := newConfigNoTLS()
- copiedCfg.execPath = lastReleaseBinary
- copiedCfg.snapshotCount = 10
- copiedCfg.baseScheme = "unix"
-
- epc, err := newEtcdProcessCluster(t, copiedCfg)
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+ e2e.WithVersion(e2e.LastVersion),
+ e2e.WithSnapshotCount(10),
+ e2e.WithBasePeerScheme("unix"),
+ )
if err != nil {
t.Fatalf("could not start etcd process cluster (%v)", err)
}
@@ -136,11 +133,9 @@ func TestReleaseUpgradeWithRestart(t *testing.T) {
}
}()
- os.Setenv("ETCDCTL_API", "3")
- defer os.Unsetenv("ETCDCTL_API")
cx := ctlCtx{
t: t,
- cfg: *newConfigNoTLS(),
+ cfg: *e2e.NewConfigNoTLS(),
dialTimeout: 7 * time.Second,
quorum: true,
epc: epc,
@@ -150,32 +145,24 @@ func TestReleaseUpgradeWithRestart(t *testing.T) {
kvs = append(kvs, kv{key: fmt.Sprintf("foo%d", i), val: "bar"})
}
for i := range kvs {
- if err := ctlV3Put(cx, kvs[i].key, kvs[i].val, ""); err != nil {
- cx.t.Fatalf("#%d: ctlV3Put error (%v)", i, err)
- }
+ require.NoErrorf(cx.t, ctlV3Put(cx, kvs[i].key, kvs[i].val, ""), "#%d: ctlV3Put error", i)
}
- for i := range epc.procs {
- if err := epc.procs[i].Stop(); err != nil {
- t.Fatalf("#%d: error closing etcd process (%v)", i, err)
- }
+ for i := range epc.Procs {
+ require.NoErrorf(t, epc.Procs[i].Stop(), "#%d: error closing etcd process", i)
}
var wg sync.WaitGroup
- wg.Add(len(epc.procs))
- for i := range epc.procs {
+ wg.Add(len(epc.Procs))
+ for i := range epc.Procs {
go func(i int) {
- epc.procs[i].Config().execPath = binDir + "/etcd"
- epc.procs[i].Config().keepDataDir = true
- if err := epc.procs[i].Restart(); err != nil {
- t.Errorf("error restarting etcd process (%v)", err)
- }
+ epc.Procs[i].Config().ExecPath = e2e.BinPath.Etcd
+ epc.Procs[i].Config().KeepDataDir = true
+ assert.NoErrorf(t, epc.Procs[i].Restart(context.TODO()), "error restarting etcd process")
wg.Done()
}(i)
}
wg.Wait()
- if err := ctlV3Get(cx, []string{kvs[0].key}, []kv{kvs[0]}...); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, ctlV3Get(cx, []string{kvs[0].key}, []kv{kvs[0]}...))
}
diff --git a/tests/e2e/etcd_spawn_cov.go b/tests/e2e/etcd_spawn_cov.go
deleted file mode 100644
index 9b24ac9d0c1..00000000000
--- a/tests/e2e/etcd_spawn_cov.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build cov
-// +build cov
-
-package e2e
-
-import (
- "fmt"
- "os"
- "strings"
- "syscall"
- "time"
-
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
- "go.etcd.io/etcd/pkg/v3/expect"
- "go.etcd.io/etcd/tests/v3/integration"
- "go.uber.org/zap"
-)
-
-const noOutputLineCount = 2 // cov-enabled binaries emit PASS and coverage count lines
-
-var (
- coverDir = integration.MustAbsPath(os.Getenv("COVERDIR"))
-)
-
-func spawnCmd(args []string) (*expect.ExpectProcess, error) {
- return spawnCmdWithLogger(zap.NewNop(), args)
-}
-
-func spawnCmdWithLogger(lg *zap.Logger, args []string) (*expect.ExpectProcess, error) {
- cmd := args[0]
- env := make([]string, 0)
- switch {
- case strings.HasSuffix(cmd, "/etcd"):
- cmd = cmd + "_test"
- case strings.HasSuffix(cmd, "/etcdctl"):
- cmd = cmd + "_test"
- case strings.HasSuffix(cmd, "/etcdutl"):
- cmd = cmd + "_test"
- case strings.HasSuffix(cmd, "/etcdctl3"):
- cmd = ctlBinPath + "_test"
- env = append(env, "ETCDCTL_API=3")
- }
-
- wd, err := os.Getwd()
- if err != nil {
- return nil, err
- }
-
- covArgs, err := getCovArgs()
- if err != nil {
- return nil, err
- }
- // when withFlagByEnv() is used in testCtl(), env variables for ctl is set to os.env.
- // they must be included in ctl_cov_env.
- env = append(env, os.Environ()...)
- all_args := append(args[1:], covArgs...)
- lg.Info("spawning process", zap.Strings("args", all_args), zap.String("working-dir", wd))
- ep, err := expect.NewExpectWithEnv(cmd, all_args, env)
- if err != nil {
- return nil, err
- }
- ep.StopSignal = syscall.SIGTERM
- return ep, nil
-}
-
-func getCovArgs() ([]string, error) {
- if !fileutil.Exist(coverDir) {
- return nil, fmt.Errorf("could not find coverage folder: %s", coverDir)
- }
- covArgs := []string{
- fmt.Sprintf("-test.coverprofile=e2e.%v.coverprofile", time.Now().UnixNano()),
- "-test.outputdir=" + coverDir,
- }
- return covArgs, nil
-}
diff --git a/tests/e2e/etcd_spawn_nocov.go b/tests/e2e/etcd_spawn_nocov.go
deleted file mode 100644
index b0e872fb220..00000000000
--- a/tests/e2e/etcd_spawn_nocov.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !cov
-// +build !cov
-
-package e2e
-
-import (
- "os"
- "strings"
-
- "go.etcd.io/etcd/pkg/v3/expect"
- "go.uber.org/zap"
-)
-
-const noOutputLineCount = 0 // regular binaries emit no extra lines
-
-func spawnCmd(args []string) (*expect.ExpectProcess, error) {
- return spawnCmdWithLogger(zap.NewNop(), args)
-}
-
-func spawnCmdWithLogger(lg *zap.Logger, args []string) (*expect.ExpectProcess, error) {
- wd, err := os.Getwd()
- if err != nil {
- return nil, err
- }
- if strings.HasSuffix(args[0], "/etcdctl3") {
- env := append(os.Environ(), "ETCDCTL_API=3")
- lg.Info("spawning process with ETCDCTL_API=3", zap.Strings("args", args), zap.String("working-dir", wd))
- return expect.NewExpectWithEnv(ctlBinPath, args[1:], env)
- }
- lg.Info("spawning process", zap.Strings("args", args), zap.String("working-dir", wd))
- return expect.NewExpect(args[0], args[1:]...)
-}
diff --git a/tests/e2e/failover_test.go b/tests/e2e/failover_test.go
new file mode 100644
index 00000000000..87860367348
--- /dev/null
+++ b/tests/e2e/failover_test.go
@@ -0,0 +1,204 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !cluster_proxy
+
+package e2e
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
+ "google.golang.org/grpc"
+ _ "google.golang.org/grpc/health"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+const (
+ // in sync with how kubernetes uses etcd
+ // https://github.com/kubernetes/kubernetes/blob/release-1.28/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go#L59-L71
+ keepaliveTime = 30 * time.Second
+ keepaliveTimeout = 10 * time.Second
+ dialTimeout = 20 * time.Second
+
+ clientRuntime = 10 * time.Second
+ requestTimeout = 100 * time.Millisecond
+)
+
+func TestFailoverOnDefrag(t *testing.T) {
+ tcs := []struct {
+ name string
+ clusterOptions []e2e.EPClusterOption
+ gRPCDialOptions []grpc.DialOption
+
+ // common assertion
+ expectedMinQPS float64
+ // happy case assertion
+ expectedMaxFailureRate float64
+ // negative case assertion
+ expectedMinFailureRate float64
+ }{
+ {
+ name: "defrag failover happy case",
+ clusterOptions: []e2e.EPClusterOption{
+ e2e.WithClusterSize(3),
+ e2e.WithExperimentalStopGRPCServiceOnDefrag(true),
+ e2e.WithGoFailEnabled(true),
+ },
+ gRPCDialOptions: []grpc.DialOption{
+ grpc.WithDisableServiceConfig(),
+ grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy": "round_robin", "healthCheckConfig": {"serviceName": ""}}`),
+ },
+ expectedMinQPS: 20,
+ expectedMaxFailureRate: 0.01,
+ },
+ {
+ name: "defrag blocks one-third of requests with stopGRPCServiceOnDefrag set to false",
+ clusterOptions: []e2e.EPClusterOption{
+ e2e.WithClusterSize(3),
+ e2e.WithExperimentalStopGRPCServiceOnDefrag(false),
+ e2e.WithGoFailEnabled(true),
+ },
+ gRPCDialOptions: []grpc.DialOption{
+ grpc.WithDisableServiceConfig(),
+ grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy": "round_robin", "healthCheckConfig": {"serviceName": ""}}`),
+ },
+ expectedMinQPS: 20,
+ expectedMinFailureRate: 0.25,
+ },
+ {
+ name: "defrag blocks one-third of requests with stopGRPCServiceOnDefrag set to true and client health check disabled",
+ clusterOptions: []e2e.EPClusterOption{
+ e2e.WithClusterSize(3),
+ e2e.WithExperimentalStopGRPCServiceOnDefrag(true),
+ e2e.WithGoFailEnabled(true),
+ },
+ expectedMinQPS: 20,
+ expectedMinFailureRate: 0.25,
+ },
+ {
+ name: "defrag failover happy case with feature gate",
+ clusterOptions: []e2e.EPClusterOption{
+ e2e.WithClusterSize(3),
+ e2e.WithServerFeatureGate("StopGRPCServiceOnDefrag", true),
+ e2e.WithGoFailEnabled(true),
+ },
+ gRPCDialOptions: []grpc.DialOption{
+ grpc.WithDisableServiceConfig(),
+ grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy": "round_robin", "healthCheckConfig": {"serviceName": ""}}`),
+ },
+ expectedMinQPS: 20,
+ expectedMaxFailureRate: 0.01,
+ },
+ {
+ name: "defrag blocks one-third of requests with StopGRPCServiceOnDefrag feature gate set to false",
+ clusterOptions: []e2e.EPClusterOption{
+ e2e.WithClusterSize(3),
+ e2e.WithServerFeatureGate("StopGRPCServiceOnDefrag", false),
+ e2e.WithGoFailEnabled(true),
+ },
+ gRPCDialOptions: []grpc.DialOption{
+ grpc.WithDisableServiceConfig(),
+ grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy": "round_robin", "healthCheckConfig": {"serviceName": ""}}`),
+ },
+ expectedMinQPS: 20,
+ expectedMinFailureRate: 0.25,
+ },
+ {
+ name: "defrag blocks one-third of requests with StopGRPCServiceOnDefrag feature gate set to true and client health check disabled",
+ clusterOptions: []e2e.EPClusterOption{
+ e2e.WithClusterSize(3),
+ e2e.WithServerFeatureGate("StopGRPCServiceOnDefrag", true),
+ e2e.WithGoFailEnabled(true),
+ },
+ expectedMinQPS: 20,
+ expectedMinFailureRate: 0.25,
+ },
+ }
+
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ e2e.BeforeTest(t)
+ clus, cerr := e2e.NewEtcdProcessCluster(context.TODO(), t, tc.clusterOptions...)
+ require.NoError(t, cerr)
+ t.Cleanup(func() { clus.Stop() })
+
+ endpoints := clus.EndpointsGRPC()
+
+ requestVolume, successfulRequestCount := 0, 0
+ start := time.Now()
+ g := new(errgroup.Group)
+ g.Go(func() (lastErr error) {
+ clusterClient, cerr := clientv3.New(clientv3.Config{
+ DialTimeout: dialTimeout,
+ DialKeepAliveTime: keepaliveTime,
+ DialKeepAliveTimeout: keepaliveTimeout,
+ Endpoints: endpoints,
+ DialOptions: tc.gRPCDialOptions,
+ })
+ if cerr != nil {
+ return cerr
+ }
+ defer clusterClient.Close()
+
+ timeout := time.After(clientRuntime)
+ for {
+ select {
+ case <-timeout:
+ return lastErr
+ default:
+ }
+ getContext, cancel := context.WithTimeout(context.Background(), requestTimeout)
+ _, err := clusterClient.Get(getContext, "health")
+ cancel()
+ requestVolume++
+ if err != nil {
+ lastErr = err
+ continue
+ }
+ successfulRequestCount++
+ }
+ })
+ triggerDefrag(t, clus.Procs[0])
+
+ err := g.Wait()
+ if err != nil {
+ t.Logf("etcd client failed to fail over, error (%v)", err)
+ }
+
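+ // time.Since(start) is a duration in nanoseconds, so scale by time.Second to get requests per second.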
+ qps := float64(requestVolume) / float64(time.Since(start)) * float64(time.Second)
+ failureRate := 1 - float64(successfulRequestCount)/float64(requestVolume)
+ t.Logf("request failure rate is %.2f%%, qps is %.2f requests/second", failureRate*100, qps)
+
+ require.GreaterOrEqual(t, qps, tc.expectedMinQPS)
+ if tc.expectedMaxFailureRate != 0.0 {
+ require.LessOrEqual(t, failureRate, tc.expectedMaxFailureRate)
+ }
+ if tc.expectedMinFailureRate != 0.0 {
+ require.GreaterOrEqual(t, failureRate, tc.expectedMinFailureRate)
+ }
+ })
+ }
+}
+
+func triggerDefrag(t *testing.T, member e2e.EtcdProcess) {
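+ // The defragBeforeCopy failpoint sleeps for 10s, keeping the member in defragmentation
+ // long enough for the client traffic above to observe whether requests fail over.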
+ require.NoError(t, member.Failpoints().SetupHTTP(context.Background(), "defragBeforeCopy", `sleep("10s")`))
+ require.NoError(t, member.Etcdctl().Defragment(context.Background(), config.DefragOption{Timeout: time.Minute}))
+}
diff --git a/tests/e2e/gateway_test.go b/tests/e2e/gateway_test.go
index 9f48a522543..60020761088 100644
--- a/tests/e2e/gateway_test.go
+++ b/tests/e2e/gateway_test.go
@@ -15,46 +15,41 @@
package e2e
import (
- "os"
+ "context"
"strings"
"testing"
+ "github.com/stretchr/testify/require"
+
"go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
)
-var (
- defaultGatewayEndpoint = "127.0.0.1:23790"
-)
+var defaultGatewayEndpoint = "127.0.0.1:23790"
func TestGateway(t *testing.T) {
- ec, err := newEtcdProcessCluster(t, newConfigNoTLS())
- if err != nil {
- t.Fatal(err)
- }
+ ec, err := e2e.NewEtcdProcessCluster(context.TODO(), t)
+ require.NoError(t, err)
defer ec.Stop()
- eps := strings.Join(ec.EndpointsV3(), ",")
+ eps := strings.Join(ec.EndpointsGRPC(), ",")
p := startGateway(t, eps)
- defer p.Stop()
-
- os.Setenv("ETCDCTL_API", "3")
- defer os.Unsetenv("ETCDCTL_API")
+ defer func() {
+ p.Stop()
+ p.Close()
+ }()
- err = spawnWithExpect([]string{ctlBinPath, "--endpoints=" + defaultGatewayEndpoint, "put", "foo", "bar"}, "OK\r\n")
+ err = e2e.SpawnWithExpect([]string{e2e.BinPath.Etcdctl, "--endpoints=" + defaultGatewayEndpoint, "put", "foo", "bar"}, expect.ExpectedResponse{Value: "OK\r\n"})
if err != nil {
t.Errorf("failed to finish put request through gateway: %v", err)
}
}
func startGateway(t *testing.T, endpoints string) *expect.ExpectProcess {
- p, err := expect.NewExpect(binPath, "gateway", "--endpoints="+endpoints, "start")
- if err != nil {
- t.Fatal(err)
- }
+ p, err := expect.NewExpect(e2e.BinPath.Etcd, "gateway", "--endpoints="+endpoints, "start")
+ require.NoError(t, err)
_, err = p.Expect("ready to proxy client requests")
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
return p
}
diff --git a/tests/e2e/graceful_shutdown_test.go b/tests/e2e/graceful_shutdown_test.go
new file mode 100644
index 00000000000..b612a5c1b6a
--- /dev/null
+++ b/tests/e2e/graceful_shutdown_test.go
@@ -0,0 +1,115 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/framework/interfaces"
+ "go.etcd.io/raft/v3"
+)
+
+func TestGracefulShutdown(t *testing.T) {
+ tcs := []struct {
+ name string
+ clusterSize int
+ }{
+ {
+ name: "clusterSize3",
+ clusterSize: 3,
+ },
+ {
+ name: "clusterSize5",
+ clusterSize: 5,
+ },
+ }
+
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ testRunner := e2e.NewE2eRunner()
+ testRunner.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ clus := testRunner.NewCluster(ctx, t, config.WithClusterSize(tc.clusterSize))
+ // clean up orphaned resources like closing member client.
+ defer clus.Close()
+ // shutdown each etcd member process sequentially
+ // and start from old leader, (new leader), (follower)
+ tryShutdownLeader(ctx, t, clus.Members())
+ })
+ }
+}
+
+// tryShutdownLeader stops the etcd member if it is the leader.
+// It also asserts that stopping the leader takes no longer than 1.5 seconds and that the leader ID changes within 500ms.
+func tryShutdownLeader(ctx context.Context, t *testing.T, members []interfaces.Member) {
+ quorum := len(members)/2 + 1
+ for len(members) > quorum {
+ leader, leaderID, term, followers := getLeader(ctx, t, members)
+ stopped := make(chan error, 1)
+ go func() {
+ // each etcd server will wait up to 1 second to close all idle connections in the peer handler.
+ start := time.Now()
+ leader.Stop()
+ took := time.Since(start)
+ if took > 1500*time.Millisecond {
+ stopped <- fmt.Errorf("leader stop took %v, which is longer than 1.5 seconds", took)
+ return
+ }
+ stopped <- nil
+ }()
+
+ // etcd election timeout could range from 1s to 2s without explicit leadership transfer.
+ // assert leader ID has been changed within 500ms
+ time.Sleep(500 * time.Millisecond)
+ resps, err := followers[0].Client().Status(ctx)
+ require.NoError(t, err)
+ require.NotEqual(t, raft.None, leaderID)
+ require.Equal(t, term+1, resps[0].RaftTerm)
+ require.NotEqualf(t, resps[0].Leader, leaderID, "expect old leaderID %x changed to new leader ID %x", leaderID, resps[0].Leader)
+
+ err = <-stopped
+ require.NoError(t, err)
+
+ members = followers
+ }
+}
+
+func getLeader(ctx context.Context, t *testing.T, members []interfaces.Member) (leader interfaces.Member, leaderID, term uint64, followers []interfaces.Member) {
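+ // A member is the leader when the leader ID it reports equals its own member ID.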
+ leaderIdx := -1
+ for i, m := range members {
+ mc := m.Client()
+ sresps, err := mc.Status(ctx)
+ require.NoError(t, err)
+ if sresps[0].Leader == sresps[0].Header.MemberId {
+ leaderIdx = i
+ leaderID = sresps[0].Leader
+ term = sresps[0].RaftTerm
+ break
+ }
+ }
+ if leaderIdx == -1 {
+ return nil, 0, 0, members
+ }
+ leader = members[leaderIdx]
+ return leader, leaderID, term, append(members[:leaderIdx], members[leaderIdx+1:]...)
+}
diff --git a/tests/e2e/hashkv_test.go b/tests/e2e/hashkv_test.go
new file mode 100644
index 00000000000..6c81e1cc7dd
--- /dev/null
+++ b/tests/e2e/hashkv_test.go
@@ -0,0 +1,234 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !cluster_proxy
+
+package e2e
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+func TestVerifyHashKVAfterCompact(t *testing.T) {
+ scenarios := []struct {
+ clusterVersion e2e.ClusterVersion
+ keys []string // used for data generators
+ }{
+ {
+ clusterVersion: e2e.CurrentVersion,
+ keys: []string{"key0"},
+ },
+ {
+ clusterVersion: e2e.CurrentVersion,
+ keys: []string{"key0", "key1"},
+ },
+ {
+ clusterVersion: e2e.QuorumLastVersion,
+ keys: []string{"key0"},
+ },
+ {
+ clusterVersion: e2e.QuorumLastVersion,
+ keys: []string{"key0", "key1"},
+ },
+ }
+
+ for _, compactedOnTombstoneRev := range []bool{false, true} {
+ for _, scenario := range scenarios {
+ t.Run(fmt.Sprintf("compactedOnTombstone=%v - %s - Keys=%v", compactedOnTombstoneRev, scenario.clusterVersion, scenario.keys), func(t *testing.T) {
+ e2e.BeforeTest(t)
+
+ if scenario.clusterVersion != e2e.CurrentVersion {
+ if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
+ t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease)
+ }
+ }
+
+ ctx := context.Background()
+
+ cfg := e2e.NewConfigClientTLS()
+ clus, err := e2e.NewEtcdProcessCluster(ctx, t,
+ e2e.WithConfig(cfg),
+ e2e.WithClusterSize(3),
+ e2e.WithVersion(scenario.clusterVersion))
+ require.NoError(t, err)
+
+ t.Cleanup(func() { clus.Close() })
+
+ tombstoneRevs, latestRev := populateDataForHashKV(t, clus, cfg.Client, scenario.keys)
+
+ compactedOnRev := tombstoneRevs[0]
+
+ // If compaction revision isn't a tombstone, select a revision in the middle of two tombstones.
+ if !compactedOnTombstoneRev {
+ compactedOnRev = (tombstoneRevs[0] + tombstoneRevs[1]) / 2
+ require.Greater(t, compactedOnRev, tombstoneRevs[0])
+ require.Greater(t, tombstoneRevs[1], compactedOnRev)
+ }
+
+ cli, err := e2e.NewEtcdctl(cfg.Client, clus.EndpointsGRPC())
+ require.NoError(t, err)
+
+ t.Logf("COMPACT on rev=%d", compactedOnRev)
+ _, err = cli.Compact(ctx, compactedOnRev, config.CompactOption{Physical: true})
+ require.NoError(t, err)
+
+ for rev := compactedOnRev; rev <= latestRev; rev++ {
+ verifyConsistentHashKVAcrossAllMembers(t, cli, rev)
+ }
+ })
+ }
+ }
+}
+
+func TestVerifyHashKVAfterTwoCompactionsOnTombstone_MixVersions(t *testing.T) {
+ e2e.BeforeTest(t)
+
+ if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
+ t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease)
+ }
+
+ ctx := context.Background()
+
+ cfg := e2e.NewConfigClientTLS()
+ clus, err := e2e.NewEtcdProcessCluster(ctx, t,
+ e2e.WithConfig(cfg),
+ e2e.WithClusterSize(3),
+ e2e.WithVersion(e2e.QuorumLastVersion))
+ require.NoError(t, err)
+ t.Cleanup(func() { clus.Close() })
+
+ tombstoneRevs, latestRev := populateDataForHashKV(t, clus, cfg.Client, []string{"key0"})
+
+ cli, err := e2e.NewEtcdctl(cfg.Client, clus.EndpointsGRPC())
+ require.NoError(t, err)
+
+ firstCompactOnRev := tombstoneRevs[0]
+ t.Logf("COMPACT rev=%d", firstCompactOnRev)
+ _, err = cli.Compact(ctx, firstCompactOnRev, config.CompactOption{Physical: true})
+ require.NoError(t, err)
+
+ secondCompactOnRev := tombstoneRevs[1]
+ t.Logf("COMPACT rev=%d", secondCompactOnRev)
+ _, err = cli.Compact(ctx, secondCompactOnRev, config.CompactOption{Physical: true})
+ require.NoError(t, err)
+
+ for rev := secondCompactOnRev; rev <= latestRev; rev++ {
+ verifyConsistentHashKVAcrossAllMembers(t, cli, rev)
+ }
+}
+
+func TestVerifyHashKVAfterCompactionOnLastTombstone_MixVersions(t *testing.T) {
+ e2e.BeforeTest(t)
+
+ if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
+ t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease)
+ }
+
+ for _, keys := range [][]string{
+ {"key0"},
+ {"key0", "key1"},
+ } {
+ t.Run(fmt.Sprintf("#%v", keys), func(t *testing.T) {
+ ctx := context.Background()
+
+ cfg := e2e.NewConfigClientTLS()
+ clus, err := e2e.NewEtcdProcessCluster(ctx, t,
+ e2e.WithConfig(cfg),
+ e2e.WithClusterSize(3),
+ e2e.WithVersion(e2e.QuorumLastVersion))
+ require.NoError(t, err)
+ t.Cleanup(func() { clus.Close() })
+
+ tombstoneRevs, latestRev := populateDataForHashKV(t, clus, cfg.Client, keys)
+
+ cli, err := e2e.NewEtcdctl(cfg.Client, clus.EndpointsGRPC())
+ require.NoError(t, err)
+
+ compactOnRev := tombstoneRevs[len(tombstoneRevs)-1]
+ t.Logf("COMPACT rev=%d", compactOnRev)
+ _, err = cli.Compact(ctx, compactOnRev, config.CompactOption{Physical: true})
+ require.NoError(t, err)
+
+ for rev := compactOnRev; rev <= latestRev; rev++ {
+ verifyConsistentHashKVAcrossAllMembers(t, cli, rev)
+ }
+ })
+ }
+}
+
+// populateDataForHashKV populates some sample data, and returns a slice of tombstone
+// revisions and the latest revision.
+func populateDataForHashKV(t *testing.T, clus *e2e.EtcdProcessCluster, clientCfg e2e.ClientConfig, keys []string) ([]int64, int64) {
+ c := newClient(t, clus.EndpointsGRPC(), clientCfg)
+ defer c.Close()
+
+ ctx := context.Background()
+ totalOperations := 40
+
+ var (
+ tombStoneRevs []int64
+ latestRev int64
+ )
+
+ deleteStep := 10 // submit a delete operation on every 10 operations
+ for i := 1; i <= totalOperations; i++ {
+ if i%deleteStep == 0 {
+ t.Logf("Deleting key=%s", keys[0]) // Only delete the first key for simplicity
+ resp, derr := c.Delete(ctx, keys[0])
+ require.NoError(t, derr)
+ latestRev = resp.Header.Revision
+ tombStoneRevs = append(tombStoneRevs, resp.Header.Revision)
+ continue
+ }
+
+ value := fmt.Sprintf("%d", i)
+ var ops []clientv3.Op
+ for _, key := range keys {
+ ops = append(ops, clientv3.OpPut(key, value))
+ }
+
+ t.Logf("Writing keys: %v, value: %s", keys, value)
+ resp, terr := c.Txn(ctx).Then(ops...).Commit()
+ require.NoError(t, terr)
+ require.True(t, resp.Succeeded)
+ require.Len(t, resp.Responses, len(ops))
+ latestRev = resp.Header.Revision
+ }
+ return tombStoneRevs, latestRev
+}
+
+func verifyConsistentHashKVAcrossAllMembers(t *testing.T, cli *e2e.EtcdctlV3, hashKVOnRev int64) {
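+ // cli.HashKV returns one response per endpoint; at the same revision every member must report the same hash.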
+ ctx := context.Background()
+
+ t.Logf("HashKV on rev=%d", hashKVOnRev)
+ resp, err := cli.HashKV(ctx, hashKVOnRev)
+ require.NoError(t, err)
+
+ require.Greater(t, len(resp), 1)
+ require.NotEqual(t, 0, resp[0].Hash)
+ t.Logf("One Hash value is %d", resp[0].Hash)
+
+ for i := 1; i < len(resp); i++ {
+ require.Equal(t, resp[0].Hash, resp[i].Hash)
+ }
+}
diff --git a/tests/e2e/http_health_check_test.go b/tests/e2e/http_health_check_test.go
new file mode 100644
index 00000000000..86b41bfd654
--- /dev/null
+++ b/tests/e2e/http_health_check_test.go
@@ -0,0 +1,438 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !cluster_proxy
+
+package e2e
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "path"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/server/v3/storage/mvcc/testutil"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+const (
+ healthCheckTimeout = 2 * time.Second
+ putCommandTimeout = 200 * time.Millisecond
+)
+
+type healthCheckConfig struct {
+ url string
+ expectedStatusCode int
+ expectedTimeoutError bool
+ expectedRespSubStrings []string
+}
+
+type injectFailure func(ctx context.Context, t *testing.T, clus *e2e.EtcdProcessCluster, duration time.Duration)
+
+func TestHTTPHealthHandler(t *testing.T) {
+ e2e.BeforeTest(t)
+ client := &http.Client{}
+ tcs := []struct {
+ name string
+ injectFailure injectFailure
+ clusterOptions []e2e.EPClusterOption
+ healthChecks []healthCheckConfig
+ }{
+ {
+ name: "no failures", // happy case
+ clusterOptions: []e2e.EPClusterOption{e2e.WithClusterSize(1)},
+ healthChecks: []healthCheckConfig{
+ {
+ url: "/health",
+ expectedStatusCode: http.StatusOK,
+ },
+ },
+ },
+ {
+ name: "activated no space alarm",
+ injectFailure: triggerNoSpaceAlarm,
+ clusterOptions: []e2e.EPClusterOption{e2e.WithClusterSize(1), e2e.WithQuotaBackendBytes(int64(13 * os.Getpagesize()))},
+ healthChecks: []healthCheckConfig{
+ {
+ url: "/health",
+ expectedStatusCode: http.StatusServiceUnavailable,
+ },
+ {
+ url: "/health?exclude=NOSPACE",
+ expectedStatusCode: http.StatusOK,
+ },
+ },
+ },
+ {
+ name: "overloaded server slow apply",
+ injectFailure: triggerSlowApply,
+ clusterOptions: []e2e.EPClusterOption{e2e.WithClusterSize(3), e2e.WithGoFailEnabled(true)},
+ healthChecks: []healthCheckConfig{
+ {
+ url: "/health?serializable=true",
+ expectedStatusCode: http.StatusOK,
+ },
+ {
+ url: "/health?serializable=false",
+ expectedTimeoutError: true,
+ },
+ },
+ },
+ {
+ name: "network partitioned",
+ injectFailure: blackhole,
+ clusterOptions: []e2e.EPClusterOption{e2e.WithClusterSize(3), e2e.WithIsPeerTLS(true), e2e.WithPeerProxy(true)},
+ healthChecks: []healthCheckConfig{
+ {
+ url: "/health?serializable=true",
+ expectedStatusCode: http.StatusOK,
+ },
+ {
+ url: "/health?serializable=false",
+ expectedTimeoutError: true,
+ expectedStatusCode: http.StatusServiceUnavailable,
+ // old leader may return "etcdserver: leader changed" error with 503 in ReadIndex leaderChangedNotifier
+ },
+ },
+ },
+ {
+ name: "raft loop deadlock",
+ injectFailure: triggerRaftLoopDeadLock,
+ clusterOptions: []e2e.EPClusterOption{e2e.WithClusterSize(1), e2e.WithGoFailEnabled(true)},
+ healthChecks: []healthCheckConfig{
+ {
+ // the current kubeadm etcd liveness check fails to detect a raft loop deadlock in steady state
+ // ref. https://github.com/kubernetes/kubernetes/blob/master/cmd/kubeadm/app/phases/etcd/local.go#L225-L226
+ // the current liveness probe depends on the etcd /health check, which has a flaw that the new /livez check should resolve.
+ url: "/health?serializable=true",
+ expectedStatusCode: http.StatusOK,
+ },
+ {
+ url: "/health?serializable=false",
+ expectedTimeoutError: true,
+ },
+ },
+ },
+ // verify that auth enabled serializable read must go through mvcc
+ {
+ name: "slow buffer write back with auth enabled",
+ injectFailure: triggerSlowBufferWriteBackWithAuth,
+ clusterOptions: []e2e.EPClusterOption{e2e.WithClusterSize(1), e2e.WithGoFailEnabled(true)},
+ healthChecks: []healthCheckConfig{
+ {
+ url: "/health?serializable=true",
+ expectedTimeoutError: true,
+ },
+ },
+ },
+ }
+
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+ defer cancel()
+ clus, err := e2e.NewEtcdProcessCluster(ctx, t, tc.clusterOptions...)
+ require.NoError(t, err)
+ defer clus.Close()
+ testutils.ExecuteUntil(ctx, t, func() {
+ if tc.injectFailure != nil {
+ // guarantee that the failure point stays active until all the health checks time out.
+ duration := time.Duration(len(tc.healthChecks)+1) * healthCheckTimeout
+ tc.injectFailure(ctx, t, clus, duration)
+ }
+
+ for _, hc := range tc.healthChecks {
+ requestURL := clus.Procs[0].EndpointsHTTP()[0] + hc.url
+ t.Logf("health check URL is %s", requestURL)
+ doHealthCheckAndVerify(t, client, requestURL, hc.expectedTimeoutError, hc.expectedStatusCode, hc.expectedRespSubStrings)
+ }
+ })
+ })
+ }
+}
+
+var defaultHealthCheckConfigs = []healthCheckConfig{
+ {
+ url: "/livez",
+ expectedStatusCode: http.StatusOK,
+ expectedRespSubStrings: []string{`ok`},
+ },
+ {
+ url: "/readyz",
+ expectedStatusCode: http.StatusOK,
+ expectedRespSubStrings: []string{`ok`},
+ },
+ {
+ url: "/livez?verbose=true",
+ expectedStatusCode: http.StatusOK,
+ expectedRespSubStrings: []string{`[+]serializable_read ok`},
+ },
+ {
+ url: "/readyz?verbose=true",
+ expectedStatusCode: http.StatusOK,
+ expectedRespSubStrings: []string{
+ `[+]serializable_read ok`,
+ `[+]data_corruption ok`,
+ },
+ },
+}
+
+func TestHTTPLivezReadyzHandler(t *testing.T) {
+ e2e.BeforeTest(t)
+ client := &http.Client{}
+ tcs := []struct {
+ name string
+ injectFailure injectFailure
+ clusterOptions []e2e.EPClusterOption
+ healthChecks []healthCheckConfig
+ }{
+ {
+ name: "no failures", // happy case
+ clusterOptions: []e2e.EPClusterOption{e2e.WithClusterSize(1)},
+ healthChecks: defaultHealthCheckConfigs,
+ },
+ {
+ name: "activated no space alarm",
+ injectFailure: triggerNoSpaceAlarm,
+ clusterOptions: []e2e.EPClusterOption{e2e.WithClusterSize(1), e2e.WithQuotaBackendBytes(int64(13 * os.Getpagesize()))},
+ healthChecks: defaultHealthCheckConfigs,
+ },
+ // Readiness is not an indicator of performance. Slow response is not covered by readiness.
+ // refer to https://tinyurl.com/livez-readyz-design-doc or https://github.com/etcd-io/etcd/issues/16007#issuecomment-1726541091 in case tinyurl is down.
+ {
+ name: "overloaded server slow apply",
+ injectFailure: triggerSlowApply,
+ clusterOptions: []e2e.EPClusterOption{e2e.WithClusterSize(3), e2e.WithGoFailEnabled(true)},
+ // TODO expected behavior of readyz check should be 200 after ReadIndex check is implemented to replace linearizable read.
+ healthChecks: []healthCheckConfig{
+ {
+ url: "/livez",
+ expectedStatusCode: http.StatusOK,
+ },
+ {
+ url: "/readyz",
+ expectedTimeoutError: true,
+ expectedStatusCode: http.StatusServiceUnavailable,
+ },
+ },
+ },
+ {
+ name: "network partitioned",
+ injectFailure: blackhole,
+ clusterOptions: []e2e.EPClusterOption{e2e.WithClusterSize(3), e2e.WithIsPeerTLS(true), e2e.WithPeerProxy(true)},
+ healthChecks: []healthCheckConfig{
+ {
+ url: "/livez",
+ expectedStatusCode: http.StatusOK,
+ },
+ {
+ url: "/readyz",
+ expectedTimeoutError: true,
+ expectedStatusCode: http.StatusServiceUnavailable,
+ expectedRespSubStrings: []string{
+ `[-]linearizable_read failed: etcdserver: leader changed`,
+ },
+ },
+ },
+ },
+ {
+ name: "raft loop deadlock",
+ injectFailure: triggerRaftLoopDeadLock,
+ clusterOptions: []e2e.EPClusterOption{e2e.WithClusterSize(1), e2e.WithGoFailEnabled(true)},
+ // TODO expected behavior of livez check should be 503 or timeout after RaftLoopDeadLock check is implemented.
+ healthChecks: []healthCheckConfig{
+ {
+ url: "/livez",
+ expectedStatusCode: http.StatusOK,
+ },
+ {
+ url: "/readyz",
+ expectedTimeoutError: true,
+ expectedStatusCode: http.StatusServiceUnavailable,
+ },
+ },
+ },
+ // verify that auth enabled serializable read must go through mvcc
+ {
+ name: "slow buffer write back with auth enabled",
+ injectFailure: triggerSlowBufferWriteBackWithAuth,
+ clusterOptions: []e2e.EPClusterOption{e2e.WithClusterSize(1), e2e.WithGoFailEnabled(true)},
+ healthChecks: []healthCheckConfig{
+ {
+ url: "/livez",
+ expectedTimeoutError: true,
+ },
+ {
+ url: "/readyz",
+ expectedTimeoutError: true,
+ },
+ },
+ },
+ {
+ name: "corrupt",
+ injectFailure: triggerCorrupt,
+ clusterOptions: []e2e.EPClusterOption{e2e.WithClusterSize(3), e2e.WithCorruptCheckTime(time.Second)},
+ healthChecks: []healthCheckConfig{
+ {
+ url: "/livez?verbose=true",
+ expectedStatusCode: http.StatusOK,
+ expectedRespSubStrings: []string{`[+]serializable_read ok`},
+ },
+ {
+ url: "/readyz",
+ expectedStatusCode: http.StatusServiceUnavailable,
+ expectedRespSubStrings: []string{
+ `[+]serializable_read ok`,
+ `[-]data_corruption failed: alarm activated: CORRUPT`,
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+ defer cancel()
+ clus, err := e2e.NewEtcdProcessCluster(ctx, t, tc.clusterOptions...)
+ require.NoError(t, err)
+ defer clus.Close()
+ testutils.ExecuteUntil(ctx, t, func() {
+ if tc.injectFailure != nil {
+ // guarantee that the failure point stays active until all the health checks time out.
+ duration := time.Duration(len(tc.healthChecks)+1) * healthCheckTimeout
+ tc.injectFailure(ctx, t, clus, duration)
+ }
+
+ for _, hc := range tc.healthChecks {
+ requestURL := clus.Procs[0].EndpointsHTTP()[0] + hc.url
+ t.Logf("health check URL is %s", requestURL)
+ doHealthCheckAndVerify(t, client, requestURL, hc.expectedTimeoutError, hc.expectedStatusCode, hc.expectedRespSubStrings)
+ }
+ })
+ })
+ }
+}
+
+func doHealthCheckAndVerify(t *testing.T, client *http.Client, url string, expectTimeoutError bool, expectStatusCode int, expectRespSubStrings []string) {
+ ctx, cancel := context.WithTimeout(context.Background(), healthCheckTimeout)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+ require.NoErrorf(t, err, "failed to creat request %+v", err)
+ resp, herr := client.Do(req)
+ cancel()
+ if expectTimeoutError {
+ if herr != nil && strings.Contains(herr.Error(), context.DeadlineExceeded.Error()) {
+ return
+ }
+ }
+ require.NoErrorf(t, herr, "failed to get response %+v", herr)
+ defer resp.Body.Close()
+
+ body, err := io.ReadAll(resp.Body)
+ resp.Body.Close()
+ require.NoErrorf(t, err, "failed to read response %+v", err)
+
+ t.Logf("health check response body is:\n%s", body)
+ require.Equal(t, expectStatusCode, resp.StatusCode)
+ for _, expectRespSubString := range expectRespSubStrings {
+ require.Contains(t, string(body), expectRespSubString)
+ }
+}
+
+func triggerNoSpaceAlarm(ctx context.Context, t *testing.T, clus *e2e.EtcdProcessCluster, _ time.Duration) {
+ buf := strings.Repeat("b", os.Getpagesize())
+ etcdctl := clus.Etcdctl()
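+ // keep writing until the backend quota is exceeded and the NOSPACE alarm is raised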
+ for {
+ if err := etcdctl.Put(ctx, "foo", buf, config.PutOptions{}); err != nil {
+ require.ErrorContains(t, err, "etcdserver: mvcc: database space exceeded")
+ break
+ }
+ }
+}
+
+func triggerSlowApply(ctx context.Context, t *testing.T, clus *e2e.EtcdProcessCluster, duration time.Duration) {
+ // the following proposal will be blocked at the apply stage;
+ // while the applied index lags behind the committed index, linearizable reads time out.
+ require.NoError(t, clus.Procs[0].Failpoints().SetupHTTP(ctx, "beforeApplyOneEntryNormal", fmt.Sprintf(`sleep("%s")`, duration)))
+ require.NoError(t, clus.Procs[1].Etcdctl().Put(ctx, "foo", "bar", config.PutOptions{}))
+}
+
+func blackhole(_ context.Context, t *testing.T, clus *e2e.EtcdProcessCluster, _ time.Duration) {
+ member := clus.Procs[0]
+ proxy := member.PeerProxy()
+ t.Logf("Blackholing traffic from and to member %q", member.Config().Name)
+ proxy.BlackholeTx()
+ proxy.BlackholeRx()
+}
+
+func triggerRaftLoopDeadLock(ctx context.Context, t *testing.T, clus *e2e.EtcdProcessCluster, duration time.Duration) {
+ require.NoError(t, clus.Procs[0].Failpoints().SetupHTTP(ctx, "raftBeforeSave", fmt.Sprintf(`sleep("%s")`, duration)))
+ clus.Procs[0].Etcdctl().Put(context.Background(), "foo", "bar", config.PutOptions{Timeout: putCommandTimeout})
+}
+
+func triggerSlowBufferWriteBackWithAuth(ctx context.Context, t *testing.T, clus *e2e.EtcdProcessCluster, duration time.Duration) {
+ etcdctl := clus.Etcdctl()
+ _, err := etcdctl.UserAdd(ctx, "root", "root", config.UserAddOptions{})
+ require.NoError(t, err)
+ _, err = etcdctl.UserGrantRole(ctx, "root", "root")
+ require.NoError(t, err)
+ require.NoError(t, etcdctl.AuthEnable(ctx))
+
+ require.NoError(t, clus.Procs[0].Failpoints().SetupHTTP(ctx, "beforeWritebackBuf", fmt.Sprintf(`sleep("%s")`, duration)))
+ clus.Procs[0].Etcdctl(e2e.WithAuth("root", "root")).Put(context.Background(), "foo", "bar", config.PutOptions{Timeout: putCommandTimeout})
+}
+
+func triggerCorrupt(ctx context.Context, t *testing.T, clus *e2e.EtcdProcessCluster, _ time.Duration) {
+ etcdctl := clus.Procs[0].Etcdctl()
+ for i := 0; i < 10; i++ {
+ err := etcdctl.Put(ctx, "foo", "bar", config.PutOptions{})
+ require.NoError(t, err)
+ }
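+ // stop the member and corrupt its backend db file on disk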
+ err := clus.Procs[0].Kill()
+ require.NoError(t, err)
+ err = clus.Procs[0].Wait(ctx)
+ require.NoError(t, err)
+ err = testutil.CorruptBBolt(path.Join(clus.Procs[0].Config().DataDirPath, "member", "snap", "db"))
+ require.NoError(t, err)
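+ // restart the member; the periodic corruption check should detect the corrupted db and raise the CORRUPT alarm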
+ require.NoError(t, clus.Procs[0].Start(ctx))
+ for {
+ time.Sleep(time.Second)
+ select {
+ case <-ctx.Done():
+ require.NoError(t, ctx.Err())
+ default:
+ }
+ response, err := etcdctl.AlarmList(ctx)
+ if err != nil {
+ continue
+ }
+ if len(response.Alarms) == 0 {
+ continue
+ }
+ require.Len(t, response.Alarms, 1)
+ if response.Alarms[0].Alarm == etcdserverpb.AlarmType_CORRUPT {
+ break
+ }
+ }
+}
diff --git a/tests/e2e/leader_snapshot_no_proxy_test.go b/tests/e2e/leader_snapshot_no_proxy_test.go
new file mode 100644
index 00000000000..7b3c39270f3
--- /dev/null
+++ b/tests/e2e/leader_snapshot_no_proxy_test.go
@@ -0,0 +1,98 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !cluster_proxy
+
+package e2e
+
+import (
+ "context"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/robustness/failpoint"
+)
+
+func TestRecoverSnapshotBackend(t *testing.T) {
+ e2e.BeforeTest(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ epc, err := e2e.NewEtcdProcessCluster(ctx, t,
+ e2e.WithClusterSize(3),
+ e2e.WithKeepDataDir(true),
+ e2e.WithPeerProxy(true),
+ e2e.WithSnapshotCatchUpEntries(50),
+ e2e.WithSnapshotCount(50),
+ e2e.WithGoFailEnabled(true),
+ e2e.WithIsPeerTLS(true),
+ )
+ require.NoError(t, err)
+
+ defer epc.Close()
+
+ blackholedMember := epc.Procs[0]
+ otherMember := epc.Procs[1]
+
+ wg := sync.WaitGroup{}
+
+ trafficCtx, trafficCancel := context.WithCancel(ctx)
+ c, err := clientv3.New(clientv3.Config{
+ Endpoints: otherMember.EndpointsGRPC(),
+ Logger: zap.NewNop(),
+ DialKeepAliveTime: 10 * time.Second,
+ DialKeepAliveTimeout: 100 * time.Millisecond,
+ })
+ require.NoError(t, err)
+ defer c.Close()
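+ // background traffic keeps proposing writes so the blackholed member falls behind and needs a snapshot to catch up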
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case <-trafficCtx.Done():
+ return
+ default:
+ }
+ putCtx, putCancel := context.WithTimeout(trafficCtx, 50*time.Millisecond)
+ c.Put(putCtx, "a", "b")
+ putCancel()
+ time.Sleep(10 * time.Millisecond)
+ }
+ }()
+
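+ // panic the member right before it opens an incoming snapshot so that, on restart, it has to recover from the snapshot backend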
+ err = blackholedMember.Failpoints().SetupHTTP(ctx, "applyBeforeOpenSnapshot", "panic")
+ require.NoError(t, err)
+ err = failpoint.Blackhole(ctx, t, blackholedMember, epc, true)
+ require.NoError(t, err)
+ err = blackholedMember.Wait(ctx)
+ require.NoError(t, err)
+ trafficCancel()
+ wg.Wait()
+ err = blackholedMember.Start(ctx)
+ require.NoError(t, err)
+ _, err = blackholedMember.Logs().ExpectWithContext(ctx, expect.ExpectedResponse{Value: "Recovering from snapshot backend"})
+ require.NoError(t, err)
+ err = blackholedMember.Etcdctl().Put(ctx, "a", "1", config.PutOptions{})
+ assert.NoError(t, err)
+}
diff --git a/tests/e2e/logging_test.go b/tests/e2e/logging_test.go
new file mode 100644
index 00000000000..3c9b13b9250
--- /dev/null
+++ b/tests/e2e/logging_test.go
@@ -0,0 +1,135 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+func TestNoErrorLogsDuringNormalOperations(t *testing.T) {
+ tests := []struct {
+ name string
+ options []e2e.EPClusterOption
+ allowedErrors map[string]bool
+ }{
+ {
+ name: "single node cluster",
+ options: []e2e.EPClusterOption{
+ e2e.WithClusterSize(1),
+ e2e.WithLogLevel("debug"),
+ },
+ allowedErrors: map[string]bool{
+ "setting up serving from embedded etcd failed.": true,
+ },
+ },
+ {
+ name: "three node cluster",
+ options: []e2e.EPClusterOption{
+ e2e.WithClusterSize(3),
+ e2e.WithLogLevel("debug"),
+ },
+ allowedErrors: map[string]bool{
+ "setting up serving from embedded etcd failed.": true,
+ },
+ },
+ {
+ name: "three node cluster with auto tls (all)",
+ options: []e2e.EPClusterOption{
+ e2e.WithClusterSize(3),
+ e2e.WithLogLevel("debug"),
+ e2e.WithIsPeerTLS(true),
+ e2e.WithIsPeerAutoTLS(true),
+ e2e.WithClientAutoTLS(true),
+ e2e.WithClientConnType(e2e.ClientTLS),
+ },
+ allowedErrors: map[string]bool{
+ "setting up serving from embedded etcd failed.": true,
+ },
+ },
+ {
+ name: "three node cluster with auto tls (peers)",
+ options: []e2e.EPClusterOption{
+ e2e.WithClusterSize(3),
+ e2e.WithLogLevel("debug"),
+ e2e.WithIsPeerTLS(true),
+ e2e.WithIsPeerAutoTLS(true),
+ },
+ allowedErrors: map[string]bool{
+ "setting up serving from embedded etcd failed.": true,
+ },
+ },
+ {
+ name: "three node cluster with auto tls (client)",
+ options: []e2e.EPClusterOption{
+ e2e.WithClusterSize(3),
+ e2e.WithLogLevel("debug"),
+ e2e.WithClientAutoTLS(true),
+ e2e.WithClientConnType(e2e.ClientTLS),
+ },
+ allowedErrors: map[string]bool{
+ "setting up serving from embedded etcd failed.": true,
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ e2e.BeforeTest(t)
+ ctx := context.TODO()
+
+ epc, err := e2e.NewEtcdProcessCluster(ctx, t, tc.options...)
+ require.NoError(t, err)
+ defer epc.Close()
+
+ require.Lenf(t, epc.Procs, epc.Cfg.ClusterSize, "embedded etcd cluster process count is not as expected")
+
+ // Collect the log handles before closing the processes.
+ var logHandles []e2e.LogsExpect
+ for i := range epc.Cfg.ClusterSize {
+ logHandles = append(logHandles, epc.Procs[i].Logs())
+ }
+
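+ // give the cluster a moment of normal operation so steady-state logs are emitted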
+ time.Sleep(time.Second)
+ require.NoErrorf(t, epc.Close(), "closing etcd processes")
+
+ // Now that the processes are closed we can collect all log lines. This must happen after closing, else we
+ // might not get all log lines.
+ var lines []string
+ for _, h := range logHandles {
+ lines = append(lines, h.Lines()...)
+ }
+ require.NotEmptyf(t, lines, "expected at least one log line")
+
+ var entry logEntry
+ for _, line := range lines {
+ err := json.Unmarshal([]byte(line), &entry)
+ require.NoErrorf(t, err, "parse log line as json, line: %s", line)
+
+ if tc.allowedErrors[entry.Message] || tc.allowedErrors[entry.Error] {
+ continue
+ }
+
+ require.NotEqualf(t, "error", entry.Level, "error level log message found: %s", line)
+ }
+ })
+ }
+}
diff --git a/tests/e2e/main_test.go b/tests/e2e/main_test.go
index 41561b5501e..58d7efb95da 100644
--- a/tests/e2e/main_test.go
+++ b/tests/e2e/main_test.go
@@ -5,61 +5,15 @@
package e2e
import (
- "flag"
"os"
- "runtime"
"testing"
"go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/tests/v3/integration"
-)
-
-var (
- binDir string
- certDir string
-
- certPath string
- privateKeyPath string
- caPath string
-
- certPath2 string
- privateKeyPath2 string
-
- certPath3 string
- privateKeyPath3 string
-
- crlPath string
- revokedCertPath string
- revokedPrivateKeyPath string
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
)
func TestMain(m *testing.M) {
- os.Setenv("ETCD_UNSUPPORTED_ARCH", runtime.GOARCH)
- os.Unsetenv("ETCDCTL_API")
-
- binDirDef := integration.MustAbsPath("../../bin")
- certDirDef := fixturesDir
-
- flag.StringVar(&binDir, "bin-dir", binDirDef, "The directory for store etcd and etcdctl binaries.")
- flag.StringVar(&certDir, "cert-dir", certDirDef, "The directory for store certificate files.")
- flag.Parse()
-
- binPath = binDir + "/etcd"
- ctlBinPath = binDir + "/etcdctl"
- utlBinPath = binDir + "/etcdutl"
- certPath = certDir + "/server.crt"
- privateKeyPath = certDir + "/server.key.insecure"
- caPath = certDir + "/ca.crt"
- revokedCertPath = certDir + "/server-revoked.crt"
- revokedPrivateKeyPath = certDir + "/server-revoked.key.insecure"
- crlPath = certDir + "/revoke.crl"
-
- certPath2 = certDir + "/server2.crt"
- privateKeyPath2 = certDir + "/server2.key.insecure"
-
- certPath3 = certDir + "/server3.crt"
- privateKeyPath3 = certDir + "/server3.key.insecure"
-
+ e2e.InitFlags()
v := m.Run()
if v == 0 && testutil.CheckLeakedGoroutine() {
os.Exit(1)
diff --git a/tests/e2e/metrics_test.go b/tests/e2e/metrics_test.go
index ce26f350182..ee3009632d0 100644
--- a/tests/e2e/metrics_test.go
+++ b/tests/e2e/metrics_test.go
@@ -15,55 +15,114 @@
package e2e
import (
+ "context"
"fmt"
"testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
"go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
)
func TestV3MetricsSecure(t *testing.T) {
- cfg := newConfigTLS()
- cfg.clusterSize = 1
- cfg.metricsURLScheme = "https"
+ cfg := e2e.NewConfigTLS()
+ cfg.ClusterSize = 1
+ cfg.MetricsURLScheme = "https"
testCtl(t, metricsTest)
}
func TestV3MetricsInsecure(t *testing.T) {
- cfg := newConfigTLS()
- cfg.clusterSize = 1
- cfg.metricsURLScheme = "http"
+ cfg := e2e.NewConfigTLS()
+ cfg.ClusterSize = 1
+ cfg.MetricsURLScheme = "http"
testCtl(t, metricsTest)
}
+func TestV3LearnerMetricRecover(t *testing.T) {
+ cfg := e2e.NewConfigTLS()
+ cfg.ServerConfig.SnapshotCount = 10
+ testCtl(t, learnerMetricRecoverTest, withCfg(*cfg))
+}
+
+func TestV3LearnerMetricApplyFromSnapshotTest(t *testing.T) {
+ cfg := e2e.NewConfigTLS()
+ cfg.ServerConfig.SnapshotCount = 10
+ testCtl(t, learnerMetricApplyFromSnapshotTest, withCfg(*cfg))
+}
+
func metricsTest(cx ctlCtx) {
- if err := ctlV3Put(cx, "k", "v", ""); err != nil {
- cx.t.Fatal(err)
- }
+ require.NoError(cx.t, ctlV3Put(cx, "k", "v", ""))
i := 0
for _, test := range []struct {
endpoint, expected string
}{
- {"/metrics", fmt.Sprintf("etcd_mvcc_put_total 2")},
- {"/metrics", fmt.Sprintf("etcd_debugging_mvcc_keys_total 1")},
- {"/metrics", fmt.Sprintf("etcd_mvcc_delete_total 3")},
+ {"/metrics", "etcd_mvcc_put_total 2"},
+ {"/metrics", "etcd_debugging_mvcc_keys_total 1"},
+ {"/metrics", "etcd_mvcc_delete_total 3"},
{"/metrics", fmt.Sprintf(`etcd_server_version{server_version="%s"} 1`, version.Version)},
{"/metrics", fmt.Sprintf(`etcd_cluster_version{cluster_version="%s"} 1`, version.Cluster(version.Version))},
- {"/metrics", fmt.Sprintf(`grpc_server_handled_total{grpc_code="Canceled",grpc_method="Watch",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"} 6`)},
+ {"/metrics", `grpc_server_handled_total{grpc_code="Canceled",grpc_method="Watch",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"} 6`},
{"/health", `{"health":"true","reason":""}`},
} {
i++
- if err := ctlV3Put(cx, fmt.Sprintf("%d", i), "v", ""); err != nil {
- cx.t.Fatal(err)
- }
- if err := ctlV3Del(cx, []string{fmt.Sprintf("%d", i)}, 1); err != nil {
- cx.t.Fatal(err)
- }
- if err := ctlV3Watch(cx, []string{"k", "--rev", "1"}, []kvExec{{key: "k", val: "v"}}...); err != nil {
- cx.t.Fatal(err)
- }
- if err := cURLGet(cx.epc, cURLReq{endpoint: test.endpoint, expected: test.expected, metricsURLScheme: cx.cfg.metricsURLScheme}); err != nil {
- cx.t.Fatalf("failed get with curl (%v)", err)
- }
+ require.NoError(cx.t, ctlV3Put(cx, fmt.Sprintf("%d", i), "v", ""))
+ require.NoError(cx.t, ctlV3Del(cx, []string{fmt.Sprintf("%d", i)}, 1))
+ require.NoError(cx.t, ctlV3Watch(cx, []string{"k", "--rev", "1"}, []kvExec{{key: "k", val: "v"}}...))
+ require.NoErrorf(cx.t, e2e.CURLGet(cx.epc, e2e.CURLReq{Endpoint: test.endpoint, Expected: expect.ExpectedResponse{Value: test.expected}}), "failed get with curl")
+ }
+}
+
+func learnerMetricRecoverTest(cx ctlCtx) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ _, err := cx.epc.StartNewProc(ctx, nil, cx.t, true /* addAsLearner */)
+ require.NoError(cx.t, err)
+ expectLearnerMetrics(cx)
+
+ triggerSnapshot(ctx, cx)
+
+ // Restart cluster
+ require.NoError(cx.t, cx.epc.Restart(ctx))
+ expectLearnerMetrics(cx)
+}
+
+func learnerMetricApplyFromSnapshotTest(cx ctlCtx) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // Add learner but do not start it
+ _, learnerCfg, err := cx.epc.AddMember(ctx, nil, cx.t, true /* addAsLearner */)
+ require.NoError(cx.t, err)
+
+ triggerSnapshot(ctx, cx)
+
+ // Start the learner
+ require.NoError(cx.t, cx.epc.StartNewProcFromConfig(ctx, cx.t, learnerCfg))
+ expectLearnerMetrics(cx)
+}
+
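+// triggerSnapshot writes enough keys to exceed SnapshotCount so that a raft snapshot is triggered.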
+func triggerSnapshot(ctx context.Context, cx ctlCtx) {
+ etcdctl := cx.epc.Procs[0].Etcdctl()
+ for i := 0; i < int(cx.epc.Cfg.ServerConfig.SnapshotCount); i++ {
+ require.NoError(cx.t, etcdctl.Put(ctx, "k", "v", config.PutOptions{}))
}
}
+
+func expectLearnerMetrics(cx ctlCtx) {
+ expectLearnerMetric(cx, 0, "etcd_server_is_learner 0")
+ expectLearnerMetric(cx, 1, "etcd_server_is_learner 1")
+}
+
+func expectLearnerMetric(cx ctlCtx, procIdx int, expectMetric string) {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ args := e2e.CURLPrefixArgsCluster(cx.epc.Cfg, cx.epc.Procs[procIdx], "GET", e2e.CURLReq{Endpoint: "/metrics"})
+ require.NoError(cx.t, e2e.SpawnWithExpectsContext(ctx, args, nil, expect.ExpectedResponse{Value: expectMetric}))
+}
diff --git a/tests/e2e/promote_experimental_flag_test.go b/tests/e2e/promote_experimental_flag_test.go
new file mode 100644
index 00000000000..13a8fcba4fb
--- /dev/null
+++ b/tests/e2e/promote_experimental_flag_test.go
@@ -0,0 +1,90 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+func TestWarningApplyDuration(t *testing.T) {
+ e2e.BeforeTest(t)
+
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+ e2e.WithClusterSize(1),
+ e2e.WithWarningUnaryRequestDuration(time.Microsecond),
+ )
+ if err != nil {
+ t.Fatalf("could not start etcd process cluster (%v)", err)
+ }
+ t.Cleanup(func() {
+ if errC := epc.Close(); errC != nil {
+ t.Fatalf("error closing etcd processes (%v)", errC)
+ }
+ })
+
+ cc := epc.Etcdctl()
+ err = cc.Put(context.TODO(), "foo", "bar", config.PutOptions{})
+ require.NoErrorf(t, err, "error on put")
+
+ // verify warning
+ e2e.AssertProcessLogs(t, epc.Procs[0], "request stats")
+}
+
+// TestExperimentalWarningApplyDuration tests the experimental warning apply duration
+// TODO: this test is a duplicate of TestWarningApplyDuration except it uses --experimental-warning-unary-request-duration
+// Remove this test after --experimental-warning-unary-request-duration flag is removed.
+func TestExperimentalWarningApplyDuration(t *testing.T) {
+ e2e.BeforeTest(t)
+
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+ e2e.WithClusterSize(1),
+ e2e.WithExperimentalWarningUnaryRequestDuration(time.Microsecond),
+ )
+ if err != nil {
+ t.Fatalf("could not start etcd process cluster (%v)", err)
+ }
+ t.Cleanup(func() {
+ if errC := epc.Close(); errC != nil {
+ t.Fatalf("error closing etcd processes (%v)", errC)
+ }
+ })
+
+ cc := epc.Etcdctl()
+ err = cc.Put(context.TODO(), "foo", "bar", config.PutOptions{})
+ require.NoErrorf(t, err, "error on put")
+
+ // verify warning
+ e2e.AssertProcessLogs(t, epc.Procs[0], "request stats")
+}
+
+func TestBothWarningApplyDurationFlagsFail(t *testing.T) {
+ e2e.BeforeTest(t)
+
+ _, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+ e2e.WithClusterSize(1),
+ e2e.WithWarningUnaryRequestDuration(time.Second),
+ e2e.WithExperimentalWarningUnaryRequestDuration(time.Second),
+ )
+ if err == nil {
+ t.Fatal("Expected process to fail")
+ }
+}
diff --git a/tests/e2e/reproduce_17780_test.go b/tests/e2e/reproduce_17780_test.go
new file mode 100644
index 00000000000..f5ef97b91a4
--- /dev/null
+++ b/tests/e2e/reproduce_17780_test.go
@@ -0,0 +1,108 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/stringutil"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+// TestReproduce17780 reproduces the issue: https://github.com/etcd-io/etcd/issues/17780.
+func TestReproduce17780(t *testing.T) {
+ e2e.BeforeTest(t)
+
+ compactionBatchLimit := 10
+
+ ctx := context.TODO()
+ clus, cerr := e2e.NewEtcdProcessCluster(ctx, t,
+ e2e.WithClusterSize(3),
+ e2e.WithGoFailEnabled(true),
+ e2e.WithSnapshotCount(1000),
+ e2e.WithCompactionBatchLimit(compactionBatchLimit),
+ e2e.WithWatchProcessNotifyInterval(100*time.Millisecond),
+ )
+ require.NoError(t, cerr)
+
+ t.Cleanup(func() { clus.Stop() })
+
+ leaderIdx := clus.WaitLeader(t)
+ targetIdx := (leaderIdx + 1) % clus.Cfg.ClusterSize
+
+ cli := newClient(t, clus.Procs[targetIdx].EndpointsGRPC(), e2e.ClientConfig{})
+
+ // Revision: 2 -> 8 for new keys
+ n := compactionBatchLimit - 2
+ valueSize := 16
+ for i := 2; i <= n; i++ {
+ _, err := cli.Put(ctx, fmt.Sprintf("%d", i), stringutil.RandString(uint(valueSize)))
+ require.NoError(t, err)
+ }
+
+ // Revision: 9 -> 11 for deleting keys
+ //
+ // We need the last compaction batch to be a no-op and all the tombstones
+ // to be deleted in the previous compaction batch, so that only the
+ // finishedCompactRev is lost after the panic.
+ for i := 9; i <= compactionBatchLimit+1; i++ {
+ rev := i - 5
+ key := fmt.Sprintf("%d", rev)
+
+ _, err := cli.Delete(ctx, key)
+ require.NoError(t, err)
+ }
+
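+ // panic the member right before it persists the finished compaction revision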
+ require.NoError(t, clus.Procs[targetIdx].Failpoints().SetupHTTP(ctx, "compactBeforeSetFinishedCompact", `panic`))
+
+ _, err := cli.Compact(ctx, 11, clientv3.WithCompactPhysical())
+ require.Error(t, err)
+
+ require.NoError(t, clus.Procs[targetIdx].Restart(ctx))
+
+ // NOTE: The revision should not decrease if there is no record
+ // of a finished compaction.
+ resp, err := cli.Get(ctx, fmt.Sprintf("%d", n))
+ require.NoError(t, err)
+ assert.GreaterOrEqual(t, resp.Header.Revision, int64(11))
+
+ // Revision 4 should be deleted by compaction.
+ resp, err = cli.Get(ctx, fmt.Sprintf("%d", 4))
+ require.NoError(t, err)
+ require.Equal(t, int64(0), resp.Count)
+
+ next := 20
+ for i := 12; i <= next; i++ {
+ _, err := cli.Put(ctx, fmt.Sprintf("%d", i), stringutil.RandString(uint(valueSize)))
+ require.NoError(t, err)
+ }
+
+ expectedRevision := next
+ for procIdx, proc := range clus.Procs {
+ cli = newClient(t, proc.EndpointsGRPC(), e2e.ClientConfig{})
+ resp, err := cli.Get(ctx, fmt.Sprintf("%d", next))
+ require.NoError(t, err)
+
+ assert.GreaterOrEqualf(t, resp.Header.Revision, int64(expectedRevision),
+ "LeaderIdx: %d, Current: %d", leaderIdx, procIdx)
+ }
+}
diff --git a/tests/e2e/runtime_reconfiguration_test.go b/tests/e2e/runtime_reconfiguration_test.go
new file mode 100644
index 00000000000..308808a5377
--- /dev/null
+++ b/tests/e2e/runtime_reconfiguration_test.go
@@ -0,0 +1,203 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !cluster_proxy
+
+package e2e
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+// TestRuntimeReconfigGrowClusterSize ensures growing cluster size with two phases
+// Phase 1 - Inform cluster of new configuration
+// Phase 2 - Start new member
+func TestRuntimeReconfigGrowClusterSize(t *testing.T) {
+ e2e.BeforeTest(t)
+
+ tcs := []struct {
+ name string
+ clusterSize int
+ asLearner bool
+ }{
+ {
+ name: "grow cluster size from 1 to 3",
+ clusterSize: 1,
+ },
+ {
+ name: "grow cluster size from 3 to 5",
+ clusterSize: 3,
+ },
+ {
+ name: "grow cluster size from 1 to 3 with learner",
+ clusterSize: 1,
+ asLearner: true,
+ },
+ {
+ name: "grow cluster size from 3 to 5 with learner",
+ clusterSize: 3,
+ asLearner: true,
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ epc, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithClusterSize(tc.clusterSize))
+ require.NoError(t, err)
+ require.NoError(t, epc.Procs[0].Etcdctl().Health(ctx))
+ defer func() {
+ err := epc.Close()
+ require.NoErrorf(t, err, "failed to close etcd cluster")
+ }()
+
+ for i := 0; i < 2; i++ {
+ time.Sleep(etcdserver.HealthInterval)
+ if !tc.asLearner {
+ addMember(ctx, t, epc)
+ } else {
+ addMemberAsLearnerAndPromote(ctx, t, epc)
+ }
+ }
+ })
+ }
+}
+
+func TestRuntimeReconfigDecreaseClusterSize(t *testing.T) {
+ e2e.BeforeTest(t)
+
+ tcs := []struct {
+ name string
+ clusterSize int
+ asLearner bool
+ }{
+ {
+ name: "decrease cluster size from 3 to 1",
+ clusterSize: 3,
+ },
+ {
+ name: "decrease cluster size from 5 to 3",
+ clusterSize: 5,
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ epc, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithClusterSize(tc.clusterSize))
+ require.NoError(t, err)
+ require.NoError(t, epc.Procs[0].Etcdctl().Health(ctx))
+ defer func() {
+ err := epc.Close()
+ require.NoErrorf(t, err, "failed to close etcd cluster")
+ }()
+
+ for i := 0; i < 2; i++ {
+ time.Sleep(etcdserver.HealthInterval)
+ removeFirstMember(ctx, t, epc)
+ }
+ })
+ }
+}
+
+func TestRuntimeReconfigRollingUpgrade(t *testing.T) {
+ e2e.BeforeTest(t)
+
+ tcs := []struct {
+ name string
+ withLearner bool
+ }{
+ {
+ name: "with learner",
+ withLearner: true,
+ },
+ {
+ name: "without learner",
+ withLearner: false,
+ },
+ }
+
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ epc, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithClusterSize(3))
+ require.NoError(t, err)
+ require.NoError(t, epc.Procs[0].Etcdctl().Health(ctx))
+ defer func() {
+ err := epc.Close()
+ require.NoErrorf(t, err, "failed to close etcd cluster")
+ }()
+
+ for i := 0; i < 2; i++ {
+ time.Sleep(etcdserver.HealthInterval)
+ removeFirstMember(ctx, t, epc)
+ epc.WaitLeader(t)
+ // if we do not wait for the leader, then without the fix that notifies raft Advance,
+ // we would have to wait 1 second for the test to pass stably.
+ if tc.withLearner {
+ addMemberAsLearnerAndPromote(ctx, t, epc)
+ } else {
+ addMember(ctx, t, epc)
+ }
+ }
+ })
+ }
+}
+
+func addMember(ctx context.Context, t *testing.T, epc *e2e.EtcdProcessCluster) {
+ _, err := epc.StartNewProc(ctx, nil, t, false /* addAsLearner */)
+ require.NoError(t, err)
+ require.NoError(t, epc.Procs[len(epc.Procs)-1].Etcdctl().Health(ctx))
+}
+
+func addMemberAsLearnerAndPromote(ctx context.Context, t *testing.T, epc *e2e.EtcdProcessCluster) {
+ endpoints := epc.EndpointsGRPC()
+
+ id, err := epc.StartNewProc(ctx, nil, t, true /* addAsLearner */)
+ require.NoError(t, err)
+ _, err = epc.Etcdctl(e2e.WithEndpoints(endpoints)).MemberPromote(ctx, id)
+ require.NoError(t, err)
+
+ newLearnerMemberProc := epc.Procs[len(epc.Procs)-1]
+ require.NoError(t, newLearnerMemberProc.Etcdctl().Health(ctx))
+}
+
+func removeFirstMember(ctx context.Context, t *testing.T, epc *e2e.EtcdProcessCluster) {
+ // avoid tearing down the last etcd process
+ if len(epc.Procs) == 1 {
+ return
+ }
+
+ firstProc := epc.Procs[0]
+ sts, err := firstProc.Etcdctl().Status(ctx)
+ require.NoError(t, err)
+ memberIDToRemove := sts[0].Header.MemberId
+
+ epc.Procs = epc.Procs[1:]
+ _, err = epc.Etcdctl().MemberRemove(ctx, memberIDToRemove)
+ require.NoError(t, err)
+ require.NoError(t, firstProc.Stop())
+ require.NoError(t, firstProc.Close())
+}
diff --git a/tests/e2e/testing.go b/tests/e2e/testing.go
deleted file mode 100644
index a36c075dbd7..00000000000
--- a/tests/e2e/testing.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package e2e
-
-import (
- "os"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/server/v3/verify"
-)
-
-func BeforeTest(t testing.TB) {
- skipInShortMode(t)
- testutil.BeforeTest(t)
- os.Setenv(verify.ENV_VERIFY, verify.ENV_VERIFY_ALL_VALUE)
-
- path, err := os.Getwd()
- assert.NoError(t, err)
- tempDir := t.TempDir()
- assert.NoError(t, os.Chdir(tempDir))
- t.Logf("Changing working directory to: %s", tempDir)
-
- t.Cleanup(func() { assert.NoError(t, os.Chdir(path)) })
-}
diff --git a/tests/e2e/util.go b/tests/e2e/util.go
deleted file mode 100644
index 2841d94fb05..00000000000
--- a/tests/e2e/util.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package e2e
-
-import (
- "encoding/json"
- "fmt"
- "math/rand"
- "strings"
- "testing"
- "time"
-
- "go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/pkg/v3/expect"
-)
-
-func waitReadyExpectProc(exproc *expect.ExpectProcess, readyStrs []string) error {
- matchSet := func(l string) bool {
- for _, s := range readyStrs {
- if strings.Contains(l, s) {
- return true
- }
- }
- return false
- }
- _, err := exproc.ExpectFunc(matchSet)
- return err
-}
-
-func spawnWithExpect(args []string, expected string) error {
- return spawnWithExpects(args, []string{expected}...)
-}
-
-func spawnWithExpects(args []string, xs ...string) error {
- _, err := spawnWithExpectLines(args, xs...)
- return err
-}
-
-func spawnWithExpectLines(args []string, xs ...string) ([]string, error) {
- proc, err := spawnCmd(args)
- if err != nil {
- return nil, err
- }
- // process until either stdout or stderr contains
- // the expected string
- var (
- lines []string
- lineFunc = func(txt string) bool { return true }
- )
- for _, txt := range xs {
- for {
- l, lerr := proc.ExpectFunc(lineFunc)
- if lerr != nil {
- proc.Close()
- return nil, fmt.Errorf("%v %v (expected %q, got %q). Try EXPECT_DEBUG=TRUE", args, lerr, txt, lines)
- }
- lines = append(lines, l)
- if strings.Contains(l, txt) {
- break
- }
- }
- }
- perr := proc.Close()
- l := proc.LineCount()
- if len(xs) == 0 && l != noOutputLineCount { // expect no output
- return nil, fmt.Errorf("unexpected output from %v (got lines %q, line count %d) %v. Try EXPECT_DEBUG=TRUE", args, lines, l, l != noOutputLineCount)
- }
- return lines, perr
-}
-
-func randomLeaseID() int64 {
- return rand.New(rand.NewSource(time.Now().UnixNano())).Int63()
-}
-
-func dataMarshal(data interface{}) (d string, e error) {
- m, err := json.Marshal(data)
- if err != nil {
- return "", err
- }
- return string(m), nil
-}
-
-func closeWithTimeout(p *expect.ExpectProcess, d time.Duration) error {
- errc := make(chan error, 1)
- go func() { errc <- p.Close() }()
- select {
- case err := <-errc:
- return err
- case <-time.After(d):
- p.Stop()
- // retry close after stopping to collect SIGQUIT data, if any
- closeWithTimeout(p, time.Second)
- }
- return fmt.Errorf("took longer than %v to Close process %+v", d, p)
-}
-
-func toTLS(s string) string {
- return strings.Replace(s, "http://", "https://", 1)
-}
-
-func skipInShortMode(t testing.TB) {
- testutil.SkipTestIfShortMode(t, "e2e tests are not running in --short mode")
-}
diff --git a/tests/e2e/utils.go b/tests/e2e/utils.go
new file mode 100644
index 00000000000..8917bd8072a
--- /dev/null
+++ b/tests/e2e/utils.go
@@ -0,0 +1,273 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/json"
+ "encoding/pem"
+ "fmt"
+ "math/big"
+ "net"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
+ "google.golang.org/grpc"
+
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/stringutil"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
+)
+
+func newClient(t *testing.T, endpoints []string, cfg e2e.ClientConfig) *clientv3.Client {
+ tlscfg, err := tlsInfo(t, cfg)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ccfg := clientv3.Config{
+ Endpoints: endpoints,
+ DialTimeout: 5 * time.Second,
+ DialOptions: []grpc.DialOption{grpc.WithBlock()},
+ }
+ if tlscfg != nil {
+ ccfg.TLS, err = tlscfg.ClientConfig()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ c, err := clientv3.New(ccfg)
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Cleanup(func() {
+ c.Close()
+ })
+ return c
+}
+
+// tlsInfo follows the client-to-server communication setup in https://etcd.io/docs/v3.6/op-guide/security/#basic-setup
+func tlsInfo(t testing.TB, cfg e2e.ClientConfig) (*transport.TLSInfo, error) {
+ switch cfg.ConnectionType {
+ case e2e.ClientNonTLS, e2e.ClientTLSAndNonTLS:
+ return nil, nil
+ case e2e.ClientTLS:
+ if cfg.AutoTLS {
+ tls, err := transport.SelfCert(zap.NewNop(), t.TempDir(), []string{"localhost"}, 1)
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate cert: %w", err)
+ }
+ return &tls, nil
+ }
+ return &integration.TestTLSInfo, nil
+ default:
+ return nil, fmt.Errorf("config %v not supported", cfg)
+ }
+}
+
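+// fillEtcdWithData concurrently writes keyCount keys, sizing each value so the total payload is roughly dbSize bytes.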
+func fillEtcdWithData(ctx context.Context, c *clientv3.Client, dbSize int) error {
+ g := errgroup.Group{}
+ concurrency := 10
+ keyCount := 100
+ keysPerRoutine := keyCount / concurrency
+ valueSize := dbSize / keyCount
+ for i := 0; i < concurrency; i++ {
+ i := i
+ g.Go(func() error {
+ for j := 0; j < keysPerRoutine; j++ {
+ _, err := c.Put(ctx, fmt.Sprintf("%d", i*keysPerRoutine+j), stringutil.RandString(uint(valueSize)))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+ return g.Wait()
+}
+
+func curl(endpoint string, method string, curlReq e2e.CURLReq, connType e2e.ClientConnType) (string, error) {
+ args := e2e.CURLPrefixArgs(endpoint, e2e.ClientConfig{ConnectionType: connType}, false, method, curlReq)
+ lines, err := e2e.RunUtilCompletion(args, nil)
+ if err != nil {
+ return "", err
+ }
+ return strings.Join(lines, "\n"), nil
+}
+
+func runCommandAndReadJSONOutput(args []string) (map[string]any, error) {
+ lines, err := e2e.RunUtilCompletion(args, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var resp map[string]any
+ err = json.Unmarshal([]byte(strings.Join(lines, "\n")), &resp)
+ if err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
+
+func getMemberIDByName(ctx context.Context, c *e2e.EtcdctlV3, name string) (id uint64, found bool, err error) {
+ resp, err := c.MemberList(ctx, false)
+ if err != nil {
+ return 0, false, err
+ }
+ for _, member := range resp.Members {
+ if name == member.Name {
+ return member.ID, true, nil
+ }
+ }
+ return 0, false, nil
+}
+
+func patchArgs(args []string, flag, newValue string) error {
+ for i, arg := range args {
+ if strings.Contains(arg, flag) {
+ args[i] = fmt.Sprintf("--%s=%s", flag, newValue)
+ return nil
+ }
+ }
+ return fmt.Errorf("--%s flag not found", flag)
+}
+
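+// generateCertsForIPs creates a self-signed CA plus one server certificate/key pair per IP address, writes them to tempDir, and returns the file paths.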
+func generateCertsForIPs(tempDir string, ips []net.IP) (caFile string, certFiles []string, keyFiles []string, err error) {
+ ca := &x509.Certificate{
+ SerialNumber: big.NewInt(1001),
+ Subject: pkix.Name{
+ Organization: []string{"etcd"},
+ OrganizationalUnit: []string{"etcd Security"},
+ Locality: []string{"San Francisco"},
+ Province: []string{"California"},
+ Country: []string{"USA"},
+ },
+ NotBefore: time.Now(),
+ NotAfter: time.Now().AddDate(0, 0, 1),
+ IsCA: true,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
+ KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+ BasicConstraintsValid: true,
+ }
+
+ caKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ return "", nil, nil, err
+ }
+ caBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, &caKey.PublicKey, caKey)
+ if err != nil {
+ return "", nil, nil, err
+ }
+
+ caFile, _, err = saveCertToFile(tempDir, caBytes, nil)
+ if err != nil {
+ return "", nil, nil, err
+ }
+
+ for i, ip := range ips {
+ cert := &x509.Certificate{
+ SerialNumber: big.NewInt(1001 + int64(i)),
+ Subject: pkix.Name{
+ Organization: []string{"etcd"},
+ OrganizationalUnit: []string{"etcd Security"},
+ Locality: []string{"San Francisco"},
+ Province: []string{"California"},
+ Country: []string{"USA"},
+ },
+ IPAddresses: []net.IP{ip},
+ NotBefore: time.Now(),
+ NotAfter: time.Now().AddDate(0, 0, 1),
+ SubjectKeyId: []byte{1, 2, 3, 4, 5},
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
+ KeyUsage: x509.KeyUsageDigitalSignature,
+ }
+ certKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ return "", nil, nil, err
+ }
+ certBytes, err := x509.CreateCertificate(rand.Reader, cert, ca, &certKey.PublicKey, caKey)
+ if err != nil {
+ return "", nil, nil, err
+ }
+ certFile, keyFile, err := saveCertToFile(tempDir, certBytes, certKey)
+ if err != nil {
+ return "", nil, nil, err
+ }
+ certFiles = append(certFiles, certFile)
+ keyFiles = append(keyFiles, keyFile)
+ }
+
+ return caFile, certFiles, keyFiles, nil
+}
+
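+// saveCertToFile writes the certificate (and, if provided, the RSA private key) as PEM files in tempDir and returns their paths.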
+func saveCertToFile(tempDir string, certBytes []byte, key *rsa.PrivateKey) (certFile string, keyFile string, err error) {
+ certPEM := new(bytes.Buffer)
+ pem.Encode(certPEM, &pem.Block{
+ Type: "CERTIFICATE",
+ Bytes: certBytes,
+ })
+ cf, err := os.CreateTemp(tempDir, "*.crt")
+ if err != nil {
+ return "", "", err
+ }
+ defer cf.Close()
+ if _, err := cf.Write(certPEM.Bytes()); err != nil {
+ return "", "", err
+ }
+
+ if key != nil {
+ certKeyPEM := new(bytes.Buffer)
+ pem.Encode(certKeyPEM, &pem.Block{
+ Type: "RSA PRIVATE KEY",
+ Bytes: x509.MarshalPKCS1PrivateKey(key),
+ })
+
+ kf, err := os.CreateTemp(tempDir, "*.key.insecure")
+ if err != nil {
+ return "", "", err
+ }
+ defer kf.Close()
+ if _, err := kf.Write(certKeyPEM.Bytes()); err != nil {
+ return "", "", err
+ }
+
+ return cf.Name(), kf.Name(), nil
+ }
+
+ return cf.Name(), "", nil
+}
+
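+// getLocalIP determines the host's outbound IP address by dialing a public address over UDP; no packets are actually sent.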
+func getLocalIP() (string, error) {
+ conn, err := net.Dial("udp", "8.8.8.8:80")
+ if err != nil {
+ return "", err
+ }
+ defer conn.Close()
+
+ localAddress := conn.LocalAddr().(*net.UDPAddr)
+
+ return localAddress.IP.String(), nil
+}
diff --git a/tests/e2e/utl_migrate_test.go b/tests/e2e/utl_migrate_test.go
new file mode 100644
index 00000000000..7cc18e65353
--- /dev/null
+++ b/tests/e2e/utl_migrate_test.go
@@ -0,0 +1,171 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+func TestEtctlutlMigrate(t *testing.T) {
+ lastReleaseBinary := e2e.BinPath.EtcdLastRelease
+
+ tcs := []struct {
+ name string
+ targetVersion string
+ clusterVersion e2e.ClusterVersion
+ force bool
+
+ expectLogsSubString string
+ expectStorageVersion *semver.Version
+ }{
+ {
+ name: "Invalid target version string",
+ targetVersion: "abc",
+ expectLogsSubString: `Error: wrong target version format, expected "X.Y", got "abc"`,
+ expectStorageVersion: &version.V3_6,
+ },
+ {
+ name: "Invalid target version",
+ targetVersion: "3.a",
+ expectLogsSubString: `Error: failed to parse target version: strconv.ParseInt: parsing "a": invalid syntax`,
+ expectStorageVersion: &version.V3_6,
+ },
+ {
+ name: "Target with only major version is invalid",
+ targetVersion: "3",
+ expectLogsSubString: `Error: wrong target version format, expected "X.Y", got "3"`,
+ expectStorageVersion: &version.V3_6,
+ },
+ {
+ name: "Target with patch version is invalid",
+ targetVersion: "3.6.0",
+ expectLogsSubString: `Error: wrong target version format, expected "X.Y", got "3.6.0"`,
+ expectStorageVersion: &version.V3_6,
+ },
+ {
+ name: "Migrate v3.5 to v3.5 is no-op",
+ clusterVersion: e2e.LastVersion,
+ targetVersion: "3.5",
+ expectLogsSubString: "storage version up-to-date\t" + `{"storage-version": "3.5"}`,
+ },
+ {
+ name: "Upgrade v3.5 to v3.6 should work",
+ clusterVersion: e2e.LastVersion,
+ targetVersion: "3.6",
+ expectStorageVersion: &version.V3_6,
+ },
+ {
+ name: "Migrate v3.6 to v3.6 is no-op",
+ targetVersion: "3.6",
+ expectLogsSubString: "storage version up-to-date\t" + `{"storage-version": "3.6"}`,
+ expectStorageVersion: &version.V3_6,
+ },
+ {
+ name: "Downgrade v3.6 to v3.5 should fail until it's implemented",
+ targetVersion: "3.5",
+ expectLogsSubString: "cannot downgrade storage, WAL contains newer entries",
+ expectStorageVersion: &version.V3_6,
+ },
+ {
+ name: "Downgrade v3.6 to v3.5 with force should work",
+ targetVersion: "3.5",
+ force: true,
+ expectLogsSubString: "forcefully cleared storage version",
+ },
+ {
+ name: "Upgrade v3.6 to v3.7 with force should work",
+ targetVersion: "3.7",
+ force: true,
+ expectLogsSubString: "forcefully set storage version\t" + `{"storage-version": "3.7"}`,
+ expectStorageVersion: &semver.Version{Major: 3, Minor: 7},
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ e2e.BeforeTest(t)
+ lg := zaptest.NewLogger(t)
+ if tc.clusterVersion != e2e.CurrentVersion && !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
+ t.Skipf("%q does not exist", lastReleaseBinary)
+ }
+ dataDirPath := t.TempDir()
+
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+ e2e.WithVersion(tc.clusterVersion),
+ e2e.WithDataDirPath(dataDirPath),
+ e2e.WithClusterSize(1),
+ e2e.WithKeepDataDir(true),
+ // Set a low SnapshotCount to ensure a WAL snapshot is created
+ e2e.WithSnapshotCount(1),
+ )
+ if err != nil {
+ t.Fatalf("could not start etcd process cluster (%v)", err)
+ }
+ defer func() {
+ if errC := epc.Close(); errC != nil {
+ t.Fatalf("error closing etcd processes (%v)", errC)
+ }
+ }()
+
+ dialTimeout := 10 * time.Second
+ prefixArgs := []string{e2e.BinPath.Etcdctl, "--endpoints", strings.Join(epc.EndpointsGRPC(), ","), "--dial-timeout", dialTimeout.String()}
+
+ t.Log("Write keys to ensure wal snapshot is created and all v3.5 fields are set...")
+ for i := 0; i < 10; i++ {
+ require.NoError(t, e2e.SpawnWithExpect(append(prefixArgs, "put", fmt.Sprintf("%d", i), "value"), expect.ExpectedResponse{Value: "OK"}))
+ }
+
+ t.Log("Stopping the server...")
+ err = epc.Procs[0].Stop()
+ require.NoError(t, err)
+
+ t.Log("etcdutl migrate...")
+ memberDataDir := epc.Procs[0].Config().DataDirPath
+ args := []string{e2e.BinPath.Etcdutl, "migrate", "--data-dir", memberDataDir, "--target-version", tc.targetVersion}
+ if tc.force {
+ args = append(args, "--force")
+ }
+ err = e2e.SpawnWithExpect(args, expect.ExpectedResponse{Value: tc.expectLogsSubString})
+ if err != nil && tc.expectLogsSubString != "" {
+ require.ErrorContains(t, err, tc.expectLogsSubString)
+ } else {
+ require.NoError(t, err)
+ }
+
+ t.Log("etcdutl migrate...")
+ be := backend.NewDefaultBackend(lg, filepath.Join(memberDataDir, "member/snap/db"))
+ defer be.Close()
+
+ ver := schema.ReadStorageVersion(be.ReadTx())
+ assert.Equal(t, tc.expectStorageVersion, ver)
+ })
+ }
+}
diff --git a/tests/e2e/v2_curl_test.go b/tests/e2e/v2_curl_test.go
deleted file mode 100644
index 0285a7befde..00000000000
--- a/tests/e2e/v2_curl_test.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package e2e
-
-import (
- "fmt"
- "math/rand"
- "strings"
- "testing"
-)
-
-func TestV2CurlNoTLS(t *testing.T) { testCurlPutGet(t, newConfigNoTLS()) }
-func TestV2CurlAutoTLS(t *testing.T) { testCurlPutGet(t, newConfigAutoTLS()) }
-func TestV2CurlAllTLS(t *testing.T) { testCurlPutGet(t, newConfigTLS()) }
-func TestV2CurlPeerTLS(t *testing.T) { testCurlPutGet(t, newConfigPeerTLS()) }
-func TestV2CurlClientTLS(t *testing.T) { testCurlPutGet(t, newConfigClientTLS()) }
-func TestV2CurlClientBoth(t *testing.T) { testCurlPutGet(t, newConfigClientBoth()) }
-func testCurlPutGet(t *testing.T, cfg *etcdProcessClusterConfig) {
- BeforeTestV2(t)
-
- // test doesn't use quorum gets, so ensure there are no followers to avoid
- // stale reads that will break the test
- cfg = configStandalone(*cfg)
-
- cfg.enableV2 = true
- epc, err := newEtcdProcessCluster(t, cfg)
- if err != nil {
- t.Fatalf("could not start etcd process cluster (%v)", err)
- }
- defer func() {
- if err := epc.Close(); err != nil {
- t.Fatalf("error closing etcd processes (%v)", err)
- }
- }()
-
- var (
- expectPut = `{"action":"set","node":{"key":"/foo","value":"bar","`
- expectGet = `{"action":"get","node":{"key":"/foo","value":"bar","`
- )
- if err := cURLPut(epc, cURLReq{endpoint: "/v2/keys/foo", value: "bar", expected: expectPut}); err != nil {
- t.Fatalf("failed put with curl (%v)", err)
- }
- if err := cURLGet(epc, cURLReq{endpoint: "/v2/keys/foo", expected: expectGet}); err != nil {
- t.Fatalf("failed get with curl (%v)", err)
- }
- if cfg.clientTLS == clientTLSAndNonTLS {
- if err := cURLGet(epc, cURLReq{endpoint: "/v2/keys/foo", expected: expectGet, isTLS: true}); err != nil {
- t.Fatalf("failed get with curl (%v)", err)
- }
- }
-}
-
-func TestV2CurlIssue5182(t *testing.T) {
- BeforeTestV2(t)
-
- copied := newConfigNoTLS()
- copied.enableV2 = true
- epc := setupEtcdctlTest(t, copied, false)
- defer func() {
- if err := epc.Close(); err != nil {
- t.Fatalf("error closing etcd processes (%v)", err)
- }
- }()
-
- expectPut := `{"action":"set","node":{"key":"/foo","value":"bar","`
- if err := cURLPut(epc, cURLReq{endpoint: "/v2/keys/foo", value: "bar", expected: expectPut}); err != nil {
- t.Fatal(err)
- }
-
- expectUserAdd := `{"user":"foo","roles":null}`
- if err := cURLPut(epc, cURLReq{endpoint: "/v2/auth/users/foo", value: `{"user":"foo", "password":"pass"}`, expected: expectUserAdd}); err != nil {
- t.Fatal(err)
- }
- expectRoleAdd := `{"role":"foo","permissions":{"kv":{"read":["/foo/*"],"write":null}}`
- if err := cURLPut(epc, cURLReq{endpoint: "/v2/auth/roles/foo", value: `{"role":"foo", "permissions": {"kv": {"read": ["/foo/*"]}}}`, expected: expectRoleAdd}); err != nil {
- t.Fatal(err)
- }
- expectUserUpdate := `{"user":"foo","roles":["foo"]}`
- if err := cURLPut(epc, cURLReq{endpoint: "/v2/auth/users/foo", value: `{"user": "foo", "grant": ["foo"]}`, expected: expectUserUpdate}); err != nil {
- t.Fatal(err)
- }
-
- if err := etcdctlUserAdd(epc, "root", "a"); err != nil {
- t.Fatal(err)
- }
- if err := etcdctlAuthEnable(epc); err != nil {
- t.Fatal(err)
- }
-
- if err := cURLGet(epc, cURLReq{endpoint: "/v2/keys/foo/", username: "root", password: "a", expected: "bar"}); err != nil {
- t.Fatal(err)
- }
- if err := cURLGet(epc, cURLReq{endpoint: "/v2/keys/foo/", username: "foo", password: "pass", expected: "bar"}); err != nil {
- t.Fatal(err)
- }
- if err := cURLGet(epc, cURLReq{endpoint: "/v2/keys/foo/", username: "foo", password: "", expected: "bar"}); err != nil {
- if !strings.Contains(err.Error(), `The request requires user authentication`) {
- t.Fatalf("expected 'The request requires user authentication' error, got %v", err)
- }
- } else {
- t.Fatalf("expected 'The request requires user authentication' error")
- }
-}
-
-type cURLReq struct {
- username string
- password string
-
- isTLS bool
- timeout int
-
- endpoint string
-
- value string
- expected string
- header string
-
- metricsURLScheme string
-
- ciphers string
-}
-
-// cURLPrefixArgs builds the beginning of a curl command for a given key
-// addressed to a random URL in the given cluster.
-func cURLPrefixArgs(clus *etcdProcessCluster, method string, req cURLReq) []string {
- var (
- cmdArgs = []string{"curl"}
- acurl = clus.procs[rand.Intn(clus.cfg.clusterSize)].Config().acurl
- )
- if req.metricsURLScheme != "https" {
- if req.isTLS {
- if clus.cfg.clientTLS != clientTLSAndNonTLS {
- panic("should not use cURLPrefixArgsUseTLS when serving only TLS or non-TLS")
- }
- cmdArgs = append(cmdArgs, "--cacert", caPath, "--cert", certPath, "--key", privateKeyPath)
- acurl = toTLS(clus.procs[rand.Intn(clus.cfg.clusterSize)].Config().acurl)
- } else if clus.cfg.clientTLS == clientTLS {
- if !clus.cfg.noCN {
- cmdArgs = append(cmdArgs, "--cacert", caPath, "--cert", certPath, "--key", privateKeyPath)
- } else {
- cmdArgs = append(cmdArgs, "--cacert", caPath, "--cert", certPath3, "--key", privateKeyPath3)
- }
- }
- }
- if req.metricsURLScheme != "" {
- acurl = clus.procs[rand.Intn(clus.cfg.clusterSize)].EndpointsMetrics()[0]
- }
- ep := acurl + req.endpoint
-
- if req.username != "" || req.password != "" {
- cmdArgs = append(cmdArgs, "-L", "-u", fmt.Sprintf("%s:%s", req.username, req.password), ep)
- } else {
- cmdArgs = append(cmdArgs, "-L", ep)
- }
- if req.timeout != 0 {
- cmdArgs = append(cmdArgs, "-m", fmt.Sprintf("%d", req.timeout))
- }
-
- if req.header != "" {
- cmdArgs = append(cmdArgs, "-H", req.header)
- }
-
- if req.ciphers != "" {
- cmdArgs = append(cmdArgs, "--ciphers", req.ciphers)
- }
-
- switch method {
- case "POST", "PUT":
- dt := req.value
- if !strings.HasPrefix(dt, "{") { // for non-JSON value
- dt = "value=" + dt
- }
- cmdArgs = append(cmdArgs, "-X", method, "-d", dt)
- }
- return cmdArgs
-}
-
-func cURLPost(clus *etcdProcessCluster, req cURLReq) error {
- return spawnWithExpect(cURLPrefixArgs(clus, "POST", req), req.expected)
-}
-
-func cURLPut(clus *etcdProcessCluster, req cURLReq) error {
- return spawnWithExpect(cURLPrefixArgs(clus, "PUT", req), req.expected)
-}
-
-func cURLGet(clus *etcdProcessCluster, req cURLReq) error {
- return spawnWithExpect(cURLPrefixArgs(clus, "GET", req), req.expected)
-}
diff --git a/tests/e2e/v2_test.go b/tests/e2e/v2_test.go
deleted file mode 100644
index fe44b3f961b..00000000000
--- a/tests/e2e/v2_test.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !v2v3
-// +build !v2v3
-
-package e2e
-
-func addV2Args(args []string) []string { return args }
diff --git a/tests/e2e/v2store_deprecation_test.go b/tests/e2e/v2store_deprecation_test.go
index 06ad555d07a..b267ae240b8 100644
--- a/tests/e2e/v2store_deprecation_test.go
+++ b/tests/e2e/v2store_deprecation_test.go
@@ -15,84 +15,269 @@
package e2e
import (
+ "bytes"
+ "context"
"fmt"
+ "reflect"
+ "sort"
+ "strings"
"testing"
+ "github.com/coreos/go-semver/semver"
"github.com/stretchr/testify/assert"
-)
-
-func createV2store(t testing.TB, dataDirPath string) {
- t.Log("Creating not-yet v2-deprecated etcd")
-
- cfg := configStandalone(etcdProcessClusterConfig{enableV2: true, dataDirPath: dataDirPath, snapshotCount: 5})
- epc, err := newEtcdProcessCluster(t, cfg)
- assert.NoError(t, err)
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
- defer func() {
- assert.NoError(t, epc.Stop())
- }()
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
- // We need to exceed 'snapshotCount' such that v2 snapshot is dumped.
- for i := 0; i < 10; i++ {
- if err := cURLPut(epc, cURLReq{
- endpoint: "/v2/keys/foo", value: "bar" + fmt.Sprint(i),
- expected: `{"action":"set","node":{"key":"/foo","value":"bar` + fmt.Sprint(i)}); err != nil {
+func writeCustomV2Data(t testing.TB, epc *e2e.EtcdProcessCluster, count int) {
+ for i := 0; i < count; i++ {
+ if err := e2e.CURLPut(epc, e2e.CURLReq{
+ Endpoint: "/v2/keys/foo", Value: "bar" + fmt.Sprint(i),
+ Expected: expect.ExpectedResponse{Value: `{"action":"set","node":{"key":"/foo","value":"bar` + fmt.Sprint(i)},
+ }); err != nil {
t.Fatalf("failed put with curl (%v)", err)
}
}
}
-func assertVerifyCanStartV2deprecationNotYet(t testing.TB, dataDirPath string) {
- t.Log("verify: possible to start etcd with --v2-deprecation=not-yet mode")
+func TestV2DeprecationNotYet(t *testing.T) {
+ e2e.BeforeTest(t)
+ t.Log("Verify its infeasible to start etcd with --v2-deprecation=not-yet mode")
+ proc, err := e2e.SpawnCmd([]string{e2e.BinPath.Etcd, "--v2-deprecation=not-yet"}, nil)
+ require.NoError(t, err)
- cfg := configStandalone(etcdProcessClusterConfig{enableV2: true, dataDirPath: dataDirPath, v2deprecation: "not-yet", keepDataDir: true})
- epc, err := newEtcdProcessCluster(t, cfg)
+ _, err = proc.Expect(`invalid value "not-yet" for flag -v2-deprecation: invalid value "not-yet"`)
assert.NoError(t, err)
+}
- defer func() {
- assert.NoError(t, epc.Stop())
- }()
+func TestV2DeprecationWriteOnlyWAL(t *testing.T) {
+ e2e.BeforeTest(t)
+ dataDirPath := t.TempDir()
- if err := cURLGet(epc, cURLReq{
- endpoint: "/v2/keys/foo",
- expected: `{"action":"get","node":{"key":"/foo","value":"bar9","modifiedIndex":13,"createdIndex":13}}`}); err != nil {
- t.Fatalf("failed get with curl (%v)", err)
+ if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
+ t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease)
}
+ cfg := e2e.ConfigStandalone(*e2e.NewConfig(
+ e2e.WithVersion(e2e.LastVersion),
+ e2e.WithEnableV2(true),
+ e2e.WithDataDirPath(dataDirPath),
+ ))
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg))
+ require.NoError(t, err)
+ memberDataDir := epc.Procs[0].Config().DataDirPath
-}
+ writeCustomV2Data(t, epc, 1)
+
+ require.NoError(t, epc.Stop())
-func assertVerifyCannotStartV2deprecationWriteOnly(t testing.TB, dataDirPath string) {
t.Log("Verify its infeasible to start etcd with --v2-deprecation=write-only mode")
- proc, err := spawnCmd([]string{binDir + "/etcd", "--v2-deprecation=write-only", "--data-dir=" + dataDirPath})
+ proc, err := e2e.SpawnCmd([]string{e2e.BinPath.Etcd, "--v2-deprecation=write-only", "--data-dir=" + memberDataDir}, nil)
+ require.NoError(t, err)
+
+ _, err = proc.Expect("detected disallowed v2 WAL for stage --v2-deprecation=write-only")
assert.NoError(t, err)
+}
+
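+// TestV2DeprecationWriteOnlySnapshot writes enough v2 keys with the previous release to produce a v2
+// snapshot, then verifies the current binary rejects the custom v2store content in --v2-deprecation=write-only mode.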
+func TestV2DeprecationWriteOnlySnapshot(t *testing.T) {
+ e2e.BeforeTest(t)
+ dataDirPath := t.TempDir()
+
+ if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
+ t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease)
+ }
+ cfg := e2e.ConfigStandalone(*e2e.NewConfig(
+ e2e.WithVersion(e2e.LastVersion),
+ e2e.WithEnableV2(true),
+ e2e.WithDataDirPath(dataDirPath),
+ e2e.WithSnapshotCount(10),
+ ))
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg))
+ require.NoError(t, err)
+ memberDataDir := epc.Procs[0].Config().DataDirPath
+
+ // We need to exceed 'SnapshotCount' such that v2 snapshot is dumped.
+ writeCustomV2Data(t, epc, 10)
+
+ require.NoError(t, epc.Stop())
+
+ t.Log("Verify its infeasible to start etcd with --v2-deprecation=write-only mode")
+ proc, err := e2e.SpawnCmd([]string{e2e.BinPath.Etcd, "--v2-deprecation=write-only", "--data-dir=" + memberDataDir}, nil)
+ require.NoError(t, err)
+ defer proc.Close()
_, err = proc.Expect("detected disallowed custom content in v2store for stage --v2-deprecation=write-only")
assert.NoError(t, err)
}
-func TestV2Deprecation(t *testing.T) {
- BeforeTest(t)
- dataDirPath := t.TempDir()
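+// TestV2DeprecationSnapshotMatches runs the same key and membership workload against the previous
+// and current releases and asserts that the v2 snapshots they produce encode identical memberships
+// once member IDs are normalized.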
+func TestV2DeprecationSnapshotMatches(t *testing.T) {
+ e2e.BeforeTest(t)
+ lastReleaseData := t.TempDir()
+ currentReleaseData := t.TempDir()
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
- t.Run("create-storev2-data", func(t *testing.T) {
- createV2store(t, dataDirPath)
- })
+ if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
+ t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease)
+ }
+ var snapshotCount uint64 = 10
+ epc := runEtcdAndCreateSnapshot(t, e2e.LastVersion, lastReleaseData, snapshotCount)
+ oldMemberDataDir := epc.Procs[0].Config().DataDirPath
+ cc1 := epc.Etcdctl()
+ members1 := addAndRemoveKeysAndMembers(ctx, t, cc1, snapshotCount)
+ require.NoError(t, epc.Close())
+ epc = runEtcdAndCreateSnapshot(t, e2e.CurrentVersion, currentReleaseData, snapshotCount)
+ newMemberDataDir := epc.Procs[0].Config().DataDirPath
+ cc2 := epc.Etcdctl()
+ members2 := addAndRemoveKeysAndMembers(ctx, t, cc2, snapshotCount)
+ require.NoError(t, epc.Close())
- t.Run("--v2-deprecation=write-only fails", func(t *testing.T) {
- assertVerifyCannotStartV2deprecationWriteOnly(t, dataDirPath)
+ assertSnapshotsMatch(t, oldMemberDataDir, newMemberDataDir, func(data []byte) []byte {
+ // Patch members ids
+ for i, mid := range members1 {
+ data = bytes.Replace(data, []byte(fmt.Sprintf("%x", mid)), []byte(fmt.Sprintf("%d", i+1)), -1)
+ }
+ for i, mid := range members2 {
+ data = bytes.Replace(data, []byte(fmt.Sprintf("%x", mid)), []byte(fmt.Sprintf("%d", i+1)), -1)
+ }
+ return data
})
+}
- t.Run("--v2-deprecation=not-yet succeeds", func(t *testing.T) {
- assertVerifyCanStartV2deprecationNotYet(t, dataDirPath)
- })
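+// TestV2DeprecationSnapshotRecover creates a snapshot with the previous release, restarts the same
+// data dir with the current release, and verifies that keys and the member list are unchanged.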
+func TestV2DeprecationSnapshotRecover(t *testing.T) {
+ e2e.BeforeTest(t)
+ dataDir := t.TempDir()
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
-}
+ if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
+ t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease)
+ }
+ epc := runEtcdAndCreateSnapshot(t, e2e.LastVersion, dataDir, 10)
-func TestV2DeprecationWriteOnlyNoV2Api(t *testing.T) {
- BeforeTest(t)
- proc, err := spawnCmd([]string{binDir + "/etcd", "--v2-deprecation=write-only", "--enable-v2"})
+ cc := epc.Etcdctl()
+ lastReleaseGetResponse, err := cc.Get(ctx, "", config.GetOptions{Prefix: true})
+ require.NoError(t, err)
+
+ lastReleaseMemberListResponse, err := cc.MemberList(ctx, false)
assert.NoError(t, err)
- _, err = proc.Expect("--enable-v2 and --v2-deprecation=write-only are mutually exclusive")
+ assert.NoError(t, epc.Close())
+ cfg := e2e.ConfigStandalone(*e2e.NewConfig(
+ e2e.WithVersion(e2e.CurrentVersion),
+ e2e.WithDataDirPath(dataDir),
+ ))
+ epc, err = e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg))
+ require.NoError(t, err)
+
+ cc = epc.Etcdctl()
+ currentReleaseGetResponse, err := cc.Get(ctx, "", config.GetOptions{Prefix: true})
+ require.NoError(t, err)
+
+ currentReleaseMemberListResponse, err := cc.MemberList(ctx, false)
+ require.NoError(t, err)
+
+ assert.Equal(t, lastReleaseGetResponse.Kvs, currentReleaseGetResponse.Kvs)
+ assert.Equal(t, lastReleaseMemberListResponse.Members, currentReleaseMemberListResponse.Members)
+ assert.NoError(t, epc.Close())
+}
+
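+// runEtcdAndCreateSnapshot starts a standalone etcd of the given release, configured to snapshot
+// every snapshotCount entries and to keep its data dir so it can be reopened later.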
+func runEtcdAndCreateSnapshot(t testing.TB, serverVersion e2e.ClusterVersion, dataDir string, snapshotCount uint64) *e2e.EtcdProcessCluster {
+ cfg := e2e.ConfigStandalone(*e2e.NewConfig(
+ e2e.WithVersion(serverVersion),
+ e2e.WithDataDirPath(dataDir),
+ e2e.WithSnapshotCount(snapshotCount),
+ e2e.WithKeepDataDir(true),
+ ))
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg))
assert.NoError(t, err)
+ return epc
+}
+
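+// addAndRemoveKeysAndMembers runs puts, deletes, and learner add/remove operations spanning several
+// snapshot intervals and returns the IDs of the members it added.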
+func addAndRemoveKeysAndMembers(ctx context.Context, t testing.TB, cc *e2e.EtcdctlV3, snapshotCount uint64) (members []uint64) {
+ // Execute some non-trivial key & member operations
+ var i uint64
+ for i = 0; i < snapshotCount*3; i++ {
+ err := cc.Put(ctx, fmt.Sprintf("%d", i), "1", config.PutOptions{})
+ require.NoError(t, err)
+ }
+ member1, err := cc.MemberAddAsLearner(ctx, "member1", []string{"http://127.0.0.1:2000"})
+ require.NoError(t, err)
+ members = append(members, member1.Member.ID)
+
+ for i = 0; i < snapshotCount*2; i++ {
+ _, err = cc.Delete(ctx, fmt.Sprintf("%d", i), config.DeleteOptions{})
+ require.NoError(t, err)
+ }
+ _, err = cc.MemberRemove(ctx, member1.Member.ID)
+ require.NoError(t, err)
+
+ for i = 0; i < snapshotCount; i++ {
+ err = cc.Put(ctx, fmt.Sprintf("%d", i), "2", config.PutOptions{})
+ require.NoError(t, err)
+ }
+ member2, err := cc.MemberAddAsLearner(ctx, "member2", []string{"http://127.0.0.1:2001"})
+ require.NoError(t, err)
+ members = append(members, member2.Member.ID)
+
+ for i = 0; i < snapshotCount/2; i++ {
+ err = cc.Put(ctx, fmt.Sprintf("%d", i), "3", config.PutOptions{})
+ assert.NoError(t, err)
+ }
+ return members
+}
+
+func filterSnapshotFiles(path string) bool {
+ return strings.HasSuffix(path, ".snap")
+}
+
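+// assertSnapshotsMatch reads the *.snap files from both data dirs in sorted order, normalizes them
+// with patch, and asserts that corresponding snapshots recover identical memberships.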
+func assertSnapshotsMatch(t testing.TB, firstDataDir, secondDataDir string, patch func([]byte) []byte) {
+ lg := zaptest.NewLogger(t)
+ firstFiles, err := fileutil.ListFiles(firstDataDir, filterSnapshotFiles)
+ require.NoError(t, err)
+ secondFiles, err := fileutil.ListFiles(secondDataDir, filterSnapshotFiles)
+ require.NoError(t, err)
+ assert.NotEmpty(t, firstFiles)
+ assert.NotEmpty(t, secondFiles)
+ assert.Equal(t, len(firstFiles), len(secondFiles))
+ sort.Strings(firstFiles)
+ sort.Strings(secondFiles)
+ for i := 0; i < len(firstFiles); i++ {
+ firstSnapshot, err := snap.Read(lg, firstFiles[i])
+ require.NoError(t, err)
+ secondSnapshot, err := snap.Read(lg, secondFiles[i])
+ require.NoError(t, err)
+ assertMembershipEqual(t, openSnap(patch(firstSnapshot.Data)), openSnap(patch(secondSnapshot.Data)))
+ }
+}
+
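+// assertMembershipEqual recovers a RaftCluster from each v2store and reports a test error if the
+// member sets differ.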
+func assertMembershipEqual(t testing.TB, firstStore v2store.Store, secondStore v2store.Store) {
+ rc1 := membership.NewCluster(zaptest.NewLogger(t))
+ rc1.SetStore(firstStore)
+ rc1.Recover(func(lg *zap.Logger, v *semver.Version) {})
+
+ rc2 := membership.NewCluster(zaptest.NewLogger(t))
+ rc2.SetStore(secondStore)
+ rc2.Recover(func(lg *zap.Logger, v *semver.Version) {})
+
+ // membership should match
+ if !reflect.DeepEqual(rc1.Members(), rc2.Members()) {
+ t.Logf("memberids_from_last_version = %+v, member_ids_from_current_version = %+v", rc1.MemberIDs(), rc2.MemberIDs())
+ t.Errorf("members_from_last_version_snapshot = %+v, members_from_current_version_snapshot %+v", rc1.Members(), rc2.Members())
+ }
+}
+
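+// openSnap recovers a v2store from raw snapshot data using the server's standard cluster and key prefixes.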
+func openSnap(data []byte) v2store.Store {
+ st := v2store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix)
+ st.Recovery(data)
+ return st
}
diff --git a/tests/e2e/v2v3_test.go b/tests/e2e/v2v3_test.go
deleted file mode 100644
index 75043b7df95..00000000000
--- a/tests/e2e/v2v3_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build v2v3
-// +build v2v3
-
-package e2e
-
-func addV2Args(args []string) []string {
- return append(args, "--experimental-enable-v2v3", "v2/")
-}
diff --git a/tests/e2e/v3_cipher_suite_test.go b/tests/e2e/v3_cipher_suite_test.go
index 694de13b4de..a2a5ba3f02c 100644
--- a/tests/e2e/v3_cipher_suite_test.go
+++ b/tests/e2e/v3_cipher_suite_test.go
@@ -12,8 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build !cov && !cluster_proxy
-// +build !cov,!cluster_proxy
+//go:build !cluster_proxy
package e2e
@@ -21,15 +20,19 @@ import (
"fmt"
"testing"
+ "github.com/stretchr/testify/require"
+
"go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
)
-func TestV3CurlCipherSuitesValid(t *testing.T) { testV3CurlCipherSuites(t, true) }
-func TestV3CurlCipherSuitesMismatch(t *testing.T) { testV3CurlCipherSuites(t, false) }
-func testV3CurlCipherSuites(t *testing.T, valid bool) {
- cc := newConfigClientTLS()
- cc.clusterSize = 1
- cc.cipherSuites = []string{
+func TestCurlV3CipherSuitesValid(t *testing.T) { testCurlV3CipherSuites(t, true) }
+func TestCurlV3CipherSuitesMismatch(t *testing.T) { testCurlV3CipherSuites(t, false) }
+func testCurlV3CipherSuites(t *testing.T, valid bool) {
+ cc := e2e.NewConfigClientTLS()
+ cc.ClusterSize = 1
+ cc.ServerConfig.CipherSuites = []string{
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
@@ -45,30 +48,20 @@ func testV3CurlCipherSuites(t *testing.T, valid bool) {
}
func cipherSuiteTestValid(cx ctlCtx) {
- if err := cURLGet(cx.epc, cURLReq{
- endpoint: "/metrics",
- expected: fmt.Sprintf(`etcd_server_version{server_version="%s"} 1`, version.Version),
- metricsURLScheme: cx.cfg.metricsURLScheme,
- ciphers: "ECDHE-RSA-AES128-GCM-SHA256", // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+ if err := e2e.CURLGet(cx.epc, e2e.CURLReq{
+ Endpoint: "/metrics",
+ Expected: expect.ExpectedResponse{Value: fmt.Sprintf(`etcd_server_version{server_version="%s"} 1`, version.Version)},
+ Ciphers: "ECDHE-RSA-AES128-GCM-SHA256", // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
}); err != nil {
- cx.t.Fatalf("failed get with curl (%v)", err)
+ require.ErrorContains(cx.t, err, fmt.Sprintf(`etcd_server_version{server_version="%s"} 1`, version.Version))
}
}
func cipherSuiteTestMismatch(cx ctlCtx) {
- var err error
- for _, exp := range []string{"alert handshake failure", "failed setting cipher list"} {
- err = cURLGet(cx.epc, cURLReq{
- endpoint: "/metrics",
- expected: exp,
- metricsURLScheme: cx.cfg.metricsURLScheme,
- ciphers: "ECDHE-RSA-DES-CBC3-SHA", // TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
- })
- if err == nil {
- break
- }
- }
- if err != nil {
- cx.t.Fatalf("failed get with curl (%v)", err)
- }
+ err := e2e.CURLGet(cx.epc, e2e.CURLReq{
+ Endpoint: "/metrics",
+ Expected: expect.ExpectedResponse{Value: "failed setting cipher list"},
+ Ciphers: "ECDHE-RSA-DES-CBC3-SHA", // TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
+ })
+ require.ErrorContains(cx.t, err, "curl: (59) failed setting cipher list")
}
diff --git a/tests/e2e/v3_curl_auth_test.go b/tests/e2e/v3_curl_auth_test.go
new file mode 100644
index 00000000000..c8745d23179
--- /dev/null
+++ b/tests/e2e/v3_curl_auth_test.go
@@ -0,0 +1,410 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math/rand"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/api/v3/authpb"
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+func TestCurlV3Auth(t *testing.T) {
+ testCtl(t, testCurlV3Auth)
+}
+
+func TestCurlV3AuthClientTLSCertAuth(t *testing.T) {
+ testCtl(t, testCurlV3Auth, withCfg(*e2e.NewConfigClientTLSCertAuthWithNoCN()))
+}
+
+func TestCurlV3AuthUserBasicOperations(t *testing.T) {
+ testCtl(t, testCurlV3AuthUserBasicOperations)
+}
+
+func TestCurlV3AuthUserGrantRevokeRoles(t *testing.T) {
+ testCtl(t, testCurlV3AuthUserGrantRevokeRoles)
+}
+
+func TestCurlV3AuthRoleBasicOperations(t *testing.T) {
+ testCtl(t, testCurlV3AuthRoleBasicOperations)
+}
+
+func TestCurlV3AuthRoleManagePermission(t *testing.T) {
+ testCtl(t, testCurlV3AuthRoleManagePermission)
+}
+
+func TestCurlV3AuthEnableDisableStatus(t *testing.T) {
+ testCtl(t, testCurlV3AuthEnableDisableStatus)
+}
+
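+// testCurlV3Auth creates three users and a root role, grants the role to each user, enables auth,
+// and then verifies that puts fail without a token and succeed with a token obtained from
+// /v3/auth/authenticate.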
+func testCurlV3Auth(cx ctlCtx) {
+ usernames := []string{"root", "nonroot", "nooption"}
+ pwds := []string{"toor", "pass", "pass"}
+ options := []*authpb.UserAddOptions{{NoPassword: false}, {NoPassword: false}, nil}
+
+ // create users
+ for i := 0; i < len(usernames); i++ {
+ user, err := json.Marshal(&pb.AuthUserAddRequest{Name: usernames[i], Password: pwds[i], Options: options[i]})
+ require.NoError(cx.t, err)
+
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/user/add",
+ Value: string(user),
+ Expected: expect.ExpectedResponse{Value: "revision"},
+ }), "testCurlV3Auth failed to add user %v", usernames[i])
+ }
+
+ // create root role
+ rolereq, err := json.Marshal(&pb.AuthRoleAddRequest{Name: "root"})
+ require.NoError(cx.t, err)
+
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/role/add",
+ Value: string(rolereq),
+ Expected: expect.ExpectedResponse{Value: "revision"},
+ }), "testCurlV3Auth failed to create role")
+
+ // grant root role
+ for i := 0; i < len(usernames); i++ {
+ grantroleroot, merr := json.Marshal(&pb.AuthUserGrantRoleRequest{User: usernames[i], Role: "root"})
+ require.NoError(cx.t, merr)
+
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/user/grant",
+ Value: string(grantroleroot),
+ Expected: expect.ExpectedResponse{Value: "revision"},
+ }), "testCurlV3Auth failed to grant role")
+ }
+
+ // enable auth
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/enable",
+ Value: "{}",
+ Expected: expect.ExpectedResponse{Value: "revision"},
+ }), "testCurlV3Auth failed to enable auth")
+
+ for i := 0; i < len(usernames); i++ {
+ // put "bar[i]" into "foo[i]"
+ putreq, err := json.Marshal(&pb.PutRequest{Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte(fmt.Sprintf("bar%d", i))})
+ require.NoError(cx.t, err)
+
+ // fail put no auth
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/kv/put",
+ Value: string(putreq),
+ Expected: expect.ExpectedResponse{Value: "etcdserver: user name is empty"},
+ }), "testCurlV3Auth failed to put without token")
+
+ // auth request
+ authreq, err := json.Marshal(&pb.AuthenticateRequest{Name: usernames[i], Password: pwds[i]})
+ require.NoError(cx.t, err)
+
+ var (
+ authHeader string
+ cmdArgs []string
+ lineFunc = func(txt string) bool { return true }
+ )
+
+ cmdArgs = e2e.CURLPrefixArgsCluster(cx.epc.Cfg, cx.epc.Procs[rand.Intn(cx.epc.Cfg.ClusterSize)], "POST", e2e.CURLReq{
+ Endpoint: "/v3/auth/authenticate",
+ Value: string(authreq),
+ })
+ proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
+ require.NoError(cx.t, err)
+ defer proc.Close()
+
+ cURLRes, err := proc.ExpectFunc(context.Background(), lineFunc)
+ require.NoError(cx.t, err)
+
+ authRes := make(map[string]any)
+ require.NoError(cx.t, json.Unmarshal([]byte(cURLRes), &authRes))
+
+ token, ok := authRes[rpctypes.TokenFieldNameGRPC].(string)
+ if !ok {
+ cx.t.Fatalf("failed invalid token in authenticate response using user (%v)", usernames[i])
+ }
+
+ authHeader = "Authorization: " + token
+ // put with auth
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/kv/put",
+ Value: string(putreq),
+ Header: authHeader,
+ Expected: expect.ExpectedResponse{Value: "revision"},
+ }), "testCurlV3Auth failed to auth put with user (%v)", usernames[i])
+ }
+}
+
+func testCurlV3AuthUserBasicOperations(cx ctlCtx) {
+ usernames := []string{"user1", "user2", "user3"}
+
+ // create users
+ for i := 0; i < len(usernames); i++ {
+ user, err := json.Marshal(&pb.AuthUserAddRequest{Name: usernames[i], Password: "123"})
+ require.NoError(cx.t, err)
+
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/user/add",
+ Value: string(user),
+ Expected: expect.ExpectedResponse{Value: "revision"},
+ }), "testCurlV3AuthUserBasicOperations failed to add user %v", usernames[i])
+ }
+
+ // change password
+ user, err := json.Marshal(&pb.AuthUserChangePasswordRequest{Name: "user1", Password: "456"})
+ require.NoError(cx.t, err)
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/user/changepw",
+ Value: string(user),
+ Expected: expect.ExpectedResponse{Value: "revision"},
+ }), "testCurlV3AuthUserBasicOperations failed to change user's password")
+
+ // get users
+ usernames = []string{"user1", "userX"}
+ expectedResponse := []string{"revision", "etcdserver: user name not found"}
+ for i := 0; i < len(usernames); i++ {
+ user, err = json.Marshal(&pb.AuthUserGetRequest{
+ Name: usernames[i],
+ })
+
+ require.NoError(cx.t, err)
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/user/get",
+ Value: string(user),
+ Expected: expect.ExpectedResponse{Value: expectedResponse[i]},
+ }), "testCurlV3AuthUserBasicOperations failed to get user %v", usernames[i])
+ }
+
+ // delete users
+ usernames = []string{"user2", "userX"}
+ expectedResponse = []string{"revision", "etcdserver: user name not found"}
+ for i := 0; i < len(usernames); i++ {
+ user, err = json.Marshal(&pb.AuthUserDeleteRequest{
+ Name: usernames[i],
+ })
+ require.NoError(cx.t, err)
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/user/delete",
+ Value: string(user),
+ Expected: expect.ExpectedResponse{Value: expectedResponse[i]},
+ }), "testCurlV3AuthUserBasicOperations failed to delete user %v", usernames[i])
+ }
+
+ // list users
+ clus := cx.epc
+ args := e2e.CURLPrefixArgsCluster(clus.Cfg, clus.Procs[rand.Intn(clus.Cfg.ClusterSize)], "POST", e2e.CURLReq{
+ Endpoint: "/v3/auth/user/list",
+ Value: "{}",
+ })
+ resp, err := runCommandAndReadJSONOutput(args)
+ require.NoError(cx.t, err)
+
+ users, ok := resp["users"]
+ require.True(cx.t, ok)
+ userSlice := users.([]any)
+ require.Len(cx.t, userSlice, 2)
+ require.Equal(cx.t, "user1", userSlice[0])
+ require.Equal(cx.t, "user3", userSlice[1])
+}
+
+func testCurlV3AuthUserGrantRevokeRoles(cx ctlCtx) {
+ var (
+ username = "user1"
+ rolename = "role1"
+ )
+
+ // create user
+ user, err := json.Marshal(&pb.AuthUserAddRequest{Name: username, Password: "123"})
+ require.NoError(cx.t, err)
+
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/user/add",
+ Value: string(user),
+ Expected: expect.ExpectedResponse{Value: "revision"},
+ }), "testCurlV3AuthUserGrantRevokeRoles failed to add user %v", username)
+
+ // create role
+ role, err := json.Marshal(&pb.AuthRoleAddRequest{Name: rolename})
+ require.NoError(cx.t, err)
+
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/role/add",
+ Value: string(role),
+ Expected: expect.ExpectedResponse{Value: "revision"},
+ }), "testCurlV3AuthUserGrantRevokeRoles failed to add role %v", rolename)
+
+ // grant role to user
+ grantRoleReq, err := json.Marshal(&pb.AuthUserGrantRoleRequest{
+ User: username,
+ Role: rolename,
+ })
+ require.NoError(cx.t, err)
+
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/user/grant",
+ Value: string(grantRoleReq),
+ Expected: expect.ExpectedResponse{Value: "revision"},
+ }), "testCurlV3AuthUserGrantRevokeRoles failed to grant role to user")
+
+ // revoke role from user
+ revokeRoleReq, err := json.Marshal(&pb.AuthUserRevokeRoleRequest{
+ Name: username,
+ Role: rolename,
+ })
+ require.NoError(cx.t, err)
+
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/user/revoke",
+ Value: string(revokeRoleReq),
+ Expected: expect.ExpectedResponse{Value: "revision"},
+ }), "testCurlV3AuthUserGrantRevokeRoles failed to revoke role from user")
+}
+
+func testCurlV3AuthRoleBasicOperations(cx ctlCtx) {
+ rolenames := []string{"role1", "role2", "role3"}
+
+ // create roles
+ for i := 0; i < len(rolenames); i++ {
+ role, err := json.Marshal(&pb.AuthRoleAddRequest{Name: rolenames[i]})
+ require.NoError(cx.t, err)
+
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/role/add",
+ Value: string(role),
+ Expected: expect.ExpectedResponse{Value: "revision"},
+ }), "testCurlV3AuthRoleBasicOperations failed to add role %v", rolenames[i])
+ }
+
+ // get roles
+ rolenames = []string{"role1", "roleX"}
+ expectedResponse := []string{"revision", "etcdserver: role name not found"}
+ for i := 0; i < len(rolenames); i++ {
+ role, err := json.Marshal(&pb.AuthRoleGetRequest{
+ Role: rolenames[i],
+ })
+ require.NoError(cx.t, err)
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/role/get",
+ Value: string(role),
+ Expected: expect.ExpectedResponse{Value: expectedResponse[i]},
+ }), "testCurlV3AuthRoleBasicOperations failed to get role %v", rolenames[i])
+ }
+
+ // delete roles
+ rolenames = []string{"role2", "roleX"}
+ expectedResponse = []string{"revision", "etcdserver: role name not found"}
+ for i := 0; i < len(rolenames); i++ {
+ role, err := json.Marshal(&pb.AuthRoleDeleteRequest{
+ Role: rolenames[i],
+ })
+ require.NoError(cx.t, err)
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/role/delete",
+ Value: string(role),
+ Expected: expect.ExpectedResponse{Value: expectedResponse[i]},
+ }), "testCurlV3AuthRoleBasicOperations failed to delete role %v", rolenames[i])
+ }
+
+ // list roles
+ clus := cx.epc
+ args := e2e.CURLPrefixArgsCluster(clus.Cfg, clus.Procs[rand.Intn(clus.Cfg.ClusterSize)], "POST", e2e.CURLReq{
+ Endpoint: "/v3/auth/role/list",
+ Value: "{}",
+ })
+ resp, err := runCommandAndReadJSONOutput(args)
+ require.NoError(cx.t, err)
+
+ roles, ok := resp["roles"]
+ require.True(cx.t, ok)
+ roleSlice := roles.([]any)
+ require.Len(cx.t, roleSlice, 2)
+ require.Equal(cx.t, "role1", roleSlice[0])
+ require.Equal(cx.t, "role3", roleSlice[1])
+}
+
+func testCurlV3AuthRoleManagePermission(cx ctlCtx) {
+ rolename := "role1"
+
+ // create a role
+ role, err := json.Marshal(&pb.AuthRoleAddRequest{Name: rolename})
+ require.NoError(cx.t, err)
+
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/role/add",
+ Value: string(role),
+ Expected: expect.ExpectedResponse{Value: "revision"},
+ }), "testCurlV3AuthRoleManagePermission failed to add role %v", rolename)
+
+ // grant permission
+ grantPermissionReq, err := json.Marshal(&pb.AuthRoleGrantPermissionRequest{
+ Name: rolename,
+ Perm: &authpb.Permission{
+ PermType: authpb.READ,
+ Key: []byte("fakeKey"),
+ },
+ })
+ require.NoError(cx.t, err)
+
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/role/grant",
+ Value: string(grantPermissionReq),
+ Expected: expect.ExpectedResponse{Value: "revision"},
+ }), "testCurlV3AuthRoleManagePermission failed to grant permission to role %v", rolename)
+
+ // revoke permission
+ revokePermissionReq, err := json.Marshal(&pb.AuthRoleRevokePermissionRequest{
+ Role: rolename,
+ Key: []byte("fakeKey"),
+ })
+ require.NoError(cx.t, err)
+
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/role/revoke",
+ Value: string(revokePermissionReq),
+ Expected: expect.ExpectedResponse{Value: "revision"},
+ }), "testCurlV3AuthRoleManagePermission failed to revoke permission from role %v", rolename)
+}
+
+func testCurlV3AuthEnableDisableStatus(cx ctlCtx) {
+ // enable auth
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/enable",
+ Value: "{}",
+ Expected: expect.ExpectedResponse{Value: "etcdserver: root user does not exist"},
+ }), "testCurlV3AuthEnableDisableStatus failed to enable auth")
+
+ // disable auth
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/disable",
+ Value: "{}",
+ Expected: expect.ExpectedResponse{Value: "revision"},
+ }), "testCurlV3AuthEnableDisableStatus failed to disable auth")
+
+ // auth status
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/auth/status",
+ Value: "{}",
+ Expected: expect.ExpectedResponse{Value: "revision"},
+ }), "testCurlV3AuthEnableDisableStatus failed to get auth status")
+}
diff --git a/tests/e2e/v3_curl_cluster_test.go b/tests/e2e/v3_curl_cluster_test.go
new file mode 100644
index 00000000000..945946e41d4
--- /dev/null
+++ b/tests/e2e/v3_curl_cluster_test.go
@@ -0,0 +1,116 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+func TestCurlV3ClusterOperations(t *testing.T) {
+ testCtl(t, testCurlV3ClusterOperations, withCfg(*e2e.NewConfig(e2e.WithClusterSize(1))))
+}
+
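+// testCurlV3ClusterOperations exercises member add (as learner), list, update, promote, and remove
+// through the /v3/cluster gRPC gateway endpoints.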
+func testCurlV3ClusterOperations(cx ctlCtx) {
+ var (
+ peerURL = "http://127.0.0.1:22380"
+ updatedPeerURL = "http://127.0.0.1:32380"
+ )
+
+ // add member
+ cx.t.Logf("Adding member %q", peerURL)
+ addMemberReq, err := json.Marshal(&pb.MemberAddRequest{PeerURLs: []string{peerURL}, IsLearner: true})
+ require.NoError(cx.t, err)
+
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/cluster/member/add",
+ Value: string(addMemberReq),
+ Expected: expect.ExpectedResponse{Value: peerURL},
+ }), "testCurlV3ClusterOperations failed to add member")
+
+ // list members and get the new member's ID
+ cx.t.Log("Listing members after adding a member")
+ members := mustListMembers(cx)
+ require.Len(cx.t, members, 2)
+ cx.t.Logf("members: %+v", members)
+
+ var newMemberIDStr string
+ for _, m := range members {
+ mObj := m.(map[string]any)
+ pURL := mObj["peerURLs"].([]any)[0].(string)
+ if pURL == peerURL {
+ newMemberIDStr = mObj["ID"].(string)
+ break
+ }
+ }
+ require.NotEmpty(cx.t, newMemberIDStr)
+
+ // update member
+ cx.t.Logf("Update peerURL from %q to %q for member %q", peerURL, updatedPeerURL, newMemberIDStr)
+ newMemberID, err := strconv.ParseUint(newMemberIDStr, 10, 64)
+ require.NoError(cx.t, err)
+
+ updateMemberReq, err := json.Marshal(&pb.MemberUpdateRequest{ID: newMemberID, PeerURLs: []string{updatedPeerURL}})
+ require.NoError(cx.t, err)
+
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/cluster/member/update",
+ Value: string(updateMemberReq),
+ Expected: expect.ExpectedResponse{Value: updatedPeerURL},
+ }), "testCurlV3ClusterOperations failed to update member")
+
+ // promote member
+ cx.t.Logf("Promoting the member %d", newMemberID)
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/cluster/member/promote",
+ Value: fmt.Sprintf(`{"ID": %d}`, newMemberID),
+ Expected: expect.ExpectedResponse{Value: "etcdserver: can only promote a learner member which is in sync with leader"},
+ }), "testCurlV3ClusterOperations failed to promote member")
+
+ // remove member
+ cx.t.Logf("Removing the member %d", newMemberID)
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/cluster/member/remove",
+ Value: fmt.Sprintf(`{"ID": %d}`, newMemberID),
+ Expected: expect.ExpectedResponse{Value: "members"},
+ }), "testCurlV3ClusterOperations failed to remove member")
+
+ // list members again after deleting a member
+ cx.t.Log("Listing members again after deleting a member")
+ members = mustListMembers(cx)
+ require.Len(cx.t, members, 1)
+}
+
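+// mustListMembers fetches the member list through the /v3/cluster/member/list gateway endpoint and
+// returns the decoded "members" array, failing the test on any error.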
+func mustListMembers(cx ctlCtx) []any {
+ clus := cx.epc
+ args := e2e.CURLPrefixArgsCluster(clus.Cfg, clus.Procs[0], "POST", e2e.CURLReq{
+ Endpoint: "/v3/cluster/member/list",
+ Value: "{}",
+ })
+ resp, err := runCommandAndReadJSONOutput(args)
+ require.NoError(cx.t, err)
+
+ members, ok := resp["members"]
+ require.True(cx.t, ok)
+ return members.([]any)
+}
diff --git a/tests/e2e/v3_curl_election_test.go b/tests/e2e/v3_curl_election_test.go
new file mode 100644
index 00000000000..765f7ec48b0
--- /dev/null
+++ b/tests/e2e/v3_curl_election_test.go
@@ -0,0 +1,157 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "math/rand"
+ "strconv"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/pkg/v3/expect"
+ epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+func TestCurlV3CampaignNoTLS(t *testing.T) {
+ testCtl(t, testCurlV3Campaign, withCfg(*e2e.NewConfigNoTLS()))
+}
+
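+// testCurlV3Campaign campaigns for "/election-prefix" through the gateway, decodes the returned
+// leader name and key, observes the election until the leader key appears, and then proclaims a new
+// value using the decoded leader key.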
+func testCurlV3Campaign(cx ctlCtx) {
+ // campaign
+ cdata, err := json.Marshal(&epb.CampaignRequest{
+ Name: []byte("/election-prefix"),
+ Value: []byte("v1"),
+ })
+ require.NoError(cx.t, err)
+ cargs := e2e.CURLPrefixArgsCluster(cx.epc.Cfg, cx.epc.Procs[rand.Intn(cx.epc.Cfg.ClusterSize)], "POST", e2e.CURLReq{
+ Endpoint: "/v3/election/campaign",
+ Value: string(cdata),
+ })
+ lines, err := e2e.SpawnWithExpectLines(context.TODO(), cargs, cx.envMap, expect.ExpectedResponse{Value: `"leader":{"name":"`})
+ require.NoErrorf(cx.t, err, "failed post campaign request")
+ if len(lines) != 1 {
+ cx.t.Fatalf("len(lines) expected 1, got %+v", lines)
+ }
+
+ var cresp campaignResponse
+ require.NoErrorf(cx.t, json.Unmarshal([]byte(lines[0]), &cresp), "failed to unmarshal campaign response")
+ ndata, err := base64.StdEncoding.DecodeString(cresp.Leader.Name)
+ require.NoErrorf(cx.t, err, "failed to decode leader key")
+ kdata, err := base64.StdEncoding.DecodeString(cresp.Leader.Key)
+ require.NoErrorf(cx.t, err, "failed to decode leader key")
+
+ // observe
+ observeReq, err := json.Marshal(&epb.LeaderRequest{
+ Name: []byte("/election-prefix"),
+ })
+ require.NoError(cx.t, err)
+
+ clus := cx.epc
+ args := e2e.CURLPrefixArgsCluster(clus.Cfg, clus.Procs[0], "POST", e2e.CURLReq{
+ Endpoint: "/v3/election/observe",
+ Value: string(observeReq),
+ })
+ proc, err := e2e.SpawnCmd(args, nil)
+ require.NoError(cx.t, err)
+
+ proc.ExpectWithContext(context.TODO(), expect.ExpectedResponse{
+ Value: fmt.Sprintf(`"key":"%s"`, cresp.Leader.Key),
+ })
+ require.NoError(cx.t, proc.Stop())
+
+ // proclaim
+ rev, _ := strconv.ParseInt(cresp.Leader.Rev, 10, 64)
+ lease, _ := strconv.ParseInt(cresp.Leader.Lease, 10, 64)
+ pdata, err := json.Marshal(&epb.ProclaimRequest{
+ Leader: &epb.LeaderKey{
+ Name: ndata,
+ Key: kdata,
+ Rev: rev,
+ Lease: lease,
+ },
+ Value: []byte("v2"),
+ })
+ require.NoError(cx.t, err)
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/election/proclaim",
+ Value: string(pdata),
+ Expected: expect.ExpectedResponse{Value: `"revision":`},
+ }), "failed post proclaim request")
+}
+
+func TestCurlV3ProclaimMissiongLeaderKeyNoTLS(t *testing.T) {
+ testCtl(t, testCurlV3ProclaimMissiongLeaderKey, withCfg(*e2e.NewConfigNoTLS()))
+}
+
+func testCurlV3ProclaimMissiongLeaderKey(cx ctlCtx) {
+ pdata, err := json.Marshal(&epb.ProclaimRequest{Value: []byte("v2")})
+ require.NoError(cx.t, err)
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/election/proclaim",
+ Value: string(pdata),
+ Expected: expect.ExpectedResponse{Value: `"message":"\"leader\" field must be provided"`},
+ }), "failed post proclaim request")
+}
+
+func TestCurlV3ResignMissiongLeaderKeyNoTLS(t *testing.T) {
+ testCtl(t, testCurlV3ResignMissiongLeaderKey, withCfg(*e2e.NewConfigNoTLS()))
+}
+
+func testCurlV3ResignMissiongLeaderKey(cx ctlCtx) {
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/election/resign",
+ Value: `{}`,
+ Expected: expect.ExpectedResponse{Value: `"message":"\"leader\" field must be provided"`},
+ }), "failed post resign request")
+}
+
+func TestCurlV3ElectionLeader(t *testing.T) {
+ testCtl(t, testCurlV3ElectionLeader, withCfg(*e2e.NewConfigNoTLS()))
+}
+
+func testCurlV3ElectionLeader(cx ctlCtx) {
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/election/leader",
+ Value: `{"name": "aGVsbG8="}`, // base64 encoded string "hello"
+ Expected: expect.ExpectedResponse{Value: `election: no leader`},
+ }), "testCurlV3ElectionLeader failed to get leader")
+}
+
+// campaignResponse is used to decode the campaign response by hand; the gRPC
+// gateway marshals integer fields as JSON strings, so the response cannot be
+// unmarshalled directly into epb.CampaignResponse.
+type campaignResponse struct {
+ Leader struct {
+ Name string `json:"name,omitempty"`
+ Key string `json:"key,omitempty"`
+ Rev string `json:"rev,omitempty"`
+ Lease string `json:"lease,omitempty"`
+ } `json:"leader,omitempty"`
+}
+
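+// CURLWithExpected runs each cURL test case against the cluster and wraps the first failure with
+// its endpoint and expected value.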
+func CURLWithExpected(cx ctlCtx, tests []v3cURLTest) error {
+ for _, t := range tests {
+ value := fmt.Sprintf("%v", t.value)
+ if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: t.endpoint, Value: value, Expected: expect.ExpectedResponse{Value: t.expected}}); err != nil {
+ return fmt.Errorf("endpoint (%s): error (%w), wanted %v", t.endpoint, err, t.expected)
+ }
+ }
+ return nil
+}
diff --git a/tests/e2e/v3_curl_kv_test.go b/tests/e2e/v3_curl_kv_test.go
new file mode 100644
index 00000000000..5225fdc93b0
--- /dev/null
+++ b/tests/e2e/v3_curl_kv_test.go
@@ -0,0 +1,214 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "encoding/json"
+ "testing"
+
+ protov1 "github.com/golang/protobuf/proto"
+ gw "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/protobuf/encoding/protojson"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+func TestCurlV3KVBasicOperation(t *testing.T) {
+ testCurlV3KV(t, testCurlV3KVBasicOperation)
+}
+
+func TestCurlV3KVTxn(t *testing.T) {
+ testCurlV3KV(t, testCurlV3KVTxn)
+}
+
+func TestCurlV3KVCompact(t *testing.T) {
+ testCurlV3KV(t, testCurlV3KVCompact)
+}
+
+func testCurlV3KV(t *testing.T, f func(ctlCtx)) {
+ testCases := []struct {
+ name string
+ cfg ctlOption
+ }{
+ {
+ name: "noTLS",
+ cfg: withCfg(*e2e.NewConfigNoTLS()),
+ },
+ {
+ name: "autoTLS",
+ cfg: withCfg(*e2e.NewConfigAutoTLS()),
+ },
+ {
+ name: "allTLS",
+ cfg: withCfg(*e2e.NewConfigTLS()),
+ },
+ {
+ name: "peerTLS",
+ cfg: withCfg(*e2e.NewConfigPeerTLS()),
+ },
+ {
+ name: "clientTLS",
+ cfg: withCfg(*e2e.NewConfigClientTLS()),
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ testCtl(t, f, tc.cfg)
+ })
+ }
+}
+
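+// testCurlV3KVBasicOperation exercises put, range, and deleterange through the gRPC gateway,
+// additionally issuing the range over TLS when the client supports both TLS and non-TLS connections.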
+func testCurlV3KVBasicOperation(cx ctlCtx) {
+ var (
+ key = []byte("foo")
+ value = []byte("bar") // this will be automatically base64-encoded by Go
+
+ expectedPutResponse = `"revision":"`
+ expectedGetResponse = `"value":"`
+ expectedDeleteResponse = `"deleted":"1"`
+ )
+ putData, err := json.Marshal(&pb.PutRequest{
+ Key: key,
+ Value: value,
+ })
+ require.NoError(cx.t, err)
+
+ rangeData, err := json.Marshal(&pb.RangeRequest{
+ Key: key,
+ })
+ require.NoError(cx.t, err)
+
+ deleteData, err := json.Marshal(&pb.DeleteRangeRequest{
+ Key: key,
+ })
+ require.NoError(cx.t, err)
+
+ err = e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/kv/put",
+ Value: string(putData),
+ Expected: expect.ExpectedResponse{Value: expectedPutResponse},
+ })
+ require.NoErrorf(cx.t, err, "testCurlV3KVBasicOperation put failed")
+
+ err = e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/kv/range",
+ Value: string(rangeData),
+ Expected: expect.ExpectedResponse{Value: expectedGetResponse},
+ })
+ require.NoErrorf(cx.t, err, "testCurlV3KVBasicOperation get failed")
+
+ if cx.cfg.Client.ConnectionType == e2e.ClientTLSAndNonTLS {
+ err = e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/kv/range",
+ Value: string(rangeData),
+ Expected: expect.ExpectedResponse{Value: expectedGetResponse},
+ IsTLS: true,
+ })
+ require.NoErrorf(cx.t, err, "testCurlV3KVBasicOperation get failed")
+ }
+
+ err = e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/kv/deleterange",
+ Value: string(deleteData),
+ Expected: expect.ExpectedResponse{Value: expectedDeleteResponse},
+ })
+ require.NoErrorf(cx.t, err, "testCurlV3KVBasicOperation delete failed")
+}
+
+func testCurlV3KVTxn(cx ctlCtx) {
+ txn := &pb.TxnRequest{
+ Compare: []*pb.Compare{
+ {
+ Key: []byte("foo"),
+ Result: pb.Compare_EQUAL,
+ Target: pb.Compare_CREATE,
+ TargetUnion: &pb.Compare_CreateRevision{CreateRevision: 0},
+ },
+ },
+ Success: []*pb.RequestOp{
+ {
+ Request: &pb.RequestOp_RequestPut{
+ RequestPut: &pb.PutRequest{
+ Key: []byte("foo"),
+ Value: []byte("bar"),
+ },
+ },
+ },
+ },
+ }
+ m := gw.JSONPb{
+ MarshalOptions: protojson.MarshalOptions{
+ UseProtoNames: true,
+ EmitUnpopulated: false,
+ },
+ }
+ jsonDat, jerr := m.Marshal(protov1.MessageV2(txn))
+ require.NoError(cx.t, jerr)
+
+ succeeded, responses := mustExecuteTxn(cx, string(jsonDat))
+ require.True(cx.t, succeeded)
+ require.Len(cx.t, responses, 1)
+ putResponse := responses[0].(map[string]any)
+ _, ok := putResponse["response_put"]
+ require.True(cx.t, ok)
+
+ // this malformed request used to crash the etcd server
+ malformed := `{"compare":[{"result":0,"target":1,"key":"Zm9v","TargetUnion":null}],"success":[{"Request":{"RequestPut":{"key":"Zm9v","value":"YmFy"}}}]}`
+ err := e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/kv/txn",
+ Value: malformed,
+ Expected: expect.ExpectedResponse{Value: "etcdserver: key not found"},
+ })
+ require.NoErrorf(cx.t, err, "testCurlV3Txn with malformed request failed")
+}
+
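+// mustExecuteTxn posts the given transaction to /v3/kv/txn and returns the decoded "succeeded" flag
+// and "responses" list, failing the test on any error.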
+func mustExecuteTxn(cx ctlCtx, reqData string) (bool, []any) {
+ clus := cx.epc
+ args := e2e.CURLPrefixArgsCluster(clus.Cfg, clus.Procs[0], "POST", e2e.CURLReq{
+ Endpoint: "/v3/kv/txn",
+ Value: reqData,
+ })
+ resp, err := runCommandAndReadJSONOutput(args)
+ require.NoError(cx.t, err)
+
+ succeeded, ok := resp["succeeded"]
+ require.True(cx.t, ok)
+
+ responses, ok := resp["responses"]
+ require.True(cx.t, ok)
+
+ return succeeded.(bool), responses.([]any)
+}
+
+func testCurlV3KVCompact(cx ctlCtx) {
+ compactRequest, err := json.Marshal(&pb.CompactionRequest{
+ Revision: 10000,
+ })
+ require.NoError(cx.t, err)
+
+ err = e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/kv/compaction",
+ Value: string(compactRequest),
+ Expected: expect.ExpectedResponse{
+ Value: `"message":"etcdserver: mvcc: required revision is a future revision"`,
+ },
+ })
+ require.NoErrorf(cx.t, err, "testCurlV3KVCompact failed")
+}
diff --git a/tests/e2e/v3_curl_lease_test.go b/tests/e2e/v3_curl_lease_test.go
index ae0d523b2fe..bd9d1adc6dd 100644
--- a/tests/e2e/v3_curl_lease_test.go
+++ b/tests/e2e/v3_curl_lease_test.go
@@ -18,28 +18,26 @@ import (
"fmt"
"testing"
+ "github.com/stretchr/testify/require"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
)
-func TestV3CurlLeaseGrantNoTLS(t *testing.T) {
- for _, p := range apiPrefix {
- testCtl(t, testV3CurlLeaseGrant, withApiPrefix(p), withCfg(*newConfigNoTLS()))
- }
+func TestCurlV3LeaseGrantNoTLS(t *testing.T) {
+ testCtl(t, testCurlV3LeaseGrant, withCfg(*e2e.NewConfigNoTLS()))
}
-func TestV3CurlLeaseRevokeNoTLS(t *testing.T) {
- for _, p := range apiPrefix {
- testCtl(t, testV3CurlLeaseRevoke, withApiPrefix(p), withCfg(*newConfigNoTLS()))
- }
+
+func TestCurlV3LeaseRevokeNoTLS(t *testing.T) {
+ testCtl(t, testCurlV3LeaseRevoke, withCfg(*e2e.NewConfigNoTLS()))
}
-func TestV3CurlLeaseLeasesNoTLS(t *testing.T) {
- for _, p := range apiPrefix {
- testCtl(t, testV3CurlLeaseLeases, withApiPrefix(p), withCfg(*newConfigNoTLS()))
- }
+
+func TestCurlV3LeaseLeasesNoTLS(t *testing.T) {
+ testCtl(t, testCurlV3LeaseLeases, withCfg(*e2e.NewConfigNoTLS()))
}
-func TestV3CurlLeaseKeepAliveNoTLS(t *testing.T) {
- for _, p := range apiPrefix {
- testCtl(t, testV3CurlLeaseKeepAlive, withApiPrefix(p), withCfg(*newConfigNoTLS()))
- }
+
+func TestCurlV3LeaseKeepAliveNoTLS(t *testing.T) {
+ testCtl(t, testCurlV3LeaseKeepAlive, withCfg(*e2e.NewConfigNoTLS()))
}
type v3cURLTest struct {
@@ -48,111 +46,86 @@ type v3cURLTest struct {
expected string
}
-// TODO remove /kv/lease/timetolive, /kv/lease/revoke, /kv/lease/leases tests in 3.5 release
-
-func testV3CurlLeaseGrant(cx ctlCtx) {
- leaseID := randomLeaseID()
+func testCurlV3LeaseGrant(cx ctlCtx) {
+ leaseID := e2e.RandomLeaseID()
tests := []v3cURLTest{
{
- endpoint: "/lease/grant",
+ endpoint: "/v3/lease/grant",
value: gwLeaseGrant(cx, leaseID, 0),
expected: gwLeaseIDExpected(leaseID),
},
{
- endpoint: "/lease/grant",
+ endpoint: "/v3/lease/grant",
value: gwLeaseGrant(cx, 0, 20),
expected: `"TTL":"20"`,
},
{
- endpoint: "/kv/put",
+ endpoint: "/v3/kv/put",
value: gwKVPutLease(cx, "foo", "bar", leaseID),
expected: `"revision":"`,
},
{
- endpoint: "/lease/timetolive",
+ endpoint: "/v3/lease/timetolive",
value: gwLeaseTTLWithKeys(cx, leaseID),
expected: `"grantedTTL"`,
},
- {
- endpoint: "/kv/lease/timetolive",
- value: gwLeaseTTLWithKeys(cx, leaseID),
- expected: `"grantedTTL"`,
- },
- }
- if err := cURLWithExpected(cx, tests); err != nil {
- cx.t.Fatalf("testV3CurlLeaseGrant: %v", err)
}
+ require.NoErrorf(cx.t, CURLWithExpected(cx, tests), "testCurlV3LeaseGrant")
}
-func testV3CurlLeaseRevoke(cx ctlCtx) {
- leaseID := randomLeaseID()
+func testCurlV3LeaseRevoke(cx ctlCtx) {
+ leaseID := e2e.RandomLeaseID()
tests := []v3cURLTest{
{
- endpoint: "/lease/grant",
+ endpoint: "/v3/lease/grant",
value: gwLeaseGrant(cx, leaseID, 0),
expected: gwLeaseIDExpected(leaseID),
},
{
- endpoint: "/lease/revoke",
+ endpoint: "/v3/lease/revoke",
value: gwLeaseRevoke(cx, leaseID),
expected: `"revision":"`,
},
- {
- endpoint: "/kv/lease/revoke",
- value: gwLeaseRevoke(cx, leaseID),
- expected: `etcdserver: requested lease not found`,
- },
- }
- if err := cURLWithExpected(cx, tests); err != nil {
- cx.t.Fatalf("testV3CurlLeaseRevoke: %v", err)
}
+ require.NoErrorf(cx.t, CURLWithExpected(cx, tests), "testCurlV3LeaseRevoke")
}
-func testV3CurlLeaseLeases(cx ctlCtx) {
- leaseID := randomLeaseID()
+func testCurlV3LeaseLeases(cx ctlCtx) {
+ leaseID := e2e.RandomLeaseID()
tests := []v3cURLTest{
{
- endpoint: "/lease/grant",
+ endpoint: "/v3/lease/grant",
value: gwLeaseGrant(cx, leaseID, 0),
expected: gwLeaseIDExpected(leaseID),
},
{
- endpoint: "/lease/leases",
- value: "{}",
- expected: gwLeaseIDExpected(leaseID),
- },
- {
- endpoint: "/kv/lease/leases",
+ endpoint: "/v3/lease/leases",
value: "{}",
expected: gwLeaseIDExpected(leaseID),
},
}
- if err := cURLWithExpected(cx, tests); err != nil {
- cx.t.Fatalf("testV3CurlLeaseGrant: %v", err)
- }
+ require.NoErrorf(cx.t, CURLWithExpected(cx, tests), "testCurlV3LeaseGrant")
}
-func testV3CurlLeaseKeepAlive(cx ctlCtx) {
- leaseID := randomLeaseID()
+func testCurlV3LeaseKeepAlive(cx ctlCtx) {
+ leaseID := e2e.RandomLeaseID()
tests := []v3cURLTest{
{
- endpoint: "/lease/grant",
+ endpoint: "/v3/lease/grant",
value: gwLeaseGrant(cx, leaseID, 0),
expected: gwLeaseIDExpected(leaseID),
},
{
- endpoint: "/lease/keepalive",
+ endpoint: "/v3/lease/keepalive",
value: gwLeaseKeepAlive(cx, leaseID),
expected: gwLeaseIDExpected(leaseID),
},
}
- if err := cURLWithExpected(cx, tests); err != nil {
- cx.t.Fatalf("testV3CurlLeaseGrant: %v", err)
- }
+ require.NoErrorf(cx.t, CURLWithExpected(cx, tests), "testCurlV3LeaseGrant")
}
func gwLeaseIDExpected(leaseID int64) string {
@@ -161,45 +134,35 @@ func gwLeaseIDExpected(leaseID int64) string {
func gwLeaseTTLWithKeys(cx ctlCtx, leaseID int64) string {
d := &pb.LeaseTimeToLiveRequest{ID: leaseID, Keys: true}
- s, err := dataMarshal(d)
- if err != nil {
- cx.t.Fatalf("gwLeaseTTLWithKeys: error (%v)", err)
- }
+ s, err := e2e.DataMarshal(d)
+ require.NoErrorf(cx.t, err, "gwLeaseTTLWithKeys: error")
return s
}
func gwLeaseKeepAlive(cx ctlCtx, leaseID int64) string {
d := &pb.LeaseKeepAliveRequest{ID: leaseID}
- s, err := dataMarshal(d)
- if err != nil {
- cx.t.Fatalf("gwLeaseKeepAlive: Marshal error (%v)", err)
- }
+ s, err := e2e.DataMarshal(d)
+ require.NoErrorf(cx.t, err, "gwLeaseKeepAlive: Marshal error")
return s
}
func gwLeaseGrant(cx ctlCtx, leaseID int64, ttl int64) string {
d := &pb.LeaseGrantRequest{ID: leaseID, TTL: ttl}
- s, err := dataMarshal(d)
- if err != nil {
- cx.t.Fatalf("gwLeaseGrant: Marshal error (%v)", err)
- }
+ s, err := e2e.DataMarshal(d)
+ require.NoErrorf(cx.t, err, "gwLeaseGrant: Marshal error")
return s
}
func gwLeaseRevoke(cx ctlCtx, leaseID int64) string {
d := &pb.LeaseRevokeRequest{ID: leaseID}
- s, err := dataMarshal(d)
- if err != nil {
- cx.t.Fatalf("gwLeaseRevoke: Marshal error (%v)", err)
- }
+ s, err := e2e.DataMarshal(d)
+ require.NoErrorf(cx.t, err, "gwLeaseRevoke: Marshal error")
return s
}
func gwKVPutLease(cx ctlCtx, k string, v string, leaseID int64) string {
d := pb.PutRequest{Key: []byte(k), Value: []byte(v), Lease: leaseID}
- s, err := dataMarshal(d)
- if err != nil {
- cx.t.Fatalf("gwKVPutLease: Marshal error (%v)", err)
- }
+ s, err := e2e.DataMarshal(d)
+ require.NoErrorf(cx.t, err, "gwKVPutLease: Marshal error")
return s
}
diff --git a/tests/e2e/v3_curl_lock_test.go b/tests/e2e/v3_curl_lock_test.go
new file mode 100644
index 00000000000..670a76cc742
--- /dev/null
+++ b/tests/e2e/v3_curl_lock_test.go
@@ -0,0 +1,54 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "encoding/json"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+func TestCurlV3LockOperations(t *testing.T) {
+ testCtl(t, testCurlV3LockOperations, withCfg(*e2e.NewConfig(e2e.WithClusterSize(1))))
+}
+
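+// testCurlV3LockOperations acquires a lock through /v3/lock/lock and then releases it by posting
+// the returned key to /v3/lock/unlock.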
+func testCurlV3LockOperations(cx ctlCtx) {
+ // lock
+ lockReq, err := json.Marshal(&v3lockpb.LockRequest{Name: []byte("lock1")})
+ require.NoError(cx.t, err)
+
+ clus := cx.epc
+ args := e2e.CURLPrefixArgsCluster(clus.Cfg, clus.Procs[0], "POST", e2e.CURLReq{
+ Endpoint: "/v3/lock/lock",
+ Value: string(lockReq),
+ })
+ resp, err := runCommandAndReadJSONOutput(args)
+ require.NoError(cx.t, err)
+ key, ok := resp["key"]
+ require.True(cx.t, ok)
+
+ // unlock
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/lock/unlock",
+ Value: fmt.Sprintf(`{"key": "%v"}`, key),
+ Expected: expect.ExpectedResponse{Value: "revision"},
+ }), "testCurlV3LockOperations failed to execute unlock")
+}
diff --git a/tests/e2e/v3_curl_maintenance_test.go b/tests/e2e/v3_curl_maintenance_test.go
new file mode 100644
index 00000000000..6a70037a96c
--- /dev/null
+++ b/tests/e2e/v3_curl_maintenance_test.go
@@ -0,0 +1,158 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "math/rand"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+func TestCurlV3MaintenanceAlarmMissiongAlarm(t *testing.T) {
+ testCtl(t, testCurlV3MaintenanceAlarmMissiongAlarm, withCfg(*e2e.NewConfigNoTLS()))
+}
+
+func testCurlV3MaintenanceAlarmMissiongAlarm(cx ctlCtx) {
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/maintenance/alarm",
+ Value: `{"action": "ACTIVATE"}`,
+ }), "failed post maintenance alarm")
+}
+
+func TestCurlV3MaintenanceStatus(t *testing.T) {
+ testCtl(t, testCurlV3MaintenanceStatus, withCfg(*e2e.NewConfigNoTLS()))
+}
+
+func testCurlV3MaintenanceStatus(cx ctlCtx) {
+ clus := cx.epc
+ args := e2e.CURLPrefixArgsCluster(clus.Cfg, clus.Procs[rand.Intn(clus.Cfg.ClusterSize)], "POST", e2e.CURLReq{
+ Endpoint: "/v3/maintenance/status",
+ Value: "{}",
+ })
+ resp, err := runCommandAndReadJSONOutput(args)
+ require.NoError(cx.t, err)
+
+ requiredFields := []string{"version", "dbSize", "leader", "raftIndex", "raftTerm", "raftAppliedIndex", "dbSizeInUse", "storageVersion"}
+ for _, field := range requiredFields {
+ if _, ok := resp[field]; !ok {
+ cx.t.Fatalf("Field %q not found in (%v)", field, resp)
+ }
+ }
+
+ require.Equal(cx.t, version.Version, resp["version"])
+}
+
+func TestCurlV3MaintenanceDefragment(t *testing.T) {
+ testCtl(t, testCurlV3MaintenanceDefragment, withCfg(*e2e.NewConfigNoTLS()))
+}
+
+func testCurlV3MaintenanceDefragment(cx ctlCtx) {
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/maintenance/defragment",
+ Value: "{}",
+ Expected: expect.ExpectedResponse{
+ Value: "{}",
+ },
+ }), "failed post maintenance defragment request")
+}
+
+func TestCurlV3MaintenanceHash(t *testing.T) {
+ testCtl(t, testCurlV3MaintenanceHash, withCfg(*e2e.NewConfigNoTLS()))
+}
+
+func testCurlV3MaintenanceHash(cx ctlCtx) {
+ clus := cx.epc
+ args := e2e.CURLPrefixArgsCluster(clus.Cfg, clus.Procs[rand.Intn(clus.Cfg.ClusterSize)], "POST", e2e.CURLReq{
+ Endpoint: "/v3/maintenance/hash",
+ Value: "{}",
+ })
+ resp, err := runCommandAndReadJSONOutput(args)
+ require.NoError(cx.t, err)
+
+ requiredFields := []string{"header", "hash"}
+ for _, field := range requiredFields {
+ if _, ok := resp[field]; !ok {
+ cx.t.Fatalf("Field %q not found in (%v)", field, resp)
+ }
+ }
+}
+
+func TestCurlV3MaintenanceHashKV(t *testing.T) {
+ testCtl(t, testCurlV3MaintenanceHashKV, withCfg(*e2e.NewConfigNoTLS()))
+}
+
+func testCurlV3MaintenanceHashKV(cx ctlCtx) {
+ clus := cx.epc
+ args := e2e.CURLPrefixArgsCluster(clus.Cfg, clus.Procs[rand.Intn(clus.Cfg.ClusterSize)], "POST", e2e.CURLReq{
+ Endpoint: "/v3/maintenance/hashkv",
+ Value: "{}",
+ })
+ resp, err := runCommandAndReadJSONOutput(args)
+ require.NoError(cx.t, err)
+
+ requiredFields := []string{"header", "hash", "compact_revision", "hash_revision"}
+ for _, field := range requiredFields {
+ if _, ok := resp[field]; !ok {
+ cx.t.Fatalf("Field %q not found in (%v)", field, resp)
+ }
+ }
+}
+
+func TestCurlV3MaintenanceSnapshot(t *testing.T) {
+ testCtl(t, testCurlV3MaintenanceSnapshot, withCfg(*e2e.NewConfigNoTLS()))
+}
+
+func testCurlV3MaintenanceSnapshot(cx ctlCtx) {
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/maintenance/snapshot",
+ Value: "{}",
+ Expected: expect.ExpectedResponse{
+ Value: `"result":{"blob":`,
+ },
+ }), "failed post maintenance snapshot request")
+}
+
+func TestCurlV3MaintenanceMoveleader(t *testing.T) {
+ testCtl(t, testCurlV3MaintenanceMoveleader, withCfg(*e2e.NewConfigNoTLS()))
+}
+
+func testCurlV3MaintenanceMoveleader(cx ctlCtx) {
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/maintenance/transfer-leadership",
+ Value: `{"targetID": 123}`,
+ Expected: expect.ExpectedResponse{
+ Value: `"message":"etcdserver: bad leader transferee"`,
+ },
+ }), "failed post maintenance moveleader request")
+}
+
+func TestCurlV3MaintenanceDowngrade(t *testing.T) {
+ testCtl(t, testCurlV3MaintenanceDowngrade, withCfg(*e2e.NewConfigNoTLS()))
+}
+
+func testCurlV3MaintenanceDowngrade(cx ctlCtx) {
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{
+ Endpoint: "/v3/maintenance/downgrade",
+ Value: `{"action": 0, "version": "3.0"}`,
+ Expected: expect.ExpectedResponse{
+ Value: `"message":"etcdserver: invalid downgrade target version"`,
+ },
+ }), "failed post maintenance downgrade request")
+}
diff --git a/tests/e2e/v3_curl_maxstream_test.go b/tests/e2e/v3_curl_maxstream_test.go
new file mode 100644
index 00000000000..027a3b538e8
--- /dev/null
+++ b/tests/e2e/v3_curl_maxstream_test.go
@@ -0,0 +1,233 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math/rand"
+ "sync"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+// TestCurlV3_MaxStreams_BelowLimit_NoTLS_Small tests no TLS
+func TestCurlV3_MaxStreams_BelowLimit_NoTLS_Small(t *testing.T) {
+ testCurlV3MaxStream(t, false, withCfg(*e2e.NewConfigNoTLS()), withMaxConcurrentStreams(3))
+}
+
+func TestCurlV3_MaxStreams_BelowLimit_NoTLS_Medium(t *testing.T) {
+ testCurlV3MaxStream(t, false, withCfg(*e2e.NewConfigNoTLS()), withMaxConcurrentStreams(100), withTestTimeout(20*time.Second))
+}
+
+func TestCurlV3_MaxStreamsNoTLS_BelowLimit_Large(t *testing.T) {
+ f, err := setRLimit(10240)
+ require.NoError(t, err)
+ defer f()
+ testCurlV3MaxStream(t, false, withCfg(*e2e.NewConfigNoTLS()), withMaxConcurrentStreams(1000), withTestTimeout(200*time.Second))
+}
+
+func TestCurlV3_MaxStreams_ReachLimit_NoTLS_Small(t *testing.T) {
+ testCurlV3MaxStream(t, true, withCfg(*e2e.NewConfigNoTLS()), withMaxConcurrentStreams(3))
+}
+
+func TestCurlV3_MaxStreams_ReachLimit_NoTLS_Medium(t *testing.T) {
+ testCurlV3MaxStream(t, true, withCfg(*e2e.NewConfigNoTLS()), withMaxConcurrentStreams(100), withTestTimeout(20*time.Second))
+}
+
+// TestCurlV3_MaxStreams_BelowLimit_TLS_Small tests with TLS
+func TestCurlV3_MaxStreams_BelowLimit_TLS_Small(t *testing.T) {
+ testCurlV3MaxStream(t, false, withCfg(*e2e.NewConfigTLS()), withMaxConcurrentStreams(3))
+}
+
+func TestCurlV3_MaxStreams_BelowLimit_TLS_Medium(t *testing.T) {
+ testCurlV3MaxStream(t, false, withCfg(*e2e.NewConfigTLS()), withMaxConcurrentStreams(100), withTestTimeout(20*time.Second))
+}
+
+func TestCurlV3_MaxStreams_ReachLimit_TLS_Small(t *testing.T) {
+ testCurlV3MaxStream(t, true, withCfg(*e2e.NewConfigTLS()), withMaxConcurrentStreams(3))
+}
+
+func TestCurlV3_MaxStreams_ReachLimit_TLS_Medium(t *testing.T) {
+ testCurlV3MaxStream(t, true, withCfg(*e2e.NewConfigTLS()), withMaxConcurrentStreams(100), withTestTimeout(20*time.Second))
+}
+
+func testCurlV3MaxStream(t *testing.T, reachLimit bool, opts ...ctlOption) {
+ e2e.BeforeTest(t)
+
+ // Step 1: generate configuration for creating cluster
+ t.Log("Generating configuration for creating cluster.")
+ cx := getDefaultCtlCtx(t)
+ cx.applyOpts(opts)
+ // We must set `ClusterSize` to 1; otherwise different streams may
+ // connect to different members, which makes the behavior difficult
+ // to test.
+ cx.cfg.ClusterSize = 1
+
+ // Step 2: create the cluster
+ t.Log("Creating an etcd cluster")
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(&cx.cfg))
+ require.NoErrorf(t, err, "Failed to start etcd cluster")
+ cx.epc = epc
+ cx.dataDir = epc.Procs[0].Config().DataDirPath
+
+ // Step 3: run test
+ // (a) generate ${concurrentNumber} concurrent watch streams;
+ // (b) submit a range request.
+ var wg sync.WaitGroup
+ concurrentNumber := cx.cfg.ServerConfig.MaxConcurrentStreams - 1
+ expectedResponse := `"revision":"`
+ if reachLimit {
+ concurrentNumber = cx.cfg.ServerConfig.MaxConcurrentStreams
+ expectedResponse = "Operation timed out"
+ }
+ wg.Add(int(concurrentNumber))
+ t.Logf("Running the test, MaxConcurrentStreams: %d, concurrentNumber: %d, expected range's response: %s\n",
+ cx.cfg.ServerConfig.MaxConcurrentStreams, concurrentNumber, expectedResponse)
+
+ closeServerCh := make(chan struct{})
+ submitConcurrentWatch(cx, int(concurrentNumber), &wg, closeServerCh)
+ submitRangeAfterConcurrentWatch(cx, expectedResponse)
+
+ // Step 4: Close the cluster
+ t.Log("Closing test cluster...")
+ close(closeServerCh)
+ require.NoError(t, epc.Close())
+ t.Log("Closed test cluster")
+
+ // Step 5: Wait for all watch goroutines to exit.
+ doneCh := make(chan struct{})
+ go func() {
+ defer close(doneCh)
+ wg.Wait()
+ }()
+
+ timeout := cx.getTestTimeout()
+ t.Logf("Waiting test case to finish, timeout: %s", timeout)
+ select {
+ case <-time.After(timeout):
+ testutil.FatalStack(t, fmt.Sprintf("test timed out after %v", timeout))
+ case <-doneCh:
+ t.Log("All watch goroutines exited.")
+ }
+
+ t.Log("testCurlV3MaxStream done!")
+}
+
+func submitConcurrentWatch(cx ctlCtx, number int, wgDone *sync.WaitGroup, closeCh chan struct{}) {
+ watchData, err := json.Marshal(&pb.WatchRequest_CreateRequest{
+ CreateRequest: &pb.WatchCreateRequest{
+ Key: []byte("foo"),
+ },
+ })
+ require.NoError(cx.t, err)
+
+ var wgSchedule sync.WaitGroup
+
+ createWatchConnection := func() error {
+ cluster := cx.epc
+ member := cluster.Procs[rand.Intn(cluster.Cfg.ClusterSize)]
+ curlReq := e2e.CURLReq{Endpoint: "/v3/watch", Value: string(watchData)}
+
+ args := e2e.CURLPrefixArgsCluster(cluster.Cfg, member, "POST", curlReq)
+ proc, err := e2e.SpawnCmd(args, nil)
+ if err != nil {
+ return fmt.Errorf("failed to spawn: %w", err)
+ }
+ defer proc.Stop()
+
+ // make sure that watch request has been created
+ expectedLine := `"created":true}}`
+ _, lerr := proc.ExpectWithContext(context.TODO(), expect.ExpectedResponse{Value: expectedLine})
+ if lerr != nil {
+ return fmt.Errorf("%v %w (expected %q). Try EXPECT_DEBUG=TRUE", args, lerr, expectedLine)
+ }
+
+ wgSchedule.Done()
+
+ // hold the connection and wait for server shutdown
+ perr := proc.Close()
+
+ // once Close returns, the curl process has exited; this should only happen after the server is closed
+ select {
+ case <-closeCh:
+ default:
+ // perr could be nil.
+ return fmt.Errorf("unexpected connection close before server closes: %w", perr)
+ }
+ return nil
+ }
+
+ testutils.ExecuteWithTimeout(cx.t, cx.getTestTimeout(), func() {
+ wgSchedule.Add(number)
+
+ for i := 0; i < number; i++ {
+ go func(i int) {
+ defer wgDone.Done()
+
+ require.NoErrorf(cx.t, createWatchConnection(), "testCurlV3MaxStream watch failed: %d", i)
+ }(i)
+ }
+
+ // make sure all goroutines have already been scheduled.
+ wgSchedule.Wait()
+ })
+}
+
+func submitRangeAfterConcurrentWatch(cx ctlCtx, expectedValue string) {
+ rangeData, err := json.Marshal(&pb.RangeRequest{
+ Key: []byte("foo"),
+ })
+ require.NoError(cx.t, err)
+
+ cx.t.Log("Submitting range request...")
+ if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: "/v3/kv/range", Value: string(rangeData), Expected: expect.ExpectedResponse{Value: expectedValue}, Timeout: 5}); err != nil {
+ require.ErrorContains(cx.t, err, expectedValue)
+ }
+ cx.t.Log("range request done")
+}
+
+// setRLimit sets the open file limit and returns a function that
+// restores the previous limit.
+func setRLimit(nofile uint64) (func() error, error) {
+ var rLimit syscall.Rlimit
+ if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit); err != nil {
+ return nil, fmt.Errorf("failed to get open file limit, error: %w", err)
+ }
+
+ var wLimit syscall.Rlimit
+ wLimit.Max = nofile
+ wLimit.Cur = nofile
+ if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &wLimit); err != nil {
+ return nil, fmt.Errorf("failed to set max open file limit, %w", err)
+ }
+
+ return func() error {
+ if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rLimit); err != nil {
+ return fmt.Errorf("failed reset max open file limit, %w", err)
+ }
+ return nil
+ }, nil
+}
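+
+// A minimal usage sketch, mirroring the Large test above: raise the limit
+// before starting the cluster and restore it when the test finishes.
+//
+//	restore, err := setRLimit(10240)
+//	require.NoError(t, err)
+//	defer restore()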
diff --git a/tests/e2e/v3_curl_test.go b/tests/e2e/v3_curl_test.go
deleted file mode 100644
index ecc94888d14..00000000000
--- a/tests/e2e/v3_curl_test.go
+++ /dev/null
@@ -1,389 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package e2e
-
-import (
- "encoding/base64"
- "encoding/json"
- "fmt"
- "path"
- "strconv"
- "testing"
-
- "go.etcd.io/etcd/api/v3/authpb"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
- epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
-
- "github.com/grpc-ecosystem/grpc-gateway/runtime"
-)
-
-// TODO: remove /v3beta tests in 3.5 release
-var apiPrefix = []string{"/v3", "/v3beta"}
-
-func TestV3CurlPutGetNoTLS(t *testing.T) {
- for _, p := range apiPrefix {
- testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*newConfigNoTLS()))
- }
-}
-func TestV3CurlPutGetAutoTLS(t *testing.T) {
- for _, p := range apiPrefix {
- testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*newConfigAutoTLS()))
- }
-}
-func TestV3CurlPutGetAllTLS(t *testing.T) {
- for _, p := range apiPrefix {
- testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*newConfigTLS()))
- }
-}
-func TestV3CurlPutGetPeerTLS(t *testing.T) {
- for _, p := range apiPrefix {
- testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*newConfigPeerTLS()))
- }
-}
-func TestV3CurlPutGetClientTLS(t *testing.T) {
- for _, p := range apiPrefix {
- testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*newConfigClientTLS()))
- }
-}
-func TestV3CurlWatch(t *testing.T) {
- for _, p := range apiPrefix {
- testCtl(t, testV3CurlWatch, withApiPrefix(p))
- }
-}
-func TestV3CurlTxn(t *testing.T) {
- for _, p := range apiPrefix {
- testCtl(t, testV3CurlTxn, withApiPrefix(p))
- }
-}
-func TestV3CurlAuth(t *testing.T) {
- for _, p := range apiPrefix {
- testCtl(t, testV3CurlAuth, withApiPrefix(p))
- }
-}
-func TestV3CurlAuthClientTLSCertAuth(t *testing.T) {
- for _, p := range apiPrefix {
- testCtl(t, testV3CurlAuth, withApiPrefix(p), withCfg(*newConfigClientTLSCertAuthWithNoCN()))
- }
-}
-
-func testV3CurlPutGet(cx ctlCtx) {
- var (
- key = []byte("foo")
- value = []byte("bar") // this will be automatically base64-encoded by Go
-
- expectPut = `"revision":"`
- expectGet = `"value":"`
- )
- putData, err := json.Marshal(&pb.PutRequest{
- Key: key,
- Value: value,
- })
- if err != nil {
- cx.t.Fatal(err)
- }
- rangeData, err := json.Marshal(&pb.RangeRequest{
- Key: key,
- })
- if err != nil {
- cx.t.Fatal(err)
- }
-
- p := cx.apiPrefix
-
- if err := cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/put"), value: string(putData), expected: expectPut}); err != nil {
- cx.t.Fatalf("failed testV3CurlPutGet put with curl using prefix (%s) (%v)", p, err)
- }
- if err := cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/range"), value: string(rangeData), expected: expectGet}); err != nil {
- cx.t.Fatalf("failed testV3CurlPutGet get with curl using prefix (%s) (%v)", p, err)
- }
- if cx.cfg.clientTLS == clientTLSAndNonTLS {
- if err := cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/range"), value: string(rangeData), expected: expectGet, isTLS: true}); err != nil {
- cx.t.Fatalf("failed testV3CurlPutGet get with curl using prefix (%s) (%v)", p, err)
- }
- }
-}
-
-func testV3CurlWatch(cx ctlCtx) {
- // store "bar" into "foo"
- putreq, err := json.Marshal(&pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")})
- if err != nil {
- cx.t.Fatal(err)
- }
- // watch for first update to "foo"
- wcr := &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 1}
- wreq, err := json.Marshal(wcr)
- if err != nil {
- cx.t.Fatal(err)
- }
- // marshaling the grpc to json gives:
- // "{"RequestUnion":{"CreateRequest":{"key":"Zm9v","start_revision":1}}}"
- // but the gprc-gateway expects a different format..
- wstr := `{"create_request" : ` + string(wreq) + "}"
- p := cx.apiPrefix
-
- if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/put"), value: string(putreq), expected: "revision"}); err != nil {
- cx.t.Fatalf("failed testV3CurlWatch put with curl using prefix (%s) (%v)", p, err)
- }
- // expects "bar", timeout after 2 seconds since stream waits forever
- if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/watch"), value: wstr, expected: `"YmFy"`, timeout: 2}); err != nil {
- cx.t.Fatalf("failed testV3CurlWatch watch with curl using prefix (%s) (%v)", p, err)
- }
-}
-
-func testV3CurlTxn(cx ctlCtx) {
- txn := &pb.TxnRequest{
- Compare: []*pb.Compare{
- {
- Key: []byte("foo"),
- Result: pb.Compare_EQUAL,
- Target: pb.Compare_CREATE,
- TargetUnion: &pb.Compare_CreateRevision{CreateRevision: 0},
- },
- },
- Success: []*pb.RequestOp{
- {
- Request: &pb.RequestOp_RequestPut{
- RequestPut: &pb.PutRequest{
- Key: []byte("foo"),
- Value: []byte("bar"),
- },
- },
- },
- },
- }
- m := &runtime.JSONPb{}
- jsonDat, jerr := m.Marshal(txn)
- if jerr != nil {
- cx.t.Fatal(jerr)
- }
- expected := `"succeeded":true,"responses":[{"response_put":{"header":{"revision":"2"}}}]`
- p := cx.apiPrefix
- if err := cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/txn"), value: string(jsonDat), expected: expected}); err != nil {
- cx.t.Fatalf("failed testV3CurlTxn txn with curl using prefix (%s) (%v)", p, err)
- }
-
- // was crashing etcd server
- malformed := `{"compare":[{"result":0,"target":1,"key":"Zm9v","TargetUnion":null}],"success":[{"Request":{"RequestPut":{"key":"Zm9v","value":"YmFy"}}}]}`
- if err := cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/txn"), value: malformed, expected: "error"}); err != nil {
- cx.t.Fatalf("failed testV3CurlTxn put with curl using prefix (%s) (%v)", p, err)
- }
-
-}
-
-func testV3CurlAuth(cx ctlCtx) {
- p := cx.apiPrefix
- usernames := []string{"root", "nonroot", "nooption"}
- pwds := []string{"toor", "pass", "pass"}
- options := []*authpb.UserAddOptions{{NoPassword: false}, {NoPassword: false}, nil}
-
- // create users
- for i := 0; i < len(usernames); i++ {
- user, err := json.Marshal(&pb.AuthUserAddRequest{Name: usernames[i], Password: pwds[i], Options: options[i]})
- testutil.AssertNil(cx.t, err)
-
- if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/auth/user/add"), value: string(user), expected: "revision"}); err != nil {
- cx.t.Fatalf("failed testV3CurlAuth add user %v with curl (%v)", usernames[i], err)
- }
- }
-
- // create root role
- rolereq, err := json.Marshal(&pb.AuthRoleAddRequest{Name: string("root")})
- testutil.AssertNil(cx.t, err)
-
- if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/auth/role/add"), value: string(rolereq), expected: "revision"}); err != nil {
- cx.t.Fatalf("failed testV3CurlAuth create role with curl using prefix (%s) (%v)", p, err)
- }
-
- //grant root role
- for i := 0; i < len(usernames); i++ {
- grantroleroot, err := json.Marshal(&pb.AuthUserGrantRoleRequest{User: usernames[i], Role: "root"})
- testutil.AssertNil(cx.t, err)
-
- if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/auth/user/grant"), value: string(grantroleroot), expected: "revision"}); err != nil {
- cx.t.Fatalf("failed testV3CurlAuth grant role with curl using prefix (%s) (%v)", p, err)
- }
- }
-
- // enable auth
- if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/auth/enable"), value: string("{}"), expected: "revision"}); err != nil {
- cx.t.Fatalf("failed testV3CurlAuth enable auth with curl using prefix (%s) (%v)", p, err)
- }
-
- for i := 0; i < len(usernames); i++ {
- // put "bar[i]" into "foo[i]"
- putreq, err := json.Marshal(&pb.PutRequest{Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte(fmt.Sprintf("bar%d", i))})
- testutil.AssertNil(cx.t, err)
-
- // fail put no auth
- if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/put"), value: string(putreq), expected: "error"}); err != nil {
- cx.t.Fatalf("failed testV3CurlAuth no auth put with curl using prefix (%s) (%v)", p, err)
- }
-
- // auth request
- authreq, err := json.Marshal(&pb.AuthenticateRequest{Name: usernames[i], Password: pwds[i]})
- testutil.AssertNil(cx.t, err)
-
- var (
- authHeader string
- cmdArgs []string
- lineFunc = func(txt string) bool { return true }
- )
-
- cmdArgs = cURLPrefixArgs(cx.epc, "POST", cURLReq{endpoint: path.Join(p, "/auth/authenticate"), value: string(authreq)})
- proc, err := spawnCmd(cmdArgs)
- testutil.AssertNil(cx.t, err)
- defer proc.Close()
-
- cURLRes, err := proc.ExpectFunc(lineFunc)
- testutil.AssertNil(cx.t, err)
-
- authRes := make(map[string]interface{})
- testutil.AssertNil(cx.t, json.Unmarshal([]byte(cURLRes), &authRes))
-
- token, ok := authRes[rpctypes.TokenFieldNameGRPC].(string)
- if !ok {
- cx.t.Fatalf("failed invalid token in authenticate response with curl using user (%v)", usernames[i])
- }
-
- authHeader = "Authorization: " + token
-
- // put with auth
- if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/put"), value: string(putreq), header: authHeader, expected: "revision"}); err != nil {
- cx.t.Fatalf("failed testV3CurlAuth auth put with curl using prefix (%s) and user (%v) (%v)", p, usernames[i], err)
- }
- }
-}
-
-func TestV3CurlCampaignNoTLS(t *testing.T) {
- for _, p := range apiPrefix {
- testCtl(t, testV3CurlCampaign, withApiPrefix(p), withCfg(*newConfigNoTLS()))
- }
-}
-
-func testV3CurlCampaign(cx ctlCtx) {
- cdata, err := json.Marshal(&epb.CampaignRequest{
- Name: []byte("/election-prefix"),
- Value: []byte("v1"),
- })
- if err != nil {
- cx.t.Fatal(err)
- }
- cargs := cURLPrefixArgs(cx.epc, "POST", cURLReq{
- endpoint: path.Join(cx.apiPrefix, "/election/campaign"),
- value: string(cdata),
- })
- lines, err := spawnWithExpectLines(cargs, `"leader":{"name":"`)
- if err != nil {
- cx.t.Fatalf("failed post campaign request (%s) (%v)", cx.apiPrefix, err)
- }
- if len(lines) != 1 {
- cx.t.Fatalf("len(lines) expected 1, got %+v", lines)
- }
-
- var cresp campaignResponse
- if err = json.Unmarshal([]byte(lines[0]), &cresp); err != nil {
- cx.t.Fatalf("failed to unmarshal campaign response %v", err)
- }
- ndata, err := base64.StdEncoding.DecodeString(cresp.Leader.Name)
- if err != nil {
- cx.t.Fatalf("failed to decode leader key %v", err)
- }
- kdata, err := base64.StdEncoding.DecodeString(cresp.Leader.Key)
- if err != nil {
- cx.t.Fatalf("failed to decode leader key %v", err)
- }
-
- rev, _ := strconv.ParseInt(cresp.Leader.Rev, 10, 64)
- lease, _ := strconv.ParseInt(cresp.Leader.Lease, 10, 64)
- pdata, err := json.Marshal(&epb.ProclaimRequest{
- Leader: &epb.LeaderKey{
- Name: ndata,
- Key: kdata,
- Rev: rev,
- Lease: lease,
- },
- Value: []byte("v2"),
- })
- if err != nil {
- cx.t.Fatal(err)
- }
- if err = cURLPost(cx.epc, cURLReq{
- endpoint: path.Join(cx.apiPrefix, "/election/proclaim"),
- value: string(pdata),
- expected: `"revision":`,
- }); err != nil {
- cx.t.Fatalf("failed post proclaim request (%s) (%v)", cx.apiPrefix, err)
- }
-}
-
-func TestV3CurlProclaimMissiongLeaderKeyNoTLS(t *testing.T) {
- for _, p := range apiPrefix {
- testCtl(t, testV3CurlProclaimMissiongLeaderKey, withApiPrefix(p), withCfg(*newConfigNoTLS()))
- }
-}
-
-func testV3CurlProclaimMissiongLeaderKey(cx ctlCtx) {
- pdata, err := json.Marshal(&epb.ProclaimRequest{Value: []byte("v2")})
- if err != nil {
- cx.t.Fatal(err)
- }
- if err = cURLPost(cx.epc, cURLReq{
- endpoint: path.Join(cx.apiPrefix, "/election/proclaim"),
- value: string(pdata),
- expected: `{"error":"\"leader\" field must be provided","code":2,"message":"\"leader\" field must be provided"}`,
- }); err != nil {
- cx.t.Fatalf("failed post proclaim request (%s) (%v)", cx.apiPrefix, err)
- }
-}
-
-func TestV3CurlResignMissiongLeaderKeyNoTLS(t *testing.T) {
- for _, p := range apiPrefix {
- testCtl(t, testV3CurlResignMissiongLeaderKey, withApiPrefix(p), withCfg(*newConfigNoTLS()))
- }
-}
-
-func testV3CurlResignMissiongLeaderKey(cx ctlCtx) {
- if err := cURLPost(cx.epc, cURLReq{
- endpoint: path.Join(cx.apiPrefix, "/election/resign"),
- value: `{}`,
- expected: `{"error":"\"leader\" field must be provided","code":2,"message":"\"leader\" field must be provided"}`,
- }); err != nil {
- cx.t.Fatalf("failed post resign request (%s) (%v)", cx.apiPrefix, err)
- }
-}
-
-// to manually decode; JSON marshals integer fields with
-// string types, so can't unmarshal with epb.CampaignResponse
-type campaignResponse struct {
- Leader struct {
- Name string `json:"name,omitempty"`
- Key string `json:"key,omitempty"`
- Rev string `json:"rev,omitempty"`
- Lease string `json:"lease,omitempty"`
- } `json:"leader,omitempty"`
-}
-
-func cURLWithExpected(cx ctlCtx, tests []v3cURLTest) error {
- p := cx.apiPrefix
- for _, t := range tests {
- value := fmt.Sprintf("%v", t.value)
- if err := cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, t.endpoint), value: value, expected: t.expected}); err != nil {
- return fmt.Errorf("prefix (%s) endpoint (%s): error (%v), wanted %v", p, t.endpoint, err, t.expected)
- }
- }
- return nil
-}
diff --git a/tests/e2e/v3_curl_watch_test.go b/tests/e2e/v3_curl_watch_test.go
new file mode 100644
index 00000000000..5e7bc8de9bc
--- /dev/null
+++ b/tests/e2e/v3_curl_watch_test.go
@@ -0,0 +1,48 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+func TestCurlV3Watch(t *testing.T) {
+ testCtl(t, testCurlV3Watch)
+}
+
+func testCurlV3Watch(cx ctlCtx) {
+ // store "bar" into "foo"
+ putreq, err := json.Marshal(&pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")})
+ require.NoError(cx.t, err)
+ // watch for first update to "foo"
+ wcr := &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 1}
+ wreq, err := json.Marshal(wcr)
+ require.NoError(cx.t, err)
+ // marshaling the gRPC request to JSON gives:
+ // "{"RequestUnion":{"CreateRequest":{"key":"Zm9v","start_revision":1}}}"
+ // but the grpc-gateway expects a different format.
+ wstr := `{"create_request" : ` + string(wreq) + "}"
+
+ require.NoErrorf(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: "/v3/kv/put", Value: string(putreq), Expected: expect.ExpectedResponse{Value: "revision"}}), "failed testCurlV3Watch put with curl")
+ // expects "bar", timeout after 2 seconds since stream waits forever
+ require.ErrorContains(cx.t, e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: "/v3/watch", Value: wstr, Expected: expect.ExpectedResponse{Value: `"YmFy"`}, Timeout: 2}), "unexpected exit code")
+}
diff --git a/tests/e2e/v3_lease_no_proxy_test.go b/tests/e2e/v3_lease_no_proxy_test.go
new file mode 100644
index 00000000000..c6418c8691a
--- /dev/null
+++ b/tests/e2e/v3_lease_no_proxy_test.go
@@ -0,0 +1,166 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !cluster_proxy
+
+package e2e
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+// TestLeaseRevoke_IgnoreOldLeader verifies that leases shouldn't be revoked
+// by the old leader.
+// See the case 1 in https://github.com/etcd-io/etcd/issues/15247#issuecomment-1777862093.
+func TestLeaseRevoke_IgnoreOldLeader(t *testing.T) {
+ t.Run("3 members", func(t *testing.T) {
+ testLeaseRevokeIssue(t, 3, true)
+ })
+ t.Run("5 members", func(t *testing.T) {
+ testLeaseRevokeIssue(t, 5, true)
+ })
+}
+
+// TestLeaseRevoke_ClientSwitchToOtherMember verifies that leases shouldn't
+// be revoked by the new leader.
+// See the case 2 in https://github.com/etcd-io/etcd/issues/15247#issuecomment-1777862093.
+func TestLeaseRevoke_ClientSwitchToOtherMember(t *testing.T) {
+ t.Run("3 members", func(t *testing.T) {
+ testLeaseRevokeIssue(t, 3, false)
+ })
+ t.Run("5 members", func(t *testing.T) {
+ testLeaseRevokeIssue(t, 5, false)
+ })
+}
+
+func testLeaseRevokeIssue(t *testing.T, clusterSize int, connectToOneFollower bool) {
+ e2e.BeforeTest(t)
+
+ ctx := context.Background()
+
+ t.Log("Starting a new etcd cluster")
+ epc, err := e2e.NewEtcdProcessCluster(ctx, t,
+ e2e.WithClusterSize(clusterSize),
+ e2e.WithGoFailEnabled(true),
+ e2e.WithGoFailClientTimeout(40*time.Second),
+ )
+ require.NoError(t, err)
+ defer func() {
+ require.NoErrorf(t, epc.Close(), "error closing etcd processes")
+ }()
+
+ leaderIdx := epc.WaitLeader(t)
+ t.Logf("Leader index: %d", leaderIdx)
+
+ epsForNormalOperations := epc.Procs[(leaderIdx+2)%clusterSize].EndpointsGRPC()
+ t.Logf("Creating a client for normal operations: %v", epsForNormalOperations)
+ client, err := clientv3.New(clientv3.Config{Endpoints: epsForNormalOperations, DialTimeout: 3 * time.Second})
+ require.NoError(t, err)
+ defer client.Close()
+
+ var epsForLeaseKeepAlive []string
+ if connectToOneFollower {
+ epsForLeaseKeepAlive = epc.Procs[(leaderIdx+1)%clusterSize].EndpointsGRPC()
+ } else {
+ epsForLeaseKeepAlive = epc.EndpointsGRPC()
+ }
+ t.Logf("Creating a client for the leaseKeepAlive operation: %v", epsForLeaseKeepAlive)
+ clientForKeepAlive, err := clientv3.New(clientv3.Config{Endpoints: epsForLeaseKeepAlive, DialTimeout: 3 * time.Second})
+ require.NoError(t, err)
+ defer clientForKeepAlive.Close()
+
+ resp, err := client.Status(ctx, epsForNormalOperations[0])
+ require.NoError(t, err)
+ oldLeaderID := resp.Leader
+
+ t.Log("Creating a new lease")
+ leaseRsp, err := client.Grant(ctx, 20)
+ require.NoError(t, err)
+
+ t.Log("Starting a goroutine to keep alive the lease")
+ doneC := make(chan struct{})
+ stopC := make(chan struct{})
+ startC := make(chan struct{}, 1)
+ go func() {
+ defer close(doneC)
+
+ respC, kerr := clientForKeepAlive.KeepAlive(ctx, leaseRsp.ID)
+ assert.NoError(t, kerr)
+ // ensure we have received the first response from the server
+ <-respC
+ startC <- struct{}{}
+
+ for {
+ select {
+ case <-stopC:
+ return
+ case <-respC:
+ }
+ }
+ }()
+
+ t.Log("Wait for the keepAlive goroutine to get started")
+ <-startC
+
+ t.Log("Trigger the failpoint to simulate stalled writing")
+ err = epc.Procs[leaderIdx].Failpoints().SetupHTTP(ctx, "raftBeforeSave", `sleep("30s")`)
+ require.NoError(t, err)
+
+ cctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ t.Logf("Waiting for a new leader to be elected, old leader index: %d, old leader ID: %d", leaderIdx, oldLeaderID)
+ testutils.ExecuteUntil(cctx, t, func() {
+ for {
+ resp, err = client.Status(ctx, epsForNormalOperations[0])
+ if err == nil && resp.Leader != oldLeaderID {
+ t.Logf("A new leader has already been elected, new leader index: %d", resp.Leader)
+ return
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ })
+ cancel()
+
+ t.Log("Writing a key/value pair")
+ _, err = client.Put(ctx, "foo", "bar")
+ require.NoError(t, err)
+
+ t.Log("Sleeping 30 seconds")
+ time.Sleep(30 * time.Second)
+
+ t.Log("Remove the failpoint 'raftBeforeSave'")
+ err = epc.Procs[leaderIdx].Failpoints().DeactivateHTTP(ctx, "raftBeforeSave")
+ require.NoError(t, err)
+
+ // By default, etcd tries to revoke leases every 7 seconds.
+ t.Log("Sleeping 10 seconds")
+ time.Sleep(10 * time.Second)
+
+ t.Log("Confirming the lease isn't revoked")
+ leases, err := client.Leases(ctx)
+ require.NoError(t, err)
+ require.Len(t, leases.Leases, 1)
+
+ t.Log("Waiting for the keepAlive goroutine to exit")
+ close(stopC)
+ <-doneC
+}
diff --git a/tests/e2e/watch_test.go b/tests/e2e/watch_test.go
new file mode 100644
index 00000000000..f167b8f8752
--- /dev/null
+++ b/tests/e2e/watch_test.go
@@ -0,0 +1,490 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// These tests are performance sensitive; adding the cluster proxy makes them unstable.
+//go:build !cluster_proxy
+
+package e2e
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ v3rpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+const (
+ watchResponsePeriod = 100 * time.Millisecond
+ watchTestDuration = 5 * time.Second
+ readLoadConcurrency = 10
+)
+
+type testCase struct {
+ name string
+ client e2e.ClientConfig
+ clientHTTPSeparate bool
+ maxWatchDelay time.Duration
+ dbSizeBytes int
+}
+
+const (
+ Kilo = 1000
+ Mega = 1000 * Kilo
+)
+
+// 10 MB is not a bottleneck for the gRPC server; the cost is in filling etcd
+// up with data. Keep it lower so tests don't take too long.
+// If we implemented reuse of the db we could increase the db size.
+var tcs = []testCase{
+ {
+ name: "NoTLS",
+ maxWatchDelay: 150 * time.Millisecond,
+ dbSizeBytes: 5 * Mega,
+ },
+ {
+ name: "TLS",
+ client: e2e.ClientConfig{ConnectionType: e2e.ClientTLS},
+ maxWatchDelay: 150 * time.Millisecond,
+ dbSizeBytes: 5 * Mega,
+ },
+ {
+ name: "SeparateHTTPNoTLS",
+ clientHTTPSeparate: true,
+ maxWatchDelay: 150 * time.Millisecond,
+ dbSizeBytes: 5 * Mega,
+ },
+ {
+ name: "SeparateHTTPTLS",
+ client: e2e.ClientConfig{ConnectionType: e2e.ClientTLS},
+ clientHTTPSeparate: true,
+ maxWatchDelay: 150 * time.Millisecond,
+ dbSizeBytes: 5 * Mega,
+ },
+}
+
+func TestWatchDelayForPeriodicProgressNotification(t *testing.T) {
+ e2e.BeforeTest(t)
+ for _, tc := range tcs {
+ tc := tc
+ cfg := e2e.DefaultConfig()
+ cfg.ClusterSize = 1
+ cfg.ServerConfig.ExperimentalWatchProgressNotifyInterval = watchResponsePeriod
+ cfg.Client = tc.client
+ cfg.ClientHTTPSeparate = tc.clientHTTPSeparate
+ t.Run(tc.name, func(t *testing.T) {
+ clus, err := e2e.NewEtcdProcessCluster(context.Background(), t, e2e.WithConfig(cfg))
+ require.NoError(t, err)
+ defer clus.Close()
+ c := newClient(t, clus.EndpointsGRPC(), tc.client)
+ require.NoError(t, fillEtcdWithData(context.Background(), c, tc.dbSizeBytes))
+
+ ctx, cancel := context.WithTimeout(context.Background(), watchTestDuration)
+ defer cancel()
+ g := errgroup.Group{}
+ continuouslyExecuteGetAll(ctx, t, &g, c)
+ validateWatchDelay(t, c.Watch(ctx, "fake-key", clientv3.WithProgressNotify()), tc.maxWatchDelay)
+ require.NoError(t, g.Wait())
+ })
+ }
+}
+
+func TestWatchDelayForManualProgressNotification(t *testing.T) {
+ e2e.BeforeTest(t)
+ for _, tc := range tcs {
+ tc := tc
+ cfg := e2e.DefaultConfig()
+ cfg.ClusterSize = 1
+ cfg.Client = tc.client
+ cfg.ClientHTTPSeparate = tc.clientHTTPSeparate
+ t.Run(tc.name, func(t *testing.T) {
+ clus, err := e2e.NewEtcdProcessCluster(context.Background(), t, e2e.WithConfig(cfg))
+ require.NoError(t, err)
+ defer clus.Close()
+ c := newClient(t, clus.EndpointsGRPC(), tc.client)
+ require.NoError(t, fillEtcdWithData(context.Background(), c, tc.dbSizeBytes))
+
+ ctx, cancel := context.WithTimeout(context.Background(), watchTestDuration)
+ defer cancel()
+ g := errgroup.Group{}
+ continuouslyExecuteGetAll(ctx, t, &g, c)
+ g.Go(func() error {
+ for {
+ err := c.RequestProgress(ctx)
+ if err != nil {
+ if strings.Contains(err.Error(), "context deadline exceeded") {
+ return nil
+ }
+ return err
+ }
+ time.Sleep(watchResponsePeriod)
+ }
+ })
+ validateWatchDelay(t, c.Watch(ctx, "fake-key"), tc.maxWatchDelay)
+ require.NoError(t, g.Wait())
+ })
+ }
+}
+
+func TestWatchDelayForEvent(t *testing.T) {
+ e2e.BeforeTest(t)
+ for _, tc := range tcs {
+ tc := tc
+ cfg := e2e.DefaultConfig()
+ cfg.ClusterSize = 1
+ cfg.Client = tc.client
+ cfg.ClientHTTPSeparate = tc.clientHTTPSeparate
+ t.Run(tc.name, func(t *testing.T) {
+ clus, err := e2e.NewEtcdProcessCluster(context.Background(), t, e2e.WithConfig(cfg))
+ require.NoError(t, err)
+ defer clus.Close()
+ c := newClient(t, clus.EndpointsGRPC(), tc.client)
+ require.NoError(t, fillEtcdWithData(context.Background(), c, tc.dbSizeBytes))
+
+ ctx, cancel := context.WithTimeout(context.Background(), watchTestDuration)
+ defer cancel()
+ g := errgroup.Group{}
+ g.Go(func() error {
+ i := 0
+ for {
+ _, err := c.Put(ctx, "key", fmt.Sprintf("%d", i))
+ if err != nil {
+ if strings.Contains(err.Error(), "context deadline exceeded") {
+ return nil
+ }
+ return err
+ }
+ time.Sleep(watchResponsePeriod)
+ }
+ })
+ continuouslyExecuteGetAll(ctx, t, &g, c)
+ validateWatchDelay(t, c.Watch(ctx, "key"), tc.maxWatchDelay)
+ require.NoError(t, g.Wait())
+ })
+ }
+}
+
+func validateWatchDelay(t *testing.T, watch clientv3.WatchChan, maxWatchDelay time.Duration) {
+ start := time.Now()
+ var maxDelay time.Duration
+ for range watch {
+ sinceLast := time.Since(start)
+ if sinceLast > watchResponsePeriod+maxWatchDelay {
+ t.Errorf("Unexpected watch response delayed over allowed threshold %s, delay: %s", maxWatchDelay, sinceLast-watchResponsePeriod)
+ } else {
+ t.Logf("Got watch response, since last: %s", sinceLast)
+ }
+ if sinceLast > maxDelay {
+ maxDelay = sinceLast
+ }
+ start = time.Now()
+ }
+ sinceLast := time.Since(start)
+ if sinceLast > maxDelay && sinceLast > watchResponsePeriod+maxWatchDelay {
+ t.Errorf("Unexpected watch response delayed over allowed threshold %s, delay: unknown", maxWatchDelay)
+ t.Errorf("Test finished while in middle of delayed response, measured delay: %s", sinceLast-watchResponsePeriod)
+ t.Logf("Please increase the test duration to measure delay")
+ } else {
+ t.Logf("Max delay: %s", maxDelay-watchResponsePeriod)
+ }
+}
+
+func continuouslyExecuteGetAll(ctx context.Context, t *testing.T, g *errgroup.Group, c *clientv3.Client) {
+ mux := sync.RWMutex{}
+ size := 0
+ for i := 0; i < readLoadConcurrency; i++ {
+ g.Go(func() error {
+ for {
+ resp, err := c.Get(ctx, "", clientv3.WithPrefix())
+ if err != nil {
+ if strings.Contains(err.Error(), "context deadline exceeded") {
+ return nil
+ }
+ return err
+ }
+ respSize := 0
+ for _, kv := range resp.Kvs {
+ respSize += kv.Size()
+ }
+ mux.Lock()
+ size += respSize
+ mux.Unlock()
+ }
+ })
+ }
+ g.Go(func() error {
+ lastSize := size
+ for range time.Tick(time.Second) {
+ select {
+ case <-ctx.Done():
+ return nil
+ default:
+ }
+ mux.RLock()
+ t.Logf("Generating read load around %.1f MB/s", float64(size-lastSize)/1000/1000)
+ lastSize = size
+ mux.RUnlock()
+ }
+ return nil
+ })
+}
+
+// TestDeleteEventDrop_Issue18089 is an e2e test to reproduce the issue reported in: https://github.com/etcd-io/etcd/issues/18089
+//
+// The goal is to reproduce a DELETE event being dropped in a watch after a compaction
+// occurs on the revision where the deletion took place. In order to reproduce this, we
+// perform the following sequence (steps for reproduction thanks to @ahrtr):
+// - PUT k v2 (assume returned revision = r2)
+// - PUT k v3 (assume returned revision = r3)
+// - PUT k v4 (assume returned revision = r4)
+// - DELETE k (assume returned revision = r5)
+// - PUT k v6 (assume returned revision = r6)
+// - COMPACT r5
+// - WATCH rev=r5
+//
+// We should get the DELETE event (r5) followed by the PUT event (r6).
+func TestDeleteEventDrop_Issue18089(t *testing.T) {
+ e2e.BeforeTest(t)
+ cfg := e2e.DefaultConfig()
+ cfg.ClusterSize = 1
+ cfg.Client = e2e.ClientConfig{ConnectionType: e2e.ClientTLS}
+ clus, err := e2e.NewEtcdProcessCluster(context.Background(), t, e2e.WithConfig(cfg))
+ require.NoError(t, err)
+ defer clus.Close()
+
+ c := newClient(t, clus.EndpointsGRPC(), cfg.Client)
+ defer c.Close()
+
+ ctx := context.Background()
+ const (
+ key = "k"
+ v2 = "v2"
+ v3 = "v3"
+ v4 = "v4"
+ v6 = "v6"
+ )
+
+ t.Logf("PUT key=%s, val=%s", key, v2)
+ _, err = c.KV.Put(ctx, key, v2)
+ require.NoError(t, err)
+
+ t.Logf("PUT key=%s, val=%s", key, v3)
+ _, err = c.KV.Put(ctx, key, v3)
+ require.NoError(t, err)
+
+ t.Logf("PUT key=%s, val=%s", key, v4)
+ _, err = c.KV.Put(ctx, key, v4)
+ require.NoError(t, err)
+
+ t.Logf("DELTE key=%s", key)
+ deleteResp, err := c.KV.Delete(ctx, key)
+ require.NoError(t, err)
+
+ t.Logf("PUT key=%s, val=%s", key, v6)
+ _, err = c.KV.Put(ctx, key, v6)
+ require.NoError(t, err)
+
+ t.Logf("COMPACT rev=%d", deleteResp.Header.Revision)
+ _, err = c.KV.Compact(ctx, deleteResp.Header.Revision, clientv3.WithCompactPhysical())
+ require.NoError(t, err)
+
+ watchChan := c.Watch(ctx, key, clientv3.WithRev(deleteResp.Header.Revision))
+ select {
+ case watchResp := <-watchChan:
+ require.Len(t, watchResp.Events, 2)
+
+ require.Equal(t, mvccpb.DELETE, watchResp.Events[0].Type)
+ deletedKey := string(watchResp.Events[0].Kv.Key)
+ require.Equal(t, key, deletedKey)
+
+ require.Equal(t, mvccpb.PUT, watchResp.Events[1].Type)
+
+ updatedKey := string(watchResp.Events[1].Kv.Key)
+ require.Equal(t, key, updatedKey)
+
+ require.Equal(t, v6, string(watchResp.Events[1].Kv.Value))
+ case <-time.After(100 * time.Millisecond):
+ // we care only about the first response, but have an
+ // escape hatch in case the watch response is delayed.
+ t.Fatal("timed out getting watch response")
+ }
+}
+
+func TestStartWatcherFromCompactedRevision(t *testing.T) {
+ t.Run("compaction on tombstone revision", func(t *testing.T) {
+ testStartWatcherFromCompactedRevision(t, true)
+ })
+ t.Run("compaction on normal revision", func(t *testing.T) {
+ testStartWatcherFromCompactedRevision(t, false)
+ })
+}
+
+func testStartWatcherFromCompactedRevision(t *testing.T, performCompactOnTombstone bool) {
+ e2e.BeforeTest(t)
+ cfg := e2e.DefaultConfig()
+ cfg.Client = e2e.ClientConfig{ConnectionType: e2e.ClientTLS}
+ clus, err := e2e.NewEtcdProcessCluster(context.Background(), t, e2e.WithConfig(cfg), e2e.WithClusterSize(1))
+ require.NoError(t, err)
+ defer clus.Close()
+
+ c := newClient(t, clus.EndpointsGRPC(), cfg.Client)
+ defer c.Close()
+
+ ctx := context.Background()
+ key := "foo"
+ totalRev := 100
+
+ type valueEvent struct {
+ value string
+ typ mvccpb.Event_EventType
+ }
+
+ var (
+ // requestedValues records all requested changes
+ requestedValues = make([]valueEvent, 0)
+ // compactionRevChan sends each compacted revision via this channel
+ compactionRevChan = make(chan int64)
+ // compactionStep means the client performs a compaction after every 7 operations
+ compactionStep = 7
+ )
+
+ // This goroutine submits changes to $key $totalRev times and performs
+ // a compaction after every $compactionStep changes. Except for the
+ // first iteration, the watcher always starts from the compacted revision.
+ go func() {
+ defer close(compactionRevChan)
+
+ lastRevision := int64(1)
+
+ compactionRevChan <- lastRevision
+ for vi := 1; vi <= totalRev; vi++ {
+ var respHeader *etcdserverpb.ResponseHeader
+
+ if vi%compactionStep == 0 && performCompactOnTombstone {
+ t.Logf("DELETE key=%s", key)
+
+ resp, derr := c.KV.Delete(ctx, key)
+ assert.NoError(t, derr)
+ respHeader = resp.Header
+
+ requestedValues = append(requestedValues, valueEvent{value: "", typ: mvccpb.DELETE})
+ } else {
+ value := fmt.Sprintf("%d", vi)
+
+ t.Logf("PUT key=%s, val=%s", key, value)
+ resp, perr := c.KV.Put(ctx, key, value)
+ assert.NoError(t, perr)
+ respHeader = resp.Header
+
+ requestedValues = append(requestedValues, valueEvent{value: value, typ: mvccpb.PUT})
+ }
+
+ lastRevision = respHeader.Revision
+
+ if vi%compactionStep == 0 {
+ compactionRevChan <- lastRevision
+
+ t.Logf("COMPACT rev=%d", lastRevision)
+ _, err = c.KV.Compact(ctx, lastRevision, clientv3.WithCompactPhysical())
+ assert.NoError(t, err)
+ }
+ }
+ }()
+
+ receivedEvents := make([]*clientv3.Event, 0)
+
+ fromCompactedRev := false
+ for fromRev := range compactionRevChan {
+ watchChan := c.Watch(ctx, key, clientv3.WithRev(fromRev))
+
+ prevEventCount := len(receivedEvents)
+
+ // firstReceived indicates whether this is the first watch response,
+ // in case etcd delivers the events one by one.
+ firstReceived := true
+
+ t.Logf("Start to watch key %s starting from revision %d", key, fromRev)
+ watchLoop:
+ for {
+ currentEventCount := len(receivedEvents)
+ if currentEventCount-prevEventCount == compactionStep || currentEventCount == totalRev {
+ break
+ }
+
+ select {
+ case watchResp := <-watchChan:
+ t.Logf("Receive the number of events: %d", len(watchResp.Events))
+ for i := range watchResp.Events {
+ ev := watchResp.Events[i]
+
+ // If $fromRev is the compacted revision, the first event should be
+ // the same as the last event received in the previous watch response.
+ if firstReceived && fromCompactedRev {
+ firstReceived = false
+
+ last := receivedEvents[prevEventCount-1]
+
+ assert.Equalf(t, last.Type, ev.Type,
+ "last received event type %s, but got event type %s", last.Type, ev.Type)
+ assert.Equalf(t, string(last.Kv.Key), string(ev.Kv.Key),
+ "last received event key %s, but got event key %s", string(last.Kv.Key), string(ev.Kv.Key))
+ assert.Equalf(t, string(last.Kv.Value), string(ev.Kv.Value),
+ "last received event value %s, but got event value %s", string(last.Kv.Value), string(ev.Kv.Value))
+ continue
+ }
+ receivedEvents = append(receivedEvents, ev)
+ }
+
+ if len(watchResp.Events) == 0 {
+ require.Equal(t, v3rpc.ErrCompacted, watchResp.Err())
+ break watchLoop
+ }
+
+ case <-time.After(10 * time.Second):
+ t.Fatal("timed out getting watch response")
+ }
+ }
+
+ fromCompactedRev = true
+ }
+
+ t.Logf("Received total number of events: %d", len(receivedEvents))
+ require.Len(t, requestedValues, totalRev)
+ require.Lenf(t, receivedEvents, totalRev, "should receive %d events", totalRev)
+ for idx, expected := range requestedValues {
+ ev := receivedEvents[idx]
+
+ require.Equalf(t, expected.typ, ev.Type, "#%d expected event %s", idx, expected.typ)
+
+ updatedKey := string(ev.Kv.Key)
+
+ require.Equal(t, key, updatedKey)
+ if expected.typ == mvccpb.PUT {
+ updatedValue := string(ev.Kv.Value)
+ require.Equal(t, expected.value, updatedValue)
+ }
+ }
+}
diff --git a/tests/e2e/zap_logging_test.go b/tests/e2e/zap_logging_test.go
new file mode 100644
index 00000000000..6752a23cc2f
--- /dev/null
+++ b/tests/e2e/zap_logging_test.go
@@ -0,0 +1,156 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+func TestServerJsonLogging(t *testing.T) {
+ e2e.BeforeTest(t)
+
+ epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+ e2e.WithClusterSize(1),
+ e2e.WithLogLevel("debug"),
+ )
+ require.NoErrorf(t, err, "could not start etcd process cluster")
+ logs := epc.Procs[0].Logs()
+ time.Sleep(time.Second)
+ require.NoErrorf(t, epc.Close(), "error closing etcd processes")
+ var entry logEntry
+ lines := logs.Lines()
+ if len(lines) == 0 {
+ t.Errorf("Expected at least one log line")
+ }
+ for _, line := range lines {
+ err := json.Unmarshal([]byte(line), &entry)
+ if err != nil {
+ t.Errorf("Failed to parse log line as json, err: %q, line: %s", err, line)
+ continue
+ }
+ if entry.Level == "" {
+ t.Errorf(`Missing "level" key, line: %s`, line)
+ }
+ if entry.Timestamp == "" {
+ t.Errorf(`Missing "ts" key, line: %s`, line)
+ }
+ if _, err := time.Parse("2006-01-02T15:04:05.999999Z0700", entry.Timestamp); entry.Timestamp != "" && err != nil {
+ t.Errorf(`Unexpected "ts" key format, err: %s`, err)
+ }
+ if entry.Caller == "" {
+ t.Errorf(`Missing "caller" key, line: %s`, line)
+ }
+ if entry.Message == "" {
+ t.Errorf(`Missing "message" key, line: %s`, line)
+ }
+ }
+}
+
+type logEntry struct {
+ Level string `json:"level"`
+ Timestamp string `json:"ts"`
+ Caller string `json:"caller"`
+ Message string `json:"msg"`
+ Error string `json:"error"`
+}
+
+func TestConnectionRejectMessage(t *testing.T) {
+ e2e.SkipInShortMode(t)
+
+ testCases := []struct {
+ name string
+ url string
+ expectedErrMsg string
+ }{
+ {
+ name: "reject client connection",
+ url: "https://127.0.0.1:2379/version",
+ expectedErrMsg: "rejected connection on client endpoint",
+ },
+ {
+ name: "reject peer connection",
+ url: "https://127.0.0.1:2380/members",
+ expectedErrMsg: "rejected connection on peer endpoint",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ commonArgs := []string{
+ e2e.BinPath.Etcd,
+ "--name", "etcd1",
+ "--listen-client-urls", "https://127.0.0.1:2379",
+ "--advertise-client-urls", "https://127.0.0.1:2379",
+ "--cert-file", e2e.CertPath,
+ "--key-file", e2e.PrivateKeyPath,
+ "--trusted-ca-file", e2e.CaPath,
+ "--listen-peer-urls", "https://127.0.0.1:2380",
+ "--initial-advertise-peer-urls", "https://127.0.0.1:2380",
+ "--initial-cluster", "etcd1=https://127.0.0.1:2380",
+ "--peer-cert-file", e2e.CertPath,
+ "--peer-key-file", e2e.PrivateKeyPath,
+ "--peer-trusted-ca-file", e2e.CaPath,
+ }
+
+ t.Log("Starting an etcd process and wait for it to get ready.")
+ p, err := e2e.SpawnCmd(commonArgs, nil)
+ require.NoError(t, err)
+ err = e2e.WaitReadyExpectProc(context.TODO(), p, e2e.EtcdServerReadyLines)
+ require.NoError(t, err)
+ defer func() {
+ p.Stop()
+ p.Close()
+ }()
+
+ t.Log("Starting a separate goroutine to verify the expected output.")
+ startedCh := make(chan struct{}, 1)
+ doneCh := make(chan struct{}, 1)
+ go func() {
+ startedCh <- struct{}{}
+ verr := e2e.WaitReadyExpectProc(context.TODO(), p, []string{tc.expectedErrMsg})
+ assert.NoError(t, verr)
+ doneCh <- struct{}{}
+ }()
+
+ // wait for the goroutine to get started
+ <-startedCh
+
+ t.Log("Running curl command to trigger the corresponding warning message.")
+ curlCmdArgs := []string{"curl", "--connect-timeout", "1", "-k", tc.url}
+ curlCmd, err := e2e.SpawnCmd(curlCmdArgs, nil)
+ require.NoError(t, err)
+
+ defer func() {
+ curlCmd.Stop()
+ curlCmd.Close()
+ }()
+
+ t.Log("Waiting for the result.")
+ select {
+ case <-doneCh:
+ case <-time.After(5 * time.Second):
+ t.Fatal("Timed out waiting for the result")
+ }
+ })
+ }
+}
diff --git a/tests/fixtures/CommonName-root.crt b/tests/fixtures/CommonName-root.crt
new file mode 100644
index 00000000000..d786c80d668
--- /dev/null
+++ b/tests/fixtures/CommonName-root.crt
@@ -0,0 +1,29 @@
+-----BEGIN CERTIFICATE-----
+MIIE5zCCA8+gAwIBAgIJAKooGDZuR2mMMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV
+BAYTAkNOMRAwDgYDVQQIDAdCZWlqaW5nMRAwDgYDVQQHDAdCZWlqaW5nMQ0wCwYD
+VQQKDAREZW1vMQ0wCwYDVQQLDAREZW1vMQ0wCwYDVQQDDARyb290MR8wHQYJKoZI
+hvcNAQkBFhB0ZXN0QGV4YW1wbGUuY29tMB4XDTIyMTExNjA2NTI1M1oXDTMyMTEx
+MzA2NTI1M1owfzELMAkGA1UEBhMCQ04xEDAOBgNVBAgMB0JlaWppbmcxEDAOBgNV
+BAcMB0JlaWppbmcxDTALBgNVBAoMBERlbW8xDTALBgNVBAsMBERlbW8xDTALBgNV
+BAMMBHJvb3QxHzAdBgkqhkiG9w0BCQEWEHRlc3RAZXhhbXBsZS5jb20wggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDEAKcjzhtOG3hWbAUCbudE1gPOeteT
+0INk2ngN2uCMYjYSZmaGhW/GZk3EvV7wKVuhdTyrh36E5Iajng9d2t1iOU/8jROU
++uAyrS3C/S5P/urq8VBUrt3VG/44bhwTEdafNnAWQ6ojYfmK0tRqoQn1Ftm30l8I
+nWof5Jm3loNA2WdNdvAp/D+6OpjUdqGdMkFd0NhkuQODMnycBMw6btUTj5SnmrMk
+q7V1aasx4BqN5C4DciZF0pyyR/TT8MoQ5Vcit8rHvQUyz42Lj8+28RkDoi4prJ1i
+tLaCt2egDp58vXlYQZTd50inMhnBIapKNdGpg3flW/8AFul1tCTqd8NfAgMBAAGj
+ggFkMIIBYDAdBgNVHQ4EFgQUpwwvEqXjA/ArJu1Jnpw7+/sttOAwgbMGA1UdIwSB
+qzCBqIAUpwwvEqXjA/ArJu1Jnpw7+/sttOChgYSkgYEwfzELMAkGA1UEBhMCQ04x
+EDAOBgNVBAgMB0JlaWppbmcxEDAOBgNVBAcMB0JlaWppbmcxDTALBgNVBAoMBERl
+bW8xDTALBgNVBAsMBERlbW8xDTALBgNVBAMMBHJvb3QxHzAdBgkqhkiG9w0BCQEW
+EHRlc3RAZXhhbXBsZS5jb22CCQCqKBg2bkdpjDAMBgNVHRMEBTADAQH/MAsGA1Ud
+DwQEAwIC/DA2BgNVHREELzAtggtleGFtcGxlLmNvbYINKi5leGFtcGxlLmNvbYIJ
+bG9jYWxob3N0hwR/AAABMDYGA1UdEgQvMC2CC2V4YW1wbGUuY29tgg0qLmV4YW1w
+bGUuY29tgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZIhvcNAQELBQADggEBAGi48ntm
+8cn08FrsCDWapsck7a56/dyFyzLg10c0blu396tzC3ZDCAwQYzHjeXVdeWHyGO+f
+KSFlmR6IA0jq6pFhUyJtgaAUJ91jW6s68GTVhlLoFhtYjy6EvhQ0lo+7GWh4qB2s
+LI0mJPjaLZY1teAC4TswzwMDVD8QsB06/aFBlA65VjgZiVH+aMwWJ88gKfVGp0Pv
+AApsy5MvwQn8WZ2L6foSY04OzXtmAg2gCl0PyDNgieqFDcM1g7mklHNgWl2Gvtte
+G6+TiB3gGUUlTsdy0+LS2psL71RS5Jv7g/7XGmSKBPqRmYyQ2t7m2kLPwWKtL5tE
+63c0FPtpV0FzKdU=
+-----END CERTIFICATE-----
diff --git a/tests/fixtures/CommonName-root.key b/tests/fixtures/CommonName-root.key
new file mode 100644
index 00000000000..046b4a58fbd
--- /dev/null
+++ b/tests/fixtures/CommonName-root.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAxACnI84bTht4VmwFAm7nRNYDznrXk9CDZNp4DdrgjGI2EmZm
+hoVvxmZNxL1e8ClboXU8q4d+hOSGo54PXdrdYjlP/I0TlPrgMq0twv0uT/7q6vFQ
+VK7d1Rv+OG4cExHWnzZwFkOqI2H5itLUaqEJ9RbZt9JfCJ1qH+SZt5aDQNlnTXbw
+Kfw/ujqY1HahnTJBXdDYZLkDgzJ8nATMOm7VE4+Up5qzJKu1dWmrMeAajeQuA3Im
+RdKcskf00/DKEOVXIrfKx70FMs+Ni4/PtvEZA6IuKaydYrS2grdnoA6efL15WEGU
+3edIpzIZwSGqSjXRqYN35Vv/ABbpdbQk6nfDXwIDAQABAoIBAA5AMebTjH6wVp6J
++g9EOwJxQROZMOVparRBgisXt+3dEitiUKAFQaw+MfdVAXsatrPVj1S1ZEiLSRLK
+YjmjuSb0HdGx/DN/zh9BIiukNuLQGQp+AyY1FKHzCBfYQahNSrqGvb2Qq+UosXkb
+fSBHly6/u5K28/vvXhD1kQudIOvtAc9tOg8LZnM6N3J4E0GtLqWimRZ4jNK4APu1
+YsLIg87Eam+7x25+phz9xc22tZ1H4WY9FnOGprPnievqiV7mgcNGAklTB93C6yX1
+EI+QxQnPg0P732C4EJZFDPqhVRA4E7BUb5uTIXCJBA/FFuRIx9ppyLZKt9vjTchM
+8YWIEsECgYEA/5DRR9FkIWJZb0Pv3SCc53PMPT/xpYB6lH2lGtG+u+L71dJNDiPt
+da3dPXSBy+aF7BbmRDawRvyOLGArlWiSsoEUVlES8BYzQ1MmfDf+MJooJoBE6/g6
+2OyyNnPde1GqyxsxgNTITvJCTjYH64lxKVRYfMgMAASK49SjYiEgGn8CgYEAxFXs
+Oe0sUcc3P1cQ9pJfSVKpSczZq/OGAxqlniqRHvoWgFfKOWB6F9PN0rd8G2aMlfGS
+BjyiPe770gtpX8Z4G4lrtkJD8NvGoVC8yX78HbrXL2RA4lPjQfrveUnwXIRbRKWa
+6D/GAYPOuNvJmwF4hY/orWyIqvpNczIjTjs1JyECgYEAvhuNAn6JnKfbXYBM+tIa
+xbWHFXzula2IAdOhMN0bpApKSZmBxmYFa0elTuTO9M2Li77RFacU5AlU/T+gzCiZ
+D34jkb4Hd18cTRWaiEbiqGbUPSennVzu8ZTJUOZJuEVc5m9ZGLuwMcHWfvWEWLrJ
+2fOrS09IVe8LHkV8MC/yAKMCgYBmDUdhgK9Fvqgv60Cs+b4/rZDDBJCsOUOSP3qQ
+sQ2HrXSet4MsucIcuoJEog0HbRFsKwm85i1qxdrs/fOCzfXGUnLDZMRN4N7pIL9Q
+eQnxJhoNzy2Otw3sUNPDFrSyUjXig7X2PJfeV7XPDqdHQ8dynS/TXRPY04wIcao6
+Uro5IQKBgFUz2GjAxI6uc7ihmRv/GYTuXYOlO0IN7MFwQDd0pVnWHkLNZscO9L9/
+ALV4g1p/75CewlQfyC8ynOJJWcDeHHFNsSMsOzAxUOVtVenaF/dgwk95wpXj6Rx6
+4kvQqnJg97fRBbyzvQcdL36kL8+pbmHNoqHPwxbuigYShB74d6/h
+-----END RSA PRIVATE KEY-----
diff --git a/tests/fixtures/ed25519-private-key.pem b/tests/fixtures/ed25519-private-key.pem
new file mode 100644
index 00000000000..1596cd455f5
--- /dev/null
+++ b/tests/fixtures/ed25519-private-key.pem
@@ -0,0 +1,3 @@
+-----BEGIN PRIVATE KEY-----
+MC4CAQAwBQYDK2VwBCIEIAtiwQ7KeS1I0otY9gw1Ox4av/zQ+wvs/8AIaTkawQ73
+-----END PRIVATE KEY-----
diff --git a/tests/fixtures/ed25519-public-key.pem b/tests/fixtures/ed25519-public-key.pem
new file mode 100644
index 00000000000..5563956f25c
--- /dev/null
+++ b/tests/fixtures/ed25519-public-key.pem
@@ -0,0 +1,3 @@
+-----BEGIN PUBLIC KEY-----
+MCowBQYDK2VwAyEAuOUxC8Bbn1KqYctlim/MHaP5JrtmeK5xcs+9w506btA=
+-----END PUBLIC KEY-----
diff --git a/tests/fixtures/gencerts.sh b/tests/fixtures/gencerts.sh
index e4226fca012..79c0680e6ce 100755
--- a/tests/fixtures/gencerts.sh
+++ b/tests/fixtures/gencerts.sh
@@ -1,15 +1,21 @@
#!/bin/bash
-set -e
+set -euo pipefail
if ! [[ "$0" =~ "./gencerts.sh" ]]; then
echo "must be run from 'fixtures'"
exit 255
fi
-if ! which cfssl; then
+if ! command -v cfssl; then
echo "cfssl is not installed"
- echo "use: go install -mod mod github.com/cloudflare/cfssl/cmd/cfssl github.com/cloudflare/cfssl/cmd/cfssljson"
+ echo 'use: bash -c "cd ../../tools/mod; go install github.com/cloudflare/cfssl/cmd/cfssl"'
+ exit 255
+fi
+
+if ! command -v cfssljson; then
+ echo "cfssljson is not installed"
+ echo 'use: bash -c "cd ../../tools/mod; go install github.com/cloudflare/cfssl/cmd/cfssljson"'
exit 255
fi
diff --git a/tests/framework/config/client.go b/tests/framework/config/client.go
new file mode 100644
index 00000000000..ac82bd54792
--- /dev/null
+++ b/tests/framework/config/client.go
@@ -0,0 +1,79 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "time"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+)
+
+// ClientOption configures the client with additional parameters.
+// For example, if Auth is enabled, the common test cases just need to
+// use `WithAuth` to return a ClientOption. Note that the common `WithAuth`
+// function calls `e2e.WithAuth` or `integration.WithAuth`, depending on the
+// build tag (either "e2e" or "integration").
+type ClientOption func(any)
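+
+// As an illustration only, a framework package ("e2e" or "integration")
+// typically provides options shaped like the sketch below; WithTimeout and
+// the value it sets are hypothetical, illustrative names:
+//
+//	func WithTimeout(timeout time.Duration) config.ClientOption {
+//		return func(clientCfg any) {
+//			// type-assert clientCfg to the framework's concrete
+//			// client configuration and set the timeout on it.
+//		}
+//	}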
+
+type GetOptions struct {
+ Revision int
+ End string
+ CountOnly bool
+ Serializable bool
+ Prefix bool
+ FromKey bool
+ Limit int
+ Order clientv3.SortOrder
+ SortBy clientv3.SortTarget
+ Timeout time.Duration
+}
+
+type PutOptions struct {
+ LeaseID clientv3.LeaseID
+ Timeout time.Duration
+}
+
+type DeleteOptions struct {
+ Prefix bool
+ FromKey bool
+ End string
+}
+
+type TxnOptions struct {
+ Interactive bool
+}
+
+type CompactOption struct {
+ Physical bool
+ Timeout time.Duration
+}
+
+type DefragOption struct {
+ Timeout time.Duration
+}
+
+type LeaseOption struct {
+ WithAttachedKeys bool
+}
+
+type UserAddOptions struct {
+ NoPassword bool
+}
+
+type WatchOptions struct {
+ Prefix bool
+ Revision int64
+ RangeEnd string
+}
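+
+// A small usage sketch (values are illustrative only): tests typically build
+// these option structs literally and pass them to the framework client, e.g.
+//
+//	opts := GetOptions{Prefix: true, Serializable: true, Timeout: 5 * time.Second}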
diff --git a/tests/framework/config/cluster.go b/tests/framework/config/cluster.go
new file mode 100644
index 00000000000..0e6ec561afb
--- /dev/null
+++ b/tests/framework/config/cluster.go
@@ -0,0 +1,90 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "time"
+)
+
+type TLSConfig string
+
+const (
+ NoTLS TLSConfig = ""
+ AutoTLS TLSConfig = "auto-tls"
+ ManualTLS TLSConfig = "manual-tls"
+
+ TickDuration = 10 * time.Millisecond
+)
+
+type ClusterConfig struct {
+ ClusterSize int
+ PeerTLS TLSConfig
+ ClientTLS TLSConfig
+ QuotaBackendBytes int64
+ StrictReconfigCheck bool
+ AuthToken string
+ SnapshotCount uint64
+
+ // ClusterContext is used by "e2e" or "integration" to extend the
+ // ClusterConfig. The common test cases shouldn't care about what
+ // data is encoded or included; instead, the "e2e" or "integration"
+ // framework should decode or parse it separately.
+ ClusterContext any
+}
+
+func DefaultClusterConfig() ClusterConfig {
+ return ClusterConfig{
+ ClusterSize: 3,
+ StrictReconfigCheck: true,
+ }
+}
+
+func NewClusterConfig(opts ...ClusterOption) ClusterConfig {
+ c := DefaultClusterConfig()
+ for _, opt := range opts {
+ opt(&c)
+ }
+ return c
+}
+
+type ClusterOption func(*ClusterConfig)
+
+func WithClusterConfig(cfg ClusterConfig) ClusterOption {
+ return func(c *ClusterConfig) { *c = cfg }
+}
+
+func WithClusterSize(size int) ClusterOption {
+ return func(c *ClusterConfig) { c.ClusterSize = size }
+}
+
+func WithPeerTLS(tls TLSConfig) ClusterOption {
+ return func(c *ClusterConfig) { c.PeerTLS = tls }
+}
+
+func WithClientTLS(tls TLSConfig) ClusterOption {
+ return func(c *ClusterConfig) { c.ClientTLS = tls }
+}
+
+func WithQuotaBackendBytes(bytes int64) ClusterOption {
+ return func(c *ClusterConfig) { c.QuotaBackendBytes = bytes }
+}
+
+func WithSnapshotCount(count uint64) ClusterOption {
+ return func(c *ClusterConfig) { c.SnapshotCount = count }
+}
+
+func WithStrictReconfigCheck(strict bool) ClusterOption {
+ return func(c *ClusterConfig) { c.StrictReconfigCheck = strict }
+}
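+
+// Usage sketch (illustrative only): the functional options above mutate a
+// copy of DefaultClusterConfig, so a test can request e.g. a one-member
+// cluster with manual client TLS via:
+//
+//	cfg := NewClusterConfig(
+//		WithClusterSize(1),
+//		WithClientTLS(ManualTLS),
+//	)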
diff --git a/tests/framework/e2e/cluster.go b/tests/framework/e2e/cluster.go
new file mode 100644
index 00000000000..083dcc7a077
--- /dev/null
+++ b/tests/framework/e2e/cluster.go
@@ -0,0 +1,1110 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "net/url"
+ "path"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+ "time"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/featuregate"
+ "go.etcd.io/etcd/pkg/v3/proxy"
+ "go.etcd.io/etcd/server/v3/embed"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+)
+
+const EtcdProcessBasePort = 20000
+
+type ClientConnType int
+
+const (
+ ClientNonTLS ClientConnType = iota
+ ClientTLS
+ ClientTLSAndNonTLS
+)
+
+type ClientConfig struct {
+ ConnectionType ClientConnType
+ CertAuthority bool
+ AutoTLS bool
+ RevokeCerts bool
+}
+
+// allow alphanumerics, spaces, underscores and dashes
+var testNameCleanRegex = regexp.MustCompile(`[^a-zA-Z0-9 \-_]+`)
+
+func NewConfigNoTLS() *EtcdProcessClusterConfig {
+ return DefaultConfig()
+}
+
+func NewConfigAutoTLS() *EtcdProcessClusterConfig {
+ return NewConfig(
+ WithIsPeerTLS(true),
+ WithIsPeerAutoTLS(true),
+ )
+}
+
+func NewConfigTLS() *EtcdProcessClusterConfig {
+ return NewConfig(
+ WithClientConnType(ClientTLS),
+ WithIsPeerTLS(true),
+ )
+}
+
+func NewConfigClientTLS() *EtcdProcessClusterConfig {
+ return NewConfig(WithClientConnType(ClientTLS))
+}
+
+func NewConfigClientAutoTLS() *EtcdProcessClusterConfig {
+ return NewConfig(
+ WithClusterSize(1),
+ WithClientAutoTLS(true),
+ WithClientConnType(ClientTLS),
+ )
+}
+
+func NewConfigPeerTLS() *EtcdProcessClusterConfig {
+ return NewConfig(
+ WithIsPeerTLS(true),
+ )
+}
+
+func NewConfigClientTLSCertAuth() *EtcdProcessClusterConfig {
+ return NewConfig(
+ WithClusterSize(1),
+ WithClientConnType(ClientTLS),
+ WithClientCertAuthority(true),
+ )
+}
+
+func NewConfigClientTLSCertAuthWithNoCN() *EtcdProcessClusterConfig {
+ return NewConfig(
+ WithClusterSize(1),
+ WithClientConnType(ClientTLS),
+ WithClientCertAuthority(true),
+ WithCN(false),
+ )
+}
+
+func NewConfigJWT() *EtcdProcessClusterConfig {
+ return NewConfig(
+ WithClusterSize(1),
+ WithAuthTokenOpts("jwt,pub-key="+path.Join(FixturesDir, "server.crt")+
+ ",priv-key="+path.Join(FixturesDir, "server.key.insecure")+",sign-method=RS256,ttl=5s"),
+ )
+}
+
+func ConfigStandalone(cfg EtcdProcessClusterConfig) *EtcdProcessClusterConfig {
+ ret := cfg
+ ret.ClusterSize = 1
+ return &ret
+}
+
+type EtcdProcessCluster struct {
+ lg *zap.Logger
+ Cfg *EtcdProcessClusterConfig
+ Procs []EtcdProcess
+ nextSeq int // sequence number of the next etcd process (if one is required)
+}
+
+type EtcdProcessClusterConfig struct {
+ ServerConfig embed.Config
+
+ // Test config
+
+ KeepDataDir bool
+ Logger *zap.Logger
+ GoFailEnabled bool
+ GoFailClientTimeout time.Duration
+ LazyFSEnabled bool
+ PeerProxy bool
+
+ // Process config
+
+ EnvVars map[string]string
+ Version ClusterVersion
+
+ // Cluster setup config
+
+ ClusterSize int
+ // InitialLeaderIndex, if specified (>=0), makes sure the i-th proc
+ // is the leader when the cluster starts.
+ InitialLeaderIndex int
+ RollingStart bool
+ // BaseDataDirPath specifies the data-dir for the members. If test cases
+ // do not specify `BaseDataDirPath`, then the e2e framework creates a
+ // temporary directory for each member; otherwise, it creates a
+ // subdirectory (e.g. member-0, member-1 and member-2) under the given
+ // `BaseDataDirPath` for each member.
+ BaseDataDirPath string
+
+ // Dynamic per member configuration
+
+ BasePeerScheme string
+ BasePort int
+ BaseClientScheme string
+ MetricsURLScheme string
+ Client ClientConfig
+ ClientHTTPSeparate bool
+ IsPeerTLS bool
+ IsPeerAutoTLS bool
+ CN bool
+
+ // Removed in v3.6
+
+ Discovery string // v2 discovery
+ EnableV2 bool
+}
+
+func DefaultConfig() *EtcdProcessClusterConfig {
+ cfg := &EtcdProcessClusterConfig{
+ ClusterSize: 3,
+ CN: true,
+ InitialLeaderIndex: -1,
+ ServerConfig: *embed.NewConfig(),
+ }
+ cfg.ServerConfig.InitialClusterToken = "new"
+ return cfg
+}
+
+func NewConfig(opts ...EPClusterOption) *EtcdProcessClusterConfig {
+ c := DefaultConfig()
+ for _, opt := range opts {
+ opt(c)
+ }
+ return c
+}
+
+type EPClusterOption func(*EtcdProcessClusterConfig)
+
+func WithConfig(cfg *EtcdProcessClusterConfig) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { *c = *cfg }
+}
+
+func WithVersion(version ClusterVersion) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.Version = version }
+}
+
+func WithInitialLeaderIndex(i int) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.InitialLeaderIndex = i }
+}
+
+func WithDataDirPath(path string) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.BaseDataDirPath = path }
+}
+
+func WithKeepDataDir(keep bool) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.KeepDataDir = keep }
+}
+
+func WithSnapshotCount(count uint64) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ServerConfig.SnapshotCount = count }
+}
+
+func WithSnapshotCatchUpEntries(count uint64) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ServerConfig.SnapshotCatchUpEntries = count }
+}
+
+func WithClusterSize(size int) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ClusterSize = size }
+}
+
+func WithBasePeerScheme(scheme string) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.BasePeerScheme = scheme }
+}
+
+func WithBasePort(port int) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.BasePort = port }
+}
+
+func WithBaseClientScheme(scheme string) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.BaseClientScheme = scheme }
+}
+
+func WithClientConnType(clientConnType ClientConnType) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.Client.ConnectionType = clientConnType }
+}
+
+func WithClientCertAuthority(enabled bool) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.Client.CertAuthority = enabled }
+}
+
+func WithIsPeerTLS(isPeerTLS bool) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.IsPeerTLS = isPeerTLS }
+}
+
+func WithIsPeerAutoTLS(isPeerAutoTLS bool) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.IsPeerAutoTLS = isPeerAutoTLS }
+}
+
+func WithClientAutoTLS(isClientAutoTLS bool) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.Client.AutoTLS = isClientAutoTLS }
+}
+
+func WithClientRevokeCerts(isClientCRL bool) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.Client.RevokeCerts = isClientCRL }
+}
+
+func WithCN(cn bool) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.CN = cn }
+}
+
+func WithQuotaBackendBytes(bytes int64) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ServerConfig.QuotaBackendBytes = bytes }
+}
+
+func WithStrictReconfigCheck(strict bool) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ServerConfig.StrictReconfigCheck = strict }
+}
+
+func WithEnableV2(enable bool) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.EnableV2 = enable }
+}
+
+func WithAuthTokenOpts(token string) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ServerConfig.AuthToken = token }
+}
+
+func WithRollingStart(rolling bool) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.RollingStart = rolling }
+}
+
+func WithDiscovery(discovery string) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.Discovery = discovery }
+}
+
+func WithDiscoveryEndpoints(endpoints []string) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ServerConfig.DiscoveryCfg.Endpoints = endpoints }
+}
+
+func WithDiscoveryToken(token string) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ServerConfig.DiscoveryCfg.Token = token }
+}
+
+func WithLogLevel(level string) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ServerConfig.LogLevel = level }
+}
+
+func WithCorruptCheckTime(time time.Duration) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ServerConfig.ExperimentalCorruptCheckTime = time }
+}
+
+func WithInitialClusterToken(token string) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ServerConfig.InitialClusterToken = token }
+}
+
+func WithInitialCorruptCheck(enabled bool) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ServerConfig.ExperimentalInitialCorruptCheck = enabled }
+}
+
+func WithCompactHashCheckEnabled(enabled bool) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ServerConfig.ExperimentalCompactHashCheckEnabled = enabled }
+}
+
+func WithCompactHashCheckTime(time time.Duration) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ServerConfig.CompactHashCheckTime = time }
+}
+
+func WithGoFailEnabled(enabled bool) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.GoFailEnabled = enabled }
+}
+
+func WithGoFailClientTimeout(dur time.Duration) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.GoFailClientTimeout = dur }
+}
+
+func WithLazyFSEnabled(enabled bool) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.LazyFSEnabled = enabled }
+}
+
+func WithWarningUnaryRequestDuration(time time.Duration) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ServerConfig.WarningUnaryRequestDuration = time }
+}
+
+// WithExperimentalWarningUnaryRequestDuration sets a value for `--experimental-warning-unary-request-duration`.
+// TODO(ahrtr): remove this function when the corresponding experimental flag is decommissioned.
+func WithExperimentalWarningUnaryRequestDuration(time time.Duration) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ServerConfig.ExperimentalWarningUnaryRequestDuration = time }
+}
+
+func WithExperimentalStopGRPCServiceOnDefrag(stopGRPCServiceOnDefrag bool) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) {
+ c.ServerConfig.ExperimentalStopGRPCServiceOnDefrag = stopGRPCServiceOnDefrag
+ }
+}
+
+func WithServerFeatureGate(featureName string, val bool) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) {
+ if err := c.ServerConfig.ServerFeatureGate.(featuregate.MutableFeatureGate).Set(fmt.Sprintf("%s=%v", featureName, val)); err != nil {
+ panic(err)
+ }
+ }
+}
+
+func WithCompactionBatchLimit(limit int) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ServerConfig.ExperimentalCompactionBatchLimit = limit }
+}
+
+func WithCompactionSleepInterval(time time.Duration) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ServerConfig.ExperimentalCompactionSleepInterval = time }
+}
+
+func WithWatchProcessNotifyInterval(interval time.Duration) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.ServerConfig.ExperimentalWatchProgressNotifyInterval = interval }
+}
+
+func WithEnvVars(ev map[string]string) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.EnvVars = ev }
+}
+
+func WithPeerProxy(enabled bool) EPClusterOption {
+ return func(c *EtcdProcessClusterConfig) { c.PeerProxy = enabled }
+}
+
+// NewEtcdProcessCluster launches a new cluster from etcd processes, returning
+// a new EtcdProcessCluster once all nodes are ready to accept client requests.
+func NewEtcdProcessCluster(ctx context.Context, t testing.TB, opts ...EPClusterOption) (*EtcdProcessCluster, error) {
+ cfg := NewConfig(opts...)
+ epc, err := InitEtcdProcessCluster(t, cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ return StartEtcdProcessCluster(ctx, t, epc, cfg)
+}
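+
+// Usage sketch (illustrative only): a typical e2e test starts a cluster with
+// a few of the options above and closes it when done, e.g.:
+//
+//	epc, err := NewEtcdProcessCluster(ctx, t,
+//		WithClusterSize(3),
+//		WithClientConnType(ClientTLS),
+//		WithIsPeerTLS(true),
+//	)
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	defer epc.Close()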
+
+// InitEtcdProcessCluster initializes a new cluster based on the given config.
+// It doesn't start the cluster.
+func InitEtcdProcessCluster(t testing.TB, cfg *EtcdProcessClusterConfig) (*EtcdProcessCluster, error) {
+ SkipInShortMode(t)
+
+ if cfg.Logger == nil {
+ cfg.Logger = zaptest.NewLogger(t)
+ }
+ if cfg.BasePort == 0 {
+ cfg.BasePort = EtcdProcessBasePort
+ }
+ if cfg.ServerConfig.SnapshotCount == 0 {
+ cfg.ServerConfig.SnapshotCount = etcdserver.DefaultSnapshotCount
+ }
+
+ // validate that SnapshotCatchUpEntries can be set for at least one member
+ if cfg.ServerConfig.SnapshotCatchUpEntries != etcdserver.DefaultSnapshotCatchUpEntries {
+ if !CouldSetSnapshotCatchupEntries(BinPath.Etcd) {
+ return nil, fmt.Errorf("cannot set SnapshotCatchUpEntries for current etcd version: %s", BinPath.Etcd)
+ }
+ if cfg.Version == LastVersion && !CouldSetSnapshotCatchupEntries(BinPath.EtcdLastRelease) {
+ return nil, fmt.Errorf("cannot set SnapshotCatchUpEntries for last etcd version: %s", BinPath.EtcdLastRelease)
+ }
+ }
+
+ etcdCfgs := cfg.EtcdAllServerProcessConfigs(t)
+ epc := &EtcdProcessCluster{
+ Cfg: cfg,
+ lg: zaptest.NewLogger(t),
+ Procs: make([]EtcdProcess, cfg.ClusterSize),
+ nextSeq: cfg.ClusterSize,
+ }
+
+ // launch etcd processes
+ for i := range etcdCfgs {
+ proc, err := NewEtcdProcess(t, etcdCfgs[i])
+ if err != nil {
+ epc.Close()
+ return nil, fmt.Errorf("cannot configure: %w", err)
+ }
+ epc.Procs[i] = proc
+ }
+
+ return epc, nil
+}
+
+// StartEtcdProcessCluster launches a new cluster from etcd processes.
+func StartEtcdProcessCluster(ctx context.Context, t testing.TB, epc *EtcdProcessCluster, cfg *EtcdProcessClusterConfig) (*EtcdProcessCluster, error) {
+ if cfg.RollingStart {
+ if err := epc.RollingStart(ctx); err != nil {
+ return nil, fmt.Errorf("cannot rolling-start: %w", err)
+ }
+ } else {
+ if err := epc.Start(ctx); err != nil {
+ return nil, fmt.Errorf("cannot start: %w", err)
+ }
+ }
+
+ for _, proc := range epc.Procs {
+ if cfg.GoFailEnabled && !proc.Failpoints().Enabled() {
+ epc.Close()
+ t.Skip("please run 'make gofail-enable && make build' before running the test")
+ }
+ }
+ if cfg.InitialLeaderIndex >= 0 {
+ if err := epc.MoveLeader(ctx, t, cfg.InitialLeaderIndex); err != nil {
+ return nil, fmt.Errorf("failed to move leader: %w", err)
+ }
+ }
+ return epc, nil
+}
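+
+// Illustrative two-phase sketch (not part of the original change): tests that
+// need to tweak individual member configs before launch can split the steps:
+//
+//	epc, err := InitEtcdProcessCluster(t, cfg)
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	// ... adjust epc.Procs[i] here if needed ...
+//	epc, err = StartEtcdProcessCluster(ctx, t, epc, cfg)
+//	if err != nil {
+//		t.Fatal(err)
+//	}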
+
+func (cfg *EtcdProcessClusterConfig) ClientScheme() string {
+ return setupScheme(cfg.BaseClientScheme, cfg.Client.ConnectionType == ClientTLS)
+}
+
+func (cfg *EtcdProcessClusterConfig) PeerScheme() string {
+ return setupScheme(cfg.BasePeerScheme, cfg.IsPeerTLS)
+}
+
+func (cfg *EtcdProcessClusterConfig) EtcdAllServerProcessConfigs(tb testing.TB) []*EtcdServerProcessConfig {
+ etcdCfgs := make([]*EtcdServerProcessConfig, cfg.ClusterSize)
+ initialCluster := make([]string, cfg.ClusterSize)
+
+ for i := 0; i < cfg.ClusterSize; i++ {
+ etcdCfgs[i] = cfg.EtcdServerProcessConfig(tb, i)
+ initialCluster[i] = fmt.Sprintf("%s=%s", etcdCfgs[i].Name, etcdCfgs[i].PeerURL.String())
+ }
+
+ for i := range etcdCfgs {
+ cfg.SetInitialOrDiscovery(etcdCfgs[i], initialCluster, "new")
+ }
+
+ return etcdCfgs
+}
+
+func (cfg *EtcdProcessClusterConfig) SetInitialOrDiscovery(serverCfg *EtcdServerProcessConfig, initialCluster []string, initialClusterState string) {
+ if cfg.Discovery == "" && len(cfg.ServerConfig.DiscoveryCfg.Endpoints) == 0 {
+ serverCfg.InitialCluster = strings.Join(initialCluster, ",")
+ serverCfg.Args = append(serverCfg.Args, "--initial-cluster="+serverCfg.InitialCluster)
+ serverCfg.Args = append(serverCfg.Args, "--initial-cluster-state="+initialClusterState)
+ }
+
+ if len(cfg.ServerConfig.DiscoveryCfg.Endpoints) > 0 {
+ serverCfg.Args = append(serverCfg.Args, fmt.Sprintf("--discovery-token=%s", cfg.ServerConfig.DiscoveryCfg.Token))
+ serverCfg.Args = append(serverCfg.Args, fmt.Sprintf("--discovery-endpoints=%s", strings.Join(cfg.ServerConfig.DiscoveryCfg.Endpoints, ",")))
+ }
+}
+
+func (cfg *EtcdProcessClusterConfig) EtcdServerProcessConfig(tb testing.TB, i int) *EtcdServerProcessConfig {
+ var curls []string
+ var curl string
+ port := cfg.BasePort + 5*i
+ clientPort := port
+ peerPort := port + 1
+ metricsPort := port + 2
+ peer2Port := port + 3
+ clientHTTPPort := port + 4
+
+ if cfg.Client.ConnectionType == ClientTLSAndNonTLS {
+ curl = clientURL(cfg.ClientScheme(), clientPort, ClientNonTLS)
+ curls = []string{curl, clientURL(cfg.ClientScheme(), clientPort, ClientTLS)}
+ } else {
+ curl = clientURL(cfg.ClientScheme(), clientPort, cfg.Client.ConnectionType)
+ curls = []string{curl}
+ }
+
+ peerListenURL := url.URL{Scheme: cfg.PeerScheme(), Host: fmt.Sprintf("localhost:%d", peerPort)}
+ peerAdvertiseURL := url.URL{Scheme: cfg.PeerScheme(), Host: fmt.Sprintf("localhost:%d", peerPort)}
+ var proxyCfg *proxy.ServerConfig
+ if cfg.PeerProxy {
+ if !cfg.IsPeerTLS {
+ panic("Can't use peer proxy without peer TLS as it can result in malformed packets")
+ }
+ peerAdvertiseURL.Host = fmt.Sprintf("localhost:%d", peer2Port)
+ proxyCfg = &proxy.ServerConfig{
+ Logger: zap.NewNop(),
+ To: peerListenURL,
+ From: peerAdvertiseURL,
+ }
+ }
+
+ name := fmt.Sprintf("%s-test-%d", testNameCleanRegex.ReplaceAllString(tb.Name(), ""), i)
+
+ var dataDirPath string
+ if cfg.BaseDataDirPath == "" {
+ dataDirPath = tb.TempDir()
+ } else {
+ // When test cases specify BaseDataDirPath and there is more than one
+ // member in the cluster, we need to create a subdirectory for each
+ // member to avoid conflicts.
+ // We also create a subdirectory for a one-member cluster, because we
+ // support dynamically adding new members.
+ dataDirPath = filepath.Join(cfg.BaseDataDirPath, fmt.Sprintf("member-%d", i))
+ }
+
+ args := []string{
+ "--name=" + name,
+ "--listen-client-urls=" + strings.Join(curls, ","),
+ "--advertise-client-urls=" + strings.Join(curls, ","),
+ "--listen-peer-urls=" + peerListenURL.String(),
+ "--initial-advertise-peer-urls=" + peerAdvertiseURL.String(),
+ "--initial-cluster-token=" + cfg.ServerConfig.InitialClusterToken,
+ "--data-dir", dataDirPath,
+ "--snapshot-count=" + fmt.Sprintf("%d", cfg.ServerConfig.SnapshotCount),
+ }
+ var clientHTTPURL string
+ if cfg.ClientHTTPSeparate {
+ clientHTTPURL = clientURL(cfg.ClientScheme(), clientHTTPPort, cfg.Client.ConnectionType)
+ args = append(args, "--listen-client-http-urls="+clientHTTPURL)
+ }
+
+ if cfg.ServerConfig.ForceNewCluster {
+ args = append(args, "--force-new-cluster")
+ }
+ if cfg.ServerConfig.QuotaBackendBytes > 0 {
+ args = append(args,
+ "--quota-backend-bytes="+fmt.Sprintf("%d", cfg.ServerConfig.QuotaBackendBytes),
+ )
+ }
+ if !cfg.ServerConfig.StrictReconfigCheck {
+ args = append(args, "--strict-reconfig-check=false")
+ }
+ if cfg.EnableV2 {
+ args = append(args, "--enable-v2")
+ }
+ var murl string
+ if cfg.MetricsURLScheme != "" {
+ murl = (&url.URL{
+ Scheme: cfg.MetricsURLScheme,
+ Host: fmt.Sprintf("localhost:%d", metricsPort),
+ }).String()
+ args = append(args, "--listen-metrics-urls="+murl)
+ }
+
+ args = append(args, cfg.TLSArgs()...)
+
+ if cfg.Discovery != "" {
+ args = append(args, "--discovery="+cfg.Discovery)
+ }
+
+ var execPath string
+ switch cfg.Version {
+ case CurrentVersion:
+ execPath = BinPath.Etcd
+ case MinorityLastVersion:
+ if i <= cfg.ClusterSize/2 {
+ execPath = BinPath.Etcd
+ } else {
+ execPath = BinPath.EtcdLastRelease
+ }
+ case QuorumLastVersion:
+ if i <= cfg.ClusterSize/2 {
+ execPath = BinPath.EtcdLastRelease
+ } else {
+ execPath = BinPath.Etcd
+ }
+ case LastVersion:
+ execPath = BinPath.EtcdLastRelease
+ default:
+ panic(fmt.Sprintf("Unknown cluster version %v", cfg.Version))
+ }
+
+ defaultValues := values(*embed.NewConfig())
+ overrideValues := values(cfg.ServerConfig)
+ for flag, value := range overrideValues {
+ if defaultValue := defaultValues[flag]; value == "" || value == defaultValue {
+ continue
+ }
+ if flag == "experimental-snapshot-catchup-entries" && !CouldSetSnapshotCatchupEntries(execPath) {
+ continue
+ }
+ args = append(args, fmt.Sprintf("--%s=%s", flag, value))
+ }
+ envVars := map[string]string{}
+ for key, value := range cfg.EnvVars {
+ envVars[key] = value
+ }
+ var gofailPort int
+ if cfg.GoFailEnabled {
+ gofailPort = (i+1)*10000 + 2381
+ envVars["GOFAIL_HTTP"] = fmt.Sprintf("127.0.0.1:%d", gofailPort)
+ }
+
+ return &EtcdServerProcessConfig{
+ lg: cfg.Logger,
+ ExecPath: execPath,
+ Args: args,
+ EnvVars: envVars,
+ TLSArgs: cfg.TLSArgs(),
+ Client: cfg.Client,
+ DataDirPath: dataDirPath,
+ KeepDataDir: cfg.KeepDataDir,
+ Name: name,
+ PeerURL: peerAdvertiseURL,
+ ClientURL: curl,
+ ClientHTTPURL: clientHTTPURL,
+ MetricsURL: murl,
+ InitialToken: cfg.ServerConfig.InitialClusterToken,
+ GoFailPort: gofailPort,
+ GoFailClientTimeout: cfg.GoFailClientTimeout,
+ Proxy: proxyCfg,
+ LazyFSEnabled: cfg.LazyFSEnabled,
+ }
+}
+
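+// values flattens an embed.Config into a map from flag name to string value
+// by registering the config's flags on a throwaway FlagSet. Boolean "false"
+// and zero values are normalized to the empty string so they compare equal to
+// unset defaults. EtcdServerProcessConfig uses the result to append only the
+// flags that differ from the defaults, e.g. (illustratively)
+// WithLogLevel("debug") ends up as "--log-level=debug" on the member's
+// command line.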
+func values(cfg embed.Config) map[string]string {
+ fs := flag.NewFlagSet("etcd", flag.ContinueOnError)
+ cfg.AddFlags(fs)
+ values := map[string]string{}
+ fs.VisitAll(func(f *flag.Flag) {
+ value := f.Value.String()
+ if value == "false" || value == "0" {
+ value = ""
+ }
+ values[f.Name] = value
+ })
+ return values
+}
+
+func clientURL(scheme string, port int, connType ClientConnType) string {
+ curlHost := fmt.Sprintf("localhost:%d", port)
+ switch connType {
+ case ClientNonTLS:
+ return (&url.URL{Scheme: scheme, Host: curlHost}).String()
+ case ClientTLS:
+ return (&url.URL{Scheme: ToTLS(scheme), Host: curlHost}).String()
+ default:
+ panic(fmt.Sprintf("Unsupported connection type %v", connType))
+ }
+}
+
+func (cfg *EtcdProcessClusterConfig) TLSArgs() (args []string) {
+ if cfg.Client.ConnectionType != ClientNonTLS {
+ if cfg.Client.AutoTLS {
+ args = append(args, "--auto-tls")
+ } else {
+ tlsClientArgs := []string{
+ "--cert-file", CertPath,
+ "--key-file", PrivateKeyPath,
+ "--trusted-ca-file", CaPath,
+ }
+ args = append(args, tlsClientArgs...)
+
+ if cfg.Client.CertAuthority {
+ args = append(args, "--client-cert-auth")
+ }
+ }
+ }
+
+ if cfg.IsPeerTLS {
+ if cfg.IsPeerAutoTLS {
+ args = append(args, "--peer-auto-tls")
+ } else {
+ tlsPeerArgs := []string{
+ "--peer-cert-file", CertPath,
+ "--peer-key-file", PrivateKeyPath,
+ "--peer-trusted-ca-file", CaPath,
+ }
+ args = append(args, tlsPeerArgs...)
+ }
+ }
+
+ if cfg.Client.RevokeCerts {
+ args = append(args, "--client-crl-file", CrlPath, "--client-cert-auth")
+ }
+
+ if len(cfg.ServerConfig.CipherSuites) > 0 {
+ args = append(args, "--cipher-suites", strings.Join(cfg.ServerConfig.CipherSuites, ","))
+ }
+
+ return args
+}
+
+func (epc *EtcdProcessCluster) EndpointsGRPC() []string {
+ return epc.Endpoints(func(ep EtcdProcess) []string { return ep.EndpointsGRPC() })
+}
+
+func (epc *EtcdProcessCluster) EndpointsHTTP() []string {
+ return epc.Endpoints(func(ep EtcdProcess) []string { return ep.EndpointsHTTP() })
+}
+
+func (epc *EtcdProcessCluster) Endpoints(f func(ep EtcdProcess) []string) (ret []string) {
+ for _, p := range epc.Procs {
+ ret = append(ret, f(p)...)
+ }
+ return ret
+}
+
+func (epc *EtcdProcessCluster) CloseProc(ctx context.Context, finder func(EtcdProcess) bool, opts ...config.ClientOption) error {
+ procIndex := -1
+ if finder != nil {
+ for i := range epc.Procs {
+ if finder(epc.Procs[i]) {
+ procIndex = i
+ break
+ }
+ }
+ } else {
+ procIndex = len(epc.Procs) - 1
+ }
+
+ if procIndex == -1 {
+ return fmt.Errorf("no process found to stop")
+ }
+
+ proc := epc.Procs[procIndex]
+ epc.Procs = append(epc.Procs[:procIndex], epc.Procs[procIndex+1:]...)
+
+ if proc == nil {
+ return nil
+ }
+
+ // First remove member from the cluster
+
+ memberCtl := epc.Etcdctl(opts...)
+ memberList, err := memberCtl.MemberList(ctx, false)
+ if err != nil {
+ return fmt.Errorf("failed to get member list: %w", err)
+ }
+
+ memberID, err := findMemberIDByEndpoint(memberList.Members, proc.Config().ClientURL)
+ if err != nil {
+ return fmt.Errorf("failed to find member ID: %w", err)
+ }
+
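+ // Keep issuing MemberRemove until the server responds with "member not
+ // found", which confirms the earlier removal has actually been applied.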
+ memberRemoved := false
+ for i := 0; i < 10; i++ {
+ _, err := memberCtl.MemberRemove(ctx, memberID)
+ if err != nil && strings.Contains(err.Error(), "member not found") {
+ memberRemoved = true
+ break
+ }
+
+ time.Sleep(500 * time.Millisecond)
+ }
+
+ if !memberRemoved {
+ return errors.New("failed to remove member after 10 tries")
+ }
+
+ epc.lg.Info("successfully removed member", zap.String("acurl", proc.Config().ClientURL))
+
+ // Then stop process
+ return proc.Close()
+}
+
+// StartNewProc grows the cluster size by one in two phases:
+// Phase 1 - Inform the cluster of the new configuration
+// Phase 2 - Start the new member
+func (epc *EtcdProcessCluster) StartNewProc(ctx context.Context, cfg *EtcdProcessClusterConfig, tb testing.TB, addAsLearner bool, opts ...config.ClientOption) (memberID uint64, err error) {
+ memberID, serverCfg, err := epc.AddMember(ctx, cfg, tb, addAsLearner, opts...)
+ if err != nil {
+ return 0, err
+ }
+
+ // Then start process
+ if err = epc.StartNewProcFromConfig(ctx, tb, serverCfg); err != nil {
+ return 0, err
+ }
+
+ return memberID, nil
+}
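+
+// Usage sketch (illustrative only): growing a running cluster by one learner
+// member, reusing the cluster's own config:
+//
+//	memberID, err := epc.StartNewProc(ctx, nil, t, true /* addAsLearner */)
+//	if err != nil {
+//		t.Fatal(err)
+//	}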
+
+// AddMember adds a new member to the cluster without starting it.
+func (epc *EtcdProcessCluster) AddMember(ctx context.Context, cfg *EtcdProcessClusterConfig, tb testing.TB, addAsLearner bool, opts ...config.ClientOption) (memberID uint64, serverCfg *EtcdServerProcessConfig, err error) {
+ if cfg != nil {
+ serverCfg = cfg.EtcdServerProcessConfig(tb, epc.nextSeq)
+ } else {
+ serverCfg = epc.Cfg.EtcdServerProcessConfig(tb, epc.nextSeq)
+ }
+
+ epc.nextSeq++
+
+ initialCluster := []string{
+ fmt.Sprintf("%s=%s", serverCfg.Name, serverCfg.PeerURL.String()),
+ }
+ for _, p := range epc.Procs {
+ initialCluster = append(initialCluster, fmt.Sprintf("%s=%s", p.Config().Name, p.Config().PeerURL.String()))
+ }
+
+ epc.Cfg.SetInitialOrDiscovery(serverCfg, initialCluster, "existing")
+
+ // First add new member to cluster
+ tb.Logf("add new member to cluster; member-name %s, member-peer-url %s", serverCfg.Name, serverCfg.PeerURL.String())
+ memberCtl := epc.Etcdctl(opts...)
+ var resp *clientv3.MemberAddResponse
+ if addAsLearner {
+ resp, err = memberCtl.MemberAddAsLearner(ctx, serverCfg.Name, []string{serverCfg.PeerURL.String()})
+ } else {
+ resp, err = memberCtl.MemberAdd(ctx, serverCfg.Name, []string{serverCfg.PeerURL.String()})
+ }
+ if err != nil {
+ return 0, nil, fmt.Errorf("failed to add new member: %w", err)
+ }
+
+ return resp.Member.ID, serverCfg, nil
+}
+
+// StartNewProcFromConfig starts a new member process from the given config.
+func (epc *EtcdProcessCluster) StartNewProcFromConfig(ctx context.Context, tb testing.TB, serverCfg *EtcdServerProcessConfig) error {
+ tb.Log("start new member")
+ proc, err := NewEtcdProcess(tb, serverCfg)
+ if err != nil {
+ epc.Close()
+ return fmt.Errorf("cannot configure: %w", err)
+ }
+
+ epc.Procs = append(epc.Procs, proc)
+
+ return proc.Start(ctx)
+}
+
+// UpdateProcOptions updates the options for a specific process. If no opt is passed,
+// the process config is identical to the cluster's config.
+func (epc *EtcdProcessCluster) UpdateProcOptions(i int, tb testing.TB, opts ...EPClusterOption) error {
+ if epc.Procs[i].IsRunning() {
+ return fmt.Errorf("process %d is still running, please close it before updating its options", i)
+ }
+ cfg := *epc.Cfg
+ for _, opt := range opts {
+ opt(&cfg)
+ }
+ serverCfg := cfg.EtcdServerProcessConfig(tb, i)
+
+ var initialCluster []string
+ for _, p := range epc.Procs {
+ initialCluster = append(initialCluster, fmt.Sprintf("%s=%s", p.Config().Name, p.Config().PeerURL.String()))
+ }
+ epc.Cfg.SetInitialOrDiscovery(serverCfg, initialCluster, "new")
+
+ proc, err := NewEtcdProcess(tb, serverCfg)
+ if err != nil {
+ return err
+ }
+ epc.Procs[i] = proc
+ return nil
+}
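+
+// Illustrative sketch (not part of the original change): UpdateProcOptions is
+// meant to be used on a stopped member, e.g. to restart it with a different
+// log level:
+//
+//	if err := epc.Procs[i].Stop(); err != nil {
+//		t.Fatal(err)
+//	}
+//	if err := epc.UpdateProcOptions(i, t, WithLogLevel("debug")); err != nil {
+//		t.Fatal(err)
+//	}
+//	if err := epc.Procs[i].Start(ctx); err != nil {
+//		t.Fatal(err)
+//	}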
+
+func (epc *EtcdProcessCluster) Start(ctx context.Context) error {
+ return epc.start(func(ep EtcdProcess) error { return ep.Start(ctx) })
+}
+
+func (epc *EtcdProcessCluster) RollingStart(ctx context.Context) error {
+ return epc.rollingStart(func(ep EtcdProcess) error { return ep.Start(ctx) })
+}
+
+func (epc *EtcdProcessCluster) Restart(ctx context.Context) error {
+ return epc.start(func(ep EtcdProcess) error { return ep.Restart(ctx) })
+}
+
+func (epc *EtcdProcessCluster) start(f func(ep EtcdProcess) error) error {
+ readyC := make(chan error, len(epc.Procs))
+ for i := range epc.Procs {
+ go func(n int) { readyC <- f(epc.Procs[n]) }(i)
+ }
+ for range epc.Procs {
+ if err := <-readyC; err != nil {
+ epc.Close()
+ return err
+ }
+ }
+ return nil
+}
+
+func (epc *EtcdProcessCluster) rollingStart(f func(ep EtcdProcess) error) error {
+ readyC := make(chan error, len(epc.Procs))
+ for i := range epc.Procs {
+ go func(n int) { readyC <- f(epc.Procs[n]) }(i)
+ // make sure the servers do not start at the same time
+ time.Sleep(time.Second)
+ }
+ for range epc.Procs {
+ if err := <-readyC; err != nil {
+ epc.Close()
+ return err
+ }
+ }
+ return nil
+}
+
+func (epc *EtcdProcessCluster) Stop() (err error) {
+ for _, p := range epc.Procs {
+ if p == nil {
+ continue
+ }
+ if curErr := p.Stop(); curErr != nil {
+ if err != nil {
+ err = fmt.Errorf("%w; %w", err, curErr)
+ } else {
+ err = curErr
+ }
+ }
+ }
+ return err
+}
+
+func (epc *EtcdProcessCluster) ConcurrentStop() (err error) {
+ errCh := make(chan error, len(epc.Procs))
+ for i := range epc.Procs {
+ if epc.Procs[i] == nil {
+ errCh <- nil
+ continue
+ }
+ go func(n int) { errCh <- epc.Procs[n].Stop() }(i)
+ }
+
+ for range epc.Procs {
+ if curErr := <-errCh; curErr != nil {
+ if err != nil {
+ err = fmt.Errorf("%w; %w", err, curErr)
+ } else {
+ err = curErr
+ }
+ }
+ }
+ close(errCh)
+ return err
+}
+
+func (epc *EtcdProcessCluster) Etcdctl(opts ...config.ClientOption) *EtcdctlV3 {
+ etcdctl, err := NewEtcdctl(epc.Cfg.Client, epc.EndpointsGRPC(), opts...)
+ if err != nil {
+ panic(err)
+ }
+ return etcdctl
+}
+
+func (epc *EtcdProcessCluster) Close() error {
+ epc.lg.Info("closing test cluster...")
+ err := epc.Stop()
+ for _, p := range epc.Procs {
+ // p is nil when NewEtcdProcess fails in the middle
+ // Close still gets called to clean up test data
+ if p == nil {
+ continue
+ }
+ if cerr := p.Close(); cerr != nil {
+ err = cerr
+ }
+ }
+ epc.lg.Info("closed test cluster.")
+ return err
+}
+
+func findMemberIDByEndpoint(members []*etcdserverpb.Member, endpoint string) (uint64, error) {
+ for _, m := range members {
+ if m.ClientURLs[0] == endpoint {
+ return m.ID, nil
+ }
+ }
+
+ return 0, errors.New("member not found")
+}
+
+// WaitLeader returns the index of the process in epc.Procs that is the leader,
+// or fails the test if a leader is not established within 30s.
+func (epc *EtcdProcessCluster) WaitLeader(t testing.TB) int {
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ return epc.WaitMembersForLeader(ctx, t, epc.Procs)
+}
+
+// WaitMembersForLeader waits until the given members agree on the same leader,
+// and returns that leader's index in the 'membs' list.
+func (epc *EtcdProcessCluster) WaitMembersForLeader(ctx context.Context, t testing.TB, membs []EtcdProcess) int {
+ cc := epc.Etcdctl()
+
+ // ensure leader is up via linearizable get
+ for {
+ select {
+ case <-ctx.Done():
+ t.Fatal("WaitMembersForLeader timeout")
+ default:
+ }
+ _, err := cc.Get(ctx, "0", config.GetOptions{Timeout: 10*config.TickDuration + time.Second})
+ if err == nil || strings.Contains(err.Error(), "Key not found") {
+ break
+ }
+ t.Logf("WaitMembersForLeader Get err: %v", err)
+ }
+
+ leaders := make(map[uint64]struct{})
+ members := make(map[uint64]int)
+ for {
+ select {
+ case <-ctx.Done():
+ t.Fatal("WaitMembersForLeader timeout")
+ default:
+ }
+ for i := range membs {
+ resp, err := membs[i].Etcdctl().Status(ctx)
+ if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ // if member[i] has stopped
+ continue
+ } else {
+ t.Fatal(err)
+ }
+ }
+ members[resp[0].Header.MemberId] = i
+ leaders[resp[0].Leader] = struct{}{}
+ }
+ // members agree on the same leader
+ if len(leaders) == 1 {
+ break
+ }
+ leaders = make(map[uint64]struct{})
+ members = make(map[uint64]int)
+ time.Sleep(10 * config.TickDuration)
+ }
+ for l := range leaders {
+ if index, ok := members[l]; ok {
+ t.Logf("members agree on a leader, members:%v , leader:%v", members, l)
+ return index
+ }
+ t.Fatalf("members agree on a leader which is not one of members, members:%v , leader:%v", members, l)
+ }
+ t.Fatal("impossible path of execution")
+ return -1
+}
+
+// MoveLeader moves the leader to the ith process.
+func (epc *EtcdProcessCluster) MoveLeader(ctx context.Context, t testing.TB, i int) error {
+ if i < 0 || i >= len(epc.Procs) {
+ return fmt.Errorf("invalid index: %d, must between 0 and %d", i, len(epc.Procs)-1)
+ }
+ t.Logf("moving leader to Procs[%d]", i)
+ oldLeader := epc.WaitMembersForLeader(ctx, t, epc.Procs)
+ if oldLeader == i {
+ t.Logf("Procs[%d] is already the leader", i)
+ return nil
+ }
+ resp, err := epc.Procs[i].Etcdctl().Status(ctx)
+ if err != nil {
+ return err
+ }
+ memberID := resp[0].Header.MemberId
+ err = epc.Procs[oldLeader].Etcdctl().MoveLeader(ctx, memberID)
+ if err != nil {
+ return err
+ }
+ newLeader := epc.WaitMembersForLeader(ctx, t, epc.Procs)
+ if newLeader != i {
+ t.Fatalf("expect new leader to be Procs[%d] but got Procs[%d]", i, newLeader)
+ }
+ t.Logf("moved leader from Procs[%d] to Procs[%d]", oldLeader, i)
+ return nil
+}
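+
+// Usage sketch (illustrative only): tests typically combine the helpers above
+// to pin the leader to a specific member:
+//
+//	leader := epc.WaitLeader(t)
+//	if leader != 0 {
+//		if err := epc.MoveLeader(ctx, t, 0); err != nil {
+//			t.Fatal(err)
+//		}
+//	}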
diff --git a/tests/framework/e2e/cluster_direct.go b/tests/framework/e2e/cluster_direct.go
new file mode 100644
index 00000000000..70c60dbf4c0
--- /dev/null
+++ b/tests/framework/e2e/cluster_direct.go
@@ -0,0 +1,23 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !cluster_proxy
+
+package e2e
+
+import "testing"
+
+func NewEtcdProcess(t testing.TB, cfg *EtcdServerProcessConfig) (EtcdProcess, error) {
+ return NewEtcdServerProcess(t, cfg)
+}
diff --git a/tests/framework/e2e/cluster_proxy.go b/tests/framework/e2e/cluster_proxy.go
new file mode 100644
index 00000000000..ed859858ba9
--- /dev/null
+++ b/tests/framework/e2e/cluster_proxy.go
@@ -0,0 +1,283 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build cluster_proxy
+
+package e2e
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "net/url"
+ "path"
+ "strconv"
+ "strings"
+ "testing"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+)
+
+type proxyEtcdProcess struct {
+ *EtcdServerProcess
+ // TODO(ahrtr): We need to remove `proxyV2` and v2discovery when the v2client is removed.
+ proxyV2 *proxyV2Proc
+ proxyV3 *proxyV3Proc
+}
+
+func NewEtcdProcess(t testing.TB, cfg *EtcdServerProcessConfig) (EtcdProcess, error) {
+ return NewProxyEtcdProcess(t, cfg)
+}
+
+func NewProxyEtcdProcess(t testing.TB, cfg *EtcdServerProcessConfig) (*proxyEtcdProcess, error) {
+ ep, err := NewEtcdServerProcess(t, cfg)
+ if err != nil {
+ return nil, err
+ }
+ pep := &proxyEtcdProcess{
+ EtcdServerProcess: ep,
+ proxyV2: newProxyV2Proc(cfg),
+ proxyV3: newProxyV3Proc(cfg),
+ }
+ return pep, nil
+}
+
+func (p *proxyEtcdProcess) EndpointsHTTP() []string { return p.proxyV2.endpoints() }
+func (p *proxyEtcdProcess) EndpointsGRPC() []string { return p.proxyV3.endpoints() }
+func (p *proxyEtcdProcess) EndpointsMetrics() []string {
+ panic("not implemented; proxy doesn't provide health information")
+}
+
+func (p *proxyEtcdProcess) Start(ctx context.Context) error {
+ if err := p.EtcdServerProcess.Start(ctx); err != nil {
+ return err
+ }
+ return p.proxyV3.Start(ctx)
+}
+
+func (p *proxyEtcdProcess) Restart(ctx context.Context) error {
+ if err := p.EtcdServerProcess.Restart(ctx); err != nil {
+ return err
+ }
+ return p.proxyV3.Restart(ctx)
+}
+
+func (p *proxyEtcdProcess) Stop() error {
+ err := p.proxyV3.Stop()
+ if eerr := p.EtcdServerProcess.Stop(); eerr != nil && err == nil {
+ // fails on go-grpc issue #1384
+ if !strings.Contains(eerr.Error(), "exit status 2") {
+ err = eerr
+ }
+ }
+ return err
+}
+
+func (p *proxyEtcdProcess) Close() error {
+ err := p.proxyV3.Close()
+ if eerr := p.EtcdServerProcess.Close(); eerr != nil && err == nil {
+ // fails on go-grpc issue #1384
+ if !strings.Contains(eerr.Error(), "exit status 2") {
+ err = eerr
+ }
+ }
+ return err
+}
+
+func (p *proxyEtcdProcess) Etcdctl(opts ...config.ClientOption) *EtcdctlV3 {
+ etcdctl, err := NewEtcdctl(p.EtcdServerProcess.Config().Client, p.EtcdServerProcess.EndpointsGRPC(), opts...)
+ if err != nil {
+ panic(err)
+ }
+ return etcdctl
+}
+
+type proxyProc struct {
+ lg *zap.Logger
+ name string
+ execPath string
+ args []string
+ ep string
+ murl string
+ donec chan struct{}
+
+ proc *expect.ExpectProcess
+}
+
+func (pp *proxyProc) endpoints() []string { return []string{pp.ep} }
+
+func (pp *proxyProc) start() error {
+ if pp.proc != nil {
+ panic("already started")
+ }
+ proc, err := SpawnCmdWithLogger(pp.lg, append([]string{pp.execPath}, pp.args...), nil, pp.name)
+ if err != nil {
+ return err
+ }
+ pp.proc = proc
+ return nil
+}
+
+func (pp *proxyProc) waitReady(ctx context.Context, readyStr string) error {
+ defer close(pp.donec)
+ return WaitReadyExpectProc(ctx, pp.proc, []string{readyStr})
+}
+
+func (pp *proxyProc) Stop() error {
+ if pp.proc == nil {
+ return nil
+ }
+ err := pp.proc.Stop()
+ if err != nil {
+ return err
+ }
+
+ err = pp.proc.Close()
+ if err != nil {
+ // proxy received SIGTERM signal
+ if !(strings.Contains(err.Error(), "unexpected exit code") ||
+ // v2proxy exits with status 1 on auto tls; not sure why
+ strings.Contains(err.Error(), "exit status 1")) {
+
+ return err
+ }
+ }
+ pp.proc = nil
+ <-pp.donec
+ pp.donec = make(chan struct{})
+ return nil
+}
+
+func (pp *proxyProc) Close() error { return pp.Stop() }
+
+type proxyV2Proc struct {
+ proxyProc
+ dataDir string
+}
+
+func proxyListenURL(cfg *EtcdServerProcessConfig, portOffset int) string {
+ u, err := url.Parse(cfg.ClientURL)
+ if err != nil {
+ panic(err)
+ }
+ host, port, _ := net.SplitHostPort(u.Host)
+ p, _ := strconv.ParseInt(port, 10, 16)
+ u.Host = fmt.Sprintf("%s:%d", host, int(p)+portOffset)
+ return u.String()
+}
+
+func newProxyV2Proc(cfg *EtcdServerProcessConfig) *proxyV2Proc {
+ listenAddr := proxyListenURL(cfg, 2)
+ name := fmt.Sprintf("testname-proxy-%p", cfg)
+ dataDir := path.Join(cfg.DataDirPath, name+".etcd")
+ args := []string{
+ "--name", name,
+ "--proxy", "on",
+ "--listen-client-urls", listenAddr,
+ "--initial-cluster", cfg.Name + "=" + cfg.PeerURL.String(),
+ "--data-dir", dataDir,
+ }
+ return &proxyV2Proc{
+ proxyProc: proxyProc{
+ name: cfg.Name,
+ lg: cfg.lg,
+ execPath: cfg.ExecPath,
+ args: append(args, cfg.TLSArgs...),
+ ep: listenAddr,
+ donec: make(chan struct{}),
+ },
+ dataDir: dataDir,
+ }
+}
+
+type proxyV3Proc struct {
+ proxyProc
+}
+
+func newProxyV3Proc(cfg *EtcdServerProcessConfig) *proxyV3Proc {
+ listenAddr := proxyListenURL(cfg, 3)
+ args := []string{
+ "grpc-proxy",
+ "start",
+ "--listen-addr", strings.Split(listenAddr, "/")[2],
+ "--endpoints", cfg.ClientURL,
+ // pass-through member RPCs
+ "--advertise-client-url", "",
+ "--data-dir", cfg.DataDirPath,
+ }
+ murl := ""
+ if cfg.MetricsURL != "" {
+ murl = proxyListenURL(cfg, 4)
+ args = append(args, "--metrics-addr", murl)
+ }
+ tlsArgs := []string{}
+ for i := 0; i < len(cfg.TLSArgs); i++ {
+ switch cfg.TLSArgs[i] {
+ case "--cert-file":
+ tlsArgs = append(tlsArgs, "--cert-file", cfg.TLSArgs[i+1])
+ i++
+ case "--key-file":
+ tlsArgs = append(tlsArgs, "--key-file", cfg.TLSArgs[i+1])
+ i++
+ case "--trusted-ca-file":
+ tlsArgs = append(tlsArgs, "--trusted-ca-file", cfg.TLSArgs[i+1])
+ i++
+ case "--auto-tls":
+ tlsArgs = append(tlsArgs, "--auto-tls", "--insecure-skip-tls-verify")
+ case "--peer-trusted-ca-file", "--peer-cert-file", "--peer-key-file":
+ i++ // skip arg
+ case "--client-cert-auth", "--peer-auto-tls":
+ default:
+ tlsArgs = append(tlsArgs, cfg.TLSArgs[i])
+ }
+ }
+ if len(cfg.TLSArgs) > 0 {
+ // Configure certificates for connection proxy ---> server.
+ // This certificate must NOT have CN set.
+ tlsArgs = append(tlsArgs,
+ "--cert", path.Join(FixturesDir, "client-nocn.crt"),
+ "--key", path.Join(FixturesDir, "client-nocn.key.insecure"),
+ "--cacert", path.Join(FixturesDir, "ca.crt"),
+ "--client-crl-file", path.Join(FixturesDir, "revoke.crl"))
+ }
+
+ return &proxyV3Proc{
+ proxyProc{
+ name: cfg.Name,
+ lg: cfg.lg,
+ execPath: cfg.ExecPath,
+ args: append(args, tlsArgs...),
+ ep: listenAddr,
+ murl: murl,
+ donec: make(chan struct{}),
+ },
+ }
+}
+
+func (v3p *proxyV3Proc) Restart(ctx context.Context) error {
+ if err := v3p.Stop(); err != nil {
+ return err
+ }
+ return v3p.Start(ctx)
+}
+
+func (v3p *proxyV3Proc) Start(ctx context.Context) error {
+ if err := v3p.start(); err != nil {
+ return err
+ }
+ return v3p.waitReady(ctx, "started gRPC proxy")
+}
diff --git a/tests/framework/e2e/cluster_test.go b/tests/framework/e2e/cluster_test.go
new file mode 100644
index 00000000000..7222460ef0d
--- /dev/null
+++ b/tests/framework/e2e/cluster_test.go
@@ -0,0 +1,121 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestEtcdServerProcessConfig(t *testing.T) {
+ v3_5_12 := semver.Version{Major: 3, Minor: 5, Patch: 12}
+ v3_5_14 := semver.Version{Major: 3, Minor: 5, Patch: 14}
+ tcs := []struct {
+ name string
+ config *EtcdProcessClusterConfig
+ expectArgsNotContain []string
+ expectArgsContain []string
+ mockBinaryVersion *semver.Version
+ }{
+ {
+ name: "Default",
+ config: NewConfig(),
+ expectArgsContain: []string{
+ "--listen-client-urls=http://localhost:0",
+ "--advertise-client-urls=http://localhost:0",
+ "--listen-peer-urls=http://localhost:1",
+ "--initial-advertise-peer-urls=http://localhost:1",
+ "--initial-cluster-token=new",
+ "--snapshot-count=10000",
+ },
+ },
+ {
+ name: "SnapshotCount",
+ config: NewConfig(WithSnapshotCount(42)),
+ expectArgsContain: []string{
+ "--snapshot-count=42",
+ },
+ },
+ {
+ name: "QuotaBackendBytes",
+ config: NewConfig(WithQuotaBackendBytes(123)),
+ expectArgsContain: []string{
+ "--quota-backend-bytes=123",
+ },
+ },
+ {
+ name: "CorruptCheck",
+ config: NewConfig(WithInitialCorruptCheck(true)),
+ expectArgsContain: []string{
+ "--experimental-initial-corrupt-check=true",
+ },
+ },
+ {
+ name: "StrictReconfigCheck",
+ config: NewConfig(WithStrictReconfigCheck(false)),
+ expectArgsContain: []string{
+ "--strict-reconfig-check=false",
+ },
+ },
+ {
+ name: "CatchUpEntries",
+ config: NewConfig(WithSnapshotCatchUpEntries(100)),
+ expectArgsContain: []string{
+ "--experimental-snapshot-catchup-entries=100",
+ },
+ mockBinaryVersion: &v3_5_14,
+ },
+ {
+ name: "CatchUpEntriesNoVersion",
+ config: NewConfig(WithSnapshotCatchUpEntries(100), WithVersion(LastVersion)),
+ expectArgsNotContain: []string{
+ "--experimental-snapshot-catchup-entries=100",
+ },
+ },
+ {
+ name: "CatchUpEntriesOldVersion",
+ config: NewConfig(WithSnapshotCatchUpEntries(100), WithVersion(LastVersion)),
+ expectArgsNotContain: []string{
+ "--experimental-snapshot-catchup-entries=100",
+ },
+ mockBinaryVersion: &v3_5_12,
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ var mockGetVersionFromBinary func(binaryPath string) (*semver.Version, error)
+ if tc.mockBinaryVersion == nil {
+ mockGetVersionFromBinary = func(binaryPath string) (*semver.Version, error) {
+ return nil, fmt.Errorf("could not get binary version")
+ }
+ } else {
+ mockGetVersionFromBinary = func(binaryPath string) (*semver.Version, error) {
+ return tc.mockBinaryVersion, nil
+ }
+ }
+ setGetVersionFromBinary(t, mockGetVersionFromBinary)
+ args := tc.config.EtcdServerProcessConfig(t, 0).Args
+ if len(tc.expectArgsContain) != 0 {
+ assert.Subset(t, args, tc.expectArgsContain)
+ }
+ if len(tc.expectArgsNotContain) != 0 {
+ assert.NotSubset(t, args, tc.expectArgsNotContain)
+ }
+ })
+ }
+}
diff --git a/tests/framework/e2e/config.go b/tests/framework/e2e/config.go
new file mode 100644
index 00000000000..acc1d82e048
--- /dev/null
+++ b/tests/framework/e2e/config.go
@@ -0,0 +1,28 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+type ClusterVersion string
+
+const (
+ CurrentVersion ClusterVersion = ""
+ MinorityLastVersion ClusterVersion = "minority-last-version"
+ QuorumLastVersion ClusterVersion = "quorum-last-version"
+ LastVersion ClusterVersion = "last-version"
+)
+
+type ClusterContext struct {
+ Version ClusterVersion
+}
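+
+// Usage sketch (illustrative only): ClusterVersion is consumed via the
+// WithVersion cluster option, e.g. a mixed-version cluster where a quorum of
+// members runs the last release:
+//
+//	epc, err := NewEtcdProcessCluster(ctx, t,
+//		WithClusterSize(3),
+//		WithVersion(QuorumLastVersion),
+//	)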
diff --git a/tests/framework/e2e/curl.go b/tests/framework/e2e/curl.go
new file mode 100644
index 00000000000..d0546622afd
--- /dev/null
+++ b/tests/framework/e2e/curl.go
@@ -0,0 +1,137 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "strings"
+ "time"
+
+ "go.etcd.io/etcd/pkg/v3/expect"
+)
+
+type CURLReq struct {
+ Username string
+ Password string
+
+ IsTLS bool
+ Timeout int
+
+ Endpoint string
+
+ Value string
+ Expected expect.ExpectedResponse
+ Header string
+
+ Ciphers string
+ HTTPVersion string
+
+ OutputFile string
+}
+
+func (r CURLReq) timeoutDuration() time.Duration {
+ if r.Timeout != 0 {
+ return time.Duration(r.Timeout) * time.Second
+ }
+
+ // assume a sane default to finish a curl request
+ return 5 * time.Second
+}
+
+// CURLPrefixArgsCluster builds the beginning of a curl command addressed to
+// the client URL of the given cluster member.
+func CURLPrefixArgsCluster(cfg *EtcdProcessClusterConfig, member EtcdProcess, method string, req CURLReq) []string {
+ return CURLPrefixArgs(member.Config().ClientURL, cfg.Client, cfg.CN, method, req)
+}
+
+func CURLPrefixArgs(clientURL string, cfg ClientConfig, CN bool, method string, req CURLReq) []string {
+ cmdArgs := []string{"curl"}
+ if req.HTTPVersion != "" {
+ cmdArgs = append(cmdArgs, "--http"+req.HTTPVersion)
+ }
+ if req.IsTLS {
+ if cfg.ConnectionType != ClientTLSAndNonTLS {
+ panic("should not use cURLPrefixArgsUseTLS when serving only TLS or non-TLS")
+ }
+ cmdArgs = append(cmdArgs, "--cacert", CaPath, "--cert", CertPath, "--key", PrivateKeyPath)
+ clientURL = ToTLS(clientURL)
+ } else if cfg.ConnectionType == ClientTLS {
+ if CN {
+ cmdArgs = append(cmdArgs, "--cacert", CaPath, "--cert", CertPath, "--key", PrivateKeyPath)
+ } else {
+ cmdArgs = append(cmdArgs, "--cacert", CaPath, "--cert", CertPath3, "--key", PrivateKeyPath3)
+ }
+ }
+ ep := clientURL + req.Endpoint
+
+ if req.Username != "" || req.Password != "" {
+ cmdArgs = append(cmdArgs, "-L", "-u", fmt.Sprintf("%s:%s", req.Username, req.Password), ep)
+ } else {
+ cmdArgs = append(cmdArgs, "-L", ep)
+ }
+ if req.Timeout != 0 {
+ cmdArgs = append(cmdArgs, "-m", fmt.Sprintf("%d", req.Timeout))
+ }
+
+ if req.Header != "" {
+ cmdArgs = append(cmdArgs, "-H", req.Header)
+ }
+
+ if req.Ciphers != "" {
+ cmdArgs = append(cmdArgs, "--ciphers", req.Ciphers)
+ }
+
+ if req.OutputFile != "" {
+ cmdArgs = append(cmdArgs, "--output", req.OutputFile)
+ }
+
+ switch method {
+ case "POST", "PUT":
+ dt := req.Value
+ if !strings.HasPrefix(dt, "{") { // for non-JSON value
+ dt = "value=" + dt
+ }
+ cmdArgs = append(cmdArgs, "-X", method, "-d", dt)
+ }
+ return cmdArgs
+}
+
+func CURLPost(clus *EtcdProcessCluster, req CURLReq) error {
+ ctx, cancel := context.WithTimeout(context.Background(), req.timeoutDuration())
+ defer cancel()
+ return SpawnWithExpectsContext(ctx, CURLPrefixArgsCluster(clus.Cfg, clus.Procs[rand.Intn(clus.Cfg.ClusterSize)], "POST", req), nil, req.Expected)
+}
+
+func CURLPut(clus *EtcdProcessCluster, req CURLReq) error {
+ ctx, cancel := context.WithTimeout(context.Background(), req.timeoutDuration())
+ defer cancel()
+ return SpawnWithExpectsContext(ctx, CURLPrefixArgsCluster(clus.Cfg, clus.Procs[rand.Intn(clus.Cfg.ClusterSize)], "PUT", req), nil, req.Expected)
+}
+
+func CURLGet(clus *EtcdProcessCluster, req CURLReq) error {
+ ctx, cancel := context.WithTimeout(context.Background(), req.timeoutDuration())
+ defer cancel()
+
+ return SpawnWithExpectsContext(ctx, CURLPrefixArgsCluster(clus.Cfg, clus.Procs[rand.Intn(clus.Cfg.ClusterSize)], "GET", req), nil, req.Expected)
+}
+
+func CURLGetFromMember(clus *EtcdProcessCluster, member EtcdProcess, req CURLReq) error {
+ ctx, cancel := context.WithTimeout(context.Background(), req.timeoutDuration())
+ defer cancel()
+
+ return SpawnWithExpectsContext(ctx, CURLPrefixArgsCluster(clus.Cfg, member, "GET", req), nil, req.Expected)
+}
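+
+// Usage sketch (illustrative only; the exact expected output depends on the
+// endpoint being queried): issuing a GET against a random member of a running
+// cluster:
+//
+//	err := CURLGet(epc, CURLReq{
+//		Endpoint: "/health",
+//		Timeout:  5,
+//		Expected: expect.ExpectedResponse{Value: `"health":"true"`},
+//	})
+//	if err != nil {
+//		t.Fatal(err)
+//	}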
diff --git a/tests/framework/e2e/e2e.go b/tests/framework/e2e/e2e.go
new file mode 100644
index 00000000000..f78df57926e
--- /dev/null
+++ b/tests/framework/e2e/e2e.go
@@ -0,0 +1,137 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "os"
+ "testing"
+
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ intf "go.etcd.io/etcd/tests/v3/framework/interfaces"
+)
+
+type e2eRunner struct{}
+
+func NewE2eRunner() intf.TestRunner {
+ return &e2eRunner{}
+}
+
+func (e e2eRunner) TestMain(m *testing.M) {
+ InitFlags()
+ v := m.Run()
+ if v == 0 && testutil.CheckLeakedGoroutine() {
+ os.Exit(1)
+ }
+ os.Exit(v)
+}
+
+func (e e2eRunner) BeforeTest(t testing.TB) {
+ BeforeTest(t)
+}
+
+func (e e2eRunner) NewCluster(ctx context.Context, t testing.TB, opts ...config.ClusterOption) intf.Cluster {
+ cfg := config.NewClusterConfig(opts...)
+ e2eConfig := NewConfig(
+ WithClusterSize(cfg.ClusterSize),
+ WithQuotaBackendBytes(cfg.QuotaBackendBytes),
+ WithStrictReconfigCheck(cfg.StrictReconfigCheck),
+ WithAuthTokenOpts(cfg.AuthToken),
+ WithSnapshotCount(cfg.SnapshotCount),
+ )
+
+ if cfg.ClusterContext != nil {
+ e2eClusterCtx := cfg.ClusterContext.(*ClusterContext)
+ e2eConfig.Version = e2eClusterCtx.Version
+ }
+
+ switch cfg.ClientTLS {
+ case config.NoTLS:
+ e2eConfig.Client.ConnectionType = ClientNonTLS
+ case config.AutoTLS:
+ e2eConfig.Client.AutoTLS = true
+ e2eConfig.Client.ConnectionType = ClientTLS
+ case config.ManualTLS:
+ e2eConfig.Client.AutoTLS = false
+ e2eConfig.Client.ConnectionType = ClientTLS
+ default:
+ t.Fatalf("ClientTLS config %q not supported", cfg.ClientTLS)
+ }
+ switch cfg.PeerTLS {
+ case config.NoTLS:
+ e2eConfig.IsPeerTLS = false
+ e2eConfig.IsPeerAutoTLS = false
+ case config.AutoTLS:
+ e2eConfig.IsPeerTLS = true
+ e2eConfig.IsPeerAutoTLS = true
+ case config.ManualTLS:
+ e2eConfig.IsPeerTLS = true
+ e2eConfig.IsPeerAutoTLS = false
+ default:
+ t.Fatalf("PeerTLS config %q not supported", cfg.PeerTLS)
+ }
+ epc, err := NewEtcdProcessCluster(ctx, t, WithConfig(e2eConfig))
+ if err != nil {
+ t.Fatalf("could not start etcd integrationCluster: %s", err)
+ }
+ return &e2eCluster{t, *epc}
+}
+
+type e2eCluster struct {
+ t testing.TB
+ EtcdProcessCluster
+}
+
+func (c *e2eCluster) Client(opts ...config.ClientOption) (intf.Client, error) {
+ etcdctl, err := NewEtcdctl(c.Cfg.Client, c.EndpointsGRPC(), opts...)
+ return e2eClient{etcdctl}, err
+}
+
+func (c *e2eCluster) Endpoints() []string {
+ return c.EndpointsGRPC()
+}
+
+func (c *e2eCluster) Members() (ms []intf.Member) {
+ for _, proc := range c.EtcdProcessCluster.Procs {
+ ms = append(ms, e2eMember{EtcdProcess: proc, Cfg: c.Cfg})
+ }
+ return ms
+}
+
+type e2eClient struct {
+ *EtcdctlV3
+}
+
+type e2eMember struct {
+ EtcdProcess
+ Cfg *EtcdProcessClusterConfig
+}
+
+func (m e2eMember) Client() intf.Client {
+ etcdctl, err := NewEtcdctl(m.Cfg.Client, m.EndpointsGRPC())
+ if err != nil {
+ panic(err)
+ }
+ return e2eClient{etcdctl}
+}
+
+func (m e2eMember) Start(ctx context.Context) error {
+ return m.EtcdProcess.Start(ctx)
+}
+
+func (m e2eMember) Stop() {
+ m.EtcdProcess.Stop()
+}
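+
+// Usage sketch (illustrative only; assumes the interfaces package's Cluster
+// Close method): a common test package wires the runner in through TestMain
+// and then builds clusters via the framework-agnostic config options:
+//
+//	var testRunner = NewE2eRunner()
+//
+//	func TestMain(m *testing.M) {
+//		testRunner.TestMain(m)
+//	}
+//
+//	func TestSomething(t *testing.T) {
+//		testRunner.BeforeTest(t)
+//		clus := testRunner.NewCluster(context.Background(), t, config.WithClusterSize(1))
+//		defer clus.Close()
+//	}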
diff --git a/tests/framework/e2e/e2e_test.go b/tests/framework/e2e/e2e_test.go
new file mode 100644
index 00000000000..00059df81ba
--- /dev/null
+++ b/tests/framework/e2e/e2e_test.go
@@ -0,0 +1,39 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "encoding/json"
+ "testing"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+)
+
+func Test_AddTxnResponse(t *testing.T) {
+ jsonData := `{"header":{"cluster_id":238453183653593855,"member_id":14578408409545168728,"revision":3,"raft_term":2},"succeeded":true,"responses":[{"Response":{"response_range":{"header":{"revision":3},"kvs":[{"key":"a2V5MQ==","create_revision":2,"mod_revision":2,"version":1,"value":"dmFsdWUx"}],"count":1}}},{"Response":{"response_range":{"header":{"revision":3},"kvs":[{"key":"a2V5Mg==","create_revision":3,"mod_revision":3,"version":1,"value":"dmFsdWUy"}],"count":1}}}]}`
+ var resp clientv3.TxnResponse
+ AddTxnResponse(&resp, jsonData)
+ err := json.Unmarshal([]byte(jsonData), &resp)
+ if err != nil {
+ t.Errorf("json Unmarshal failed. err: %s", err)
+ }
+ enc, err := json.Marshal(resp)
+ if err != nil {
+ t.Errorf("json Marshal failed. err: %s", err)
+ }
+ if string(enc) != jsonData {
+ t.Error("could not get original message after encoding")
+ }
+}
diff --git a/tests/framework/e2e/etcd_process.go b/tests/framework/e2e/etcd_process.go
new file mode 100644
index 00000000000..445ea26e94c
--- /dev/null
+++ b/tests/framework/e2e/etcd_process.go
@@ -0,0 +1,533 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/pkg/v3/proxy"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+)
+
+var EtcdServerReadyLines = []string{"ready to serve client requests"}
+
+// EtcdProcess is a process that serves etcd requests.
+type EtcdProcess interface {
+ EndpointsGRPC() []string
+ EndpointsHTTP() []string
+ EndpointsMetrics() []string
+ Etcdctl(opts ...config.ClientOption) *EtcdctlV3
+
+ IsRunning() bool
+ Wait(ctx context.Context) error
+ Start(ctx context.Context) error
+ Restart(ctx context.Context) error
+ Stop() error
+ Close() error
+ Config() *EtcdServerProcessConfig
+ PeerProxy() proxy.Server
+ Failpoints() *BinaryFailpoints
+ LazyFS() *LazyFS
+ Logs() LogsExpect
+ Kill() error
+}
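+
+// Illustrative sketch only (not part of the framework): a test typically drives
+// an EtcdProcess roughly as follows, assuming proc is obtained from a cluster
+// and ctx/t come from the test:
+//
+//	if err := proc.Start(ctx); err != nil {
+//		t.Fatal(err)
+//	}
+//	ctl := proc.Etcdctl() // etcdctl wrapper bound to this member's gRPC endpoint
+//	_ = ctl               // ... issue requests ...
+//	if err := proc.Stop(); err != nil { // stop the process, keep the data dir
+//		t.Fatal(err)
+//	}
+//	if err := proc.Close(); err != nil { // stop and remove the data dir (unless KeepDataDir)
+//		t.Fatal(err)
+//	}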
+
+type LogsExpect interface {
+ ExpectWithContext(context.Context, expect.ExpectedResponse) (string, error)
+ Lines() []string
+ LineCount() int
+}
+
+type EtcdServerProcess struct {
+ cfg *EtcdServerProcessConfig
+ proc *expect.ExpectProcess
+ proxy proxy.Server
+ lazyfs *LazyFS
+ failpoints *BinaryFailpoints
+ donec chan struct{} // closed when waitReady returns (see Start)
+}
+
+type EtcdServerProcessConfig struct {
+ lg *zap.Logger
+ ExecPath string
+ Args []string
+ TLSArgs []string
+ EnvVars map[string]string
+
+ Client ClientConfig
+ DataDirPath string
+ KeepDataDir bool
+
+ Name string
+
+ PeerURL url.URL
+ ClientURL string
+ ClientHTTPURL string
+ MetricsURL string
+
+ InitialToken string
+ InitialCluster string
+ GoFailPort int
+ GoFailClientTimeout time.Duration
+
+ LazyFSEnabled bool
+ Proxy *proxy.ServerConfig
+}
+
+func NewEtcdServerProcess(t testing.TB, cfg *EtcdServerProcessConfig) (*EtcdServerProcess, error) {
+ if !fileutil.Exist(cfg.ExecPath) {
+ return nil, fmt.Errorf("could not find etcd binary: %s", cfg.ExecPath)
+ }
+ if !cfg.KeepDataDir {
+ if err := os.RemoveAll(cfg.DataDirPath); err != nil {
+ return nil, err
+ }
+ if err := os.Mkdir(cfg.DataDirPath, 0o700); err != nil {
+ return nil, err
+ }
+ }
+ ep := &EtcdServerProcess{cfg: cfg, donec: make(chan struct{})}
+ if cfg.GoFailPort != 0 {
+ ep.failpoints = &BinaryFailpoints{
+ member: ep,
+ clientTimeout: cfg.GoFailClientTimeout,
+ }
+ }
+ if cfg.LazyFSEnabled {
+ ep.lazyfs = newLazyFS(cfg.lg, cfg.DataDirPath, t)
+ }
+ return ep, nil
+}
+
+func (ep *EtcdServerProcess) EndpointsGRPC() []string { return []string{ep.cfg.ClientURL} }
+func (ep *EtcdServerProcess) EndpointsHTTP() []string {
+ if ep.cfg.ClientHTTPURL == "" {
+ return []string{ep.cfg.ClientURL}
+ }
+ return []string{ep.cfg.ClientHTTPURL}
+}
+func (ep *EtcdServerProcess) EndpointsMetrics() []string { return []string{ep.cfg.MetricsURL} }
+
+func (ep *EtcdServerProcess) Etcdctl(opts ...config.ClientOption) *EtcdctlV3 {
+ etcdctl, err := NewEtcdctl(ep.Config().Client, ep.EndpointsGRPC(), opts...)
+ if err != nil {
+ panic(err)
+ }
+ return etcdctl
+}
+
+func (ep *EtcdServerProcess) Start(ctx context.Context) error {
+ ep.donec = make(chan struct{})
+ if ep.proc != nil {
+ panic("already started")
+ }
+ if ep.cfg.Proxy != nil && ep.proxy == nil {
+ ep.cfg.lg.Info("starting proxy...", zap.String("name", ep.cfg.Name), zap.String("from", ep.cfg.Proxy.From.String()), zap.String("to", ep.cfg.Proxy.To.String()))
+ ep.proxy = proxy.NewServer(*ep.cfg.Proxy)
+ select {
+ case <-ep.proxy.Ready():
+ case err := <-ep.proxy.Error():
+ return err
+ }
+ }
+ if ep.lazyfs != nil {
+ ep.cfg.lg.Info("starting lazyfs...", zap.String("name", ep.cfg.Name))
+ err := ep.lazyfs.Start(ctx)
+ if err != nil {
+ return err
+ }
+ }
+
+ ep.cfg.lg.Info("starting server...", zap.String("name", ep.cfg.Name))
+ proc, err := SpawnCmdWithLogger(ep.cfg.lg, append([]string{ep.cfg.ExecPath}, ep.cfg.Args...), ep.cfg.EnvVars, ep.cfg.Name)
+ if err != nil {
+ return err
+ }
+ ep.proc = proc
+ err = ep.waitReady(ctx)
+ if err == nil {
+ ep.cfg.lg.Info("started server.", zap.String("name", ep.cfg.Name), zap.Int("pid", ep.proc.Pid()))
+ }
+ return err
+}
+
+func (ep *EtcdServerProcess) Restart(ctx context.Context) error {
+ ep.cfg.lg.Info("restarting server...", zap.String("name", ep.cfg.Name))
+ if err := ep.Stop(); err != nil {
+ return err
+ }
+ err := ep.Start(ctx)
+ if err == nil {
+ ep.cfg.lg.Info("restarted server", zap.String("name", ep.cfg.Name))
+ }
+ return err
+}
+
+func (ep *EtcdServerProcess) Stop() (err error) {
+ if ep == nil || ep.proc == nil {
+ return nil
+ }
+
+ ep.cfg.lg.Info("stopping server...", zap.String("name", ep.cfg.Name))
+
+ defer func() {
+ ep.proc = nil
+ }()
+
+ err = ep.proc.Stop()
+ if err != nil {
+ return err
+ }
+ err = ep.proc.Close()
+ if err != nil && !strings.Contains(err.Error(), "unexpected exit code") {
+ return err
+ }
+ <-ep.donec
+ ep.donec = make(chan struct{})
+ if ep.cfg.PeerURL.Scheme == "unix" || ep.cfg.PeerURL.Scheme == "unixs" {
+ err = os.Remove(ep.cfg.PeerURL.Host + ep.cfg.PeerURL.Path)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ ep.cfg.lg.Info("stopped server.", zap.String("name", ep.cfg.Name))
+ if ep.proxy != nil {
+ ep.cfg.lg.Info("stopping proxy...", zap.String("name", ep.cfg.Name))
+ err = ep.proxy.Close()
+ ep.proxy = nil
+ if err != nil {
+ return err
+ }
+ }
+ if ep.lazyfs != nil {
+ ep.cfg.lg.Info("stopping lazyfs...", zap.String("name", ep.cfg.Name))
+ err = ep.lazyfs.Stop()
+ ep.lazyfs = nil
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (ep *EtcdServerProcess) Close() error {
+ ep.cfg.lg.Info("closing server...", zap.String("name", ep.cfg.Name))
+ if err := ep.Stop(); err != nil {
+ return err
+ }
+
+ if !ep.cfg.KeepDataDir {
+ ep.cfg.lg.Info("removing directory", zap.String("data-dir", ep.cfg.DataDirPath))
+ return os.RemoveAll(ep.cfg.DataDirPath)
+ }
+ return nil
+}
+
+func (ep *EtcdServerProcess) waitReady(ctx context.Context) error {
+ defer close(ep.donec)
+ err := WaitReadyExpectProc(ctx, ep.proc, EtcdServerReadyLines)
+ if err != nil {
+ return fmt.Errorf("failed to find etcd ready lines %q, err: %w", EtcdServerReadyLines, err)
+ }
+ return nil
+}
+
+func (ep *EtcdServerProcess) Config() *EtcdServerProcessConfig { return ep.cfg }
+
+func (ep *EtcdServerProcess) Logs() LogsExpect {
+ if ep.proc == nil {
+ ep.cfg.lg.Panic("Please grab logs before process is stopped")
+ }
+ return ep.proc
+}
+
+func (ep *EtcdServerProcess) Kill() error {
+ ep.cfg.lg.Info("killing server...", zap.String("name", ep.cfg.Name))
+ return ep.proc.Signal(syscall.SIGKILL)
+}
+
+func (ep *EtcdServerProcess) Wait(ctx context.Context) error {
+ ch := make(chan struct{})
+ go func() {
+ defer close(ch)
+ if ep.proc != nil {
+ ep.proc.Wait()
+
+ exitCode, exitErr := ep.proc.ExitCode()
+
+ ep.cfg.lg.Info("server exited",
+ zap.String("name", ep.cfg.Name),
+ zap.Int("code", exitCode),
+ zap.Error(exitErr),
+ )
+ }
+ }()
+ select {
+ case <-ch:
+ ep.proc = nil
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+func (ep *EtcdServerProcess) IsRunning() bool {
+ if ep.proc == nil {
+ return false
+ }
+
+ exitCode, err := ep.proc.ExitCode()
+ if errors.Is(err, expect.ErrProcessRunning) {
+ return true
+ }
+
+ ep.cfg.lg.Info("server exited",
+ zap.String("name", ep.cfg.Name),
+ zap.Int("code", exitCode),
+ zap.Error(err))
+ ep.proc = nil
+ return false
+}
+
+func AssertProcessLogs(t *testing.T, ep EtcdProcess, expectLog string) {
+ t.Helper()
+ var err error
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ _, err = ep.Logs().ExpectWithContext(ctx, expect.ExpectedResponse{Value: expectLog})
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func (ep *EtcdServerProcess) PeerProxy() proxy.Server {
+ return ep.proxy
+}
+
+func (ep *EtcdServerProcess) LazyFS() *LazyFS {
+ return ep.lazyfs
+}
+
+func (ep *EtcdServerProcess) Failpoints() *BinaryFailpoints {
+ return ep.failpoints
+}
+
+type BinaryFailpoints struct {
+ member EtcdProcess
+ availableCache map[string]string
+ clientTimeout time.Duration
+}
+
+func (f *BinaryFailpoints) SetupEnv(failpoint, payload string) error {
+ if f.member.IsRunning() {
+ return errors.New("cannot setup environment variable while process is running")
+ }
+ f.member.Config().EnvVars["GOFAIL_FAILPOINTS"] = fmt.Sprintf("%s=%s", failpoint, payload)
+ return nil
+}
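+
+// Illustrative only: SetupEnv causes the next Start to launch the member with an
+// environment entry of the form GOFAIL_FAILPOINTS="<failpoint>=<payload>"; for a
+// hypothetical failpoint "myFailpoint" with payload "panic" that would be
+// GOFAIL_FAILPOINTS="myFailpoint=panic".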
+
+func (f *BinaryFailpoints) SetupHTTP(ctx context.Context, failpoint, payload string) error {
+ host := fmt.Sprintf("127.0.0.1:%d", f.member.Config().GoFailPort)
+ failpointURL := url.URL{
+ Scheme: "http",
+ Host: host,
+ Path: failpoint,
+ }
+ r, err := http.NewRequestWithContext(ctx, http.MethodPut, failpointURL.String(), bytes.NewBuffer([]byte(payload)))
+ if err != nil {
+ return err
+ }
+ httpClient := http.Client{
+ Timeout: 1 * time.Second,
+ }
+ if f.clientTimeout != 0 {
+ httpClient.Timeout = f.clientTimeout
+ }
+ resp, err := httpClient.Do(r)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusNoContent {
+ errMsg, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return fmt.Errorf("bad status code: %d, err: %w", resp.StatusCode, err)
+ }
+ return fmt.Errorf("bad status code: %d, err: %s", resp.StatusCode, errMsg)
+ }
+ return nil
+}
+
+func (f *BinaryFailpoints) DeactivateHTTP(ctx context.Context, failpoint string) error {
+ host := fmt.Sprintf("127.0.0.1:%d", f.member.Config().GoFailPort)
+ failpointURL := url.URL{
+ Scheme: "http",
+ Host: host,
+ Path: failpoint,
+ }
+ r, err := http.NewRequestWithContext(ctx, http.MethodDelete, failpointURL.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpClient := http.Client{
+ Timeout: time.Second,
+ }
+ if f.clientTimeout != 0 {
+ httpClient.Timeout = f.clientTimeout
+ }
+ resp, err := httpClient.Do(r)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusNoContent {
+ errMsg, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return fmt.Errorf("bad status code: %d, err: %w", resp.StatusCode, err)
+ }
+ return fmt.Errorf("bad status code: %d, err: %s", resp.StatusCode, errMsg)
+ }
+ return nil
+}
+
+func (f *BinaryFailpoints) Enabled() bool {
+ _, err := failpoints(f.member)
+ return err == nil
+}
+
+func (f *BinaryFailpoints) Available(failpoint string) bool {
+ if f.availableCache == nil {
+ fs, err := failpoints(f.member)
+ if err != nil {
+ panic(err)
+ }
+ f.availableCache = fs
+ }
+ _, found := f.availableCache[failpoint]
+ return found
+}
+
+func failpoints(member EtcdProcess) (map[string]string, error) {
+ body, err := fetchFailpointsBody(member)
+ if err != nil {
+ return nil, err
+ }
+ defer body.Close()
+ return parseFailpointsBody(body)
+}
+
+func fetchFailpointsBody(member EtcdProcess) (io.ReadCloser, error) {
+ address := fmt.Sprintf("127.0.0.1:%d", member.Config().GoFailPort)
+ failpointURL := url.URL{
+ Scheme: "http",
+ Host: address,
+ }
+ resp, err := http.Get(failpointURL.String())
+ if err != nil {
+ return nil, err
+ }
+ if resp.StatusCode != http.StatusOK {
+ defer resp.Body.Close()
+ errMsg, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("invalid status code: %d, err: %w", resp.StatusCode, err)
+ }
+ return nil, fmt.Errorf("invalid status code: %d, err:%s", resp.StatusCode, errMsg)
+ }
+ return resp.Body, nil
+}
+
+func parseFailpointsBody(body io.Reader) (map[string]string, error) {
+ data, err := io.ReadAll(body)
+ if err != nil {
+ return nil, err
+ }
+ lines := strings.Split(string(data), "\n")
+ failpoints := map[string]string{}
+ for _, line := range lines {
+ // Format:
+ // failpoint=value
+ parts := strings.SplitN(line, "=", 2)
+ failpoint := parts[0]
+ var value string
+ if len(parts) == 2 {
+ value = parts[1]
+ }
+ failpoints[failpoint] = value
+ }
+ return failpoints, nil
+}
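+
+// For reference, the gofail HTTP endpoint lists one failpoint per line in the
+// form "name=value" (the value may be empty). A body such as
+//
+//	someFailpoint=
+//	anotherFailpoint=panic
+//
+// parses into {"someFailpoint": "", "anotherFailpoint": "panic"}. The failpoint
+// names above are hypothetical and only illustrate the format parsed here.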
+
+var GetVersionFromBinary = func(binaryPath string) (*semver.Version, error) {
+ if !fileutil.Exist(binaryPath) {
+ return nil, fmt.Errorf("binary path does not exist: %s", binaryPath)
+ }
+ lines, err := RunUtilCompletion([]string{binaryPath, "--version"}, nil)
+ if err != nil {
+ return nil, fmt.Errorf("could not find binary version from %s, err: %w", binaryPath, err)
+ }
+
+ for _, line := range lines {
+ if strings.HasPrefix(line, "etcd Version:") {
+ versionString := strings.TrimSpace(strings.SplitAfter(line, ":")[1])
+ version, err := semver.NewVersion(versionString)
+ if err != nil {
+ return nil, err
+ }
+ return &semver.Version{
+ Major: version.Major,
+ Minor: version.Minor,
+ Patch: version.Patch,
+ }, nil
+ }
+ }
+
+ return nil, fmt.Errorf("could not find version in binary output of %s, lines outputted were %v", binaryPath, lines)
+}
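+
+// Illustrative example: for `etcd --version` output containing a line such as
+// "etcd Version: 3.5.14" (the exact version is just an example), the function
+// returns &semver.Version{Major: 3, Minor: 5, Patch: 14}; any pre-release or
+// metadata suffix is dropped because only Major/Minor/Patch are copied.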
+
+// setGetVersionFromBinary changes the GetVersionFromBinary function to a mock in testing.
+func setGetVersionFromBinary(tb testing.TB, f func(binaryPath string) (*semver.Version, error)) {
+ origGetVersionFromBinary := GetVersionFromBinary
+ GetVersionFromBinary = f
+ tb.Cleanup(func() {
+ GetVersionFromBinary = origGetVersionFromBinary
+ })
+}
+
+func CouldSetSnapshotCatchupEntries(execPath string) bool {
+ v, err := GetVersionFromBinary(execPath)
+ if err != nil {
+ return false
+ }
+ // snapshot-catchup-entries flag was backported in https://github.com/etcd-io/etcd/pull/17808
+ v3_5_14 := semver.Version{Major: 3, Minor: 5, Patch: 14}
+ return v.Compare(v3_5_14) >= 0
+}
diff --git a/tests/framework/e2e/etcd_spawn.go b/tests/framework/e2e/etcd_spawn.go
new file mode 100644
index 00000000000..73386de7ddf
--- /dev/null
+++ b/tests/framework/e2e/etcd_spawn.go
@@ -0,0 +1,47 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "os"
+ "strings"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/pkg/v3/expect"
+)
+
+func SpawnCmd(args []string, envVars map[string]string) (*expect.ExpectProcess, error) {
+ return SpawnNamedCmd(strings.Join(args, "_"), args, envVars)
+}
+
+func SpawnNamedCmd(processName string, args []string, envVars map[string]string) (*expect.ExpectProcess, error) {
+ return SpawnCmdWithLogger(zap.NewNop(), args, envVars, processName)
+}
+
+func SpawnCmdWithLogger(lg *zap.Logger, args []string, envVars map[string]string, name string) (*expect.ExpectProcess, error) {
+ wd, err := os.Getwd()
+ if err != nil {
+ return nil, err
+ }
+
+ env := mergeEnvVariables(envVars)
+ lg.Info("spawning process",
+ zap.Strings("args", args),
+ zap.String("working-dir", wd),
+ zap.String("name", name),
+ zap.Strings("environment-variables", env))
+ return expect.NewExpectWithEnv(args[0], args[1:], env, name)
+}
diff --git a/tests/framework/e2e/etcdctl.go b/tests/framework/e2e/etcdctl.go
new file mode 100644
index 00000000000..d0c8dc14c72
--- /dev/null
+++ b/tests/framework/e2e/etcdctl.go
@@ -0,0 +1,732 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc"
+
+ "go.etcd.io/etcd/api/v3/authpb"
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+)
+
+type EtcdctlV3 struct {
+ cfg ClientConfig
+ endpoints []string
+ authConfig clientv3.AuthConfig
+}
+
+func NewEtcdctl(cfg ClientConfig, endpoints []string, opts ...config.ClientOption) (*EtcdctlV3, error) {
+ ctl := &EtcdctlV3{
+ cfg: cfg,
+ endpoints: endpoints,
+ }
+
+ for _, opt := range opts {
+ opt(ctl)
+ }
+
+ if !ctl.authConfig.Empty() {
+ client, err := clientv3.New(clientv3.Config{
+ Endpoints: ctl.endpoints,
+ DialTimeout: 5 * time.Second,
+ DialOptions: []grpc.DialOption{grpc.WithBlock()},
+ Username: ctl.authConfig.Username,
+ Password: ctl.authConfig.Password,
+ })
+ if err != nil {
+ return nil, err
+ }
+ client.Close()
+ }
+
+ return ctl, nil
+}
+
+func WithAuth(userName, password string) config.ClientOption {
+ return func(c any) {
+ ctl := c.(*EtcdctlV3)
+ ctl.authConfig.Username = userName
+ ctl.authConfig.Password = password
+ }
+}
+
+func WithEndpoints(endpoints []string) config.ClientOption {
+ return func(c any) {
+ ctl := c.(*EtcdctlV3)
+ ctl.endpoints = endpoints
+ }
+}
+
+func (ctl *EtcdctlV3) DowngradeEnable(ctx context.Context, version string) error {
+ _, err := SpawnWithExpectLines(ctx, ctl.cmdArgs("downgrade", "enable", version), nil, expect.ExpectedResponse{Value: "Downgrade enable success"})
+ return err
+}
+
+func (ctl *EtcdctlV3) Get(ctx context.Context, key string, o config.GetOptions) (*clientv3.GetResponse, error) {
+ resp := clientv3.GetResponse{}
+ var args []string
+ if o.Timeout != 0 {
+ args = append(args, fmt.Sprintf("--command-timeout=%s", o.Timeout))
+ }
+ if o.Serializable {
+ args = append(args, "--consistency", "s")
+ }
+ args = append(args, "get", key, "-w", "json")
+ if o.End != "" {
+ args = append(args, o.End)
+ }
+ if o.Revision != 0 {
+ args = append(args, fmt.Sprintf("--rev=%d", o.Revision))
+ }
+ if o.Prefix {
+ args = append(args, "--prefix")
+ }
+ if o.Limit != 0 {
+ args = append(args, fmt.Sprintf("--limit=%d", o.Limit))
+ }
+ if o.FromKey {
+ args = append(args, "--from-key")
+ }
+ if o.CountOnly {
+ args = append(args, "-w", "fields", "--count-only")
+ } else {
+ args = append(args, "-w", "json")
+ }
+ switch o.SortBy {
+ case clientv3.SortByCreateRevision:
+ args = append(args, "--sort-by=CREATE")
+ case clientv3.SortByModRevision:
+ args = append(args, "--sort-by=MODIFY")
+ case clientv3.SortByValue:
+ args = append(args, "--sort-by=VALUE")
+ case clientv3.SortByVersion:
+ args = append(args, "--sort-by=VERSION")
+ case clientv3.SortByKey:
+ // nothing
+ default:
+ return nil, fmt.Errorf("bad sort target %v", o.SortBy)
+ }
+ switch o.Order {
+ case clientv3.SortAscend:
+ args = append(args, "--order=ASCEND")
+ case clientv3.SortDescend:
+ args = append(args, "--order=DESCEND")
+ case clientv3.SortNone:
+ // nothing
+ default:
+ return nil, fmt.Errorf("bad sort order %v", o.Order)
+ }
+ if o.CountOnly {
+ cmd, err := SpawnCmd(ctl.cmdArgs(args...), nil)
+ if err != nil {
+ return nil, err
+ }
+ defer cmd.Close()
+ _, err = cmd.ExpectWithContext(ctx, expect.ExpectedResponse{Value: "Count"})
+ return &resp, err
+ }
+ err := ctl.spawnJSONCmd(ctx, &resp, args...)
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) Put(ctx context.Context, key, value string, opts config.PutOptions) error {
+ args := ctl.cmdArgs()
+ args = append(args, "put", key, value)
+ if opts.LeaseID != 0 {
+ args = append(args, "--lease", strconv.FormatInt(int64(opts.LeaseID), 16))
+ }
+ if opts.Timeout != 0 {
+ args = append(args, fmt.Sprintf("--command-timeout=%s", opts.Timeout))
+ }
+ _, err := SpawnWithExpectLines(ctx, args, nil, expect.ExpectedResponse{Value: "OK"})
+ return err
+}
+
+func (ctl *EtcdctlV3) Delete(ctx context.Context, key string, o config.DeleteOptions) (*clientv3.DeleteResponse, error) {
+ args := []string{"del", key}
+ if o.End != "" {
+ args = append(args, o.End)
+ }
+ if o.Prefix {
+ args = append(args, "--prefix")
+ }
+ if o.FromKey {
+ args = append(args, "--from-key")
+ }
+ var resp clientv3.DeleteResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, args...)
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) Txn(ctx context.Context, compares, ifSuccess, ifFail []string, o config.TxnOptions) (*clientv3.TxnResponse, error) {
+ args := ctl.cmdArgs()
+ args = append(args, "txn")
+ if o.Interactive {
+ args = append(args, "--interactive")
+ }
+ args = append(args, "-w", "json", "--hex=true")
+ cmd, err := SpawnCmd(args, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer cmd.Close()
+ _, err = cmd.ExpectWithContext(ctx, expect.ExpectedResponse{Value: "compares:"})
+ if err != nil {
+ return nil, err
+ }
+ for _, cmp := range compares {
+ if err = cmd.Send(cmp + "\r"); err != nil {
+ return nil, err
+ }
+ }
+ if err = cmd.Send("\r"); err != nil {
+ return nil, err
+ }
+ _, err = cmd.ExpectWithContext(ctx, expect.ExpectedResponse{Value: "success requests (get, put, del):"})
+ if err != nil {
+ return nil, err
+ }
+ for _, req := range ifSuccess {
+ if err = cmd.Send(req + "\r"); err != nil {
+ return nil, err
+ }
+ }
+ if err = cmd.Send("\r"); err != nil {
+ return nil, err
+ }
+
+ _, err = cmd.ExpectWithContext(ctx, expect.ExpectedResponse{Value: "failure requests (get, put, del):"})
+ if err != nil {
+ return nil, err
+ }
+ for _, req := range ifFail {
+ if err = cmd.Send(req + "\r"); err != nil {
+ return nil, err
+ }
+ }
+ if err = cmd.Send("\r"); err != nil {
+ return nil, err
+ }
+ var line string
+ line, err = cmd.ExpectWithContext(ctx, expect.ExpectedResponse{Value: "header"})
+ if err != nil {
+ return nil, err
+ }
+ var resp clientv3.TxnResponse
+ AddTxnResponse(&resp, line)
+ err = json.Unmarshal([]byte(line), &resp)
+ return &resp, err
+}
+
+// AddTxnResponse looks for ResponseOp json tags and adds the objects for json decoding
+func AddTxnResponse(resp *clientv3.TxnResponse, jsonData string) {
+ if resp == nil {
+ return
+ }
+ if resp.Responses == nil {
+ resp.Responses = []*etcdserverpb.ResponseOp{}
+ }
+ jd := json.NewDecoder(strings.NewReader(jsonData))
+ for {
+ t, e := jd.Token()
+ if errors.Is(e, io.EOF) {
+ break
+ }
+ if t == "response_range" {
+ resp.Responses = append(resp.Responses, &etcdserverpb.ResponseOp{
+ Response: &etcdserverpb.ResponseOp_ResponseRange{},
+ })
+ }
+ if t == "response_put" {
+ resp.Responses = append(resp.Responses, &etcdserverpb.ResponseOp{
+ Response: &etcdserverpb.ResponseOp_ResponsePut{},
+ })
+ }
+ if t == "response_delete_range" {
+ resp.Responses = append(resp.Responses, &etcdserverpb.ResponseOp{
+ Response: &etcdserverpb.ResponseOp_ResponseDeleteRange{},
+ })
+ }
+ if t == "response_txn" {
+ resp.Responses = append(resp.Responses, &etcdserverpb.ResponseOp{
+ Response: &etcdserverpb.ResponseOp_ResponseTxn{},
+ })
+ }
+ }
+}
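+
+// Usage sketch (illustrative only): AddTxnResponse is meant to be called on the
+// raw JSON line before json.Unmarshal so the decoder has concrete ResponseOp
+// wrappers to fill in:
+//
+//	var resp clientv3.TxnResponse
+//	AddTxnResponse(&resp, line)
+//	if err := json.Unmarshal([]byte(line), &resp); err != nil {
+//		// handle decode error
+//	}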
+
+func (ctl *EtcdctlV3) MemberList(ctx context.Context, serializable bool) (*clientv3.MemberListResponse, error) {
+ var resp clientv3.MemberListResponse
+ args := []string{"member", "list"}
+ if serializable {
+ args = append(args, "--consistency", "s")
+ }
+ err := ctl.spawnJSONCmd(ctx, &resp, args...)
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) MemberAdd(ctx context.Context, name string, peerAddrs []string) (*clientv3.MemberAddResponse, error) {
+ var resp clientv3.MemberAddResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, "member", "add", name, "--peer-urls", strings.Join(peerAddrs, ","))
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) MemberAddAsLearner(ctx context.Context, name string, peerAddrs []string) (*clientv3.MemberAddResponse, error) {
+ var resp clientv3.MemberAddResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, "member", "add", name, "--learner", "--peer-urls", strings.Join(peerAddrs, ","))
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) MemberRemove(ctx context.Context, id uint64) (*clientv3.MemberRemoveResponse, error) {
+ var resp clientv3.MemberRemoveResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, "member", "remove", fmt.Sprintf("%x", id))
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) MemberPromote(ctx context.Context, id uint64) (*clientv3.MemberPromoteResponse, error) {
+ var resp clientv3.MemberPromoteResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, "member", "promote", fmt.Sprintf("%x", id))
+ return &resp, err
+}
+
+// MoveLeader requests current leader to transfer its leadership to the transferee.
+// Request must be made to the leader.
+func (ctl *EtcdctlV3) MoveLeader(ctx context.Context, transfereeID uint64) error {
+ _, err := SpawnWithExpectLines(ctx, ctl.cmdArgs("move-leader", fmt.Sprintf("%x", transfereeID)), nil, expect.ExpectedResponse{Value: "Leadership transferred"})
+ return err
+}
+
+func (ctl *EtcdctlV3) cmdArgs(args ...string) []string {
+ cmdArgs := []string{BinPath.Etcdctl}
+ for k, v := range ctl.flags() {
+ cmdArgs = append(cmdArgs, fmt.Sprintf("--%s=%s", k, v))
+ }
+ return append(cmdArgs, args...)
+}
+
+func (ctl *EtcdctlV3) flags() map[string]string {
+ fmap := make(map[string]string)
+ if ctl.cfg.ConnectionType == ClientTLS {
+ if ctl.cfg.AutoTLS {
+ fmap["insecure-transport"] = "false"
+ fmap["insecure-skip-tls-verify"] = "true"
+ } else if ctl.cfg.RevokeCerts {
+ fmap["cacert"] = CaPath
+ fmap["cert"] = RevokedCertPath
+ fmap["key"] = RevokedPrivateKeyPath
+ } else {
+ fmap["cacert"] = CaPath
+ fmap["cert"] = CertPath
+ fmap["key"] = PrivateKeyPath
+ }
+ }
+ fmap["endpoints"] = strings.Join(ctl.endpoints, ",")
+ if !ctl.authConfig.Empty() {
+ fmap["user"] = ctl.authConfig.Username + ":" + ctl.authConfig.Password
+ }
+ return fmap
+}
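+
+// For illustration only: with manual TLS the flags above expand to an etcdctl
+// invocation shaped roughly like (placeholders in angle brackets)
+//
+//	etcdctl --cacert=<CaPath> --cert=<CertPath> --key=<PrivateKeyPath> --endpoints=<ep1,ep2> <subcommand ...>
+//
+// Flag order is map-iteration dependent and therefore not deterministic.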
+
+func (ctl *EtcdctlV3) Compact(ctx context.Context, rev int64, o config.CompactOption) (*clientv3.CompactResponse, error) {
+ args := ctl.cmdArgs("compact", fmt.Sprint(rev))
+ if o.Timeout != 0 {
+ args = append(args, fmt.Sprintf("--command-timeout=%s", o.Timeout))
+ }
+ if o.Physical {
+ args = append(args, "--physical")
+ }
+
+ _, err := SpawnWithExpectLines(ctx, args, nil, expect.ExpectedResponse{Value: fmt.Sprintf("compacted revision %v", rev)})
+ return nil, err
+}
+
+func (ctl *EtcdctlV3) Status(ctx context.Context) ([]*clientv3.StatusResponse, error) {
+ var epStatus []*struct {
+ Endpoint string
+ Status *clientv3.StatusResponse
+ }
+ err := ctl.spawnJSONCmd(ctx, &epStatus, "endpoint", "status")
+ if err != nil {
+ return nil, err
+ }
+ resp := make([]*clientv3.StatusResponse, len(epStatus))
+ for i, e := range epStatus {
+ resp[i] = e.Status
+ }
+ return resp, err
+}
+
+func (ctl *EtcdctlV3) HashKV(ctx context.Context, rev int64) ([]*clientv3.HashKVResponse, error) {
+ var epHashKVs []*struct {
+ Endpoint string
+ HashKV *clientv3.HashKVResponse
+ }
+ err := ctl.spawnJSONCmd(ctx, &epHashKVs, "endpoint", "hashkv", "--rev", fmt.Sprint(rev))
+ if err != nil {
+ return nil, err
+ }
+ resp := make([]*clientv3.HashKVResponse, len(epHashKVs))
+ for i, e := range epHashKVs {
+ resp[i] = e.HashKV
+ }
+ return resp, err
+}
+
+func (ctl *EtcdctlV3) Health(ctx context.Context) error {
+ args := ctl.cmdArgs()
+ args = append(args, "endpoint", "health")
+ lines := make([]expect.ExpectedResponse, len(ctl.endpoints))
+ for i := range lines {
+ lines[i] = expect.ExpectedResponse{Value: "is healthy"}
+ }
+ _, err := SpawnWithExpectLines(ctx, args, nil, lines...)
+ return err
+}
+
+func (ctl *EtcdctlV3) Grant(ctx context.Context, ttl int64) (*clientv3.LeaseGrantResponse, error) {
+ args := ctl.cmdArgs()
+ args = append(args, "lease", "grant", strconv.FormatInt(ttl, 10), "-w", "json")
+ cmd, err := SpawnCmd(args, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer cmd.Close()
+ var resp clientv3.LeaseGrantResponse
+ line, err := cmd.ExpectWithContext(ctx, expect.ExpectedResponse{Value: "ID"})
+ if err != nil {
+ return nil, err
+ }
+ err = json.Unmarshal([]byte(line), &resp)
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) TimeToLive(ctx context.Context, id clientv3.LeaseID, o config.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) {
+ args := ctl.cmdArgs()
+ args = append(args, "lease", "timetolive", strconv.FormatInt(int64(id), 16), "-w", "json")
+ if o.WithAttachedKeys {
+ args = append(args, "--keys")
+ }
+ cmd, err := SpawnCmd(args, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer cmd.Close()
+ var resp clientv3.LeaseTimeToLiveResponse
+ line, err := cmd.ExpectWithContext(ctx, expect.ExpectedResponse{Value: "id"})
+ if err != nil {
+ return nil, err
+ }
+ err = json.Unmarshal([]byte(line), &resp)
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) Defragment(ctx context.Context, o config.DefragOption) error {
+ args := append(ctl.cmdArgs(), "defrag")
+ if o.Timeout != 0 {
+ args = append(args, fmt.Sprintf("--command-timeout=%s", o.Timeout))
+ }
+ lines := make([]expect.ExpectedResponse, len(ctl.endpoints))
+ for i := range lines {
+ lines[i] = expect.ExpectedResponse{Value: "Finished defragmenting etcd member"}
+ }
+ _, err := SpawnWithExpectLines(ctx, args, map[string]string{}, lines...)
+ return err
+}
+
+func (ctl *EtcdctlV3) Leases(ctx context.Context) (*clientv3.LeaseLeasesResponse, error) {
+ args := ctl.cmdArgs("lease", "list", "-w", "json")
+ cmd, err := SpawnCmd(args, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer cmd.Close()
+ var resp clientv3.LeaseLeasesResponse
+ line, err := cmd.ExpectWithContext(ctx, expect.ExpectedResponse{Value: "id"})
+ if err != nil {
+ return nil, err
+ }
+ err = json.Unmarshal([]byte(line), &resp)
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) KeepAliveOnce(ctx context.Context, id clientv3.LeaseID) (*clientv3.LeaseKeepAliveResponse, error) {
+ args := ctl.cmdArgs("lease", "keep-alive", strconv.FormatInt(int64(id), 16), "--once", "-w", "json")
+ cmd, err := SpawnCmd(args, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer cmd.Close()
+ var resp clientv3.LeaseKeepAliveResponse
+ line, err := cmd.ExpectWithContext(ctx, expect.ExpectedResponse{Value: "ID"})
+ if err != nil {
+ return nil, err
+ }
+ err = json.Unmarshal([]byte(line), &resp)
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) Revoke(ctx context.Context, id clientv3.LeaseID) (*clientv3.LeaseRevokeResponse, error) {
+ var resp clientv3.LeaseRevokeResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, "lease", "revoke", strconv.FormatInt(int64(id), 16))
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) AlarmList(ctx context.Context) (*clientv3.AlarmResponse, error) {
+ var resp clientv3.AlarmResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, "alarm", "list")
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) AlarmDisarm(ctx context.Context, _ *clientv3.AlarmMember) (*clientv3.AlarmResponse, error) {
+ args := ctl.cmdArgs()
+ args = append(args, "alarm", "disarm", "-w", "json")
+ ep, err := SpawnCmd(args, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer ep.Close()
+ var resp clientv3.AlarmResponse
+ line, err := ep.ExpectWithContext(ctx, expect.ExpectedResponse{Value: "alarm"})
+ if err != nil {
+ return nil, err
+ }
+ err = json.Unmarshal([]byte(line), &resp)
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) AuthEnable(ctx context.Context) error {
+ args := []string{"auth", "enable"}
+ cmd, err := SpawnCmd(ctl.cmdArgs(args...), nil)
+ if err != nil {
+ return err
+ }
+ defer cmd.Close()
+
+ _, err = cmd.ExpectWithContext(ctx, expect.ExpectedResponse{Value: "Authentication Enabled"})
+ return err
+}
+
+func (ctl *EtcdctlV3) AuthDisable(ctx context.Context) error {
+ args := []string{"auth", "disable"}
+ cmd, err := SpawnCmd(ctl.cmdArgs(args...), nil)
+ if err != nil {
+ return err
+ }
+ defer cmd.Close()
+
+ _, err = cmd.ExpectWithContext(ctx, expect.ExpectedResponse{Value: "Authentication Disabled"})
+ return err
+}
+
+func (ctl *EtcdctlV3) AuthStatus(ctx context.Context) (*clientv3.AuthStatusResponse, error) {
+ var resp clientv3.AuthStatusResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, "auth", "status")
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) UserAdd(ctx context.Context, name, password string, opts config.UserAddOptions) (*clientv3.AuthUserAddResponse, error) {
+ args := ctl.cmdArgs()
+ args = append(args, "user", "add")
+ if password == "" {
+ args = append(args, name)
+ } else {
+ args = append(args, fmt.Sprintf("%s:%s", name, password))
+ }
+
+ if opts.NoPassword {
+ args = append(args, "--no-password")
+ }
+
+ args = append(args, "--interactive=false", "-w", "json")
+
+ cmd, err := SpawnCmd(args, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer cmd.Close()
+
+ // If no password is provided and NoPassword isn't set, the CLI will always
+ // wait for a password; send an enter in that case to submit an "empty" password.
+ if !opts.NoPassword && password == "" {
+ err = cmd.Send("\n")
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var resp clientv3.AuthUserAddResponse
+ line, err := cmd.ExpectWithContext(ctx, expect.ExpectedResponse{Value: "header"})
+ if err != nil {
+ return nil, err
+ }
+ err = json.Unmarshal([]byte(line), &resp)
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) UserGet(ctx context.Context, name string) (*clientv3.AuthUserGetResponse, error) {
+ var resp clientv3.AuthUserGetResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, "user", "get", name)
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) UserList(ctx context.Context) (*clientv3.AuthUserListResponse, error) {
+ var resp clientv3.AuthUserListResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, "user", "list")
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) UserDelete(ctx context.Context, name string) (*clientv3.AuthUserDeleteResponse, error) {
+ var resp clientv3.AuthUserDeleteResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, "user", "delete", name)
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) UserChangePass(ctx context.Context, user, newPass string) error {
+ args := ctl.cmdArgs()
+ args = append(args, "user", "passwd", user, "--interactive=false")
+ cmd, err := SpawnCmd(args, nil)
+ if err != nil {
+ return err
+ }
+ defer cmd.Close()
+ err = cmd.Send(newPass + "\n")
+ if err != nil {
+ return err
+ }
+
+ _, err = cmd.ExpectWithContext(ctx, expect.ExpectedResponse{Value: "Password updated"})
+ return err
+}
+
+func (ctl *EtcdctlV3) UserGrantRole(ctx context.Context, user string, role string) (*clientv3.AuthUserGrantRoleResponse, error) {
+ var resp clientv3.AuthUserGrantRoleResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, "user", "grant-role", user, role)
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) UserRevokeRole(ctx context.Context, user string, role string) (*clientv3.AuthUserRevokeRoleResponse, error) {
+ var resp clientv3.AuthUserRevokeRoleResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, "user", "revoke-role", user, role)
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) RoleAdd(ctx context.Context, name string) (*clientv3.AuthRoleAddResponse, error) {
+ var resp clientv3.AuthRoleAddResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, "role", "add", name)
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType clientv3.PermissionType) (*clientv3.AuthRoleGrantPermissionResponse, error) {
+ permissionType := authpb.Permission_Type_name[int32(permType)]
+ var resp clientv3.AuthRoleGrantPermissionResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, "role", "grant-permission", name, permissionType, key, rangeEnd)
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) RoleGet(ctx context.Context, role string) (*clientv3.AuthRoleGetResponse, error) {
+ var resp clientv3.AuthRoleGetResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, "role", "get", role)
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) RoleList(ctx context.Context) (*clientv3.AuthRoleListResponse, error) {
+ var resp clientv3.AuthRoleListResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, "role", "list")
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*clientv3.AuthRoleRevokePermissionResponse, error) {
+ var resp clientv3.AuthRoleRevokePermissionResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, "role", "revoke-permission", role, key, rangeEnd)
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) RoleDelete(ctx context.Context, role string) (*clientv3.AuthRoleDeleteResponse, error) {
+ var resp clientv3.AuthRoleDeleteResponse
+ err := ctl.spawnJSONCmd(ctx, &resp, "role", "delete", role)
+ return &resp, err
+}
+
+func (ctl *EtcdctlV3) spawnJSONCmd(ctx context.Context, output any, args ...string) error {
+ args = append(args, "-w", "json")
+ cmd, err := SpawnCmd(append(ctl.cmdArgs(), args...), nil)
+ if err != nil {
+ return err
+ }
+ defer cmd.Close()
+ line, err := cmd.ExpectWithContext(ctx, expect.ExpectedResponse{Value: "header"})
+ if err != nil {
+ return err
+ }
+ return json.Unmarshal([]byte(line), output)
+}
+
+func (ctl *EtcdctlV3) Watch(ctx context.Context, key string, opts config.WatchOptions) clientv3.WatchChan {
+ args := ctl.cmdArgs()
+ args = append(args, "watch", key)
+ if opts.RangeEnd != "" {
+ args = append(args, opts.RangeEnd)
+ }
+ args = append(args, "-w", "json")
+ if opts.Prefix {
+ args = append(args, "--prefix")
+ }
+ if opts.Revision != 0 {
+ args = append(args, "--rev", fmt.Sprint(opts.Revision))
+ }
+ proc, err := SpawnCmd(args, nil)
+ if err != nil {
+ return nil
+ }
+
+ ch := make(chan clientv3.WatchResponse)
+ go func() {
+ defer proc.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ close(ch)
+ return
+ default:
+ if line := proc.ReadLine(); line != "" {
+ var resp clientv3.WatchResponse
+ json.Unmarshal([]byte(line), &resp)
+ if resp.Canceled {
+ ch <- resp
+ close(ch)
+ return
+ }
+ if len(resp.Events) > 0 {
+ ch <- resp
+ }
+ }
+ }
+ }
+ }()
+
+ return ch
+}
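+
+// Consumption sketch (illustrative only; key and options are examples): callers
+// range over the returned channel and stop the watch by cancelling ctx:
+//
+//	ch := ctl.Watch(ctx, "foo", config.WatchOptions{Prefix: true})
+//	for resp := range ch {
+//		// inspect resp.Events / resp.Canceled
+//	}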
diff --git a/tests/framework/e2e/flags.go b/tests/framework/e2e/flags.go
new file mode 100644
index 00000000000..593117ff346
--- /dev/null
+++ b/tests/framework/e2e/flags.go
@@ -0,0 +1,99 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "flag"
+ "os"
+ "runtime"
+
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+)
+
+var (
+ CertDir string
+
+ CertPath string
+ PrivateKeyPath string
+ CaPath string
+
+ CertPath2 string
+ PrivateKeyPath2 string
+
+ CertPath3 string
+ PrivateKeyPath3 string
+
+ CrlPath string
+ RevokedCertPath string
+ RevokedPrivateKeyPath string
+
+ BinPath binPath
+ FixturesDir = testutils.MustAbsPath("../fixtures")
+)
+
+type binPath struct {
+ Etcd string
+ EtcdLastRelease string
+ Etcdctl string
+ Etcdutl string
+ LazyFS string
+}
+
+func (bp *binPath) LazyFSAvailable() bool {
+ _, err := os.Stat(bp.LazyFS)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ panic(err)
+ }
+ return false
+ }
+ return true
+}
+
+func InitFlags() {
+ os.Setenv("ETCD_UNSUPPORTED_ARCH", runtime.GOARCH)
+
+ binDirDef := testutils.MustAbsPath("../../bin")
+ certDirDef := FixturesDir
+
+ binDir := flag.String("bin-dir", binDirDef, "The directory for storing etcd and etcdctl binaries.")
+ binLastRelease := flag.String("bin-last-release", "", "The path for the last release etcd binary.")
+
+ flag.StringVar(&CertDir, "cert-dir", certDirDef, "The directory for storing certificate files.")
+ flag.Parse()
+
+ BinPath = binPath{
+ Etcd: *binDir + "/etcd",
+ EtcdLastRelease: *binDir + "/etcd-last-release",
+ Etcdctl: *binDir + "/etcdctl",
+ Etcdutl: *binDir + "/etcdutl",
+ LazyFS: *binDir + "/lazyfs",
+ }
+ if *binLastRelease != "" {
+ BinPath.EtcdLastRelease = *binLastRelease
+ }
+ CertPath = CertDir + "/server.crt"
+ PrivateKeyPath = CertDir + "/server.key.insecure"
+ CaPath = CertDir + "/ca.crt"
+ RevokedCertPath = CertDir + "/server-revoked.crt"
+ RevokedPrivateKeyPath = CertDir + "/server-revoked.key.insecure"
+ CrlPath = CertDir + "/revoke.crl"
+
+ CertPath2 = CertDir + "/server2.crt"
+ PrivateKeyPath2 = CertDir + "/server2.key.insecure"
+
+ CertPath3 = CertDir + "/server3.crt"
+ PrivateKeyPath3 = CertDir + "/server3.key.insecure"
+}
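+
+// Illustrative only: these flags are parsed from the test binary's command line,
+// for example (paths are placeholders):
+//
+//	go test ./tests/e2e -args --bin-dir=/path/to/bin --cert-dir=/path/to/fixtures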
diff --git a/tests/framework/e2e/lazyfs.go b/tests/framework/e2e/lazyfs.go
new file mode 100644
index 00000000000..98bfc69dc79
--- /dev/null
+++ b/tests/framework/e2e/lazyfs.go
@@ -0,0 +1,114 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/pkg/v3/expect"
+)
+
+func newLazyFS(lg *zap.Logger, dataDir string, tmp TempDirProvider) *LazyFS {
+ return &LazyFS{
+ lg: lg,
+ DataDir: dataDir,
+ LazyFSDir: tmp.TempDir(),
+ }
+}
+
+type TempDirProvider interface {
+ TempDir() string
+}
+
+type LazyFS struct {
+ lg *zap.Logger
+
+ DataDir string
+ LazyFSDir string
+
+ ep *expect.ExpectProcess
+}
+
+func (fs *LazyFS) Start(ctx context.Context) (err error) {
+ if fs.ep != nil {
+ return nil
+ }
+ err = os.WriteFile(fs.configPath(), fs.config(), 0o666)
+ if err != nil {
+ return err
+ }
+ dataPath := filepath.Join(fs.LazyFSDir, "data")
+ err = os.Mkdir(dataPath, 0o700)
+ if err != nil {
+ return err
+ }
+ flags := []string{fs.DataDir, "--config-path", fs.configPath(), "-o", "modules=subdir", "-o", "subdir=" + dataPath, "-f"}
+ fs.lg.Info("Started lazyfs", zap.Strings("flags", flags))
+ fs.ep, err = expect.NewExpect(BinPath.LazyFS, flags...)
+ if err != nil {
+ return err
+ }
+ _, err = fs.ep.ExpectWithContext(ctx, expect.ExpectedResponse{Value: "waiting for fault commands"})
+ return err
+}
+
+func (fs *LazyFS) configPath() string {
+ return filepath.Join(fs.LazyFSDir, "config.toml")
+}
+
+func (fs *LazyFS) socketPath() string {
+ return filepath.Join(fs.LazyFSDir, "sock.fifo")
+}
+
+func (fs *LazyFS) config() []byte {
+ return []byte(fmt.Sprintf(`[faults]
+fifo_path=%q
+[cache]
+apply_eviction=false
+[cache.simple]
+custom_size="1gb"
+blocks_per_page=1
+[filesystem]
+log_all_operations=false
+`, fs.socketPath()))
+}
+
+func (fs *LazyFS) Stop() error {
+ if fs.ep == nil {
+ return nil
+ }
+ defer func() { fs.ep = nil }()
+ err := fs.ep.Stop()
+ if err != nil {
+ return err
+ }
+ return fs.ep.Close()
+}
+
+func (fs *LazyFS) ClearCache(ctx context.Context) error {
+ err := os.WriteFile(fs.socketPath(), []byte("lazyfs::clear-cache\n"), 0o666)
+ if err != nil {
+ return err
+ }
+ // TODO: Wait for response on socket instead of reading logs to get command completion.
+ // Set `fifo_path_completed` config for LazyFS to create separate socket to write when it has completed command.
+ _, err = fs.ep.ExpectWithContext(ctx, expect.ExpectedResponse{Value: "cache is cleared"})
+ return err
+}
diff --git a/tests/framework/e2e/testing.go b/tests/framework/e2e/testing.go
new file mode 100644
index 00000000000..7d7de27fddd
--- /dev/null
+++ b/tests/framework/e2e/testing.go
@@ -0,0 +1,26 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "testing"
+
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+)
+
+func BeforeTest(t testing.TB) {
+ SkipInShortMode(t)
+ testutil.BeforeTest(t)
+}
diff --git a/tests/framework/e2e/util.go b/tests/framework/e2e/util.go
new file mode 100644
index 00000000000..d72f2d4939d
--- /dev/null
+++ b/tests/framework/e2e/util.go
@@ -0,0 +1,179 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math/rand"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+ "go.etcd.io/etcd/pkg/v3/expect"
+)
+
+func WaitReadyExpectProc(ctx context.Context, exproc *expect.ExpectProcess, readyStrs []string) error {
+ matchSet := func(l string) bool {
+ for _, s := range readyStrs {
+ if strings.Contains(l, s) {
+ return true
+ }
+ }
+ return false
+ }
+ _, err := exproc.ExpectFunc(ctx, matchSet)
+ return err
+}
+
+func SpawnWithExpect(args []string, expected expect.ExpectedResponse) error {
+ return SpawnWithExpects(args, nil, []expect.ExpectedResponse{expected}...)
+}
+
+func SpawnWithExpectWithEnv(args []string, envVars map[string]string, expected expect.ExpectedResponse) error {
+ return SpawnWithExpects(args, envVars, []expect.ExpectedResponse{expected}...)
+}
+
+func SpawnWithExpects(args []string, envVars map[string]string, xs ...expect.ExpectedResponse) error {
+ return SpawnWithExpectsContext(context.TODO(), args, envVars, xs...)
+}
+
+func SpawnWithExpectsContext(ctx context.Context, args []string, envVars map[string]string, xs ...expect.ExpectedResponse) error {
+ _, err := SpawnWithExpectLines(ctx, args, envVars, xs...)
+ return err
+}
+
+func SpawnWithExpectLines(ctx context.Context, args []string, envVars map[string]string, xs ...expect.ExpectedResponse) ([]string, error) {
+ proc, err := SpawnCmd(args, envVars)
+ if err != nil {
+ return nil, err
+ }
+ defer proc.Close()
+ // process until either stdout or stderr contains
+ // the expected string
+ var (
+ lines []string
+ )
+ for _, txt := range xs {
+ l, lerr := proc.ExpectWithContext(ctx, txt)
+ if lerr != nil {
+ proc.Close()
+ return nil, fmt.Errorf("%v %w (expected %q, got %q). Try EXPECT_DEBUG=TRUE", args, lerr, txt.Value, lines)
+ }
+ lines = append(lines, l)
+ }
+ perr := proc.Close()
+ if perr != nil {
+ return lines, fmt.Errorf("err: %w, with output lines %v", perr, proc.Lines())
+ }
+
+ l := proc.LineCount()
+ if len(xs) == 0 && l != 0 { // expect no output
+ return nil, fmt.Errorf("unexpected output from %v (got lines %q, line count %d). Try EXPECT_DEBUG=TRUE", args, lines, l)
+ }
+ return lines, nil
+}
+
+func RunUtilCompletion(args []string, envVars map[string]string) ([]string, error) {
+ proc, err := SpawnCmd(args, envVars)
+ if err != nil {
+ return nil, fmt.Errorf("failed to spawn command %v with error: %w", args, err)
+ }
+
+ proc.Wait()
+ err = proc.Close()
+ if err != nil {
+ return nil, fmt.Errorf("failed to close command %v with error: %w", args, err)
+ }
+
+ return proc.Lines(), nil
+}
+
+func RandomLeaseID() int64 {
+ return rand.New(rand.NewSource(time.Now().UnixNano())).Int63()
+}
+
+func DataMarshal(data any) (d string, e error) {
+ m, err := json.Marshal(data)
+ if err != nil {
+ return "", err
+ }
+ return string(m), nil
+}
+
+func CloseWithTimeout(p *expect.ExpectProcess, d time.Duration) error {
+ errc := make(chan error, 1)
+ go func() { errc <- p.Close() }()
+ select {
+ case err := <-errc:
+ return err
+ case <-time.After(d):
+ p.Stop()
+ // retry close after stopping to collect SIGQUIT data, if any
+ CloseWithTimeout(p, time.Second)
+ }
+ return fmt.Errorf("took longer than %v to Close process %+v", d, p)
+}
+
+func setupScheme(s string, isTLS bool) string {
+ if s == "" {
+ s = "http"
+ }
+ if isTLS {
+ s = ToTLS(s)
+ }
+ return s
+}
+
+func ToTLS(s string) string {
+ if strings.Contains(s, "http") && !strings.Contains(s, "https") {
+ return strings.Replace(s, "http", "https", 1)
+ }
+ if strings.Contains(s, "unix") && !strings.Contains(s, "unixs") {
+ return strings.Replace(s, "unix", "unixs", 1)
+ }
+ return s
+}
+
+func SkipInShortMode(t testing.TB) {
+ testutil.SkipTestIfShortMode(t, "e2e tests are not running in --short mode")
+}
+
+func mergeEnvVariables(envVars map[string]string) []string {
+ var env []string
+ // Environment variables passed as parameters have higher priority
+ // than OS environment variables.
+ for k, v := range envVars {
+ env = append(env, fmt.Sprintf("%s=%s", k, v))
+ }
+
+ // Now add the OS environment variables that were not passed as parameters.
+ currVars := os.Environ()
+ for _, v := range currVars {
+ p := strings.Split(v, "=")
+ // TODO: Remove PATH when we stop using system binaries (`awk`, `echo`)
+ if !strings.HasPrefix(p[0], "ETCD_") && !strings.HasPrefix(p[0], "ETCDCTL_") && !strings.HasPrefix(p[0], "EXPECT_") && p[0] != "PATH" {
+ continue
+ }
+ if _, ok := envVars[p[0]]; !ok {
+ env = append(env, fmt.Sprintf("%s=%s", p[0], p[1]))
+ }
+ }
+
+ return env
+}
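+
+// Example of the precedence above (values are hypothetical): with
+// envVars = {"ETCD_LOGGER": "zap"} and an OS environment containing
+// ETCD_LOGGER=capnslog and HOME=/root, the result keeps ETCD_LOGGER=zap
+// (the parameter wins) and drops HOME, since only ETCD_*, ETCDCTL_*,
+// EXPECT_* and PATH are forwarded from the OS environment.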
diff --git a/tests/integration/bridge.go b/tests/framework/integration/bridge.go
similarity index 82%
rename from tests/integration/bridge.go
rename to tests/framework/integration/bridge.go
index 1d2be109eee..5a832c58dcf 100644
--- a/tests/integration/bridge.go
+++ b/tests/framework/integration/bridge.go
@@ -15,22 +15,21 @@
package integration
import (
- "fmt"
"io"
- "io/ioutil"
"net"
"sync"
-
- "go.etcd.io/etcd/client/pkg/v3/transport"
)
-// bridge creates a unix socket bridge to another unix socket, making it possible
+type Dialer interface {
+ Dial() (net.Conn, error)
+}
+
+// bridge proxies connections between listener and dialer, making it possible
// to disconnect grpc network connections without closing the logical grpc connection.
type bridge struct {
- inaddr string
- outaddr string
- l net.Listener
- conns map[*bridgeConn]struct{}
+ dialer Dialer
+ l net.Listener
+ conns map[*bridgeConn]struct{}
stopc chan struct{}
pausec chan struct{}
@@ -40,30 +39,22 @@ type bridge struct {
mu sync.Mutex
}
-func newBridge(addr string) (*bridge, error) {
+func newBridge(dialer Dialer, listener net.Listener) *bridge {
b := &bridge{
// bridge "port" is ("%05d%05d0", port, pid) since go1.8 expects the port to be a number
- inaddr: addr + "0",
- outaddr: addr,
+ dialer: dialer,
+ l: listener,
conns: make(map[*bridgeConn]struct{}),
stopc: make(chan struct{}),
pausec: make(chan struct{}),
blackholec: make(chan struct{}),
}
close(b.pausec)
-
- l, err := transport.NewUnixListener(b.inaddr)
- if err != nil {
- return nil, fmt.Errorf("listen failed on socket %s (%v)", addr, err)
- }
- b.l = l
b.wg.Add(1)
go b.serveListen()
- return b, nil
+ return b
}
-func (b *bridge) URL() string { return "unix://" + b.inaddr }
-
func (b *bridge) Close() {
b.l.Close()
b.mu.Lock()
@@ -76,7 +67,7 @@ func (b *bridge) Close() {
b.wg.Wait()
}
-func (b *bridge) Reset() {
+func (b *bridge) DropConnections() {
b.mu.Lock()
defer b.mu.Unlock()
for bc := range b.conns {
@@ -85,13 +76,13 @@ func (b *bridge) Reset() {
b.conns = make(map[*bridgeConn]struct{})
}
-func (b *bridge) Pause() {
+func (b *bridge) PauseConnections() {
b.mu.Lock()
b.pausec = make(chan struct{})
b.mu.Unlock()
}
-func (b *bridge) Unpause() {
+func (b *bridge) UnpauseConnections() {
b.mu.Lock()
select {
case <-b.pausec:
@@ -127,7 +118,7 @@ func (b *bridge) serveListen() {
case <-pausec:
}
- outc, oerr := net.Dial("unix", b.outaddr)
+ outc, oerr := b.dialer.Dial()
if oerr != nil {
inc.Close()
return
@@ -205,7 +196,7 @@ func (b *bridge) ioCopy(dst io.Writer, src io.Reader) (err error) {
for {
select {
case <-b.blackholec:
- io.Copy(ioutil.Discard, src)
+ io.Copy(io.Discard, src)
return nil
default:
}
diff --git a/tests/framework/integration/cluster.go b/tests/framework/integration/cluster.go
new file mode 100644
index 00000000000..229611fe2fa
--- /dev/null
+++ b/tests/framework/integration/cluster.go
@@ -0,0 +1,1693 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package integration
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math/rand"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "github.com/soheilhy/cmux"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "go.uber.org/zap/zaptest"
+ "golang.org/x/crypto/bcrypt"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/keepalive"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+ "go.etcd.io/etcd/client/pkg/v3/tlsutil"
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/grpctesting"
+ "go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/etcd/server/v3/embed"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3client"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3election"
+ epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock"
+ lockpb "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
+ "go.etcd.io/etcd/server/v3/features"
+ "go.etcd.io/etcd/server/v3/verify"
+ framecfg "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+ "go.etcd.io/raft/v3"
+)
+
+const (
+ // RequestWaitTimeout is the time duration to wait for a request to go through or detect leader loss.
+ RequestWaitTimeout = 5 * time.Second
+ RequestTimeout = 20 * time.Second
+
+ ClusterName = "etcd"
+ BasePort = 21000
+ URLScheme = "unix"
+ URLSchemeTLS = "unixs"
+ BaseGRPCPort = 30000
+)
+
+var (
+ ElectionTicks = 10
+
+ // UniqueCount is used by integration tests to set unique member ids
+ UniqueCount = int32(0)
+
+ TestTLSInfo = transport.TLSInfo{
+ KeyFile: testutils.MustAbsPath("../fixtures/server.key.insecure"),
+ CertFile: testutils.MustAbsPath("../fixtures/server.crt"),
+ TrustedCAFile: testutils.MustAbsPath("../fixtures/ca.crt"),
+ ClientCertAuth: true,
+ }
+
+ TestTLSInfoWithSpecificUsage = transport.TLSInfo{
+ KeyFile: testutils.MustAbsPath("../fixtures/server-serverusage.key.insecure"),
+ CertFile: testutils.MustAbsPath("../fixtures/server-serverusage.crt"),
+ ClientKeyFile: testutils.MustAbsPath("../fixtures/client-clientusage.key.insecure"),
+ ClientCertFile: testutils.MustAbsPath("../fixtures/client-clientusage.crt"),
+ TrustedCAFile: testutils.MustAbsPath("../fixtures/ca.crt"),
+ ClientCertAuth: true,
+ }
+
+ TestTLSInfoIP = transport.TLSInfo{
+ KeyFile: testutils.MustAbsPath("../fixtures/server-ip.key.insecure"),
+ CertFile: testutils.MustAbsPath("../fixtures/server-ip.crt"),
+ TrustedCAFile: testutils.MustAbsPath("../fixtures/ca.crt"),
+ ClientCertAuth: true,
+ }
+
+ TestTLSInfoExpired = transport.TLSInfo{
+ KeyFile: testutils.MustAbsPath("./fixtures-expired/server.key.insecure"),
+ CertFile: testutils.MustAbsPath("./fixtures-expired/server.crt"),
+ TrustedCAFile: testutils.MustAbsPath("./fixtures-expired/ca.crt"),
+ ClientCertAuth: true,
+ }
+
+ TestTLSInfoExpiredIP = transport.TLSInfo{
+ KeyFile: testutils.MustAbsPath("./fixtures-expired/server-ip.key.insecure"),
+ CertFile: testutils.MustAbsPath("./fixtures-expired/server-ip.crt"),
+ TrustedCAFile: testutils.MustAbsPath("./fixtures-expired/ca.crt"),
+ ClientCertAuth: true,
+ }
+
+ DefaultTokenJWT = fmt.Sprintf("jwt,pub-key=%s,priv-key=%s,sign-method=RS256,ttl=2s",
+ testutils.MustAbsPath("../fixtures/server.crt"), testutils.MustAbsPath("../fixtures/server.key.insecure"))
+
+ // UniqueNumber is used to generate unique port numbers
+ // Should only be accessed via atomic package methods.
+ UniqueNumber int32
+)
+
+type ClusterConfig struct {
+ Size int
+ PeerTLS *transport.TLSInfo
+ ClientTLS *transport.TLSInfo
+
+ DiscoveryURL string
+
+ AuthToken string
+
+ QuotaBackendBytes int64
+ BackendBatchInterval time.Duration
+
+ MaxTxnOps uint
+ MaxRequestBytes uint
+
+ SnapshotCount uint64
+ SnapshotCatchUpEntries uint64
+
+ GRPCKeepAliveMinTime time.Duration
+ GRPCKeepAliveInterval time.Duration
+ GRPCKeepAliveTimeout time.Duration
+ GRPCAdditionalServerOptions []grpc.ServerOption
+
+ ClientMaxCallSendMsgSize int
+ ClientMaxCallRecvMsgSize int
+
+ // UseIP is true to use only IP for gRPC requests.
+ UseIP bool
+ // UseBridge adds a bridge between the client and the grpc server. It should be used in tests
+ // that need to manipulate the connection or that require the connection not to break across a server stop/restart.
+ UseBridge bool
+ // UseTCP configures the server to listen on a tcp socket. If disabled, a unix socket is used.
+ UseTCP bool
+
+ EnableLeaseCheckpoint bool
+ LeaseCheckpointInterval time.Duration
+ LeaseCheckpointPersist bool
+
+ WatchProgressNotifyInterval time.Duration
+ ExperimentalMaxLearners int
+ DisableStrictReconfigCheck bool
+ CorruptCheckTime time.Duration
+}
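+
+// An illustrative sketch (not part of the framework API) of a typical config:
+// a three-member cluster whose clients go through the test bridge so that
+// connections can be manipulated mid-test:
+//
+//	cfg := &ClusterConfig{Size: 3, UseBridge: true, ClientTLS: &TestTLSInfo}
+//	clus := NewCluster(t, cfg)
+//	defer clus.Terminate(t)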
+
+type Cluster struct {
+ Cfg *ClusterConfig
+ Members []*Member
+ LastMemberNum int
+
+ mu sync.Mutex
+}
+
+func SchemeFromTLSInfo(tls *transport.TLSInfo) string {
+ if tls == nil {
+ return URLScheme
+ }
+ return URLSchemeTLS
+}
+
+// fillClusterForMembers fills each Member.InitialPeerURLsMap from every member's name, scheme and PeerListeners address.
+func (c *Cluster) fillClusterForMembers() error {
+ if c.Cfg.DiscoveryURL != "" {
+ // Cluster will be discovered
+ return nil
+ }
+
+ addrs := make([]string, 0)
+ for _, m := range c.Members {
+ scheme := SchemeFromTLSInfo(m.PeerTLSInfo)
+ for _, l := range m.PeerListeners {
+ addrs = append(addrs, fmt.Sprintf("%s=%s://%s", m.Name, scheme, l.Addr().String()))
+ }
+ }
+ clusterStr := strings.Join(addrs, ",")
+ var err error
+ for _, m := range c.Members {
+ m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *Cluster) Launch(t testutil.TB) {
+ t.Logf("Launching new cluster...")
+ errc := make(chan error)
+ for _, m := range c.Members {
+ // Members are launched in separate goroutines because if they boot
+ // using discovery url, they have to wait for others to register to continue.
+ go func(m *Member) {
+ errc <- m.Launch()
+ }(m)
+ }
+ for range c.Members {
+ if err := <-errc; err != nil {
+ c.Terminate(t)
+ t.Fatalf("error setting up member: %v", err)
+ }
+ }
+ // wait for the Cluster to become stable before accepting future client requests
+ c.WaitMembersMatch(t, c.ProtoMembers())
+ c.waitVersion()
+ for _, m := range c.Members {
+ t.Logf(" - %v -> %v (%v)", m.Name, m.ID(), m.GRPCURL)
+ }
+}
+
+// ProtoMembers returns a list of all active members as etcdserverpb.Member
+func (c *Cluster) ProtoMembers() []*pb.Member {
+ var ms []*pb.Member
+ for _, m := range c.Members {
+ pScheme := SchemeFromTLSInfo(m.PeerTLSInfo)
+ cScheme := SchemeFromTLSInfo(m.ClientTLSInfo)
+ cm := &pb.Member{Name: m.Name}
+ for _, ln := range m.PeerListeners {
+ cm.PeerURLs = append(cm.PeerURLs, pScheme+"://"+ln.Addr().String())
+ }
+ for _, ln := range m.ClientListeners {
+ cm.ClientURLs = append(cm.ClientURLs, cScheme+"://"+ln.Addr().String())
+ }
+ ms = append(ms, cm)
+ }
+ return ms
+}
+
+func (c *Cluster) MustNewMember(t testutil.TB) *Member {
+ memberNumber := c.LastMemberNum
+ c.LastMemberNum++
+
+ m := MustNewMember(t,
+ MemberConfig{
+ Name: fmt.Sprintf("m%v", memberNumber),
+ MemberNumber: memberNumber,
+ AuthToken: c.Cfg.AuthToken,
+ PeerTLS: c.Cfg.PeerTLS,
+ ClientTLS: c.Cfg.ClientTLS,
+ QuotaBackendBytes: c.Cfg.QuotaBackendBytes,
+ BackendBatchInterval: c.Cfg.BackendBatchInterval,
+ MaxTxnOps: c.Cfg.MaxTxnOps,
+ MaxRequestBytes: c.Cfg.MaxRequestBytes,
+ SnapshotCount: c.Cfg.SnapshotCount,
+ SnapshotCatchUpEntries: c.Cfg.SnapshotCatchUpEntries,
+ GRPCKeepAliveMinTime: c.Cfg.GRPCKeepAliveMinTime,
+ GRPCKeepAliveInterval: c.Cfg.GRPCKeepAliveInterval,
+ GRPCKeepAliveTimeout: c.Cfg.GRPCKeepAliveTimeout,
+ GRPCAdditionalServerOptions: c.Cfg.GRPCAdditionalServerOptions,
+ ClientMaxCallSendMsgSize: c.Cfg.ClientMaxCallSendMsgSize,
+ ClientMaxCallRecvMsgSize: c.Cfg.ClientMaxCallRecvMsgSize,
+ UseIP: c.Cfg.UseIP,
+ UseBridge: c.Cfg.UseBridge,
+ UseTCP: c.Cfg.UseTCP,
+ EnableLeaseCheckpoint: c.Cfg.EnableLeaseCheckpoint,
+ LeaseCheckpointInterval: c.Cfg.LeaseCheckpointInterval,
+ LeaseCheckpointPersist: c.Cfg.LeaseCheckpointPersist,
+ WatchProgressNotifyInterval: c.Cfg.WatchProgressNotifyInterval,
+ ExperimentalMaxLearners: c.Cfg.ExperimentalMaxLearners,
+ DisableStrictReconfigCheck: c.Cfg.DisableStrictReconfigCheck,
+ CorruptCheckTime: c.Cfg.CorruptCheckTime,
+ })
+ m.DiscoveryURL = c.Cfg.DiscoveryURL
+ return m
+}
+
+// addMember returns the PeerURLs of the added member.
+func (c *Cluster) addMember(t testutil.TB) types.URLs {
+ m := c.MustNewMember(t)
+
+ scheme := SchemeFromTLSInfo(c.Cfg.PeerTLS)
+
+ // send add request to the Cluster
+ var err error
+ for i := 0; i < len(c.Members); i++ {
+ peerURL := scheme + "://" + m.PeerListeners[0].Addr().String()
+ if err = c.AddMemberByURL(t, c.Members[i].Client, peerURL); err == nil {
+ break
+ }
+ }
+ if err != nil {
+ t.Fatalf("add member failed on all members error: %v", err)
+ }
+
+ m.InitialPeerURLsMap = types.URLsMap{}
+ for _, mm := range c.Members {
+ m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs
+ }
+ m.InitialPeerURLsMap[m.Name] = m.PeerURLs
+ m.NewCluster = false
+ if err := m.Launch(); err != nil {
+ t.Fatal(err)
+ }
+ c.Members = append(c.Members, m)
+ // wait for the Cluster to become stable before accepting future client requests
+ c.WaitMembersMatch(t, c.ProtoMembers())
+ return m.PeerURLs
+}
+
+func (c *Cluster) AddMemberByURL(t testutil.TB, cc *clientv3.Client, peerURL string) error {
+ ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
+ _, err := cc.MemberAdd(ctx, []string{peerURL})
+ cancel()
+ if err != nil {
+ return err
+ }
+
+ // wait for the add-node entry to be applied in the Cluster
+ members := append(c.ProtoMembers(), &pb.Member{PeerURLs: []string{peerURL}, ClientURLs: []string{}})
+ c.WaitMembersMatch(t, members)
+ return nil
+}
+
+// AddMember returns the PeerURLs of the added member.
+func (c *Cluster) AddMember(t testutil.TB) types.URLs {
+ return c.addMember(t)
+}
+
+func (c *Cluster) RemoveMember(t testutil.TB, cc *clientv3.Client, id uint64) error {
+ // send remove request to the Cluster
+
+ ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
+ _, err := cc.MemberRemove(ctx, id)
+ cancel()
+ if err != nil {
+ return err
+ }
+ newMembers := make([]*Member, 0)
+ for _, m := range c.Members {
+ if uint64(m.Server.MemberID()) != id {
+ newMembers = append(newMembers, m)
+ } else {
+ m.Client.Close()
+ select {
+ case <-m.Server.StopNotify():
+ m.Terminate(t)
+ // 1s stop delay + election timeout + 1s disk and network delay + connection write timeout
+ // TODO: remove connection write timeout by selecting on http response closeNotifier
+ // blocking on https://github.com/golang/go/issues/9524
+ case <-time.After(time.Second + time.Duration(ElectionTicks)*framecfg.TickDuration + time.Second + rafthttp.ConnWriteTimeout):
+ t.Fatalf("failed to remove member %s in time", m.Server.MemberID())
+ }
+ }
+ }
+
+ c.Members = newMembers
+ c.WaitMembersMatch(t, c.ProtoMembers())
+ return nil
+}
+
+func (c *Cluster) WaitMembersMatch(t testutil.TB, membs []*pb.Member) {
+ ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
+ defer cancel()
+ for _, m := range c.Members {
+ cc := ToGRPC(m.Client)
+ select {
+ case <-m.Server.StopNotify():
+ continue
+ default:
+ }
+ for {
+ resp, err := cc.Cluster.MemberList(ctx, &pb.MemberListRequest{Linearizable: false})
+ if errors.Is(err, context.DeadlineExceeded) {
+ t.Fatal(err)
+ }
+ if err != nil {
+ continue
+ }
+ if isMembersEqual(resp.Members, membs) {
+ break
+ }
+ time.Sleep(framecfg.TickDuration)
+ }
+ }
+}
+
+// WaitLeader returns the index of the member in c.Members that is leader,
+// or fails the test if a leader is not established within 30s.
+func (c *Cluster) WaitLeader(t testing.TB) int {
+ return c.WaitMembersForLeader(t, c.Members)
+}
+
+// WaitMembersForLeader waits until given members agree on the same leader,
+// and returns its 'index' in the 'membs' list
+func (c *Cluster) WaitMembersForLeader(t testing.TB, membs []*Member) int {
+ t.Logf("WaitMembersForLeader")
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ l := 0
+ for l = c.waitMembersForLeader(ctx, t, membs); l < 0; {
+ if ctx.Err() != nil {
+ t.Fatalf("WaitLeader FAILED: %v", ctx.Err())
+ }
+ }
+ t.Logf("WaitMembersForLeader succeeded. Cluster leader index: %v", l)
+
+ // TODO: Consider second pass check as sometimes leadership is lost
+ // soon after election:
+ //
+ // We perform multiple attempts, as sometimes just after a successful WaitLeader
+ // there is a race and leadership is quickly lost:
+ // - MsgAppResp message with higher term from 2acc3d3b521981 [term: 3] {"member": "m0"}
+ // - 9903a56eaf96afac became follower at term 3 {"member": "m0"}
+ // - 9903a56eaf96afac lost leader 9903a56eaf96afac at term 3 {"member": "m0"}
+
+ return l
+}
+
+// waitMembersForLeader waits until the given members agree on the same leader,
+// and returns its index in the 'membs' list.
+func (c *Cluster) waitMembersForLeader(ctx context.Context, t testing.TB, membs []*Member) int {
+ possibleLead := make(map[uint64]bool)
+ var lead uint64
+ for _, m := range membs {
+ possibleLead[uint64(m.Server.MemberID())] = true
+ }
+ cc, err := c.ClusterClient(t)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // ensure leader is up via linearizable get
+ for {
+ fctx, fcancel := context.WithTimeout(ctx, 10*framecfg.TickDuration+time.Second)
+ _, err := cc.Get(fctx, "0")
+ fcancel()
+ if err == nil || strings.Contains(err.Error(), "Key not found") {
+ break
+ }
+ }
+
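+ // Poll until every running member reports the same leader and that leader
+ // is one of the candidate members passed in 'membs'.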
+ for lead == 0 || !possibleLead[lead] {
+ lead = 0
+ for _, m := range membs {
+ select {
+ case <-m.Server.StopNotify():
+ continue
+ default:
+ }
+ if lead != 0 && lead != m.Server.Lead() {
+ lead = 0
+ time.Sleep(10 * framecfg.TickDuration)
+ break
+ }
+ lead = m.Server.Lead()
+ }
+ }
+
+ for i, m := range membs {
+ if uint64(m.Server.MemberID()) == lead {
+ t.Logf("waitMembersForLeader found leader. Member: %v lead: %x", i, lead)
+ return i
+ }
+ }
+
+ t.Logf("waitMembersForLeader failed (-1)")
+ return -1
+}
+
+func (c *Cluster) WaitNoLeader() { c.WaitMembersNoLeader(c.Members) }
+
+// WaitMembersNoLeader waits until the given members lose their leader.
+func (c *Cluster) WaitMembersNoLeader(membs []*Member) {
+ noLeader := false
+ for !noLeader {
+ noLeader = true
+ for _, m := range membs {
+ select {
+ case <-m.Server.StopNotify():
+ continue
+ default:
+ }
+ if m.Server.Lead() != 0 {
+ noLeader = false
+ time.Sleep(10 * framecfg.TickDuration)
+ break
+ }
+ }
+ }
+}
+
+func (c *Cluster) waitVersion() {
+ for _, m := range c.Members {
+ for {
+ if m.Server.ClusterVersion() != nil {
+ break
+ }
+ time.Sleep(framecfg.TickDuration)
+ }
+ }
+}
+
+// isMembersEqual checks whether two member lists are equal, ignoring the ID field.
+// The given wmembs should always leave the ID field unset.
+func isMembersEqual(membs []*pb.Member, wmembs []*pb.Member) bool {
+ sort.Sort(SortableMemberSliceByPeerURLs(membs))
+ sort.Sort(SortableMemberSliceByPeerURLs(wmembs))
+ return cmp.Equal(membs, wmembs, cmpopts.IgnoreFields(pb.Member{}, "ID", "PeerURLs", "ClientURLs"))
+}
+
+func NewLocalListener(t testutil.TB) net.Listener {
+ c := atomic.AddInt32(&UniqueCount, 1)
+ // Go 1.8+ allows only numbers in port
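+ // The address is only used as a unix socket path (see NewListenerWithAddr),
+ // so embedding the counter and PID keeps it unique across tests and processes.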
+ addr := fmt.Sprintf("127.0.0.1:%05d%05d", c+BasePort, os.Getpid())
+ return NewListenerWithAddr(t, addr)
+}
+
+func NewListenerWithAddr(t testutil.TB, addr string) net.Listener {
+ t.Logf("Creating listener with addr: %v", addr)
+ l, err := transport.NewUnixListener(addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return l
+}
+
+type Member struct {
+ config.ServerConfig
+ UniqNumber int
+ MemberNumber int
+ Port string
+ PeerListeners, ClientListeners []net.Listener
+ GRPCListener net.Listener
+ // PeerTLSInfo enables peer TLS when set
+ PeerTLSInfo *transport.TLSInfo
+ // ClientTLSInfo enables client TLS when set
+ ClientTLSInfo *transport.TLSInfo
+ DialOptions []grpc.DialOption
+
+ RaftHandler *testutil.PauseableHandler
+ Server *etcdserver.EtcdServer
+ ServerClosers []func()
+
+ GRPCServerOpts []grpc.ServerOption
+ GRPCServer *grpc.Server
+ GRPCURL string
+ GRPCBridge *bridge
+
+ // ServerClient is a clientv3 that directly calls the etcdserver.
+ ServerClient *clientv3.Client
+ // Client is a clientv3 that communicates via socket, either UNIX or TCP.
+ Client *clientv3.Client
+
+ KeepDataDirTerminate bool
+ ClientMaxCallSendMsgSize int
+ ClientMaxCallRecvMsgSize int
+ UseIP bool
+ UseBridge bool
+ UseTCP bool
+
+ IsLearner bool
+ Closed bool
+
+ GRPCServerRecorder *grpctesting.GRPCRecorder
+
+ LogObserver *testutils.LogObserver
+}
+
+type MemberConfig struct {
+ Name string
+ UniqNumber int64
+ MemberNumber int
+ PeerTLS *transport.TLSInfo
+ ClientTLS *transport.TLSInfo
+ AuthToken string
+ QuotaBackendBytes int64
+ BackendBatchInterval time.Duration
+ MaxTxnOps uint
+ MaxRequestBytes uint
+ SnapshotCount uint64
+ SnapshotCatchUpEntries uint64
+ GRPCKeepAliveMinTime time.Duration
+ GRPCKeepAliveInterval time.Duration
+ GRPCKeepAliveTimeout time.Duration
+ GRPCAdditionalServerOptions []grpc.ServerOption
+ ClientMaxCallSendMsgSize int
+ ClientMaxCallRecvMsgSize int
+ UseIP bool
+ UseBridge bool
+ UseTCP bool
+ EnableLeaseCheckpoint bool
+ LeaseCheckpointInterval time.Duration
+ LeaseCheckpointPersist bool
+ WatchProgressNotifyInterval time.Duration
+ ExperimentalMaxLearners int
+ DisableStrictReconfigCheck bool
+ CorruptCheckTime time.Duration
+}
+
+// MustNewMember returns an initialized member with the given name. If peerTLS is
+// set, it will use the https scheme to communicate between peers.
+func MustNewMember(t testutil.TB, mcfg MemberConfig) *Member {
+ var err error
+ m := &Member{
+ MemberNumber: mcfg.MemberNumber,
+ UniqNumber: int(atomic.AddInt32(&UniqueCount, 1)),
+ }
+
+ peerScheme := SchemeFromTLSInfo(mcfg.PeerTLS)
+ clientScheme := SchemeFromTLSInfo(mcfg.ClientTLS)
+
+ pln := NewLocalListener(t)
+ m.PeerListeners = []net.Listener{pln}
+ m.PeerURLs, err = types.NewURLs([]string{peerScheme + "://" + pln.Addr().String()})
+ if err != nil {
+ t.Fatal(err)
+ }
+ m.PeerTLSInfo = mcfg.PeerTLS
+
+ cln := NewLocalListener(t)
+ m.ClientListeners = []net.Listener{cln}
+ m.ClientURLs, err = types.NewURLs([]string{clientScheme + "://" + cln.Addr().String()})
+ if err != nil {
+ t.Fatal(err)
+ }
+ m.ClientTLSInfo = mcfg.ClientTLS
+
+ m.Name = mcfg.Name
+
+ m.DataDir, err = os.MkdirTemp(t.TempDir(), "etcd")
+ if err != nil {
+ t.Fatal(err)
+ }
+ clusterStr := fmt.Sprintf("%s=%s://%s", mcfg.Name, peerScheme, pln.Addr().String())
+ m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ m.InitialClusterToken = ClusterName
+ m.NewCluster = true
+ m.BootstrapTimeout = 10 * time.Millisecond
+ if m.PeerTLSInfo != nil {
+ m.ServerConfig.PeerTLSInfo = *m.PeerTLSInfo
+ }
+ m.ElectionTicks = ElectionTicks
+ m.InitialElectionTickAdvance = true
+ m.TickMs = uint(framecfg.TickDuration / time.Millisecond)
+ m.PreVote = true
+ m.QuotaBackendBytes = mcfg.QuotaBackendBytes
+ m.BackendBatchInterval = mcfg.BackendBatchInterval
+ m.MaxTxnOps = mcfg.MaxTxnOps
+ if m.MaxTxnOps == 0 {
+ m.MaxTxnOps = embed.DefaultMaxTxnOps
+ }
+ m.MaxRequestBytes = mcfg.MaxRequestBytes
+ if m.MaxRequestBytes == 0 {
+ m.MaxRequestBytes = embed.DefaultMaxRequestBytes
+ }
+ m.SnapshotCount = etcdserver.DefaultSnapshotCount
+ if mcfg.SnapshotCount != 0 {
+ m.SnapshotCount = mcfg.SnapshotCount
+ }
+ m.SnapshotCatchUpEntries = etcdserver.DefaultSnapshotCatchUpEntries
+ if mcfg.SnapshotCatchUpEntries != 0 {
+ m.SnapshotCatchUpEntries = mcfg.SnapshotCatchUpEntries
+ }
+
+ // for the purpose of integration testing, a simple token is enough
+ m.AuthToken = "simple"
+ if mcfg.AuthToken != "" {
+ m.AuthToken = mcfg.AuthToken
+ }
+
+ m.BcryptCost = uint(bcrypt.MinCost) // use min bcrypt cost to speed up integration testing
+
+ m.GRPCServerOpts = []grpc.ServerOption{}
+ if mcfg.GRPCKeepAliveMinTime > time.Duration(0) {
+ m.GRPCServerOpts = append(m.GRPCServerOpts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+ MinTime: mcfg.GRPCKeepAliveMinTime,
+ PermitWithoutStream: false,
+ }))
+ }
+ if mcfg.GRPCKeepAliveInterval > time.Duration(0) &&
+ mcfg.GRPCKeepAliveTimeout > time.Duration(0) {
+ m.GRPCServerOpts = append(m.GRPCServerOpts, grpc.KeepaliveParams(keepalive.ServerParameters{
+ Time: mcfg.GRPCKeepAliveInterval,
+ Timeout: mcfg.GRPCKeepAliveTimeout,
+ }))
+ }
+ m.GRPCServerOpts = append(m.GRPCServerOpts, mcfg.GRPCAdditionalServerOptions...)
+ m.ClientMaxCallSendMsgSize = mcfg.ClientMaxCallSendMsgSize
+ m.ClientMaxCallRecvMsgSize = mcfg.ClientMaxCallRecvMsgSize
+ m.UseIP = mcfg.UseIP
+ m.UseBridge = mcfg.UseBridge
+ m.UseTCP = mcfg.UseTCP
+ m.EnableLeaseCheckpoint = mcfg.EnableLeaseCheckpoint
+ m.LeaseCheckpointInterval = mcfg.LeaseCheckpointInterval
+ m.LeaseCheckpointPersist = mcfg.LeaseCheckpointPersist
+
+ m.WatchProgressNotifyInterval = mcfg.WatchProgressNotifyInterval
+
+ m.InitialCorruptCheck = true
+ if mcfg.CorruptCheckTime > time.Duration(0) {
+ m.CorruptCheckTime = mcfg.CorruptCheckTime
+ }
+ m.WarningApplyDuration = embed.DefaultWarningApplyDuration
+ m.WarningUnaryRequestDuration = embed.DefaultWarningUnaryRequestDuration
+ m.ExperimentalMaxLearners = membership.DefaultMaxLearners
+ if mcfg.ExperimentalMaxLearners != 0 {
+ m.ExperimentalMaxLearners = mcfg.ExperimentalMaxLearners
+ }
+ m.V2Deprecation = config.V2_DEPR_DEFAULT
+ m.GRPCServerRecorder = &grpctesting.GRPCRecorder{}
+
+ m.Logger, m.LogObserver = memberLogger(t, mcfg.Name)
+ m.ServerFeatureGate = features.NewDefaultServerFeatureGate(m.Name, m.Logger)
+
+ m.StrictReconfigCheck = !mcfg.DisableStrictReconfigCheck
+ if err := m.listenGRPC(); err != nil {
+ t.Fatalf("listenGRPC FAILED: %v", err)
+ }
+ t.Cleanup(func() {
+ // if we don't clean up the logger, the next test
+ // might reuse this (t).
+ raft.ResetDefaultLogger()
+ })
+ return m
+}
+
+func memberLogger(t testutil.TB, name string) (*zap.Logger, *testutils.LogObserver) {
+ level := zapcore.InfoLevel
+ if os.Getenv("CLUSTER_DEBUG") != "" {
+ level = zapcore.DebugLevel
+ }
+
+ obCore, logOb := testutils.NewLogObserver(level)
+
+ options := zaptest.WrapOptions(
+ zap.Fields(zap.String("member", name)),
+
+ // copy logged entities to log observer
+ zap.WrapCore(func(oldCore zapcore.Core) zapcore.Core {
+ return zapcore.NewTee(oldCore, obCore)
+ }),
+ )
+ return zaptest.NewLogger(t, zaptest.Level(level), options).Named(name), logOb
+}
+
+// listenGRPC creates a grpc listener for the member, using a unix domain socket by default or tcp when UseTCP is set.
+func (m *Member) listenGRPC() error {
+ // prefix with localhost so cert has right domain
+ network, host, port := m.grpcAddr()
+ grpcAddr := net.JoinHostPort(host, port)
+ wd, err := os.Getwd()
+ if err != nil {
+ return err
+ }
+ m.Logger.Info("LISTEN GRPC", zap.String("grpcAddr", grpcAddr), zap.String("m.Name", m.Name), zap.String("workdir", wd))
+ grpcListener, err := net.Listen(network, grpcAddr)
+ if err != nil {
+ return fmt.Errorf("listen failed on grpc socket %s (%w)", grpcAddr, err)
+ }
+
+ addr := grpcListener.Addr().String()
+ _, port, err = net.SplitHostPort(addr)
+ if err != nil {
+ return fmt.Errorf("failed to parse grpc listen port from address %s (%w)", addr, err)
+ }
+ m.Port = port
+ m.GRPCURL = fmt.Sprintf("%s://%s", m.clientScheme(), addr)
+ m.Logger.Info("LISTEN GRPC SUCCESS", zap.String("grpcAddr", m.GRPCURL), zap.String("m.Name", m.Name),
+ zap.String("workdir", wd), zap.String("port", m.Port))
+
+ if m.UseBridge {
+ _, err = m.addBridge()
+ if err != nil {
+ grpcListener.Close()
+ return err
+ }
+ }
+ m.GRPCListener = grpcListener
+ return nil
+}
+
+func (m *Member) clientScheme() string {
+ switch {
+ case m.UseTCP && m.ClientTLSInfo != nil:
+ return "https"
+ case m.UseTCP && m.ClientTLSInfo == nil:
+ return "http"
+ case !m.UseTCP && m.ClientTLSInfo != nil:
+ return "unixs"
+ case !m.UseTCP && m.ClientTLSInfo == nil:
+ return "unix"
+ }
+ m.Logger.Panic("Failed to determine client schema")
+ return ""
+}
+
+func (m *Member) addBridge() (*bridge, error) {
+ network, host, port := m.grpcAddr()
+ grpcAddr := net.JoinHostPort(host, m.Port)
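+ // Derive a distinct bridge address: for unix sockets, append "0" to the member
+ // port to get a separate socket path; for tcp, let the OS pick a free port.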
+ bridgePort := fmt.Sprintf("%s%s", port, "0")
+ if m.UseTCP {
+ bridgePort = "0"
+ }
+ bridgeAddr := net.JoinHostPort(host, bridgePort)
+ m.Logger.Info("LISTEN BRIDGE", zap.String("grpc-address", bridgeAddr), zap.String("member", m.Name))
+ bridgeListener, err := transport.NewUnixListener(bridgeAddr)
+ if err != nil {
+ return nil, fmt.Errorf("listen failed on bridge socket %s (%w)", bridgeAddr, err)
+ }
+ m.GRPCBridge = newBridge(dialer{network: network, addr: grpcAddr}, bridgeListener)
+
+ addr := bridgeListener.Addr().String()
+ m.Logger.Info("LISTEN BRIDGE SUCCESS", zap.String("grpc-address", addr), zap.String("member", m.Name))
+ m.GRPCURL = m.clientScheme() + "://" + addr
+ return m.GRPCBridge, nil
+}
+
+func (m *Member) Bridge() *bridge {
+ if !m.UseBridge {
+ m.Logger.Panic("Bridge not available. Please configure using bridge before creating Cluster.")
+ }
+ return m.GRPCBridge
+}
+
+func (m *Member) grpcAddr() (network, host, port string) {
+ // prefix with localhost so cert has right domain
+ host = "localhost"
+ if m.UseIP { // for IP-only TLS certs
+ host = "127.0.0.1"
+ }
+ network = "unix"
+ if m.UseTCP {
+ network = "tcp"
+ }
+
+ if m.Port != "" {
+ return network, host, m.Port
+ }
+
+ port = m.Name
+ if m.UseTCP {
+ // let net.Listen choose the port automatically
+ port = fmt.Sprintf("%d", 0)
+ }
+ return network, host, port
+}
+
+func (m *Member) GRPCPortNumber() string {
+ return m.Port
+}
+
+type dialer struct {
+ network string
+ addr string
+}
+
+func (d dialer) Dial() (net.Conn, error) {
+ return net.Dial(d.network, d.addr)
+}
+
+func (m *Member) ElectionTimeout() time.Duration {
+ return time.Duration(m.Server.Cfg.ElectionTicks*int(m.Server.Cfg.TickMs)) * time.Millisecond
+}
+
+func (m *Member) ID() types.ID { return m.Server.MemberID() }
+
+// NewClientV3 creates a new grpc client connection to the member
+func NewClientV3(m *Member) (*clientv3.Client, error) {
+ if m.GRPCURL == "" {
+ return nil, fmt.Errorf("member not configured for grpc")
+ }
+
+ cfg := clientv3.Config{
+ Endpoints: []string{m.GRPCURL},
+ DialTimeout: 5 * time.Second,
+ DialOptions: []grpc.DialOption{grpc.WithBlock()},
+ MaxCallSendMsgSize: m.ClientMaxCallSendMsgSize,
+ MaxCallRecvMsgSize: m.ClientMaxCallRecvMsgSize,
+ Logger: m.Logger.Named("client"),
+ }
+
+ if m.ClientTLSInfo != nil {
+ tls, err := m.ClientTLSInfo.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ cfg.TLS = tls
+ }
+ if m.DialOptions != nil {
+ cfg.DialOptions = append(cfg.DialOptions, m.DialOptions...)
+ }
+ return newClientV3(cfg)
+}
+
+// Clone returns a member with the same server configuration. The returned
+// member will not have PeerListeners or ClientListeners set.
+func (m *Member) Clone(t testutil.TB) *Member {
+ mm := &Member{}
+ mm.ServerConfig = m.ServerConfig
+
+ var err error
+ clientURLStrs := m.ClientURLs.StringSlice()
+ mm.ClientURLs, err = types.NewURLs(clientURLStrs)
+ if err != nil {
+ // this should never fail
+ panic(err)
+ }
+ peerURLStrs := m.PeerURLs.StringSlice()
+ mm.PeerURLs, err = types.NewURLs(peerURLStrs)
+ if err != nil {
+ // this should never fail
+ panic(err)
+ }
+ clusterStr := m.InitialPeerURLsMap.String()
+ mm.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
+ if err != nil {
+ // this should never fail
+ panic(err)
+ }
+ mm.InitialClusterToken = m.InitialClusterToken
+ mm.ElectionTicks = m.ElectionTicks
+ mm.PeerTLSInfo = m.PeerTLSInfo
+ mm.ClientTLSInfo = m.ClientTLSInfo
+ mm.Logger, mm.LogObserver = memberLogger(t, mm.Name+"c")
+ return mm
+}
+
+// Launch starts a member based on ServerConfig, PeerListeners
+// and ClientListeners.
+func (m *Member) Launch() error {
+ m.Logger.Info(
+ "launching a member",
+ zap.String("name", m.Name),
+ zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
+ zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
+ zap.String("grpc-url", m.GRPCURL),
+ )
+ var err error
+ if m.Server, err = etcdserver.NewServer(m.ServerConfig); err != nil {
+ return fmt.Errorf("failed to initialize the etcd server: %w", err)
+ }
+ m.Server.SyncTicker = time.NewTicker(500 * time.Millisecond)
+ m.Server.Start()
+
+ var peerTLScfg *tls.Config
+ if m.PeerTLSInfo != nil && !m.PeerTLSInfo.Empty() {
+ if peerTLScfg, err = m.PeerTLSInfo.ServerConfig(); err != nil {
+ return err
+ }
+ }
+
+ if m.GRPCListener != nil {
+ var tlscfg *tls.Config
+ if m.ClientTLSInfo != nil && !m.ClientTLSInfo.Empty() {
+ tlscfg, err = m.ClientTLSInfo.ServerConfig()
+ if err != nil {
+ return err
+ }
+ }
+ m.GRPCServer = v3rpc.Server(m.Server, tlscfg, m.GRPCServerRecorder.UnaryInterceptor(), m.GRPCServerOpts...)
+ m.ServerClient = v3client.New(m.Server)
+ lockpb.RegisterLockServer(m.GRPCServer, v3lock.NewLockServer(m.ServerClient))
+ epb.RegisterElectionServer(m.GRPCServer, v3election.NewElectionServer(m.ServerClient))
+ go m.GRPCServer.Serve(m.GRPCListener)
+ }
+
+ m.RaftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.Logger, m.Server)}
+
+ h := (http.Handler)(m.RaftHandler)
+ if m.GRPCListener != nil {
+ h = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ m.RaftHandler.ServeHTTP(w, r)
+ })
+ }
+
+ for _, ln := range m.PeerListeners {
+ cm := cmux.New(ln)
+ // don't hang on matcher after closing listener
+ cm.SetReadTimeout(time.Second)
+
+ // serve http1/http2 rafthttp/grpc
+ ll := cm.Match(cmux.Any())
+ if peerTLScfg != nil {
+ if ll, err = transport.NewTLSListener(ll, m.PeerTLSInfo); err != nil {
+ return err
+ }
+ }
+ hs := &httptest.Server{
+ Listener: ll,
+ Config: &http.Server{
+ Handler: h,
+ TLSConfig: peerTLScfg,
+ ErrorLog: log.New(io.Discard, "net/http", 0),
+ },
+ TLS: peerTLScfg,
+ }
+ hs.Start()
+
+ donec := make(chan struct{})
+ go func() {
+ defer close(donec)
+ cm.Serve()
+ }()
+ closer := func() {
+ ll.Close()
+ hs.CloseClientConnections()
+ hs.Close()
+ <-donec
+ }
+ m.ServerClosers = append(m.ServerClosers, closer)
+ }
+ for _, ln := range m.ClientListeners {
+ handler := http.NewServeMux()
+ etcdhttp.HandleDebug(handler)
+ etcdhttp.HandleVersion(handler, m.Server)
+ etcdhttp.HandleMetrics(handler)
+ etcdhttp.HandleHealth(m.Logger, handler, m.Server)
+ hs := &httptest.Server{
+ Listener: ln,
+ Config: &http.Server{
+ Handler: handler,
+ ErrorLog: log.New(io.Discard, "net/http", 0),
+ },
+ }
+ if m.ClientTLSInfo == nil {
+ hs.Start()
+ } else {
+ info := m.ClientTLSInfo
+ hs.TLS, err = info.ServerConfig()
+ if err != nil {
+ return err
+ }
+
+ // baseConfig is called on initial TLS handshake start.
+ //
+ // Previously,
+ // 1. Server has non-empty (*tls.Config).Certificates on client hello
+ // 2. Server calls (*tls.Config).GetCertificate iff:
+ // - Server's (*tls.Config).Certificates is not empty, or
+ // - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName
+ //
+ // When (*tls.Config).Certificates is always populated on initial handshake,
+ // client is expected to provide a valid matching SNI to pass the TLS
+ // verification, thus trigger server (*tls.Config).GetCertificate to reload
+ // TLS assets. However, a cert whose SAN field does not include domain names
+ // but only IP addresses, has empty (*tls.ClientHelloInfo).ServerName, thus
+ // it was never able to trigger TLS reload on initial handshake; the first
+ // certificate object was being used and never updated.
+ //
+ // Now, (*tls.Config).Certificates is created empty on initial TLS client
+ // handshake, in order to trigger (*tls.Config).GetCertificate and populate
+ // rest of the certificates on every new TLS connection, even when client
+ // SNI is empty (e.g. cert only includes IPs).
+ //
+ // This introduces another problem with "httptest.Server":
+ // when the server's initial certificates are empty, certificates
+ // are overwritten by Go's internal test certs, which have
+ // different SAN fields (e.g. example.com). To work around,
+ // re-overwrite (*tls.Config).Certificates before starting
+ // test server.
+ tlsCert, nerr := tlsutil.NewCert(info.CertFile, info.KeyFile, nil)
+ if nerr != nil {
+ return nerr
+ }
+ hs.TLS.Certificates = []tls.Certificate{*tlsCert}
+
+ hs.StartTLS()
+ }
+ closer := func() {
+ ln.Close()
+ hs.CloseClientConnections()
+ hs.Close()
+ }
+ m.ServerClosers = append(m.ServerClosers, closer)
+ }
+ if m.GRPCURL != "" && m.Client == nil {
+ m.Client, err = NewClientV3(m)
+ if err != nil {
+ return err
+ }
+ }
+
+ m.Logger.Info(
+ "launched a member",
+ zap.String("name", m.Name),
+ zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
+ zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
+ zap.String("grpc-url", m.GRPCURL),
+ )
+ return nil
+}
+
+func (m *Member) RecordedRequests() []grpctesting.RequestInfo {
+ return m.GRPCServerRecorder.RecordedRequests()
+}
+
+func (m *Member) WaitOK(t testutil.TB) {
+ m.WaitStarted(t)
+ for m.Server.Leader() == 0 {
+ time.Sleep(framecfg.TickDuration)
+ }
+}
+
+func (m *Member) WaitStarted(t testutil.TB) {
+ for {
+ ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
+ _, err := m.Client.Get(ctx, "/", clientv3.WithSerializable())
+ if err != nil {
+ time.Sleep(framecfg.TickDuration)
+ continue
+ }
+ cancel()
+ break
+ }
+}
+
+func WaitClientV3(t testutil.TB, kv clientv3.KV) {
+ WaitClientV3WithKey(t, kv, "/")
+}
+
+func WaitClientV3WithKey(t testutil.TB, kv clientv3.KV, key string) {
+ timeout := time.Now().Add(RequestTimeout)
+ var err error
+ for time.Now().Before(timeout) {
+ ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
+ _, err = kv.Get(ctx, key)
+ cancel()
+ if err == nil {
+ return
+ }
+ time.Sleep(framecfg.TickDuration)
+ }
+ if err != nil {
+ t.Fatalf("timed out waiting for client: %v", err)
+ }
+}
+
+func (m *Member) URL() string { return m.ClientURLs[0].String() }
+
+func (m *Member) Pause() {
+ m.RaftHandler.Pause()
+ m.Server.PauseSending()
+}
+
+func (m *Member) Resume() {
+ m.RaftHandler.Resume()
+ m.Server.ResumeSending()
+}
+
+// Close stops the member's etcdserver and closes its connections.
+func (m *Member) Close() {
+ if m.GRPCBridge != nil {
+ m.GRPCBridge.Close()
+ m.GRPCBridge = nil
+ }
+ if m.ServerClient != nil {
+ m.ServerClient.Close()
+ m.ServerClient = nil
+ }
+ if m.GRPCServer != nil {
+ ch := make(chan struct{})
+ go func() {
+ defer close(ch)
+ // close listeners to stop accepting new connections,
+ // will block on any existing transports
+ m.GRPCServer.GracefulStop()
+ }()
+ // wait until all pending RPCs are finished
+ select {
+ case <-ch:
+ case <-time.After(2 * time.Second):
+ // took too long, manually close open transports
+ // e.g. watch streams
+ m.GRPCServer.Stop()
+ <-ch
+ }
+ m.GRPCServer = nil
+ }
+ if m.Server != nil {
+ m.Server.HardStop()
+ }
+ for _, f := range m.ServerClosers {
+ f()
+ }
+ if !m.Closed {
+ // Avoid verification of the same file multiple times
+ // (that might not exist any longer)
+ verify.MustVerifyIfEnabled(verify.Config{
+ Logger: m.Logger,
+ DataDir: m.DataDir,
+ ExactIndex: false,
+ })
+ }
+ m.Closed = true
+}
+
+// Stop stops the member, but the data dir of the member is preserved.
+func (m *Member) Stop(_ testutil.TB) {
+ m.Logger.Info(
+ "stopping a member",
+ zap.String("name", m.Name),
+ zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
+ zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
+ zap.String("grpc-url", m.GRPCURL),
+ )
+ m.Close()
+ m.ServerClosers = nil
+ m.Logger.Info(
+ "stopped a member",
+ zap.String("name", m.Name),
+ zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
+ zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
+ zap.String("grpc-url", m.GRPCURL),
+ )
+}
+
+// CheckLeaderTransition waits for leader transition, returning the new leader ID.
+func CheckLeaderTransition(m *Member, oldLead uint64) uint64 {
+ interval := time.Duration(m.Server.Cfg.TickMs) * time.Millisecond
+ for m.Server.Lead() == 0 || (m.Server.Lead() == oldLead) {
+ time.Sleep(interval)
+ }
+ return m.Server.Lead()
+}
+
+// StopNotify unblocks when a member stop completes
+func (m *Member) StopNotify() <-chan struct{} {
+ return m.Server.StopNotify()
+}
+
+// Restart starts the member using the preserved data dir.
+func (m *Member) Restart(t testutil.TB) error {
+ m.Logger.Info(
+ "restarting a member",
+ zap.String("name", m.Name),
+ zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
+ zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
+ zap.String("grpc-url", m.GRPCURL),
+ )
+ newPeerListeners := make([]net.Listener, 0)
+ for _, ln := range m.PeerListeners {
+ newPeerListeners = append(newPeerListeners, NewListenerWithAddr(t, ln.Addr().String()))
+ }
+ m.PeerListeners = newPeerListeners
+ newClientListeners := make([]net.Listener, 0)
+ for _, ln := range m.ClientListeners {
+ newClientListeners = append(newClientListeners, NewListenerWithAddr(t, ln.Addr().String()))
+ }
+ m.ClientListeners = newClientListeners
+
+ if m.GRPCListener != nil {
+ if err := m.listenGRPC(); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ err := m.Launch()
+ m.Logger.Info(
+ "restarted a member",
+ zap.String("name", m.Name),
+ zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
+ zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
+ zap.String("grpc-url", m.GRPCURL),
+ zap.Error(err),
+ )
+ return err
+}
+
+// Terminate stops the member and removes the data dir.
+func (m *Member) Terminate(t testutil.TB) {
+ m.Logger.Info(
+ "terminating a member",
+ zap.String("name", m.Name),
+ zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
+ zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
+ zap.String("grpc-url", m.GRPCURL),
+ )
+ m.Close()
+ if !m.KeepDataDirTerminate {
+ if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil {
+ t.Fatal(err)
+ }
+ }
+ m.Logger.Info(
+ "terminated a member",
+ zap.String("name", m.Name),
+ zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
+ zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
+ zap.String("grpc-url", m.GRPCURL),
+ )
+}
+
+// Metric gets the metric value for a member
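+// For example (illustrative; the metric name is assumed to be exposed by the member):
+//
+//	v, err := m.Metric("etcd_server_proposals_committed_total")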
+func (m *Member) Metric(metricName string, expectLabels ...string) (string, error) {
+ cfgtls := transport.TLSInfo{}
+ tr, err := transport.NewTimeoutTransport(cfgtls, time.Second, time.Second, time.Second)
+ if err != nil {
+ return "", err
+ }
+ cli := &http.Client{Transport: tr}
+ resp, err := cli.Get(m.ClientURLs[0].String() + "/metrics")
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ b, rerr := io.ReadAll(resp.Body)
+ if rerr != nil {
+ return "", rerr
+ }
+ lines := strings.Split(string(b), "\n")
+ for _, l := range lines {
+ if !strings.HasPrefix(l, metricName) {
+ continue
+ }
+ ok := true
+ for _, lv := range expectLabels {
+ if !strings.Contains(l, lv) {
+ ok = false
+ break
+ }
+ }
+ if !ok {
+ continue
+ }
+ return strings.Split(l, " ")[1], nil
+ }
+ return "", nil
+}
+
+// InjectPartition drops connections from m to others, and vice versa.
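+// Illustrative sketch of injecting and later healing a partition around member 0:
+//
+//	clus.Members[0].InjectPartition(t, clus.Members[1:]...)
+//	// ... exercise the cluster ...
+//	clus.Members[0].RecoverPartition(t, clus.Members[1:]...)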
+func (m *Member) InjectPartition(t testutil.TB, others ...*Member) {
+ for _, other := range others {
+ m.Server.CutPeer(other.Server.MemberID())
+ other.Server.CutPeer(m.Server.MemberID())
+ t.Logf("network partition injected between: %v <-> %v", m.Server.MemberID(), other.Server.MemberID())
+ }
+}
+
+// RecoverPartition recovers connections from m to others, and vice versa.
+func (m *Member) RecoverPartition(t testutil.TB, others ...*Member) {
+ for _, other := range others {
+ m.Server.MendPeer(other.Server.MemberID())
+ other.Server.MendPeer(m.Server.MemberID())
+ t.Logf("network partition between: %v <-> %v", m.Server.MemberID(), other.Server.MemberID())
+ }
+}
+
+func (m *Member) ReadyNotify() <-chan struct{} {
+ return m.Server.ReadyNotify()
+}
+
+type SortableMemberSliceByPeerURLs []*pb.Member
+
+func (p SortableMemberSliceByPeerURLs) Len() int { return len(p) }
+func (p SortableMemberSliceByPeerURLs) Less(i, j int) bool {
+ return p[i].PeerURLs[0] < p[j].PeerURLs[0]
+}
+func (p SortableMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// NewCluster returns a launched Cluster with a grpc client connection
+// for each Cluster member.
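+//
+// Typical usage (illustrative sketch):
+//
+//	clus := NewCluster(t, &ClusterConfig{Size: 3})
+//	defer clus.Terminate(t)
+//	cli := clus.RandClient()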
+func NewCluster(t testutil.TB, cfg *ClusterConfig) *Cluster {
+ t.Helper()
+
+ assertInTestContext(t)
+
+ testutil.SkipTestIfShortMode(t, "Cannot start etcd Cluster in --short tests")
+
+ c := &Cluster{Cfg: cfg}
+ ms := make([]*Member, cfg.Size)
+ for i := 0; i < cfg.Size; i++ {
+ ms[i] = c.MustNewMember(t)
+ }
+ c.Members = ms
+ if err := c.fillClusterForMembers(); err != nil {
+ t.Fatalf("fillClusterForMembers failed: %v", err)
+ }
+ c.Launch(t)
+
+ return c
+}
+
+func (c *Cluster) TakeClient(idx int) {
+ c.mu.Lock()
+ c.Members[idx].Client = nil
+ c.mu.Unlock()
+}
+
+func (c *Cluster) Terminate(t testutil.TB) {
+ if t != nil {
+ t.Logf("========= Cluster termination started =====================")
+ }
+ for _, m := range c.Members {
+ if m.Client != nil {
+ m.Client.Close()
+ }
+ }
+ var wg sync.WaitGroup
+ wg.Add(len(c.Members))
+ for _, m := range c.Members {
+ go func(mm *Member) {
+ defer wg.Done()
+ mm.Terminate(t)
+ }(m)
+ }
+ wg.Wait()
+ if t != nil {
+ t.Logf("========= Cluster termination succeeded ===================")
+ }
+}
+
+func (c *Cluster) RandClient() *clientv3.Client {
+ return c.Members[rand.Intn(len(c.Members))].Client
+}
+
+func (c *Cluster) Client(i int) *clientv3.Client {
+ return c.Members[i].Client
+}
+
+func (c *Cluster) Endpoints() []string {
+ var endpoints []string
+ for _, m := range c.Members {
+ endpoints = append(endpoints, m.GRPCURL)
+ }
+ return endpoints
+}
+
+func (c *Cluster) ClusterClient(t testing.TB, opts ...framecfg.ClientOption) (client *clientv3.Client, err error) {
+ cfg, err := c.newClientCfg()
+ if err != nil {
+ return nil, err
+ }
+ for _, opt := range opts {
+ opt(cfg)
+ }
+ client, err = newClientV3(*cfg)
+ if err != nil {
+ return nil, err
+ }
+ t.Cleanup(func() {
+ client.Close()
+ })
+ return client, nil
+}
+
+func WithAuth(userName, password string) framecfg.ClientOption {
+ return func(c any) {
+ cfg := c.(*clientv3.Config)
+ cfg.Username = userName
+ cfg.Password = password
+ }
+}
+
+func WithEndpoints(endpoints []string) framecfg.ClientOption {
+ return func(c any) {
+ cfg := c.(*clientv3.Config)
+ cfg.Endpoints = endpoints
+ }
+}
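+
+// Illustrative sketch of combining client options with ClusterClient
+// (the credentials below are hypothetical):
+//
+//	cli, err := clus.ClusterClient(t, WithAuth("root", "rootPassword"), WithEndpoints(clus.Endpoints()[:1]))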
+
+func (c *Cluster) newClientCfg() (*clientv3.Config, error) {
+ cfg := &clientv3.Config{
+ Endpoints: c.Endpoints(),
+ DialTimeout: 5 * time.Second,
+ DialOptions: []grpc.DialOption{grpc.WithBlock()},
+ MaxCallSendMsgSize: c.Cfg.ClientMaxCallSendMsgSize,
+ MaxCallRecvMsgSize: c.Cfg.ClientMaxCallRecvMsgSize,
+ }
+ if c.Cfg.ClientTLS != nil {
+ tls, err := c.Cfg.ClientTLS.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ cfg.TLS = tls
+ }
+ return cfg, nil
+}
+
+// NewClientV3 creates a new grpc client connection to the member
+func (c *Cluster) NewClientV3(memberIndex int) (*clientv3.Client, error) {
+ return NewClientV3(c.Members[memberIndex])
+}
+
+func makeClients(t testutil.TB, clus *Cluster, clients *[]*clientv3.Client, chooseMemberIndex func() int) func() *clientv3.Client {
+ var mu sync.Mutex
+ *clients = nil
+ return func() *clientv3.Client {
+ cli, err := clus.NewClientV3(chooseMemberIndex())
+ if err != nil {
+ t.Fatalf("cannot create client: %v", err)
+ }
+ mu.Lock()
+ *clients = append(*clients, cli)
+ mu.Unlock()
+ return cli
+ }
+}
+
+// MakeSingleNodeClients creates a factory of clients that all connect to member 0.
+// All the created clients are put on the 'clients' list. The factory is thread-safe.
+func MakeSingleNodeClients(t testutil.TB, clus *Cluster, clients *[]*clientv3.Client) func() *clientv3.Client {
+ return makeClients(t, clus, clients, func() int { return 0 })
+}
+
+// MakeMultiNodeClients creates a factory of clients that all connect to random members.
+// All the created clients are put on the 'clients' list. The factory is thread-safe.
+func MakeMultiNodeClients(t testutil.TB, clus *Cluster, clients *[]*clientv3.Client) func() *clientv3.Client {
+ return makeClients(t, clus, clients, func() int { return rand.Intn(len(clus.Members)) })
+}
+
+// CloseClients closes all the clients from the 'clients' list.
+func CloseClients(t testutil.TB, clients []*clientv3.Client) {
+ for _, cli := range clients {
+ if err := cli.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
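+
+// Illustrative sketch of the client-factory helpers above; the deferred closure
+// ensures the final 'clients' slice is used when the test ends:
+//
+//	var clients []*clientv3.Client
+//	nextClient := MakeSingleNodeClients(t, clus, &clients)
+//	defer func() { CloseClients(t, clients) }()
+//	cli := nextClient()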
+
+type GRPCAPI struct {
+ // Cluster is the Cluster API for the client's connection.
+ Cluster pb.ClusterClient
+ // KV is the keyvalue API for the client's connection.
+ KV pb.KVClient
+ // Lease is the lease API for the client's connection.
+ Lease pb.LeaseClient
+ // Watch is the watch API for the client's connection.
+ Watch pb.WatchClient
+ // Maintenance is the maintenance API for the client's connection.
+ Maintenance pb.MaintenanceClient
+ // Auth is the authentication API for the client's connection.
+ Auth pb.AuthClient
+ // Lock is the lock API for the client's connection.
+ Lock lockpb.LockClient
+ // Election is the election API for the client's connection.
+ Election epb.ElectionClient
+}
+
+// GetLearnerMembers returns the list of learner members in the Cluster using the MemberList API.
+func (c *Cluster) GetLearnerMembers() ([]*pb.Member, error) {
+ cli := c.Client(0)
+ resp, err := cli.MemberList(context.Background())
+ if err != nil {
+ return nil, fmt.Errorf("failed to list member %w", err)
+ }
+ var learners []*pb.Member
+ for _, m := range resp.Members {
+ if m.IsLearner {
+ learners = append(learners, m)
+ }
+ }
+ return learners, nil
+}
+
+// AddAndLaunchLearnerMember creates a learner member, adds it to the Cluster
+// via the v3 MemberAdd API, and then launches the new member.
+func (c *Cluster) AddAndLaunchLearnerMember(t testutil.TB) {
+ m := c.MustNewMember(t)
+ m.IsLearner = true
+
+ scheme := SchemeFromTLSInfo(c.Cfg.PeerTLS)
+ peerURLs := []string{scheme + "://" + m.PeerListeners[0].Addr().String()}
+
+ cli := c.Client(0)
+ _, err := cli.MemberAddAsLearner(context.Background(), peerURLs)
+ if err != nil {
+ t.Fatalf("failed to add learner member %v", err)
+ }
+
+ m.InitialPeerURLsMap = types.URLsMap{}
+ for _, mm := range c.Members {
+ m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs
+ }
+ m.InitialPeerURLsMap[m.Name] = m.PeerURLs
+ m.NewCluster = false
+
+ if err := m.Launch(); err != nil {
+ t.Fatal(err)
+ }
+
+ c.Members = append(c.Members, m)
+
+ c.waitMembersMatch(t)
+}
+
+// getMembers returns a list of members in the Cluster, in the format of etcdserverpb.Member.
+func (c *Cluster) getMembers() []*pb.Member {
+ var mems []*pb.Member
+ for _, m := range c.Members {
+ mem := &pb.Member{
+ Name: m.Name,
+ PeerURLs: m.PeerURLs.StringSlice(),
+ ClientURLs: m.ClientURLs.StringSlice(),
+ IsLearner: m.IsLearner,
+ }
+ mems = append(mems, mem)
+ }
+ return mems
+}
+
+// waitMembersMatch waits until v3rpc MemberList returns the 'same' members info as the
+// local 'c.Members', which is the local record of members in the testing Cluster, with
+// the exception that the local record c.Members does not have info on Member.ID, which
+// is generated when the member is added to the Cluster.
+//
+// Note:
+// A successful match means the Member.clientURLs are matched. This means the member has already
+// finished publishing its server attributes to the Cluster. Publishing attributes is a Cluster-wide
+// write request (in the v2 server). Therefore, at this point, any raft log entries prior to this
+// would have already been applied.
+//
+// If a new member was added to an existing Cluster, at this point it has finished publishing
+// its own server attributes to the Cluster. Therefore, by the same argument, it has already
+// applied the raft log entries (especially those of type raftpb.ConfChangeType). At this point,
+// the new member has the correct view of the Cluster configuration.
+//
+// Special note on learner members:
+// A learner member is only added to a Cluster via the v3rpc MemberAdd API (as of v3.4). When
+// starting the learner member, its initial view of the Cluster created from the peerURLs map
+// does not record whether the new member itself is a learner. But at this point, a successful
+// match does indicate that the new learner member has applied the raftpb.ConfChangeAddLearnerNode
+// entry which was used to add the learner itself to the Cluster, and therefore it has the correct
+// learner info.
+func (c *Cluster) waitMembersMatch(t testutil.TB) {
+ wMembers := c.getMembers()
+ sort.Sort(SortableProtoMemberSliceByPeerURLs(wMembers))
+ cli := c.Client(0)
+ for {
+ resp, err := cli.MemberList(context.Background())
+ if err != nil {
+ t.Fatalf("failed to list member %v", err)
+ }
+
+ if len(resp.Members) != len(wMembers) {
+ continue
+ }
+ sort.Sort(SortableProtoMemberSliceByPeerURLs(resp.Members))
+ for _, m := range resp.Members {
+ m.ID = 0
+ }
+ if reflect.DeepEqual(resp.Members, wMembers) {
+ return
+ }
+
+ time.Sleep(framecfg.TickDuration)
+ }
+}
+
+type SortableProtoMemberSliceByPeerURLs []*pb.Member
+
+func (p SortableProtoMemberSliceByPeerURLs) Len() int { return len(p) }
+func (p SortableProtoMemberSliceByPeerURLs) Less(i, j int) bool {
+ return p[i].PeerURLs[0] < p[j].PeerURLs[0]
+}
+func (p SortableProtoMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// InitializeMemberWithResponse initializes a member using the MemberAdd response.
+func (c *Cluster) InitializeMemberWithResponse(t testutil.TB, m *Member, resp *clientv3.MemberAddResponse) {
+ m.IsLearner = resp.Member.IsLearner
+ m.NewCluster = false
+
+ m.InitialPeerURLsMap = types.URLsMap{}
+ for _, mm := range c.Members {
+ m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs
+ }
+ m.InitialPeerURLsMap[m.Name] = types.MustNewURLs(resp.Member.PeerURLs)
+ c.Members = append(c.Members, m)
+}
diff --git a/tests/framework/integration/cluster_direct.go b/tests/framework/integration/cluster_direct.go
new file mode 100644
index 00000000000..71c2de5a5fa
--- /dev/null
+++ b/tests/framework/integration/cluster_direct.go
@@ -0,0 +1,43 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !cluster_proxy
+
+package integration
+
+import (
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
+)
+
+const ThroughProxy = false
+
+func ToGRPC(c *clientv3.Client) GRPCAPI {
+ return GRPCAPI{
+ pb.NewClusterClient(c.ActiveConnection()),
+ pb.NewKVClient(c.ActiveConnection()),
+ pb.NewLeaseClient(c.ActiveConnection()),
+ pb.NewWatchClient(c.ActiveConnection()),
+ pb.NewMaintenanceClient(c.ActiveConnection()),
+ pb.NewAuthClient(c.ActiveConnection()),
+ v3lockpb.NewLockClient(c.ActiveConnection()),
+ v3electionpb.NewElectionClient(c.ActiveConnection()),
+ }
+}
+
+func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) {
+ return clientv3.New(cfg)
+}
diff --git a/tests/framework/integration/cluster_proxy.go b/tests/framework/integration/cluster_proxy.go
new file mode 100644
index 00000000000..0ba8b59d02f
--- /dev/null
+++ b/tests/framework/integration/cluster_proxy.go
@@ -0,0 +1,129 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build cluster_proxy
+
+package integration
+
+import (
+ "context"
+ "sync"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/client/v3/namespace"
+ "go.etcd.io/etcd/server/v3/proxy/grpcproxy"
+ "go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter"
+)
+
+const ThroughProxy = true
+
+var (
+ pmu sync.Mutex
+ proxies map[*clientv3.Client]grpcClientProxy = make(map[*clientv3.Client]grpcClientProxy)
+)
+
+const proxyNamespace = "proxy-namespace"
+
+type grpcClientProxy struct {
+ ctx context.Context
+ ctxCancel func()
+ grpc GRPCAPI
+ wdonec <-chan struct{}
+ kvdonec <-chan struct{}
+ lpdonec <-chan struct{}
+}
+
+func ToGRPC(c *clientv3.Client) GRPCAPI {
+ pmu.Lock()
+ defer pmu.Unlock()
+
+ // dedicated context bound to the 'grpc-proxy' lifetime
+ // (so, in practice, the lifetime of the client connection to the proxy).
+ // TODO: Refactor to a separate clientv3.Client instance instead of the context alone.
+ ctx, ctxCancel := context.WithCancel(context.WithValue(context.TODO(), "_name", "grpcProxyContext"))
+
+ lg := c.GetLogger()
+
+ if v, ok := proxies[c]; ok {
+ return v.grpc
+ }
+
+ // test namespacing proxy
+ c.KV = namespace.NewKV(c.KV, proxyNamespace)
+ c.Watcher = namespace.NewWatcher(c.Watcher, proxyNamespace)
+ c.Lease = namespace.NewLease(c.Lease, proxyNamespace)
+ // test coalescing/caching proxy
+ kvp, kvpch := grpcproxy.NewKvProxy(c)
+ wp, wpch := grpcproxy.NewWatchProxy(ctx, lg, c)
+ lp, lpch := grpcproxy.NewLeaseProxy(ctx, c)
+ mp := grpcproxy.NewMaintenanceProxy(c)
+ clp, _ := grpcproxy.NewClusterProxy(lg, c, "", "") // without registering proxy URLs
+ authp := grpcproxy.NewAuthProxy(c)
+ lockp := grpcproxy.NewLockProxy(c)
+ electp := grpcproxy.NewElectionProxy(c)
+
+ grpc := GRPCAPI{
+ adapter.ClusterServerToClusterClient(clp),
+ adapter.KvServerToKvClient(kvp),
+ adapter.LeaseServerToLeaseClient(lp),
+ adapter.WatchServerToWatchClient(wp),
+ adapter.MaintenanceServerToMaintenanceClient(mp),
+ adapter.AuthServerToAuthClient(authp),
+ adapter.LockServerToLockClient(lockp),
+ adapter.ElectionServerToElectionClient(electp),
+ }
+ proxies[c] = grpcClientProxy{ctx: ctx, ctxCancel: ctxCancel, grpc: grpc, wdonec: wpch, kvdonec: kvpch, lpdonec: lpch}
+ return grpc
+}
+
+type proxyCloser struct {
+ clientv3.Watcher
+ proxyCtxCancel func()
+ wdonec <-chan struct{}
+ kvdonec <-chan struct{}
+ lclose func()
+ lpdonec <-chan struct{}
+}
+
+func (pc *proxyCloser) Close() error {
+ pc.proxyCtxCancel()
+ <-pc.kvdonec
+ err := pc.Watcher.Close()
+ <-pc.wdonec
+ pc.lclose()
+ <-pc.lpdonec
+ return err
+}
+
+func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) {
+ c, err := clientv3.New(cfg)
+ if err != nil {
+ return nil, err
+ }
+ rpc := ToGRPC(c)
+ c.KV = clientv3.NewKVFromKVClient(rpc.KV, c)
+ pmu.Lock()
+ lc := c.Lease
+ c.Lease = clientv3.NewLeaseFromLeaseClient(rpc.Lease, c, cfg.DialTimeout)
+ c.Watcher = &proxyCloser{
+ Watcher: clientv3.NewWatchFromWatchClient(rpc.Watch, c),
+ wdonec: proxies[c].wdonec,
+ kvdonec: proxies[c].kvdonec,
+ lclose: func() { lc.Close() },
+ lpdonec: proxies[c].lpdonec,
+ proxyCtxCancel: proxies[c].ctxCancel,
+ }
+ pmu.Unlock()
+ return c, nil
+}
diff --git a/tests/framework/integration/integration.go b/tests/framework/integration/integration.go
new file mode 100644
index 00000000000..6e5de0cd528
--- /dev/null
+++ b/tests/framework/integration/integration.go
@@ -0,0 +1,427 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package integration
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "testing"
+
+ "go.uber.org/zap"
+ healthpb "google.golang.org/grpc/health/grpc_health_v1"
+
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ etcdctlcmd "go.etcd.io/etcd/etcdctl/v3/ctlv3/command"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ intf "go.etcd.io/etcd/tests/v3/framework/interfaces"
+)
+
+type integrationRunner struct{}
+
+func NewIntegrationRunner() intf.TestRunner {
+ return &integrationRunner{}
+}
+
+func (e integrationRunner) TestMain(m *testing.M) {
+ testutil.MustTestMainWithLeakDetection(m)
+}
+
+func (e integrationRunner) BeforeTest(t testing.TB) {
+ BeforeTest(t)
+}
+
+func (e integrationRunner) NewCluster(ctx context.Context, t testing.TB, opts ...config.ClusterOption) intf.Cluster {
+ var err error
+ cfg := config.NewClusterConfig(opts...)
+ integrationCfg := ClusterConfig{
+ Size: cfg.ClusterSize,
+ QuotaBackendBytes: cfg.QuotaBackendBytes,
+ DisableStrictReconfigCheck: !cfg.StrictReconfigCheck,
+ AuthToken: cfg.AuthToken,
+ SnapshotCount: cfg.SnapshotCount,
+ }
+ integrationCfg.ClientTLS, err = tlsInfo(t, cfg.ClientTLS)
+ if err != nil {
+ t.Fatalf("ClientTLS: %s", err)
+ }
+ integrationCfg.PeerTLS, err = tlsInfo(t, cfg.PeerTLS)
+ if err != nil {
+ t.Fatalf("PeerTLS: %s", err)
+ }
+ return &integrationCluster{
+ Cluster: NewCluster(t, &integrationCfg),
+ t: t,
+ ctx: ctx,
+ }
+}
+
+func tlsInfo(t testing.TB, cfg config.TLSConfig) (*transport.TLSInfo, error) {
+ switch cfg {
+ case config.NoTLS:
+ return nil, nil
+ case config.AutoTLS:
+ tls, err := transport.SelfCert(zap.NewNop(), t.TempDir(), []string{"localhost"}, 1)
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate cert: %w", err)
+ }
+ return &tls, nil
+ case config.ManualTLS:
+ return &TestTLSInfo, nil
+ default:
+ return nil, fmt.Errorf("config %q not supported", cfg)
+ }
+}
+
+type integrationCluster struct {
+ *Cluster
+ t testing.TB
+ ctx context.Context
+}
+
+func (c *integrationCluster) Members() (ms []intf.Member) {
+ for _, m := range c.Cluster.Members {
+ ms = append(ms, integrationMember{Member: m, t: c.t})
+ }
+ return ms
+}
+
+type integrationMember struct {
+ *Member
+ t testing.TB
+}
+
+func (m integrationMember) Client() intf.Client {
+ return integrationClient{Client: m.Member.Client}
+}
+
+func (m integrationMember) Start(ctx context.Context) error {
+ return m.Member.Restart(m.t)
+}
+
+func (m integrationMember) Stop() {
+ m.Member.Stop(m.t)
+}
+
+func (c *integrationCluster) Close() error {
+ c.Terminate(c.t)
+ return nil
+}
+
+func (c *integrationCluster) Client(opts ...config.ClientOption) (intf.Client, error) {
+ cc, err := c.ClusterClient(c.t, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return integrationClient{Client: cc}, nil
+}
+
+type integrationClient struct {
+ *clientv3.Client
+}
+
+func (c integrationClient) Get(ctx context.Context, key string, o config.GetOptions) (*clientv3.GetResponse, error) {
+ if o.Timeout != 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, o.Timeout)
+ defer cancel()
+ }
+ var clientOpts []clientv3.OpOption
+ if o.Revision != 0 {
+ clientOpts = append(clientOpts, clientv3.WithRev(int64(o.Revision)))
+ }
+ if o.End != "" {
+ clientOpts = append(clientOpts, clientv3.WithRange(o.End))
+ }
+ if o.Serializable {
+ clientOpts = append(clientOpts, clientv3.WithSerializable())
+ }
+ if o.Prefix {
+ clientOpts = append(clientOpts, clientv3.WithPrefix())
+ }
+ if o.Limit != 0 {
+ clientOpts = append(clientOpts, clientv3.WithLimit(int64(o.Limit)))
+ }
+ if o.FromKey {
+ clientOpts = append(clientOpts, clientv3.WithFromKey())
+ }
+ if o.CountOnly {
+ clientOpts = append(clientOpts, clientv3.WithCountOnly())
+ }
+ if o.SortBy != clientv3.SortByKey || o.Order != clientv3.SortNone {
+ clientOpts = append(clientOpts, clientv3.WithSort(o.SortBy, o.Order))
+ }
+ return c.Client.Get(ctx, key, clientOpts...)
+}
+
+func (c integrationClient) Put(ctx context.Context, key, value string, opts config.PutOptions) error {
+ if opts.Timeout != 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, opts.Timeout)
+ defer cancel()
+ }
+ var clientOpts []clientv3.OpOption
+ if opts.LeaseID != 0 {
+ clientOpts = append(clientOpts, clientv3.WithLease(opts.LeaseID))
+ }
+ _, err := c.Client.Put(ctx, key, value, clientOpts...)
+ return err
+}
+
+func (c integrationClient) Delete(ctx context.Context, key string, o config.DeleteOptions) (*clientv3.DeleteResponse, error) {
+ var clientOpts []clientv3.OpOption
+ if o.Prefix {
+ clientOpts = append(clientOpts, clientv3.WithPrefix())
+ }
+ if o.FromKey {
+ clientOpts = append(clientOpts, clientv3.WithFromKey())
+ }
+ if o.End != "" {
+ clientOpts = append(clientOpts, clientv3.WithRange(o.End))
+ }
+ return c.Client.Delete(ctx, key, clientOpts...)
+}
+
+func (c integrationClient) Compact(ctx context.Context, rev int64, o config.CompactOption) (*clientv3.CompactResponse, error) {
+ if o.Timeout != 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, o.Timeout)
+ defer cancel()
+ }
+ var clientOpts []clientv3.CompactOption
+ if o.Physical {
+ clientOpts = append(clientOpts, clientv3.WithCompactPhysical())
+ }
+ return c.Client.Compact(ctx, rev, clientOpts...)
+}
+
+func (c integrationClient) Status(ctx context.Context) ([]*clientv3.StatusResponse, error) {
+ endpoints := c.Client.Endpoints()
+ var resp []*clientv3.StatusResponse
+ for _, ep := range endpoints {
+ status, err := c.Client.Status(ctx, ep)
+ if err != nil {
+ return nil, err
+ }
+ resp = append(resp, status)
+ }
+ return resp, nil
+}
+
+func (c integrationClient) HashKV(ctx context.Context, rev int64) ([]*clientv3.HashKVResponse, error) {
+ endpoints := c.Client.Endpoints()
+ var resp []*clientv3.HashKVResponse
+ for _, ep := range endpoints {
+ hashKV, err := c.Client.HashKV(ctx, ep, rev)
+ if err != nil {
+ return nil, err
+ }
+ resp = append(resp, hashKV)
+ }
+ return resp, nil
+}
+
+func (c integrationClient) Health(ctx context.Context) error {
+ cli := healthpb.NewHealthClient(c.Client.ActiveConnection())
+ resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{})
+ if err != nil {
+ return err
+ }
+ if resp.Status != healthpb.HealthCheckResponse_SERVING {
+ return fmt.Errorf("status expected %s, got %s", healthpb.HealthCheckResponse_SERVING, resp.Status)
+ }
+ return nil
+}
+
+func (c integrationClient) Defragment(ctx context.Context, o config.DefragOption) error {
+ if o.Timeout != 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, o.Timeout)
+ defer cancel()
+ }
+ for _, ep := range c.Endpoints() {
+ _, err := c.Client.Defragment(ctx, ep)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c integrationClient) TimeToLive(ctx context.Context, id clientv3.LeaseID, o config.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) {
+ var leaseOpts []clientv3.LeaseOption
+ if o.WithAttachedKeys {
+ leaseOpts = append(leaseOpts, clientv3.WithAttachedKeys())
+ }
+
+ return c.Client.TimeToLive(ctx, id, leaseOpts...)
+}
+
+func (c integrationClient) Leases(ctx context.Context) (*clientv3.LeaseLeasesResponse, error) {
+ return c.Client.Leases(ctx)
+}
+
+func (c integrationClient) KeepAliveOnce(ctx context.Context, id clientv3.LeaseID) (*clientv3.LeaseKeepAliveResponse, error) {
+ return c.Client.KeepAliveOnce(ctx, id)
+}
+
+func (c integrationClient) Revoke(ctx context.Context, id clientv3.LeaseID) (*clientv3.LeaseRevokeResponse, error) {
+ return c.Client.Revoke(ctx, id)
+}
+
+func (c integrationClient) AuthEnable(ctx context.Context) error {
+ _, err := c.Client.AuthEnable(ctx)
+ return err
+}
+
+func (c integrationClient) AuthDisable(ctx context.Context) error {
+ _, err := c.Client.AuthDisable(ctx)
+ return err
+}
+
+func (c integrationClient) AuthStatus(ctx context.Context) (*clientv3.AuthStatusResponse, error) {
+ return c.Client.AuthStatus(ctx)
+}
+
+func (c integrationClient) UserAdd(ctx context.Context, name, password string, opts config.UserAddOptions) (*clientv3.AuthUserAddResponse, error) {
+ return c.Client.UserAddWithOptions(ctx, name, password, &clientv3.UserAddOptions{
+ NoPassword: opts.NoPassword,
+ })
+}
+
+func (c integrationClient) UserGet(ctx context.Context, name string) (*clientv3.AuthUserGetResponse, error) {
+ return c.Client.UserGet(ctx, name)
+}
+
+func (c integrationClient) UserList(ctx context.Context) (*clientv3.AuthUserListResponse, error) {
+ return c.Client.UserList(ctx)
+}
+
+func (c integrationClient) UserDelete(ctx context.Context, name string) (*clientv3.AuthUserDeleteResponse, error) {
+ return c.Client.UserDelete(ctx, name)
+}
+
+func (c integrationClient) UserChangePass(ctx context.Context, user, newPass string) error {
+ _, err := c.Client.UserChangePassword(ctx, user, newPass)
+ return err
+}
+
+func (c integrationClient) UserGrantRole(ctx context.Context, user string, role string) (*clientv3.AuthUserGrantRoleResponse, error) {
+ return c.Client.UserGrantRole(ctx, user, role)
+}
+
+func (c integrationClient) UserRevokeRole(ctx context.Context, user string, role string) (*clientv3.AuthUserRevokeRoleResponse, error) {
+ return c.Client.UserRevokeRole(ctx, user, role)
+}
+
+func (c integrationClient) RoleAdd(ctx context.Context, name string) (*clientv3.AuthRoleAddResponse, error) {
+ return c.Client.RoleAdd(ctx, name)
+}
+
+func (c integrationClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType clientv3.PermissionType) (*clientv3.AuthRoleGrantPermissionResponse, error) {
+ return c.Client.RoleGrantPermission(ctx, name, key, rangeEnd, permType)
+}
+
+func (c integrationClient) RoleGet(ctx context.Context, role string) (*clientv3.AuthRoleGetResponse, error) {
+ return c.Client.RoleGet(ctx, role)
+}
+
+func (c integrationClient) RoleList(ctx context.Context) (*clientv3.AuthRoleListResponse, error) {
+ return c.Client.RoleList(ctx)
+}
+
+func (c integrationClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*clientv3.AuthRoleRevokePermissionResponse, error) {
+ return c.Client.RoleRevokePermission(ctx, role, key, rangeEnd)
+}
+
+func (c integrationClient) RoleDelete(ctx context.Context, role string) (*clientv3.AuthRoleDeleteResponse, error) {
+ return c.Client.RoleDelete(ctx, role)
+}
+
+func (c integrationClient) Txn(ctx context.Context, compares, ifSucess, ifFail []string, o config.TxnOptions) (*clientv3.TxnResponse, error) {
+ txn := c.Client.Txn(ctx)
+ var cmps []clientv3.Cmp
+ for _, c := range compares {
+ cmp, err := etcdctlcmd.ParseCompare(c)
+ if err != nil {
+ return nil, err
+ }
+ cmps = append(cmps, *cmp)
+ }
+
+ succOps := getOps(ifSucess)
+
+ failOps := getOps(ifFail)
+
+ txnrsp, err := txn.
+ If(cmps...).
+ Then(succOps...).
+ Else(failOps...).
+ Commit()
+ return txnrsp, err
+}
+
+func getOps(ss []string) []clientv3.Op {
+ var ops []clientv3.Op
+ for _, s := range ss {
+ s = strings.TrimSpace(s)
+ args := etcdctlcmd.Argify(s)
+ switch args[0] {
+ case "get":
+ ops = append(ops, clientv3.OpGet(args[1]))
+ case "put":
+ ops = append(ops, clientv3.OpPut(args[1], args[2]))
+ case "del":
+ ops = append(ops, clientv3.OpDelete(args[1]))
+ }
+ }
+ return ops
+}
+
+func (c integrationClient) Watch(ctx context.Context, key string, opts config.WatchOptions) clientv3.WatchChan {
+ var opOpts []clientv3.OpOption
+ if opts.Prefix {
+ opOpts = append(opOpts, clientv3.WithPrefix())
+ }
+ if opts.Revision != 0 {
+ opOpts = append(opOpts, clientv3.WithRev(opts.Revision))
+ }
+ if opts.RangeEnd != "" {
+ opOpts = append(opOpts, clientv3.WithRange(opts.RangeEnd))
+ }
+
+ return c.Client.Watch(ctx, key, opOpts...)
+}
+
+func (c integrationClient) MemberAdd(ctx context.Context, _ string, peerAddrs []string) (*clientv3.MemberAddResponse, error) {
+ return c.Client.MemberAdd(ctx, peerAddrs)
+}
+
+func (c integrationClient) MemberAddAsLearner(ctx context.Context, _ string, peerAddrs []string) (*clientv3.MemberAddResponse, error) {
+ return c.Client.MemberAddAsLearner(ctx, peerAddrs)
+}
+
+func (c integrationClient) MemberRemove(ctx context.Context, id uint64) (*clientv3.MemberRemoveResponse, error) {
+ return c.Client.MemberRemove(ctx, id)
+}
+
+func (c integrationClient) MemberList(ctx context.Context, serializable bool) (*clientv3.MemberListResponse, error) {
+ if serializable {
+ return c.Client.MemberList(ctx, clientv3.WithSerializable())
+ }
+ return c.Client.MemberList(ctx)
+}
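The `Txn` adapter above accepts compares in etcdctl's txn string syntax (via `etcdctlcmd.ParseCompare`) and success/failure branches in the small `get`/`put`/`del` form handled by `getOps`. Below is a minimal sketch of how a test could drive it through the framework interfaces; the build tag, package name, keys, and values are assumptions for illustration, not part of this patch.

```go
//go:build integration

// Sketch: exercises integrationClient.Put/Get/Txn through the framework interfaces.
package sketch_test

import (
	"context"
	"testing"

	"go.etcd.io/etcd/tests/v3/framework"
	"go.etcd.io/etcd/tests/v3/framework/config"
	"go.etcd.io/etcd/tests/v3/framework/testutils"
)

func TestTxnSketch(t *testing.T) {
	runner := framework.IntegrationTestRunner
	runner.BeforeTest(t)

	ctx := context.Background()
	clus := runner.NewCluster(ctx, t) // cluster defaults come from config.NewClusterConfig
	defer clus.Close()

	client := testutils.MustClient(clus.Client())
	if err := client.Put(ctx, "foo", "bar", config.PutOptions{}); err != nil {
		t.Fatal(err)
	}

	// If value("foo") == "bar", the success ops run; otherwise the failure ops run.
	resp, err := client.Txn(ctx,
		[]string{`value("foo") = "bar"`}, // compare, parsed by etcdctlcmd.ParseCompare
		[]string{`put foo updated`},      // success branch, parsed by getOps
		[]string{`get foo`},              // failure branch, parsed by getOps
		config.TxnOptions{},
	)
	if err != nil {
		t.Fatal(err)
	}
	if !resp.Succeeded {
		t.Fatal("expected the compare to succeed")
	}
}
```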
diff --git a/tests/framework/integration/testing.go b/tests/framework/integration/testing.go
new file mode 100644
index 00000000000..a4d03c53212
--- /dev/null
+++ b/tests/framework/integration/testing.go
@@ -0,0 +1,155 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package integration
+
+import (
+ "os"
+ "testing"
+
+ grpclogsettable "github.com/grpc-ecosystem/go-grpc-middleware/logging/settable"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zapcore"
+ "go.uber.org/zap/zapgrpc"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/server/v3/embed"
+ gofail "go.etcd.io/gofail/runtime"
+)
+
+var (
+ grpcLogger grpclogsettable.SettableLoggerV2
+ insideTestContext bool
+)
+
+func init() {
+ grpcLogger = grpclogsettable.ReplaceGrpcLoggerV2()
+}
+
+type testOptions struct {
+ goLeakDetection bool
+ skipInShort bool
+ failpoint *failpoint
+}
+
+type failpoint struct {
+ name string
+ payload string
+}
+
+func newTestOptions(opts ...TestOption) *testOptions {
+ o := &testOptions{goLeakDetection: true, skipInShort: true}
+ for _, opt := range opts {
+ opt(o)
+ }
+ return o
+}
+
+type TestOption func(opt *testOptions)
+
+// WithoutGoLeakDetection disables checking whether a testcase leaked a goroutine.
+func WithoutGoLeakDetection() TestOption {
+ return func(opt *testOptions) { opt.goLeakDetection = false }
+}
+
+func WithoutSkipInShort() TestOption {
+ return func(opt *testOptions) { opt.skipInShort = false }
+}
+
+// WithFailpoint registers a gofail failpoint that BeforeTest enables for the duration of the test.
+func WithFailpoint(name, payload string) TestOption {
+ return func(opt *testOptions) { opt.failpoint = &failpoint{name: name, payload: payload} }
+}
+
+// BeforeTestExternal initializes the test context and is intended for external consumers.
+// In general the `integration` package is not meant to be used outside of the
+// etcd project, but until a dedicated package is developed, this is
+// the best entry point so far (without any backward compatibility promise).
+func BeforeTestExternal(t testutil.TB) {
+ BeforeTest(t, WithoutSkipInShort(), WithoutGoLeakDetection())
+}
+
+func BeforeTest(t testutil.TB, opts ...TestOption) {
+ t.Helper()
+ options := newTestOptions(opts...)
+
+ if insideTestContext {
+ t.Fatal("already in test context. BeforeTest was likely already called")
+ }
+
+ if options.skipInShort {
+ testutil.SkipTestIfShortMode(t, "Cannot create clusters in --short tests")
+ }
+
+ if options.goLeakDetection {
+ testutil.RegisterLeakDetection(t)
+ }
+
+ if options.failpoint != nil && len(options.failpoint.name) != 0 {
+ if len(gofail.List()) == 0 {
+ t.Skip("please run 'make gofail-enable' before running the test")
+ }
+ require.NoError(t, gofail.Enable(options.failpoint.name, options.failpoint.payload))
+ t.Cleanup(func() {
+ require.NoError(t, gofail.Disable(options.failpoint.name))
+ })
+ }
+
+ previousWD, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+ previousInsideTestContext := insideTestContext
+
+ // Integration tests should verify written state as much as possible.
+ revertFunc := verify.EnableAllVerifications()
+
+ // Register cleanup early so that it runs even if the helper fails.
+ t.Cleanup(func() {
+ grpcLogger.Reset()
+ insideTestContext = previousInsideTestContext
+ os.Chdir(previousWD)
+ revertFunc()
+ })
+
+ grpcLogger.Set(zapgrpc.NewLogger(zaptest.NewLogger(t).Named("grpc")))
+ insideTestContext = true
+
+ os.Chdir(t.TempDir())
+}
+
+func assertInTestContext(t testutil.TB) {
+ if !insideTestContext {
+ t.Errorf("the function can be called only in the test context. Was integration.BeforeTest() called ?")
+ }
+}
+
+func NewEmbedConfig(t testing.TB, name string) *embed.Config {
+ cfg := embed.NewConfig()
+ cfg.Name = name
+ lg := zaptest.NewLogger(t, zaptest.Level(zapcore.InfoLevel)).Named(cfg.Name)
+ cfg.ZapLoggerBuilder = embed.NewZapLoggerBuilder(lg)
+ cfg.Dir = t.TempDir()
+ return cfg
+}
+
+func NewClient(t testing.TB, cfg clientv3.Config) (*clientv3.Client, error) {
+ if cfg.Logger == nil {
+ cfg.Logger = zaptest.NewLogger(t).Named("client")
+ }
+ return clientv3.New(cfg)
+}
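`BeforeTest` wires leak detection, verification, the grpc logger, and a temporary working directory into `t.Cleanup`; `WithFailpoint` additionally enables a gofail failpoint for the test's duration, or skips the test when gofail was not compiled in. A hedged sketch of a test in this package follows; the failpoint name and payload are illustrative only.

```go
package integration

import "testing"

// Sketch: the failpoint name and payload are hypothetical; a real test would pick a
// failpoint compiled in via `make gofail-enable`.
func TestWithFailpointSketch(t *testing.T) {
	BeforeTest(t, WithFailpoint("someFailpointName", `sleep(100)`))
	// The failpoint is active here and is disabled automatically via t.Cleanup.
	// ... start a cluster with NewCluster or an embedded server with NewEmbedConfig ...
}
```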
diff --git a/tests/framework/interfaces/interface.go b/tests/framework/interfaces/interface.go
new file mode 100644
index 00000000000..0477ea5f0ec
--- /dev/null
+++ b/tests/framework/interfaces/interface.go
@@ -0,0 +1,87 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interfaces
+
+import (
+ "context"
+ "testing"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+)
+
+type TestRunner interface {
+ TestMain(m *testing.M)
+ BeforeTest(testing.TB)
+ NewCluster(context.Context, testing.TB, ...config.ClusterOption) Cluster
+}
+
+type Cluster interface {
+ Members() []Member
+ Client(opts ...config.ClientOption) (Client, error)
+ WaitLeader(t testing.TB) int
+ Close() error
+ Endpoints() []string
+}
+
+type Member interface {
+ Client() Client
+ Start(ctx context.Context) error
+ Stop()
+}
+
+type Client interface {
+ Put(context context.Context, key, value string, opts config.PutOptions) error
+ Get(context context.Context, key string, opts config.GetOptions) (*clientv3.GetResponse, error)
+ Delete(context context.Context, key string, opts config.DeleteOptions) (*clientv3.DeleteResponse, error)
+ Compact(context context.Context, rev int64, opts config.CompactOption) (*clientv3.CompactResponse, error)
+ Status(context context.Context) ([]*clientv3.StatusResponse, error)
+ HashKV(context context.Context, rev int64) ([]*clientv3.HashKVResponse, error)
+ Health(context context.Context) error
+ Defragment(context context.Context, opts config.DefragOption) error
+ AlarmList(context context.Context) (*clientv3.AlarmResponse, error)
+ AlarmDisarm(context context.Context, alarmMember *clientv3.AlarmMember) (*clientv3.AlarmResponse, error)
+ Grant(context context.Context, ttl int64) (*clientv3.LeaseGrantResponse, error)
+ TimeToLive(context context.Context, id clientv3.LeaseID, opts config.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error)
+ Leases(context context.Context) (*clientv3.LeaseLeasesResponse, error)
+ KeepAliveOnce(context context.Context, id clientv3.LeaseID) (*clientv3.LeaseKeepAliveResponse, error)
+ Revoke(context context.Context, id clientv3.LeaseID) (*clientv3.LeaseRevokeResponse, error)
+
+ AuthEnable(context context.Context) error
+ AuthDisable(context context.Context) error
+ AuthStatus(context context.Context) (*clientv3.AuthStatusResponse, error)
+ UserAdd(context context.Context, name, password string, opts config.UserAddOptions) (*clientv3.AuthUserAddResponse, error)
+ UserGet(context context.Context, name string) (*clientv3.AuthUserGetResponse, error)
+ UserList(context context.Context) (*clientv3.AuthUserListResponse, error)
+ UserDelete(context context.Context, name string) (*clientv3.AuthUserDeleteResponse, error)
+ UserChangePass(context context.Context, user, newPass string) error
+ UserGrantRole(context context.Context, user string, role string) (*clientv3.AuthUserGrantRoleResponse, error)
+ UserRevokeRole(context context.Context, user string, role string) (*clientv3.AuthUserRevokeRoleResponse, error)
+ RoleAdd(context context.Context, name string) (*clientv3.AuthRoleAddResponse, error)
+ RoleGrantPermission(context context.Context, name string, key, rangeEnd string, permType clientv3.PermissionType) (*clientv3.AuthRoleGrantPermissionResponse, error)
+ RoleGet(context context.Context, role string) (*clientv3.AuthRoleGetResponse, error)
+ RoleList(context context.Context) (*clientv3.AuthRoleListResponse, error)
+ RoleRevokePermission(context context.Context, role string, key, rangeEnd string) (*clientv3.AuthRoleRevokePermissionResponse, error)
+ RoleDelete(context context.Context, role string) (*clientv3.AuthRoleDeleteResponse, error)
+
+ Txn(context context.Context, compares, ifSucess, ifFail []string, o config.TxnOptions) (*clientv3.TxnResponse, error)
+
+ MemberList(context context.Context, serializable bool) (*clientv3.MemberListResponse, error)
+ MemberAdd(context context.Context, name string, peerAddrs []string) (*clientv3.MemberAddResponse, error)
+ MemberAddAsLearner(context context.Context, name string, peerAddrs []string) (*clientv3.MemberAddResponse, error)
+ MemberRemove(ctx context.Context, id uint64) (*clientv3.MemberRemoveResponse, error)
+
+ Watch(ctx context.Context, key string, opts config.WatchOptions) clientv3.WatchChan
+}
diff --git a/tests/framework/testrunner.go b/tests/framework/testrunner.go
new file mode 100644
index 00000000000..3467314b36c
--- /dev/null
+++ b/tests/framework/testrunner.go
@@ -0,0 +1,31 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package framework
+
+import (
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
+ intf "go.etcd.io/etcd/tests/v3/framework/interfaces"
+ "go.etcd.io/etcd/tests/v3/framework/unit"
+)
+
+var (
+ // UnitTestRunner only runs in `--short` mode and fails otherwise. Attempts at cluster creation result in the test being skipped.
+ UnitTestRunner intf.TestRunner = unit.NewUnitRunner()
+ // E2eTestRunner runs etcd and etcdctl binaries in a separate process.
+ E2eTestRunner = e2e.NewE2eRunner()
+ // IntegrationTestRunner runs etcdserver.EtcdServer in a separate goroutine and uses client libraries to communicate.
+ IntegrationTestRunner = integration.NewIntegrationRunner()
+)
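A common way to consume these runners is a per-package `main_test.go` that selects the runner for the active build tag and delegates `TestMain` to it. The sketch below assumes such a file layout and build tag; it is not something this patch adds.

```go
//go:build integration

package sometests_test

import (
	"testing"

	"go.etcd.io/etcd/tests/v3/framework"
	intf "go.etcd.io/etcd/tests/v3/framework/interfaces"
)

// testRunner is shared by every test in the package; a sibling file guarded by a
// different build tag could assign framework.E2eTestRunner instead.
var testRunner intf.TestRunner = framework.IntegrationTestRunner

func TestMain(m *testing.M) {
	// For the integration runner this delegates to testutil.MustTestMainWithLeakDetection.
	testRunner.TestMain(m)
}
```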
diff --git a/tests/framework/testutils/execute.go b/tests/framework/testutils/execute.go
new file mode 100644
index 00000000000..d9c3d335879
--- /dev/null
+++ b/tests/framework/testutils/execute.go
@@ -0,0 +1,50 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testutils
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+)
+
+func ExecuteWithTimeout(t *testing.T, timeout time.Duration, f func()) {
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+ ExecuteUntil(ctx, t, f)
+}
+
+func ExecuteUntil(ctx context.Context, t *testing.T, f func()) {
+ deadline, deadlineSet := ctx.Deadline()
+ timeout := time.Until(deadline)
+ donec := make(chan struct{})
+ go func() {
+ defer close(donec)
+ f()
+ }()
+
+ select {
+ case <-ctx.Done():
+ msg := ctx.Err().Error()
+ if deadlineSet {
+ msg = fmt.Sprintf("test timed out after %v, err: %v", timeout, msg)
+ }
+ testutil.FatalStack(t, msg)
+ case <-donec:
+ }
+}
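`ExecuteUntil` runs the callback in a goroutine and dumps all goroutine stacks via `testutil.FatalStack` when the context expires first, which makes hangs easier to debug than a bare test timeout. A minimal usage sketch (package name and callback body are assumptions):

```go
package testutils_test

import (
	"testing"
	"time"

	"go.etcd.io/etcd/tests/v3/framework/testutils"
)

func TestBoundedCallSketch(t *testing.T) {
	// Fails with a full goroutine dump if the callback has not returned within 5s.
	testutils.ExecuteWithTimeout(t, 5*time.Second, func() {
		// ... a call that is expected to return promptly, e.g. a Get or a Watch read ...
	})
}
```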
diff --git a/tests/framework/testutils/helpters.go b/tests/framework/testutils/helpters.go
new file mode 100644
index 00000000000..91363176c2f
--- /dev/null
+++ b/tests/framework/testutils/helpters.go
@@ -0,0 +1,71 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testutils
+
+import (
+ "errors"
+ "time"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ intf "go.etcd.io/etcd/tests/v3/framework/interfaces"
+)
+
+type KV struct {
+ Key, Val string
+}
+
+func KeysFromGetResponse(resp *clientv3.GetResponse) (kvs []string) {
+ for _, kv := range resp.Kvs {
+ kvs = append(kvs, string(kv.Key))
+ }
+ return kvs
+}
+
+func KeyValuesFromGetResponse(resp *clientv3.GetResponse) (kvs []KV) {
+ for _, kv := range resp.Kvs {
+ kvs = append(kvs, KV{Key: string(kv.Key), Val: string(kv.Value)})
+ }
+ return kvs
+}
+
+func KeyValuesFromWatchResponse(resp clientv3.WatchResponse) (kvs []KV) {
+ for _, event := range resp.Events {
+ kvs = append(kvs, KV{Key: string(event.Kv.Key), Val: string(event.Kv.Value)})
+ }
+ return kvs
+}
+
+func KeyValuesFromWatchChan(wch clientv3.WatchChan, wantedLen int, timeout time.Duration) (kvs []KV, err error) {
+ for {
+ select {
+ case watchResp, ok := <-wch:
+ if ok {
+ kvs = append(kvs, KeyValuesFromWatchResponse(watchResp)...)
+ if len(kvs) == wantedLen {
+ return kvs, nil
+ }
+ }
+ case <-time.After(timeout):
+ return nil, errors.New("closed watcher channel should not block")
+ }
+ }
+}
+
+func MustClient(c intf.Client, err error) intf.Client {
+ if err != nil {
+ panic(err)
+ }
+ return c
+}
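The helpers above flatten client responses into comparable `KV` slices, which keeps assertions short. A sketch of typical use with testify follows; the expected values and package location are illustrative.

```go
package testutils_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"

	"go.etcd.io/etcd/tests/v3/framework/config"
	intf "go.etcd.io/etcd/tests/v3/framework/interfaces"
	"go.etcd.io/etcd/tests/v3/framework/testutils"
)

// assertFooBar reads back keys under "foo" and compares them as KV pairs.
func assertFooBar(ctx context.Context, t *testing.T, client intf.Client) {
	resp, err := client.Get(ctx, "foo", config.GetOptions{Prefix: true})
	assert.NoError(t, err)
	assert.Equal(t,
		[]testutils.KV{{Key: "foo", Val: "bar"}},
		testutils.KeyValuesFromGetResponse(resp))
}
```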
diff --git a/tests/framework/testutils/log_observer.go b/tests/framework/testutils/log_observer.go
new file mode 100644
index 00000000000..ac68fc98a39
--- /dev/null
+++ b/tests/framework/testutils/log_observer.go
@@ -0,0 +1,98 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testutils
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ zapobserver "go.uber.org/zap/zaptest/observer"
+)
+
+type LogObserver struct {
+ ob *zapobserver.ObservedLogs
+ enc zapcore.Encoder
+
+ mu sync.Mutex
+ // entries stores all the logged entries after syncLogs.
+ entries []zapobserver.LoggedEntry
+}
+
+func NewLogObserver(level zapcore.LevelEnabler) (zapcore.Core, *LogObserver) {
+ // align with zaptest
+ enc := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig())
+
+ co, ob := zapobserver.New(level)
+ return co, &LogObserver{
+ ob: ob,
+ enc: enc,
+ }
+}
+
+// Expect returns the first N lines containing the given string.
+func (logOb *LogObserver) Expect(ctx context.Context, s string, count int) ([]string, error) {
+ return logOb.ExpectFunc(ctx, func(log string) bool { return strings.Contains(log, s) }, count)
+}
+
+// ExpectFunc returns the first N lines satisfying the function f.
+func (logOb *LogObserver) ExpectFunc(ctx context.Context, filter func(string) bool, count int) ([]string, error) {
+ i := 0
+ res := make([]string, 0, count)
+
+ for {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+
+ entries := logOb.syncLogs()
+
+ // The order of entries never changes because the slice is append-only,
+ // so it is safe to skip already-scanned entries by reusing `i`.
+ for ; i < len(entries); i++ {
+ buf, err := logOb.enc.EncodeEntry(entries[i].Entry, entries[i].Context)
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode entry: %w", err)
+ }
+
+ logInStr := buf.String()
+ if filter(logInStr) {
+ res = append(res, logInStr)
+ }
+
+ if len(res) >= count {
+ return res, nil
+ }
+ }
+
+ time.Sleep(10 * time.Millisecond)
+ }
+}
+
+// syncLogs drains all entries logged so far from zapobserver (truncating its
+// internal slice) and appends them to the local cache.
+func (logOb *LogObserver) syncLogs() []zapobserver.LoggedEntry {
+ logOb.mu.Lock()
+ defer logOb.mu.Unlock()
+
+ logOb.entries = append(logOb.entries, logOb.ob.TakeAll()...)
+ return logOb.entries
+}
diff --git a/tests/framework/testutils/log_observer_test.go b/tests/framework/testutils/log_observer_test.go
new file mode 100644
index 00000000000..695caf97261
--- /dev/null
+++ b/tests/framework/testutils/log_observer_test.go
@@ -0,0 +1,82 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testutils
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+)
+
+func TestLogObserver_Timeout(t *testing.T) {
+ logCore, logOb := NewLogObserver(zap.InfoLevel)
+
+ logger := zap.New(logCore)
+ logger.Info(t.Name())
+
+ ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond)
+ _, err := logOb.Expect(ctx, "unknown", 1)
+ cancel()
+ require.ErrorIs(t, err, context.DeadlineExceeded)
+
+ assert.Len(t, logOb.entries, 1)
+}
+
+func TestLogObserver_Expect(t *testing.T) {
+ logCore, logOb := NewLogObserver(zap.InfoLevel)
+
+ logger := zap.New(logCore)
+
+ ctx, cancel := context.WithCancel(context.TODO())
+ defer cancel()
+
+ resCh := make(chan []string, 1)
+ go func() {
+ defer close(resCh)
+
+ res, err := logOb.Expect(ctx, t.Name(), 2)
+ assert.NoError(t, err)
+ resCh <- res
+ }()
+
+ msgs := []string{"Hello " + t.Name(), t.Name() + ", World"}
+ for _, msg := range msgs {
+ logger.Info(msg)
+ time.Sleep(40 * time.Millisecond)
+ }
+
+ res := <-resCh
+ assert.Len(t, res, 2)
+
+ // The logged message should be like
+ //
+ // 2023-04-16T11:46:19.367+0800 INFO Hello TestLogObserver_Expect
+ // 2023-04-16T11:46:19.408+0800 INFO TestLogObserver_Expect, World
+ //
+ // The prefix timestamp is unpredictable so we should assert the suffix
+ // only.
+ for idx := range msgs {
+ expected := fmt.Sprintf("\tINFO\t%s\n", msgs[idx])
+ assert.True(t, strings.HasSuffix(res[idx], expected))
+ }
+
+ assert.Len(t, logOb.entries, 2)
+}
diff --git a/tests/framework/testutils/path.go b/tests/framework/testutils/path.go
new file mode 100644
index 00000000000..3b9e6521369
--- /dev/null
+++ b/tests/framework/testutils/path.go
@@ -0,0 +1,25 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testutils
+
+import "path/filepath"
+
+func MustAbsPath(path string) string {
+ abs, err := filepath.Abs(path)
+ if err != nil {
+ panic(err)
+ }
+ return abs
+}
diff --git a/tests/framework/unit/unit.go b/tests/framework/unit/unit.go
new file mode 100644
index 00000000000..f822b7dd1f9
--- /dev/null
+++ b/tests/framework/unit/unit.go
@@ -0,0 +1,51 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package unit
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "os"
+ "testing"
+
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ intf "go.etcd.io/etcd/tests/v3/framework/interfaces"
+)
+
+type unitRunner struct{}
+
+var _ intf.TestRunner = (*unitRunner)(nil)
+
+func NewUnitRunner() intf.TestRunner {
+ return &unitRunner{}
+}
+
+func (e unitRunner) TestMain(m *testing.M) {
+ flag.Parse()
+ if !testing.Short() {
+ fmt.Println(`No test mode selected, please select either e2e mode with "--tags e2e" or integration mode with "--tags integration"`)
+ os.Exit(1)
+ }
+}
+
+func (e unitRunner) BeforeTest(t testing.TB) {
+}
+
+func (e unitRunner) NewCluster(ctx context.Context, t testing.TB, opts ...config.ClusterOption) intf.Cluster {
+ testutil.SkipTestIfShortMode(t, "Cannot create clusters in --short tests")
+ return nil
+}
diff --git a/tests/functional/Dockerfile b/tests/functional/Dockerfile
deleted file mode 100644
index 38c834bc77b..00000000000
--- a/tests/functional/Dockerfile
+++ /dev/null
@@ -1,42 +0,0 @@
-FROM fedora:28
-
-RUN dnf check-update || true \
- && dnf install --assumeyes \
- git curl wget mercurial meld gcc gcc-c++ which \
- gcc automake autoconf dh-autoreconf libtool libtool-ltdl \
- tar unzip gzip \
- && dnf check-update || true \
- && dnf upgrade --assumeyes || true \
- && dnf autoremove --assumeyes || true \
- && dnf clean all || true
-
-ENV GOROOT /usr/local/go
-ENV GOPATH /go
-ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH}
-ENV GO_VERSION 1.14.3
-ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang
-RUN rm -rf ${GOROOT} \
- && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \
- && mkdir -p ${GOPATH}/src ${GOPATH}/bin \
- && go version
-
-RUN mkdir -p ${GOPATH}/src/go.etcd.io/etcd
-ADD . ${GOPATH}/src/go.etcd.io/etcd
-ADD ./tests/functional/functional.yaml /functional.yaml
-
-RUN go get -v go.etcd.io/gofail \
- && pushd ${GOPATH}/src/go.etcd.io/etcd \
- && GO_BUILD_FLAGS="-v" ./build.sh \
- && mkdir -p /bin \
- && cp ./bin/etcd /bin/etcd \
- && cp ./bin/etcdctl /bin/etcdctl \
- && GO_BUILD_FLAGS="-v" FAILPOINTS=1 ./build.sh \
- && cp ./bin/etcd /bin/etcd-failpoints \
- && ./tests/functional/build \
- && cp ./bin/etcd-agent /bin/etcd-agent \
- && cp ./bin/etcd-proxy /bin/etcd-proxy \
- && cp ./bin/etcd-runner /bin/etcd-runner \
- && cp ./bin/etcd-tester /bin/etcd-tester \
- && go build -v -o /bin/benchmark ./tools/benchmark \
- && popd \
- && rm -rf ${GOPATH}/src/go.etcd.io/etcd
diff --git a/tests/functional/Procfile-proxy b/tests/functional/Procfile-proxy
deleted file mode 100644
index 66730ee779a..00000000000
--- a/tests/functional/Procfile-proxy
+++ /dev/null
@@ -1,14 +0,0 @@
-s1: bin/etcd --name s1 --data-dir /tmp/etcd-proxy-data.s1 --listen-client-urls http://127.0.0.1:1379 --advertise-client-urls http://127.0.0.1:13790 --listen-peer-urls http://127.0.0.1:1380 --initial-advertise-peer-urls http://127.0.0.1:13800 --initial-cluster-token tkn --initial-cluster 's1=http://127.0.0.1:13800,s2=http://127.0.0.1:23800,s3=http://127.0.0.1:33800' --initial-cluster-state new
-
-s1-client-proxy: bin/etcd-proxy --from localhost:13790 --to localhost:1379 --http-port 1378
-s1-peer-proxy: bin/etcd-proxy --from localhost:13800 --to localhost:1380 --http-port 1381
-
-s2: bin/etcd --name s2 --data-dir /tmp/etcd-proxy-data.s2 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:23790 --listen-peer-urls http://127.0.0.1:2380 --initial-advertise-peer-urls http://127.0.0.1:23800 --initial-cluster-token tkn --initial-cluster 's1=http://127.0.0.1:13800,s2=http://127.0.0.1:23800,s3=http://127.0.0.1:33800' --initial-cluster-state new
-
-s2-client-proxy: bin/etcd-proxy --from localhost:23790 --to localhost:2379 --http-port 2378
-s2-peer-proxy: bin/etcd-proxy --from localhost:23800 --to localhost:2380 --http-port 2381
-
-s3: bin/etcd --name s3 --data-dir /tmp/etcd-proxy-data.s3 --listen-client-urls http://127.0.0.1:3379 --advertise-client-urls http://127.0.0.1:33790 --listen-peer-urls http://127.0.0.1:3380 --initial-advertise-peer-urls http://127.0.0.1:33800 --initial-cluster-token tkn --initial-cluster 's1=http://127.0.0.1:13800,s2=http://127.0.0.1:23800,s3=http://127.0.0.1:33800' --initial-cluster-state new
-
-s3-client-proxy: bin/etcd-proxy --from localhost:33790 --to localhost:3379 --http-port 3378
-s3-client-proxy: bin/etcd-proxy --from localhost:33800 --to localhost:3380 --http-port 3381
diff --git a/tests/functional/README.md b/tests/functional/README.md
deleted file mode 100644
index fe91b86cbef..00000000000
--- a/tests/functional/README.md
+++ /dev/null
@@ -1,218 +0,0 @@
-## etcd Functional Testing
-
-[`functional`](https://godoc.org/github.com/coreos/etcd/functional) verifies the correct behavior of etcd under various system and network malfunctions. It sets up an etcd cluster under high pressure loads and continuously injects failures into the cluster. Then it expects the etcd cluster to recover within a few seconds. This has been extremely helpful to find critical bugs.
-
-See [`rpcpb.Case`](https://godoc.org/github.com/coreos/etcd/functional/rpcpb#Case) for all failure cases.
-
-See [functional.yaml](https://github.com/etcd-io/etcd/blob/main/tests/functional/functional.yaml) for an example configuration.
-
-### Run locally
-
-```bash
-PASSES=functional ./test
-```
-
-### Run with Docker
-
-```bash
-pushd ..
-make build-docker-functional push-docker-functional pull-docker-functional
-popd
-```
-
-And run [example scripts](./scripts).
-
-```bash
-# run 3 agents for 3-node local etcd cluster
-./functional/scripts/docker-local-agent.sh 1
-./functional/scripts/docker-local-agent.sh 2
-./functional/scripts/docker-local-agent.sh 3
-
-# to run only 1 tester round
-./functional/scripts/docker-local-tester.sh
-```
-
-## etcd Proxy
-
-Proxy layer that simulates various network conditions.
-
-Test locally
-
-```bash
-$ ./build.sh
-$ ./bin/etcd
-
-$ make build-functional
-
-$ ./bin/etcd-proxy --help
-$ ./bin/etcd-proxy --from localhost:23790 --to localhost:2379 --http-port 2378 --verbose
-
-$ ETCDCTL_API=3 ./bin/etcdctl --endpoints localhost:2379 put foo bar
-$ ETCDCTL_API=3 ./bin/etcdctl --endpoints localhost:23790 put foo bar
-```
-
-Proxy overhead per request is under 500μs
-
-```bash
-$ go build -v -o ./bin/benchmark ./tools/benchmark
-
-$ ./bin/benchmark \
- --endpoints localhost:2379 \
- --conns 5 \
- --clients 15 \
- put \
- --key-size 48 \
- --val-size 50000 \
- --total 10000
-
-$ curl -L http://localhost:2378/pause-tx -X PUT
-# paused forwarding [tcp://localhost:23790 -> tcp://localhost:2379]
-
-$ ETCDCTL_API=3 ./bin/etcdctl \
- --endpoints localhost:23790 \
- put foo bar
-# Error: context deadline exceeded
-
-$ curl -L http://localhost:2378/pause-tx -X DELETE
-# unpaused forwarding [tcp://localhost:23790 -> tcp://localhost:2379]
-```
-
-Drop client packets
-
-```bash
-$ curl -L http://localhost:2378/blackhole-tx -X PUT
-# blackholed; dropping packets [tcp://localhost:23790 -> tcp://localhost:2379]
-
-$ ETCDCTL_API=3 ./bin/etcdctl --endpoints localhost:23790 put foo bar
-# Error: context deadline exceeded
-
-$ curl -L http://localhost:2378/blackhole-tx -X DELETE
-# unblackholed; restart forwarding [tcp://localhost:23790 -> tcp://localhost:2379]
-```
-
-Trigger leader election
-
-```bash
-$ ./build.sh
-$ make build-functional
-
-$ rm -rf /tmp/etcd-proxy-data.s*
-$ goreman -f ./functional/Procfile-proxy start
-
-$ ETCDCTL_API=3 ./bin/etcdctl \
- --endpoints localhost:13790,localhost:23790,localhost:33790 \
- member list
-
-# isolate s1 when s1 is the current leader
-$ curl -L http://localhost:1381/blackhole-tx -X PUT
-$ curl -L http://localhost:1381/blackhole-rx -X PUT
-# s1 becomes follower after election timeout
-```
diff --git a/tests/functional/agent/doc.go b/tests/functional/agent/doc.go
deleted file mode 100644
index 0195c4c7404..00000000000
--- a/tests/functional/agent/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package agent implements functional-tester agent server.
-package agent
diff --git a/tests/functional/agent/handler.go b/tests/functional/agent/handler.go
deleted file mode 100644
index 767a48ad137..00000000000
--- a/tests/functional/agent/handler.go
+++ /dev/null
@@ -1,772 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package agent
-
-import (
- "errors"
- "fmt"
- "io/ioutil"
- "net/url"
- "os"
- "os/exec"
- "path/filepath"
- "syscall"
- "time"
-
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
- "go.etcd.io/etcd/pkg/v3/proxy"
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
- "go.uber.org/zap"
-)
-
-// return error for system errors (e.g. fail to create files)
-// return status error in response for wrong configuration/operation (e.g. start etcd twice)
-func (srv *Server) handleTesterRequest(req *rpcpb.Request) (resp *rpcpb.Response, err error) {
- defer func() {
- if err == nil && req != nil {
- srv.last = req.Operation
- srv.lg.Info("handler success", zap.String("operation", req.Operation.String()))
- }
- }()
- if req != nil {
- srv.Member = req.Member
- srv.Tester = req.Tester
- }
-
- switch req.Operation {
- case rpcpb.Operation_INITIAL_START_ETCD:
- return srv.handle_INITIAL_START_ETCD(req)
- case rpcpb.Operation_RESTART_ETCD:
- return srv.handle_RESTART_ETCD(req)
-
- case rpcpb.Operation_SIGTERM_ETCD:
- return srv.handle_SIGTERM_ETCD()
- case rpcpb.Operation_SIGQUIT_ETCD_AND_REMOVE_DATA:
- return srv.handle_SIGQUIT_ETCD_AND_REMOVE_DATA()
-
- case rpcpb.Operation_SAVE_SNAPSHOT:
- return srv.handle_SAVE_SNAPSHOT()
- case rpcpb.Operation_RESTORE_RESTART_FROM_SNAPSHOT:
- return srv.handle_RESTORE_RESTART_FROM_SNAPSHOT(req)
- case rpcpb.Operation_RESTART_FROM_SNAPSHOT:
- return srv.handle_RESTART_FROM_SNAPSHOT(req)
-
- case rpcpb.Operation_SIGQUIT_ETCD_AND_ARCHIVE_DATA:
- return srv.handle_SIGQUIT_ETCD_AND_ARCHIVE_DATA()
- case rpcpb.Operation_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT:
- return srv.handle_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT()
-
- case rpcpb.Operation_BLACKHOLE_PEER_PORT_TX_RX:
- return srv.handle_BLACKHOLE_PEER_PORT_TX_RX(), nil
- case rpcpb.Operation_UNBLACKHOLE_PEER_PORT_TX_RX:
- return srv.handle_UNBLACKHOLE_PEER_PORT_TX_RX(), nil
- case rpcpb.Operation_DELAY_PEER_PORT_TX_RX:
- return srv.handle_DELAY_PEER_PORT_TX_RX(), nil
- case rpcpb.Operation_UNDELAY_PEER_PORT_TX_RX:
- return srv.handle_UNDELAY_PEER_PORT_TX_RX(), nil
-
- default:
- msg := fmt.Sprintf("operation not found (%v)", req.Operation)
- return &rpcpb.Response{Success: false, Status: msg}, errors.New(msg)
- }
-}
-
-// just archive the first file
-func (srv *Server) createEtcdLogFile() error {
- var err error
- srv.etcdLogFile, err = os.Create(srv.Member.Etcd.LogOutputs[0])
- if err != nil {
- return err
- }
- srv.lg.Info("created etcd log file", zap.String("path", srv.Member.Etcd.LogOutputs[0]))
- return nil
-}
-
-func (srv *Server) creatEtcd(fromSnapshot bool, failpoints string) error {
- if !fileutil.Exist(srv.Member.EtcdExec) {
- return fmt.Errorf("unknown etcd exec path %q does not exist", srv.Member.EtcdExec)
- }
-
- etcdPath, etcdFlags := srv.Member.EtcdExec, srv.Member.Etcd.Flags()
- if fromSnapshot {
- etcdFlags = srv.Member.EtcdOnSnapshotRestore.Flags()
- }
- u, _ := url.Parse(srv.Member.FailpointHTTPAddr)
- srv.lg.Info(
- "creating etcd command",
- zap.String("etcd-exec", etcdPath),
- zap.Strings("etcd-flags", etcdFlags),
- zap.String("GOFAIL_FAILPOINTS", failpoints),
- zap.String("failpoint-http-addr", srv.Member.FailpointHTTPAddr),
- zap.String("failpoint-addr", u.Host),
- )
- srv.etcdCmd = exec.Command(etcdPath, etcdFlags...)
- srv.etcdCmd.Env = []string{"GOFAIL_HTTP=" + u.Host}
- if failpoints != "" {
- srv.etcdCmd.Env = append(srv.etcdCmd.Env, "GOFAIL_FAILPOINTS="+failpoints)
- }
- srv.etcdCmd.Stdout = srv.etcdLogFile
- srv.etcdCmd.Stderr = srv.etcdLogFile
- return nil
-}
-
-// start but do not wait for it to complete
-func (srv *Server) runEtcd() error {
- errc := make(chan error)
- go func() {
- time.Sleep(5 * time.Second)
- // server advertise client/peer listener had to start first
- // before setting up proxy listener
- errc <- srv.startProxy()
- }()
-
- if srv.etcdCmd != nil {
- srv.lg.Info(
- "starting etcd command",
- zap.String("command-path", srv.etcdCmd.Path),
- )
- err := srv.etcdCmd.Start()
- perr := <-errc
- srv.lg.Info(
- "started etcd command",
- zap.String("command-path", srv.etcdCmd.Path),
- zap.Errors("errors", []error{err, perr}),
- )
- if err != nil {
- return err
- }
- return perr
- }
-
- select {
- case <-srv.etcdServer.Server.ReadyNotify():
- srv.lg.Info("embedded etcd is ready")
- case <-time.After(time.Minute):
- srv.etcdServer.Close()
- return fmt.Errorf("took too long to start %v", <-srv.etcdServer.Err())
- }
- return <-errc
-}
-
-// SIGQUIT to exit with stackstrace
-func (srv *Server) stopEtcd(sig os.Signal) error {
- srv.stopProxy()
-
- if srv.etcdCmd != nil {
- srv.lg.Info(
- "stopping etcd command",
- zap.String("command-path", srv.etcdCmd.Path),
- zap.String("signal", sig.String()),
- )
-
- err := srv.etcdCmd.Process.Signal(sig)
- if err != nil {
- return err
- }
-
- errc := make(chan error)
- go func() {
- _, ew := srv.etcdCmd.Process.Wait()
- errc <- ew
- close(errc)
- }()
-
- select {
- case <-time.After(5 * time.Second):
- srv.etcdCmd.Process.Kill()
- case e := <-errc:
- return e
- }
-
- err = <-errc
-
- srv.lg.Info(
- "stopped etcd command",
- zap.String("command-path", srv.etcdCmd.Path),
- zap.String("signal", sig.String()),
- zap.Error(err),
- )
- return err
- }
-
- srv.lg.Info("stopping embedded etcd")
- srv.etcdServer.Server.HardStop()
- srv.etcdServer.Close()
- srv.lg.Info("stopped embedded etcd")
- return nil
-}
-
-func (srv *Server) startProxy() error {
- if srv.Member.EtcdClientProxy {
- advertiseClientURL, advertiseClientURLPort, err := getURLAndPort(srv.Member.Etcd.AdvertiseClientURLs[0])
- if err != nil {
- return err
- }
- listenClientURL, _, err := getURLAndPort(srv.Member.Etcd.ListenClientURLs[0])
- if err != nil {
- return err
- }
-
- srv.lg.Info("starting proxy on client traffic", zap.String("url", advertiseClientURL.String()))
- srv.advertiseClientPortToProxy[advertiseClientURLPort] = proxy.NewServer(proxy.ServerConfig{
- Logger: srv.lg,
- From: *advertiseClientURL,
- To: *listenClientURL,
- })
- select {
- case err = <-srv.advertiseClientPortToProxy[advertiseClientURLPort].Error():
- return err
- case <-time.After(2 * time.Second):
- srv.lg.Info("started proxy on client traffic", zap.String("url", advertiseClientURL.String()))
- }
- }
-
- if srv.Member.EtcdPeerProxy {
- advertisePeerURL, advertisePeerURLPort, err := getURLAndPort(srv.Member.Etcd.AdvertisePeerURLs[0])
- if err != nil {
- return err
- }
- listenPeerURL, _, err := getURLAndPort(srv.Member.Etcd.ListenPeerURLs[0])
- if err != nil {
- return err
- }
-
- srv.lg.Info("starting proxy on peer traffic", zap.String("url", advertisePeerURL.String()))
- srv.advertisePeerPortToProxy[advertisePeerURLPort] = proxy.NewServer(proxy.ServerConfig{
- Logger: srv.lg,
- From: *advertisePeerURL,
- To: *listenPeerURL,
- })
- select {
- case err = <-srv.advertisePeerPortToProxy[advertisePeerURLPort].Error():
- return err
- case <-time.After(2 * time.Second):
- srv.lg.Info("started proxy on peer traffic", zap.String("url", advertisePeerURL.String()))
- }
- }
- return nil
-}
-
-func (srv *Server) stopProxy() {
- if srv.Member.EtcdClientProxy && len(srv.advertiseClientPortToProxy) > 0 {
- for port, px := range srv.advertiseClientPortToProxy {
- if err := px.Close(); err != nil {
- srv.lg.Warn("failed to close proxy", zap.Int("port", port))
- continue
- }
- select {
- case <-px.Done():
- // enough time to release port
- time.Sleep(time.Second)
- case <-time.After(time.Second):
- }
- srv.lg.Info("closed proxy",
- zap.Int("port", port),
- zap.String("from", px.From()),
- zap.String("to", px.To()),
- )
- }
- srv.advertiseClientPortToProxy = make(map[int]proxy.Server)
- }
- if srv.Member.EtcdPeerProxy && len(srv.advertisePeerPortToProxy) > 0 {
- for port, px := range srv.advertisePeerPortToProxy {
- if err := px.Close(); err != nil {
- srv.lg.Warn("failed to close proxy", zap.Int("port", port))
- continue
- }
- select {
- case <-px.Done():
- // enough time to release port
- time.Sleep(time.Second)
- case <-time.After(time.Second):
- }
- srv.lg.Info("closed proxy",
- zap.Int("port", port),
- zap.String("from", px.From()),
- zap.String("to", px.To()),
- )
- }
- srv.advertisePeerPortToProxy = make(map[int]proxy.Server)
- }
-}
-
-// if started with manual TLS, stores TLS assets
-// from tester/client to disk before starting etcd process
-func (srv *Server) saveTLSAssets() error {
- if srv.Member.PeerCertPath != "" {
- if srv.Member.PeerCertData == "" {
- return fmt.Errorf("got empty data for %q", srv.Member.PeerCertPath)
- }
- if err := ioutil.WriteFile(srv.Member.PeerCertPath, []byte(srv.Member.PeerCertData), 0644); err != nil {
- return err
- }
- }
- if srv.Member.PeerKeyPath != "" {
- if srv.Member.PeerKeyData == "" {
- return fmt.Errorf("got empty data for %q", srv.Member.PeerKeyPath)
- }
- if err := ioutil.WriteFile(srv.Member.PeerKeyPath, []byte(srv.Member.PeerKeyData), 0644); err != nil {
- return err
- }
- }
- if srv.Member.PeerTrustedCAPath != "" {
- if srv.Member.PeerTrustedCAData == "" {
- return fmt.Errorf("got empty data for %q", srv.Member.PeerTrustedCAPath)
- }
- if err := ioutil.WriteFile(srv.Member.PeerTrustedCAPath, []byte(srv.Member.PeerTrustedCAData), 0644); err != nil {
- return err
- }
- }
- if srv.Member.PeerCertPath != "" &&
- srv.Member.PeerKeyPath != "" &&
- srv.Member.PeerTrustedCAPath != "" {
- srv.lg.Info(
- "wrote",
- zap.String("peer-cert", srv.Member.PeerCertPath),
- zap.String("peer-key", srv.Member.PeerKeyPath),
- zap.String("peer-trusted-ca", srv.Member.PeerTrustedCAPath),
- )
- }
-
- if srv.Member.ClientCertPath != "" {
- if srv.Member.ClientCertData == "" {
- return fmt.Errorf("got empty data for %q", srv.Member.ClientCertPath)
- }
- if err := ioutil.WriteFile(srv.Member.ClientCertPath, []byte(srv.Member.ClientCertData), 0644); err != nil {
- return err
- }
- }
- if srv.Member.ClientKeyPath != "" {
- if srv.Member.ClientKeyData == "" {
- return fmt.Errorf("got empty data for %q", srv.Member.ClientKeyPath)
- }
- if err := ioutil.WriteFile(srv.Member.ClientKeyPath, []byte(srv.Member.ClientKeyData), 0644); err != nil {
- return err
- }
- }
- if srv.Member.ClientTrustedCAPath != "" {
- if srv.Member.ClientTrustedCAData == "" {
- return fmt.Errorf("got empty data for %q", srv.Member.ClientTrustedCAPath)
- }
- if err := ioutil.WriteFile(srv.Member.ClientTrustedCAPath, []byte(srv.Member.ClientTrustedCAData), 0644); err != nil {
- return err
- }
- }
- if srv.Member.ClientCertPath != "" &&
- srv.Member.ClientKeyPath != "" &&
- srv.Member.ClientTrustedCAPath != "" {
- srv.lg.Info(
- "wrote",
- zap.String("client-cert", srv.Member.ClientCertPath),
- zap.String("client-key", srv.Member.ClientKeyPath),
- zap.String("client-trusted-ca", srv.Member.ClientTrustedCAPath),
- )
- }
- return nil
-}
-
-func (srv *Server) loadAutoTLSAssets() error {
- if srv.Member.Etcd.PeerAutoTLS {
- // in case of slow disk
- time.Sleep(time.Second)
-
- fdir := filepath.Join(srv.Member.Etcd.DataDir, "fixtures", "peer")
-
- srv.lg.Info(
- "loading peer auto TLS assets",
- zap.String("dir", fdir),
- zap.String("endpoint", srv.EtcdClientEndpoint),
- )
-
- certPath := filepath.Join(fdir, "cert.pem")
- if !fileutil.Exist(certPath) {
- return fmt.Errorf("cannot find %q", certPath)
- }
- certData, err := ioutil.ReadFile(certPath)
- if err != nil {
- return fmt.Errorf("cannot read %q (%v)", certPath, err)
- }
- srv.Member.PeerCertData = string(certData)
-
- keyPath := filepath.Join(fdir, "key.pem")
- if !fileutil.Exist(keyPath) {
- return fmt.Errorf("cannot find %q", keyPath)
- }
- keyData, err := ioutil.ReadFile(keyPath)
- if err != nil {
- return fmt.Errorf("cannot read %q (%v)", keyPath, err)
- }
- srv.Member.PeerKeyData = string(keyData)
-
- srv.lg.Info(
- "loaded peer auto TLS assets",
- zap.String("peer-cert-path", certPath),
- zap.Int("peer-cert-length", len(certData)),
- zap.String("peer-key-path", keyPath),
- zap.Int("peer-key-length", len(keyData)),
- )
- }
-
- if srv.Member.Etcd.ClientAutoTLS {
- // in case of slow disk
- time.Sleep(time.Second)
-
- fdir := filepath.Join(srv.Member.Etcd.DataDir, "fixtures", "client")
-
- srv.lg.Info(
- "loading client TLS assets",
- zap.String("dir", fdir),
- zap.String("endpoint", srv.EtcdClientEndpoint),
- )
-
- certPath := filepath.Join(fdir, "cert.pem")
- if !fileutil.Exist(certPath) {
- return fmt.Errorf("cannot find %q", certPath)
- }
- certData, err := ioutil.ReadFile(certPath)
- if err != nil {
- return fmt.Errorf("cannot read %q (%v)", certPath, err)
- }
- srv.Member.ClientCertData = string(certData)
-
- keyPath := filepath.Join(fdir, "key.pem")
- if !fileutil.Exist(keyPath) {
- return fmt.Errorf("cannot find %q", keyPath)
- }
- keyData, err := ioutil.ReadFile(keyPath)
- if err != nil {
- return fmt.Errorf("cannot read %q (%v)", keyPath, err)
- }
- srv.Member.ClientKeyData = string(keyData)
-
- srv.lg.Info(
- "loaded client TLS assets",
- zap.String("client-cert-path", certPath),
- zap.Int("client-cert-length", len(certData)),
- zap.String("client-key-path", keyPath),
- zap.Int("client-key-length", len(keyData)),
- )
- }
-
- return nil
-}
-
-func (srv *Server) handle_INITIAL_START_ETCD(req *rpcpb.Request) (*rpcpb.Response, error) {
- if srv.last != rpcpb.Operation_NOT_STARTED {
- return &rpcpb.Response{
- Success: false,
- Status: fmt.Sprintf("%q is not valid; last server operation was %q", rpcpb.Operation_INITIAL_START_ETCD.String(), srv.last.String()),
- Member: req.Member,
- }, nil
- }
-
- err := fileutil.TouchDirAll(srv.Member.BaseDir)
- if err != nil {
- return nil, err
- }
- srv.lg.Info("created base directory", zap.String("path", srv.Member.BaseDir))
-
- if srv.etcdServer == nil {
- if err = srv.createEtcdLogFile(); err != nil {
- return nil, err
- }
- }
-
- if err = srv.saveTLSAssets(); err != nil {
- return nil, err
- }
- if err = srv.creatEtcd(false, req.Member.Failpoints); err != nil {
- return nil, err
- }
- if err = srv.runEtcd(); err != nil {
- return nil, err
- }
- if err = srv.loadAutoTLSAssets(); err != nil {
- return nil, err
- }
-
- return &rpcpb.Response{
- Success: true,
- Status: "start etcd PASS",
- Member: srv.Member,
- }, nil
-}
-
-func (srv *Server) handle_RESTART_ETCD(req *rpcpb.Request) (*rpcpb.Response, error) {
- var err error
- if !fileutil.Exist(srv.Member.BaseDir) {
- err = fileutil.TouchDirAll(srv.Member.BaseDir)
- if err != nil {
- return nil, err
- }
- }
-
- if err = srv.saveTLSAssets(); err != nil {
- return nil, err
- }
- if err = srv.creatEtcd(false, req.Member.Failpoints); err != nil {
- return nil, err
- }
- if err = srv.runEtcd(); err != nil {
- return nil, err
- }
- if err = srv.loadAutoTLSAssets(); err != nil {
- return nil, err
- }
-
- return &rpcpb.Response{
- Success: true,
- Status: "restart etcd PASS",
- Member: srv.Member,
- }, nil
-}
-
-func (srv *Server) handle_SIGTERM_ETCD() (*rpcpb.Response, error) {
- if err := srv.stopEtcd(syscall.SIGTERM); err != nil {
- return nil, err
- }
-
- if srv.etcdServer != nil {
- srv.etcdServer.GetLogger().Sync()
- } else {
- srv.etcdLogFile.Sync()
- }
-
- return &rpcpb.Response{
- Success: true,
- Status: "killed etcd",
- }, nil
-}
-
-func (srv *Server) handle_SIGQUIT_ETCD_AND_REMOVE_DATA() (*rpcpb.Response, error) {
- err := srv.stopEtcd(syscall.SIGQUIT)
- if err != nil {
- return nil, err
- }
-
- if srv.etcdServer != nil {
- srv.etcdServer.GetLogger().Sync()
- } else {
- srv.etcdLogFile.Sync()
- srv.etcdLogFile.Close()
- }
-
- // for debugging purposes, rename instead of removing
- if err = os.RemoveAll(srv.Member.BaseDir + ".backup"); err != nil {
- return nil, err
- }
- if err = os.Rename(srv.Member.BaseDir, srv.Member.BaseDir+".backup"); err != nil {
- return nil, err
- }
- srv.lg.Info(
- "renamed",
- zap.String("base-dir", srv.Member.BaseDir),
- zap.String("new-dir", srv.Member.BaseDir+".backup"),
- )
-
- // create a new log file for next new member restart
- if !fileutil.Exist(srv.Member.BaseDir) {
- err = fileutil.TouchDirAll(srv.Member.BaseDir)
- if err != nil {
- return nil, err
- }
- }
-
- return &rpcpb.Response{
- Success: true,
- Status: "killed etcd and removed base directory",
- }, nil
-}
-
-func (srv *Server) handle_SAVE_SNAPSHOT() (*rpcpb.Response, error) {
- err := srv.Member.SaveSnapshot(srv.lg)
- if err != nil {
- return nil, err
- }
- return &rpcpb.Response{
- Success: true,
- Status: "saved snapshot",
- SnapshotInfo: srv.Member.SnapshotInfo,
- }, nil
-}
-
-func (srv *Server) handle_RESTORE_RESTART_FROM_SNAPSHOT(req *rpcpb.Request) (resp *rpcpb.Response, err error) {
- err = srv.Member.RestoreSnapshot(srv.lg)
- if err != nil {
- return nil, err
- }
- resp, err = srv.handle_RESTART_FROM_SNAPSHOT(req)
- if resp != nil && err == nil {
- resp.Status = "restored snapshot and " + resp.Status
- }
- return resp, err
-}
-
-func (srv *Server) handle_RESTART_FROM_SNAPSHOT(req *rpcpb.Request) (resp *rpcpb.Response, err error) {
- if err = srv.saveTLSAssets(); err != nil {
- return nil, err
- }
- if err = srv.creatEtcd(true, req.Member.Failpoints); err != nil {
- return nil, err
- }
- if err = srv.runEtcd(); err != nil {
- return nil, err
- }
- if err = srv.loadAutoTLSAssets(); err != nil {
- return nil, err
- }
-
- return &rpcpb.Response{
- Success: true,
- Status: "restarted etcd from snapshot",
- SnapshotInfo: srv.Member.SnapshotInfo,
- }, nil
-}
-
-func (srv *Server) handle_SIGQUIT_ETCD_AND_ARCHIVE_DATA() (*rpcpb.Response, error) {
- err := srv.stopEtcd(syscall.SIGQUIT)
- if err != nil {
- return nil, err
- }
-
- if srv.etcdServer != nil {
- srv.etcdServer.GetLogger().Sync()
- } else {
- srv.etcdLogFile.Sync()
- srv.etcdLogFile.Close()
- }
-
- // TODO: support separate WAL directory
- if err = archive(
- srv.Member.BaseDir,
- srv.Member.Etcd.LogOutputs[0],
- srv.Member.Etcd.DataDir,
- ); err != nil {
- return nil, err
- }
- srv.lg.Info("archived data", zap.String("base-dir", srv.Member.BaseDir))
-
- if srv.etcdServer == nil {
- if err = srv.createEtcdLogFile(); err != nil {
- return nil, err
- }
- }
-
- // TODO: Verify whether this cleaning of the page cache is needed.
- srv.lg.Info("cleaning up page cache")
- if err := cleanPageCache(); err != nil {
- srv.lg.Warn("failed to clean up page cache", zap.String("error", err.Error()))
- }
- srv.lg.Info("cleaned up page cache")
-
- return &rpcpb.Response{
- Success: true,
- Status: "cleaned up etcd",
- }, nil
-}
-
-// stop proxy and etcd, delete the data directory, and stop the agent server
-func (srv *Server) handle_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT() (*rpcpb.Response, error) {
- err := srv.stopEtcd(syscall.SIGQUIT)
- if err != nil {
- return nil, err
- }
-
- if srv.etcdServer != nil {
- srv.etcdServer.GetLogger().Sync()
- } else {
- srv.etcdLogFile.Sync()
- srv.etcdLogFile.Close()
- }
-
- err = os.RemoveAll(srv.Member.BaseDir)
- if err != nil {
- return nil, err
- }
- srv.lg.Info("removed base directory", zap.String("dir", srv.Member.BaseDir))
-
- // stop agent server
- srv.Stop()
-
- return &rpcpb.Response{
- Success: true,
- Status: "destroyed etcd and agent",
- }, nil
-}
-
-func (srv *Server) handle_BLACKHOLE_PEER_PORT_TX_RX() *rpcpb.Response {
- for port, px := range srv.advertisePeerPortToProxy {
- srv.lg.Info("blackholing", zap.Int("peer-port", port))
- px.BlackholeTx()
- px.BlackholeRx()
- srv.lg.Info("blackholed", zap.Int("peer-port", port))
- }
- return &rpcpb.Response{
- Success: true,
- Status: "blackholed peer port tx/rx",
- }
-}
-
-func (srv *Server) handle_UNBLACKHOLE_PEER_PORT_TX_RX() *rpcpb.Response {
- for port, px := range srv.advertisePeerPortToProxy {
- srv.lg.Info("unblackholing", zap.Int("peer-port", port))
- px.UnblackholeTx()
- px.UnblackholeRx()
- srv.lg.Info("unblackholed", zap.Int("peer-port", port))
- }
- return &rpcpb.Response{
- Success: true,
- Status: "unblackholed peer port tx/rx",
- }
-}
-
-func (srv *Server) handle_DELAY_PEER_PORT_TX_RX() *rpcpb.Response {
- lat := time.Duration(srv.Tester.UpdatedDelayLatencyMs) * time.Millisecond
- rv := time.Duration(srv.Tester.DelayLatencyMsRv) * time.Millisecond
-
- for port, px := range srv.advertisePeerPortToProxy {
- srv.lg.Info("delaying",
- zap.Int("peer-port", port),
- zap.Duration("latency", lat),
- zap.Duration("random-variable", rv),
- )
- px.DelayTx(lat, rv)
- px.DelayRx(lat, rv)
- srv.lg.Info("delayed",
- zap.Int("peer-port", port),
- zap.Duration("latency", lat),
- zap.Duration("random-variable", rv),
- )
- }
-
- return &rpcpb.Response{
- Success: true,
- Status: "delayed peer port tx/rx",
- }
-}
-
-func (srv *Server) handle_UNDELAY_PEER_PORT_TX_RX() *rpcpb.Response {
- for port, px := range srv.advertisePeerPortToProxy {
- srv.lg.Info("undelaying", zap.Int("peer-port", port))
- px.UndelayTx()
- px.UndelayRx()
- srv.lg.Info("undelayed", zap.Int("peer-port", port))
- }
- return &rpcpb.Response{
- Success: true,
- Status: "undelayed peer port tx/rx",
- }
-}
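The blackhole/delay handlers above drive a TCP forwarder from go.etcd.io/etcd/pkg/v3/proxy. A minimal, non-authoritative sketch of those calls, using placeholder addresses that are not part of the deleted code, could look like this:

```go
// Hypothetical standalone sketch of the proxy fault-injection primitives
// exercised by the agent handlers above; addresses are examples only.
package main

import (
	"net/url"
	"time"

	"go.etcd.io/etcd/pkg/v3/proxy"

	"go.uber.org/zap"
)

func main() {
	p := proxy.NewServer(proxy.ServerConfig{
		Logger: zap.NewExample(),
		From:   url.URL{Scheme: "tcp", Host: "127.0.0.1:13800"}, // advertised peer address (placeholder)
		To:     url.URL{Scheme: "tcp", Host: "127.0.0.1:1380"},  // actual peer listener (placeholder)
	})
	<-p.Ready()
	defer p.Close()

	// Inject 5s ± 500ms of latency in both directions, as handle_DELAY_PEER_PORT_TX_RX does.
	p.DelayTx(5*time.Second, 500*time.Millisecond)
	p.DelayRx(5*time.Second, 500*time.Millisecond)

	// Drop all packets, as handle_BLACKHOLE_PEER_PORT_TX_RX does.
	p.BlackholeTx()
	p.BlackholeRx()

	// Recover, as the UNBLACKHOLE/UNDELAY handlers do.
	p.UnblackholeTx()
	p.UnblackholeRx()
	p.UndelayTx()
	p.UndelayRx()
}
```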
diff --git a/tests/functional/agent/server.go b/tests/functional/agent/server.go
deleted file mode 100644
index bfd5e018d5c..00000000000
--- a/tests/functional/agent/server.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package agent
-
-import (
- "math"
- "net"
- "os"
- "os/exec"
- "strings"
-
- "go.etcd.io/etcd/pkg/v3/proxy"
- "go.etcd.io/etcd/server/v3/embed"
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
- "go.uber.org/zap"
- "google.golang.org/grpc"
-)
-
-// Server implements "rpcpb.TransportServer"
-// and other etcd operations as an agent.
-// There is no need to lock its fields, since request operations are
-// serialized on the tester side.
-type Server struct {
- lg *zap.Logger
-
- grpcServer *grpc.Server
-
- network string
- address string
- ln net.Listener
-
- rpcpb.TransportServer
- last rpcpb.Operation
-
- *rpcpb.Member
- *rpcpb.Tester
-
- etcdServer *embed.Etcd
- etcdCmd *exec.Cmd
- etcdLogFile *os.File
-
- // forward incoming traffic on advertise URLs to listen URLs
- advertiseClientPortToProxy map[int]proxy.Server
- advertisePeerPortToProxy map[int]proxy.Server
-}
-
-// NewServer returns a new agent server.
-func NewServer(
- lg *zap.Logger,
- network string,
- address string,
-) *Server {
- return &Server{
- lg: lg,
- network: network,
- address: address,
- last: rpcpb.Operation_NOT_STARTED,
- advertiseClientPortToProxy: make(map[int]proxy.Server),
- advertisePeerPortToProxy: make(map[int]proxy.Server),
- }
-}
-
-const (
- maxRequestBytes = 1.5 * 1024 * 1024
- grpcOverheadBytes = 512 * 1024
- maxStreams = math.MaxUint32
- maxSendBytes = math.MaxInt32
-)
-
-// StartServe starts serving agent server.
-func (srv *Server) StartServe() error {
- var err error
- srv.ln, err = net.Listen(srv.network, srv.address)
- if err != nil {
- return err
- }
-
- var opts []grpc.ServerOption
- opts = append(opts, grpc.MaxRecvMsgSize(int(maxRequestBytes+grpcOverheadBytes)))
- opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes))
- opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
- srv.grpcServer = grpc.NewServer(opts...)
-
- rpcpb.RegisterTransportServer(srv.grpcServer, srv)
-
- srv.lg.Info(
- "gRPC server started",
- zap.String("address", srv.address),
- zap.String("listener-address", srv.ln.Addr().String()),
- )
- err = srv.grpcServer.Serve(srv.ln)
- if err != nil && strings.Contains(err.Error(), "use of closed network connection") {
- srv.lg.Info(
- "gRPC server is shut down",
- zap.String("address", srv.address),
- zap.Error(err),
- )
- } else {
- srv.lg.Warn(
- "gRPC server returned with error",
- zap.String("address", srv.address),
- zap.Error(err),
- )
- }
- return err
-}
-
-// Stop stops serving gRPC server.
-func (srv *Server) Stop() {
- srv.lg.Info("gRPC server stopping", zap.String("address", srv.address))
- srv.grpcServer.Stop()
- srv.lg.Info("gRPC server stopped", zap.String("address", srv.address))
-}
-
-// Transport communicates with etcd tester.
-func (srv *Server) Transport(stream rpcpb.Transport_TransportServer) (reterr error) {
- errc := make(chan error, 1)
- go func() {
- for {
- var req *rpcpb.Request
- var err error
- req, err = stream.Recv()
- if err != nil {
- errc <- err
- // TODO: handle error and retry
- return
- }
- if req.Member != nil {
- srv.Member = req.Member
- }
- if req.Tester != nil {
- srv.Tester = req.Tester
- }
-
- var resp *rpcpb.Response
- resp, err = srv.handleTesterRequest(req)
- if err != nil {
- errc <- err
- // TODO: handle error and retry
- return
- }
-
- if err = stream.Send(resp); err != nil {
- errc <- err
- // TODO: handle error and retry
- return
- }
- }
- }()
-
- select {
- case reterr = <-errc:
- case <-stream.Context().Done():
- reterr = stream.Context().Err()
- }
- return reterr
-}
diff --git a/tests/functional/agent/utils.go b/tests/functional/agent/utils.go
deleted file mode 100644
index 37a2c7adfae..00000000000
--- a/tests/functional/agent/utils.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package agent
-
-import (
- "io"
- "net"
- "net/url"
- "os"
- "os/exec"
- "path/filepath"
- "strconv"
- "time"
-
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
-)
-
-// TODO: support separate WAL directory
-func archive(baseDir, etcdLogPath, dataDir string) error {
- dir := filepath.Join(baseDir, "etcd-failure-archive", time.Now().Format(time.RFC3339))
- if existDir(dir) {
- dir = filepath.Join(baseDir, "etcd-failure-archive", time.Now().Add(time.Second).Format(time.RFC3339))
- }
- if err := fileutil.TouchDirAll(dir); err != nil {
- return err
- }
-
- dst := filepath.Join(dir, "etcd.log")
- if err := copyFile(etcdLogPath, dst); err != nil {
- if !os.IsNotExist(err) {
- return err
- }
- }
- if err := os.Rename(dataDir, filepath.Join(dir, filepath.Base(dataDir))); err != nil {
- if !os.IsNotExist(err) {
- return err
- }
- }
-
- return nil
-}
-
-func existDir(fpath string) bool {
- st, err := os.Stat(fpath)
- if err != nil {
- if os.IsNotExist(err) {
- return false
- }
- } else {
- return st.IsDir()
- }
- return false
-}
-
-func getURLAndPort(addr string) (urlAddr *url.URL, port int, err error) {
- urlAddr, err = url.Parse(addr)
- if err != nil {
- return nil, -1, err
- }
- var s string
- _, s, err = net.SplitHostPort(urlAddr.Host)
- if err != nil {
- return nil, -1, err
- }
- port, err = strconv.Atoi(s)
- if err != nil {
- return nil, -1, err
- }
- return urlAddr, port, err
-}
-
-func copyFile(src, dst string) error {
- f, err := os.Open(src)
- if err != nil {
- return err
- }
- defer f.Close()
-
- w, err := os.Create(dst)
- if err != nil {
- return err
- }
- defer w.Close()
-
- if _, err = io.Copy(w, f); err != nil {
- return err
- }
- return w.Sync()
-}
-
-func cleanPageCache() error {
- // https://www.kernel.org/doc/Documentation/sysctl/vm.txt
- // https://github.com/torvalds/linux/blob/master/fs/drop_caches.c
- cmd := exec.Command("/bin/sh", "-c", `echo "echo 1 > /proc/sys/vm/drop_caches" | sudo -s -n`)
- return cmd.Run()
-}
diff --git a/tests/functional/agent/utils_test.go b/tests/functional/agent/utils_test.go
deleted file mode 100644
index 16230030438..00000000000
--- a/tests/functional/agent/utils_test.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package agent
-
-import (
- "net/url"
- "reflect"
- "testing"
-)
-
-func TestGetURLAndPort(t *testing.T) {
- addr := "https://127.0.0.1:2379"
- urlAddr, port, err := getURLAndPort(addr)
- if err != nil {
- t.Fatal(err)
- }
- exp := &url.URL{Scheme: "https", Host: "127.0.0.1:2379"}
- if !reflect.DeepEqual(urlAddr, exp) {
- t.Fatalf("expected %+v, got %+v", exp, urlAddr)
- }
- if port != 2379 {
- t.Fatalf("port expected 2379, got %d", port)
- }
-}
diff --git a/tests/functional/build b/tests/functional/build
deleted file mode 100755
index 324c8670cec..00000000000
--- a/tests/functional/build
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env bash
-
-if ! [[ "$0" =~ "tests/functional/build" ]]; then
- echo "must be run from repository root"
- exit 255
-fi
-
-(
- cd ./tests
- CGO_ENABLED=0 go build -v -installsuffix cgo -ldflags "-s" -o ../bin/etcd-agent ./functional/cmd/etcd-agent
- CGO_ENABLED=0 go build -v -installsuffix cgo -ldflags "-s" -o ../bin/etcd-proxy ./functional/cmd/etcd-proxy
- CGO_ENABLED=0 go build -v -installsuffix cgo -ldflags "-s" -o ../bin/etcd-runner ./functional/cmd/etcd-runner
- CGO_ENABLED=0 go build -v -installsuffix cgo -ldflags "-s" -o ../bin/etcd-tester ./functional/cmd/etcd-tester
-)
diff --git a/tests/functional/cmd/etcd-agent/main.go b/tests/functional/cmd/etcd-agent/main.go
deleted file mode 100644
index d70f9b85a15..00000000000
--- a/tests/functional/cmd/etcd-agent/main.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// etcd-agent is a program that runs functional-tester agent.
-package main
-
-import (
- "flag"
-
- "go.etcd.io/etcd/tests/v3/functional/agent"
-
- "go.uber.org/zap"
-)
-
-var logger *zap.Logger
-
-func init() {
- var err error
- logger, err = zap.NewProduction()
- if err != nil {
- panic(err)
- }
-}
-
-func main() {
- network := flag.String("network", "tcp", "network to serve agent server")
- address := flag.String("address", "127.0.0.1:9027", "address to serve agent server")
- flag.Parse()
-
- defer logger.Sync()
-
- srv := agent.NewServer(logger, *network, *address)
- err := srv.StartServe()
- logger.Info("agent exiting", zap.Error(err))
-}
diff --git a/tests/functional/cmd/etcd-proxy/main.go b/tests/functional/cmd/etcd-proxy/main.go
deleted file mode 100644
index d1b184cb1a6..00000000000
--- a/tests/functional/cmd/etcd-proxy/main.go
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// etcd-proxy is a proxy layer that simulates various network conditions.
-package main
-
-import (
- "context"
- "flag"
- "fmt"
- "io/ioutil"
- "log"
- "net/http"
- "net/url"
- "os"
- "os/signal"
- "syscall"
- "time"
-
- "go.etcd.io/etcd/pkg/v3/proxy"
-
- "go.uber.org/zap"
-)
-
-var from string
-var to string
-var httpPort int
-var verbose bool
-
-func main() {
- // TODO: support TLS
- flag.StringVar(&from, "from", "localhost:23790", "Address URL to proxy from.")
- flag.StringVar(&to, "to", "localhost:2379", "Address URL to forward.")
- flag.IntVar(&httpPort, "http-port", 2378, "Port to serve etcd-proxy API.")
- flag.BoolVar(&verbose, "verbose", false, "'true' to run proxy in verbose mode.")
-
- flag.Usage = func() {
- fmt.Fprintf(os.Stderr, "Usage of %q:\n", os.Args[0])
- fmt.Fprintln(os.Stderr, `
-etcd-proxy simulates various network conditions for etcd testing purposes.
-See README.md for more examples.
-
-Example:
-
-# build etcd
-$ ./build.sh
-$ ./bin/etcd
-
-# build etcd-proxy
-$ make build-etcd-proxy
-
-# to test etcd with proxy layer
-$ ./bin/etcd-proxy --help
-$ ./bin/etcd-proxy --from localhost:23790 --to localhost:2379 --http-port 2378 --verbose
-
-$ ./bin/etcdctl --endpoints localhost:2379 put foo bar
-$ ./bin/etcdctl --endpoints localhost:23790 put foo bar`)
- flag.PrintDefaults()
- }
-
- flag.Parse()
-
- cfg := proxy.ServerConfig{
- From: url.URL{Scheme: "tcp", Host: from},
- To: url.URL{Scheme: "tcp", Host: to},
- }
- if verbose {
- cfg.Logger = zap.NewExample()
- }
- p := proxy.NewServer(cfg)
- <-p.Ready()
- defer p.Close()
-
- mux := http.NewServeMux()
- mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
- w.Write([]byte(fmt.Sprintf("proxying [%s -> %s]\n", p.From(), p.To())))
- })
- mux.HandleFunc("/delay-tx", func(w http.ResponseWriter, req *http.Request) {
- switch req.Method {
- case http.MethodGet:
- w.Write([]byte(fmt.Sprintf("current send latency %v\n", p.LatencyTx())))
- case http.MethodPut, http.MethodPost:
- if err := req.ParseForm(); err != nil {
- w.Write([]byte(fmt.Sprintf("wrong form %q\n", err.Error())))
- return
- }
- lat, err := time.ParseDuration(req.PostForm.Get("latency"))
- if err != nil {
- w.Write([]byte(fmt.Sprintf("wrong latency form %q\n", err.Error())))
- return
- }
- rv, err := time.ParseDuration(req.PostForm.Get("random-variable"))
- if err != nil {
- w.Write([]byte(fmt.Sprintf("wrong random-variable form %q\n", err.Error())))
- return
- }
- p.DelayTx(lat, rv)
- w.Write([]byte(fmt.Sprintf("added send latency %vÂą%v (current latency %v)\n", lat, rv, p.LatencyTx())))
- case http.MethodDelete:
- lat := p.LatencyTx()
- p.UndelayTx()
- w.Write([]byte(fmt.Sprintf("removed latency %v\n", lat)))
- default:
- w.Write([]byte(fmt.Sprintf("unsupported method %q\n", req.Method)))
- }
- })
- mux.HandleFunc("/delay-rx", func(w http.ResponseWriter, req *http.Request) {
- switch req.Method {
- case http.MethodGet:
- w.Write([]byte(fmt.Sprintf("current receive latency %v\n", p.LatencyRx())))
- case http.MethodPut, http.MethodPost:
- if err := req.ParseForm(); err != nil {
- w.Write([]byte(fmt.Sprintf("wrong form %q\n", err.Error())))
- return
- }
- lat, err := time.ParseDuration(req.PostForm.Get("latency"))
- if err != nil {
- w.Write([]byte(fmt.Sprintf("wrong latency form %q\n", err.Error())))
- return
- }
- rv, err := time.ParseDuration(req.PostForm.Get("random-variable"))
- if err != nil {
- w.Write([]byte(fmt.Sprintf("wrong random-variable form %q\n", err.Error())))
- return
- }
- p.DelayRx(lat, rv)
- w.Write([]byte(fmt.Sprintf("added receive latency %vÂą%v (current latency %v)\n", lat, rv, p.LatencyRx())))
- case http.MethodDelete:
- lat := p.LatencyRx()
- p.UndelayRx()
- w.Write([]byte(fmt.Sprintf("removed latency %v\n", lat)))
- default:
- w.Write([]byte(fmt.Sprintf("unsupported method %q\n", req.Method)))
- }
- })
- mux.HandleFunc("/pause-tx", func(w http.ResponseWriter, req *http.Request) {
- switch req.Method {
- case http.MethodPut, http.MethodPost:
- p.PauseTx()
- w.Write([]byte(fmt.Sprintf("paused forwarding [%s -> %s]\n", p.From(), p.To())))
- case http.MethodDelete:
- p.UnpauseTx()
- w.Write([]byte(fmt.Sprintf("unpaused forwarding [%s -> %s]\n", p.From(), p.To())))
- default:
- w.Write([]byte(fmt.Sprintf("unsupported method %q\n", req.Method)))
- }
- })
- mux.HandleFunc("/pause-rx", func(w http.ResponseWriter, req *http.Request) {
- switch req.Method {
- case http.MethodPut, http.MethodPost:
- p.PauseRx()
- w.Write([]byte(fmt.Sprintf("paused forwarding [%s <- %s]\n", p.From(), p.To())))
- case http.MethodDelete:
- p.UnpauseRx()
- w.Write([]byte(fmt.Sprintf("unpaused forwarding [%s <- %s]\n", p.From(), p.To())))
- default:
- w.Write([]byte(fmt.Sprintf("unsupported method %q\n", req.Method)))
- }
- })
- mux.HandleFunc("/blackhole-tx", func(w http.ResponseWriter, req *http.Request) {
- switch req.Method {
- case http.MethodPut, http.MethodPost:
- p.BlackholeTx()
- w.Write([]byte(fmt.Sprintf("blackholed; dropping packets [%s -> %s]\n", p.From(), p.To())))
- case http.MethodDelete:
- p.UnblackholeTx()
- w.Write([]byte(fmt.Sprintf("unblackholed; restart forwarding [%s -> %s]\n", p.From(), p.To())))
- default:
- w.Write([]byte(fmt.Sprintf("unsupported method %q\n", req.Method)))
- }
- })
- mux.HandleFunc("/blackhole-rx", func(w http.ResponseWriter, req *http.Request) {
- switch req.Method {
- case http.MethodPut, http.MethodPost:
- p.BlackholeRx()
- w.Write([]byte(fmt.Sprintf("blackholed; dropping packets [%s <- %s]\n", p.From(), p.To())))
- case http.MethodDelete:
- p.UnblackholeRx()
- w.Write([]byte(fmt.Sprintf("unblackholed; restart forwarding [%s <- %s]\n", p.From(), p.To())))
- default:
- w.Write([]byte(fmt.Sprintf("unsupported method %q\n", req.Method)))
- }
- })
- srv := &http.Server{
- Addr: fmt.Sprintf(":%d", httpPort),
- Handler: mux,
- ErrorLog: log.New(ioutil.Discard, "net/http", 0),
- }
- defer srv.Close()
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGTERM)
- defer signal.Stop(sig)
-
- go func() {
- s := <-sig
- fmt.Printf("\n\nreceived signal %q, shutting down HTTP server\n\n", s)
- ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
- err := srv.Shutdown(ctx)
- cancel()
- fmt.Printf("gracefully stopped HTTP server with %v\n\n", err)
- os.Exit(0)
- }()
-
- fmt.Printf("\nserving HTTP server http://localhost:%d\n\n", httpPort)
- err := srv.ListenAndServe()
- fmt.Printf("HTTP server exit with error %v\n", err)
-}
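The handlers above take form-encoded PUT/POST requests to apply a fault and DELETE to undo it. A hedged usage sketch against the default --http-port, assuming the proxy is already running locally (the endpoint and values below are examples, not part of the deleted code):

```go
// Hypothetical client for the etcd-proxy HTTP API shown above.
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Add 5s ± 500ms of send latency; the /delay-tx handler reads the
	// "latency" and "random-variable" form fields on POST.
	resp, err := http.PostForm("http://localhost:2378/delay-tx", url.Values{
		"latency":         {"5s"},
		"random-variable": {"500ms"},
	})
	if err != nil {
		panic(err)
	}
	b, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Print(string(b))

	// Remove the delay again; the handler's http.MethodDelete case undoes it.
	req, _ := http.NewRequest(http.MethodDelete, "http://localhost:2378/delay-tx", nil)
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	b, _ = io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Print(string(b))
}
```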
diff --git a/tests/functional/cmd/etcd-runner/main.go b/tests/functional/cmd/etcd-runner/main.go
deleted file mode 100644
index 3afe40e1f22..00000000000
--- a/tests/functional/cmd/etcd-runner/main.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// etcd-runner is a program for testing etcd clientv3 features
-// against a fault injected cluster.
-package main
-
-import "go.etcd.io/etcd/tests/v3/functional/runner"
-
-func main() {
- runner.Start()
-}
diff --git a/tests/functional/cmd/etcd-tester/main.go b/tests/functional/cmd/etcd-tester/main.go
deleted file mode 100644
index c480076cb6c..00000000000
--- a/tests/functional/cmd/etcd-tester/main.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// etcd-tester is a program that runs functional-tester client.
-package main
-
-import (
- "flag"
-
- _ "github.com/etcd-io/gofail/runtime"
- "go.etcd.io/etcd/tests/v3/functional/tester"
- "go.uber.org/zap"
-)
-
-var logger *zap.Logger
-
-func init() {
- var err error
- logger, err = zap.NewProduction()
- if err != nil {
- panic(err)
- }
-}
-
-func main() {
- config := flag.String("config", "", "path to tester configuration")
- flag.Parse()
-
- defer logger.Sync()
-
- clus, err := tester.NewCluster(logger, *config)
- if err != nil {
- logger.Fatal("failed to create a cluster", zap.Error(err))
- }
-
- err = clus.Send_INITIAL_START_ETCD()
- if err != nil {
- logger.Fatal("Bootstrap failed", zap.Error(err))
- }
- defer clus.Send_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT()
-
- logger.Info("wait health after bootstrap")
- err = clus.WaitHealth()
- if err != nil {
- logger.Fatal("WaitHealth failed", zap.Error(err))
- }
-
- clus.Run()
-}
diff --git a/tests/functional/functional.yaml b/tests/functional/functional.yaml
deleted file mode 100644
index 91a8e5ecd53..00000000000
--- a/tests/functional/functional.yaml
+++ /dev/null
@@ -1,253 +0,0 @@
-agent-configs:
-- etcd-exec: ./bin/etcd
- agent-addr: 127.0.0.1:19027
- failpoint-http-addr: http://127.0.0.1:7381
- base-dir: /tmp/etcd-functional-1
- etcd-client-proxy: false
- etcd-peer-proxy: true
- etcd-client-endpoint: 127.0.0.1:1379
- etcd:
- name: s1
- data-dir: /tmp/etcd-functional-1/etcd.data
- wal-dir: /tmp/etcd-functional-1/etcd.data/member/wal
- heartbeat-interval: 100
- election-timeout: 1000
- listen-client-urls: ["https://127.0.0.1:1379"]
- advertise-client-urls: ["https://127.0.0.1:1379"]
- auto-tls: true
- client-cert-auth: false
- cert-file: ""
- key-file: ""
- trusted-ca-file: ""
- listen-peer-urls: ["https://127.0.0.1:1380"]
- initial-advertise-peer-urls: ["https://127.0.0.1:1381"]
- peer-auto-tls: true
- peer-client-cert-auth: false
- peer-cert-file: ""
- peer-key-file: ""
- peer-trusted-ca-file: ""
- initial-cluster: s1=https://127.0.0.1:1381,s2=https://127.0.0.1:2381,s3=https://127.0.0.1:3381
- initial-cluster-state: new
- initial-cluster-token: tkn
- snapshot-count: 2000
- quota-backend-bytes: 10740000000 # 10 GiB
- pre-vote: true
- initial-corrupt-check: true
- logger: zap
- log-outputs: [/tmp/etcd-functional-1/etcd.log]
- log-level: info
- client-cert-data: ""
- client-cert-path: ""
- client-key-data: ""
- client-key-path: ""
- client-trusted-ca-data: ""
- client-trusted-ca-path: ""
- peer-cert-data: ""
- peer-cert-path: ""
- peer-key-data: ""
- peer-key-path: ""
- peer-trusted-ca-data: ""
- peer-trusted-ca-path: ""
- snapshot-path: /tmp/etcd-functional-1.snapshot.db
-
-- etcd-exec: ./bin/etcd
- agent-addr: 127.0.0.1:29027
- failpoint-http-addr: http://127.0.0.1:7382
- base-dir: /tmp/etcd-functional-2
- etcd-client-proxy: false
- etcd-peer-proxy: true
- etcd-client-endpoint: 127.0.0.1:2379
- etcd:
- name: s2
- data-dir: /tmp/etcd-functional-2/etcd.data
- wal-dir: /tmp/etcd-functional-2/etcd.data/member/wal
- heartbeat-interval: 100
- election-timeout: 1000
- listen-client-urls: ["https://127.0.0.1:2379"]
- advertise-client-urls: ["https://127.0.0.1:2379"]
- auto-tls: true
- client-cert-auth: false
- cert-file: ""
- key-file: ""
- trusted-ca-file: ""
- listen-peer-urls: ["https://127.0.0.1:2380"]
- initial-advertise-peer-urls: ["https://127.0.0.1:2381"]
- peer-auto-tls: true
- peer-client-cert-auth: false
- peer-cert-file: ""
- peer-key-file: ""
- peer-trusted-ca-file: ""
- initial-cluster: s1=https://127.0.0.1:1381,s2=https://127.0.0.1:2381,s3=https://127.0.0.1:3381
- initial-cluster-state: new
- initial-cluster-token: tkn
- snapshot-count: 2000
- quota-backend-bytes: 10740000000 # 10 GiB
- pre-vote: true
- initial-corrupt-check: true
- logger: zap
- log-outputs: [/tmp/etcd-functional-2/etcd.log]
- log-level: info
- client-cert-data: ""
- client-cert-path: ""
- client-key-data: ""
- client-key-path: ""
- client-trusted-ca-data: ""
- client-trusted-ca-path: ""
- peer-cert-data: ""
- peer-cert-path: ""
- peer-key-data: ""
- peer-key-path: ""
- peer-trusted-ca-data: ""
- peer-trusted-ca-path: ""
- snapshot-path: /tmp/etcd-functional-2.snapshot.db
-
-- etcd-exec: ./bin/etcd
- agent-addr: 127.0.0.1:39027
- failpoint-http-addr: http://127.0.0.1:7383
- base-dir: /tmp/etcd-functional-3
- etcd-client-proxy: false
- etcd-peer-proxy: true
- etcd-client-endpoint: 127.0.0.1:3379
- etcd:
- name: s3
- data-dir: /tmp/etcd-functional-3/etcd.data
- wal-dir: /tmp/etcd-functional-3/etcd.data/member/wal
- heartbeat-interval: 100
- election-timeout: 1000
- listen-client-urls: ["https://127.0.0.1:3379"]
- advertise-client-urls: ["https://127.0.0.1:3379"]
- auto-tls: true
- client-cert-auth: false
- cert-file: ""
- key-file: ""
- trusted-ca-file: ""
- listen-peer-urls: ["https://127.0.0.1:3380"]
- initial-advertise-peer-urls: ["https://127.0.0.1:3381"]
- peer-auto-tls: true
- peer-client-cert-auth: false
- peer-cert-file: ""
- peer-key-file: ""
- peer-trusted-ca-file: ""
- initial-cluster: s1=https://127.0.0.1:1381,s2=https://127.0.0.1:2381,s3=https://127.0.0.1:3381
- initial-cluster-state: new
- initial-cluster-token: tkn
- snapshot-count: 2000
- quota-backend-bytes: 10740000000 # 10 GiB
- pre-vote: true
- initial-corrupt-check: true
- logger: zap
- log-outputs: [/tmp/etcd-functional-3/etcd.log]
- log-level: info
- client-cert-data: ""
- client-cert-path: ""
- client-key-data: ""
- client-key-path: ""
- client-trusted-ca-data: ""
- client-trusted-ca-path: ""
- peer-cert-data: ""
- peer-cert-path: ""
- peer-key-data: ""
- peer-key-path: ""
- peer-trusted-ca-data: ""
- peer-trusted-ca-path: ""
- snapshot-path: /tmp/etcd-functional-3.snapshot.db
-
-tester-config:
- data-dir: /tmp/etcd-tester-data
- network: tcp
- addr: 127.0.0.1:9028
-
- # slow enough to trigger election
- delay-latency-ms: 5000
- delay-latency-ms-rv: 500
-
- round-limit: 1
- exit-on-failure: true
- enable-pprof: true
-
- case-delay-ms: 7000
- case-shuffle: true
-
- # For full descriptions,
- # https://godoc.org/github.com/etcd-io/etcd/functional/rpcpb#Case
- cases:
- - SIGTERM_ONE_FOLLOWER
- - SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
- - SIGTERM_LEADER
- - SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT
- - SIGTERM_QUORUM
- - SIGTERM_ALL
- - SIGQUIT_AND_REMOVE_ONE_FOLLOWER
- - SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
- - BLACKHOLE_PEER_PORT_TX_RX_LEADER
- - BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT
- - BLACKHOLE_PEER_PORT_TX_RX_QUORUM
- - BLACKHOLE_PEER_PORT_TX_RX_ALL
- - DELAY_PEER_PORT_TX_RX_LEADER
- - RANDOM_DELAY_PEER_PORT_TX_RX_LEADER
- - DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT
- - RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT
- - DELAY_PEER_PORT_TX_RX_QUORUM
- - RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM
- - DELAY_PEER_PORT_TX_RX_ALL
- - RANDOM_DELAY_PEER_PORT_TX_RX_ALL
- - NO_FAIL_WITH_STRESS
- - NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS
- # - FAILPOINTS_WITH_DISK_IO_LATENCY
-
- # TODO: use iptables for discarding outbound rafthttp traffic to peer port
- # - BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER
- # - BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
- # - DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER
- # - RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER
- # - DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
- # - RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
- # - SIGQUIT_AND_REMOVE_LEADER
- # - SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT
- # - SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH
-
- failpoint-commands:
- - panic("etcd-tester")
- # - panic("etcd-tester"),1*sleep(1000)
- # - sleep(3000)
-
- runner-exec-path: ./bin/etcd-runner
- external-exec-path: ""
-
- # make up ±70% of workloads with writes
- stressers:
- - type: KV_WRITE_SMALL
- weight: 0.35
- - type: KV_WRITE_LARGE
- weight: 0.002
- - type: KV_READ_ONE_KEY
- weight: 0.07
- - type: KV_READ_RANGE
- weight: 0.07
- - type: KV_DELETE_ONE_KEY
- weight: 0.07
- - type: KV_DELETE_RANGE
- weight: 0.07
- - type: KV_TXN_WRITE_DELETE
- weight: 0.35
- - type: LEASE
- weight: 0.0
-
- # - ELECTION_RUNNER
- # - WATCH_RUNNER
- # - LOCK_RACER_RUNNER
- # - LEASE_RUNNER
-
- checkers:
- - KV_HASH
- - LEASE_EXPIRE
- #- SHORT_TTL_LEASE_EXPIRE
-
- stress-key-size: 100
- stress-key-size-large: 32769
- stress-key-suffix-range: 250000
- stress-key-suffix-range-txn: 100
- stress-key-txn-ops: 10
-
- stress-clients: 100
- stress-qps: 2000
diff --git a/tests/functional/rpcpb/etcd_config.go b/tests/functional/rpcpb/etcd_config.go
deleted file mode 100644
index 639c1bc55e6..00000000000
--- a/tests/functional/rpcpb/etcd_config.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpcpb
-
-import (
- "fmt"
- "reflect"
- "strings"
-)
-
-var etcdFields = []string{
- "Name",
- "DataDir",
- "WALDir",
-
- "HeartbeatIntervalMs",
- "ElectionTimeoutMs",
-
- "ListenClientURLs",
- "AdvertiseClientURLs",
- "ClientAutoTLS",
- "ClientCertAuth",
- "ClientCertFile",
- "ClientKeyFile",
- "ClientTrustedCAFile",
-
- "ListenPeerURLs",
- "AdvertisePeerURLs",
- "PeerAutoTLS",
- "PeerClientCertAuth",
- "PeerCertFile",
- "PeerKeyFile",
- "PeerTrustedCAFile",
-
- "InitialCluster",
- "InitialClusterState",
- "InitialClusterToken",
-
- "SnapshotCount",
- "QuotaBackendBytes",
-
- "PreVote",
- "InitialCorruptCheck",
-
- "Logger",
- "LogOutputs",
- "LogLevel",
-}
-
-// Flags returns etcd flags in string slice.
-func (e *Etcd) Flags() (fs []string) {
- tp := reflect.TypeOf(*e)
- vo := reflect.ValueOf(*e)
- for _, name := range etcdFields {
- field, ok := tp.FieldByName(name)
- if !ok {
- panic(fmt.Errorf("field %q not found", name))
- }
- fv := reflect.Indirect(vo).FieldByName(name)
- var sv string
- switch fv.Type().Kind() {
- case reflect.String:
- sv = fv.String()
- case reflect.Slice:
- n := fv.Len()
- sl := make([]string, n)
- for i := 0; i < n; i++ {
- sl[i] = fv.Index(i).String()
- }
- sv = strings.Join(sl, ",")
- case reflect.Int64:
- sv = fmt.Sprintf("%d", fv.Int())
- case reflect.Bool:
- sv = fmt.Sprintf("%v", fv.Bool())
- default:
- panic(fmt.Errorf("field %q (%v) cannot be parsed", name, fv.Type().Kind()))
- }
-
- fname := field.Tag.Get("yaml")
-
- // TODO: remove this
- if fname == "initial-corrupt-check" {
- fname = "experimental-" + fname
- }
-
- if sv != "" {
- fs = append(fs, fmt.Sprintf("--%s=%s", fname, sv))
- }
- }
- return fs
-}
diff --git a/tests/functional/rpcpb/etcd_config_test.go b/tests/functional/rpcpb/etcd_config_test.go
deleted file mode 100644
index abd6df9d431..00000000000
--- a/tests/functional/rpcpb/etcd_config_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpcpb
-
-import (
- "reflect"
- "testing"
-)
-
-func TestEtcd(t *testing.T) {
- e := &Etcd{
- Name: "s1",
- DataDir: "/tmp/etcd-functionl-1/etcd.data",
- WALDir: "/tmp/etcd-functionl-1/etcd.data/member/wal",
-
- HeartbeatIntervalMs: 100,
- ElectionTimeoutMs: 1000,
-
- ListenClientURLs: []string{"https://127.0.0.1:1379"},
- AdvertiseClientURLs: []string{"https://127.0.0.1:13790"},
- ClientAutoTLS: true,
- ClientCertAuth: false,
- ClientCertFile: "",
- ClientKeyFile: "",
- ClientTrustedCAFile: "",
-
- ListenPeerURLs: []string{"https://127.0.0.1:1380"},
- AdvertisePeerURLs: []string{"https://127.0.0.1:13800"},
- PeerAutoTLS: true,
- PeerClientCertAuth: false,
- PeerCertFile: "",
- PeerKeyFile: "",
- PeerTrustedCAFile: "",
-
- InitialCluster: "s1=https://127.0.0.1:13800,s2=https://127.0.0.1:23800,s3=https://127.0.0.1:33800",
- InitialClusterState: "new",
- InitialClusterToken: "tkn",
-
- SnapshotCount: 10000,
- QuotaBackendBytes: 10740000000,
-
- PreVote: true,
- InitialCorruptCheck: true,
-
- Logger: "zap",
- LogOutputs: []string{"/tmp/etcd-functional-1/etcd.log"},
- LogLevel: "info",
- }
-
- exps := []string{
- "--name=s1",
- "--data-dir=/tmp/etcd-functionl-1/etcd.data",
- "--wal-dir=/tmp/etcd-functionl-1/etcd.data/member/wal",
- "--heartbeat-interval=100",
- "--election-timeout=1000",
- "--listen-client-urls=https://127.0.0.1:1379",
- "--advertise-client-urls=https://127.0.0.1:13790",
- "--auto-tls=true",
- "--client-cert-auth=false",
- "--listen-peer-urls=https://127.0.0.1:1380",
- "--initial-advertise-peer-urls=https://127.0.0.1:13800",
- "--peer-auto-tls=true",
- "--peer-client-cert-auth=false",
- "--initial-cluster=s1=https://127.0.0.1:13800,s2=https://127.0.0.1:23800,s3=https://127.0.0.1:33800",
- "--initial-cluster-state=new",
- "--initial-cluster-token=tkn",
- "--snapshot-count=10000",
- "--quota-backend-bytes=10740000000",
- "--pre-vote=true",
- "--experimental-initial-corrupt-check=true",
- "--logger=zap",
- "--log-outputs=/tmp/etcd-functional-1/etcd.log",
- "--log-level=info",
- }
- fs := e.Flags()
- if !reflect.DeepEqual(exps, fs) {
- t.Fatalf("expected %q, got %q", exps, fs)
- }
-}
diff --git a/tests/functional/rpcpb/member.go b/tests/functional/rpcpb/member.go
deleted file mode 100644
index a74a3b71b4a..00000000000
--- a/tests/functional/rpcpb/member.go
+++ /dev/null
@@ -1,371 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpcpb
-
-import (
- "context"
- "crypto/tls"
- "fmt"
- "net/url"
- "os"
- "time"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/pkg/v3/logutil"
- "go.etcd.io/etcd/client/pkg/v3/transport"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/etcdutl/v3/snapshot"
-
- "github.com/dustin/go-humanize"
- "go.uber.org/zap"
- grpc "google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
-)
-
-// ElectionTimeout returns an election timeout duration.
-func (m *Member) ElectionTimeout() time.Duration {
- return time.Duration(m.Etcd.ElectionTimeoutMs) * time.Millisecond
-}
-
-// DialEtcdGRPCServer creates a raw gRPC connection to an etcd member.
-func (m *Member) DialEtcdGRPCServer(opts ...grpc.DialOption) (*grpc.ClientConn, error) {
- dialOpts := []grpc.DialOption{
- grpc.WithTimeout(5 * time.Second),
- grpc.WithBlock(),
- }
-
- secure := false
- for _, cu := range m.Etcd.AdvertiseClientURLs {
- u, err := url.Parse(cu)
- if err != nil {
- return nil, err
- }
- if u.Scheme == "https" { // TODO: handle unix
- secure = true
- }
- }
-
- if secure {
- // assume the saved TLS assets are already stored on disk
- tlsInfo := transport.TLSInfo{
- CertFile: m.ClientCertPath,
- KeyFile: m.ClientKeyPath,
- TrustedCAFile: m.ClientTrustedCAPath,
-
- // TODO: remove this with generated certs
- // only needed for auto TLS
- InsecureSkipVerify: true,
- }
- tlsConfig, err := tlsInfo.ClientConfig()
- if err != nil {
- return nil, err
- }
- creds := credentials.NewTLS(tlsConfig)
- dialOpts = append(dialOpts, grpc.WithTransportCredentials(creds))
- } else {
- dialOpts = append(dialOpts, grpc.WithInsecure())
- }
- dialOpts = append(dialOpts, opts...)
- return grpc.Dial(m.EtcdClientEndpoint, dialOpts...)
-}
-
-// CreateEtcdClientConfig creates a client configuration from member.
-func (m *Member) CreateEtcdClientConfig(opts ...grpc.DialOption) (cfg *clientv3.Config, err error) {
- secure := false
- for _, cu := range m.Etcd.AdvertiseClientURLs {
- var u *url.URL
- u, err = url.Parse(cu)
- if err != nil {
- return nil, err
- }
- if u.Scheme == "https" { // TODO: handle unix
- secure = true
- }
- }
-
- // TODO: make this configurable
- level := "error"
- if os.Getenv("ETCD_CLIENT_DEBUG") != "" {
- level = "debug"
- }
- lcfg := logutil.DefaultZapLoggerConfig
- lcfg.Level = zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(level))
-
- cfg = &clientv3.Config{
- Endpoints: []string{m.EtcdClientEndpoint},
- DialTimeout: 10 * time.Second,
- DialOptions: opts,
- LogConfig: &lcfg,
- }
- if secure {
- // assume the saved TLS assets are already stored on disk
- tlsInfo := transport.TLSInfo{
- CertFile: m.ClientCertPath,
- KeyFile: m.ClientKeyPath,
- TrustedCAFile: m.ClientTrustedCAPath,
-
- // TODO: remove this with generated certs
- // only needed for auto TLS
- InsecureSkipVerify: true,
- }
- var tlsConfig *tls.Config
- tlsConfig, err = tlsInfo.ClientConfig()
- if err != nil {
- return nil, err
- }
- cfg.TLS = tlsConfig
- }
- return cfg, err
-}
-
-// CreateEtcdClient creates a client from member.
-func (m *Member) CreateEtcdClient(opts ...grpc.DialOption) (*clientv3.Client, error) {
- cfg, err := m.CreateEtcdClientConfig(opts...)
- if err != nil {
- return nil, err
- }
- return clientv3.New(*cfg)
-}
-
-// CheckCompact ensures that historical data before given revision has been compacted.
-func (m *Member) CheckCompact(rev int64) error {
- cli, err := m.CreateEtcdClient()
- if err != nil {
- return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint)
- }
- defer cli.Close()
-
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- wch := cli.Watch(ctx, "\x00", clientv3.WithFromKey(), clientv3.WithRev(rev-1))
- wr, ok := <-wch
- cancel()
-
- if !ok {
- return fmt.Errorf("watch channel terminated (endpoint %q)", m.EtcdClientEndpoint)
- }
- if wr.CompactRevision != rev {
- return fmt.Errorf("got compact revision %v, wanted %v (endpoint %q)", wr.CompactRevision, rev, m.EtcdClientEndpoint)
- }
-
- return nil
-}
-
-// Defrag runs defragmentation on this member.
-func (m *Member) Defrag() error {
- cli, err := m.CreateEtcdClient()
- if err != nil {
- return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint)
- }
- defer cli.Close()
-
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
- _, err = cli.Defragment(ctx, m.EtcdClientEndpoint)
- cancel()
- return err
-}
-
-// RevHash fetches current revision and hash on this member.
-func (m *Member) RevHash() (int64, int64, error) {
- conn, err := m.DialEtcdGRPCServer()
- if err != nil {
- return 0, 0, err
- }
- defer conn.Close()
-
- mt := pb.NewMaintenanceClient(conn)
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- resp, err := mt.Hash(ctx, &pb.HashRequest{}, grpc.WaitForReady(true))
- cancel()
-
- if err != nil {
- return 0, 0, err
- }
-
- return resp.Header.Revision, int64(resp.Hash), nil
-}
-
-// Rev fetches current revision on this member.
-func (m *Member) Rev(ctx context.Context) (int64, error) {
- cli, err := m.CreateEtcdClient()
- if err != nil {
- return 0, fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint)
- }
- defer cli.Close()
-
- resp, err := cli.Status(ctx, m.EtcdClientEndpoint)
- if err != nil {
- return 0, err
- }
- return resp.Header.Revision, nil
-}
-
-// Compact compacts member storage with given revision.
-// It blocks until it's physically done.
-func (m *Member) Compact(rev int64, timeout time.Duration) error {
- cli, err := m.CreateEtcdClient()
- if err != nil {
- return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint)
- }
- defer cli.Close()
-
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
- _, err = cli.Compact(ctx, rev, clientv3.WithCompactPhysical())
- cancel()
- return err
-}
-
-// IsLeader returns true if this member is the current cluster leader.
-func (m *Member) IsLeader() (bool, error) {
- cli, err := m.CreateEtcdClient()
- if err != nil {
- return false, fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint)
- }
- defer cli.Close()
-
- resp, err := cli.Status(context.Background(), m.EtcdClientEndpoint)
- if err != nil {
- return false, err
- }
- return resp.Header.MemberId == resp.Leader, nil
-}
-
-// WriteHealthKey writes a health key to this member.
-func (m *Member) WriteHealthKey() error {
- cli, err := m.CreateEtcdClient()
- if err != nil {
- return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint)
- }
- defer cli.Close()
-
- // give a generous timeout in case expensive requests (range/delete) are pending
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- _, err = cli.Put(ctx, "health", "good")
- cancel()
- if err != nil {
- return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint)
- }
- return nil
-}
-
-// SaveSnapshot downloads a snapshot file from this member, locally.
-// It's meant to be requested remotely, so that the local member can store
-// the snapshot file on its local disk.
-func (m *Member) SaveSnapshot(lg *zap.Logger) (err error) {
- // remove existing snapshot first
- if err = os.RemoveAll(m.SnapshotPath); err != nil {
- return err
- }
-
- var ccfg *clientv3.Config
- ccfg, err = m.CreateEtcdClientConfig()
- if err != nil {
- return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint)
- }
-
- lg.Info(
- "snapshot save START",
- zap.String("member-name", m.Etcd.Name),
- zap.Strings("member-client-urls", m.Etcd.AdvertiseClientURLs),
- zap.String("snapshot-path", m.SnapshotPath),
- )
- now := time.Now()
- mgr := snapshot.NewV3(lg)
- if err = mgr.Save(context.Background(), *ccfg, m.SnapshotPath); err != nil {
- return err
- }
- took := time.Since(now)
-
- var fi os.FileInfo
- fi, err = os.Stat(m.SnapshotPath)
- if err != nil {
- return err
- }
- var st snapshot.Status
- st, err = mgr.Status(m.SnapshotPath)
- if err != nil {
- return err
- }
- m.SnapshotInfo = &SnapshotInfo{
- MemberName: m.Etcd.Name,
- MemberClientURLs: m.Etcd.AdvertiseClientURLs,
- SnapshotPath: m.SnapshotPath,
- SnapshotFileSize: humanize.Bytes(uint64(fi.Size())),
- SnapshotTotalSize: humanize.Bytes(uint64(st.TotalSize)),
- SnapshotTotalKey: int64(st.TotalKey),
- SnapshotHash: int64(st.Hash),
- SnapshotRevision: st.Revision,
- Took: fmt.Sprintf("%v", took),
- }
- lg.Info(
- "snapshot save END",
- zap.String("member-name", m.SnapshotInfo.MemberName),
- zap.Strings("member-client-urls", m.SnapshotInfo.MemberClientURLs),
- zap.String("snapshot-path", m.SnapshotPath),
- zap.String("snapshot-file-size", m.SnapshotInfo.SnapshotFileSize),
- zap.String("snapshot-total-size", m.SnapshotInfo.SnapshotTotalSize),
- zap.Int64("snapshot-total-key", m.SnapshotInfo.SnapshotTotalKey),
- zap.Int64("snapshot-hash", m.SnapshotInfo.SnapshotHash),
- zap.Int64("snapshot-revision", m.SnapshotInfo.SnapshotRevision),
- zap.String("took", m.SnapshotInfo.Took),
- )
- return nil
-}
-
-// RestoreSnapshot restores a cluster from a given snapshot file on disk.
-// It's meant to be requested remotely, so that the local member can load the
-// snapshot file from its local disk.
-func (m *Member) RestoreSnapshot(lg *zap.Logger) (err error) {
- if err = os.RemoveAll(m.EtcdOnSnapshotRestore.DataDir); err != nil {
- return err
- }
- if err = os.RemoveAll(m.EtcdOnSnapshotRestore.WALDir); err != nil {
- return err
- }
-
- lg.Info(
- "snapshot restore START",
- zap.String("member-name", m.Etcd.Name),
- zap.Strings("member-client-urls", m.Etcd.AdvertiseClientURLs),
- zap.String("snapshot-path", m.SnapshotPath),
- )
- now := time.Now()
- mgr := snapshot.NewV3(lg)
- err = mgr.Restore(snapshot.RestoreConfig{
- SnapshotPath: m.SnapshotInfo.SnapshotPath,
- Name: m.EtcdOnSnapshotRestore.Name,
- OutputDataDir: m.EtcdOnSnapshotRestore.DataDir,
- OutputWALDir: m.EtcdOnSnapshotRestore.WALDir,
- PeerURLs: m.EtcdOnSnapshotRestore.AdvertisePeerURLs,
- InitialCluster: m.EtcdOnSnapshotRestore.InitialCluster,
- InitialClusterToken: m.EtcdOnSnapshotRestore.InitialClusterToken,
- SkipHashCheck: false,
- // TODO: set SkipHashCheck to true, to recover from an existing db file
- })
- took := time.Since(now)
- lg.Info(
- "snapshot restore END",
- zap.String("member-name", m.SnapshotInfo.MemberName),
- zap.Strings("member-client-urls", m.SnapshotInfo.MemberClientURLs),
- zap.String("snapshot-path", m.SnapshotPath),
- zap.String("snapshot-file-size", m.SnapshotInfo.SnapshotFileSize),
- zap.String("snapshot-total-size", m.SnapshotInfo.SnapshotTotalSize),
- zap.Int64("snapshot-total-key", m.SnapshotInfo.SnapshotTotalKey),
- zap.Int64("snapshot-hash", m.SnapshotInfo.SnapshotHash),
- zap.Int64("snapshot-revision", m.SnapshotInfo.SnapshotRevision),
- zap.String("took", took.String()),
- zap.Error(err),
- )
- return err
-}
diff --git a/tests/functional/rpcpb/rpc.pb.go b/tests/functional/rpcpb/rpc.pb.go
deleted file mode 100644
index 34c9535580a..00000000000
--- a/tests/functional/rpcpb/rpc.pb.go
+++ /dev/null
@@ -1,5943 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: rpcpb/rpc.proto
-
-package rpcpb
-
-import (
- context "context"
- encoding_binary "encoding/binary"
- fmt "fmt"
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
- io "io"
- math "math"
- math_bits "math/bits"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type StresserType int32
-
-const (
- StresserType_KV_WRITE_SMALL StresserType = 0
- StresserType_KV_WRITE_LARGE StresserType = 1
- StresserType_KV_READ_ONE_KEY StresserType = 2
- StresserType_KV_READ_RANGE StresserType = 3
- StresserType_KV_DELETE_ONE_KEY StresserType = 4
- StresserType_KV_DELETE_RANGE StresserType = 5
- StresserType_KV_TXN_WRITE_DELETE StresserType = 6
- StresserType_LEASE StresserType = 10
- StresserType_ELECTION_RUNNER StresserType = 20
- StresserType_WATCH_RUNNER StresserType = 31
- StresserType_LOCK_RACER_RUNNER StresserType = 41
- StresserType_LEASE_RUNNER StresserType = 51
-)
-
-var StresserType_name = map[int32]string{
- 0: "KV_WRITE_SMALL",
- 1: "KV_WRITE_LARGE",
- 2: "KV_READ_ONE_KEY",
- 3: "KV_READ_RANGE",
- 4: "KV_DELETE_ONE_KEY",
- 5: "KV_DELETE_RANGE",
- 6: "KV_TXN_WRITE_DELETE",
- 10: "LEASE",
- 20: "ELECTION_RUNNER",
- 31: "WATCH_RUNNER",
- 41: "LOCK_RACER_RUNNER",
- 51: "LEASE_RUNNER",
-}
-
-var StresserType_value = map[string]int32{
- "KV_WRITE_SMALL": 0,
- "KV_WRITE_LARGE": 1,
- "KV_READ_ONE_KEY": 2,
- "KV_READ_RANGE": 3,
- "KV_DELETE_ONE_KEY": 4,
- "KV_DELETE_RANGE": 5,
- "KV_TXN_WRITE_DELETE": 6,
- "LEASE": 10,
- "ELECTION_RUNNER": 20,
- "WATCH_RUNNER": 31,
- "LOCK_RACER_RUNNER": 41,
- "LEASE_RUNNER": 51,
-}
-
-func (x StresserType) String() string {
- return proto.EnumName(StresserType_name, int32(x))
-}
-
-func (StresserType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_4fbc93a8dcc3881e, []int{0}
-}
-
-type Checker int32
-
-const (
- Checker_KV_HASH Checker = 0
- Checker_LEASE_EXPIRE Checker = 1
- Checker_RUNNER Checker = 2
- Checker_NO_CHECK Checker = 3
- Checker_SHORT_TTL_LEASE_EXPIRE Checker = 4
-)
-
-var Checker_name = map[int32]string{
- 0: "KV_HASH",
- 1: "LEASE_EXPIRE",
- 2: "RUNNER",
- 3: "NO_CHECK",
- 4: "SHORT_TTL_LEASE_EXPIRE",
-}
-
-var Checker_value = map[string]int32{
- "KV_HASH": 0,
- "LEASE_EXPIRE": 1,
- "RUNNER": 2,
- "NO_CHECK": 3,
- "SHORT_TTL_LEASE_EXPIRE": 4,
-}
-
-func (x Checker) String() string {
- return proto.EnumName(Checker_name, int32(x))
-}
-
-func (Checker) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_4fbc93a8dcc3881e, []int{1}
-}
-
-type Operation int32
-
-const (
- // NOT_STARTED is the agent status before etcd first start.
- Operation_NOT_STARTED Operation = 0
- // INITIAL_START_ETCD is only called to start etcd, the very first time.
- Operation_INITIAL_START_ETCD Operation = 10
- // RESTART_ETCD is sent to restart killed etcd.
- Operation_RESTART_ETCD Operation = 11
- // SIGTERM_ETCD pauses etcd process while keeping data directories
- // and previous etcd configurations.
- Operation_SIGTERM_ETCD Operation = 20
- // SIGQUIT_ETCD_AND_REMOVE_DATA kills etcd process and removes all data
- // directories to simulate destroying the whole machine.
- Operation_SIGQUIT_ETCD_AND_REMOVE_DATA Operation = 21
- // SAVE_SNAPSHOT is sent to trigger local member to download its snapshot
- // onto its local disk with the specified path from tester.
- Operation_SAVE_SNAPSHOT Operation = 30
- // RESTORE_RESTART_FROM_SNAPSHOT is sent to trigger local member to
- // restore a cluster from an existing snapshot on disk, and restart
- // an etcd instance from recovered data.
- Operation_RESTORE_RESTART_FROM_SNAPSHOT Operation = 31
- // RESTART_FROM_SNAPSHOT is sent to trigger local member to restart
- // and join an existing cluster that has been recovered from a snapshot.
- // Local member joins this cluster with fresh data.
- Operation_RESTART_FROM_SNAPSHOT Operation = 32
- // SIGQUIT_ETCD_AND_ARCHIVE_DATA is sent when a consistency check has failed,
- // so the etcd data directories need to be archived.
- Operation_SIGQUIT_ETCD_AND_ARCHIVE_DATA Operation = 40
- // SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT destroys etcd process,
- // etcd data, and agent server.
- Operation_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT Operation = 41
- // BLACKHOLE_PEER_PORT_TX_RX drops all outgoing/incoming packets from/to
- // the peer port on target member's peer port.
- Operation_BLACKHOLE_PEER_PORT_TX_RX Operation = 100
- // UNBLACKHOLE_PEER_PORT_TX_RX removes outgoing/incoming packet dropping.
- Operation_UNBLACKHOLE_PEER_PORT_TX_RX Operation = 101
- // DELAY_PEER_PORT_TX_RX delays all outgoing/incoming packets from/to
- // the peer port on target member's peer port.
- Operation_DELAY_PEER_PORT_TX_RX Operation = 200
- // UNDELAY_PEER_PORT_TX_RX removes all outgoing/incoming delays.
- Operation_UNDELAY_PEER_PORT_TX_RX Operation = 201
-)
-
-var Operation_name = map[int32]string{
- 0: "NOT_STARTED",
- 10: "INITIAL_START_ETCD",
- 11: "RESTART_ETCD",
- 20: "SIGTERM_ETCD",
- 21: "SIGQUIT_ETCD_AND_REMOVE_DATA",
- 30: "SAVE_SNAPSHOT",
- 31: "RESTORE_RESTART_FROM_SNAPSHOT",
- 32: "RESTART_FROM_SNAPSHOT",
- 40: "SIGQUIT_ETCD_AND_ARCHIVE_DATA",
- 41: "SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT",
- 100: "BLACKHOLE_PEER_PORT_TX_RX",
- 101: "UNBLACKHOLE_PEER_PORT_TX_RX",
- 200: "DELAY_PEER_PORT_TX_RX",
- 201: "UNDELAY_PEER_PORT_TX_RX",
-}
-
-var Operation_value = map[string]int32{
- "NOT_STARTED": 0,
- "INITIAL_START_ETCD": 10,
- "RESTART_ETCD": 11,
- "SIGTERM_ETCD": 20,
- "SIGQUIT_ETCD_AND_REMOVE_DATA": 21,
- "SAVE_SNAPSHOT": 30,
- "RESTORE_RESTART_FROM_SNAPSHOT": 31,
- "RESTART_FROM_SNAPSHOT": 32,
- "SIGQUIT_ETCD_AND_ARCHIVE_DATA": 40,
- "SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT": 41,
- "BLACKHOLE_PEER_PORT_TX_RX": 100,
- "UNBLACKHOLE_PEER_PORT_TX_RX": 101,
- "DELAY_PEER_PORT_TX_RX": 200,
- "UNDELAY_PEER_PORT_TX_RX": 201,
-}
-
-func (x Operation) String() string {
- return proto.EnumName(Operation_name, int32(x))
-}
-
-func (Operation) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_4fbc93a8dcc3881e, []int{2}
-}
-
-// Case defines various system faults or test cases in distributed systems,
-// in order to verify correct behavior of etcd servers and clients.
-type Case int32
-
-const (
- // SIGTERM_ONE_FOLLOWER stops a randomly chosen follower (non-leader)
- // but does not delete its data directories on disk for next restart.
- // It waits "delay-ms" before recovering this failure.
- // The expected behavior is that the follower comes back online
- // and rejoins the cluster, and then each member continues to process
- // client requests ('Put' request that requires Raft consensus).
- Case_SIGTERM_ONE_FOLLOWER Case = 0
- // SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT stops a randomly chosen
- // follower but does not delete its data directories on disk for next
- // restart. And waits until most up-to-date node (leader) applies the
- // snapshot count of entries since the stop operation.
- // The expected behavior is that the follower comes back online and
- // rejoins the cluster, and then active leader sends snapshot
- // to the follower to force it to follow the leader's log.
- // As always, after recovery, each member must be able to process
- // client requests.
- Case_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 1
- // SIGTERM_LEADER stops the active leader node but does not delete its
- // data directories on disk for next restart. Then it waits "delay-ms"
- // before recovering this failure, in order to trigger election timeouts.
- // The expected behavior is that a new leader gets elected, and the
- // old leader comes back online and rejoins the cluster as a follower.
- // As always, after recovery, each member must be able to process
- // client requests.
- Case_SIGTERM_LEADER Case = 2
- // SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT stops the active leader node
- // but does not delete its data directories on disk for next restart.
- // And waits until most up-to-date node ("new" leader) applies the
- // snapshot count of entries since the stop operation.
- // The expected behavior is that cluster elects a new leader, and the
- // old leader comes back online and rejoins the cluster as a follower.
- // And it receives the snapshot from the new leader to overwrite its
- // store. As always, after recovery, each member must be able to
- // process client requests.
- Case_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 3
- // SIGTERM_QUORUM stops a majority of the nodes to make the whole cluster
- // inoperable, but does not delete data directories on the stopped nodes
- // for the next restart. It waits "delay-ms" before recovering this failure.
- // The expected behavior is that nodes come back online, thus cluster
- // comes back operative as well. As always, after recovery, each member
- // must be able to process client requests.
- Case_SIGTERM_QUORUM Case = 4
- // SIGTERM_ALL stops the whole cluster but does not delete data directories
- // on disk for next restart. And it waits "delay-ms" before recovering
- // this failure.
- // The expected behavior is that nodes come back online, thus cluster
- // comes back operative as well. As always, after recovery, each member
- // must be able to process client requests.
- Case_SIGTERM_ALL Case = 5
- // SIGQUIT_AND_REMOVE_ONE_FOLLOWER stops a randomly chosen follower
- // (non-leader), deletes its data directories on disk, and removes
- // this member from cluster (membership reconfiguration). On recovery,
- // tester adds a new member, and this member joins the existing cluster
- // with fresh data. It waits "delay-ms" before recovering this
- // failure. This simulates destroying one follower machine, where operator
- // needs to add a new member from a fresh machine.
- // The expected behavior is that a new member joins the existing cluster,
- // and then each member continues to process client requests.
- Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER Case = 10
- // SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT stops a randomly
- // chosen follower, deletes its data directories on disk, and removes
- // this member from cluster (membership reconfiguration). On recovery,
- // tester adds a new member, and this member joins the existing cluster
- // with fresh data. On member removal, the cluster waits until the most
- // up-to-date node (leader) applies the snapshot count of entries since
- // the stop operation. This simulates destroying one follower machine,
- // where the operator needs to add
- // a new member from a fresh machine.
- // The expected behavior is that a new member joins the existing cluster,
- // and receives a snapshot from the active leader. As always, after
- // recovery, each member must be able to process client requests.
- Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 11
- // SIGQUIT_AND_REMOVE_LEADER stops the active leader node, deletes its
- // data directories on disk, and removes this member from cluster.
- // On recovery, tester adds a new member, and this member joins the
- // existing cluster with fresh data. It waits "delay-ms" before
- // recovering this failure. This simulates destroying a leader machine,
- // where operator needs to add a new member from a fresh machine.
- // The expected behavior is that a new member joins the existing cluster,
- // and then each member continues to process client requests.
- Case_SIGQUIT_AND_REMOVE_LEADER Case = 12
- // SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT stops the active leader,
- // deletes its data directories on disk, and removes this member from
- // cluster (membership reconfiguration). On recovery, tester adds a new
- // member, and this member joins the existing cluster with fresh data. On
- // member removal, the cluster waits until the most up-to-date node (new
- // leader) applies
- // the snapshot count of entries since the stop operation. This simulates
- // destroying a leader machine, where operator needs to add a new member
- // from a fresh machine.
- // The expected behavior is that on member remove, cluster elects a new
- // leader, and a new member joins the existing cluster and receives a
- // snapshot from the newly elected leader. As always, after recovery, each
- // member must be able to process client requests.
- Case_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 13
- // SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH first
- // stops a majority of the nodes and deletes the data directories on those
- // quorum nodes, making the whole cluster inoperable. Now that the quorum
- // and their data are totally destroyed, the cluster cannot even remove
- // unavailable nodes (e.g. 2 out of 3 are lost, so no leader can be elected).
- // Let's assume 3-node cluster of node A, B, and C. One day, node A and B
- // are destroyed and all their data are gone. The only viable solution is
- // to recover from C's latest snapshot.
- //
- // To simulate:
- // 1. Assume node C is the current leader with most up-to-date data.
- // 2. Download snapshot from node C, before destroying node A and B.
- // 3. Destroy node A and B, and make the whole cluster inoperable.
- // 4. Now node C cannot operate either.
- // 5. SIGTERM node C and remove its data directories.
- // 6. Restore a new seed member from node C's latest snapshot file.
- // 7. Add another member to establish 2-node cluster.
- // 8. Add another member to establish 3-node cluster.
- // 9. Add more if any.
- //
- // The expected behavior is that etcd successfully recovers from such a
- // disastrous situation where only 1 node survives out of a 3-node cluster;
- // new members join the existing cluster, and the data from the snapshot
- // is still preserved after the recovery process. As always, after recovery,
- // each member must be able to process client requests.
- Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH Case = 14
- // BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER drops all outgoing/incoming
- // packets from/to the peer port on a randomly chosen follower
- // (non-leader), and waits for "delay-ms" until recovery.
- // The expected behavior is that once dropping operation is undone,
- // each member must be able to process client requests.
- Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER Case = 100
- // BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT drops
- // all outgoing/incoming packets from/to the peer port on a randomly
- // chosen follower (non-leader), and waits for most up-to-date node
- // (leader) applies the snapshot count of entries since the blackhole
- // operation.
- // The expected behavior is that once packet drop operation is undone,
- // the slow follower tries to catch up, possibly receiving the snapshot
- // from the active leader. As always, after recovery, each member must
- // be able to process client requests.
- Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 101
- // BLACKHOLE_PEER_PORT_TX_RX_LEADER drops all outgoing/incoming packets
- // from/to the peer port on the active leader (isolated), and waits for
- // "delay-ms" until recovery, in order to trigger election timeout.
- // The expected behavior is that after election timeout, a new leader gets
- // elected, and once dropping operation is undone, the old leader comes
- // back and rejoins the cluster as a follower. As always, after recovery,
- // each member must be able to process client requests.
- Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER Case = 102
- // BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT drops all
- // outgoing/incoming packets from/to the peer port on the active leader,
- // and waits for most up-to-date node (leader) applies the snapshot
- // count of entries since the blackhole operation.
- // The expected behavior is that cluster elects a new leader, and once
- // dropping operation is undone, the old leader comes back and rejoins
- // the cluster as a follower. The slow follower tries to catch up, likely
- // receiving the snapshot from the new active leader. As always, after
- // recovery, each member must be able to process client requests.
- Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 103
- // BLACKHOLE_PEER_PORT_TX_RX_QUORUM drops all outgoing/incoming packets
- // from/to the peer ports on a majority of the cluster's nodes, so the
- // cluster loses its leader and becomes inoperable. It waits for "delay-ms"
- // until recovery.
- // The expected behavior is that once packet drop operation is undone,
- // nodes come back online, thus cluster comes back operative. As always,
- // after recovery, each member must be able to process client requests.
- Case_BLACKHOLE_PEER_PORT_TX_RX_QUORUM Case = 104
- // BLACKHOLE_PEER_PORT_TX_RX_ALL drops all outgoing/incoming packets
- // from/to the peer ports on all nodes, thus making cluster totally
- // inoperable. It waits for "delay-ms" until recovery.
- // The expected behavior is that once packet drop operation is undone,
- // nodes come back online, thus cluster comes back operative. As always,
- // after recovery, each member must be able to process client requests.
- Case_BLACKHOLE_PEER_PORT_TX_RX_ALL Case = 105
- // DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER delays outgoing/incoming packets
- // from/to the peer port on a randomly chosen follower (non-leader).
- // It waits for "delay-ms" until recovery.
- // The expected behavior is that once packet delay operation is undone,
- // the follower comes back and tries to catch up with latest changes from
- // cluster. And as always, after recovery, each member must be able to
- // process client requests.
- Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER Case = 200
- // RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER delays outgoing/incoming
- // packets from/to the peer port on a randomly chosen follower
- // (non-leader) with a randomized time duration (thus isolated). It
- // waits for "delay-ms" until recovery.
- // The expected behavior is that once packet delay operation is undone,
- // each member must be able to process client requests.
- Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER Case = 201
- // DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT delays
- // outgoing/incoming packets from/to the peer port on a randomly chosen
- // follower (non-leader), and waits for most up-to-date node (leader)
- // applies the snapshot count of entries since the delay operation.
- // The expected behavior is that the delayed follower gets isolated
- // and behind the current active leader, and once delay operation is undone,
- // the slow follower comes back and catches up possibly receiving snapshot
- // from the active leader. As always, after recovery, each member must be
- // able to process client requests.
- Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 202
- // RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT delays
- // outgoing/incoming packets from/to the peer port on a randomly chosen
- // follower (non-leader) with a randomized time duration, and waits for
- // most up-to-date node (leader) applies the snapshot count of entries
- // since the delay operation.
- // The expected behavior is that the delayed follower gets isolated
- // and behind the current active leader, and once delay operation is undone,
- // the slow follower comes back and catches up, possibly receiving a
- // snapshot from the active leader. As always, after recovery, each member
- // must be able to process client requests.
- Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 203
- // DELAY_PEER_PORT_TX_RX_LEADER delays outgoing/incoming packets from/to
- // the peer port on the active leader. And waits for "delay-ms" until
- // recovery.
- // The expected behavior is that cluster may elect a new leader, and
- // once packet delay operation is undone, the (old) leader comes back
- // and tries to catch up with latest changes from cluster. As always,
- // after recovery, each member must be able to process client requests.
- Case_DELAY_PEER_PORT_TX_RX_LEADER Case = 204
- // RANDOM_DELAY_PEER_PORT_TX_RX_LEADER delays outgoing/incoming packets
- // from/to the peer port on the active leader with a randomized time
- // duration. And waits for "delay-ms" until recovery.
- // The expected behavior is that cluster may elect a new leader, and
- // once packet delay operation is undone, the (old) leader comes back
- // and tries to catch up with latest changes from cluster. As always,
- // after recovery, each member must be able to process client requests.
- Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER Case = 205
- // DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT delays
- // outgoing/incoming packets from/to the peer port on the active leader,
- // and waits for most up-to-date node (current or new leader) applies the
- // snapshot count of entries since the delay operation.
- // The expected behavior is that cluster may elect a new leader, and
- // the old leader gets isolated and behind the current active leader,
- // and once delay operation is undone, the slow follower comes back
- // and catches up, likely receiving a snapshot from the active leader.
- // As always, after recovery, each member must be able to process client
- // requests.
- Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 206
- // RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT delays
- // outgoing/incoming packets from/to the peer port on the active leader,
- // with a randomized time duration. And it waits for most up-to-date node
- // (current or new leader) applies the snapshot count of entries since the
- // delay operation.
- // The expected behavior is that cluster may elect a new leader, and
- // the old leader gets isolated and behind the current active leader,
- // and once delay operation is undone, the slow follower comes back
- // and catches up, likely receiving a snapshot from the active leader.
- // As always, after recovery, each member must be able to process client
- // requests.
- Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 207
- // DELAY_PEER_PORT_TX_RX_QUORUM delays outgoing/incoming packets from/to
- // the peer ports on majority nodes of cluster. And it waits for
- // "delay-ms" until recovery, likely to trigger election timeouts.
- // The expected behavior is that cluster may elect a new leader, while
- // quorum of nodes struggle with slow networks, and once delay operation
- // is undone, nodes come back and cluster comes back operative. As always,
- // after recovery, each member must be able to process client requests.
- Case_DELAY_PEER_PORT_TX_RX_QUORUM Case = 208
- // RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM delays outgoing/incoming packets
- // from/to the peer ports on majority nodes of cluster, with randomized
- // time durations. And it waits for "delay-ms" until recovery, likely
- // to trigger election timeouts.
- // The expected behavior is that cluster may elect a new leader, while
- // quorum of nodes struggle with slow networks, and once delay operation
- // is undone, nodes come back and cluster comes back operative. As always,
- // after recovery, each member must be able to process client requests.
- Case_RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM Case = 209
- // DELAY_PEER_PORT_TX_RX_ALL delays outgoing/incoming packets from/to the
- // peer ports on all nodes. And it waits for "delay-ms" until recovery,
- // likely to trigger election timeouts.
- // The expected behavior is that cluster may become totally inoperable,
- // struggling with slow networks across the whole cluster. Once delay
- // operation is undone, nodes come back and cluster comes back operative.
- // As always, after recovery, each member must be able to process client
- // requests.
- Case_DELAY_PEER_PORT_TX_RX_ALL Case = 210
- // RANDOM_DELAY_PEER_PORT_TX_RX_ALL delays outgoing/incoming packets
- // from/to the peer ports on all nodes, with randomized time durations.
- // And it waits for "delay-ms" until recovery, likely to trigger
- // election timeouts.
- // The expected behavior is that cluster may become totally inoperable,
- // struggling with slow networks across the whole cluster. Once delay
- // operation is undone, nodes come back and cluster comes back operative.
- // As always, after recovery, each member must be able to process client
- // requests.
- Case_RANDOM_DELAY_PEER_PORT_TX_RX_ALL Case = 211
- // NO_FAIL_WITH_STRESS stops injecting failures while testing the
- // consistency and correctness under pressure loads, for the duration of
- // "delay-ms". The goal is to ensure the cluster keeps making progress
- // during recovery, and to verify that the system does not deadlock following
- // a sequence of failure injections.
- // The expected behavior is that cluster remains fully operative in healthy
- // condition. As always, after recovery, each member must be able to process
- // client requests.
- Case_NO_FAIL_WITH_STRESS Case = 300
- // NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS neither injects failures nor
- // sends stressing client requests to the cluster, for the duration of
- // "delay-ms". The goal is to ensure the cluster keeps making progress
- // during recovery, and to verify that the system does not deadlock following
- // a sequence of failure injections.
- // The expected behavior is that the cluster remains fully operative in healthy
- // condition, and client requests during the liveness period succeed without
- // errors.
- // Note: this is how Google Chubby does failure injection testing
- // https://static.googleusercontent.com/media/research.google.com/en//archive/paxos_made_live.pdf.
- Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS Case = 301
- // FAILPOINTS injects failpoints to etcd server runtime, triggering panics
- // in critical code paths.
- Case_FAILPOINTS Case = 400
- // FAILPOINTS_WITH_DISK_IO_LATENCY injects high disk I/O latency failure in raftAfterSave code paths.
- Case_FAILPOINTS_WITH_DISK_IO_LATENCY Case = 401
- // EXTERNAL runs external failure injection scripts.
- Case_EXTERNAL Case = 500
-)
-
-var Case_name = map[int32]string{
- 0: "SIGTERM_ONE_FOLLOWER",
- 1: "SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
- 2: "SIGTERM_LEADER",
- 3: "SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT",
- 4: "SIGTERM_QUORUM",
- 5: "SIGTERM_ALL",
- 10: "SIGQUIT_AND_REMOVE_ONE_FOLLOWER",
- 11: "SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
- 12: "SIGQUIT_AND_REMOVE_LEADER",
- 13: "SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT",
- 14: "SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH",
- 100: "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER",
- 101: "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
- 102: "BLACKHOLE_PEER_PORT_TX_RX_LEADER",
- 103: "BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
- 104: "BLACKHOLE_PEER_PORT_TX_RX_QUORUM",
- 105: "BLACKHOLE_PEER_PORT_TX_RX_ALL",
- 200: "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER",
- 201: "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER",
- 202: "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
- 203: "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
- 204: "DELAY_PEER_PORT_TX_RX_LEADER",
- 205: "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER",
- 206: "DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
- 207: "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
- 208: "DELAY_PEER_PORT_TX_RX_QUORUM",
- 209: "RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM",
- 210: "DELAY_PEER_PORT_TX_RX_ALL",
- 211: "RANDOM_DELAY_PEER_PORT_TX_RX_ALL",
- 300: "NO_FAIL_WITH_STRESS",
- 301: "NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS",
- 400: "FAILPOINTS",
- 401: "FAILPOINTS_WITH_DISK_IO_LATENCY",
- 500: "EXTERNAL",
-}
-
-var Case_value = map[string]int32{
- "SIGTERM_ONE_FOLLOWER": 0,
- "SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 1,
- "SIGTERM_LEADER": 2,
- "SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT": 3,
- "SIGTERM_QUORUM": 4,
- "SIGTERM_ALL": 5,
- "SIGQUIT_AND_REMOVE_ONE_FOLLOWER": 10,
- "SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 11,
- "SIGQUIT_AND_REMOVE_LEADER": 12,
- "SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT": 13,
- "SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH": 14,
- "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER": 100,
- "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 101,
- "BLACKHOLE_PEER_PORT_TX_RX_LEADER": 102,
- "BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT": 103,
- "BLACKHOLE_PEER_PORT_TX_RX_QUORUM": 104,
- "BLACKHOLE_PEER_PORT_TX_RX_ALL": 105,
- "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER": 200,
- "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER": 201,
- "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 202,
- "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 203,
- "DELAY_PEER_PORT_TX_RX_LEADER": 204,
- "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER": 205,
- "DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT": 206,
- "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT": 207,
- "DELAY_PEER_PORT_TX_RX_QUORUM": 208,
- "RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM": 209,
- "DELAY_PEER_PORT_TX_RX_ALL": 210,
- "RANDOM_DELAY_PEER_PORT_TX_RX_ALL": 211,
- "NO_FAIL_WITH_STRESS": 300,
- "NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS": 301,
- "FAILPOINTS": 400,
- "FAILPOINTS_WITH_DISK_IO_LATENCY": 401,
- "EXTERNAL": 500,
-}
-
-func (x Case) String() string {
- return proto.EnumName(Case_name, int32(x))
-}
-
-func (Case) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_4fbc93a8dcc3881e, []int{3}
-}
-
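- // Illustrative helper (a sketch, not part of the generated code): tester
- // configurations select failure cases by name (see Tester.Cases below), and
- // the Case_value map above converts those names back into Case enum values.
- func casesFromNames(names []string) (cases []Case, unknown []string) {
- for _, name := range names {
- if v, ok := Case_value[name]; ok {
- cases = append(cases, Case(v))
- } else {
- unknown = append(unknown, name)
- }
- }
- return cases, unknown
- }
-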
-type Request struct {
- Operation Operation `protobuf:"varint,1,opt,name=Operation,proto3,enum=rpcpb.Operation" json:"Operation,omitempty"`
- // Member contains the same Member object from tester configuration.
- Member *Member `protobuf:"bytes,2,opt,name=Member,proto3" json:"Member,omitempty"`
- // Tester contains tester configuration.
- Tester *Tester `protobuf:"bytes,3,opt,name=Tester,proto3" json:"Tester,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Request) Reset() { *m = Request{} }
-func (m *Request) String() string { return proto.CompactTextString(m) }
-func (*Request) ProtoMessage() {}
-func (*Request) Descriptor() ([]byte, []int) {
- return fileDescriptor_4fbc93a8dcc3881e, []int{0}
-}
-func (m *Request) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Request.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Request) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Request.Merge(m, src)
-}
-func (m *Request) XXX_Size() int {
- return m.Size()
-}
-func (m *Request) XXX_DiscardUnknown() {
- xxx_messageInfo_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Request proto.InternalMessageInfo
-
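- // Illustrative sketch (an assumption, not part of the generated code): a
- // Request pairs an Operation with the target Member and the Tester
- // configuration; serialized with the proto runtime already imported by
- // this file, it can then be delivered to that member's agent.
- func encodeRestartRequest(m *Member, t *Tester) ([]byte, error) {
- req := &Request{
- Operation: Operation_RESTART_ETCD,
- Member: m,
- Tester: t,
- }
- return proto.Marshal(req)
- }
-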
-// SnapshotInfo contains SAVE_SNAPSHOT request results.
-type SnapshotInfo struct {
- MemberName string `protobuf:"bytes,1,opt,name=MemberName,proto3" json:"MemberName,omitempty"`
- MemberClientURLs []string `protobuf:"bytes,2,rep,name=MemberClientURLs,proto3" json:"MemberClientURLs,omitempty"`
- SnapshotPath string `protobuf:"bytes,3,opt,name=SnapshotPath,proto3" json:"SnapshotPath,omitempty"`
- SnapshotFileSize string `protobuf:"bytes,4,opt,name=SnapshotFileSize,proto3" json:"SnapshotFileSize,omitempty"`
- SnapshotTotalSize string `protobuf:"bytes,5,opt,name=SnapshotTotalSize,proto3" json:"SnapshotTotalSize,omitempty"`
- SnapshotTotalKey int64 `protobuf:"varint,6,opt,name=SnapshotTotalKey,proto3" json:"SnapshotTotalKey,omitempty"`
- SnapshotHash int64 `protobuf:"varint,7,opt,name=SnapshotHash,proto3" json:"SnapshotHash,omitempty"`
- SnapshotRevision int64 `protobuf:"varint,8,opt,name=SnapshotRevision,proto3" json:"SnapshotRevision,omitempty"`
- Took string `protobuf:"bytes,9,opt,name=Took,proto3" json:"Took,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SnapshotInfo) Reset() { *m = SnapshotInfo{} }
-func (m *SnapshotInfo) String() string { return proto.CompactTextString(m) }
-func (*SnapshotInfo) ProtoMessage() {}
-func (*SnapshotInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_4fbc93a8dcc3881e, []int{1}
-}
-func (m *SnapshotInfo) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SnapshotInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_SnapshotInfo.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *SnapshotInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SnapshotInfo.Merge(m, src)
-}
-func (m *SnapshotInfo) XXX_Size() int {
- return m.Size()
-}
-func (m *SnapshotInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_SnapshotInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SnapshotInfo proto.InternalMessageInfo
-
-type Response struct {
- Success bool `protobuf:"varint,1,opt,name=Success,proto3" json:"Success,omitempty"`
- Status string `protobuf:"bytes,2,opt,name=Status,proto3" json:"Status,omitempty"`
- // Member contains the same Member object from tester request.
- Member *Member `protobuf:"bytes,3,opt,name=Member,proto3" json:"Member,omitempty"`
- // SnapshotInfo contains SAVE_SNAPSHOT request results.
- SnapshotInfo *SnapshotInfo `protobuf:"bytes,4,opt,name=SnapshotInfo,proto3" json:"SnapshotInfo,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Response) Reset() { *m = Response{} }
-func (m *Response) String() string { return proto.CompactTextString(m) }
-func (*Response) ProtoMessage() {}
-func (*Response) Descriptor() ([]byte, []int) {
- return fileDescriptor_4fbc93a8dcc3881e, []int{2}
-}
-func (m *Response) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Response.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Response) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Response.Merge(m, src)
-}
-func (m *Response) XXX_Size() int {
- return m.Size()
-}
-func (m *Response) XXX_DiscardUnknown() {
- xxx_messageInfo_Response.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Response proto.InternalMessageInfo
-
-type Member struct {
- // EtcdExec is the executable etcd binary path in agent server.
- EtcdExec string `protobuf:"bytes,1,opt,name=EtcdExec,proto3" json:"EtcdExec,omitempty" yaml:"etcd-exec"`
- // AgentAddr is the agent HTTP server address.
- AgentAddr string `protobuf:"bytes,11,opt,name=AgentAddr,proto3" json:"AgentAddr,omitempty" yaml:"agent-addr"`
- // FailpointHTTPAddr is the agent's failpoints HTTP server address.
- FailpointHTTPAddr string `protobuf:"bytes,12,opt,name=FailpointHTTPAddr,proto3" json:"FailpointHTTPAddr,omitempty" yaml:"failpoint-http-addr"`
- // BaseDir is the base directory where all logs and etcd data are stored.
- BaseDir string `protobuf:"bytes,101,opt,name=BaseDir,proto3" json:"BaseDir,omitempty" yaml:"base-dir"`
- // EtcdClientProxy is true when client traffic needs to be proxied.
- // If true, listen client URL port must be different than advertise client URL port.
- EtcdClientProxy bool `protobuf:"varint,201,opt,name=EtcdClientProxy,proto3" json:"EtcdClientProxy,omitempty" yaml:"etcd-client-proxy"`
- // EtcdPeerProxy is true when peer traffic needs to be proxied.
- // If true, listen peer URL port must be different than advertise peer URL port.
- EtcdPeerProxy bool `protobuf:"varint,202,opt,name=EtcdPeerProxy,proto3" json:"EtcdPeerProxy,omitempty" yaml:"etcd-peer-proxy"`
- // EtcdClientEndpoint is the etcd client endpoint.
- EtcdClientEndpoint string `protobuf:"bytes,301,opt,name=EtcdClientEndpoint,proto3" json:"EtcdClientEndpoint,omitempty" yaml:"etcd-client-endpoint"`
- // Etcd defines etcd binary configuration flags.
- Etcd *Etcd `protobuf:"bytes,302,opt,name=Etcd,proto3" json:"Etcd,omitempty" yaml:"etcd"`
- // EtcdOnSnapshotRestore defines one-time use configuration during etcd
- // snapshot recovery process.
- EtcdOnSnapshotRestore *Etcd `protobuf:"bytes,303,opt,name=EtcdOnSnapshotRestore,proto3" json:"EtcdOnSnapshotRestore,omitempty"`
- // ClientCertData contains cert file contents from this member's etcd server.
- ClientCertData string `protobuf:"bytes,401,opt,name=ClientCertData,proto3" json:"ClientCertData,omitempty" yaml:"client-cert-data"`
- ClientCertPath string `protobuf:"bytes,402,opt,name=ClientCertPath,proto3" json:"ClientCertPath,omitempty" yaml:"client-cert-path"`
- // ClientKeyData contains key file contents from this member's etcd server.
- ClientKeyData string `protobuf:"bytes,403,opt,name=ClientKeyData,proto3" json:"ClientKeyData,omitempty" yaml:"client-key-data"`
- ClientKeyPath string `protobuf:"bytes,404,opt,name=ClientKeyPath,proto3" json:"ClientKeyPath,omitempty" yaml:"client-key-path"`
- // ClientTrustedCAData contains trusted CA file contents from this member's etcd server.
- ClientTrustedCAData string `protobuf:"bytes,405,opt,name=ClientTrustedCAData,proto3" json:"ClientTrustedCAData,omitempty" yaml:"client-trusted-ca-data"`
- ClientTrustedCAPath string `protobuf:"bytes,406,opt,name=ClientTrustedCAPath,proto3" json:"ClientTrustedCAPath,omitempty" yaml:"client-trusted-ca-path"`
- // PeerCertData contains cert file contents from this member's etcd server.
- PeerCertData string `protobuf:"bytes,501,opt,name=PeerCertData,proto3" json:"PeerCertData,omitempty" yaml:"peer-cert-data"`
- PeerCertPath string `protobuf:"bytes,502,opt,name=PeerCertPath,proto3" json:"PeerCertPath,omitempty" yaml:"peer-cert-path"`
- // PeerKeyData contains key file contents from this member's etcd server.
- PeerKeyData string `protobuf:"bytes,503,opt,name=PeerKeyData,proto3" json:"PeerKeyData,omitempty" yaml:"peer-key-data"`
- PeerKeyPath string `protobuf:"bytes,504,opt,name=PeerKeyPath,proto3" json:"PeerKeyPath,omitempty" yaml:"peer-key-path"`
- // PeerTrustedCAData contains trusted CA file contents from this member's etcd server.
- PeerTrustedCAData string `protobuf:"bytes,505,opt,name=PeerTrustedCAData,proto3" json:"PeerTrustedCAData,omitempty" yaml:"peer-trusted-ca-data"`
- PeerTrustedCAPath string `protobuf:"bytes,506,opt,name=PeerTrustedCAPath,proto3" json:"PeerTrustedCAPath,omitempty" yaml:"peer-trusted-ca-path"`
- // SnapshotPath is the snapshot file path to store or restore from.
- SnapshotPath string `protobuf:"bytes,601,opt,name=SnapshotPath,proto3" json:"SnapshotPath,omitempty" yaml:"snapshot-path"`
- // SnapshotInfo contains last SAVE_SNAPSHOT request results.
- SnapshotInfo *SnapshotInfo `protobuf:"bytes,602,opt,name=SnapshotInfo,proto3" json:"SnapshotInfo,omitempty"`
- // Failpoints is the GOFAIL_FAILPOINTS environment variable value to use when starting etcd.
- Failpoints string `protobuf:"bytes,701,opt,name=Failpoints,proto3" json:"Failpoints,omitempty" yaml:"failpoints"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Member) Reset() { *m = Member{} }
-func (m *Member) String() string { return proto.CompactTextString(m) }
-func (*Member) ProtoMessage() {}
-func (*Member) Descriptor() ([]byte, []int) {
- return fileDescriptor_4fbc93a8dcc3881e, []int{3}
-}
-func (m *Member) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Member.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Member) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Member.Merge(m, src)
-}
-func (m *Member) XXX_Size() int {
- return m.Size()
-}
-func (m *Member) XXX_DiscardUnknown() {
- xxx_messageInfo_Member.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Member proto.InternalMessageInfo
-
-type Tester struct {
- DataDir string `protobuf:"bytes,1,opt,name=DataDir,proto3" json:"DataDir,omitempty" yaml:"data-dir"`
- Network string `protobuf:"bytes,2,opt,name=Network,proto3" json:"Network,omitempty" yaml:"network"`
- Addr string `protobuf:"bytes,3,opt,name=Addr,proto3" json:"Addr,omitempty" yaml:"addr"`
- // DelayLatencyMs is the delay latency in milliseconds
- // to inject into the simulated slow network.
- DelayLatencyMs uint32 `protobuf:"varint,11,opt,name=DelayLatencyMs,proto3" json:"DelayLatencyMs,omitempty" yaml:"delay-latency-ms"`
- // DelayLatencyMsRv is the delay latency random variable in milliseconds.
- DelayLatencyMsRv uint32 `protobuf:"varint,12,opt,name=DelayLatencyMsRv,proto3" json:"DelayLatencyMsRv,omitempty" yaml:"delay-latency-ms-rv"`
- // UpdatedDelayLatencyMs is the updated delay latency in milliseconds
- // to inject into the simulated slow network. It is the final latency to apply,
- // in case the latency value is randomly generated from the given delay latency field.
- UpdatedDelayLatencyMs uint32 `protobuf:"varint,13,opt,name=UpdatedDelayLatencyMs,proto3" json:"UpdatedDelayLatencyMs,omitempty" yaml:"updated-delay-latency-ms"`
- // RoundLimit is the limit of rounds to run failure set (-1 to run without limits).
- RoundLimit int32 `protobuf:"varint,21,opt,name=RoundLimit,proto3" json:"RoundLimit,omitempty" yaml:"round-limit"`
- // ExitOnCaseFail, if true, makes the tester exit on the first case failure.
- ExitOnCaseFail bool `protobuf:"varint,22,opt,name=ExitOnCaseFail,proto3" json:"ExitOnCaseFail,omitempty" yaml:"exit-on-failure"`
- // EnablePprof is true to enable profiler.
- EnablePprof bool `protobuf:"varint,23,opt,name=EnablePprof,proto3" json:"EnablePprof,omitempty" yaml:"enable-pprof"`
- // CaseDelayMs is the delay duration after failure is injected.
- // Useful when triggering snapshot or no-op failure cases.
- CaseDelayMs uint32 `protobuf:"varint,31,opt,name=CaseDelayMs,proto3" json:"CaseDelayMs,omitempty" yaml:"case-delay-ms"`
- // CaseShuffle is true to randomize failure injecting order.
- CaseShuffle bool `protobuf:"varint,32,opt,name=CaseShuffle,proto3" json:"CaseShuffle,omitempty" yaml:"case-shuffle"`
- // Cases is the selected test cases to schedule.
- // If empty, run all failure cases.
- Cases []string `protobuf:"bytes,33,rep,name=Cases,proto3" json:"Cases,omitempty" yaml:"cases"`
- // FailpointCommands is the list of "gofail" commands
- // (e.g. panic("etcd-tester"),1*sleep(1000)).
- FailpointCommands []string `protobuf:"bytes,34,rep,name=FailpointCommands,proto3" json:"FailpointCommands,omitempty" yaml:"failpoint-commands"`
- // RunnerExecPath is a path of etcd-runner binary.
- RunnerExecPath string `protobuf:"bytes,41,opt,name=RunnerExecPath,proto3" json:"RunnerExecPath,omitempty" yaml:"runner-exec-path"`
- // ExternalExecPath is a path of script for enabling/disabling an external fault injector.
- ExternalExecPath string `protobuf:"bytes,42,opt,name=ExternalExecPath,proto3" json:"ExternalExecPath,omitempty" yaml:"external-exec-path"`
- // Stressers is the list of stresser types:
- // KV, LEASE, ELECTION_RUNNER, WATCH_RUNNER, LOCK_RACER_RUNNER, LEASE_RUNNER.
- Stressers []*Stresser `protobuf:"bytes,101,rep,name=Stressers,proto3" json:"Stressers,omitempty" yaml:"stressers"`
- // Checkers is the list of consistency checker types:
- // KV_HASH, LEASE_EXPIRE, NO_CHECK, RUNNER.
- // Leave empty to skip consistency checks.
- Checkers []string `protobuf:"bytes,102,rep,name=Checkers,proto3" json:"Checkers,omitempty" yaml:"checkers"`
- // StressKeySize is the size of each small key written into etcd.
- StressKeySize int32 `protobuf:"varint,201,opt,name=StressKeySize,proto3" json:"StressKeySize,omitempty" yaml:"stress-key-size"`
- // StressKeySizeLarge is the size of each large key written into etcd.
- StressKeySizeLarge int32 `protobuf:"varint,202,opt,name=StressKeySizeLarge,proto3" json:"StressKeySizeLarge,omitempty" yaml:"stress-key-size-large"`
- // StressKeySuffixRange is the count of keys in the key range written into etcd.
- // Stress keys are created with fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange)).
- StressKeySuffixRange int32 `protobuf:"varint,203,opt,name=StressKeySuffixRange,proto3" json:"StressKeySuffixRange,omitempty" yaml:"stress-key-suffix-range"`
- // StressKeySuffixRangeTxn is the count of keys in the key range written into etcd txns (max 100).
- // Stress keys are created with fmt.Sprintf("/k%03d", i).
- StressKeySuffixRangeTxn int32 `protobuf:"varint,204,opt,name=StressKeySuffixRangeTxn,proto3" json:"StressKeySuffixRangeTxn,omitempty" yaml:"stress-key-suffix-range-txn"`
- // StressKeyTxnOps is the number of operations per transaction (max 64).
- StressKeyTxnOps int32 `protobuf:"varint,205,opt,name=StressKeyTxnOps,proto3" json:"StressKeyTxnOps,omitempty" yaml:"stress-key-txn-ops"`
- // StressClients is the number of concurrent stressing clients
- // with "one" shared TCP connection.
- StressClients int32 `protobuf:"varint,301,opt,name=StressClients,proto3" json:"StressClients,omitempty" yaml:"stress-clients"`
- // StressQPS is the maximum number of stresser requests per second.
- StressQPS int32 `protobuf:"varint,302,opt,name=StressQPS,proto3" json:"StressQPS,omitempty" yaml:"stress-qps"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Tester) Reset() { *m = Tester{} }
-func (m *Tester) String() string { return proto.CompactTextString(m) }
-func (*Tester) ProtoMessage() {}
-func (*Tester) Descriptor() ([]byte, []int) {
- return fileDescriptor_4fbc93a8dcc3881e, []int{4}
-}
-func (m *Tester) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Tester) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Tester.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Tester) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Tester.Merge(m, src)
-}
-func (m *Tester) XXX_Size() int {
- return m.Size()
-}
-func (m *Tester) XXX_DiscardUnknown() {
- xxx_messageInfo_Tester.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Tester proto.InternalMessageInfo
-
-type Stresser struct {
- Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty" yaml:"type"`
- Weight float64 `protobuf:"fixed64,2,opt,name=Weight,proto3" json:"Weight,omitempty" yaml:"weight"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Stresser) Reset() { *m = Stresser{} }
-func (m *Stresser) String() string { return proto.CompactTextString(m) }
-func (*Stresser) ProtoMessage() {}
-func (*Stresser) Descriptor() ([]byte, []int) {
- return fileDescriptor_4fbc93a8dcc3881e, []int{5}
-}
-func (m *Stresser) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Stresser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Stresser.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Stresser) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Stresser.Merge(m, src)
-}
-func (m *Stresser) XXX_Size() int {
- return m.Size()
-}
-func (m *Stresser) XXX_DiscardUnknown() {
- xxx_messageInfo_Stresser.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Stresser proto.InternalMessageInfo
-
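- // Illustrative sketch (assumptions, not part of the generated code): stresser
- // types come from the list documented on Tester.Stressers above (e.g. "KV",
- // "LEASE"), and Weight is treated here as each stresser's relative share of
- // the client load.
- func exampleStressers() []*Stresser {
- return []*Stresser{
- {Type: "KV", Weight: 0.35},
- {Type: "LEASE", Weight: 0.65},
- }
- }
-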
-type Etcd struct {
- Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty" yaml:"name"`
- DataDir string `protobuf:"bytes,2,opt,name=DataDir,proto3" json:"DataDir,omitempty" yaml:"data-dir"`
- WALDir string `protobuf:"bytes,3,opt,name=WALDir,proto3" json:"WALDir,omitempty" yaml:"wal-dir"`
- // HeartbeatIntervalMs is the time (in milliseconds) of a heartbeat interval.
- // Default value is 100, which is 100ms.
- HeartbeatIntervalMs int64 `protobuf:"varint,11,opt,name=HeartbeatIntervalMs,proto3" json:"HeartbeatIntervalMs,omitempty" yaml:"heartbeat-interval"`
- // ElectionTimeoutMs is the time (in milliseconds) for an election to timeout.
- // Default value is 1000, which is 1s.
- ElectionTimeoutMs int64 `protobuf:"varint,12,opt,name=ElectionTimeoutMs,proto3" json:"ElectionTimeoutMs,omitempty" yaml:"election-timeout"`
- ListenClientURLs []string `protobuf:"bytes,21,rep,name=ListenClientURLs,proto3" json:"ListenClientURLs,omitempty" yaml:"listen-client-urls"`
- AdvertiseClientURLs []string `protobuf:"bytes,22,rep,name=AdvertiseClientURLs,proto3" json:"AdvertiseClientURLs,omitempty" yaml:"advertise-client-urls"`
- ClientAutoTLS bool `protobuf:"varint,23,opt,name=ClientAutoTLS,proto3" json:"ClientAutoTLS,omitempty" yaml:"auto-tls"`
- ClientCertAuth bool `protobuf:"varint,24,opt,name=ClientCertAuth,proto3" json:"ClientCertAuth,omitempty" yaml:"client-cert-auth"`
- ClientCertFile string `protobuf:"bytes,25,opt,name=ClientCertFile,proto3" json:"ClientCertFile,omitempty" yaml:"cert-file"`
- ClientKeyFile string `protobuf:"bytes,26,opt,name=ClientKeyFile,proto3" json:"ClientKeyFile,omitempty" yaml:"key-file"`
- ClientTrustedCAFile string `protobuf:"bytes,27,opt,name=ClientTrustedCAFile,proto3" json:"ClientTrustedCAFile,omitempty" yaml:"trusted-ca-file"`
- ListenPeerURLs []string `protobuf:"bytes,31,rep,name=ListenPeerURLs,proto3" json:"ListenPeerURLs,omitempty" yaml:"listen-peer-urls"`
- AdvertisePeerURLs []string `protobuf:"bytes,32,rep,name=AdvertisePeerURLs,proto3" json:"AdvertisePeerURLs,omitempty" yaml:"initial-advertise-peer-urls"`
- PeerAutoTLS bool `protobuf:"varint,33,opt,name=PeerAutoTLS,proto3" json:"PeerAutoTLS,omitempty" yaml:"peer-auto-tls"`
- PeerClientCertAuth bool `protobuf:"varint,34,opt,name=PeerClientCertAuth,proto3" json:"PeerClientCertAuth,omitempty" yaml:"peer-client-cert-auth"`
- PeerCertFile string `protobuf:"bytes,35,opt,name=PeerCertFile,proto3" json:"PeerCertFile,omitempty" yaml:"peer-cert-file"`
- PeerKeyFile string `protobuf:"bytes,36,opt,name=PeerKeyFile,proto3" json:"PeerKeyFile,omitempty" yaml:"peer-key-file"`
- PeerTrustedCAFile string `protobuf:"bytes,37,opt,name=PeerTrustedCAFile,proto3" json:"PeerTrustedCAFile,omitempty" yaml:"peer-trusted-ca-file"`
- InitialCluster string `protobuf:"bytes,41,opt,name=InitialCluster,proto3" json:"InitialCluster,omitempty" yaml:"initial-cluster"`
- InitialClusterState string `protobuf:"bytes,42,opt,name=InitialClusterState,proto3" json:"InitialClusterState,omitempty" yaml:"initial-cluster-state"`
- InitialClusterToken string `protobuf:"bytes,43,opt,name=InitialClusterToken,proto3" json:"InitialClusterToken,omitempty" yaml:"initial-cluster-token"`
- SnapshotCount int64 `protobuf:"varint,51,opt,name=SnapshotCount,proto3" json:"SnapshotCount,omitempty" yaml:"snapshot-count"`
- QuotaBackendBytes int64 `protobuf:"varint,52,opt,name=QuotaBackendBytes,proto3" json:"QuotaBackendBytes,omitempty" yaml:"quota-backend-bytes"`
- PreVote bool `protobuf:"varint,63,opt,name=PreVote,proto3" json:"PreVote,omitempty" yaml:"pre-vote"`
- InitialCorruptCheck bool `protobuf:"varint,64,opt,name=InitialCorruptCheck,proto3" json:"InitialCorruptCheck,omitempty" yaml:"initial-corrupt-check"`
- Logger string `protobuf:"bytes,71,opt,name=Logger,proto3" json:"Logger,omitempty" yaml:"logger"`
- // LogOutputs is the list of log outputs to store current etcd server logs.
- LogOutputs []string `protobuf:"bytes,72,rep,name=LogOutputs,proto3" json:"LogOutputs,omitempty" yaml:"log-outputs"`
- LogLevel string `protobuf:"bytes,73,opt,name=LogLevel,proto3" json:"LogLevel,omitempty" yaml:"log-level"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Etcd) Reset() { *m = Etcd{} }
-func (m *Etcd) String() string { return proto.CompactTextString(m) }
-func (*Etcd) ProtoMessage() {}
-func (*Etcd) Descriptor() ([]byte, []int) {
- return fileDescriptor_4fbc93a8dcc3881e, []int{6}
-}
-func (m *Etcd) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Etcd) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Etcd.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Etcd) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Etcd.Merge(m, src)
-}
-func (m *Etcd) XXX_Size() int {
- return m.Size()
-}
-func (m *Etcd) XXX_DiscardUnknown() {
- xxx_messageInfo_Etcd.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Etcd proto.InternalMessageInfo
-
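- // Illustrative sketch (values are assumptions, not part of the generated
- // code): a minimal Etcd flag set for one member, using the defaults
- // documented above (100 ms heartbeat interval, 1000 ms election timeout)
- // and etcd's conventional client/peer ports.
- func exampleEtcd(name, dataDir string) *Etcd {
- return &Etcd{
- Name: name,
- DataDir: dataDir,
- HeartbeatIntervalMs: 100,
- ElectionTimeoutMs: 1000,
- ListenClientURLs: []string{"http://127.0.0.1:2379"},
- AdvertiseClientURLs: []string{"http://127.0.0.1:2379"},
- ListenPeerURLs: []string{"http://127.0.0.1:2380"},
- AdvertisePeerURLs: []string{"http://127.0.0.1:2380"},
- InitialCluster: name + "=http://127.0.0.1:2380",
- InitialClusterState: "new",
- PreVote: true,
- InitialCorruptCheck: true,
- }
- }
-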
-func init() {
- proto.RegisterEnum("rpcpb.StresserType", StresserType_name, StresserType_value)
- proto.RegisterEnum("rpcpb.Checker", Checker_name, Checker_value)
- proto.RegisterEnum("rpcpb.Operation", Operation_name, Operation_value)
- proto.RegisterEnum("rpcpb.Case", Case_name, Case_value)
- proto.RegisterType((*Request)(nil), "rpcpb.Request")
- proto.RegisterType((*SnapshotInfo)(nil), "rpcpb.SnapshotInfo")
- proto.RegisterType((*Response)(nil), "rpcpb.Response")
- proto.RegisterType((*Member)(nil), "rpcpb.Member")
- proto.RegisterType((*Tester)(nil), "rpcpb.Tester")
- proto.RegisterType((*Stresser)(nil), "rpcpb.Stresser")
- proto.RegisterType((*Etcd)(nil), "rpcpb.Etcd")
-}
-
-func init() { proto.RegisterFile("rpcpb/rpc.proto", fileDescriptor_4fbc93a8dcc3881e) }
-
-var fileDescriptor_4fbc93a8dcc3881e = []byte{
- // 3039 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x59, 0xcb, 0x77, 0xdb, 0xc6,
- 0xf5, 0x36, 0x44, 0x49, 0x96, 0x46, 0x2f, 0x68, 0x64, 0xd9, 0xf0, 0x4b, 0x90, 0xe1, 0x38, 0x3f,
- 0x59, 0x09, 0x6c, 0xff, 0xec, 0x9c, 0x3c, 0x9c, 0x26, 0x0e, 0x48, 0x41, 0x12, 0x4b, 0x88, 0xa4,
- 0x87, 0x90, 0x6c, 0x77, 0x51, 0x1c, 0x88, 0x1c, 0x49, 0x3c, 0xa6, 0x00, 0x06, 0x18, 0x3a, 0x52,
- 0xfe, 0x81, 0xee, 0x7a, 0x9a, 0x36, 0xed, 0x69, 0xcf, 0xe9, 0x9f, 0xd0, 0xb4, 0x8b, 0xae, 0xbb,
- 0x77, 0x5e, 0x6d, 0xda, 0xae, 0xda, 0x05, 0x4f, 0x9b, 0x6e, 0xba, 0xe6, 0xe9, 0x7b, 0xd1, 0xd3,
- 0x33, 0x33, 0x00, 0x39, 0x00, 0x49, 0x59, 0x2b, 0x0b, 0xf7, 0x7e, 0xdf, 0x37, 0x17, 0xf7, 0x0e,
- 0xe6, 0xde, 0x31, 0xc1, 0x5c, 0xd0, 0xac, 0x36, 0x77, 0x6f, 0x07, 0xcd, 0xea, 0xad, 0x66, 0xe0,
- 0x13, 0x1f, 0x8e, 0x31, 0xc3, 0x25, 0x7d, 0xbf, 0x4e, 0x0e, 0x5a, 0xbb, 0xb7, 0xaa, 0xfe, 0xe1,
- 0xed, 0x7d, 0x7f, 0xdf, 0xbf, 0xcd, 0xbc, 0xbb, 0xad, 0x3d, 0xf6, 0xc4, 0x1e, 0xd8, 0x5f, 0x9c,
- 0xa5, 0x7d, 0x47, 0x02, 0x67, 0x11, 0x7e, 0xbf, 0x85, 0x43, 0x02, 0x6f, 0x81, 0xc9, 0x52, 0x13,
- 0x07, 0x2e, 0xa9, 0xfb, 0x9e, 0x22, 0x2d, 0x4b, 0x2b, 0xb3, 0x77, 0xe5, 0x5b, 0x4c, 0xf5, 0x56,
- 0xd7, 0x8e, 0x7a, 0x10, 0x78, 0x03, 0x8c, 0x6f, 0xe1, 0xc3, 0x5d, 0x1c, 0x28, 0x23, 0xcb, 0xd2,
- 0xca, 0xd4, 0xdd, 0x99, 0x08, 0xcc, 0x8d, 0x28, 0x72, 0x52, 0x98, 0x8d, 0x43, 0x82, 0x03, 0x25,
- 0x93, 0x80, 0x71, 0x23, 0x8a, 0x9c, 0xda, 0x5f, 0x47, 0xc0, 0x74, 0xc5, 0x73, 0x9b, 0xe1, 0x81,
- 0x4f, 0xf2, 0xde, 0x9e, 0x0f, 0x97, 0x00, 0xe0, 0x0a, 0x45, 0xf7, 0x10, 0xb3, 0x78, 0x26, 0x91,
- 0x60, 0x81, 0xab, 0x40, 0xe6, 0x4f, 0xb9, 0x46, 0x1d, 0x7b, 0x64, 0x1b, 0x59, 0xa1, 0x32, 0xb2,
- 0x9c, 0x59, 0x99, 0x44, 0x7d, 0x76, 0xa8, 0xf5, 0xb4, 0xcb, 0x2e, 0x39, 0x60, 0x91, 0x4c, 0xa2,
- 0x84, 0x8d, 0xea, 0xc5, 0xcf, 0xeb, 0xf5, 0x06, 0xae, 0xd4, 0x3f, 0xc4, 0xca, 0x28, 0xc3, 0xf5,
- 0xd9, 0xe1, 0xab, 0x60, 0x3e, 0xb6, 0xd9, 0x3e, 0x71, 0x1b, 0x0c, 0x3c, 0xc6, 0xc0, 0xfd, 0x0e,
- 0x51, 0x99, 0x19, 0x0b, 0xf8, 0x58, 0x19, 0x5f, 0x96, 0x56, 0x32, 0xa8, 0xcf, 0x2e, 0x46, 0xba,
- 0xe9, 0x86, 0x07, 0xca, 0x59, 0x86, 0x4b, 0xd8, 0x44, 0x3d, 0x84, 0x9f, 0xd5, 0x43, 0x5a, 0xaf,
- 0x89, 0xa4, 0x5e, 0x6c, 0x87, 0x10, 0x8c, 0xda, 0xbe, 0xff, 0x54, 0x99, 0x64, 0xc1, 0xb1, 0xbf,
- 0xb5, 0x9f, 0x4a, 0x60, 0x02, 0xe1, 0xb0, 0xe9, 0x7b, 0x21, 0x86, 0x0a, 0x38, 0x5b, 0x69, 0x55,
- 0xab, 0x38, 0x0c, 0x59, 0x8e, 0x27, 0x50, 0xfc, 0x08, 0xcf, 0x83, 0xf1, 0x0a, 0x71, 0x49, 0x2b,
- 0x64, 0xf5, 0x9d, 0x44, 0xd1, 0x93, 0x50, 0xf7, 0xcc, 0x49, 0x75, 0x7f, 0x23, 0x59, 0x4f, 0x96,
- 0xcb, 0xa9, 0xbb, 0x0b, 0x11, 0x58, 0x74, 0xa1, 0x04, 0x50, 0xfb, 0x6c, 0x3a, 0x5e, 0x00, 0xde,
- 0x01, 0x13, 0x26, 0xa9, 0xd6, 0xcc, 0x23, 0x5c, 0xe5, 0x3b, 0x20, 0x7b, 0xae, 0xd3, 0x56, 0xe5,
- 0x63, 0xf7, 0xb0, 0x71, 0x5f, 0xc3, 0xa4, 0x5a, 0xd3, 0xf1, 0x11, 0xae, 0x6a, 0xa8, 0x8b, 0x82,
- 0xf7, 0xc0, 0xa4, 0xb1, 0x8f, 0x3d, 0x62, 0xd4, 0x6a, 0x81, 0x32, 0xc5, 0x28, 0x8b, 0x9d, 0xb6,
- 0x3a, 0xcf, 0x29, 0x2e, 0x75, 0xe9, 0x6e, 0xad, 0x16, 0x68, 0xa8, 0x87, 0x83, 0x16, 0x98, 0x5f,
- 0x77, 0xeb, 0x8d, 0xa6, 0x5f, 0xf7, 0xc8, 0xa6, 0x6d, 0x97, 0x19, 0x79, 0x9a, 0x91, 0x97, 0x3a,
- 0x6d, 0xf5, 0x12, 0x27, 0xef, 0xc5, 0x10, 0xfd, 0x80, 0x90, 0x66, 0xa4, 0xd2, 0x4f, 0x84, 0x3a,
- 0x38, 0x9b, 0x75, 0x43, 0xbc, 0x56, 0x0f, 0x14, 0xcc, 0x34, 0x16, 0x3a, 0x6d, 0x75, 0x8e, 0x6b,
- 0xec, 0xba, 0x21, 0xd6, 0x6b, 0xf5, 0x40, 0x43, 0x31, 0x06, 0x6e, 0x80, 0x39, 0x1a, 0x3d, 0xdf,
- 0xad, 0xe5, 0xc0, 0x3f, 0x3a, 0x56, 0x3e, 0x65, 0x95, 0xc8, 0x5e, 0xe9, 0xb4, 0x55, 0x45, 0x78,
- 0xd7, 0x2a, 0x83, 0xe8, 0x4d, 0x8a, 0xd1, 0x50, 0x9a, 0x05, 0x0d, 0x30, 0x43, 0x4d, 0x65, 0x8c,
- 0x03, 0x2e, 0xf3, 0x19, 0x97, 0xb9, 0xd4, 0x69, 0xab, 0xe7, 0x05, 0x99, 0x26, 0xc6, 0x41, 0x2c,
- 0x92, 0x64, 0xc0, 0x32, 0x80, 0x3d, 0x55, 0xd3, 0xab, 0xb1, 0x17, 0x53, 0x3e, 0x61, 0xf5, 0xcf,
- 0xaa, 0x9d, 0xb6, 0x7a, 0xb9, 0x3f, 0x1c, 0x1c, 0xc1, 0x34, 0x34, 0x80, 0x0b, 0xff, 0x1f, 0x8c,
- 0x52, 0xab, 0xf2, 0x73, 0x7e, 0x46, 0x4c, 0x45, 0xe5, 0xa7, 0xb6, 0xec, 0x5c, 0xa7, 0xad, 0x4e,
- 0xf5, 0x04, 0x35, 0xc4, 0xa0, 0x30, 0x0b, 0x16, 0xe9, 0xbf, 0x25, 0xaf, 0xb7, 0x99, 0x43, 0xe2,
- 0x07, 0x58, 0xf9, 0x45, 0xbf, 0x06, 0x1a, 0x0c, 0x85, 0x6b, 0x60, 0x96, 0x07, 0x92, 0xc3, 0x01,
- 0x59, 0x73, 0x89, 0xab, 0x7c, 0xc4, 0xbe, 0xf9, 0xec, 0xe5, 0x4e, 0x5b, 0xbd, 0xc0, 0xd7, 0x8c,
- 0xe2, 0xaf, 0xe2, 0x80, 0xe8, 0x35, 0x97, 0xb8, 0x1a, 0x4a, 0x71, 0x92, 0x2a, 0xec, 0xe0, 0xf8,
- 0xfe, 0x89, 0x2a, 0x4d, 0x97, 0x1c, 0x24, 0x54, 0xd8, 0xc1, 0x62, 0x80, 0x19, 0x6e, 0x29, 0xe0,
- 0x63, 0x16, 0xca, 0x0f, 0xb8, 0x88, 0x50, 0x97, 0x48, 0xe4, 0x29, 0x3e, 0x8e, 0x22, 0x49, 0x32,
- 0x12, 0x12, 0x2c, 0x8e, 0x8f, 0x4f, 0x92, 0xe0, 0x61, 0x24, 0x19, 0xd0, 0x06, 0x0b, 0xdc, 0x60,
- 0x07, 0xad, 0x90, 0xe0, 0x5a, 0xce, 0x60, 0xb1, 0xfc, 0x90, 0x0b, 0x5d, 0xeb, 0xb4, 0xd5, 0xab,
- 0x09, 0x21, 0xc2, 0x61, 0x7a, 0xd5, 0x8d, 0x42, 0x1a, 0x44, 0x1f, 0xa0, 0xca, 0xc2, 0xfb, 0xd1,
- 0x29, 0x54, 0x79, 0x94, 0x83, 0xe8, 0xf0, 0x5d, 0x30, 0x4d, 0xf7, 0x64, 0xb7, 0x76, 0x7f, 0xe7,
- 0x72, 0x17, 0x3b, 0x6d, 0x75, 0x91, 0xcb, 0xb1, 0x3d, 0x2c, 0x54, 0x2e, 0x81, 0x17, 0xf9, 0x2c,
- 0x9c, 0x7f, 0x9c, 0xc0, 0xe7, 0x61, 0x24, 0xf0, 0xf0, 0x6d, 0x30, 0x45, 0x9f, 0xe3, 0x7a, 0xfd,
- 0x93, 0xd3, 0x95, 0x4e, 0x5b, 0x3d, 0x27, 0xd0, 0x7b, 0xd5, 0x12, 0xd1, 0x02, 0x99, 0xad, 0xfd,
- 0xaf, 0xe1, 0x64, 0xbe, 0xb4, 0x88, 0x86, 0x45, 0x30, 0x4f, 0x1f, 0x93, 0x35, 0xfa, 0x77, 0x26,
- 0xfd, 0xfd, 0x31, 0x89, 0xbe, 0x0a, 0xf5, 0x53, 0xfb, 0xf4, 0x58, 0x48, 0xff, 0x79, 0xa1, 0x1e,
- 0x8f, 0xac, 0x9f, 0x0a, 0xdf, 0x49, 0x35, 0xd2, 0x3f, 0x8c, 0xa6, 0xdf, 0x2e, 0x8c, 0xdc, 0x71,
- 0x62, 0x13, 0x3d, 0xf6, 0xcd, 0x54, 0x4f, 0xf8, 0xe3, 0x69, 0x9b, 0x02, 0x7c, 0x1d, 0x80, 0xee,
- 0x49, 0x1b, 0x2a, 0xbf, 0x1a, 0x4b, 0x9f, 0xec, 0xdd, 0xc3, 0x39, 0xd4, 0x90, 0x80, 0xd4, 0x7e,
- 0x39, 0x1d, 0x8f, 0x1f, 0xf4, 0x5c, 0xa6, 0x39, 0xa1, 0xe7, 0xb2, 0x94, 0x3e, 0x97, 0x69, 0x02,
- 0xa3, 0x73, 0x39, 0xc2, 0xc0, 0x57, 0xc1, 0xd9, 0x22, 0x26, 0x1f, 0xf8, 0xc1, 0x53, 0xde, 0xff,
- 0xb2, 0xb0, 0xd3, 0x56, 0x67, 0x39, 0xdc, 0xe3, 0x0e, 0x0d, 0xc5, 0x10, 0x78, 0x1d, 0x8c, 0xb2,
- 0xae, 0xc1, 0x53, 0x2b, 0x9c, 0x6c, 0xbc, 0x4d, 0x30, 0x27, 0xcc, 0x81, 0xd9, 0x35, 0xdc, 0x70,
- 0x8f, 0x2d, 0x97, 0x60, 0xaf, 0x7a, 0xbc, 0x15, 0xb2, 0x0e, 0x35, 0x23, 0x1e, 0x27, 0x35, 0xea,
- 0xd7, 0x1b, 0x1c, 0xa0, 0x1f, 0x86, 0x1a, 0x4a, 0x51, 0xe0, 0x37, 0x81, 0x9c, 0xb4, 0xa0, 0x67,
- 0xac, 0x57, 0xcd, 0x88, 0xbd, 0x2a, 0x2d, 0xa3, 0x07, 0xcf, 0x34, 0xd4, 0xc7, 0x83, 0x4f, 0xc0,
- 0xe2, 0x76, 0xb3, 0xe6, 0x12, 0x5c, 0x4b, 0xc5, 0x35, 0xc3, 0x04, 0xaf, 0x77, 0xda, 0xaa, 0xca,
- 0x05, 0x5b, 0x1c, 0xa6, 0xf7, 0xc7, 0x37, 0x58, 0x81, 0x16, 0x0c, 0xf9, 0x2d, 0xaf, 0x66, 0xd5,
- 0x0f, 0xeb, 0x44, 0x59, 0x5c, 0x96, 0x56, 0xc6, 0xb2, 0xe7, 0x3b, 0x6d, 0x15, 0x72, 0xbd, 0x80,
- 0xfa, 0xf4, 0x06, 0x75, 0x6a, 0x48, 0x40, 0xc2, 0x2c, 0x98, 0x35, 0x8f, 0xea, 0xa4, 0xe4, 0xe5,
- 0xdc, 0x10, 0xd3, 0x42, 0x2a, 0xe7, 0xfb, 0xba, 0xd8, 0x51, 0x9d, 0xe8, 0xbe, 0xa7, 0xd3, 0x9a,
- 0xb7, 0x02, 0xac, 0xa1, 0x14, 0x03, 0xbe, 0x05, 0xa6, 0x4c, 0xcf, 0xdd, 0x6d, 0xe0, 0x72, 0x33,
- 0xf0, 0xf7, 0x94, 0x0b, 0x4c, 0xe0, 0x42, 0xa7, 0xad, 0x2e, 0x44, 0x02, 0xcc, 0xa9, 0x37, 0xa9,
- 0x57, 0x43, 0x22, 0x16, 0xde, 0x07, 0x53, 0x54, 0x86, 0xbd, 0xcc, 0x56, 0xa8, 0xa8, 0x2c, 0x0f,
- 0xc2, 0xf6, 0xae, 0xb2, 0x06, 0xce, 0x92, 0x40, 0x5f, 0x5e, 0x04, 0xd3, 0x65, 0xe9, 0x63, 0xe5,
- 0xa0, 0xb5, 0xb7, 0xd7, 0xc0, 0xca, 0x72, 0x7a, 0x59, 0xc6, 0x0d, 0xb9, 0x37, 0xa2, 0x46, 0x58,
- 0xf8, 0x32, 0x18, 0xa3, 0x8f, 0xa1, 0x72, 0x8d, 0x4e, 0xb0, 0x59, 0xb9, 0xd3, 0x56, 0xa7, 0x7b,
- 0xa4, 0x50, 0x43, 0xdc, 0x0d, 0x0b, 0xc2, 0xa4, 0x92, 0xf3, 0x0f, 0x0f, 0x5d, 0xaf, 0x16, 0x2a,
- 0x1a, 0xe3, 0x5c, 0xed, 0xb4, 0xd5, 0x8b, 0xe9, 0x49, 0xa5, 0x1a, 0x61, 0xc4, 0x41, 0x25, 0xe6,
- 0xd1, 0xed, 0x88, 0x5a, 0x9e, 0x87, 0x03, 0x3a, 0x39, 0xb1, 0xcf, 0xf9, 0x66, 0xba, 0xbb, 0x05,
- 0xcc, 0xcf, 0xa6, 0xac, 0xb8, 0xbb, 0x25, 0x29, 0x30, 0x0f, 0x64, 0xf3, 0x88, 0xe0, 0xc0, 0x73,
- 0x1b, 0x5d, 0x99, 0x55, 0x26, 0x23, 0x04, 0x84, 0x23, 0x84, 0x28, 0xd4, 0x47, 0x83, 0x39, 0x30,
- 0x59, 0x21, 0x01, 0x0e, 0x43, 0x1c, 0x84, 0x0a, 0x5e, 0xce, 0xac, 0x4c, 0xdd, 0x9d, 0x8b, 0x4f,
- 0x86, 0xc8, 0x2e, 0xce, 0x7f, 0x61, 0x8c, 0xd5, 0x50, 0x8f, 0x07, 0x6f, 0x83, 0x89, 0xdc, 0x01,
- 0xae, 0x3e, 0xa5, 0x1a, 0x7b, 0x2c, 0x31, 0xc2, 0x67, 0x5e, 0x8d, 0x3c, 0x1a, 0xea, 0x82, 0x68,
- 0x6f, 0xe5, 0xec, 0x02, 0x3e, 0x66, 0x73, 0x3c, 0x9b, 0xbe, 0xc6, 0xc4, 0x0d, 0xc7, 0x57, 0x62,
- 0x67, 0x76, 0x58, 0xff, 0x10, 0x6b, 0x28, 0xc9, 0x80, 0x0f, 0x01, 0x4c, 0x18, 0x2c, 0x37, 0xd8,
- 0xc7, 0x7c, 0xfc, 0x1a, 0xcb, 0x2e, 0x77, 0xda, 0xea, 0x95, 0x81, 0x3a, 0x7a, 0x83, 0xe2, 0x34,
- 0x34, 0x80, 0x0c, 0x1f, 0x81, 0x73, 0x3d, 0x6b, 0x6b, 0x6f, 0xaf, 0x7e, 0x84, 0x5c, 0x6f, 0x1f,
- 0x2b, 0x9f, 0x73, 0x51, 0xad, 0xd3, 0x56, 0x97, 0xfa, 0x45, 0x19, 0x50, 0x0f, 0x28, 0x52, 0x43,
- 0x03, 0x05, 0xa0, 0x0b, 0x2e, 0x0c, 0xb2, 0xdb, 0x47, 0x9e, 0xf2, 0x05, 0xd7, 0x7e, 0xb9, 0xd3,
- 0x56, 0xb5, 0x13, 0xb5, 0x75, 0x72, 0xe4, 0x69, 0x68, 0x98, 0x0e, 0xdc, 0x04, 0x73, 0x5d, 0x97,
- 0x7d, 0xe4, 0x95, 0x9a, 0xa1, 0xf2, 0x25, 0x97, 0x16, 0xb6, 0x84, 0x20, 0x4d, 0x8e, 0x3c, 0xdd,
- 0x6f, 0x86, 0x1a, 0x4a, 0xd3, 0xe0, 0x7b, 0x71, 0x6d, 0xf8, 0x94, 0x10, 0xf2, 0x51, 0x74, 0x4c,
- 0xec, 0xe4, 0x91, 0x0e, 0x9f, 0x2f, 0xc2, 0x6e, 0x69, 0x22, 0x02, 0x7c, 0x2d, 0xde, 0x53, 0x0f,
- 0xcb, 0x15, 0x3e, 0x84, 0x8e, 0x89, 0x6d, 0x23, 0x62, 0xbf, 0xdf, 0xec, 0x6d, 0xa2, 0x87, 0xe5,
- 0x8a, 0xf6, 0x2d, 0x30, 0x11, 0xef, 0x28, 0x7a, 0xb2, 0xdb, 0xc7, 0xcd, 0xe8, 0x06, 0x2a, 0x9e,
- 0xec, 0xe4, 0xb8, 0x89, 0x35, 0xc4, 0x9c, 0xf0, 0x26, 0x18, 0x7f, 0x84, 0xeb, 0xfb, 0x07, 0x84,
- 0xf5, 0x0a, 0x29, 0x3b, 0xdf, 0x69, 0xab, 0x33, 0x1c, 0xf6, 0x01, 0xb3, 0x6b, 0x28, 0x02, 0x68,
- 0xdf, 0x9d, 0xe3, 0x23, 0x31, 0x15, 0xee, 0x5d, 0x6d, 0x45, 0x61, 0xcf, 0x3d, 0xa4, 0xc2, 0xec,
- 0x96, 0x2b, 0x34, 0xad, 0x91, 0x53, 0x34, 0xad, 0x55, 0x30, 0xfe, 0xc8, 0xb0, 0x28, 0x3a, 0x93,
- 0xee, 0x59, 0x1f, 0xb8, 0x0d, 0x0e, 0x8e, 0x10, 0xb0, 0x04, 0x16, 0x36, 0xb1, 0x1b, 0x90, 0x5d,
- 0xec, 0x92, 0xbc, 0x47, 0x70, 0xf0, 0xcc, 0x6d, 0x44, 0x2d, 0x29, 0x23, 0x56, 0xea, 0x20, 0x06,
- 0xe9, 0xf5, 0x08, 0xa5, 0xa1, 0x41, 0x4c, 0x98, 0x07, 0xf3, 0x66, 0x03, 0x57, 0x49, 0xdd, 0xf7,
- 0xec, 0xfa, 0x21, 0xf6, 0x5b, 0x64, 0x2b, 0x64, 0xad, 0x29, 0x23, 0x1e, 0x29, 0x38, 0x82, 0xe8,
- 0x84, 0x63, 0x34, 0xd4, 0xcf, 0xa2, 0xa7, 0x8a, 0x55, 0x0f, 0x09, 0xf6, 0x84, 0xcb, 0xfd, 0x62,
- 0xfa, 0x98, 0x6b, 0x30, 0x44, 0x7c, 0x0f, 0x69, 0x05, 0x8d, 0x50, 0x43, 0x7d, 0x34, 0x88, 0xc0,
- 0x82, 0x51, 0x7b, 0x86, 0x03, 0x52, 0x0f, 0xb1, 0xa0, 0x76, 0x9e, 0xa9, 0x09, 0x1f, 0xa7, 0x1b,
- 0x83, 0x92, 0x82, 0x83, 0xc8, 0xf0, 0xad, 0x78, 0x1e, 0x37, 0x5a, 0xc4, 0xb7, 0xad, 0x4a, 0xd4,
- 0x62, 0x84, 0xda, 0xb8, 0x2d, 0xe2, 0xeb, 0x84, 0x0a, 0x24, 0x91, 0xf4, 0xd0, 0xed, 0xdd, 0x0f,
- 0x8c, 0x16, 0x39, 0x50, 0x14, 0xc6, 0x1d, 0x72, 0xa5, 0x70, 0x5b, 0xa9, 0x2b, 0x05, 0xa5, 0xc0,
- 0x6f, 0x88, 0x22, 0xeb, 0xf5, 0x06, 0x56, 0x2e, 0xa6, 0x6f, 0xc7, 0x8c, 0xbd, 0x57, 0xa7, 0x9d,
- 0x26, 0x85, 0xed, 0x45, 0x5f, 0xc0, 0xc7, 0x8c, 0x7c, 0x29, 0xbd, 0xb3, 0xe8, 0x57, 0xc9, 0xb9,
- 0x49, 0x24, 0xb4, 0xfa, 0xe6, 0x7d, 0x26, 0x70, 0x39, 0x7d, 0x1b, 0x11, 0x66, 0x49, 0xae, 0x33,
- 0x88, 0x46, 0x73, 0xc1, 0xcb, 0x45, 0x07, 0x4d, 0x56, 0x15, 0x95, 0x55, 0x45, 0xc8, 0x45, 0x54,
- 0x63, 0x36, 0xa0, 0xf2, 0x82, 0xa4, 0x28, 0xd0, 0x06, 0xf3, 0xdd, 0x12, 0x75, 0x75, 0x96, 0x99,
- 0x8e, 0x70, 0x92, 0xd5, 0xbd, 0x3a, 0xa9, 0xbb, 0x0d, 0xbd, 0x57, 0x65, 0x41, 0xb2, 0x5f, 0x80,
- 0xce, 0x01, 0xf4, 0xef, 0xb8, 0xbe, 0xd7, 0x58, 0x8d, 0xd2, 0x43, 0x7c, 0xaf, 0xc8, 0x22, 0x98,
- 0xde, 0xa2, 0xd9, 0x75, 0x22, 0x59, 0x66, 0x8d, 0x49, 0x08, 0x1b, 0x8e, 0xdf, 0x41, 0xfa, 0x6a,
- 0x3d, 0x80, 0x4b, 0xc7, 0xee, 0xf8, 0x82, 0xc2, 0xf2, 0x7d, 0x7d, 0xf8, 0x7d, 0x86, 0xa7, 0x3b,
- 0x01, 0x8f, 0x5f, 0x26, 0x2e, 0xf7, 0x4b, 0x43, 0x6f, 0x24, 0x9c, 0x2c, 0x82, 0xe1, 0x56, 0xea,
- 0x06, 0xc1, 0x14, 0x6e, 0xbc, 0xe8, 0x02, 0xc1, 0x85, 0xfa, 0x99, 0x74, 0xbc, 0xcb, 0xf3, 0x52,
- 0xe4, 0x1a, 0x2d, 0xf6, 0xbf, 0x82, 0x37, 0xd3, 0x7b, 0x27, 0x2e, 0x55, 0x95, 0x03, 0x34, 0x94,
- 0x62, 0xd0, 0x2f, 0x3a, 0x69, 0xa9, 0x10, 0x97, 0xe0, 0x68, 0xea, 0x10, 0x12, 0x9c, 0x12, 0xd2,
- 0x43, 0x0a, 0xd3, 0xd0, 0x20, 0x72, 0xbf, 0xa6, 0xed, 0x3f, 0xc5, 0x9e, 0xf2, 0xca, 0x8b, 0x34,
- 0x09, 0x85, 0xf5, 0x69, 0x32, 0x32, 0x7c, 0x00, 0x66, 0xe2, 0x3b, 0x4c, 0xce, 0x6f, 0x79, 0x44,
- 0xb9, 0xc7, 0xce, 0x42, 0xb1, 0x79, 0xc5, 0x97, 0xa5, 0x2a, 0xf5, 0xd3, 0xe6, 0x25, 0xe2, 0xa1,
- 0x05, 0xe6, 0x1f, 0xb6, 0x7c, 0xe2, 0x66, 0xdd, 0xea, 0x53, 0xec, 0xd5, 0xb2, 0xc7, 0x04, 0x87,
- 0xca, 0x6b, 0x4c, 0x44, 0x98, 0xf5, 0xdf, 0xa7, 0x10, 0x7d, 0x97, 0x63, 0xf4, 0x5d, 0x0a, 0xd2,
- 0x50, 0x3f, 0x91, 0xb6, 0x92, 0x72, 0x80, 0x77, 0x7c, 0x82, 0x95, 0x07, 0xe9, 0xe3, 0xaa, 0x19,
- 0x60, 0xfd, 0x99, 0x4f, 0xb3, 0x13, 0x63, 0xc4, 0x8c, 0xf8, 0x41, 0xd0, 0x6a, 0x12, 0x36, 0x31,
- 0x29, 0xef, 0xa5, 0xb7, 0x71, 0x37, 0x23, 0x1c, 0xa5, 0xb3, 0x19, 0x4b, 0xc8, 0x88, 0x40, 0xa6,
- 0x6d, 0xd2, 0xf2, 0xf7, 0xf7, 0x71, 0xa0, 0x6c, 0xb0, 0xc4, 0x0a, 0x6d, 0xb2, 0xc1, 0xec, 0x1a,
- 0x8a, 0x00, 0xf4, 0xfe, 0x60, 0xf9, 0xfb, 0xa5, 0x16, 0x69, 0xb6, 0x48, 0xa8, 0x6c, 0xb2, 0xef,
- 0x59, 0xb8, 0x3f, 0x34, 0xfc, 0x7d, 0xdd, 0xe7, 0x4e, 0x0d, 0x09, 0x48, 0x78, 0x07, 0x4c, 0x58,
- 0xfe, 0xbe, 0x85, 0x9f, 0xe1, 0x86, 0x92, 0x4f, 0x1f, 0x8a, 0x94, 0xd5, 0xa0, 0x2e, 0x0d, 0x75,
- 0x51, 0xab, 0xff, 0x95, 0xc0, 0x74, 0xdc, 0xed, 0x59, 0x33, 0x87, 0x60, 0xb6, 0xb0, 0xe3, 0x3c,
- 0x42, 0x79, 0xdb, 0x74, 0x2a, 0x5b, 0x86, 0x65, 0xc9, 0x67, 0x12, 0x36, 0xcb, 0x40, 0x1b, 0xa6,
- 0x2c, 0xc1, 0x05, 0x30, 0x57, 0xd8, 0x71, 0x90, 0x69, 0xac, 0x39, 0xa5, 0xa2, 0xe9, 0x14, 0xcc,
- 0x27, 0xf2, 0x08, 0x9c, 0x07, 0x33, 0xb1, 0x11, 0x19, 0xc5, 0x0d, 0x53, 0xce, 0xc0, 0x45, 0x30,
- 0x5f, 0xd8, 0x71, 0xd6, 0x4c, 0xcb, 0xb4, 0xcd, 0x2e, 0x72, 0x34, 0xa2, 0x47, 0x66, 0x8e, 0x1d,
- 0x83, 0x17, 0xc0, 0x42, 0x61, 0xc7, 0xb1, 0x1f, 0x17, 0xa3, 0xb5, 0xb8, 0x5b, 0x1e, 0x87, 0x93,
- 0x60, 0xcc, 0x32, 0x8d, 0x8a, 0x29, 0x03, 0x4a, 0x34, 0x2d, 0x33, 0x67, 0xe7, 0x4b, 0x45, 0x07,
- 0x6d, 0x17, 0x8b, 0x26, 0x92, 0xcf, 0x41, 0x19, 0x4c, 0x3f, 0x32, 0xec, 0xdc, 0x66, 0x6c, 0x51,
- 0xe9, 0xb2, 0x56, 0x29, 0x57, 0x70, 0x90, 0x91, 0x33, 0x51, 0x6c, 0xbe, 0x49, 0x81, 0x4c, 0x28,
- 0xb6, 0xdc, 0x5b, 0xfd, 0x36, 0x38, 0x1b, 0x4d, 0xc3, 0x70, 0x0a, 0x9c, 0x2d, 0xec, 0x38, 0x9b,
- 0x46, 0x65, 0x53, 0x3e, 0xd3, 0x43, 0x9a, 0x8f, 0xcb, 0x79, 0x44, 0xdf, 0x18, 0x80, 0xf1, 0x88,
- 0x35, 0x02, 0xa7, 0xc1, 0x44, 0xb1, 0xe4, 0xe4, 0x36, 0xcd, 0x5c, 0x41, 0xce, 0xc0, 0x4b, 0xe0,
- 0x7c, 0x65, 0xb3, 0x84, 0x6c, 0xc7, 0xb6, 0x2d, 0x27, 0xc1, 0x1a, 0x5d, 0xfd, 0x49, 0x46, 0xf8,
- 0x65, 0x01, 0xce, 0x81, 0xa9, 0x62, 0xc9, 0x76, 0x2a, 0xb6, 0x81, 0x6c, 0x73, 0x4d, 0x3e, 0x03,
- 0xcf, 0x03, 0x98, 0x2f, 0xe6, 0xed, 0xbc, 0x61, 0x71, 0xa3, 0x63, 0xda, 0xb9, 0x35, 0x19, 0xd0,
- 0xe5, 0x91, 0x29, 0x58, 0xa6, 0xa8, 0xa5, 0x92, 0xdf, 0xb0, 0x4d, 0xb4, 0xc5, 0x2d, 0xe7, 0xe0,
- 0x32, 0xb8, 0x52, 0xc9, 0x6f, 0x3c, 0xdc, 0xce, 0x73, 0x8c, 0x63, 0x14, 0xd7, 0x1c, 0x64, 0x6e,
- 0x95, 0x76, 0x4c, 0x67, 0xcd, 0xb0, 0x0d, 0x79, 0x91, 0xd6, 0xa3, 0x62, 0xec, 0x98, 0x4e, 0xa5,
- 0x68, 0x94, 0x2b, 0x9b, 0x25, 0x5b, 0x5e, 0x82, 0xd7, 0xc0, 0x55, 0x2a, 0x5c, 0x42, 0xa6, 0x13,
- 0x2f, 0xb0, 0x8e, 0x4a, 0x5b, 0x3d, 0x88, 0x0a, 0x2f, 0x82, 0xc5, 0xc1, 0xae, 0x65, 0xca, 0xee,
- 0x5b, 0xd2, 0x40, 0xb9, 0xcd, 0x7c, 0xbc, 0xe6, 0x0a, 0xbc, 0x0d, 0x5e, 0x39, 0x29, 0x2a, 0xf6,
- 0x5c, 0xb1, 0x4b, 0x65, 0xc7, 0xd8, 0x30, 0x8b, 0xb6, 0x7c, 0x13, 0x5e, 0x05, 0x17, 0xb3, 0x96,
- 0x91, 0x2b, 0x6c, 0x96, 0x2c, 0xd3, 0x29, 0x9b, 0x26, 0x72, 0xca, 0x2c, 0x97, 0x8f, 0x1d, 0xf4,
- 0x58, 0xae, 0x41, 0x15, 0x5c, 0xde, 0x2e, 0x0e, 0x07, 0x60, 0x78, 0x09, 0x2c, 0xae, 0x99, 0x96,
- 0xf1, 0xa4, 0xcf, 0xf5, 0x5c, 0x82, 0x57, 0xc0, 0x85, 0xed, 0xe2, 0x60, 0xef, 0xa7, 0xd2, 0xea,
- 0xc7, 0x53, 0x60, 0x94, 0x5e, 0x2d, 0xa1, 0x02, 0xce, 0xc5, 0xb9, 0xa5, 0x5b, 0x74, 0xbd, 0x64,
- 0x59, 0xa5, 0x47, 0x26, 0x92, 0xcf, 0x44, 0x6f, 0xd3, 0xe7, 0x71, 0xb6, 0x8b, 0x76, 0xde, 0x72,
- 0x6c, 0x94, 0xdf, 0xd8, 0x30, 0x51, 0x2f, 0x43, 0x12, 0xfd, 0x56, 0x62, 0x82, 0x65, 0x1a, 0x6b,
- 0x6c, 0xb7, 0xdc, 0x04, 0x37, 0x92, 0xb6, 0x61, 0xf4, 0x8c, 0x48, 0x7f, 0xb8, 0x5d, 0x42, 0xdb,
- 0x5b, 0xf2, 0x28, 0xdd, 0x34, 0xb1, 0x8d, 0x7e, 0x8f, 0x63, 0xf0, 0x3a, 0x50, 0xe3, 0x14, 0x0b,
- 0xd9, 0x4d, 0x44, 0x0e, 0xe0, 0x7d, 0xf0, 0xfa, 0x0b, 0x40, 0xc3, 0xa2, 0x98, 0xa2, 0x25, 0x19,
- 0xc0, 0x8d, 0xde, 0x67, 0x1a, 0xbe, 0x06, 0xee, 0x0c, 0x75, 0x0f, 0x13, 0x9d, 0x81, 0xeb, 0x20,
- 0x3b, 0x80, 0xc5, 0xdf, 0x32, 0xb2, 0xf0, 0x7d, 0x19, 0x09, 0xc5, 0xd4, 0x68, 0x13, 0xe6, 0x10,
- 0xfd, 0xc2, 0xe5, 0x59, 0xb8, 0x0a, 0x5e, 0x1e, 0xba, 0x1d, 0x92, 0x49, 0xa8, 0x41, 0x03, 0xbc,
- 0x73, 0x3a, 0xec, 0xb0, 0xb0, 0x31, 0x7c, 0x09, 0x2c, 0x0f, 0x97, 0x88, 0x52, 0xb2, 0x07, 0xdf,
- 0x06, 0x6f, 0xbc, 0x08, 0x35, 0x6c, 0x89, 0xfd, 0x93, 0x97, 0x88, 0xb6, 0xc1, 0x01, 0xfd, 0xf6,
- 0x86, 0xa3, 0xe8, 0xc6, 0xa8, 0xc3, 0xff, 0x03, 0xda, 0xc0, 0xcd, 0x9e, 0x4c, 0xcb, 0x73, 0x09,
- 0xde, 0x02, 0x37, 0x91, 0x51, 0x5c, 0x2b, 0x6d, 0x39, 0xa7, 0xc0, 0x7f, 0x2a, 0xc1, 0x77, 0xc1,
- 0x5b, 0x2f, 0x06, 0x0e, 0x7b, 0xc1, 0xcf, 0x24, 0x68, 0x82, 0xf7, 0x4e, 0xbd, 0xde, 0x30, 0x99,
- 0xcf, 0x25, 0x78, 0x0d, 0x5c, 0x19, 0xcc, 0x8f, 0xea, 0xf0, 0x85, 0x04, 0x57, 0xc0, 0xf5, 0x13,
- 0x57, 0x8a, 0x90, 0x5f, 0x4a, 0xf0, 0x4d, 0x70, 0xef, 0x24, 0xc8, 0xb0, 0x30, 0x7e, 0x2d, 0xc1,
- 0x07, 0xe0, 0xfe, 0x29, 0xd6, 0x18, 0x26, 0xf0, 0x9b, 0x13, 0xde, 0x23, 0x2a, 0xf6, 0x57, 0x2f,
- 0x7e, 0x8f, 0x08, 0xf9, 0x5b, 0x09, 0x2e, 0x81, 0x8b, 0x83, 0x21, 0x74, 0x4f, 0xfc, 0x4e, 0x82,
- 0x37, 0xc0, 0xf2, 0x89, 0x4a, 0x14, 0xf6, 0x7b, 0x09, 0x2a, 0x60, 0xa1, 0x58, 0x72, 0xd6, 0x8d,
- 0xbc, 0xe5, 0x3c, 0xca, 0xdb, 0x9b, 0x4e, 0xc5, 0x46, 0x66, 0xa5, 0x22, 0xff, 0x6c, 0x84, 0x86,
- 0x92, 0xf0, 0x14, 0x4b, 0x91, 0xd3, 0x59, 0x2f, 0x21, 0xc7, 0xca, 0xef, 0x98, 0x45, 0x8a, 0xfc,
- 0x64, 0x04, 0xce, 0x01, 0x40, 0x61, 0xe5, 0x52, 0xbe, 0x68, 0x57, 0xe4, 0xef, 0x65, 0xe0, 0x4b,
- 0x40, 0xed, 0x19, 0x38, 0x7b, 0x2d, 0x5f, 0x29, 0x38, 0xf9, 0x92, 0x63, 0x19, 0xb6, 0x59, 0xcc,
- 0x3d, 0x91, 0x3f, 0xca, 0xc0, 0x19, 0x30, 0x61, 0x3e, 0xb6, 0x4d, 0x54, 0x34, 0x2c, 0xf9, 0x6f,
- 0x99, 0xbb, 0x0f, 0xc0, 0xa4, 0x1d, 0xb8, 0x5e, 0xd8, 0xf4, 0x03, 0x02, 0xef, 0x8a, 0x0f, 0xb3,
- 0xd1, 0xff, 0x88, 0x45, 0xbf, 0xda, 0x5f, 0x9a, 0xeb, 0x3e, 0xf3, 0x1f, 0x74, 0xb5, 0x33, 0x2b,
- 0xd2, 0x1d, 0x29, 0x7b, 0xee, 0xf9, 0x9f, 0x97, 0xce, 0x3c, 0xff, 0x7a, 0x49, 0xfa, 0xea, 0xeb,
- 0x25, 0xe9, 0x4f, 0x5f, 0x2f, 0x49, 0x3f, 0xfe, 0xcb, 0xd2, 0x99, 0xdd, 0x71, 0xf6, 0xab, 0xff,
- 0xbd, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x21, 0xcd, 0x4e, 0x90, 0x3e, 0x20, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// TransportClient is the client API for Transport service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type TransportClient interface {
- Transport(ctx context.Context, opts ...grpc.CallOption) (Transport_TransportClient, error)
-}
-
-type transportClient struct {
- cc *grpc.ClientConn
-}
-
-func NewTransportClient(cc *grpc.ClientConn) TransportClient {
- return &transportClient{cc}
-}
-
-func (c *transportClient) Transport(ctx context.Context, opts ...grpc.CallOption) (Transport_TransportClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Transport_serviceDesc.Streams[0], "/rpcpb.Transport/Transport", opts...)
- if err != nil {
- return nil, err
- }
- x := &transportTransportClient{stream}
- return x, nil
-}
-
-type Transport_TransportClient interface {
- Send(*Request) error
- Recv() (*Response, error)
- grpc.ClientStream
-}
-
-type transportTransportClient struct {
- grpc.ClientStream
-}
-
-func (x *transportTransportClient) Send(m *Request) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *transportTransportClient) Recv() (*Response, error) {
- m := new(Response)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// TransportServer is the server API for Transport service.
-type TransportServer interface {
- Transport(Transport_TransportServer) error
-}
-
-// UnimplementedTransportServer can be embedded to have forward compatible implementations.
-type UnimplementedTransportServer struct {
-}
-
-func (*UnimplementedTransportServer) Transport(srv Transport_TransportServer) error {
- return status.Errorf(codes.Unimplemented, "method Transport not implemented")
-}
-
-func RegisterTransportServer(s *grpc.Server, srv TransportServer) {
- s.RegisterService(&_Transport_serviceDesc, srv)
-}
-
-func _Transport_Transport_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(TransportServer).Transport(&transportTransportServer{stream})
-}
-
-type Transport_TransportServer interface {
- Send(*Response) error
- Recv() (*Request, error)
- grpc.ServerStream
-}
-
-type transportTransportServer struct {
- grpc.ServerStream
-}
-
-func (x *transportTransportServer) Send(m *Response) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *transportTransportServer) Recv() (*Request, error) {
- m := new(Request)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-var _Transport_serviceDesc = grpc.ServiceDesc{
- ServiceName: "rpcpb.Transport",
- HandlerType: (*TransportServer)(nil),
- Methods: []grpc.MethodDesc{},
- Streams: []grpc.StreamDesc{
- {
- StreamName: "Transport",
- Handler: _Transport_Transport_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- },
- Metadata: "rpcpb/rpc.proto",
-}
-
-func (m *Request) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Request) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Tester != nil {
- {
- size, err := m.Tester.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- if m.Member != nil {
- {
- size, err := m.Member.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.Operation != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Operation))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *SnapshotInfo) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *SnapshotInfo) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SnapshotInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Took) > 0 {
- i -= len(m.Took)
- copy(dAtA[i:], m.Took)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Took)))
- i--
- dAtA[i] = 0x4a
- }
- if m.SnapshotRevision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.SnapshotRevision))
- i--
- dAtA[i] = 0x40
- }
- if m.SnapshotHash != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.SnapshotHash))
- i--
- dAtA[i] = 0x38
- }
- if m.SnapshotTotalKey != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.SnapshotTotalKey))
- i--
- dAtA[i] = 0x30
- }
- if len(m.SnapshotTotalSize) > 0 {
- i -= len(m.SnapshotTotalSize)
- copy(dAtA[i:], m.SnapshotTotalSize)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.SnapshotTotalSize)))
- i--
- dAtA[i] = 0x2a
- }
- if len(m.SnapshotFileSize) > 0 {
- i -= len(m.SnapshotFileSize)
- copy(dAtA[i:], m.SnapshotFileSize)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.SnapshotFileSize)))
- i--
- dAtA[i] = 0x22
- }
- if len(m.SnapshotPath) > 0 {
- i -= len(m.SnapshotPath)
- copy(dAtA[i:], m.SnapshotPath)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.SnapshotPath)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.MemberClientURLs) > 0 {
- for iNdEx := len(m.MemberClientURLs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.MemberClientURLs[iNdEx])
- copy(dAtA[i:], m.MemberClientURLs[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.MemberClientURLs[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.MemberName) > 0 {
- i -= len(m.MemberName)
- copy(dAtA[i:], m.MemberName)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.MemberName)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Response) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Response) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Response) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.SnapshotInfo != nil {
- {
- size, err := m.SnapshotInfo.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- if m.Member != nil {
- {
- size, err := m.Member.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Status) > 0 {
- i -= len(m.Status)
- copy(dAtA[i:], m.Status)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Status)))
- i--
- dAtA[i] = 0x12
- }
- if m.Success {
- i--
- if m.Success {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Member) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Member) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Failpoints) > 0 {
- i -= len(m.Failpoints)
- copy(dAtA[i:], m.Failpoints)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Failpoints)))
- i--
- dAtA[i] = 0x2b
- i--
- dAtA[i] = 0xea
- }
- if m.SnapshotInfo != nil {
- {
- size, err := m.SnapshotInfo.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x25
- i--
- dAtA[i] = 0xd2
- }
- if len(m.SnapshotPath) > 0 {
- i -= len(m.SnapshotPath)
- copy(dAtA[i:], m.SnapshotPath)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.SnapshotPath)))
- i--
- dAtA[i] = 0x25
- i--
- dAtA[i] = 0xca
- }
- if len(m.PeerTrustedCAPath) > 0 {
- i -= len(m.PeerTrustedCAPath)
- copy(dAtA[i:], m.PeerTrustedCAPath)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerTrustedCAPath)))
- i--
- dAtA[i] = 0x1f
- i--
- dAtA[i] = 0xd2
- }
- if len(m.PeerTrustedCAData) > 0 {
- i -= len(m.PeerTrustedCAData)
- copy(dAtA[i:], m.PeerTrustedCAData)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerTrustedCAData)))
- i--
- dAtA[i] = 0x1f
- i--
- dAtA[i] = 0xca
- }
- if len(m.PeerKeyPath) > 0 {
- i -= len(m.PeerKeyPath)
- copy(dAtA[i:], m.PeerKeyPath)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerKeyPath)))
- i--
- dAtA[i] = 0x1f
- i--
- dAtA[i] = 0xc2
- }
- if len(m.PeerKeyData) > 0 {
- i -= len(m.PeerKeyData)
- copy(dAtA[i:], m.PeerKeyData)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerKeyData)))
- i--
- dAtA[i] = 0x1f
- i--
- dAtA[i] = 0xba
- }
- if len(m.PeerCertPath) > 0 {
- i -= len(m.PeerCertPath)
- copy(dAtA[i:], m.PeerCertPath)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerCertPath)))
- i--
- dAtA[i] = 0x1f
- i--
- dAtA[i] = 0xb2
- }
- if len(m.PeerCertData) > 0 {
- i -= len(m.PeerCertData)
- copy(dAtA[i:], m.PeerCertData)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerCertData)))
- i--
- dAtA[i] = 0x1f
- i--
- dAtA[i] = 0xaa
- }
- if len(m.ClientTrustedCAPath) > 0 {
- i -= len(m.ClientTrustedCAPath)
- copy(dAtA[i:], m.ClientTrustedCAPath)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientTrustedCAPath)))
- i--
- dAtA[i] = 0x19
- i--
- dAtA[i] = 0xb2
- }
- if len(m.ClientTrustedCAData) > 0 {
- i -= len(m.ClientTrustedCAData)
- copy(dAtA[i:], m.ClientTrustedCAData)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientTrustedCAData)))
- i--
- dAtA[i] = 0x19
- i--
- dAtA[i] = 0xaa
- }
- if len(m.ClientKeyPath) > 0 {
- i -= len(m.ClientKeyPath)
- copy(dAtA[i:], m.ClientKeyPath)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientKeyPath)))
- i--
- dAtA[i] = 0x19
- i--
- dAtA[i] = 0xa2
- }
- if len(m.ClientKeyData) > 0 {
- i -= len(m.ClientKeyData)
- copy(dAtA[i:], m.ClientKeyData)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientKeyData)))
- i--
- dAtA[i] = 0x19
- i--
- dAtA[i] = 0x9a
- }
- if len(m.ClientCertPath) > 0 {
- i -= len(m.ClientCertPath)
- copy(dAtA[i:], m.ClientCertPath)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientCertPath)))
- i--
- dAtA[i] = 0x19
- i--
- dAtA[i] = 0x92
- }
- if len(m.ClientCertData) > 0 {
- i -= len(m.ClientCertData)
- copy(dAtA[i:], m.ClientCertData)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientCertData)))
- i--
- dAtA[i] = 0x19
- i--
- dAtA[i] = 0x8a
- }
- if m.EtcdOnSnapshotRestore != nil {
- {
- size, err := m.EtcdOnSnapshotRestore.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- i--
- dAtA[i] = 0xfa
- }
- if m.Etcd != nil {
- {
- size, err := m.Etcd.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- i--
- dAtA[i] = 0xf2
- }
- if len(m.EtcdClientEndpoint) > 0 {
- i -= len(m.EtcdClientEndpoint)
- copy(dAtA[i:], m.EtcdClientEndpoint)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.EtcdClientEndpoint)))
- i--
- dAtA[i] = 0x12
- i--
- dAtA[i] = 0xea
- }
- if m.EtcdPeerProxy {
- i--
- if m.EtcdPeerProxy {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0xc
- i--
- dAtA[i] = 0xd0
- }
- if m.EtcdClientProxy {
- i--
- if m.EtcdClientProxy {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0xc
- i--
- dAtA[i] = 0xc8
- }
- if len(m.BaseDir) > 0 {
- i -= len(m.BaseDir)
- copy(dAtA[i:], m.BaseDir)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.BaseDir)))
- i--
- dAtA[i] = 0x6
- i--
- dAtA[i] = 0xaa
- }
- if len(m.FailpointHTTPAddr) > 0 {
- i -= len(m.FailpointHTTPAddr)
- copy(dAtA[i:], m.FailpointHTTPAddr)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.FailpointHTTPAddr)))
- i--
- dAtA[i] = 0x62
- }
- if len(m.AgentAddr) > 0 {
- i -= len(m.AgentAddr)
- copy(dAtA[i:], m.AgentAddr)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.AgentAddr)))
- i--
- dAtA[i] = 0x5a
- }
- if len(m.EtcdExec) > 0 {
- i -= len(m.EtcdExec)
- copy(dAtA[i:], m.EtcdExec)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.EtcdExec)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Tester) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Tester) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Tester) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.StressQPS != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.StressQPS))
- i--
- dAtA[i] = 0x12
- i--
- dAtA[i] = 0xf0
- }
- if m.StressClients != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.StressClients))
- i--
- dAtA[i] = 0x12
- i--
- dAtA[i] = 0xe8
- }
- if m.StressKeyTxnOps != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.StressKeyTxnOps))
- i--
- dAtA[i] = 0xc
- i--
- dAtA[i] = 0xe8
- }
- if m.StressKeySuffixRangeTxn != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.StressKeySuffixRangeTxn))
- i--
- dAtA[i] = 0xc
- i--
- dAtA[i] = 0xe0
- }
- if m.StressKeySuffixRange != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.StressKeySuffixRange))
- i--
- dAtA[i] = 0xc
- i--
- dAtA[i] = 0xd8
- }
- if m.StressKeySizeLarge != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.StressKeySizeLarge))
- i--
- dAtA[i] = 0xc
- i--
- dAtA[i] = 0xd0
- }
- if m.StressKeySize != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.StressKeySize))
- i--
- dAtA[i] = 0xc
- i--
- dAtA[i] = 0xc8
- }
- if len(m.Checkers) > 0 {
- for iNdEx := len(m.Checkers) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Checkers[iNdEx])
- copy(dAtA[i:], m.Checkers[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Checkers[iNdEx])))
- i--
- dAtA[i] = 0x6
- i--
- dAtA[i] = 0xb2
- }
- }
- if len(m.Stressers) > 0 {
- for iNdEx := len(m.Stressers) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Stressers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x6
- i--
- dAtA[i] = 0xaa
- }
- }
- if len(m.ExternalExecPath) > 0 {
- i -= len(m.ExternalExecPath)
- copy(dAtA[i:], m.ExternalExecPath)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.ExternalExecPath)))
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0xd2
- }
- if len(m.RunnerExecPath) > 0 {
- i -= len(m.RunnerExecPath)
- copy(dAtA[i:], m.RunnerExecPath)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.RunnerExecPath)))
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0xca
- }
- if len(m.FailpointCommands) > 0 {
- for iNdEx := len(m.FailpointCommands) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.FailpointCommands[iNdEx])
- copy(dAtA[i:], m.FailpointCommands[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.FailpointCommands[iNdEx])))
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0x92
- }
- }
- if len(m.Cases) > 0 {
- for iNdEx := len(m.Cases) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Cases[iNdEx])
- copy(dAtA[i:], m.Cases[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Cases[iNdEx])))
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0x8a
- }
- }
- if m.CaseShuffle {
- i--
- if m.CaseShuffle {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0x80
- }
- if m.CaseDelayMs != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.CaseDelayMs))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xf8
- }
- if m.EnablePprof {
- i--
- if m.EnablePprof {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xb8
- }
- if m.ExitOnCaseFail {
- i--
- if m.ExitOnCaseFail {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xb0
- }
- if m.RoundLimit != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.RoundLimit))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xa8
- }
- if m.UpdatedDelayLatencyMs != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.UpdatedDelayLatencyMs))
- i--
- dAtA[i] = 0x68
- }
- if m.DelayLatencyMsRv != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.DelayLatencyMsRv))
- i--
- dAtA[i] = 0x60
- }
- if m.DelayLatencyMs != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.DelayLatencyMs))
- i--
- dAtA[i] = 0x58
- }
- if len(m.Addr) > 0 {
- i -= len(m.Addr)
- copy(dAtA[i:], m.Addr)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Addr)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Network) > 0 {
- i -= len(m.Network)
- copy(dAtA[i:], m.Network)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Network)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.DataDir) > 0 {
- i -= len(m.DataDir)
- copy(dAtA[i:], m.DataDir)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.DataDir)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Stresser) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Stresser) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Stresser) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Weight != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Weight))))
- i--
- dAtA[i] = 0x11
- }
- if len(m.Type) > 0 {
- i -= len(m.Type)
- copy(dAtA[i:], m.Type)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Type)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Etcd) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Etcd) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Etcd) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.LogLevel) > 0 {
- i -= len(m.LogLevel)
- copy(dAtA[i:], m.LogLevel)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.LogLevel)))
- i--
- dAtA[i] = 0x4
- i--
- dAtA[i] = 0xca
- }
- if len(m.LogOutputs) > 0 {
- for iNdEx := len(m.LogOutputs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.LogOutputs[iNdEx])
- copy(dAtA[i:], m.LogOutputs[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.LogOutputs[iNdEx])))
- i--
- dAtA[i] = 0x4
- i--
- dAtA[i] = 0xc2
- }
- }
- if len(m.Logger) > 0 {
- i -= len(m.Logger)
- copy(dAtA[i:], m.Logger)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Logger)))
- i--
- dAtA[i] = 0x4
- i--
- dAtA[i] = 0xba
- }
- if m.InitialCorruptCheck {
- i--
- if m.InitialCorruptCheck {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x4
- i--
- dAtA[i] = 0x80
- }
- if m.PreVote {
- i--
- if m.PreVote {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x3
- i--
- dAtA[i] = 0xf8
- }
- if m.QuotaBackendBytes != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.QuotaBackendBytes))
- i--
- dAtA[i] = 0x3
- i--
- dAtA[i] = 0xa0
- }
- if m.SnapshotCount != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.SnapshotCount))
- i--
- dAtA[i] = 0x3
- i--
- dAtA[i] = 0x98
- }
- if len(m.InitialClusterToken) > 0 {
- i -= len(m.InitialClusterToken)
- copy(dAtA[i:], m.InitialClusterToken)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.InitialClusterToken)))
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0xda
- }
- if len(m.InitialClusterState) > 0 {
- i -= len(m.InitialClusterState)
- copy(dAtA[i:], m.InitialClusterState)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.InitialClusterState)))
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0xd2
- }
- if len(m.InitialCluster) > 0 {
- i -= len(m.InitialCluster)
- copy(dAtA[i:], m.InitialCluster)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.InitialCluster)))
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0xca
- }
- if len(m.PeerTrustedCAFile) > 0 {
- i -= len(m.PeerTrustedCAFile)
- copy(dAtA[i:], m.PeerTrustedCAFile)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerTrustedCAFile)))
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0xaa
- }
- if len(m.PeerKeyFile) > 0 {
- i -= len(m.PeerKeyFile)
- copy(dAtA[i:], m.PeerKeyFile)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerKeyFile)))
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0xa2
- }
- if len(m.PeerCertFile) > 0 {
- i -= len(m.PeerCertFile)
- copy(dAtA[i:], m.PeerCertFile)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerCertFile)))
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0x9a
- }
- if m.PeerClientCertAuth {
- i--
- if m.PeerClientCertAuth {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0x90
- }
- if m.PeerAutoTLS {
- i--
- if m.PeerAutoTLS {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0x88
- }
- if len(m.AdvertisePeerURLs) > 0 {
- for iNdEx := len(m.AdvertisePeerURLs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.AdvertisePeerURLs[iNdEx])
- copy(dAtA[i:], m.AdvertisePeerURLs[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.AdvertisePeerURLs[iNdEx])))
- i--
- dAtA[i] = 0x2
- i--
- dAtA[i] = 0x82
- }
- }
- if len(m.ListenPeerURLs) > 0 {
- for iNdEx := len(m.ListenPeerURLs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.ListenPeerURLs[iNdEx])
- copy(dAtA[i:], m.ListenPeerURLs[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.ListenPeerURLs[iNdEx])))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xfa
- }
- }
- if len(m.ClientTrustedCAFile) > 0 {
- i -= len(m.ClientTrustedCAFile)
- copy(dAtA[i:], m.ClientTrustedCAFile)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientTrustedCAFile)))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xda
- }
- if len(m.ClientKeyFile) > 0 {
- i -= len(m.ClientKeyFile)
- copy(dAtA[i:], m.ClientKeyFile)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientKeyFile)))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xd2
- }
- if len(m.ClientCertFile) > 0 {
- i -= len(m.ClientCertFile)
- copy(dAtA[i:], m.ClientCertFile)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientCertFile)))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xca
- }
- if m.ClientCertAuth {
- i--
- if m.ClientCertAuth {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xc0
- }
- if m.ClientAutoTLS {
- i--
- if m.ClientAutoTLS {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xb8
- }
- if len(m.AdvertiseClientURLs) > 0 {
- for iNdEx := len(m.AdvertiseClientURLs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.AdvertiseClientURLs[iNdEx])
- copy(dAtA[i:], m.AdvertiseClientURLs[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.AdvertiseClientURLs[iNdEx])))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xb2
- }
- }
- if len(m.ListenClientURLs) > 0 {
- for iNdEx := len(m.ListenClientURLs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.ListenClientURLs[iNdEx])
- copy(dAtA[i:], m.ListenClientURLs[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.ListenClientURLs[iNdEx])))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0xaa
- }
- }
- if m.ElectionTimeoutMs != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ElectionTimeoutMs))
- i--
- dAtA[i] = 0x60
- }
- if m.HeartbeatIntervalMs != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.HeartbeatIntervalMs))
- i--
- dAtA[i] = 0x58
- }
- if len(m.WALDir) > 0 {
- i -= len(m.WALDir)
- copy(dAtA[i:], m.WALDir)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.WALDir)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.DataDir) > 0 {
- i -= len(m.DataDir)
- copy(dAtA[i:], m.DataDir)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.DataDir)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintRpc(dAtA []byte, offset int, v uint64) int {
- offset -= sovRpc(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *Request) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Operation != 0 {
- n += 1 + sovRpc(uint64(m.Operation))
- }
- if m.Member != nil {
- l = m.Member.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Tester != nil {
- l = m.Tester.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *SnapshotInfo) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.MemberName)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.MemberClientURLs) > 0 {
- for _, s := range m.MemberClientURLs {
- l = len(s)
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- l = len(m.SnapshotPath)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.SnapshotFileSize)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.SnapshotTotalSize)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.SnapshotTotalKey != 0 {
- n += 1 + sovRpc(uint64(m.SnapshotTotalKey))
- }
- if m.SnapshotHash != 0 {
- n += 1 + sovRpc(uint64(m.SnapshotHash))
- }
- if m.SnapshotRevision != 0 {
- n += 1 + sovRpc(uint64(m.SnapshotRevision))
- }
- l = len(m.Took)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Response) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Success {
- n += 2
- }
- l = len(m.Status)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Member != nil {
- l = m.Member.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.SnapshotInfo != nil {
- l = m.SnapshotInfo.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Member) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.EtcdExec)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.AgentAddr)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.FailpointHTTPAddr)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.BaseDir)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- if m.EtcdClientProxy {
- n += 3
- }
- if m.EtcdPeerProxy {
- n += 3
- }
- l = len(m.EtcdClientEndpoint)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- if m.Etcd != nil {
- l = m.Etcd.Size()
- n += 2 + l + sovRpc(uint64(l))
- }
- if m.EtcdOnSnapshotRestore != nil {
- l = m.EtcdOnSnapshotRestore.Size()
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.ClientCertData)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.ClientCertPath)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.ClientKeyData)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.ClientKeyPath)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.ClientTrustedCAData)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.ClientTrustedCAPath)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.PeerCertData)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.PeerCertPath)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.PeerKeyData)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.PeerKeyPath)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.PeerTrustedCAData)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.PeerTrustedCAPath)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.SnapshotPath)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- if m.SnapshotInfo != nil {
- l = m.SnapshotInfo.Size()
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.Failpoints)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Tester) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.DataDir)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Network)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Addr)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.DelayLatencyMs != 0 {
- n += 1 + sovRpc(uint64(m.DelayLatencyMs))
- }
- if m.DelayLatencyMsRv != 0 {
- n += 1 + sovRpc(uint64(m.DelayLatencyMsRv))
- }
- if m.UpdatedDelayLatencyMs != 0 {
- n += 1 + sovRpc(uint64(m.UpdatedDelayLatencyMs))
- }
- if m.RoundLimit != 0 {
- n += 2 + sovRpc(uint64(m.RoundLimit))
- }
- if m.ExitOnCaseFail {
- n += 3
- }
- if m.EnablePprof {
- n += 3
- }
- if m.CaseDelayMs != 0 {
- n += 2 + sovRpc(uint64(m.CaseDelayMs))
- }
- if m.CaseShuffle {
- n += 3
- }
- if len(m.Cases) > 0 {
- for _, s := range m.Cases {
- l = len(s)
- n += 2 + l + sovRpc(uint64(l))
- }
- }
- if len(m.FailpointCommands) > 0 {
- for _, s := range m.FailpointCommands {
- l = len(s)
- n += 2 + l + sovRpc(uint64(l))
- }
- }
- l = len(m.RunnerExecPath)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.ExternalExecPath)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- if len(m.Stressers) > 0 {
- for _, e := range m.Stressers {
- l = e.Size()
- n += 2 + l + sovRpc(uint64(l))
- }
- }
- if len(m.Checkers) > 0 {
- for _, s := range m.Checkers {
- l = len(s)
- n += 2 + l + sovRpc(uint64(l))
- }
- }
- if m.StressKeySize != 0 {
- n += 2 + sovRpc(uint64(m.StressKeySize))
- }
- if m.StressKeySizeLarge != 0 {
- n += 2 + sovRpc(uint64(m.StressKeySizeLarge))
- }
- if m.StressKeySuffixRange != 0 {
- n += 2 + sovRpc(uint64(m.StressKeySuffixRange))
- }
- if m.StressKeySuffixRangeTxn != 0 {
- n += 2 + sovRpc(uint64(m.StressKeySuffixRangeTxn))
- }
- if m.StressKeyTxnOps != 0 {
- n += 2 + sovRpc(uint64(m.StressKeyTxnOps))
- }
- if m.StressClients != 0 {
- n += 2 + sovRpc(uint64(m.StressClients))
- }
- if m.StressQPS != 0 {
- n += 2 + sovRpc(uint64(m.StressQPS))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Stresser) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Type)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Weight != 0 {
- n += 9
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Etcd) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.DataDir)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.WALDir)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.HeartbeatIntervalMs != 0 {
- n += 1 + sovRpc(uint64(m.HeartbeatIntervalMs))
- }
- if m.ElectionTimeoutMs != 0 {
- n += 1 + sovRpc(uint64(m.ElectionTimeoutMs))
- }
- if len(m.ListenClientURLs) > 0 {
- for _, s := range m.ListenClientURLs {
- l = len(s)
- n += 2 + l + sovRpc(uint64(l))
- }
- }
- if len(m.AdvertiseClientURLs) > 0 {
- for _, s := range m.AdvertiseClientURLs {
- l = len(s)
- n += 2 + l + sovRpc(uint64(l))
- }
- }
- if m.ClientAutoTLS {
- n += 3
- }
- if m.ClientCertAuth {
- n += 3
- }
- l = len(m.ClientCertFile)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.ClientKeyFile)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.ClientTrustedCAFile)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- if len(m.ListenPeerURLs) > 0 {
- for _, s := range m.ListenPeerURLs {
- l = len(s)
- n += 2 + l + sovRpc(uint64(l))
- }
- }
- if len(m.AdvertisePeerURLs) > 0 {
- for _, s := range m.AdvertisePeerURLs {
- l = len(s)
- n += 2 + l + sovRpc(uint64(l))
- }
- }
- if m.PeerAutoTLS {
- n += 3
- }
- if m.PeerClientCertAuth {
- n += 3
- }
- l = len(m.PeerCertFile)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.PeerKeyFile)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.PeerTrustedCAFile)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.InitialCluster)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.InitialClusterState)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- l = len(m.InitialClusterToken)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- if m.SnapshotCount != 0 {
- n += 2 + sovRpc(uint64(m.SnapshotCount))
- }
- if m.QuotaBackendBytes != 0 {
- n += 2 + sovRpc(uint64(m.QuotaBackendBytes))
- }
- if m.PreVote {
- n += 3
- }
- if m.InitialCorruptCheck {
- n += 3
- }
- l = len(m.Logger)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- if len(m.LogOutputs) > 0 {
- for _, s := range m.LogOutputs {
- l = len(s)
- n += 2 + l + sovRpc(uint64(l))
- }
- }
- l = len(m.LogLevel)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovRpc(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozRpc(x uint64) (n int) {
- return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *Request) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Request: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType)
- }
- m.Operation = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Operation |= Operation(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Member == nil {
- m.Member = &Member{}
- }
- if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Tester", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Tester == nil {
- m.Tester = &Tester{}
- }
- if err := m.Tester.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *SnapshotInfo) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: SnapshotInfo: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: SnapshotInfo: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field MemberName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.MemberName = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field MemberClientURLs", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.MemberClientURLs = append(m.MemberClientURLs, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SnapshotPath", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SnapshotPath = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SnapshotFileSize", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SnapshotFileSize = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SnapshotTotalSize", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SnapshotTotalSize = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field SnapshotTotalKey", wireType)
- }
- m.SnapshotTotalKey = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.SnapshotTotalKey |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field SnapshotHash", wireType)
- }
- m.SnapshotHash = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.SnapshotHash |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field SnapshotRevision", wireType)
- }
- m.SnapshotRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.SnapshotRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 9:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Took", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Took = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Response) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Response: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Success = bool(v != 0)
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Status = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Member == nil {
- m.Member = &Member{}
- }
- if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SnapshotInfo", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.SnapshotInfo == nil {
- m.SnapshotInfo = &SnapshotInfo{}
- }
- if err := m.SnapshotInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Member) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Member: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field EtcdExec", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.EtcdExec = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 11:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AgentAddr", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.AgentAddr = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 12:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field FailpointHTTPAddr", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.FailpointHTTPAddr = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 101:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field BaseDir", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.BaseDir = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 201:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field EtcdClientProxy", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.EtcdClientProxy = bool(v != 0)
- case 202:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field EtcdPeerProxy", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.EtcdPeerProxy = bool(v != 0)
- case 301:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field EtcdClientEndpoint", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.EtcdClientEndpoint = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 302:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Etcd", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Etcd == nil {
- m.Etcd = &Etcd{}
- }
- if err := m.Etcd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 303:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field EtcdOnSnapshotRestore", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.EtcdOnSnapshotRestore == nil {
- m.EtcdOnSnapshotRestore = &Etcd{}
- }
- if err := m.EtcdOnSnapshotRestore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 401:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClientCertData", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ClientCertData = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 402:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClientCertPath", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ClientCertPath = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 403:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClientKeyData", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ClientKeyData = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 404:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClientKeyPath", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ClientKeyPath = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 405:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClientTrustedCAData", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ClientTrustedCAData = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 406:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClientTrustedCAPath", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ClientTrustedCAPath = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 501:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerCertData", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerCertData = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 502:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerCertPath", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerCertPath = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 503:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerKeyData", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerKeyData = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 504:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerKeyPath", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerKeyPath = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 505:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerTrustedCAData", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerTrustedCAData = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 506:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerTrustedCAPath", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerTrustedCAPath = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 601:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SnapshotPath", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SnapshotPath = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 602:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SnapshotInfo", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.SnapshotInfo == nil {
- m.SnapshotInfo = &SnapshotInfo{}
- }
- if err := m.SnapshotInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 701:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Failpoints", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Failpoints = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Tester) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Tester: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Tester: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DataDir", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DataDir = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Network = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Addr = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 11:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DelayLatencyMs", wireType)
- }
- m.DelayLatencyMs = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DelayLatencyMs |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 12:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DelayLatencyMsRv", wireType)
- }
- m.DelayLatencyMsRv = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DelayLatencyMsRv |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 13:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field UpdatedDelayLatencyMs", wireType)
- }
- m.UpdatedDelayLatencyMs = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.UpdatedDelayLatencyMs |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 21:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RoundLimit", wireType)
- }
- m.RoundLimit = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RoundLimit |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 22:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ExitOnCaseFail", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.ExitOnCaseFail = bool(v != 0)
- case 23:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field EnablePprof", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.EnablePprof = bool(v != 0)
- case 31:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field CaseDelayMs", wireType)
- }
- m.CaseDelayMs = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.CaseDelayMs |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 32:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field CaseShuffle", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.CaseShuffle = bool(v != 0)
- case 33:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Cases", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Cases = append(m.Cases, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 34:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field FailpointCommands", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.FailpointCommands = append(m.FailpointCommands, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 41:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RunnerExecPath", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.RunnerExecPath = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 42:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ExternalExecPath", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ExternalExecPath = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 101:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Stressers", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Stressers = append(m.Stressers, &Stresser{})
- if err := m.Stressers[len(m.Stressers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 102:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Checkers", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Checkers = append(m.Checkers, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 201:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StressKeySize", wireType)
- }
- m.StressKeySize = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.StressKeySize |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 202:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StressKeySizeLarge", wireType)
- }
- m.StressKeySizeLarge = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.StressKeySizeLarge |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 203:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StressKeySuffixRange", wireType)
- }
- m.StressKeySuffixRange = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.StressKeySuffixRange |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 204:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StressKeySuffixRangeTxn", wireType)
- }
- m.StressKeySuffixRangeTxn = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.StressKeySuffixRangeTxn |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 205:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StressKeyTxnOps", wireType)
- }
- m.StressKeyTxnOps = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.StressKeyTxnOps |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 301:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StressClients", wireType)
- }
- m.StressClients = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.StressClients |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 302:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StressQPS", wireType)
- }
- m.StressQPS = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.StressQPS |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Stresser) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Stresser: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Stresser: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Type = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Weight = float64(math.Float64frombits(v))
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Etcd) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Etcd: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Etcd: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DataDir", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DataDir = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field WALDir", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.WALDir = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 11:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HeartbeatIntervalMs", wireType)
- }
- m.HeartbeatIntervalMs = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.HeartbeatIntervalMs |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 12:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ElectionTimeoutMs", wireType)
- }
- m.ElectionTimeoutMs = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ElectionTimeoutMs |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 21:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListenClientURLs", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ListenClientURLs = append(m.ListenClientURLs, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 22:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AdvertiseClientURLs", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.AdvertiseClientURLs = append(m.AdvertiseClientURLs, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 23:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClientAutoTLS", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.ClientAutoTLS = bool(v != 0)
- case 24:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClientCertAuth", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.ClientCertAuth = bool(v != 0)
- case 25:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClientCertFile", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ClientCertFile = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 26:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClientKeyFile", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ClientKeyFile = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 27:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClientTrustedCAFile", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ClientTrustedCAFile = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 31:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListenPeerURLs", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ListenPeerURLs = append(m.ListenPeerURLs, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 32:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AdvertisePeerURLs", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.AdvertisePeerURLs = append(m.AdvertisePeerURLs, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 33:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerAutoTLS", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.PeerAutoTLS = bool(v != 0)
- case 34:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerClientCertAuth", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.PeerClientCertAuth = bool(v != 0)
- case 35:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerCertFile", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerCertFile = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 36:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerKeyFile", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerKeyFile = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 37:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerTrustedCAFile", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerTrustedCAFile = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 41:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field InitialCluster", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.InitialCluster = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 42:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field InitialClusterState", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.InitialClusterState = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 43:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field InitialClusterToken", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.InitialClusterToken = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 51:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field SnapshotCount", wireType)
- }
- m.SnapshotCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.SnapshotCount |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 52:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field QuotaBackendBytes", wireType)
- }
- m.QuotaBackendBytes = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.QuotaBackendBytes |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 63:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PreVote", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.PreVote = bool(v != 0)
- case 64:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field InitialCorruptCheck", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.InitialCorruptCheck = bool(v != 0)
- case 71:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Logger", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Logger = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 72:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LogOutputs", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.LogOutputs = append(m.LogOutputs, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 73:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LogLevel", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.LogLevel = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipRpc(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthRpc
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupRpc
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthRpc
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupRpc = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/tests/functional/rpcpb/rpc.proto b/tests/functional/rpcpb/rpc.proto
deleted file mode 100644
index 87ba0533779..00000000000
--- a/tests/functional/rpcpb/rpc.proto
+++ /dev/null
@@ -1,634 +0,0 @@
-syntax = "proto3";
-package rpcpb;
-
-import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-
-message Request {
- Operation Operation = 1;
- // Member contains the same Member object from tester configuration.
- Member Member = 2;
- // Tester contains tester configuration.
- Tester Tester = 3;
-}
-
-// SnapshotInfo contains SAVE_SNAPSHOT request results.
-message SnapshotInfo {
- string MemberName = 1;
- repeated string MemberClientURLs = 2;
- string SnapshotPath = 3;
- string SnapshotFileSize = 4;
- string SnapshotTotalSize = 5;
- int64 SnapshotTotalKey = 6;
- int64 SnapshotHash = 7;
- int64 SnapshotRevision = 8;
- string Took = 9;
-}
-
-message Response {
- bool Success = 1;
- string Status = 2;
-
- // Member contains the same Member object from tester request.
- Member Member = 3;
-
- // SnapshotInfo contains SAVE_SNAPSHOT request results.
- SnapshotInfo SnapshotInfo = 4;
-}
-
-service Transport {
- rpc Transport(stream Request) returns (stream Response) {}
-}
-
-message Member {
- // EtcdExec is the executable etcd binary path in agent server.
- string EtcdExec = 1 [(gogoproto.moretags) = "yaml:\"etcd-exec\""];
-
- // AgentAddr is the agent HTTP server address.
- string AgentAddr = 11 [(gogoproto.moretags) = "yaml:\"agent-addr\""];
- // FailpointHTTPAddr is the agent's failpoints HTTP server address.
- string FailpointHTTPAddr = 12 [(gogoproto.moretags) = "yaml:\"failpoint-http-addr\""];
-
- // BaseDir is the base directory where all logs and etcd data are stored.
- string BaseDir = 101 [(gogoproto.moretags) = "yaml:\"base-dir\""];
-
- // EtcdClientProxy is true when client traffic needs to be proxied.
- // If true, listen client URL port must be different than advertise client URL port.
- bool EtcdClientProxy = 201 [(gogoproto.moretags) = "yaml:\"etcd-client-proxy\""];
- // EtcdPeerProxy is true when peer traffic needs to be proxied.
- // If true, listen peer URL port must be different than advertise peer URL port.
- bool EtcdPeerProxy = 202 [(gogoproto.moretags) = "yaml:\"etcd-peer-proxy\""];
-
- // EtcdClientEndpoint is the etcd client endpoint.
- string EtcdClientEndpoint = 301 [(gogoproto.moretags) = "yaml:\"etcd-client-endpoint\""];
- // Etcd defines etcd binary configuration flags.
- Etcd Etcd = 302 [(gogoproto.moretags) = "yaml:\"etcd\""];
- // EtcdOnSnapshotRestore defines one-time use configuration during etcd
- // snapshot recovery process.
- Etcd EtcdOnSnapshotRestore = 303;
-
- // ClientCertData contains cert file contents from this member's etcd server.
- string ClientCertData = 401 [(gogoproto.moretags) = "yaml:\"client-cert-data\""];
- string ClientCertPath = 402 [(gogoproto.moretags) = "yaml:\"client-cert-path\""];
- // ClientKeyData contains key file contents from this member's etcd server.
- string ClientKeyData = 403 [(gogoproto.moretags) = "yaml:\"client-key-data\""];
- string ClientKeyPath = 404 [(gogoproto.moretags) = "yaml:\"client-key-path\""];
- // ClientTrustedCAData contains trusted CA file contents from this member's etcd server.
- string ClientTrustedCAData = 405 [(gogoproto.moretags) = "yaml:\"client-trusted-ca-data\""];
- string ClientTrustedCAPath = 406 [(gogoproto.moretags) = "yaml:\"client-trusted-ca-path\""];
-
- // PeerCertData contains cert file contents from this member's etcd server.
- string PeerCertData = 501 [(gogoproto.moretags) = "yaml:\"peer-cert-data\""];
- string PeerCertPath = 502 [(gogoproto.moretags) = "yaml:\"peer-cert-path\""];
- // PeerKeyData contains key file contents from this member's etcd server.
- string PeerKeyData = 503 [(gogoproto.moretags) = "yaml:\"peer-key-data\""];
- string PeerKeyPath = 504 [(gogoproto.moretags) = "yaml:\"peer-key-path\""];
- // PeerTrustedCAData contains trusted CA file contents from this member's etcd server.
- string PeerTrustedCAData = 505 [(gogoproto.moretags) = "yaml:\"peer-trusted-ca-data\""];
- string PeerTrustedCAPath = 506 [(gogoproto.moretags) = "yaml:\"peer-trusted-ca-path\""];
-
- // SnapshotPath is the snapshot file path to store or restore from.
- string SnapshotPath = 601 [(gogoproto.moretags) = "yaml:\"snapshot-path\""];
- // SnapshotInfo contains last SAVE_SNAPSHOT request results.
- SnapshotInfo SnapshotInfo = 602;
-
- // Failpoints is the GOFAIL_FAILPOINTS environment variable value to use when starting etcd.
- string Failpoints = 701 [(gogoproto.moretags) = "yaml:\"failpoints\""];
-}
-
-message Tester {
- string DataDir = 1 [(gogoproto.moretags) = "yaml:\"data-dir\""];
- string Network = 2 [(gogoproto.moretags) = "yaml:\"network\""];
- string Addr = 3 [(gogoproto.moretags) = "yaml:\"addr\""];
-
- // DelayLatencyMsRv is the delay latency in milliseconds,
- // to inject to simulated slow network.
- uint32 DelayLatencyMs = 11 [(gogoproto.moretags) = "yaml:\"delay-latency-ms\""];
- // DelayLatencyMsRv is the delay latency random variable in milliseconds.
- uint32 DelayLatencyMsRv = 12 [(gogoproto.moretags) = "yaml:\"delay-latency-ms-rv\""];
- // UpdatedDelayLatencyMs is the update delay latency in milliseconds,
- // to inject to simulated slow network. It's the final latency to apply,
- // in case the latency numbers are randomly generated from given delay latency field.
- uint32 UpdatedDelayLatencyMs = 13 [(gogoproto.moretags) = "yaml:\"updated-delay-latency-ms\""];
-
- // RoundLimit is the limit of rounds to run failure set (-1 to run without limits).
- int32 RoundLimit = 21 [(gogoproto.moretags) = "yaml:\"round-limit\""];
- // ExitOnCaseFail is true, then exit tester on first failure.
- bool ExitOnCaseFail = 22 [(gogoproto.moretags) = "yaml:\"exit-on-failure\""];
- // EnablePprof is true to enable profiler.
- bool EnablePprof = 23 [(gogoproto.moretags) = "yaml:\"enable-pprof\""];
-
- // CaseDelayMs is the delay duration after failure is injected.
- // Useful when triggering snapshot or no-op failure cases.
- uint32 CaseDelayMs = 31 [(gogoproto.moretags) = "yaml:\"case-delay-ms\""];
- // CaseShuffle is true to randomize failure injecting order.
- bool CaseShuffle = 32 [(gogoproto.moretags) = "yaml:\"case-shuffle\""];
- // Cases is the selected test cases to schedule.
- // If empty, run all failure cases.
- repeated string Cases = 33 [(gogoproto.moretags) = "yaml:\"cases\""];
- // FailpointCommands is the list of "gofail" commands
- // (e.g. panic("etcd-tester"),1*sleep(1000).
- repeated string FailpointCommands = 34 [(gogoproto.moretags) = "yaml:\"failpoint-commands\""];
-
- // RunnerExecPath is a path of etcd-runner binary.
- string RunnerExecPath = 41 [(gogoproto.moretags) = "yaml:\"runner-exec-path\""];
- // ExternalExecPath is a path of script for enabling/disabling an external fault injector.
- string ExternalExecPath = 42 [(gogoproto.moretags) = "yaml:\"external-exec-path\""];
-
- // Stressers is the list of stresser types:
- // KV, LEASE, ELECTION_RUNNER, WATCH_RUNNER, LOCK_RACER_RUNNER, LEASE_RUNNER.
- repeated Stresser Stressers = 101 [(gogoproto.moretags) = "yaml:\"stressers\""];
- // Checkers is the list of consistency checker types:
- // KV_HASH, LEASE_EXPIRE, NO_CHECK, RUNNER.
- // Leave empty to skip consistency checks.
- repeated string Checkers = 102 [(gogoproto.moretags) = "yaml:\"checkers\""];
-
- // StressKeySize is the size of each small key written into etcd.
- int32 StressKeySize = 201 [(gogoproto.moretags) = "yaml:\"stress-key-size\""];
- // StressKeySizeLarge is the size of each large key written into etcd.
- int32 StressKeySizeLarge = 202 [(gogoproto.moretags) = "yaml:\"stress-key-size-large\""];
- // StressKeySuffixRange is the count of key range written into etcd.
- // Stress keys are created with "fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange))".
- int32 StressKeySuffixRange = 203 [(gogoproto.moretags) = "yaml:\"stress-key-suffix-range\""];
- // StressKeySuffixRangeTxn is the count of key range written into etcd txn (max 100).
- // Stress keys are created with "fmt.Sprintf("/k%03d", i)".
- int32 StressKeySuffixRangeTxn = 204 [(gogoproto.moretags) = "yaml:\"stress-key-suffix-range-txn\""];
- // StressKeyTxnOps is the number of operations per transaction (max 64).
- int32 StressKeyTxnOps = 205 [(gogoproto.moretags) = "yaml:\"stress-key-txn-ops\""];
-
- // StressClients is the number of concurrent stressing clients
- // with "one" shared TCP connection.
- int32 StressClients = 301 [(gogoproto.moretags) = "yaml:\"stress-clients\""];
- // StressQPS is the maximum number of stresser requests per second.
- int32 StressQPS = 302 [(gogoproto.moretags) = "yaml:\"stress-qps\""];
-}
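
The stress-key comments above spell out the key formats explicitly. A small runnable sketch of the keys they produce (the range values here are illustrative, not defaults mandated by the config):

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// Small-key space: "foo" plus a zero-padded random hex suffix, exactly as
	// the StressKeySuffixRange comment above describes.
	keySuffixRange := 250000
	for i := 0; i < 3; i++ {
		fmt.Printf("foo%016x\n", rand.Intn(keySuffixRange))
	}

	// Txn-key space: sequential "/k%03d" keys, bounded by
	// StressKeySuffixRangeTxn (max 100).
	for i := 0; i < 100; i += 25 {
		fmt.Printf("/k%03d\n", i)
	}
}
```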
-
-enum StresserType {
- KV_WRITE_SMALL = 0;
- KV_WRITE_LARGE = 1;
- KV_READ_ONE_KEY = 2;
- KV_READ_RANGE = 3;
- KV_DELETE_ONE_KEY = 4;
- KV_DELETE_RANGE = 5;
- KV_TXN_WRITE_DELETE = 6;
-
- LEASE = 10;
-
- ELECTION_RUNNER = 20;
- WATCH_RUNNER = 31;
- LOCK_RACER_RUNNER = 41;
- LEASE_RUNNER = 51;
-}
-
-message Stresser {
- string Type = 1 [(gogoproto.moretags) = "yaml:\"type\""];
- double Weight = 2 [(gogoproto.moretags) = "yaml:\"weight\""];
-}
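
Since each Stresser pairs a type with a weight, a tester presumably picks stressers in proportion to those weights. A hedged sketch of one way to do weighted selection (the helper function and the weights are illustrative, not the tester's actual code):

```go
package main

import (
	"fmt"
	"math/rand"
)

// stresser mirrors the Stresser message above: a type name plus a relative weight.
type stresser struct {
	Type   string
	Weight float64
}

// pick returns a stresser chosen with probability proportional to its weight.
func pick(ss []stresser) stresser {
	total := 0.0
	for _, s := range ss {
		total += s.Weight
	}
	r := rand.Float64() * total
	for _, s := range ss {
		r -= s.Weight
		if r <= 0 {
			return s
		}
	}
	return ss[len(ss)-1] // guard against floating-point rounding
}

func main() {
	// Illustrative weights; a real tester config defines its own mix.
	ss := []stresser{
		{Type: "KV_WRITE_SMALL", Weight: 0.35},
		{Type: "KV_WRITE_LARGE", Weight: 0.002},
		{Type: "KV_READ_ONE_KEY", Weight: 0.35},
	}
	for i := 0; i < 5; i++ {
		fmt.Println(pick(ss).Type)
	}
}
```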
-
-enum Checker {
- KV_HASH = 0;
- LEASE_EXPIRE = 1;
- RUNNER = 2;
- NO_CHECK = 3;
- SHORT_TTL_LEASE_EXPIRE = 4;
-}
-
-message Etcd {
- string Name = 1 [(gogoproto.moretags) = "yaml:\"name\""];
- string DataDir = 2 [(gogoproto.moretags) = "yaml:\"data-dir\""];
- string WALDir = 3 [(gogoproto.moretags) = "yaml:\"wal-dir\""];
-
- // HeartbeatIntervalMs is the time (in milliseconds) of a heartbeat interval.
- // Default value is 100, which is 100ms.
- int64 HeartbeatIntervalMs = 11 [(gogoproto.moretags) = "yaml:\"heartbeat-interval\""];
- // ElectionTimeoutMs is the time (in milliseconds) for an election to timeout.
- // Default value is 1000, which is 1s.
- int64 ElectionTimeoutMs = 12 [(gogoproto.moretags) = "yaml:\"election-timeout\""];
-
- repeated string ListenClientURLs = 21 [(gogoproto.moretags) = "yaml:\"listen-client-urls\""];
- repeated string AdvertiseClientURLs = 22 [(gogoproto.moretags) = "yaml:\"advertise-client-urls\""];
- bool ClientAutoTLS = 23 [(gogoproto.moretags) = "yaml:\"auto-tls\""];
- bool ClientCertAuth = 24 [(gogoproto.moretags) = "yaml:\"client-cert-auth\""];
- string ClientCertFile = 25 [(gogoproto.moretags) = "yaml:\"cert-file\""];
- string ClientKeyFile = 26 [(gogoproto.moretags) = "yaml:\"key-file\""];
- string ClientTrustedCAFile = 27 [(gogoproto.moretags) = "yaml:\"trusted-ca-file\""];
-
- repeated string ListenPeerURLs = 31 [(gogoproto.moretags) = "yaml:\"listen-peer-urls\""];
- repeated string AdvertisePeerURLs = 32 [(gogoproto.moretags) = "yaml:\"initial-advertise-peer-urls\""];
- bool PeerAutoTLS = 33 [(gogoproto.moretags) = "yaml:\"peer-auto-tls\""];
- bool PeerClientCertAuth = 34 [(gogoproto.moretags) = "yaml:\"peer-client-cert-auth\""];
- string PeerCertFile = 35 [(gogoproto.moretags) = "yaml:\"peer-cert-file\""];
- string PeerKeyFile = 36 [(gogoproto.moretags) = "yaml:\"peer-key-file\""];
- string PeerTrustedCAFile = 37 [(gogoproto.moretags) = "yaml:\"peer-trusted-ca-file\""];
-
- string InitialCluster = 41 [(gogoproto.moretags) = "yaml:\"initial-cluster\""];
- string InitialClusterState = 42 [(gogoproto.moretags) = "yaml:\"initial-cluster-state\""];
- string InitialClusterToken = 43 [(gogoproto.moretags) = "yaml:\"initial-cluster-token\""];
-
- int64 SnapshotCount = 51 [(gogoproto.moretags) = "yaml:\"snapshot-count\""];
- int64 QuotaBackendBytes = 52 [(gogoproto.moretags) = "yaml:\"quota-backend-bytes\""];
-
- bool PreVote = 63 [(gogoproto.moretags) = "yaml:\"pre-vote\""];
- bool InitialCorruptCheck = 64 [(gogoproto.moretags) = "yaml:\"initial-corrupt-check\""];
-
- string Logger = 71 [(gogoproto.moretags) = "yaml:\"logger\""];
- // LogOutputs is the list of log outputs to store current etcd server logs.
- repeated string LogOutputs = 72 [(gogoproto.moretags) = "yaml:\"log-outputs\""];
- string LogLevel = 73 [(gogoproto.moretags) = "yaml:\"log-level\""];
-}
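
The yaml tags above mirror etcd's flag names, so a member configuration can be rendered straight into command-line flags. A minimal sketch under that assumption, using a hypothetical struct that covers only a few of the fields:

```go
package main

import "fmt"

// etcdConfig mirrors a handful of fields from the Etcd message above.
type etcdConfig struct {
	Name                string
	DataDir             string
	HeartbeatIntervalMs int64
	ElectionTimeoutMs   int64
	InitialCluster      string
	InitialClusterState string
}

// flags renders the config as etcd command-line flags, reusing the yaml tag
// names above (e.g. "heartbeat-interval", "election-timeout").
func (c etcdConfig) flags() []string {
	return []string{
		fmt.Sprintf("--name=%s", c.Name),
		fmt.Sprintf("--data-dir=%s", c.DataDir),
		fmt.Sprintf("--heartbeat-interval=%d", c.HeartbeatIntervalMs),
		fmt.Sprintf("--election-timeout=%d", c.ElectionTimeoutMs),
		fmt.Sprintf("--initial-cluster=%s", c.InitialCluster),
		fmt.Sprintf("--initial-cluster-state=%s", c.InitialClusterState),
	}
}

func main() {
	cfg := etcdConfig{
		Name:                "s1",
		DataDir:             "/tmp/etcd-s1",
		HeartbeatIntervalMs: 100,
		ElectionTimeoutMs:   1000,
		InitialCluster:      "s1=https://127.0.0.1:2380",
		InitialClusterState: "new",
	}
	fmt.Println(cfg.flags())
}
```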
-
-enum Operation {
- // NOT_STARTED is the agent status before etcd's first start.
- NOT_STARTED = 0;
-
- // INITIAL_START_ETCD is sent to start etcd for the very first time.
- INITIAL_START_ETCD = 10;
- // RESTART_ETCD is sent to restart a killed etcd process.
- RESTART_ETCD = 11;
-
- // SIGTERM_ETCD pauses etcd process while keeping data directories
- // and previous etcd configurations.
- SIGTERM_ETCD = 20;
- // SIGQUIT_ETCD_AND_REMOVE_DATA kills etcd process and removes all data
- // directories to simulate destroying the whole machine.
- SIGQUIT_ETCD_AND_REMOVE_DATA = 21;
-
- // SAVE_SNAPSHOT is sent to trigger local member to download its snapshot
- // onto its local disk with the specified path from tester.
- SAVE_SNAPSHOT = 30;
- // RESTORE_RESTART_FROM_SNAPSHOT is sent to trigger local member to
- // restore a cluster from existing snapshot from disk, and restart
- // an etcd instance from recovered data.
- RESTORE_RESTART_FROM_SNAPSHOT = 31;
- // RESTART_FROM_SNAPSHOT is sent to trigger local member to restart
- // and join an existing cluster that has been recovered from a snapshot.
- // Local member joins this cluster with fresh data.
- RESTART_FROM_SNAPSHOT = 32;
-
- // SIGQUIT_ETCD_AND_ARCHIVE_DATA is sent when the consistency check fails,
- // so the etcd data directories need to be archived.
- SIGQUIT_ETCD_AND_ARCHIVE_DATA = 40;
- // SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT destroys etcd process,
- // etcd data, and agent server.
- SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT = 41;
-
- // BLACKHOLE_PEER_PORT_TX_RX drops all outgoing/incoming packets from/to
- // the target member's peer port.
- BLACKHOLE_PEER_PORT_TX_RX = 100;
- // UNBLACKHOLE_PEER_PORT_TX_RX removes outgoing/incoming packet dropping.
- UNBLACKHOLE_PEER_PORT_TX_RX = 101;
-
- // DELAY_PEER_PORT_TX_RX delays all outgoing/incoming packets from/to
- // the target member's peer port.
- DELAY_PEER_PORT_TX_RX = 200;
- // UNDELAY_PEER_PORT_TX_RX removes all outgoing/incoming delays.
- UNDELAY_PEER_PORT_TX_RX = 201;
-}
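
Each Operation maps to a local action the agent performs against its etcd process or data directories. A simplified dispatch sketch (not the agent's real handler; the constants and messages are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

// operation stands in for the Operation enum values an agent must handle.
type operation string

const (
	opInitialStartEtcd         operation = "INITIAL_START_ETCD"
	opRestartEtcd              operation = "RESTART_ETCD"
	opSigtermEtcd              operation = "SIGTERM_ETCD"
	opSigquitEtcdAndRemoveData operation = "SIGQUIT_ETCD_AND_REMOVE_DATA"
	opSaveSnapshot             operation = "SAVE_SNAPSHOT"
)

// handle sketches the agent-side dispatch: each operation triggers a local
// action against the etcd process or its data directories.
func handle(op operation) error {
	switch op {
	case opInitialStartEtcd, opRestartEtcd:
		fmt.Println("starting etcd process")
	case opSigtermEtcd:
		fmt.Println("sending SIGTERM, keeping data directories")
	case opSigquitEtcdAndRemoveData:
		fmt.Println("sending SIGQUIT and removing data directories")
	case opSaveSnapshot:
		fmt.Println("downloading snapshot to the configured SnapshotPath")
	default:
		return errors.New("unknown operation: " + string(op))
	}
	return nil
}

func main() {
	for _, op := range []operation{opInitialStartEtcd, opSigtermEtcd, opSaveSnapshot} {
		if err := handle(op); err != nil {
			fmt.Println(err)
		}
	}
}
```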
-
- // Case defines various system faults or test cases in distributed systems,
-// in order to verify correct behavior of etcd servers and clients.
-enum Case {
- // SIGTERM_ONE_FOLLOWER stops a randomly chosen follower (non-leader)
- // but does not delete its data directories on disk for next restart.
- // It waits "delay-ms" before recovering this failure.
- // The expected behavior is that the follower comes back online
- // and rejoins the cluster, and then each member continues to process
- // client requests ('Put' request that requires Raft consensus).
- SIGTERM_ONE_FOLLOWER = 0;
-
- // SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT stops a randomly chosen
- // follower but does not delete its data directories on disk for next
- // restart. And waits until most up-to-date node (leader) applies the
- // snapshot count of entries since the stop operation.
- // The expected behavior is that the follower comes back online and
- // rejoins the cluster, and then active leader sends snapshot
- // to the follower to force it to follow the leader's log.
- // As always, after recovery, each member must be able to process
- // client requests.
- SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT = 1;
-
- // SIGTERM_LEADER stops the active leader node but does not delete its
- // data directories on disk for next restart. Then it waits "delay-ms"
- // before recovering this failure, in order to trigger election timeouts.
- // The expected behavior is that a new leader gets elected, and the
- // old leader comes back online and rejoins the cluster as a follower.
- // As always, after recovery, each member must be able to process
- // client requests.
- SIGTERM_LEADER = 2;
-
- // SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT stops the active leader node
- // but does not delete its data directories on disk for next restart.
- // And waits until most up-to-date node ("new" leader) applies the
- // snapshot count of entries since the stop operation.
- // The expected behavior is that cluster elects a new leader, and the
- // old leader comes back online and rejoins the cluster as a follower.
- // And it receives the snapshot from the new leader to overwrite its
- // store. As always, after recovery, each member must be able to
- // process client requests.
- SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT = 3;
-
- // SIGTERM_QUORUM stops majority number of nodes to make the whole cluster
- // inoperable but does not delete data directories on stopped nodes
- // for next restart. And it waits "delay-ms" before recovering failure.
- // The expected behavior is that nodes come back online, thus cluster
- // comes back operative as well. As always, after recovery, each member
- // must be able to process client requests.
- SIGTERM_QUORUM = 4;
-
- // SIGTERM_ALL stops the whole cluster but does not delete data directories
- // on disk for next restart. And it waits "delay-ms" before recovering
- // this failure.
- // The expected behavior is that nodes come back online, thus cluster
- // comes back operative as well. As always, after recovery, each member
- // must be able to process client requests.
- SIGTERM_ALL = 5;
-
- // SIGQUIT_AND_REMOVE_ONE_FOLLOWER stops a randomly chosen follower
- // (non-leader), deletes its data directories on disk, and removes
- // this member from cluster (membership reconfiguration). On recovery,
- // tester adds a new member, and this member joins the existing cluster
- // with fresh data. It waits "delay-ms" before recovering this
- // failure. This simulates destroying one follower machine, where operator
- // needs to add a new member from a fresh machine.
- // The expected behavior is that a new member joins the existing cluster,
- // and then each member continues to process client requests.
- SIGQUIT_AND_REMOVE_ONE_FOLLOWER = 10;
-
- // SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT stops a randomly
- // chosen follower, deletes its data directories on disk, and removes
- // this member from cluster (membership reconfiguration). On recovery,
- // tester adds a new member, and this member joins the existing cluster
- // with fresh data. On member remove, the cluster waits until the most
- // up-to-date node (leader) applies the snapshot count of entries since the
- // stop operation. This simulates destroying one follower machine, where the
- // operator needs to add a new member from a fresh machine.
- // The expected behavior is that a new member joins the existing cluster,
- // and receives a snapshot from the active leader. As always, after
- // recovery, each member must be able to process client requests.
- SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT = 11;
-
- // SIGQUIT_AND_REMOVE_LEADER stops the active leader node, deletes its
- // data directories on disk, and removes this member from cluster.
- // On recovery, tester adds a new member, and this member joins the
- // existing cluster with fresh data. It waits "delay-ms" before
- // recovering this failure. This simulates destroying a leader machine,
- // where operator needs to add a new member from a fresh machine.
- // The expected behavior is that a new member joins the existing cluster,
- // and then each member continues to process client requests.
- SIGQUIT_AND_REMOVE_LEADER = 12;
-
- // SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT stops the active leader,
- // deletes its data directories on disk, and removes this member from
- // cluster (membership reconfiguration). On recovery, tester adds a new
- // member, and this member joins the existing cluster with fresh data. On member
- // remove, cluster waits until most up-to-date node (new leader) applies
- // the snapshot count of entries since the stop operation. This simulates
- // destroying a leader machine, where operator needs to add a new member
- // from a fresh machine.
- // The expected behavior is that on member remove, cluster elects a new
- // leader, and a new member joins the existing cluster and receives a
- // snapshot from the newly elected leader. As always, after recovery, each
- // member must be able to process client requests.
- SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT = 13;
-
- // SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH first
- // stops majority number of nodes, deletes data directories on those quorum
- // nodes, to make the whole cluster inoperable. Now that quorum and their
- // data are totally destroyed, cluster cannot even remove unavailable nodes
- // (e.g. 2 out of 3 are lost, so no leader can be elected).
- // Let's assume a 3-node cluster of nodes A, B, and C. One day, nodes A and B
- // are destroyed and all their data is gone. The only viable solution is
- // to recover from C's latest snapshot.
- //
- // To simulate:
- // 1. Assume node C is the current leader with most up-to-date data.
- // 2. Download snapshot from node C, before destroying node A and B.
- // 3. Destroy node A and B, and make the whole cluster inoperable.
- // 4. Now node C cannot operate either.
- // 5. SIGTERM node C and remove its data directories.
- // 6. Restore a new seed member from node C's latest snapshot file.
- // 7. Add another member to establish 2-node cluster.
- // 8. Add another member to establish 3-node cluster.
- // 9. Add more if any.
- //
- // The expected behavior is that etcd successfully recovers from such
- // a disastrous situation where only 1 node survives out of a 3-node cluster,
- // new members join the existing cluster, and previous data from the snapshot
- // is still preserved after the recovery process. As always, after recovery,
- // each member must be able to process client requests.
- SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH = 14;
-
- // BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER drops all outgoing/incoming
- // packets from/to the peer port on a randomly chosen follower
- // (non-leader), and waits for "delay-ms" until recovery.
- // The expected behavior is that once dropping operation is undone,
- // each member must be able to process client requests.
- BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER = 100;
-
- // BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT drops
- // all outgoing/incoming packets from/to the peer port on a randomly
- // chosen follower (non-leader), and waits for most up-to-date node
- // (leader) applies the snapshot count of entries since the blackhole
- // operation.
- // The expected behavior is that once packet drop operation is undone,
- // the slow follower tries to catch up, possibly receiving the snapshot
- // from the active leader. As always, after recovery, each member must
- // be able to process client requests.
- BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT = 101;
-
- // BLACKHOLE_PEER_PORT_TX_RX_LEADER drops all outgoing/incoming packets
- // from/to the peer port on the active leader (isolated), and waits for
- // "delay-ms" until recovery, in order to trigger election timeout.
- // The expected behavior is that after election timeout, a new leader gets
- // elected, and once dropping operation is undone, the old leader comes
- // back and rejoins the cluster as a follower. As always, after recovery,
- // each member must be able to process client requests.
- BLACKHOLE_PEER_PORT_TX_RX_LEADER = 102;
-
- // BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT drops all
- // outgoing/incoming packets from/to the peer port on the active leader,
- // and waits for most up-to-date node (leader) applies the snapshot
- // count of entries since the blackhole operation.
- // The expected behavior is that cluster elects a new leader, and once
- // dropping operation is undone, the old leader comes back and rejoins
- // the cluster as a follower. The slow follower tries to catch up, likely
- // receiving the snapshot from the new active leader. As always, after
- // recovery, each member must be able to process client requests.
- BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT = 103;
-
- // BLACKHOLE_PEER_PORT_TX_RX_QUORUM drops all outgoing/incoming packets
- // from/to the peer ports on majority nodes of cluster, thus losing its
- // leader and cluster being inoperable. And it waits for "delay-ms"
- // until recovery.
- // The expected behavior is that once packet drop operation is undone,
- // nodes come back online, thus cluster comes back operative. As always,
- // after recovery, each member must be able to process client requests.
- BLACKHOLE_PEER_PORT_TX_RX_QUORUM = 104;
-
- // BLACKHOLE_PEER_PORT_TX_RX_ALL drops all outgoing/incoming packets
- // from/to the peer ports on all nodes, thus making cluster totally
- // inoperable. It waits for "delay-ms" until recovery.
- // The expected behavior is that once packet drop operation is undone,
- // nodes come back online, thus cluster comes back operative. As always,
- // after recovery, each member must be able to process client requests.
- BLACKHOLE_PEER_PORT_TX_RX_ALL = 105;
-
- // DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER delays outgoing/incoming packets
- // from/to the peer port on a randomly chosen follower (non-leader).
- // It waits for "delay-ms" until recovery.
- // The expected behavior is that once packet delay operation is undone,
- // the follower comes back and tries to catch up with latest changes from
- // cluster. And as always, after recovery, each member must be able to
- // process client requests.
- DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER = 200;
-
- // RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER delays outgoing/incoming
- // packets from/to the peer port on a randomly chosen follower
- // (non-leader) with a randomized time duration (thus isolated). It
- // waits for "delay-ms" until recovery.
- // The expected behavior is that once packet delay operation is undone,
- // each member must be able to process client requests.
- RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER = 201;
-
- // DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT delays
- // outgoing/incoming packets from/to the peer port on a randomly chosen
- // follower (non-leader), and waits for most up-to-date node (leader)
- // applies the snapshot count of entries since the delay operation.
- // The expected behavior is that the delayed follower gets isolated
- // and behind the current active leader, and once delay operation is undone,
- // the slow follower comes back and catches up possibly receiving snapshot
- // from the active leader. As always, after recovery, each member must be
- // able to process client requests.
- DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT = 202;
-
- // RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT delays
- // outgoing/incoming packets from/to the peer port on a randomly chosen
- // follower (non-leader) with a randomized time duration, and waits for
- // most up-to-date node (leader) applies the snapshot count of entries
- // since the delay operation.
- // The expected behavior is that the delayed follower gets isolated
- // and behind the current active leader, and once delay operation is undone,
- // the slow follower comes back and catches up, possibly receiving a
- // snapshot from the active leader. As always, after recovery, each member
- // must be able to process client requests.
- RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT = 203;
-
- // DELAY_PEER_PORT_TX_RX_LEADER delays outgoing/incoming packets from/to
- // the peer port on the active leader. And waits for "delay-ms" until
- // recovery.
- // The expected behavior is that cluster may elect a new leader, and
- // once packet delay operation is undone, the (old) leader comes back
- // and tries to catch up with latest changes from cluster. As always,
- // after recovery, each member must be able to process client requests.
- DELAY_PEER_PORT_TX_RX_LEADER = 204;
-
- // RANDOM_DELAY_PEER_PORT_TX_RX_LEADER delays outgoing/incoming packets
- // from/to the peer port on the active leader with a randomized time
- // duration. And waits for "delay-ms" until recovery.
- // The expected behavior is that cluster may elect a new leader, and
- // once packet delay operation is undone, the (old) leader comes back
- // and tries to catch up with latest changes from cluster. As always,
- // after recovery, each member must be able to process client requests.
- RANDOM_DELAY_PEER_PORT_TX_RX_LEADER = 205;
-
- // DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT delays
- // outgoing/incoming packets from/to the peer port on the active leader,
- // and waits for most up-to-date node (current or new leader) applies the
- // snapshot count of entries since the delay operation.
- // The expected behavior is that cluster may elect a new leader, and
- // the old leader gets isolated and behind the current active leader,
- // and once delay operation is undone, the slow follower comes back
- // and catches up, likely receiving a snapshot from the active leader.
- // As always, after recovery, each member must be able to process client
- // requests.
- DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT = 206;
-
- // RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT delays
- // outgoing/incoming packets from/to the peer port on the active leader,
- // with a randomized time duration. And it waits for most up-to-date node
- // (current or new leader) applies the snapshot count of entries since the
- // delay operation.
- // The expected behavior is that cluster may elect a new leader, and
- // the old leader gets isolated and behind the current active leader,
- // and once delay operation is undone, the slow follower comes back
- // and catches up, likely receiving a snapshot from the active leader.
- // As always, after recovery, each member must be able to process client
- // requests.
- RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT = 207;
-
- // DELAY_PEER_PORT_TX_RX_QUORUM delays outgoing/incoming packets from/to
- // the peer ports on majority nodes of cluster. And it waits for
- // "delay-ms" until recovery, likely to trigger election timeouts.
- // The expected behavior is that cluster may elect a new leader, while
- // quorum of nodes struggle with slow networks, and once delay operation
- // is undone, nodes come back and cluster comes back operative. As always,
- // after recovery, each member must be able to process client requests.
- DELAY_PEER_PORT_TX_RX_QUORUM = 208;
-
- // RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM delays outgoing/incoming packets
- // from/to the peer ports on majority nodes of cluster, with randomized
- // time durations. And it waits for "delay-ms" until recovery, likely
- // to trigger election timeouts.
- // The expected behavior is that cluster may elect a new leader, while
- // quorum of nodes struggle with slow networks, and once delay operation
- // is undone, nodes come back and cluster comes back operative. As always,
- // after recovery, each member must be able to process client requests.
- RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM = 209;
-
- // DELAY_PEER_PORT_TX_RX_ALL delays outgoing/incoming packets from/to the
- // peer ports on all nodes. And it waits for "delay-ms" until recovery,
- // likely to trigger election timeouts.
- // The expected behavior is that cluster may become totally inoperable,
- // struggling with slow networks across the whole cluster. Once delay
- // operation is undone, nodes come back and cluster comes back operative.
- // As always, after recovery, each member must be able to process client
- // requests.
- DELAY_PEER_PORT_TX_RX_ALL = 210;
-
- // RANDOM_DELAY_PEER_PORT_TX_RX_ALL delays outgoing/incoming packets
- // from/to the peer ports on all nodes, with randomized time durations.
- // And it waits for "delay-ms" until recovery, likely to trigger
- // election timeouts.
- // The expected behavior is that cluster may become totally inoperable,
- // struggling with slow networks across the whole cluster. Once delay
- // operation is undone, nodes come back and cluster comes back operative.
- // As always, after recovery, each member must be able to process client
- // requests.
- RANDOM_DELAY_PEER_PORT_TX_RX_ALL = 211;
-
- // NO_FAIL_WITH_STRESS stops injecting failures while testing the
- // consistency and correctness under pressure loads, for the duration of
- // "delay-ms". Goal is to ensure cluster be still making progress
- // on recovery, and verify system does not deadlock following a sequence
- // of failure injections.
- // The expected behavior is that cluster remains fully operative in healthy
- // condition. As always, after recovery, each member must be able to process
- // client requests.
- NO_FAIL_WITH_STRESS = 300;
-
- // NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS neither injects failures nor
- // sends stressing client requests to the cluster, for the duration of
- // "delay-ms". The goal is to ensure the cluster is still making progress
- // on recovery, and to verify the system does not deadlock following a sequence
- // of failure injections.
- // The expected behavior is that cluster remains fully operative in healthy
- // condition, and client requests during the liveness period succeed without
- // errors.
- // Note: this is how Google Chubby does failure injection testing
- // https://static.googleusercontent.com/media/research.google.com/en//archive/paxos_made_live.pdf.
- NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS = 301;
-
- // FAILPOINTS injects failpoints to etcd server runtime, triggering panics
- // in critical code paths.
- FAILPOINTS = 400;
-
- // FAILPOINTS_WITH_DISK_IO_LATENCY injects high disk I/O latency failure in raftAfterSave code paths.
- FAILPOINTS_WITH_DISK_IO_LATENCY = 401;
-
- // EXTERNAL runs external failure injection scripts.
- EXTERNAL = 500;
-}
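
A tester round walks through these cases, optionally shuffling them (CaseShuffle) and pausing after each injection (CaseDelayMs) before recovery and consistency checks. A minimal scheduling sketch under those assumptions (the function is illustrative, not the tester's actual scheduler):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// scheduleCases sketches how a tester round might order failure cases:
// optionally shuffle them, then pause after each injection before recovering
// and running the consistency checkers.
func scheduleCases(cases []string, shuffle bool, delay time.Duration) {
	order := append([]string(nil), cases...)
	if shuffle {
		rand.Shuffle(len(order), func(i, j int) { order[i], order[j] = order[j], order[i] })
	}
	for _, c := range order {
		fmt.Println("inject:", c)
		time.Sleep(delay) // stand-in for the post-injection CaseDelayMs wait
		fmt.Println("recover and run checkers for:", c)
	}
}

func main() {
	cases := []string{
		"SIGTERM_ONE_FOLLOWER",
		"BLACKHOLE_PEER_PORT_TX_RX_LEADER",
		"DELAY_PEER_PORT_TX_RX_QUORUM",
	}
	scheduleCases(cases, true, 10*time.Millisecond)
}
```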
diff --git a/tests/functional/runner/election_command.go b/tests/functional/runner/election_command.go
deleted file mode 100644
index 4a0d194bdda..00000000000
--- a/tests/functional/runner/election_command.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package runner
-
-import (
- "context"
- "errors"
- "fmt"
-
- "go.etcd.io/etcd/client/v3/concurrency"
-
- "github.com/spf13/cobra"
-)
-
-// NewElectionCommand returns the cobra command for "election runner".
-func NewElectionCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "election [election name (defaults to 'elector')]",
- Short: "Performs election operation",
- Run: runElectionFunc,
- }
- cmd.Flags().IntVar(&totalClientConnections, "total-client-connections", 10, "total number of client connections")
- return cmd
-}
-
-func runElectionFunc(cmd *cobra.Command, args []string) {
- election := "elector"
- if len(args) == 1 {
- election = args[0]
- }
- if len(args) > 1 {
- ExitWithError(ExitBadArgs, errors.New("election takes at most one argument"))
- }
-
- rcs := make([]roundClient, totalClientConnections)
- validatec := make(chan struct{}, len(rcs))
- // nextc closes when election is ready for next round.
- nextc := make(chan struct{})
- eps := endpointsFromFlag(cmd)
-
- for i := range rcs {
- v := fmt.Sprintf("%d", i)
- observedLeader := ""
- validateWaiters := 0
- var rcNextc chan struct{}
- setRcNextc := func() {
- rcNextc = nextc
- }
-
- rcs[i].c = newClient(eps, dialTimeout)
- var (
- s *concurrency.Session
- err error
- )
- for {
- s, err = concurrency.NewSession(rcs[i].c)
- if err == nil {
- break
- }
- }
-
- e := concurrency.NewElection(s, election)
- rcs[i].acquire = func() (err error) {
- ctx, cancel := context.WithCancel(context.Background())
- donec := make(chan struct{})
- go func() {
- defer close(donec)
- for ctx.Err() == nil {
- if ol, ok := <-e.Observe(ctx); ok {
- observedLeader = string(ol.Kvs[0].Value)
- break
- }
- }
- if observedLeader != v {
- cancel()
- }
- }()
- err = e.Campaign(ctx, v)
- cancel()
- <-donec
- if err == nil {
- observedLeader = v
- }
- if observedLeader == v {
- validateWaiters = len(rcs)
- }
- select {
- case <-ctx.Done():
- return nil
- default:
- return err
- }
- }
- rcs[i].validate = func() error {
- l, err := e.Leader(context.TODO())
- if err == nil && string(l.Kvs[0].Value) != observedLeader {
- return fmt.Errorf("expected leader %q, got %q", observedLeader, l.Kvs[0].Value)
- }
- if err != nil {
- return err
- }
- setRcNextc()
- validatec <- struct{}{}
- return nil
- }
- rcs[i].release = func() error {
- for validateWaiters > 0 {
- select {
- case <-validatec:
- validateWaiters--
- default:
- return fmt.Errorf("waiting on followers")
- }
- }
- if err := e.Resign(context.TODO()); err != nil {
- return err
- }
- if observedLeader == v {
- oldNextc := nextc
- nextc = make(chan struct{})
- close(oldNextc)
-
- }
- <-rcNextc
- observedLeader = ""
- return nil
- }
- }
- // each client creates 1 key from Campaign() and deletes it from Resign();
- // a round involves 2*len(rcs) requests.
- doRounds(rcs, rounds, 2*len(rcs))
-}
diff --git a/tests/functional/runner/error.go b/tests/functional/runner/error.go
deleted file mode 100644
index b9c279bce68..00000000000
--- a/tests/functional/runner/error.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package runner
-
-import (
- "fmt"
- "os"
-
- "go.etcd.io/etcd/client/v2"
-)
-
-const (
- // http://tldp.org/LDP/abs/html/exitcodes.html
- ExitSuccess = iota
- ExitError
- ExitBadConnection
- ExitInvalidInput // for txn, watch command
- ExitBadFeature // provided a valid flag with an unsupported value
- ExitInterrupted
- ExitIO
- ExitBadArgs = 128
-)
-
-func ExitWithError(code int, err error) {
- fmt.Fprintln(os.Stderr, "Error: ", err)
- if cerr, ok := err.(*client.ClusterError); ok {
- fmt.Fprintln(os.Stderr, cerr.Detail())
- }
- os.Exit(code)
-}
diff --git a/tests/functional/runner/global.go b/tests/functional/runner/global.go
deleted file mode 100644
index 902baac663a..00000000000
--- a/tests/functional/runner/global.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package runner
-
-import (
- "context"
- "fmt"
- "log"
- "sync"
- "time"
-
- "go.etcd.io/etcd/client/v3"
-
- "github.com/spf13/cobra"
- "golang.org/x/time/rate"
-)
-
-// shared flags
-var (
- totalClientConnections int // total number of client connections to be made with server
- endpoints []string
- dialTimeout time.Duration
- rounds int // total number of rounds to run; set to <= 0 to run forever.
- reqRate int // maximum number of requests per second.
-)
-
-type roundClient struct {
- c *clientv3.Client
- progress int
- acquire func() error
- validate func() error
- release func() error
-}
-
-func newClient(eps []string, timeout time.Duration) *clientv3.Client {
- c, err := clientv3.New(clientv3.Config{
- Endpoints: eps,
- DialTimeout: timeout * time.Second,
- })
- if err != nil {
- log.Fatal(err)
- }
- return c
-}
-
-func doRounds(rcs []roundClient, rounds int, requests int) {
- var wg sync.WaitGroup
-
- wg.Add(len(rcs))
- finished := make(chan struct{})
- limiter := rate.NewLimiter(rate.Limit(reqRate), reqRate)
- for i := range rcs {
- go func(rc *roundClient) {
- defer wg.Done()
- for rc.progress < rounds || rounds <= 0 {
- if err := limiter.WaitN(context.Background(), requests/len(rcs)); err != nil {
- log.Panicf("rate limiter error %v", err)
- }
-
- for rc.acquire() != nil { /* spin */
- }
-
- if err := rc.validate(); err != nil {
- log.Fatal(err)
- }
-
- time.Sleep(10 * time.Millisecond)
- rc.progress++
- finished <- struct{}{}
-
- for rc.release() != nil { /* spin */
- }
- }
- }(&rcs[i])
- }
-
- start := time.Now()
- for i := 1; i < len(rcs)*rounds+1 || rounds <= 0; i++ {
- select {
- case <-finished:
- if i%100 == 0 {
- fmt.Printf("finished %d, took %v\n", i, time.Since(start))
- start = time.Now()
- }
- case <-time.After(time.Minute):
- log.Panic("no progress after 1 minute!")
- }
- }
- wg.Wait()
-
- for _, rc := range rcs {
- rc.c.Close()
- }
-}
-
-func endpointsFromFlag(cmd *cobra.Command) []string {
- eps, err := cmd.Flags().GetStringSlice("endpoints")
- if err != nil {
- ExitWithError(ExitError, err)
- }
- return eps
-}
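
The acquire/validate/release closures defined per roundClient are driven once per round by doRounds above. A self-contained sketch of that round shape with no-op closures (purely illustrative; the real runners campaign for elections or take locks in these closures):

```go
package main

import (
	"fmt"
	"log"
)

// roundClient mirrors the shape used above: three closures driven once per round.
type roundClient struct {
	acquire  func() error
	validate func() error
	release  func() error
}

func main() {
	// A no-op client: in the real runners, acquire campaigns for leadership or
	// takes a lock, validate checks the observed state, and release resigns or
	// unlocks.
	rc := roundClient{
		acquire:  func() error { return nil },
		validate: func() error { return nil },
		release:  func() error { return nil },
	}

	const rounds = 3
	for i := 0; i < rounds; i++ {
		if err := rc.acquire(); err != nil {
			log.Fatal(err)
		}
		if err := rc.validate(); err != nil {
			log.Fatal(err)
		}
		if err := rc.release(); err != nil {
			log.Fatal(err)
		}
		fmt.Println("finished round", i+1)
	}
}
```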
diff --git a/tests/functional/runner/help.go b/tests/functional/runner/help.go
deleted file mode 100644
index 9bf9560a74d..00000000000
--- a/tests/functional/runner/help.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// copied from https://github.com/rkt/rkt/blob/master/rkt/help.go
-
-package runner
-
-import (
- "bytes"
- "fmt"
- "io"
- "os"
- "strings"
- "text/tabwriter"
- "text/template"
-
- "go.etcd.io/etcd/api/v3/version"
-
- "github.com/spf13/cobra"
- "github.com/spf13/pflag"
-)
-
-var (
- commandUsageTemplate *template.Template
- templFuncs = template.FuncMap{
- "descToLines": func(s string) []string {
- // trim leading/trailing whitespace and split into slice of lines
- return strings.Split(strings.Trim(s, "\n\t "), "\n")
- },
- "cmdName": func(cmd *cobra.Command, startCmd *cobra.Command) string {
- parts := []string{cmd.Name()}
- for cmd.HasParent() && cmd.Parent().Name() != startCmd.Name() {
- cmd = cmd.Parent()
- parts = append([]string{cmd.Name()}, parts...)
- }
- return strings.Join(parts, " ")
- },
- }
-)
-
-func init() {
- commandUsage := `
-{{ $cmd := .Cmd }}\
-{{ $cmdname := cmdName .Cmd .Cmd.Root }}\
-NAME:
-{{ if not .Cmd.HasParent }}\
-{{printf "\t%s - %s" .Cmd.Name .Cmd.Short}}
-{{else}}\
-{{printf "\t%s - %s" $cmdname .Cmd.Short}}
-{{end}}\
-
-USAGE:
-{{printf "\t%s" .Cmd.UseLine}}
-{{ if not .Cmd.HasParent }}\
-
-VERSION:
-{{printf "\t%s" .Version}}
-{{end}}\
-{{if .Cmd.HasSubCommands}}\
-
-API VERSION:
-{{printf "\t%s" .APIVersion}}
-{{end}}\
-{{if .Cmd.HasSubCommands}}\
-
-
-COMMANDS:
-{{range .SubCommands}}\
-{{ $cmdname := cmdName . $cmd }}\
-{{ if .Runnable }}\
-{{printf "\t%s\t%s" $cmdname .Short}}
-{{end}}\
-{{end}}\
-{{end}}\
-{{ if .Cmd.Long }}\
-
-DESCRIPTION:
-{{range $line := descToLines .Cmd.Long}}{{printf "\t%s" $line}}
-{{end}}\
-{{end}}\
-{{if .Cmd.HasLocalFlags}}\
-
-OPTIONS:
-{{.LocalFlags}}\
-{{end}}\
-{{if .Cmd.HasInheritedFlags}}\
-
-GLOBAL OPTIONS:
-{{.GlobalFlags}}\
-{{end}}
-`[1:]
-
- commandUsageTemplate = template.Must(template.New("command_usage").Funcs(templFuncs).Parse(strings.Replace(commandUsage, "\\\n", "", -1)))
-}
-
-func etcdFlagUsages(flagSet *pflag.FlagSet) string {
- x := new(bytes.Buffer)
-
- flagSet.VisitAll(func(flag *pflag.Flag) {
- if len(flag.Deprecated) > 0 {
- return
- }
- var format string
- if len(flag.Shorthand) > 0 {
- format = " -%s, --%s"
- } else {
- format = " %s --%s"
- }
- if len(flag.NoOptDefVal) > 0 {
- format = format + "["
- }
- if flag.Value.Type() == "string" {
- // put quotes on the value
- format = format + "=%q"
- } else {
- format = format + "=%s"
- }
- if len(flag.NoOptDefVal) > 0 {
- format = format + "]"
- }
- format = format + "\t%s\n"
- shorthand := flag.Shorthand
- fmt.Fprintf(x, format, shorthand, flag.Name, flag.DefValue, flag.Usage)
- })
-
- return x.String()
-}
-
-func getSubCommands(cmd *cobra.Command) []*cobra.Command {
- var subCommands []*cobra.Command
- for _, subCmd := range cmd.Commands() {
- subCommands = append(subCommands, subCmd)
- subCommands = append(subCommands, getSubCommands(subCmd)...)
- }
- return subCommands
-}
-
-func usageFunc(cmd *cobra.Command) error {
- subCommands := getSubCommands(cmd)
- tabOut := getTabOutWithWriter(os.Stdout)
- commandUsageTemplate.Execute(tabOut, struct {
- Cmd *cobra.Command
- LocalFlags string
- GlobalFlags string
- SubCommands []*cobra.Command
- Version string
- APIVersion string
- }{
- cmd,
- etcdFlagUsages(cmd.LocalFlags()),
- etcdFlagUsages(cmd.InheritedFlags()),
- subCommands,
- version.Version,
- version.APIVersion,
- })
- tabOut.Flush()
- return nil
-}
-
-func getTabOutWithWriter(writer io.Writer) *tabwriter.Writer {
- aTabOut := new(tabwriter.Writer)
- aTabOut.Init(writer, 0, 8, 1, '\t', 0)
- return aTabOut
-}
diff --git a/tests/functional/runner/lease_renewer_command.go b/tests/functional/runner/lease_renewer_command.go
deleted file mode 100644
index e7b147d75db..00000000000
--- a/tests/functional/runner/lease_renewer_command.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package runner
-
-import (
- "context"
- "errors"
- "fmt"
- "log"
- "time"
-
- "go.etcd.io/etcd/client/v3"
-
- "github.com/spf13/cobra"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-var (
- leaseTTL int64
-)
-
-// NewLeaseRenewerCommand returns the cobra command for "lease-renewer runner".
-func NewLeaseRenewerCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "lease-renewer",
- Short: "Performs lease renew operation",
- Run: runLeaseRenewerFunc,
- }
- cmd.Flags().Int64Var(&leaseTTL, "ttl", 5, "lease's ttl")
- return cmd
-}
-
-func runLeaseRenewerFunc(cmd *cobra.Command, args []string) {
- if len(args) > 0 {
- ExitWithError(ExitBadArgs, errors.New("lease-renewer does not take any argument"))
- }
-
- eps := endpointsFromFlag(cmd)
- c := newClient(eps, dialTimeout)
- ctx := context.Background()
-
- for {
- var (
- l *clientv3.LeaseGrantResponse
- lk *clientv3.LeaseKeepAliveResponse
- err error
- )
- for {
- l, err = c.Lease.Grant(ctx, leaseTTL)
- if err == nil {
- break
- }
- }
- expire := time.Now().Add(time.Duration(l.TTL-1) * time.Second)
-
- for {
- lk, err = c.Lease.KeepAliveOnce(ctx, l.ID)
- if ev, ok := status.FromError(err); ok && ev.Code() == codes.NotFound {
- if time.Since(expire) < 0 {
- log.Fatalf("bad renew! exceeded: %v", time.Since(expire))
- for {
- lk, err = c.Lease.KeepAliveOnce(ctx, l.ID)
- fmt.Println(lk, err)
- time.Sleep(time.Second)
- }
- }
- log.Fatalf("lost lease %d, expire: %v\n", l.ID, expire)
- break
- }
- if err != nil {
- continue
- }
- expire = time.Now().Add(time.Duration(lk.TTL-1) * time.Second)
- log.Printf("renewed lease %d, expire: %v\n", lk.ID, expire)
- time.Sleep(time.Duration(lk.TTL-2) * time.Second)
- }
- }
-}
diff --git a/tests/functional/runner/lock_racer_command.go b/tests/functional/runner/lock_racer_command.go
deleted file mode 100644
index c6f1b941006..00000000000
--- a/tests/functional/runner/lock_racer_command.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package runner
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
-
- "go.etcd.io/etcd/client/v3/concurrency"
-
- "github.com/spf13/cobra"
-)
-
-// NewLockRacerCommand returns the cobra command for "lock-racer runner".
-func NewLockRacerCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "lock-racer [name of lock (defaults to 'racers')]",
- Short: "Performs lock race operation",
- Run: runRacerFunc,
- }
- cmd.Flags().IntVar(&totalClientConnections, "total-client-connections", 10, "total number of client connections")
- return cmd
-}
-
-func runRacerFunc(cmd *cobra.Command, args []string) {
- racers := "racers"
- if len(args) == 1 {
- racers = args[0]
- }
-
- if len(args) > 1 {
- ExitWithError(ExitBadArgs, errors.New("lock-racer takes at most one argument"))
- }
-
- rcs := make([]roundClient, totalClientConnections)
- ctx := context.Background()
- // mu ensures validate and release funcs are atomic.
- var mu sync.Mutex
- cnt := 0
-
- eps := endpointsFromFlag(cmd)
-
- for i := range rcs {
- var (
- s *concurrency.Session
- err error
- )
-
- rcs[i].c = newClient(eps, dialTimeout)
-
- for {
- s, err = concurrency.NewSession(rcs[i].c)
- if err == nil {
- break
- }
- }
- m := concurrency.NewMutex(s, racers)
- rcs[i].acquire = func() error { return m.Lock(ctx) }
- rcs[i].validate = func() error {
- mu.Lock()
- defer mu.Unlock()
- if cnt++; cnt != 1 {
- return fmt.Errorf("bad lock; count: %d", cnt)
- }
- return nil
- }
- rcs[i].release = func() error {
- mu.Lock()
- defer mu.Unlock()
- if err := m.Unlock(ctx); err != nil {
- return err
- }
- cnt = 0
- return nil
- }
- }
- // each client creates 1 key from NewMutex() and deletes it from Unlock();
- // a round involves 2*len(rcs) requests.
- doRounds(rcs, rounds, 2*len(rcs))
-}
diff --git a/tests/functional/runner/root.go b/tests/functional/runner/root.go
deleted file mode 100644
index abd74af1bc9..00000000000
--- a/tests/functional/runner/root.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package runner implements individual etcd-runner commands for the etcd-runner utility.
-package runner
-
-import (
- "log"
- "math/rand"
- "time"
-
- "github.com/spf13/cobra"
-)
-
-const (
- cliName = "etcd-runner"
- cliDescription = "Stress tests using clientv3 functionality.."
-
- defaultDialTimeout = 2 * time.Second
-)
-
-var (
- rootCmd = &cobra.Command{
- Use: cliName,
- Short: cliDescription,
- SuggestFor: []string{"etcd-runner"},
- }
-)
-
-func init() {
- cobra.EnablePrefixMatching = true
-
- rand.Seed(time.Now().UnixNano())
-
- log.SetFlags(log.Lmicroseconds)
-
- rootCmd.PersistentFlags().StringSliceVar(&endpoints, "endpoints", []string{"127.0.0.1:2379"}, "gRPC endpoints")
- rootCmd.PersistentFlags().DurationVar(&dialTimeout, "dial-timeout", defaultDialTimeout, "dial timeout for client connections")
- rootCmd.PersistentFlags().IntVar(&reqRate, "req-rate", 30, "maximum number of requests per second")
- rootCmd.PersistentFlags().IntVar(&rounds, "rounds", 100, "number of rounds to run; 0 to run forever")
-
- rootCmd.AddCommand(
- NewElectionCommand(),
- NewLeaseRenewerCommand(),
- NewLockRacerCommand(),
- NewWatchCommand(),
- )
-}
-
-func Start() {
- rootCmd.SetUsageFunc(usageFunc)
-
- // Make help just show the usage
- rootCmd.SetHelpTemplate(`{{.UsageString}}`)
-
- if err := rootCmd.Execute(); err != nil {
- ExitWithError(ExitError, err)
- }
-}
diff --git a/tests/functional/runner/watch_command.go b/tests/functional/runner/watch_command.go
deleted file mode 100644
index d3a7e11e9a5..00000000000
--- a/tests/functional/runner/watch_command.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package runner
-
-import (
- "context"
- "errors"
- "fmt"
- "log"
- "sync"
- "time"
-
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/stringutil"
-
- "github.com/spf13/cobra"
- "golang.org/x/time/rate"
-)
-
-var (
- runningTime time.Duration // time for which operation should be performed
- noOfPrefixes int // total number of prefixes which will be watched upon
- watchPerPrefix int // number of watchers per prefix
- watchPrefix string // prefix appended to keys in the watcher
- totalKeys int // total number of keys for operation
-)
-
-// NewWatchCommand returns the cobra command for "watcher runner".
-func NewWatchCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "watcher",
- Short: "Performs watch operation",
- Run: runWatcherFunc,
- }
- cmd.Flags().DurationVar(&runningTime, "running-time", 60, "number of seconds to run")
- cmd.Flags().StringVar(&watchPrefix, "prefix", "", "the prefix to append on all keys")
- cmd.Flags().IntVar(&noOfPrefixes, "total-prefixes", 10, "total no of prefixes to use")
- cmd.Flags().IntVar(&watchPerPrefix, "watch-per-prefix", 10, "number of watchers per prefix")
- cmd.Flags().IntVar(&totalKeys, "total-keys", 1000, "total number of keys to watch")
-
- return cmd
-}
-
-func runWatcherFunc(cmd *cobra.Command, args []string) {
- if len(args) > 0 {
- ExitWithError(ExitBadArgs, errors.New("watcher does not take any argument"))
- }
-
- ctx := context.Background()
- for round := 0; round < rounds || rounds <= 0; round++ {
- fmt.Println("round", round)
- performWatchOnPrefixes(ctx, cmd, round)
- }
-}
-
-func performWatchOnPrefixes(ctx context.Context, cmd *cobra.Command, round int) {
- keyPerPrefix := totalKeys / noOfPrefixes
- prefixes := stringutil.UniqueStrings(5, noOfPrefixes)
- keys := stringutil.RandomStrings(10, keyPerPrefix)
-
- roundPrefix := fmt.Sprintf("%16x", round)
-
- eps := endpointsFromFlag(cmd)
-
- var (
- revision int64
- wg sync.WaitGroup
- gr *clientv3.GetResponse
- err error
- )
-
- client := newClient(eps, dialTimeout)
- defer client.Close()
-
- gr, err = getKey(ctx, client, "non-existent")
- if err != nil {
- log.Fatalf("failed to get the initial revision: %v", err)
- }
- revision = gr.Header.Revision
-
- ctxt, cancel := context.WithDeadline(ctx, time.Now().Add(runningTime*time.Second))
- defer cancel()
-
- // generate and put keys in cluster
- limiter := rate.NewLimiter(rate.Limit(reqRate), reqRate)
-
- go func() {
- for _, key := range keys {
- for _, prefix := range prefixes {
- if err = limiter.Wait(ctxt); err != nil {
- return
- }
- if err = putKeyAtMostOnce(ctxt, client, watchPrefix+"-"+roundPrefix+"-"+prefix+"-"+key); err != nil {
- log.Fatalf("failed to put key: %v", err)
- return
- }
- }
- }
- }()
-
- ctxc, cancelc := context.WithCancel(ctx)
-
- wcs := make([]clientv3.WatchChan, 0)
- rcs := make([]*clientv3.Client, 0)
-
- for _, prefix := range prefixes {
- for j := 0; j < watchPerPrefix; j++ {
- rc := newClient(eps, dialTimeout)
- rcs = append(rcs, rc)
-
- wprefix := watchPrefix + "-" + roundPrefix + "-" + prefix
-
- wc := rc.Watch(ctxc, wprefix, clientv3.WithPrefix(), clientv3.WithRev(revision))
- wcs = append(wcs, wc)
-
- wg.Add(1)
- go func() {
- defer wg.Done()
- checkWatchResponse(wc, wprefix, keys)
- }()
- }
- }
- wg.Wait()
-
- cancelc()
-
- // verify all watch channels are closed
- for e, wc := range wcs {
- if _, ok := <-wc; ok {
- log.Fatalf("expected wc to be closed, but received %v", e)
- }
- }
-
- for _, rc := range rcs {
- rc.Close()
- }
-
- if err = deletePrefix(ctx, client, watchPrefix); err != nil {
- log.Fatalf("failed to clean up keys after test: %v", err)
- }
-}
-
-func checkWatchResponse(wc clientv3.WatchChan, prefix string, keys []string) {
- for n := 0; n < len(keys); {
- wr, more := <-wc
- if !more {
- log.Fatalf("expect more keys (received %d/%d) for %s", n, len(keys), prefix)
- }
- for _, event := range wr.Events {
- expectedKey := prefix + "-" + keys[n]
- receivedKey := string(event.Kv.Key)
- if expectedKey != receivedKey {
- log.Fatalf("expected key %q, got %q for prefix : %q\n", expectedKey, receivedKey, prefix)
- }
- n++
- }
- }
-}
-
-func putKeyAtMostOnce(ctx context.Context, client *clientv3.Client, key string) error {
- gr, err := getKey(ctx, client, key)
- if err != nil {
- return err
- }
-
- var modrev int64
- if len(gr.Kvs) > 0 {
- modrev = gr.Kvs[0].ModRevision
- }
-
- for ctx.Err() == nil {
- _, err := client.Txn(ctx).If(clientv3.Compare(clientv3.ModRevision(key), "=", modrev)).Then(clientv3.OpPut(key, key)).Commit()
-
- if err == nil {
- return nil
- }
- }
-
- return ctx.Err()
-}
-
-func deletePrefix(ctx context.Context, client *clientv3.Client, key string) error {
- for ctx.Err() == nil {
- if _, err := client.Delete(ctx, key, clientv3.WithPrefix()); err == nil {
- return nil
- }
- }
- return ctx.Err()
-}
-
-func getKey(ctx context.Context, client *clientv3.Client, key string) (*clientv3.GetResponse, error) {
- for ctx.Err() == nil {
- if gr, err := client.Get(ctx, key); err == nil {
- return gr, nil
- }
- }
- return nil, ctx.Err()
-}
diff --git a/tests/functional/scripts/docker-local-agent.sh b/tests/functional/scripts/docker-local-agent.sh
deleted file mode 100755
index 81b8d97f714..00000000000
--- a/tests/functional/scripts/docker-local-agent.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env bash
-
-< snapshotCount {
- clus.lg.Info(
- "trigger snapshot PASS",
- zap.Int("retries", i),
- zap.String("desc", c.Desc()),
- zap.Int64("committed-entries", diff),
- zap.Int64("etcd-snapshot-count", snapshotCount),
- zap.Int64("start-revision", startRev),
- zap.Int64("last-revision", lastRev),
- zap.Duration("took", time.Since(now)),
- )
- return nil
- }
-
- clus.lg.Info(
- "trigger snapshot RETRY",
- zap.Int("retries", i),
- zap.Int64("committed-entries", diff),
- zap.Int64("etcd-snapshot-count", snapshotCount),
- zap.Int64("start-revision", startRev),
- zap.Int64("last-revision", lastRev),
- zap.Duration("took", time.Since(now)),
- zap.Error(err),
- )
- time.Sleep(time.Second)
- if err != nil {
- time.Sleep(2 * time.Second)
- }
- }
-
- return fmt.Errorf("cluster too slow: only %d commits in %d retries", lastRev-startRev, retries)
-}
-
-func (c *caseUntilSnapshot) Desc() string {
- if c.desc != "" {
- return c.desc
- }
- if c.rpcpbCase.String() != "" {
- return c.rpcpbCase.String()
- }
- return c.Case.Desc()
-}
-
-func (c *caseUntilSnapshot) TestCase() rpcpb.Case {
- return c.rpcpbCase
-}
diff --git a/tests/functional/tester/case_delay.go b/tests/functional/tester/case_delay.go
deleted file mode 100644
index d06d1d65dc4..00000000000
--- a/tests/functional/tester/case_delay.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "time"
-
- "go.uber.org/zap"
-)
-
-type caseDelay struct {
- Case
- delayDuration time.Duration
-}
-
-func (c *caseDelay) Inject(clus *Cluster) error {
- if err := c.Case.Inject(clus); err != nil {
- return err
- }
- if c.delayDuration > 0 {
- clus.lg.Info(
- "wait after inject",
- zap.Duration("delay", c.delayDuration),
- zap.String("desc", c.Case.Desc()),
- )
- time.Sleep(c.delayDuration)
- }
- return nil
-}
diff --git a/tests/functional/tester/case_external.go b/tests/functional/tester/case_external.go
deleted file mode 100644
index cf4ee10bf2f..00000000000
--- a/tests/functional/tester/case_external.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "fmt"
- "os/exec"
-
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-)
-
-type caseExternal struct {
- Case
-
- desc string
- rpcpbCase rpcpb.Case
-
- scriptPath string
-}
-
-func (c *caseExternal) Inject(clus *Cluster) error {
- return exec.Command(c.scriptPath, "enable", fmt.Sprintf("%d", clus.rd)).Run()
-}
-
-func (c *caseExternal) Recover(clus *Cluster) error {
- return exec.Command(c.scriptPath, "disable", fmt.Sprintf("%d", clus.rd)).Run()
-}
-
-func (c *caseExternal) Desc() string {
- return c.desc
-}
-
-func (c *caseExternal) TestCase() rpcpb.Case {
- return c.rpcpbCase
-}
-
-func new_Case_EXTERNAL(scriptPath string) Case {
- return &caseExternal{
- desc: fmt.Sprintf("external fault injector (script: %q)", scriptPath),
- rpcpbCase: rpcpb.Case_EXTERNAL,
- scriptPath: scriptPath,
- }
-}
diff --git a/tests/functional/tester/case_failpoints.go b/tests/functional/tester/case_failpoints.go
deleted file mode 100644
index f0508666b24..00000000000
--- a/tests/functional/tester/case_failpoints.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "fmt"
- "io/ioutil"
- "net/http"
- "strings"
- "sync"
-
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-)
-
-type failpointStats struct {
- mu sync.Mutex
- // crashes counts the number of crashes for a failpoint
- crashes map[string]int
-}
-
-var fpStats failpointStats
-
-func failpointFailures(clus *Cluster) (ret []Case, err error) {
- var fps []string
- fps, err = failpointPaths(clus.Members[0].FailpointHTTPAddr)
- if err != nil {
- return nil, err
- }
- // create failure objects for all failpoints
- for _, fp := range fps {
- if len(fp) == 0 {
- continue
- }
-
- fpFails := casesFromFailpoint(fp, clus.Tester.FailpointCommands)
-
- // wrap in delays so failpoint has time to trigger
- for i, fpf := range fpFails {
- if strings.Contains(fp, "Snap") {
- // hack to trigger snapshot failpoints
- fpFails[i] = &caseUntilSnapshot{
- desc: fpf.Desc(),
- rpcpbCase: rpcpb.Case_FAILPOINTS,
- Case: fpf,
- }
- } else {
- fpFails[i] = &caseDelay{
- Case: fpf,
- delayDuration: clus.GetCaseDelayDuration(),
- }
- }
- }
- ret = append(ret, fpFails...)
- }
- fpStats.crashes = make(map[string]int)
- return ret, err
-}
-
-func failpointPaths(endpoint string) ([]string, error) {
- resp, err := http.Get(endpoint)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
- body, rerr := ioutil.ReadAll(resp.Body)
- if rerr != nil {
- return nil, rerr
- }
- var fps []string
- for _, l := range strings.Split(string(body), "\n") {
- fp := strings.Split(l, "=")[0]
- fps = append(fps, fp)
- }
- return fps, nil
-}
-
-// failpoint commands follow the FreeBSD FAIL_POINT syntax.
-// e.g. panic("etcd-tester"),1*sleep(1000)->panic("etcd-tester")
-func casesFromFailpoint(fp string, failpointCommands []string) (fs []Case) {
- recov := makeRecoverFailpoint(fp)
- for _, fcmd := range failpointCommands {
- inject := makeInjectFailpoint(fp, fcmd)
- fs = append(fs, []Case{
- &caseFollower{
- caseByFunc: caseByFunc{
- desc: fmt.Sprintf("failpoint %q (one: %q)", fp, fcmd),
- rpcpbCase: rpcpb.Case_FAILPOINTS,
- injectMember: inject,
- recoverMember: recov,
- },
- last: -1,
- lead: -1,
- },
- &caseLeader{
- caseByFunc: caseByFunc{
- desc: fmt.Sprintf("failpoint %q (leader: %q)", fp, fcmd),
- rpcpbCase: rpcpb.Case_FAILPOINTS,
- injectMember: inject,
- recoverMember: recov,
- },
- last: -1,
- lead: -1,
- },
- &caseQuorum{
- caseByFunc: caseByFunc{
- desc: fmt.Sprintf("failpoint %q (quorum: %q)", fp, fcmd),
- rpcpbCase: rpcpb.Case_FAILPOINTS,
- injectMember: inject,
- recoverMember: recov,
- },
- injected: make(map[int]struct{}),
- },
- &caseAll{
- desc: fmt.Sprintf("failpoint %q (all: %q)", fp, fcmd),
- rpcpbCase: rpcpb.Case_FAILPOINTS,
- injectMember: inject,
- recoverMember: recov,
- },
- }...)
- }
- return fs
-}
-
-func makeInjectFailpoint(fp, val string) injectMemberFunc {
- return func(clus *Cluster, idx int) (err error) {
- // Add the failpoint into the member's list of failpoints so that if the member is restarted, the
- // failpoint state is persisted (via the GOFAIL_FAILPOINTS environment variable)
- addFailpointToMemberList(clus.Members[idx], idx, fp)
-
- // Enable the failpoint
- return putFailpoint(clus.Members[idx].FailpointHTTPAddr, fp, val)
- }
-}
-
-func makeRecoverFailpoint(fp string) recoverMemberFunc {
- return func(clus *Cluster, idx int) error {
- // Remove the failpoint from the member's list of failpoints.
- removeFailpointFromMemberList(clus.Members[idx], idx, fp)
-
- // Disable the failpoint
- if err := delFailpoint(clus.Members[idx].FailpointHTTPAddr, fp); err == nil {
- return nil
- }
- // node not responding, likely dead from fp panic; restart
- fpStats.mu.Lock()
- fpStats.crashes[fp]++
- fpStats.mu.Unlock()
- return recover_SIGTERM_ETCD(clus, idx)
- }
-}
-
-func addFailpointToMemberList(member *rpcpb.Member, idx int, fp string) {
- failpoints := strings.Split(member.Failpoints, ";")
- failpoints = append(failpoints, fp)
- member.Failpoints = strings.Join(failpoints, ";")
-}
-
-func removeFailpointFromMemberList(member *rpcpb.Member, idx int, fp string) {
- failpoints := strings.Split(member.Failpoints, ";")
- for i, f := range failpoints {
- if f == fp {
- failpoints = append(failpoints[:i], failpoints[i+1:]...)
- break
- }
- }
- member.Failpoints = strings.Join(failpoints, ";")
-}
-
-func putFailpoint(ep, fp, val string) error {
- req, _ := http.NewRequest(http.MethodPut, ep+"/"+fp, strings.NewReader(val))
- c := http.Client{}
- resp, err := c.Do(req)
- if err != nil {
- return err
- }
- resp.Body.Close()
- if resp.StatusCode/100 != 2 {
- return fmt.Errorf("failed to PUT %s=%s at %s (%v)", fp, val, ep, resp.Status)
- }
- return nil
-}
-
-func delFailpoint(ep, fp string) error {
- req, _ := http.NewRequest(http.MethodDelete, ep+"/"+fp, strings.NewReader(""))
- c := http.Client{}
- resp, err := c.Do(req)
- if err != nil {
- return err
- }
- resp.Body.Close()
- if resp.StatusCode/100 != 2 {
- return fmt.Errorf("failed to DELETE %s at %s (%v)", fp, ep, resp.Status)
- }
- return nil
-}
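
The removed helpers above talk to each member's gofail HTTP endpoint (Member.FailpointHTTPAddr): GET lists the available failpoints, PUT enables one with a FAIL_POINT-style command, and DELETE disables it. A minimal standalone sketch of that interaction follows; the endpoint address is illustrative, only the failpoint name (raftAfterSave) appears in the removed code:

    package main

    import (
        "fmt"
        "net/http"
        "strings"
    )

    func main() {
        // Hypothetical gofail endpoint of one etcd member; the real address
        // comes from the agent configuration (Member.FailpointHTTPAddr).
        ep := "http://127.0.0.1:2381"

        // Enable a failpoint: sleep 1s once, then panic (FreeBSD FAIL_POINT syntax).
        cmd := `1*sleep(1000)->panic("etcd-tester")`
        req, _ := http.NewRequest(http.MethodPut, ep+"/raftAfterSave", strings.NewReader(cmd))
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        resp.Body.Close()
        fmt.Println("enable:", resp.Status)

        // Disable it again.
        req, _ = http.NewRequest(http.MethodDelete, ep+"/raftAfterSave", nil)
        resp, err = http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        resp.Body.Close()
        fmt.Println("disable:", resp.Status)
    }
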
diff --git a/tests/functional/tester/case_failpoints_disk_io.go b/tests/functional/tester/case_failpoints_disk_io.go
deleted file mode 100644
index 4cc2396b679..00000000000
--- a/tests/functional/tester/case_failpoints_disk_io.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "fmt"
- "strings"
-
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-)
-
-const (
- diskIOFailpoint = "raftAfterSave"
-)
-
-func failpointDiskIOFailures(clus *Cluster) (ret []Case, err error) {
- fps, err := failpointPaths(clus.Members[0].FailpointHTTPAddr)
- if err != nil {
- return nil, err
- }
- var detailDiskIOLatencyFailpointPath string
- for i := 0; i < len(fps); i++ {
- if strings.HasSuffix(fps[i], diskIOFailpoint) {
- detailDiskIOLatencyFailpointPath = fps[i]
- break
- }
- }
- // create failure objects for diskIOFailpoint
- fpFails := casesFromDiskIOFailpoint(detailDiskIOLatencyFailpointPath, clus.Tester.FailpointCommands)
- // wrap in delays so failpoint has time to trigger
- for i, fpf := range fpFails {
- fpFails[i] = &caseDelay{
- Case: fpf,
- delayDuration: clus.GetCaseDelayDuration(),
- }
- }
- ret = append(ret, fpFails...)
- return ret, nil
-}
-
-func casesFromDiskIOFailpoint(fp string, failpointCommands []string) (fs []Case) {
- recov := makeRecoverFailpoint(fp)
- for _, fcmd := range failpointCommands {
- inject := makeInjectFailpoint(fp, fcmd)
- fs = append(fs, []Case{
- &caseLeader{
- caseByFunc: caseByFunc{
- desc: fmt.Sprintf("failpoint %q (leader: %q)", fp, fcmd),
- rpcpbCase: rpcpb.Case_FAILPOINTS,
- injectMember: inject,
- recoverMember: recov,
- },
- last: -1,
- lead: -1,
- },
- }...)
- }
- return fs
-}
diff --git a/tests/functional/tester/case_network_blackhole.go b/tests/functional/tester/case_network_blackhole.go
deleted file mode 100644
index 3b0602050a4..00000000000
--- a/tests/functional/tester/case_network_blackhole.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
-func inject_BLACKHOLE_PEER_PORT_TX_RX(clus *Cluster, idx int) error {
- return clus.sendOp(idx, rpcpb.Operation_BLACKHOLE_PEER_PORT_TX_RX)
-}
-
-func recover_BLACKHOLE_PEER_PORT_TX_RX(clus *Cluster, idx int) error {
- return clus.sendOp(idx, rpcpb.Operation_UNBLACKHOLE_PEER_PORT_TX_RX)
-}
-
-func new_Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER(clus *Cluster) Case {
- cc := caseByFunc{
- rpcpbCase: rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER,
- injectMember: inject_BLACKHOLE_PEER_PORT_TX_RX,
- recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX,
- }
- c := &caseFollower{cc, -1, -1}
- return &caseDelay{
- Case: c,
- delayDuration: clus.GetCaseDelayDuration(),
- }
-}
-
-func new_Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT() Case {
- cc := caseByFunc{
- rpcpbCase: rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
- injectMember: inject_BLACKHOLE_PEER_PORT_TX_RX,
- recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX,
- }
- c := &caseFollower{cc, -1, -1}
- return &caseUntilSnapshot{
- rpcpbCase: rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
- Case: c,
- }
-}
-
-func new_Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER(clus *Cluster) Case {
- cc := caseByFunc{
- rpcpbCase: rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER,
- injectMember: inject_BLACKHOLE_PEER_PORT_TX_RX,
- recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX,
- }
- c := &caseLeader{cc, -1, -1}
- return &caseDelay{
- Case: c,
- delayDuration: clus.GetCaseDelayDuration(),
- }
-}
-
-func new_Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT() Case {
- cc := caseByFunc{
- rpcpbCase: rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT,
- injectMember: inject_BLACKHOLE_PEER_PORT_TX_RX,
- recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX,
- }
- c := &caseLeader{cc, -1, -1}
- return &caseUntilSnapshot{
- rpcpbCase: rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT,
- Case: c,
- }
-}
-
-func new_Case_BLACKHOLE_PEER_PORT_TX_RX_QUORUM(clus *Cluster) Case {
- c := &caseQuorum{
- caseByFunc: caseByFunc{
- rpcpbCase: rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_QUORUM,
- injectMember: inject_BLACKHOLE_PEER_PORT_TX_RX,
- recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX,
- },
- injected: make(map[int]struct{}),
- }
- return &caseDelay{
- Case: c,
- delayDuration: clus.GetCaseDelayDuration(),
- }
-}
-
-func new_Case_BLACKHOLE_PEER_PORT_TX_RX_ALL(clus *Cluster) Case {
- c := &caseAll{
- rpcpbCase: rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_ALL,
- injectMember: inject_BLACKHOLE_PEER_PORT_TX_RX,
- recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX,
- }
- return &caseDelay{
- Case: c,
- delayDuration: clus.GetCaseDelayDuration(),
- }
-}
diff --git a/tests/functional/tester/case_network_delay.go b/tests/functional/tester/case_network_delay.go
deleted file mode 100644
index bedcd9e4fc0..00000000000
--- a/tests/functional/tester/case_network_delay.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "time"
-
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
- "go.uber.org/zap"
-)
-
-const (
- // Wait more when it recovers from slow network, because network layer
- // needs extra time to propagate traffic control (tc command) change.
- // Otherwise, we get different hash values from the previous revision.
- // For more detail, please see https://github.com/etcd-io/etcd/issues/5121.
- waitRecover = 5 * time.Second
-)
-
-func inject_DELAY_PEER_PORT_TX_RX(clus *Cluster, idx int) error {
- clus.lg.Info(
- "injecting delay latency",
- zap.Duration("latency", time.Duration(clus.Tester.UpdatedDelayLatencyMs)*time.Millisecond),
- zap.Duration("latency-rv", time.Duration(clus.Tester.DelayLatencyMsRv)*time.Millisecond),
- zap.String("endpoint", clus.Members[idx].EtcdClientEndpoint),
- )
- return clus.sendOp(idx, rpcpb.Operation_DELAY_PEER_PORT_TX_RX)
-}
-
-func recover_DELAY_PEER_PORT_TX_RX(clus *Cluster, idx int) error {
- err := clus.sendOp(idx, rpcpb.Operation_UNDELAY_PEER_PORT_TX_RX)
- time.Sleep(waitRecover)
- return err
-}
-
-func new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER(clus *Cluster, random bool) Case {
- cc := caseByFunc{
- rpcpbCase: rpcpb.Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER,
- injectMember: inject_DELAY_PEER_PORT_TX_RX,
- recoverMember: recover_DELAY_PEER_PORT_TX_RX,
- }
- clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
- if random {
- clus.UpdateDelayLatencyMs()
- cc.rpcpbCase = rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER
- }
- c := &caseFollower{cc, -1, -1}
- return &caseDelay{
- Case: c,
- delayDuration: clus.GetCaseDelayDuration(),
- }
-}
-
-func new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster, random bool) Case {
- cc := caseByFunc{
- rpcpbCase: rpcpb.Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
- injectMember: inject_DELAY_PEER_PORT_TX_RX,
- recoverMember: recover_DELAY_PEER_PORT_TX_RX,
- }
- clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
- if random {
- clus.UpdateDelayLatencyMs()
- cc.rpcpbCase = rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
- }
- c := &caseFollower{cc, -1, -1}
- return &caseUntilSnapshot{
- rpcpbCase: cc.rpcpbCase,
- Case: c,
- }
-}
-
-func new_Case_DELAY_PEER_PORT_TX_RX_LEADER(clus *Cluster, random bool) Case {
- cc := caseByFunc{
- rpcpbCase: rpcpb.Case_DELAY_PEER_PORT_TX_RX_LEADER,
- injectMember: inject_DELAY_PEER_PORT_TX_RX,
- recoverMember: recover_DELAY_PEER_PORT_TX_RX,
- }
- clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
- if random {
- clus.UpdateDelayLatencyMs()
- cc.rpcpbCase = rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER
- }
- c := &caseLeader{cc, -1, -1}
- return &caseDelay{
- Case: c,
- delayDuration: clus.GetCaseDelayDuration(),
- }
-}
-
-func new_Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster, random bool) Case {
- cc := caseByFunc{
- rpcpbCase: rpcpb.Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT,
- injectMember: inject_DELAY_PEER_PORT_TX_RX,
- recoverMember: recover_DELAY_PEER_PORT_TX_RX,
- }
- clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
- if random {
- clus.UpdateDelayLatencyMs()
- cc.rpcpbCase = rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT
- }
- c := &caseLeader{cc, -1, -1}
- return &caseUntilSnapshot{
- rpcpbCase: cc.rpcpbCase,
- Case: c,
- }
-}
-
-func new_Case_DELAY_PEER_PORT_TX_RX_QUORUM(clus *Cluster, random bool) Case {
- c := &caseQuorum{
- caseByFunc: caseByFunc{
- rpcpbCase: rpcpb.Case_DELAY_PEER_PORT_TX_RX_QUORUM,
- injectMember: inject_DELAY_PEER_PORT_TX_RX,
- recoverMember: recover_DELAY_PEER_PORT_TX_RX,
- },
- injected: make(map[int]struct{}),
- }
- clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
- if random {
- clus.UpdateDelayLatencyMs()
- c.rpcpbCase = rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM
- }
- return &caseDelay{
- Case: c,
- delayDuration: clus.GetCaseDelayDuration(),
- }
-}
-
-func new_Case_DELAY_PEER_PORT_TX_RX_ALL(clus *Cluster, random bool) Case {
- c := &caseAll{
- rpcpbCase: rpcpb.Case_DELAY_PEER_PORT_TX_RX_ALL,
- injectMember: inject_DELAY_PEER_PORT_TX_RX,
- recoverMember: recover_DELAY_PEER_PORT_TX_RX,
- }
- clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
- if random {
- clus.UpdateDelayLatencyMs()
- c.rpcpbCase = rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_ALL
- }
- return &caseDelay{
- Case: c,
- delayDuration: clus.GetCaseDelayDuration(),
- }
-}
diff --git a/tests/functional/tester/case_no_fail.go b/tests/functional/tester/case_no_fail.go
deleted file mode 100644
index 16c6371d711..00000000000
--- a/tests/functional/tester/case_no_fail.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "time"
-
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
- "go.uber.org/zap"
-)
-
-type caseNoFailWithStress caseByFunc
-
-func (c *caseNoFailWithStress) Inject(clus *Cluster) error {
- return nil
-}
-
-func (c *caseNoFailWithStress) Recover(clus *Cluster) error {
- return nil
-}
-
-func (c *caseNoFailWithStress) Desc() string {
- if c.desc != "" {
- return c.desc
- }
- return c.rpcpbCase.String()
-}
-
-func (c *caseNoFailWithStress) TestCase() rpcpb.Case {
- return c.rpcpbCase
-}
-
-func new_Case_NO_FAIL_WITH_STRESS(clus *Cluster) Case {
- c := &caseNoFailWithStress{
- rpcpbCase: rpcpb.Case_NO_FAIL_WITH_STRESS,
- }
- return &caseDelay{
- Case: c,
- delayDuration: clus.GetCaseDelayDuration(),
- }
-}
-
-type caseNoFailWithNoStressForLiveness caseByFunc
-
-func (c *caseNoFailWithNoStressForLiveness) Inject(clus *Cluster) error {
- clus.lg.Info(
- "extra delay for liveness mode with no stresser",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.String("desc", c.Desc()),
- )
- time.Sleep(clus.GetCaseDelayDuration())
-
- clus.lg.Info(
- "wait health in liveness mode",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.String("desc", c.Desc()),
- )
- return clus.WaitHealth()
-}
-
-func (c *caseNoFailWithNoStressForLiveness) Recover(clus *Cluster) error {
- return nil
-}
-
-func (c *caseNoFailWithNoStressForLiveness) Desc() string {
- if c.desc != "" {
- return c.desc
- }
- return c.rpcpbCase.String()
-}
-
-func (c *caseNoFailWithNoStressForLiveness) TestCase() rpcpb.Case {
- return c.rpcpbCase
-}
-
-func new_Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS(clus *Cluster) Case {
- c := &caseNoFailWithNoStressForLiveness{
- rpcpbCase: rpcpb.Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS,
- }
- return &caseDelay{
- Case: c,
- delayDuration: clus.GetCaseDelayDuration(),
- }
-}
diff --git a/tests/functional/tester/case_sigquit_remove.go b/tests/functional/tester/case_sigquit_remove.go
deleted file mode 100644
index add3b572bf9..00000000000
--- a/tests/functional/tester/case_sigquit_remove.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "context"
- "fmt"
- "sort"
- "strings"
- "time"
-
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
- "go.uber.org/zap"
-)
-
-func inject_SIGQUIT_ETCD_AND_REMOVE_DATA(clus *Cluster, idx1 int) error {
- cli1, err := clus.Members[idx1].CreateEtcdClient()
- if err != nil {
- return err
- }
- defer cli1.Close()
-
- var mresp *clientv3.MemberListResponse
- mresp, err = cli1.MemberList(context.Background())
- mss := []string{}
- if err == nil && mresp != nil {
- mss = describeMembers(mresp)
- }
- clus.lg.Info(
- "member list before disastrous machine failure",
- zap.String("request-to", clus.Members[idx1].EtcdClientEndpoint),
- zap.Strings("members", mss),
- zap.Error(err),
- )
- if err != nil {
- return err
- }
-
- sresp, serr := cli1.Status(context.Background(), clus.Members[idx1].EtcdClientEndpoint)
- if serr != nil {
- return serr
- }
- id1 := sresp.Header.MemberId
- is1 := fmt.Sprintf("%016x", id1)
-
- clus.lg.Info(
- "disastrous machine failure START",
- zap.String("target-endpoint", clus.Members[idx1].EtcdClientEndpoint),
- zap.String("target-member-id", is1),
- zap.Error(err),
- )
- err = clus.sendOp(idx1, rpcpb.Operation_SIGQUIT_ETCD_AND_REMOVE_DATA)
- clus.lg.Info(
- "disastrous machine failure END",
- zap.String("target-endpoint", clus.Members[idx1].EtcdClientEndpoint),
- zap.String("target-member-id", is1),
- zap.Error(err),
- )
- if err != nil {
- return err
- }
-
- time.Sleep(2 * time.Second)
-
- idx2 := (idx1 + 1) % len(clus.Members)
- var cli2 *clientv3.Client
- cli2, err = clus.Members[idx2].CreateEtcdClient()
- if err != nil {
- return err
- }
- defer cli2.Close()
-
- // FIXME(bug): this may block forever during
- // "SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT"
- // is the new leader too busy with snapshotting?
- // is raft proposal dropped?
- // enable client keepalive for failover?
- clus.lg.Info(
- "member remove after disaster START",
- zap.String("target-endpoint", clus.Members[idx1].EtcdClientEndpoint),
- zap.String("target-member-id", is1),
- zap.String("request-to", clus.Members[idx2].EtcdClientEndpoint),
- )
- ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
- _, err = cli2.MemberRemove(ctx, id1)
- cancel()
- clus.lg.Info(
- "member remove after disaster END",
- zap.String("target-endpoint", clus.Members[idx1].EtcdClientEndpoint),
- zap.String("target-member-id", is1),
- zap.String("request-to", clus.Members[idx2].EtcdClientEndpoint),
- zap.Error(err),
- )
- if err != nil {
- return err
- }
-
- time.Sleep(2 * time.Second)
-
- mresp, err = cli2.MemberList(context.Background())
- mss = []string{}
- if err == nil && mresp != nil {
- mss = describeMembers(mresp)
- }
- clus.lg.Info(
- "member list after member remove",
- zap.String("request-to", clus.Members[idx2].EtcdClientEndpoint),
- zap.Strings("members", mss),
- zap.Error(err),
- )
- return err
-}
-
-func recover_SIGQUIT_ETCD_AND_REMOVE_DATA(clus *Cluster, idx1 int) error {
- idx2 := (idx1 + 1) % len(clus.Members)
- cli2, err := clus.Members[idx2].CreateEtcdClient()
- if err != nil {
- return err
- }
- defer cli2.Close()
-
- _, err = cli2.MemberAdd(context.Background(), clus.Members[idx1].Etcd.AdvertisePeerURLs)
- clus.lg.Info(
- "member add before fresh restart",
- zap.String("target-endpoint", clus.Members[idx1].EtcdClientEndpoint),
- zap.String("request-to", clus.Members[idx2].EtcdClientEndpoint),
- zap.Error(err),
- )
- if err != nil {
- return err
- }
-
- time.Sleep(2 * time.Second)
-
- clus.Members[idx1].Etcd.InitialClusterState = "existing"
- err = clus.sendOp(idx1, rpcpb.Operation_RESTART_ETCD)
- clus.lg.Info(
- "fresh restart after member add",
- zap.String("target-endpoint", clus.Members[idx1].EtcdClientEndpoint),
- zap.Error(err),
- )
- if err != nil {
- return err
- }
-
- time.Sleep(2 * time.Second)
-
- var mresp *clientv3.MemberListResponse
- mresp, err = cli2.MemberList(context.Background())
- mss := []string{}
- if err == nil && mresp != nil {
- mss = describeMembers(mresp)
- }
- clus.lg.Info(
- "member list after member add",
- zap.String("request-to", clus.Members[idx2].EtcdClientEndpoint),
- zap.Strings("members", mss),
- zap.Error(err),
- )
- return err
-}
-
-func new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER(clus *Cluster) Case {
- cc := caseByFunc{
- rpcpbCase: rpcpb.Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER,
- injectMember: inject_SIGQUIT_ETCD_AND_REMOVE_DATA,
- recoverMember: recover_SIGQUIT_ETCD_AND_REMOVE_DATA,
- }
- c := &caseFollower{cc, -1, -1}
- return &caseDelay{
- Case: c,
- delayDuration: clus.GetCaseDelayDuration(),
- }
-}
-
-func new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster) Case {
- return &caseUntilSnapshot{
- rpcpbCase: rpcpb.Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
- Case: new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER(clus),
- }
-}
-
-func new_Case_SIGQUIT_AND_REMOVE_LEADER(clus *Cluster) Case {
- cc := caseByFunc{
- rpcpbCase: rpcpb.Case_SIGQUIT_AND_REMOVE_LEADER,
- injectMember: inject_SIGQUIT_ETCD_AND_REMOVE_DATA,
- recoverMember: recover_SIGQUIT_ETCD_AND_REMOVE_DATA,
- }
- c := &caseLeader{cc, -1, -1}
- return &caseDelay{
- Case: c,
- delayDuration: clus.GetCaseDelayDuration(),
- }
-}
-
-func new_Case_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster) Case {
- return &caseUntilSnapshot{
- rpcpbCase: rpcpb.Case_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT,
- Case: new_Case_SIGQUIT_AND_REMOVE_LEADER(clus),
- }
-}
-
-func describeMembers(mresp *clientv3.MemberListResponse) (ss []string) {
- ss = make([]string, len(mresp.Members))
- for i, m := range mresp.Members {
- ss[i] = fmt.Sprintf("Name %s / ID %016x / ClientURLs %s / PeerURLs %s",
- m.Name,
- m.ID,
- strings.Join(m.ClientURLs, ","),
- strings.Join(m.PeerURLs, ","),
- )
- }
- sort.Strings(ss)
- return ss
-}
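
The FIXME in inject_SIGQUIT_ETCD_AND_REMOVE_DATA above asks whether enabling client keepalive would let the MemberRemove call fail over instead of blocking. For reference, clientv3 exposes dial keepalive options; a hedged sketch of such a configuration (the endpoint value is illustrative, not taken from the removed config):

    package main

    import (
        "fmt"
        "time"

        clientv3 "go.etcd.io/etcd/client/v3"
    )

    func main() {
        // Endpoint is illustrative; the tester would use Member.EtcdClientEndpoint.
        cli, err := clientv3.New(clientv3.Config{
            Endpoints:            []string{"127.0.0.1:2379"},
            DialTimeout:          5 * time.Second,
            DialKeepAliveTime:    2 * time.Second, // send keepalive pings while idle
            DialKeepAliveTimeout: 3 * time.Second, // drop the connection if pings go unanswered
        })
        if err != nil {
            fmt.Println("dial failed:", err)
            return
        }
        defer cli.Close()
        fmt.Println("client configured with keepalive")
    }
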
diff --git a/tests/functional/tester/case_sigquit_remove_quorum.go b/tests/functional/tester/case_sigquit_remove_quorum.go
deleted file mode 100644
index 401cfbeca82..00000000000
--- a/tests/functional/tester/case_sigquit_remove_quorum.go
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "context"
- "fmt"
- "strings"
- "time"
-
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
- "go.uber.org/zap"
-)
-
-type fetchSnapshotCaseQuorum struct {
- desc string
- rpcpbCase rpcpb.Case
- injected map[int]struct{}
- snapshotted int
-}
-
-func (c *fetchSnapshotCaseQuorum) Inject(clus *Cluster) error {
- // 1. Assume node C is the current leader with most up-to-date data.
- lead, err := clus.GetLeader()
- if err != nil {
- return err
- }
- c.snapshotted = lead
-
- // 2. Download snapshot from node C, before destroying node A and B.
- clus.lg.Info(
- "save snapshot on leader node START",
- zap.String("target-endpoint", clus.Members[lead].EtcdClientEndpoint),
- )
- var resp *rpcpb.Response
- resp, err = clus.sendOpWithResp(lead, rpcpb.Operation_SAVE_SNAPSHOT)
- if resp == nil || (resp != nil && !resp.Success) || err != nil {
- clus.lg.Info(
- "save snapshot on leader node FAIL",
- zap.String("target-endpoint", clus.Members[lead].EtcdClientEndpoint),
- zap.Error(err),
- )
- return err
- }
- clus.lg.Info(
- "save snapshot on leader node SUCCESS",
- zap.String("target-endpoint", clus.Members[lead].EtcdClientEndpoint),
- zap.String("member-name", resp.SnapshotInfo.MemberName),
- zap.Strings("member-client-urls", resp.SnapshotInfo.MemberClientURLs),
- zap.String("snapshot-path", resp.SnapshotInfo.SnapshotPath),
- zap.String("snapshot-file-size", resp.SnapshotInfo.SnapshotFileSize),
- zap.String("snapshot-total-size", resp.SnapshotInfo.SnapshotTotalSize),
- zap.Int64("snapshot-total-key", resp.SnapshotInfo.SnapshotTotalKey),
- zap.Int64("snapshot-hash", resp.SnapshotInfo.SnapshotHash),
- zap.Int64("snapshot-revision", resp.SnapshotInfo.SnapshotRevision),
- zap.String("took", resp.SnapshotInfo.Took),
- zap.Error(err),
- )
- if err != nil {
- return err
- }
- clus.Members[lead].SnapshotInfo = resp.SnapshotInfo
-
- leaderc, err := clus.Members[lead].CreateEtcdClient()
- if err != nil {
- return err
- }
- defer leaderc.Close()
- var mresp *clientv3.MemberListResponse
- mresp, err = leaderc.MemberList(context.Background())
- mss := []string{}
- if err == nil && mresp != nil {
- mss = describeMembers(mresp)
- }
- clus.lg.Info(
- "member list before disastrous machine failure",
- zap.String("request-to", clus.Members[lead].EtcdClientEndpoint),
- zap.Strings("members", mss),
- zap.Error(err),
- )
- if err != nil {
- return err
- }
-
- // simulate real life; machine failures may happen
- // after some time since last snapshot save
- time.Sleep(time.Second)
-
- // 3. Destroy node A and B, and make the whole cluster inoperable.
- for {
- c.injected = pickQuorum(len(clus.Members))
- if _, ok := c.injected[lead]; !ok {
- break
- }
- }
- for idx := range c.injected {
- clus.lg.Info(
- "disastrous machine failure to quorum START",
- zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint),
- )
- err = clus.sendOp(idx, rpcpb.Operation_SIGQUIT_ETCD_AND_REMOVE_DATA)
- clus.lg.Info(
- "disastrous machine failure to quorum END",
- zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint),
- zap.Error(err),
- )
- if err != nil {
- return err
- }
- }
-
- // 4. Now node C cannot operate either.
- // 5. SIGQUIT node C and remove its data directories.
- clus.lg.Info(
- "disastrous machine failure to old leader START",
- zap.String("target-endpoint", clus.Members[lead].EtcdClientEndpoint),
- )
- err = clus.sendOp(lead, rpcpb.Operation_SIGQUIT_ETCD_AND_REMOVE_DATA)
- clus.lg.Info(
- "disastrous machine failure to old leader END",
- zap.String("target-endpoint", clus.Members[lead].EtcdClientEndpoint),
- zap.Error(err),
- )
- return err
-}
-
-func (c *fetchSnapshotCaseQuorum) Recover(clus *Cluster) error {
- // 6. Restore a new seed member from node C's latest snapshot file.
- oldlead := c.snapshotted
-
- // configuration on restart from recovered snapshot
- // seed member's configuration is all the same as previous one
- // except initial cluster string is now a single-node cluster
- clus.Members[oldlead].EtcdOnSnapshotRestore = clus.Members[oldlead].Etcd
- clus.Members[oldlead].EtcdOnSnapshotRestore.InitialClusterState = "existing"
- name := clus.Members[oldlead].Etcd.Name
- initClus := []string{}
- for _, u := range clus.Members[oldlead].Etcd.AdvertisePeerURLs {
- initClus = append(initClus, fmt.Sprintf("%s=%s", name, u))
- }
- clus.Members[oldlead].EtcdOnSnapshotRestore.InitialCluster = strings.Join(initClus, ",")
-
- clus.lg.Info(
- "restore snapshot and restart from snapshot request START",
- zap.String("target-endpoint", clus.Members[oldlead].EtcdClientEndpoint),
- zap.Strings("initial-cluster", initClus),
- )
- err := clus.sendOp(oldlead, rpcpb.Operation_RESTORE_RESTART_FROM_SNAPSHOT)
- clus.lg.Info(
- "restore snapshot and restart from snapshot request END",
- zap.String("target-endpoint", clus.Members[oldlead].EtcdClientEndpoint),
- zap.Strings("initial-cluster", initClus),
- zap.Error(err),
- )
- if err != nil {
- return err
- }
-
- leaderc, err := clus.Members[oldlead].CreateEtcdClient()
- if err != nil {
- return err
- }
- defer leaderc.Close()
-
- // 7. Add another member to establish 2-node cluster.
- // 8. Add another member to establish 3-node cluster.
- // 9. Add more if any.
- idxs := make([]int, 0, len(c.injected))
- for idx := range c.injected {
- idxs = append(idxs, idx)
- }
- clus.lg.Info("member add START", zap.Int("members-to-add", len(idxs)))
- for i, idx := range idxs {
- clus.lg.Info(
- "member add request SENT",
- zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint),
- zap.Strings("peer-urls", clus.Members[idx].Etcd.AdvertisePeerURLs),
- )
- ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
- _, err := leaderc.MemberAdd(ctx, clus.Members[idx].Etcd.AdvertisePeerURLs)
- cancel()
- clus.lg.Info(
- "member add request DONE",
- zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint),
- zap.Strings("peer-urls", clus.Members[idx].Etcd.AdvertisePeerURLs),
- zap.Error(err),
- )
- if err != nil {
- return err
- }
-
- // start the added(new) member with fresh data
- clus.Members[idx].EtcdOnSnapshotRestore = clus.Members[idx].Etcd
- clus.Members[idx].EtcdOnSnapshotRestore.InitialClusterState = "existing"
- name := clus.Members[idx].Etcd.Name
- for _, u := range clus.Members[idx].Etcd.AdvertisePeerURLs {
- initClus = append(initClus, fmt.Sprintf("%s=%s", name, u))
- }
- clus.Members[idx].EtcdOnSnapshotRestore.InitialCluster = strings.Join(initClus, ",")
- clus.lg.Info(
- "restart from snapshot request SENT",
- zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint),
- zap.Strings("initial-cluster", initClus),
- )
- err = clus.sendOp(idx, rpcpb.Operation_RESTART_FROM_SNAPSHOT)
- clus.lg.Info(
- "restart from snapshot request DONE",
- zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint),
- zap.Strings("initial-cluster", initClus),
- zap.Error(err),
- )
- if err != nil {
- return err
- }
-
- if i != len(c.injected)-1 {
- // wait until membership reconfiguration entry gets applied
- // TODO: test concurrent member add
- dur := 5 * clus.Members[idx].ElectionTimeout()
- clus.lg.Info(
- "waiting after restart from snapshot request",
- zap.Int("i", i),
- zap.Int("idx", idx),
- zap.Duration("sleep", dur),
- )
- time.Sleep(dur)
- } else {
- clus.lg.Info(
- "restart from snapshot request ALL END",
- zap.Int("i", i),
- zap.Int("idx", idx),
- )
- }
- }
- return nil
-}
-
-func (c *fetchSnapshotCaseQuorum) Desc() string {
- if c.desc != "" {
- return c.desc
- }
- return c.rpcpbCase.String()
-}
-
-func (c *fetchSnapshotCaseQuorum) TestCase() rpcpb.Case {
- return c.rpcpbCase
-}
-
-func new_Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH(clus *Cluster) Case {
- c := &fetchSnapshotCaseQuorum{
- rpcpbCase: rpcpb.Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH,
- injected: make(map[int]struct{}),
- snapshotted: -1,
- }
- // simulate real life; machine replacements may happen
- // after some time since disaster
- return &caseDelay{
- Case: c,
- delayDuration: clus.GetCaseDelayDuration(),
- }
-}
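
fetchSnapshotCaseQuorum above relies on a pickQuorum helper that is defined elsewhere in the tester package and does not appear in this hunk; judging from its use, it selects a random majority of member indexes, roughly along these lines (a sketch, not the removed implementation):

    package tester

    import "math/rand"

    // pickQuorum returns a random set of member indexes whose size is a
    // quorum (majority) of the cluster.
    func pickQuorum(size int) map[int]struct{} {
        picked := make(map[int]struct{})
        quorum := size/2 + 1
        for len(picked) < quorum {
            picked[rand.Intn(size)] = struct{}{}
        }
        return picked
    }
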
diff --git a/tests/functional/tester/case_sigterm.go b/tests/functional/tester/case_sigterm.go
deleted file mode 100644
index 49b20a000af..00000000000
--- a/tests/functional/tester/case_sigterm.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
-func inject_SIGTERM_ETCD(clus *Cluster, idx int) error {
- return clus.sendOp(idx, rpcpb.Operation_SIGTERM_ETCD)
-}
-
-func recover_SIGTERM_ETCD(clus *Cluster, idx int) error {
- return clus.sendOp(idx, rpcpb.Operation_RESTART_ETCD)
-}
-
-func new_Case_SIGTERM_ONE_FOLLOWER(clus *Cluster) Case {
- cc := caseByFunc{
- rpcpbCase: rpcpb.Case_SIGTERM_ONE_FOLLOWER,
- injectMember: inject_SIGTERM_ETCD,
- recoverMember: recover_SIGTERM_ETCD,
- }
- c := &caseFollower{cc, -1, -1}
- return &caseDelay{
- Case: c,
- delayDuration: clus.GetCaseDelayDuration(),
- }
-}
-
-func new_Case_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster) Case {
- return &caseUntilSnapshot{
- rpcpbCase: rpcpb.Case_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT,
- Case: new_Case_SIGTERM_ONE_FOLLOWER(clus),
- }
-}
-
-func new_Case_SIGTERM_LEADER(clus *Cluster) Case {
- cc := caseByFunc{
- rpcpbCase: rpcpb.Case_SIGTERM_LEADER,
- injectMember: inject_SIGTERM_ETCD,
- recoverMember: recover_SIGTERM_ETCD,
- }
- c := &caseLeader{cc, -1, -1}
- return &caseDelay{
- Case: c,
- delayDuration: clus.GetCaseDelayDuration(),
- }
-}
-
-func new_Case_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster) Case {
- return &caseUntilSnapshot{
- rpcpbCase: rpcpb.Case_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT,
- Case: new_Case_SIGTERM_LEADER(clus),
- }
-}
-
-func new_Case_SIGTERM_QUORUM(clus *Cluster) Case {
- c := &caseQuorum{
- caseByFunc: caseByFunc{
- rpcpbCase: rpcpb.Case_SIGTERM_QUORUM,
- injectMember: inject_SIGTERM_ETCD,
- recoverMember: recover_SIGTERM_ETCD,
- },
- injected: make(map[int]struct{}),
- }
- return &caseDelay{
- Case: c,
- delayDuration: clus.GetCaseDelayDuration(),
- }
-}
-
-func new_Case_SIGTERM_ALL(clus *Cluster) Case {
- c := &caseAll{
- rpcpbCase: rpcpb.Case_SIGTERM_ALL,
- injectMember: inject_SIGTERM_ETCD,
- recoverMember: recover_SIGTERM_ETCD,
- }
- return &caseDelay{
- Case: c,
- delayDuration: clus.GetCaseDelayDuration(),
- }
-}
diff --git a/tests/functional/tester/checker.go b/tests/functional/tester/checker.go
deleted file mode 100644
index f6f21761647..00000000000
--- a/tests/functional/tester/checker.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
-// Checker checks cluster consistency.
-type Checker interface {
- // Type returns the checker type.
- Type() rpcpb.Checker
- // EtcdClientEndpoints returns the client endpoints of
- // all checker target nodes..
- // all checker target nodes.
- // Check returns an error if the system fails a consistency check.
- Check() error
-}
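
Checker is the read side of the tester: after each injected case the tester runs every configured checker and fails the round on the first error. The real consumption loop lives elsewhere in the removed tester package; a sketch of how it presumably uses this interface:

    package tester

    import "fmt"

    // runCheckers runs each checker in order and returns the first failure,
    // annotated with the checker type and its target endpoints.
    func runCheckers(checkers []Checker) error {
        for _, chk := range checkers {
            if err := chk.Check(); err != nil {
                return fmt.Errorf("checker %s failed on %v: %w",
                    chk.Type(), chk.EtcdClientEndpoints(), err)
            }
        }
        return nil
    }
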
diff --git a/tests/functional/tester/checker_kv_hash.go b/tests/functional/tester/checker_kv_hash.go
deleted file mode 100644
index cd42f727449..00000000000
--- a/tests/functional/tester/checker_kv_hash.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "fmt"
- "time"
-
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
- "go.uber.org/zap"
-)
-
-const retries = 7
-
-type kvHashChecker struct {
- ctype rpcpb.Checker
- clus *Cluster
-}
-
-func newKVHashChecker(clus *Cluster) Checker {
- return &kvHashChecker{
- ctype: rpcpb.Checker_KV_HASH,
- clus: clus,
- }
-}
-
-func (hc *kvHashChecker) checkRevAndHashes() (err error) {
- var (
- revs map[string]int64
- hashes map[string]int64
- )
- // retry in case of transient failure or if the etcd cluster has not yet stabilized.
- for i := 0; i < retries; i++ {
- revs, hashes, err = hc.clus.getRevisionHash()
- if err != nil {
- hc.clus.lg.Warn(
- "failed to get revision and hash",
- zap.Int("retries", i),
- zap.Error(err),
- )
- } else {
- sameRev := getSameValue(revs)
- sameHashes := getSameValue(hashes)
- if sameRev && sameHashes {
- return nil
- }
- hc.clus.lg.Warn(
- "retrying; etcd cluster is not stable",
- zap.Int("retries", i),
- zap.Bool("same-revisions", sameRev),
- zap.Bool("same-hashes", sameHashes),
- zap.String("revisions", fmt.Sprintf("%+v", revs)),
- zap.String("hashes", fmt.Sprintf("%+v", hashes)),
- )
- }
- time.Sleep(time.Second)
- }
-
- if err != nil {
- return fmt.Errorf("failed revision and hash check (%v)", err)
- }
-
- return fmt.Errorf("etcd cluster is not stable: [revisions: %v] and [hashes: %v]", revs, hashes)
-}
-
-func (hc *kvHashChecker) Type() rpcpb.Checker {
- return hc.ctype
-}
-
-func (hc *kvHashChecker) EtcdClientEndpoints() []string {
- return hc.clus.EtcdClientEndpoints()
-}
-
-func (hc *kvHashChecker) Check() error {
- return hc.checkRevAndHashes()
-}
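
checkRevAndHashes above depends on getSameValue, a small helper that is not part of this hunk; from its use it simply reports whether every member returned the same revision or hash, roughly:

    package tester

    // getSameValue reports whether all values in the map are equal
    // (sketch of the helper used by kvHashChecker; the removed package
    // defines the real one elsewhere).
    func getSameValue(vals map[string]int64) bool {
        var rv int64
        var set bool
        for _, v := range vals {
            if !set {
                rv, set = v, true
                continue
            }
            if v != rv {
                return false
            }
        }
        return true
    }
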
diff --git a/tests/functional/tester/checker_lease_expire.go b/tests/functional/tester/checker_lease_expire.go
deleted file mode 100644
index 0429b20f619..00000000000
--- a/tests/functional/tester/checker_lease_expire.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "context"
- "fmt"
- "time"
-
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
- "go.uber.org/zap"
- "google.golang.org/grpc"
-)
-
-type leaseExpireChecker struct {
- ctype rpcpb.Checker
- lg *zap.Logger
- m *rpcpb.Member
- ls *leaseStresser
- cli *clientv3.Client
-}
-
-func newLeaseExpireChecker(ls *leaseStresser) Checker {
- return &leaseExpireChecker{
- ctype: rpcpb.Checker_LEASE_EXPIRE,
- lg: ls.lg,
- m: ls.m,
- ls: ls,
- }
-}
-
-func (lc *leaseExpireChecker) Type() rpcpb.Checker {
- return lc.ctype
-}
-
-func (lc *leaseExpireChecker) EtcdClientEndpoints() []string {
- return []string{lc.m.EtcdClientEndpoint}
-}
-
-func (lc *leaseExpireChecker) Check() error {
- if lc.ls == nil {
- return nil
- }
- if lc.ls != nil &&
- (lc.ls.revokedLeases == nil ||
- lc.ls.aliveLeases == nil ||
- lc.ls.shortLivedLeases == nil) {
- return nil
- }
-
- cli, err := lc.m.CreateEtcdClient(grpc.WithBackoffMaxDelay(time.Second))
- if err != nil {
- return fmt.Errorf("%v (%q)", err, lc.m.EtcdClientEndpoint)
- }
- defer func() {
- if cli != nil {
- cli.Close()
- }
- }()
- lc.cli = cli
-
- if err := check(lc.lg, lc.cli, true, lc.ls.revokedLeases.leases); err != nil {
- return err
- }
- if err := check(lc.lg, lc.cli, false, lc.ls.aliveLeases.leases); err != nil {
- return err
- }
-
- return lc.checkShortLivedLeases()
-}
-
-const leaseExpireCheckerTimeout = 10 * time.Second
-
-// checkShortLivedLeases ensures leases expire.
-func (lc *leaseExpireChecker) checkShortLivedLeases() error {
- ctx, cancel := context.WithTimeout(context.Background(), leaseExpireCheckerTimeout)
- errc := make(chan error)
- defer cancel()
- for leaseID := range lc.ls.shortLivedLeases.leases {
- go func(id int64) {
- errc <- lc.checkShortLivedLease(ctx, id)
- }(leaseID)
- }
-
- var errs []error
- for range lc.ls.shortLivedLeases.leases {
- if err := <-errc; err != nil {
- errs = append(errs, err)
- }
- }
- return errsToError(errs)
-}
-
-func (lc *leaseExpireChecker) checkShortLivedLease(ctx context.Context, leaseID int64) (err error) {
- // retry in case of transient failure, or if the lease has expired but has not yet been revoked because the etcd cluster did not have enough time to delete it.
- var resp *clientv3.LeaseTimeToLiveResponse
- for i := 0; i < retries; i++ {
- resp, err = getLeaseByID(ctx, lc.cli, leaseID)
- // lease not found, for ~v3.1 compatibilities, check ErrLeaseNotFound
- if (err == nil && resp.TTL == -1) || (err != nil && rpctypes.Error(err) == rpctypes.ErrLeaseNotFound) {
- return nil
- }
- if err != nil {
- lc.lg.Debug(
- "retrying; Lease TimeToLive failed",
- zap.Int("retries", i),
- zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
- zap.Error(err),
- )
- continue
- }
- if resp.TTL > 0 {
- dur := time.Duration(resp.TTL) * time.Second
- lc.lg.Debug(
- "lease has not been expired, wait until expire",
- zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
- zap.Int64("ttl", resp.TTL),
- zap.Duration("wait-duration", dur),
- )
- time.Sleep(dur)
- } else {
- lc.lg.Debug(
- "lease expired but not yet revoked",
- zap.Int("retries", i),
- zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
- zap.Int64("ttl", resp.TTL),
- zap.Duration("wait-duration", time.Second),
- )
- time.Sleep(time.Second)
- }
- if err = checkLease(ctx, lc.lg, lc.cli, false, leaseID); err != nil {
- continue
- }
- return nil
- }
- return err
-}
-
-func checkLease(ctx context.Context, lg *zap.Logger, cli *clientv3.Client, expired bool, leaseID int64) error {
- keysExpired, err := hasKeysAttachedToLeaseExpired(ctx, lg, cli, leaseID)
- if err != nil {
- lg.Warn(
- "hasKeysAttachedToLeaseExpired failed",
- zap.Any("endpoint", cli.Endpoints()),
- zap.Error(err),
- )
- return err
- }
- leaseExpired, err := hasLeaseExpired(ctx, lg, cli, leaseID)
- if err != nil {
- lg.Warn(
- "hasLeaseExpired failed",
- zap.Any("endpoint", cli.Endpoints()),
- zap.Error(err),
- )
- return err
- }
- if leaseExpired != keysExpired {
- return fmt.Errorf("lease %v expiration mismatch (lease expired=%v, keys expired=%v)", leaseID, leaseExpired, keysExpired)
- }
- if leaseExpired != expired {
- return fmt.Errorf("lease %v expected expired=%v, got %v", leaseID, expired, leaseExpired)
- }
- return nil
-}
-
-func check(lg *zap.Logger, cli *clientv3.Client, expired bool, leases map[int64]time.Time) error {
- ctx, cancel := context.WithTimeout(context.Background(), leaseExpireCheckerTimeout)
- defer cancel()
- for leaseID := range leases {
- if err := checkLease(ctx, lg, cli, expired, leaseID); err != nil {
- return err
- }
- }
- return nil
-}
-
-// TODO: handle failures from "grpc.WaitForReady(true)"
-func getLeaseByID(ctx context.Context, cli *clientv3.Client, leaseID int64) (*clientv3.LeaseTimeToLiveResponse, error) {
- return cli.TimeToLive(
- ctx,
- clientv3.LeaseID(leaseID),
- clientv3.WithAttachedKeys(),
- )
-}
-
-func hasLeaseExpired(ctx context.Context, lg *zap.Logger, cli *clientv3.Client, leaseID int64) (bool, error) {
- // keep retrying until the lease's state is known or ctx is canceled
- for ctx.Err() == nil {
- resp, err := getLeaseByID(ctx, cli, leaseID)
- if err != nil {
- // for ~v3.1 compatibilities
- if rpctypes.Error(err) == rpctypes.ErrLeaseNotFound {
- return true, nil
- }
- } else {
- return resp.TTL == -1, nil
- }
- lg.Warn(
- "hasLeaseExpired getLeaseByID failed",
- zap.Any("endpoint", cli.Endpoints()),
- zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
- zap.Error(err),
- )
- }
- return false, ctx.Err()
-}
-
-// The keys attached to a lease have the format "<leaseID>_<idx>", where idx is the order of key creation.
-// Because every such key starts with its leaseID, listing keys by the "<leaseID>" prefix
-// determines whether the keys attached to a given leaseID have been deleted or not.
-func hasKeysAttachedToLeaseExpired(ctx context.Context, lg *zap.Logger, cli *clientv3.Client, leaseID int64) (bool, error) {
- resp, err := cli.Get(ctx, fmt.Sprintf("%d", leaseID), clientv3.WithPrefix())
- if err != nil {
- lg.Warn(
- "hasKeysAttachedToLeaseExpired failed",
- zap.Any("endpoint", cli.Endpoints()),
- zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
- zap.Error(err),
- )
- return false, err
- }
- return len(resp.Kvs) == 0, nil
-}
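
checkShortLivedLeases (further up in this file's hunk) collapses its per-lease errors with errsToError, another helper that is not shown in this diff; it presumably aggregates the collected errors into one, for example:

    package tester

    import (
        "errors"
        "strings"
    )

    // errsToError is a sketch of the aggregation helper used by
    // checkShortLivedLeases: nil when there are no errors, otherwise
    // one error carrying all messages.
    func errsToError(errs []error) error {
        if len(errs) == 0 {
            return nil
        }
        msgs := make([]string, 0, len(errs))
        for _, err := range errs {
            msgs = append(msgs, err.Error())
        }
        return errors.New(strings.Join(msgs, "; "))
    }
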
diff --git a/tests/functional/tester/checker_no_check.go b/tests/functional/tester/checker_no_check.go
deleted file mode 100644
index b0aef6e2168..00000000000
--- a/tests/functional/tester/checker_no_check.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
-type noCheck struct{}
-
-func newNoChecker() Checker { return &noCheck{} }
-func (nc *noCheck) Type() rpcpb.Checker { return rpcpb.Checker_NO_CHECK }
-func (nc *noCheck) EtcdClientEndpoints() []string { return nil }
-func (nc *noCheck) Check() error { return nil }
diff --git a/tests/functional/tester/checker_runner.go b/tests/functional/tester/checker_runner.go
deleted file mode 100644
index 944ecc6a397..00000000000
--- a/tests/functional/tester/checker_runner.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
-type runnerChecker struct {
- ctype rpcpb.Checker
- etcdClientEndpoint string
- errc chan error
-}
-
-func newRunnerChecker(ep string, errc chan error) Checker {
- return &runnerChecker{
- ctype: rpcpb.Checker_RUNNER,
- etcdClientEndpoint: ep,
- errc: errc,
- }
-}
-
-func (rc *runnerChecker) Type() rpcpb.Checker {
- return rc.ctype
-}
-
-func (rc *runnerChecker) EtcdClientEndpoints() []string {
- return []string{rc.etcdClientEndpoint}
-}
-
-func (rc *runnerChecker) Check() error {
- select {
- case err := <-rc.errc:
- return err
- default:
- return nil
- }
-}
diff --git a/tests/functional/tester/checker_short_ttl_lease_expire.go b/tests/functional/tester/checker_short_ttl_lease_expire.go
deleted file mode 100644
index bf242f0241b..00000000000
--- a/tests/functional/tester/checker_short_ttl_lease_expire.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "fmt"
- "time"
-
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
- "go.uber.org/zap"
- "google.golang.org/grpc"
-)
-
-type shortTTLLeaseExpireChecker struct {
- ctype rpcpb.Checker
- lg *zap.Logger
- m *rpcpb.Member
- ls *leaseStresser
- cli *clientv3.Client
-}
-
-func newShortTTLLeaseExpireChecker(ls *leaseStresser) Checker {
- return &shortTTLLeaseExpireChecker{
- ctype: rpcpb.Checker_SHORT_TTL_LEASE_EXPIRE,
- lg: ls.lg,
- m: ls.m,
- ls: ls,
- }
-}
-
-func (lc *shortTTLLeaseExpireChecker) Type() rpcpb.Checker {
- return lc.ctype
-}
-
-func (lc *shortTTLLeaseExpireChecker) EtcdClientEndpoints() []string {
- return []string{lc.m.EtcdClientEndpoint}
-}
-
-func (lc *shortTTLLeaseExpireChecker) Check() error {
- if lc.ls == nil {
- return nil
- }
- if lc.ls != nil && lc.ls.alivedLeasesWithShortTTL == nil {
- return nil
- }
-
- cli, err := lc.m.CreateEtcdClient(grpc.WithBackoffMaxDelay(time.Second))
- if err != nil {
- return fmt.Errorf("%v (%q)", err, lc.m.EtcdClientEndpoint)
- }
- defer func() {
- if cli != nil {
- cli.Close()
- }
- }()
- lc.cli = cli
- if err := check(lc.lg, lc.cli, false, lc.ls.alivedLeasesWithShortTTL.leases); err != nil {
- lc.lg.Error("failed to check alivedLeasesWithShortTTL", zap.Error(err))
- return err
- }
- lc.lg.Info("check alivedLeasesWithShortTTL succ", zap.Int("num", len(lc.ls.alivedLeasesWithShortTTL.leases)))
- return nil
-}
diff --git a/tests/functional/tester/cluster.go b/tests/functional/tester/cluster.go
deleted file mode 100644
index 08c70afe994..00000000000
--- a/tests/functional/tester/cluster.go
+++ /dev/null
@@ -1,773 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "math/rand"
- "net/http"
- "net/url"
- "path/filepath"
- "strings"
- "sync"
- "time"
-
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
- "go.etcd.io/etcd/pkg/v3/debugutil"
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
- "github.com/prometheus/client_golang/prometheus/promhttp"
- "go.uber.org/zap"
- "golang.org/x/time/rate"
- "google.golang.org/grpc"
-)
-
-// Cluster defines tester cluster.
-type Cluster struct {
- lg *zap.Logger
-
- agentConns []*grpc.ClientConn
- agentClients []rpcpb.TransportClient
- agentStreams []rpcpb.Transport_TransportClient
- agentRequests []*rpcpb.Request
-
- testerHTTPServer *http.Server
-
- Members []*rpcpb.Member `yaml:"agent-configs"`
- Tester *rpcpb.Tester `yaml:"tester-config"`
-
- cases []Case
-
- rateLimiter *rate.Limiter
- stresser Stresser
- checkers []Checker
-
- currentRevision int64
- rd int
- cs int
-}
-
-var dialOpts = []grpc.DialOption{
- grpc.WithInsecure(),
- grpc.WithTimeout(5 * time.Second),
- grpc.WithBlock(),
-}
-
-// NewCluster creates a client from a tester configuration.
-func NewCluster(lg *zap.Logger, fpath string) (*Cluster, error) {
- clus, err := read(lg, fpath)
- if err != nil {
- return nil, err
- }
-
- clus.agentConns = make([]*grpc.ClientConn, len(clus.Members))
- clus.agentClients = make([]rpcpb.TransportClient, len(clus.Members))
- clus.agentStreams = make([]rpcpb.Transport_TransportClient, len(clus.Members))
- clus.agentRequests = make([]*rpcpb.Request, len(clus.Members))
- clus.cases = make([]Case, 0)
-
- for i, ap := range clus.Members {
- var err error
- clus.agentConns[i], err = grpc.Dial(ap.AgentAddr, dialOpts...)
- if err != nil {
- return nil, err
- }
- clus.agentClients[i] = rpcpb.NewTransportClient(clus.agentConns[i])
- clus.lg.Info("connected", zap.String("agent-address", ap.AgentAddr))
-
- clus.agentStreams[i], err = clus.agentClients[i].Transport(context.Background())
- if err != nil {
- return nil, err
- }
- clus.lg.Info("created stream", zap.String("agent-address", ap.AgentAddr))
- }
-
- mux := http.NewServeMux()
- mux.Handle("/metrics", promhttp.Handler())
- if clus.Tester.EnablePprof {
- for p, h := range debugutil.PProfHandlers() {
- mux.Handle(p, h)
- }
- }
- clus.testerHTTPServer = &http.Server{
- Addr: clus.Tester.Addr,
- Handler: mux,
- ErrorLog: log.New(ioutil.Discard, "net/http", 0),
- }
- go clus.serveTesterServer()
-
- clus.rateLimiter = rate.NewLimiter(
- rate.Limit(int(clus.Tester.StressQPS)),
- int(clus.Tester.StressQPS),
- )
-
- clus.setStresserChecker()
-
- return clus, nil
-}
-
-// EtcdClientEndpoints returns all etcd client endpoints.
-func (clus *Cluster) EtcdClientEndpoints() (css []string) {
- css = make([]string, len(clus.Members))
- for i := range clus.Members {
- css[i] = clus.Members[i].EtcdClientEndpoint
- }
- return css
-}
-
-func (clus *Cluster) serveTesterServer() {
- clus.lg.Info(
- "started tester HTTP server",
- zap.String("tester-address", clus.Tester.Addr),
- )
- err := clus.testerHTTPServer.ListenAndServe()
- clus.lg.Info(
- "tester HTTP server returned",
- zap.String("tester-address", clus.Tester.Addr),
- zap.Error(err),
- )
- if err != nil && err != http.ErrServerClosed {
- clus.lg.Fatal("tester HTTP errored", zap.Error(err))
- }
-}
-
-func (clus *Cluster) updateCases() {
- for _, cs := range clus.Tester.Cases {
- switch cs {
- case "SIGTERM_ONE_FOLLOWER":
- clus.cases = append(clus.cases,
- new_Case_SIGTERM_ONE_FOLLOWER(clus))
- case "SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
- clus.cases = append(clus.cases,
- new_Case_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus))
- case "SIGTERM_LEADER":
- clus.cases = append(clus.cases,
- new_Case_SIGTERM_LEADER(clus))
- case "SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT":
- clus.cases = append(clus.cases,
- new_Case_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus))
- case "SIGTERM_QUORUM":
- clus.cases = append(clus.cases,
- new_Case_SIGTERM_QUORUM(clus))
- case "SIGTERM_ALL":
- clus.cases = append(clus.cases,
- new_Case_SIGTERM_ALL(clus))
-
- case "SIGQUIT_AND_REMOVE_ONE_FOLLOWER":
- clus.cases = append(clus.cases,
- new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER(clus))
- case "SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
- clus.cases = append(clus.cases,
- new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus))
- case "SIGQUIT_AND_REMOVE_LEADER":
- clus.cases = append(clus.cases,
- new_Case_SIGQUIT_AND_REMOVE_LEADER(clus))
- case "SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT":
- clus.cases = append(clus.cases,
- new_Case_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus))
- case "SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH":
- clus.cases = append(clus.cases,
- new_Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH(clus))
-
- case "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER":
- clus.cases = append(clus.cases,
- new_Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER(clus))
- case "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
- clus.cases = append(clus.cases,
- new_Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT())
- case "BLACKHOLE_PEER_PORT_TX_RX_LEADER":
- clus.cases = append(clus.cases,
- new_Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER(clus))
- case "BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":
- clus.cases = append(clus.cases,
- new_Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT())
- case "BLACKHOLE_PEER_PORT_TX_RX_QUORUM":
- clus.cases = append(clus.cases,
- new_Case_BLACKHOLE_PEER_PORT_TX_RX_QUORUM(clus))
- case "BLACKHOLE_PEER_PORT_TX_RX_ALL":
- clus.cases = append(clus.cases,
- new_Case_BLACKHOLE_PEER_PORT_TX_RX_ALL(clus))
-
- case "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER":
- clus.cases = append(clus.cases,
- new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER(clus, false))
- case "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER":
- clus.cases = append(clus.cases,
- new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER(clus, true))
- case "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
- clus.cases = append(clus.cases,
- new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus, false))
- case "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
- clus.cases = append(clus.cases,
- new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus, true))
- case "DELAY_PEER_PORT_TX_RX_LEADER":
- clus.cases = append(clus.cases,
- new_Case_DELAY_PEER_PORT_TX_RX_LEADER(clus, false))
- case "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER":
- clus.cases = append(clus.cases,
- new_Case_DELAY_PEER_PORT_TX_RX_LEADER(clus, true))
- case "DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":
- clus.cases = append(clus.cases,
- new_Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus, false))
- case "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":
- clus.cases = append(clus.cases,
- new_Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus, true))
- case "DELAY_PEER_PORT_TX_RX_QUORUM":
- clus.cases = append(clus.cases,
- new_Case_DELAY_PEER_PORT_TX_RX_QUORUM(clus, false))
- case "RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM":
- clus.cases = append(clus.cases,
- new_Case_DELAY_PEER_PORT_TX_RX_QUORUM(clus, true))
- case "DELAY_PEER_PORT_TX_RX_ALL":
- clus.cases = append(clus.cases,
- new_Case_DELAY_PEER_PORT_TX_RX_ALL(clus, false))
- case "RANDOM_DELAY_PEER_PORT_TX_RX_ALL":
- clus.cases = append(clus.cases,
- new_Case_DELAY_PEER_PORT_TX_RX_ALL(clus, true))
-
- case "NO_FAIL_WITH_STRESS":
- clus.cases = append(clus.cases,
- new_Case_NO_FAIL_WITH_STRESS(clus))
- case "NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS":
- clus.cases = append(clus.cases,
- new_Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS(clus))
-
- case "EXTERNAL":
- clus.cases = append(clus.cases,
- new_Case_EXTERNAL(clus.Tester.ExternalExecPath))
- case "FAILPOINTS":
- fpFailures, fperr := failpointFailures(clus)
- if len(fpFailures) == 0 {
- clus.lg.Info("no failpoints found!", zap.Error(fperr))
- }
- clus.cases = append(clus.cases,
- fpFailures...)
- case "FAILPOINTS_WITH_DISK_IO_LATENCY":
- fpFailures, fperr := failpointDiskIOFailures(clus)
- if len(fpFailures) == 0 {
- clus.lg.Info("no failpoints found!", zap.Error(fperr))
- }
- clus.cases = append(clus.cases,
- fpFailures...)
- }
- }
-}
-
-func (clus *Cluster) listCases() (css []string) {
- css = make([]string, len(clus.cases))
- for i := range clus.cases {
- css[i] = clus.cases[i].Desc()
- }
- return css
-}
-
-// UpdateDelayLatencyMs updates the delay latency with a random value
-// within the election timeout (see the worked example after this function).
-func (clus *Cluster) UpdateDelayLatencyMs() {
- rand.Seed(time.Now().UnixNano())
- clus.Tester.UpdatedDelayLatencyMs = uint32(rand.Int63n(clus.Members[0].Etcd.ElectionTimeoutMs))
-
- minLatRv := clus.Tester.DelayLatencyMsRv + clus.Tester.DelayLatencyMsRv/5
- if clus.Tester.UpdatedDelayLatencyMs <= minLatRv {
- clus.Tester.UpdatedDelayLatencyMs += minLatRv
- }
-}
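
As a worked example with the sample values from functional.yaml reproduced in cluster_test.go below (ElectionTimeoutMs = 1000, DelayLatencyMsRv = 500): the updated delay is drawn uniformly from [0, 1000) ms, minLatRv = 500 + 500/5 = 600 ms, and any draw of 600 ms or less is bumped by another 600 ms, so a draw of 250 ms becomes 850 ms.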
-
-func (clus *Cluster) setStresserChecker() {
- css := &compositeStresser{}
- lss := []*leaseStresser{}
- rss := []*runnerStresser{}
- for _, m := range clus.Members {
- sss := newStresser(clus, m)
- css.stressers = append(css.stressers, &compositeStresser{sss})
- for _, s := range sss {
- if v, ok := s.(*leaseStresser); ok {
- lss = append(lss, v)
- clus.lg.Info("added lease stresser", zap.String("endpoint", m.EtcdClientEndpoint))
- }
- if v, ok := s.(*runnerStresser); ok {
- rss = append(rss, v)
- clus.lg.Info("added lease stresser", zap.String("endpoint", m.EtcdClientEndpoint))
- }
- }
- }
- clus.stresser = css
-
- for _, cs := range clus.Tester.Checkers {
- switch cs {
- case "KV_HASH":
- clus.checkers = append(clus.checkers, newKVHashChecker(clus))
-
- case "LEASE_EXPIRE":
- for _, ls := range lss {
- clus.checkers = append(clus.checkers, newLeaseExpireChecker(ls))
- }
-
- case "RUNNER":
- for _, rs := range rss {
- clus.checkers = append(clus.checkers, newRunnerChecker(rs.etcdClientEndpoint, rs.errc))
- }
-
- case "NO_CHECK":
- clus.checkers = append(clus.checkers, newNoChecker())
-
- case "SHORT_TTL_LEASE_EXPIRE":
- for _, ls := range lss {
- clus.checkers = append(clus.checkers, newShortTTLLeaseExpireChecker(ls))
- }
- }
- }
- clus.lg.Info("updated stressers")
-}
-
-func (clus *Cluster) runCheckers(exceptions ...rpcpb.Checker) (err error) {
- defer func() {
- if err != nil {
- return
- }
- if err = clus.updateRevision(); err != nil {
- clus.lg.Warn(
- "updateRevision failed",
- zap.Error(err),
- )
- return
- }
- }()
-
- exs := make(map[rpcpb.Checker]struct{})
- for _, e := range exceptions {
- exs[e] = struct{}{}
- }
- for _, chk := range clus.checkers {
- clus.lg.Warn(
- "consistency check START",
- zap.String("checker", chk.Type().String()),
- zap.Strings("client-endpoints", chk.EtcdClientEndpoints()),
- )
- err = chk.Check()
- clus.lg.Warn(
- "consistency check END",
- zap.String("checker", chk.Type().String()),
- zap.Strings("client-endpoints", chk.EtcdClientEndpoints()),
- zap.Error(err),
- )
- if err != nil {
- _, ok := exs[chk.Type()]
- if !ok {
- return err
- }
- clus.lg.Warn(
- "consistency check SKIP FAIL",
- zap.String("checker", chk.Type().String()),
- zap.Strings("client-endpoints", chk.EtcdClientEndpoints()),
- zap.Error(err),
- )
- }
- }
- return nil
-}
-
-// Send_INITIAL_START_ETCD bootstraps the etcd cluster for the very first time.
-// After this, just continue to call kill/restart.
-func (clus *Cluster) Send_INITIAL_START_ETCD() error {
- // this is the only time the request is created from scratch
- return clus.broadcast(rpcpb.Operation_INITIAL_START_ETCD)
-}
-
-// send_SIGQUIT_ETCD_AND_ARCHIVE_DATA sends the "SIGQUIT_ETCD_AND_ARCHIVE_DATA" operation.
-func (clus *Cluster) send_SIGQUIT_ETCD_AND_ARCHIVE_DATA() error {
- return clus.broadcast(rpcpb.Operation_SIGQUIT_ETCD_AND_ARCHIVE_DATA)
-}
-
-// send_RESTART_ETCD sends restart operation.
-func (clus *Cluster) send_RESTART_ETCD() error {
- return clus.broadcast(rpcpb.Operation_RESTART_ETCD)
-}
-
-func (clus *Cluster) broadcast(op rpcpb.Operation) error {
- var wg sync.WaitGroup
- wg.Add(len(clus.agentStreams))
-
- errc := make(chan error, len(clus.agentStreams))
- for i := range clus.agentStreams {
- go func(idx int, o rpcpb.Operation) {
- defer wg.Done()
- errc <- clus.sendOp(idx, o)
- }(i, op)
- }
- wg.Wait()
- close(errc)
-
- errs := []string{}
- for err := range errc {
- if err == nil {
- continue
- }
-
- if err != nil {
- destroyed := false
- if op == rpcpb.Operation_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT {
- if err == io.EOF {
- destroyed = true
- }
- if strings.Contains(err.Error(),
- "rpc error: code = Unavailable desc = transport is closing") {
- // agent server has already closed;
- // so this error is expected
- destroyed = true
- }
- if strings.Contains(err.Error(),
- "desc = os: process already finished") {
- destroyed = true
- }
- }
- if !destroyed {
- errs = append(errs, err.Error())
- }
- }
- }
-
- if len(errs) == 0 {
- return nil
- }
- return errors.New(strings.Join(errs, ", "))
-}
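
broadcast aggregates per-agent failures by joining their strings; a minimal sketch of the same fan-in written against the Go 1.20+ standard library (illustrative, assuming errors.Join is available):

```go
package sketch

import "errors"

// collectErrors drains the fan-in channel and combines the non-nil errors;
// errors.Join returns nil when no errors were collected.
func collectErrors(errc <-chan error) error {
	var errs []error
	for err := range errc {
		if err != nil {
			errs = append(errs, err)
		}
	}
	return errors.Join(errs...)
}
```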
-
-func (clus *Cluster) sendOp(idx int, op rpcpb.Operation) error {
- _, err := clus.sendOpWithResp(idx, op)
- return err
-}
-
-func (clus *Cluster) sendOpWithResp(idx int, op rpcpb.Operation) (*rpcpb.Response, error) {
- // maintain the initial member object
- // throughout the test
- clus.agentRequests[idx] = &rpcpb.Request{
- Operation: op,
- Member: clus.Members[idx],
- Tester: clus.Tester,
- }
-
- err := clus.agentStreams[idx].Send(clus.agentRequests[idx])
- clus.lg.Info(
- "sent request",
- zap.String("operation", op.String()),
- zap.String("to", clus.Members[idx].EtcdClientEndpoint),
- zap.Error(err),
- )
- if err != nil {
- return nil, err
- }
-
- resp, err := clus.agentStreams[idx].Recv()
- if resp != nil {
- clus.lg.Info(
- "received response",
- zap.String("operation", op.String()),
- zap.String("from", clus.Members[idx].EtcdClientEndpoint),
- zap.Bool("success", resp.Success),
- zap.String("status", resp.Status),
- zap.Error(err),
- )
- } else {
- clus.lg.Info(
- "received empty response",
- zap.String("operation", op.String()),
- zap.String("from", clus.Members[idx].EtcdClientEndpoint),
- zap.Error(err),
- )
- }
- if err != nil {
- return nil, err
- }
-
- if !resp.Success {
- return nil, errors.New(resp.Status)
- }
-
- m, secure := clus.Members[idx], false
- for _, cu := range m.Etcd.AdvertiseClientURLs {
- u, perr := url.Parse(cu)
- if perr != nil {
- return nil, perr
- }
- if u.Scheme == "https" { // TODO: handle unix
- secure = true
- }
- }
-
- // store TLS assets from agents/servers onto disk
- if secure && (op == rpcpb.Operation_INITIAL_START_ETCD || op == rpcpb.Operation_RESTART_ETCD) {
- dirClient := filepath.Join(
- clus.Tester.DataDir,
- clus.Members[idx].Etcd.Name,
- "fixtures",
- "client",
- )
- if err = fileutil.TouchDirAll(dirClient); err != nil {
- return nil, err
- }
-
- clientCertData := []byte(resp.Member.ClientCertData)
- if len(clientCertData) == 0 {
- return nil, fmt.Errorf("got empty client cert from %q", m.EtcdClientEndpoint)
- }
- clientCertPath := filepath.Join(dirClient, "cert.pem")
- if err = ioutil.WriteFile(clientCertPath, clientCertData, 0644); err != nil { // overwrite if exists
- return nil, err
- }
- resp.Member.ClientCertPath = clientCertPath
- clus.lg.Info(
- "saved client cert file",
- zap.String("path", clientCertPath),
- )
-
- clientKeyData := []byte(resp.Member.ClientKeyData)
- if len(clientKeyData) == 0 {
- return nil, fmt.Errorf("got empty client key from %q", m.EtcdClientEndpoint)
- }
- clientKeyPath := filepath.Join(dirClient, "key.pem")
- if err = ioutil.WriteFile(clientKeyPath, clientKeyData, 0644); err != nil { // overwrite if exists
- return nil, err
- }
- resp.Member.ClientKeyPath = clientKeyPath
- clus.lg.Info(
- "saved client key file",
- zap.String("path", clientKeyPath),
- )
-
- clientTrustedCAData := []byte(resp.Member.ClientTrustedCAData)
- if len(clientTrustedCAData) != 0 {
- // TODO: disable this when auto TLS is deprecated
- clientTrustedCAPath := filepath.Join(dirClient, "ca.pem")
- if err = ioutil.WriteFile(clientTrustedCAPath, clientTrustedCAData, 0644); err != nil { // overwrite if exists
- return nil, err
- }
- resp.Member.ClientTrustedCAPath = clientTrustedCAPath
- clus.lg.Info(
- "saved client trusted CA file",
- zap.String("path", clientTrustedCAPath),
- )
- }
-
- // no need to store peer certs for tester clients
-
- clus.Members[idx] = resp.Member
- }
-
- return resp, nil
-}
-
-// Send_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT terminates all tester connections to agents and etcd servers.
-func (clus *Cluster) Send_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT() {
- err := clus.broadcast(rpcpb.Operation_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT)
- if err != nil {
- clus.lg.Warn("destroying etcd/agents FAIL", zap.Error(err))
- } else {
- clus.lg.Info("destroying etcd/agents PASS")
- }
-
- for i, conn := range clus.agentConns {
- err := conn.Close()
- clus.lg.Info("closed connection to agent", zap.String("agent-address", clus.Members[i].AgentAddr), zap.Error(err))
- }
-
- if clus.testerHTTPServer != nil {
- ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
- err := clus.testerHTTPServer.Shutdown(ctx)
- cancel()
- clus.lg.Info("closed tester HTTP server", zap.String("tester-address", clus.Tester.Addr), zap.Error(err))
- }
-}
-
-// WaitHealth ensures all members are healthy
-// by writing a test key to etcd cluster.
-func (clus *Cluster) WaitHealth() error {
- var err error
- // wait up to 60s for the cluster to become healthy.
- // TODO: set it to a reasonable value. It is set that high because
- // a follower may take a long time to catch up with the leader after a reboot
- // under a reasonable workload (https://github.com/etcd-io/etcd/issues/2698)
- for i := 0; i < 60; i++ {
- for _, m := range clus.Members {
- if err = m.WriteHealthKey(); err != nil {
- clus.lg.Warn(
- "health check FAIL",
- zap.Int("retries", i),
- zap.String("endpoint", m.EtcdClientEndpoint),
- zap.Error(err),
- )
- break
- }
- clus.lg.Info(
- "health check PASS",
- zap.Int("retries", i),
- zap.String("endpoint", m.EtcdClientEndpoint),
- )
- }
- if err == nil {
- clus.lg.Info("health check ALL PASS")
- return nil
- }
- time.Sleep(time.Second)
- }
- return err
-}
-
-// GetLeader returns the index of leader and error if any.
-func (clus *Cluster) GetLeader() (int, error) {
- for i, m := range clus.Members {
- isLeader, err := m.IsLeader()
- if isLeader || err != nil {
- return i, err
- }
- }
- return 0, fmt.Errorf("no leader found")
-}
-
-// maxRev returns the maximum revision found on the cluster.
-func (clus *Cluster) maxRev() (rev int64, err error) {
- ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
- defer cancel()
- revc, errc := make(chan int64, len(clus.Members)), make(chan error, len(clus.Members))
- for i := range clus.Members {
- go func(m *rpcpb.Member) {
- mrev, merr := m.Rev(ctx)
- revc <- mrev
- errc <- merr
- }(clus.Members[i])
- }
- for i := 0; i < len(clus.Members); i++ {
- if merr := <-errc; merr != nil {
- err = merr
- }
- if mrev := <-revc; mrev > rev {
- rev = mrev
- }
- }
- return rev, err
-}
-
-func (clus *Cluster) getRevisionHash() (map[string]int64, map[string]int64, error) {
- revs := make(map[string]int64)
- hashes := make(map[string]int64)
- for _, m := range clus.Members {
- rev, hash, err := m.RevHash()
- if err != nil {
- return nil, nil, err
- }
- revs[m.EtcdClientEndpoint] = rev
- hashes[m.EtcdClientEndpoint] = hash
- }
- return revs, hashes, nil
-}
-
-func (clus *Cluster) compactKV(rev int64, timeout time.Duration) (err error) {
- if rev <= 0 {
- return nil
- }
-
- for i, m := range clus.Members {
- clus.lg.Info(
- "compact START",
- zap.String("endpoint", m.EtcdClientEndpoint),
- zap.Int64("compact-revision", rev),
- zap.Duration("timeout", timeout),
- )
- now := time.Now()
- cerr := m.Compact(rev, timeout)
- succeed := true
- if cerr != nil {
- if strings.Contains(cerr.Error(), "required revision has been compacted") && i > 0 {
- clus.lg.Info(
- "compact error is ignored",
- zap.String("endpoint", m.EtcdClientEndpoint),
- zap.Int64("compact-revision", rev),
- zap.Error(cerr),
- )
- } else {
- clus.lg.Warn(
- "compact FAIL",
- zap.String("endpoint", m.EtcdClientEndpoint),
- zap.Int64("compact-revision", rev),
- zap.Error(cerr),
- )
- err = cerr
- succeed = false
- }
- }
-
- if succeed {
- clus.lg.Info(
- "compact PASS",
- zap.String("endpoint", m.EtcdClientEndpoint),
- zap.Int64("compact-revision", rev),
- zap.Duration("timeout", timeout),
- zap.Duration("took", time.Since(now)),
- )
- }
- }
- return err
-}
-
-func (clus *Cluster) checkCompact(rev int64) error {
- if rev == 0 {
- return nil
- }
- for _, m := range clus.Members {
- if err := m.CheckCompact(rev); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (clus *Cluster) defrag() error {
- for _, m := range clus.Members {
- if err := m.Defrag(); err != nil {
- clus.lg.Warn(
- "defrag FAIL",
- zap.String("endpoint", m.EtcdClientEndpoint),
- zap.Error(err),
- )
- return err
- }
- clus.lg.Info(
- "defrag PASS",
- zap.String("endpoint", m.EtcdClientEndpoint),
- )
- }
- clus.lg.Info(
- "defrag ALL PASS",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- )
- return nil
-}
-
-// GetCaseDelayDuration returns the configured delay duration between test cases.
-func (clus *Cluster) GetCaseDelayDuration() time.Duration {
- return time.Duration(clus.Tester.CaseDelayMs) * time.Millisecond
-}
-
-// Report reports the number of modified keys.
-func (clus *Cluster) Report() int64 {
- return clus.stresser.ModifiedKeys()
-}
diff --git a/tests/functional/tester/cluster_read_config.go b/tests/functional/tester/cluster_read_config.go
deleted file mode 100644
index d9a5cf0589d..00000000000
--- a/tests/functional/tester/cluster_read_config.go
+++ /dev/null
@@ -1,376 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "errors"
- "fmt"
- "io/ioutil"
- "net/url"
- "path/filepath"
- "strings"
-
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
- "go.uber.org/zap"
- yaml "gopkg.in/yaml.v2"
-)
-
-func read(lg *zap.Logger, fpath string) (*Cluster, error) {
- bts, err := ioutil.ReadFile(fpath)
- if err != nil {
- return nil, err
- }
- lg.Info("opened configuration file", zap.String("path", fpath))
-
- clus := &Cluster{lg: lg}
- if err = yaml.Unmarshal(bts, clus); err != nil {
- return nil, err
- }
-
- if len(clus.Members) < 3 {
- return nil, fmt.Errorf("len(clus.Members) expects at least 3, got %d", len(clus.Members))
- }
-
- failpointsEnabled := false
- for _, c := range clus.Tester.Cases {
- if c == rpcpb.Case_FAILPOINTS.String() {
- failpointsEnabled = true
- break
- }
- }
-
- if len(clus.Tester.Cases) == 0 {
- return nil, errors.New("cases not found")
- }
- if clus.Tester.DelayLatencyMs <= clus.Tester.DelayLatencyMsRv*5 {
- return nil, fmt.Errorf("delay latency %d ms must be greater than 5x of delay latency random variable %d ms", clus.Tester.DelayLatencyMs, clus.Tester.DelayLatencyMsRv)
- }
- if clus.Tester.UpdatedDelayLatencyMs == 0 {
- clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs
- }
-
- for _, v := range clus.Tester.Cases {
- if _, ok := rpcpb.Case_value[v]; !ok {
- return nil, fmt.Errorf("%q is not defined in 'rpcpb.Case_value'", v)
- }
- }
-
- for _, s := range clus.Tester.Stressers {
- if _, ok := rpcpb.StresserType_value[s.Type]; !ok {
- return nil, fmt.Errorf("unknown 'StresserType' %+v", s)
- }
- }
-
- for _, v := range clus.Tester.Checkers {
- if _, ok := rpcpb.Checker_value[v]; !ok {
- return nil, fmt.Errorf("Checker is unknown; got %q", v)
- }
- }
-
- if clus.Tester.StressKeySuffixRangeTxn > 100 {
- return nil, fmt.Errorf("StressKeySuffixRangeTxn maximum value is 100, got %v", clus.Tester.StressKeySuffixRangeTxn)
- }
- if clus.Tester.StressKeyTxnOps > 64 {
- return nil, fmt.Errorf("StressKeyTxnOps maximum value is 64, got %v", clus.Tester.StressKeyTxnOps)
- }
-
- for i, mem := range clus.Members {
- if mem.EtcdExec == "embed" && failpointsEnabled {
- return nil, errors.New("EtcdExec 'embed' cannot be run with failpoints enabled")
- }
- if mem.BaseDir == "" {
- return nil, fmt.Errorf("BaseDir cannot be empty (got %q)", mem.BaseDir)
- }
- if mem.Etcd.Name == "" {
- return nil, fmt.Errorf("'--name' cannot be empty (got %+v)", mem)
- }
- if mem.Etcd.DataDir == "" {
- return nil, fmt.Errorf("'--data-dir' cannot be empty (got %+v)", mem)
- }
- if mem.Etcd.SnapshotCount == 0 {
- return nil, fmt.Errorf("'--snapshot-count' cannot be 0 (got %+v)", mem.Etcd.SnapshotCount)
- }
- if mem.Etcd.DataDir == "" {
- return nil, fmt.Errorf("'--data-dir' cannot be empty (got %q)", mem.Etcd.DataDir)
- }
- if mem.Etcd.WALDir == "" {
- clus.Members[i].Etcd.WALDir = filepath.Join(mem.Etcd.DataDir, "member", "wal")
- }
-
- switch mem.Etcd.InitialClusterState {
- case "new":
- case "existing":
- default:
- return nil, fmt.Errorf("'--initial-cluster-state' got %q", mem.Etcd.InitialClusterState)
- }
-
- if mem.Etcd.HeartbeatIntervalMs == 0 {
- return nil, fmt.Errorf("'--heartbeat-interval' cannot be 0 (got %+v)", mem.Etcd)
- }
- if mem.Etcd.ElectionTimeoutMs == 0 {
- return nil, fmt.Errorf("'--election-timeout' cannot be 0 (got %+v)", mem.Etcd)
- }
- if int64(clus.Tester.DelayLatencyMs) <= mem.Etcd.ElectionTimeoutMs {
- return nil, fmt.Errorf("delay latency %d ms must be greater than election timeout %d ms", clus.Tester.DelayLatencyMs, mem.Etcd.ElectionTimeoutMs)
- }
-
- port := ""
- listenClientPorts := make([]string, len(clus.Members))
- for i, u := range mem.Etcd.ListenClientURLs {
- if !isValidURL(u) {
- return nil, fmt.Errorf("'--listen-client-urls' has valid URL %q", u)
- }
- listenClientPorts[i], err = getPort(u)
- if err != nil {
- return nil, fmt.Errorf("'--listen-client-urls' has no port %q", u)
- }
- }
- for i, u := range mem.Etcd.AdvertiseClientURLs {
- if !isValidURL(u) {
- return nil, fmt.Errorf("'--advertise-client-urls' has valid URL %q", u)
- }
- port, err = getPort(u)
- if err != nil {
- return nil, fmt.Errorf("'--advertise-client-urls' has no port %q", u)
- }
- if mem.EtcdClientProxy && listenClientPorts[i] == port {
- return nil, fmt.Errorf("clus.Members[%d] requires client port proxy, but advertise port %q conflicts with listener port %q", i, port, listenClientPorts[i])
- }
- }
-
- listenPeerPorts := make([]string, len(clus.Members))
- for i, u := range mem.Etcd.ListenPeerURLs {
- if !isValidURL(u) {
- return nil, fmt.Errorf("'--listen-peer-urls' has valid URL %q", u)
- }
- listenPeerPorts[i], err = getPort(u)
- if err != nil {
- return nil, fmt.Errorf("'--listen-peer-urls' has no port %q", u)
- }
- }
- for j, u := range mem.Etcd.AdvertisePeerURLs {
- if !isValidURL(u) {
- return nil, fmt.Errorf("'--initial-advertise-peer-urls' has valid URL %q", u)
- }
- port, err = getPort(u)
- if err != nil {
- return nil, fmt.Errorf("'--initial-advertise-peer-urls' has no port %q", u)
- }
- if mem.EtcdPeerProxy && listenPeerPorts[j] == port {
- return nil, fmt.Errorf("clus.Members[%d] requires peer port proxy, but advertise port %q conflicts with listener port %q", i, port, listenPeerPorts[j])
- }
- }
-
- if !strings.HasPrefix(mem.Etcd.DataDir, mem.BaseDir) {
- return nil, fmt.Errorf("Etcd.DataDir must be prefixed with BaseDir (got %q)", mem.Etcd.DataDir)
- }
-
- // TODO: support separate WALDir that can be handled via failure-archive
- if !strings.HasPrefix(mem.Etcd.WALDir, mem.BaseDir) {
- return nil, fmt.Errorf("Etcd.WALDir must be prefixed with BaseDir (got %q)", mem.Etcd.WALDir)
- }
-
- // TODO: only support generated certs with TLS generator
- // deprecate auto TLS
- if mem.Etcd.PeerAutoTLS && mem.Etcd.PeerCertFile != "" {
- return nil, fmt.Errorf("Etcd.PeerAutoTLS 'true', but Etcd.PeerCertFile is %q", mem.Etcd.PeerCertFile)
- }
- if mem.Etcd.PeerAutoTLS && mem.Etcd.PeerKeyFile != "" {
- return nil, fmt.Errorf("Etcd.PeerAutoTLS 'true', but Etcd.PeerKeyFile is %q", mem.Etcd.PeerKeyFile)
- }
- if mem.Etcd.PeerAutoTLS && mem.Etcd.PeerTrustedCAFile != "" {
- return nil, fmt.Errorf("Etcd.PeerAutoTLS 'true', but Etcd.PeerTrustedCAFile is %q", mem.Etcd.PeerTrustedCAFile)
- }
- if mem.Etcd.ClientAutoTLS && mem.Etcd.ClientCertFile != "" {
- return nil, fmt.Errorf("Etcd.ClientAutoTLS 'true', but Etcd.ClientCertFile is %q", mem.Etcd.ClientCertFile)
- }
- if mem.Etcd.ClientAutoTLS && mem.Etcd.ClientKeyFile != "" {
- return nil, fmt.Errorf("Etcd.ClientAutoTLS 'true', but Etcd.ClientKeyFile is %q", mem.Etcd.ClientKeyFile)
- }
- if mem.Etcd.ClientAutoTLS && mem.Etcd.ClientTrustedCAFile != "" {
- return nil, fmt.Errorf("Etcd.ClientAutoTLS 'true', but Etcd.ClientTrustedCAFile is %q", mem.Etcd.ClientTrustedCAFile)
- }
-
- if mem.Etcd.PeerClientCertAuth && mem.Etcd.PeerCertFile == "" {
- return nil, fmt.Errorf("Etcd.PeerClientCertAuth 'true', but Etcd.PeerCertFile is %q", mem.Etcd.PeerCertFile)
- }
- if mem.Etcd.PeerClientCertAuth && mem.Etcd.PeerKeyFile == "" {
- return nil, fmt.Errorf("Etcd.PeerClientCertAuth 'true', but Etcd.PeerKeyFile is %q", mem.Etcd.PeerCertFile)
- }
- // only support self-signed certs
- if mem.Etcd.PeerClientCertAuth && mem.Etcd.PeerTrustedCAFile == "" {
- return nil, fmt.Errorf("Etcd.PeerClientCertAuth 'true', but Etcd.PeerTrustedCAFile is %q", mem.Etcd.PeerCertFile)
- }
- if !mem.Etcd.PeerClientCertAuth && mem.Etcd.PeerCertFile != "" {
- return nil, fmt.Errorf("Etcd.PeerClientCertAuth 'false', but Etcd.PeerCertFile is %q", mem.Etcd.PeerCertFile)
- }
- if !mem.Etcd.PeerClientCertAuth && mem.Etcd.PeerKeyFile != "" {
- return nil, fmt.Errorf("Etcd.PeerClientCertAuth 'false', but Etcd.PeerKeyFile is %q", mem.Etcd.PeerCertFile)
- }
- if !mem.Etcd.PeerClientCertAuth && mem.Etcd.PeerTrustedCAFile != "" {
- return nil, fmt.Errorf("Etcd.PeerClientCertAuth 'false', but Etcd.PeerTrustedCAFile is %q", mem.Etcd.PeerTrustedCAFile)
- }
- if mem.Etcd.PeerClientCertAuth && mem.Etcd.PeerAutoTLS {
- return nil, fmt.Errorf("Etcd.PeerClientCertAuth and Etcd.PeerAutoTLS cannot be both 'true'")
- }
- if (mem.Etcd.PeerCertFile == "") != (mem.Etcd.PeerKeyFile == "") {
- return nil, fmt.Errorf("both Etcd.PeerCertFile %q and Etcd.PeerKeyFile %q must be either empty or non-empty", mem.Etcd.PeerCertFile, mem.Etcd.PeerKeyFile)
- }
- if mem.Etcd.ClientCertAuth && mem.Etcd.ClientAutoTLS {
- return nil, fmt.Errorf("Etcd.ClientCertAuth and Etcd.ClientAutoTLS cannot be both 'true'")
- }
- if mem.Etcd.ClientCertAuth && mem.Etcd.ClientCertFile == "" {
- return nil, fmt.Errorf("Etcd.ClientCertAuth 'true', but Etcd.ClientCertFile is %q", mem.Etcd.PeerCertFile)
- }
- if mem.Etcd.ClientCertAuth && mem.Etcd.ClientKeyFile == "" {
- return nil, fmt.Errorf("Etcd.ClientCertAuth 'true', but Etcd.ClientKeyFile is %q", mem.Etcd.PeerCertFile)
- }
- if mem.Etcd.ClientCertAuth && mem.Etcd.ClientTrustedCAFile == "" {
- return nil, fmt.Errorf("Etcd.ClientCertAuth 'true', but Etcd.ClientTrustedCAFile is %q", mem.Etcd.ClientTrustedCAFile)
- }
- if !mem.Etcd.ClientCertAuth && mem.Etcd.ClientCertFile != "" {
- return nil, fmt.Errorf("Etcd.ClientCertAuth 'false', but Etcd.ClientCertFile is %q", mem.Etcd.PeerCertFile)
- }
- if !mem.Etcd.ClientCertAuth && mem.Etcd.ClientKeyFile != "" {
- return nil, fmt.Errorf("Etcd.ClientCertAuth 'false', but Etcd.ClientKeyFile is %q", mem.Etcd.PeerCertFile)
- }
- if !mem.Etcd.ClientCertAuth && mem.Etcd.ClientTrustedCAFile != "" {
- return nil, fmt.Errorf("Etcd.ClientCertAuth 'false', but Etcd.ClientTrustedCAFile is %q", mem.Etcd.PeerCertFile)
- }
- if (mem.Etcd.ClientCertFile == "") != (mem.Etcd.ClientKeyFile == "") {
- return nil, fmt.Errorf("both Etcd.ClientCertFile %q and Etcd.ClientKeyFile %q must be either empty or non-empty", mem.Etcd.ClientCertFile, mem.Etcd.ClientKeyFile)
- }
-
- peerTLS := mem.Etcd.PeerAutoTLS ||
- (mem.Etcd.PeerClientCertAuth && mem.Etcd.PeerCertFile != "" && mem.Etcd.PeerKeyFile != "" && mem.Etcd.PeerTrustedCAFile != "")
- if peerTLS {
- for _, cu := range mem.Etcd.ListenPeerURLs {
- var u *url.URL
- u, err = url.Parse(cu)
- if err != nil {
- return nil, err
- }
- if u.Scheme != "https" { // TODO: support unix
- return nil, fmt.Errorf("peer TLS is enabled with wrong scheme %q", cu)
- }
- }
- for _, cu := range mem.Etcd.AdvertisePeerURLs {
- var u *url.URL
- u, err = url.Parse(cu)
- if err != nil {
- return nil, err
- }
- if u.Scheme != "https" { // TODO: support unix
- return nil, fmt.Errorf("peer TLS is enabled with wrong scheme %q", cu)
- }
- }
- clus.Members[i].PeerCertPath = mem.Etcd.PeerCertFile
- if mem.Etcd.PeerCertFile != "" {
- var data []byte
- data, err = ioutil.ReadFile(mem.Etcd.PeerCertFile)
- if err != nil {
- return nil, fmt.Errorf("failed to read %q (%v)", mem.Etcd.PeerCertFile, err)
- }
- clus.Members[i].PeerCertData = string(data)
- }
- clus.Members[i].PeerKeyPath = mem.Etcd.PeerKeyFile
- if mem.Etcd.PeerKeyFile != "" {
- var data []byte
- data, err = ioutil.ReadFile(mem.Etcd.PeerKeyFile)
- if err != nil {
- return nil, fmt.Errorf("failed to read %q (%v)", mem.Etcd.PeerKeyFile, err)
- }
- clus.Members[i].PeerKeyData = string(data)
- }
- clus.Members[i].PeerTrustedCAPath = mem.Etcd.PeerTrustedCAFile
- if mem.Etcd.PeerTrustedCAFile != "" {
- var data []byte
- data, err = ioutil.ReadFile(mem.Etcd.PeerTrustedCAFile)
- if err != nil {
- return nil, fmt.Errorf("failed to read %q (%v)", mem.Etcd.PeerTrustedCAFile, err)
- }
- clus.Members[i].PeerTrustedCAData = string(data)
- }
- }
-
- clientTLS := mem.Etcd.ClientAutoTLS ||
- (mem.Etcd.ClientCertAuth && mem.Etcd.ClientCertFile != "" && mem.Etcd.ClientKeyFile != "" && mem.Etcd.ClientTrustedCAFile != "")
- if clientTLS {
- for _, cu := range mem.Etcd.ListenClientURLs {
- var u *url.URL
- u, err = url.Parse(cu)
- if err != nil {
- return nil, err
- }
- if u.Scheme != "https" { // TODO: support unix
- return nil, fmt.Errorf("client TLS is enabled with wrong scheme %q", cu)
- }
- }
- for _, cu := range mem.Etcd.AdvertiseClientURLs {
- var u *url.URL
- u, err = url.Parse(cu)
- if err != nil {
- return nil, err
- }
- if u.Scheme != "https" { // TODO: support unix
- return nil, fmt.Errorf("client TLS is enabled with wrong scheme %q", cu)
- }
- }
- clus.Members[i].ClientCertPath = mem.Etcd.ClientCertFile
- if mem.Etcd.ClientCertFile != "" {
- var data []byte
- data, err = ioutil.ReadFile(mem.Etcd.ClientCertFile)
- if err != nil {
- return nil, fmt.Errorf("failed to read %q (%v)", mem.Etcd.ClientCertFile, err)
- }
- clus.Members[i].ClientCertData = string(data)
- }
- clus.Members[i].ClientKeyPath = mem.Etcd.ClientKeyFile
- if mem.Etcd.ClientKeyFile != "" {
- var data []byte
- data, err = ioutil.ReadFile(mem.Etcd.ClientKeyFile)
- if err != nil {
- return nil, fmt.Errorf("failed to read %q (%v)", mem.Etcd.ClientKeyFile, err)
- }
- clus.Members[i].ClientKeyData = string(data)
- }
- clus.Members[i].ClientTrustedCAPath = mem.Etcd.ClientTrustedCAFile
- if mem.Etcd.ClientTrustedCAFile != "" {
- var data []byte
- data, err = ioutil.ReadFile(mem.Etcd.ClientTrustedCAFile)
- if err != nil {
- return nil, fmt.Errorf("failed to read %q (%v)", mem.Etcd.ClientTrustedCAFile, err)
- }
- clus.Members[i].ClientTrustedCAData = string(data)
- }
-
- if len(mem.Etcd.LogOutputs) == 0 {
- return nil, fmt.Errorf("mem.Etcd.LogOutputs cannot be empty")
- }
- for _, v := range mem.Etcd.LogOutputs {
- switch v {
- case "stderr", "stdout", "/dev/null", "default":
- default:
- if !strings.HasPrefix(v, mem.BaseDir) {
- return nil, fmt.Errorf("LogOutput %q must be prefixed with BaseDir %q", v, mem.BaseDir)
- }
- }
- }
- }
- }
-
- return clus, err
-}
diff --git a/tests/functional/tester/cluster_run.go b/tests/functional/tester/cluster_run.go
deleted file mode 100644
index ac65ebca3e9..00000000000
--- a/tests/functional/tester/cluster_run.go
+++ /dev/null
@@ -1,378 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "fmt"
- "os"
- "time"
-
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
- "go.uber.org/zap"
-)
-
-// compactQPS is the rough number of entries that can be compacted per second.
-// Previous tests showed etcd can compact about 60,000 entries per second.
-const compactQPS = 50000
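
Run below budgets each compaction as 10 s plus modifiedKey / compactQPS seconds. As a worked example, a round that modified 1,000,000 keys gets a timeout of 10 s + 1,000,000 / 50,000 s = 30 s.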
-
-// Run starts tester.
-func (clus *Cluster) Run() {
- defer printReport()
-
- // updateCases must be executed after etcd is started, because the FAILPOINTS case
- // needs to obtain all the failpoints from the etcd member.
- clus.updateCases()
-
- if err := fileutil.TouchDirAll(clus.Tester.DataDir); err != nil {
- clus.lg.Panic(
- "failed to create test data directory",
- zap.String("dir", clus.Tester.DataDir),
- zap.Error(err),
- )
- }
-
- var preModifiedKey int64
- for round := 0; round < int(clus.Tester.RoundLimit) || clus.Tester.RoundLimit == -1; round++ {
- roundTotalCounter.Inc()
- clus.rd = round
-
- if err := clus.doRound(); err != nil {
- clus.lg.Error(
- "round FAIL",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- zap.Error(err),
- )
- if clus.cleanup(err) != nil {
- return
- }
- // reset preModifiedKey after clean up
- preModifiedKey = 0
- continue
- }
-
- // -1 so that logPrefix doesn't print out 'case'
- clus.cs = -1
-
- revToCompact := max(0, clus.currentRevision-10000)
- currentModifiedKey := clus.stresser.ModifiedKeys()
- modifiedKey := currentModifiedKey - preModifiedKey
- preModifiedKey = currentModifiedKey
- timeout := 10 * time.Second
- timeout += time.Duration(modifiedKey/compactQPS) * time.Second
- clus.lg.Info(
- "compact START",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- zap.Duration("timeout", timeout),
- )
- if err := clus.compact(revToCompact, timeout); err != nil {
- clus.lg.Warn(
- "compact FAIL",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- zap.Error(err),
- )
- if err = clus.cleanup(err); err != nil {
- clus.lg.Warn(
- "cleanup FAIL",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- zap.Error(err),
- )
- return
- }
- // reset preModifiedKey after clean up
- preModifiedKey = 0
- }
- if round > 0 && round%500 == 0 { // every 500 rounds
- if err := clus.defrag(); err != nil {
- clus.failed(err)
- return
- }
- }
- }
-
- clus.lg.Info(
- "functional-tester PASS",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- )
-}
-
-func (clus *Cluster) doRound() error {
- if clus.Tester.CaseShuffle {
- clus.shuffleCases()
- }
-
- roundNow := time.Now()
- clus.lg.Info(
- "round START",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- zap.Strings("cases", clus.listCases()),
- )
- for i, fa := range clus.cases {
- clus.cs = i
-
- caseTotal[fa.Desc()]++
- caseTotalCounter.WithLabelValues(fa.Desc()).Inc()
-
- caseNow := time.Now()
- clus.lg.Info(
- "case START",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- zap.String("desc", fa.Desc()),
- )
-
- clus.lg.Info("wait health before injecting failures")
- if err := clus.WaitHealth(); err != nil {
- return fmt.Errorf("wait full health error: %v", err)
- }
-
- stressStarted := false
- fcase := fa.TestCase()
- if fcase != rpcpb.Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS {
- clus.lg.Info(
- "stress START",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- zap.String("desc", fa.Desc()),
- )
- if err := clus.stresser.Stress(); err != nil {
- return fmt.Errorf("start stresser error: %v", err)
- }
- stressStarted = true
- }
-
- clus.lg.Info(
- "inject START",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- zap.String("desc", fa.Desc()),
- )
- if err := fa.Inject(clus); err != nil {
- return fmt.Errorf("injection error: %v", err)
- }
-
- // if run locally, the recovering server may conflict
- // with the stressing clients' ports
- // TODO: use unix for local tests
- clus.lg.Info(
- "recover START",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- zap.String("desc", fa.Desc()),
- )
- if err := fa.Recover(clus); err != nil {
- return fmt.Errorf("recovery error: %v", err)
- }
-
- if stressStarted {
- clus.lg.Info(
- "stress PAUSE",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- zap.String("desc", fa.Desc()),
- )
- ems := clus.stresser.Pause()
- if fcase == rpcpb.Case_NO_FAIL_WITH_STRESS && len(ems) > 0 {
- ess := make([]string, 0, len(ems))
- cnt := 0
- for k, v := range ems {
- ess = append(ess, fmt.Sprintf("%s (count: %d)", k, v))
- cnt += v
- }
- clus.lg.Warn(
- "expected no errors",
- zap.String("desc", fa.Desc()),
- zap.Strings("errors", ess),
- )
-
- // with network delay, some ongoing requests may fail;
- // only return an error if more than 30% of QPS requests fail (worked example below)
- if cnt > int(float64(clus.Tester.StressQPS)*0.3) {
- return fmt.Errorf("expected no error in %q, got %q", fcase.String(), ess)
- }
- }
- }
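
As a worked example with the sample StressQPS of 2000 from cluster_test.go below, the failure budget here is 0.3 × 2000 = 600: a NO_FAIL_WITH_STRESS case tolerates up to 600 errored stress requests caused by injected network delay and fails only beyond that.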
-
- clus.lg.Info(
- "health check START",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- zap.String("desc", fa.Desc()),
- )
- if err := clus.WaitHealth(); err != nil {
- return fmt.Errorf("wait full health error: %v", err)
- }
-
- checkerFailExceptions := []rpcpb.Checker{}
- switch fcase {
- case rpcpb.Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH:
- // TODO: restore from snapshot
- checkerFailExceptions = append(checkerFailExceptions, rpcpb.Checker_LEASE_EXPIRE)
- }
-
- clus.lg.Info(
- "consistency check START",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- zap.String("desc", fa.Desc()),
- )
- if err := clus.runCheckers(checkerFailExceptions...); err != nil {
- return fmt.Errorf("consistency check error (%v)", err)
- }
- clus.lg.Info(
- "consistency check PASS",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- zap.String("desc", fa.Desc()),
- zap.Duration("took", time.Since(caseNow)),
- )
- }
-
- clus.lg.Info(
- "round ALL PASS",
- zap.Int("round", clus.rd),
- zap.Strings("cases", clus.listCases()),
- zap.Int("case-total", len(clus.cases)),
- zap.Duration("took", time.Since(roundNow)),
- )
- return nil
-}
-
-func (clus *Cluster) updateRevision() error {
- revs, _, err := clus.getRevisionHash()
- for _, rev := range revs {
- clus.currentRevision = rev
- break // just need get one of the current revisions
- }
-
- clus.lg.Info(
- "updated current revision",
- zap.Int64("current-revision", clus.currentRevision),
- )
- return err
-}
-
-func (clus *Cluster) compact(rev int64, timeout time.Duration) (err error) {
- if err = clus.compactKV(rev, timeout); err != nil {
- clus.lg.Warn(
- "compact FAIL",
- zap.Int64("current-revision", clus.currentRevision),
- zap.Int64("compact-revision", rev),
- zap.Error(err),
- )
- return err
- }
- clus.lg.Info(
- "compact DONE",
- zap.Int64("current-revision", clus.currentRevision),
- zap.Int64("compact-revision", rev),
- )
-
- if err = clus.checkCompact(rev); err != nil {
- clus.lg.Warn(
- "check compact FAIL",
- zap.Int64("current-revision", clus.currentRevision),
- zap.Int64("compact-revision", rev),
- zap.Error(err),
- )
- return err
- }
- clus.lg.Info(
- "check compact DONE",
- zap.Int64("current-revision", clus.currentRevision),
- zap.Int64("compact-revision", rev),
- )
-
- return nil
-}
-
-func (clus *Cluster) failed(err error) {
- clus.lg.Error(
- "functional-tester FAIL",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- zap.Error(err),
- )
- clus.Send_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT()
-
- os.Exit(2)
-}
-
-func (clus *Cluster) cleanup(err error) error {
- if clus.Tester.ExitOnCaseFail {
- defer clus.failed(err)
- }
-
- roundFailedTotalCounter.Inc()
- desc := "compact/defrag"
- if clus.cs != -1 {
- desc = clus.cases[clus.cs].Desc()
- }
- caseFailedTotalCounter.WithLabelValues(desc).Inc()
-
- clus.lg.Info(
- "closing stressers before archiving failure data",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- )
- clus.stresser.Close()
-
- if err := clus.send_SIGQUIT_ETCD_AND_ARCHIVE_DATA(); err != nil {
- clus.lg.Warn(
- "cleanup FAIL",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- zap.Error(err),
- )
- return err
- }
- if err := clus.send_RESTART_ETCD(); err != nil {
- clus.lg.Warn(
- "restart FAIL",
- zap.Int("round", clus.rd),
- zap.Int("case", clus.cs),
- zap.Int("case-total", len(clus.cases)),
- zap.Error(err),
- )
- return err
- }
-
- clus.setStresserChecker()
- return nil
-}
diff --git a/tests/functional/tester/cluster_shuffle.go b/tests/functional/tester/cluster_shuffle.go
deleted file mode 100644
index 16c79b2f6e2..00000000000
--- a/tests/functional/tester/cluster_shuffle.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "math/rand"
- "time"
-
- "go.uber.org/zap"
-)
-
-func (clus *Cluster) shuffleCases() {
- rand.Seed(time.Now().UnixNano())
- offset := rand.Intn(1000)
- n := len(clus.cases)
- cp := coprime(n)
-
- css := make([]Case, n)
- for i := 0; i < n; i++ {
- css[i] = clus.cases[(cp*i+offset)%n]
- }
- clus.cases = css
- clus.lg.Info("shuffled test failure cases", zap.Int("total", n))
-}
-
-/*
-x and y with gcd(x, y) == 1 are coprime to each other.
-
-x1 = ( coprime of n * idx1 + offset ) % n
-x2 = ( coprime of n * idx2 + offset ) % n
-
-If x1 == x2, then coprime of n * (idx1 - idx2) is a multiple of n; since
-gcd(coprime of n, n) == 1, that forces idx1 == idx2 for indices in [0, n).
-Distinct indices map to distinct positions, so the mapping is a permutation of the cases.
-*/
-func coprime(n int) int {
- coprime := 1
- for i := n / 2; i < n; i++ {
- if gcd(i, n) == 1 {
- coprime = i
- break
- }
- }
- return coprime
-}
-
-func gcd(x, y int) int {
- if y == 0 {
- return x
- }
- return gcd(y, x%y)
-}
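
A small, self-contained sketch (not part of the tester; it copies coprime and gcd from above and uses arbitrary sample values) showing that the coprime-and-offset mapping visits every index exactly once:

```go
package main

import "fmt"

func gcd(x, y int) int {
	if y == 0 {
		return x
	}
	return gcd(y, x%y)
}

func coprime(n int) int {
	c := 1
	for i := n / 2; i < n; i++ {
		if gcd(i, n) == 1 {
			c = i
			break
		}
	}
	return c
}

func main() {
	n, offset := 7, 3
	cp := coprime(n) // 3 for n = 7, since gcd(3, 7) == 1
	seen := make(map[int]bool, n)
	for i := 0; i < n; i++ {
		seen[(cp*i+offset)%n] = true
	}
	fmt.Println("distinct positions:", len(seen)) // prints 7: a permutation of 0..6
}
```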
diff --git a/tests/functional/tester/cluster_test.go b/tests/functional/tester/cluster_test.go
deleted file mode 100644
index f1b7392c93b..00000000000
--- a/tests/functional/tester/cluster_test.go
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "reflect"
- "sort"
- "testing"
-
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
- "go.uber.org/zap"
-)
-
-func Test_read(t *testing.T) {
- exp := &Cluster{
- Members: []*rpcpb.Member{
- {
- EtcdExec: "./bin/etcd",
- AgentAddr: "127.0.0.1:19027",
- FailpointHTTPAddr: "http://127.0.0.1:7381",
- BaseDir: "/tmp/etcd-functional-1",
- EtcdClientProxy: false,
- EtcdPeerProxy: true,
- EtcdClientEndpoint: "127.0.0.1:1379",
- Etcd: &rpcpb.Etcd{
- Name: "s1",
- DataDir: "/tmp/etcd-functional-1/etcd.data",
- WALDir: "/tmp/etcd-functional-1/etcd.data/member/wal",
- HeartbeatIntervalMs: 100,
- ElectionTimeoutMs: 1000,
- ListenClientURLs: []string{"https://127.0.0.1:1379"},
- AdvertiseClientURLs: []string{"https://127.0.0.1:1379"},
- ClientAutoTLS: true,
- ClientCertAuth: false,
- ClientCertFile: "",
- ClientKeyFile: "",
- ClientTrustedCAFile: "",
- ListenPeerURLs: []string{"https://127.0.0.1:1380"},
- AdvertisePeerURLs: []string{"https://127.0.0.1:1381"},
- PeerAutoTLS: true,
- PeerClientCertAuth: false,
- PeerCertFile: "",
- PeerKeyFile: "",
- PeerTrustedCAFile: "",
- InitialCluster: "s1=https://127.0.0.1:1381,s2=https://127.0.0.1:2381,s3=https://127.0.0.1:3381",
- InitialClusterState: "new",
- InitialClusterToken: "tkn",
- SnapshotCount: 2000,
- QuotaBackendBytes: 10740000000,
- PreVote: true,
- InitialCorruptCheck: true,
- Logger: "zap",
- LogOutputs: []string{"/tmp/etcd-functional-1/etcd.log"},
- LogLevel: "info",
- },
- ClientCertData: "",
- ClientCertPath: "",
- ClientKeyData: "",
- ClientKeyPath: "",
- ClientTrustedCAData: "",
- ClientTrustedCAPath: "",
- PeerCertData: "",
- PeerCertPath: "",
- PeerKeyData: "",
- PeerKeyPath: "",
- PeerTrustedCAData: "",
- PeerTrustedCAPath: "",
- SnapshotPath: "/tmp/etcd-functional-1.snapshot.db",
- },
- {
- EtcdExec: "./bin/etcd",
- AgentAddr: "127.0.0.1:29027",
- FailpointHTTPAddr: "http://127.0.0.1:7382",
- BaseDir: "/tmp/etcd-functional-2",
- EtcdClientProxy: false,
- EtcdPeerProxy: true,
- EtcdClientEndpoint: "127.0.0.1:2379",
- Etcd: &rpcpb.Etcd{
- Name: "s2",
- DataDir: "/tmp/etcd-functional-2/etcd.data",
- WALDir: "/tmp/etcd-functional-2/etcd.data/member/wal",
- HeartbeatIntervalMs: 100,
- ElectionTimeoutMs: 1000,
- ListenClientURLs: []string{"https://127.0.0.1:2379"},
- AdvertiseClientURLs: []string{"https://127.0.0.1:2379"},
- ClientAutoTLS: true,
- ClientCertAuth: false,
- ClientCertFile: "",
- ClientKeyFile: "",
- ClientTrustedCAFile: "",
- ListenPeerURLs: []string{"https://127.0.0.1:2380"},
- AdvertisePeerURLs: []string{"https://127.0.0.1:2381"},
- PeerAutoTLS: true,
- PeerClientCertAuth: false,
- PeerCertFile: "",
- PeerKeyFile: "",
- PeerTrustedCAFile: "",
- InitialCluster: "s1=https://127.0.0.1:1381,s2=https://127.0.0.1:2381,s3=https://127.0.0.1:3381",
- InitialClusterState: "new",
- InitialClusterToken: "tkn",
- SnapshotCount: 2000,
- QuotaBackendBytes: 10740000000,
- PreVote: true,
- InitialCorruptCheck: true,
- Logger: "zap",
- LogOutputs: []string{"/tmp/etcd-functional-2/etcd.log"},
- LogLevel: "info",
- },
- ClientCertData: "",
- ClientCertPath: "",
- ClientKeyData: "",
- ClientKeyPath: "",
- ClientTrustedCAData: "",
- ClientTrustedCAPath: "",
- PeerCertData: "",
- PeerCertPath: "",
- PeerKeyData: "",
- PeerKeyPath: "",
- PeerTrustedCAData: "",
- PeerTrustedCAPath: "",
- SnapshotPath: "/tmp/etcd-functional-2.snapshot.db",
- },
- {
- EtcdExec: "./bin/etcd",
- AgentAddr: "127.0.0.1:39027",
- FailpointHTTPAddr: "http://127.0.0.1:7383",
- BaseDir: "/tmp/etcd-functional-3",
- EtcdClientProxy: false,
- EtcdPeerProxy: true,
- EtcdClientEndpoint: "127.0.0.1:3379",
- Etcd: &rpcpb.Etcd{
- Name: "s3",
- DataDir: "/tmp/etcd-functional-3/etcd.data",
- WALDir: "/tmp/etcd-functional-3/etcd.data/member/wal",
- HeartbeatIntervalMs: 100,
- ElectionTimeoutMs: 1000,
- ListenClientURLs: []string{"https://127.0.0.1:3379"},
- AdvertiseClientURLs: []string{"https://127.0.0.1:3379"},
- ClientAutoTLS: true,
- ClientCertAuth: false,
- ClientCertFile: "",
- ClientKeyFile: "",
- ClientTrustedCAFile: "",
- ListenPeerURLs: []string{"https://127.0.0.1:3380"},
- AdvertisePeerURLs: []string{"https://127.0.0.1:3381"},
- PeerAutoTLS: true,
- PeerClientCertAuth: false,
- PeerCertFile: "",
- PeerKeyFile: "",
- PeerTrustedCAFile: "",
- InitialCluster: "s1=https://127.0.0.1:1381,s2=https://127.0.0.1:2381,s3=https://127.0.0.1:3381",
- InitialClusterState: "new",
- InitialClusterToken: "tkn",
- SnapshotCount: 2000,
- QuotaBackendBytes: 10740000000,
- PreVote: true,
- InitialCorruptCheck: true,
- Logger: "zap",
- LogOutputs: []string{"/tmp/etcd-functional-3/etcd.log"},
- LogLevel: "info",
- },
- ClientCertData: "",
- ClientCertPath: "",
- ClientKeyData: "",
- ClientKeyPath: "",
- ClientTrustedCAData: "",
- ClientTrustedCAPath: "",
- PeerCertData: "",
- PeerCertPath: "",
- PeerKeyData: "",
- PeerKeyPath: "",
- PeerTrustedCAData: "",
- PeerTrustedCAPath: "",
- SnapshotPath: "/tmp/etcd-functional-3.snapshot.db",
- },
- },
- Tester: &rpcpb.Tester{
- DataDir: "/tmp/etcd-tester-data",
- Network: "tcp",
- Addr: "127.0.0.1:9028",
- DelayLatencyMs: 5000,
- DelayLatencyMsRv: 500,
- UpdatedDelayLatencyMs: 5000,
- RoundLimit: 1,
- ExitOnCaseFail: true,
- EnablePprof: true,
- CaseDelayMs: 7000,
- CaseShuffle: true,
- Cases: []string{
- "SIGTERM_ONE_FOLLOWER",
- "SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
- "SIGTERM_LEADER",
- "SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT",
- "SIGTERM_QUORUM",
- "SIGTERM_ALL",
- "SIGQUIT_AND_REMOVE_ONE_FOLLOWER",
- "SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
- // "SIGQUIT_AND_REMOVE_LEADER",
- // "SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT",
- // "SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH",
- // "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER",
- // "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
- "BLACKHOLE_PEER_PORT_TX_RX_LEADER",
- "BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
- "BLACKHOLE_PEER_PORT_TX_RX_QUORUM",
- "BLACKHOLE_PEER_PORT_TX_RX_ALL",
- // "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER",
- // "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER",
- // "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
- // "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
- "DELAY_PEER_PORT_TX_RX_LEADER",
- "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER",
- "DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
- "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
- "DELAY_PEER_PORT_TX_RX_QUORUM",
- "RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM",
- "DELAY_PEER_PORT_TX_RX_ALL",
- "RANDOM_DELAY_PEER_PORT_TX_RX_ALL",
- "NO_FAIL_WITH_STRESS",
- "NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS",
- },
- FailpointCommands: []string{`panic("etcd-tester")`},
- RunnerExecPath: "./bin/etcd-runner",
- ExternalExecPath: "",
- Stressers: []*rpcpb.Stresser{
- {Type: "KV_WRITE_SMALL", Weight: 0.35},
- {Type: "KV_WRITE_LARGE", Weight: 0.002},
- {Type: "KV_READ_ONE_KEY", Weight: 0.07},
- {Type: "KV_READ_RANGE", Weight: 0.07},
- {Type: "KV_DELETE_ONE_KEY", Weight: 0.07},
- {Type: "KV_DELETE_RANGE", Weight: 0.07},
- {Type: "KV_TXN_WRITE_DELETE", Weight: 0.35},
- {Type: "LEASE", Weight: 0.0},
- },
- Checkers: []string{"KV_HASH", "LEASE_EXPIRE"},
- StressKeySize: 100,
- StressKeySizeLarge: 32769,
- StressKeySuffixRange: 250000,
- StressKeySuffixRangeTxn: 100,
- StressKeyTxnOps: 10,
- StressClients: 100,
- StressQPS: 2000,
- },
- }
-
- logger, err := zap.NewProduction()
- if err != nil {
- t.Fatal(err)
- }
- defer logger.Sync()
-
- cfg, err := read(logger, "../functional.yaml")
- if err != nil {
- t.Fatal(err)
- }
- cfg.lg = nil
-
- if !reflect.DeepEqual(exp, cfg) {
- t.Fatalf(`exp != cfg:
- expected %+v
- got %+v`, exp, cfg)
- }
-
- cfg.lg = logger
-
- cfg.updateCases()
- fs1 := cfg.listCases()
-
- cfg.shuffleCases()
- fs2 := cfg.listCases()
- if reflect.DeepEqual(fs1, fs2) {
- t.Fatalf("expected shuffled failure cases, got %q", fs2)
- }
-
- cfg.shuffleCases()
- fs3 := cfg.listCases()
- if reflect.DeepEqual(fs2, fs3) {
- t.Fatalf("expected reshuffled failure cases from %q, got %q", fs2, fs3)
- }
-
- // shuffling visits every case exactly once,
- // so the sorted failure case lists must be equal
- sort.Strings(fs1)
- sort.Strings(fs2)
- sort.Strings(fs3)
-
- if !reflect.DeepEqual(fs1, fs2) {
- t.Fatalf("expected %q, got %q", fs1, fs2)
- }
- if !reflect.DeepEqual(fs2, fs3) {
- t.Fatalf("expected %q, got %q", fs2, fs3)
- }
-}
diff --git a/tests/functional/tester/doc.go b/tests/functional/tester/doc.go
deleted file mode 100644
index d1e23e94134..00000000000
--- a/tests/functional/tester/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package tester implements functional-tester tester server.
-package tester
diff --git a/tests/functional/tester/metrics_report.go b/tests/functional/tester/metrics_report.go
deleted file mode 100644
index c82e58f5b64..00000000000
--- a/tests/functional/tester/metrics_report.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "fmt"
- "sort"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-var (
- caseTotal = make(map[string]int)
-
- caseTotalCounter = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "funcational_tester",
- Name: "case_total",
- Help: "Total number of finished test cases",
- },
- []string{"desc"},
- )
-
- caseFailedTotalCounter = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "funcational_tester",
- Name: "case_failed_total",
- Help: "Total number of failed test cases",
- },
- []string{"desc"},
- )
-
- roundTotalCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "funcational_tester",
- Name: "round_total",
- Help: "Total number of finished test rounds.",
- })
-
- roundFailedTotalCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "funcational_tester",
- Name: "round_failed_total",
- Help: "Total number of failed test rounds.",
- })
-)
-
-func init() {
- prometheus.MustRegister(caseTotalCounter)
- prometheus.MustRegister(caseFailedTotalCounter)
- prometheus.MustRegister(roundTotalCounter)
- prometheus.MustRegister(roundFailedTotalCounter)
-}
-
-func printReport() {
- rows := make([]string, 0, len(caseTotal))
- for k, v := range caseTotal {
- rows = append(rows, fmt.Sprintf("%s: %d", k, v))
- }
- sort.Strings(rows)
-
- println()
- for _, row := range rows {
- fmt.Println(row)
- }
- println()
-}
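
The counters deleted above are labelled by case description and mirrored into the local `caseTotal` map that `printReport` prints. As a standalone sketch (hypothetical metric subsystem, with case names taken from the configuration earlier in this diff; not the tester's real wiring), this is how such a `CounterVec` is registered and incremented with client_golang:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// exampleCaseCounter mirrors the shape of caseTotalCounter above; the
// subsystem name here is illustrative, not the tester's actual metric.
var exampleCaseCounter = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Namespace: "etcd",
		Subsystem: "functional_tester_example",
		Name:      "case_total",
		Help:      "Total number of finished test cases (example).",
	},
	[]string{"desc"},
)

func main() {
	prometheus.MustRegister(exampleCaseCounter)

	// Each finished case increments the counter labelled by its description.
	exampleCaseCounter.WithLabelValues("DELAY_PEER_PORT_TX_RX_ALL").Inc()
	exampleCaseCounter.WithLabelValues("NO_FAIL_WITH_STRESS").Inc()

	fmt.Println("registered and incremented example counters")
}
```
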
diff --git a/tests/functional/tester/stresser.go b/tests/functional/tester/stresser.go
deleted file mode 100644
index f147c6cee20..00000000000
--- a/tests/functional/tester/stresser.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "fmt"
- "time"
-
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
- "go.uber.org/zap"
-)
-
-// Stresser defines stressing client operations.
-type Stresser interface {
- // Stress starts to stress the etcd cluster
- Stress() error
- // Pause stops the stresser from sending requests to etcd. Resume by calling Stress.
- Pause() map[string]int
- // Close releases all of the Stresser's resources.
- Close() map[string]int
- // ModifiedKeys reports the number of keys created and deleted by the stresser
- ModifiedKeys() int64
-}
-
-// newStresser creates the stressers configured for the given cluster member.
-func newStresser(clus *Cluster, m *rpcpb.Member) (stressers []Stresser) {
- // TODO: overly intensive stressing clients can make an etcd member
- // panic with an 'out of memory' error. Add rate limits on the server side.
- ks := &keyStresser{
- lg: clus.lg,
- m: m,
- keySize: int(clus.Tester.StressKeySize),
- keyLargeSize: int(clus.Tester.StressKeySizeLarge),
- keySuffixRange: int(clus.Tester.StressKeySuffixRange),
- keyTxnSuffixRange: int(clus.Tester.StressKeySuffixRangeTxn),
- keyTxnOps: int(clus.Tester.StressKeyTxnOps),
- clientsN: int(clus.Tester.StressClients),
- rateLimiter: clus.rateLimiter,
- }
- ksExist := false
-
- for _, s := range clus.Tester.Stressers {
- clus.lg.Info(
- "creating stresser",
- zap.String("type", s.Type),
- zap.Float64("weight", s.Weight),
- zap.String("endpoint", m.EtcdClientEndpoint),
- )
- switch s.Type {
- case "KV_WRITE_SMALL":
- ksExist = true
- ks.weightKVWriteSmall = s.Weight
- case "KV_WRITE_LARGE":
- ksExist = true
- ks.weightKVWriteLarge = s.Weight
- case "KV_READ_ONE_KEY":
- ksExist = true
- ks.weightKVReadOneKey = s.Weight
- case "KV_READ_RANGE":
- ksExist = true
- ks.weightKVReadRange = s.Weight
- case "KV_DELETE_ONE_KEY":
- ksExist = true
- ks.weightKVDeleteOneKey = s.Weight
- case "KV_DELETE_RANGE":
- ksExist = true
- ks.weightKVDeleteRange = s.Weight
- case "KV_TXN_WRITE_DELETE":
- ksExist = true
- ks.weightKVTxnWriteDelete = s.Weight
-
- case "LEASE":
- stressers = append(stressers, &leaseStresser{
- stype: rpcpb.StresserType_LEASE,
- lg: clus.lg,
- m: m,
- numLeases: 10, // TODO: configurable
- keysPerLease: 10, // TODO: configurable
- rateLimiter: clus.rateLimiter,
- })
-
- case "ELECTION_RUNNER":
- reqRate := 100
- args := []string{
- "election",
- fmt.Sprintf("%v", time.Now().UnixNano()), // election name as current nano time
- "--dial-timeout=10s",
- "--endpoints", m.EtcdClientEndpoint,
- "--total-client-connections=10",
- "--rounds=0", // runs forever
- "--req-rate", fmt.Sprintf("%v", reqRate),
- }
- stressers = append(stressers, newRunnerStresser(
- rpcpb.StresserType_ELECTION_RUNNER,
- m.EtcdClientEndpoint,
- clus.lg,
- clus.Tester.RunnerExecPath,
- args,
- clus.rateLimiter,
- reqRate,
- ))
-
- case "WATCH_RUNNER":
- reqRate := 100
- args := []string{
- "watcher",
- "--prefix", fmt.Sprintf("%v", time.Now().UnixNano()), // prefix all keys with nano time
- "--total-keys=1",
- "--total-prefixes=1",
- "--watch-per-prefix=1",
- "--endpoints", m.EtcdClientEndpoint,
- "--rounds=0", // runs forever
- "--req-rate", fmt.Sprintf("%v", reqRate),
- }
- stressers = append(stressers, newRunnerStresser(
- rpcpb.StresserType_WATCH_RUNNER,
- m.EtcdClientEndpoint,
- clus.lg,
- clus.Tester.RunnerExecPath,
- args,
- clus.rateLimiter,
- reqRate,
- ))
-
- case "LOCK_RACER_RUNNER":
- reqRate := 100
- args := []string{
- "lock-racer",
- fmt.Sprintf("%v", time.Now().UnixNano()), // locker name as current nano time
- "--endpoints", m.EtcdClientEndpoint,
- "--total-client-connections=10",
- "--rounds=0", // runs forever
- "--req-rate", fmt.Sprintf("%v", reqRate),
- }
- stressers = append(stressers, newRunnerStresser(
- rpcpb.StresserType_LOCK_RACER_RUNNER,
- m.EtcdClientEndpoint,
- clus.lg,
- clus.Tester.RunnerExecPath,
- args,
- clus.rateLimiter,
- reqRate,
- ))
-
- case "LEASE_RUNNER":
- args := []string{
- "lease-renewer",
- "--ttl=30",
- "--endpoints", m.EtcdClientEndpoint,
- }
- stressers = append(stressers, newRunnerStresser(
- rpcpb.StresserType_LEASE_RUNNER,
- m.EtcdClientEndpoint,
- clus.lg,
- clus.Tester.RunnerExecPath,
- args,
- clus.rateLimiter,
- 0,
- ))
- }
- }
-
- if ksExist {
- return append(stressers, ks)
- }
- return stressers
-}
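
The Stresser interface deleted above is the whole contract: start load against a member, stop it while returning a map of error strings to counts, and report how many keys were modified. A minimal, hypothetical no-op implementation is sketched below purely to illustrate that contract; it is not part of the original package.

```go
package tester

// noopStresser is a hypothetical, do-nothing Stresser used only to
// illustrate the interface contract above.
type noopStresser struct{}

func (s *noopStresser) Stress() error         { return nil }              // start load (no-op)
func (s *noopStresser) Pause() map[string]int { return map[string]int{} } // stop, report error counts
func (s *noopStresser) Close() map[string]int { return map[string]int{} } // release resources
func (s *noopStresser) ModifiedKeys() int64   { return 0 }                // no keys written

// Compile-time check that the type satisfies the interface.
var _ Stresser = (*noopStresser)(nil)
```
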
diff --git a/tests/functional/tester/stresser_composite.go b/tests/functional/tester/stresser_composite.go
deleted file mode 100644
index 09dcb55ff63..00000000000
--- a/tests/functional/tester/stresser_composite.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import "sync"
-
-// compositeStresser implements a Stresser that runs a slice of
-// stressing clients concurrently.
-type compositeStresser struct {
- stressers []Stresser
-}
-
-func (cs *compositeStresser) Stress() error {
- for i, s := range cs.stressers {
- if err := s.Stress(); err != nil {
- for j := 0; j < i; j++ {
- cs.stressers[j].Close()
- }
- return err
- }
- }
- return nil
-}
-
-func (cs *compositeStresser) Pause() (ems map[string]int) {
- var emu sync.Mutex
- ems = make(map[string]int)
- var wg sync.WaitGroup
- wg.Add(len(cs.stressers))
- for i := range cs.stressers {
- go func(s Stresser) {
- defer wg.Done()
- errs := s.Pause()
- for k, v := range errs {
- emu.Lock()
- ems[k] += v
- emu.Unlock()
- }
- }(cs.stressers[i])
- }
- wg.Wait()
- return ems
-}
-
-func (cs *compositeStresser) Close() (ems map[string]int) {
- var emu sync.Mutex
- ems = make(map[string]int)
- var wg sync.WaitGroup
- wg.Add(len(cs.stressers))
- for i := range cs.stressers {
- go func(s Stresser) {
- defer wg.Done()
- errs := s.Close()
- for k, v := range errs {
- emu.Lock()
- ems[k] += v
- emu.Unlock()
- }
- }(cs.stressers[i])
- }
- wg.Wait()
- return ems
-}
-
-func (cs *compositeStresser) ModifiedKeys() (modifiedKey int64) {
- for _, stress := range cs.stressers {
- modifiedKey += stress.ModifiedKeys()
- }
- return modifiedKey
-}
diff --git a/tests/functional/tester/stresser_key.go b/tests/functional/tester/stresser_key.go
deleted file mode 100644
index 007a9535e7a..00000000000
--- a/tests/functional/tester/stresser_key.go
+++ /dev/null
@@ -1,361 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "context"
- "fmt"
- "math/rand"
- "reflect"
- "sync"
- "sync/atomic"
- "time"
-
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/raft/v3"
- "go.etcd.io/etcd/server/v3/etcdserver"
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
- "go.uber.org/zap"
- "golang.org/x/time/rate"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-type keyStresser struct {
- lg *zap.Logger
-
- m *rpcpb.Member
-
- weightKVWriteSmall float64
- weightKVWriteLarge float64
- weightKVReadOneKey float64
- weightKVReadRange float64
- weightKVDeleteOneKey float64
- weightKVDeleteRange float64
- weightKVTxnWriteDelete float64
-
- keySize int
- keyLargeSize int
- keySuffixRange int
- keyTxnSuffixRange int
- keyTxnOps int
-
- rateLimiter *rate.Limiter
-
- wg sync.WaitGroup
- clientsN int
-
- ctx context.Context
- cancel func()
- cli *clientv3.Client
-
- emu sync.RWMutex
- ems map[string]int
- paused bool
-
- // atomicModifiedKeys records the number of keys created and deleted by the stresser.
- atomicModifiedKeys int64
-
- stressTable *stressTable
-}
-
-func (s *keyStresser) Stress() error {
- var err error
- s.cli, err = s.m.CreateEtcdClient(grpc.WithBackoffMaxDelay(1 * time.Second))
- if err != nil {
- return fmt.Errorf("%v (%q)", err, s.m.EtcdClientEndpoint)
- }
- s.ctx, s.cancel = context.WithCancel(context.Background())
-
- s.wg.Add(s.clientsN)
-
- s.stressTable = createStressTable([]stressEntry{
- {weight: s.weightKVWriteSmall, f: newStressPut(s.cli, s.keySuffixRange, s.keySize)},
- {weight: s.weightKVWriteLarge, f: newStressPut(s.cli, s.keySuffixRange, s.keyLargeSize)},
- {weight: s.weightKVReadOneKey, f: newStressRange(s.cli, s.keySuffixRange)},
- {weight: s.weightKVReadRange, f: newStressRangeInterval(s.cli, s.keySuffixRange)},
- {weight: s.weightKVDeleteOneKey, f: newStressDelete(s.cli, s.keySuffixRange)},
- {weight: s.weightKVDeleteRange, f: newStressDeleteInterval(s.cli, s.keySuffixRange)},
- {weight: s.weightKVTxnWriteDelete, f: newStressTxn(s.cli, s.keyTxnSuffixRange, s.keyTxnOps)},
- })
-
- s.emu.Lock()
- s.paused = false
- s.ems = make(map[string]int, 100)
- s.emu.Unlock()
- for i := 0; i < s.clientsN; i++ {
- go s.run()
- }
-
- s.lg.Info(
- "stress START",
- zap.String("stress-type", "KV"),
- zap.String("endpoint", s.m.EtcdClientEndpoint),
- )
- return nil
-}
-
-func (s *keyStresser) run() {
- defer s.wg.Done()
-
- for {
- if err := s.rateLimiter.Wait(s.ctx); err == context.Canceled {
- return
- }
-
- // TODO: a 10-second timeout is enough to cover leader failure
- // and an immediate leader election. Find out in what other cases
- // requests could time out here.
- sctx, scancel := context.WithTimeout(s.ctx, 10*time.Second)
- modifiedKeys, err := s.stressTable.choose()(sctx)
- scancel()
- if err == nil {
- atomic.AddInt64(&s.atomicModifiedKeys, modifiedKeys)
- continue
- }
-
- if !s.isRetryableError(err) {
- return
- }
-
- // only record errors before pausing stressers
- s.emu.Lock()
- if !s.paused {
- s.ems[err.Error()]++
- }
- s.emu.Unlock()
- }
-}
-
-func (s *keyStresser) isRetryableError(err error) bool {
- switch rpctypes.ErrorDesc(err) {
- // retryable
- case context.DeadlineExceeded.Error():
- // This retries when a request is triggered at the same time as a
- // leader failure. When we terminate the leader, the request to
- // that leader cannot be processed and times out. Requests to
- // followers cannot be forwarded to the old leader either, so they
- // time out as well. We want to keep stressing until the cluster
- // elects a new leader and starts processing requests again.
- return true
- case etcdserver.ErrTimeoutDueToLeaderFail.Error(), etcdserver.ErrTimeout.Error():
- // This retries when a request is triggered at the same time as a
- // leader failure and follower nodes receive timeout errors after
- // losing their leader. Followers should retry connecting to the
- // new leader.
- return true
- case etcdserver.ErrStopped.Error():
- // one of the etcd nodes stopped from failure injection
- return true
- case rpctypes.ErrNotCapable.Error():
- // capability check has not been done (in the beginning)
- return true
- case rpctypes.ErrTooManyRequests.Error():
- // hitting the recovering member.
- return true
- case raft.ErrProposalDropped.Error():
- // removed member, or leadership has changed (old leader got raftpb.MsgProp)
- return true
-
- // not retryable.
- case context.Canceled.Error():
- // canceled by the stresser's own cancel func (from Pause/Close):
- return false
- }
-
- if status.Convert(err).Code() == codes.Unavailable {
- // gRPC connection errors are translated to status.Unavailable
- return true
- }
-
- s.lg.Warn(
- "stress run exiting",
- zap.String("stress-type", "KV"),
- zap.String("endpoint", s.m.EtcdClientEndpoint),
- zap.String("error-type", reflect.TypeOf(err).String()),
- zap.String("error-desc", rpctypes.ErrorDesc(err)),
- zap.Error(err),
- )
- return false
-}
-
-func (s *keyStresser) Pause() map[string]int {
- return s.Close()
-}
-
-func (s *keyStresser) Close() map[string]int {
- s.cancel()
- s.cli.Close()
- s.wg.Wait()
-
- s.emu.Lock()
- s.paused = true
- ess := s.ems
- s.ems = make(map[string]int, 100)
- s.emu.Unlock()
-
- s.lg.Info(
- "stress STOP",
- zap.String("stress-type", "KV"),
- zap.String("endpoint", s.m.EtcdClientEndpoint),
- )
- return ess
-}
-
-func (s *keyStresser) ModifiedKeys() int64 {
- return atomic.LoadInt64(&s.atomicModifiedKeys)
-}
-
-type stressFunc func(ctx context.Context) (modifiedKeys int64, err error)
-
-type stressEntry struct {
- weight float64
- f stressFunc
-}
-
-type stressTable struct {
- entries []stressEntry
- sumWeights float64
-}
-
-func createStressTable(entries []stressEntry) *stressTable {
- st := stressTable{entries: entries}
- for _, entry := range st.entries {
- st.sumWeights += entry.weight
- }
- return &st
-}
-
-func (st *stressTable) choose() stressFunc {
- v := rand.Float64() * st.sumWeights
- var sum float64
- var idx int
- for i := range st.entries {
- sum += st.entries[i].weight
- if sum >= v {
- idx = i
- break
- }
- }
- return st.entries[idx].f
-}
-
-func newStressPut(cli *clientv3.Client, keySuffixRange, keySize int) stressFunc {
- return func(ctx context.Context) (int64, error) {
- _, err := cli.Put(
- ctx,
- fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange)),
- string(randBytes(keySize)),
- )
- return 1, err
- }
-}
-
-func newStressTxn(cli *clientv3.Client, keyTxnSuffixRange, txnOps int) stressFunc {
- keys := make([]string, keyTxnSuffixRange)
- for i := range keys {
- keys[i] = fmt.Sprintf("/k%03d", i)
- }
- return writeTxn(cli, keys, txnOps)
-}
-
-func writeTxn(cli *clientv3.Client, keys []string, txnOps int) stressFunc {
- return func(ctx context.Context) (int64, error) {
- ks := make(map[string]struct{}, txnOps)
- for len(ks) != txnOps {
- ks[keys[rand.Intn(len(keys))]] = struct{}{}
- }
- selected := make([]string, 0, txnOps)
- for k := range ks {
- selected = append(selected, k)
- }
- com, delOp, putOp := getTxnOps(selected[0], "bar00")
- thenOps := []clientv3.Op{delOp}
- elseOps := []clientv3.Op{putOp}
- for i := 1; i < txnOps; i++ { // nested txns
- k, v := selected[i], fmt.Sprintf("bar%02d", i)
- com, delOp, putOp = getTxnOps(k, v)
- txnOp := clientv3.OpTxn(
- []clientv3.Cmp{com},
- []clientv3.Op{delOp},
- []clientv3.Op{putOp},
- )
- thenOps = append(thenOps, txnOp)
- elseOps = append(elseOps, txnOp)
- }
- _, err := cli.Txn(ctx).
- If(com).
- Then(thenOps...).
- Else(elseOps...).
- Commit()
- return int64(txnOps), err
- }
-}
-
-func getTxnOps(k, v string) (
- cmp clientv3.Cmp,
- dop clientv3.Op,
- pop clientv3.Op) {
- // if key exists (version > 0)
- cmp = clientv3.Compare(clientv3.Version(k), ">", 0)
- dop = clientv3.OpDelete(k)
- pop = clientv3.OpPut(k, v)
- return cmp, dop, pop
-}
-
-func newStressRange(cli *clientv3.Client, keySuffixRange int) stressFunc {
- return func(ctx context.Context) (int64, error) {
- _, err := cli.Get(ctx, fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange)))
- return 0, err
- }
-}
-
-func newStressRangeInterval(cli *clientv3.Client, keySuffixRange int) stressFunc {
- return func(ctx context.Context) (int64, error) {
- start := rand.Intn(keySuffixRange)
- end := start + 500
- _, err := cli.Get(
- ctx,
- fmt.Sprintf("foo%016x", start),
- clientv3.WithRange(fmt.Sprintf("foo%016x", end)),
- )
- return 0, err
- }
-}
-
-func newStressDelete(cli *clientv3.Client, keySuffixRange int) stressFunc {
- return func(ctx context.Context) (int64, error) {
- _, err := cli.Delete(ctx, fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange)))
- return 1, err
- }
-}
-
-func newStressDeleteInterval(cli *clientv3.Client, keySuffixRange int) stressFunc {
- return func(ctx context.Context) (int64, error) {
- start := rand.Intn(keySuffixRange)
- end := start + 500
- resp, err := cli.Delete(ctx,
- fmt.Sprintf("foo%016x", start),
- clientv3.WithRange(fmt.Sprintf("foo%016x", end)),
- )
- if err == nil {
- return resp.Deleted, nil
- }
- return 0, err
- }
-}
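
The `stressTable` deleted above selects the next operation by weighted random choice: draw a uniform sample in `[0, sumWeights)` and walk the cumulative weights until the sample is crossed. A standalone sketch of that selection (the function and variable names are illustrative; the weights mirror the stresser configuration near the top of this diff):

```go
package main

import (
	"fmt"
	"math/rand"
)

// weightedChoice mirrors stressTable.choose above: draw a uniform value
// in [0, sum of weights) and return the first index whose cumulative
// weight crosses it.
func weightedChoice(weights []float64) int {
	var sum float64
	for _, w := range weights {
		sum += w
	}
	v := rand.Float64() * sum
	var acc float64
	for i, w := range weights {
		acc += w
		if acc >= v {
			return i
		}
	}
	return len(weights) - 1 // guard against floating-point rounding
}

func main() {
	// KV_WRITE_SMALL, KV_WRITE_LARGE, KV_READ_ONE_KEY, KV_TXN_WRITE_DELETE
	weights := []float64{0.35, 0.002, 0.07, 0.35}
	counts := make([]int, len(weights))
	for i := 0; i < 100000; i++ {
		counts[weightedChoice(weights)]++
	}
	// heavier weights are chosen proportionally more often
	fmt.Println(counts)
}
```
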
diff --git a/tests/functional/tester/stresser_lease.go b/tests/functional/tester/stresser_lease.go
deleted file mode 100644
index a0f7cf64fb8..00000000000
--- a/tests/functional/tester/stresser_lease.go
+++ /dev/null
@@ -1,521 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "context"
- "fmt"
- "math/rand"
- "sync"
- "sync/atomic"
- "time"
-
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
- "go.uber.org/zap"
- "golang.org/x/time/rate"
- "google.golang.org/grpc"
-)
-
-const (
- // time to live for a lease, in seconds
- defaultTTL = 120
- defaultTTLShort = 2
-)
-
-type leaseStresser struct {
- stype rpcpb.StresserType
- lg *zap.Logger
-
- m *rpcpb.Member
- cli *clientv3.Client
- ctx context.Context
- cancel func()
-
- rateLimiter *rate.Limiter
- // atomicModifiedKey records the number of keys created and deleted during a test case
- atomicModifiedKey int64
- numLeases int
- keysPerLease int
- aliveLeases *atomicLeases
- alivedLeasesWithShortTTL *atomicLeases
- revokedLeases *atomicLeases
- shortLivedLeases *atomicLeases
-
- runWg sync.WaitGroup
- aliveWg sync.WaitGroup
-}
-
-type atomicLeases struct {
- // rwLock protects read/write access to the leases map,
- // which is accessed and modified by different goroutines.
- rwLock sync.RWMutex
- leases map[int64]time.Time
-}
-
-func (al *atomicLeases) add(leaseID int64, t time.Time) {
- al.rwLock.Lock()
- al.leases[leaseID] = t
- al.rwLock.Unlock()
-}
-
-func (al *atomicLeases) update(leaseID int64, t time.Time) {
- al.rwLock.Lock()
- _, ok := al.leases[leaseID]
- if ok {
- al.leases[leaseID] = t
- }
- al.rwLock.Unlock()
-}
-
-func (al *atomicLeases) read(leaseID int64) (rv time.Time, ok bool) {
- al.rwLock.RLock()
- rv, ok = al.leases[leaseID]
- al.rwLock.RUnlock()
- return rv, ok
-}
-
-func (al *atomicLeases) remove(leaseID int64) {
- al.rwLock.Lock()
- delete(al.leases, leaseID)
- al.rwLock.Unlock()
-}
-
-func (al *atomicLeases) getLeasesMap() map[int64]time.Time {
- leasesCopy := make(map[int64]time.Time)
- al.rwLock.RLock()
- for k, v := range al.leases {
- leasesCopy[k] = v
- }
- al.rwLock.RUnlock()
- return leasesCopy
-}
-
-func (ls *leaseStresser) setupOnce() error {
- if ls.aliveLeases != nil {
- return nil
- }
- if ls.numLeases == 0 {
- panic("expect numLeases to be set")
- }
- if ls.keysPerLease == 0 {
- panic("expect keysPerLease to be set")
- }
-
- ls.aliveLeases = &atomicLeases{leases: make(map[int64]time.Time)}
- return nil
-}
-
-func (ls *leaseStresser) Stress() error {
- ls.lg.Info(
- "stress START",
- zap.String("stress-type", ls.stype.String()),
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- )
-
- if err := ls.setupOnce(); err != nil {
- return err
- }
-
- ctx, cancel := context.WithCancel(context.Background())
- ls.ctx = ctx
- ls.cancel = cancel
-
- cli, err := ls.m.CreateEtcdClient(grpc.WithBackoffMaxDelay(1 * time.Second))
- if err != nil {
- return fmt.Errorf("%v (%s)", err, ls.m.EtcdClientEndpoint)
- }
- ls.cli = cli
-
- ls.revokedLeases = &atomicLeases{leases: make(map[int64]time.Time)}
- ls.shortLivedLeases = &atomicLeases{leases: make(map[int64]time.Time)}
- ls.alivedLeasesWithShortTTL = &atomicLeases{leases: make(map[int64]time.Time)}
-
- ls.runWg.Add(1)
- go ls.run()
- return nil
-}
-
-func (ls *leaseStresser) run() {
- defer ls.runWg.Done()
- ls.restartKeepAlives()
- for {
- // the number of keys created and deleted per iteration is roughly 2x the number of created keys.
- // the rateLimiter therefore consumes 2*ls.numLeases*ls.keysPerLease tokens, where each token represents a create/delete operation on a key.
- err := ls.rateLimiter.WaitN(ls.ctx, 2*ls.numLeases*ls.keysPerLease)
- if err == context.Canceled {
- return
- }
-
- ls.lg.Debug(
- "stress creating leases",
- zap.String("stress-type", ls.stype.String()),
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- )
- ls.createLeases()
- ls.lg.Debug(
- "stress created leases",
- zap.String("stress-type", ls.stype.String()),
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- )
-
- ls.lg.Debug(
- "stress dropping leases",
- zap.String("stress-type", ls.stype.String()),
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- )
- ls.randomlyDropLeases()
- ls.lg.Debug(
- "stress dropped leases",
- zap.String("stress-type", ls.stype.String()),
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- )
- }
-}
-
-func (ls *leaseStresser) restartKeepAlives() {
- for leaseID := range ls.aliveLeases.getLeasesMap() {
- ls.aliveWg.Add(1)
- go func(id int64) {
- ls.keepLeaseAlive(id)
- }(leaseID)
- }
- for leaseID := range ls.alivedLeasesWithShortTTL.getLeasesMap() {
- ls.aliveWg.Add(1)
- go func(id int64) {
- ls.keepLeaseAlive(id)
- }(leaseID)
- }
-}
-
-func (ls *leaseStresser) createLeases() {
- ls.createAliveLeasesWithShortTTL()
- ls.createAliveLeases()
- ls.createShortLivedLeases()
-}
-
-func (ls *leaseStresser) createAliveLeases() {
- neededLeases := ls.numLeases - len(ls.aliveLeases.getLeasesMap())
- var wg sync.WaitGroup
- for i := 0; i < neededLeases; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- leaseID, err := ls.createLeaseWithKeys(defaultTTL)
- if err != nil {
- ls.lg.Debug(
- "createLeaseWithKeys failed",
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- zap.Error(err),
- )
- return
- }
- ls.aliveLeases.add(leaseID, time.Now())
- // keep track of all the keep lease alive goroutines
- ls.aliveWg.Add(1)
- go ls.keepLeaseAlive(leaseID)
- }()
- }
- wg.Wait()
-}
-
-func (ls *leaseStresser) createAliveLeasesWithShortTTL() {
- neededLeases := 2
- var wg sync.WaitGroup
- for i := 0; i < neededLeases; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- leaseID, err := ls.createLeaseWithKeys(defaultTTLShort)
- if err != nil {
- ls.lg.Debug(
- "createLeaseWithKeys failed",
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- zap.Error(err),
- )
- return
- }
- ls.lg.Warn("createAliveLeasesWithShortTTL", zap.Int64("lease-id", leaseID))
- ls.alivedLeasesWithShortTTL.add(leaseID, time.Now())
- // keep track of all the keep lease alive goroutines
- ls.aliveWg.Add(1)
- go ls.keepLeaseAlive(leaseID)
- }()
- }
- wg.Wait()
-}
-
-func (ls *leaseStresser) createShortLivedLeases() {
- // one round of createLeases() might not create all the short-lived leases we want due to failures.
- // thus, we create the remaining short-lived leases in a future round.
- neededLeases := ls.numLeases - len(ls.shortLivedLeases.getLeasesMap())
- var wg sync.WaitGroup
- for i := 0; i < neededLeases; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- leaseID, err := ls.createLeaseWithKeys(defaultTTLShort)
- if err != nil {
- return
- }
- ls.shortLivedLeases.add(leaseID, time.Now())
- }()
- }
- wg.Wait()
-}
-
-func (ls *leaseStresser) createLeaseWithKeys(ttl int64) (int64, error) {
- leaseID, err := ls.createLease(ttl)
- if err != nil {
- ls.lg.Debug(
- "createLease failed",
- zap.String("stress-type", ls.stype.String()),
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- zap.Error(err),
- )
- return -1, err
- }
-
- ls.lg.Debug(
- "createLease created lease",
- zap.String("stress-type", ls.stype.String()),
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
- )
- if err := ls.attachKeysWithLease(leaseID); err != nil {
- return -1, err
- }
- return leaseID, nil
-}
-
-func (ls *leaseStresser) randomlyDropLeases() {
- var wg sync.WaitGroup
- for l := range ls.aliveLeases.getLeasesMap() {
- wg.Add(1)
- go func(leaseID int64) {
- defer wg.Done()
- dropped, err := ls.randomlyDropLease(leaseID)
- // if randomlyDropLease encountered an error, such as a canceled context, remove the lease from aliveLeases
- // because we can't tell whether the lease was dropped or not.
- if err != nil {
- ls.lg.Debug(
- "randomlyDropLease failed",
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
- zap.Error(err),
- )
- ls.aliveLeases.remove(leaseID)
- return
- }
- if !dropped {
- return
- }
- ls.lg.Debug(
- "randomlyDropLease dropped a lease",
- zap.String("stress-type", ls.stype.String()),
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
- )
- ls.revokedLeases.add(leaseID, time.Now())
- ls.aliveLeases.remove(leaseID)
- }(l)
- }
- wg.Wait()
-}
-
-func (ls *leaseStresser) createLease(ttl int64) (int64, error) {
- resp, err := ls.cli.Grant(ls.ctx, ttl)
- if err != nil {
- return -1, err
- }
- return int64(resp.ID), nil
-}
-
-func (ls *leaseStresser) keepLeaseAlive(leaseID int64) {
- defer ls.aliveWg.Done()
- ctx, cancel := context.WithCancel(ls.ctx)
- stream, err := ls.cli.KeepAlive(ctx, clientv3.LeaseID(leaseID))
- defer func() { cancel() }()
- for {
- select {
- case <-time.After(500 * time.Millisecond):
- case <-ls.ctx.Done():
- ls.lg.Debug(
- "keepLeaseAlive context canceled",
- zap.String("stress-type", ls.stype.String()),
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
- zap.Error(ls.ctx.Err()),
- )
- // it is possible that a lease expires during the invariant-checking phase but not during the keepLeaseAlive() phase.
- // this can happen when an alive lease is just about to expire as keepLeaseAlive() exits, and then expires during invariant checking.
- // to circumvent that scenario, we check each lease before the keepalive loop exits to see if it has been renewed in the last TTL/2 duration.
- // if it has been renewed, invariant checking has at least TTL/2 before the lease expires, which is long enough for the checking to finish.
- // if it has not been renewed, we remove the lease from the alive map so that the lease doesn't expire during invariant checking.
- renewTime, ok := ls.aliveLeases.read(leaseID)
- if ok && renewTime.Add(defaultTTL/2*time.Second).Before(time.Now()) {
- ls.aliveLeases.remove(leaseID)
- ls.lg.Debug(
- "keepLeaseAlive lease has not been renewed, dropped it",
- zap.String("stress-type", ls.stype.String()),
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
- )
- }
- return
- }
-
- if err != nil {
- ls.lg.Debug(
- "keepLeaseAlive lease creates stream error",
- zap.String("stress-type", ls.stype.String()),
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
- zap.Error(err),
- )
- cancel()
- ctx, cancel = context.WithCancel(ls.ctx)
- stream, err = ls.cli.KeepAlive(ctx, clientv3.LeaseID(leaseID))
- cancel()
- continue
- }
- if err != nil {
- ls.lg.Debug(
- "keepLeaseAlive failed to receive lease keepalive response",
- zap.String("stress-type", ls.stype.String()),
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
- zap.Error(err),
- )
- continue
- }
-
- ls.lg.Debug(
- "keepLeaseAlive waiting on lease stream",
- zap.String("stress-type", ls.stype.String()),
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
- )
- leaseRenewTime := time.Now()
- respRC := <-stream
- if respRC == nil {
- ls.lg.Debug(
- "keepLeaseAlive received nil lease keepalive response",
- zap.String("stress-type", ls.stype.String()),
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
- )
- continue
- }
-
- // the lease expires after its TTL becomes 0;
- // don't send keepalives if the lease has expired
- if respRC.TTL <= 0 {
- ls.lg.Debug(
- "keepLeaseAlive stream received lease keepalive response TTL <= 0",
- zap.String("stress-type", ls.stype.String()),
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
- zap.Int64("ttl", respRC.TTL),
- )
- ls.aliveLeases.remove(leaseID)
- return
- }
- // renew lease timestamp only if lease is present
- ls.lg.Debug(
- "keepLeaseAlive renewed a lease",
- zap.String("stress-type", ls.stype.String()),
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
- )
- ls.aliveLeases.update(leaseID, leaseRenewTime)
- }
-}
-
-// attachKeysWithLease attaches keys to the lease.
-// the key format is the concatenation of leaseID + '_' + '<order of key creation>',
-// e.g. 5186835655248304152_0 for the first created key and 5186835655248304152_1 for the second created key.
-func (ls *leaseStresser) attachKeysWithLease(leaseID int64) error {
- var txnPuts []clientv3.Op
- for j := 0; j < ls.keysPerLease; j++ {
- txnput := clientv3.OpPut(
- fmt.Sprintf("%d%s%d", leaseID, "_", j),
- fmt.Sprintf("bar"),
- clientv3.WithLease(clientv3.LeaseID(leaseID)),
- )
- txnPuts = append(txnPuts, txnput)
- }
- // keep retrying until lease is not found or ctx is being canceled
- for ls.ctx.Err() == nil {
- _, err := ls.cli.Txn(ls.ctx).Then(txnPuts...).Commit()
- if err == nil {
- // since all created keys will be deleted too, the number of operations on keys will be roughly 2x the number of created keys
- atomic.AddInt64(&ls.atomicModifiedKey, 2*int64(ls.keysPerLease))
- return nil
- }
- if rpctypes.Error(err) == rpctypes.ErrLeaseNotFound {
- return err
- }
- }
- return ls.ctx.Err()
-}
-
-// randomlyDropLease drops the lease only when rand.Intn(2) returns 0,
-// which gives a 50% chance of dropping the lease.
-func (ls *leaseStresser) randomlyDropLease(leaseID int64) (bool, error) {
- if rand.Intn(2) != 0 {
- return false, nil
- }
-
- // keep retrying until a lease is dropped or ctx is being canceled
- for ls.ctx.Err() == nil {
- _, err := ls.cli.Revoke(ls.ctx, clientv3.LeaseID(leaseID))
- if err == nil || rpctypes.Error(err) == rpctypes.ErrLeaseNotFound {
- return true, nil
- }
- }
-
- ls.lg.Debug(
- "randomlyDropLease error",
- zap.String("stress-type", ls.stype.String()),
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
- zap.Error(ls.ctx.Err()),
- )
- return false, ls.ctx.Err()
-}
-
-func (ls *leaseStresser) Pause() map[string]int {
- return ls.Close()
-}
-
-func (ls *leaseStresser) Close() map[string]int {
- ls.cancel()
- ls.runWg.Wait()
- ls.aliveWg.Wait()
- ls.cli.Close()
- ls.lg.Info(
- "stress STOP",
- zap.String("stress-type", ls.stype.String()),
- zap.String("endpoint", ls.m.EtcdClientEndpoint),
- )
- return nil
-}
-
-func (ls *leaseStresser) ModifiedKeys() int64 {
- return atomic.LoadInt64(&ls.atomicModifiedKey)
-}
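
One detail worth calling out in the deleted lease stresser: when `keepLeaseAlive` exits, it keeps a lease in the alive set only if it was renewed within the last TTL/2, so the lease cannot expire while the invariant checker runs. A standalone sketch of just that freshness test (the helper name is illustrative; `defaultTTL` matches the constant in the deleted file):

```go
package main

import (
	"fmt"
	"time"
)

const defaultTTL = 120 // lease time-to-live in seconds, as in the deleted file

// staleForChecking reports whether a lease's last renewal is older than
// TTL/2, i.e. whether it might expire during invariant checking and
// should therefore be dropped from the alive set.
func staleForChecking(lastRenew, now time.Time) bool {
	return lastRenew.Add(defaultTTL / 2 * time.Second).Before(now)
}

func main() {
	now := time.Now()
	fmt.Println(staleForChecking(now.Add(-90*time.Second), now)) // true: not renewed within the last 60s
	fmt.Println(staleForChecking(now.Add(-10*time.Second), now)) // false: recently renewed
}
```
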
diff --git a/tests/functional/tester/stresser_runner.go b/tests/functional/tester/stresser_runner.go
deleted file mode 100644
index ccfdab4df5b..00000000000
--- a/tests/functional/tester/stresser_runner.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "fmt"
- "io/ioutil"
- "os/exec"
- "syscall"
-
- "go.etcd.io/etcd/tests/v3/functional/rpcpb"
-
- "go.uber.org/zap"
- "golang.org/x/time/rate"
-)
-
-type runnerStresser struct {
- stype rpcpb.StresserType
- etcdClientEndpoint string
- lg *zap.Logger
-
- cmd *exec.Cmd
- cmdStr string
- args []string
- rl *rate.Limiter
- reqRate int
-
- errc chan error
- donec chan struct{}
-}
-
-func newRunnerStresser(
- stype rpcpb.StresserType,
- ep string,
- lg *zap.Logger,
- cmdStr string,
- args []string,
- rl *rate.Limiter,
- reqRate int,
-) *runnerStresser {
- rl.SetLimit(rl.Limit() - rate.Limit(reqRate))
- return &runnerStresser{
- stype: stype,
- etcdClientEndpoint: ep,
- lg: lg,
- cmdStr: cmdStr,
- args: args,
- rl: rl,
- reqRate: reqRate,
- errc: make(chan error, 1),
- donec: make(chan struct{}),
- }
-}
-
-func (rs *runnerStresser) setupOnce() (err error) {
- if rs.cmd != nil {
- return nil
- }
-
- rs.cmd = exec.Command(rs.cmdStr, rs.args...)
- stderr, err := rs.cmd.StderrPipe()
- if err != nil {
- return err
- }
-
- go func() {
- defer close(rs.donec)
- out, err := ioutil.ReadAll(stderr)
- if err != nil {
- rs.errc <- err
- } else {
- rs.errc <- fmt.Errorf("(%v %v) stderr %v", rs.cmdStr, rs.args, string(out))
- }
- }()
-
- return rs.cmd.Start()
-}
-
-func (rs *runnerStresser) Stress() (err error) {
- rs.lg.Info(
- "stress START",
- zap.String("stress-type", rs.stype.String()),
- )
- if err = rs.setupOnce(); err != nil {
- return err
- }
- return syscall.Kill(rs.cmd.Process.Pid, syscall.SIGCONT)
-}
-
-func (rs *runnerStresser) Pause() map[string]int {
- rs.lg.Info(
- "stress STOP",
- zap.String("stress-type", rs.stype.String()),
- )
- syscall.Kill(rs.cmd.Process.Pid, syscall.SIGSTOP)
- return nil
-}
-
-func (rs *runnerStresser) Close() map[string]int {
- syscall.Kill(rs.cmd.Process.Pid, syscall.SIGINT)
- rs.cmd.Wait()
- <-rs.donec
- rs.rl.SetLimit(rs.rl.Limit() + rate.Limit(rs.reqRate))
- return nil
-}
-
-func (rs *runnerStresser) ModifiedKeys() int64 {
- return 1
-}
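
The deleted `runnerStresser` drives an external `etcd-runner` process and controls it with plain job-control signals: SIGSTOP for Pause, SIGCONT for Stress (resume), and SIGINT for Close. A minimal, Unix-only sketch of that pattern, using `sleep` as a stand-in child process:

```go
package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	// Stand-in for ./bin/etcd-runner; any long-running command works.
	cmd := exec.Command("sleep", "30")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	pid := cmd.Process.Pid

	_ = syscall.Kill(pid, syscall.SIGSTOP) // pause the child, as in Pause()
	time.Sleep(time.Second)
	_ = syscall.Kill(pid, syscall.SIGCONT) // resume, as in Stress()

	_ = syscall.Kill(pid, syscall.SIGINT) // ask it to exit, as in Close()
	err := cmd.Wait()
	fmt.Println("runner exited:", err)
}
```
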
diff --git a/tests/functional/tester/utils.go b/tests/functional/tester/utils.go
deleted file mode 100644
index 74e34146d53..00000000000
--- a/tests/functional/tester/utils.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
- "fmt"
- "math/rand"
- "net"
- "net/url"
- "strings"
-)
-
-func isValidURL(u string) bool {
- _, err := url.Parse(u)
- return err == nil
-}
-
-func getPort(addr string) (port string, err error) {
- urlAddr, err := url.Parse(addr)
- if err != nil {
- return "", err
- }
- _, port, err = net.SplitHostPort(urlAddr.Host)
- if err != nil {
- return "", err
- }
- return port, nil
-}
-
-func getSameValue(vals map[string]int64) bool {
- var rv int64
- for _, v := range vals {
- if rv == 0 {
- rv = v
- }
- if rv != v {
- return false
- }
- }
- return true
-}
-
-func max(n1, n2 int64) int64 {
- if n1 > n2 {
- return n1
- }
- return n2
-}
-
-func errsToError(errs []error) error {
- if len(errs) == 0 {
- return nil
- }
- stringArr := make([]string, len(errs))
- for i, err := range errs {
- stringArr[i] = err.Error()
- }
- return fmt.Errorf(strings.Join(stringArr, ", "))
-}
-
-func randBytes(size int) []byte {
- data := make([]byte, size)
- for i := 0; i < size; i++ {
- data[i] = byte(int('a') + rand.Intn(26))
- }
- return data
-}
diff --git a/tests/go.mod b/tests/go.mod
index e5cc2afc581..20c95ca4dfa 100644
--- a/tests/go.mod
+++ b/tests/go.mod
@@ -1,45 +1,105 @@
module go.etcd.io/etcd/tests/v3
-go 1.16
+go 1.23
+
+toolchain go1.23.4
replace (
go.etcd.io/etcd/api/v3 => ../api
go.etcd.io/etcd/client/pkg/v3 => ../client/pkg
- go.etcd.io/etcd/client/v2 => ../client/v2
+ go.etcd.io/etcd/client/v2 => ./../client/internal/v2
go.etcd.io/etcd/client/v3 => ../client/v3
go.etcd.io/etcd/etcdctl/v3 => ../etcdctl
go.etcd.io/etcd/etcdutl/v3 => ../etcdutl
go.etcd.io/etcd/pkg/v3 => ../pkg
- go.etcd.io/etcd/raft/v3 => ../raft
go.etcd.io/etcd/server/v3 => ../server
)
require (
- github.com/dustin/go-humanize v1.0.0
- github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca
- github.com/gogo/protobuf v1.3.2
- github.com/golang/protobuf v1.5.2
+ github.com/anishathalye/porcupine v0.1.4
+ github.com/coreos/go-semver v0.3.1
+ github.com/golang/protobuf v1.5.4
+ github.com/google/go-cmp v0.6.0
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
- github.com/grpc-ecosystem/grpc-gateway v1.16.0
- github.com/prometheus/client_golang v1.5.1
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1
+ github.com/prometheus/client_golang v1.20.5
+ github.com/prometheus/common v0.61.0
github.com/soheilhy/cmux v0.1.5
- github.com/spf13/cobra v1.1.3
- github.com/spf13/pflag v1.0.5
- github.com/stretchr/testify v1.7.0
- go.etcd.io/bbolt v1.3.6-0.20210426205525-9c92be978ae0
- go.etcd.io/etcd/api/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/client/pkg/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/client/v2 v2.305.0-alpha.0
- go.etcd.io/etcd/client/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/etcdutl/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/raft/v3 v3.5.0-alpha.0
- go.etcd.io/etcd/server/v3 v3.5.0-alpha.0
- go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19
- golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0
- golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
- golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba
- google.golang.org/grpc v1.37.0
- gopkg.in/yaml.v2 v2.4.0
+ github.com/stretchr/testify v1.10.0
+ go.etcd.io/etcd/api/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/client/v2 v2.306.0-alpha.0
+ go.etcd.io/etcd/client/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/etcdctl/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/etcdutl/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0
+ go.etcd.io/etcd/server/v3 v3.6.0-alpha.0
+ go.etcd.io/gofail v0.2.0
+ go.etcd.io/raft/v3 v3.6.0-beta.0
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0
+ go.opentelemetry.io/otel v1.33.0
+ go.opentelemetry.io/otel/sdk v1.33.0
+ go.opentelemetry.io/otel/trace v1.33.0
+ go.opentelemetry.io/proto/otlp v1.4.0
+ go.uber.org/zap v1.27.0
+ golang.org/x/crypto v0.31.0
+ golang.org/x/sync v0.10.0
+ golang.org/x/time v0.8.0
+ google.golang.org/grpc v1.69.2
+ google.golang.org/protobuf v1.36.1
+)
+
+require (
+ github.com/VividCortex/ewma v1.2.0 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/bgentry/speakeasy v0.2.0 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/cheggaaa/pb/v3 v3.1.5 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+ github.com/creack/pty v1.1.18 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/dustin/go-humanize v1.0.1 // indirect
+ github.com/fatih/color v1.18.0 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang-jwt/jwt/v4 v4.5.1 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/google/btree v1.1.3 // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/gorilla/websocket v1.4.2 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/jonboulle/clockwork v0.4.0 // indirect
+ github.com/klauspost/compress v1.17.9 // indirect
+ github.com/mattn/go-colorable v0.1.13 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mattn/go-runewidth v0.0.16 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/olekukonko/tablewriter v0.0.5 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/prometheus/client_model v0.6.1 // indirect
+ github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/rivo/uniseg v0.4.7 // indirect
+ github.com/sirupsen/logrus v1.9.3 // indirect
+ github.com/spf13/cobra v1.8.1 // indirect
+ github.com/spf13/pflag v1.0.5 // indirect
+ github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect
+ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
+ go.etcd.io/bbolt v1.4.0-beta.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect
+ go.opentelemetry.io/otel/metric v1.33.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ golang.org/x/net v0.33.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/text v0.21.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb // indirect
+ gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+ sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
+ sigs.k8s.io/yaml v1.4.0 // indirect
)
diff --git a/tests/go.sum b/tests/go.sum
index 25a535a4cd7..1e7e5ddad27 100644
--- a/tests/go.sum
+++ b/tests/go.sum
@@ -1,529 +1,272 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg=
-github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
+github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
+github.com/anishathalye/porcupine v0.1.4 h1:rRekB2jH1mbtLPEzuqyMHp4scU52Bcc1jgkPi1kWFQA=
+github.com/anishathalye/porcupine v0.1.4/go.mod h1:/X9OQYnVb7DzfKCQVO4tI1Aq+o56UJW+RvN/5U4EuZA=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/bgentry/speakeasy v0.2.0 h1:tgObeVOf8WAvtuAX6DhJ4xks4CFNwPDZiqzGqIHE51E=
+github.com/bgentry/speakeasy v0.2.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cheggaaa/pb/v3 v3.1.5 h1:QuuUzeM2WsAqG2gMqtzaWithDJv0i+i6UlnwSCI4QLk=
+github.com/cheggaaa/pb/v3 v3.1.5/go.mod h1:CrxkeghYTXi1lQBEI7jSn+3svI3cuc19haAj6jM60XI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
-github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs=
-github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.3.1 h1:7OO2CXWMYNDdaAzP51t4lCCZWwpQHmvPbm9sxWjm3So=
-github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
-github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
+github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca h1:Y2I0lxOttdUKz+hNaIdG3FtjuQrTmwXun1opRV65IZc=
-github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca/go.mod h1:49H/RkXP8pKaZy4h0d+NW16rSLhyVBt4o6VLJbmOqDE=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
-github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
+github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
-github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
+github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA=
-github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
+github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
-github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.6-0.20210426205525-9c92be978ae0 h1:FPuyGXkE6qPKJ71PyS0sdXuxUvYGXAXxV0XHpx0qjHE=
-go.etcd.io/bbolt v1.3.6-0.20210426205525-9c92be978ae0/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
-go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 h1:sO4WKdPAudZGKPcpZT4MJn6JaDmpyLrMPDGGyA1SttE=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
-go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g=
-go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
-go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8=
-go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
-go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw=
-go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
-go.opentelemetry.io/otel/sdk v0.20.0 h1:JsxtGXd06J8jrnya7fdI/U/MR6yXA5DtbZy+qoHQlr8=
-go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0 h1:c5VRjxCXdQlx1HjzwGdQHzZaVI82b5EbBgOu2ljD92g=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
-go.opentelemetry.io/otel/sdk/metric v0.20.0 h1:7ao1wpzHRVKf0OQ7GIxiQJA6X7DLX9o14gmVon7mMK8=
-go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
-go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw=
-go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
-go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.etcd.io/bbolt v1.4.0-beta.0 h1:U7Y9yH6ZojEo5/BDFMXDXD1RNx9L7iKxudzqR68jLaM=
+go.etcd.io/bbolt v1.4.0-beta.0/go.mod h1:Qv5yHB6jkQESXT/uVfxJgUPMqgAyhL0GLxcQaz9bSec=
+go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA=
+go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o=
+go.etcd.io/raft/v3 v3.6.0-beta.0 h1:MZFQVjCQxPJj5K9oS69Y+atNvYnGNyOQBnroTdw56jQ=
+go.etcd.io/raft/v3 v3.6.0-beta.0/go.mod h1:C2JoekRXfvImSrk5GnqD0aZ3a+cGVRnyem9qqn2DCEw=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
+go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
+go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
+go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
+go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
+go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
+go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
+go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
+go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
+go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19 h1:040c3dLNhgFQkoojH2AMpHCy4SrvhmxdU72d9GLGGE0=
-go.uber.org/zap v1.16.1-0.20210329175301-c23abee72d19/go.mod h1:aMfIlz3TDBfB0BwTCKFU1XbEmj9zevr5S5LcBr85MXw=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 h1:F5Gozwx4I1xtr/sr/8CFbb57iKi3297KFs0QDbGN60A=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
+golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb h1:B7GIB7sr443wZ/EAEl7VZjmh1V6qzkt5V+RYcUYtS1U=
+google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:E5//3O5ZIG2l71Xnt+P/CYUY8Bxs8E7WMoZ9tlcMbAY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb h1:3oy2tynMOP1QbTC0MsNNAV+Se8M2Bd0A5+x1QHyw+pI=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
+google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
+google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
+sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/tests/integration/client/client_test.go b/tests/integration/client/client_test.go
deleted file mode 100644
index 630344cb877..00000000000
--- a/tests/integration/client/client_test.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package integration
-
-import (
- "context"
- "fmt"
- "net/http"
- "net/http/httptest"
- "os"
- "strings"
- "sync/atomic"
- "testing"
-
- "go.etcd.io/etcd/client/v2"
- "go.etcd.io/etcd/tests/v3/integration"
-)
-
-// TestV2NoRetryEOF tests destructive api calls won't retry on a disconnection.
-func TestV2NoRetryEOF(t *testing.T) {
- integration.BeforeTest(t)
- // generate an EOF response; specify address so appears first in sorted ep list
- lEOF := integration.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid()))
- defer lEOF.Close()
- tries := uint32(0)
- go func() {
- for {
- conn, err := lEOF.Accept()
- if err != nil {
- return
- }
- atomic.AddUint32(&tries, 1)
- conn.Close()
- }
- }()
- eofURL := integration.URLScheme + "://" + lEOF.Addr().String()
- cli := integration.MustNewHTTPClient(t, []string{eofURL, eofURL}, nil)
- kapi := client.NewKeysAPI(cli)
- for i, f := range noRetryList(kapi) {
- startTries := atomic.LoadUint32(&tries)
- if err := f(); err == nil {
- t.Errorf("#%d: expected EOF error, got nil", i)
- }
- endTries := atomic.LoadUint32(&tries)
- if startTries+1 != endTries {
- t.Errorf("#%d: expected 1 try, got %d", i, endTries-startTries)
- }
- }
-}
-
-// TestV2NoRetryNoLeader tests destructive api calls won't retry if given an error code.
-func TestV2NoRetryNoLeader(t *testing.T) {
- integration.BeforeTest(t)
- lHTTP := integration.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid()))
- eh := &errHandler{errCode: http.StatusServiceUnavailable}
- srv := httptest.NewUnstartedServer(eh)
- defer lHTTP.Close()
- defer srv.Close()
- srv.Listener = lHTTP
- go srv.Start()
- lHTTPURL := integration.URLScheme + "://" + lHTTP.Addr().String()
-
- cli := integration.MustNewHTTPClient(t, []string{lHTTPURL, lHTTPURL}, nil)
- kapi := client.NewKeysAPI(cli)
- // test error code
- for i, f := range noRetryList(kapi) {
- reqs := eh.reqs
- if err := f(); err == nil || !strings.Contains(err.Error(), "no leader") {
- t.Errorf("#%d: expected \"no leader\", got %v", i, err)
- }
- if eh.reqs != reqs+1 {
- t.Errorf("#%d: expected 1 request, got %d", i, eh.reqs-reqs)
- }
- }
-}
-
-// TestV2RetryRefuse tests destructive api calls will retry if a connection is refused.
-func TestV2RetryRefuse(t *testing.T) {
- integration.BeforeTest(t)
- cl := integration.NewCluster(t, 1)
- cl.Launch(t)
- defer cl.Terminate(t)
- // test connection refused; expect no error failover
- cli := integration.MustNewHTTPClient(t, []string{integration.URLScheme + "://refuseconn:123", cl.URL(0)}, nil)
- kapi := client.NewKeysAPI(cli)
- if _, err := kapi.Set(context.Background(), "/delkey", "def", nil); err != nil {
- t.Fatal(err)
- }
- for i, f := range noRetryList(kapi) {
- if err := f(); err != nil {
- t.Errorf("#%d: unexpected retry failure (%v)", i, err)
- }
- }
-}
-
-type errHandler struct {
- errCode int
- reqs int
-}
-
-func (eh *errHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- req.Body.Close()
- eh.reqs++
- w.WriteHeader(eh.errCode)
-}
-
-func noRetryList(kapi client.KeysAPI) []func() error {
- return []func() error{
- func() error {
- opts := &client.SetOptions{PrevExist: client.PrevNoExist}
- _, err := kapi.Set(context.Background(), "/setkey", "bar", opts)
- return err
- },
- func() error {
- _, err := kapi.Delete(context.Background(), "/delkey", nil)
- return err
- },
- }
-}
diff --git a/tests/integration/client/doc.go b/tests/integration/client/doc.go
deleted file mode 100644
index e9c58d67f5c..00000000000
--- a/tests/integration/client/doc.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package integration implements tests built upon embedded etcd, focusing on
-// the correctness of the etcd v2 client.
-package integration
diff --git a/tests/integration/client/examples/example_keys_test.go b/tests/integration/client/examples/example_keys_test.go
deleted file mode 100644
index 87a76769f0a..00000000000
--- a/tests/integration/client/examples/example_keys_test.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client_test
-
-import (
- "context"
- "fmt"
- "log"
- "sort"
-
- "go.etcd.io/etcd/client/v2"
-)
-
-func mockKeysAPI_directory() {
- // TODO: Replace with proper mocking
- fmt.Println(`Key: "/myNodes/key1", Value: "value1"`)
- fmt.Println(`Key: "/myNodes/key2", Value: "value2"`)
-}
-
-func ExampleKeysAPI_directory() {
- forUnitTestsRunInMockedContext(
- mockKeysAPI_directory,
- func() {
- c, err := client.New(client.Config{
- Endpoints: exampleEndpoints(),
- Transport: exampleTransport(),
- })
- if err != nil {
- log.Fatal(err)
- }
- kapi := client.NewKeysAPI(c)
-
- // Setting '/myNodes' to create a directory that will hold some keys.
- o := client.SetOptions{Dir: true}
- resp, err := kapi.Set(context.Background(), "/myNodes", "", &o)
- if err != nil {
- log.Fatal(err)
- }
-
- // Add keys to /myNodes directory.
- resp, err = kapi.Set(context.Background(), "/myNodes/key1", "value1", nil)
- if err != nil {
- log.Fatal(err)
- }
- resp, err = kapi.Set(context.Background(), "/myNodes/key2", "value2", nil)
- if err != nil {
- log.Fatal(err)
- }
-
- // fetch directory
- resp, err = kapi.Get(context.Background(), "/myNodes", nil)
- if err != nil {
- log.Fatal(err)
- }
- // print directory keys
- sort.Sort(resp.Node.Nodes)
- for _, n := range resp.Node.Nodes {
- fmt.Printf("Key: %q, Value: %q\n", n.Key, n.Value)
- }
- })
-
- // Output:
- // Key: "/myNodes/key1", Value: "value1"
- // Key: "/myNodes/key2", Value: "value2"
-}
-
-func mockKeysAPI_setget() {
- fmt.Println(`"/foo" key has "bar" value`)
-}
-
-func ExampleKeysAPI_setget() {
- forUnitTestsRunInMockedContext(
- mockKeysAPI_setget,
- func() {
- c, err := client.New(client.Config{
- Endpoints: exampleEndpoints(),
- Transport: exampleTransport(),
- })
- if err != nil {
- log.Fatal(err)
- }
- kapi := client.NewKeysAPI(c)
-
- // Set key "/foo" to value "bar".
- resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
- if err != nil {
- log.Fatal(err)
- }
- // Get key "/foo"
- resp, err = kapi.Get(context.Background(), "/foo", nil)
- if err != nil {
- log.Fatal(err)
- }
-
- fmt.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
- })
-
- // Output: "/foo" key has "bar" value
-}
diff --git a/tests/integration/client/examples/main_test.go b/tests/integration/client/examples/main_test.go
deleted file mode 100644
index 4323e95806b..00000000000
--- a/tests/integration/client/examples/main_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client_test
-
-import (
- "net/http"
- "os"
- "testing"
-
- "go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/tests/v3/integration"
-)
-
-var lazyCluster = integration.NewLazyCluster()
-
-func exampleEndpoints() []string { return lazyCluster.EndpointsV2() }
-func exampleTransport() *http.Transport { return lazyCluster.Transport() }
-
-func forUnitTestsRunInMockedContext(mocking func(), example func()) {
- // For integration tests runs in the provided environment
- example()
-}
-
-// TestMain sets up an etcd cluster if running the examples.
-func TestMain(m *testing.M) {
- v := m.Run()
- lazyCluster.Terminate()
- if v == 0 {
- testutil.MustCheckLeakedGoroutine()
- }
- os.Exit(v)
-}
diff --git a/tests/integration/client/main_test.go b/tests/integration/client/main_test.go
deleted file mode 100644
index e783205834f..00000000000
--- a/tests/integration/client/main_test.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package integration
-
-import (
- "testing"
-
- "go.etcd.io/etcd/client/pkg/v3/testutil"
-)
-
-func TestMain(m *testing.M) {
- testutil.MustTestMainWithLeakDetection(m)
-}
diff --git a/tests/integration/clientv3/cluster_test.go b/tests/integration/clientv3/cluster_test.go
index eff15cf7d2d..7de4d0ff8bc 100644
--- a/tests/integration/clientv3/cluster_test.go
+++ b/tests/integration/clientv3/cluster_test.go
@@ -16,6 +16,7 @@ package clientv3test
import (
"context"
+ "fmt"
"math/rand"
"reflect"
"strings"
@@ -23,13 +24,13 @@ import (
"time"
"go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/tests/v3/integration"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestMemberList(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
capi := clus.RandClient()
@@ -45,9 +46,9 @@ func TestMemberList(t *testing.T) {
}
func TestMemberAdd(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
defer clus.Terminate(t)
capi := clus.RandClient()
@@ -64,9 +65,9 @@ func TestMemberAdd(t *testing.T) {
}
func TestMemberAddWithExistingURLs(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
defer clus.Terminate(t)
capi := clus.RandClient()
@@ -88,9 +89,9 @@ func TestMemberAddWithExistingURLs(t *testing.T) {
}
func TestMemberRemove(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
defer clus.Terminate(t)
capi := clus.Client(1)
@@ -126,9 +127,9 @@ func TestMemberRemove(t *testing.T) {
}
func TestMemberUpdate(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
capi := clus.RandClient()
@@ -154,9 +155,9 @@ func TestMemberUpdate(t *testing.T) {
}
func TestMemberAddUpdateWrongURLs(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
capi := clus.RandClient()
@@ -187,9 +188,9 @@ func TestMemberAddUpdateWrongURLs(t *testing.T) {
}
func TestMemberAddForLearner(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
defer clus.Terminate(t)
capi := clus.RandClient()
@@ -216,9 +217,9 @@ func TestMemberAddForLearner(t *testing.T) {
}
func TestMemberPromote(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
defer clus.Terminate(t)
// member promote request can be sent to any server in cluster,
@@ -229,7 +230,8 @@ func TestMemberPromote(t *testing.T) {
followerIdx := (leaderIdx + 1) % 3
capi := clus.Client(followerIdx)
- urls := []string{"http://127.0.0.1:1234"}
+ learnerMember := clus.MustNewMember(t)
+ urls := learnerMember.PeerURLs.StringSlice()
memberAddResp, err := capi.MemberAddAsLearner(context.Background(), urls)
if err != nil {
t.Fatalf("failed to add member %v", err)
@@ -261,11 +263,11 @@ func TestMemberPromote(t *testing.T) {
t.Fatalf("expecting error to contain %s, got %s", expectedErrKeywords, err.Error())
}
- // create and launch learner member based on the response of V3 Member Add API.
+ // Initialize and launch learner member based on the response of V3 Member Add API.
// (the response has information on peer urls of the existing members in cluster)
- learnerMember := clus.MustNewMember(t, memberAddResp)
+ clus.InitializeMemberWithResponse(t, learnerMember, memberAddResp)
- if err := learnerMember.Launch(); err != nil {
+ if err = learnerMember.Launch(); err != nil {
t.Fatal(err)
}
@@ -293,9 +295,9 @@ func TestMemberPromote(t *testing.T) {
// TestMemberPromoteMemberNotLearner ensures that promoting a voting member fails.
func TestMemberPromoteMemberNotLearner(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t, integration2.WithFailpoint("raftBeforeAdvance", `sleep(100)`))
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
// member promote request can be sent to any server in cluster,
@@ -329,9 +331,9 @@ func TestMemberPromoteMemberNotLearner(t *testing.T) {
// TestMemberPromoteMemberNotExist ensures that promoting a member that does not exist in cluster fails.
func TestMemberPromoteMemberNotExist(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
// member promote request can be sent to any server in cluster,
@@ -376,18 +378,30 @@ func TestMemberPromoteMemberNotExist(t *testing.T) {
}
}
-// TestMaxLearnerInCluster verifies that the maximum number of learners allowed in a cluster is 1
+// TestMaxLearnerInCluster verifies that the maximum number of learners allowed in a cluster is enforced
func TestMaxLearnerInCluster(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t, integration2.WithFailpoint("raftBeforeAdvance", `sleep(100)`))
- // 1. start with a cluster with 3 voting member and 0 learner member
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ // 1. start with a cluster with 3 voting members and max learners set to 2
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, ExperimentalMaxLearners: 2, DisableStrictReconfigCheck: true})
defer clus.Terminate(t)
- // 2. adding a learner member should succeed
- resp1, err := clus.Client(0).MemberAddAsLearner(context.Background(), []string{"http://127.0.0.1:1234"})
+ // 2. adding 2 learner members should succeed
+ for i := 0; i < 2; i++ {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ _, err := clus.Client(0).MemberAddAsLearner(ctx, []string{fmt.Sprintf("http://127.0.0.1:123%d", i)})
+ cancel()
+ if err != nil {
+ t.Fatalf("failed to add learner member %v", err)
+ }
+ }
+
+ // ensure client endpoint is voting member
+ leaderIdx := clus.WaitLeader(t)
+ capi := clus.Client(leaderIdx)
+ resp1, err := capi.MemberList(context.Background())
if err != nil {
- t.Fatalf("failed to add learner member %v", err)
+ t.Fatalf("failed to get member list")
}
numberOfLearners := 0
for _, m := range resp1.Members {
@@ -395,12 +409,12 @@ func TestMaxLearnerInCluster(t *testing.T) {
numberOfLearners++
}
}
- if numberOfLearners != 1 {
- t.Fatalf("Added 1 learner node to cluster, got %d", numberOfLearners)
+ if numberOfLearners != 2 {
+ t.Fatalf("added 2 learner node to cluster, got %d", numberOfLearners)
}
- // 3. cluster has 3 voting member and 1 learner, adding another learner should fail
- _, err = clus.Client(0).MemberAddAsLearner(context.Background(), []string{"http://127.0.0.1:2345"})
+ // 3. cluster has 3 voting members and 2 learners, adding another learner should fail
+ _, err = clus.Client(0).MemberAddAsLearner(context.Background(), []string{"http://127.0.0.1:2342"})
if err == nil {
t.Fatalf("expect member add to fail, got no error")
}
@@ -410,7 +424,7 @@ func TestMaxLearnerInCluster(t *testing.T) {
}
// 4. cluster has 3 voting member and 1 learner, adding a voting member should succeed
- _, err = clus.Client(0).MemberAdd(context.Background(), []string{"http://127.0.0.1:3456"})
+ _, err = clus.Client(0).MemberAdd(context.Background(), []string{"http://127.0.0.1:3453"})
if err != nil {
t.Errorf("failed to add member %v", err)
}
diff --git a/tests/integration/clientv3/concurrency/election_test.go b/tests/integration/clientv3/concurrency/election_test.go
index 650bdc01546..951c6a91fbf 100644
--- a/tests/integration/clientv3/concurrency/election_test.go
+++ b/tests/integration/clientv3/concurrency/election_test.go
@@ -21,15 +21,15 @@ import (
"testing"
"time"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
- "go.etcd.io/etcd/tests/v3/integration"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestResumeElection(t *testing.T) {
const prefix = "/resume-election/"
- cli, err := integration.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()})
+ cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()})
if err != nil {
log.Fatal(err)
}
diff --git a/tests/integration/clientv3/concurrency/example_election_test.go b/tests/integration/clientv3/concurrency/example_election_test.go
index 7c9e968eb20..e3a448e36c6 100644
--- a/tests/integration/clientv3/concurrency/example_election_test.go
+++ b/tests/integration/clientv3/concurrency/example_election_test.go
@@ -21,18 +21,18 @@ import (
"sync"
"time"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
)
-func mockElection_Campaign() {
+func mockElectionCampaign() {
fmt.Println("completed first election with e2")
fmt.Println("completed second election with e1")
}
func ExampleElection_Campaign() {
forUnitTestsRunInMockedContext(
- mockElection_Campaign,
+ mockElectionCampaign,
func() {
cli, err := clientv3.New(clientv3.Config{Endpoints: exampleEndpoints()})
if err != nil {
diff --git a/tests/integration/clientv3/concurrency/example_mutex_test.go b/tests/integration/clientv3/concurrency/example_mutex_test.go
index 671f464d70a..eba8c543f2c 100644
--- a/tests/integration/clientv3/concurrency/example_mutex_test.go
+++ b/tests/integration/clientv3/concurrency/example_mutex_test.go
@@ -16,14 +16,15 @@ package concurrency_test
import (
"context"
+ "errors"
"fmt"
"log"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
)
-func mockMutex_TryLock() {
+func mockMutexTryLock() {
fmt.Println("acquired lock for s1")
fmt.Println("cannot acquire lock for s2, as already locked in another session")
fmt.Println("released lock for s1")
@@ -32,7 +33,7 @@ func mockMutex_TryLock() {
func ExampleMutex_TryLock() {
forUnitTestsRunInMockedContext(
- mockMutex_TryLock,
+ mockMutexTryLock,
func() {
cli, err := clientv3.New(clientv3.Config{Endpoints: exampleEndpoints()})
if err != nil {
@@ -64,7 +65,7 @@ func ExampleMutex_TryLock() {
if err = m2.TryLock(context.TODO()); err == nil {
log.Fatal("should not acquire lock")
}
- if err == concurrency.ErrLocked {
+ if errors.Is(err, concurrency.ErrLocked) {
fmt.Println("cannot acquire lock for s2, as already locked in another session")
}
@@ -85,7 +86,7 @@ func ExampleMutex_TryLock() {
// acquired lock for s2
}
-func mockMutex_Lock() {
+func mockMutexLock() {
fmt.Println("acquired lock for s1")
fmt.Println("released lock for s1")
fmt.Println("acquired lock for s2")
@@ -93,7 +94,7 @@ func mockMutex_Lock() {
func ExampleMutex_Lock() {
forUnitTestsRunInMockedContext(
- mockMutex_Lock,
+ mockMutexLock,
func() {
cli, err := clientv3.New(clientv3.Config{Endpoints: exampleEndpoints()})
if err != nil {
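The TryLock example above now distinguishes a held lock by checking the sentinel with errors.Is against concurrency.ErrLocked. A condensed sketch of the same pattern, assuming a reachable placeholder endpoint:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

func main() {
	// Placeholder endpoint; a running cluster is assumed.
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	s1, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s1.Close()
	s2, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s2.Close()

	m1 := concurrency.NewMutex(s1, "/my-lock/")
	m2 := concurrency.NewMutex(s2, "/my-lock/")

	// s1 acquires the lock; s2's TryLock fails fast with ErrLocked.
	if err := m1.TryLock(context.TODO()); err != nil {
		log.Fatal(err)
	}
	if err := m2.TryLock(context.TODO()); errors.Is(err, concurrency.ErrLocked) {
		fmt.Println("cannot acquire lock for s2, as already locked in another session")
	}
	if err := m1.Unlock(context.TODO()); err != nil {
		log.Fatal(err)
	}
	fmt.Println("released lock for s1")
}
```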
diff --git a/tests/integration/clientv3/concurrency/example_stm_test.go b/tests/integration/clientv3/concurrency/example_stm_test.go
index 59dcf535754..074976b9496 100644
--- a/tests/integration/clientv3/concurrency/example_stm_test.go
+++ b/tests/integration/clientv3/concurrency/example_stm_test.go
@@ -21,11 +21,11 @@ import (
"math/rand"
"sync"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
)
-func mockSTM_apply() {
+func mockSTMApply() {
fmt.Println("account sum is 500")
}
@@ -33,7 +33,7 @@ func mockSTM_apply() {
// transfer between balances.
func ExampleSTM_apply() {
forUnitTestsRunInMockedContext(
- mockSTM_apply,
+ mockSTMApply,
func() {
cli, err := clientv3.New(clientv3.Config{Endpoints: exampleEndpoints()})
if err != nil {
@@ -50,11 +50,11 @@ func ExampleSTM_apply() {
}
}
- exchange := func(stm concurrency.STM) error {
+ exchange := func(stm concurrency.STM) {
from, to := rand.Intn(totalAccounts), rand.Intn(totalAccounts)
if from == to {
// nothing to do
- return nil
+ return
}
// read values
fromK, toK := fmt.Sprintf("accts/%d", from), fmt.Sprintf("accts/%d", to)
@@ -70,7 +70,6 @@ func ExampleSTM_apply() {
// write back
stm.Put(fromK, fmt.Sprintf("%d", fromInt))
stm.Put(toK, fmt.Sprintf("%d", toInt))
- return nil
}
// concurrently exchange values between accounts
@@ -79,7 +78,10 @@ func ExampleSTM_apply() {
for i := 0; i < 10; i++ {
go func() {
defer wg.Done()
- if _, serr := concurrency.NewSTM(cli, exchange); serr != nil {
+ if _, serr := concurrency.NewSTM(cli, func(stm concurrency.STM) error {
+ exchange(stm)
+ return nil
+ }); serr != nil {
log.Fatal(serr)
}
}()
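The STM refactor above wraps the exchange logic in a func(concurrency.STM) error closure passed to concurrency.NewSTM, which re-runs the apply function when it detects a conflicting concurrent write. A small sketch of that usage with hypothetical account keys:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"strconv"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

func main() {
	// Placeholder endpoint; a running cluster is assumed.
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Seed two hypothetical account keys.
	for _, k := range []string{"accts/0", "accts/1"} {
		if _, err := cli.Put(context.TODO(), k, "100"); err != nil {
			log.Fatal(err)
		}
	}

	// Move 10 units from accts/0 to accts/1 inside one STM transaction;
	// the apply function is retried automatically on conflicts.
	_, err = concurrency.NewSTM(cli, func(stm concurrency.STM) error {
		from, _ := strconv.Atoi(stm.Get("accts/0"))
		to, _ := strconv.Atoi(stm.Get("accts/1"))
		stm.Put("accts/0", strconv.Itoa(from-10))
		stm.Put("accts/1", strconv.Itoa(to+10))
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("transfer committed")
}
```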
diff --git a/tests/integration/clientv3/concurrency/main_test.go b/tests/integration/clientv3/concurrency/main_test.go
index c54df6b4b0f..c15ae1704dd 100644
--- a/tests/integration/clientv3/concurrency/main_test.go
+++ b/tests/integration/clientv3/concurrency/main_test.go
@@ -24,21 +24,22 @@ import (
var lazyCluster = integration.NewLazyCluster()
-func exampleEndpoints() []string { return lazyCluster.EndpointsV3() }
+func exampleEndpoints() []string { return lazyCluster.EndpointsGRPC() }
-func forUnitTestsRunInMockedContext(mocking func(), example func()) {
+func forUnitTestsRunInMockedContext(_mocking func(), example func()) {
// For integration tests runs in the provided environment
example()
}
// TestMain sets up an etcd cluster if running the examples.
func TestMain(m *testing.M) {
- testutil.ExitInShortMode("Skipping: the tests require real cluster")
+ cleanup := testutil.BeforeIntegrationExamples(m)
v := m.Run()
lazyCluster.Terminate()
if v == 0 {
testutil.MustCheckLeakedGoroutine()
}
+ cleanup()
os.Exit(v)
}
diff --git a/tests/integration/clientv3/concurrency/mutex_test.go b/tests/integration/clientv3/concurrency/mutex_test.go
index 1d264bf4eab..bf5b187686f 100644
--- a/tests/integration/clientv3/concurrency/mutex_test.go
+++ b/tests/integration/clientv3/concurrency/mutex_test.go
@@ -16,15 +16,16 @@ package concurrency_test
import (
"context"
+ "errors"
"testing"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
- "go.etcd.io/etcd/tests/v3/integration"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestMutexLockSessionExpired(t *testing.T) {
- cli, err := integration.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()})
+ cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()})
if err != nil {
t.Fatal(err)
}
@@ -45,7 +46,7 @@ func TestMutexLockSessionExpired(t *testing.T) {
m2 := concurrency.NewMutex(s2, "/my-lock/")
// acquire lock for s1
- if err := m1.Lock(context.TODO()); err != nil {
+ if err = m1.Lock(context.TODO()); err != nil {
t.Fatal(err)
}
@@ -70,3 +71,42 @@ func TestMutexLockSessionExpired(t *testing.T) {
<-m2Locked
}
+
+func TestMutexUnlock(t *testing.T) {
+ cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer cli.Close()
+
+ s1, err := concurrency.NewSession(cli)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer s1.Close()
+
+ m1 := concurrency.NewMutex(s1, "/my-lock/")
+ err = m1.Unlock(context.TODO())
+ if err == nil {
+ t.Fatal("expect lock released error")
+ }
+ if !errors.Is(err, concurrency.ErrLockReleased) {
+ t.Fatal(err)
+ }
+
+ if err = m1.Lock(context.TODO()); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = m1.Unlock(context.TODO()); err != nil {
+ t.Fatal(err)
+ }
+
+ err = m1.Unlock(context.TODO())
+ if err == nil {
+ t.Fatal("expect lock released error")
+ }
+ if !errors.Is(err, concurrency.ErrLockReleased) {
+ t.Fatal(err)
+ }
+}
diff --git a/tests/integration/clientv3/concurrency/session_test.go b/tests/integration/clientv3/concurrency/session_test.go
new file mode 100644
index 00000000000..dff508972a2
--- /dev/null
+++ b/tests/integration/clientv3/concurrency/session_test.go
@@ -0,0 +1,112 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency_test
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/client/v3/concurrency"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
+)
+
+func TestSessionOptions(t *testing.T) {
+ cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer cli.Close()
+ lease, err := cli.Grant(context.Background(), 100)
+ if err != nil {
+ t.Fatal(err)
+ }
+ s, err := concurrency.NewSession(cli, concurrency.WithLease(lease.ID))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer s.Close()
+ assert.Equal(t, s.Lease(), lease.ID)
+
+ go s.Orphan()
+ select {
+ case <-s.Done():
+ case <-time.After(time.Millisecond * 100):
+ t.Fatal("session did not get orphaned as expected")
+ }
+}
+
+func TestSessionTTLOptions(t *testing.T) {
+ cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer cli.Close()
+
+ setTTL := 90
+ s, err := concurrency.NewSession(cli, concurrency.WithTTL(setTTL))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer s.Close()
+
+ leaseID := s.Lease()
+ // TTL retrieved should be less than the set TTL, but not equal to the default (60) or expired (-1)
+ resp, err := cli.Lease.TimeToLive(context.Background(), leaseID)
+ if err != nil {
+ t.Log(err)
+ }
+ if resp.TTL == -1 {
+ t.Errorf("client lease should not be expired: %d", resp.TTL)
+ }
+ if resp.TTL == 60 {
+ t.Errorf("default TTL value is used in the session, instead of set TTL: %d", setTTL)
+ }
+ if resp.TTL >= int64(setTTL) || resp.TTL < int64(setTTL)-20 {
+ t.Errorf("Session TTL from lease should be less, but close to set TTL %d, have: %d", setTTL, resp.TTL)
+ }
+}
+
+func TestSessionCtx(t *testing.T) {
+ cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer cli.Close()
+ lease, err := cli.Grant(context.Background(), 100)
+ if err != nil {
+ t.Fatal(err)
+ }
+ s, err := concurrency.NewSession(cli, concurrency.WithLease(lease.ID))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer s.Close()
+ assert.Equal(t, s.Lease(), lease.ID)
+
+ childCtx, cancel := context.WithCancel(s.Ctx())
+ defer cancel()
+
+ go s.Orphan()
+ select {
+ case <-childCtx.Done():
+ case <-time.After(time.Millisecond * 100):
+ t.Fatal("child context of session context is not canceled")
+ }
+ assert.Equal(t, childCtx.Err(), context.Canceled)
+}
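The new session tests cover concurrency.WithLease, WithTTL, Orphan, and the session context. A brief sketch of binding a session to a pre-granted lease and waiting on its Done channel, with a placeholder endpoint:

```go
package main

import (
	"context"
	"fmt"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

func main() {
	// Placeholder endpoint; a running cluster is assumed.
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Grant a 100-second lease and attach the session to it, as the test does.
	lease, err := cli.Grant(context.Background(), 100)
	if err != nil {
		log.Fatal(err)
	}
	s, err := concurrency.NewSession(cli, concurrency.WithLease(lease.ID))
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()
	fmt.Println("session lease:", s.Lease())

	// Orphan stops the keepalive; Done() is closed once the session
	// is no longer refreshing the lease.
	s.Orphan()
	<-s.Done()
	fmt.Println("session done")
}
```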
diff --git a/tests/integration/clientv3/connectivity/black_hole_test.go b/tests/integration/clientv3/connectivity/black_hole_test.go
index ff56bbd0955..e1fc1c57b97 100644
--- a/tests/integration/clientv3/connectivity/black_hole_test.go
+++ b/tests/integration/clientv3/connectivity/black_hole_test.go
@@ -13,35 +13,37 @@
// limitations under the License.
//go:build !cluster_proxy
-// +build !cluster_proxy
package connectivity_test
import (
"context"
+ "errors"
"testing"
"time"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/tests/v3/integration"
- "go.etcd.io/etcd/tests/v3/integration/clientv3"
"google.golang.org/grpc"
+
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
+ clientv3test "go.etcd.io/etcd/tests/v3/integration/clientv3"
)
// TestBalancerUnderBlackholeKeepAliveWatch tests when watch discovers it cannot talk to
// blackholed endpoint, client balancer switches to healthy one.
// TODO: test server-to-client keepalive ping
func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{
Size: 2,
GRPCKeepAliveMinTime: time.Millisecond, // avoid too_many_pings
+ UseBridge: true,
})
defer clus.Terminate(t)
- eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()}
+ eps := []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL}
ccfg := clientv3.Config{
Endpoints: []string{eps[0]},
@@ -57,9 +59,9 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
// TODO: only send healthy endpoint to gRPC so gRPC wont waste time to
// dial for unhealthy endpoint.
// then we can reduce 3s to 1s.
- timeout := pingInterval + integration.RequestWaitTimeout
+ timeout := pingInterval + integration2.RequestWaitTimeout
- cli, err := integration.NewClient(t, ccfg)
+ cli, err := integration2.NewClient(t, ccfg)
if err != nil {
t.Fatal(err)
}
@@ -76,7 +78,7 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
// give enough time for balancer resolution
time.Sleep(5 * time.Second)
- clus.Members[0].Blackhole()
+ clus.Members[0].Bridge().Blackhole()
if _, err = clus.Client(1).Put(context.TODO(), "foo", "bar"); err != nil {
t.Fatal(err)
@@ -87,12 +89,12 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
t.Error("took too long to receive watch events")
}
- clus.Members[0].Unblackhole()
+ clus.Members[0].Bridge().Unblackhole()
 // wait for eps[0] to move out of the unhealthy set so that it can be re-pinned.
time.Sleep(ccfg.DialTimeout)
- clus.Members[1].Blackhole()
+ clus.Members[1].Bridge().Blackhole()
// make sure client[0] can connect to eps[0] after remove the blackhole.
if _, err = clus.Client(0).Get(context.TODO(), "foo"); err != nil {
@@ -112,7 +114,7 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
func TestBalancerUnderBlackholeNoKeepAlivePut(t *testing.T) {
testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Put(ctx, "foo", "bar")
- if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || err == rpctypes.ErrTimeout {
+ if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || errors.Is(err, rpctypes.ErrTimeout) {
return errExpected
}
return err
@@ -122,7 +124,7 @@ func TestBalancerUnderBlackholeNoKeepAlivePut(t *testing.T) {
func TestBalancerUnderBlackholeNoKeepAliveDelete(t *testing.T) {
testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Delete(ctx, "foo")
- if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || err == rpctypes.ErrTimeout {
+ if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || errors.Is(err, rpctypes.ErrTimeout) {
return errExpected
}
return err
@@ -135,7 +137,7 @@ func TestBalancerUnderBlackholeNoKeepAliveTxn(t *testing.T) {
If(clientv3.Compare(clientv3.Version("foo"), "=", 0)).
Then(clientv3.OpPut("foo", "bar")).
Else(clientv3.OpPut("foo", "baz")).Commit()
- if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || err == rpctypes.ErrTimeout {
+ if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || errors.Is(err, rpctypes.ErrTimeout) {
return errExpected
}
return err
@@ -145,7 +147,7 @@ func TestBalancerUnderBlackholeNoKeepAliveTxn(t *testing.T) {
func TestBalancerUnderBlackholeNoKeepAliveLinearizableGet(t *testing.T) {
testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Get(ctx, "a")
- if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || err == rpctypes.ErrTimeout {
+ if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || errors.Is(err, rpctypes.ErrTimeout) {
return errExpected
}
return err
@@ -165,22 +167,22 @@ func TestBalancerUnderBlackholeNoKeepAliveSerializableGet(t *testing.T) {
// testBalancerUnderBlackholeNoKeepAlive ensures that first request to blackholed endpoint
// fails due to context timeout, but succeeds on next try, with endpoint switch.
func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Client, context.Context) error) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{
- Size: 2,
- SkipCreatingClient: true,
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{
+ Size: 2,
+ UseBridge: true,
})
defer clus.Terminate(t)
- eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()}
+ eps := []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL}
ccfg := clientv3.Config{
Endpoints: []string{eps[0]},
DialTimeout: 1 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
}
- cli, err := integration.NewClient(t, ccfg)
+ cli, err := integration2.NewClient(t, ccfg)
if err != nil {
t.Fatal(err)
}
@@ -194,7 +196,7 @@ func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Clien
cli.SetEndpoints(eps...)
// blackhole eps[0]
- clus.Members[0].Blackhole()
+ clus.Members[0].Bridge().Blackhole()
// With round robin balancer, client will make a request to a healthy endpoint
// within a few requests.
@@ -206,7 +208,7 @@ func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Clien
cancel()
if err == nil {
break
- } else if err == errExpected {
+ } else if errors.Is(err, errExpected) {
t.Logf("#%d: current error %v", i, err)
} else {
t.Errorf("#%d: failed with error %v", i, err)
diff --git a/tests/integration/clientv3/connectivity/dial_test.go b/tests/integration/clientv3/connectivity/dial_test.go
index f02ea61aa78..769ce17f4df 100644
--- a/tests/integration/clientv3/connectivity/dial_test.go
+++ b/tests/integration/clientv3/connectivity/dial_test.go
@@ -21,34 +21,36 @@ import (
"testing"
"time"
+ "google.golang.org/grpc"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/pkg/v3/transport"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/tests/v3/integration"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
clientv3test "go.etcd.io/etcd/tests/v3/integration/clientv3"
- "google.golang.org/grpc"
)
var (
testTLSInfo = transport.TLSInfo{
- KeyFile: integration.MustAbsPath("../../../fixtures/server.key.insecure"),
- CertFile: integration.MustAbsPath("../../../fixtures/server.crt"),
- TrustedCAFile: integration.MustAbsPath("../../../fixtures/ca.crt"),
+ KeyFile: testutils.MustAbsPath("../../../fixtures/server.key.insecure"),
+ CertFile: testutils.MustAbsPath("../../../fixtures/server.crt"),
+ TrustedCAFile: testutils.MustAbsPath("../../../fixtures/ca.crt"),
ClientCertAuth: true,
}
testTLSInfoExpired = transport.TLSInfo{
- KeyFile: integration.MustAbsPath("../../fixtures-expired/server.key.insecure"),
- CertFile: integration.MustAbsPath("../../fixtures-expired/server.crt"),
- TrustedCAFile: integration.MustAbsPath("../../fixtures-expired/ca.crt"),
+ KeyFile: testutils.MustAbsPath("../../fixtures-expired/server.key.insecure"),
+ CertFile: testutils.MustAbsPath("../../fixtures-expired/server.crt"),
+ TrustedCAFile: testutils.MustAbsPath("../../fixtures-expired/ca.crt"),
ClientCertAuth: true,
}
)
// TestDialTLSExpired tests client with expired certs fails to dial.
func TestDialTLSExpired(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo, SkipCreatingClient: true})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo})
defer clus.Terminate(t)
tls, err := testTLSInfoExpired.ClientConfig()
@@ -56,8 +58,8 @@ func TestDialTLSExpired(t *testing.T) {
t.Fatal(err)
}
// expect remote errors "tls: bad certificate"
- _, err = integration.NewClient(t, clientv3.Config{
- Endpoints: []string{clus.Members[0].GRPCAddr()},
+ _, err = integration2.NewClient(t, clientv3.Config{
+ Endpoints: []string{clus.Members[0].GRPCURL},
DialTimeout: 3 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
TLS: tls,
@@ -70,12 +72,12 @@ func TestDialTLSExpired(t *testing.T) {
// TestDialTLSNoConfig ensures the client fails to dial / times out
// when TLS endpoints (https, unixs) are given but no tls config.
func TestDialTLSNoConfig(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo, SkipCreatingClient: true})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo})
defer clus.Terminate(t)
// expect "signed by unknown authority"
- c, err := integration.NewClient(t, clientv3.Config{
- Endpoints: []string{clus.Members[0].GRPCAddr()},
+ c, err := integration2.NewClient(t, clientv3.Config{
+ Endpoints: []string{clus.Members[0].GRPCURL},
DialTimeout: time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
})
@@ -101,14 +103,14 @@ func TestDialSetEndpointsAfterFail(t *testing.T) {
// testDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones.
func testDialSetEndpoints(t *testing.T, setBefore bool) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, SkipCreatingClient: true})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
// get endpoint list
eps := make([]string, 3)
for i := range eps {
- eps[i] = clus.Members[i].GRPCAddr()
+ eps[i] = clus.Members[i].GRPCURL
}
toKill := rand.Intn(len(eps))
@@ -117,7 +119,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) {
DialTimeout: 1 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
}
- cli, err := integration.NewClient(t, cfg)
+ cli, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatal(err)
}
@@ -134,7 +136,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) {
cli.SetEndpoints(eps[toKill%3], eps[(toKill+1)%3])
}
time.Sleep(time.Second * 2)
- ctx, cancel := context.WithTimeout(context.Background(), integration.RequestWaitTimeout)
+ ctx, cancel := context.WithTimeout(context.Background(), integration2.RequestWaitTimeout)
if _, err = cli.Get(ctx, "foo", clientv3.WithSerializable()); err != nil {
t.Fatal(err)
}
@@ -144,12 +146,12 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) {
// TestSwitchSetEndpoints ensures SetEndpoints can switch one endpoint
// with a new one that doesn't include original endpoint.
func TestSwitchSetEndpoints(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
// get non partitioned members endpoints
- eps := []string{clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
+ eps := []string{clus.Members[1].GRPCURL, clus.Members[2].GRPCURL}
cli := clus.Client(0)
clus.Members[0].InjectPartition(t, clus.Members[1:]...)
@@ -164,18 +166,18 @@ func TestSwitchSetEndpoints(t *testing.T) {
}
func TestRejectOldCluster(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
// 2 endpoints to test multi-endpoint Status
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, SkipCreatingClient: true})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 2})
defer clus.Terminate(t)
cfg := clientv3.Config{
- Endpoints: []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()},
+ Endpoints: []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL},
DialTimeout: 5 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
RejectOldCluster: true,
}
- cli, err := integration.NewClient(t, cfg)
+ cli, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatal(err)
}
@@ -185,8 +187,8 @@ func TestRejectOldCluster(t *testing.T) {
// TestDialForeignEndpoint checks an endpoint that is not registered
// with the balancer can be dialed.
func TestDialForeignEndpoint(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 2})
defer clus.Terminate(t)
conn, err := clus.Client(0).Dial(clus.Client(1).Endpoints()[0])
@@ -208,11 +210,11 @@ func TestDialForeignEndpoint(t *testing.T) {
// TestSetEndpointAndPut checks that a Put following a SetEndpoints
// to a working endpoint will always succeed.
func TestSetEndpointAndPut(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 2})
defer clus.Terminate(t)
- clus.Client(1).SetEndpoints(clus.Members[0].GRPCAddr())
+ clus.Client(1).SetEndpoints(clus.Members[0].GRPCURL)
_, err := clus.Client(1).Put(context.TODO(), "foo", "bar")
if err != nil && !strings.Contains(err.Error(), "closing") {
t.Fatal(err)
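The dial tests build the client's TLS configuration from transport.TLSInfo via ClientConfig() and pass it through clientv3.Config. A sketch of that wiring with placeholder certificate paths:

```go
package main

import (
	"log"
	"time"

	"go.etcd.io/etcd/client/pkg/v3/transport"
	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	// Certificate paths are placeholders.
	tlsInfo := transport.TLSInfo{
		CertFile:      "fixtures/server.crt",
		KeyFile:       "fixtures/server.key.insecure",
		TrustedCAFile: "fixtures/ca.crt",
	}
	tlsConfig, err := tlsInfo.ClientConfig()
	if err != nil {
		log.Fatal(err)
	}

	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"https://127.0.0.1:2379"},
		DialTimeout: 3 * time.Second,
		TLS:         tlsConfig,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}
```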
diff --git a/tests/integration/clientv3/connectivity/network_partition_test.go b/tests/integration/clientv3/connectivity/network_partition_test.go
index 0e050e2f94e..6c99c32d04c 100644
--- a/tests/integration/clientv3/connectivity/network_partition_test.go
+++ b/tests/integration/clientv3/connectivity/network_partition_test.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build !cluster_proxy
-// +build !cluster_proxy
package connectivity_test
@@ -23,24 +22,29 @@ import (
"testing"
"time"
+ "google.golang.org/grpc"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/tests/v3/integration"
- "go.etcd.io/etcd/tests/v3/integration/clientv3"
- "go.uber.org/zap/zaptest"
- "google.golang.org/grpc"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
+ clientv3test "go.etcd.io/etcd/tests/v3/integration/clientv3"
)
var errExpected = errors.New("expected error")
+func isErrorExpected(err error) bool {
+ return clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) ||
+ errors.Is(err, rpctypes.ErrTimeout) || errors.Is(err, rpctypes.ErrTimeoutDueToLeaderFail)
+}
+
// TestBalancerUnderNetworkPartitionPut tests when one member becomes isolated,
// first Put request fails, and following retry succeeds with client balancer
// switching to others.
func TestBalancerUnderNetworkPartitionPut(t *testing.T) {
testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Put(ctx, "a", "b")
- if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || err == rpctypes.ErrTimeout {
+ if isErrorExpected(err) {
return errExpected
}
return err
@@ -50,7 +54,7 @@ func TestBalancerUnderNetworkPartitionPut(t *testing.T) {
func TestBalancerUnderNetworkPartitionDelete(t *testing.T) {
testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Delete(ctx, "a")
- if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || err == rpctypes.ErrTimeout {
+ if isErrorExpected(err) {
return errExpected
}
return err
@@ -63,7 +67,7 @@ func TestBalancerUnderNetworkPartitionTxn(t *testing.T) {
If(clientv3.Compare(clientv3.Version("foo"), "=", 0)).
Then(clientv3.OpPut("foo", "bar")).
Else(clientv3.OpPut("foo", "baz")).Commit()
- if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || err == rpctypes.ErrTimeout {
+ if isErrorExpected(err) {
return errExpected
}
return err
@@ -76,7 +80,7 @@ func TestBalancerUnderNetworkPartitionTxn(t *testing.T) {
func TestBalancerUnderNetworkPartitionLinearizableGetWithLongTimeout(t *testing.T) {
testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Get(ctx, "a")
- if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || err == rpctypes.ErrTimeout {
+ if isErrorExpected(err) {
return errExpected
}
return err
@@ -104,24 +108,22 @@ func TestBalancerUnderNetworkPartitionSerializableGet(t *testing.T) {
}
func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{
- Size: 3,
- SkipCreatingClient: true,
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{
+ Size: 3,
})
defer clus.Terminate(t)
- eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
+ eps := []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL, clus.Members[2].GRPCURL}
// expect pin eps[0]
ccfg := clientv3.Config{
Endpoints: []string{eps[0]},
DialTimeout: 3 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
- Logger: zaptest.NewLogger(t).Named("client"),
}
- cli, err := integration.NewClient(t, ccfg)
+ cli, err := integration2.NewClient(t, ccfg)
if err != nil {
t.Fatal(err)
}
@@ -143,7 +145,7 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c
if err == nil {
break
}
- if err != errExpected {
+ if !errors.Is(err, errExpected) {
t.Errorf("#%d: expected '%v', got '%v'", i, errExpected, err)
}
// give enough time for endpoint switch
@@ -161,20 +163,19 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c
// switches endpoint when leader fails and linearizable get requests returns
// "etcdserver: request timed out".
func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{
- Size: 3,
- SkipCreatingClient: true,
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{
+ Size: 3,
})
defer clus.Terminate(t)
- eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
+ eps := []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL, clus.Members[2].GRPCURL}
lead := clus.WaitLeader(t)
timeout := 3 * clus.Members[(lead+1)%2].ServerConfig.ReqTimeout()
- cli, err := integration.NewClient(t, clientv3.Config{
+ cli, err := integration2.NewClient(t, clientv3.Config{
Endpoints: []string{eps[(lead+1)%2]},
DialTimeout: 2 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
@@ -216,15 +217,14 @@ func TestBalancerUnderNetworkPartitionWatchFollower(t *testing.T) {
// testBalancerUnderNetworkPartitionWatch ensures watch stream
// to a partitioned node be closed when context requires leader.
func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{
- Size: 3,
- SkipCreatingClient: true,
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{
+ Size: 3,
})
defer clus.Terminate(t)
- eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
+ eps := []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL, clus.Members[2].GRPCURL}
target := clus.WaitLeader(t)
if !isolateLeader {
@@ -232,7 +232,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
}
// pin eps[target]
- watchCli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}})
+ watchCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}})
if err != nil {
t.Fatal(err)
}
@@ -250,7 +250,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
wch := watchCli.Watch(clientv3.WithRequireLeader(context.Background()), "foo", clientv3.WithCreatedNotify())
select {
case <-wch:
- case <-time.After(integration.RequestWaitTimeout):
+ case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("took too long to create watch")
}
@@ -267,31 +267,30 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
if len(ev.Events) != 0 {
t.Fatal("expected no event")
}
- if err = ev.Err(); err != rpctypes.ErrNoLeader {
+ if err = ev.Err(); !errors.Is(err, rpctypes.ErrNoLeader) {
t.Fatalf("expected %v, got %v", rpctypes.ErrNoLeader, err)
}
- case <-time.After(integration.RequestWaitTimeout): // enough time to detect leader lost
+ case <-time.After(integration2.RequestWaitTimeout): // enough time to detect leader lost
t.Fatal("took too long to detect leader lost")
}
}
func TestDropReadUnderNetworkPartition(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{
- Size: 3,
- SkipCreatingClient: true,
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{
+ Size: 3,
})
defer clus.Terminate(t)
leaderIndex := clus.WaitLeader(t)
// get a follower endpoint
- eps := []string{clus.Members[(leaderIndex+1)%3].GRPCAddr()}
+ eps := []string{clus.Members[(leaderIndex+1)%3].GRPCURL}
ccfg := clientv3.Config{
Endpoints: eps,
DialTimeout: 10 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
}
- cli, err := integration.NewClient(t, ccfg)
+ cli, err := integration2.NewClient(t, ccfg)
if err != nil {
t.Fatal(err)
}
@@ -303,7 +302,7 @@ func TestDropReadUnderNetworkPartition(t *testing.T) {
// add other endpoints for later endpoint switch
cli.SetEndpoints(eps...)
time.Sleep(time.Second * 2)
- conn, err := cli.Dial(clus.Members[(leaderIndex+1)%3].GRPCAddr())
+ conn, err := cli.Dial(clus.Members[(leaderIndex+1)%3].GRPCURL)
if err != nil {
t.Fatal(err)
}
@@ -314,7 +313,7 @@ func TestDropReadUnderNetworkPartition(t *testing.T) {
ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second)
_, err = kvc.Get(ctx, "a")
cancel()
- if err != rpctypes.ErrLeaderChanged {
+ if !errors.Is(err, rpctypes.ErrLeaderChanged) {
t.Fatalf("expected %v, got %v", rpctypes.ErrLeaderChanged, err)
}
@@ -323,7 +322,7 @@ func TestDropReadUnderNetworkPartition(t *testing.T) {
_, err = kvc.Get(ctx, "a")
cancel()
if err != nil {
- if err == rpctypes.ErrTimeout {
+ if errors.Is(err, rpctypes.ErrTimeout) {
<-time.After(time.Second)
i++
continue
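The isErrorExpected helper introduced above centralizes the errors.Is checks against the rpctypes sentinel errors that are tolerated while a member is isolated. A self-contained sketch of the same classification style; the helper name here is illustrative, not the one in the tests:

```go
package main

import (
	"errors"
	"fmt"

	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
)

// isRetryableDuringPartition is a hypothetical helper mirroring the shape of
// isErrorExpected: timeout-class errors are expected while a member is isolated.
func isRetryableDuringPartition(err error) bool {
	return errors.Is(err, rpctypes.ErrTimeout) ||
		errors.Is(err, rpctypes.ErrTimeoutDueToLeaderFail) ||
		errors.Is(err, rpctypes.ErrLeaderChanged)
}

func main() {
	fmt.Println(isRetryableDuringPartition(rpctypes.ErrTimeout))         // true
	fmt.Println(isRetryableDuringPartition(errors.New("other failure"))) // false
}
```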
diff --git a/tests/integration/clientv3/connectivity/server_shutdown_test.go b/tests/integration/clientv3/connectivity/server_shutdown_test.go
index fc5f18fb2ca..3afc8eb4c01 100644
--- a/tests/integration/clientv3/connectivity/server_shutdown_test.go
+++ b/tests/integration/clientv3/connectivity/server_shutdown_test.go
@@ -17,32 +17,34 @@ package connectivity_test
import (
"bytes"
"context"
+ "errors"
+ "fmt"
"testing"
"time"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/tests/v3/integration"
- "go.etcd.io/etcd/tests/v3/integration/clientv3"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
+ clientv3test "go.etcd.io/etcd/tests/v3/integration/clientv3"
)
// TestBalancerUnderServerShutdownWatch expects that watch client
// switch its endpoints when the member of the pinned endpoint fails.
func TestBalancerUnderServerShutdownWatch(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{
- Size: 3,
- SkipCreatingClient: true,
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{
+ Size: 3,
+ UseBridge: true,
})
defer clus.Terminate(t)
- eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
+ eps := []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL, clus.Members[2].GRPCURL}
lead := clus.WaitLeader(t)
// pin eps[lead]
- watchCli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[lead]}})
+ watchCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[lead]}})
if err != nil {
t.Fatal(err)
}
@@ -59,7 +61,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) {
wch := watchCli.Watch(context.Background(), key, clientv3.WithCreatedNotify())
select {
case <-wch:
- case <-time.After(integration.RequestWaitTimeout):
+ case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("took too long to create watch")
}
@@ -88,7 +90,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) {
clus.Members[lead].Terminate(t)
// writes to eps[lead+1]
- putCli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[(lead+1)%3]}})
+ putCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[(lead+1)%3]}})
if err != nil {
t.Fatal(err)
}
@@ -100,7 +102,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) {
if err == nil {
break
}
- if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || err == rpctypes.ErrTimeout || err == rpctypes.ErrTimeoutDueToLeaderFail {
+ if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || errors.Is(err, rpctypes.ErrTimeout) || errors.Is(err, rpctypes.ErrTimeoutDueToLeaderFail) {
continue
}
t.Fatal(err)
@@ -141,18 +143,17 @@ func TestBalancerUnderServerShutdownTxn(t *testing.T) {
// the pinned endpoint is shut down, the balancer switches its endpoints
// and all subsequent put/delete/txn requests succeed with new endpoints.
func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Client, context.Context) error) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{
- Size: 3,
- SkipCreatingClient: true,
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{
+ Size: 3,
})
defer clus.Terminate(t)
- eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
+ eps := []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL, clus.Members[2].GRPCURL}
// pin eps[0]
- cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}})
+ cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}})
if err != nil {
t.Fatal(err)
}
@@ -199,18 +200,17 @@ func TestBalancerUnderServerShutdownGetSerializable(t *testing.T) {
// the pinned endpoint is shut down, the balancer switches its endpoints
// and all subsequent range requests succeed with new endpoints.
func testBalancerUnderServerShutdownImmutable(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{
- Size: 3,
- SkipCreatingClient: true,
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{
+ Size: 3,
})
defer clus.Terminate(t)
- eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
+ eps := []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL, clus.Members[2].GRPCURL}
// pin eps[0]
- cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}})
+ cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}})
if err != nil {
t.Errorf("failed to create client: %v", err)
}
@@ -243,8 +243,10 @@ func TestBalancerUnderServerStopInflightLinearizableGetOnRestart(t *testing.T) {
{pinLeader: false, stopPinFirst: true},
{pinLeader: false, stopPinFirst: false},
}
- for i := range tt {
- testBalancerUnderServerStopInflightRangeOnRestart(t, true, tt[i])
+ for _, w := range tt {
+ t.Run(fmt.Sprintf("%#v", w), func(t *testing.T) {
+ testBalancerUnderServerStopInflightRangeOnRestart(t, true, w)
+ })
}
}
@@ -255,8 +257,10 @@ func TestBalancerUnderServerStopInflightSerializableGetOnRestart(t *testing.T) {
{pinLeader: false, stopPinFirst: true},
{pinLeader: false, stopPinFirst: false},
}
- for i := range tt {
- testBalancerUnderServerStopInflightRangeOnRestart(t, false, tt[i])
+ for _, w := range tt {
+ t.Run(fmt.Sprintf("%#v", w), func(t *testing.T) {
+ testBalancerUnderServerStopInflightRangeOnRestart(t, false, w)
+ })
}
}
@@ -268,21 +272,21 @@ type pinTestOpt struct {
// testBalancerUnderServerStopInflightRangeOnRestart expects
// inflight range request reconnects on server restart.
func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizable bool, opt pinTestOpt) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- cfg := &integration.ClusterConfig{
- Size: 2,
- SkipCreatingClient: true,
+ cfg := &integration2.ClusterConfig{
+ Size: 2,
+ UseBridge: true,
}
if linearizable {
cfg.Size = 3
}
- clus := integration.NewClusterV3(t, cfg)
+ clus := integration2.NewCluster(t, cfg)
defer clus.Terminate(t)
- eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()}
+ eps := []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL}
if linearizable {
- eps = append(eps, clus.Members[2].GRPCAddr())
+ eps = append(eps, clus.Members[2].GRPCURL)
}
lead := clus.WaitLeader(t)
@@ -293,7 +297,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl
}
// pin eps[target]
- cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}})
+ cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}})
if err != nil {
t.Errorf("failed to create client: %v", err)
}
@@ -354,7 +358,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl
clus.Members[target].Restart(t)
select {
- case <-time.After(clientTimeout + integration.RequestWaitTimeout):
+ case <-time.After(clientTimeout + integration2.RequestWaitTimeout):
t.Fatalf("timed out waiting for Get [linearizable: %v, opt: %+v]", linearizable, opt)
case <-donec:
}
diff --git a/tests/integration/clientv3/examples/example_auth_test.go b/tests/integration/clientv3/examples/example_auth_test.go
index 29ed4b61c8b..b062d799cc0 100644
--- a/tests/integration/clientv3/examples/example_auth_test.go
+++ b/tests/integration/clientv3/examples/example_auth_test.go
@@ -19,7 +19,7 @@ import (
"fmt"
"log"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
func mockAuth() {
diff --git a/tests/integration/clientv3/examples/example_cluster_test.go b/tests/integration/clientv3/examples/example_cluster_test.go
index 933e7fc5a45..1d2da78c777 100644
--- a/tests/integration/clientv3/examples/example_cluster_test.go
+++ b/tests/integration/clientv3/examples/example_cluster_test.go
@@ -19,7 +19,7 @@ import (
"fmt"
"log"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
func mockCluster_memberList() {
diff --git a/tests/integration/clientv3/examples/example_kv_test.go b/tests/integration/clientv3/examples/example_kv_test.go
index 6fa6b23ddbe..e4fa4bf5f44 100644
--- a/tests/integration/clientv3/examples/example_kv_test.go
+++ b/tests/integration/clientv3/examples/example_kv_test.go
@@ -20,7 +20,7 @@ import (
"log"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
func mockKV_put() {}
@@ -322,7 +322,8 @@ func ExampleKV_do() {
ops := []clientv3.Op{
clientv3.OpPut("put-key", "123"),
clientv3.OpGet("put-key"),
- clientv3.OpPut("put-key", "456")}
+ clientv3.OpPut("put-key", "456"),
+ }
for _, op := range ops {
if _, err := cli.Do(context.TODO(), op); err != nil {
diff --git a/tests/integration/clientv3/examples/example_lease_test.go b/tests/integration/clientv3/examples/example_lease_test.go
index 7fe7949b802..b0e6c5ef366 100644
--- a/tests/integration/clientv3/examples/example_lease_test.go
+++ b/tests/integration/clientv3/examples/example_lease_test.go
@@ -19,7 +19,7 @@ import (
"fmt"
"log"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
func mockLease_grant() {
@@ -48,7 +48,7 @@ func ExampleLease_grant() {
log.Fatal(err)
}
})
- //Output:
+ // Output:
}
func mockLease_revoke() {
diff --git a/tests/integration/clientv3/examples/example_maintenance_test.go b/tests/integration/clientv3/examples/example_maintenance_test.go
index 0426ea553fd..ff545e8de7d 100644
--- a/tests/integration/clientv3/examples/example_maintenance_test.go
+++ b/tests/integration/clientv3/examples/example_maintenance_test.go
@@ -18,7 +18,7 @@ import (
"context"
"log"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
func mockMaintenance_status() {}
diff --git a/tests/integration/clientv3/examples/example_metrics_test.go b/tests/integration/clientv3/examples/example_metrics_test.go
index cdcd91854dc..d21c6d393e2 100644
--- a/tests/integration/clientv3/examples/example_metrics_test.go
+++ b/tests/integration/clientv3/examples/example_metrics_test.go
@@ -17,16 +17,15 @@ package clientv3_test
import (
"context"
"fmt"
- "io/ioutil"
+ "io"
"log"
"net"
"net/http"
"strings"
- "go.etcd.io/etcd/client/v3"
-
grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
+ clientv3 "go.etcd.io/etcd/client/v3"
"google.golang.org/grpc"
)
@@ -72,7 +71,7 @@ func ExampleClient_metrics() {
if err != nil {
log.Fatalf("fetch error: %v", err)
}
- b, err := ioutil.ReadAll(resp.Body)
+ b, err := io.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Fatalf("fetch error: reading %s: %v", url, err)
diff --git a/tests/integration/clientv3/examples/example_test.go b/tests/integration/clientv3/examples/example_test.go
index d01c3676f5b..b9b8be461e7 100644
--- a/tests/integration/clientv3/examples/example_test.go
+++ b/tests/integration/clientv3/examples/example_test.go
@@ -19,7 +19,7 @@ import (
"log"
"go.etcd.io/etcd/client/pkg/v3/transport"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
func mockConfig_insecure() {}
diff --git a/tests/integration/clientv3/examples/example_watch_test.go b/tests/integration/clientv3/examples/example_watch_test.go
index b5c9e808a80..ac44f8ca38d 100644
--- a/tests/integration/clientv3/examples/example_watch_test.go
+++ b/tests/integration/clientv3/examples/example_watch_test.go
@@ -20,7 +20,7 @@ import (
"log"
"time"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
func mockWatcher_watch() {
diff --git a/tests/integration/clientv3/examples/main_test.go b/tests/integration/clientv3/examples/main_test.go
index 3a61a962f1e..338a1ed6468 100644
--- a/tests/integration/clientv3/examples/main_test.go
+++ b/tests/integration/clientv3/examples/main_test.go
@@ -15,11 +15,13 @@
package clientv3_test
import (
+ "log"
"os"
"testing"
"time"
"go.etcd.io/etcd/client/pkg/v3/testutil"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
"go.etcd.io/etcd/tests/v3/integration"
)
@@ -29,13 +31,15 @@ const (
)
var lazyCluster = integration.NewLazyClusterWithConfig(
- integration.ClusterConfig{
+ integration2.ClusterConfig{
Size: 3,
- WatchProgressNotifyInterval: 200 * time.Millisecond})
+ WatchProgressNotifyInterval: 200 * time.Millisecond,
+ DisableStrictReconfigCheck: true,
+ })
-func exampleEndpoints() []string { return lazyCluster.EndpointsV3() }
+func exampleEndpoints() []string { return lazyCluster.EndpointsGRPC() }
-func forUnitTestsRunInMockedContext(mocking func(), example func()) {
+func forUnitTestsRunInMockedContext(_ func(), example func()) {
// For integration tests runs in the provided environment
example()
}
@@ -43,6 +47,20 @@ func forUnitTestsRunInMockedContext(mocking func(), example func()) {
// TestMain sets up an etcd cluster if running the examples.
func TestMain(m *testing.M) {
testutil.ExitInShortMode("Skipping: the tests require real cluster")
+
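+ // Run the examples from a throwaway working directory so any files the
+ // example cluster writes are removed together with the directory.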
+ tempDir, err := os.MkdirTemp(os.TempDir(), "etcd-integration")
+ if err != nil {
+ log.Printf("Failed to obtain tempDir: %v", tempDir)
+ os.Exit(1)
+ }
+ defer os.RemoveAll(tempDir)
+
+ err = os.Chdir(tempDir)
+ if err != nil {
+ log.Printf("Failed to change working dir to: %s: %v", tempDir, err)
+ os.Exit(1)
+ }
+ log.Printf("Running tests (examples) in dir(%v): ...", tempDir)
v := m.Run()
lazyCluster.Terminate()
diff --git a/tests/integration/clientv3/experimental/recipes/v3_barrier_test.go b/tests/integration/clientv3/experimental/recipes/v3_barrier_test.go
index 52bde238d43..b6dfef385a7 100644
--- a/tests/integration/clientv3/experimental/recipes/v3_barrier_test.go
+++ b/tests/integration/clientv3/experimental/recipes/v3_barrier_test.go
@@ -15,24 +15,25 @@
package recipes_test
import (
+ "context"
"testing"
"time"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
- "go.etcd.io/etcd/tests/v3/integration"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestBarrierSingleNode(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
testBarrier(t, 5, func() *clientv3.Client { return clus.Client(0) })
}
func TestBarrierMultiNode(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
testBarrier(t, 5, func() *clientv3.Client { return clus.RandClient() })
}
@@ -46,6 +47,11 @@ func testBarrier(t *testing.T, waiters int, chooseClient func() *clientv3.Client
t.Fatalf("able to double-hold barrier")
}
+ // put a random key to move the revision forward
+ if _, err := chooseClient().Put(context.Background(), "x", ""); err != nil {
+ t.Errorf("could not put x (%v)", err)
+ }
+
donec := make(chan struct{})
stopc := make(chan struct{})
defer close(stopc)
@@ -60,7 +66,6 @@ func testBarrier(t *testing.T, waiters int, chooseClient func() *clientv3.Client
case donec <- struct{}{}:
case <-stopc:
}
-
}()
}
@@ -83,3 +88,42 @@ func testBarrier(t *testing.T, waiters int, chooseClient func() *clientv3.Client
}
}
}
+
+func TestBarrierWaitNonexistentKey(t *testing.T) {
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
+ defer clus.Terminate(t)
+ cli := clus.Client(0)
+
+ if _, err := cli.Put(cli.Ctx(), "test-barrier-0", ""); err != nil {
+ t.Errorf("could not put test-barrier0, err:%v", err)
+ }
+
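+ // Each waiter blocks in Wait on the (nonexistent) "test-barrier" key and
+ // signals donec as soon as Wait returns.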
+ donec := make(chan struct{})
+ stopc := make(chan struct{})
+ defer close(stopc)
+
+ waiters := 5
+ for i := 0; i < waiters; i++ {
+ go func() {
+ br := recipe.NewBarrier(cli, "test-barrier")
+ if err := br.Wait(); err != nil {
+ t.Errorf("could not wait on barrier (%v)", err)
+ }
+ select {
+ case donec <- struct{}{}:
+ case <-stopc:
+ }
+ }()
+ }
+
+ // All waiters should return immediately when waiting on the nonexistent key "test-barrier", even though the key "test-barrier-0" exists.
+ timerC := time.After(time.Duration(waiters*100) * time.Millisecond)
+ for i := 0; i < waiters; i++ {
+ select {
+ case <-timerC:
+ t.Fatal("barrier timed out")
+ case <-donec:
+ }
+ }
+}
diff --git a/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go b/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go
index 463bb605194..680476b48da 100644
--- a/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go
+++ b/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go
@@ -15,18 +15,24 @@
package recipes_test
import (
+ "context"
+ "errors"
+ "sync"
"testing"
"time"
+ "github.com/stretchr/testify/assert"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
- "go.etcd.io/etcd/tests/v3/integration"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestDoubleBarrier(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
waiters := 10
@@ -97,10 +103,72 @@ func TestDoubleBarrier(t *testing.T) {
}
}
+func TestDoubleBarrierTooManyClients(t *testing.T) {
+ integration2.BeforeTest(t)
+
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
+ defer clus.Terminate(t)
+
+ waiters := 10
+ session, err := concurrency.NewSession(clus.RandClient())
+ if err != nil {
+ t.Error(err)
+ }
+ defer session.Orphan()
+
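+ // b is an extra client of the same barrier; once the `waiters` goroutines
+ // below have all entered, Enter on b is expected to fail with ErrTooManyClients.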
+ b := recipe.NewDoubleBarrier(session, "test-barrier", waiters)
+ donec := make(chan struct{})
+ var (
+ wgDone sync.WaitGroup // make sure all clients have finished the tasks
+ wgEntered sync.WaitGroup // make sure all clients have entered the double barrier
+ )
+ wgDone.Add(waiters)
+ wgEntered.Add(waiters)
+ for i := 0; i < waiters; i++ {
+ go func() {
+ defer wgDone.Done()
+
+ gsession, gerr := concurrency.NewSession(clus.RandClient())
+ if gerr != nil {
+ t.Error(gerr)
+ }
+ defer gsession.Orphan()
+
+ bb := recipe.NewDoubleBarrier(session, "test-barrier", waiters)
+ if gerr = bb.Enter(); gerr != nil {
+ t.Errorf("could not enter on barrier (%v)", gerr)
+ }
+ wgEntered.Done()
+ <-donec
+ if gerr = bb.Leave(); gerr != nil {
+ t.Errorf("could not leave on barrier (%v)", gerr)
+ }
+ }()
+ }
+
+ // Wait until all clients have entered the double barrier, so that
+ // no other client can enter the barrier.
+ wgEntered.Wait()
+ t.Log("Try to enter into double barrier")
+ if err = b.Enter(); !errors.Is(err, recipe.ErrTooManyClients) {
+ t.Errorf("Unexcepted error, expected: ErrTooManyClients, got: %v", err)
+ }
+
+ resp, err := clus.RandClient().Get(context.TODO(), "test-barrier/waiters", clientv3.WithPrefix())
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ // Make sure the extra `b.Enter()` did not create a new ephemeral key
+ assert.Len(t, resp.Kvs, waiters)
+ close(donec)
+
+ wgDone.Wait()
+}
+
func TestDoubleBarrierFailover(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
waiters := 10
diff --git a/tests/integration/clientv3/experimental/recipes/v3_lock_test.go b/tests/integration/clientv3/experimental/recipes/v3_lock_test.go
index 7104c3ce74f..1fcbc46e144 100644
--- a/tests/integration/clientv3/experimental/recipes/v3_lock_test.go
+++ b/tests/integration/clientv3/experimental/recipes/v3_lock_test.go
@@ -16,61 +16,65 @@ package recipes_test
import (
"context"
+ "errors"
+ "fmt"
"math/rand"
+ "sync"
"testing"
"time"
"go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
- "go.etcd.io/etcd/tests/v3/integration"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestMutexLockSingleNode(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
var clients []*clientv3.Client
- testMutexLock(t, 5, integration.MakeSingleNodeClients(t, clus, &clients))
- integration.CloseClients(t, clients)
+ testMutexLock(t, 5, integration2.MakeSingleNodeClients(t, clus, &clients))
+ integration2.CloseClients(t, clients)
}
func TestMutexLockMultiNode(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
var clients []*clientv3.Client
- testMutexLock(t, 5, integration.MakeMultiNodeClients(t, clus, &clients))
- integration.CloseClients(t, clients)
+ testMutexLock(t, 5, integration2.MakeMultiNodeClients(t, clus, &clients))
+ integration2.CloseClients(t, clients)
}
func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
// stream lock acquisitions
- lockedC := make(chan *concurrency.Mutex)
- stopC := make(chan struct{})
- defer close(stopC)
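+ // Buffer both channels so every locker goroutine can report a mutex or an
+ // error without blocking; results are drained in the select loop below.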
+ lockedC := make(chan *concurrency.Mutex, waiters)
+ errC := make(chan error, waiters)
+
+ var wg sync.WaitGroup
+ wg.Add(waiters)
for i := 0; i < waiters; i++ {
- go func() {
+ go func(i int) {
+ defer wg.Done()
session, err := concurrency.NewSession(chooseClient())
if err != nil {
- t.Error(err)
+ errC <- fmt.Errorf("#%d: failed to create new session: %w", i, err)
+ return
}
m := concurrency.NewMutex(session, "test-mutex")
if err := m.Lock(context.TODO()); err != nil {
- t.Errorf("could not wait on lock (%v)", err)
- }
- select {
- case lockedC <- m:
- case <-stopC:
+ errC <- fmt.Errorf("#%d: failed to wait on lock: %w", i, err)
+ return
}
-
- }()
+ lockedC <- m
+ }(i)
}
// unlock locked mutexes
timerC := time.After(time.Duration(waiters) * time.Second)
@@ -78,6 +82,8 @@ func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Clie
select {
case <-timerC:
t.Fatalf("timed out waiting for lock %d", i)
+ case err := <-errC:
+ t.Fatalf("Unexpected error: %v", err)
case m := <-lockedC:
// lock acquired with m
select {
@@ -90,60 +96,63 @@ func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Clie
}
}
}
+ wg.Wait()
}
func TestMutexTryLockSingleNode(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
-
+ t.Logf("3 nodes cluster created...")
var clients []*clientv3.Client
- testMutexTryLock(t, 5, integration.MakeSingleNodeClients(t, clus, &clients))
- integration.CloseClients(t, clients)
+ testMutexTryLock(t, 5, integration2.MakeSingleNodeClients(t, clus, &clients))
+ integration2.CloseClients(t, clients)
}
func TestMutexTryLockMultiNode(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
var clients []*clientv3.Client
- testMutexTryLock(t, 5, integration.MakeMultiNodeClients(t, clus, &clients))
- integration.CloseClients(t, clients)
+ testMutexTryLock(t, 5, integration2.MakeMultiNodeClients(t, clus, &clients))
+ integration2.CloseClients(t, clients)
}
func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.Client) {
- integration.BeforeTest(t)
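+ // A single 30-second deadline bounds every TryLock attempt and the
+ // result collection below.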
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
lockedC := make(chan *concurrency.Mutex)
notlockedC := make(chan *concurrency.Mutex)
- stopC := make(chan struct{})
- defer close(stopC)
+
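+ // Each locker tries the lock exactly once: the winner reports on lockedC,
+ // the rest are expected to observe ErrLocked and report on notlockedC.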
for i := 0; i < lockers; i++ {
- go func() {
+ go func(i int) {
session, err := concurrency.NewSession(chooseClient())
if err != nil {
t.Error(err)
}
m := concurrency.NewMutex(session, "test-mutex-try-lock")
- err = m.TryLock(context.TODO())
+ err = m.TryLock(ctx)
if err == nil {
select {
case lockedC <- m:
- case <-stopC:
+ case <-ctx.Done():
+ t.Errorf("Thread: %v, Context failed: %v", i, err)
}
- } else if err == concurrency.ErrLocked {
+ } else if errors.Is(err, concurrency.ErrLocked) {
select {
case notlockedC <- m:
- case <-stopC:
+ case <-ctx.Done():
+ t.Errorf("Thread: %v, Context failed: %v", i, err)
}
} else {
- t.Errorf("Unexpected Error %v", err)
+ t.Errorf("Thread: %v; Unexpected Error %v", i, err)
}
- }()
+ }(i)
}
- timerC := time.After(time.Second)
+ timerC := time.After(30 * time.Second)
select {
case <-lockedC:
for i := 0; i < lockers-1; i++ {
@@ -156,16 +165,16 @@ func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.C
}
}
case <-timerC:
- t.Errorf("timed out waiting for lock")
+ t.Errorf("timed out waiting for lock (30s)")
}
}
// TestMutexSessionRelock ensures that acquiring the same lock with the same
// session will not result in deadlock.
func TestMutexSessionRelock(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
session, err := concurrency.NewSession(clus.RandClient())
if err != nil {
@@ -187,9 +196,9 @@ func TestMutexSessionRelock(t *testing.T) {
// waiters older than the new owner are gone by testing the case where
// the waiter prior to the acquirer expires before the current holder.
func TestMutexWaitsOnCurrentHolder(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cctx := context.Background()
@@ -295,9 +304,9 @@ func TestMutexWaitsOnCurrentHolder(t *testing.T) {
}
func BenchmarkMutex4Waiters(b *testing.B) {
- integration.BeforeTest(b)
+ integration2.BeforeTest(b)
// XXX switch tests to use TB interface
- clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(nil, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(nil)
for i := 0; i < b.N; i++ {
testMutexLock(nil, 4, func() *clientv3.Client { return clus.RandClient() })
@@ -305,15 +314,15 @@ func BenchmarkMutex4Waiters(b *testing.B) {
}
func TestRWMutexSingleNode(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
testRWMutex(t, 5, func() *clientv3.Client { return clus.Client(0) })
}
func TestRWMutexMultiNode(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
testRWMutex(t, 5, func() *clientv3.Client { return clus.RandClient() })
}
diff --git a/tests/integration/clientv3/experimental/recipes/v3_queue_test.go b/tests/integration/clientv3/experimental/recipes/v3_queue_test.go
index 45d1855b936..73ed5552fe2 100644
--- a/tests/integration/clientv3/experimental/recipes/v3_queue_test.go
+++ b/tests/integration/clientv3/experimental/recipes/v3_queue_test.go
@@ -21,7 +21,7 @@ import (
"testing"
recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
- "go.etcd.io/etcd/tests/v3/integration"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
const (
@@ -31,12 +31,15 @@ const (
// TestQueueOneReaderOneWriter confirms the queue is FIFO
func TestQueueOneReaderOneWriter(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
done := make(chan struct{})
+ defer func() {
+ <-done
+ }()
go func() {
defer func() {
done <- struct{}{}
@@ -61,7 +64,6 @@ func TestQueueOneReaderOneWriter(t *testing.T) {
t.Fatalf("expected dequeue value %v, got %v", s, i)
}
}
- <-done
}
func TestQueueManyReaderOneWriter(t *testing.T) {
@@ -78,10 +80,10 @@ func TestQueueManyReaderManyWriter(t *testing.T) {
// BenchmarkQueue benchmarks Queues using many/many readers/writers
func BenchmarkQueue(b *testing.B) {
- integration.BeforeTest(b)
+ integration2.BeforeTest(b)
// XXX switch tests to use TB interface
- clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(nil, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(nil)
for i := 0; i < b.N; i++ {
testQueueNReaderMWriter(nil, manyQueueClients, manyQueueClients)
@@ -90,9 +92,9 @@ func BenchmarkQueue(b *testing.B) {
// TestPrQueueOneReaderOneWriter tests whether priority queues respect priorities.
func TestPrQueueOneReaderOneWriter(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
// write out five items with random priority
@@ -124,21 +126,21 @@ func TestPrQueueOneReaderOneWriter(t *testing.T) {
}
func TestPrQueueManyReaderManyWriter(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
rqs := newPriorityQueues(clus, manyQueueClients)
wqs := newPriorityQueues(clus, manyQueueClients)
testReadersWriters(t, rqs, wqs)
}
-// BenchmarkQueue benchmarks Queues using n/n readers/writers
+// BenchmarkPrQueueOneReaderOneWriter benchmarks priority queues using one reader and one writer
func BenchmarkPrQueueOneReaderOneWriter(b *testing.B) {
- integration.BeforeTest(b)
+ integration2.BeforeTest(b)
// XXX switch tests to use TB interface
- clus := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(nil, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(nil)
rqs := newPriorityQueues(clus, 1)
wqs := newPriorityQueues(clus, 1)
@@ -148,13 +150,13 @@ func BenchmarkPrQueueOneReaderOneWriter(b *testing.B) {
}
func testQueueNReaderMWriter(t *testing.T, n int, m int) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
testReadersWriters(t, newQueues(clus, n), newQueues(clus, m))
}
-func newQueues(clus *integration.ClusterV3, n int) (qs []testQueue) {
+func newQueues(clus *integration2.Cluster, n int) (qs []testQueue) {
for i := 0; i < n; i++ {
etcdc := clus.RandClient()
qs = append(qs, recipe.NewQueue(etcdc, "q"))
@@ -162,7 +164,7 @@ func newQueues(clus *integration.ClusterV3, n int) (qs []testQueue) {
return qs
}
-func newPriorityQueues(clus *integration.ClusterV3, n int) (qs []testQueue) {
+func newPriorityQueues(clus *integration2.Cluster, n int) (qs []testQueue) {
for i := 0; i < n; i++ {
etcdc := clus.RandClient()
q := &flatPriorityQueue{recipe.NewPriorityQueue(etcdc, "prq")}
@@ -232,6 +234,7 @@ func (q *flatPriorityQueue) Enqueue(val string) error {
// randomized to stress dequeuing logic; order isn't important
return q.PriorityQueue.Enqueue(val, uint16(rand.Intn(2)))
}
+
func (q *flatPriorityQueue) Dequeue() (string, error) {
return q.PriorityQueue.Dequeue()
}
diff --git a/tests/integration/clientv3/kv_test.go b/tests/integration/clientv3/kv_test.go
index 3d1bb9e9cf9..66a9bce59d5 100644
--- a/tests/integration/clientv3/kv_test.go
+++ b/tests/integration/clientv3/kv_test.go
@@ -17,6 +17,7 @@ package clientv3test
import (
"bytes"
"context"
+ "errors"
"fmt"
"os"
"reflect"
@@ -25,36 +26,37 @@ import (
"testing"
"time"
- "go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/api/v3/version"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/tests/v3/integration"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/api/v3/version"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestKVPutError(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
var (
maxReqBytes = 1.5 * 1024 * 1024 // hard coded max in v3_server.go
quota = int64(int(maxReqBytes*1.2) + 8*os.Getpagesize()) // make sure we have enough overhead in backend quota. See discussion in #6486.
)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota, ClientMaxCallSendMsgSize: 100 * 1024 * 1024})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, QuotaBackendBytes: quota, ClientMaxCallSendMsgSize: 100 * 1024 * 1024})
defer clus.Terminate(t)
kv := clus.RandClient()
ctx := context.TODO()
_, err := kv.Put(ctx, "", "bar")
- if err != rpctypes.ErrEmptyKey {
+ if !errors.Is(err, rpctypes.ErrEmptyKey) {
t.Fatalf("expected %v, got %v", rpctypes.ErrEmptyKey, err)
}
_, err = kv.Put(ctx, "key", strings.Repeat("a", int(maxReqBytes+100)))
- if err != rpctypes.ErrRequestTooLarge {
+ if !errors.Is(err, rpctypes.ErrRequestTooLarge) {
t.Fatalf("expected %v, got %v", rpctypes.ErrRequestTooLarge, err)
}
@@ -66,15 +68,15 @@ func TestKVPutError(t *testing.T) {
time.Sleep(1 * time.Second) // give enough time for commit
_, err = kv.Put(ctx, "foo2", strings.Repeat("a", int(maxReqBytes-50)))
- if err != rpctypes.ErrNoSpace { // over quota
+ if !errors.Is(err, rpctypes.ErrNoSpace) { // over quota
t.Fatalf("expected %v, got %v", rpctypes.ErrNoSpace, err)
}
}
-func TestKVPut(t *testing.T) {
- integration.BeforeTest(t)
+func TestKVPutWithLease(t *testing.T) {
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
lapi := clus.RandClient()
@@ -82,50 +84,42 @@ func TestKVPut(t *testing.T) {
kv := clus.RandClient()
ctx := context.TODO()
- resp, err := lapi.Grant(context.Background(), 10)
+ lease, err := lapi.Grant(context.Background(), 10)
if err != nil {
t.Fatalf("failed to create lease %v", err)
}
- tests := []struct {
- key, val string
- leaseID clientv3.LeaseID
- }{
- {"foo", "bar", clientv3.NoLease},
- {"hello", "world", resp.ID},
+ key := "hello"
+ val := "world"
+ if _, err = kv.Put(ctx, key, val, clientv3.WithLease(lease.ID)); err != nil {
+ t.Fatalf("couldn't put %q (%v)", key, err)
}
-
- for i, tt := range tests {
- if _, err := kv.Put(ctx, tt.key, tt.val, clientv3.WithLease(tt.leaseID)); err != nil {
- t.Fatalf("#%d: couldn't put %q (%v)", i, tt.key, err)
- }
- resp, err := kv.Get(ctx, tt.key)
- if err != nil {
- t.Fatalf("#%d: couldn't get key (%v)", i, err)
- }
- if len(resp.Kvs) != 1 {
- t.Fatalf("#%d: expected 1 key, got %d", i, len(resp.Kvs))
- }
- if !bytes.Equal([]byte(tt.val), resp.Kvs[0].Value) {
- t.Errorf("#%d: val = %s, want %s", i, tt.val, resp.Kvs[0].Value)
- }
- if tt.leaseID != clientv3.LeaseID(resp.Kvs[0].Lease) {
- t.Errorf("#%d: val = %d, want %d", i, tt.leaseID, resp.Kvs[0].Lease)
- }
+ resp, err := kv.Get(ctx, key)
+ if err != nil {
+ t.Fatalf("couldn't get key (%v)", err)
+ }
+ if len(resp.Kvs) != 1 {
+ t.Fatalf("expected 1 key, got %d", len(resp.Kvs))
+ }
+ if !bytes.Equal([]byte(val), resp.Kvs[0].Value) {
+ t.Errorf("val = %s, want %s", val, resp.Kvs[0].Value)
+ }
+ if lease.ID != clientv3.LeaseID(resp.Kvs[0].Lease) {
+ t.Errorf("val = %d, want %d", lease.ID, resp.Kvs[0].Lease)
}
}
// TestKVPutWithIgnoreValue ensures that Put with WithIgnoreValue does not clobber the old value.
func TestKVPutWithIgnoreValue(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
kv := clus.RandClient()
_, err := kv.Put(context.TODO(), "foo", "", clientv3.WithIgnoreValue())
- if err != rpctypes.ErrKeyNotFound {
+ if !errors.Is(err, rpctypes.ErrKeyNotFound) {
t.Fatalf("err expected %v, got %v", rpctypes.ErrKeyNotFound, err)
}
@@ -150,9 +144,9 @@ func TestKVPutWithIgnoreValue(t *testing.T) {
// TestKVPutWithIgnoreLease ensures that Put with WithIgnoreLease does not affect the existing lease for the key.
func TestKVPutWithIgnoreLease(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
kv := clus.RandClient()
@@ -164,7 +158,7 @@ func TestKVPutWithIgnoreLease(t *testing.T) {
t.Errorf("failed to create lease %v", err)
}
- if _, err := kv.Put(context.TODO(), "zoo", "bar", clientv3.WithIgnoreLease()); err != rpctypes.ErrKeyNotFound {
+ if _, err := kv.Put(context.TODO(), "zoo", "bar", clientv3.WithIgnoreLease()); !errors.Is(err, rpctypes.ErrKeyNotFound) {
t.Fatalf("err expected %v, got %v", rpctypes.ErrKeyNotFound, err)
}
@@ -189,9 +183,9 @@ func TestKVPutWithIgnoreLease(t *testing.T) {
}
func TestKVPutWithRequireLeader(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
clus.Members[1].Stop(t)
@@ -206,7 +200,7 @@ func TestKVPutWithRequireLeader(t *testing.T) {
kv := clus.Client(0)
_, err := kv.Put(clientv3.WithRequireLeader(context.Background()), "foo", "bar")
- if err != rpctypes.ErrNoLeader {
+ if !errors.Is(err, rpctypes.ErrNoLeader) {
t.Fatal(err)
}
@@ -235,9 +229,9 @@ func TestKVPutWithRequireLeader(t *testing.T) {
}
func TestKVRange(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
kv := clus.RandClient()
@@ -262,180 +256,12 @@ func TestKVRange(t *testing.T) {
wantSet []*mvccpb.KeyValue
}{
- // range first two
- {
- "a", "c",
- 0,
- nil,
-
- []*mvccpb.KeyValue{
- {Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
- {Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
- },
- },
- // range first two with serializable
- {
- "a", "c",
- 0,
- []clientv3.OpOption{clientv3.WithSerializable()},
-
- []*mvccpb.KeyValue{
- {Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
- {Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
- },
- },
- // range all with rev
- {
- "a", "x",
- 2,
- nil,
-
- []*mvccpb.KeyValue{
- {Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
- },
- },
- // range all with countOnly
- {
- "a", "x",
- 2,
- []clientv3.OpOption{clientv3.WithCountOnly()},
-
- nil,
- },
- // range all with SortByKey, SortAscend
- {
- "a", "x",
- 0,
- []clientv3.OpOption{clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)},
-
- []*mvccpb.KeyValue{
- {Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
- {Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
- {Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
- {Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
- {Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
- {Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
- },
- },
- // range all with SortByKey, missing sorting order (ASCEND by default)
- {
- "a", "x",
- 0,
- []clientv3.OpOption{clientv3.WithSort(clientv3.SortByKey, clientv3.SortNone)},
-
- []*mvccpb.KeyValue{
- {Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
- {Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
- {Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
- {Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
- {Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
- {Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
- },
- },
- // range all with SortByCreateRevision, SortDescend
- {
- "a", "x",
- 0,
- []clientv3.OpOption{clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortDescend)},
-
- []*mvccpb.KeyValue{
- {Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
- {Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
- {Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
- {Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
- {Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
- {Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
- },
- },
- // range all with SortByCreateRevision, missing sorting order (ASCEND by default)
- {
- "a", "x",
- 0,
- []clientv3.OpOption{clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortNone)},
-
- []*mvccpb.KeyValue{
- {Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
- {Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
- {Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
- {Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
- {Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
- {Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
- },
- },
- // range all with SortByModRevision, SortDescend
- {
- "a", "x",
- 0,
- []clientv3.OpOption{clientv3.WithSort(clientv3.SortByModRevision, clientv3.SortDescend)},
-
- []*mvccpb.KeyValue{
- {Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
- {Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
- {Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
- {Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
- {Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
- {Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
- },
- },
- // WithPrefix
- {
- "foo", "",
- 0,
- []clientv3.OpOption{clientv3.WithPrefix()},
-
- []*mvccpb.KeyValue{
- {Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
- {Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
- },
- },
- // WithFromKey
- {
- "fo", "",
- 0,
- []clientv3.OpOption{clientv3.WithFromKey()},
-
- []*mvccpb.KeyValue{
- {Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
- {Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
- {Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
- },
- },
// fetch entire keyspace using WithFromKey
{
"\x00", "",
0,
[]clientv3.OpOption{clientv3.WithFromKey(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)},
- []*mvccpb.KeyValue{
- {Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
- {Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
- {Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
- {Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
- {Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
- {Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
- },
- },
- // fetch entire keyspace using WithPrefix
- {
- "", "",
- 0,
- []clientv3.OpOption{clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)},
-
- []*mvccpb.KeyValue{
- {Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
- {Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
- {Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
- {Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
- {Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
- {Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
- },
- },
- // fetch keyspace with empty key using WithFromKey
- {
- "", "",
- 0,
- []clientv3.OpOption{clientv3.WithFromKey(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)},
-
[]*mvccpb.KeyValue{
{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
@@ -464,9 +290,9 @@ func TestKVRange(t *testing.T) {
}
func TestKVGetErrConnClosed(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
@@ -486,16 +312,16 @@ func TestKVGetErrConnClosed(t *testing.T) {
}()
select {
- case <-time.After(integration.RequestWaitTimeout):
+ case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("kv.Get took too long")
case <-donec:
}
}
func TestKVNewAfterClose(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
@@ -513,54 +339,31 @@ func TestKVNewAfterClose(t *testing.T) {
close(donec)
}()
select {
- case <-time.After(integration.RequestWaitTimeout):
+ case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("kv.Get took too long")
case <-donec:
}
}
func TestKVDeleteRange(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
kv := clus.RandClient()
ctx := context.TODO()
tests := []struct {
- key string
- opts []clientv3.OpOption
-
+ key string
+ opts []clientv3.OpOption
wkeys []string
}{
- // [a, c)
- {
- key: "a",
- opts: []clientv3.OpOption{clientv3.WithRange("c")},
-
- wkeys: []string{"c", "c/abc", "d"},
- },
- // >= c
- {
- key: "c",
- opts: []clientv3.OpOption{clientv3.WithFromKey()},
-
- wkeys: []string{"a", "b"},
- },
- // c*
- {
- key: "c",
- opts: []clientv3.OpOption{clientv3.WithPrefix()},
-
- wkeys: []string{"a", "b", "d"},
- },
// *
{
- key: "\x00",
- opts: []clientv3.OpOption{clientv3.WithFromKey()},
-
- wkeys: []string{},
+ key: "\x00",
+ opts: []clientv3.OpOption{clientv3.WithFromKey()},
+ wkeys: nil,
},
}
@@ -581,7 +384,7 @@ func TestKVDeleteRange(t *testing.T) {
if err != nil {
t.Fatalf("#%d: couldn't get keys (%v)", i, err)
}
- keys := []string{}
+ var keys []string
for _, kv := range resp.Kvs {
keys = append(keys, string(kv.Key))
}
@@ -591,42 +394,10 @@ func TestKVDeleteRange(t *testing.T) {
}
}
-func TestKVDelete(t *testing.T) {
- integration.BeforeTest(t)
-
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
- defer clus.Terminate(t)
-
- kv := clus.RandClient()
- ctx := context.TODO()
-
- presp, err := kv.Put(ctx, "foo", "")
- if err != nil {
- t.Fatalf("couldn't put 'foo' (%v)", err)
- }
- if presp.Header.Revision != 2 {
- t.Fatalf("presp.Header.Revision got %d, want %d", presp.Header.Revision, 2)
- }
- resp, err := kv.Delete(ctx, "foo")
- if err != nil {
- t.Fatalf("couldn't delete key (%v)", err)
- }
- if resp.Header.Revision != 3 {
- t.Fatalf("resp.Header.Revision got %d, want %d", resp.Header.Revision, 3)
- }
- gresp, err := kv.Get(ctx, "foo")
- if err != nil {
- t.Fatalf("couldn't get key (%v)", err)
- }
- if len(gresp.Kvs) > 0 {
- t.Fatalf("gresp.Kvs got %+v, want none", gresp.Kvs)
- }
-}
-
func TestKVCompactError(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
kv := clus.RandClient()
@@ -643,20 +414,20 @@ func TestKVCompactError(t *testing.T) {
}
_, err = kv.Compact(ctx, 6)
- if err != rpctypes.ErrCompacted {
+ if !errors.Is(err, rpctypes.ErrCompacted) {
t.Fatalf("expected %v, got %v", rpctypes.ErrCompacted, err)
}
_, err = kv.Compact(ctx, 100)
- if err != rpctypes.ErrFutureRev {
+ if !errors.Is(err, rpctypes.ErrFutureRev) {
t.Fatalf("expected %v, got %v", rpctypes.ErrFutureRev, err)
}
}
func TestKVCompact(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
kv := clus.RandClient()
@@ -673,7 +444,7 @@ func TestKVCompact(t *testing.T) {
t.Fatalf("couldn't compact kv space (%v)", err)
}
_, err = kv.Compact(ctx, 7)
- if err == nil || err != rpctypes.ErrCompacted {
+ if err == nil || !errors.Is(err, rpctypes.ErrCompacted) {
t.Fatalf("error got %v, want %v", err, rpctypes.ErrCompacted)
}
@@ -690,7 +461,7 @@ func TestKVCompact(t *testing.T) {
if !wr.Canceled {
t.Fatalf("expected canceled watcher on compacted revision, got %v", wr.Canceled)
}
- if wr.Err() != rpctypes.ErrCompacted {
+ if !errors.Is(wr.Err(), rpctypes.ErrCompacted) {
t.Fatalf("watch response error expected %v, got %v", rpctypes.ErrCompacted, wr.Err())
}
wr, ok := <-wchan
@@ -702,17 +473,17 @@ func TestKVCompact(t *testing.T) {
}
_, err = kv.Compact(ctx, 1000)
- if err == nil || err != rpctypes.ErrFutureRev {
+ if err == nil || !errors.Is(err, rpctypes.ErrFutureRev) {
t.Fatalf("error got %v, want %v", err, rpctypes.ErrFutureRev)
}
}
// TestKVGetRetry ensures get will retry on disconnect.
func TestKVGetRetry(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
clusterSize := 3
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: clusterSize})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: clusterSize, UseBridge: true})
defer clus.Terminate(t)
// because killing leader and following election
@@ -763,9 +534,9 @@ func TestKVGetRetry(t *testing.T) {
// TestKVPutFailGetRetry ensures a get will retry following a failed put.
func TestKVPutFailGetRetry(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
kv := clus.Client(0)
@@ -803,9 +574,9 @@ func TestKVPutFailGetRetry(t *testing.T) {
// TestKVGetCancel tests that a context cancel on a Get terminates as expected.
func TestKVGetCancel(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
oldconn := clus.Client(0).ActiveConnection()
@@ -826,9 +597,9 @@ func TestKVGetCancel(t *testing.T) {
// TestKVGetStoppedServerAndClose ensures closing after a failed Get works.
func TestKVGetStoppedServerAndClose(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
@@ -844,9 +615,9 @@ func TestKVGetStoppedServerAndClose(t *testing.T) {
// TestKVPutStoppedServerAndClose ensures closing after a failed Put works.
func TestKVPutStoppedServerAndClose(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
@@ -875,8 +646,8 @@ func TestKVPutStoppedServerAndClose(t *testing.T) {
// TestKVPutAtMostOnce ensures that a Put will only occur at most once
// in the presence of network errors.
func TestKVPutAtMostOnce(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
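+ // UseBridge routes client connections through a test-controlled bridge so the
+ // DropConnections calls below can sever them.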
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
if _, err := clus.Client(0).Put(context.TODO(), "k", "1"); err != nil {
@@ -884,12 +655,12 @@ func TestKVPutAtMostOnce(t *testing.T) {
}
for i := 0; i < 10; i++ {
- clus.Members[0].DropConnections()
+ clus.Members[0].Bridge().DropConnections()
donec := make(chan struct{})
go func() {
defer close(donec)
for i := 0; i < 10; i++ {
- clus.Members[0].DropConnections()
+ clus.Members[0].Bridge().DropConnections()
time.Sleep(5 * time.Millisecond)
}
}()
@@ -911,7 +682,7 @@ func TestKVPutAtMostOnce(t *testing.T) {
// TestKVLargeRequests tests various client/server side request limits.
func TestKVLargeRequests(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
tests := []struct {
// make sure that "MaxCallSendMsgSize" < server-side default send/recv limit
maxRequestBytesServer uint
@@ -932,14 +703,12 @@ func TestKVLargeRequests(t *testing.T) {
// without proper client-side receive size limit
// "code = ResourceExhausted desc = grpc: received message larger than max (5242929 vs. 4194304)"
{
-
maxRequestBytesServer: 7*1024*1024 + 512*1024,
maxCallSendBytesClient: 7 * 1024 * 1024,
maxCallRecvBytesClient: 0,
valueSize: 5 * 1024 * 1024,
expectError: nil,
},
-
{
maxRequestBytesServer: 10 * 1024 * 1024,
maxCallSendBytesClient: 100 * 1024 * 1024,
@@ -970,8 +739,8 @@ func TestKVLargeRequests(t *testing.T) {
},
}
for i, test := range tests {
- clus := integration.NewClusterV3(t,
- &integration.ClusterConfig{
+ clus := integration2.NewCluster(t,
+ &integration2.ClusterConfig{
Size: 1,
MaxRequestBytes: test.maxRequestBytesServer,
ClientMaxCallSendMsgSize: test.maxCallSendBytesClient,
@@ -981,8 +750,9 @@ func TestKVLargeRequests(t *testing.T) {
cli := clus.Client(0)
_, err := cli.Put(context.TODO(), "foo", strings.Repeat("a", test.valueSize))
- if _, ok := err.(rpctypes.EtcdError); ok {
- if err != test.expectError {
+ var etcdErr rpctypes.EtcdError
+ if errors.As(err, &etcdErr) {
+ if !errors.Is(err, test.expectError) {
t.Errorf("#%d: expected %v, got %v", i, test.expectError, err)
}
} else if err != nil && !strings.HasPrefix(err.Error(), test.expectError.Error()) {
@@ -1003,9 +773,9 @@ func TestKVLargeRequests(t *testing.T) {
// TestKVForLearner ensures learner member only accepts serializable read request.
func TestKVForLearner(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
defer clus.Terminate(t)
// we have to add and launch learner member after initial cluster was created, because
@@ -1027,14 +797,14 @@ func TestKVForLearner(t *testing.T) {
// 1. clus.Members[3] is the newly added learner member, which was appended to clus.Members
// 2. we are using member's grpcAddr instead of clientURLs as the endpoint for clientv3.Config,
// because the implementation of integration test has diverged from embed/etcd.go.
- learnerEp := clus.Members[3].GRPCAddr()
+ learnerEp := clus.Members[3].GRPCURL
cfg := clientv3.Config{
Endpoints: []string{learnerEp},
DialTimeout: 5 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
}
// this client only has endpoint of the learner member
- cli, err := integration.NewClient(t, cfg)
+ cli, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatalf("failed to create clientv3: %v", err)
}
@@ -1082,9 +852,9 @@ func TestKVForLearner(t *testing.T) {
// TestBalancerSupportLearner verifies that balancer's retry and failover mechanism supports cluster with learner member
func TestBalancerSupportLearner(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
defer clus.Terminate(t)
// we have to add and launch learner member after initial cluster was created, because
@@ -1100,13 +870,13 @@ func TestBalancerSupportLearner(t *testing.T) {
}
// clus.Members[3] is the newly added learner member, which was appended to clus.Members
- learnerEp := clus.Members[3].GRPCAddr()
+ learnerEp := clus.Members[3].GRPCURL
cfg := clientv3.Config{
Endpoints: []string{learnerEp},
DialTimeout: 5 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
}
- cli, err := integration.NewClient(t, cfg)
+ cli, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatalf("failed to create clientv3: %v", err)
}
@@ -1115,11 +885,12 @@ func TestBalancerSupportLearner(t *testing.T) {
// wait until learner member is ready
<-clus.Members[3].ReadyNotify()
- if _, err := cli.Get(context.Background(), "foo"); err == nil {
+ if _, err = cli.Get(context.Background(), "foo"); err == nil {
t.Fatalf("expect Get request to learner to fail, got no error")
}
+ t.Logf("Expected: Read from learner error: %v", err)
- eps := []string{learnerEp, clus.Members[0].GRPCAddr()}
+ eps := []string{learnerEp, clus.Members[0].GRPCURL}
cli.SetEndpoints(eps...)
if _, err := cli.Get(context.Background(), "foo"); err != nil {
t.Errorf("expect no error (balancer should retry when request to learner fails), got error: %v", err)
diff --git a/tests/integration/clientv3/lease/lease_test.go b/tests/integration/clientv3/lease/lease_test.go
index 326289949b7..b27730d207f 100644
--- a/tests/integration/clientv3/lease/lease_test.go
+++ b/tests/integration/clientv3/lease/lease_test.go
@@ -16,6 +16,7 @@ package lease_test
import (
"context"
+ "errors"
"fmt"
"reflect"
"sort"
@@ -23,30 +24,32 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/require"
+
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
- "go.etcd.io/etcd/tests/v3/integration"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestLeaseNotFoundError(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
kv := clus.RandClient()
_, err := kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(clientv3.LeaseID(500)))
- if err != rpctypes.ErrLeaseNotFound {
+ if !errors.Is(err, rpctypes.ErrLeaseNotFound) {
t.Fatalf("expected %v, got %v", rpctypes.ErrLeaseNotFound, err)
}
}
func TestLeaseGrant(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
lapi := clus.RandClient()
@@ -54,7 +57,7 @@ func TestLeaseGrant(t *testing.T) {
kv := clus.RandClient()
_, merr := lapi.Grant(context.Background(), clientv3.MaxLeaseTTL+1)
- if merr != rpctypes.ErrLeaseTTLTooLarge {
+ if !errors.Is(merr, rpctypes.ErrLeaseTTLTooLarge) {
t.Fatalf("err = %v, want %v", merr, rpctypes.ErrLeaseTTLTooLarge)
}
@@ -70,9 +73,9 @@ func TestLeaseGrant(t *testing.T) {
}
func TestLeaseRevoke(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
lapi := clus.RandClient()
@@ -90,15 +93,15 @@ func TestLeaseRevoke(t *testing.T) {
}
_, err = kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID))
- if err != rpctypes.ErrLeaseNotFound {
+ if !errors.Is(err, rpctypes.ErrLeaseNotFound) {
t.Fatalf("err = %v, want %v", err, rpctypes.ErrLeaseNotFound)
}
}
func TestLeaseKeepAliveOnce(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
lapi := clus.RandClient()
@@ -114,15 +117,15 @@ func TestLeaseKeepAliveOnce(t *testing.T) {
}
_, err = lapi.KeepAliveOnce(context.Background(), clientv3.LeaseID(0))
- if err != rpctypes.ErrLeaseNotFound {
+ if !errors.Is(err, rpctypes.ErrLeaseNotFound) {
t.Errorf("expected %v, got %v", rpctypes.ErrLeaseNotFound, err)
}
}
func TestLeaseKeepAlive(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
lapi := clus.Client(0)
@@ -133,7 +136,14 @@ func TestLeaseKeepAlive(t *testing.T) {
t.Errorf("failed to create lease %v", err)
}
- rc, kerr := lapi.KeepAlive(context.Background(), resp.ID)
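+ // uncomparableCtx contains a func field and is therefore not comparable;
+ // it checks that KeepAlive works with contexts that cannot be compared.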
+ type uncomparableCtx struct {
+ context.Context
+ _ func()
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ rc, kerr := lapi.KeepAlive(uncomparableCtx{Context: ctx}, resp.ID)
if kerr != nil {
t.Errorf("failed to keepalive lease %v", kerr)
}
@@ -151,6 +161,26 @@ func TestLeaseKeepAlive(t *testing.T) {
t.Errorf("ID = %x, want %x", kresp.ID, resp.ID)
}
+ ctx2, cancel2 := context.WithCancel(context.Background())
+ rc2, kerr2 := lapi.KeepAlive(uncomparableCtx{Context: ctx2}, resp.ID)
+ if kerr2 != nil {
+ t.Errorf("failed to keepalive lease %v", kerr2)
+ }
+
+ cancel2()
+
+ _, ok = <-rc2
+ if ok {
+ t.Errorf("chan is not closed, want cancel stop keepalive")
+ }
+
+ select {
+ case <-rc:
+ // cancel2() should not affect first keepalive
+ t.Errorf("chan is closed, want keepalive continue")
+ default:
+ }
+
lapi.Close()
_, ok = <-rc
@@ -160,9 +190,9 @@ func TestLeaseKeepAlive(t *testing.T) {
}
func TestLeaseKeepAliveOneSecond(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
@@ -183,14 +213,15 @@ func TestLeaseKeepAliveOneSecond(t *testing.T) {
}
}
+// TestLeaseKeepAliveHandleFailure tests that lease keepalive handles failures.
// TODO: add a client that can connect to all the members of cluster via unix sock.
// TODO: test handle more complicated failures.
func TestLeaseKeepAliveHandleFailure(t *testing.T) {
t.Skip("test it when we have a cluster client")
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
// TODO: change this line to get a cluster client
@@ -243,13 +274,13 @@ type leaseCh struct {
// TestLeaseKeepAliveNotFound ensures a revoked lease won't halt other leases.
func TestLeaseKeepAliveNotFound(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.RandClient()
- lchs := []leaseCh{}
+ var lchs []leaseCh
for i := 0; i < 3; i++ {
resp, rerr := cli.Grant(context.TODO(), 5)
if rerr != nil {
@@ -276,9 +307,9 @@ func TestLeaseKeepAliveNotFound(t *testing.T) {
}
func TestLeaseGrantErrConnClosed(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
@@ -298,7 +329,7 @@ func TestLeaseGrantErrConnClosed(t *testing.T) {
}()
select {
- case <-time.After(integration.RequestWaitTimeout):
+ case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("le.Grant took too long")
case <-donec:
}
@@ -308,9 +339,9 @@ func TestLeaseGrantErrConnClosed(t *testing.T) {
// queue is full thus dropping keepalive response sends,
// keepalive request is sent with the same rate of TTL / 3.
func TestLeaseKeepAliveFullResponseQueue(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
lapi := clus.Client(0)
@@ -348,9 +379,9 @@ func TestLeaseKeepAliveFullResponseQueue(t *testing.T) {
}
func TestLeaseGrantNewAfterClose(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
@@ -368,16 +399,16 @@ func TestLeaseGrantNewAfterClose(t *testing.T) {
close(donec)
}()
select {
- case <-time.After(integration.RequestWaitTimeout):
+ case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("le.Grant took too long")
case <-donec:
}
}
func TestLeaseRevokeNewAfterClose(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
@@ -402,11 +433,11 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) {
}
}()
select {
- case <-time.After(integration.RequestWaitTimeout):
+ case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("le.Revoke took too long")
case errMsg := <-errMsgCh:
if errMsg != "" {
- t.Fatalf(errMsg)
+ t.Fatalf("%v", errMsg)
}
}
}
@@ -414,9 +445,9 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) {
// TestLeaseKeepAliveCloseAfterDisconnectRevoke ensures the keep alive channel is closed
// following a disconnection, lease revoke, then reconnect.
func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
cli := clus.Client(0)
@@ -460,9 +491,9 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
// TestLeaseKeepAliveInitTimeout ensures the keep alive channel closes if
// the initial keep alive request never gets a response.
func TestLeaseKeepAliveInitTimeout(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
cli := clus.Client(0)
@@ -490,12 +521,12 @@ func TestLeaseKeepAliveInitTimeout(t *testing.T) {
clus.Members[0].Restart(t)
}
-// TestLeaseKeepAliveInitTimeout ensures the keep alive channel closes if
+// TestLeaseKeepAliveTTLTimeout ensures the keep alive channel closes if
// a keep alive request after the first never gets a response.
func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
cli := clus.Client(0)
@@ -528,9 +559,9 @@ func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
}
func TestLeaseTimeToLive(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
c := clus.RandClient()
@@ -544,9 +575,8 @@ func TestLeaseTimeToLive(t *testing.T) {
kv := clus.RandClient()
keys := []string{"foo1", "foo2"}
for i := range keys {
- if _, err = kv.Put(context.TODO(), keys[i], "bar", clientv3.WithLease(resp.ID)); err != nil {
- t.Fatal(err)
- }
+ _, err = kv.Put(context.TODO(), keys[i], "bar", clientv3.WithLease(resp.ID))
+ require.NoError(t, err)
}
// linearized read to ensure Puts propagated to server backing lapi
@@ -586,9 +616,9 @@ func TestLeaseTimeToLive(t *testing.T) {
}
func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.RandClient()
@@ -621,14 +651,14 @@ func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) {
}
func TestLeaseLeases(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.RandClient()
- ids := []clientv3.LeaseID{}
+ var ids []clientv3.LeaseID
for i := 0; i < 5; i++ {
resp, err := cli.Grant(context.Background(), 10)
if err != nil {
@@ -654,9 +684,9 @@ func TestLeaseLeases(t *testing.T) {
// TestLeaseRenewLostQuorum ensures keepalives work after losing quorum
// for a while.
func TestLeaseRenewLostQuorum(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
cli := clus.Client(0)
@@ -702,9 +732,9 @@ func TestLeaseRenewLostQuorum(t *testing.T) {
}
func TestLeaseKeepAliveLoopExit(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
ctx := context.Background()
@@ -718,7 +748,8 @@ func TestLeaseKeepAliveLoopExit(t *testing.T) {
cli.Close()
_, err = cli.KeepAlive(ctx, resp.ID)
- if _, ok := err.(clientv3.ErrKeepAliveHalted); !ok {
+ var keepAliveHaltedErr clientv3.ErrKeepAliveHalted
+ if !errors.As(err, &keepAliveHaltedErr) {
t.Fatalf("expected %T, got %v(%T)", clientv3.ErrKeepAliveHalted{}, err, err)
}
}
@@ -727,8 +758,8 @@ func TestLeaseKeepAliveLoopExit(t *testing.T) {
// before, during, and after quorum loss to confirm Grant/KeepAlive tolerates
// transient cluster failure.
func TestV3LeaseFailureOverlap(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 2, UseBridge: true})
defer clus.Terminate(t)
numReqs := 5
@@ -760,7 +791,7 @@ func TestV3LeaseFailureOverlap(t *testing.T) {
go func() {
defer wg.Done()
err := updown(n)
- if err == nil || err == rpctypes.ErrTimeoutDueToConnectionLost {
+ if err == nil || errors.Is(err, rpctypes.ErrTimeoutDueToConnectionLost) {
return
}
t.Error(err)
@@ -780,9 +811,9 @@ func TestV3LeaseFailureOverlap(t *testing.T) {
// TestLeaseWithRequireLeader checks keep-alive channel close when no leader.
func TestLeaseWithRequireLeader(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 2, UseBridge: true})
defer clus.Terminate(t)
c := clus.Client(0)
diff --git a/tests/integration/clientv3/lease/leasing_test.go b/tests/integration/clientv3/lease/leasing_test.go
index 02814aa4655..22c98f9ed05 100644
--- a/tests/integration/clientv3/lease/leasing_test.go
+++ b/tests/integration/clientv3/lease/leasing_test.go
@@ -16,6 +16,7 @@ package lease_test
import (
"context"
+ "errors"
"fmt"
"math/rand"
"reflect"
@@ -23,29 +24,31 @@ import (
"testing"
"time"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/client/v3"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
"go.etcd.io/etcd/client/v3/leasing"
- "go.etcd.io/etcd/tests/v3/integration"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestLeasingPutGet(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
lKV1, closeLKV1, err := leasing.NewKV(clus.Client(0), "foo/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV1()
lKV2, closeLKV2, err := leasing.NewKV(clus.Client(1), "foo/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV2()
lKV3, closeLKV3, err := leasing.NewKV(clus.Client(2), "foo/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV3()
resp, err := lKV1.Get(context.TODO(), "abc")
@@ -90,19 +93,18 @@ func TestLeasingPutGet(t *testing.T) {
// TestLeasingInterval checks the leasing KV fetches key intervals.
func TestLeasingInterval(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
keys := []string{"abc/a", "abc/b", "abc/a/a"}
for _, k := range keys {
- if _, err = clus.Client(0).Put(context.TODO(), k, "v"); err != nil {
- t.Fatal(err)
- }
+ _, err = clus.Client(0).Put(context.TODO(), k, "v")
+ require.NoError(t, err)
}
resp, err := lkv.Get(context.TODO(), "abc/", clientv3.WithPrefix())
@@ -129,12 +131,12 @@ func TestLeasingInterval(t *testing.T) {
// TestLeasingPutInvalidateNew checks the leasing KV updates its cache on a Put to a new key.
func TestLeasingPutInvalidateNew(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
if _, err = lkv.Get(context.TODO(), "k"); err != nil {
@@ -159,8 +161,8 @@ func TestLeasingPutInvalidateNew(t *testing.T) {
// TestLeasingPutInvalidateExisting checks the leasing KV updates its cache on a Put to an existing key.
func TestLeasingPutInvalidateExisting(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
if _, err := clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil {
@@ -168,7 +170,7 @@ func TestLeasingPutInvalidateExisting(t *testing.T) {
}
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
if _, err = lkv.Get(context.TODO(), "k"); err != nil {
@@ -193,41 +195,41 @@ func TestLeasingPutInvalidateExisting(t *testing.T) {
// TestLeasingGetNoLeaseTTL checks a key with a TTL is not leased.
func TestLeasingGetNoLeaseTTL(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
lresp, err := clus.Client(0).Grant(context.TODO(), 60)
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
_, err = clus.Client(0).Put(context.TODO(), "k", "v", clientv3.WithLease(lresp.ID))
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
gresp, err := lkv.Get(context.TODO(), "k")
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, len(gresp.Kvs), 1)
+ require.NoError(t, err)
+ assert.Len(t, gresp.Kvs, 1)
clus.Members[0].Stop(t)
ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
_, err = lkv.Get(ctx, "k")
cancel()
- testutil.AssertEqual(t, err, ctx.Err())
+ assert.Equal(t, err, ctx.Err())
}
// TestLeasingGetSerializable checks the leasing KV can make serialized requests
// when the etcd cluster is partitioned.
func TestLeasingGetSerializable(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 2, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
if _, err = clus.Client(0).Put(context.TODO(), "cached", "abc"); err != nil {
@@ -262,12 +264,12 @@ func TestLeasingGetSerializable(t *testing.T) {
// TestLeasingPrevKey checks the cache respects WithPrevKV on puts.
func TestLeasingPrevKey(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 2})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil {
@@ -288,12 +290,12 @@ func TestLeasingPrevKey(t *testing.T) {
// TestLeasingRevGet checks the cache respects Get by Revision.
func TestLeasingRevGet(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
putResp, err := clus.Client(0).Put(context.TODO(), "k", "abc")
@@ -324,12 +326,12 @@ func TestLeasingRevGet(t *testing.T) {
// TestLeasingGetWithOpts checks options that can be served through the cache do not depend on the server.
func TestLeasingGetWithOpts(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil {
@@ -351,12 +353,11 @@ func TestLeasingGetWithOpts(t *testing.T) {
clientv3.WithSerializable(),
}
for _, opt := range opts {
- if _, err := lkv.Get(context.TODO(), "k", opt); err != nil {
- t.Fatal(err)
- }
+ _, err := lkv.Get(context.TODO(), "k", opt)
+ require.NoError(t, err)
}
- getOpts := []clientv3.OpOption{}
+ var getOpts []clientv3.OpOption
for i := 0; i < len(opts); i++ {
getOpts = append(getOpts, opts[rand.Intn(len(opts))])
}
@@ -369,12 +370,12 @@ func TestLeasingGetWithOpts(t *testing.T) {
// TestLeasingConcurrentPut ensures that a get after concurrent puts returns
// the recently put data.
func TestLeasingConcurrentPut(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
// force key into leasing key cache
@@ -416,12 +417,12 @@ func TestLeasingConcurrentPut(t *testing.T) {
}
func TestLeasingDisconnectedGet(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
if _, err = clus.Client(0).Put(context.TODO(), "cached", "abc"); err != nil {
@@ -445,12 +446,12 @@ func TestLeasingDisconnectedGet(t *testing.T) {
}
func TestLeasingDeleteOwner(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil {
@@ -479,16 +480,16 @@ func TestLeasingDeleteOwner(t *testing.T) {
}
func TestLeasingDeleteNonOwner(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
lkv1, closeLKV1, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV1()
lkv2, closeLKV2, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV2()
if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil {
@@ -514,12 +515,12 @@ func TestLeasingDeleteNonOwner(t *testing.T) {
}
func TestLeasingOverwriteResponse(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil {
@@ -548,12 +549,12 @@ func TestLeasingOverwriteResponse(t *testing.T) {
}
func TestLeasingOwnerPutResponse(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil {
@@ -586,20 +587,19 @@ func TestLeasingOwnerPutResponse(t *testing.T) {
}
func TestLeasingTxnOwnerGetRange(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
keyCount := rand.Intn(10) + 1
for i := 0; i < keyCount; i++ {
k := fmt.Sprintf("k-%d", i)
- if _, err := clus.Client(0).Put(context.TODO(), k, k+k); err != nil {
- t.Fatal(err)
- }
+ _, err := clus.Client(0).Put(context.TODO(), k, k+k)
+ require.NoError(t, err)
}
if _, err := lkv.Get(context.TODO(), "k-"); err != nil {
t.Fatal(err)
@@ -615,14 +615,14 @@ func TestLeasingTxnOwnerGetRange(t *testing.T) {
}
func TestLeasingTxnOwnerGet(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
client := clus.Client(0)
lkv, closeLKV, err := leasing.NewKV(client, "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer func() {
+ // In '--tags cluster_proxy' mode the client needs to be closed before
@@ -647,9 +647,8 @@ func TestLeasingTxnOwnerGet(t *testing.T) {
}
presps[i] = presp
- if _, err = lkv.Get(context.TODO(), k); err != nil {
- t.Fatal(err)
- }
+ _, err = lkv.Get(context.TODO(), k)
+ require.NoError(t, err)
ops = append(ops, clientv3.OpGet(k))
}
@@ -692,7 +691,7 @@ func TestLeasingTxnOwnerGet(t *testing.T) {
k := fmt.Sprintf("k-%d", i)
rr := tresp.Responses[i].GetResponseRange()
if rr == nil {
- t.Errorf("expected get response, got %+v", tresp.Responses[i])
+ t.Fatalf("expected get response, got %+v", tresp.Responses[i])
}
if string(rr.Kvs[0].Key) != k || string(rr.Kvs[0].Value) != k+k {
t.Errorf(`expected key for %q, got %+v`, k, rr.Kvs)
@@ -701,12 +700,12 @@ func TestLeasingTxnOwnerGet(t *testing.T) {
}
func TestLeasingTxnOwnerDeleteRange(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
keyCount := rand.Intn(10) + 1
@@ -740,12 +739,12 @@ func TestLeasingTxnOwnerDeleteRange(t *testing.T) {
}
func TestLeasingTxnOwnerDelete(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil {
@@ -771,12 +770,12 @@ func TestLeasingTxnOwnerDelete(t *testing.T) {
}
func TestLeasingTxnOwnerIf(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil {
@@ -865,16 +864,16 @@ func TestLeasingTxnOwnerIf(t *testing.T) {
}
func TestLeasingTxnCancel(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
lkv1, closeLKV1, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV1()
lkv2, closeLKV2, err := leasing.NewKV(clus.Client(1), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV2()
// acquire lease but disconnect so no revoke in time
@@ -893,22 +892,22 @@ func TestLeasingTxnCancel(t *testing.T) {
time.Sleep(100 * time.Millisecond)
cancel()
}()
- if _, err := lkv2.Txn(ctx).Then(clientv3.OpPut("k", "v")).Commit(); err != context.Canceled {
+ if _, err := lkv2.Txn(ctx).Then(clientv3.OpPut("k", "v")).Commit(); !errors.Is(err, context.Canceled) {
t.Fatalf("expected %v, got %v", context.Canceled, err)
}
}
func TestLeasingTxnNonOwnerPut(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
lkv2, closeLKV2, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV2()
if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil {
@@ -962,7 +961,7 @@ func TestLeasingTxnNonOwnerPut(t *testing.T) {
clientv3.WithPrefix())
wresp := <-w
c := 0
- evs := []clientv3.Event{}
+ var evs []clientv3.Event
for _, ev := range wresp.Events {
evs = append(evs, *ev)
if ev.Kv.ModRevision == tresp.Header.Revision {
@@ -977,16 +976,16 @@ func TestLeasingTxnNonOwnerPut(t *testing.T) {
// TestLeasingTxnRandIfThenOrElse randomly leases keys two separate clients, then
// issues a random If/{Then,Else} transaction on those keys to one client.
func TestLeasingTxnRandIfThenOrElse(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err1)
+ require.NoError(t, err1)
defer closeLKV1()
lkv2, closeLKV2, err2 := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err2)
+ require.NoError(t, err2)
defer closeLKV2()
keyCount := 16
@@ -1020,7 +1019,7 @@ func TestLeasingTxnRandIfThenOrElse(t *testing.T) {
// random list of comparisons, all true
cmps, useThen := randCmps("k-", dat)
// random list of puts/gets; unique keys
- ops := []clientv3.Op{}
+ var ops []clientv3.Op
usedIdx := make(map[int]struct{})
for i := 0; i < keyCount; i++ {
idx := rand.Intn(keyCount)
@@ -1083,12 +1082,12 @@ func TestLeasingTxnRandIfThenOrElse(t *testing.T) {
}
func TestLeasingOwnerPutError(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
if _, err = lkv.Get(context.TODO(), "k"); err != nil {
@@ -1104,12 +1103,12 @@ func TestLeasingOwnerPutError(t *testing.T) {
}
func TestLeasingOwnerDeleteError(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
if _, err = lkv.Get(context.TODO(), "k"); err != nil {
@@ -1125,12 +1124,12 @@ func TestLeasingOwnerDeleteError(t *testing.T) {
}
func TestLeasingNonOwnerPutError(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
clus.Members[0].Stop(t)
@@ -1150,18 +1149,17 @@ func TestLeasingOwnerDeleteFrom(t *testing.T) {
}
func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "0/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
for i := 0; i < 8; i++ {
- if _, err = clus.Client(0).Put(context.TODO(), fmt.Sprintf("key/%d", i), "123"); err != nil {
- t.Fatal(err)
- }
+ _, err = clus.Client(0).Put(context.TODO(), fmt.Sprintf("key/%d", i), "123")
+ require.NoError(t, err)
}
if _, err = lkv.Get(context.TODO(), "key/1"); err != nil {
@@ -1199,25 +1197,23 @@ func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) {
}
func TestLeasingDeleteRangeBounds(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
delkv, closeDelKV, err := leasing.NewKV(clus.Client(0), "0/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeDelKV()
getkv, closeGetKv, err := leasing.NewKV(clus.Client(0), "0/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeGetKv()
for _, k := range []string{"j", "m"} {
- if _, err = clus.Client(0).Put(context.TODO(), k, "123"); err != nil {
- t.Fatal(err)
- }
- if _, err = getkv.Get(context.TODO(), k); err != nil {
- t.Fatal(err)
- }
+ _, err = clus.Client(0).Put(context.TODO(), k, "123")
+ require.NoError(t, err)
+ _, err = getkv.Get(context.TODO(), k)
+ require.NoError(t, err)
}
if _, err = delkv.Delete(context.TODO(), "k", clientv3.WithPrefix()); err != nil {
@@ -1257,38 +1253,41 @@ func TestLeaseDeleteRangeContendDel(t *testing.T) {
}
func testLeasingDeleteRangeContend(t *testing.T, op clientv3.Op) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
delkv, closeDelKV, err := leasing.NewKV(clus.Client(0), "0/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeDelKV()
putkv, closePutKV, err := leasing.NewKV(clus.Client(0), "0/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closePutKV()
- for i := 0; i < 8; i++ {
+ const maxKey = 8
+ for i := 0; i < maxKey; i++ {
key := fmt.Sprintf("key/%d", i)
- if _, err = clus.Client(0).Put(context.TODO(), key, "123"); err != nil {
- t.Fatal(err)
- }
- if _, err = putkv.Get(context.TODO(), key); err != nil {
- t.Fatal(err)
- }
+ _, err = clus.Client(0).Put(context.TODO(), key, "123")
+ require.NoError(t, err)
+ _, err = putkv.Get(context.TODO(), key)
+ require.NoError(t, err)
}
ctx, cancel := context.WithCancel(context.TODO())
donec := make(chan struct{})
- go func() {
+ go func(t *testing.T) {
defer close(donec)
for i := 0; ctx.Err() == nil; i++ {
- key := fmt.Sprintf("key/%d", i%8)
- putkv.Put(ctx, key, "123")
- putkv.Get(ctx, key)
+ key := fmt.Sprintf("key/%d", i%maxKey)
+ if _, err = putkv.Put(context.TODO(), key, "123"); err != nil {
+ t.Errorf("fail putting key %s: %v", key, err)
+ }
+ if _, err = putkv.Get(context.TODO(), key); err != nil {
+ t.Errorf("fail getting key %s: %v", key, err)
+ }
}
- }()
+ }(t)
_, delErr := delkv.Do(context.TODO(), op)
cancel()
@@ -1298,7 +1297,7 @@ func testLeasingDeleteRangeContend(t *testing.T, op clientv3.Op) {
}
// confirm keys on non-deleter match etcd
- for i := 0; i < 8; i++ {
+ for i := 0; i < maxKey; i++ {
key := fmt.Sprintf("key/%d", i)
resp, err := putkv.Get(context.TODO(), key)
if err != nil {
@@ -1315,29 +1314,26 @@ func testLeasingDeleteRangeContend(t *testing.T, op clientv3.Op) {
}
func TestLeasingPutGetDeleteConcurrent(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
lkvs := make([]clientv3.KV, 16)
for i := range lkvs {
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
lkvs[i] = lkv
}
getdel := func(kv clientv3.KV) {
- if _, err := kv.Put(context.TODO(), "k", "abc"); err != nil {
- t.Fatal(err)
- }
+ _, err := kv.Put(context.TODO(), "k", "abc")
+ require.NoError(t, err)
time.Sleep(time.Millisecond)
- if _, err := kv.Get(context.TODO(), "k"); err != nil {
- t.Fatal(err)
- }
- if _, err := kv.Delete(context.TODO(), "k"); err != nil {
- t.Fatal(err)
- }
+ _, err = kv.Get(context.TODO(), "k")
+ require.NoError(t, err)
+ _, err = kv.Delete(context.TODO(), "k")
+ require.NoError(t, err)
time.Sleep(2 * time.Millisecond)
}
@@ -1374,16 +1370,16 @@ func TestLeasingPutGetDeleteConcurrent(t *testing.T) {
// TestLeasingReconnectOwnerRevoke checks that revocation works if
// disconnected when trying to submit revoke txn.
func TestLeasingReconnectOwnerRevoke(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "foo/")
- testutil.AssertNil(t, err1)
+ require.NoError(t, err1)
defer closeLKV1()
lkv2, closeLKV2, err2 := leasing.NewKV(clus.Client(1), "foo/")
- testutil.AssertNil(t, err2)
+ require.NoError(t, err2)
defer closeLKV2()
if _, err := lkv1.Get(context.TODO(), "k"); err != nil {
@@ -1435,16 +1431,16 @@ func TestLeasingReconnectOwnerRevoke(t *testing.T) {
// TestLeasingReconnectOwnerRevokeCompact checks that revocation works if
// disconnected and the watch is compacted.
func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "foo/")
- testutil.AssertNil(t, err1)
+ require.NoError(t, err1)
defer closeLKV1()
lkv2, closeLKV2, err2 := leasing.NewKV(clus.Client(1), "foo/")
- testutil.AssertNil(t, err2)
+ require.NoError(t, err2)
defer closeLKV2()
if _, err := lkv1.Get(context.TODO(), "k"); err != nil {
@@ -1488,32 +1484,29 @@ func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) {
// TestLeasingReconnectOwnerConsistency checks a write error on an owner will
// not cause inconsistency between the server and the client.
func TestLeasingReconnectOwnerConsistency(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
defer closeLKV()
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
- if _, err = lkv.Put(context.TODO(), "k", "x"); err != nil {
- t.Fatal(err)
- }
- if _, err = lkv.Put(context.TODO(), "kk", "y"); err != nil {
- t.Fatal(err)
- }
- if _, err = lkv.Get(context.TODO(), "k"); err != nil {
- t.Fatal(err)
- }
+ _, err = lkv.Put(context.TODO(), "k", "x")
+ require.NoError(t, err)
+ _, err = lkv.Put(context.TODO(), "kk", "y")
+ require.NoError(t, err)
+ _, err = lkv.Get(context.TODO(), "k")
+ require.NoError(t, err)
for i := 0; i < 10; i++ {
v := fmt.Sprintf("%d", i)
donec := make(chan struct{})
- clus.Members[0].DropConnections()
+ clus.Members[0].Bridge().DropConnections()
go func() {
defer close(donec)
for i := 0; i < 20; i++ {
- clus.Members[0].DropConnections()
+ clus.Members[0].Bridge().DropConnections()
time.Sleep(time.Millisecond)
}
}()
@@ -1549,25 +1542,21 @@ func TestLeasingReconnectOwnerConsistency(t *testing.T) {
}
lresp, lerr := lkv.Get(context.TODO(), "k")
- if lerr != nil {
- t.Fatal(lerr)
- }
+ require.NoError(t, lerr)
cresp, cerr := clus.Client(0).Get(context.TODO(), "k")
- if cerr != nil {
- t.Fatal(cerr)
- }
+ require.NoError(t, cerr)
if !reflect.DeepEqual(lresp.Kvs, cresp.Kvs) {
t.Fatalf("expected %+v, got %+v", cresp, lresp)
}
}
func TestLeasingTxnAtomicCache(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
puts, gets := make([]clientv3.Op, 16), make([]clientv3.Op, 16)
@@ -1575,13 +1564,11 @@ func TestLeasingTxnAtomicCache(t *testing.T) {
k := fmt.Sprintf("k-%d", i)
puts[i], gets[i] = clientv3.OpPut(k, k), clientv3.OpGet(k)
}
- if _, err = clus.Client(0).Txn(context.TODO()).Then(puts...).Commit(); err != nil {
- t.Fatal(err)
- }
+ _, err = clus.Client(0).Txn(context.TODO()).Then(puts...).Commit()
+ require.NoError(t, err)
for i := range gets {
- if _, err = lkv.Do(context.TODO(), gets[i]); err != nil {
- t.Fatal(err)
- }
+ _, err = lkv.Do(context.TODO(), gets[i])
+ require.NoError(t, err)
}
numPutters, numGetters := 16, 16
@@ -1648,24 +1635,23 @@ func TestLeasingTxnAtomicCache(t *testing.T) {
// TestLeasingReconnectTxn checks that Txn is resilient to disconnects.
func TestLeasingReconnectTxn(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
- if _, err = lkv.Get(context.TODO(), "k"); err != nil {
- t.Fatal(err)
- }
+ _, err = lkv.Get(context.TODO(), "k")
+ require.NoError(t, err)
donec := make(chan struct{})
go func() {
defer close(donec)
- clus.Members[0].DropConnections()
+ clus.Members[0].Bridge().DropConnections()
for i := 0; i < 10; i++ {
- clus.Members[0].DropConnections()
+ clus.Members[0].Bridge().DropConnections()
time.Sleep(time.Millisecond)
}
time.Sleep(10 * time.Millisecond)
@@ -1676,38 +1662,35 @@ func TestLeasingReconnectTxn(t *testing.T) {
Then(clientv3.OpGet("k")).
Commit()
<-donec
- if lerr != nil {
- t.Fatal(lerr)
- }
+ require.NoError(t, lerr)
}
// TestLeasingReconnectNonOwnerGet checks a get error on an owner will
// not cause inconsistency between the server and the client.
func TestLeasingReconnectNonOwnerGet(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
// populate a few keys so some leasing gets have keys
for i := 0; i < 4; i++ {
k := fmt.Sprintf("k-%d", i*2)
- if _, err = lkv.Put(context.TODO(), k, k[2:]); err != nil {
- t.Fatal(err)
- }
+ _, err = lkv.Put(context.TODO(), k, k[2:])
+ require.NoError(t, err)
}
n := 0
for i := 0; i < 10; i++ {
donec := make(chan struct{})
- clus.Members[0].DropConnections()
+ clus.Members[0].Bridge().DropConnections()
go func() {
defer close(donec)
for j := 0; j < 10; j++ {
- clus.Members[0].DropConnections()
+ clus.Members[0].Bridge().DropConnections()
time.Sleep(time.Millisecond)
}
}()
@@ -1721,13 +1704,9 @@ func TestLeasingReconnectNonOwnerGet(t *testing.T) {
for i := 0; i < n; i++ {
k := fmt.Sprintf("k-%d", i)
lresp, lerr := lkv.Get(context.TODO(), k)
- if lerr != nil {
- t.Fatal(lerr)
- }
+ require.NoError(t, lerr)
cresp, cerr := clus.Client(0).Get(context.TODO(), k)
- if cerr != nil {
- t.Fatal(cerr)
- }
+ require.NoError(t, cerr)
if !reflect.DeepEqual(lresp.Kvs, cresp.Kvs) {
t.Fatalf("expected %+v, got %+v", cresp, lresp)
}
@@ -1735,47 +1714,41 @@ func TestLeasingReconnectNonOwnerGet(t *testing.T) {
}
func TestLeasingTxnRangeCmp(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
- if _, err = clus.Client(0).Put(context.TODO(), "k", "a"); err != nil {
- t.Fatal(err)
- }
+ _, err = clus.Client(0).Put(context.TODO(), "k", "a")
+ require.NoError(t, err)
// k2 version = 2
- if _, err = clus.Client(0).Put(context.TODO(), "k2", "a"); err != nil {
- t.Fatal(err)
- }
- if _, err = clus.Client(0).Put(context.TODO(), "k2", "a"); err != nil {
- t.Fatal(err)
- }
+ _, err = clus.Client(0).Put(context.TODO(), "k2", "a")
+ require.NoError(t, err)
+ _, err = clus.Client(0).Put(context.TODO(), "k2", "a")
+ require.NoError(t, err)
// cache k
- if _, err = lkv.Get(context.TODO(), "k"); err != nil {
- t.Fatal(err)
- }
+ _, err = lkv.Get(context.TODO(), "k")
+ require.NoError(t, err)
cmp := clientv3.Compare(clientv3.Version("k").WithPrefix(), "=", 1)
tresp, terr := lkv.Txn(context.TODO()).If(cmp).Commit()
- if terr != nil {
- t.Fatal(terr)
- }
+ require.NoError(t, terr)
if tresp.Succeeded {
t.Fatalf("expected Succeeded=false, got %+v", tresp)
}
}
func TestLeasingDo(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
ops := []clientv3.Op{
@@ -1803,38 +1776,33 @@ func TestLeasingDo(t *testing.T) {
}
gresp, err := clus.Client(0).Get(context.TODO(), "a", clientv3.WithPrefix())
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
if len(gresp.Kvs) != 0 {
t.Fatalf("expected no keys, got %+v", gresp.Kvs)
}
}
func TestLeasingTxnOwnerPutBranch(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
n := 0
treeOp := makePutTreeOp("tree", &n, 4)
for i := 0; i < n; i++ {
k := fmt.Sprintf("tree/%d", i)
- if _, err = clus.Client(0).Put(context.TODO(), k, "a"); err != nil {
- t.Fatal(err)
- }
- if _, err = lkv.Get(context.TODO(), k); err != nil {
- t.Fatal(err)
- }
+ _, err = clus.Client(0).Put(context.TODO(), k, "a")
+ require.NoError(t, err)
+ _, err = lkv.Get(context.TODO(), k)
+ require.NoError(t, err)
}
- if _, err = lkv.Do(context.TODO(), treeOp); err != nil {
- t.Fatal(err)
- }
+ _, err = lkv.Do(context.TODO(), treeOp)
+ require.NoError(t, err)
// lkv shouldn't need to call out to server for updated leased keys
clus.Members[0].Stop(t)
@@ -1842,13 +1810,9 @@ func TestLeasingTxnOwnerPutBranch(t *testing.T) {
for i := 0; i < n; i++ {
k := fmt.Sprintf("tree/%d", i)
lkvResp, err := lkv.Get(context.TODO(), k)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
clusResp, err := clus.Client(1).Get(context.TODO(), k)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
if !reflect.DeepEqual(clusResp.Kvs, lkvResp.Kvs) {
t.Fatalf("expected %+v, got %+v", clusResp.Kvs, lkvResp.Kvs)
}
@@ -1892,7 +1856,6 @@ func randCmps(pfx string, dat []*clientv3.PutResponse) (cmps []clientv3.Cmp, the
cmp = clientv3.Compare(clientv3.CreateRevision(k), "=", rev)
case 3:
cmp = clientv3.Compare(clientv3.CreateRevision(k), "!=", rev+1)
-
}
cmps = append(cmps, cmp)
}
@@ -1906,39 +1869,34 @@ func randCmps(pfx string, dat []*clientv3.PutResponse) (cmps []clientv3.Cmp, the
}
func TestLeasingSessionExpire(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/", concurrency.WithTTL(1))
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
lkv2, closeLKV2, err := leasing.NewKV(clus.Client(0), "foo/")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV2()
// acquire lease on abc
- if _, err = lkv.Get(context.TODO(), "abc"); err != nil {
- t.Fatal(err)
- }
+ _, err = lkv.Get(context.TODO(), "abc")
+ require.NoError(t, err)
// down endpoint lkv uses for keepalives
clus.Members[0].Stop(t)
- if err = waitForLeasingExpire(clus.Client(1), "foo/abc"); err != nil {
- t.Fatal(err)
- }
+ err = waitForLeasingExpire(clus.Client(1), "foo/abc")
+ require.NoError(t, err)
waitForExpireAck(t, lkv)
clus.Members[0].Restart(t)
- integration.WaitClientV3(t, lkv2)
- if _, err = lkv2.Put(context.TODO(), "abc", "def"); err != nil {
- t.Fatal(err)
- }
+ integration2.WaitClientV3(t, lkv2)
+ _, err = lkv2.Put(context.TODO(), "abc", "def")
+ require.NoError(t, err)
resp, err := lkv.Get(context.TODO(), "abc")
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
if v := string(resp.Kvs[0].Value); v != "def" {
t.Fatalf("expected %q, got %q", "v", v)
}
@@ -1982,23 +1940,21 @@ func TestLeasingSessionExpireCancel(t *testing.T) {
}
for i := range tests {
t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/", concurrency.WithTTL(1))
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
defer closeLKV()
- if _, err = lkv.Get(context.TODO(), "abc"); err != nil {
- t.Fatal(err)
- }
+ _, err = lkv.Get(context.TODO(), "abc")
+ require.NoError(t, err)
// down endpoint lkv uses for keepalives
clus.Members[0].Stop(t)
- if err := waitForLeasingExpire(clus.Client(1), "foo/abc"); err != nil {
- t.Fatal(err)
- }
+ err = waitForLeasingExpire(clus.Client(1), "foo/abc")
+ require.NoError(t, err)
waitForExpireAck(t, lkv)
ctx, cancel := context.WithCancel(context.TODO())
@@ -2010,7 +1966,7 @@ func TestLeasingSessionExpireCancel(t *testing.T) {
select {
case err := <-errc:
- if err != ctx.Err() {
+ if !errors.Is(err, ctx.Err()) {
t.Errorf("#%d: expected %v of server unavailable, got %v", i, ctx.Err(), err)
}
case <-time.After(5 * time.Second):
@@ -2041,7 +1997,7 @@ func waitForExpireAck(t *testing.T, kv clientv3.KV) {
ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
_, err := kv.Get(ctx, "abc")
cancel()
- if err == ctx.Err() {
+ if errors.Is(err, ctx.Err()) {
return
} else if err != nil {
t.Logf("current error: %v", err)
diff --git a/tests/integration/clientv3/maintenance_test.go b/tests/integration/clientv3/maintenance_test.go
index dfef11a6c49..ab9be52c071 100644
--- a/tests/integration/clientv3/maintenance_test.go
+++ b/tests/integration/clientv3/maintenance_test.go
@@ -17,45 +17,50 @@ package clientv3test
import (
"bytes"
"context"
+ "crypto/sha256"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"math"
+ "os"
"path/filepath"
"testing"
"time"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
"google.golang.org/grpc"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/api/v3/version"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/lease"
- "go.etcd.io/etcd/server/v3/mvcc"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
- "go.etcd.io/etcd/tests/v3/integration"
+ "go.etcd.io/etcd/server/v3/storage"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+ "go.etcd.io/etcd/server/v3/storage/mvcc/testutil"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestMaintenanceHashKV(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
for i := 0; i < 3; i++ {
- if _, err := clus.RandClient().Put(context.Background(), "foo", "bar"); err != nil {
- t.Fatal(err)
- }
+ _, err := clus.RandClient().Put(context.Background(), "foo", "bar")
+ require.NoError(t, err)
}
var hv uint32
for i := 0; i < 3; i++ {
cli := clus.Client(i)
// ensure writes are replicated
- if _, err := cli.Get(context.TODO(), "foo"); err != nil {
- t.Fatal(err)
- }
- hresp, err := cli.HashKV(context.Background(), clus.Members[i].GRPCAddr(), 0)
+ _, err := cli.Get(context.TODO(), "foo")
+ require.NoError(t, err)
+ hresp, err := cli.HashKV(context.Background(), clus.Members[i].GRPCURL, 0)
if err != nil {
t.Fatal(err)
}
@@ -69,10 +74,58 @@ func TestMaintenanceHashKV(t *testing.T) {
}
}
+// TestCompactionHash verifies the compaction hash computation exposed through HashKV.
+// TODO: Change this to a fuzz test
+func TestCompactionHash(t *testing.T) {
+ integration2.BeforeTest(t)
+
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
+ defer clus.Terminate(t)
+
+ cc, err := clus.ClusterClient(t)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testutil.TestCompactionHash(context.Background(), t, hashTestCase{cc, clus.Members[0].GRPCURL}, 1000)
+}
+
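+// hashTestCase adapts a clientv3.Client and a single member URL for use
+// with testutil.TestCompactionHash.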
+type hashTestCase struct {
+ *clientv3.Client
+ url string
+}
+
+func (tc hashTestCase) Put(ctx context.Context, key, value string) error {
+ _, err := tc.Client.Put(ctx, key, value)
+ return err
+}
+
+func (tc hashTestCase) Delete(ctx context.Context, key string) error {
+ _, err := tc.Client.Delete(ctx, key)
+ return err
+}
+
+func (tc hashTestCase) HashByRev(ctx context.Context, rev int64) (testutil.KeyValueHash, error) {
+ resp, err := tc.Client.HashKV(ctx, tc.url, rev)
+ return testutil.KeyValueHash{Hash: resp.Hash, CompactRevision: resp.CompactRevision, Revision: resp.Header.Revision}, err
+}
+
+func (tc hashTestCase) Defrag(ctx context.Context) error {
+ _, err := tc.Client.Defragment(ctx, tc.url)
+ return err
+}
+
+func (tc hashTestCase) Compact(ctx context.Context, rev int64) error {
+ _, err := tc.Client.Compact(ctx, rev)
+ // Wait briefly for the server to finish applying the compaction.
+ time.Sleep(50 * time.Millisecond)
+ return err
+}
+
func TestMaintenanceMoveLeader(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
oldLeadIdx := clus.WaitLeader(t)
@@ -81,7 +134,7 @@ func TestMaintenanceMoveLeader(t *testing.T) {
cli := clus.Client(targetIdx)
_, err := cli.MoveLeader(context.Background(), target)
- if err != rpctypes.ErrNotLeader {
+ if !errors.Is(err, rpctypes.ErrNotLeader) {
t.Fatalf("error expected %v, got %v", rpctypes.ErrNotLeader, err)
}
@@ -98,32 +151,87 @@ func TestMaintenanceMoveLeader(t *testing.T) {
}
}
-// TestMaintenanceSnapshotError ensures that context cancel/timeout
+// TestMaintenanceSnapshotCancel ensures that canceling the context
// before snapshot reading returns corresponding context errors.
-func TestMaintenanceSnapshotError(t *testing.T) {
- integration.BeforeTest(t)
+func TestMaintenanceSnapshotCancel(t *testing.T) {
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
// reading snapshot with canceled context should error out
ctx, cancel := context.WithCancel(context.Background())
+
+ // Since the HTTP/2 spec defines the stream's receive window size and
+ // maximum frame size, the underlying gRPC transport can pre-read data
+ // from the server even if the application layer hasn't read it yet.
+ //
+ // The freshly initialized cluster only has a ~20KiB snapshot, which the
+ // transport could pre-read in full. Increase the snapshot's size here
+ // so that io.Copy still observes the canceled error.
+ populateDataIntoCluster(t, clus, 3, 1024*1024)
+
rc1, err := clus.RandClient().Snapshot(ctx)
if err != nil {
t.Fatal(err)
}
defer rc1.Close()
+ // read 16 bytes to ensure that the server has opened the snapshot
+ buf := make([]byte, 16)
+ n, err := rc1.Read(buf)
+ require.NoError(t, err)
+ assert.Equal(t, 16, n)
+
cancel()
- _, err = io.Copy(ioutil.Discard, rc1)
- if err != context.Canceled {
+ _, err = io.Copy(io.Discard, rc1)
+ if !errors.Is(err, context.Canceled) {
t.Errorf("expected %v, got %v", context.Canceled, err)
}
+}
+
+// TestMaintenanceSnapshotWithVersionTimeout ensures that the SnapshotWithVersion function
+// returns the corresponding context error when the context times out before the snapshot is read.
+func TestMaintenanceSnapshotWithVersionTimeout(t *testing.T) {
+ testMaintenanceSnapshotTimeout(t, func(ctx context.Context, client *clientv3.Client) (io.ReadCloser, error) {
+ resp, err := client.SnapshotWithVersion(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return resp.Snapshot, nil
+ })
+}
+
+// TestMaintenanceSnapshotTimeout ensures that the Snapshot function
+// returns the corresponding context error when the context times out before the snapshot is read.
+func TestMaintenanceSnapshotTimeout(t *testing.T) {
+ testMaintenanceSnapshotTimeout(t, func(ctx context.Context, client *clientv3.Client) (io.ReadCloser, error) {
+ return client.Snapshot(ctx)
+ })
+}
+
+// testMaintenanceSnapshotTimeout ensures that the given snapshot function
+// returns the corresponding context error when the context times out before the snapshot is read.
+func testMaintenanceSnapshotTimeout(t *testing.T, snapshot func(context.Context, *clientv3.Client) (io.ReadCloser, error)) {
+ integration2.BeforeTest(t)
+
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
+ defer clus.Terminate(t)
// reading snapshot with deadline exceeded should error out
- ctx, cancel = context.WithTimeout(context.Background(), time.Second)
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
- rc2, err := clus.RandClient().Snapshot(ctx)
+
+ // Since the HTTP/2 spec defines the stream's receive window size and
+ // maximum frame size, the underlying gRPC transport can pre-read data
+ // from the server even if the application layer hasn't read it yet.
+ //
+ // The freshly initialized cluster only has a ~20KiB snapshot, which the
+ // transport could pre-read in full. Increase the snapshot's size here
+ // so that io.Copy still observes the timeout error.
+ populateDataIntoCluster(t, clus, 3, 1024*1024)
+
+ rc2, err := snapshot(ctx, clus.RandClient())
if err != nil {
t.Fatal(err)
}
@@ -131,25 +239,46 @@ func TestMaintenanceSnapshotError(t *testing.T) {
time.Sleep(2 * time.Second)
- _, err = io.Copy(ioutil.Discard, rc2)
+ _, err = io.Copy(io.Discard, rc2)
if err != nil && !IsClientTimeout(err) {
t.Errorf("expected client timeout, got %v", err)
}
}
-// TestMaintenanceSnapshotErrorInflight ensures that inflight context cancel/timeout
-// fails snapshot reading with corresponding context errors.
+// TestMaintenanceSnapshotWithVersionErrorInflight ensures that the ReadCloser returned by the SnapshotWithVersion function
+// fails to read with the corresponding context error on an in-flight context cancel or timeout.
+func TestMaintenanceSnapshotWithVersionErrorInflight(t *testing.T) {
+ testMaintenanceSnapshotErrorInflight(t, func(ctx context.Context, client *clientv3.Client) (io.ReadCloser, error) {
+ resp, err := client.SnapshotWithVersion(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return resp.Snapshot, nil
+ })
+}
+
+// TestMaintenanceSnapshotErrorInflight ensures that the ReadCloser returned by the Snapshot function
+// fails to read with the corresponding context error on an in-flight context cancel or timeout.
func TestMaintenanceSnapshotErrorInflight(t *testing.T) {
- integration.BeforeTest(t)
+ testMaintenanceSnapshotErrorInflight(t, func(ctx context.Context, client *clientv3.Client) (io.ReadCloser, error) {
+ return client.Snapshot(ctx)
+ })
+}
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+// testMaintenanceSnapshotErrorInflight, given a snapshot function, ensures that the ReadCloser it returns
+// fails to read with the corresponding context error on an in-flight context cancel or timeout.
+func testMaintenanceSnapshotErrorInflight(t *testing.T, snapshot func(context.Context, *clientv3.Client) (io.ReadCloser, error)) {
+ integration2.BeforeTest(t)
+ lg := zaptest.NewLogger(t)
+
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
// take about 1-second to read snapshot
clus.Members[0].Stop(t)
dpath := filepath.Join(clus.Members[0].DataDir, "member", "snap", "db")
- b := backend.NewDefaultBackend(dpath)
- s := mvcc.NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, mvcc.StoreConfig{CompactionBatchLimit: math.MaxInt32})
+ b := backend.NewDefaultBackend(lg, dpath)
+ s := mvcc.NewStore(lg, b, &lease.FakeLessor{}, mvcc.StoreConfig{CompactionBatchLimit: math.MaxInt32})
rev := 100000
for i := 2; i <= rev; i++ {
s.Put([]byte(fmt.Sprintf("%10d", i)), bytes.Repeat([]byte("a"), 1024), lease.NoLease)
@@ -158,10 +287,9 @@ func TestMaintenanceSnapshotErrorInflight(t *testing.T) {
b.Close()
clus.Members[0].Restart(t)
- cli := clus.RandClient()
// reading snapshot with canceled context should error out
ctx, cancel := context.WithCancel(context.Background())
- rc1, err := cli.Snapshot(ctx)
+ rc1, err := snapshot(ctx, clus.RandClient())
if err != nil {
t.Fatal(err)
}
@@ -173,8 +301,8 @@ func TestMaintenanceSnapshotErrorInflight(t *testing.T) {
cancel()
close(donec)
}()
- _, err = io.Copy(ioutil.Discard, rc1)
- if err != nil && err != context.Canceled {
+ _, err = io.Copy(io.Discard, rc1)
+ if err != nil && !errors.Is(err, context.Canceled) {
t.Errorf("expected %v, got %v", context.Canceled, err)
}
<-donec
@@ -182,7 +310,7 @@ func TestMaintenanceSnapshotErrorInflight(t *testing.T) {
// reading snapshot with deadline exceeded should error out
ctx, cancel = context.WithTimeout(context.Background(), time.Second)
defer cancel()
- rc2, err := clus.RandClient().Snapshot(ctx)
+ rc2, err := snapshot(ctx, clus.RandClient())
if err != nil {
t.Fatal(err)
}
@@ -190,30 +318,104 @@ func TestMaintenanceSnapshotErrorInflight(t *testing.T) {
// 300ms left and expect timeout while snapshot reading is in progress
time.Sleep(700 * time.Millisecond)
- _, err = io.Copy(ioutil.Discard, rc2)
+ _, err = io.Copy(io.Discard, rc2)
if err != nil && !IsClientTimeout(err) {
t.Errorf("expected client timeout, got %v", err)
}
}
+// TestMaintenanceSnapshotWithVersionVersion ensures that SnapshotWithVersion returns correct version value.
+func TestMaintenanceSnapshotWithVersionVersion(t *testing.T) {
+ integration2.BeforeTest(t)
+
+ // Set SnapshotCount to 1 to force raft snapshot to ensure that storage version is set
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, SnapshotCount: 1})
+ defer clus.Terminate(t)
+
+ // Put some keys to ensure that wal snapshot is triggered
+ for i := 0; i < 10; i++ {
+ clus.RandClient().Put(context.Background(), fmt.Sprintf("%d", i), "1")
+ }
+
+ // take a snapshot and verify the storage version it reports
+ resp, err := clus.RandClient().SnapshotWithVersion(context.Background())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer resp.Snapshot.Close()
+ if resp.Version != "3.6.0" {
+ t.Errorf("unexpected version, expected %q, got %q", version.Version, resp.Version)
+ }
+}
+
+func TestMaintenanceSnapshotContentDigest(t *testing.T) {
+ integration2.BeforeTest(t)
+
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
+ defer clus.Terminate(t)
+
+ populateDataIntoCluster(t, clus, 3, 1024*1024)
+
+ // stream a snapshot so its trailing content digest can be verified
+ resp, err := clus.RandClient().SnapshotWithVersion(context.Background())
+ require.NoError(t, err)
+ defer resp.Snapshot.Close()
+
+ tmpDir := t.TempDir()
+ snapFile, err := os.Create(filepath.Join(tmpDir, t.Name()))
+ require.NoError(t, err)
+ defer snapFile.Close()
+
+ snapSize, err := io.Copy(snapFile, resp.Snapshot)
+ require.NoError(t, err)
+
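+ // The test relies on the snapshot layout used by SnapshotWithVersion: the final sha256.Size bytes are a digest of all preceding bytes.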
+ // read the checksum
+ checksumSize := int64(sha256.Size)
+ _, err = snapFile.Seek(-checksumSize, io.SeekEnd)
+ require.NoError(t, err)
+
+ checksumInBytes, err := io.ReadAll(snapFile)
+ require.NoError(t, err)
+ require.Len(t, checksumInBytes, int(checksumSize))
+
+ // remove the checksum part and rehash
+ err = snapFile.Truncate(snapSize - checksumSize)
+ require.NoError(t, err)
+
+ _, err = snapFile.Seek(0, io.SeekStart)
+ require.NoError(t, err)
+
+ hashWriter := sha256.New()
+ _, err = io.Copy(hashWriter, snapFile)
+ require.NoError(t, err)
+
+ // compare the checksum
+ actualChecksum := hashWriter.Sum(nil)
+ require.Equal(t, checksumInBytes, actualChecksum)
+}
+
func TestMaintenanceStatus(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, QuotaBackendBytes: storage.DefaultQuotaBytes})
defer clus.Terminate(t)
+ t.Logf("Waiting for leader...")
clus.WaitLeader(t)
+ t.Logf("Leader established.")
eps := make([]string, 3)
for i := 0; i < 3; i++ {
- eps[i] = clus.Members[i].GRPCAddr()
+ eps[i] = clus.Members[i].GRPCURL
}
- cli, err := integration.NewClient(t, clientv3.Config{Endpoints: eps, DialOptions: []grpc.DialOption{grpc.WithBlock()}})
+ t.Logf("Creating client...")
+ cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: eps, DialOptions: []grpc.DialOption{grpc.WithBlock()}})
if err != nil {
t.Fatal(err)
}
defer cli.Close()
+ t.Logf("Creating client [DONE]")
prevID, leaderFound := uint64(0), false
for i := 0; i < 3; i++ {
@@ -221,6 +423,10 @@ func TestMaintenanceStatus(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+ t.Logf("Response from %v: %v", i, resp)
+ if resp.DbSizeQuota != storage.DefaultQuotaBytes {
+ t.Errorf("unexpected backend default quota returned: %d, expected %d", resp.DbSizeQuota, storage.DefaultQuotaBytes)
+ }
if prevID == 0 {
prevID, leaderFound = resp.Header.MemberId, resp.Header.MemberId == resp.Leader
continue
diff --git a/tests/integration/clientv3/metrics_test.go b/tests/integration/clientv3/metrics_test.go
index 494923d3c13..081a2fc0bd5 100644
--- a/tests/integration/clientv3/metrics_test.go
+++ b/tests/integration/clientv3/metrics_test.go
@@ -17,6 +17,7 @@ package clientv3test
import (
"bufio"
"context"
+ "errors"
"io"
"net"
"net/http"
@@ -25,17 +26,17 @@ import (
"testing"
"time"
- "go.etcd.io/etcd/client/pkg/v3/transport"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/tests/v3/integration"
-
grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"google.golang.org/grpc"
+
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestV3ClientMetrics(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
var (
addr = "localhost:27989"
@@ -59,29 +60,27 @@ func TestV3ClientMetrics(t *testing.T) {
// listen for all Prometheus metrics
go func() {
- var err error
-
defer close(donec)
- err = srv.Serve(ln)
- if err != nil && !transport.IsClosedConnError(err) {
- t.Errorf("Err serving http requests: %v", err)
+ serr := srv.Serve(ln)
+ if serr != nil && !transport.IsClosedConnError(serr) {
+ t.Errorf("Err serving http requests: %v", serr)
}
}()
url := "unix://" + addr + "/metrics"
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, SkipCreatingClient: true})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cfg := clientv3.Config{
- Endpoints: []string{clus.Members[0].GRPCAddr()},
+ Endpoints: []string{clus.Members[0].GRPCURL},
DialOptions: []grpc.DialOption{
grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),
},
}
- cli, cerr := integration.NewClient(t, cfg)
+ cli, cerr := integration2.NewClient(t, cfg)
if cerr != nil {
t.Fatal(cerr)
}
@@ -163,11 +162,11 @@ func getHTTPBodyAsLines(t *testing.T, url string) []string {
}
reader := bufio.NewReader(resp.Body)
- lines := []string{}
+ var lines []string
for {
line, err := reader.ReadString('\n')
if err != nil {
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
break
} else {
t.Fatalf("error reading: %v", err)
diff --git a/tests/integration/clientv3/mirror_auth_test.go b/tests/integration/clientv3/mirror_auth_test.go
new file mode 100644
index 00000000000..8dedd4e94d2
--- /dev/null
+++ b/tests/integration/clientv3/mirror_auth_test.go
@@ -0,0 +1,103 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !cluster_proxy
+
+package clientv3test
+
+import (
+ "context"
+ "reflect"
+ "testing"
+ "time"
+
+ "google.golang.org/grpc"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/client/v3/mirror"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
+)
+
+func TestMirrorSync_Authenticated(t *testing.T) {
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
+ defer clus.Terminate(t)
+
+ initialClient := clus.Client(0)
+
+ // Create a user to run the mirror process that only has access to /syncpath
+ initialClient.RoleAdd(context.Background(), "syncer")
+ initialClient.RoleGrantPermission(context.Background(), "syncer", "/syncpath", clientv3.GetPrefixRangeEnd("/syncpath"), clientv3.PermissionType(clientv3.PermReadWrite))
+ initialClient.UserAdd(context.Background(), "syncer", "syncfoo")
+ initialClient.UserGrantRole(context.Background(), "syncer", "syncer")
+
+ // Seed /syncpath with some initial data
+ _, err := initialClient.KV.Put(context.TODO(), "/syncpath/foo", "bar")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Require authentication
+ authSetupRoot(t, initialClient.Auth)
+
+ // Create a client as the `syncer` user.
+ cfg := clientv3.Config{
+ Endpoints: initialClient.Endpoints(),
+ DialTimeout: 5 * time.Second,
+ DialOptions: []grpc.DialOption{grpc.WithBlock()},
+ Username: "syncer",
+ Password: "syncfoo",
+ }
+ syncClient, err := integration2.NewClient(t, cfg)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer syncClient.Close()
+
+ // Now run the sync process, create changes, and get the initial sync state
+ syncer := mirror.NewSyncer(syncClient, "/syncpath", 0)
+ gch, ech := syncer.SyncBase(context.TODO())
+ wkvs := []*mvccpb.KeyValue{{Key: []byte("/syncpath/foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}}
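+ // The auth setup above does not touch the key space, so the Put of /syncpath/foo is the first key-space write and lands at revision 2 (the revision counter starts at 1).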
+
+ for g := range gch {
+ if !reflect.DeepEqual(g.Kvs, wkvs) {
+ t.Fatalf("kv = %v, want %v", g.Kvs, wkvs)
+ }
+ }
+
+ for e := range ech {
+ t.Fatalf("unexpected error %v", e)
+ }
+
+ // Start a continuous sync
+ wch := syncer.SyncUpdates(context.TODO())
+
+ // Update state
+ _, err = syncClient.KV.Put(context.TODO(), "/syncpath/foo", "baz")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Wait for the updated state to sync
+ select {
+ case r := <-wch:
+ wkv := &mvccpb.KeyValue{Key: []byte("/syncpath/foo"), Value: []byte("baz"), CreateRevision: 2, ModRevision: 3, Version: 2}
+ if !reflect.DeepEqual(r.Events[0].Kv, wkv) {
+ t.Fatalf("kv = %v, want %v", r.Events[0].Kv, wkv)
+ }
+ case <-time.After(time.Second):
+ t.Fatal("failed to receive update in one second")
+ }
+}
diff --git a/tests/integration/clientv3/mirror_test.go b/tests/integration/clientv3/mirror_test.go
index c9246e0f2d3..f21551bbdf0 100644
--- a/tests/integration/clientv3/mirror_test.go
+++ b/tests/integration/clientv3/mirror_test.go
@@ -24,13 +24,13 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/client/v3/mirror"
- "go.etcd.io/etcd/tests/v3/integration"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestMirrorSync(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
c := clus.Client(0)
@@ -72,9 +72,9 @@ func TestMirrorSync(t *testing.T) {
}
func TestMirrorSyncBase(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer cluster.Terminate(t)
cli := cluster.Client(0)
diff --git a/tests/integration/clientv3/namespace_test.go b/tests/integration/clientv3/namespace_test.go
index b7e07f4f383..2aad010f987 100644
--- a/tests/integration/clientv3/namespace_test.go
+++ b/tests/integration/clientv3/namespace_test.go
@@ -20,15 +20,15 @@ import (
"testing"
"go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/namespace"
- "go.etcd.io/etcd/tests/v3/integration"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestNamespacePutGet(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
c := clus.Client(0)
@@ -55,9 +55,9 @@ func TestNamespacePutGet(t *testing.T) {
}
func TestNamespaceWatch(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
c := clus.Client(0)
diff --git a/tests/integration/clientv3/naming/endpoints_test.go b/tests/integration/clientv3/naming/endpoints_test.go
index 7076168512c..3c93c6c7d19 100644
--- a/tests/integration/clientv3/naming/endpoints_test.go
+++ b/tests/integration/clientv3/naming/endpoints_test.go
@@ -19,16 +19,17 @@ import (
"reflect"
"testing"
+ "github.com/stretchr/testify/require"
+
etcd "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/naming/endpoints"
-
- "go.etcd.io/etcd/tests/v3/integration"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestEndpointManager(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
em, err := endpoints.NewManager(clus.RandClient(), "foo")
@@ -88,9 +89,9 @@ func TestEndpointManager(t *testing.T) {
// correctly with multiple hosts and correctly receive multiple
// updates in a single revision.
func TestEndpointManagerAtomicity(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
c := clus.RandClient()
@@ -101,17 +102,14 @@ func TestEndpointManagerAtomicity(t *testing.T) {
err = em.Update(context.TODO(), []*endpoints.UpdateWithOpts{
endpoints.NewAddUpdateOpts("foo/host", endpoints.Endpoint{Addr: "127.0.0.1:2000"}),
- endpoints.NewAddUpdateOpts("foo/host2", endpoints.Endpoint{Addr: "127.0.0.1:2001"})})
- if err != nil {
- t.Fatal(err)
- }
+ endpoints.NewAddUpdateOpts("foo/host2", endpoints.Endpoint{Addr: "127.0.0.1:2001"}),
+ })
+ require.NoError(t, err)
ctx, watchCancel := context.WithCancel(context.Background())
defer watchCancel()
w, err := em.NewWatchChannel(ctx)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
updates := <-w
if len(updates) != 2 {
@@ -119,9 +117,7 @@ func TestEndpointManagerAtomicity(t *testing.T) {
}
_, err = c.Txn(context.TODO()).Then(etcd.OpDelete("foo/host"), etcd.OpDelete("foo/host2")).Commit()
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
updates = <-w
if len(updates) != 2 || (updates[0].Op != endpoints.Delete && updates[1].Op != endpoints.Delete) {
@@ -130,9 +126,9 @@ func TestEndpointManagerAtomicity(t *testing.T) {
}
func TestEndpointManagerCRUD(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
em, err := endpoints.NewManager(clus.RandClient(), "foo")
diff --git a/tests/integration/clientv3/naming/resolver_test.go b/tests/integration/clientv3/naming/resolver_test.go
index 980580c1637..f39dbb2c234 100644
--- a/tests/integration/clientv3/naming/resolver_test.go
+++ b/tests/integration/clientv3/naming/resolver_test.go
@@ -17,38 +17,39 @@ package naming_test
import (
"bytes"
"context"
+ "fmt"
+ "strconv"
"testing"
- "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+ testpb "google.golang.org/grpc/interop/grpc_testing"
"go.etcd.io/etcd/client/v3/naming/endpoints"
"go.etcd.io/etcd/client/v3/naming/resolver"
- "go.etcd.io/etcd/pkg/v3/grpc_testing"
- "go.etcd.io/etcd/tests/v3/integration"
-
- "google.golang.org/grpc"
- testpb "google.golang.org/grpc/test/grpc_testing"
+ "go.etcd.io/etcd/pkg/v3/grpctesting"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
-// This test mimics scenario described in grpc_naming.md doc.
-
-func TestEtcdGrpcResolver(t *testing.T) {
- integration.BeforeTest(t)
-
- s1PayloadBody := []byte{'1'}
- s1 := grpc_testing.NewDummyStubServer(s1PayloadBody)
+func testEtcdGRPCResolver(t *testing.T, lbPolicy string) {
+ // Set up two new dummy stub servers
+ payloadBody := []byte{'1'}
+ s1 := grpctesting.NewDummyStubServer(payloadBody)
if err := s1.Start(nil); err != nil {
t.Fatal("failed to start dummy grpc server (s1)", err)
}
defer s1.Stop()
- s2PayloadBody := []byte{'2'}
- s2 := grpc_testing.NewDummyStubServer(s2PayloadBody)
+ s2 := grpctesting.NewDummyStubServer(payloadBody)
if err := s2.Start(nil); err != nil {
t.Fatal("failed to start dummy grpc server (s2)", err)
}
defer s2.Stop()
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ // Create a new cluster and register two endpoints with an endpoint manager
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
em, err := endpoints.NewManager(clus.Client(0), "foo")
@@ -64,51 +65,122 @@ func TestEtcdGrpcResolver(t *testing.T) {
t.Fatal("failed to add foo", err)
}
+ err = em.AddEndpoint(context.TODO(), "foo/e2", e2)
+ if err != nil {
+ t.Fatal("failed to add foo", err)
+ }
+
b, err := resolver.NewBuilder(clus.Client(1))
if err != nil {
t.Fatal("failed to new resolver builder", err)
}
- conn, err := grpc.Dial("etcd:///foo", grpc.WithInsecure(), grpc.WithResolvers(b))
+ // Create connection with provided lb policy
+ conn, err := grpc.Dial("etcd:///foo", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(b),
+ grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingPolicy":"%s"}`, lbPolicy)))
if err != nil {
t.Fatal("failed to connect to foo", err)
}
defer conn.Close()
+ // Send an initial request that should go to e1
c := testpb.NewTestServiceClient(conn)
resp, err := c.UnaryCall(context.TODO(), &testpb.SimpleRequest{}, grpc.WaitForReady(true))
if err != nil {
t.Fatal("failed to invoke rpc to foo (e1)", err)
}
- if resp.GetPayload() == nil || !bytes.Equal(resp.GetPayload().GetBody(), s1PayloadBody) {
+ if resp.GetPayload() == nil || !bytes.Equal(resp.GetPayload().GetBody(), payloadBody) {
t.Fatalf("unexpected response from foo (e1): %s", resp.GetPayload().GetBody())
}
- em.DeleteEndpoint(context.TODO(), "foo/e1")
- em.AddEndpoint(context.TODO(), "foo/e2", e2)
+ // Send more requests
+ lastResponse := []byte{'1'}
+ totalRequests := 3500
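+ // The dummy stub servers reply with the number of requests they have served so far, which the pick_first and round_robin assertions below depend on.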
+ for i := 1; i < totalRequests; i++ {
+ resp, err := c.UnaryCall(context.TODO(), &testpb.SimpleRequest{}, grpc.WaitForReady(true))
+ if err != nil {
+ t.Fatal("failed to invoke rpc to foo", err)
+ }
- // We use a loop with deadline of 30s to avoid test getting flake
- // as it's asynchronous for gRPC Client to update underlying connections.
- maxRetries := 300
- retryPeriod := 100 * time.Millisecond
- retries := 0
- for {
- time.Sleep(retryPeriod)
- retries++
+ t.Logf("Response: %v", string(resp.GetPayload().GetBody()))
- resp, err = c.UnaryCall(context.TODO(), &testpb.SimpleRequest{})
- if err != nil {
- if retries < maxRetries {
- continue
- }
- t.Fatal("failed to invoke rpc to foo (e2)", err)
+ if resp.GetPayload() == nil {
+ t.Fatalf("unexpected response from foo: %s", resp.GetPayload().GetBody())
}
- if resp.GetPayload() == nil || !bytes.Equal(resp.GetPayload().GetBody(), s2PayloadBody) {
- if retries < maxRetries {
- continue
- }
- t.Fatalf("unexpected response from foo (e2): %s", resp.GetPayload().GetBody())
+ lastResponse = resp.GetPayload().GetBody()
+ }
+
+ // If the load balancing policy is pick_first, the returned payload should equal the total number of requests
+ t.Logf("Last response: %v", string(lastResponse))
+ if lbPolicy == "pick_first" {
+ if string(lastResponse) != "3500" {
+ t.Fatalf("unexpected total responses from foo: %s", lastResponse)
}
- break
}
+
+ // If the load balancing policy is round_robin, each server should serve roughly half of the total requests
+ if lbPolicy == "round_robin" {
+ responses, err := strconv.Atoi(string(lastResponse))
+ if err != nil {
+ t.Fatalf("couldn't convert to int: %s", lastResponse)
+ }
+
+ // Allow 25% tolerance as round robin is not perfect and we don't want the test to flake
+ expected := float64(totalRequests) * 0.5
+ assert.InEpsilonf(t, expected, float64(responses), 0.25, "unexpected total responses from foo: %s", lastResponse)
+ }
+}
+
+// TestEtcdGrpcResolverPickFirst mimics scenarios described in grpc_naming.md doc.
+func TestEtcdGrpcResolverPickFirst(t *testing.T) {
+ integration2.BeforeTest(t)
+
+ // Pick first is the default load balancer policy for grpc-go
+ testEtcdGRPCResolver(t, "pick_first")
+}
+
+// TestEtcdGrpcResolverRoundRobin mimics scenarios described in grpc_naming.md doc.
+func TestEtcdGrpcResolverRoundRobin(t *testing.T) {
+ integration2.BeforeTest(t)
+
+ // Round robin is a common alternative for more production oriented scenarios
+ testEtcdGRPCResolver(t, "round_robin")
+}
+
+func TestEtcdEndpointManager(t *testing.T) {
+ integration2.BeforeTest(t)
+
+ s1PayloadBody := []byte{'1'}
+ s1 := grpctesting.NewDummyStubServer(s1PayloadBody)
+ err := s1.Start(nil)
+ require.NoError(t, err)
+ defer s1.Stop()
+
+ s2PayloadBody := []byte{'2'}
+ s2 := grpctesting.NewDummyStubServer(s2PayloadBody)
+ err = s2.Start(nil)
+ require.NoError(t, err)
+ defer s2.Stop()
+
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
+ defer clus.Terminate(t)
+
+ // Check that an endpoint manager whose target ("foo_other") merely shares "foo" as a string prefix does not interfere when multiple managers are in use
+ em, err := endpoints.NewManager(clus.Client(0), "foo")
+ require.NoError(t, err)
+ emOther, err := endpoints.NewManager(clus.Client(1), "foo_other")
+ require.NoError(t, err)
+
+ e1 := endpoints.Endpoint{Addr: s1.Addr()}
+ e2 := endpoints.Endpoint{Addr: s2.Addr()}
+
+ em.AddEndpoint(context.Background(), "foo/e1", e1)
+ emOther.AddEndpoint(context.Background(), "foo_other/e2", e2)
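+ // Each manager should list only endpoints under its own exact target, so "foo" must not pick up "foo_other/e2".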
+
+ epts, err := em.List(context.Background())
+ require.NoError(t, err)
+ eptsOther, err := emOther.List(context.Background())
+ require.NoError(t, err)
+ assert.Len(t, epts, 1)
+ assert.Len(t, eptsOther, 1)
}
diff --git a/tests/integration/clientv3/ordering_kv_test.go b/tests/integration/clientv3/ordering_kv_test.go
index b1f4f54ef68..e798a5d0865 100644
--- a/tests/integration/clientv3/ordering_kv_test.go
+++ b/tests/integration/clientv3/ordering_kv_test.go
@@ -21,26 +21,28 @@ import (
"time"
"github.com/stretchr/testify/assert"
- "go.etcd.io/etcd/client/v3"
+ "github.com/stretchr/testify/require"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/ordering"
- "go.etcd.io/etcd/tests/v3/integration"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestDetectKvOrderViolation(t *testing.T) {
- var errOrderViolation = errors.New("DetectedOrderViolation")
+ errOrderViolation := errors.New("DetectedOrderViolation")
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
cfg := clientv3.Config{
Endpoints: []string{
- clus.Members[0].GRPCAddr(),
- clus.Members[1].GRPCAddr(),
- clus.Members[2].GRPCAddr(),
+ clus.Members[0].GRPCURL,
+ clus.Members[1].GRPCURL,
+ clus.Members[2].GRPCURL,
},
}
- cli, err := integration.NewClient(t, cfg)
+ cli, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatal(err)
}
@@ -80,34 +82,34 @@ func TestDetectKvOrderViolation(t *testing.T) {
// ensure that only the third member is queried during requests
clus.Members[0].Stop(t)
clus.Members[1].Stop(t)
- assert.NoError(t, clus.Members[2].Restart(t))
+ require.NoError(t, clus.Members[2].Restart(t))
// force OrderingKv to query the third member
- cli.SetEndpoints(clus.Members[2].GRPCAddr())
+ cli.SetEndpoints(clus.Members[2].GRPCURL)
time.Sleep(2 * time.Second) // FIXME: Figure out how pause SetEndpoints sufficiently that this is not needed
t.Logf("Quering m2 after restart")
v, err = orderingKv.Get(ctx, "foo", clientv3.WithSerializable())
- t.Logf("Quering m2 returned: v:%v erro:%v ", v, err)
- if err != errOrderViolation {
+ t.Logf("Quering m2 returned: v:%v err:%v ", v, err)
+ if !errors.Is(err, errOrderViolation) {
t.Fatalf("expected %v, got err:%v v:%v", errOrderViolation, err, v)
}
}
func TestDetectTxnOrderViolation(t *testing.T) {
- var errOrderViolation = errors.New("DetectedOrderViolation")
+ errOrderViolation := errors.New("DetectedOrderViolation")
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
cfg := clientv3.Config{
Endpoints: []string{
- clus.Members[0].GRPCAddr(),
- clus.Members[1].GRPCAddr(),
- clus.Members[2].GRPCAddr(),
+ clus.Members[0].GRPCURL,
+ clus.Members[1].GRPCURL,
+ clus.Members[2].GRPCURL,
},
}
- cli, err := integration.NewClient(t, cfg)
+ cli, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatal(err)
}
@@ -149,12 +151,12 @@ func TestDetectTxnOrderViolation(t *testing.T) {
// ensure that only the third member is queried during requests
clus.Members[0].Stop(t)
clus.Members[1].Stop(t)
- assert.NoError(t, clus.Members[2].Restart(t))
+ require.NoError(t, clus.Members[2].Restart(t))
// force OrderingKv to query the third member
- cli.SetEndpoints(clus.Members[2].GRPCAddr())
+ cli.SetEndpoints(clus.Members[2].GRPCURL)
time.Sleep(2 * time.Second) // FIXME: Figure out how pause SetEndpoints sufficiently that this is not needed
_, err = orderingKv.Get(ctx, "foo", clientv3.WithSerializable())
- if err != errOrderViolation {
+ if !errors.Is(err, errOrderViolation) {
t.Fatalf("expected %v, got %v", errOrderViolation, err)
}
orderingTxn = orderingKv.Txn(ctx)
@@ -163,7 +165,7 @@ func TestDetectTxnOrderViolation(t *testing.T) {
).Then(
clientv3.OpGet("foo", clientv3.WithSerializable()),
).Commit()
- if err != errOrderViolation {
+ if !errors.Is(err, errOrderViolation) {
t.Fatalf("expected %v, got %v", errOrderViolation, err)
}
}
diff --git a/tests/integration/clientv3/ordering_util_test.go b/tests/integration/clientv3/ordering_util_test.go
index db3fddd9974..6313957bf3f 100644
--- a/tests/integration/clientv3/ordering_util_test.go
+++ b/tests/integration/clientv3/ordering_util_test.go
@@ -16,25 +16,29 @@ package clientv3test
import (
"context"
+ "errors"
"testing"
"time"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/ordering"
- "go.etcd.io/etcd/tests/v3/integration"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
+// TestEndpointSwitchResolvesViolation ensures that:
+// - the ErrNoGreaterRev error is returned from a partitioned member while it has a stale revision
+// - no error is returned once the partition recovers
func TestEndpointSwitchResolvesViolation(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
eps := []string{
- clus.Members[0].GRPCAddr(),
- clus.Members[1].GRPCAddr(),
- clus.Members[2].GRPCAddr(),
+ clus.Members[0].GRPCURL,
+ clus.Members[1].GRPCURL,
+ clus.Members[2].GRPCURL,
}
- cfg := clientv3.Config{Endpoints: []string{clus.Members[0].GRPCAddr()}}
- cli, err := integration.NewClient(t, cfg)
+ cfg := clientv3.Config{Endpoints: []string{clus.Members[0].GRPCURL}}
+ cli, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatal(err)
}
@@ -62,6 +66,7 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
}
cli.SetEndpoints(eps...)
+ time.Sleep(1 * time.Second) // give enough time for the operation
orderingKv := ordering.NewKV(cli.KV, ordering.NewOrderViolationSwitchEndpointClosure(cli))
// set prevRev to the second member's revision of "foo" such that
// the revision is higher than the third member's revision of "foo"
@@ -71,27 +76,36 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
}
t.Logf("Reconfigure client to speak only to the 'partitioned' member")
- cli.SetEndpoints(clus.Members[2].GRPCAddr())
+ cli.SetEndpoints(clus.Members[2].GRPCURL)
+ time.Sleep(1 * time.Second) // give enough time for the operation
_, err = orderingKv.Get(ctx, "foo", clientv3.WithSerializable())
- if err != ordering.ErrNoGreaterRev {
+ if !errors.Is(err, ordering.ErrNoGreaterRev) {
t.Fatal("While speaking to partitioned leader, we should get ErrNoGreaterRev error")
}
+
+ clus.Members[2].RecoverPartition(t, clus.Members[:2]...)
+ time.Sleep(1 * time.Second) // give enough time for the operation
+ _, err = orderingKv.Get(ctx, "foo")
+ if err != nil {
+ t.Fatal("After partition recovered, third member should recover and return no error")
+ }
}
+// TestUnresolvableOrderViolation ensures the ErrNoGreaterRev error is returned when the available members only have stale revisions.
func TestUnresolvableOrderViolation(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 5, SkipCreatingClient: true})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 5, UseBridge: true})
defer clus.Terminate(t)
cfg := clientv3.Config{
Endpoints: []string{
- clus.Members[0].GRPCAddr(),
- clus.Members[1].GRPCAddr(),
- clus.Members[2].GRPCAddr(),
- clus.Members[3].GRPCAddr(),
- clus.Members[4].GRPCAddr(),
+ clus.Members[0].GRPCURL,
+ clus.Members[1].GRPCURL,
+ clus.Members[2].GRPCURL,
+ clus.Members[3].GRPCURL,
+ clus.Members[4].GRPCURL,
},
}
- cli, err := integration.NewClient(t, cfg)
+ cli, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatal(err)
}
@@ -99,7 +113,7 @@ func TestUnresolvableOrderViolation(t *testing.T) {
eps := cli.Endpoints()
ctx := context.TODO()
- cli.SetEndpoints(clus.Members[0].GRPCAddr())
+ cli.SetEndpoints(clus.Members[0].GRPCURL)
time.Sleep(1 * time.Second)
_, err = cli.Put(ctx, "foo", "bar")
if err != nil {
@@ -139,10 +153,11 @@ func TestUnresolvableOrderViolation(t *testing.T) {
t.Fatal(err)
}
clus.Members[3].WaitStarted(t)
- cli.SetEndpoints(clus.Members[3].GRPCAddr())
+ cli.SetEndpoints(clus.Members[3].GRPCURL)
+ time.Sleep(1 * time.Second) // give enough time for operation
_, err = OrderingKv.Get(ctx, "foo", clientv3.WithSerializable())
- if err != ordering.ErrNoGreaterRev {
+ if !errors.Is(err, ordering.ErrNoGreaterRev) {
t.Fatalf("expected %v, got %v", ordering.ErrNoGreaterRev, err)
}
}
diff --git a/tests/integration/clientv3/role_test.go b/tests/integration/clientv3/role_test.go
deleted file mode 100644
index 62ba49d3acd..00000000000
--- a/tests/integration/clientv3/role_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3test
-
-import (
- "context"
- "testing"
-
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/tests/v3/integration"
-)
-
-func TestRoleError(t *testing.T) {
- integration.BeforeTest(t)
-
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
- defer clus.Terminate(t)
-
- authapi := clus.RandClient()
-
- _, err := authapi.RoleAdd(context.TODO(), "test-role")
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = authapi.RoleAdd(context.TODO(), "test-role")
- if err != rpctypes.ErrRoleAlreadyExist {
- t.Fatalf("expected %v, got %v", rpctypes.ErrRoleAlreadyExist, err)
- }
-
- _, err = authapi.RoleAdd(context.TODO(), "")
- if err != rpctypes.ErrRoleEmpty {
- t.Fatalf("expected %v, got %v", rpctypes.ErrRoleEmpty, err)
- }
-}
diff --git a/tests/integration/clientv3/snapshot/v3_snapshot_test.go b/tests/integration/clientv3/snapshot/v3_snapshot_test.go
index 82b03214f5a..c39b092e1cf 100644
--- a/tests/integration/clientv3/snapshot/v3_snapshot_test.go
+++ b/tests/integration/clientv3/snapshot/v3_snapshot_test.go
@@ -24,13 +24,15 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
+
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/snapshot"
"go.etcd.io/etcd/server/v3/embed"
- "go.etcd.io/etcd/tests/v3/integration"
- "go.uber.org/zap/zaptest"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
// TestSaveSnapshotFilePermissions ensures that the snapshot is saved with
@@ -38,7 +40,7 @@ import (
func TestSaveSnapshotFilePermissions(t *testing.T) {
expectedFileMode := os.FileMode(fileutil.PrivateFileMode)
kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}}
- dbPath := createSnapshotFile(t, kvs)
+ _, dbPath := createSnapshotFile(t, newEmbedConfig(t), kvs)
defer os.RemoveAll(dbPath)
dbInfo, err := os.Stat(dbPath)
@@ -52,27 +54,47 @@ func TestSaveSnapshotFilePermissions(t *testing.T) {
}
}
+// TestSaveSnapshotVersion ensures that the snapshot returns proper storage version.
+func TestSaveSnapshotVersion(t *testing.T) {
+ // Put some keys to ensure that wal snapshot is triggered
+ var kvs []kv
+ for i := 0; i < 10; i++ {
+ kvs = append(kvs, kv{fmt.Sprintf("%d", i), "test"})
+ }
+ cfg := newEmbedConfig(t)
+ // Force raft snapshot to ensure that storage version is set
+ cfg.SnapshotCount = 1
+ ver, dbPath := createSnapshotFile(t, cfg, kvs)
+ defer os.RemoveAll(dbPath)
+
+ if ver != "3.6.0" {
+ t.Fatalf("expected snapshot version %s, got %s:", "3.6.0", ver)
+ }
+}
+
type kv struct {
k, v string
}
-// creates a snapshot file and returns the file path.
-func createSnapshotFile(t *testing.T, kvs []kv) string {
- testutil.SkipTestIfShortMode(t,
- "Snapshot creation tests are depending on embedded etcServer so are integration-level tests.")
+func newEmbedConfig(t *testing.T) *embed.Config {
clusterN := 1
urls := newEmbedURLs(clusterN * 2)
cURLs, pURLs := urls[:clusterN], urls[clusterN:]
-
- cfg := integration.NewEmbedConfig(t, "default")
+ cfg := integration2.NewEmbedConfig(t, "default")
cfg.ClusterState = "new"
- cfg.LCUrls, cfg.ACUrls = cURLs, cURLs
- cfg.LPUrls, cfg.APUrls = pURLs, pURLs
+ cfg.ListenClientUrls, cfg.AdvertiseClientUrls = cURLs, cURLs
+ cfg.ListenPeerUrls, cfg.AdvertisePeerUrls = pURLs, pURLs
cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, pURLs[0].String())
+ return cfg
+}
+
+// creates a snapshot file and returns the file path.
+func createSnapshotFile(t *testing.T, cfg *embed.Config, kvs []kv) (version string, dbPath string) {
+ testutil.SkipTestIfShortMode(t,
+ "Snapshot creation tests are depending on embedded etcd server so are integration-level tests.")
+
srv, err := embed.StartEtcd(cfg)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
defer func() {
srv.Close()
}()
@@ -82,33 +104,26 @@ func createSnapshotFile(t *testing.T, kvs []kv) string {
t.Fatalf("failed to start embed.Etcd for creating snapshots")
}
- ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}}
- cli, err := integration.NewClient(t, ccfg)
- if err != nil {
- t.Fatal(err)
- }
+ ccfg := clientv3.Config{Endpoints: []string{cfg.AdvertiseClientUrls[0].String()}}
+ cli, err := integration2.NewClient(t, ccfg)
+ require.NoError(t, err)
defer cli.Close()
for i := range kvs {
ctx, cancel := context.WithTimeout(context.Background(), testutil.RequestTimeout)
_, err = cli.Put(ctx, kvs[i].k, kvs[i].v)
cancel()
- if err != nil {
- t.Fatal(err)
- }
- }
-
- dpPath := filepath.Join(t.TempDir(), fmt.Sprintf("snapshot%d.db", time.Now().Nanosecond()))
- if err = snapshot.Save(context.Background(), zaptest.NewLogger(t), ccfg, dpPath); err != nil {
- t.Fatal(err)
+ require.NoError(t, err)
}
- return dpPath
+ dbPath = filepath.Join(t.TempDir(), fmt.Sprintf("snapshot%d.db", time.Now().Nanosecond()))
+ version, err = snapshot.SaveWithVersion(context.Background(), zaptest.NewLogger(t), ccfg, dbPath)
+ require.NoError(t, err)
+ return version, dbPath
}
func newEmbedURLs(n int) (urls []url.URL) {
urls = make([]url.URL, n)
for i := 0; i < n; i++ {
- rand.Seed(int64(time.Now().Nanosecond()))
u, _ := url.Parse(fmt.Sprintf("unix://localhost:%d", rand.Intn(45000)))
urls[i] = *u
}
diff --git a/tests/integration/clientv3/txn_test.go b/tests/integration/clientv3/txn_test.go
index ffe93e096e0..221247d2f7a 100644
--- a/tests/integration/clientv3/txn_test.go
+++ b/tests/integration/clientv3/txn_test.go
@@ -16,27 +16,28 @@ package clientv3test
import (
"context"
+ "errors"
"fmt"
"testing"
"time"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/embed"
- "go.etcd.io/etcd/tests/v3/integration"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestTxnError(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
kv := clus.RandClient()
ctx := context.TODO()
_, err := kv.Txn(ctx).Then(clientv3.OpPut("foo", "bar1"), clientv3.OpPut("foo", "bar2")).Commit()
- if err != rpctypes.ErrDuplicateKey {
+ if !errors.Is(err, rpctypes.ErrDuplicateKey) {
t.Fatalf("expected %v, got %v", rpctypes.ErrDuplicateKey, err)
}
@@ -45,15 +46,15 @@ func TestTxnError(t *testing.T) {
ops[i] = clientv3.OpPut(fmt.Sprintf("foo%d", i), "")
}
_, err = kv.Txn(ctx).Then(ops...).Commit()
- if err != rpctypes.ErrTooManyOps {
+ if !errors.Is(err, rpctypes.ErrTooManyOps) {
t.Fatalf("expected %v, got %v", rpctypes.ErrTooManyOps, err)
}
}
func TestTxnWriteFail(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
kv := clus.Client(0)
@@ -101,9 +102,9 @@ func TestTxnWriteFail(t *testing.T) {
func TestTxnReadRetry(t *testing.T) {
t.Skipf("skipping txn read retry test: re-enable after we do retry on txn read request")
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
kv := clus.Client(0)
@@ -140,9 +141,9 @@ func TestTxnReadRetry(t *testing.T) {
}
func TestTxnSuccess(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
kv := clus.Client(0)
@@ -163,9 +164,9 @@ func TestTxnSuccess(t *testing.T) {
}
func TestTxnCompareRange(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
kv := clus.Client(0)
@@ -190,9 +191,9 @@ func TestTxnCompareRange(t *testing.T) {
}
func TestTxnNested(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
kv := clus.Client(0)
diff --git a/tests/integration/clientv3/user_test.go b/tests/integration/clientv3/user_test.go
index fe8b4cde263..e1f20fe1986 100644
--- a/tests/integration/clientv3/user_test.go
+++ b/tests/integration/clientv3/user_test.go
@@ -16,19 +16,22 @@ package clientv3test
import (
"context"
+ "errors"
"testing"
"time"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/tests/v3/integration"
+ "github.com/stretchr/testify/require"
"google.golang.org/grpc"
+
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestUserError(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
authapi := clus.RandClient()
@@ -39,32 +42,82 @@ func TestUserError(t *testing.T) {
}
_, err = authapi.UserAdd(context.TODO(), "foo", "bar")
- if err != rpctypes.ErrUserAlreadyExist {
+ if !errors.Is(err, rpctypes.ErrUserAlreadyExist) {
t.Fatalf("expected %v, got %v", rpctypes.ErrUserAlreadyExist, err)
}
_, err = authapi.UserDelete(context.TODO(), "not-exist-user")
- if err != rpctypes.ErrUserNotFound {
+ if !errors.Is(err, rpctypes.ErrUserNotFound) {
t.Fatalf("expected %v, got %v", rpctypes.ErrUserNotFound, err)
}
_, err = authapi.UserGrantRole(context.TODO(), "foo", "test-role-does-not-exist")
- if err != rpctypes.ErrRoleNotFound {
+ if !errors.Is(err, rpctypes.ErrRoleNotFound) {
t.Fatalf("expected %v, got %v", rpctypes.ErrRoleNotFound, err)
}
}
+func TestAddUserAfterDelete(t *testing.T) {
+ integration2.BeforeTest(t)
+
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
+ defer clus.Terminate(t)
+
+ authapi := clus.RandClient()
+ authSetupRoot(t, authapi.Auth)
+ cfg := clientv3.Config{
+ Endpoints: authapi.Endpoints(),
+ DialTimeout: 5 * time.Second,
+ DialOptions: []grpc.DialOption{grpc.WithBlock()},
+ }
+ cfg.Username, cfg.Password = "root", "123"
+ authed, err := integration2.NewClient(t, cfg)
+ require.NoError(t, err)
+ defer authed.Close()
+
+ // add user
+ _, err = authed.UserAdd(context.TODO(), "foo", "bar")
+ require.NoError(t, err)
+ _, err = authapi.Authenticate(context.TODO(), "foo", "bar")
+ require.NoError(t, err)
+ // delete user
+ _, err = authed.UserDelete(context.TODO(), "foo")
+ require.NoError(t, err)
+ if _, err = authed.Authenticate(context.TODO(), "foo", "bar"); err == nil {
+ t.Errorf("expect Authenticate error for old password")
+ }
+ // add user back
+ _, err = authed.UserAdd(context.TODO(), "foo", "bar")
+ require.NoError(t, err)
+ _, err = authed.Authenticate(context.TODO(), "foo", "bar")
+ require.NoError(t, err)
+ // change password
+ _, err = authed.UserChangePassword(context.TODO(), "foo", "bar2")
+ require.NoError(t, err)
+ _, err = authed.UserChangePassword(context.TODO(), "foo", "bar1")
+ require.NoError(t, err)
+
+ if _, err = authed.Authenticate(context.TODO(), "foo", "bar"); err == nil {
+ t.Errorf("expect Authenticate error for old password")
+ }
+ if _, err = authed.Authenticate(context.TODO(), "foo", "bar2"); err == nil {
+ t.Errorf("expect Authenticate error for old password")
+ }
+ _, err = authed.Authenticate(context.TODO(), "foo", "bar1")
+ require.NoError(t, err)
+}
+
func TestUserErrorAuth(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
authapi := clus.RandClient()
authSetupRoot(t, authapi.Auth)
// unauthenticated client
- if _, err := authapi.UserAdd(context.TODO(), "foo", "bar"); err != rpctypes.ErrUserEmpty {
+ if _, err := authapi.UserAdd(context.TODO(), "foo", "bar"); !errors.Is(err, rpctypes.ErrUserEmpty) {
t.Fatalf("expected %v, got %v", rpctypes.ErrUserEmpty, err)
}
@@ -75,16 +128,16 @@ func TestUserErrorAuth(t *testing.T) {
DialOptions: []grpc.DialOption{grpc.WithBlock()},
}
cfg.Username, cfg.Password = "wrong-id", "123"
- if _, err := integration.NewClient(t, cfg); err != rpctypes.ErrAuthFailed {
+ if _, err := integration2.NewClient(t, cfg); !errors.Is(err, rpctypes.ErrAuthFailed) {
t.Fatalf("expected %v, got %v", rpctypes.ErrAuthFailed, err)
}
cfg.Username, cfg.Password = "root", "wrong-pass"
- if _, err := integration.NewClient(t, cfg); err != rpctypes.ErrAuthFailed {
+ if _, err := integration2.NewClient(t, cfg); !errors.Is(err, rpctypes.ErrAuthFailed) {
t.Fatalf("expected %v, got %v", rpctypes.ErrAuthFailed, err)
}
cfg.Username, cfg.Password = "root", "123"
- authed, err := integration.NewClient(t, cfg)
+ authed, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatal(err)
}
@@ -110,11 +163,12 @@ func authSetupRoot(t *testing.T, auth clientv3.Auth) {
}
}
-// Client can connect to etcd even if they supply credentials and the server is in AuthDisable mode.
+// TestGetTokenWithoutAuth ensures that a client can connect to etcd even if it
+// supplies credentials while the server is in AuthDisable mode.
func TestGetTokenWithoutAuth(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 2})
defer clus.Terminate(t)
authapi := clus.RandClient()
@@ -135,15 +189,15 @@ func TestGetTokenWithoutAuth(t *testing.T) {
Password: "123",
}
- client, err = integration.NewClient(t, cfg)
+ client, err = integration2.NewClient(t, cfg)
if err == nil {
defer client.Close()
}
- switch err {
- case nil:
+ switch {
+ case err == nil:
t.Log("passes as expected")
- case context.DeadlineExceeded:
+ case errors.Is(err, context.DeadlineExceeded):
t.Errorf("not expected result:%v with endpoint:%s", err, authapi.Endpoints())
default:
t.Errorf("other errors:%v", err)
diff --git a/tests/integration/clientv3/util.go b/tests/integration/clientv3/util.go
index 26ee793c94c..0e84115704c 100644
--- a/tests/integration/clientv3/util.go
+++ b/tests/integration/clientv3/util.go
@@ -16,13 +16,17 @@ package clientv3test
import (
"context"
+ "errors"
+ "fmt"
"strings"
"testing"
"time"
- "go.etcd.io/etcd/client/v3"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
// MustWaitPinReady waits up to 3-second until connection is up (pin endpoint).
@@ -61,7 +65,7 @@ func IsClientTimeout(err error) bool {
if err == nil {
return false
}
- if err == context.DeadlineExceeded {
+ if errors.Is(err, context.DeadlineExceeded) {
return true
}
ev, ok := status.FromError(err)
@@ -76,7 +80,7 @@ func IsCanceled(err error) bool {
if err == nil {
return false
}
- if err == context.Canceled {
+ if errors.Is(err, context.Canceled) {
return true
}
ev, ok := status.FromError(err)
@@ -91,7 +95,7 @@ func IsUnavailable(err error) bool {
if err == nil {
return false
}
- if err == context.Canceled {
+ if errors.Is(err, context.Canceled) {
return true
}
ev, ok := status.FromError(err)
@@ -101,3 +105,17 @@ func IsUnavailable(err error) bool {
code := ev.Code()
return code == codes.Unavailable
}
+
+// populateDataIntoCluster populates the key-value pairs into cluster and the
+// key will be named by testing.T.Name()-index.
+func populateDataIntoCluster(t *testing.T, cluster *integration2.Cluster, numKeys int, valueSize int) {
+ ctx := context.Background()
+
+ for i := 0; i < numKeys; i++ {
+ _, err := cluster.RandClient().Put(ctx,
+ fmt.Sprintf("%s-%v", t.Name(), i), strings.Repeat("a", valueSize))
+ if err != nil {
+ t.Errorf("populating data expected no error, but got %v", err)
+ }
+ }
+}
diff --git a/tests/integration/clientv3/watch_fragment_test.go b/tests/integration/clientv3/watch_fragment_test.go
index 7f564fe103c..81450f5f9aa 100644
--- a/tests/integration/clientv3/watch_fragment_test.go
+++ b/tests/integration/clientv3/watch_fragment_test.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build !cluster_proxy
-// +build !cluster_proxy
package clientv3test
@@ -25,8 +24,8 @@ import (
"time"
"go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/tests/v3/integration"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
// TestWatchFragmentDisable ensures that large watch
@@ -64,16 +63,16 @@ func TestWatchFragmentEnableWithGRPCLimit(t *testing.T) {
// testWatchFragment triggers watch response that spans over multiple
// revisions exceeding server request limits when combined.
func testWatchFragment(t *testing.T, fragment, exceedRecvLimit bool) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- cfg := &integration.ClusterConfig{
+ cfg := &integration2.ClusterConfig{
Size: 1,
MaxRequestBytes: 1.5 * 1024 * 1024,
}
if exceedRecvLimit {
cfg.ClientMaxCallRecvMsgSize = 1.5 * 1024 * 1024
}
- clus := integration.NewClusterV3(t, cfg)
+ clus := integration2.NewCluster(t, cfg)
defer clus.Terminate(t)
cli := clus.Client(0)
diff --git a/tests/integration/clientv3/watch_test.go b/tests/integration/clientv3/watch_test.go
index aab6b6b1361..6b9eade44b8 100644
--- a/tests/integration/clientv3/watch_test.go
+++ b/tests/integration/clientv3/watch_test.go
@@ -16,6 +16,7 @@ package clientv3test
import (
"context"
+ "errors"
"fmt"
"math/rand"
"reflect"
@@ -24,19 +25,21 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc/metadata"
+
mvccpb "go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/api/v3/version"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
- "go.etcd.io/etcd/tests/v3/integration"
- "google.golang.org/grpc/metadata"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
type watcherTest func(*testing.T, *watchctx)
type watchctx struct {
- clus *integration.ClusterV3
+ clus *integration2.Cluster
w clientv3.Watcher
kv clientv3.KV
wclientMember int
@@ -45,9 +48,9 @@ type watchctx struct {
}
func runWatchTest(t *testing.T, f watcherTest) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
wclientMember := rand.Intn(3)
@@ -74,6 +77,12 @@ func testWatchMultiWatcher(t *testing.T, wctx *watchctx) {
keys := []string{"foo", "bar", "baz"}
donec := make(chan struct{})
+ // wait for watcher shutdown
+ defer func() {
+ for i := 0; i < len(keys)+1; i++ {
+ <-donec
+ }
+ }()
readyc := make(chan struct{})
for _, k := range keys {
// key watcher
@@ -104,7 +113,7 @@ func testWatchMultiWatcher(t *testing.T, wctx *watchctx) {
t.Errorf("expected watcher channel, got nil")
}
readyc <- struct{}{}
- evs := []*clientv3.Event{}
+ var evs []*clientv3.Event
for i := 0; i < numKeyUpdates*2; i++ {
resp, ok := <-prefixc
if !ok {
@@ -114,14 +123,14 @@ func testWatchMultiWatcher(t *testing.T, wctx *watchctx) {
}
// check response
- expected := []string{}
+ var expected []string
bkeys := []string{"bar", "baz"}
for _, k := range bkeys {
for i := 0; i < numKeyUpdates; i++ {
expected = append(expected, fmt.Sprintf("%s-%d", k, i))
}
}
- got := []string{}
+ var got []string
for _, ev := range evs {
got = append(got, string(ev.Kv.Value))
}
@@ -156,10 +165,6 @@ func testWatchMultiWatcher(t *testing.T, wctx *watchctx) {
}
}
}
- // wait for watcher shutdown
- for i := 0; i < len(keys)+1; i++ {
- <-donec
- }
}
// TestWatchRange tests watcher creates ranges
@@ -188,7 +193,7 @@ func testWatchReconnRequest(t *testing.T, wctx *watchctx) {
defer close(donec)
// take down watcher connection
for {
- wctx.clus.Members[wctx.wclientMember].DropConnections()
+ wctx.clus.Members[wctx.wclientMember].Bridge().DropConnections()
select {
case <-timer:
// spinning on close may live lock reconnection
@@ -230,7 +235,7 @@ func testWatchReconnInit(t *testing.T, wctx *watchctx) {
if wctx.ch = wctx.w.Watch(context.TODO(), "a"); wctx.ch == nil {
t.Fatalf("expected non-nil channel")
}
- wctx.clus.Members[wctx.wclientMember].DropConnections()
+ wctx.clus.Members[wctx.wclientMember].Bridge().DropConnections()
// watcher should recover
putAndWatch(t, wctx, "a", "a")
}
@@ -247,7 +252,7 @@ func testWatchReconnRunning(t *testing.T, wctx *watchctx) {
}
putAndWatch(t, wctx, "a", "a")
// take down watcher connection
- wctx.clus.Members[wctx.wclientMember].DropConnections()
+ wctx.clus.Members[wctx.wclientMember].Bridge().DropConnections()
// watcher should recover
putAndWatch(t, wctx, "a", "b")
}
@@ -299,8 +304,6 @@ func TestWatchCancelRunning(t *testing.T) {
}
func testWatchCancelRunning(t *testing.T, wctx *watchctx) {
- integration.BeforeTest(t)
-
ctx, cancel := context.WithCancel(context.Background())
if wctx.ch = wctx.w.Watch(ctx, "a"); wctx.ch == nil {
t.Fatalf("expected non-nil watcher channel")
@@ -340,15 +343,20 @@ func putAndWatch(t *testing.T, wctx *watchctx, key, val string) {
if !ok {
t.Fatalf("unexpected watch close")
}
+ if err := v.Err(); err != nil {
+ t.Fatalf("unexpected watch response error: %v", err)
+ }
if string(v.Events[0].Kv.Value) != val {
t.Fatalf("bad value got %v, wanted %v", v.Events[0].Kv.Value, val)
}
}
}
-func TestWatchResumeInitRev(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+// TestWatchResumeAfterDisconnect tests that a watch resumes after the member disconnects and then reconnects.
+// It ensures that the correct events are returned for the requested start revision.
+func TestWatchResumeAfterDisconnect(t *testing.T) {
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
cli := clus.Client(0)
@@ -363,13 +371,16 @@ func TestWatchResumeInitRev(t *testing.T) {
t.Fatal(err)
}
+ // watch from revision 1
wch := clus.Client(0).Watch(context.Background(), "a", clientv3.WithRev(1), clientv3.WithCreatedNotify())
+ // The response to the watch create request contains no events;
+ // the current revision of etcd should be 4.
if resp, ok := <-wch; !ok || resp.Header.Revision != 4 {
t.Fatalf("got (%v, %v), expected create notification rev=4", resp, ok)
}
// pause wch
- clus.Members[0].DropConnections()
- clus.Members[0].PauseConnections()
+ clus.Members[0].Bridge().DropConnections()
+ clus.Members[0].Bridge().PauseConnections()
select {
case resp, ok := <-wch:
@@ -378,19 +389,23 @@ func TestWatchResumeInitRev(t *testing.T) {
}
// resume wch
- clus.Members[0].UnpauseConnections()
+ clus.Members[0].Bridge().UnpauseConnections()
select {
case resp, ok := <-wch:
if !ok {
t.Fatal("unexpected watch close")
}
- if len(resp.Events) == 0 {
- t.Fatal("expected event on watch")
+ // Events should be put(a, 3) and put(a, 4)
+ if len(resp.Events) != 2 {
+ t.Fatal("expected two events on watch")
}
if string(resp.Events[0].Kv.Value) != "3" {
t.Fatalf("expected value=3, got event %+v", resp.Events[0])
}
+ if string(resp.Events[1].Kv.Value) != "4" {
+ t.Fatalf("expected value=4, got event %+v", resp.Events[1])
+ }
case <-time.After(5 * time.Second):
t.Fatal("watch timed out")
}
@@ -402,9 +417,9 @@ func TestWatchResumeInitRev(t *testing.T) {
// either a compaction error or all keys by staying in sync before the compaction
// is finally applied.
func TestWatchResumeCompacted(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
// create a waiting watcher at rev 1
@@ -417,23 +432,14 @@ func TestWatchResumeCompacted(t *testing.T) {
}
clus.Members[0].Stop(t)
- ticker := time.After(time.Second * 10)
- for clus.WaitLeader(t) <= 0 {
- select {
- case <-ticker:
- t.Fatalf("failed to wait for new leader")
- default:
- time.Sleep(10 * time.Millisecond)
- }
- }
+ clus.WaitLeader(t)
// put some data and compact away
numPuts := 5
kv := clus.Client(1)
for i := 0; i < numPuts; i++ {
- if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil {
- t.Fatal(err)
- }
+ _, err := kv.Put(context.TODO(), "foo", "bar")
+ require.NoError(t, err)
}
if _, err := kv.Compact(context.TODO(), 3); err != nil {
t.Fatal(err)
@@ -466,7 +472,7 @@ func TestWatchResumeCompacted(t *testing.T) {
if wresp.Err() == nil {
continue
}
- if wresp.Err() != rpctypes.ErrCompacted {
+ if !errors.Is(wresp.Err(), rpctypes.ErrCompacted) {
t.Fatalf("wresp.Err() expected %v, got %+v", rpctypes.ErrCompacted, wresp.Err())
}
break
@@ -489,17 +495,16 @@ func TestWatchResumeCompacted(t *testing.T) {
// TestWatchCompactRevision ensures the CompactRevision error is given on a
// compaction event ahead of a watcher.
func TestWatchCompactRevision(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
// set some keys
kv := clus.RandClient()
for i := 0; i < 5; i++ {
- if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil {
- t.Fatal(err)
- }
+ _, err := kv.Put(context.TODO(), "foo", "bar")
+ require.NoError(t, err)
}
w := clus.RandClient()
@@ -514,7 +519,7 @@ func TestWatchCompactRevision(t *testing.T) {
if !ok {
t.Fatalf("expected wresp, but got closed channel")
}
- if wresp.Err() != rpctypes.ErrCompacted {
+ if !errors.Is(wresp.Err(), rpctypes.ErrCompacted) {
t.Fatalf("wresp.Err() expected %v, but got %v", rpctypes.ErrCompacted, wresp.Err())
}
if !wresp.Canceled {
@@ -531,7 +536,7 @@ func TestWatchWithProgressNotify(t *testing.T) { testWatchWithProgressNot
func TestWatchWithProgressNotifyNoEvent(t *testing.T) { testWatchWithProgressNotify(t, false) }
func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
// accelerate report interval so test terminates quickly
oldpi := v3rpc.GetProgressReportInterval()
@@ -540,7 +545,7 @@ func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
pi := 3 * time.Second
defer func() { v3rpc.SetProgressReportInterval(oldpi) }()
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
wc := clus.RandClient()
@@ -571,8 +576,10 @@ func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
t.Fatalf("resp.Header.Revision expected 2, got %d", resp.Header.Revision)
}
if watchOnPut { // wait for put if watch on the put key
- ev := []*clientv3.Event{{Type: clientv3.EventTypePut,
- Kv: &mvccpb.KeyValue{Key: []byte("foox"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}}}
+ ev := []*clientv3.Event{{
+ Type: clientv3.EventTypePut,
+ Kv: &mvccpb.KeyValue{Key: []byte("foox"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
+ }}
if !reflect.DeepEqual(ev, resp.Events) {
t.Fatalf("expected %+v, got %+v", ev, resp.Events)
}
@@ -585,11 +592,11 @@ func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
}
func TestConfigurableWatchProgressNotifyInterval(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
progressInterval := 200 * time.Millisecond
- clus := integration.NewClusterV3(t,
- &integration.ClusterConfig{
+ clus := integration2.NewCluster(t,
+ &integration2.ClusterConfig{
Size: 3,
WatchProgressNotifyInterval: progressInterval,
})
@@ -611,9 +618,7 @@ func TestConfigurableWatchProgressNotifyInterval(t *testing.T) {
}
func TestWatchRequestProgress(t *testing.T) {
- integration.BeforeTest(t)
-
- if integration.ThroughProxy {
+ if integration2.ThroughProxy {
t.Skipf("grpc-proxy does not support WatchProgress yet")
}
testCases := []struct {
@@ -627,11 +632,11 @@ func TestWatchRequestProgress(t *testing.T) {
for _, c := range testCases {
t.Run(c.name, func(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
watchTimeout := 3 * time.Second
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
wc := clus.RandClient()
@@ -688,9 +693,9 @@ func TestWatchRequestProgress(t *testing.T) {
}
func TestWatchEventType(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer cluster.Terminate(t)
client := cluster.RandClient()
@@ -762,9 +767,9 @@ func TestWatchEventType(t *testing.T) {
}
func TestWatchErrConnClosed(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
@@ -785,16 +790,16 @@ func TestWatchErrConnClosed(t *testing.T) {
clus.TakeClient(0)
select {
- case <-time.After(integration.RequestWaitTimeout):
+ case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("wc.Watch took too long")
case <-donec:
}
}
func TestWatchAfterClose(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
@@ -806,13 +811,13 @@ func TestWatchAfterClose(t *testing.T) {
donec := make(chan struct{})
go func() {
cli.Watch(context.TODO(), "foo")
- if err := cli.Close(); err != nil && err != context.Canceled {
+ if err := cli.Close(); err != nil && !errors.Is(err, context.Canceled) {
t.Errorf("expected %v, got %v", context.Canceled, err)
}
close(donec)
}()
select {
- case <-time.After(integration.RequestWaitTimeout):
+ case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("wc.Watch took too long")
case <-donec:
}
@@ -820,9 +825,9 @@ func TestWatchAfterClose(t *testing.T) {
// TestWatchWithRequireLeader checks the watch channel closes when no leader.
func TestWatchWithRequireLeader(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
defer clus.Terminate(t)
// Put a key for the non-require leader watch to read as an event.
@@ -855,10 +860,10 @@ func TestWatchWithRequireLeader(t *testing.T) {
if !ok {
t.Fatalf("expected %v watch channel, got closed channel", rpctypes.ErrNoLeader)
}
- if resp.Err() != rpctypes.ErrNoLeader {
+ if !errors.Is(resp.Err(), rpctypes.ErrNoLeader) {
t.Fatalf("expected %v watch response error, got %+v", rpctypes.ErrNoLeader, resp)
}
- case <-time.After(integration.RequestWaitTimeout):
+ case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("watch without leader took too long to close")
}
@@ -867,7 +872,7 @@ func TestWatchWithRequireLeader(t *testing.T) {
if ok {
t.Fatalf("expected closed channel, got response %v", resp)
}
- case <-time.After(integration.RequestWaitTimeout):
+ case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("waited too long for channel to close")
}
@@ -894,9 +899,9 @@ func TestWatchWithRequireLeader(t *testing.T) {
// TestWatchWithFilter checks that watch filtering works.
func TestWatchWithFilter(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer cluster.Terminate(t)
client := cluster.RandClient()
@@ -933,9 +938,9 @@ func TestWatchWithFilter(t *testing.T) {
// TestWatchWithCreatedNotification checks that WithCreatedNotify returns a
// Created watch response.
func TestWatchWithCreatedNotification(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer cluster.Terminate(t)
client := cluster.RandClient()
@@ -955,9 +960,9 @@ func TestWatchWithCreatedNotification(t *testing.T) {
// a watcher with created notify does not post duplicate
// created events from disconnect.
func TestWatchWithCreatedNotificationDropConn(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer cluster.Terminate(t)
client := cluster.RandClient()
@@ -970,7 +975,7 @@ func TestWatchWithCreatedNotificationDropConn(t *testing.T) {
t.Fatalf("expected created event, got %v", resp)
}
- cluster.Members[0].DropConnections()
+ cluster.Members[0].Bridge().DropConnections()
// check watch channel doesn't post another watch response.
select {
@@ -984,9 +989,9 @@ func TestWatchWithCreatedNotificationDropConn(t *testing.T) {
// TestWatchCancelOnServer ensures client watcher cancels propagate back to the server.
func TestWatchCancelOnServer(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer cluster.Terminate(t)
client := cluster.RandClient()
@@ -1045,27 +1050,27 @@ func TestWatchCancelOnServer(t *testing.T) {
// TestWatchOverlapContextCancel stresses the watcher stream teardown path by
// creating/canceling watchers to ensure that new watchers are not taken down
// by a torn down watch stream. The sort of race that's being detected:
-// 1. create w1 using a cancelable ctx with %v as "ctx"
-// 2. cancel ctx
-// 3. watcher client begins tearing down watcher grpc stream since no more watchers
-// 3. start creating watcher w2 using a new "ctx" (not canceled), attaches to old grpc stream
-// 4. watcher client finishes tearing down stream on "ctx"
-// 5. w2 comes back canceled
+// 1. create w1 using a cancelable ctx with %v as "ctx"
+// 2. cancel ctx
+// 3a. watcher client begins tearing down the watcher grpc stream since there are no more watchers
+// 3b. start creating watcher w2 using a new "ctx" (not canceled); it attaches to the old grpc stream
+// 4. watcher client finishes tearing down stream on "ctx"
+// 5. w2 comes back canceled
func TestWatchOverlapContextCancel(t *testing.T) {
- f := func(clus *integration.ClusterV3) {}
+ f := func(clus *integration2.Cluster) {}
testWatchOverlapContextCancel(t, f)
}
func TestWatchOverlapDropConnContextCancel(t *testing.T) {
- f := func(clus *integration.ClusterV3) {
- clus.Members[0].DropConnections()
+ f := func(clus *integration2.Cluster) {
+ clus.Members[0].Bridge().DropConnections()
}
testWatchOverlapContextCancel(t, f)
}
-func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3)) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+func testWatchOverlapContextCancel(t *testing.T, f func(*integration2.Cluster)) {
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
n := 100
@@ -1084,6 +1089,8 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3))
t.Fatal(err)
}
ch := make(chan struct{}, n)
+ tCtx, cancelFunc := context.WithCancel(context.Background())
+ defer cancelFunc()
for i := 0; i < n; i++ {
go func() {
defer func() { ch <- struct{}{} }()
@@ -1091,6 +1098,12 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3))
ctx, cancel := context.WithCancel(ctxs[idx])
ctxc[idx] <- struct{}{}
wch := cli.Watch(ctx, "abc", clientv3.WithRev(1))
+ select {
+ case <-tCtx.Done():
+ cancel()
+ return
+ default:
+ }
f(clus)
select {
case _, ok := <-wch:
@@ -1125,8 +1138,8 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3))
// TestWatchCancelAndCloseClient ensures that canceling a watcher then immediately
// closing the client does not return a client closing error.
func TestWatchCancelAndCloseClient(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
ctx, cancel := context.WithCancel(context.Background())
@@ -1155,8 +1168,8 @@ func TestWatchCancelAndCloseClient(t *testing.T) {
// to put them in resuming mode, cancels them so some resumes by cancel fail,
// then closes the watcher interface to ensure correct clean up.
func TestWatchStressResumeClose(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
cli := clus.Client(0)
@@ -1166,7 +1179,7 @@ func TestWatchStressResumeClose(t *testing.T) {
for i := range wchs {
wchs[i] = cli.Watch(ctx, "abc")
}
- clus.Members[0].DropConnections()
+ clus.Members[0].Bridge().DropConnections()
cancel()
if err := cli.Close(); err != nil {
t.Fatal(err)
@@ -1177,8 +1190,8 @@ func TestWatchStressResumeClose(t *testing.T) {
// TestWatchCancelDisconnected ensures canceling a watcher works when
// its grpc stream is disconnected / reconnecting.
func TestWatchCancelDisconnected(t *testing.T) {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ integration2.BeforeTest(t)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
ctx, cancel := context.WithCancel(context.Background())
diff --git a/tests/integration/cluster.go b/tests/integration/cluster.go
deleted file mode 100644
index c5302e42920..00000000000
--- a/tests/integration/cluster.go
+++ /dev/null
@@ -1,1541 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package integration
-
-import (
- "context"
- "crypto/tls"
- "fmt"
- "io/ioutil"
- "log"
- "math/rand"
- "net"
- "net/http"
- "net/http/httptest"
- "os"
- "reflect"
- "sort"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/client/pkg/v3/tlsutil"
- "go.etcd.io/etcd/client/pkg/v3/transport"
- "go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/client/v2"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/raft/v3"
- "go.etcd.io/etcd/server/v3/config"
- "go.etcd.io/etcd/server/v3/embed"
- "go.etcd.io/etcd/server/v3/etcdserver"
- "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp"
- "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2http"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v3client"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v3election"
- epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock"
- lockpb "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
- "go.etcd.io/etcd/server/v3/verify"
- "go.uber.org/zap/zapcore"
- "go.uber.org/zap/zaptest"
-
- "github.com/soheilhy/cmux"
- "go.uber.org/zap"
- "golang.org/x/crypto/bcrypt"
- "google.golang.org/grpc"
- "google.golang.org/grpc/keepalive"
-)
-
-const (
- // RequestWaitTimeout is the time duration to wait for a request to go through or detect leader loss.
- RequestWaitTimeout = 5 * time.Second
- tickDuration = 10 * time.Millisecond
- requestTimeout = 20 * time.Second
-
- clusterName = "etcd"
- basePort = 21000
- URLScheme = "unix"
- URLSchemeTLS = "unixs"
-)
-
-var (
- electionTicks = 10
-
- // integration test uses unique ports, counting up, to listen for each
- // member, ensuring restarted members can listen on the same port again.
- localListenCount = int64(0)
-
- testTLSInfo = transport.TLSInfo{
- KeyFile: MustAbsPath("../fixtures/server.key.insecure"),
- CertFile: MustAbsPath("../fixtures/server.crt"),
- TrustedCAFile: MustAbsPath("../fixtures/ca.crt"),
- ClientCertAuth: true,
- }
-
- testTLSInfoWithSpecificUsage = transport.TLSInfo{
- KeyFile: MustAbsPath("../fixtures/server-serverusage.key.insecure"),
- CertFile: MustAbsPath("../fixtures/server-serverusage.crt"),
- ClientKeyFile: MustAbsPath("../fixtures/client-clientusage.key.insecure"),
- ClientCertFile: MustAbsPath("../fixtures/client-clientusage.crt"),
- TrustedCAFile: MustAbsPath("../fixtures/ca.crt"),
- ClientCertAuth: true,
- }
-
- testTLSInfoIP = transport.TLSInfo{
- KeyFile: MustAbsPath("../fixtures/server-ip.key.insecure"),
- CertFile: MustAbsPath("../fixtures/server-ip.crt"),
- TrustedCAFile: MustAbsPath("../fixtures/ca.crt"),
- ClientCertAuth: true,
- }
-
- testTLSInfoExpired = transport.TLSInfo{
- KeyFile: MustAbsPath("./fixtures-expired/server.key.insecure"),
- CertFile: MustAbsPath("./fixtures-expired/server.crt"),
- TrustedCAFile: MustAbsPath("./fixtures-expired/ca.crt"),
- ClientCertAuth: true,
- }
-
- testTLSInfoExpiredIP = transport.TLSInfo{
- KeyFile: MustAbsPath("./fixtures-expired/server-ip.key.insecure"),
- CertFile: MustAbsPath("./fixtures-expired/server-ip.crt"),
- TrustedCAFile: MustAbsPath("./fixtures-expired/ca.crt"),
- ClientCertAuth: true,
- }
-
- defaultTokenJWT = fmt.Sprintf("jwt,pub-key=%s,priv-key=%s,sign-method=RS256,ttl=1s",
- MustAbsPath("../fixtures/server.crt"), MustAbsPath("../fixtures/server.key.insecure"))
-)
-
-type ClusterConfig struct {
- Size int
- PeerTLS *transport.TLSInfo
- ClientTLS *transport.TLSInfo
-
- DiscoveryURL string
-
- AuthToken string
-
- UseGRPC bool
-
- QuotaBackendBytes int64
-
- MaxTxnOps uint
- MaxRequestBytes uint
- SnapshotCount uint64
- SnapshotCatchUpEntries uint64
-
- GRPCKeepAliveMinTime time.Duration
- GRPCKeepAliveInterval time.Duration
- GRPCKeepAliveTimeout time.Duration
-
- // SkipCreatingClient to skip creating clients for each member.
- SkipCreatingClient bool
-
- ClientMaxCallSendMsgSize int
- ClientMaxCallRecvMsgSize int
-
- // UseIP is true to use only IP for gRPC requests.
- UseIP bool
-
- EnableLeaseCheckpoint bool
- LeaseCheckpointInterval time.Duration
-
- WatchProgressNotifyInterval time.Duration
-}
-
-type cluster struct {
- cfg *ClusterConfig
- Members []*member
- lastMemberNum int
-}
-
-func (c *cluster) generateMemberName() string {
- c.lastMemberNum++
- return fmt.Sprintf("m%v", c.lastMemberNum-1)
-}
-
-func schemeFromTLSInfo(tls *transport.TLSInfo) string {
- if tls == nil {
- return URLScheme
- }
- return URLSchemeTLS
-}
-
-func (c *cluster) fillClusterForMembers() error {
- if c.cfg.DiscoveryURL != "" {
- // cluster will be discovered
- return nil
- }
-
- addrs := make([]string, 0)
- for _, m := range c.Members {
- scheme := schemeFromTLSInfo(m.PeerTLSInfo)
- for _, l := range m.PeerListeners {
- addrs = append(addrs, fmt.Sprintf("%s=%s://%s", m.Name, scheme, l.Addr().String()))
- }
- }
- clusterStr := strings.Join(addrs, ",")
- var err error
- for _, m := range c.Members {
- m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func newCluster(t testutil.TB, cfg *ClusterConfig) *cluster {
- testutil.SkipTestIfShortMode(t, "Cannot start etcd cluster in --short tests")
-
- c := &cluster{cfg: cfg}
- ms := make([]*member, cfg.Size)
- for i := 0; i < cfg.Size; i++ {
- ms[i] = c.mustNewMember(t)
- }
- c.Members = ms
- if err := c.fillClusterForMembers(); err != nil {
- t.Fatal(err)
- }
-
- return c
-}
-
-// NewCluster returns an unlaunched cluster of the given size which has been
-// set to use static bootstrap.
-func NewCluster(t testutil.TB, size int) *cluster {
- t.Helper()
- return newCluster(t, &ClusterConfig{Size: size})
-}
-
-// NewClusterByConfig returns an unlaunched cluster defined by a cluster configuration
-func NewClusterByConfig(t testutil.TB, cfg *ClusterConfig) *cluster {
- return newCluster(t, cfg)
-}
-
-func (c *cluster) Launch(t testutil.TB) {
- errc := make(chan error)
- for _, m := range c.Members {
- // Members are launched in separate goroutines because if they boot
- // using discovery url, they have to wait for others to register to continue.
- go func(m *member) {
- errc <- m.Launch()
- }(m)
- }
- for range c.Members {
- if err := <-errc; err != nil {
- c.Terminate(t)
- t.Fatalf("error setting up member: %v", err)
- }
- }
- // wait cluster to be stable to receive future client requests
- c.waitMembersMatch(t, c.HTTPMembers())
- c.waitVersion()
- for _, m := range c.Members {
- t.Logf(" - %v -> %v (%v)", m.Name, m.ID(), m.GRPCAddr())
- }
-}
-
-func (c *cluster) URL(i int) string {
- return c.Members[i].ClientURLs[0].String()
-}
-
-// URLs returns a list of all active client URLs in the cluster
-func (c *cluster) URLs() []string {
- return getMembersURLs(c.Members)
-}
-
-func getMembersURLs(members []*member) []string {
- urls := make([]string, 0)
- for _, m := range members {
- select {
- case <-m.s.StopNotify():
- continue
- default:
- }
- for _, u := range m.ClientURLs {
- urls = append(urls, u.String())
- }
- }
- return urls
-}
-
-// HTTPMembers returns a list of all active members as client.Members
-func (c *cluster) HTTPMembers() []client.Member {
- ms := []client.Member{}
- for _, m := range c.Members {
- pScheme := schemeFromTLSInfo(m.PeerTLSInfo)
- cScheme := schemeFromTLSInfo(m.ClientTLSInfo)
- cm := client.Member{Name: m.Name}
- for _, ln := range m.PeerListeners {
- cm.PeerURLs = append(cm.PeerURLs, pScheme+"://"+ln.Addr().String())
- }
- for _, ln := range m.ClientListeners {
- cm.ClientURLs = append(cm.ClientURLs, cScheme+"://"+ln.Addr().String())
- }
- ms = append(ms, cm)
- }
- return ms
-}
-
-func (c *cluster) mustNewMember(t testutil.TB) *member {
- m := mustNewMember(t,
- memberConfig{
- name: c.generateMemberName(),
- authToken: c.cfg.AuthToken,
- peerTLS: c.cfg.PeerTLS,
- clientTLS: c.cfg.ClientTLS,
- quotaBackendBytes: c.cfg.QuotaBackendBytes,
- maxTxnOps: c.cfg.MaxTxnOps,
- maxRequestBytes: c.cfg.MaxRequestBytes,
- snapshotCount: c.cfg.SnapshotCount,
- snapshotCatchUpEntries: c.cfg.SnapshotCatchUpEntries,
- grpcKeepAliveMinTime: c.cfg.GRPCKeepAliveMinTime,
- grpcKeepAliveInterval: c.cfg.GRPCKeepAliveInterval,
- grpcKeepAliveTimeout: c.cfg.GRPCKeepAliveTimeout,
- clientMaxCallSendMsgSize: c.cfg.ClientMaxCallSendMsgSize,
- clientMaxCallRecvMsgSize: c.cfg.ClientMaxCallRecvMsgSize,
- useIP: c.cfg.UseIP,
- enableLeaseCheckpoint: c.cfg.EnableLeaseCheckpoint,
- leaseCheckpointInterval: c.cfg.LeaseCheckpointInterval,
- WatchProgressNotifyInterval: c.cfg.WatchProgressNotifyInterval,
- })
- m.DiscoveryURL = c.cfg.DiscoveryURL
- if c.cfg.UseGRPC {
- if err := m.listenGRPC(); err != nil {
- t.Fatal(err)
- }
- }
- return m
-}
-
-// addMember returns the PeerURLs of the added member.
-func (c *cluster) addMember(t testutil.TB) types.URLs {
- m := c.mustNewMember(t)
-
- scheme := schemeFromTLSInfo(c.cfg.PeerTLS)
-
- // send add request to the cluster
- var err error
- for i := 0; i < len(c.Members); i++ {
- clientURL := c.URL(i)
- peerURL := scheme + "://" + m.PeerListeners[0].Addr().String()
- if err = c.addMemberByURL(t, clientURL, peerURL); err == nil {
- break
- }
- }
- if err != nil {
- t.Fatalf("add member failed on all members error: %v", err)
- }
-
- m.InitialPeerURLsMap = types.URLsMap{}
- for _, mm := range c.Members {
- m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs
- }
- m.InitialPeerURLsMap[m.Name] = m.PeerURLs
- m.NewCluster = false
- if err := m.Launch(); err != nil {
- t.Fatal(err)
- }
- c.Members = append(c.Members, m)
- // wait cluster to be stable to receive future client requests
- c.waitMembersMatch(t, c.HTTPMembers())
- return m.PeerURLs
-}
-
-func (c *cluster) addMemberByURL(t testutil.TB, clientURL, peerURL string) error {
- cc := MustNewHTTPClient(t, []string{clientURL}, c.cfg.ClientTLS)
- ma := client.NewMembersAPI(cc)
- ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
- _, err := ma.Add(ctx, peerURL)
- cancel()
- if err != nil {
- return err
- }
-
- // wait for the add node entry applied in the cluster
- members := append(c.HTTPMembers(), client.Member{PeerURLs: []string{peerURL}, ClientURLs: []string{}})
- c.waitMembersMatch(t, members)
- return nil
-}
-
-// AddMember returns the PeerURLs of the added member.
-func (c *cluster) AddMember(t testutil.TB) types.URLs {
- return c.addMember(t)
-}
-
-func (c *cluster) RemoveMember(t testutil.TB, id uint64) {
- if err := c.removeMember(t, id); err != nil {
- t.Fatal(err)
- }
-}
-
-func (c *cluster) removeMember(t testutil.TB, id uint64) error {
- // send remove request to the cluster
- cc := MustNewHTTPClient(t, c.URLs(), c.cfg.ClientTLS)
- ma := client.NewMembersAPI(cc)
- ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
- err := ma.Remove(ctx, types.ID(id).String())
- cancel()
- if err != nil {
- return err
- }
- newMembers := make([]*member, 0)
- for _, m := range c.Members {
- if uint64(m.s.ID()) != id {
- newMembers = append(newMembers, m)
- } else {
- select {
- case <-m.s.StopNotify():
- m.Terminate(t)
- // 1s stop delay + election timeout + 1s disk and network delay + connection write timeout
- // TODO: remove connection write timeout by selecting on http response closeNotifier
- // blocking on https://github.com/golang/go/issues/9524
- case <-time.After(time.Second + time.Duration(electionTicks)*tickDuration + time.Second + rafthttp.ConnWriteTimeout):
- t.Fatalf("failed to remove member %s in time", m.s.ID())
- }
- }
- }
- c.Members = newMembers
- c.waitMembersMatch(t, c.HTTPMembers())
- return nil
-}
-
-func (c *cluster) Terminate(t testutil.TB) {
- var wg sync.WaitGroup
- wg.Add(len(c.Members))
- for _, m := range c.Members {
- go func(mm *member) {
- defer wg.Done()
- mm.Terminate(t)
- }(m)
- }
- wg.Wait()
-}
-
-func (c *cluster) waitMembersMatch(t testutil.TB, membs []client.Member) {
- for _, u := range c.URLs() {
- cc := MustNewHTTPClient(t, []string{u}, c.cfg.ClientTLS)
- ma := client.NewMembersAPI(cc)
- for {
- ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
- ms, err := ma.List(ctx)
- cancel()
- if err == nil && isMembersEqual(ms, membs) {
- break
- }
- time.Sleep(tickDuration)
- }
- }
-}
-
-// WaitLeader returns index of the member in c.Members that is leader (or -1).
-func (c *cluster) WaitLeader(t testutil.TB) int { return c.waitLeader(t, c.Members) }
-
-// waitLeader waits until given members agree on the same leader,
-// and returns its 'index' in the 'membs' list (or -1).
-func (c *cluster) waitLeader(t testutil.TB, membs []*member) int {
- possibleLead := make(map[uint64]bool)
- var lead uint64
- for _, m := range membs {
- possibleLead[uint64(m.s.ID())] = true
- }
- cc := MustNewHTTPClient(t, getMembersURLs(membs), nil)
- kapi := client.NewKeysAPI(cc)
-
- // ensure leader is up via linearizable get
- for {
- ctx, cancel := context.WithTimeout(context.Background(), 10*tickDuration+time.Second)
- _, err := kapi.Get(ctx, "0", &client.GetOptions{Quorum: true})
- cancel()
- if err == nil || strings.Contains(err.Error(), "Key not found") {
- break
- }
- }
-
- for lead == 0 || !possibleLead[lead] {
- lead = 0
- for _, m := range membs {
- select {
- case <-m.s.StopNotify():
- continue
- default:
- }
- if lead != 0 && lead != m.s.Lead() {
- lead = 0
- time.Sleep(10 * tickDuration)
- break
- }
- lead = m.s.Lead()
- }
- }
-
- for i, m := range membs {
- if uint64(m.s.ID()) == lead {
- return i
- }
- }
-
- return -1
-}
-
-func (c *cluster) WaitNoLeader() { c.waitNoLeader(c.Members) }
-
-// waitNoLeader waits until given members lose leader.
-func (c *cluster) waitNoLeader(membs []*member) {
- noLeader := false
- for !noLeader {
- noLeader = true
- for _, m := range membs {
- select {
- case <-m.s.StopNotify():
- continue
- default:
- }
- if m.s.Lead() != 0 {
- noLeader = false
- time.Sleep(10 * tickDuration)
- break
- }
- }
- }
-}
-
-func (c *cluster) waitVersion() {
- for _, m := range c.Members {
- for {
- if m.s.ClusterVersion() != nil {
- break
- }
- time.Sleep(tickDuration)
- }
- }
-}
-
-// isMembersEqual checks whether two member lists are equal, ignoring the ID field.
-// The given wmembs should always set the ID field to the empty string.
-func isMembersEqual(membs []client.Member, wmembs []client.Member) bool {
- sort.Sort(SortableMemberSliceByPeerURLs(membs))
- sort.Sort(SortableMemberSliceByPeerURLs(wmembs))
- for i := range membs {
- membs[i].ID = ""
- }
- return reflect.DeepEqual(membs, wmembs)
-}
-
-func newLocalListener(t testutil.TB) net.Listener {
- c := atomic.AddInt64(&localListenCount, 1)
- // Go 1.8+ allows only numbers in port
- addr := fmt.Sprintf("127.0.0.1:%05d%05d", c+basePort, os.Getpid())
- return NewListenerWithAddr(t, addr)
-}
-
-func NewListenerWithAddr(t testutil.TB, addr string) net.Listener {
- l, err := transport.NewUnixListener(addr)
- if err != nil {
- t.Fatal(err)
- }
- return l
-}
-
-type member struct {
- config.ServerConfig
- PeerListeners, ClientListeners []net.Listener
- grpcListener net.Listener
- // PeerTLSInfo enables peer TLS when set
- PeerTLSInfo *transport.TLSInfo
- // ClientTLSInfo enables client TLS when set
- ClientTLSInfo *transport.TLSInfo
- DialOptions []grpc.DialOption
-
- raftHandler *testutil.PauseableHandler
- s *etcdserver.EtcdServer
- serverClosers []func()
-
- grpcServerOpts []grpc.ServerOption
- grpcServer *grpc.Server
- grpcServerPeer *grpc.Server
- grpcAddr string
- grpcBridge *bridge
-
- // serverClient is a clientv3 that directly calls the etcdserver.
- serverClient *clientv3.Client
-
- keepDataDirTerminate bool
- clientMaxCallSendMsgSize int
- clientMaxCallRecvMsgSize int
- useIP bool
-
- isLearner bool
- closed bool
-}
-
-func (m *member) GRPCAddr() string { return m.grpcAddr }
-
-type memberConfig struct {
- name string
- peerTLS *transport.TLSInfo
- clientTLS *transport.TLSInfo
- authToken string
- quotaBackendBytes int64
- maxTxnOps uint
- maxRequestBytes uint
- snapshotCount uint64
- snapshotCatchUpEntries uint64
- grpcKeepAliveMinTime time.Duration
- grpcKeepAliveInterval time.Duration
- grpcKeepAliveTimeout time.Duration
- clientMaxCallSendMsgSize int
- clientMaxCallRecvMsgSize int
- useIP bool
- enableLeaseCheckpoint bool
- leaseCheckpointInterval time.Duration
- WatchProgressNotifyInterval time.Duration
-}
-
-// mustNewMember returns an initialized member with the given name. If peerTLS is
-// set, it will use the https scheme to communicate between peers.
-func mustNewMember(t testutil.TB, mcfg memberConfig) *member {
- var err error
- m := &member{}
-
- peerScheme := schemeFromTLSInfo(mcfg.peerTLS)
- clientScheme := schemeFromTLSInfo(mcfg.clientTLS)
-
- pln := newLocalListener(t)
- m.PeerListeners = []net.Listener{pln}
- m.PeerURLs, err = types.NewURLs([]string{peerScheme + "://" + pln.Addr().String()})
- if err != nil {
- t.Fatal(err)
- }
- m.PeerTLSInfo = mcfg.peerTLS
-
- cln := newLocalListener(t)
- m.ClientListeners = []net.Listener{cln}
- m.ClientURLs, err = types.NewURLs([]string{clientScheme + "://" + cln.Addr().String()})
- if err != nil {
- t.Fatal(err)
- }
- m.ClientTLSInfo = mcfg.clientTLS
-
- m.Name = mcfg.name
-
- m.DataDir, err = ioutil.TempDir(t.TempDir(), "etcd")
- if err != nil {
- t.Fatal(err)
- }
- clusterStr := fmt.Sprintf("%s=%s://%s", mcfg.name, peerScheme, pln.Addr().String())
- m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
- if err != nil {
- t.Fatal(err)
- }
- m.InitialClusterToken = clusterName
- m.NewCluster = true
- m.BootstrapTimeout = 10 * time.Millisecond
- if m.PeerTLSInfo != nil {
- m.ServerConfig.PeerTLSInfo = *m.PeerTLSInfo
- }
- m.ElectionTicks = electionTicks
- m.InitialElectionTickAdvance = true
- m.TickMs = uint(tickDuration / time.Millisecond)
- m.QuotaBackendBytes = mcfg.quotaBackendBytes
- m.MaxTxnOps = mcfg.maxTxnOps
- if m.MaxTxnOps == 0 {
- m.MaxTxnOps = embed.DefaultMaxTxnOps
- }
- m.MaxRequestBytes = mcfg.maxRequestBytes
- if m.MaxRequestBytes == 0 {
- m.MaxRequestBytes = embed.DefaultMaxRequestBytes
- }
- m.SnapshotCount = etcdserver.DefaultSnapshotCount
- if mcfg.snapshotCount != 0 {
- m.SnapshotCount = mcfg.snapshotCount
- }
- m.SnapshotCatchUpEntries = etcdserver.DefaultSnapshotCatchUpEntries
- if mcfg.snapshotCatchUpEntries != 0 {
- m.SnapshotCatchUpEntries = mcfg.snapshotCatchUpEntries
- }
-
- // for the purpose of integration testing, simple token is enough
- m.AuthToken = "simple"
- if mcfg.authToken != "" {
- m.AuthToken = mcfg.authToken
- }
-
- m.BcryptCost = uint(bcrypt.MinCost) // use min bcrypt cost to speed up integration testing
-
- m.grpcServerOpts = []grpc.ServerOption{}
- if mcfg.grpcKeepAliveMinTime > time.Duration(0) {
- m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
- MinTime: mcfg.grpcKeepAliveMinTime,
- PermitWithoutStream: false,
- }))
- }
- if mcfg.grpcKeepAliveInterval > time.Duration(0) &&
- mcfg.grpcKeepAliveTimeout > time.Duration(0) {
- m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveParams(keepalive.ServerParameters{
- Time: mcfg.grpcKeepAliveInterval,
- Timeout: mcfg.grpcKeepAliveTimeout,
- }))
- }
- m.clientMaxCallSendMsgSize = mcfg.clientMaxCallSendMsgSize
- m.clientMaxCallRecvMsgSize = mcfg.clientMaxCallRecvMsgSize
- m.useIP = mcfg.useIP
- m.EnableLeaseCheckpoint = mcfg.enableLeaseCheckpoint
- m.LeaseCheckpointInterval = mcfg.leaseCheckpointInterval
-
- m.WatchProgressNotifyInterval = mcfg.WatchProgressNotifyInterval
-
- m.InitialCorruptCheck = true
- m.WarningApplyDuration = embed.DefaultWarningApplyDuration
-
- m.V2Deprecation = config.V2_DEPR_DEFAULT
-
- m.Logger = memberLogger(t, mcfg.name)
- t.Cleanup(func() {
- // if we didn't clean up the logger, the subsequent test
- // might reuse this (t).
- raft.ResetDefaultLogger()
- })
- return m
-}
-
-func memberLogger(t testutil.TB, name string) *zap.Logger {
- level := zapcore.InfoLevel
- if os.Getenv("CLUSTER_DEBUG") != "" {
- level = zapcore.DebugLevel
- }
-
- options := zaptest.WrapOptions(zap.Fields(zap.String("member", name)))
- return zaptest.NewLogger(t, zaptest.Level(level), options).Named(name)
-}
-
-// listenGRPC starts a grpc server over a unix domain socket on the member
-func (m *member) listenGRPC() error {
- // prefix with localhost so cert has right domain
- m.grpcAddr = "localhost:" + m.Name
- m.Logger.Info("LISTEN GRPC", zap.String("m.grpcAddr", m.grpcAddr), zap.String("m.Name", m.Name))
- if m.useIP { // for IP-only TLS certs
- m.grpcAddr = "127.0.0.1:" + m.Name
- }
- l, err := transport.NewUnixListener(m.grpcAddr)
- if err != nil {
- return fmt.Errorf("listen failed on grpc socket %s (%v)", m.grpcAddr, err)
- }
- m.grpcBridge, err = newBridge(m.grpcAddr)
- if err != nil {
- l.Close()
- return err
- }
- m.grpcAddr = schemeFromTLSInfo(m.ClientTLSInfo) + "://" + m.grpcBridge.inaddr
- m.grpcListener = l
- return nil
-}
-
-func (m *member) ElectionTimeout() time.Duration {
- return time.Duration(m.s.Cfg.ElectionTicks*int(m.s.Cfg.TickMs)) * time.Millisecond
-}
-
-func (m *member) ID() types.ID { return m.s.ID() }
-
-func (m *member) DropConnections() { m.grpcBridge.Reset() }
-func (m *member) PauseConnections() { m.grpcBridge.Pause() }
-func (m *member) UnpauseConnections() { m.grpcBridge.Unpause() }
-func (m *member) Blackhole() { m.grpcBridge.Blackhole() }
-func (m *member) Unblackhole() { m.grpcBridge.Unblackhole() }
-
-// NewClientV3 creates a new grpc client connection to the member
-func NewClientV3(m *member) (*clientv3.Client, error) {
- if m.grpcAddr == "" {
- return nil, fmt.Errorf("member not configured for grpc")
- }
-
- cfg := clientv3.Config{
- Endpoints: []string{m.grpcAddr},
- DialTimeout: 5 * time.Second,
- DialOptions: []grpc.DialOption{grpc.WithBlock()},
- MaxCallSendMsgSize: m.clientMaxCallSendMsgSize,
- MaxCallRecvMsgSize: m.clientMaxCallRecvMsgSize,
- }
-
- if m.ClientTLSInfo != nil {
- tls, err := m.ClientTLSInfo.ClientConfig()
- if err != nil {
- return nil, err
- }
- cfg.TLS = tls
- }
- if m.DialOptions != nil {
- cfg.DialOptions = append(cfg.DialOptions, m.DialOptions...)
- }
- return newClientV3(cfg, m.Logger.Named("client"))
-}
-
-// Clone returns a member with the same server configuration. The returned
-// member will not set PeerListeners and ClientListeners.
-func (m *member) Clone(t testutil.TB) *member {
- mm := &member{}
- mm.ServerConfig = m.ServerConfig
-
- var err error
- clientURLStrs := m.ClientURLs.StringSlice()
- mm.ClientURLs, err = types.NewURLs(clientURLStrs)
- if err != nil {
- // this should never fail
- panic(err)
- }
- peerURLStrs := m.PeerURLs.StringSlice()
- mm.PeerURLs, err = types.NewURLs(peerURLStrs)
- if err != nil {
- // this should never fail
- panic(err)
- }
- clusterStr := m.InitialPeerURLsMap.String()
- mm.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
- if err != nil {
- // this should never fail
- panic(err)
- }
- mm.InitialClusterToken = m.InitialClusterToken
- mm.ElectionTicks = m.ElectionTicks
- mm.PeerTLSInfo = m.PeerTLSInfo
- mm.ClientTLSInfo = m.ClientTLSInfo
- mm.Logger = memberLogger(t, mm.Name+"c")
- return mm
-}
-
-// Launch starts a member based on ServerConfig, PeerListeners
-// and ClientListeners.
-func (m *member) Launch() error {
- m.Logger.Info(
- "launching a member",
- zap.String("name", m.Name),
- zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
- zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
- zap.String("grpc-address", m.grpcAddr),
- )
- var err error
- if m.s, err = etcdserver.NewServer(m.ServerConfig); err != nil {
- return fmt.Errorf("failed to initialize the etcd server: %v", err)
- }
- m.s.SyncTicker = time.NewTicker(500 * time.Millisecond)
- m.s.Start()
-
- var peerTLScfg *tls.Config
- if m.PeerTLSInfo != nil && !m.PeerTLSInfo.Empty() {
- if peerTLScfg, err = m.PeerTLSInfo.ServerConfig(); err != nil {
- return err
- }
- }
-
- if m.grpcListener != nil {
- var (
- tlscfg *tls.Config
- )
- if m.ClientTLSInfo != nil && !m.ClientTLSInfo.Empty() {
- tlscfg, err = m.ClientTLSInfo.ServerConfig()
- if err != nil {
- return err
- }
- }
- m.grpcServer = v3rpc.Server(m.s, tlscfg, m.grpcServerOpts...)
- m.grpcServerPeer = v3rpc.Server(m.s, peerTLScfg)
- m.serverClient = v3client.New(m.s)
- lockpb.RegisterLockServer(m.grpcServer, v3lock.NewLockServer(m.serverClient))
- epb.RegisterElectionServer(m.grpcServer, v3election.NewElectionServer(m.serverClient))
- go m.grpcServer.Serve(m.grpcListener)
- }
-
- m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.Logger, m.s)}
-
- h := (http.Handler)(m.raftHandler)
- if m.grpcListener != nil {
- h = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
- m.grpcServerPeer.ServeHTTP(w, r)
- } else {
- m.raftHandler.ServeHTTP(w, r)
- }
- })
- }
-
- for _, ln := range m.PeerListeners {
- cm := cmux.New(ln)
- // don't hang on matcher after closing listener
- cm.SetReadTimeout(time.Second)
-
- if m.grpcServer != nil {
- grpcl := cm.Match(cmux.HTTP2())
- go m.grpcServerPeer.Serve(grpcl)
- }
-
- // serve http1/http2 rafthttp/grpc
- ll := cm.Match(cmux.Any())
- if peerTLScfg != nil {
- if ll, err = transport.NewTLSListener(ll, m.PeerTLSInfo); err != nil {
- return err
- }
- }
- hs := &httptest.Server{
- Listener: ll,
- Config: &http.Server{
- Handler: h,
- TLSConfig: peerTLScfg,
- ErrorLog: log.New(ioutil.Discard, "net/http", 0),
- },
- TLS: peerTLScfg,
- }
- hs.Start()
-
- donec := make(chan struct{})
- go func() {
- defer close(donec)
- cm.Serve()
- }()
- closer := func() {
- ll.Close()
- hs.CloseClientConnections()
- hs.Close()
- <-donec
- }
- m.serverClosers = append(m.serverClosers, closer)
- }
- for _, ln := range m.ClientListeners {
- hs := &httptest.Server{
- Listener: ln,
- Config: &http.Server{
- Handler: v2http.NewClientHandler(
- m.Logger,
- m.s,
- m.ServerConfig.ReqTimeout(),
- ),
- ErrorLog: log.New(ioutil.Discard, "net/http", 0),
- },
- }
- if m.ClientTLSInfo == nil {
- hs.Start()
- } else {
- info := m.ClientTLSInfo
- hs.TLS, err = info.ServerConfig()
- if err != nil {
- return err
- }
-
- // baseConfig is called on initial TLS handshake start.
- //
- // Previously,
- // 1. Server has non-empty (*tls.Config).Certificates on client hello
- // 2. Server calls (*tls.Config).GetCertificate iff:
- // - Server's (*tls.Config).Certificates is not empty, or
- // - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName
- //
- // When (*tls.Config).Certificates is always populated on initial handshake,
- // client is expected to provide a valid matching SNI to pass the TLS
- // verification, thus trigger server (*tls.Config).GetCertificate to reload
- // TLS assets. However, a cert whose SAN field does not include domain names
- // but only IP addresses, has empty (*tls.ClientHelloInfo).ServerName, thus
- // it was never able to trigger TLS reload on initial handshake; first
- // certificate object was being used, never being updated.
- //
- // Now, (*tls.Config).Certificates is created empty on initial TLS client
- // handshake, in order to trigger (*tls.Config).GetCertificate and populate
- // rest of the certificates on every new TLS connection, even when client
- // SNI is empty (e.g. cert only includes IPs).
- //
- // This introduces another problem with "httptest.Server":
- // when server initial certificates are empty, certificates
- // are overwritten by Go's internal test certs, which have
- // different SAN fields (e.g. example.com). To work around,
- // re-overwrite (*tls.Config).Certificates before starting
- // test server.
- tlsCert, err := tlsutil.NewCert(info.CertFile, info.KeyFile, nil)
- if err != nil {
- return err
- }
- hs.TLS.Certificates = []tls.Certificate{*tlsCert}
-
- hs.StartTLS()
- }
- closer := func() {
- ln.Close()
- hs.CloseClientConnections()
- hs.Close()
- }
- m.serverClosers = append(m.serverClosers, closer)
- }
-
- m.Logger.Info(
- "launched a member",
- zap.String("name", m.Name),
- zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
- zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
- zap.String("grpc-address", m.grpcAddr),
- )
- return nil
-}
-
-func (m *member) WaitOK(t testutil.TB) {
- m.WaitStarted(t)
- for m.s.Leader() == 0 {
- time.Sleep(tickDuration)
- }
-}
-
-func (m *member) WaitStarted(t testutil.TB) {
- cc := MustNewHTTPClient(t, []string{m.URL()}, m.ClientTLSInfo)
- kapi := client.NewKeysAPI(cc)
- for {
- ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
- _, err := kapi.Get(ctx, "/", nil)
- if err != nil {
- time.Sleep(tickDuration)
- continue
- }
- cancel()
- break
- }
-}
-
-func WaitClientV3(t testutil.TB, kv clientv3.KV) {
- timeout := time.Now().Add(requestTimeout)
- var err error
- for time.Now().Before(timeout) {
- ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
- _, err = kv.Get(ctx, "/")
- cancel()
- if err == nil {
- return
- }
- time.Sleep(tickDuration)
- }
- if err != nil {
- t.Fatalf("timed out waiting for client: %v", err)
- }
-}
-
-func (m *member) URL() string { return m.ClientURLs[0].String() }
-
-func (m *member) Pause() {
- m.raftHandler.Pause()
- m.s.PauseSending()
-}
-
-func (m *member) Resume() {
- m.raftHandler.Resume()
- m.s.ResumeSending()
-}
-
-// Close stops the member's etcdserver and closes its connections
-func (m *member) Close() {
- if m.grpcBridge != nil {
- m.grpcBridge.Close()
- m.grpcBridge = nil
- }
- if m.serverClient != nil {
- m.serverClient.Close()
- m.serverClient = nil
- }
- if m.grpcServer != nil {
- ch := make(chan struct{})
- go func() {
- defer close(ch)
- // close listeners to stop accepting new connections,
- // will block on any existing transports
- m.grpcServer.GracefulStop()
- }()
- // wait until all pending RPCs are finished
- select {
- case <-ch:
- case <-time.After(2 * time.Second):
- // took too long, manually close open transports
- // e.g. watch streams
- m.grpcServer.Stop()
- <-ch
- }
- m.grpcServer = nil
- m.grpcServerPeer.GracefulStop()
- m.grpcServerPeer.Stop()
- m.grpcServerPeer = nil
- }
- if m.s != nil {
- m.s.HardStop()
- }
- for _, f := range m.serverClosers {
- f()
- }
- if !m.closed {
- // Avoid verification of the same file multiple times
- // (that might not exist any longer)
- verify.MustVerifyIfEnabled(verify.Config{
- Logger: m.Logger,
- DataDir: m.DataDir,
- ExactIndex: false,
- })
- }
- m.closed = true
-}
-
-// Stop stops the member, but the data dir of the member is preserved.
-func (m *member) Stop(_ testutil.TB) {
- m.Logger.Info(
- "stopping a member",
- zap.String("name", m.Name),
- zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
- zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
- zap.String("grpc-address", m.grpcAddr),
- )
- m.Close()
- m.serverClosers = nil
- m.Logger.Info(
- "stopped a member",
- zap.String("name", m.Name),
- zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
- zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
- zap.String("grpc-address", m.grpcAddr),
- )
-}
-
-// checkLeaderTransition waits for leader transition, returning the new leader ID.
-func checkLeaderTransition(m *member, oldLead uint64) uint64 {
- interval := time.Duration(m.s.Cfg.TickMs) * time.Millisecond
- for m.s.Lead() == 0 || (m.s.Lead() == oldLead) {
- time.Sleep(interval)
- }
- return m.s.Lead()
-}
-
-// StopNotify unblocks when a member stop completes
-func (m *member) StopNotify() <-chan struct{} {
- return m.s.StopNotify()
-}
-
-// Restart starts the member using the preserved data dir.
-func (m *member) Restart(t testutil.TB) error {
- m.Logger.Info(
- "restarting a member",
- zap.String("name", m.Name),
- zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
- zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
- zap.String("grpc-address", m.grpcAddr),
- )
- newPeerListeners := make([]net.Listener, 0)
- for _, ln := range m.PeerListeners {
- newPeerListeners = append(newPeerListeners, NewListenerWithAddr(t, ln.Addr().String()))
- }
- m.PeerListeners = newPeerListeners
- newClientListeners := make([]net.Listener, 0)
- for _, ln := range m.ClientListeners {
- newClientListeners = append(newClientListeners, NewListenerWithAddr(t, ln.Addr().String()))
- }
- m.ClientListeners = newClientListeners
-
- if m.grpcListener != nil {
- if err := m.listenGRPC(); err != nil {
- t.Fatal(err)
- }
- }
-
- err := m.Launch()
- m.Logger.Info(
- "restarted a member",
- zap.String("name", m.Name),
- zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
- zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
- zap.String("grpc-address", m.grpcAddr),
- zap.Error(err),
- )
- return err
-}
-
-// Terminate stops the member and removes the data dir.
-func (m *member) Terminate(t testutil.TB) {
- m.Logger.Info(
- "terminating a member",
- zap.String("name", m.Name),
- zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
- zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
- zap.String("grpc-address", m.grpcAddr),
- )
- m.Close()
- if !m.keepDataDirTerminate {
- if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil {
- t.Fatal(err)
- }
- }
- m.Logger.Info(
- "terminated a member",
- zap.String("name", m.Name),
- zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
- zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
- zap.String("grpc-address", m.grpcAddr),
- )
-}
-
-// Metric gets the metric value for a member
-func (m *member) Metric(metricName string, expectLabels ...string) (string, error) {
- cfgtls := transport.TLSInfo{}
- tr, err := transport.NewTimeoutTransport(cfgtls, time.Second, time.Second, time.Second)
- if err != nil {
- return "", err
- }
- cli := &http.Client{Transport: tr}
- resp, err := cli.Get(m.ClientURLs[0].String() + "/metrics")
- if err != nil {
- return "", err
- }
- defer resp.Body.Close()
- b, rerr := ioutil.ReadAll(resp.Body)
- if rerr != nil {
- return "", rerr
- }
- lines := strings.Split(string(b), "\n")
- for _, l := range lines {
- if !strings.HasPrefix(l, metricName) {
- continue
- }
- ok := true
- for _, lv := range expectLabels {
- if !strings.Contains(l, lv) {
- ok = false
- break
- }
- }
- if !ok {
- continue
- }
- return strings.Split(l, " ")[1], nil
- }
- return "", nil
-}
-
-// InjectPartition drops connections from m to others, vice versa.
-func (m *member) InjectPartition(t testutil.TB, others ...*member) {
- for _, other := range others {
- m.s.CutPeer(other.s.ID())
- other.s.CutPeer(m.s.ID())
- t.Logf("network partition injected between: %v <-> %v", m.s.ID(), other.s.ID())
- }
-}
-
-// RecoverPartition recovers connections from m to others, vice versa.
-func (m *member) RecoverPartition(t testutil.TB, others ...*member) {
- for _, other := range others {
- m.s.MendPeer(other.s.ID())
- other.s.MendPeer(m.s.ID())
- t.Logf("network partition between: %v <-> %v", m.s.ID(), other.s.ID())
- }
-}
-
-func (m *member) ReadyNotify() <-chan struct{} {
- return m.s.ReadyNotify()
-}
-
-func MustNewHTTPClient(t testutil.TB, eps []string, tls *transport.TLSInfo) client.Client {
- cfgtls := transport.TLSInfo{}
- if tls != nil {
- cfgtls = *tls
- }
- cfg := client.Config{Transport: mustNewTransport(t, cfgtls), Endpoints: eps}
- c, err := client.New(cfg)
- if err != nil {
- t.Fatal(err)
- }
- return c
-}
-
-func mustNewTransport(t testutil.TB, tlsInfo transport.TLSInfo) *http.Transport {
- // tick in integration test is short, so 1s dial timeout could play well.
- tr, err := transport.NewTimeoutTransport(tlsInfo, time.Second, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
- if err != nil {
- t.Fatal(err)
- }
- return tr
-}
-
-type SortableMemberSliceByPeerURLs []client.Member
-
-func (p SortableMemberSliceByPeerURLs) Len() int { return len(p) }
-func (p SortableMemberSliceByPeerURLs) Less(i, j int) bool {
- return p[i].PeerURLs[0] < p[j].PeerURLs[0]
-}
-func (p SortableMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-
-type ClusterV3 struct {
- *cluster
-
- mu sync.Mutex
- clients []*clientv3.Client
-}
-
-// NewClusterV3 returns a launched cluster with a grpc client connection
-// for each cluster member.
-func NewClusterV3(t testutil.TB, cfg *ClusterConfig) *ClusterV3 {
- t.Helper()
- testutil.SkipTestIfShortMode(t, "Cannot create clusters in --short tests")
-
- wd, err := os.Getwd()
- if err != nil {
- t.Fatal(err)
- }
- if !strings.HasPrefix(wd, os.TempDir()) {
- t.Errorf("Working directory '%s' expected to be in temp-dir ('%s')."+
- "Have you executed integration.BeforeTest(t) ?", wd, os.TempDir())
- }
-
- cfg.UseGRPC = true
-
- clus := &ClusterV3{
- cluster: NewClusterByConfig(t, cfg),
- }
- clus.Launch(t)
-
- if !cfg.SkipCreatingClient {
- for _, m := range clus.Members {
- client, err := NewClientV3(m)
- if err != nil {
- t.Fatalf("cannot create client: %v", err)
- }
- clus.clients = append(clus.clients, client)
- }
- }
-
- return clus
-}
-
-func (c *ClusterV3) TakeClient(idx int) {
- c.mu.Lock()
- c.clients[idx] = nil
- c.mu.Unlock()
-}
-
-func (c *ClusterV3) Terminate(t testutil.TB) {
- c.mu.Lock()
- for _, client := range c.clients {
- if client == nil {
- continue
- }
- if err := client.Close(); err != nil {
- t.Error(err)
- }
- }
- c.mu.Unlock()
- c.cluster.Terminate(t)
-}
-
-func (c *ClusterV3) RandClient() *clientv3.Client {
- return c.clients[rand.Intn(len(c.clients))]
-}
-
-func (c *ClusterV3) Client(i int) *clientv3.Client {
- return c.clients[i]
-}
-
-// NewClientV3 creates a new grpc client connection to the member
-func (c *ClusterV3) NewClientV3(memberIndex int) (*clientv3.Client, error) {
- return NewClientV3(c.Members[memberIndex])
-}
-
-func makeClients(t testutil.TB, clus *ClusterV3, clients *[]*clientv3.Client, chooseMemberIndex func() int) func() *clientv3.Client {
- var mu sync.Mutex
- *clients = nil
- return func() *clientv3.Client {
- cli, err := clus.NewClientV3(chooseMemberIndex())
- if err != nil {
- t.Fatalf("cannot create client: %v", err)
- }
- mu.Lock()
- *clients = append(*clients, cli)
- mu.Unlock()
- return cli
- }
-}
-
-// MakeSingleNodeClients creates factory of clients that all connect to member 0.
-// All the created clients are put on the 'clients' list. The factory is thread-safe.
-func MakeSingleNodeClients(t testutil.TB, clus *ClusterV3, clients *[]*clientv3.Client) func() *clientv3.Client {
- return makeClients(t, clus, clients, func() int { return 0 })
-}
-
-// MakeMultiNodeClients creates factory of clients that all connect to random members.
-// All the created clients are put on the 'clients' list. The factory is thread-safe.
-func MakeMultiNodeClients(t testutil.TB, clus *ClusterV3, clients *[]*clientv3.Client) func() *clientv3.Client {
- return makeClients(t, clus, clients, func() int { return rand.Intn(len(clus.Members)) })
-}
-
-// CloseClients closes all the clients from the 'clients' list.
-func CloseClients(t testutil.TB, clients []*clientv3.Client) {
- for _, cli := range clients {
- if err := cli.Close(); err != nil {
- t.Fatal(err)
- }
- }
-}
-
-type grpcAPI struct {
- // Cluster is the cluster API for the client's connection.
- Cluster pb.ClusterClient
- // KV is the keyvalue API for the client's connection.
- KV pb.KVClient
- // Lease is the lease API for the client's connection.
- Lease pb.LeaseClient
- // Watch is the watch API for the client's connection.
- Watch pb.WatchClient
- // Maintenance is the maintenance API for the client's connection.
- Maintenance pb.MaintenanceClient
- // Auth is the authentication API for the client's connection.
- Auth pb.AuthClient
- // Lock is the lock API for the client's connection.
- Lock lockpb.LockClient
- // Election is the election API for the client's connection.
- Election epb.ElectionClient
-}
-
-// GetLearnerMembers returns the list of learner members in cluster using MemberList API.
-func (c *ClusterV3) GetLearnerMembers() ([]*pb.Member, error) {
- cli := c.Client(0)
- resp, err := cli.MemberList(context.Background())
- if err != nil {
- return nil, fmt.Errorf("failed to list member %v", err)
- }
- var learners []*pb.Member
- for _, m := range resp.Members {
- if m.IsLearner {
- learners = append(learners, m)
- }
- }
- return learners, nil
-}
-
-// AddAndLaunchLearnerMember creates a learner member, adds it to the cluster
-// via v3 MemberAdd API, and then launches the new member.
-func (c *ClusterV3) AddAndLaunchLearnerMember(t testutil.TB) {
- m := c.mustNewMember(t)
- m.isLearner = true
-
- scheme := schemeFromTLSInfo(c.cfg.PeerTLS)
- peerURLs := []string{scheme + "://" + m.PeerListeners[0].Addr().String()}
-
- cli := c.Client(0)
- _, err := cli.MemberAddAsLearner(context.Background(), peerURLs)
- if err != nil {
- t.Fatalf("failed to add learner member %v", err)
- }
-
- m.InitialPeerURLsMap = types.URLsMap{}
- for _, mm := range c.Members {
- m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs
- }
- m.InitialPeerURLsMap[m.Name] = m.PeerURLs
- m.NewCluster = false
-
- if err := m.Launch(); err != nil {
- t.Fatal(err)
- }
-
- c.Members = append(c.Members, m)
-
- c.waitMembersMatch(t)
-}
-
-// getMembers returns a list of members in cluster, in format of etcdserverpb.Member
-func (c *ClusterV3) getMembers() []*pb.Member {
- var mems []*pb.Member
- for _, m := range c.Members {
- mem := &pb.Member{
- Name: m.Name,
- PeerURLs: m.PeerURLs.StringSlice(),
- ClientURLs: m.ClientURLs.StringSlice(),
- IsLearner: m.isLearner,
- }
- mems = append(mems, mem)
- }
- return mems
-}
-
-// waitMembersMatch waits until v3rpc MemberList returns the 'same' members info as the
-// local 'c.Members', which is the local recording of members in the testing cluster. With
-// the exception that the local recording c.Members does not have info on Member.ID, which
-// is generated when the member is added to the cluster.
-//
-// Note:
-// A successful match means the Member.clientURLs are matched. This means member has already
-// finished publishing its server attributes to cluster. Publishing attributes is a cluster-wide
-// write request (in v2 server). Therefore, at this point, any raft log entries prior to this
-// would have already been applied.
-//
-// If a new member was added to an existing cluster, at this point, it has finished publishing
-// its own server attributes to the cluster. And therefore by the same argument, it has already
-// applied the raft log entries (especially those of type raftpb.ConfChangeType). At this point,
-// the new member has the correct view of the cluster configuration.
-//
-// Special note on learner member:
-// Learner member is only added to a cluster via v3rpc MemberAdd API (as of v3.4). When starting
-// the learner member, its initial view of the cluster created by peerURLs map does not have info
-// on whether or not the new member itself is learner. But at this point, a successful match does
-// indicate that the new learner member has applied the raftpb.ConfChangeAddLearnerNode entry
-// which was used to add the learner itself to the cluster, and therefore it has the correct info
-// on learner.
-func (c *ClusterV3) waitMembersMatch(t testutil.TB) {
- wMembers := c.getMembers()
- sort.Sort(SortableProtoMemberSliceByPeerURLs(wMembers))
- cli := c.Client(0)
- for {
- resp, err := cli.MemberList(context.Background())
- if err != nil {
- t.Fatalf("failed to list member %v", err)
- }
-
- if len(resp.Members) != len(wMembers) {
- continue
- }
- sort.Sort(SortableProtoMemberSliceByPeerURLs(resp.Members))
- for _, m := range resp.Members {
- m.ID = 0
- }
- if reflect.DeepEqual(resp.Members, wMembers) {
- return
- }
-
- time.Sleep(tickDuration)
- }
-}
-
-type SortableProtoMemberSliceByPeerURLs []*pb.Member
-
-func (p SortableProtoMemberSliceByPeerURLs) Len() int { return len(p) }
-func (p SortableProtoMemberSliceByPeerURLs) Less(i, j int) bool {
- return p[i].PeerURLs[0] < p[j].PeerURLs[0]
-}
-func (p SortableProtoMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-
-// MustNewMember creates a new member instance based on the response of V3 Member Add API.
-func (c *ClusterV3) MustNewMember(t testutil.TB, resp *clientv3.MemberAddResponse) *member {
- m := c.mustNewMember(t)
- m.isLearner = resp.Member.IsLearner
- m.NewCluster = false
-
- m.InitialPeerURLsMap = types.URLsMap{}
- for _, mm := range c.Members {
- m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs
- }
- m.InitialPeerURLsMap[m.Name] = types.MustNewURLs(resp.Member.PeerURLs)
- c.Members = append(c.Members, m)
- return m
-}
diff --git a/tests/integration/cluster_direct.go b/tests/integration/cluster_direct.go
deleted file mode 100644
index 67daf7caeab..00000000000
--- a/tests/integration/cluster_direct.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !cluster_proxy
-// +build !cluster_proxy
-
-package integration
-
-import (
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
- "go.uber.org/zap"
-)
-
-const ThroughProxy = false
-
-func toGRPC(c *clientv3.Client) grpcAPI {
- return grpcAPI{
- pb.NewClusterClient(c.ActiveConnection()),
- pb.NewKVClient(c.ActiveConnection()),
- pb.NewLeaseClient(c.ActiveConnection()),
- pb.NewWatchClient(c.ActiveConnection()),
- pb.NewMaintenanceClient(c.ActiveConnection()),
- pb.NewAuthClient(c.ActiveConnection()),
- v3lockpb.NewLockClient(c.ActiveConnection()),
- v3electionpb.NewElectionClient(c.ActiveConnection()),
- }
-}
-
-func newClientV3(cfg clientv3.Config, lg *zap.Logger) (*clientv3.Client, error) {
- cfg.Logger = lg
- return clientv3.New(cfg)
-}
diff --git a/tests/integration/cluster_proxy.go b/tests/integration/cluster_proxy.go
deleted file mode 100644
index e8549eea3f7..00000000000
--- a/tests/integration/cluster_proxy.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build cluster_proxy
-// +build cluster_proxy
-
-package integration
-
-import (
- "context"
- "sync"
-
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/namespace"
- "go.etcd.io/etcd/server/v3/proxy/grpcproxy"
- "go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter"
- "go.uber.org/zap"
-)
-
-const ThroughProxy = true
-
-var (
- pmu sync.Mutex
- proxies map[*clientv3.Client]grpcClientProxy = make(map[*clientv3.Client]grpcClientProxy)
-)
-
-const proxyNamespace = "proxy-namespace"
-
-type grpcClientProxy struct {
- ctx context.Context
- ctxCancel func()
- grpc grpcAPI
- wdonec <-chan struct{}
- kvdonec <-chan struct{}
- lpdonec <-chan struct{}
-}
-
-func toGRPC(c *clientv3.Client) grpcAPI {
- pmu.Lock()
- defer pmu.Unlock()
-
- // dedicated context bound to 'grpc-proxy' lifetime
- // (so in practice lifetime of the client connection to the proxy).
- // TODO: Refactor to a separate clientv3.Client instance instead of the context alone.
- ctx, ctxCancel := context.WithCancel(context.WithValue(context.TODO(), "_name", "grpcProxyContext"))
-
- lg := c.GetLogger()
-
- if v, ok := proxies[c]; ok {
- return v.grpc
- }
-
- // test namespacing proxy
- c.KV = namespace.NewKV(c.KV, proxyNamespace)
- c.Watcher = namespace.NewWatcher(c.Watcher, proxyNamespace)
- c.Lease = namespace.NewLease(c.Lease, proxyNamespace)
- // test coalescing/caching proxy
- kvp, kvpch := grpcproxy.NewKvProxy(c)
- wp, wpch := grpcproxy.NewWatchProxy(ctx, lg, c)
- lp, lpch := grpcproxy.NewLeaseProxy(ctx, c)
- mp := grpcproxy.NewMaintenanceProxy(c)
- clp, _ := grpcproxy.NewClusterProxy(lg, c, "", "") // without registering proxy URLs
- authp := grpcproxy.NewAuthProxy(c)
- lockp := grpcproxy.NewLockProxy(c)
- electp := grpcproxy.NewElectionProxy(c)
-
- grpc := grpcAPI{
- adapter.ClusterServerToClusterClient(clp),
- adapter.KvServerToKvClient(kvp),
- adapter.LeaseServerToLeaseClient(lp),
- adapter.WatchServerToWatchClient(wp),
- adapter.MaintenanceServerToMaintenanceClient(mp),
- adapter.AuthServerToAuthClient(authp),
- adapter.LockServerToLockClient(lockp),
- adapter.ElectionServerToElectionClient(electp),
- }
- proxies[c] = grpcClientProxy{ctx: ctx, ctxCancel: ctxCancel, grpc: grpc, wdonec: wpch, kvdonec: kvpch, lpdonec: lpch}
- return grpc
-}
-
-type proxyCloser struct {
- clientv3.Watcher
- proxyCtxCancel func()
- wdonec <-chan struct{}
- kvdonec <-chan struct{}
- lclose func()
- lpdonec <-chan struct{}
-}
-
-func (pc *proxyCloser) Close() error {
- pc.proxyCtxCancel()
- <-pc.kvdonec
- err := pc.Watcher.Close()
- <-pc.wdonec
- pc.lclose()
- <-pc.lpdonec
- return err
-}
-
-func newClientV3(cfg clientv3.Config, lg *zap.Logger) (*clientv3.Client, error) {
- cfg.Logger = lg
- c, err := clientv3.New(cfg)
- if err != nil {
- return nil, err
- }
- rpc := toGRPC(c)
- c.KV = clientv3.NewKVFromKVClient(rpc.KV, c)
- pmu.Lock()
- lc := c.Lease
- c.Lease = clientv3.NewLeaseFromLeaseClient(rpc.Lease, c, cfg.DialTimeout)
- c.Watcher = &proxyCloser{
- Watcher: clientv3.NewWatchFromWatchClient(rpc.Watch, c),
- wdonec: proxies[c].wdonec,
- kvdonec: proxies[c].kvdonec,
- lclose: func() { lc.Close() },
- lpdonec: proxies[c].lpdonec,
- proxyCtxCancel: proxies[c].ctxCancel,
- }
- pmu.Unlock()
- return c, nil
-}
diff --git a/tests/integration/cluster_test.go b/tests/integration/cluster_test.go
index e25d77f211a..4aac7e2c824 100644
--- a/tests/integration/cluster_test.go
+++ b/tests/integration/cluster_test.go
@@ -25,8 +25,12 @@ import (
"testing"
"time"
- "go.etcd.io/etcd/client/v2"
+ "github.com/stretchr/testify/require"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
)
func init() {
@@ -34,7 +38,7 @@ func init() {
log.SetFlags(log.Ltime | log.Lmicroseconds | log.Lshortfile)
if t := os.Getenv("ETCD_ELECTION_TIMEOUT_TICKS"); t != "" {
if i, err := strconv.ParseInt(t, 10, 64); err == nil {
- electionTicks = int(i)
+ integration.ElectionTicks = int(i)
}
}
}
@@ -43,78 +47,25 @@ func TestClusterOf1(t *testing.T) { testCluster(t, 1) }
func TestClusterOf3(t *testing.T) { testCluster(t, 3) }
func testCluster(t *testing.T, size int) {
- BeforeTest(t)
- c := NewCluster(t, size)
- c.Launch(t)
+ integration.BeforeTest(t)
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: size})
defer c.Terminate(t)
clusterMustProgress(t, c.Members)
}
func TestTLSClusterOf3(t *testing.T) {
- BeforeTest(t)
- c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo})
- c.Launch(t)
+ integration.BeforeTest(t)
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, PeerTLS: &integration.TestTLSInfo})
defer c.Terminate(t)
clusterMustProgress(t, c.Members)
}
-// Test that a cluster can progress when using separate client and server certs when peering. This supports certificate
-// authorities that don't issue dual-usage certificates.
+// TestTLSClusterOf3WithSpecificUsage tests that a cluster can progress when
+// using separate client and server certs when peering. This supports
+// certificate authorities that don't issue dual-usage certificates.
func TestTLSClusterOf3WithSpecificUsage(t *testing.T) {
- BeforeTest(t)
- c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfoWithSpecificUsage})
- c.Launch(t)
- defer c.Terminate(t)
- clusterMustProgress(t, c.Members)
-}
-
-func TestClusterOf1UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 1) }
-func TestClusterOf3UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 3) }
-
-func testClusterUsingDiscovery(t *testing.T, size int) {
- BeforeTest(t)
- dc := NewCluster(t, 1)
- dc.Launch(t)
- defer dc.Terminate(t)
- // init discovery token space
- dcc := MustNewHTTPClient(t, dc.URLs(), nil)
- dkapi := client.NewKeysAPI(dcc)
- ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
- if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", size)); err != nil {
- t.Fatal(err)
- }
- cancel()
-
- c := NewClusterByConfig(
- t,
- &ClusterConfig{Size: size, DiscoveryURL: dc.URL(0) + "/v2/keys"},
- )
- c.Launch(t)
- defer c.Terminate(t)
- clusterMustProgress(t, c.Members)
-}
-
-func TestTLSClusterOf3UsingDiscovery(t *testing.T) {
- BeforeTest(t)
- dc := NewCluster(t, 1)
- dc.Launch(t)
- defer dc.Terminate(t)
- // init discovery token space
- dcc := MustNewHTTPClient(t, dc.URLs(), nil)
- dkapi := client.NewKeysAPI(dcc)
- ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
- if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", 3)); err != nil {
- t.Fatal(err)
- }
- cancel()
-
- c := NewClusterByConfig(t,
- &ClusterConfig{
- Size: 3,
- PeerTLS: &testTLSInfo,
- DiscoveryURL: dc.URL(0) + "/v2/keys"},
- )
- c.Launch(t)
+ integration.BeforeTest(t)
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, PeerTLS: &integration.TestTLSInfoWithSpecificUsage})
defer c.Terminate(t)
clusterMustProgress(t, c.Members)
}
@@ -123,9 +74,8 @@ func TestDoubleClusterSizeOf1(t *testing.T) { testDoubleClusterSize(t, 1) }
func TestDoubleClusterSizeOf3(t *testing.T) { testDoubleClusterSize(t, 3) }
func testDoubleClusterSize(t *testing.T, size int) {
- BeforeTest(t)
- c := NewCluster(t, size)
- c.Launch(t)
+ integration.BeforeTest(t)
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: size, DisableStrictReconfigCheck: true})
defer c.Terminate(t)
for i := 0; i < size; i++ {
@@ -135,9 +85,13 @@ func testDoubleClusterSize(t *testing.T, size int) {
}
func TestDoubleTLSClusterSizeOf3(t *testing.T) {
- BeforeTest(t)
- c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo})
- c.Launch(t)
+ integration.BeforeTest(t)
+ cfg := &integration.ClusterConfig{
+ Size: 1,
+ PeerTLS: &integration.TestTLSInfo,
+ DisableStrictReconfigCheck: true,
+ }
+ c := integration.NewCluster(t, cfg)
defer c.Terminate(t)
for i := 0; i < 3; i++ {
@@ -150,16 +104,15 @@ func TestDecreaseClusterSizeOf3(t *testing.T) { testDecreaseClusterSize(t, 3) }
func TestDecreaseClusterSizeOf5(t *testing.T) { testDecreaseClusterSize(t, 5) }
func testDecreaseClusterSize(t *testing.T, size int) {
- BeforeTest(t)
- c := NewCluster(t, size)
- c.Launch(t)
+ integration.BeforeTest(t)
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: size, DisableStrictReconfigCheck: true})
defer c.Terminate(t)
// TODO: remove the last but one member
for i := 0; i < size-1; i++ {
- id := c.Members[len(c.Members)-1].s.ID()
+ id := c.Members[len(c.Members)-1].Server.MemberID()
// may hit second leader election on slow machines
- if err := c.removeMember(t, uint64(id)); err != nil {
+ if err := c.RemoveMember(t, c.Members[0].Client, uint64(id)); err != nil {
if strings.Contains(err.Error(), "no leader") {
t.Logf("got leader error (%v)", err)
i--
@@ -167,26 +120,35 @@ func testDecreaseClusterSize(t *testing.T, size int) {
}
t.Fatal(err)
}
- c.waitLeader(t, c.Members)
+ c.WaitMembersForLeader(t, c.Members)
}
clusterMustProgress(t, c.Members)
}
func TestForceNewCluster(t *testing.T) {
- c := NewCluster(t, 3)
- c.Launch(t)
- cc := MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
- kapi := client.NewKeysAPI(cc)
- ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
- resp, err := kapi.Create(ctx, "/foo", "bar")
+ integration.BeforeTest(t)
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+ defer c.Terminate(t)
+
+ ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
+ resp, err := c.Members[0].Client.Put(ctx, "/foo", "bar")
if err != nil {
t.Fatalf("unexpected create error: %v", err)
}
cancel()
// ensure create has been applied in this machine
- ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
- if _, err = kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil {
- t.Fatalf("unexpected watch error: %v", err)
+ ctx, cancel = context.WithTimeout(context.Background(), integration.RequestTimeout)
+ watch := c.Members[0].Client.Watcher.Watch(ctx, "/foo", clientv3.WithRev(resp.Header.Revision-1))
+ for resp := range watch {
+ if len(resp.Events) != 0 {
+ break
+ }
+ if resp.Err() != nil {
+ t.Fatalf("unexpected watch error: %q", resp.Err())
+ }
+ if resp.Canceled {
+ t.Fatalf("watch cancelled")
+ }
}
cancel()
@@ -198,72 +160,73 @@ func TestForceNewCluster(t *testing.T) {
if err != nil {
t.Fatalf("unexpected ForceRestart error: %v", err)
}
- defer c.Members[0].Terminate(t)
- c.waitLeader(t, c.Members[:1])
+ c.WaitMembersForLeader(t, c.Members[:1])
// use new http client to init new connection
- cc = MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
- kapi = client.NewKeysAPI(cc)
- // ensure force restart keep the old data, and new cluster can make progress
- ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
- if _, err := kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil {
- t.Fatalf("unexpected watch error: %v", err)
+ // ensure force restart keeps the old data, and the new cluster can make progress
+ ctx, cancel = context.WithTimeout(context.Background(), integration.RequestTimeout)
+ watch = c.Members[0].Client.Watcher.Watch(ctx, "/foo", clientv3.WithRev(resp.Header.Revision-1))
+ for resp := range watch {
+ if len(resp.Events) != 0 {
+ break
+ }
+ if resp.Err() != nil {
+ t.Fatalf("unexpected watch error: %q", resp.Err())
+ }
+ if resp.Canceled {
+ t.Fatalf("watch cancelled")
+ }
}
cancel()
clusterMustProgress(t, c.Members[:1])
}
func TestAddMemberAfterClusterFullRotation(t *testing.T) {
- BeforeTest(t)
- c := NewCluster(t, 3)
- c.Launch(t)
+ integration.BeforeTest(t)
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
defer c.Terminate(t)
// remove all the previous three members and add in three new members.
for i := 0; i < 3; i++ {
- c.RemoveMember(t, uint64(c.Members[0].s.ID()))
- c.waitLeader(t, c.Members)
+ err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[1].Server.MemberID()))
+ require.NoError(t, err)
+ c.WaitMembersForLeader(t, c.Members)
c.AddMember(t)
- c.waitLeader(t, c.Members)
+ c.WaitMembersForLeader(t, c.Members)
}
c.AddMember(t)
- c.waitLeader(t, c.Members)
+ c.WaitMembersForLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}
-// Ensure we can remove a member then add a new one back immediately.
+// TestIssue2681 ensures we can remove a member then add a new one back immediately.
func TestIssue2681(t *testing.T) {
- BeforeTest(t)
- c := NewCluster(t, 5)
- c.Launch(t)
+ integration.BeforeTest(t)
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: 5, DisableStrictReconfigCheck: true})
defer c.Terminate(t)
- c.RemoveMember(t, uint64(c.Members[4].s.ID()))
- c.waitLeader(t, c.Members)
+ if err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[4].Server.MemberID())); err != nil {
+ t.Fatal(err)
+ }
+ c.WaitMembersForLeader(t, c.Members)
c.AddMember(t)
- c.waitLeader(t, c.Members)
+ c.WaitMembersForLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}
-// Ensure we can remove a member after a snapshot then add a new one back.
+// TestIssue2746 ensures we can remove a member after a snapshot then add a new one back.
func TestIssue2746(t *testing.T) { testIssue2746(t, 5) }
-// With 3 nodes TestIssue2476 sometimes had a shutdown with an inflight snapshot.
+// TestIssue2746WithThree runs the same scenario with 3 nodes; with 3 nodes, TestIssue2476 sometimes had a shutdown with an inflight snapshot.
func TestIssue2746WithThree(t *testing.T) { testIssue2746(t, 3) }
func testIssue2746(t *testing.T, members int) {
- BeforeTest(t)
- c := NewCluster(t, members)
-
- for _, m := range c.Members {
- m.SnapshotCount = 10
- }
-
- c.Launch(t)
+ integration.BeforeTest(t)
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: members, SnapshotCount: 10, DisableStrictReconfigCheck: true})
defer c.Terminate(t)
// force a snapshot
@@ -271,56 +234,56 @@ func testIssue2746(t *testing.T, members int) {
clusterMustProgress(t, c.Members)
}
- c.RemoveMember(t, uint64(c.Members[members-1].s.ID()))
- c.waitLeader(t, c.Members)
+ if err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[members-1].Server.MemberID())); err != nil {
+ t.Fatal(err)
+ }
+ c.WaitMembersForLeader(t, c.Members)
c.AddMember(t)
- c.waitLeader(t, c.Members)
+ c.WaitMembersForLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}
-// Ensure etcd will not panic when removing a just started member.
+// TestIssue2904 ensures etcd will not panic when removing a just started member.
func TestIssue2904(t *testing.T) {
- BeforeTest(t)
- // start 1-member cluster to ensure member 0 is the leader of the cluster.
- c := NewCluster(t, 1)
- c.Launch(t)
+ integration.BeforeTest(t)
+ // start a 2-member cluster to ensure member 0 is the leader of the cluster.
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: 2, UseBridge: true, DisableStrictReconfigCheck: true})
defer c.Terminate(t)
+ c.WaitLeader(t)
c.AddMember(t)
- c.Members[1].Stop(t)
+ c.Members[2].Stop(t)
- // send remove member-1 request to the cluster.
- cc := MustNewHTTPClient(t, c.URLs(), nil)
- ma := client.NewMembersAPI(cc)
- ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
+ // send a remove-member request to the cluster.
+ ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
// the proposal is not committed because member 1 is stopped, but the
- // proposal is appended to leader's raft log.
- ma.Remove(ctx, c.Members[1].s.ID().String())
+ // proposal is appended to the leader's raft log.
+ c.Members[0].Client.MemberRemove(ctx, uint64(c.Members[2].Server.MemberID()))
cancel()
// restart member, and expect it to send UpdateAttributes request.
// the log in the leader is like this:
// [..., remove 1, ..., update attr 1, ...]
- c.Members[1].Restart(t)
+ c.Members[2].Restart(t)
 // when the member comes back, it acks the proposal to remove itself,
// and apply it.
- <-c.Members[1].s.StopNotify()
+ <-c.Members[2].Server.StopNotify()
// terminate removed member
- c.Members[1].Terminate(t)
- c.Members = c.Members[:1]
+ c.Members[2].Client.Close()
+ c.Members[2].Terminate(t)
+ c.Members = c.Members[:2]
 // wait for the member to be removed.
- c.waitMembersMatch(t, c.HTTPMembers())
+ c.WaitMembersMatch(t, c.ProtoMembers())
}
// TestIssue3699 tests minority failure during cluster configuration; it was
// deadlocking.
func TestIssue3699(t *testing.T) {
- // start a cluster of 3 nodes a, b, c
- BeforeTest(t)
- c := NewCluster(t, 3)
- c.Launch(t)
+ // start a cluster of 3 nodes: a, b, c
+ integration.BeforeTest(t)
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true, DisableStrictReconfigCheck: true})
defer c.Terminate(t)
// make node a unavailable
@@ -329,67 +292,69 @@ func TestIssue3699(t *testing.T) {
// add node d
c.AddMember(t)
+ t.Logf("Disturbing cluster till member:3 will become a leader")
+
// electing node d as leader makes node a unable to participate
- leaderID := c.waitLeader(t, c.Members)
+ leaderID := c.WaitMembersForLeader(t, c.Members)
for leaderID != 3 {
c.Members[leaderID].Stop(t)
- <-c.Members[leaderID].s.StopNotify()
+ <-c.Members[leaderID].Server.StopNotify()
// do not restart the killed member immediately.
// the member will advance its election timeout after restart,
// so it will have a better chance to become the leader again.
- time.Sleep(time.Duration(electionTicks * int(tickDuration)))
+ time.Sleep(time.Duration(integration.ElectionTicks * int(config.TickDuration)))
c.Members[leaderID].Restart(t)
- leaderID = c.waitLeader(t, c.Members)
+ leaderID = c.WaitMembersForLeader(t, c.Members)
}
+ t.Logf("Finally elected member 3 as the leader.")
+
+ t.Logf("Restarting member '0'...")
// bring back node a
// node a will remain useless as long as d is the leader.
if err := c.Members[0].Restart(t); err != nil {
t.Fatal(err)
}
+ t.Logf("Restarted member '0'.")
+
select {
// waiting for ReadyNotify can take several seconds
case <-time.After(10 * time.Second):
t.Fatalf("waited too long for ready notification")
- case <-c.Members[0].s.StopNotify():
+ case <-c.Members[0].Server.StopNotify():
t.Fatalf("should not be stopped")
- case <-c.Members[0].s.ReadyNotify():
+ case <-c.Members[0].Server.ReadyNotify():
}
- // must waitLeader so goroutines don't leak on terminate
- c.waitLeader(t, c.Members)
+ // must WaitMembersForLeader so goroutines don't leak on terminate
+ c.WaitLeader(t)
- // try to participate in cluster
- cc := MustNewHTTPClient(t, []string{c.URL(0)}, c.cfg.ClientTLS)
- kapi := client.NewKeysAPI(cc)
- ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
- if _, err := kapi.Set(ctx, "/foo", "bar", nil); err != nil {
- t.Fatalf("unexpected error on Set (%v)", err)
+ t.Logf("Expecting successful put...")
+ // try to participate in the cluster
+ ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
+ if _, err := c.Members[0].Client.Put(ctx, "/foo", "bar"); err != nil {
+ t.Fatalf("unexpected error on Put (%v)", err)
}
cancel()
}
// TestRejectUnhealthyAdd ensures an unhealthy cluster rejects adding members.
func TestRejectUnhealthyAdd(t *testing.T) {
- BeforeTest(t)
- c := NewCluster(t, 3)
- for _, m := range c.Members {
- m.ServerConfig.StrictReconfigCheck = true
- }
- c.Launch(t)
+ integration.BeforeTest(t)
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer c.Terminate(t)
- // make cluster unhealthy and wait for downed peer
+ // make the cluster unhealthy and wait for the downed peer
c.Members[0].Stop(t)
c.WaitLeader(t)
// all attempts to add member should fail
for i := 1; i < len(c.Members); i++ {
- err := c.addMemberByURL(t, c.URL(i), "unix://foo:12345")
+ err := c.AddMemberByURL(t, c.Members[i].Client, "unix://foo:12345")
if err == nil {
t.Fatalf("should have failed adding peer")
}
// TODO: client should return descriptive error codes for internal errors
- if !strings.Contains(err.Error(), "has no leader") {
+ if !strings.Contains(err.Error(), "unhealthy cluster") {
t.Errorf("unexpected error (%v)", err)
}
}
@@ -399,50 +364,46 @@ func TestRejectUnhealthyAdd(t *testing.T) {
c.WaitLeader(t)
time.Sleep(2 * etcdserver.HealthInterval)
- // add member should succeed now that it's healthy
+ // add member should succeed now that it's healthy
var err error
for i := 1; i < len(c.Members); i++ {
- if err = c.addMemberByURL(t, c.URL(i), "unix://foo:12345"); err == nil {
+ if err = c.AddMemberByURL(t, c.Members[i].Client, "unix://foo:12345"); err == nil {
break
}
}
if err != nil {
- t.Fatalf("should have added peer to healthy cluster (%v)", err)
+ t.Fatalf("should have added peer to healthy Cluster (%v)", err)
}
}
// TestRejectUnhealthyRemove ensures an unhealthy cluster rejects removing members
// if quorum will be lost.
func TestRejectUnhealthyRemove(t *testing.T) {
- BeforeTest(t)
- c := NewCluster(t, 5)
- for _, m := range c.Members {
- m.ServerConfig.StrictReconfigCheck = true
- }
- c.Launch(t)
+ integration.BeforeTest(t)
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: 5, UseBridge: true})
defer c.Terminate(t)
// make cluster unhealthy and wait for downed peer; (3 up, 2 down)
c.Members[0].Stop(t)
c.Members[1].Stop(t)
- c.WaitLeader(t)
+ leader := c.WaitLeader(t)
// reject remove active member since (3,2)-(1,0) => (2,2) lacks quorum
- err := c.removeMember(t, uint64(c.Members[2].s.ID()))
+ err := c.RemoveMember(t, c.Members[leader].Client, uint64(c.Members[2].Server.MemberID()))
if err == nil {
- t.Fatalf("should reject quorum breaking remove")
+ t.Fatalf("should reject quorum breaking remove: %s", err)
}
// TODO: client should return more descriptive error codes for internal errors
- if !strings.Contains(err.Error(), "has no leader") {
+ if !strings.Contains(err.Error(), "unhealthy cluster") {
t.Errorf("unexpected error (%v)", err)
}
// member stopped after launch; wait for missing heartbeats
- time.Sleep(time.Duration(electionTicks * int(tickDuration)))
+ time.Sleep(time.Duration(integration.ElectionTicks * int(config.TickDuration)))
// permit remove dead member since (3,2) - (0,1) => (3,1) has quorum
- if err = c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
- t.Fatalf("should accept removing down member")
+ if err = c.RemoveMember(t, c.Members[2].Client, uint64(c.Members[0].Server.MemberID())); err != nil {
+ t.Fatalf("should accept removing down member: %s", err)
}
// bring cluster to (4,1)
@@ -452,7 +413,7 @@ func TestRejectUnhealthyRemove(t *testing.T) {
time.Sleep((3 * etcdserver.HealthInterval) / 2)
// accept remove member since (4,1)-(1,0) => (3,1) has quorum
- if err = c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
+ if err = c.RemoveMember(t, c.Members[1].Client, uint64(c.Members[0].Server.MemberID())); err != nil {
t.Fatalf("expected to remove member, got error %v", err)
}
}
@@ -461,41 +422,38 @@ func TestRejectUnhealthyRemove(t *testing.T) {
// if 'initial-cluster-state' is set 'new' and old data directory still exists
// (see https://github.com/etcd-io/etcd/issues/7512 for more).
func TestRestartRemoved(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- // 1. start single-member cluster
- c := NewCluster(t, 1)
- for _, m := range c.Members {
- m.ServerConfig.StrictReconfigCheck = true
- }
- c.Launch(t)
+ // 1. start a single-member cluster
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer c.Terminate(t)
// 2. add a new member
+ c.Cfg.DisableStrictReconfigCheck = true
c.AddMember(t)
c.WaitLeader(t)
- oldm := c.Members[0]
- oldm.keepDataDirTerminate = true
+ firstMember := c.Members[0]
+ firstMember.KeepDataDirTerminate = true
// 3. remove first member, shut down without deleting data
- if err := c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
+ if err := c.RemoveMember(t, c.Members[1].Client, uint64(firstMember.Server.MemberID())); err != nil {
t.Fatalf("expected to remove member, got error %v", err)
}
c.WaitLeader(t)
// 4. restart first member with 'initial-cluster-state=new'
// wrong config, expects exit within ReqTimeout
- oldm.ServerConfig.NewCluster = false
- if err := oldm.Restart(t); err != nil {
+ firstMember.ServerConfig.NewCluster = false
+ if err := firstMember.Restart(t); err != nil {
t.Fatalf("unexpected ForceRestart error: %v", err)
}
defer func() {
- oldm.Close()
- os.RemoveAll(oldm.ServerConfig.DataDir)
+ firstMember.Close()
+ os.RemoveAll(firstMember.ServerConfig.DataDir)
}()
select {
- case <-oldm.s.StopNotify():
+ case <-firstMember.Server.StopNotify():
case <-time.After(time.Minute):
t.Fatalf("removed member didn't exit within %v", time.Minute)
}
@@ -504,43 +462,47 @@ func TestRestartRemoved(t *testing.T) {
// clusterMustProgress ensures that cluster can make progress. It creates
 // a random key first, and checks that the new key can be read from all client URLs
// of the cluster.
-func clusterMustProgress(t *testing.T, membs []*member) {
- cc := MustNewHTTPClient(t, []string{membs[0].URL()}, nil)
- kapi := client.NewKeysAPI(cc)
+func clusterMustProgress(t *testing.T, members []*integration.Member) {
key := fmt.Sprintf("foo%d", rand.Int())
var (
err error
- resp *client.Response
+ resp *clientv3.PutResponse
)
// retry in case of leader loss induced by slow CI
for i := 0; i < 3; i++ {
- ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
- resp, err = kapi.Create(ctx, "/"+key, "bar")
+ ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
+ resp, err = members[0].Client.Put(ctx, key, "bar")
cancel()
if err == nil {
break
}
- t.Logf("failed to create key on %q (%v)", membs[0].URL(), err)
+ t.Logf("failed to create key on #0 (%v)", err)
}
if err != nil {
- t.Fatalf("create on %s error: %v", membs[0].URL(), err)
+ t.Fatalf("create on #0 error: %v", err)
}
- for i, m := range membs {
- u := m.URL()
- mcc := MustNewHTTPClient(t, []string{u}, nil)
- mkapi := client.NewKeysAPI(mcc)
- mctx, mcancel := context.WithTimeout(context.Background(), requestTimeout)
- if _, err := mkapi.Watcher(key, &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(mctx); err != nil {
- t.Fatalf("#%d: watch on %s error: %v", i, u, err)
+ for i, m := range members {
+ mctx, mcancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
+ watch := m.Client.Watcher.Watch(mctx, key, clientv3.WithRev(resp.Header.Revision-1))
+ for resp := range watch {
+ if len(resp.Events) != 0 {
+ break
+ }
+ if resp.Err() != nil {
+ t.Fatalf("#%d: watch error: %q", i, resp.Err())
+ }
+ if resp.Canceled {
+ t.Fatalf("#%d: watch: cancelled", i)
+ }
}
mcancel()
}
}
func TestSpeedyTerminate(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
// Stop/Restart so requests will time out on lost leaders
for i := 0; i < 3; i++ {
clus.Members[i].Stop(t)
@@ -553,7 +515,55 @@ func TestSpeedyTerminate(t *testing.T) {
}()
select {
case <-time.After(10 * time.Second):
- t.Fatalf("cluster took too long to terminate")
+ t.Fatalf("Cluster took too long to terminate")
case <-donec:
}
}
+
+// TestConcurrentRemoveMember demonstrated a panic in mayRemoveMember with
+// concurrent calls to MemberRemove. To reliably reproduce the panic, a delay
+// needed to be injected in IsMemberExist, which is done using a failpoint.
+// After fixing the bug, IsMemberExist is no longer called by mayRemoveMember.
+func TestConcurrentRemoveMember(t *testing.T) {
+ integration.BeforeTest(t, integration.WithFailpoint("afterIsMemberExist", `sleep("1s")`))
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
+ defer c.Terminate(t)
+
+ addResp, err := c.Members[0].Client.MemberAddAsLearner(context.Background(), []string{"http://localhost:123"})
+ if err != nil {
+ t.Fatal(err)
+ }
+ removeID := addResp.Member.ID
+ done := make(chan struct{})
+ go func() {
+ time.Sleep(time.Second / 2)
+ c.Members[0].Client.MemberRemove(context.Background(), removeID)
+ close(done)
+ }()
+ if _, err := c.Members[0].Client.MemberRemove(context.Background(), removeID); err != nil {
+ t.Fatal(err)
+ }
+ <-done
+}
+
+func TestConcurrentMoveLeader(t *testing.T) {
+ integration.BeforeTest(t, integration.WithFailpoint("afterIsMemberExist", `sleep("1s")`))
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
+ defer c.Terminate(t)
+
+ addResp, err := c.Members[0].Client.MemberAddAsLearner(context.Background(), []string{"http://localhost:123"})
+ if err != nil {
+ t.Fatal(err)
+ }
+ removeID := addResp.Member.ID
+ done := make(chan struct{})
+ go func() {
+ time.Sleep(time.Second / 2)
+ c.Members[0].Client.MoveLeader(context.Background(), removeID)
+ close(done)
+ }()
+ if _, err := c.Members[0].Client.MemberRemove(context.Background(), removeID); err != nil {
+ t.Fatal(err)
+ }
+ <-done
+}
diff --git a/tests/integration/corrupt_test.go b/tests/integration/corrupt_test.go
new file mode 100644
index 00000000000..ddd7234e2d1
--- /dev/null
+++ b/tests/integration/corrupt_test.go
@@ -0,0 +1,233 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package integration
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/server/v3/storage/mvcc/testutil"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
+)
+
+func TestPeriodicCheck(t *testing.T) {
+ integration.BeforeTest(t)
+
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
+ defer clus.Terminate(t)
+
+ cc, err := clus.ClusterClient(t)
+ require.NoError(t, err)
+
+ ctx := context.Background()
+
+ var totalRevisions int64 = 1210
+ var rev int64
+ for ; rev < totalRevisions; rev += testutil.CompactionCycle {
+ testPeriodicCheck(ctx, t, cc, clus, rev, rev+testutil.CompactionCycle)
+ }
+ testPeriodicCheck(ctx, t, cc, clus, rev, rev+totalRevisions)
+ alarmResponse, err := cc.AlarmList(ctx)
+ require.NoErrorf(t, err, "error on alarm list")
+ assert.Equal(t, []*etcdserverpb.AlarmMember(nil), alarmResponse.Alarms)
+}
+
+func testPeriodicCheck(ctx context.Context, t *testing.T, cc *clientv3.Client, clus *integration.Cluster, start, stop int64) {
+ for i := start; i <= stop; i++ {
+ if i%67 == 0 {
+ _, err := cc.Delete(ctx, testutil.PickKey(i+83))
+ require.NoErrorf(t, err, "error on delete")
+ } else {
+ _, err := cc.Put(ctx, testutil.PickKey(i), fmt.Sprint(i))
+ require.NoErrorf(t, err, "error on put")
+ }
+ }
+ err := clus.Members[0].Server.CorruptionChecker().PeriodicCheck()
+ assert.NoErrorf(t, err, "error on periodic check (rev %v)", stop)
+}
+
+func TestPeriodicCheckDetectsCorruption(t *testing.T) {
+ integration.BeforeTest(t)
+
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
+ defer clus.Terminate(t)
+
+ cc, err := clus.ClusterClient(t)
+ require.NoError(t, err)
+
+ ctx := context.Background()
+
+ for i := 0; i < 10; i++ {
+ _, err = cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i))
+ require.NoErrorf(t, err, "error on put")
+ }
+
+ err = clus.Members[0].Server.CorruptionChecker().PeriodicCheck()
+ require.NoErrorf(t, err, "error on periodic check")
+ clus.Members[0].Stop(t)
+ clus.WaitLeader(t)
+
+ err = testutil.CorruptBBolt(clus.Members[0].BackendPath())
+ require.NoError(t, err)
+
+ err = clus.Members[0].Restart(t)
+ require.NoError(t, err)
+ time.Sleep(50 * time.Millisecond)
+ leader := clus.WaitLeader(t)
+
+ err = clus.Members[leader].Server.CorruptionChecker().PeriodicCheck()
+ require.NoErrorf(t, err, "error on periodic check")
+ time.Sleep(50 * time.Millisecond)
+
+ alarmResponse, err := cc.AlarmList(ctx)
+ require.NoErrorf(t, err, "error on alarm list")
+ assert.Equal(t, []*etcdserverpb.AlarmMember{{Alarm: etcdserverpb.AlarmType_CORRUPT, MemberID: uint64(clus.Members[0].ID())}}, alarmResponse.Alarms)
+}
+
+func TestCompactHashCheck(t *testing.T) {
+ integration.BeforeTest(t)
+
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
+ defer clus.Terminate(t)
+
+ cc, err := clus.ClusterClient(t)
+ require.NoError(t, err)
+
+ ctx := context.Background()
+
+ var totalRevisions int64 = 1210
+ var rev int64
+ for ; rev < totalRevisions; rev += testutil.CompactionCycle {
+ testCompactionHash(ctx, t, cc, clus, rev, rev+testutil.CompactionCycle)
+ }
+ testCompactionHash(ctx, t, cc, clus, rev, rev+totalRevisions)
+}
+
+func testCompactionHash(ctx context.Context, t *testing.T, cc *clientv3.Client, clus *integration.Cluster, start, stop int64) {
+ for i := start; i <= stop; i++ {
+ if i%67 == 0 {
+ _, err := cc.Delete(ctx, testutil.PickKey(i+83))
+ require.NoErrorf(t, err, "error on delete")
+ } else {
+ _, err := cc.Put(ctx, testutil.PickKey(i), fmt.Sprint(i))
+ require.NoErrorf(t, err, "error on put")
+ }
+ }
+ _, err := cc.Compact(ctx, stop)
+ require.NoErrorf(t, err, "error on compact (rev %v)", stop)
+ // Wait for the compaction to be applied
+ time.Sleep(50 * time.Millisecond)
+
+ clus.Members[0].Server.CorruptionChecker().CompactHashCheck()
+}
+
+func TestCompactHashCheckDetectCorruption(t *testing.T) {
+ integration.BeforeTest(t)
+
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
+ defer clus.Terminate(t)
+
+ cc, err := clus.ClusterClient(t)
+ require.NoError(t, err)
+
+ ctx := context.Background()
+
+ for i := 0; i < 10; i++ {
+ _, err = cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i))
+ require.NoErrorf(t, err, "error on put")
+ }
+
+ clus.Members[0].Server.CorruptionChecker().CompactHashCheck()
+ clus.Members[0].Stop(t)
+ clus.WaitLeader(t)
+
+ err = testutil.CorruptBBolt(clus.Members[0].BackendPath())
+ require.NoError(t, err)
+
+ err = clus.Members[0].Restart(t)
+ require.NoError(t, err)
+ _, err = cc.Compact(ctx, 5)
+ require.NoError(t, err)
+ time.Sleep(50 * time.Millisecond)
+ leader := clus.WaitLeader(t)
+
+ clus.Members[leader].Server.CorruptionChecker().CompactHashCheck()
+ time.Sleep(50 * time.Millisecond)
+ alarmResponse, err := cc.AlarmList(ctx)
+ require.NoErrorf(t, err, "error on alarm list")
+ assert.Equal(t, []*etcdserverpb.AlarmMember{{Alarm: etcdserverpb.AlarmType_CORRUPT, MemberID: uint64(clus.Members[0].ID())}}, alarmResponse.Alarms)
+}
+
+func TestCompactHashCheckDetectMultipleCorruption(t *testing.T) {
+ integration.BeforeTest(t)
+
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 5})
+ defer clus.Terminate(t)
+
+ cc, err := clus.ClusterClient(t)
+ require.NoError(t, err)
+
+ ctx := context.Background()
+
+ for i := 0; i < 10; i++ {
+ _, err = cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i))
+ require.NoErrorf(t, err, "error on put")
+ }
+
+ clus.Members[0].Server.CorruptionChecker().CompactHashCheck()
+ clus.Members[0].Stop(t)
+ clus.Members[1].Server.CorruptionChecker().CompactHashCheck()
+ clus.Members[1].Stop(t)
+ clus.WaitLeader(t)
+
+ err = testutil.CorruptBBolt(clus.Members[0].BackendPath())
+ require.NoError(t, err)
+ err = testutil.CorruptBBolt(clus.Members[1].BackendPath())
+ require.NoError(t, err)
+
+ err = clus.Members[0].Restart(t)
+ require.NoError(t, err)
+ err = clus.Members[1].Restart(t)
+ require.NoError(t, err)
+
+ _, err = cc.Compact(ctx, 5)
+ require.NoError(t, err)
+ time.Sleep(50 * time.Millisecond)
+ leader := clus.WaitLeader(t)
+
+ clus.Members[leader].Server.CorruptionChecker().CompactHashCheck()
+ time.Sleep(50 * time.Millisecond)
+ alarmResponse, err := cc.AlarmList(ctx)
+ require.NoErrorf(t, err, "error on alarm list")
+
+ expectedAlarmMap := map[uint64]etcdserverpb.AlarmType{
+ uint64(clus.Members[0].ID()): etcdserverpb.AlarmType_CORRUPT,
+ uint64(clus.Members[1].ID()): etcdserverpb.AlarmType_CORRUPT,
+ }
+
+ actualAlarmMap := make(map[uint64]etcdserverpb.AlarmType)
+ for _, alarm := range alarmResponse.Alarms {
+ actualAlarmMap[alarm.MemberID] = alarm.Alarm
+ }
+
+ require.Equal(t, expectedAlarmMap, actualAlarmMap)
+}
diff --git a/tests/integration/embed/embed_proxy_test.go b/tests/integration/embed/embed_proxy_test.go
index 43a80dd1fa4..50f2a175fbd 100644
--- a/tests/integration/embed/embed_proxy_test.go
+++ b/tests/integration/embed/embed_proxy_test.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build cluster_proxy
-// +build cluster_proxy
 // The purpose of this (empty) package is to keep the following test working:
// # go test -tags=cluster_proxy ./integration/embed
diff --git a/tests/integration/embed/embed_test.go b/tests/integration/embed/embed_test.go
index c04bf97c961..7a3d11f1cd4 100644
--- a/tests/integration/embed/embed_test.go
+++ b/tests/integration/embed/embed_test.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build !cluster_proxy
-// +build !cluster_proxy
// Keep the test in a separate package from other tests such that
// .setupLogging method does not race with other (previously running) servers (grpclog is global).
@@ -30,21 +29,23 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
"go.etcd.io/etcd/client/pkg/v3/testutil"
"go.etcd.io/etcd/client/pkg/v3/transport"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/embed"
- "go.etcd.io/etcd/tests/v3/integration"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
)
-var (
- testTLSInfo = transport.TLSInfo{
- KeyFile: integration.MustAbsPath("../../fixtures/server.key.insecure"),
- CertFile: integration.MustAbsPath("../../fixtures/server.crt"),
- TrustedCAFile: integration.MustAbsPath("../../fixtures/ca.crt"),
- ClientCertAuth: true,
- }
-)
+var testTLSInfo = transport.TLSInfo{
+ KeyFile: testutils.MustAbsPath("../../fixtures/server.key.insecure"),
+ CertFile: testutils.MustAbsPath("../../fixtures/server.crt"),
+ TrustedCAFile: testutils.MustAbsPath("../../fixtures/ca.crt"),
+ ClientCertAuth: true,
+}
func TestEmbedEtcd(t *testing.T) {
testutil.SkipTestIfShortMode(t, "Cannot start embedded cluster in --short tests")
@@ -78,7 +79,7 @@ func TestEmbedEtcd(t *testing.T) {
tests[0].cfg.Durl = "abc"
setupEmbedCfg(&tests[1].cfg, []url.URL{urls[0]}, []url.URL{urls[1]})
- tests[1].cfg.ACUrls = nil
+ tests[1].cfg.AdvertiseClientUrls = nil
tests[2].cfg.TickMs = tests[2].cfg.ElectionMs - 1
tests[3].cfg.ElectionMs = 999999
setupEmbedCfg(&tests[4].cfg, []url.URL{urls[2]}, []url.URL{urls[3]})
@@ -86,10 +87,10 @@ func TestEmbedEtcd(t *testing.T) {
setupEmbedCfg(&tests[6].cfg, []url.URL{urls[7], urls[8]}, []url.URL{urls[9]})
dnsURL, _ := url.Parse("http://whatever.test:12345")
- tests[7].cfg.LCUrls = []url.URL{*dnsURL}
- tests[8].cfg.LPUrls = []url.URL{*dnsURL}
+ tests[7].cfg.ListenClientUrls = []url.URL{*dnsURL}
+ tests[8].cfg.ListenPeerUrls = []url.URL{*dnsURL}
- dir := filepath.Join(t.TempDir(), fmt.Sprintf("embed-etcd"))
+ dir := filepath.Join(t.TempDir(), "embed-etcd")
for i, tt := range tests {
tests[i].cfg.Dir = dir
@@ -143,12 +144,10 @@ func testEmbedEtcdGracefulStop(t *testing.T, secure bool) {
urls := newEmbedURLs(secure, 2)
setupEmbedCfg(cfg, []url.URL{urls[0]}, []url.URL{urls[1]})
- cfg.Dir = filepath.Join(t.TempDir(), fmt.Sprintf("embed-etcd"))
+ cfg.Dir = filepath.Join(t.TempDir(), "embed-etcd")
e, err := embed.StartEtcd(cfg)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
<-e.Server.ReadyNotify() // wait for e.Server to join the cluster
clientCfg := clientv3.Config{
@@ -156,14 +155,10 @@ func testEmbedEtcdGracefulStop(t *testing.T, secure bool) {
}
if secure {
clientCfg.TLS, err = testTLSInfo.ClientConfig()
- if err != nil {
- t.Fatal(err)
- }
- }
- cli, err := integration.NewClient(t, clientCfg)
- if err != nil {
- t.Fatal(err)
+ require.NoError(t, err)
}
+ cli, err := integration2.NewClient(t, clientCfg)
+ require.NoError(t, err)
defer cli.Close()
// open watch connection
@@ -180,9 +175,7 @@ func testEmbedEtcdGracefulStop(t *testing.T, secure bool) {
t.Fatalf("took too long to close server")
}
err = <-e.Err()
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
}
func newEmbedURLs(secure bool, n int) (urls []url.URL) {
@@ -202,11 +195,27 @@ func setupEmbedCfg(cfg *embed.Config, curls []url.URL, purls []url.URL) {
cfg.LogOutputs = []string{"/dev/null"}
cfg.ClusterState = "new"
- cfg.LCUrls, cfg.ACUrls = curls, curls
- cfg.LPUrls, cfg.APUrls = purls, purls
+ cfg.ListenClientUrls, cfg.AdvertiseClientUrls = curls, curls
+ cfg.ListenPeerUrls, cfg.AdvertisePeerUrls = purls, purls
cfg.InitialCluster = ""
for i := range purls {
cfg.InitialCluster += ",default=" + purls[i].String()
}
cfg.InitialCluster = cfg.InitialCluster[1:]
}
+
+func TestEmbedEtcdAutoCompactionRetentionRetained(t *testing.T) {
+ cfg := embed.NewConfig()
+ urls := newEmbedURLs(false, 2)
+ setupEmbedCfg(cfg, []url.URL{urls[0]}, []url.URL{urls[1]})
+ cfg.Dir = filepath.Join(t.TempDir(), "embed-etcd")
+
+ cfg.AutoCompactionRetention = "2"
+
+ e, err := embed.StartEtcd(cfg)
+ require.NoError(t, err)
+ autoCompactionRetention := e.Server.Cfg.AutoCompactionRetention
+ durationToCompare, _ := time.ParseDuration("2h0m0s")
+ assert.Equal(t, durationToCompare, autoCompactionRetention)
+ e.Close()
+}
diff --git a/tests/integration/fixtures-expired/gencerts.sh b/tests/integration/fixtures-expired/gencerts.sh
index aecdd423bba..8eea747ffbd 100755
--- a/tests/integration/fixtures-expired/gencerts.sh
+++ b/tests/integration/fixtures-expired/gencerts.sh
@@ -1,12 +1,21 @@
#!/bin/bash
+set -euo pipefail
+
if ! [[ "$0" =~ "./gencerts.sh" ]]; then
echo "must be run from 'fixtures'"
exit 255
fi
-if ! which cfssl; then
+if ! command -v cfssl; then
echo "cfssl is not installed"
+ echo 'use: bash -c "cd ../../../tools/mod; go install github.com/cloudflare/cfssl/cmd/cfssl"'
+ exit 255
+fi
+
+if ! command -v cfssljson; then
+ echo "cfssljson is not installed"
+ echo 'use: bash -c "cd ../../../tools/mod; go install github.com/cloudflare/cfssl/cmd/cfssljson"'
exit 255
fi
diff --git a/tests/integration/grpc_test.go b/tests/integration/grpc_test.go
new file mode 100644
index 00000000000..7061ed61e63
--- /dev/null
+++ b/tests/integration/grpc_test.go
@@ -0,0 +1,186 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package integration
+
+import (
+ "context"
+ tls "crypto/tls"
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ "google.golang.org/grpc"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
+)
+
+func TestAuthority(t *testing.T) {
+ tcs := []struct {
+ name string
+ useTCP bool
+ useTLS bool
+ clientURLPattern string
+ expectAuthorityPattern string
+ }{
+ {
+ name: "unix:path",
+ clientURLPattern: "unix:localhost:${MEMBER_NAME}",
+ expectAuthorityPattern: "localhost:${MEMBER_NAME}",
+ },
+ {
+ name: "unix://absolute_path",
+ clientURLPattern: "unix://localhost:${MEMBER_NAME}",
+ expectAuthorityPattern: "localhost:${MEMBER_NAME}",
+ },
+ // "unixs" is not standard schema supported by etcd
+ {
+ name: "unixs:absolute_path",
+ useTLS: true,
+ clientURLPattern: "unixs:localhost:${MEMBER_NAME}",
+ expectAuthorityPattern: "localhost:${MEMBER_NAME}",
+ },
+ {
+ name: "unixs://absolute_path",
+ useTLS: true,
+ clientURLPattern: "unixs://localhost:${MEMBER_NAME}",
+ expectAuthorityPattern: "localhost:${MEMBER_NAME}",
+ },
+ {
+ name: "http://domain[:port]",
+ useTCP: true,
+ clientURLPattern: "http://localhost:${MEMBER_PORT}",
+ expectAuthorityPattern: "localhost:${MEMBER_PORT}",
+ },
+ {
+ name: "https://domain[:port]",
+ useTLS: true,
+ useTCP: true,
+ clientURLPattern: "https://localhost:${MEMBER_PORT}",
+ expectAuthorityPattern: "localhost:${MEMBER_PORT}",
+ },
+ {
+ name: "http://address[:port]",
+ useTCP: true,
+ clientURLPattern: "http://127.0.0.1:${MEMBER_PORT}",
+ expectAuthorityPattern: "127.0.0.1:${MEMBER_PORT}",
+ },
+ {
+ name: "https://address[:port]",
+ useTCP: true,
+ useTLS: true,
+ clientURLPattern: "https://127.0.0.1:${MEMBER_PORT}",
+ expectAuthorityPattern: "127.0.0.1:${MEMBER_PORT}",
+ },
+ }
+ for _, tc := range tcs {
+ for _, clusterSize := range []int{1, 3} {
+ t.Run(fmt.Sprintf("Size: %d, Scenario: %q", clusterSize, tc.name), func(t *testing.T) {
+ integration.BeforeTest(t)
+ cfg := integration.ClusterConfig{
+ Size: clusterSize,
+ UseTCP: tc.useTCP,
+ UseIP: tc.useTCP,
+ }
+ cfg, tlsConfig := setupTLS(t, tc.useTLS, cfg)
+ clus := integration.NewCluster(t, &cfg)
+ defer clus.Terminate(t)
+
+ kv := setupClient(t, tc.clientURLPattern, clus, tlsConfig)
+ defer kv.Close()
+
+ putRequestMethod := "/etcdserverpb.KV/Put"
+ for i := 0; i < 100; i++ {
+ _, err := kv.Put(context.TODO(), "foo", "bar")
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ assertAuthority(t, tc.expectAuthorityPattern, clus, putRequestMethod)
+ })
+ }
+ }
+}
+
+func setupTLS(t *testing.T, useTLS bool, cfg integration.ClusterConfig) (integration.ClusterConfig, *tls.Config) {
+ t.Helper()
+ if useTLS {
+ cfg.ClientTLS = &integration.TestTLSInfo
+ tlsConfig, err := integration.TestTLSInfo.ClientConfig()
+ if err != nil {
+ t.Fatal(err)
+ }
+ return cfg, tlsConfig
+ }
+ return cfg, nil
+}
+
+func setupClient(t *testing.T, endpointPattern string, clus *integration.Cluster, tlsConfig *tls.Config) *clientv3.Client {
+ t.Helper()
+ endpoints := templateEndpoints(t, endpointPattern, clus)
+ kv, err := clientv3.New(clientv3.Config{
+ Endpoints: endpoints,
+ DialTimeout: 5 * time.Second,
+ DialOptions: []grpc.DialOption{grpc.WithBlock()},
+ TLS: tlsConfig,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ return kv
+}
+
+func templateEndpoints(t *testing.T, pattern string, clus *integration.Cluster) []string {
+ t.Helper()
+ var endpoints []string
+ for _, m := range clus.Members {
+ ent := pattern
+ ent = strings.ReplaceAll(ent, "${MEMBER_PORT}", m.GRPCPortNumber())
+ ent = strings.ReplaceAll(ent, "${MEMBER_NAME}", m.Name)
+ endpoints = append(endpoints, ent)
+ }
+ return endpoints
+}
+
+func templateAuthority(t *testing.T, pattern string, m *integration.Member) string {
+ t.Helper()
+ authority := pattern
+ authority = strings.ReplaceAll(authority, "${MEMBER_PORT}", m.GRPCPortNumber())
+ authority = strings.ReplaceAll(authority, "${MEMBER_NAME}", m.Name)
+ return authority
+}
+
+func assertAuthority(t *testing.T, expectedAuthorityPattern string, clus *integration.Cluster, filterMethod string) {
+ t.Helper()
+ for _, m := range clus.Members {
+ requestsFound := 0
+ expectedAuthority := templateAuthority(t, expectedAuthorityPattern, m)
+ for _, r := range m.RecordedRequests() {
+ if filterMethod != "" && r.FullMethod != filterMethod {
+ continue
+ }
+ if r.Authority == expectedAuthority {
+ requestsFound++
+ } else {
+ t.Errorf("Got unexpected authority header, member: %q, request: %q, got authority: %q, expected %q", m.Name, r.FullMethod, r.Authority, expectedAuthority)
+ }
+ }
+ if requestsFound == 0 {
+ t.Errorf("Expect at least one request with matched authority header value was recorded by the server intercepter on member %s but got 0", m.Name)
+ }
+ }
+}
diff --git a/tests/integration/hashkv_test.go b/tests/integration/hashkv_test.go
new file mode 100644
index 00000000000..cbc83d0159e
--- /dev/null
+++ b/tests/integration/hashkv_test.go
@@ -0,0 +1,85 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package integration
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "testing"
+ "time"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/storage/mvcc/testutil"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
+)
+
+// TestCompactionHash tests the compaction hash
+// TODO: Change this to fuzz test
+func TestCompactionHash(t *testing.T) {
+ integration2.BeforeTest(t)
+
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
+ defer clus.Terminate(t)
+
+ cc, err := clus.ClusterClient(t)
+ if err != nil {
+ t.Fatal(err)
+ }
+ client := &http.Client{
+ Transport: &http.Transport{
+ DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
+ return net.Dial("unix", clus.Members[0].PeerURLs[0].Host)
+ },
+ },
+ }
+
+ testutil.TestCompactionHash(context.Background(), t, hashTestCase{cc, clus.Members[0].GRPCURL, client, clus.Members[0].Server}, 1000)
+}
+
+type hashTestCase struct {
+ *clientv3.Client
+ url string
+ http *http.Client
+ server *etcdserver.EtcdServer
+}
+
+func (tc hashTestCase) Put(ctx context.Context, key, value string) error {
+ _, err := tc.Client.Put(ctx, key, value)
+ return err
+}
+
+func (tc hashTestCase) Delete(ctx context.Context, key string) error {
+ _, err := tc.Client.Delete(ctx, key)
+ return err
+}
+
+func (tc hashTestCase) HashByRev(ctx context.Context, rev int64) (testutil.KeyValueHash, error) {
+ resp, err := etcdserver.HashByRev(ctx, tc.server.Cluster().ID(), tc.http, "http://unix", rev)
+ return testutil.KeyValueHash{Hash: resp.Hash, CompactRevision: resp.CompactRevision, Revision: resp.Header.Revision}, err
+}
+
+func (tc hashTestCase) Defrag(ctx context.Context) error {
+ _, err := tc.Client.Defragment(ctx, tc.url)
+ return err
+}
+
+func (tc hashTestCase) Compact(ctx context.Context, rev int64) error {
+ _, err := tc.Client.Compact(ctx, rev)
+ // Wait for the compaction to be applied on the backend
+ time.Sleep(50 * time.Millisecond)
+ return err
+}
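hashTestCase is an adapter: it wraps the integration client, the unix-socket HTTP client, and the embedded server so the shared testutil.TestCompactionHash helper can drive them through a narrow interface. A hedged sketch of that adapter shape with hypothetical names (not the real testutil interface):

```go
package main

import (
    "context"
    "fmt"
)

// kvBackend is an illustrative stand-in for the interface a shared test
// helper (such as testutil.TestCompactionHash) might require.
type kvBackend interface {
    Put(ctx context.Context, key, value string) error
    Compact(ctx context.Context, rev int64) error
}

// fakeClient is a hypothetical concrete client with a different method set.
type fakeClient struct{ data map[string]string }

func (c *fakeClient) PutKV(_ context.Context, k, v string) error { c.data[k] = v; return nil }
func (c *fakeClient) CompactTo(_ context.Context, rev int64) error {
    fmt.Println("compacted to rev", rev)
    return nil
}

// adapter bridges fakeClient to kvBackend, mirroring how hashTestCase wraps
// the integration client and server for the shared hash test.
type adapter struct{ c *fakeClient }

func (a adapter) Put(ctx context.Context, k, v string) error   { return a.c.PutKV(ctx, k, v) }
func (a adapter) Compact(ctx context.Context, rev int64) error { return a.c.CompactTo(ctx, rev) }

// runSharedCheck plays the role of the reusable helper that only sees kvBackend.
func runSharedCheck(ctx context.Context, b kvBackend) error {
    if err := b.Put(ctx, "foo", "bar"); err != nil {
        return err
    }
    return b.Compact(ctx, 1)
}

func main() {
    c := &fakeClient{data: map[string]string{}}
    if err := runSharedCheck(context.Background(), adapter{c}); err != nil {
        fmt.Println("error:", err)
    }
}
```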
diff --git a/tests/integration/lazy_cluster.go b/tests/integration/lazy_cluster.go
index 4cc7ae765d3..e27677c71e9 100644
--- a/tests/integration/lazy_cluster.go
+++ b/tests/integration/lazy_cluster.go
@@ -22,6 +22,7 @@ import (
"go.etcd.io/etcd/client/pkg/v3/testutil"
"go.etcd.io/etcd/client/pkg/v3/transport"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
)
// Infrastructure to provision a single shared cluster for tests - only
@@ -30,19 +31,19 @@ import (
// See ./tests/integration/clientv3/examples/main_test.go for canonical usage.
// Please notice that the shared (LazyCluster's) state is preserved between
// testcases, so left-over state might have cross-testcase effects.
-// Prefer dedicated clusters for substancial test-cases.
+// Prefer dedicated clusters for substantial test-cases.
type LazyCluster interface {
- // EndpointsV2 - exposes connection points for client v2.
+ // EndpointsHTTP - exposes connection points for http endpoints.
// Calls to this method might initialize the cluster.
- EndpointsV2() []string
+ EndpointsHTTP() []string
- // EndpointsV3 - exposes connection points for client v3.
+ // EndpointsGRPC - exposes connection points for client v3.
// Calls to this method might initialize the cluster.
- EndpointsV3() []string
+ EndpointsGRPC() []string
// Cluster - calls to this method might initialize the cluster.
- Cluster() *ClusterV3
+ Cluster() *integration.Cluster
// Transport - call to this method might initialize the cluster.
Transport() *http.Transport
@@ -53,8 +54,8 @@ type LazyCluster interface {
}
type lazyCluster struct {
- cfg ClusterConfig
- cluster *ClusterV3
+ cfg integration.ClusterConfig
+ cluster *integration.Cluster
transport *http.Transport
once sync.Once
tb testutil.TB
@@ -64,29 +65,34 @@ type lazyCluster struct {
// NewLazyCluster returns a new test cluster handler that gets created on the
// first call to GetEndpoints() or GetTransport()
func NewLazyCluster() LazyCluster {
- return NewLazyClusterWithConfig(ClusterConfig{Size: 1})
+ return NewLazyClusterWithConfig(integration.ClusterConfig{Size: 1})
}
// NewLazyClusterWithConfig returns a new test cluster handler that gets created
// on the first call to GetEndpoints() or GetTransport()
-func NewLazyClusterWithConfig(cfg ClusterConfig) LazyCluster {
+func NewLazyClusterWithConfig(cfg integration.ClusterConfig) LazyCluster {
tb, closer := testutil.NewTestingTBProthesis("lazy_cluster")
return &lazyCluster{cfg: cfg, tb: tb, closer: closer}
}
func (lc *lazyCluster) mustLazyInit() {
lc.once.Do(func() {
+ lc.tb.Logf("LazyIniting ...")
var err error
lc.transport, err = transport.NewTransport(transport.TLSInfo{}, time.Second)
if err != nil {
log.Fatal(err)
}
- lc.cluster = NewClusterV3(lc.tb, &lc.cfg)
+ lc.cluster = integration.NewCluster(lc.tb, &lc.cfg)
+ lc.tb.Logf("LazyIniting [Done]")
})
}
func (lc *lazyCluster) Terminate() {
- lc.tb.Logf("Terminating...")
+ if lc != nil && lc.tb != nil {
+ lc.tb.Logf("Terminating...")
+ }
+
if lc != nil && lc.cluster != nil {
lc.cluster.Terminate(nil)
lc.cluster = nil
@@ -97,15 +103,15 @@ func (lc *lazyCluster) Terminate() {
}
}
-func (lc *lazyCluster) EndpointsV2() []string {
+func (lc *lazyCluster) EndpointsHTTP() []string {
return []string{lc.Cluster().Members[0].URL()}
}
-func (lc *lazyCluster) EndpointsV3() []string {
+func (lc *lazyCluster) EndpointsGRPC() []string {
return lc.Cluster().Client(0).Endpoints()
}
-func (lc *lazyCluster) Cluster() *ClusterV3 {
+func (lc *lazyCluster) Cluster() *integration.Cluster {
lc.mustLazyInit()
return lc.cluster
}
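lazyCluster defers the expensive cluster construction to the first accessor through sync.Once, so test binaries that never touch it pay nothing. A minimal sketch of the same lazy-initialization pattern with a stand-in resource:

```go
package main

import (
    "fmt"
    "sync"
)

// lazyResource initializes an expensive value on first use, the same pattern
// lazyCluster uses via sync.Once. The "resource" here is hypothetical.
type lazyResource struct {
    once  sync.Once
    value string
}

func (l *lazyResource) get() string {
    l.once.Do(func() {
        fmt.Println("initializing (runs exactly once)")
        l.value = "shared-cluster-handle"
    })
    return l.value
}

func main() {
    r := &lazyResource{}
    // Concurrent callers race to get(); sync.Once guarantees a single init.
    var wg sync.WaitGroup
    for i := 0; i < 3; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            fmt.Println(r.get())
        }()
    }
    wg.Wait()
}
```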
diff --git a/tests/integration/member_test.go b/tests/integration/member_test.go
index 5493924c9d2..efd6598f684 100644
--- a/tests/integration/member_test.go
+++ b/tests/integration/member_test.go
@@ -17,65 +17,61 @@ package integration
import (
"context"
"fmt"
- "io/ioutil"
- "reflect"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
- "go.etcd.io/etcd/client/v2"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestPauseMember(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- c := NewCluster(t, 5)
- c.Launch(t)
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: 5})
defer c.Terminate(t)
for i := 0; i < 5; i++ {
c.Members[i].Pause()
- membs := append([]*member{}, c.Members[:i]...)
+ membs := append([]*integration.Member{}, c.Members[:i]...)
membs = append(membs, c.Members[i+1:]...)
- c.waitLeader(t, membs)
+ c.WaitMembersForLeader(t, membs)
clusterMustProgress(t, membs)
c.Members[i].Resume()
}
- c.waitLeader(t, c.Members)
+ c.WaitMembersForLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}
func TestRestartMember(t *testing.T) {
- BeforeTest(t)
- c := NewCluster(t, 3)
- c.Launch(t)
+ integration.BeforeTest(t)
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer c.Terminate(t)
for i := 0; i < 3; i++ {
c.Members[i].Stop(t)
- membs := append([]*member{}, c.Members[:i]...)
+ membs := append([]*integration.Member{}, c.Members[:i]...)
membs = append(membs, c.Members[i+1:]...)
- c.waitLeader(t, membs)
+ c.WaitMembersForLeader(t, membs)
clusterMustProgress(t, membs)
err := c.Members[i].Restart(t)
if err != nil {
t.Fatal(err)
}
}
- c.waitLeader(t, c.Members)
+ c.WaitMembersForLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}
func TestLaunchDuplicateMemberShouldFail(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
size := 3
- c := NewCluster(t, size)
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: size})
m := c.Members[0].Clone(t)
- var err error
- m.DataDir, err = ioutil.TempDir(t.TempDir(), "etcd")
- if err != nil {
- t.Fatal(err)
- }
- c.Launch(t)
+ m.DataDir = t.TempDir()
defer c.Terminate(t)
if err := m.Launch(); err == nil {
@@ -87,21 +83,19 @@ func TestLaunchDuplicateMemberShouldFail(t *testing.T) {
}
func TestSnapshotAndRestartMember(t *testing.T) {
- BeforeTest(t)
- m := mustNewMember(t, memberConfig{name: "snapAndRestartTest"})
+ integration.BeforeTest(t)
+ m := integration.MustNewMember(t, integration.MemberConfig{Name: "snapAndRestartTest", UseBridge: true})
m.SnapshotCount = 100
m.Launch()
defer m.Terminate(t)
+ defer m.Client.Close()
m.WaitOK(t)
- resps := make([]*client.Response, 120)
var err error
for i := 0; i < 120; i++ {
- cc := MustNewHTTPClient(t, []string{m.URL()}, nil)
- kapi := client.NewKeysAPI(cc)
- ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
+ ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
key := fmt.Sprintf("foo%d", i)
- resps[i], err = kapi.Create(ctx, "/"+key, "bar")
+ _, err = m.Client.Put(ctx, "/"+key, "bar")
if err != nil {
t.Fatalf("#%d: create on %s error: %v", i, m.URL(), err)
}
@@ -112,18 +106,43 @@ func TestSnapshotAndRestartMember(t *testing.T) {
m.WaitOK(t)
for i := 0; i < 120; i++ {
- cc := MustNewHTTPClient(t, []string{m.URL()}, nil)
- kapi := client.NewKeysAPI(cc)
- ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
+ ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
key := fmt.Sprintf("foo%d", i)
- resp, err := kapi.Get(ctx, "/"+key, nil)
+ resp, err := m.Client.Get(ctx, "/"+key)
if err != nil {
t.Fatalf("#%d: get on %s error: %v", i, m.URL(), err)
}
cancel()
- if !reflect.DeepEqual(resp.Node, resps[i].Node) {
- t.Errorf("#%d: node = %v, want %v", i, resp.Node, resps[i].Node)
+ if len(resp.Kvs) != 1 || string(resp.Kvs[0].Value) != "bar" {
+ t.Errorf("#%d: got = %v, want %v", i, resp.Kvs[0], "bar")
}
}
}
+
+func TestRemoveMember(t *testing.T) {
+ integration.BeforeTest(t)
+ c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true, BackendBatchInterval: 1000 * time.Second})
+ defer c.Terminate(t)
+ // membership changes additionally require the cluster to be stable for etcdserver.HealthInterval
+ time.Sleep(etcdserver.HealthInterval)
+
+ err := c.RemoveMember(t, c.Client(2), uint64(c.Members[0].ID()))
+ require.NoError(t, err)
+
+ checkMemberCount(t, c.Members[0], 2)
+ checkMemberCount(t, c.Members[1], 2)
+}
+
+func checkMemberCount(t *testing.T, m *integration.Member, expectedMemberCount int) {
+ be := schema.NewMembershipBackend(m.Logger, m.Server.Backend())
+ membersFromBackend, _ := be.MustReadMembersFromBackend()
+ if len(membersFromBackend) != expectedMemberCount {
+ t.Errorf("Expect member count read from backend=%d, got %d", expectedMemberCount, len(membersFromBackend))
+ }
+ membersResp, err := m.Client.MemberList(context.Background())
+ require.NoError(t, err)
+ if len(membersResp.Members) != expectedMemberCount {
+ t.Errorf("Expect len(MemberList)=%d, got %d", expectedMemberCount, len(membersResp.Members))
+ }
+}
diff --git a/tests/integration/metrics_test.go b/tests/integration/metrics_test.go
index 61276fc94b6..e6f5a3f3e9c 100644
--- a/tests/integration/metrics_test.go
+++ b/tests/integration/metrics_test.go
@@ -22,15 +22,19 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/require"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/pkg/v3/transport"
- "go.etcd.io/etcd/server/v3/etcdserver"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/server/v3/storage"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
)
// TestMetricDbSizeBoot checks that the db size metric is set on boot.
func TestMetricDbSizeBoot(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
v, err := clus.Members[0].Metric("etcd_debugging_mvcc_db_total_size_in_bytes")
@@ -49,21 +53,20 @@ func TestMetricDbSizeDefrag(t *testing.T) {
// testMetricDbSizeDefrag checks that the db size metric is set after defrag.
func testMetricDbSizeDefrag(t *testing.T, name string) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
- kvc := toGRPC(clus.Client(0)).KV
- mc := toGRPC(clus.Client(0)).Maintenance
+ kvc := integration.ToGRPC(clus.Client(0)).KV
+ mc := integration.ToGRPC(clus.Client(0)).Maintenance
// expand the db size
numPuts := 25 // large enough to write more than 1 page
putreq := &pb.PutRequest{Key: []byte("k"), Value: make([]byte, 4096)}
for i := 0; i < numPuts; i++ {
time.Sleep(10 * time.Millisecond) // to execute multiple backend txn
- if _, err := kvc.Put(context.TODO(), putreq); err != nil {
- t.Fatal(err)
- }
+ _, err := kvc.Put(context.TODO(), putreq)
+ require.NoError(t, err)
}
// wait for backend txn sync
@@ -101,18 +104,19 @@ func testMetricDbSizeDefrag(t *testing.T, name string) {
validateAfterCompactionInUse := func() error {
// Put to move PendingPages to FreePages
- if _, err = kvc.Put(context.TODO(), putreq); err != nil {
- t.Fatal(err)
+ _, verr := kvc.Put(context.TODO(), putreq)
+ if verr != nil {
+ t.Fatal(verr)
}
time.Sleep(500 * time.Millisecond)
- afterCompactionInUse, err := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes")
- if err != nil {
- t.Fatal(err)
+ afterCompactionInUse, verr := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes")
+ if verr != nil {
+ t.Fatal(verr)
}
- aciu, err := strconv.Atoi(afterCompactionInUse)
- if err != nil {
- t.Fatal(err)
+ aciu, verr := strconv.Atoi(afterCompactionInUse)
+ if verr != nil {
+ t.Fatal(verr)
}
if biu <= aciu {
return fmt.Errorf("expected less than %d, got %d after compaction", biu, aciu)
@@ -124,13 +128,13 @@ func testMetricDbSizeDefrag(t *testing.T, name string) {
// which causes the result to be flaky. Retry 3 times.
maxRetry, retry := 3, 0
for {
- err := validateAfterCompactionInUse()
+ err = validateAfterCompactionInUse()
if err == nil {
break
}
retry++
if retry >= maxRetry {
- t.Fatalf(err.Error())
+ t.Fatalf("%v", err.Error())
}
}
@@ -163,8 +167,8 @@ func testMetricDbSizeDefrag(t *testing.T, name string) {
}
func TestMetricQuotaBackendBytes(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
qs, err := clus.Members[0].Metric("etcd_server_quota_backend_bytes")
@@ -175,14 +179,14 @@ func TestMetricQuotaBackendBytes(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if int64(qv) != etcdserver.DefaultQuotaBytes {
- t.Fatalf("expected %d, got %f", etcdserver.DefaultQuotaBytes, qv)
+ if int64(qv) != storage.DefaultQuotaBytes {
+ t.Fatalf("expected %d, got %f", storage.DefaultQuotaBytes, qv)
}
}
func TestMetricsHealth(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
tr, err := transport.NewTransport(transport.TLSInfo{}, 5*time.Second)
@@ -209,3 +213,34 @@ func TestMetricsHealth(t *testing.T) {
t.Fatalf("expected '0' from etcd_server_health_failures, got %q", hv)
}
}
+
+func TestMetricsRangeDurationSeconds(t *testing.T) {
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
+ defer clus.Terminate(t)
+
+ client := clus.RandClient()
+
+ keys := []string{
+ "my-namespace/foobar", "my-namespace/foobar1", "namespace/foobar1",
+ }
+ for _, key := range keys {
+ _, err := client.Put(context.Background(), key, "data")
+ require.NoError(t, err)
+ }
+
+ _, err := client.Get(context.Background(), "", clientv3.WithFromKey())
+ require.NoError(t, err)
+
+ rangeDurationSeconds, err := clus.Members[0].Metric("etcd_server_range_duration_seconds")
+ require.NoError(t, err)
+
+ require.NotEmptyf(t, rangeDurationSeconds, "expected a number from etcd_server_range_duration_seconds")
+
+ rangeDuration, err := strconv.ParseFloat(rangeDurationSeconds, 64)
+ require.NoErrorf(t, err, "failed to parse duration: %s", rangeDurationSeconds)
+
+ maxRangeDuration := 600.0
+ require.GreaterOrEqualf(t, rangeDuration, 0.0, "expected etcd_server_range_duration_seconds to be between 0 and %f, got %f", maxRangeDuration, rangeDuration)
+ require.LessOrEqualf(t, rangeDuration, maxRangeDuration, "expected etcd_server_range_duration_seconds to be between 0 and %f, got %f", maxRangeDuration, rangeDuration)
+}
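TestMetricsRangeDurationSeconds scrapes the metric as a string and bounds-checks the parsed float. A small sketch of that parse-and-range check; the sample value and bounds are illustrative:

```go
package main

import (
    "fmt"
    "strconv"
)

// checkMetricInRange parses a metric value rendered as a string (as the
// Member.Metric helper returns it) and checks that it falls within [lower, upper].
func checkMetricInRange(raw string, lower, upper float64) error {
    v, err := strconv.ParseFloat(raw, 64)
    if err != nil {
        return fmt.Errorf("failed to parse metric %q: %w", raw, err)
    }
    if v < lower || v > upper {
        return fmt.Errorf("metric value %f outside [%f, %f]", v, lower, upper)
    }
    return nil
}

func main() {
    // Hypothetical scrape result for etcd_server_range_duration_seconds.
    fmt.Println(checkMetricInRange("0.00042", 0, 600))
    fmt.Println(checkMetricInRange("not-a-number", 0, 600))
}
```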
diff --git a/tests/integration/network_partition_test.go b/tests/integration/network_partition_test.go
index 6abc36700a4..059e9376266 100644
--- a/tests/integration/network_partition_test.go
+++ b/tests/integration/network_partition_test.go
@@ -18,12 +18,14 @@ import (
"fmt"
"testing"
"time"
+
+ "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestNetworkPartition5MembersLeaderInMinority(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 5})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 5})
defer clus.Terminate(t)
leadIndex := clus.WaitLeader(t)
@@ -32,20 +34,20 @@ func TestNetworkPartition5MembersLeaderInMinority(t *testing.T) {
minority := []int{leadIndex, (leadIndex + 1) % 5}
majority := []int{(leadIndex + 2) % 5, (leadIndex + 3) % 5, (leadIndex + 4) % 5}
- minorityMembers := getMembersByIndexSlice(clus.cluster, minority)
- majorityMembers := getMembersByIndexSlice(clus.cluster, majority)
+ minorityMembers := getMembersByIndexSlice(clus, minority)
+ majorityMembers := getMembersByIndexSlice(clus, majority)
// network partition (bi-directional)
injectPartition(t, minorityMembers, majorityMembers)
// minority leader must be lost
- clus.waitNoLeader(minorityMembers)
+ clus.WaitMembersNoLeader(minorityMembers)
// wait extra election timeout
time.Sleep(2 * majorityMembers[0].ElectionTimeout())
// new leader must be from majority
- clus.waitLeader(t, majorityMembers)
+ clus.WaitMembersForLeader(t, majorityMembers)
// recover network partition (bi-directional)
recoverPartition(t, minorityMembers, majorityMembers)
@@ -69,9 +71,9 @@ func TestNetworkPartition5MembersLeaderInMajority(t *testing.T) {
}
func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 5})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 5})
defer clus.Terminate(t)
leadIndex := clus.WaitLeader(t)
@@ -80,21 +82,21 @@ func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error {
majority := []int{leadIndex, (leadIndex + 1) % 5, (leadIndex + 2) % 5}
minority := []int{(leadIndex + 3) % 5, (leadIndex + 4) % 5}
- majorityMembers := getMembersByIndexSlice(clus.cluster, majority)
- minorityMembers := getMembersByIndexSlice(clus.cluster, minority)
+ majorityMembers := getMembersByIndexSlice(clus, majority)
+ minorityMembers := getMembersByIndexSlice(clus, minority)
// network partition (bi-directional)
injectPartition(t, majorityMembers, minorityMembers)
// minority leader must be lost
- clus.waitNoLeader(minorityMembers)
+ clus.WaitMembersNoLeader(minorityMembers)
// wait extra election timeout
time.Sleep(2 * majorityMembers[0].ElectionTimeout())
// leader must be held in majority
- leadIndex2 := clus.waitLeader(t, majorityMembers)
- leadID, leadID2 := clus.Members[leadIndex].s.ID(), majorityMembers[leadIndex2].s.ID()
+ leadIndex2 := clus.WaitMembersForLeader(t, majorityMembers)
+ leadID, leadID2 := clus.Members[leadIndex].Server.MemberID(), majorityMembers[leadIndex2].Server.MemberID()
if leadID != leadID2 {
return fmt.Errorf("unexpected leader change from %s, got %s", leadID, leadID2)
}
@@ -108,9 +110,9 @@ func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error {
}
func TestNetworkPartition4Members(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 4})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 4})
defer clus.Terminate(t)
leadIndex := clus.WaitLeader(t)
@@ -119,8 +121,8 @@ func TestNetworkPartition4Members(t *testing.T) {
groupA := []int{leadIndex, (leadIndex + 1) % 4}
groupB := []int{(leadIndex + 2) % 4, (leadIndex + 3) % 4}
- leaderPartition := getMembersByIndexSlice(clus.cluster, groupA)
- followerPartition := getMembersByIndexSlice(clus.cluster, groupB)
+ leaderPartition := getMembersByIndexSlice(clus, groupA)
+ followerPartition := getMembersByIndexSlice(clus, groupB)
// network partition (bi-directional)
injectPartition(t, leaderPartition, followerPartition)
@@ -137,21 +139,21 @@ func TestNetworkPartition4Members(t *testing.T) {
clusterMustProgress(t, clus.Members)
}
-func getMembersByIndexSlice(clus *cluster, idxs []int) []*member {
- ms := make([]*member, len(idxs))
+func getMembersByIndexSlice(clus *integration.Cluster, idxs []int) []*integration.Member {
+ ms := make([]*integration.Member, len(idxs))
for i, idx := range idxs {
ms[i] = clus.Members[idx]
}
return ms
}
-func injectPartition(t *testing.T, src, others []*member) {
+func injectPartition(t *testing.T, src, others []*integration.Member) {
for _, m := range src {
m.InjectPartition(t, others...)
}
}
-func recoverPartition(t *testing.T, src, others []*member) {
+func recoverPartition(t *testing.T, src, others []*integration.Member) {
for _, m := range src {
m.RecoverPartition(t, others...)
}
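getMembersByIndexSlice only projects the cluster's member slice onto an index set so the partitions can be described as index groups. The same idea written as a small generic helper, with placeholder member names:

```go
package main

import "fmt"

// pick returns the elements of items at the given indexes, mirroring
// getMembersByIndexSlice but written generically.
func pick[T any](items []T, idxs []int) []T {
    out := make([]T, len(idxs))
    for i, idx := range idxs {
        out[i] = items[idx]
    }
    return out
}

func main() {
    members := []string{"m0", "m1", "m2", "m3", "m4"}
    leadIndex := 2
    minority := pick(members, []int{leadIndex, (leadIndex + 1) % 5})
    majority := pick(members, []int{(leadIndex + 2) % 5, (leadIndex + 3) % 5, (leadIndex + 4) % 5})
    fmt.Println(minority, majority) // [m2 m3] [m4 m0 m1]
}
```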
diff --git a/tests/integration/proxy/grpcproxy/cluster_test.go b/tests/integration/proxy/grpcproxy/cluster_test.go
index 5be35c23289..ca2fcb506b3 100644
--- a/tests/integration/proxy/grpcproxy/cluster_test.go
+++ b/tests/integration/proxy/grpcproxy/cluster_test.go
@@ -17,40 +17,48 @@ package grpcproxy
import (
"context"
"net"
+ "os"
"testing"
"time"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/server/v3/proxy/grpcproxy"
- "go.etcd.io/etcd/tests/v3/integration"
- "go.uber.org/zap/zaptest"
-
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
"google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/client/v3/naming/endpoints"
+ "go.etcd.io/etcd/server/v3/proxy/grpcproxy"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestClusterProxyMemberList(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
- cts := newClusterProxyServer(zaptest.NewLogger(t), []string{clus.Members[0].GRPCAddr()}, t)
+ lg := zaptest.NewLogger(t)
+ serverEps := []string{clus.Members[0].GRPCURL}
+ prefix := "test-prefix"
+ hostname, _ := os.Hostname()
+ cts := newClusterProxyServer(lg, serverEps, prefix, t)
defer cts.close(t)
cfg := clientv3.Config{
Endpoints: []string{cts.caddr},
DialTimeout: 5 * time.Second,
}
- client, err := integration.NewClient(t, cfg)
+ client, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatalf("err %v, want nil", err)
}
defer client.Close()
// wait some time for register-loop to write keys
- time.Sleep(time.Second)
+ time.Sleep(200 * time.Millisecond)
var mresp *clientv3.MemberListResponse
mresp, err = client.Cluster.MemberList(context.Background())
@@ -64,9 +72,38 @@ func TestClusterProxyMemberList(t *testing.T) {
if len(mresp.Members[0].ClientURLs) != 1 {
t.Fatalf("len(mresp.Members[0].ClientURLs) expected 1, got %d (%+v)", len(mresp.Members[0].ClientURLs), mresp.Members[0].ClientURLs[0])
}
- if mresp.Members[0].ClientURLs[0] != cts.caddr {
- t.Fatalf("mresp.Members[0].ClientURLs[0] expected %q, got %q", cts.caddr, mresp.Members[0].ClientURLs[0])
+ assert.Contains(t, mresp.Members, &pb.Member{Name: hostname, ClientURLs: []string{cts.caddr}})
+
+ // test proxy member add
+ newMemberAddr := "127.0.0.2:6789"
+ grpcproxy.Register(lg, cts.c, prefix, newMemberAddr, 7)
+ // wait some time for the proxy to update its member list
+ time.Sleep(200 * time.Millisecond)
+
+ // check that the member add succeeded
+ mresp, err = client.Cluster.MemberList(context.Background())
+ if err != nil {
+ t.Fatalf("err %v, want nil", err)
+ }
+ if len(mresp.Members) != 2 {
+ t.Fatalf("len(mresp.Members) expected 2, got %d (%+v)", len(mresp.Members), mresp.Members)
+ }
+ assert.Contains(t, mresp.Members, &pb.Member{Name: hostname, ClientURLs: []string{newMemberAddr}})
+
+ // test proxy member delete
+ deregisterMember(cts.c, prefix, newMemberAddr, t)
+ // wait some time for the proxy to update its member list
+ time.Sleep(200 * time.Millisecond)
+
+ // check that the member delete succeeded
+ mresp, err = client.Cluster.MemberList(context.Background())
+ if err != nil {
+ t.Fatalf("err %v, want nil", err)
+ }
+ if len(mresp.Members) != 1 {
+ t.Fatalf("len(mresp.Members) expected 1, got %d (%+v)", len(mresp.Members), mresp.Members)
}
+ assert.Contains(t, mresp.Members, &pb.Member{Name: hostname, ClientURLs: []string{cts.caddr}})
}
type clusterproxyTestServer struct {
@@ -90,23 +127,19 @@ func (cts *clusterproxyTestServer) close(t *testing.T) {
}
}
-func newClusterProxyServer(lg *zap.Logger, endpoints []string, t *testing.T) *clusterproxyTestServer {
+func newClusterProxyServer(lg *zap.Logger, endpoints []string, prefix string, t *testing.T) *clusterproxyTestServer {
cfg := clientv3.Config{
Endpoints: endpoints,
DialTimeout: 5 * time.Second,
}
- client, err := integration.NewClient(t, cfg)
- if err != nil {
- t.Fatal(err)
- }
+ client, err := integration2.NewClient(t, cfg)
+ require.NoError(t, err)
cts := &clusterproxyTestServer{
c: client,
}
cts.l, err = net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
var opts []grpc.ServerOption
cts.server = grpc.NewServer(opts...)
servec := make(chan struct{})
@@ -115,8 +148,8 @@ func newClusterProxyServer(lg *zap.Logger, endpoints []string, t *testing.T) *cl
cts.server.Serve(cts.l)
}()
- grpcproxy.Register(lg, client, "test-prefix", cts.l.Addr().String(), 7)
- cts.cp, cts.donec = grpcproxy.NewClusterProxy(lg, client, cts.l.Addr().String(), "test-prefix")
+ grpcproxy.Register(lg, client, prefix, cts.l.Addr().String(), 7)
+ cts.cp, cts.donec = grpcproxy.NewClusterProxy(lg, client, cts.l.Addr().String(), prefix)
cts.caddr = cts.l.Addr().String()
pb.RegisterClusterServer(cts.server, cts.cp)
close(servec)
@@ -126,3 +159,13 @@ func newClusterProxyServer(lg *zap.Logger, endpoints []string, t *testing.T) *cl
return cts
}
+
+func deregisterMember(c *clientv3.Client, prefix, addr string, t *testing.T) {
+ em, err := endpoints.NewManager(c, prefix)
+ if err != nil {
+ t.Fatalf("new endpoint manager failed, err %v", err)
+ }
+ if err = em.DeleteEndpoint(c.Ctx(), prefix+"/"+addr); err != nil {
+ t.Fatalf("delete endpoint failed, err %v", err)
+ }
+}
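The proxy's member list is driven by the clientv3 naming API: grpcproxy.Register publishes an endpoint under the prefix and deregisterMember deletes it again. A hedged sketch of doing both directly through endpoints.Manager, using the same `prefix/addr` key layout as the test (helper names are illustrative, and the code needs a live cluster behind the client):

```go
package main

import (
    "context"

    clientv3 "go.etcd.io/etcd/client/v3"
    "go.etcd.io/etcd/client/v3/naming/endpoints"
)

// registerMember writes a naming-API registration under prefix/addr, the
// counterpart of the deregisterMember helper in the test above.
func registerMember(ctx context.Context, cli *clientv3.Client, prefix, addr string) error {
    em, err := endpoints.NewManager(cli, prefix)
    if err != nil {
        return err
    }
    return em.AddEndpoint(ctx, prefix+"/"+addr, endpoints.Endpoint{Addr: addr})
}

// deregisterMember removes the registration again.
func deregisterMember(ctx context.Context, cli *clientv3.Client, prefix, addr string) error {
    em, err := endpoints.NewManager(cli, prefix)
    if err != nil {
        return err
    }
    return em.DeleteEndpoint(ctx, prefix+"/"+addr)
}

func main() {} // illustration only; wire up a real client to exercise the helpers
```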
diff --git a/tests/integration/proxy/grpcproxy/kv_test.go b/tests/integration/proxy/grpcproxy/kv_test.go
index 1ff106e4a2c..b871eca95ea 100644
--- a/tests/integration/proxy/grpcproxy/kv_test.go
+++ b/tests/integration/proxy/grpcproxy/kv_test.go
@@ -20,21 +20,22 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/proxy/grpcproxy"
- "go.etcd.io/etcd/tests/v3/integration"
-
- "google.golang.org/grpc"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestKVProxyRange(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
- kvts := newKVProxyServer([]string{clus.Members[0].GRPCAddr()}, t)
+ kvts := newKVProxyServer([]string{clus.Members[0].GRPCURL}, t)
defer kvts.close()
// create a client and try to get key from proxy.
@@ -42,7 +43,7 @@ func TestKVProxyRange(t *testing.T) {
Endpoints: []string{kvts.l.Addr().String()},
DialTimeout: 5 * time.Second,
}
- client, err := integration.NewClient(t, cfg)
+ client, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatalf("err = %v, want nil", err)
}
@@ -71,10 +72,8 @@ func newKVProxyServer(endpoints []string, t *testing.T) *kvproxyTestServer {
Endpoints: endpoints,
DialTimeout: 5 * time.Second,
}
- client, err := integration.NewClient(t, cfg)
- if err != nil {
- t.Fatal(err)
- }
+ client, err := integration2.NewClient(t, cfg)
+ require.NoError(t, err)
kvp, _ := grpcproxy.NewKvProxy(client)
@@ -88,9 +87,7 @@ func newKVProxyServer(endpoints []string, t *testing.T) *kvproxyTestServer {
pb.RegisterKVServer(kvts.server, kvts.kp)
kvts.l, err = net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
go kvts.server.Serve(kvts.l)
diff --git a/tests/integration/proxy/grpcproxy/register_test.go b/tests/integration/proxy/grpcproxy/register_test.go
index 4fbe08e0889..a0fb5272c52 100644
--- a/tests/integration/proxy/grpcproxy/register_test.go
+++ b/tests/integration/proxy/grpcproxy/register_test.go
@@ -18,20 +18,21 @@ import (
"testing"
"time"
+ "go.uber.org/zap/zaptest"
+
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/naming/endpoints"
"go.etcd.io/etcd/server/v3/proxy/grpcproxy"
- "go.etcd.io/etcd/tests/v3/integration"
- "go.uber.org/zap/zaptest"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestRegister(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
- paddr := clus.Members[0].GRPCAddr()
+ paddr := clus.Members[0].GRPCURL
testPrefix := "test-name"
wa := mustCreateWatcher(t, cli, testPrefix)
diff --git a/tests/integration/revision_test.go b/tests/integration/revision_test.go
new file mode 100644
index 00000000000..e5c1d270fac
--- /dev/null
+++ b/tests/integration/revision_test.go
@@ -0,0 +1,165 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package integration_test
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "google.golang.org/grpc/status"
+
+ "go.etcd.io/etcd/tests/v3/framework/integration"
+)
+
+func TestRevisionMonotonicWithLeaderPartitions(t *testing.T) {
+ testRevisionMonotonicWithFailures(t, 12*time.Second, func(clus *integration.Cluster) {
+ for i := 0; i < 5; i++ {
+ leader := clus.WaitLeader(t)
+ time.Sleep(time.Second)
+ clus.Members[leader].InjectPartition(t, clus.Members[(leader+1)%3], clus.Members[(leader+2)%3])
+ time.Sleep(time.Second)
+ clus.Members[leader].RecoverPartition(t, clus.Members[(leader+1)%3], clus.Members[(leader+2)%3])
+ }
+ })
+}
+
+func TestRevisionMonotonicWithPartitions(t *testing.T) {
+ testRevisionMonotonicWithFailures(t, 11*time.Second, func(clus *integration.Cluster) {
+ for i := 0; i < 5; i++ {
+ time.Sleep(time.Second)
+ clus.Members[i%3].InjectPartition(t, clus.Members[(i+1)%3], clus.Members[(i+2)%3])
+ time.Sleep(time.Second)
+ clus.Members[i%3].RecoverPartition(t, clus.Members[(i+1)%3], clus.Members[(i+2)%3])
+ }
+ })
+}
+
+func TestRevisionMonotonicWithLeaderRestarts(t *testing.T) {
+ testRevisionMonotonicWithFailures(t, 11*time.Second, func(clus *integration.Cluster) {
+ for i := 0; i < 5; i++ {
+ leader := clus.WaitLeader(t)
+ time.Sleep(time.Second)
+ clus.Members[leader].Stop(t)
+ time.Sleep(time.Second)
+ clus.Members[leader].Restart(t)
+ }
+ })
+}
+
+func TestRevisionMonotonicWithRestarts(t *testing.T) {
+ testRevisionMonotonicWithFailures(t, 11*time.Second, func(clus *integration.Cluster) {
+ for i := 0; i < 5; i++ {
+ time.Sleep(time.Second)
+ clus.Members[i%3].Stop(t)
+ time.Sleep(time.Second)
+ clus.Members[i%3].Restart(t)
+ }
+ })
+}
+
+func testRevisionMonotonicWithFailures(t *testing.T, testDuration time.Duration, injectFailures func(clus *integration.Cluster)) {
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
+ defer clus.Terminate(t)
+
+ ctx, cancel := context.WithTimeout(context.Background(), testDuration)
+ defer cancel()
+
+ wg := sync.WaitGroup{}
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ putWorker(ctx, t, clus)
+ }()
+ }
+
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ getWorker(ctx, t, clus)
+ }()
+ }
+
+ injectFailures(clus)
+ wg.Wait()
+ kv := clus.Client(0)
+ resp, err := kv.Get(context.Background(), "foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("Revision %d", resp.Header.Revision)
+}
+
+func putWorker(ctx context.Context, t *testing.T, clus *integration.Cluster) {
+ for i := 0; ; i++ {
+ kv := clus.Client(i % 3)
+ _, err := kv.Put(ctx, "foo", fmt.Sprintf("%d", i))
+ if errors.Is(err, context.DeadlineExceeded) {
+ return
+ }
+ if silenceConnectionErrors(err) != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func getWorker(ctx context.Context, t *testing.T, clus *integration.Cluster) {
+ var prevRev int64
+ for i := 0; ; i++ {
+ kv := clus.Client(i % 3)
+ resp, err := kv.Get(ctx, "foo")
+ if errors.Is(err, context.DeadlineExceeded) {
+ return
+ }
+ if silenceConnectionErrors(err) != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ continue
+ }
+ if prevRev > resp.Header.Revision {
+ t.Fatalf("rev is less than previously observed revision, rev: %d, prevRev: %d", resp.Header.Revision, prevRev)
+ }
+ prevRev = resp.Header.Revision
+ }
+}
+
+func silenceConnectionErrors(err error) error {
+ if err == nil {
+ return nil
+ }
+ s := status.Convert(err)
+ for _, msg := range connectionErrorMessages {
+ if strings.Contains(s.Message(), msg) {
+ return nil
+ }
+ }
+ return err
+}
+
+var connectionErrorMessages = []string{
+ "context deadline exceeded",
+ "etcdserver: request timed out",
+ "error reading from server: EOF",
+ "read: connection reset by peer",
+ "use of closed network connection",
+}
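silenceConnectionErrors converts any error into a gRPC status and matches its message against known transient connection failures, so the put/get workers can keep running through partitions and restarts. A sketch of that classification; the message list is trimmed for illustration:

```go
package main

import (
    "errors"
    "fmt"
    "strings"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// transientMessages is a sample of connection-failure messages to tolerate,
// not the full set used by the test.
var transientMessages = []string{
    "context deadline exceeded",
    "connection reset by peer",
}

// retriable reports whether err looks like a transient connection failure.
func retriable(err error) bool {
    if err == nil {
        return false
    }
    s := status.Convert(err) // works for gRPC status errors and plain errors alike
    for _, msg := range transientMessages {
        if strings.Contains(s.Message(), msg) {
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(retriable(status.Error(codes.Unavailable, "read: connection reset by peer"))) // true
    fmt.Println(retriable(errors.New("etcdserver: permission denied")))                       // false
}
```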
diff --git a/tests/integration/snapshot/member_test.go b/tests/integration/snapshot/member_test.go
index 076d928bbc3..be419efdc27 100644
--- a/tests/integration/snapshot/member_test.go
+++ b/tests/integration/snapshot/member_test.go
@@ -20,18 +20,20 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/require"
+
"go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/embed"
"go.etcd.io/etcd/server/v3/etcdserver"
- "go.etcd.io/etcd/tests/v3/integration"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
// TestSnapshotV3RestoreMultiMemberAdd ensures that multiple members
// can boot into the same cluster after being restored from a same
// snapshot file, and also be able to add another member to the cluster.
func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}}
dbPath := createSnapshotFile(t, kvs)
@@ -48,26 +50,23 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) {
// wait for health interval + leader election
time.Sleep(etcdserver.HealthInterval + 2*time.Second)
- cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[0].String()}})
- if err != nil {
- t.Fatal(err)
- }
+ cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[0].String()}})
+ require.NoError(t, err)
defer cli.Close()
- urls := newEmbedURLs(2)
+ urls := newEmbedURLs(t, 2)
newCURLs, newPURLs := urls[:1], urls[1:]
- if _, err = cli.MemberAdd(context.Background(), []string{newPURLs[0].String()}); err != nil {
- t.Fatal(err)
- }
+ _, err = cli.MemberAdd(context.Background(), []string{newPURLs[0].String()})
+ require.NoError(t, err)
// wait for membership reconfiguration apply
time.Sleep(testutil.ApplyTimeout)
- cfg := integration.NewEmbedConfig(t, "3")
+ cfg := integration2.NewEmbedConfig(t, "3")
cfg.InitialClusterToken = testClusterTkn
cfg.ClusterState = "existing"
- cfg.LCUrls, cfg.ACUrls = newCURLs, newCURLs
- cfg.LPUrls, cfg.APUrls = newPURLs, newPURLs
+ cfg.ListenClientUrls, cfg.AdvertiseClientUrls = newCURLs, newCURLs
+ cfg.ListenPeerUrls, cfg.AdvertisePeerUrls = newPURLs, newPURLs
cfg.InitialCluster = ""
for i := 0; i < clusterN; i++ {
cfg.InitialCluster += fmt.Sprintf(",%d=%s", i, pURLs[i].String())
@@ -76,9 +75,7 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) {
cfg.InitialCluster += fmt.Sprintf(",%s=%s", cfg.Name, newPURLs[0].String())
srv, err := embed.StartEtcd(cfg)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
defer func() {
srv.Close()
}()
@@ -88,18 +85,14 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) {
t.Fatalf("failed to start the newly added etcd member")
}
- cli2, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{newCURLs[0].String()}})
- if err != nil {
- t.Fatal(err)
- }
+ cli2, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{newCURLs[0].String()}})
+ require.NoError(t, err)
defer cli2.Close()
ctx, cancel := context.WithTimeout(context.Background(), testutil.RequestTimeout)
mresp, err := cli2.MemberList(ctx)
cancel()
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
if len(mresp.Members) != 4 {
t.Fatalf("expected 4 members, got %+v", mresp)
}
@@ -109,15 +102,13 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) {
ctx, cancel = context.WithTimeout(context.Background(), testutil.RequestTimeout)
gresp, err = cli2.Get(ctx, "foo", clientv3.WithPrefix())
cancel()
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
for i := range gresp.Kvs {
if string(gresp.Kvs[i].Key) != kvs[i].k {
- t.Fatalf("#%d: key expected %s, got %s", i, kvs[i].k, string(gresp.Kvs[i].Key))
+ t.Fatalf("#%d: key expected %s, got %s", i, kvs[i].k, gresp.Kvs[i].Key)
}
if string(gresp.Kvs[i].Value) != kvs[i].v {
- t.Fatalf("#%d: value expected %s, got %s", i, kvs[i].v, string(gresp.Kvs[i].Value))
+ t.Fatalf("#%d: value expected %s, got %s", i, kvs[i].v, gresp.Kvs[i].Value)
}
}
}
diff --git a/tests/integration/snapshot/v3_snapshot_test.go b/tests/integration/snapshot/v3_snapshot_test.go
index 36886c40bf9..893fa4fd038 100644
--- a/tests/integration/snapshot/v3_snapshot_test.go
+++ b/tests/integration/snapshot/v3_snapshot_test.go
@@ -17,7 +17,6 @@ package snapshot_test
import (
"context"
"fmt"
- "math/rand"
"net/url"
"os"
"path/filepath"
@@ -25,31 +24,34 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zapcore"
+ "go.uber.org/zap/zaptest"
+
"go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/etcdutl/v3/snapshot"
"go.etcd.io/etcd/server/v3/embed"
- "go.etcd.io/etcd/tests/v3/integration"
- "go.uber.org/zap/zapcore"
- "go.uber.org/zap/zaptest"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
)
// TestSnapshotV3RestoreSingle tests single node cluster restoring
// from a snapshot file.
func TestSnapshotV3RestoreSingle(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}}
dbPath := createSnapshotFile(t, kvs)
clusterN := 1
- urls := newEmbedURLs(clusterN * 2)
+ urls := newEmbedURLs(t, clusterN*2)
cURLs, pURLs := urls[:clusterN], urls[clusterN:]
- cfg := integration.NewEmbedConfig(t, "s1")
+ cfg := integration2.NewEmbedConfig(t, "s1")
cfg.InitialClusterToken = testClusterTkn
cfg.ClusterState = "existing"
- cfg.LCUrls, cfg.ACUrls = cURLs, cURLs
- cfg.LPUrls, cfg.APUrls = pURLs, pURLs
+ cfg.ListenClientUrls, cfg.AdvertiseClientUrls = cURLs, cURLs
+ cfg.ListenPeerUrls, cfg.AdvertisePeerUrls = pURLs, pURLs
cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, pURLs[0].String())
sp := snapshot.NewV3(zaptest.NewLogger(t))
@@ -57,21 +59,18 @@ func TestSnapshotV3RestoreSingle(t *testing.T) {
for _, p := range pURLs {
pss = append(pss, p.String())
}
- if err := sp.Restore(snapshot.RestoreConfig{
+ err := sp.Restore(snapshot.RestoreConfig{
SnapshotPath: dbPath,
Name: cfg.Name,
OutputDataDir: cfg.Dir,
InitialCluster: cfg.InitialCluster,
InitialClusterToken: cfg.InitialClusterToken,
PeerURLs: pss,
- }); err != nil {
- t.Fatal(err)
- }
+ })
+ require.NoError(t, err)
srv, err := embed.StartEtcd(cfg)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
defer func() {
srv.Close()
}()
@@ -82,19 +81,15 @@ func TestSnapshotV3RestoreSingle(t *testing.T) {
}
var cli *clientv3.Client
- cli, err = integration.NewClient(t, clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}})
- if err != nil {
- t.Fatal(err)
- }
+ cli, err = integration2.NewClient(t, clientv3.Config{Endpoints: []string{cfg.AdvertiseClientUrls[0].String()}})
+ require.NoError(t, err)
defer cli.Close()
for i := range kvs {
var gresp *clientv3.GetResponse
gresp, err = cli.Get(context.Background(), kvs[i].k)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
if string(gresp.Kvs[0].Value) != kvs[i].v {
- t.Fatalf("#%d: value expected %s, got %s", i, kvs[i].v, string(gresp.Kvs[0].Value))
+ t.Fatalf("#%d: value expected %s, got %s", i, kvs[i].v, gresp.Kvs[0].Value)
}
}
}
@@ -103,7 +98,7 @@ func TestSnapshotV3RestoreSingle(t *testing.T) {
// can boot into the same cluster after being restored from a same
// snapshot file.
func TestSnapshotV3RestoreMulti(t *testing.T) {
- integration.BeforeTest(t)
+ integration2.BeforeTest(t)
kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}}
dbPath := createSnapshotFile(t, kvs)
@@ -119,19 +114,15 @@ func TestSnapshotV3RestoreMulti(t *testing.T) {
time.Sleep(time.Second)
for i := 0; i < clusterN; i++ {
- cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[i].String()}})
- if err != nil {
- t.Fatal(err)
- }
+ cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[i].String()}})
+ require.NoError(t, err)
defer cli.Close()
for i := range kvs {
var gresp *clientv3.GetResponse
gresp, err = cli.Get(context.Background(), kvs[i].k)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
if string(gresp.Kvs[0].Value) != kvs[i].v {
- t.Fatalf("#%d: value expected %s, got %s", i, kvs[i].v, string(gresp.Kvs[0].Value))
+ t.Fatalf("#%d: value expected %s, got %s", i, kvs[i].v, gresp.Kvs[0].Value)
}
}
}
@@ -139,8 +130,8 @@ func TestSnapshotV3RestoreMulti(t *testing.T) {
// TestCorruptedBackupFileCheck tests if we can correctly identify a corrupted backup file.
func TestCorruptedBackupFileCheck(t *testing.T) {
- dbPath := integration.MustAbsPath("testdata/corrupted_backup.db")
- integration.BeforeTest(t)
+ dbPath := testutils.MustAbsPath("testdata/corrupted_backup.db")
+ integration2.BeforeTest(t)
if _, err := os.Stat(dbPath); err != nil {
t.Fatalf("test file [%s] does not exist: %v", dbPath, err)
}
@@ -170,20 +161,18 @@ type kv struct {
// creates a snapshot file and returns the file path.
func createSnapshotFile(t *testing.T, kvs []kv) string {
testutil.SkipTestIfShortMode(t,
- "Snapshot creation tests are depending on embedded etcServer so are integration-level tests.")
+ "Snapshot creation tests are depending on embedded etcd server so are integration-level tests.")
clusterN := 1
- urls := newEmbedURLs(clusterN * 2)
+ urls := newEmbedURLs(t, clusterN*2)
cURLs, pURLs := urls[:clusterN], urls[clusterN:]
- cfg := integration.NewEmbedConfig(t, "default")
+ cfg := integration2.NewEmbedConfig(t, "default")
cfg.ClusterState = "new"
- cfg.LCUrls, cfg.ACUrls = cURLs, cURLs
- cfg.LPUrls, cfg.APUrls = pURLs, pURLs
+ cfg.ListenClientUrls, cfg.AdvertiseClientUrls = cURLs, cURLs
+ cfg.ListenPeerUrls, cfg.AdvertisePeerUrls = pURLs, pURLs
cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, pURLs[0].String())
srv, err := embed.StartEtcd(cfg)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
defer func() {
srv.Close()
}()
@@ -193,27 +182,21 @@ func createSnapshotFile(t *testing.T, kvs []kv) string {
t.Fatalf("failed to start embed.Etcd for creating snapshots")
}
- ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}}
- cli, err := integration.NewClient(t, ccfg)
- if err != nil {
- t.Fatal(err)
- }
+ ccfg := clientv3.Config{Endpoints: []string{cfg.AdvertiseClientUrls[0].String()}}
+ cli, err := integration2.NewClient(t, ccfg)
+ require.NoError(t, err)
defer cli.Close()
for i := range kvs {
ctx, cancel := context.WithTimeout(context.Background(), testutil.RequestTimeout)
_, err = cli.Put(ctx, kvs[i].k, kvs[i].v)
cancel()
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
}
sp := snapshot.NewV3(zaptest.NewLogger(t))
dpPath := filepath.Join(t.TempDir(), fmt.Sprintf("snapshot%d.db", time.Now().Nanosecond()))
- if err = sp.Save(context.Background(), ccfg, dpPath); err != nil {
- t.Fatal(err)
- }
-
+ _, err = sp.Save(context.Background(), ccfg, dpPath)
+ require.NoError(t, err)
return dpPath
}
@@ -222,8 +205,9 @@ const testClusterTkn = "tkn"
func restoreCluster(t *testing.T, clusterN int, dbPath string) (
cURLs []url.URL,
pURLs []url.URL,
- srvs []*embed.Etcd) {
- urls := newEmbedURLs(clusterN * 2)
+ srvs []*embed.Etcd,
+) {
+ urls := newEmbedURLs(t, clusterN*2)
cURLs, pURLs = urls[:clusterN], urls[clusterN:]
ics := ""
@@ -234,26 +218,25 @@ func restoreCluster(t *testing.T, clusterN int, dbPath string) (
cfgs := make([]*embed.Config, clusterN)
for i := 0; i < clusterN; i++ {
- cfg := integration.NewEmbedConfig(t, fmt.Sprintf("m%d", i))
+ cfg := integration2.NewEmbedConfig(t, fmt.Sprintf("m%d", i))
cfg.InitialClusterToken = testClusterTkn
cfg.ClusterState = "existing"
- cfg.LCUrls, cfg.ACUrls = []url.URL{cURLs[i]}, []url.URL{cURLs[i]}
- cfg.LPUrls, cfg.APUrls = []url.URL{pURLs[i]}, []url.URL{pURLs[i]}
+ cfg.ListenClientUrls, cfg.AdvertiseClientUrls = []url.URL{cURLs[i]}, []url.URL{cURLs[i]}
+ cfg.ListenPeerUrls, cfg.AdvertisePeerUrls = []url.URL{pURLs[i]}, []url.URL{pURLs[i]}
cfg.InitialCluster = ics
sp := snapshot.NewV3(
zaptest.NewLogger(t, zaptest.Level(zapcore.InfoLevel)).Named(cfg.Name).Named("sm"))
- if err := sp.Restore(snapshot.RestoreConfig{
+ err := sp.Restore(snapshot.RestoreConfig{
SnapshotPath: dbPath,
Name: cfg.Name,
OutputDataDir: cfg.Dir,
PeerURLs: []string{pURLs[i].String()},
InitialCluster: ics,
InitialClusterToken: cfg.InitialClusterToken,
- }); err != nil {
- t.Fatal(err)
- }
+ })
+ require.NoError(t, err)
cfgs[i] = cfg
}
@@ -284,11 +267,14 @@ func restoreCluster(t *testing.T, clusterN int, dbPath string) (
}
// TODO: TLS
-func newEmbedURLs(n int) (urls []url.URL) {
+func newEmbedURLs(t testutil.TB, n int) (urls []url.URL) {
urls = make([]url.URL, n)
for i := 0; i < n; i++ {
- rand.Seed(int64(time.Now().Nanosecond()))
- u, _ := url.Parse(fmt.Sprintf("unix://localhost:%d", rand.Intn(45000)))
+ l := integration2.NewLocalListener(t)
+ defer l.Close()
+
+ u, err := url.Parse(fmt.Sprintf("unix://%s", l.Addr()))
+ require.NoError(t, err)
urls[i] = *u
}
return urls
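newEmbedURLs previously seeded math/rand and guessed a port; the patch switches to reserving a real listener so the address is known to be free. A minimal sketch of the listener-based approach over TCP (the http scheme is illustrative; the test itself derives unix:// URLs from integration.NewLocalListener):

```go
package main

import (
    "fmt"
    "net"
    "net/url"
)

// freeLoopbackURL asks the kernel for an unused port by listening on :0,
// releases it, and returns a URL built from the reserved address.
func freeLoopbackURL() (*url.URL, error) {
    l, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        return nil, err
    }
    addr := l.Addr().String()
    l.Close() // small race window: another process could grab the port now
    return url.Parse(fmt.Sprintf("http://%s", addr))
}

func main() {
    u, err := freeLoopbackURL()
    if err != nil {
        panic(err)
    }
    fmt.Println(u.String())
}
```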
diff --git a/tests/integration/testing.go b/tests/integration/testing.go
deleted file mode 100644
index 8d0ee40ff21..00000000000
--- a/tests/integration/testing.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package integration
-
-import (
- "os"
- "path/filepath"
- "testing"
-
- grpc_logsettable "github.com/grpc-ecosystem/go-grpc-middleware/logging/settable"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/server/v3/embed"
- "go.etcd.io/etcd/server/v3/verify"
- "go.uber.org/zap/zapcore"
- "go.uber.org/zap/zapgrpc"
- "go.uber.org/zap/zaptest"
-)
-
-var grpc_logger grpc_logsettable.SettableLoggerV2
-
-func init() {
- grpc_logger = grpc_logsettable.ReplaceGrpcLoggerV2()
-}
-
-func BeforeTest(t testutil.TB) {
- testutil.BeforeTest(t)
-
- grpc_logger.Set(zapgrpc.NewLogger(zaptest.NewLogger(t).Named("grpc")))
-
- // Integration tests should verify written state as much as possible.
- os.Setenv(verify.ENV_VERIFY, verify.ENV_VERIFY_ALL_VALUE)
-
- previousWD, err := os.Getwd()
- if err != nil {
- t.Fatal(err)
- }
- os.Chdir(t.TempDir())
- t.Cleanup(func() {
- grpc_logger.Reset()
- os.Chdir(previousWD)
- })
-
-}
-
-func MustAbsPath(path string) string {
- abs, err := filepath.Abs(path)
- if err != nil {
- panic(err)
- }
- return abs
-}
-
-func NewEmbedConfig(t testing.TB, name string) *embed.Config {
- cfg := embed.NewConfig()
- cfg.Name = name
- lg := zaptest.NewLogger(t, zaptest.Level(zapcore.InfoLevel)).Named(cfg.Name)
- cfg.ZapLoggerBuilder = embed.NewZapLoggerBuilder(lg)
- cfg.Dir = t.TempDir()
- return cfg
-}
-
-func NewClient(t testing.TB, cfg clientv3.Config) (*clientv3.Client, error) {
- if cfg.Logger != nil {
- cfg.Logger = zaptest.NewLogger(t)
- }
- return clientv3.New(cfg)
-}
diff --git a/tests/integration/testing_test.go b/tests/integration/testing_test.go
new file mode 100644
index 00000000000..f49e58d83d7
--- /dev/null
+++ b/tests/integration/testing_test.go
@@ -0,0 +1,28 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package integration_test
+
+import (
+ "testing"
+
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
+)
+
+func TestBeforeTestWithoutLeakDetection(t *testing.T) {
+ integration2.BeforeTest(t, integration2.WithoutGoLeakDetection(), integration2.WithoutSkipInShort())
+ // Intentional leak that should get ignored
+ go func() {
+ }()
+}
diff --git a/tests/integration/tracing_test.go b/tests/integration/tracing_test.go
new file mode 100644
index 00000000000..5b87208848a
--- /dev/null
+++ b/tests/integration/tracing_test.go
@@ -0,0 +1,151 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package integration
+
+import (
+ "context"
+ "net"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+ "go.opentelemetry.io/otel/propagation"
+ sdktrace "go.opentelemetry.io/otel/sdk/trace"
+ "go.opentelemetry.io/otel/trace"
+ traceservice "go.opentelemetry.io/proto/otlp/collector/trace/v1"
+ "google.golang.org/grpc"
+
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/server/v3/embed"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
+)
+
+// TestTracing ensures that distributed tracing is setup when the feature flag is enabled.
+func TestTracing(t *testing.T) {
+ testutil.SkipTestIfShortMode(t,
+ "Wal creation tests are depending on embedded etcd server so are integration-level tests.")
+ // set up trace collector
+ listener, err := net.Listen("tcp", "localhost:")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ traceFound := make(chan struct{})
+ defer close(traceFound)
+
+ srv := grpc.NewServer()
+ traceservice.RegisterTraceServiceServer(srv, &traceServer{
+ traceFound: traceFound,
+ filterFunc: containsNodeListSpan,
+ })
+
+ go srv.Serve(listener)
+ defer srv.Stop()
+
+ cfg := integration.NewEmbedConfig(t, "default")
+ cfg.ExperimentalEnableDistributedTracing = true
+ cfg.ExperimentalDistributedTracingAddress = listener.Addr().String()
+ cfg.ExperimentalDistributedTracingServiceName = "integration-test-tracing"
+ cfg.ExperimentalDistributedTracingSamplingRatePerMillion = 100
+
+ // start an etcd instance with tracing enabled
+ etcdSrv, err := embed.StartEtcd(cfg)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer etcdSrv.Close()
+
+ select {
+ case <-etcdSrv.Server.ReadyNotify():
+ case <-time.After(5 * time.Second):
+ // The default randomized election timeout is 1 to 2s, and a single node fast-forwards it by 900ms.
+ // The timeout is raised from 1 to 5 seconds to reduce flakiness in this test.
+ t.Fatalf("failed to start embed.Etcd for test")
+ }
+
+ // create a client that has tracing enabled
+ tracer := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
+ defer tracer.Shutdown(context.TODO())
+ tp := trace.TracerProvider(tracer)
+
+ tracingOpts := []otelgrpc.Option{
+ otelgrpc.WithTracerProvider(tp),
+ otelgrpc.WithPropagators(
+ propagation.NewCompositeTextMapPropagator(
+ propagation.TraceContext{},
+ propagation.Baggage{},
+ )),
+ }
+
+ dialOptions := []grpc.DialOption{
+ grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor(tracingOpts...)),
+ grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor(tracingOpts...)),
+ }
+ ccfg := clientv3.Config{DialOptions: dialOptions, Endpoints: []string{cfg.AdvertiseClientUrls[0].String()}}
+ cli, err := integration.NewClient(t, ccfg)
+ if err != nil {
+ etcdSrv.Close()
+ t.Fatal(err)
+ }
+ defer cli.Close()
+
+ // make a request with the instrumented client
+ resp, err := cli.Get(context.TODO(), "key")
+ require.NoError(t, err)
+ require.Empty(t, resp.Kvs)
+
+ // Wait for a span to be recorded from our request
+ select {
+ case <-traceFound:
+ return
+ case <-time.After(30 * time.Second):
+ t.Fatal("Timed out waiting for trace")
+ }
+}
+
+func containsNodeListSpan(req *traceservice.ExportTraceServiceRequest) bool {
+ for _, resourceSpans := range req.GetResourceSpans() {
+ for _, attr := range resourceSpans.GetResource().GetAttributes() {
+ if attr.GetKey() != "service.name" && attr.GetValue().GetStringValue() != "integration-test-tracing" {
+ continue
+ }
+ for _, scoped := range resourceSpans.GetScopeSpans() {
+ for _, span := range scoped.GetSpans() {
+ if span.GetName() == "etcdserverpb.KV/Range" {
+ return true
+ }
+ }
+ }
+ }
+ }
+ return false
+}
+
+// traceServer implements TracesServiceServer
+type traceServer struct {
+ traceFound chan struct{}
+ filterFunc func(req *traceservice.ExportTraceServiceRequest) bool
+ traceservice.UnimplementedTraceServiceServer
+}
+
+func (t *traceServer) Export(ctx context.Context, req *traceservice.ExportTraceServiceRequest) (*traceservice.ExportTraceServiceResponse, error) {
+ emptyValue := traceservice.ExportTraceServiceResponse{}
+ if t.filterFunc(req) {
+ t.traceFound <- struct{}{}
+ }
+ return &emptyValue, nil
+}
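The collector stub signals through traceFound once the filter matches, and the test body selects on that channel against a 30-second deadline. The same wait-or-timeout idiom in isolation:

```go
package main

import (
    "fmt"
    "time"
)

// waitForSignal blocks until the collector reports a matching span or the
// deadline passes, the same select pattern TestTracing uses on traceFound.
func waitForSignal(found <-chan struct{}, timeout time.Duration) error {
    select {
    case <-found:
        return nil
    case <-time.After(timeout):
        return fmt.Errorf("timed out after %s waiting for trace", timeout)
    }
}

func main() {
    found := make(chan struct{})
    go func() {
        time.Sleep(10 * time.Millisecond) // pretend the exporter delivered a span
        found <- struct{}{}
    }()
    fmt.Println(waitForSignal(found, time.Second)) // <nil>
}
```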
diff --git a/tests/integration/utl_wal_version_test.go b/tests/integration/utl_wal_version_test.go
new file mode 100644
index 00000000000..33e1b0aecd4
--- /dev/null
+++ b/tests/integration/utl_wal_version_test.go
@@ -0,0 +1,107 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package integration
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/server/v3/embed"
+ "go.etcd.io/etcd/server/v3/storage/wal"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ framecfg "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
+)
+
+func TestEtcdVersionFromWAL(t *testing.T) {
+ testutil.SkipTestIfShortMode(t,
+ "Wal creation tests are depending on embedded etcd server so are integration-level tests.")
+ cfg := integration.NewEmbedConfig(t, "default")
+ srv, err := embed.StartEtcd(cfg)
+ if err != nil {
+ t.Fatal(err)
+ }
+ select {
+ case <-srv.Server.ReadyNotify():
+ case <-time.After(3 * time.Second):
+ t.Fatalf("failed to start embed.Etcd for test")
+ }
+
+ // When the member becomes leader, it updates the cluster version to the
+ // cluster's minimum version. Since this happens asynchronously, it may not
+ // have completed before the server is closed, so wait for it to become
+ // ready.
+ if err = waitForClusterVersionReady(srv); err != nil {
+ srv.Close()
+ t.Fatalf("failed to wait for cluster version to become ready: %v", err)
+ }
+
+ ccfg := clientv3.Config{Endpoints: []string{cfg.AdvertiseClientUrls[0].String()}}
+ cli, err := integration.NewClient(t, ccfg)
+ if err != nil {
+ srv.Close()
+ t.Fatal(err)
+ }
+
+ // Once the cluster version has been updated, every entity's storage
+ // version should be aligned with the cluster version.
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.RequestTimeout)
+ _, err = cli.AuthStatus(ctx)
+ cancel()
+ if err != nil {
+ srv.Close()
+ t.Fatalf("failed to get auth status: %v", err)
+ }
+
+ cli.Close()
+ srv.Close()
+
+ w, err := wal.Open(zap.NewNop(), cfg.Dir+"/member/wal", walpb.Snapshot{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer w.Close()
+
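+ // Reading the WAL back should report 3.6 as the minimal etcd version required to process its entries.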
+ walVersion, err := wal.ReadWALVersion(w)
+ if err != nil {
+ t.Fatal(err)
+ }
+ assert.Equal(t, &semver.Version{Major: 3, Minor: 6}, walVersion.MinimalEtcdVersion())
+}
+
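+// waitForClusterVersionReady polls until the server reports a non-nil cluster
+// version or the 10-second timeout expires.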
+func waitForClusterVersionReady(srv *embed.Etcd) error {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ if srv.Server.ClusterVersion() != nil {
+ return nil
+ }
+ time.Sleep(framecfg.TickDuration)
+ }
+}
diff --git a/tests/integration/v2_http_kv_test.go b/tests/integration/v2_http_kv_test.go
deleted file mode 100644
index 36f7cea82f5..00000000000
--- a/tests/integration/v2_http_kv_test.go
+++ /dev/null
@@ -1,1152 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package integration
-
-import (
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "reflect"
- "strings"
- "testing"
- "time"
-
- "go.etcd.io/etcd/client/pkg/v3/transport"
-)
-
-func TestV2Set(t *testing.T) {
- BeforeTest(t)
- cl := NewCluster(t, 1)
- cl.Launch(t)
- defer cl.Terminate(t)
-
- u := cl.URL(0)
- tc := NewTestClient()
- v := url.Values{}
- v.Set("value", "bar")
- vAndNoValue := url.Values{}
- vAndNoValue.Set("value", "bar")
- vAndNoValue.Set("noValueOnSuccess", "true")
-
- tests := []struct {
- relativeURL string
- value url.Values
- wStatus int
- w string
- }{
- {
- "/v2/keys/foo/bar",
- v,
- http.StatusCreated,
- `{"action":"set","node":{"key":"/foo/bar","value":"bar","modifiedIndex":4,"createdIndex":4}}`,
- },
- {
- "/v2/keys/foodir?dir=true",
- url.Values{},
- http.StatusCreated,
- `{"action":"set","node":{"key":"/foodir","dir":true,"modifiedIndex":5,"createdIndex":5}}`,
- },
- {
- "/v2/keys/fooempty",
- url.Values(map[string][]string{"value": {""}}),
- http.StatusCreated,
- `{"action":"set","node":{"key":"/fooempty","value":"","modifiedIndex":6,"createdIndex":6}}`,
- },
- {
- "/v2/keys/foo/novalue",
- vAndNoValue,
- http.StatusCreated,
- `{"action":"set"}`,
- },
- }
-
- for i, tt := range tests {
- resp, err := tc.PutForm(fmt.Sprintf("%s%s", u, tt.relativeURL), tt.value)
- if err != nil {
- t.Errorf("#%d: err = %v, want nil", i, err)
- }
- g := string(tc.ReadBody(resp))
- w := tt.w + "\n"
- if g != w {
- t.Errorf("#%d: body = %v, want %v", i, g, w)
- }
- if resp.StatusCode != tt.wStatus {
- t.Errorf("#%d: status = %d, want %d", i, resp.StatusCode, tt.wStatus)
- }
- }
-}
-
-func TestV2CreateUpdate(t *testing.T) {
- BeforeTest(t)
- cl := NewCluster(t, 1)
- cl.Launch(t)
- defer cl.Terminate(t)
-
- u := cl.URL(0)
- tc := NewTestClient()
-
- tests := []struct {
- relativeURL string
- value url.Values
- wStatus int
- w map[string]interface{}
- }{
- // key with ttl
- {
- "/v2/keys/ttl/foo",
- url.Values(map[string][]string{"value": {"XXX"}, "ttl": {"20"}}),
- http.StatusCreated,
- map[string]interface{}{
- "node": map[string]interface{}{
- "value": "XXX",
- "ttl": float64(20),
- },
- },
- },
- // key with bad ttl
- {
- "/v2/keys/ttl/foo",
- url.Values(map[string][]string{"value": {"XXX"}, "ttl": {"bad_ttl"}}),
- http.StatusBadRequest,
- map[string]interface{}{
- "errorCode": float64(202),
- "message": "The given TTL in POST form is not a number",
- },
- },
- // create key
- {
- "/v2/keys/create/foo",
- url.Values(map[string][]string{"value": {"XXX"}, "prevExist": {"false"}}),
- http.StatusCreated,
- map[string]interface{}{
- "node": map[string]interface{}{
- "value": "XXX",
- },
- },
- },
- // created key failed
- {
- "/v2/keys/create/foo",
- url.Values(map[string][]string{"value": {"XXX"}, "prevExist": {"false"}}),
- http.StatusPreconditionFailed,
- map[string]interface{}{
- "errorCode": float64(105),
- "message": "Key already exists",
- "cause": "/create/foo",
- },
- },
- // update the newly created key with ttl
- {
- "/v2/keys/create/foo",
- url.Values(map[string][]string{"value": {"YYY"}, "prevExist": {"true"}, "ttl": {"20"}}),
- http.StatusOK,
- map[string]interface{}{
- "node": map[string]interface{}{
- "value": "YYY",
- "ttl": float64(20),
- },
- "action": "update",
- },
- },
- // update the ttl to none
- {
- "/v2/keys/create/foo",
- url.Values(map[string][]string{"value": {"ZZZ"}, "prevExist": {"true"}}),
- http.StatusOK,
- map[string]interface{}{
- "node": map[string]interface{}{
- "value": "ZZZ",
- },
- "action": "update",
- },
- },
- // update on a non-existing key
- {
- "/v2/keys/nonexist",
- url.Values(map[string][]string{"value": {"XXX"}, "prevExist": {"true"}}),
- http.StatusNotFound,
- map[string]interface{}{
- "errorCode": float64(100),
- "message": "Key not found",
- "cause": "/nonexist",
- },
- },
- // create with no value on success
- {
- "/v2/keys/create/novalue",
- url.Values(map[string][]string{"value": {"XXX"}, "prevExist": {"false"}, "noValueOnSuccess": {"true"}}),
- http.StatusCreated,
- map[string]interface{}{},
- },
- // update with no value on success
- {
- "/v2/keys/create/novalue",
- url.Values(map[string][]string{"value": {"XXX"}, "prevExist": {"true"}, "noValueOnSuccess": {"true"}}),
- http.StatusOK,
- map[string]interface{}{},
- },
- // created key failed with no value on success
- {
- "/v2/keys/create/foo",
- url.Values(map[string][]string{"value": {"XXX"}, "prevExist": {"false"}, "noValueOnSuccess": {"true"}}),
- http.StatusPreconditionFailed,
- map[string]interface{}{
- "errorCode": float64(105),
- "message": "Key already exists",
- "cause": "/create/foo",
- },
- },
- }
-
- for i, tt := range tests {
- resp, err := tc.PutForm(fmt.Sprintf("%s%s", u, tt.relativeURL), tt.value)
- if err != nil {
- t.Fatalf("#%d: put err = %v, want nil", i, err)
- }
- if resp.StatusCode != tt.wStatus {
- t.Errorf("#%d: status = %d, want %d", i, resp.StatusCode, tt.wStatus)
- }
- if err := checkBody(tc.ReadBodyJSON(resp), tt.w); err != nil {
- t.Errorf("#%d: %v", i, err)
- }
- }
-}
-
-func TestV2CAS(t *testing.T) {
- BeforeTest(t)
- cl := NewCluster(t, 1)
- cl.Launch(t)
- defer cl.Terminate(t)
-
- u := cl.URL(0)
- tc := NewTestClient()
-
- tests := []struct {
- relativeURL string
- value url.Values
- wStatus int
- w map[string]interface{}
- }{
- {
- "/v2/keys/cas/foo",
- url.Values(map[string][]string{"value": {"XXX"}}),
- http.StatusCreated,
- nil,
- },
- {
- "/v2/keys/cas/foo",
- url.Values(map[string][]string{"value": {"YYY"}, "prevIndex": {"4"}}),
- http.StatusOK,
- map[string]interface{}{
- "node": map[string]interface{}{
- "value": "YYY",
- "modifiedIndex": float64(5),
- },
- "action": "compareAndSwap",
- },
- },
- {
- "/v2/keys/cas/foo",
- url.Values(map[string][]string{"value": {"YYY"}, "prevIndex": {"10"}}),
- http.StatusPreconditionFailed,
- map[string]interface{}{
- "errorCode": float64(101),
- "message": "Compare failed",
- "cause": "[10 != 5]",
- "index": float64(5),
- },
- },
- {
- "/v2/keys/cas/foo",
- url.Values(map[string][]string{"value": {"YYY"}, "prevIndex": {"bad_index"}}),
- http.StatusBadRequest,
- map[string]interface{}{
- "errorCode": float64(203),
- "message": "The given index in POST form is not a number",
- },
- },
- {
- "/v2/keys/cas/foo",
- url.Values(map[string][]string{"value": {"ZZZ"}, "prevValue": {"YYY"}}),
- http.StatusOK,
- map[string]interface{}{
- "node": map[string]interface{}{
- "value": "ZZZ",
- },
- "action": "compareAndSwap",
- },
- },
- {
- "/v2/keys/cas/foo",
- url.Values(map[string][]string{"value": {"XXX"}, "prevValue": {"bad_value"}}),
- http.StatusPreconditionFailed,
- map[string]interface{}{
- "errorCode": float64(101),
- "message": "Compare failed",
- "cause": "[bad_value != ZZZ]",
- },
- },
- // prevValue is required
- {
- "/v2/keys/cas/foo",
- url.Values(map[string][]string{"value": {"XXX"}, "prevValue": {""}}),
- http.StatusBadRequest,
- map[string]interface{}{
- "errorCode": float64(201),
- },
- },
- {
- "/v2/keys/cas/foo",
- url.Values(map[string][]string{"value": {"XXX"}, "prevValue": {"bad_value"}, "prevIndex": {"100"}}),
- http.StatusPreconditionFailed,
- map[string]interface{}{
- "errorCode": float64(101),
- "message": "Compare failed",
- "cause": "[bad_value != ZZZ] [100 != 6]",
- },
- },
- {
- "/v2/keys/cas/foo",
- url.Values(map[string][]string{"value": {"XXX"}, "prevValue": {"ZZZ"}, "prevIndex": {"100"}}),
- http.StatusPreconditionFailed,
- map[string]interface{}{
- "errorCode": float64(101),
- "message": "Compare failed",
- "cause": "[100 != 6]",
- },
- },
- {
- "/v2/keys/cas/foo",
- url.Values(map[string][]string{"value": {"XXX"}, "prevValue": {"bad_value"}, "prevIndex": {"6"}}),
- http.StatusPreconditionFailed,
- map[string]interface{}{
- "errorCode": float64(101),
- "message": "Compare failed",
- "cause": "[bad_value != ZZZ]",
- },
- },
- {
- "/v2/keys/cas/foo",
- url.Values(map[string][]string{"value": {"YYY"}, "prevIndex": {"6"}, "noValueOnSuccess": {"true"}}),
- http.StatusOK,
- map[string]interface{}{
- "action": "compareAndSwap",
- },
- },
- {
- "/v2/keys/cas/foo",
- url.Values(map[string][]string{"value": {"YYY"}, "prevIndex": {"10"}, "noValueOnSuccess": {"true"}}),
- http.StatusPreconditionFailed,
- map[string]interface{}{
- "errorCode": float64(101),
- "message": "Compare failed",
- "cause": "[10 != 7]",
- "index": float64(7),
- },
- },
- }
-
- for i, tt := range tests {
- resp, err := tc.PutForm(fmt.Sprintf("%s%s", u, tt.relativeURL), tt.value)
- if err != nil {
- t.Fatalf("#%d: put err = %v, want nil", i, err)
- }
- if resp.StatusCode != tt.wStatus {
- t.Errorf("#%d: status = %d, want %d", i, resp.StatusCode, tt.wStatus)
- }
- if err := checkBody(tc.ReadBodyJSON(resp), tt.w); err != nil {
- t.Errorf("#%d: %v", i, err)
- }
- }
-}
-
-func TestV2Delete(t *testing.T) {
- BeforeTest(t)
- cl := NewCluster(t, 1)
- cl.Launch(t)
- defer cl.Terminate(t)
-
- u := cl.URL(0)
- tc := NewTestClient()
-
- v := url.Values{}
- v.Set("value", "XXX")
- r, err := tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foo"), v)
- if err != nil {
- t.Error(err)
- }
- r.Body.Close()
- r, err = tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/emptydir?dir=true"), v)
- if err != nil {
- t.Error(err)
- }
- r.Body.Close()
- r, err = tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foodir/bar?dir=true"), v)
- if err != nil {
- t.Error(err)
- }
- r.Body.Close()
-
- tests := []struct {
- relativeURL string
- wStatus int
- w map[string]interface{}
- }{
- {
- "/v2/keys/foo",
- http.StatusOK,
- map[string]interface{}{
- "node": map[string]interface{}{
- "key": "/foo",
- },
- "prevNode": map[string]interface{}{
- "key": "/foo",
- "value": "XXX",
- },
- "action": "delete",
- },
- },
- {
- "/v2/keys/emptydir",
- http.StatusForbidden,
- map[string]interface{}{
- "errorCode": float64(102),
- "message": "Not a file",
- "cause": "/emptydir",
- },
- },
- {
- "/v2/keys/emptydir?dir=true",
- http.StatusOK,
- nil,
- },
- {
- "/v2/keys/foodir?dir=true",
- http.StatusForbidden,
- map[string]interface{}{
- "errorCode": float64(108),
- "message": "Directory not empty",
- "cause": "/foodir",
- },
- },
- {
- "/v2/keys/foodir?recursive=true",
- http.StatusOK,
- map[string]interface{}{
- "node": map[string]interface{}{
- "key": "/foodir",
- "dir": true,
- },
- "prevNode": map[string]interface{}{
- "key": "/foodir",
- "dir": true,
- },
- "action": "delete",
- },
- },
- }
-
- for i, tt := range tests {
- resp, err := tc.DeleteForm(fmt.Sprintf("%s%s", u, tt.relativeURL), nil)
- if err != nil {
- t.Fatalf("#%d: delete err = %v, want nil", i, err)
- }
- if resp.StatusCode != tt.wStatus {
- t.Errorf("#%d: status = %d, want %d", i, resp.StatusCode, tt.wStatus)
- }
- if err := checkBody(tc.ReadBodyJSON(resp), tt.w); err != nil {
- t.Errorf("#%d: %v", i, err)
- }
- }
-}
-
-func TestV2CAD(t *testing.T) {
- BeforeTest(t)
- cl := NewCluster(t, 1)
- cl.Launch(t)
- defer cl.Terminate(t)
-
- u := cl.URL(0)
- tc := NewTestClient()
-
- v := url.Values{}
- v.Set("value", "XXX")
- r, err := tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foo"), v)
- if err != nil {
- t.Error(err)
- }
- r.Body.Close()
-
- r, err = tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foovalue"), v)
- if err != nil {
- t.Error(err)
- }
- r.Body.Close()
-
- tests := []struct {
- relativeURL string
- wStatus int
- w map[string]interface{}
- }{
- {
- "/v2/keys/foo?prevIndex=100",
- http.StatusPreconditionFailed,
- map[string]interface{}{
- "errorCode": float64(101),
- "message": "Compare failed",
- "cause": "[100 != 4]",
- },
- },
- {
- "/v2/keys/foo?prevIndex=bad_index",
- http.StatusBadRequest,
- map[string]interface{}{
- "errorCode": float64(203),
- "message": "The given index in POST form is not a number",
- },
- },
- {
- "/v2/keys/foo?prevIndex=4",
- http.StatusOK,
- map[string]interface{}{
- "node": map[string]interface{}{
- "key": "/foo",
- "modifiedIndex": float64(6),
- },
- "action": "compareAndDelete",
- },
- },
- {
- "/v2/keys/foovalue?prevValue=YYY",
- http.StatusPreconditionFailed,
- map[string]interface{}{
- "errorCode": float64(101),
- "message": "Compare failed",
- "cause": "[YYY != XXX]",
- },
- },
- {
- "/v2/keys/foovalue?prevValue=",
- http.StatusBadRequest,
- map[string]interface{}{
- "errorCode": float64(201),
- "cause": `"prevValue" cannot be empty`,
- },
- },
- {
- "/v2/keys/foovalue?prevValue=XXX",
- http.StatusOK,
- map[string]interface{}{
- "node": map[string]interface{}{
- "key": "/foovalue",
- "modifiedIndex": float64(7),
- },
- "action": "compareAndDelete",
- },
- },
- }
-
- for i, tt := range tests {
- resp, err := tc.DeleteForm(fmt.Sprintf("%s%s", u, tt.relativeURL), nil)
- if err != nil {
- t.Fatalf("#%d: delete err = %v, want nil", i, err)
- }
- if resp.StatusCode != tt.wStatus {
- t.Errorf("#%d: status = %d, want %d", i, resp.StatusCode, tt.wStatus)
- }
- if err := checkBody(tc.ReadBodyJSON(resp), tt.w); err != nil {
- t.Errorf("#%d: %v", i, err)
- }
- }
-}
-
-func TestV2Unique(t *testing.T) {
- BeforeTest(t)
- cl := NewCluster(t, 1)
- cl.Launch(t)
- defer cl.Terminate(t)
-
- u := cl.URL(0)
- tc := NewTestClient()
-
- tests := []struct {
- relativeURL string
- value url.Values
- wStatus int
- w map[string]interface{}
- }{
- {
- "/v2/keys/foo",
- url.Values(map[string][]string{"value": {"XXX"}}),
- http.StatusCreated,
- map[string]interface{}{
- "node": map[string]interface{}{
- "key": "/foo/00000000000000000004",
- "value": "XXX",
- },
- "action": "create",
- },
- },
- {
- "/v2/keys/foo",
- url.Values(map[string][]string{"value": {"XXX"}}),
- http.StatusCreated,
- map[string]interface{}{
- "node": map[string]interface{}{
- "key": "/foo/00000000000000000005",
- "value": "XXX",
- },
- "action": "create",
- },
- },
- {
- "/v2/keys/bar",
- url.Values(map[string][]string{"value": {"XXX"}}),
- http.StatusCreated,
- map[string]interface{}{
- "node": map[string]interface{}{
- "key": "/bar/00000000000000000006",
- "value": "XXX",
- },
- "action": "create",
- },
- },
- }
-
- for i, tt := range tests {
- resp, err := tc.PostForm(fmt.Sprintf("%s%s", u, tt.relativeURL), tt.value)
- if err != nil {
- t.Fatalf("#%d: post err = %v, want nil", i, err)
- }
- if resp.StatusCode != tt.wStatus {
- t.Errorf("#%d: status = %d, want %d", i, resp.StatusCode, tt.wStatus)
- }
- if err := checkBody(tc.ReadBodyJSON(resp), tt.w); err != nil {
- t.Errorf("#%d: %v", i, err)
- }
- }
-}
-
-func TestV2Get(t *testing.T) {
- BeforeTest(t)
- cl := NewCluster(t, 1)
- cl.Launch(t)
- defer cl.Terminate(t)
-
- u := cl.URL(0)
- tc := NewTestClient()
-
- v := url.Values{}
- v.Set("value", "XXX")
- r, err := tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foo/bar/zar"), v)
- if err != nil {
- t.Error(err)
- }
- r.Body.Close()
-
- tests := []struct {
- relativeURL string
- wStatus int
- w map[string]interface{}
- }{
- {
- "/v2/keys/foo/bar/zar",
- http.StatusOK,
- map[string]interface{}{
- "node": map[string]interface{}{
- "key": "/foo/bar/zar",
- "value": "XXX",
- },
- "action": "get",
- },
- },
- {
- "/v2/keys/foo",
- http.StatusOK,
- map[string]interface{}{
- "node": map[string]interface{}{
- "key": "/foo",
- "dir": true,
- "nodes": []interface{}{
- map[string]interface{}{
- "key": "/foo/bar",
- "dir": true,
- "createdIndex": float64(4),
- "modifiedIndex": float64(4),
- },
- },
- },
- "action": "get",
- },
- },
- {
- "/v2/keys/foo?recursive=true",
- http.StatusOK,
- map[string]interface{}{
- "node": map[string]interface{}{
- "key": "/foo",
- "dir": true,
- "nodes": []interface{}{
- map[string]interface{}{
- "key": "/foo/bar",
- "dir": true,
- "createdIndex": float64(4),
- "modifiedIndex": float64(4),
- "nodes": []interface{}{
- map[string]interface{}{
- "key": "/foo/bar/zar",
- "value": "XXX",
- "createdIndex": float64(4),
- "modifiedIndex": float64(4),
- },
- },
- },
- },
- },
- "action": "get",
- },
- },
- }
-
- for i, tt := range tests {
- resp, err := tc.Get(fmt.Sprintf("%s%s", u, tt.relativeURL))
- if err != nil {
- t.Fatalf("#%d: get err = %v, want nil", i, err)
- }
- if resp.StatusCode != tt.wStatus {
- t.Errorf("#%d: status = %d, want %d", i, resp.StatusCode, tt.wStatus)
- }
- if resp.Header.Get("Content-Type") != "application/json" {
- t.Errorf("#%d: header = %v, want %v", i, resp.Header.Get("Content-Type"), "application/json")
- }
- if err := checkBody(tc.ReadBodyJSON(resp), tt.w); err != nil {
- t.Errorf("#%d: %v", i, err)
- }
- }
-}
-
-func TestV2QuorumGet(t *testing.T) {
- BeforeTest(t)
- cl := NewCluster(t, 1)
- cl.Launch(t)
- defer cl.Terminate(t)
-
- u := cl.URL(0)
- tc := NewTestClient()
-
- v := url.Values{}
- v.Set("value", "XXX")
- r, err := tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foo/bar/zar?quorum=true"), v)
- if err != nil {
- t.Error(err)
- }
- r.Body.Close()
-
- tests := []struct {
- relativeURL string
- wStatus int
- w map[string]interface{}
- }{
- {
- "/v2/keys/foo/bar/zar",
- http.StatusOK,
- map[string]interface{}{
- "node": map[string]interface{}{
- "key": "/foo/bar/zar",
- "value": "XXX",
- },
- "action": "get",
- },
- },
- {
- "/v2/keys/foo",
- http.StatusOK,
- map[string]interface{}{
- "node": map[string]interface{}{
- "key": "/foo",
- "dir": true,
- "nodes": []interface{}{
- map[string]interface{}{
- "key": "/foo/bar",
- "dir": true,
- "createdIndex": float64(4),
- "modifiedIndex": float64(4),
- },
- },
- },
- "action": "get",
- },
- },
- {
- "/v2/keys/foo?recursive=true",
- http.StatusOK,
- map[string]interface{}{
- "node": map[string]interface{}{
- "key": "/foo",
- "dir": true,
- "nodes": []interface{}{
- map[string]interface{}{
- "key": "/foo/bar",
- "dir": true,
- "createdIndex": float64(4),
- "modifiedIndex": float64(4),
- "nodes": []interface{}{
- map[string]interface{}{
- "key": "/foo/bar/zar",
- "value": "XXX",
- "createdIndex": float64(4),
- "modifiedIndex": float64(4),
- },
- },
- },
- },
- },
- "action": "get",
- },
- },
- }
-
- for i, tt := range tests {
- resp, err := tc.Get(fmt.Sprintf("%s%s", u, tt.relativeURL))
- if err != nil {
- t.Fatalf("#%d: get err = %v, want nil", i, err)
- }
- if resp.StatusCode != tt.wStatus {
- t.Errorf("#%d: status = %d, want %d", i, resp.StatusCode, tt.wStatus)
- }
- if resp.Header.Get("Content-Type") != "application/json" {
- t.Errorf("#%d: header = %v, want %v", i, resp.Header.Get("Content-Type"), "application/json")
- }
- if err := checkBody(tc.ReadBodyJSON(resp), tt.w); err != nil {
- t.Errorf("#%d: %v", i, err)
- }
- }
-}
-
-func TestV2Watch(t *testing.T) {
- BeforeTest(t)
- cl := NewCluster(t, 1)
- cl.Launch(t)
- defer cl.Terminate(t)
-
- u := cl.URL(0)
- tc := NewTestClient()
-
- watchResp, err := tc.Get(fmt.Sprintf("%s%s", u, "/v2/keys/foo/bar?wait=true"))
- if err != nil {
- t.Fatalf("watch err = %v, want nil", err)
- }
-
- // Set a value.
- v := url.Values{}
- v.Set("value", "XXX")
- resp, err := tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foo/bar"), v)
- if err != nil {
- t.Fatalf("put err = %v, want nil", err)
- }
- resp.Body.Close()
-
- body := tc.ReadBodyJSON(watchResp)
- w := map[string]interface{}{
- "node": map[string]interface{}{
- "key": "/foo/bar",
- "value": "XXX",
- "modifiedIndex": float64(4),
- },
- "action": "set",
- }
-
- if err := checkBody(body, w); err != nil {
- t.Error(err)
- }
-}
-
-func TestV2WatchWithIndex(t *testing.T) {
- BeforeTest(t)
- cl := NewCluster(t, 1)
- cl.Launch(t)
- defer cl.Terminate(t)
-
- u := cl.URL(0)
- tc := NewTestClient()
-
- var body map[string]interface{}
- c := make(chan bool, 1)
- go func() {
- resp, err := tc.Get(fmt.Sprintf("%s%s", u, "/v2/keys/foo/bar?wait=true&waitIndex=5"))
- if err != nil {
- t.Errorf("watch err = %v, want nil", err)
- }
- body = tc.ReadBodyJSON(resp)
- c <- true
- }()
-
- select {
- case <-c:
- t.Fatal("should not get the watch result")
- case <-time.After(time.Millisecond):
- }
-
- // Set a value (before given index).
- v := url.Values{}
- v.Set("value", "XXX")
- resp, err := tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foo/bar"), v)
- if err != nil {
- t.Fatalf("put err = %v, want nil", err)
- }
- resp.Body.Close()
-
- select {
- case <-c:
- t.Fatal("should not get the watch result")
- case <-time.After(time.Millisecond):
- }
-
- // Set a value (before given index).
- resp, err = tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foo/bar"), v)
- if err != nil {
- t.Fatalf("put err = %v, want nil", err)
- }
- resp.Body.Close()
-
- select {
- case <-c:
- case <-time.After(time.Second):
- t.Fatal("cannot get watch result")
- }
-
- w := map[string]interface{}{
- "node": map[string]interface{}{
- "key": "/foo/bar",
- "value": "XXX",
- "modifiedIndex": float64(5),
- },
- "action": "set",
- }
- if err := checkBody(body, w); err != nil {
- t.Error(err)
- }
-}
-
-func TestV2WatchKeyInDir(t *testing.T) {
- BeforeTest(t)
- cl := NewCluster(t, 1)
- cl.Launch(t)
- defer cl.Terminate(t)
-
- u := cl.URL(0)
- tc := NewTestClient()
-
- var body map[string]interface{}
- c := make(chan bool, 1)
-
- // Create an expiring directory
- v := url.Values{}
- v.Set("dir", "true")
- v.Set("ttl", "1")
- resp, err := tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/keyindir"), v)
- if err != nil {
- t.Fatalf("put err = %v, want nil", err)
- }
- resp.Body.Close()
-
- // Create a permanent node within the directory
- v = url.Values{}
- v.Set("value", "XXX")
- resp, err = tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/keyindir/bar"), v)
- if err != nil {
- t.Fatalf("put err = %v, want nil", err)
- }
- resp.Body.Close()
-
- go func() {
- // Expect a notification when watching the node
- resp, err := tc.Get(fmt.Sprintf("%s%s", u, "/v2/keys/keyindir/bar?wait=true"))
- if err != nil {
- t.Errorf("watch err = %v, want nil", err)
- }
- body = tc.ReadBodyJSON(resp)
- c <- true
- }()
-
- select {
- case <-c:
- // 1s ttl + 0.5s sync delay + 1.5s disk and network delay
- // We set that long disk and network delay because travis may be slow
- // when do system calls.
- case <-time.After(3 * time.Second):
- t.Fatal("timed out waiting for watch result")
- }
-
- w := map[string]interface{}{
- "node": map[string]interface{}{
- "key": "/keyindir",
- },
- "action": "expire",
- }
- if err := checkBody(body, w); err != nil {
- t.Error(err)
- }
-}
-
-func TestV2Head(t *testing.T) {
- BeforeTest(t)
- cl := NewCluster(t, 1)
- cl.Launch(t)
- defer cl.Terminate(t)
-
- u := cl.URL(0)
- tc := NewTestClient()
-
- v := url.Values{}
- v.Set("value", "XXX")
- fullURL := fmt.Sprintf("%s%s", u, "/v2/keys/foo/bar")
- resp, err := tc.Head(fullURL)
- if err != nil {
- t.Fatalf("head err = %v, want nil", err)
- }
- resp.Body.Close()
- if resp.StatusCode != http.StatusNotFound {
- t.Errorf("status = %d, want %d", resp.StatusCode, http.StatusNotFound)
- }
- if resp.ContentLength <= 0 {
- t.Errorf("ContentLength = %d, want > 0", resp.ContentLength)
- }
-
- resp, err = tc.PutForm(fullURL, v)
- if err != nil {
- t.Fatalf("put err = %v, want nil", err)
- }
- resp.Body.Close()
-
- resp, err = tc.Head(fullURL)
- if err != nil {
- t.Fatalf("head err = %v, want nil", err)
- }
- resp.Body.Close()
- if resp.StatusCode != http.StatusOK {
- t.Errorf("status = %d, want %d", resp.StatusCode, http.StatusOK)
- }
- if resp.ContentLength <= 0 {
- t.Errorf("ContentLength = %d, want > 0", resp.ContentLength)
- }
-}
-
-func checkBody(body map[string]interface{}, w map[string]interface{}) error {
- if body["node"] != nil {
- if w["node"] != nil {
- wn := w["node"].(map[string]interface{})
- n := body["node"].(map[string]interface{})
- for k := range n {
- if wn[k] == nil {
- delete(n, k)
- }
- }
- body["node"] = n
- }
- if w["prevNode"] != nil {
- wn := w["prevNode"].(map[string]interface{})
- n := body["prevNode"].(map[string]interface{})
- for k := range n {
- if wn[k] == nil {
- delete(n, k)
- }
- }
- body["prevNode"] = n
- }
- }
- for k, v := range w {
- g := body[k]
- if !reflect.DeepEqual(g, v) {
- return fmt.Errorf("%v = %+v, want %+v", k, g, v)
- }
- }
- return nil
-}
-
-type testHttpClient struct {
- *http.Client
-}
-
-// Creates a new HTTP client with KeepAlive disabled.
-func NewTestClient() *testHttpClient {
- tr, _ := transport.NewTransport(transport.TLSInfo{}, time.Second)
- tr.DisableKeepAlives = true
- return &testHttpClient{&http.Client{Transport: tr}}
-}
-
-// Reads the body from the response and closes it.
-func (t *testHttpClient) ReadBody(resp *http.Response) []byte {
- if resp == nil {
- return []byte{}
- }
- body, _ := ioutil.ReadAll(resp.Body)
- resp.Body.Close()
- return body
-}
-
-// Reads the body from the response and parses it as JSON.
-func (t *testHttpClient) ReadBodyJSON(resp *http.Response) map[string]interface{} {
- m := make(map[string]interface{})
- b := t.ReadBody(resp)
- if err := json.Unmarshal(b, &m); err != nil {
- panic(fmt.Sprintf("HTTP body JSON parse error: %v: %s", err, string(b)))
- }
- return m
-}
-
-func (t *testHttpClient) Head(url string) (*http.Response, error) {
- return t.send("HEAD", url, "application/json", nil)
-}
-
-func (t *testHttpClient) Get(url string) (*http.Response, error) {
- return t.send("GET", url, "application/json", nil)
-}
-
-func (t *testHttpClient) Post(url string, bodyType string, body io.Reader) (*http.Response, error) {
- return t.send("POST", url, bodyType, body)
-}
-
-func (t *testHttpClient) PostForm(url string, data url.Values) (*http.Response, error) {
- return t.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
-}
-
-func (t *testHttpClient) Put(url string, bodyType string, body io.Reader) (*http.Response, error) {
- return t.send("PUT", url, bodyType, body)
-}
-
-func (t *testHttpClient) PutForm(url string, data url.Values) (*http.Response, error) {
- return t.Put(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
-}
-
-func (t *testHttpClient) Delete(url string, bodyType string, body io.Reader) (*http.Response, error) {
- return t.send("DELETE", url, bodyType, body)
-}
-
-func (t *testHttpClient) DeleteForm(url string, data url.Values) (*http.Response, error) {
- return t.Delete(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
-}
-
-func (t *testHttpClient) send(method string, url string, bodyType string, body io.Reader) (*http.Response, error) {
- req, err := http.NewRequest(method, url, body)
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", bodyType)
- return t.Do(req)
-}
diff --git a/tests/integration/v2store/main_test.go b/tests/integration/v2store/main_test.go
index 89026c5a773..b5534268a0b 100644
--- a/tests/integration/v2store/main_test.go
+++ b/tests/integration/v2store/main_test.go
@@ -20,8 +20,6 @@ import (
"go.etcd.io/etcd/client/pkg/v3/testutil"
)
-//var endpoints []string
-
func TestMain(m *testing.M) {
//cfg := integration.ClusterConfig{Size: 1}
//clus := integration.NewClusterV3(nil, &cfg)
diff --git a/tests/integration/v2store/store_tag_not_v2v3_test.go b/tests/integration/v2store/store_tag_not_v2v3_test.go
deleted file mode 100644
index fbc63b94a00..00000000000
--- a/tests/integration/v2store/store_tag_not_v2v3_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !v2v3
-// +build !v2v3
-
-package v2store_test
-
-import (
- "testing"
-
- "go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
- "go.etcd.io/etcd/tests/v3/integration"
-)
-
-type v2TestStore struct {
- v2store.Store
-}
-
-func (s *v2TestStore) Close() {}
-
-func newTestStore(t *testing.T, ns ...string) StoreCloser {
- integration.BeforeTest(t)
- if len(ns) == 0 {
- t.Logf("new v2 store with no namespace")
- }
- return &v2TestStore{v2store.New(ns...)}
-}
-
-// Ensure that the store can recover from a previously saved state.
-func TestStoreRecover(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
- var eidx uint64 = 4
- s.Create("/foo", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- s.Create("/foo/x", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- s.Update("/foo/x", "barbar", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- s.Create("/foo/y", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- b, err := s.Save()
- testutil.AssertNil(t, err)
-
- s2 := newTestStore(t)
- s2.Recovery(b)
-
- e, err := s.Get("/foo/x", false, false)
- testutil.AssertEqual(t, e.Node.CreatedIndex, uint64(2))
- testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(3))
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, *e.Node.Value, "barbar")
-
- e, err = s.Get("/foo/y", false, false)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, *e.Node.Value, "baz")
-}
diff --git a/tests/integration/v2store/store_tag_test.go b/tests/integration/v2store/store_tag_test.go
new file mode 100644
index 00000000000..b735104e822
--- /dev/null
+++ b/tests/integration/v2store/store_tag_test.go
@@ -0,0 +1,53 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
+)
+
+// TestStoreRecover ensures that the store can recover from a previously saved state.
+func TestStoreRecover(t *testing.T) {
+ integration2.BeforeTest(t)
+ s := v2store.New()
+ var eidx uint64 = 4
+ s.Create("/foo", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
+ s.Create("/foo/x", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
+ s.Update("/foo/x", "barbar", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
+ s.Create("/foo/y", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
+ b, err := s.Save()
+ require.NoError(t, err)
+
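+ // Recover a second store from the serialized state.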
+ s2 := v2store.New()
+ s2.Recovery(b)
+
+ e, err := s.Get("/foo/x", false, false)
+ assert.Equal(t, uint64(2), e.Node.CreatedIndex)
+ assert.Equal(t, uint64(3), e.Node.ModifiedIndex)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ require.NoError(t, err)
+ assert.Equal(t, "barbar", *e.Node.Value)
+
+ e, err = s.Get("/foo/y", false, false)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ require.NoError(t, err)
+ assert.Equal(t, "baz", *e.Node.Value)
+}
diff --git a/tests/integration/v2store/store_tag_v2v3_test.go b/tests/integration/v2store/store_tag_v2v3_test.go
deleted file mode 100644
index f4e8e21fba3..00000000000
--- a/tests/integration/v2store/store_tag_v2v3_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build v2v3
-// +build v2v3
-
-package v2store_test
-
-import (
- "testing"
-
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2v3"
- "go.etcd.io/etcd/tests/v3/integration"
-)
-
-type v2v3TestStore struct {
- v2store.Store
- clus *integration.ClusterV3
- t *testing.T
-}
-
-func (s *v2v3TestStore) Close() { s.clus.Terminate(s.t) }
-
-func newTestStore(t *testing.T, ns ...string) StoreCloser {
- integration.BeforeTest(t)
- clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
- return &v2v3TestStore{
- v2v3.NewStore(clus.Client(0), "/v2/"),
- clus,
- t,
- }
-}
diff --git a/tests/integration/v2store/store_test.go b/tests/integration/v2store/store_test.go
index 562ff53976d..842b57deb62 100644
--- a/tests/integration/v2store/store_test.go
+++ b/tests/integration/v2store/store_test.go
@@ -19,7 +19,9 @@ import (
"testing"
"time"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
"go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
)
@@ -30,34 +32,31 @@ type StoreCloser interface {
}
func TestNewStoreWithNamespaces(t *testing.T) {
- s := newTestStore(t, "/0", "/1")
- defer s.Close()
+ s := v2store.New("/0", "/1")
_, err := s.Get("/0", false, false)
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
_, err = s.Get("/1", false, false)
- testutil.AssertNil(t, err)
+ assert.NoError(t, err)
}
-// Ensure that the store can retrieve an existing value.
+// TestStoreGetValue ensures that the store can retrieve an existing value.
func TestStoreGetValue(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
var eidx uint64 = 1
e, err := s.Get("/foo", false, false)
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "get")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
- testutil.AssertEqual(t, *e.Node.Value, "bar")
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "get", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
+ assert.Equal(t, "bar", *e.Node.Value)
}
-// Ensure that the store can retrieve a directory in sorted order.
+// TestStoreGetSorted ensures that the store can retrieve a directory in sorted order.
func TestStoreGetSorted(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
s.Create("/foo", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
s.Create("/foo/x", false, "0", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
@@ -67,8 +66,8 @@ func TestStoreGetSorted(t *testing.T) {
s.Create("/foo/y/b", false, "0", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
var eidx uint64 = 6
e, err := s.Get("/foo", true, true)
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
var yNodes v2store.NodeExterns
sortedStrings := []string{"/foo/x", "/foo/y", "/foo/z"}
@@ -92,240 +91,233 @@ func TestStoreGetSorted(t *testing.T) {
}
func TestSet(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
// Set /foo=""
var eidx uint64 = 1
e, err := s.Set("/foo", false, "", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "set")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
- testutil.AssertFalse(t, e.Node.Dir)
- testutil.AssertEqual(t, *e.Node.Value, "")
- testutil.AssertNil(t, e.Node.Nodes)
- testutil.AssertNil(t, e.Node.Expiration)
- testutil.AssertEqual(t, e.Node.TTL, int64(0))
- testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(1))
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "set", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
+ assert.False(t, e.Node.Dir)
+ assert.Equal(t, "", *e.Node.Value)
+ assert.Nil(t, e.Node.Nodes)
+ assert.Nil(t, e.Node.Expiration)
+ assert.Equal(t, int64(0), e.Node.TTL)
+ assert.Equal(t, uint64(1), e.Node.ModifiedIndex)
// Set /foo="bar"
eidx = 2
e, err = s.Set("/foo", false, "bar", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "set")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
- testutil.AssertFalse(t, e.Node.Dir)
- testutil.AssertEqual(t, *e.Node.Value, "bar")
- testutil.AssertNil(t, e.Node.Nodes)
- testutil.AssertNil(t, e.Node.Expiration)
- testutil.AssertEqual(t, e.Node.TTL, int64(0))
- testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(2))
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "set", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
+ assert.False(t, e.Node.Dir)
+ assert.Equal(t, "bar", *e.Node.Value)
+ assert.Nil(t, e.Node.Nodes)
+ assert.Nil(t, e.Node.Expiration)
+ assert.Equal(t, int64(0), e.Node.TTL)
+ assert.Equal(t, uint64(2), e.Node.ModifiedIndex)
// check prevNode
- testutil.AssertNotNil(t, e.PrevNode)
- testutil.AssertEqual(t, e.PrevNode.Key, "/foo")
- testutil.AssertEqual(t, *e.PrevNode.Value, "")
- testutil.AssertEqual(t, e.PrevNode.ModifiedIndex, uint64(1))
+ require.NotNil(t, e.PrevNode)
+ assert.Equal(t, "/foo", e.PrevNode.Key)
+ assert.Equal(t, "", *e.PrevNode.Value)
+ assert.Equal(t, uint64(1), e.PrevNode.ModifiedIndex)
// Set /foo="baz" (for testing prevNode)
eidx = 3
e, err = s.Set("/foo", false, "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "set")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
- testutil.AssertFalse(t, e.Node.Dir)
- testutil.AssertEqual(t, *e.Node.Value, "baz")
- testutil.AssertNil(t, e.Node.Nodes)
- testutil.AssertNil(t, e.Node.Expiration)
- testutil.AssertEqual(t, e.Node.TTL, int64(0))
- testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(3))
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "set", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
+ assert.False(t, e.Node.Dir)
+ assert.Equal(t, "baz", *e.Node.Value)
+ assert.Nil(t, e.Node.Nodes)
+ assert.Nil(t, e.Node.Expiration)
+ assert.Equal(t, int64(0), e.Node.TTL)
+ assert.Equal(t, uint64(3), e.Node.ModifiedIndex)
// check prevNode
- testutil.AssertNotNil(t, e.PrevNode)
- testutil.AssertEqual(t, e.PrevNode.Key, "/foo")
- testutil.AssertEqual(t, *e.PrevNode.Value, "bar")
- testutil.AssertEqual(t, e.PrevNode.ModifiedIndex, uint64(2))
+ require.NotNil(t, e.PrevNode)
+ assert.Equal(t, "/foo", e.PrevNode.Key)
+ assert.Equal(t, "bar", *e.PrevNode.Value)
+ assert.Equal(t, uint64(2), e.PrevNode.ModifiedIndex)
// Set /a/b/c/d="efg"
eidx = 4
e, err = s.Set("/a/b/c/d", false, "efg", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Node.Key, "/a/b/c/d")
- testutil.AssertFalse(t, e.Node.Dir)
- testutil.AssertEqual(t, *e.Node.Value, "efg")
- testutil.AssertNil(t, e.Node.Nodes)
- testutil.AssertNil(t, e.Node.Expiration)
- testutil.AssertEqual(t, e.Node.TTL, int64(0))
- testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(4))
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "/a/b/c/d", e.Node.Key)
+ assert.False(t, e.Node.Dir)
+ assert.Equal(t, "efg", *e.Node.Value)
+ assert.Nil(t, e.Node.Nodes)
+ assert.Nil(t, e.Node.Expiration)
+ assert.Equal(t, int64(0), e.Node.TTL)
+ assert.Equal(t, uint64(4), e.Node.ModifiedIndex)
// Set /dir as a directory
eidx = 5
e, err = s.Set("/dir", true, "", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "set")
- testutil.AssertEqual(t, e.Node.Key, "/dir")
- testutil.AssertTrue(t, e.Node.Dir)
- testutil.AssertNil(t, e.Node.Value)
- testutil.AssertNil(t, e.Node.Nodes)
- testutil.AssertNil(t, e.Node.Expiration)
- testutil.AssertEqual(t, e.Node.TTL, int64(0))
- testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(5))
-}
-
-// Ensure that the store can create a new key if it doesn't already exist.
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "set", e.Action)
+ assert.Equal(t, "/dir", e.Node.Key)
+ assert.True(t, e.Node.Dir)
+ assert.Nil(t, e.Node.Value)
+ assert.Nil(t, e.Node.Nodes)
+ assert.Nil(t, e.Node.Expiration)
+ assert.Equal(t, int64(0), e.Node.TTL)
+ assert.Equal(t, uint64(5), e.Node.ModifiedIndex)
+}
+
+// TestStoreCreateValue ensures that the store can create a new key if it doesn't already exist.
func TestStoreCreateValue(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
// Create /foo=bar
var eidx uint64 = 1
e, err := s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "create")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
- testutil.AssertFalse(t, e.Node.Dir)
- testutil.AssertEqual(t, *e.Node.Value, "bar")
- testutil.AssertNil(t, e.Node.Nodes)
- testutil.AssertNil(t, e.Node.Expiration)
- testutil.AssertEqual(t, e.Node.TTL, int64(0))
- testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(1))
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "create", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
+ assert.False(t, e.Node.Dir)
+ assert.Equal(t, "bar", *e.Node.Value)
+ assert.Nil(t, e.Node.Nodes)
+ assert.Nil(t, e.Node.Expiration)
+ assert.Equal(t, int64(0), e.Node.TTL)
+ assert.Equal(t, uint64(1), e.Node.ModifiedIndex)
// Create /empty=""
eidx = 2
e, err = s.Create("/empty", false, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "create")
- testutil.AssertEqual(t, e.Node.Key, "/empty")
- testutil.AssertFalse(t, e.Node.Dir)
- testutil.AssertEqual(t, *e.Node.Value, "")
- testutil.AssertNil(t, e.Node.Nodes)
- testutil.AssertNil(t, e.Node.Expiration)
- testutil.AssertEqual(t, e.Node.TTL, int64(0))
- testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(2))
-
-}
-
-// Ensure that the store can create a new directory if it doesn't already exist.
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "create", e.Action)
+ assert.Equal(t, "/empty", e.Node.Key)
+ assert.False(t, e.Node.Dir)
+ assert.Equal(t, "", *e.Node.Value)
+ assert.Nil(t, e.Node.Nodes)
+ assert.Nil(t, e.Node.Expiration)
+ assert.Equal(t, int64(0), e.Node.TTL)
+ assert.Equal(t, uint64(2), e.Node.ModifiedIndex)
+}
+
+// TestStoreCreateDirectory ensures that the store can create a new directory if it doesn't already exist.
func TestStoreCreateDirectory(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 1
e, err := s.Create("/foo", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "create")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
- testutil.AssertTrue(t, e.Node.Dir)
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "create", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
+ assert.True(t, e.Node.Dir)
}
-// Ensure that the store fails to create a key if it already exists.
+// TestStoreCreateFailsIfExists ensures that the store fails to create a key if it already exists.
func TestStoreCreateFailsIfExists(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
// create /foo as dir
s.Create("/foo", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
// create /foo as dir again
e, _err := s.Create("/foo", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- err := _err.(*v2error.Error)
- testutil.AssertEqual(t, err.ErrorCode, v2error.EcodeNodeExist)
- testutil.AssertEqual(t, err.Message, "Key already exists")
- testutil.AssertEqual(t, err.Cause, "/foo")
- testutil.AssertEqual(t, err.Index, uint64(1))
- testutil.AssertNil(t, e)
+ var err *v2error.Error
+ require.ErrorAs(t, _err, &err)
+ assert.Equal(t, v2error.EcodeNodeExist, err.ErrorCode)
+ assert.Equal(t, "Key already exists", err.Message)
+ assert.Equal(t, "/foo", err.Cause)
+ assert.Equal(t, uint64(1), err.Index)
+ assert.Nil(t, e)
}
-// Ensure that the store can update a key if it already exists.
+// TestStoreUpdateValue ensures that the store can update a key if it already exists.
func TestStoreUpdateValue(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
// create /foo=bar
s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
// update /foo="bzr"
var eidx uint64 = 2
e, err := s.Update("/foo", "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "update")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
- testutil.AssertFalse(t, e.Node.Dir)
- testutil.AssertEqual(t, *e.Node.Value, "baz")
- testutil.AssertEqual(t, e.Node.TTL, int64(0))
- testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(2))
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "update", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
+ assert.False(t, e.Node.Dir)
+ assert.Equal(t, "baz", *e.Node.Value)
+ assert.Equal(t, int64(0), e.Node.TTL)
+ assert.Equal(t, uint64(2), e.Node.ModifiedIndex)
// check prevNode
- testutil.AssertEqual(t, e.PrevNode.Key, "/foo")
- testutil.AssertEqual(t, *e.PrevNode.Value, "bar")
- testutil.AssertEqual(t, e.PrevNode.TTL, int64(0))
- testutil.AssertEqual(t, e.PrevNode.ModifiedIndex, uint64(1))
+ assert.Equal(t, "/foo", e.PrevNode.Key)
+ assert.Equal(t, "bar", *e.PrevNode.Value)
+ assert.Equal(t, int64(0), e.PrevNode.TTL)
+ assert.Equal(t, uint64(1), e.PrevNode.ModifiedIndex)
e, _ = s.Get("/foo", false, false)
- testutil.AssertEqual(t, *e.Node.Value, "baz")
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
+ assert.Equal(t, "baz", *e.Node.Value)
+ assert.Equal(t, eidx, e.EtcdIndex)
// update /foo=""
eidx = 3
e, err = s.Update("/foo", "", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "update")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
- testutil.AssertFalse(t, e.Node.Dir)
- testutil.AssertEqual(t, *e.Node.Value, "")
- testutil.AssertEqual(t, e.Node.TTL, int64(0))
- testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(3))
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "update", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
+ assert.False(t, e.Node.Dir)
+ assert.Equal(t, "", *e.Node.Value)
+ assert.Equal(t, int64(0), e.Node.TTL)
+ assert.Equal(t, uint64(3), e.Node.ModifiedIndex)
// check prevNode
- testutil.AssertEqual(t, e.PrevNode.Key, "/foo")
- testutil.AssertEqual(t, *e.PrevNode.Value, "baz")
- testutil.AssertEqual(t, e.PrevNode.TTL, int64(0))
- testutil.AssertEqual(t, e.PrevNode.ModifiedIndex, uint64(2))
+ assert.Equal(t, "/foo", e.PrevNode.Key)
+ assert.Equal(t, "baz", *e.PrevNode.Value)
+ assert.Equal(t, int64(0), e.PrevNode.TTL)
+ assert.Equal(t, uint64(2), e.PrevNode.ModifiedIndex)
e, _ = s.Get("/foo", false, false)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, *e.Node.Value, "")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "", *e.Node.Value)
}
-// Ensure that the store cannot update a directory.
+// TestStoreUpdateFailsIfDirectory ensures that the store cannot update a directory.
func TestStoreUpdateFailsIfDirectory(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
s.Create("/foo", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e, _err := s.Update("/foo", "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- err := _err.(*v2error.Error)
- testutil.AssertEqual(t, err.ErrorCode, v2error.EcodeNotFile)
- testutil.AssertEqual(t, err.Message, "Not a file")
- testutil.AssertEqual(t, err.Cause, "/foo")
- testutil.AssertNil(t, e)
+ var err *v2error.Error
+ require.ErrorAs(t, _err, &err)
+ assert.Equal(t, v2error.EcodeNotFile, err.ErrorCode)
+ assert.Equal(t, "Not a file", err.Message)
+ assert.Equal(t, "/foo", err.Cause)
+ assert.Nil(t, e)
}
-// Ensure that the store can delete a value.
+// TestStoreDeleteValue ensures that the store can delete a value.
func TestStoreDeleteValue(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 2
s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e, err := s.Delete("/foo", false, false)
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "delete")
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "delete", e.Action)
// check prevNode
- testutil.AssertNotNil(t, e.PrevNode)
- testutil.AssertEqual(t, e.PrevNode.Key, "/foo")
- testutil.AssertEqual(t, *e.PrevNode.Value, "bar")
+ require.NotNil(t, e.PrevNode)
+ assert.Equal(t, "/foo", e.PrevNode.Key)
+ assert.Equal(t, "bar", *e.PrevNode.Value)
}
-// Ensure that the store can delete a directory if recursive is specified.
+// TestStoreDeleteDirectory ensures that the store can delete a directory if recursive is specified.
func TestStoreDeleteDirectory(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
// create directory /foo
var eidx uint64 = 2
@@ -333,506 +325,496 @@ func TestStoreDeleteDirectory(t *testing.T) {
// delete /foo with dir = true and recursive = false
// this should succeed, since the directory is empty
e, err := s.Delete("/foo", true, false)
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "delete")
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "delete", e.Action)
// check prevNode
- testutil.AssertNotNil(t, e.PrevNode)
- testutil.AssertEqual(t, e.PrevNode.Key, "/foo")
- testutil.AssertEqual(t, e.PrevNode.Dir, true)
+ require.NotNil(t, e.PrevNode)
+ assert.Equal(t, "/foo", e.PrevNode.Key)
+ assert.True(t, e.PrevNode.Dir)
// create directory /foo and directory /foo/bar
_, err = s.Create("/foo/bar", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
// delete /foo with dir = true and recursive = false
// this should fail, since the directory is not empty
_, err = s.Delete("/foo", true, false)
- testutil.AssertNotNil(t, err)
+ require.Error(t, err)
// delete /foo with dir=false and recursive = true
// this should succeed, since recursive implies dir=true
// and recursively delete should be able to delete all
// items under the given directory
e, err = s.Delete("/foo", false, true)
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.Action, "delete")
-
+ require.NoError(t, err)
+ assert.Equal(t, "delete", e.Action)
}
-// Ensure that the store cannot delete a directory if both of recursive
-// and dir are not specified.
+// TestStoreDeleteDirectoryFailsIfNonRecursiveAndDir ensures that the
+// store cannot delete a directory when neither recursive nor dir is specified.
func TestStoreDeleteDirectoryFailsIfNonRecursiveAndDir(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
s.Create("/foo", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e, _err := s.Delete("/foo", false, false)
- err := _err.(*v2error.Error)
- testutil.AssertEqual(t, err.ErrorCode, v2error.EcodeNotFile)
- testutil.AssertEqual(t, err.Message, "Not a file")
- testutil.AssertNil(t, e)
+ var err *v2error.Error
+ require.ErrorAs(t, _err, &err)
+ assert.Equal(t, v2error.EcodeNotFile, err.ErrorCode)
+ assert.Equal(t, "Not a file", err.Message)
+ assert.Nil(t, e)
}
func TestRootRdOnly(t *testing.T) {
- s := newTestStore(t, "/0")
- defer s.Close()
+ s := v2store.New("/0")
for _, tt := range []string{"/", "/0"} {
_, err := s.Set(tt, true, "", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- testutil.AssertNotNil(t, err)
+ require.Error(t, err)
_, err = s.Delete(tt, true, true)
- testutil.AssertNotNil(t, err)
+ require.Error(t, err)
_, err = s.Create(tt, true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- testutil.AssertNotNil(t, err)
+ require.Error(t, err)
_, err = s.Update(tt, "", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- testutil.AssertNotNil(t, err)
+ require.Error(t, err)
_, err = s.CompareAndSwap(tt, "", 0, "", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- testutil.AssertNotNil(t, err)
+ require.Error(t, err)
}
}
func TestStoreCompareAndDeletePrevValue(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 2
s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e, err := s.CompareAndDelete("/foo", "bar", 0)
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "compareAndDelete")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "compareAndDelete", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
// check prevNode
- testutil.AssertNotNil(t, e.PrevNode)
- testutil.AssertEqual(t, e.PrevNode.Key, "/foo")
- testutil.AssertEqual(t, *e.PrevNode.Value, "bar")
- testutil.AssertEqual(t, e.PrevNode.ModifiedIndex, uint64(1))
- testutil.AssertEqual(t, e.PrevNode.CreatedIndex, uint64(1))
+ require.NotNil(t, e.PrevNode)
+ assert.Equal(t, "/foo", e.PrevNode.Key)
+ assert.Equal(t, "bar", *e.PrevNode.Value)
+ assert.Equal(t, uint64(1), e.PrevNode.ModifiedIndex)
+ assert.Equal(t, uint64(1), e.PrevNode.CreatedIndex)
}
func TestStoreCompareAndDeletePrevValueFailsIfNotMatch(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 1
s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e, _err := s.CompareAndDelete("/foo", "baz", 0)
- err := _err.(*v2error.Error)
- testutil.AssertEqual(t, err.ErrorCode, v2error.EcodeTestFailed)
- testutil.AssertEqual(t, err.Message, "Compare failed")
- testutil.AssertNil(t, e)
+ var err *v2error.Error
+ require.ErrorAs(t, _err, &err)
+ assert.Equal(t, v2error.EcodeTestFailed, err.ErrorCode)
+ assert.Equal(t, "Compare failed", err.Message)
+ assert.Nil(t, e)
e, _ = s.Get("/foo", false, false)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, *e.Node.Value, "bar")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "bar", *e.Node.Value)
}
func TestStoreCompareAndDeletePrevIndex(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 2
s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e, err := s.CompareAndDelete("/foo", "", 1)
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "compareAndDelete")
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "compareAndDelete", e.Action)
// check prevNode
- testutil.AssertNotNil(t, e.PrevNode)
- testutil.AssertEqual(t, e.PrevNode.Key, "/foo")
- testutil.AssertEqual(t, *e.PrevNode.Value, "bar")
- testutil.AssertEqual(t, e.PrevNode.ModifiedIndex, uint64(1))
- testutil.AssertEqual(t, e.PrevNode.CreatedIndex, uint64(1))
+ require.NotNil(t, e.PrevNode)
+ assert.Equal(t, "/foo", e.PrevNode.Key)
+ assert.Equal(t, "bar", *e.PrevNode.Value)
+ assert.Equal(t, uint64(1), e.PrevNode.ModifiedIndex)
+ assert.Equal(t, uint64(1), e.PrevNode.CreatedIndex)
}
func TestStoreCompareAndDeletePrevIndexFailsIfNotMatch(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 1
s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e, _err := s.CompareAndDelete("/foo", "", 100)
- testutil.AssertNotNil(t, _err)
- err := _err.(*v2error.Error)
- testutil.AssertEqual(t, err.ErrorCode, v2error.EcodeTestFailed)
- testutil.AssertEqual(t, err.Message, "Compare failed")
- testutil.AssertNil(t, e)
+ require.Error(t, _err)
+ var err *v2error.Error
+ require.ErrorAs(t, _err, &err)
+ assert.Equal(t, v2error.EcodeTestFailed, err.ErrorCode)
+ assert.Equal(t, "Compare failed", err.Message)
+ assert.Nil(t, e)
e, _ = s.Get("/foo", false, false)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, *e.Node.Value, "bar")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "bar", *e.Node.Value)
}
-// Ensure that the store cannot delete a directory.
+// TestStoreCompareAndDeleteDirectoryFail ensures that the store cannot delete a directory.
func TestStoreCompareAndDeleteDirectoryFail(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
s.Create("/foo", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
_, _err := s.CompareAndDelete("/foo", "", 0)
- testutil.AssertNotNil(t, _err)
- err := _err.(*v2error.Error)
- testutil.AssertEqual(t, err.ErrorCode, v2error.EcodeNotFile)
+ require.Error(t, _err)
+ var err *v2error.Error
+ require.ErrorAs(t, _err, &err)
+ assert.Equal(t, v2error.EcodeNotFile, err.ErrorCode)
}
-// Ensure that the store can conditionally update a key if it has a previous value.
+// TestStoreCompareAndSwapPrevValue ensures that the store can conditionally
+// update a key if it has a previous value.
func TestStoreCompareAndSwapPrevValue(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 2
s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e, err := s.CompareAndSwap("/foo", "bar", 0, "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "compareAndSwap")
- testutil.AssertEqual(t, *e.Node.Value, "baz")
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "compareAndSwap", e.Action)
+ assert.Equal(t, "baz", *e.Node.Value)
// check prevNode
- testutil.AssertNotNil(t, e.PrevNode)
- testutil.AssertEqual(t, e.PrevNode.Key, "/foo")
- testutil.AssertEqual(t, *e.PrevNode.Value, "bar")
- testutil.AssertEqual(t, e.PrevNode.ModifiedIndex, uint64(1))
- testutil.AssertEqual(t, e.PrevNode.CreatedIndex, uint64(1))
+ require.NotNil(t, e.PrevNode)
+ assert.Equal(t, "/foo", e.PrevNode.Key)
+ assert.Equal(t, "bar", *e.PrevNode.Value)
+ assert.Equal(t, uint64(1), e.PrevNode.ModifiedIndex)
+ assert.Equal(t, uint64(1), e.PrevNode.CreatedIndex)
e, _ = s.Get("/foo", false, false)
- testutil.AssertEqual(t, *e.Node.Value, "baz")
+ assert.Equal(t, "baz", *e.Node.Value)
}
-// Ensure that the store cannot conditionally update a key if it has the wrong previous value.
+// TestStoreCompareAndSwapPrevValueFailsIfNotMatch ensures that the store cannot
+// conditionally update a key if it has the wrong previous value.
func TestStoreCompareAndSwapPrevValueFailsIfNotMatch(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 1
s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e, _err := s.CompareAndSwap("/foo", "wrong_value", 0, "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- err := _err.(*v2error.Error)
- testutil.AssertEqual(t, err.ErrorCode, v2error.EcodeTestFailed)
- testutil.AssertEqual(t, err.Message, "Compare failed")
- testutil.AssertNil(t, e)
+ var err *v2error.Error
+ require.ErrorAs(t, _err, &err)
+ assert.Equal(t, v2error.EcodeTestFailed, err.ErrorCode)
+ assert.Equal(t, "Compare failed", err.Message)
+ assert.Nil(t, e)
e, _ = s.Get("/foo", false, false)
- testutil.AssertEqual(t, *e.Node.Value, "bar")
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
+ assert.Equal(t, "bar", *e.Node.Value)
+ assert.Equal(t, eidx, e.EtcdIndex)
}
-// Ensure that the store can conditionally update a key if it has a previous index.
+// TestStoreCompareAndSwapPrevIndex ensures that the store can conditionally
+// update a key if it has a previous index.
func TestStoreCompareAndSwapPrevIndex(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 2
s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e, err := s.CompareAndSwap("/foo", "", 1, "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "compareAndSwap")
- testutil.AssertEqual(t, *e.Node.Value, "baz")
+ require.NoError(t, err)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "compareAndSwap", e.Action)
+ assert.Equal(t, "baz", *e.Node.Value)
// check prevNode
- testutil.AssertNotNil(t, e.PrevNode)
- testutil.AssertEqual(t, e.PrevNode.Key, "/foo")
- testutil.AssertEqual(t, *e.PrevNode.Value, "bar")
- testutil.AssertEqual(t, e.PrevNode.ModifiedIndex, uint64(1))
- testutil.AssertEqual(t, e.PrevNode.CreatedIndex, uint64(1))
+ require.NotNil(t, e.PrevNode)
+ assert.Equal(t, "/foo", e.PrevNode.Key)
+ assert.Equal(t, "bar", *e.PrevNode.Value)
+ assert.Equal(t, uint64(1), e.PrevNode.ModifiedIndex)
+ assert.Equal(t, uint64(1), e.PrevNode.CreatedIndex)
e, _ = s.Get("/foo", false, false)
- testutil.AssertEqual(t, *e.Node.Value, "baz")
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
+ assert.Equal(t, "baz", *e.Node.Value)
+ assert.Equal(t, eidx, e.EtcdIndex)
}
-// Ensure that the store cannot conditionally update a key if it has the wrong previous index.
+// TestStoreCompareAndSwapPrevIndexFailsIfNotMatch ensures that the store cannot
+// conditionally update a key if it has the wrong previous index.
func TestStoreCompareAndSwapPrevIndexFailsIfNotMatch(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 1
s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e, _err := s.CompareAndSwap("/foo", "", 100, "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
- err := _err.(*v2error.Error)
- testutil.AssertEqual(t, err.ErrorCode, v2error.EcodeTestFailed)
- testutil.AssertEqual(t, err.Message, "Compare failed")
- testutil.AssertNil(t, e)
+ var err *v2error.Error
+ require.ErrorAs(t, _err, &err)
+ assert.Equal(t, v2error.EcodeTestFailed, err.ErrorCode)
+ assert.Equal(t, "Compare failed", err.Message)
+ assert.Nil(t, e)
e, _ = s.Get("/foo", false, false)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, *e.Node.Value, "bar")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "bar", *e.Node.Value)
}
-// Ensure that the store can watch for key creation.
+// TestStoreWatchCreate ensures that the store can watch for key creation.
func TestStoreWatchCreate(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
- var eidx uint64 = 0
+ s := v2store.New()
+ var eidx uint64
w, _ := s.Watch("/foo", false, false, 0)
c := w.EventChan()
- testutil.AssertEqual(t, w.StartIndex(), eidx)
+ assert.Equal(t, eidx, w.StartIndex())
s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
eidx = 1
e := timeoutSelect(t, c)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "create")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "create", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
select {
case e = <-w.EventChan():
- testutil.AssertNil(t, e)
+ assert.Nil(t, e)
case <-time.After(100 * time.Millisecond):
}
}
-// Ensure that the store can watch for recursive key creation.
+// TestStoreWatchRecursiveCreate ensures that the store
+// can watch for recursive key creation.
func TestStoreWatchRecursiveCreate(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
- var eidx uint64 = 0
+ s := v2store.New()
+ var eidx uint64
w, err := s.Watch("/foo", true, false, 0)
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, w.StartIndex(), eidx)
+ require.NoError(t, err)
+ assert.Equal(t, eidx, w.StartIndex())
eidx = 1
s.Create("/foo/bar", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e := timeoutSelect(t, w.EventChan())
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "create")
- testutil.AssertEqual(t, e.Node.Key, "/foo/bar")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "create", e.Action)
+ assert.Equal(t, "/foo/bar", e.Node.Key)
}
-// Ensure that the store can watch for key updates.
+// TestStoreWatchUpdate ensures that the store can watch for key updates.
func TestStoreWatchUpdate(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 1
s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
w, _ := s.Watch("/foo", false, false, 0)
- testutil.AssertEqual(t, w.StartIndex(), eidx)
+ assert.Equal(t, eidx, w.StartIndex())
eidx = 2
s.Update("/foo", "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e := timeoutSelect(t, w.EventChan())
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "update")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "update", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
}
-// Ensure that the store can watch for recursive key updates.
+// TestStoreWatchRecursiveUpdate ensures that the store can watch for recursive key updates.
func TestStoreWatchRecursiveUpdate(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 1
s.Create("/foo/bar", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
w, err := s.Watch("/foo", true, false, 0)
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, w.StartIndex(), eidx)
+ require.NoError(t, err)
+ assert.Equal(t, eidx, w.StartIndex())
eidx = 2
s.Update("/foo/bar", "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e := timeoutSelect(t, w.EventChan())
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "update")
- testutil.AssertEqual(t, e.Node.Key, "/foo/bar")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "update", e.Action)
+ assert.Equal(t, "/foo/bar", e.Node.Key)
}
-// Ensure that the store can watch for key deletions.
+// TestStoreWatchDelete ensures that the store can watch for key deletions.
func TestStoreWatchDelete(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 1
s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
w, _ := s.Watch("/foo", false, false, 0)
- testutil.AssertEqual(t, w.StartIndex(), eidx)
+ assert.Equal(t, eidx, w.StartIndex())
eidx = 2
s.Delete("/foo", false, false)
e := timeoutSelect(t, w.EventChan())
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "delete")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "delete", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
}
-// Ensure that the store can watch for recursive key deletions.
+// TestStoreWatchRecursiveDelete ensures that the store can watch for recursive key deletions.
func TestStoreWatchRecursiveDelete(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 1
s.Create("/foo/bar", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
w, err := s.Watch("/foo", true, false, 0)
- testutil.AssertNil(t, err)
- testutil.AssertEqual(t, w.StartIndex(), eidx)
+ require.NoError(t, err)
+ assert.Equal(t, eidx, w.StartIndex())
eidx = 2
s.Delete("/foo/bar", false, false)
e := timeoutSelect(t, w.EventChan())
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "delete")
- testutil.AssertEqual(t, e.Node.Key, "/foo/bar")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "delete", e.Action)
+ assert.Equal(t, "/foo/bar", e.Node.Key)
}
-// Ensure that the store can watch for CAS updates.
+// TestStoreWatchCompareAndSwap ensures that the store can watch for CAS updates.
func TestStoreWatchCompareAndSwap(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 1
s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
w, _ := s.Watch("/foo", false, false, 0)
- testutil.AssertEqual(t, w.StartIndex(), eidx)
+ assert.Equal(t, eidx, w.StartIndex())
eidx = 2
s.CompareAndSwap("/foo", "bar", 0, "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e := timeoutSelect(t, w.EventChan())
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "compareAndSwap")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "compareAndSwap", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
}
-// Ensure that the store can watch for recursive CAS updates.
+// TestStoreWatchRecursiveCompareAndSwap ensures that the
+// store can watch for recursive CAS updates.
func TestStoreWatchRecursiveCompareAndSwap(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 1
s.Create("/foo/bar", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
w, _ := s.Watch("/foo", true, false, 0)
- testutil.AssertEqual(t, w.StartIndex(), eidx)
+ assert.Equal(t, eidx, w.StartIndex())
eidx = 2
s.CompareAndSwap("/foo/bar", "baz", 0, "bat", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e := timeoutSelect(t, w.EventChan())
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "compareAndSwap")
- testutil.AssertEqual(t, e.Node.Key, "/foo/bar")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "compareAndSwap", e.Action)
+ assert.Equal(t, "/foo/bar", e.Node.Key)
}
-// Ensure that the store can watch in streaming mode.
+// TestStoreWatchStream ensures that the store can watch in streaming mode.
func TestStoreWatchStream(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 1
w, _ := s.Watch("/foo", false, true, 0)
// first modification
s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e := timeoutSelect(t, w.EventChan())
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "create")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
- testutil.AssertEqual(t, *e.Node.Value, "bar")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "create", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
+ assert.Equal(t, "bar", *e.Node.Value)
select {
case e = <-w.EventChan():
- testutil.AssertNil(t, e)
+ assert.Nil(t, e)
case <-time.After(100 * time.Millisecond):
}
// second modification
eidx = 2
s.Update("/foo", "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e = timeoutSelect(t, w.EventChan())
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "update")
- testutil.AssertEqual(t, e.Node.Key, "/foo")
- testutil.AssertEqual(t, *e.Node.Value, "baz")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "update", e.Action)
+ assert.Equal(t, "/foo", e.Node.Key)
+ assert.Equal(t, "baz", *e.Node.Value)
select {
case e = <-w.EventChan():
- testutil.AssertNil(t, e)
+ assert.Nil(t, e)
case <-time.After(100 * time.Millisecond):
}
}
-// Ensure that the store can watch for hidden keys as long as it's an exact path match.
+// TestStoreWatchCreateWithHiddenKey ensures that the store can
+// watch for hidden keys as long as the watch is an exact path match.
func TestStoreWatchCreateWithHiddenKey(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 1
w, _ := s.Watch("/_foo", false, false, 0)
s.Create("/_foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e := timeoutSelect(t, w.EventChan())
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "create")
- testutil.AssertEqual(t, e.Node.Key, "/_foo")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "create", e.Action)
+ assert.Equal(t, "/_foo", e.Node.Key)
select {
case e = <-w.EventChan():
- testutil.AssertNil(t, e)
+ assert.Nil(t, e)
case <-time.After(100 * time.Millisecond):
}
}
-// Ensure that the store doesn't see hidden key creates without an exact path match in recursive mode.
+// TestStoreWatchRecursiveCreateWithHiddenKey ensures that the store doesn't
+// see hidden key creates without an exact path match in recursive mode.
func TestStoreWatchRecursiveCreateWithHiddenKey(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
w, _ := s.Watch("/foo", true, false, 0)
s.Create("/foo/_bar", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e := nbselect(w.EventChan())
- testutil.AssertNil(t, e)
+ assert.Nil(t, e)
w, _ = s.Watch("/foo", true, false, 0)
s.Create("/foo/_baz", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
select {
case e = <-w.EventChan():
- testutil.AssertNil(t, e)
+ assert.Nil(t, e)
case <-time.After(100 * time.Millisecond):
}
s.Create("/foo/_baz/quux", false, "quux", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
select {
case e = <-w.EventChan():
- testutil.AssertNil(t, e)
+ assert.Nil(t, e)
case <-time.After(100 * time.Millisecond):
}
}
-// Ensure that the store doesn't see hidden key updates.
+// TestStoreWatchUpdateWithHiddenKey ensures that the store
+// doesn't see hidden key updates.
func TestStoreWatchUpdateWithHiddenKey(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
s.Create("/_foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
w, _ := s.Watch("/_foo", false, false, 0)
s.Update("/_foo", "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e := timeoutSelect(t, w.EventChan())
- testutil.AssertEqual(t, e.Action, "update")
- testutil.AssertEqual(t, e.Node.Key, "/_foo")
+ assert.Equal(t, "update", e.Action)
+ assert.Equal(t, "/_foo", e.Node.Key)
e = nbselect(w.EventChan())
- testutil.AssertNil(t, e)
+ assert.Nil(t, e)
}
-// Ensure that the store doesn't see hidden key updates without an exact path match in recursive mode.
+// TestStoreWatchRecursiveUpdateWithHiddenKey ensures that the store doesn't
+// see hidden key updates without an exact path match in recursive mode.
func TestStoreWatchRecursiveUpdateWithHiddenKey(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
s.Create("/foo/_bar", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
w, _ := s.Watch("/foo", true, false, 0)
s.Update("/foo/_bar", "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e := nbselect(w.EventChan())
- testutil.AssertNil(t, e)
+ assert.Nil(t, e)
}
-// Ensure that the store can watch for key deletions.
+// TestStoreWatchDeleteWithHiddenKey ensures that the store can watch for hidden key deletions.
func TestStoreWatchDeleteWithHiddenKey(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 2
s.Create("/_foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
w, _ := s.Watch("/_foo", false, false, 0)
s.Delete("/_foo", false, false)
e := timeoutSelect(t, w.EventChan())
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "delete")
- testutil.AssertEqual(t, e.Node.Key, "/_foo")
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "delete", e.Action)
+ assert.Equal(t, "/_foo", e.Node.Key)
e = nbselect(w.EventChan())
- testutil.AssertNil(t, e)
+ assert.Nil(t, e)
}
-// Ensure that the store doesn't see hidden key deletes without an exact path match in recursive mode.
+// TestStoreWatchRecursiveDeleteWithHiddenKey ensures that the store doesn't see
+// hidden key deletes without an exact path match in recursive mode.
func TestStoreWatchRecursiveDeleteWithHiddenKey(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
s.Create("/foo/_bar", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
w, _ := s.Watch("/foo", true, false, 0)
s.Delete("/foo/_bar", false, false)
e := nbselect(w.EventChan())
- testutil.AssertNil(t, e)
+ assert.Nil(t, e)
}
-// Ensure that the store does see hidden key creates if watching deeper than a hidden key in recursive mode.
+// TestStoreWatchRecursiveCreateDeeperThanHiddenKey ensures that the store does see
+// hidden key creates if watching deeper than a hidden key in recursive mode.
func TestStoreWatchRecursiveCreateDeeperThanHiddenKey(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
var eidx uint64 = 1
w, _ := s.Watch("/_foo/bar", true, false, 0)
s.Create("/_foo/bar/baz", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
e := timeoutSelect(t, w.EventChan())
- testutil.AssertNotNil(t, e)
- testutil.AssertEqual(t, e.EtcdIndex, eidx)
- testutil.AssertEqual(t, e.Action, "create")
- testutil.AssertEqual(t, e.Node.Key, "/_foo/bar/baz")
+ require.NotNil(t, e)
+ assert.Equal(t, eidx, e.EtcdIndex)
+ assert.Equal(t, "create", e.Action)
+ assert.Equal(t, "/_foo/bar/baz", e.Node.Key)
}
-// Ensure that slow consumers are handled properly.
+// TestStoreWatchSlowConsumer ensures that slow consumers are handled properly.
//
// Since Watcher.EventChan() has a buffer of size 100 we can only queue 100
// event per watcher. If the consumer cannot consume the event on time and
@@ -840,17 +822,16 @@ func TestStoreWatchRecursiveCreateDeeperThanHiddenKey(t *testing.T) {
// This test ensures that after closing the channel, the store can continue
// to operate correctly.
func TestStoreWatchSlowConsumer(t *testing.T) {
- s := newTestStore(t)
- defer s.Close()
+ s := v2store.New()
s.Watch("/foo", true, true, 0) // stream must be true
// Fill watch channel with 100 events
for i := 1; i <= 100; i++ {
s.Set("/foo", false, fmt.Sprint(i), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) // ok
}
- // testutil.AssertEqual(t, s.WatcherHub.count, int64(1))
+ // assert.Equal(t, int64(1), s.WatcherHub.count)
s.Set("/foo", false, "101", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) // ok
// remove watcher
- // testutil.AssertEqual(t, s.WatcherHub.count, int64(0))
+ // assert.Equal(t, int64(0), s.WatcherHub.count)
s.Set("/foo", false, "102", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) // must not panic
}
diff --git a/tests/integration/v2store/store_v2v3_test.go b/tests/integration/v2store/store_v2v3_test.go
deleted file mode 100644
index 3a6eab14a10..00000000000
--- a/tests/integration/v2store/store_v2v3_test.go
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2store_test
-
-import (
- "strings"
- "testing"
-
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2v3"
- "go.etcd.io/etcd/tests/v3/integration"
-)
-
-// TODO: fix tests
-
-func runWithCluster(t testing.TB, runner func(testing.TB, []string)) {
- integration.BeforeTest(t)
- cfg := integration.ClusterConfig{Size: 1}
- clus := integration.NewClusterV3(t, &cfg)
- defer clus.Terminate(t)
- endpoints := []string{clus.Client(0).Endpoints()[0]}
- runner(t, endpoints)
-}
-
-func TestCreateKV(t *testing.T) { runWithCluster(t, testCreateKV) }
-
-func testCreateKV(t testing.TB, endpoints []string) {
- integration.BeforeTest(t)
- testCases := []struct {
- key string
- value string
- nodes int
- unique bool
- wantErr bool
- wantKeyMatch bool
- }{
- {key: "/cdir/create", value: "1", nodes: 1, wantKeyMatch: true},
- {key: "/cdir/create", value: "4", wantErr: true},
- // TODO: unique doesn't create nodes, skip these tests for now
- //{key: "hello", value: "2", unique: true, wantKeyMatch: false},
- //{key: "hello", value: "3", unique: true, wantKeyMatch: false},
- }
-
- cli, err := integration.NewClient(t, clientv3.Config{Endpoints: endpoints})
- if err != nil {
- t.Fatal(err)
- }
- defer cli.Close()
- v2 := v2v3.NewStore(cli, "")
-
- for ti, tc := range testCases {
- ev, err := v2.Create(tc.key, false, tc.value, tc.unique, v2store.TTLOptionSet{})
- if tc.wantErr && err != nil {
- continue
- }
- if err != nil {
- t.Skipf("%d: got err %v", ti, err)
- }
-
- if tc.wantKeyMatch && tc.key != ev.Node.Key {
- t.Skipf("%d: %v != %v", ti, tc.key, ev.Node.Key)
- }
- if !tc.wantKeyMatch && !strings.HasPrefix(ev.Node.Key, tc.key) {
- t.Skipf("%d: %v is not prefix of %v", ti, tc.key, ev.Node.Key)
- }
-
- evg, err := v2.Get(tc.key, false, false)
- if err != nil {
- t.Fatal(err)
- }
-
- if evg.Node.CreatedIndex != ev.Node.CreatedIndex {
- t.Skipf("%d: %v != %v", ti, evg.Node.CreatedIndex, ev.Node.CreatedIndex)
- }
-
- t.Logf("%d: %v %s %v\n", ti, ev.Node.Key, *ev.Node.Value, ev.Node.CreatedIndex)
- }
-}
-
-func TestSetKV(t *testing.T) { runWithCluster(t, testSetKV) }
-
-func testSetKV(t testing.TB, endpoints []string) {
- testCases := []struct {
- key string
- value string
- wantIndexMatch bool
- }{
- {key: "/sdir/set", value: "1", wantIndexMatch: true},
- {key: "/sdir/set", value: "4", wantIndexMatch: false},
- }
-
- cli, err := integration.NewClient(t, clientv3.Config{Endpoints: endpoints})
- if err != nil {
- t.Fatal(err)
- }
- defer cli.Close()
- v2 := v2v3.NewStore(cli, "")
-
- for ti, tc := range testCases {
- ev, err := v2.Set(tc.key, false, tc.value, v2store.TTLOptionSet{})
- if err != nil {
- t.Skipf("%d: got err %v", ti, err)
- }
-
- if tc.value != *ev.Node.Value {
- t.Skipf("%d: %v != %v", ti, tc.value, *ev.Node.Value)
- }
-
- if tc.wantIndexMatch && ev.Node.CreatedIndex != ev.Node.ModifiedIndex {
- t.Skipf("%d: index %v != %v", ti, ev.Node.CreatedIndex, ev.Node.ModifiedIndex)
- }
-
- t.Logf("%d: %v %s %v\n", ti, ev.Node.Key, *ev.Node.Value, ev.Node.CreatedIndex)
- }
-}
-
-func TestCreateSetDir(t *testing.T) { runWithCluster(t, testCreateSetDir) }
-
-func testCreateSetDir(t testing.TB, endpoints []string) {
- integration.BeforeTest(t)
- testCases := []struct {
- dir string
- }{
- {dir: "/ddir/1/2/3"},
- {dir: "/ddir/1/2/3"},
- }
-
- cli, err := integration.NewClient(t, clientv3.Config{Endpoints: endpoints})
- if err != nil {
- t.Fatal(err)
- }
- defer cli.Close()
- v2 := v2v3.NewStore(cli, "")
-
- for ti, tc := range testCases {
- _, err := v2.Create(tc.dir, true, "", false, v2store.TTLOptionSet{})
- if err != nil {
- t.Skipf("%d: got err %v", ti, err)
- }
- _, err = v2.Create(tc.dir, true, "", false, v2store.TTLOptionSet{})
- if err == nil {
- t.Skipf("%d: expected err got nil", ti)
- }
-
- ev, err := v2.Delete("ddir", true, true)
- if err != nil {
- t.Skipf("%d: got err %v", ti, err)
- }
-
- t.Logf("%d: %v %s %v\n", ti, ev.EtcdIndex, ev.PrevNode.Key, ev.PrevNode.CreatedIndex)
- }
-}
diff --git a/tests/integration/v3_alarm_test.go b/tests/integration/v3_alarm_test.go
index 0151dc27fcb..50a701e6809 100644
--- a/tests/integration/v3_alarm_test.go
+++ b/tests/integration/v3_alarm_test.go
@@ -22,29 +22,33 @@ import (
"testing"
"time"
+ "go.uber.org/zap/zaptest"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/pkg/v3/traceutil"
- "go.etcd.io/etcd/server/v3/mvcc"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
- "go.uber.org/zap/zaptest"
+ "go.etcd.io/etcd/server/v3/lease/leasepb"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
)
// TestV3StorageQuotaApply tests the V3 server respects quotas during apply
func TestV3StorageQuotaApply(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
quotasize := int64(16 * os.Getpagesize())
- clus := NewClusterV3(t, &ClusterConfig{Size: 2})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 2})
defer clus.Terminate(t)
- kvc0 := toGRPC(clus.Client(0)).KV
- kvc1 := toGRPC(clus.Client(1)).KV
+ kvc1 := integration.ToGRPC(clus.Client(1)).KV
// Set a quota on one node
clus.Members[0].QuotaBackendBytes = quotasize
clus.Members[0].Stop(t)
clus.Members[0].Restart(t)
- clus.waitLeader(t, clus.Members)
+ clus.WaitMembersForLeader(t, clus.Members)
+ kvc0 := integration.ToGRPC(clus.Client(0)).KV
waitForRestart(t, kvc0)
key := []byte("abc")
@@ -73,7 +77,7 @@ func TestV3StorageQuotaApply(t *testing.T) {
stopc := time.After(5 * time.Second)
for {
req := &pb.AlarmRequest{Action: pb.AlarmRequest_GET}
- resp, aerr := clus.Members[0].s.Alarm(context.TODO(), req)
+ resp, aerr := clus.Members[0].Server.Alarm(context.TODO(), req)
if aerr != nil {
t.Fatal(aerr)
}
@@ -87,7 +91,31 @@ func TestV3StorageQuotaApply(t *testing.T) {
}
}
- ctx, cancel := context.WithTimeout(context.TODO(), RequestWaitTimeout)
+ // txn with non-mutating Ops should go through when NOSPACE alarm is raised
+ _, err = kvc0.Txn(context.TODO(), &pb.TxnRequest{
+ Compare: []*pb.Compare{
+ {
+ Key: key,
+ Result: pb.Compare_EQUAL,
+ Target: pb.Compare_CREATE,
+ TargetUnion: &pb.Compare_CreateRevision{CreateRevision: 0},
+ },
+ },
+ Success: []*pb.RequestOp{
+ {
+ Request: &pb.RequestOp_RequestDeleteRange{
+ RequestDeleteRange: &pb.DeleteRangeRequest{
+ Key: key,
+ },
+ },
+ },
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ctx, cancel := context.WithTimeout(context.TODO(), integration.RequestWaitTimeout)
defer cancel()
// small quota machine should reject put
@@ -103,7 +131,7 @@ func TestV3StorageQuotaApply(t *testing.T) {
// reset large quota node to ensure alarm persisted
clus.Members[1].Stop(t)
clus.Members[1].Restart(t)
- clus.waitLeader(t, clus.Members)
+ clus.WaitMembersForLeader(t, clus.Members)
if _, err := kvc1.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err == nil {
t.Fatalf("alarmed instance should reject put after reset")
@@ -112,12 +140,12 @@ func TestV3StorageQuotaApply(t *testing.T) {
// TestV3AlarmDeactivate ensures that space alarms can be deactivated so puts go through.
func TestV3AlarmDeactivate(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
- kvc := toGRPC(clus.RandClient()).KV
- mt := toGRPC(clus.RandClient()).Maintenance
+ kvc := integration.ToGRPC(clus.RandClient()).KV
+ mt := integration.ToGRPC(clus.RandClient()).Maintenance
alarmReq := &pb.AlarmRequest{
MemberID: 123,
@@ -146,8 +174,9 @@ func TestV3AlarmDeactivate(t *testing.T) {
}
func TestV3CorruptAlarm(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ integration.BeforeTest(t)
+ lg := zaptest.NewLogger(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
var wg sync.WaitGroup
@@ -165,8 +194,8 @@ func TestV3CorruptAlarm(t *testing.T) {
// Corrupt member 0 by modifying backend offline.
clus.Members[0].Stop(t)
fp := filepath.Join(clus.Members[0].DataDir, "member", "snap", "db")
- be := backend.NewDefaultBackend(fp)
- s := mvcc.NewStore(zaptest.NewLogger(t), be, nil, mvcc.StoreConfig{})
+ be := backend.NewDefaultBackend(lg, fp)
+ s := mvcc.NewStore(lg, be, nil, mvcc.StoreConfig{})
// NOTE: cluster_proxy mode with namespacing won't set 'k', but namespace/'k'.
s.Put([]byte("abc"), []byte("def"), 0)
s.Put([]byte("xyz"), []byte("123"), 0)
@@ -228,3 +257,100 @@ func TestV3CorruptAlarm(t *testing.T) {
}
t.Fatalf("expected error %v after %s", rpctypes.ErrCorrupt, 5*time.Second)
}
+
+func TestV3CorruptAlarmWithLeaseCorrupted(t *testing.T) {
+ integration.BeforeTest(t)
+ lg := zaptest.NewLogger(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{
+ CorruptCheckTime: time.Second,
+ Size: 3,
+ SnapshotCount: 10,
+ SnapshotCatchUpEntries: 5,
+ DisableStrictReconfigCheck: true,
+ })
+ defer clus.Terminate(t)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
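+ // Grant a lease with an explicit ID (1) so its backend record can be located and replaced later.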
+ lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{ID: 1, TTL: 60})
+ if err != nil {
+ t.Errorf("could not create lease 1 (%v)", err)
+ }
+ if lresp.ID != 1 {
+ t.Errorf("got id %v, wanted id %v", lresp.ID, 1)
+ }
+
+ putr := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID}
+ // Write enough keys to trigger a snapshot; the leader later sends it to the newly added member.
+ for i := 0; i < 15; i++ {
+ _, err = integration.ToGRPC(clus.RandClient()).KV.Put(ctx, putr)
+ if err != nil {
+ t.Errorf("#%d: couldn't put key (%v)", i, err)
+ }
+ }
+
+ if err = clus.RemoveMember(t, clus.Client(1), uint64(clus.Members[2].ID())); err != nil {
+ t.Fatal(err)
+ }
+ clus.WaitMembersForLeader(t, clus.Members)
+
+ clus.AddMember(t)
+ clus.WaitMembersForLeader(t, clus.Members)
+ // Wait for new member to catch up
+ integration.WaitClientV3(t, clus.Members[2].Client)
+
+ // Corrupt member 2 by modifying backend lease bucket offline.
+ clus.Members[2].Stop(t)
+ fp := filepath.Join(clus.Members[2].DataDir, "member", "snap", "db")
+ bcfg := backend.DefaultBackendConfig(lg)
+ bcfg.Path = fp
+ be := backend.New(bcfg)
+
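+ // Swap lease 1 for lease 2 in member 2's backend so its lease data diverges from the other members.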
+ olpb := leasepb.Lease{ID: int64(1), TTL: 60}
+ tx := be.BatchTx()
+ schema.UnsafeDeleteLease(tx, &olpb)
+ lpb := leasepb.Lease{ID: int64(2), TTL: 60}
+ schema.MustUnsafePutLease(tx, &lpb)
+ tx.Commit()
+
+ if err = be.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = clus.Members[2].Restart(t); err != nil {
+ t.Fatal(err)
+ }
+
+ clus.Members[1].WaitOK(t)
+ clus.Members[2].WaitOK(t)
+
+ // Revoking the lease should remove the key on every member except the corrupted one.
+ _, err = integration.ToGRPC(clus.Members[0].Client).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp.ID})
+ if err != nil {
+ t.Fatal(err)
+ }
+ resp0, err0 := clus.Members[1].Client.KV.Get(context.TODO(), "foo")
+ if err0 != nil {
+ t.Fatal(err0)
+ }
+ resp1, err1 := clus.Members[2].Client.KV.Get(context.TODO(), "foo")
+ if err1 != nil {
+ t.Fatal(err1)
+ }
+
+ if resp0.Header.Revision == resp1.Header.Revision {
+ t.Fatalf("matching Revision values")
+ }
+
+ // Wait for CorruptCheckTime
+ time.Sleep(time.Second)
+ presp, perr := clus.Client(0).Put(context.TODO(), "abc", "aaa")
+ if perr != nil {
+ if !eqErrGRPC(perr, rpctypes.ErrCorrupt) {
+ t.Fatalf("expected %v, got %+v (%v)", rpctypes.ErrCorrupt, presp, perr)
+ } else {
+ return
+ }
+ }
+}
diff --git a/tests/integration/v3_auth_test.go b/tests/integration/v3_auth_test.go
index 286f2dbe67f..d62ea7a03f1 100644
--- a/tests/integration/v3_auth_test.go
+++ b/tests/integration/v3_auth_test.go
@@ -21,23 +21,26 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
"go.etcd.io/etcd/api/v3/authpb"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
)
// TestV3AuthEmptyUserGet ensures that a get with an empty user will return an empty user error.
func TestV3AuthEmptyUserGet(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
defer cancel()
- api := toGRPC(clus.Client(0))
+ api := integration.ToGRPC(clus.Client(0))
authSetupRoot(t, api.Auth)
_, err := api.KV.Range(ctx, &pb.RangeRequest{Key: []byte("abc")})
@@ -46,16 +49,43 @@ func TestV3AuthEmptyUserGet(t *testing.T) {
}
}
+// TestV3AuthEmptyUserPut ensures that a put with an empty user will return an empty user error,
+// and that the consistent_index is still moved forward even though the apply --> Put fails.
+func TestV3AuthEmptyUserPut(t *testing.T) {
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{
+ Size: 1,
+ SnapshotCount: 3,
+ })
+ defer clus.Terminate(t)
+
+ ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
+ defer cancel()
+
+ api := integration.ToGRPC(clus.Client(0))
+ authSetupRoot(t, api.Auth)
+
+ // The SnapshotCount is 3, so at least 3 new snapshot files will be created.
+ // On cluster termination, the VERIFY logic checks that the consistent_index
+ // is >= the last snapshot index.
+ for i := 0; i < 10; i++ {
+ _, err := api.KV.Put(ctx, &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")})
+ if !eqErrGRPC(err, rpctypes.ErrUserEmpty) {
+ t.Fatalf("got %v, expected %v", err, rpctypes.ErrUserEmpty)
+ }
+ }
+}
+
// TestV3AuthTokenWithDisable tests that auth won't crash if
// given a valid token when authentication is disabled
func TestV3AuthTokenWithDisable(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
- authSetupRoot(t, toGRPC(clus.Client(0)).Auth)
+ authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth)
- c, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"})
+ c, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"})
if cerr != nil {
t.Fatal(cerr)
}
@@ -81,11 +111,11 @@ func TestV3AuthTokenWithDisable(t *testing.T) {
}
func TestV3AuthRevision(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
- api := toGRPC(clus.Client(0))
+ api := integration.ToGRPC(clus.Client(0))
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
presp, perr := api.KV.Put(ctx, &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")})
@@ -109,25 +139,25 @@ func TestV3AuthRevision(t *testing.T) {
// TestV3AuthWithLeaseRevokeWithRoot ensures that granted leases
// with root user be revoked after TTL.
func TestV3AuthWithLeaseRevokeWithRoot(t *testing.T) {
- testV3AuthWithLeaseRevokeWithRoot(t, ClusterConfig{Size: 1})
+ testV3AuthWithLeaseRevokeWithRoot(t, integration.ClusterConfig{Size: 1})
}
// TestV3AuthWithLeaseRevokeWithRootJWT creates a lease with a JWT-token enabled cluster.
// And tests if server is able to revoke expiry lease item.
func TestV3AuthWithLeaseRevokeWithRootJWT(t *testing.T) {
- testV3AuthWithLeaseRevokeWithRoot(t, ClusterConfig{Size: 1, AuthToken: defaultTokenJWT})
+ testV3AuthWithLeaseRevokeWithRoot(t, integration.ClusterConfig{Size: 1, AuthToken: integration.DefaultTokenJWT})
}
-func testV3AuthWithLeaseRevokeWithRoot(t *testing.T, ccfg ClusterConfig) {
- BeforeTest(t)
+func testV3AuthWithLeaseRevokeWithRoot(t *testing.T, ccfg integration.ClusterConfig) {
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ccfg)
+ clus := integration.NewCluster(t, &ccfg)
defer clus.Terminate(t)
- api := toGRPC(clus.Client(0))
+ api := integration.ToGRPC(clus.Client(0))
authSetupRoot(t, api.Auth)
- rootc, cerr := NewClient(t, clientv3.Config{
+ rootc, cerr := integration.NewClient(t, clientv3.Config{
Endpoints: clus.Client(0).Endpoints(),
Username: "root",
Password: "123",
@@ -150,12 +180,10 @@ func testV3AuthWithLeaseRevokeWithRoot(t *testing.T, ccfg ClusterConfig) {
// wait for lease expire
time.Sleep(3 * time.Second)
- tresp, terr := api.Lease.LeaseTimeToLive(
+ tresp, terr := rootc.TimeToLive(
context.TODO(),
- &pb.LeaseTimeToLiveRequest{
- ID: int64(leaseID),
- Keys: true,
- },
+ leaseID,
+ clientv3.WithAttachedKeys(),
)
if terr != nil {
t.Error(terr)
@@ -177,8 +205,8 @@ type user struct {
}
func TestV3AuthWithLeaseRevoke(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
users := []user{
@@ -190,11 +218,11 @@ func TestV3AuthWithLeaseRevoke(t *testing.T) {
end: "k2",
},
}
- authSetupUsers(t, toGRPC(clus.Client(0)).Auth, users)
+ authSetupUsers(t, integration.ToGRPC(clus.Client(0)).Auth, users)
- authSetupRoot(t, toGRPC(clus.Client(0)).Auth)
+ authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth)
- rootc, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"})
+ rootc, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"})
if cerr != nil {
t.Fatal(cerr)
}
@@ -211,7 +239,7 @@ func TestV3AuthWithLeaseRevoke(t *testing.T) {
t.Fatal(err)
}
- userc, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"})
+ userc, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"})
if cerr != nil {
t.Fatal(cerr)
}
@@ -223,8 +251,8 @@ func TestV3AuthWithLeaseRevoke(t *testing.T) {
}
func TestV3AuthWithLeaseAttach(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
users := []user{
@@ -243,17 +271,17 @@ func TestV3AuthWithLeaseAttach(t *testing.T) {
end: "k4",
},
}
- authSetupUsers(t, toGRPC(clus.Client(0)).Auth, users)
+ authSetupUsers(t, integration.ToGRPC(clus.Client(0)).Auth, users)
- authSetupRoot(t, toGRPC(clus.Client(0)).Auth)
+ authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth)
- user1c, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"})
+ user1c, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"})
if cerr != nil {
t.Fatal(cerr)
}
defer user1c.Close()
- user2c, cerr := NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user2", Password: "user2-123"})
+ user2c, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user2", Password: "user2-123"})
if cerr != nil {
t.Fatal(cerr)
}
@@ -294,15 +322,12 @@ func TestV3AuthWithLeaseAttach(t *testing.T) {
func authSetupUsers(t *testing.T, auth pb.AuthClient, users []user) {
for _, user := range users {
- if _, err := auth.UserAdd(context.TODO(), &pb.AuthUserAddRequest{Name: user.name, Password: user.password, Options: &authpb.UserAddOptions{NoPassword: false}}); err != nil {
- t.Fatal(err)
- }
- if _, err := auth.RoleAdd(context.TODO(), &pb.AuthRoleAddRequest{Name: user.role}); err != nil {
- t.Fatal(err)
- }
- if _, err := auth.UserGrantRole(context.TODO(), &pb.AuthUserGrantRoleRequest{User: user.name, Role: user.role}); err != nil {
- t.Fatal(err)
- }
+ _, err := auth.UserAdd(context.TODO(), &pb.AuthUserAddRequest{Name: user.name, Password: user.password, Options: &authpb.UserAddOptions{NoPassword: false}})
+ require.NoError(t, err)
+ _, err = auth.RoleAdd(context.TODO(), &pb.AuthRoleAddRequest{Name: user.role})
+ require.NoError(t, err)
+ _, err = auth.UserGrantRole(context.TODO(), &pb.AuthUserGrantRoleRequest{User: user.name, Role: user.role})
+ require.NoError(t, err)
if len(user.key) == 0 {
continue
@@ -313,9 +338,8 @@ func authSetupUsers(t *testing.T, auth pb.AuthClient, users []user) {
Key: []byte(user.key),
RangeEnd: []byte(user.end),
}
- if _, err := auth.RoleGrantPermission(context.TODO(), &pb.AuthRoleGrantPermissionRequest{Name: user.role, Perm: perm}); err != nil {
- t.Fatal(err)
- }
+ _, err = auth.RoleGrantPermission(context.TODO(), &pb.AuthRoleGrantPermissionRequest{Name: user.role, Perm: perm})
+ require.NoError(t, err)
}
}
@@ -335,8 +359,8 @@ func authSetupRoot(t *testing.T, auth pb.AuthClient) {
}
func TestV3AuthNonAuthorizedRPCs(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
nonAuthedKV := clus.Client(0).KV
@@ -348,7 +372,7 @@ func TestV3AuthNonAuthorizedRPCs(t *testing.T) {
t.Fatalf("couldn't put key (%v)", err)
}
- authSetupRoot(t, toGRPC(clus.Client(0)).Auth)
+ authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth)
respput, err := nonAuthedKV.Put(context.TODO(), key, val)
if !eqErrGRPC(err, rpctypes.ErrGRPCUserEmpty) {
@@ -357,20 +381,19 @@ func TestV3AuthNonAuthorizedRPCs(t *testing.T) {
}
func TestV3AuthOldRevConcurrent(t *testing.T) {
- t.Skip() // TODO(jingyih): re-enable the test when #10408 is fixed.
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
- authSetupRoot(t, toGRPC(clus.Client(0)).Auth)
+ authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth)
- c, cerr := NewClient(t, clientv3.Config{
+ c, cerr := integration.NewClient(t, clientv3.Config{
Endpoints: clus.Client(0).Endpoints(),
DialTimeout: 5 * time.Second,
Username: "root",
Password: "123",
})
- testutil.AssertNil(t, cerr)
+ require.NoError(t, cerr)
defer c.Close()
var wg sync.WaitGroup
@@ -378,13 +401,13 @@ func TestV3AuthOldRevConcurrent(t *testing.T) {
defer wg.Done()
role, user := fmt.Sprintf("test-role-%d", i), fmt.Sprintf("test-user-%d", i)
_, err := c.RoleAdd(context.TODO(), role)
- testutil.AssertNil(t, err)
- _, err = c.RoleGrantPermission(context.TODO(), role, "", clientv3.GetPrefixRangeEnd(""), clientv3.PermissionType(clientv3.PermReadWrite))
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
+ _, err = c.RoleGrantPermission(context.TODO(), role, "\x00", clientv3.GetPrefixRangeEnd(""), clientv3.PermissionType(clientv3.PermReadWrite))
+ require.NoError(t, err)
_, err = c.UserAdd(context.TODO(), user, "123")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
_, err = c.Put(context.TODO(), "a", "b")
- testutil.AssertNil(t, err)
+ assert.NoError(t, err)
}
// needs concurrency to trigger
numRoles := 2
@@ -394,3 +417,57 @@ func TestV3AuthOldRevConcurrent(t *testing.T) {
}
wg.Wait()
}
+
+func TestV3AuthWatchErrorAndWatchId0(t *testing.T) {
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
+ defer clus.Terminate(t)
+
+ ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second)
+ defer cancel()
+
+ users := []user{
+ {
+ name: "user1",
+ password: "user1-123",
+ role: "role1",
+ key: "k1",
+ end: "k2",
+ },
+ }
+ authSetupUsers(t, integration.ToGRPC(clus.Client(0)).Auth, users)
+
+ authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth)
+
+ c, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"})
+ if cerr != nil {
+ t.Fatal(cerr)
+ }
+ defer c.Close()
+
+ watchStartCh, watchEndCh := make(chan any), make(chan any)
+
+ go func() {
+ wChan := c.Watch(ctx, "k1", clientv3.WithRev(1))
+ watchStartCh <- struct{}{}
+ watchResponse := <-wChan
+ t.Logf("watch response from k1: %v", watchResponse)
+ assert.NotEmpty(t, watchResponse.Events)
+ watchEndCh <- struct{}{}
+ }()
+
+ // Wait until the goroutine above has invoked Watch(), so that its watch is
+ // assigned watch ID 0.
+ <-watchStartCh
+
+ wChan := c.Watch(ctx, "non-allowed-key", clientv3.WithRev(1))
+ watchResponse := <-wChan
+ require.Error(t, watchResponse.Err()) // permission denied
+
+ _, err := c.Put(ctx, "k1", "val")
+ if err != nil {
+ t.Fatalf("Unexpected error from Put: %v", err)
+ }
+
+ <-watchEndCh
+}
diff --git a/tests/integration/v3_election_test.go b/tests/integration/v3_election_test.go
index a3aed9ba60b..350bfb354dd 100644
--- a/tests/integration/v3_election_test.go
+++ b/tests/integration/v3_election_test.go
@@ -17,29 +17,33 @@ package integration
import (
"context"
"fmt"
+ "sync"
"testing"
"time"
- "go.etcd.io/etcd/client/v3"
+ "github.com/stretchr/testify/require"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
)
// TestElectionWait tests if followers can correctly wait for elections.
func TestElectionWait(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
leaders := 3
followers := 3
var clients []*clientv3.Client
- newClient := MakeMultiNodeClients(t, clus, &clients)
+ newClient := integration.MakeMultiNodeClients(t, clus, &clients)
defer func() {
- CloseClients(t, clients)
+ integration.CloseClients(t, clients)
}()
electedc := make(chan string)
- nextc := []chan struct{}{}
+ var nextc []chan struct{}
// wait for all elections
donec := make(chan struct{})
@@ -108,8 +112,8 @@ func TestElectionWait(t *testing.T) {
// TestElectionFailover tests that an election will
func TestElectionFailover(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
cctx, cancel := context.WithCancel(context.TODO())
@@ -119,7 +123,7 @@ func TestElectionFailover(t *testing.T) {
for i := 0; i < 3; i++ {
var err error
- ss[i], err = concurrency.NewSession(clus.clients[i])
+ ss[i], err = concurrency.NewSession(clus.Client(i))
if err != nil {
t.Error(err)
}
@@ -173,11 +177,11 @@ func TestElectionFailover(t *testing.T) {
}
}
-// TestElectionSessionRelock ensures that campaigning twice on the same election
+// TestElectionSessionRecampaign ensures that campaigning twice on the same election
// with the same lock will Proclaim instead of deadlocking.
func TestElectionSessionRecampaign(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.RandClient()
@@ -207,10 +211,9 @@ func TestElectionSessionRecampaign(t *testing.T) {
// candidate can be elected on a new key that is a prefix
// of an existing key. To wit, check for regression
// of bug #6278. https://github.com/etcd-io/etcd/issues/6278
-//
func TestElectionOnPrefixOfExistingKey(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.RandClient()
@@ -236,8 +239,8 @@ func TestElectionOnPrefixOfExistingKey(t *testing.T) {
// in a new session with the same lease id) does not result in loss of
// leadership.
func TestElectionOnSessionRestart(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.RandClient()
@@ -283,8 +286,8 @@ func TestElectionOnSessionRestart(t *testing.T) {
// TestElectionObserveCompacted checks that observe can tolerate
// a leader key with a modrev less than the compaction revision.
func TestElectionObserveCompacted(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
@@ -316,3 +319,112 @@ func TestElectionObserveCompacted(t *testing.T) {
t.Fatalf(`expected leader value "abc", got %q`, string(v.Kvs[0].Value))
}
}
+
+// TestElectionWithAuthEnabled verifies the election interface when auth is enabled.
+// Refer to the discussion in https://github.com/etcd-io/etcd/issues/17502
+func TestElectionWithAuthEnabled(t *testing.T) {
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
+ defer clus.Terminate(t)
+
+ users := []user{
+ {
+ name: "user1",
+ password: "123",
+ role: "role1",
+ key: "/foo1", // prefix /foo1
+ end: "/foo2",
+ },
+ {
+ name: "user2",
+ password: "456",
+ role: "role2",
+ key: "/bar1", // prefix /bar1
+ end: "/bar2",
+ },
+ }
+
+ t.Log("Setting rbac info and enable auth.")
+ authSetupUsers(t, integration.ToGRPC(clus.Client(0)).Auth, users)
+ authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth)
+
+ c1, c1err := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "123"})
+ require.NoError(t, c1err)
+ defer c1.Close()
+
+ c2, c2err := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user2", Password: "456"})
+ require.NoError(t, c2err)
+ defer c2.Close()
+
+ campaigns := []struct {
+ name string
+ c *clientv3.Client
+ pfx string
+ sleepTime time.Duration // time to sleep before campaigning
+ }{
+ {
+ name: "client1 first campaign",
+ c: c1,
+ pfx: "/foo1/a",
+ },
+ {
+ name: "client1 second campaign",
+ c: c1,
+ pfx: "/foo1/a",
+ },
+ {
+ name: "client2 first campaign",
+ c: c2,
+ pfx: "/bar1/b",
+ sleepTime: 5 * time.Second,
+ },
+ {
+ name: "client2 second campaign",
+ c: c2,
+ pfx: "/bar1/b",
+ sleepTime: 6 * time.Second,
+ },
+ }
+
+ t.Log("Starting to campaign with multiple users.")
+ var wg sync.WaitGroup
+ errC := make(chan error, 8)
+ doneC := make(chan error)
+ for _, campaign := range campaigns {
+ campaign := campaign
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ if campaign.sleepTime > 0 {
+ time.Sleep(campaign.sleepTime)
+ }
+
+ s, serr := concurrency.NewSession(campaign.c, concurrency.WithTTL(10))
+ if serr != nil {
+ errC <- fmt.Errorf("[NewSession] %s: %w", campaign.name, serr)
+ }
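+ // Orphan stops the session's lease keep-alive without revoking the lease.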
+ s.Orphan()
+
+ e := concurrency.NewElection(s, campaign.pfx)
+ eerr := e.Campaign(context.Background(), "whatever")
+ if eerr != nil {
+ errC <- fmt.Errorf("[Campaign] %s: %w", campaign.name, eerr)
+ }
+ }()
+ }
+
+ go func() {
+ t.Log("Waiting for all goroutines to finish.")
+ defer close(doneC)
+ wg.Wait()
+ }()
+
+ select {
+ case err := <-errC:
+ t.Fatalf("Error: %v", err)
+ case <-doneC:
+ t.Log("All goroutine done!")
+ case <-time.After(30 * time.Second):
+ t.Fatal("Timed out")
+ }
+}
diff --git a/tests/integration/v3_failover_test.go b/tests/integration/v3_failover_test.go
new file mode 100644
index 00000000000..b4ce09e1a38
--- /dev/null
+++ b/tests/integration/v3_failover_test.go
@@ -0,0 +1,173 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package integration
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "errors"
+ "testing"
+ "time"
+
+ "google.golang.org/grpc"
+
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
+ clientv3test "go.etcd.io/etcd/tests/v3/integration/clientv3"
+)
+
+func TestFailover(t *testing.T) {
+ cases := []struct {
+ name string
+ testFunc func(*testing.T, *tls.Config, *integration2.Cluster) (*clientv3.Client, error)
+ }{
+ {
+ name: "create client before the first server down",
+ testFunc: createClientBeforeServerDown,
+ },
+ {
+ name: "create client after the first server down",
+ testFunc: createClientAfterServerDown,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Logf("Starting test [%s]", tc.name)
+ integration2.BeforeTest(t)
+
+ // Launch an etcd cluster with 3 members
+ t.Logf("Launching an etcd cluster with 3 members [%s]", tc.name)
+ clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, ClientTLS: &integration2.TestTLSInfo})
+ defer clus.Terminate(t)
+
+ cc, err := integration2.TestTLSInfo.ClientConfig()
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Create an etcd client before or after first server down
+ t.Logf("Creating an etcd client [%s]", tc.name)
+ cli, err := tc.testFunc(t, cc, clus)
+ if err != nil {
+ t.Fatalf("Failed to create client: %v", err)
+ }
+ defer cli.Close()
+
+ // Sanity test
+ t.Logf("Running sanity test [%s]", tc.name)
+ key, val := "key1", "val1"
+ putWithRetries(t, cli, key, val, 10)
+ getWithRetries(t, cli, key, val, 10)
+
+ t.Logf("Test done [%s]", tc.name)
+ })
+ }
+}
+
+func createClientBeforeServerDown(t *testing.T, cc *tls.Config, clus *integration2.Cluster) (*clientv3.Client, error) {
+ cli, err := createClient(t, cc, clus)
+ if err != nil {
+ return nil, err
+ }
+ clus.Members[0].Close()
+ return cli, nil
+}
+
+func createClientAfterServerDown(t *testing.T, cc *tls.Config, clus *integration2.Cluster) (*clientv3.Client, error) {
+ clus.Members[0].Close()
+ return createClient(t, cc, clus)
+}
+
+func createClient(t *testing.T, cc *tls.Config, clus *integration2.Cluster) (*clientv3.Client, error) {
+ cli, err := integration2.NewClient(t, clientv3.Config{
+ Endpoints: clus.Endpoints(),
+ DialTimeout: 5 * time.Second,
+ DialOptions: []grpc.DialOption{grpc.WithBlock()},
+ TLS: cc,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return cli, nil
+}
+
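+// putWithRetries writes key=val through cli, retrying up to retryCount times on transient timeout errors and failing the test on anything else.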
+func putWithRetries(t *testing.T, cli *clientv3.Client, key, val string, retryCount int) {
+ for retryCount > 0 {
+ // put data test
+ err := func() error {
+ t.Log("Sanity test, putting data")
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+
+ if _, putErr := cli.Put(ctx, key, val); putErr != nil {
+ t.Logf("Failed to put data (%v)", putErr)
+ return putErr
+ }
+ return nil
+ }()
+ if err != nil {
+ retryCount--
+ if shouldRetry(err) {
+ continue
+ } else {
+ t.Fatal(err)
+ }
+ }
+ break
+ }
+}
+
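+// getWithRetries reads key through cli and verifies it holds val, retrying up to retryCount times on transient timeout errors and failing the test on anything else.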
+func getWithRetries(t *testing.T, cli *clientv3.Client, key, val string, retryCount int) {
+ for retryCount > 0 {
+ // get data test
+ err := func() error {
+ t.Log("Sanity test, getting data")
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+ resp, getErr := cli.Get(ctx, key)
+ if getErr != nil {
+ t.Logf("Failed to get key (%v)", getErr)
+ return getErr
+ }
+ if len(resp.Kvs) != 1 {
+ t.Fatalf("Expected 1 key, got %d", len(resp.Kvs))
+ }
+ if !bytes.Equal([]byte(val), resp.Kvs[0].Value) {
+ t.Fatalf("Unexpected value, expected: %s, got: %s", val, resp.Kvs[0].Value)
+ }
+ return nil
+ }()
+ if err != nil {
+ retryCount--
+ if shouldRetry(err) {
+ continue
+ } else {
+ t.Fatal(err)
+ }
+ }
+ break
+ }
+}
+
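+// shouldRetry reports whether err is a transient timeout (client-side timeout, server context timeout, or etcd request/leader-failure timeout) worth retrying.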
+func shouldRetry(err error) bool {
+ if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) ||
+ errors.Is(err, rpctypes.ErrTimeout) || errors.Is(err, rpctypes.ErrTimeoutDueToLeaderFail) {
+ return true
+ }
+ return false
+}
diff --git a/tests/integration/v3_grpc_inflight_test.go b/tests/integration/v3_grpc_inflight_test.go
index 9f5085112d9..7968e614edc 100644
--- a/tests/integration/v3_grpc_inflight_test.go
+++ b/tests/integration/v3_grpc_inflight_test.go
@@ -20,22 +20,24 @@ import (
"testing"
"time"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
)
// TestV3MaintenanceDefragmentInflightRange ensures inflight range requests
// do not panic the mvcc backend while defragment is running.
func TestV3MaintenanceDefragmentInflightRange(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.RandClient()
- kvc := toGRPC(cli).KV
+ kvc := integration.ToGRPC(cli).KV
if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
t.Fatal(err)
}
@@ -48,7 +50,7 @@ func TestV3MaintenanceDefragmentInflightRange(t *testing.T) {
kvc.Range(ctx, &pb.RangeRequest{Key: []byte("foo")})
}()
- mvc := toGRPC(cli).Maintenance
+ mvc := integration.ToGRPC(cli).Maintenance
mvc.Defragment(context.Background(), &pb.DefragmentRequest{})
cancel()
@@ -60,12 +62,12 @@ func TestV3MaintenanceDefragmentInflightRange(t *testing.T) {
// They are either finished or canceled, but never crash the backend.
// See https://github.com/etcd-io/etcd/issues/7322 for more detail.
func TestV3KVInflightRangeRequests(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
cli := clus.RandClient()
- kvc := toGRPC(cli).KV
+ kvc := integration.ToGRPC(cli).KV
if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
t.Fatal(err)
diff --git a/tests/integration/v3_grpc_test.go b/tests/integration/v3_grpc_test.go
index 06d93219703..999d28ba8d9 100644
--- a/tests/integration/v3_grpc_test.go
+++ b/tests/integration/v3_grpc_test.go
@@ -17,33 +17,37 @@ package integration
import (
"bytes"
"context"
+ "errors"
"fmt"
- "io/ioutil"
"math/rand"
"os"
"reflect"
+ "strings"
"testing"
"time"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/pkg/v3/transport"
- "go.etcd.io/etcd/client/v3"
-
+ "github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
)
-// TestV3PutOverwrite puts a key with the v3 api to a random cluster member,
+// TestV3PutOverwrite puts a key with the v3 api to a random Cluster member,
// overwrites it, then checks that the change was applied.
func TestV3PutOverwrite(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
key := []byte("foo")
reqput := &pb.PutRequest{Key: key, Value: []byte("bar"), PrevKv: true}
@@ -85,45 +89,45 @@ func TestV3PutOverwrite(t *testing.T) {
}
}
-// TestPutRestart checks if a put after an unrelated member restart succeeds
+// TestV3PutRestart checks if a put after an unrelated member restart succeeds
func TestV3PutRestart(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
kvIdx := rand.Intn(3)
- kvc := toGRPC(clus.Client(kvIdx)).KV
+ kvc := integration.ToGRPC(clus.Client(kvIdx)).KV
stopIdx := kvIdx
for stopIdx == kvIdx {
stopIdx = rand.Intn(3)
}
- clus.clients[stopIdx].Close()
+ clus.Client(stopIdx).Close()
clus.Members[stopIdx].Stop(t)
clus.Members[stopIdx].Restart(t)
- c, cerr := NewClientV3(clus.Members[stopIdx])
+ c, cerr := integration.NewClientV3(clus.Members[stopIdx])
if cerr != nil {
t.Fatalf("cannot create client: %v", cerr)
}
- clus.clients[stopIdx] = c
+ clus.Members[stopIdx].ServerClient = c
ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second)
defer cancel()
reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
_, err := kvc.Put(ctx, reqput)
- if err != nil && err == ctx.Err() {
+ if err != nil && errors.Is(err, ctx.Err()) {
t.Fatalf("expected grpc error, got local ctx error (%v)", err)
}
}
// TestV3CompactCurrentRev ensures keys are present when compacting on current revision.
func TestV3CompactCurrentRev(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
for i := 0; i < 3; i++ {
if _, err := kvc.Put(context.Background(), preq); err != nil {
@@ -153,12 +157,12 @@ func TestV3CompactCurrentRev(t *testing.T) {
// TestV3HashKV ensures that multiple calls of HashKV on same node return same hash and compact rev.
func TestV3HashKV(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
- kvc := toGRPC(clus.RandClient()).KV
- mvc := toGRPC(clus.RandClient()).Maintenance
+ kvc := integration.ToGRPC(clus.RandClient()).KV
+ mvc := integration.ToGRPC(clus.RandClient()).Maintenance
for i := 0; i < 10; i++ {
resp, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte(fmt.Sprintf("bar%d", i))})
@@ -201,12 +205,12 @@ func TestV3HashKV(t *testing.T) {
}
func TestV3TxnTooManyOps(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
maxTxnOps := uint(128)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3, MaxTxnOps: maxTxnOps})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, MaxTxnOps: maxTxnOps})
defer clus.Terminate(t)
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
// unique keys
i := new(int)
@@ -249,9 +253,10 @@ func TestV3TxnTooManyOps(t *testing.T) {
newTxn := &pb.TxnRequest{}
addSuccessOps(newTxn)
txn.Success = append(txn.Success,
- &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{
- RequestTxn: newTxn,
- },
+ &pb.RequestOp{
+ Request: &pb.RequestOp_RequestTxn{
+ RequestTxn: newTxn,
+ },
},
)
}
@@ -277,52 +282,61 @@ func TestV3TxnTooManyOps(t *testing.T) {
}
func TestV3TxnDuplicateKeys(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
putreq := &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: &pb.PutRequest{Key: []byte("abc"), Value: []byte("def")}}}
- delKeyReq := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{
- RequestDeleteRange: &pb.DeleteRangeRequest{
- Key: []byte("abc"),
+ delKeyReq := &pb.RequestOp{
+ Request: &pb.RequestOp_RequestDeleteRange{
+ RequestDeleteRange: &pb.DeleteRangeRequest{
+ Key: []byte("abc"),
+ },
},
- },
}
- delInRangeReq := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{
- RequestDeleteRange: &pb.DeleteRangeRequest{
- Key: []byte("a"), RangeEnd: []byte("b"),
+ delInRangeReq := &pb.RequestOp{
+ Request: &pb.RequestOp_RequestDeleteRange{
+ RequestDeleteRange: &pb.DeleteRangeRequest{
+ Key: []byte("a"), RangeEnd: []byte("b"),
+ },
},
- },
}
- delOutOfRangeReq := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{
- RequestDeleteRange: &pb.DeleteRangeRequest{
- Key: []byte("abb"), RangeEnd: []byte("abc"),
+ delOutOfRangeReq := &pb.RequestOp{
+ Request: &pb.RequestOp_RequestDeleteRange{
+ RequestDeleteRange: &pb.DeleteRangeRequest{
+ Key: []byte("abb"), RangeEnd: []byte("abc"),
+ },
},
- },
}
- txnDelReq := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{
- RequestTxn: &pb.TxnRequest{Success: []*pb.RequestOp{delInRangeReq}},
- },
+ txnDelReq := &pb.RequestOp{
+ Request: &pb.RequestOp_RequestTxn{
+ RequestTxn: &pb.TxnRequest{Success: []*pb.RequestOp{delInRangeReq}},
+ },
}
- txnDelReqTwoSide := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{
- RequestTxn: &pb.TxnRequest{
- Success: []*pb.RequestOp{delInRangeReq},
- Failure: []*pb.RequestOp{delInRangeReq}},
- },
+ txnDelReqTwoSide := &pb.RequestOp{
+ Request: &pb.RequestOp_RequestTxn{
+ RequestTxn: &pb.TxnRequest{
+ Success: []*pb.RequestOp{delInRangeReq},
+ Failure: []*pb.RequestOp{delInRangeReq},
+ },
+ },
}
- txnPutReq := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{
- RequestTxn: &pb.TxnRequest{Success: []*pb.RequestOp{putreq}},
- },
+ txnPutReq := &pb.RequestOp{
+ Request: &pb.RequestOp_RequestTxn{
+ RequestTxn: &pb.TxnRequest{Success: []*pb.RequestOp{putreq}},
+ },
}
- txnPutReqTwoSide := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{
- RequestTxn: &pb.TxnRequest{
- Success: []*pb.RequestOp{putreq},
- Failure: []*pb.RequestOp{putreq}},
- },
+ txnPutReqTwoSide := &pb.RequestOp{
+ Request: &pb.RequestOp_RequestTxn{
+ RequestTxn: &pb.TxnRequest{
+ Success: []*pb.RequestOp{putreq},
+ Failure: []*pb.RequestOp{putreq},
+ },
+ },
}
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
tests := []struct {
txnSuccess []*pb.RequestOp
@@ -393,13 +407,13 @@ func TestV3TxnDuplicateKeys(t *testing.T) {
}
}
-// Testv3TxnRevision tests that the transaction header revision is set as expected.
+// TestV3TxnRevision tests that the transaction header revision is set as expected.
func TestV3TxnRevision(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
pr := &pb.PutRequest{Key: []byte("abc"), Value: []byte("def")}
presp, err := kvc.Put(context.TODO(), pr)
if err != nil {
@@ -443,14 +457,14 @@ func TestV3TxnRevision(t *testing.T) {
}
}
-// Testv3TxnCmpHeaderRev tests that the txn header revision is set as expected
+// TestV3TxnCmpHeaderRev tests that the txn header revision is set as expected
// when compared to the Succeeded field in the txn response.
func TestV3TxnCmpHeaderRev(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
for i := 0; i < 10; i++ {
// Concurrently put a key with a txn comparing on it.
@@ -469,7 +483,8 @@ func TestV3TxnCmpHeaderRev(t *testing.T) {
// The read-only txn uses the optimized readindex server path.
txnget := &pb.RequestOp{Request: &pb.RequestOp_RequestRange{
- RequestRange: &pb.RangeRequest{Key: []byte("k")}}}
+ RequestRange: &pb.RangeRequest{Key: []byte("k")},
+ }}
txn := &pb.TxnRequest{Success: []*pb.RequestOp{txnget}}
// i = 0 /\ Succeeded => put followed txn
cmp := &pb.Compare{
@@ -486,9 +501,8 @@ func TestV3TxnCmpHeaderRev(t *testing.T) {
}
prev := <-revc
- if err := <-errCh; err != nil {
- t.Fatal(err)
- }
+ err = <-errCh
+ require.NoError(t, err)
// put followed txn; should eval to false
if prev > tresp.Header.Revision && !tresp.Succeeded {
t.Errorf("#%d: got else but put rev %d followed txn rev (%+v)", i, prev, tresp)
@@ -502,15 +516,14 @@ func TestV3TxnCmpHeaderRev(t *testing.T) {
// TestV3TxnRangeCompare tests range comparisons in txns
func TestV3TxnRangeCompare(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
// put keys, named by expected revision
for _, k := range []string{"/a/2", "/a/3", "/a/4", "/f/5"} {
- if _, err := clus.Client(0).Put(context.TODO(), k, "x"); err != nil {
- t.Fatal(err)
- }
+ _, err := clus.Client(0).Put(context.TODO(), k, "x")
+ require.NoError(t, err)
}
tests := []struct {
@@ -597,7 +610,7 @@ func TestV3TxnRangeCompare(t *testing.T) {
},
}
- kvc := toGRPC(clus.Client(0)).KV
+ kvc := integration.ToGRPC(clus.Client(0)).KV
for i, tt := range tests {
txn := &pb.TxnRequest{}
txn.Compare = append(txn.Compare, &tt.cmp)
@@ -611,13 +624,13 @@ func TestV3TxnRangeCompare(t *testing.T) {
}
}
-// TestV3TxnNested tests nested txns follow paths as expected.
+// TestV3TxnNestedPath tests nested txns follow paths as expected.
func TestV3TxnNestedPath(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
cmpTrue := &pb.Compare{
Result: pb.Compare_EQUAL,
@@ -666,17 +679,17 @@ func TestV3TxnNestedPath(t *testing.T) {
// TestV3PutIgnoreValue ensures that writes with ignore_value overwrite the key with its previous value.
func TestV3PutIgnoreValue(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
key, val := []byte("foo"), []byte("bar")
putReq := pb.PutRequest{Key: key, Value: val}
// create lease
- lc := toGRPC(clus.RandClient()).Lease
+ lc := integration.ToGRPC(clus.RandClient()).Lease
lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
if err != nil {
t.Fatal(err)
@@ -707,7 +720,8 @@ func TestV3PutIgnoreValue(t *testing.T) {
preq.IgnoreValue = true
txn := &pb.TxnRequest{}
txn.Success = append(txn.Success, &pb.RequestOp{
- Request: &pb.RequestOp_RequestPut{RequestPut: &preq}})
+ Request: &pb.RequestOp_RequestPut{RequestPut: &preq},
+ })
_, err := kvc.Txn(context.TODO(), txn)
return err
},
@@ -730,7 +744,8 @@ func TestV3PutIgnoreValue(t *testing.T) {
preq.IgnoreValue = true
txn := &pb.TxnRequest{}
txn.Success = append(txn.Success, &pb.RequestOp{
- Request: &pb.RequestOp_RequestPut{RequestPut: &preq}})
+ Request: &pb.RequestOp_RequestPut{RequestPut: &preq},
+ })
_, err := kvc.Txn(context.TODO(), txn)
return err
},
@@ -799,15 +814,15 @@ func TestV3PutIgnoreValue(t *testing.T) {
// TestV3PutIgnoreLease ensures that writes with ignore_lease reuse the previous lease for the key they overwrite.
func TestV3PutIgnoreLease(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
// create lease
- lc := toGRPC(clus.RandClient()).Lease
+ lc := integration.ToGRPC(clus.RandClient()).Lease
lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
if err != nil {
t.Fatal(err)
@@ -842,7 +857,8 @@ func TestV3PutIgnoreLease(t *testing.T) {
preq.IgnoreLease = true
txn := &pb.TxnRequest{}
txn.Success = append(txn.Success, &pb.RequestOp{
- Request: &pb.RequestOp_RequestPut{RequestPut: &preq}})
+ Request: &pb.RequestOp_RequestPut{RequestPut: &preq},
+ })
_, err := kvc.Txn(context.TODO(), txn)
return err
},
@@ -868,7 +884,8 @@ func TestV3PutIgnoreLease(t *testing.T) {
preq.IgnoreLease = true
txn := &pb.TxnRequest{}
txn.Success = append(txn.Success, &pb.RequestOp{
- Request: &pb.RequestOp_RequestPut{RequestPut: &preq}})
+ Request: &pb.RequestOp_RequestPut{RequestPut: &preq},
+ })
_, err := kvc.Txn(context.TODO(), txn)
return err
},
@@ -939,11 +956,11 @@ func TestV3PutIgnoreLease(t *testing.T) {
// TestV3PutMissingLease ensures that a Put on a key with a bogus lease fails.
func TestV3PutMissingLease(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
key := []byte("foo")
preq := &pb.PutRequest{Key: key, Lease: 123456}
tests := []func(){
@@ -958,7 +975,9 @@ func TestV3PutMissingLease(t *testing.T) {
txn := &pb.TxnRequest{}
txn.Success = append(txn.Success, &pb.RequestOp{
Request: &pb.RequestOp_RequestPut{
- RequestPut: preq}})
+ RequestPut: preq,
+ },
+ })
if tresp, err := kvc.Txn(context.TODO(), txn); err == nil {
t.Errorf("succeeded txn success. req: %v. resp: %v", txn, tresp)
}
@@ -968,7 +987,9 @@ func TestV3PutMissingLease(t *testing.T) {
txn := &pb.TxnRequest{}
txn.Failure = append(txn.Failure, &pb.RequestOp{
Request: &pb.RequestOp_RequestPut{
- RequestPut: preq}})
+ RequestPut: preq,
+ },
+ })
cmp := &pb.Compare{
Result: pb.Compare_GREATER,
Target: pb.Compare_CREATE,
@@ -985,10 +1006,14 @@ func TestV3PutMissingLease(t *testing.T) {
rreq := &pb.RangeRequest{Key: []byte("bar")}
txn.Success = append(txn.Success, &pb.RequestOp{
Request: &pb.RequestOp_RequestRange{
- RequestRange: rreq}})
+ RequestRange: rreq,
+ },
+ })
txn.Failure = append(txn.Failure, &pb.RequestOp{
Request: &pb.RequestOp_RequestPut{
- RequestPut: preq}})
+ RequestPut: preq,
+ },
+ })
if tresp, err := kvc.Txn(context.TODO(), txn); err != nil {
t.Errorf("failed good txn. req: %v. resp: %v", txn, tresp)
}
@@ -1010,8 +1035,10 @@ func TestV3PutMissingLease(t *testing.T) {
// TestV3DeleteRange tests various edge cases in the DeleteRange API.
func TestV3DeleteRange(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
tests := []struct {
+ name string
+
keySet []string
begin string
end string
@@ -1020,110 +1047,118 @@ func TestV3DeleteRange(t *testing.T) {
wantSet [][]byte
deleted int64
}{
- // delete middle
{
+ "delete middle",
[]string{"foo", "foo/abc", "fop"},
"foo/", "fop", false,
- [][]byte{[]byte("foo"), []byte("fop")}, 1,
+ [][]byte{[]byte("foo"), []byte("fop")},
+ 1,
},
- // no delete
{
+ "no delete",
[]string{"foo", "foo/abc", "fop"},
"foo/", "foo/", false,
- [][]byte{[]byte("foo"), []byte("foo/abc"), []byte("fop")}, 0,
+ [][]byte{[]byte("foo"), []byte("foo/abc"), []byte("fop")},
+ 0,
},
- // delete first
{
+ "delete first",
[]string{"foo", "foo/abc", "fop"},
"fo", "fop", false,
- [][]byte{[]byte("fop")}, 2,
+ [][]byte{[]byte("fop")},
+ 2,
},
- // delete tail
{
+ "delete tail",
[]string{"foo", "foo/abc", "fop"},
"foo/", "fos", false,
- [][]byte{[]byte("foo")}, 2,
+ [][]byte{[]byte("foo")},
+ 2,
},
- // delete exact
{
+ "delete exact",
[]string{"foo", "foo/abc", "fop"},
"foo/abc", "", false,
- [][]byte{[]byte("foo"), []byte("fop")}, 1,
+ [][]byte{[]byte("foo"), []byte("fop")},
+ 1,
},
- // delete none, [x,x)
{
+ "delete none [x,x)",
[]string{"foo"},
"foo", "foo", false,
- [][]byte{[]byte("foo")}, 0,
+ [][]byte{[]byte("foo")},
+ 0,
},
- // delete middle with preserveKVs set
{
+ "delete middle with preserveKVs set",
[]string{"foo", "foo/abc", "fop"},
"foo/", "fop", true,
- [][]byte{[]byte("foo"), []byte("fop")}, 1,
+ [][]byte{[]byte("foo"), []byte("fop")},
+ 1,
},
}
for i, tt := range tests {
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
- kvc := toGRPC(clus.RandClient()).KV
+ t.Run(tt.name, func(t *testing.T) {
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
+ kvc := integration.ToGRPC(clus.RandClient()).KV
+ defer clus.Terminate(t)
+
+ ks := tt.keySet
+ for j := range ks {
+ reqput := &pb.PutRequest{Key: []byte(ks[j]), Value: []byte{}}
+ _, err := kvc.Put(context.TODO(), reqput)
+ if err != nil {
+ t.Fatalf("couldn't put key (%v)", err)
+ }
+ }
- ks := tt.keySet
- for j := range ks {
- reqput := &pb.PutRequest{Key: []byte(ks[j]), Value: []byte{}}
- _, err := kvc.Put(context.TODO(), reqput)
+ dreq := &pb.DeleteRangeRequest{
+ Key: []byte(tt.begin),
+ RangeEnd: []byte(tt.end),
+ PrevKv: tt.prevKV,
+ }
+ dresp, err := kvc.DeleteRange(context.TODO(), dreq)
if err != nil {
- t.Fatalf("couldn't put key (%v)", err)
+ t.Fatalf("couldn't delete range on test %d (%v)", i, err)
}
- }
-
- dreq := &pb.DeleteRangeRequest{
- Key: []byte(tt.begin),
- RangeEnd: []byte(tt.end),
- PrevKv: tt.prevKV,
- }
- dresp, err := kvc.DeleteRange(context.TODO(), dreq)
- if err != nil {
- t.Fatalf("couldn't delete range on test %d (%v)", i, err)
- }
- if tt.deleted != dresp.Deleted {
- t.Errorf("expected %d on test %v, got %d", tt.deleted, i, dresp.Deleted)
- }
- if tt.prevKV {
- if len(dresp.PrevKvs) != int(dresp.Deleted) {
- t.Errorf("preserve %d keys, want %d", len(dresp.PrevKvs), dresp.Deleted)
+ if tt.deleted != dresp.Deleted {
+ t.Errorf("expected %d on test %v, got %d", tt.deleted, i, dresp.Deleted)
+ }
+ if tt.prevKV {
+ if len(dresp.PrevKvs) != int(dresp.Deleted) {
+ t.Errorf("preserve %d keys, want %d", len(dresp.PrevKvs), dresp.Deleted)
+ }
}
- }
- rreq := &pb.RangeRequest{Key: []byte{0x0}, RangeEnd: []byte{0xff}}
- rresp, err := kvc.Range(context.TODO(), rreq)
- if err != nil {
- t.Errorf("couldn't get range on test %v (%v)", i, err)
- }
- if dresp.Header.Revision != rresp.Header.Revision {
- t.Errorf("expected revision %v, got %v",
- dresp.Header.Revision, rresp.Header.Revision)
- }
+ rreq := &pb.RangeRequest{Key: []byte{0x0}, RangeEnd: []byte{0xff}}
+ rresp, err := kvc.Range(context.TODO(), rreq)
+ if err != nil {
+ t.Errorf("couldn't get range on test %v (%v)", i, err)
+ }
+ if dresp.Header.Revision != rresp.Header.Revision {
+ t.Errorf("expected revision %v, got %v",
+ dresp.Header.Revision, rresp.Header.Revision)
+ }
- keys := [][]byte{}
- for j := range rresp.Kvs {
- keys = append(keys, rresp.Kvs[j].Key)
- }
- if !reflect.DeepEqual(tt.wantSet, keys) {
- t.Errorf("expected %v on test %v, got %v", tt.wantSet, i, keys)
- }
- // can't defer because tcp ports will be in use
- clus.Terminate(t)
+ var keys [][]byte
+ for j := range rresp.Kvs {
+ keys = append(keys, rresp.Kvs[j].Key)
+ }
+ if !reflect.DeepEqual(tt.wantSet, keys) {
+ t.Errorf("expected %v on test %v, got %v", tt.wantSet, i, keys)
+ }
+ })
}
}
// TestV3TxnInvalidRange tests that invalid ranges are rejected in txns.
func TestV3TxnInvalidRange(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
for i := 0; i < 3; i++ {
@@ -1142,12 +1177,16 @@ func TestV3TxnInvalidRange(t *testing.T) {
txn := &pb.TxnRequest{}
txn.Success = append(txn.Success, &pb.RequestOp{
Request: &pb.RequestOp_RequestPut{
- RequestPut: preq}})
+ RequestPut: preq,
+ },
+ })
rreq := &pb.RangeRequest{Key: []byte("foo"), Revision: 100}
txn.Success = append(txn.Success, &pb.RequestOp{
Request: &pb.RequestOp_RequestRange{
- RequestRange: rreq}})
+ RequestRange: rreq,
+ },
+ })
if _, err := kvc.Txn(context.TODO(), txn); !eqErrGRPC(err, rpctypes.ErrGRPCFutureRev) {
t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCFutureRev)
@@ -1162,12 +1201,12 @@ func TestV3TxnInvalidRange(t *testing.T) {
}
func TestV3TooLargeRequest(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
// 2MB request value
largeV := make([]byte, 2*1024*1024)
@@ -1181,13 +1220,13 @@ func TestV3TooLargeRequest(t *testing.T) {
// TestV3Hash tests hash.
func TestV3Hash(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
cli := clus.RandClient()
- kvc := toGRPC(cli).KV
- m := toGRPC(cli).Maintenance
+ kvc := integration.ToGRPC(cli).KV
+ m := integration.ToGRPC(cli).Maintenance
preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
@@ -1206,12 +1245,12 @@ func TestV3Hash(t *testing.T) {
// TestV3HashRestart ensures that hash stays the same after restart.
func TestV3HashRestart(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
cli := clus.RandClient()
- resp, err := toGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{})
+ resp, err := integration.ToGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{})
if err != nil || resp.Hash == 0 {
t.Fatalf("couldn't hash (%v, hash %d)", err, resp.Hash)
}
@@ -1219,12 +1258,12 @@ func TestV3HashRestart(t *testing.T) {
clus.Members[0].Stop(t)
clus.Members[0].Restart(t)
- clus.waitLeader(t, clus.Members)
- kvc := toGRPC(clus.Client(0)).KV
+ clus.WaitMembersForLeader(t, clus.Members)
+ kvc := integration.ToGRPC(clus.Client(0)).KV
waitForRestart(t, kvc)
cli = clus.RandClient()
- resp, err = toGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{})
+ resp, err = integration.ToGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{})
if err != nil || resp.Hash == 0 {
t.Fatalf("couldn't hash (%v, hash %d)", err, resp.Hash)
}
@@ -1237,10 +1276,10 @@ func TestV3HashRestart(t *testing.T) {
// TestV3StorageQuotaAPI tests the V3 server respects quotas at the API layer
func TestV3StorageQuotaAPI(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
quotasize := int64(16 * os.Getpagesize())
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
// Set a quota on one node
clus.Members[0].QuotaBackendBytes = quotasize
@@ -1248,7 +1287,7 @@ func TestV3StorageQuotaAPI(t *testing.T) {
clus.Members[0].Restart(t)
defer clus.Terminate(t)
- kvc := toGRPC(clus.Client(0)).KV
+ kvc := integration.ToGRPC(clus.Client(0)).KV
waitForRestart(t, kvc)
key := []byte("abc")
@@ -1284,16 +1323,19 @@ func TestV3StorageQuotaAPI(t *testing.T) {
}
func TestV3RangeRequest(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
tests := []struct {
+ name string
+
putKeys []string
reqs []pb.RangeRequest
- wresps [][]string
- wmores []bool
+ wresps [][]string
+ wmores []bool
+ wcounts []int64
}{
- // single key
{
+ "single key",
[]string{"foo", "bar"},
[]pb.RangeRequest{
// exists
@@ -1307,9 +1349,10 @@ func TestV3RangeRequest(t *testing.T) {
{},
},
[]bool{false, false},
+ []int64{1, 0},
},
- // multi-key
{
+ "multi-key",
[]string{"a", "b", "c", "d", "e"},
[]pb.RangeRequest{
// all in range
@@ -1335,9 +1378,10 @@ func TestV3RangeRequest(t *testing.T) {
{"a", "b", "c", "d", "e"},
},
[]bool{false, false, false, false, false, false},
+ []int64{5, 2, 0, 0, 0, 5},
},
- // revision
{
+ "revision",
[]string{"a", "b", "c", "d", "e"},
[]pb.RangeRequest{
{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 0},
@@ -1353,25 +1397,33 @@ func TestV3RangeRequest(t *testing.T) {
{"a", "b"},
},
[]bool{false, false, false, false},
+ []int64{5, 0, 1, 2},
},
- // limit
{
- []string{"foo", "bar"},
+ "limit",
+ []string{"a", "b", "c"},
[]pb.RangeRequest{
// more
{Key: []byte("a"), RangeEnd: []byte("z"), Limit: 1},
- // no more
+ // half
{Key: []byte("a"), RangeEnd: []byte("z"), Limit: 2},
+ // no more
+ {Key: []byte("a"), RangeEnd: []byte("z"), Limit: 3},
+ // limit over
+ {Key: []byte("a"), RangeEnd: []byte("z"), Limit: 4},
},
[][]string{
- {"bar"},
- {"bar", "foo"},
+ {"a"},
+ {"a", "b"},
+ {"a", "b", "c"},
+ {"a", "b", "c"},
},
- []bool{true, false},
+ []bool{true, true, false, false},
+ []int64{3, 3, 3, 3},
},
- // sort
{
+ "sort",
[]string{"b", "a", "c", "d", "c"},
[]pb.RangeRequest{
{
@@ -1421,9 +1473,10 @@ func TestV3RangeRequest(t *testing.T) {
{"b", "a", "c", "d"},
},
[]bool{true, true, true, true, false, false},
+ []int64{4, 4, 4, 4, 0, 4},
},
- // min/max mod rev
{
+ "min/max mod rev",
[]string{"rev2", "rev3", "rev4", "rev5", "rev6"},
[]pb.RangeRequest{
{
@@ -1452,9 +1505,10 @@ func TestV3RangeRequest(t *testing.T) {
{"rev2", "rev3", "rev4", "rev5", "rev6"},
},
[]bool{false, false, false, false},
+ []int64{5, 5, 5, 5},
},
- // min/max create rev
{
+ "min/max create rev",
[]string{"rev2", "rev3", "rev2", "rev2", "rev6", "rev3"},
[]pb.RangeRequest{
{
@@ -1483,67 +1537,65 @@ func TestV3RangeRequest(t *testing.T) {
{"rev2", "rev3", "rev6"},
},
[]bool{false, false, false, false},
+ []int64{3, 3, 3, 3},
},
}
for i, tt := range tests {
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
- for _, k := range tt.putKeys {
- kvc := toGRPC(clus.RandClient()).KV
- req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
- if _, err := kvc.Put(context.TODO(), req); err != nil {
- t.Fatalf("#%d: couldn't put key (%v)", i, err)
+ t.Run(tt.name, func(t *testing.T) {
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
+ defer clus.Terminate(t)
+ for _, k := range tt.putKeys {
+ kvc := integration.ToGRPC(clus.RandClient()).KV
+ req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
+ if _, err := kvc.Put(context.TODO(), req); err != nil {
+ t.Fatalf("#%d: couldn't put key (%v)", i, err)
+ }
}
- }
- for j, req := range tt.reqs {
- kvc := toGRPC(clus.RandClient()).KV
- resp, err := kvc.Range(context.TODO(), &req)
- if err != nil {
- t.Errorf("#%d.%d: Range error: %v", i, j, err)
- continue
- }
- if len(resp.Kvs) != len(tt.wresps[j]) {
- t.Errorf("#%d.%d: bad len(resp.Kvs). got = %d, want = %d, ", i, j, len(resp.Kvs), len(tt.wresps[j]))
- continue
- }
- for k, wKey := range tt.wresps[j] {
- respKey := string(resp.Kvs[k].Key)
- if respKey != wKey {
- t.Errorf("#%d.%d: key[%d]. got = %v, want = %v, ", i, j, k, respKey, wKey)
+ for j, req := range tt.reqs {
+ kvc := integration.ToGRPC(clus.RandClient()).KV
+ resp, err := kvc.Range(context.TODO(), &req)
+ if err != nil {
+ t.Errorf("#%d.%d: Range error: %v", i, j, err)
+ continue
+ }
+ if len(resp.Kvs) != len(tt.wresps[j]) {
+ t.Errorf("#%d.%d: bad len(resp.Kvs). got = %d, want = %d, ", i, j, len(resp.Kvs), len(tt.wresps[j]))
+ continue
+ }
+ for k, wKey := range tt.wresps[j] {
+ respKey := string(resp.Kvs[k].Key)
+ if respKey != wKey {
+ t.Errorf("#%d.%d: key[%d]. got = %v, want = %v, ", i, j, k, respKey, wKey)
+ }
+ }
+ if resp.More != tt.wmores[j] {
+ t.Errorf("#%d.%d: bad more. got = %v, want = %v, ", i, j, resp.More, tt.wmores[j])
+ }
+ if resp.GetCount() != tt.wcounts[j] {
+ t.Errorf("#%d.%d: bad count. got = %v, want = %v, ", i, j, resp.GetCount(), tt.wcounts[j])
+ }
+ wrev := int64(len(tt.putKeys) + 1)
+ if resp.Header.Revision != wrev {
+ t.Errorf("#%d.%d: bad header revision. got = %d. want = %d", i, j, resp.Header.Revision, wrev)
}
}
- if resp.More != tt.wmores[j] {
- t.Errorf("#%d.%d: bad more. got = %v, want = %v, ", i, j, resp.More, tt.wmores[j])
- }
- wrev := int64(len(tt.putKeys) + 1)
- if resp.Header.Revision != wrev {
- t.Errorf("#%d.%d: bad header revision. got = %d. want = %d", i, j, resp.Header.Revision, wrev)
- }
- }
- clus.Terminate(t)
+ })
}
}
-func newClusterV3NoClients(t *testing.T, cfg *ClusterConfig) *ClusterV3 {
- cfg.UseGRPC = true
- clus := &ClusterV3{cluster: NewClusterByConfig(t, cfg)}
- clus.Launch(t)
- return clus
-}
-
// TestTLSGRPCRejectInsecureClient checks that connection is rejected if server is TLS but not client.
func TestTLSGRPCRejectInsecureClient(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- cfg := ClusterConfig{Size: 3, ClientTLS: &testTLSInfo}
- clus := newClusterV3NoClients(t, &cfg)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, ClientTLS: &integration.TestTLSInfo})
defer clus.Terminate(t)
// nil out TLS field so client will use an insecure connection
clus.Members[0].ClientTLSInfo = nil
- client, err := NewClientV3(clus.Members[0])
- if err != nil && err != context.DeadlineExceeded {
+ client, err := integration.NewClientV3(clus.Members[0])
+ if err != nil && !errors.Is(err, context.DeadlineExceeded) {
t.Fatalf("unexpected error (%v)", err)
} else if client == nil {
// Ideally, no client would be returned. However, grpc will
@@ -1557,7 +1609,7 @@ func TestTLSGRPCRejectInsecureClient(t *testing.T) {
go func() {
ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
- _, perr := toGRPC(client).KV.Put(ctx, reqput)
+ _, perr := integration.ToGRPC(client).KV.Put(ctx, reqput)
cancel()
donec <- perr
}()
@@ -1569,38 +1621,38 @@ func TestTLSGRPCRejectInsecureClient(t *testing.T) {
// TestTLSGRPCRejectSecureClient checks that connection is rejected if client is TLS but not server.
func TestTLSGRPCRejectSecureClient(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- cfg := ClusterConfig{Size: 3}
- clus := newClusterV3NoClients(t, &cfg)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
- clus.Members[0].ClientTLSInfo = &testTLSInfo
+ clus.Members[0].ClientTLSInfo = &integration.TestTLSInfo
clus.Members[0].DialOptions = []grpc.DialOption{grpc.WithBlock()}
- client, err := NewClientV3(clus.Members[0])
+ clus.Members[0].GRPCURL = strings.Replace(clus.Members[0].GRPCURL, "http://", "https://", 1)
+ client, err := integration.NewClientV3(clus.Members[0])
if client != nil || err == nil {
+ client.Close()
t.Fatalf("expected no client")
- } else if err != context.DeadlineExceeded {
+ } else if !errors.Is(err, context.DeadlineExceeded) {
t.Fatalf("unexpected error (%v)", err)
}
}
// TestTLSGRPCAcceptSecureAll checks that connection is accepted if both client and server are TLS
func TestTLSGRPCAcceptSecureAll(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- cfg := ClusterConfig{Size: 3, ClientTLS: &testTLSInfo}
- clus := newClusterV3NoClients(t, &cfg)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, ClientTLS: &integration.TestTLSInfo})
defer clus.Terminate(t)
- client, err := NewClientV3(clus.Members[0])
+ client, err := integration.NewClientV3(clus.Members[0])
if err != nil {
t.Fatalf("expected tls client (%v)", err)
}
defer client.Close()
reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
- if _, err := toGRPC(client).KV.Put(context.TODO(), reqput); err != nil {
+ if _, err := integration.ToGRPC(client).KV.Put(context.TODO(), reqput); err != nil {
t.Fatalf("unexpected error on put over tls (%v)", err)
}
}
@@ -1609,57 +1661,39 @@ func TestTLSGRPCAcceptSecureAll(t *testing.T) {
// when all certs are atomically replaced by directory renaming.
// And expects server to reject client requests, and vice versa.
func TestTLSReloadAtomicReplace(t *testing.T) {
- tmpDir, err := ioutil.TempDir(t.TempDir(), "fixtures-tmp")
- if err != nil {
- t.Fatal(err)
- }
+ tmpDir := t.TempDir()
os.RemoveAll(tmpDir)
- defer os.RemoveAll(tmpDir)
- certsDir, err := ioutil.TempDir(t.TempDir(), "fixtures-to-load")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(certsDir)
+ certsDir := t.TempDir()
- certsDirExp, err := ioutil.TempDir(t.TempDir(), "fixtures-expired")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(certsDirExp)
+ certsDirExp := t.TempDir()
cloneFunc := func() transport.TLSInfo {
- tlsInfo, terr := copyTLSFiles(testTLSInfo, certsDir)
+ tlsInfo, terr := copyTLSFiles(integration.TestTLSInfo, certsDir)
if terr != nil {
t.Fatal(terr)
}
- if _, err = copyTLSFiles(testTLSInfoExpired, certsDirExp); err != nil {
- t.Fatal(err)
- }
+ _, err := copyTLSFiles(integration.TestTLSInfoExpired, certsDirExp)
+ require.NoError(t, err)
return tlsInfo
}
replaceFunc := func() {
- if err = os.Rename(certsDir, tmpDir); err != nil {
- t.Fatal(err)
- }
- if err = os.Rename(certsDirExp, certsDir); err != nil {
- t.Fatal(err)
- }
+ err := os.Rename(certsDir, tmpDir)
+ require.NoError(t, err)
+ err = os.Rename(certsDirExp, certsDir)
+ require.NoError(t, err)
// after rename,
// 'certsDir' contains expired certs
// 'tmpDir' contains valid certs
// 'certsDirExp' does not exist
}
revertFunc := func() {
- if err = os.Rename(tmpDir, certsDirExp); err != nil {
- t.Fatal(err)
- }
- if err = os.Rename(certsDir, tmpDir); err != nil {
- t.Fatal(err)
- }
- if err = os.Rename(certsDirExp, certsDir); err != nil {
- t.Fatal(err)
- }
+ err := os.Rename(tmpDir, certsDirExp)
+ require.NoError(t, err)
+ err = os.Rename(certsDir, tmpDir)
+ require.NoError(t, err)
+ err = os.Rename(certsDirExp, certsDir)
+ require.NoError(t, err)
}
testTLSReload(t, cloneFunc, replaceFunc, revertFunc, false)
}
@@ -1668,28 +1702,22 @@ func TestTLSReloadAtomicReplace(t *testing.T) {
// when new certs are copied over, one by one. And expects server
// to reject client requests, and vice versa.
func TestTLSReloadCopy(t *testing.T) {
- certsDir, err := ioutil.TempDir(t.TempDir(), "fixtures-to-load")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(certsDir)
+ certsDir := t.TempDir()
cloneFunc := func() transport.TLSInfo {
- tlsInfo, terr := copyTLSFiles(testTLSInfo, certsDir)
+ tlsInfo, terr := copyTLSFiles(integration.TestTLSInfo, certsDir)
if terr != nil {
t.Fatal(terr)
}
return tlsInfo
}
replaceFunc := func() {
- if _, err = copyTLSFiles(testTLSInfoExpired, certsDir); err != nil {
- t.Fatal(err)
- }
+ _, err := copyTLSFiles(integration.TestTLSInfoExpired, certsDir)
+ require.NoError(t, err)
}
revertFunc := func() {
- if _, err = copyTLSFiles(testTLSInfo, certsDir); err != nil {
- t.Fatal(err)
- }
+ _, err := copyTLSFiles(integration.TestTLSInfo, certsDir)
+ require.NoError(t, err)
}
testTLSReload(t, cloneFunc, replaceFunc, revertFunc, false)
}
@@ -1698,28 +1726,22 @@ func TestTLSReloadCopy(t *testing.T) {
// when new certs are copied over, one by one. And expects server
// to reject client requests, and vice versa.
func TestTLSReloadCopyIPOnly(t *testing.T) {
- certsDir, err := ioutil.TempDir(t.TempDir(), "fixtures-to-load")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(certsDir)
+ certsDir := t.TempDir()
cloneFunc := func() transport.TLSInfo {
- tlsInfo, terr := copyTLSFiles(testTLSInfoIP, certsDir)
+ tlsInfo, terr := copyTLSFiles(integration.TestTLSInfoIP, certsDir)
if terr != nil {
t.Fatal(terr)
}
return tlsInfo
}
replaceFunc := func() {
- if _, err = copyTLSFiles(testTLSInfoExpiredIP, certsDir); err != nil {
- t.Fatal(err)
- }
+ _, err := copyTLSFiles(integration.TestTLSInfoExpiredIP, certsDir)
+ require.NoError(t, err)
}
revertFunc := func() {
- if _, err = copyTLSFiles(testTLSInfoIP, certsDir); err != nil {
- t.Fatal(err)
- }
+ _, err := copyTLSFiles(integration.TestTLSInfoIP, certsDir)
+ require.NoError(t, err)
}
testTLSReload(t, cloneFunc, replaceFunc, revertFunc, true)
}
@@ -1729,14 +1751,15 @@ func testTLSReload(
cloneFunc func() transport.TLSInfo,
replaceFunc func(),
revertFunc func(),
- useIP bool) {
- BeforeTest(t)
+ useIP bool,
+) {
+ integration.BeforeTest(t)
// 1. separate copies for TLS assets modification
tlsInfo := cloneFunc()
// 2. start cluster with valid certs
- clus := NewClusterV3(t, &ClusterConfig{
+ clus := integration.NewCluster(t, &integration.ClusterConfig{
Size: 1,
PeerTLS: &tlsInfo,
ClientTLS: &tlsInfo,
@@ -1758,9 +1781,9 @@ func testTLSReload(
t.Log(err)
continue
}
- cli, cerr := NewClient(t, clientv3.Config{
+ cli, cerr := integration.NewClient(t, clientv3.Config{
DialOptions: []grpc.DialOption{grpc.WithBlock()},
- Endpoints: []string{clus.Members[0].GRPCAddr()},
+ Endpoints: []string{clus.Members[0].GRPCURL},
DialTimeout: time.Second,
TLS: cc,
})
@@ -1778,7 +1801,7 @@ func testTLSReload(
// 5. expect dial time-out when loading expired certs
select {
case gerr := <-errc:
- if gerr != context.DeadlineExceeded {
+ if !errors.Is(gerr, context.DeadlineExceeded) {
t.Fatalf("expected %v, got %v", context.DeadlineExceeded, gerr)
}
case <-time.After(5 * time.Second):
@@ -1793,8 +1816,8 @@ func testTLSReload(
if terr != nil {
t.Fatal(terr)
}
- cl, cerr := NewClient(t, clientv3.Config{
- Endpoints: []string{clus.Members[0].GRPCAddr()},
+ cl, cerr := integration.NewClient(t, clientv3.Config{
+ Endpoints: []string{clus.Members[0].GRPCURL},
DialTimeout: 5 * time.Second,
TLS: tls,
})
@@ -1805,46 +1828,44 @@ func testTLSReload(
}
func TestGRPCRequireLeader(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- cfg := ClusterConfig{Size: 3}
- clus := newClusterV3NoClients(t, &cfg)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
clus.Members[1].Stop(t)
clus.Members[2].Stop(t)
- client, err := NewClientV3(clus.Members[0])
+ client, err := integration.NewClientV3(clus.Members[0])
if err != nil {
t.Fatalf("cannot create client: %v", err)
}
defer client.Close()
// wait for election timeout, then member[0] will not have a leader.
- time.Sleep(time.Duration(3*electionTicks) * tickDuration)
+ time.Sleep(time.Duration(3*integration.ElectionTicks) * config.TickDuration)
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
ctx := metadata.NewOutgoingContext(context.Background(), md)
reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
- if _, err := toGRPC(client).KV.Put(ctx, reqput); rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() {
+ if _, err := integration.ToGRPC(client).KV.Put(ctx, reqput); rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() {
t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader)
}
}
func TestGRPCStreamRequireLeader(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- cfg := ClusterConfig{Size: 3}
- clus := newClusterV3NoClients(t, &cfg)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
- client, err := NewClientV3(clus.Members[0])
+ client, err := integration.NewClientV3(clus.Members[0])
if err != nil {
t.Fatalf("failed to create client (%v)", err)
}
defer client.Close()
- wAPI := toGRPC(client).Watch
+ wAPI := integration.ToGRPC(client).Watch
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
ctx := metadata.NewOutgoingContext(context.Background(), md)
wStream, err := wAPI.Watch(ctx)
@@ -1874,8 +1895,8 @@ func TestGRPCStreamRequireLeader(t *testing.T) {
clus.Members[1].Restart(t)
clus.Members[2].Restart(t)
- clus.waitLeader(t, clus.Members)
- time.Sleep(time.Duration(2*electionTicks) * tickDuration)
+ clus.WaitMembersForLeader(t, clus.Members)
+ time.Sleep(time.Duration(2*integration.ElectionTicks) * config.TickDuration)
// new stream should also be OK now after we restarted the other members
wStream, err = wAPI.Watch(ctx)
@@ -1895,7 +1916,7 @@ func TestGRPCStreamRequireLeader(t *testing.T) {
// TestV3LargeRequests ensures that configurable MaxRequestBytes works as intended.
func TestV3LargeRequests(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
tests := []struct {
maxRequestBytes uint
valueSize int
@@ -1908,25 +1929,92 @@ func TestV3LargeRequests(t *testing.T) {
{10 * 1024 * 1024, 10*1024*1024 + 5, rpctypes.ErrGRPCRequestTooLarge},
}
for i, test := range tests {
- clus := NewClusterV3(t, &ClusterConfig{Size: 1, MaxRequestBytes: test.maxRequestBytes})
- kvcli := toGRPC(clus.Client(0)).KV
- reqput := &pb.PutRequest{Key: []byte("foo"), Value: make([]byte, test.valueSize)}
- _, err := kvcli.Put(context.TODO(), reqput)
- if !eqErrGRPC(err, test.expectError) {
- t.Errorf("#%d: expected error %v, got %v", i, test.expectError, err)
- }
+ t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, MaxRequestBytes: test.maxRequestBytes})
+ defer clus.Terminate(t)
+ kvcli := integration.ToGRPC(clus.Client(0)).KV
+ reqput := &pb.PutRequest{Key: []byte("foo"), Value: make([]byte, test.valueSize)}
+ _, err := kvcli.Put(context.TODO(), reqput)
+ if !eqErrGRPC(err, test.expectError) {
+ t.Errorf("#%d: expected error %v, got %v", i, test.expectError, err)
+ }
- // request went through, expect large response back from server
- if test.expectError == nil {
- reqget := &pb.RangeRequest{Key: []byte("foo")}
- // limit receive call size with original value + gRPC overhead bytes
- _, err = kvcli.Range(context.TODO(), reqget, grpc.MaxCallRecvMsgSize(test.valueSize+512*1024))
- if err != nil {
- t.Errorf("#%d: range expected no error, got %v", i, err)
+ // request went through, expect large response back from server
+ if test.expectError == nil {
+ reqget := &pb.RangeRequest{Key: []byte("foo")}
+ // limit receive call size with original value + gRPC overhead bytes
+ _, err = kvcli.Range(context.TODO(), reqget, grpc.MaxCallRecvMsgSize(test.valueSize+512*1024))
+ if err != nil {
+ t.Errorf("#%d: range expected no error, got %v", i, err)
+ }
}
- }
+ })
+ }
+}
- clus.Terminate(t)
+// TestV3AdditionalGRPCOptions ensures that configurable GRPCAdditionalServerOptions works as intended.
+func TestV3AdditionalGRPCOptions(t *testing.T) {
+ integration.BeforeTest(t)
+ tests := []struct {
+ name string
+ maxRequestBytes uint
+ grpcOpts []grpc.ServerOption
+ valueSize int
+ expectError error
+ }{
+ {
+ name: "requests will get a gRPC error because it's larger than gRPC MaxRecvMsgSize",
+ maxRequestBytes: 8 * 1024 * 1024,
+ grpcOpts: nil,
+ valueSize: 9 * 1024 * 1024,
+ expectError: status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max"),
+ },
+ {
+ name: "requests will get an etcd custom gRPC error because it's larger than MaxRequestBytes",
+ maxRequestBytes: 8 * 1024 * 1024,
+ grpcOpts: []grpc.ServerOption{grpc.MaxRecvMsgSize(10 * 1024 * 1024)},
+ valueSize: 9 * 1024 * 1024,
+ expectError: rpctypes.ErrGRPCRequestTooLarge,
+ },
+ {
+ name: "requests size is smaller than MaxRequestBytes but larger than MaxRecvMsgSize",
+ maxRequestBytes: 8 * 1024 * 1024,
+ grpcOpts: []grpc.ServerOption{grpc.MaxRecvMsgSize(4 * 1024 * 1024)},
+ valueSize: 6 * 1024 * 1024,
+ expectError: status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max"),
+ },
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ clus := integration.NewCluster(t, &integration.ClusterConfig{
+ Size: 1,
+ MaxRequestBytes: test.maxRequestBytes,
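+ // Raise the client-side send limit so the server-side limits under test are what reject the request.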
+ ClientMaxCallSendMsgSize: 12 * 1024 * 1024,
+ GRPCAdditionalServerOptions: test.grpcOpts,
+ })
+ defer clus.Terminate(t)
+ kvcli := integration.ToGRPC(clus.Client(0)).KV
+ reqput := &pb.PutRequest{Key: []byte("foo"), Value: make([]byte, test.valueSize)}
+ if _, err := kvcli.Put(context.TODO(), reqput); err != nil {
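+ // etcd's own errors are compared by exact message; raw gRPC size errors are matched by prefix, since their message also includes the observed sizes.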
+ var etcdErr rpctypes.EtcdError
+ if errors.As(err, &etcdErr) {
+ if err.Error() != status.Convert(test.expectError).Message() {
+ t.Errorf("expected %v, got %v", status.Convert(test.expectError).Message(), err.Error())
+ }
+ } else if !strings.HasPrefix(err.Error(), test.expectError.Error()) {
+ t.Errorf("expected error starting with '%s', got '%s'", test.expectError.Error(), err.Error())
+ }
+ }
+ // request went through, expect large response back from server
+ if test.expectError == nil {
+ reqget := &pb.RangeRequest{Key: []byte("foo")}
+ // limit receive call size with original value + gRPC overhead bytes
+ _, err := kvcli.Range(context.TODO(), reqget, grpc.MaxCallRecvMsgSize(test.valueSize+512*1024))
+ if err != nil {
+ t.Errorf("range expected no error, got %v", err)
+ }
+ }
+ })
}
}
diff --git a/tests/integration/v3_health_test.go b/tests/integration/v3_health_test.go
deleted file mode 100644
index fd7257fb96a..00000000000
--- a/tests/integration/v3_health_test.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package integration
-
-import (
- "context"
- "testing"
-
- healthpb "google.golang.org/grpc/health/grpc_health_v1"
-)
-
-func TestHealthCheck(t *testing.T) {
- BeforeTest(t)
-
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
- defer clus.Terminate(t)
-
- cli := healthpb.NewHealthClient(clus.RandClient().ActiveConnection())
- resp, err := cli.Check(context.TODO(), &healthpb.HealthCheckRequest{})
- if err != nil {
- t.Fatal(err)
- }
- if resp.Status != healthpb.HealthCheckResponse_SERVING {
- t.Fatalf("status expected %s, got %s", healthpb.HealthCheckResponse_SERVING, resp.Status)
- }
-}
diff --git a/tests/integration/v3_kv_test.go b/tests/integration/v3_kv_test.go
index aca4aeb4927..eacb821af60 100644
--- a/tests/integration/v3_kv_test.go
+++ b/tests/integration/v3_kv_test.go
@@ -1,18 +1,33 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package integration
import (
"context"
"testing"
- "go.etcd.io/etcd/client/v3"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/namespace"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
)
// TestKVWithEmptyValue ensures that a get/delete with an empty value, and with WithFromKey/WithPrefix function will return an empty error.
func TestKVWithEmptyValue(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
client := clus.RandClient()
@@ -49,7 +64,7 @@ func TestKVWithEmptyValue(t *testing.T) {
t.Log(string(kv.Key), "=", string(kv.Value))
}
- //Remove all keys without WithFromKey/WithPrefix func
+ // Remove all keys without WithFromKey/WithPrefix func
_, err = client.Delete(context.Background(), "")
if err == nil {
// fatal error due to calling Delete without the WithFromKey/WithPrefix option.
diff --git a/tests/integration/v3_leadership_test.go b/tests/integration/v3_leadership_test.go
index e530bbdfc42..2054acbade6 100644
--- a/tests/integration/v3_leadership_test.go
+++ b/tests/integration/v3_leadership_test.go
@@ -21,22 +21,24 @@ import (
"testing"
"time"
+ "golang.org/x/sync/errgroup"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "golang.org/x/sync/errgroup"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestMoveLeader(t *testing.T) { testMoveLeader(t, true) }
func TestMoveLeaderService(t *testing.T) { testMoveLeader(t, false) }
func testMoveLeader(t *testing.T, auto bool) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
oldLeadIdx := clus.WaitLeader(t)
- oldLeadID := uint64(clus.Members[oldLeadIdx].s.ID())
+ oldLeadID := uint64(clus.Members[oldLeadIdx].Server.MemberID())
// ensure followers go through leader transition while leadership transfer
idc := make(chan uint64)
@@ -45,23 +47,23 @@ func testMoveLeader(t *testing.T, auto bool) {
for i := range clus.Members {
if oldLeadIdx != i {
- go func(m *member) {
+ go func(m *integration.Member) {
select {
- case idc <- checkLeaderTransition(m, oldLeadID):
+ case idc <- integration.CheckLeaderTransition(m, oldLeadID):
case <-stopc:
}
}(clus.Members[i])
}
}
- target := uint64(clus.Members[(oldLeadIdx+1)%3].s.ID())
+ target := uint64(clus.Members[(oldLeadIdx+1)%3].Server.MemberID())
if auto {
- err := clus.Members[oldLeadIdx].s.TransferLeadership()
+ err := clus.Members[oldLeadIdx].Server.TryTransferLeadershipOnShutdown()
if err != nil {
t.Fatal(err)
}
} else {
- mvc := toGRPC(clus.Client(oldLeadIdx)).Maintenance
+ mvc := integration.ToGRPC(clus.Client(oldLeadIdx)).Maintenance
_, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target})
if err != nil {
t.Fatal(err)
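
For readers outside the test harness, a hedged clientv3 sketch of the same leader transfer (the non-"auto" path above); the endpoints are placeholders and error handling is minimal.

```go
package main

import (
	"context"
	"fmt"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	endpoints := []string{"localhost:2379", "localhost:22379", "localhost:32379"}
	cli, err := clientv3.New(clientv3.Config{Endpoints: endpoints})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	// Find the current leader and pick any other voting member as the target.
	st, err := cli.Status(ctx, endpoints[0])
	if err != nil {
		log.Fatal(err)
	}
	members, err := cli.MemberList(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range members.Members {
		if m.ID != st.Leader && !m.IsLearner {
			// MoveLeader must be served by the leader; other members answer
			// with a not-leader error, as TestMoveLeaderError asserts.
			if _, err := cli.MoveLeader(ctx, m.ID); err != nil {
				log.Fatal(err)
			}
			fmt.Printf("transferred leadership to member %x\n", m.ID)
			break
		}
	}
}
```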
@@ -98,17 +100,17 @@ func testMoveLeader(t *testing.T, auto bool) {
// TestMoveLeaderError ensures that a MoveLeader request sent to a non-leader fails.
func TestMoveLeaderError(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
oldLeadIdx := clus.WaitLeader(t)
followerIdx := (oldLeadIdx + 1) % 3
- target := uint64(clus.Members[(oldLeadIdx+2)%3].s.ID())
+ target := uint64(clus.Members[(oldLeadIdx+2)%3].Server.MemberID())
- mvc := toGRPC(clus.Client(followerIdx)).Maintenance
+ mvc := integration.ToGRPC(clus.Client(followerIdx)).Maintenance
_, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target})
if !eqErrGRPC(err, rpctypes.ErrGRPCNotLeader) {
t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCNotLeader)
@@ -117,9 +119,9 @@ func TestMoveLeaderError(t *testing.T) {
// TestMoveLeaderToLearnerError ensures that a leader transfer to a learner member fails.
func TestMoveLeaderToLearnerError(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
defer clus.Terminate(t)
// we have to add and launch learner member after initial cluster was created, because
@@ -128,10 +130,10 @@ func TestMoveLeaderToLearnerError(t *testing.T) {
learners, err := clus.GetLearnerMembers()
if err != nil {
- t.Fatalf("failed to get the learner members in cluster: %v", err)
+ t.Fatalf("failed to get the learner members in Cluster: %v", err)
}
if len(learners) != 1 {
- t.Fatalf("added 1 learner to cluster, got %d", len(learners))
+ t.Fatalf("added 1 learner to Cluster, got %d", len(learners))
}
learnerID := learners[0].ID
@@ -147,30 +149,30 @@ func TestMoveLeaderToLearnerError(t *testing.T) {
}
}
-// TestTransferLeadershipWithLearner ensures TransferLeadership does not timeout due to learner is
+// TestTransferLeadershipWithLearner ensures TryTransferLeadershipOnShutdown does not time out when a learner is
// automatically picked by leader as transferee.
func TestTransferLeadershipWithLearner(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
clus.AddAndLaunchLearnerMember(t)
learners, err := clus.GetLearnerMembers()
if err != nil {
- t.Fatalf("failed to get the learner members in cluster: %v", err)
+ t.Fatalf("failed to get the learner members in Cluster: %v", err)
}
if len(learners) != 1 {
- t.Fatalf("added 1 learner to cluster, got %d", len(learners))
+ t.Fatalf("added 1 learner to Cluster, got %d", len(learners))
}
leaderIdx := clus.WaitLeader(t)
errCh := make(chan error, 1)
go func() {
- // note that this cluster has 1 leader and 1 learner. TransferLeadership should return nil.
+ // note that this cluster has 1 leader and 1 learner. TryTransferLeadershipOnShutdown should return nil.
// Leadership transfer is skipped in cluster with 1 voting member.
- errCh <- clus.Members[leaderIdx].s.TransferLeadership()
+ errCh <- clus.Members[leaderIdx].Server.TryTransferLeadershipOnShutdown()
}()
select {
case err := <-errCh:
@@ -183,25 +185,24 @@ func TestTransferLeadershipWithLearner(t *testing.T) {
}
func TestFirstCommitNotification(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
ctx := context.Background()
clusterSize := 3
- cluster := NewClusterV3(t, &ClusterConfig{Size: clusterSize})
+ cluster := integration.NewCluster(t, &integration.ClusterConfig{Size: clusterSize})
defer cluster.Terminate(t)
oldLeaderIdx := cluster.WaitLeader(t)
oldLeaderClient := cluster.Client(oldLeaderIdx)
newLeaderIdx := (oldLeaderIdx + 1) % clusterSize
- newLeaderId := uint64(cluster.Members[newLeaderIdx].ID())
+ newLeaderID := uint64(cluster.Members[newLeaderIdx].ID())
notifiers := make(map[int]<-chan struct{}, clusterSize)
for i, clusterMember := range cluster.Members {
- notifiers[i] = clusterMember.s.FirstCommitInTermNotify()
+ notifiers[i] = clusterMember.Server.FirstCommitInTermNotify()
}
- _, err := oldLeaderClient.MoveLeader(context.Background(), newLeaderId)
-
+ _, err := oldLeaderClient.MoveLeader(context.Background(), newLeaderID)
if err != nil {
t.Errorf("got error during leadership transfer: %v", err)
}
@@ -210,12 +211,12 @@ func TestFirstCommitNotification(t *testing.T) {
t.Logf("Submitting write to make sure empty and 'foo' index entry was already flushed")
cli := cluster.RandClient()
- if _, err := cli.Put(ctx, "foo", "bar"); err != nil {
+ if _, err = cli.Put(ctx, "foo", "bar"); err != nil {
t.Fatalf("Failed to put kv pair.")
}
// It's guaranteed now that leader contains the 'foo'->'bar' index entry.
- leaderAppliedIndex := cluster.Members[newLeaderIdx].s.AppliedIndex()
+ leaderAppliedIndex := cluster.Members[newLeaderIdx].Server.AppliedIndex()
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
@@ -223,7 +224,8 @@ func TestFirstCommitNotification(t *testing.T) {
group, groupContext := errgroup.WithContext(ctx)
for i, notifier := range notifiers {
- member, notifier := cluster.Members[i], notifier
+ member := cluster.Members[i]
+ notifier := notifier
group.Go(func() error {
return checkFirstCommitNotification(groupContext, t, member, leaderAppliedIndex, notifier)
})
@@ -238,13 +240,13 @@ func TestFirstCommitNotification(t *testing.T) {
func checkFirstCommitNotification(
ctx context.Context,
t testing.TB,
- member *member,
+ member *integration.Member,
leaderAppliedIndex uint64,
notifier <-chan struct{},
) error {
// wait until server applies all the changes of leader
- for member.s.AppliedIndex() < leaderAppliedIndex {
- t.Logf("member.s.AppliedIndex():%v <= leaderAppliedIndex:%v", member.s.AppliedIndex(), leaderAppliedIndex)
+ for member.Server.AppliedIndex() < leaderAppliedIndex {
+ t.Logf("member.Server.AppliedIndex():%v <= leaderAppliedIndex:%v", member.Server.AppliedIndex(), leaderAppliedIndex)
select {
case <-ctx.Done():
return ctx.Err()
@@ -262,7 +264,7 @@ func checkFirstCommitNotification(
)
}
default:
- t.Logf("member.s.AppliedIndex():%v >= leaderAppliedIndex:%v", member.s.AppliedIndex(), leaderAppliedIndex)
+ t.Logf("member.Server.AppliedIndex():%v >= leaderAppliedIndex:%v", member.Server.AppliedIndex(), leaderAppliedIndex)
return fmt.Errorf(
"notification was not triggered, member ID: %d",
member.ID(),
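
The errgroup fan-out used by TestFirstCommitNotification is a general pattern; here is a self-contained sketch of the same idea (waitReady is a hypothetical helper, not an etcd API).

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

func waitReady(ctx context.Context, id int, notify <-chan struct{}) error {
	select {
	case <-notify:
		return nil
	case <-ctx.Done():
		return fmt.Errorf("member %d: %w", id, ctx.Err())
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	notifiers := map[int]chan struct{}{0: make(chan struct{}), 1: make(chan struct{}), 2: make(chan struct{})}
	for _, ch := range notifiers {
		close(ch) // pretend every member already signalled
	}

	group, gctx := errgroup.WithContext(ctx)
	for id, ch := range notifiers {
		id, ch := id, ch // capture loop variables, as the test does
		group.Go(func() error { return waitReady(gctx, id, ch) })
	}
	if err := group.Wait(); err != nil {
		fmt.Println("notification check failed:", err)
	}
}
```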
diff --git a/tests/integration/v3_lease_test.go b/tests/integration/v3_lease_test.go
index 41f33ef6266..3c5e9040b4a 100644
--- a/tests/integration/v3_lease_test.go
+++ b/tests/integration/v3_lease_test.go
@@ -16,31 +16,39 @@ package integration
import (
"context"
+ "errors"
"fmt"
+ "math"
"testing"
"time"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/pkg/v3/testutil"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ framecfg "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
+ gofail "go.etcd.io/gofail/runtime"
)
-// TestV3LeasePrmote ensures the newly elected leader can promote itself
+// TestV3LeasePromote ensures the newly elected leader can promote itself
// to the primary lessor, refresh the leases and start to manage leases.
// TODO: use customized clock to make this test go faster?
func TestV3LeasePromote(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
// create lease
- lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 3})
+ lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 3})
ttl := time.Duration(lresp.TTL) * time.Second
afterGrant := time.Now()
if err != nil {
@@ -54,19 +62,19 @@ func TestV3LeasePromote(t *testing.T) {
time.Sleep(time.Until(afterGrant.Add(ttl - time.Second)))
// kill the current leader, all leases should be refreshed.
- toStop := clus.waitLeader(t, clus.Members)
+ toStop := clus.WaitMembersForLeader(t, clus.Members)
beforeStop := time.Now()
clus.Members[toStop].Stop(t)
- var toWait []*member
+ var toWait []*integration.Member
for i, m := range clus.Members {
if i != toStop {
toWait = append(toWait, m)
}
}
- clus.waitLeader(t, toWait)
+ clus.WaitMembersForLeader(t, toWait)
clus.Members[toStop].Restart(t)
- clus.waitLeader(t, clus.Members)
+ clus.WaitMembersForLeader(t, clus.Members)
afterReelect := time.Now()
// ensure lease is refreshed by waiting for a "long" time.
@@ -96,22 +104,22 @@ func TestV3LeasePromote(t *testing.T) {
// TestV3LeaseRevoke ensures a key is deleted once its lease is revoked.
func TestV3LeaseRevoke(t *testing.T) {
- BeforeTest(t)
- testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
- lc := toGRPC(clus.RandClient()).Lease
+ integration.BeforeTest(t)
+ testLeaseRemoveLeasedKey(t, func(clus *integration.Cluster, leaseID int64) error {
+ lc := integration.ToGRPC(clus.RandClient()).Lease
_, err := lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID})
return err
})
}
-// TestV3LeaseGrantById ensures leases may be created by a given id.
+// TestV3LeaseGrantByID ensures leases may be created by a given id.
func TestV3LeaseGrantByID(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
// create fixed lease
- lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant(
+ lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(
context.TODO(),
&pb.LeaseGrantRequest{ID: 1, TTL: 1})
if err != nil {
@@ -122,7 +130,7 @@ func TestV3LeaseGrantByID(t *testing.T) {
}
// create duplicate fixed lease
- _, err = toGRPC(clus.RandClient()).Lease.LeaseGrant(
+ _, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(
context.TODO(),
&pb.LeaseGrantRequest{ID: 1, TTL: 1})
if !eqErrGRPC(err, rpctypes.ErrGRPCLeaseExist) {
@@ -130,7 +138,7 @@ func TestV3LeaseGrantByID(t *testing.T) {
}
// create fresh fixed lease
- lresp, err = toGRPC(clus.RandClient()).Lease.LeaseGrant(
+ lresp, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(
context.TODO(),
&pb.LeaseGrantRequest{ID: 2, TTL: 1})
if err != nil {
@@ -141,22 +149,109 @@ func TestV3LeaseGrantByID(t *testing.T) {
}
}
+// TestV3LeaseNegativeID ensures a restarted member's lessor can recover a negative leaseID from the backend.
+//
+// When a negative leaseID is used for lease revoke, all etcd nodes will remove the lease
+// and delete the associated keys to ensure kv store data consistency.
+//
+// It ensures issue 12535 is fixed by PR 13676.
+func TestV3LeaseNegativeID(t *testing.T) {
+ tcs := []struct {
+ leaseID int64
+ k []byte
+ v []byte
+ }{
+ {
+ leaseID: -1, // int64 -1 is 2^64 -1 in uint64
+ k: []byte("foo"),
+ v: []byte("bar"),
+ },
+ {
+ leaseID: math.MaxInt64,
+ k: []byte("bar"),
+ v: []byte("foo"),
+ },
+ {
+ leaseID: math.MinInt64,
+ k: []byte("hello"),
+ v: []byte("world"),
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(fmt.Sprintf("test with lease ID %16x", tc.leaseID), func(t *testing.T) {
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
+ defer clus.Terminate(t)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ cc := clus.RandClient()
+ lresp, err := integration.ToGRPC(cc).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{ID: tc.leaseID, TTL: 300})
+ if err != nil {
+ t.Errorf("could not create lease %d (%v)", tc.leaseID, err)
+ }
+ if lresp.ID != tc.leaseID {
+ t.Errorf("got id %v, wanted id %v", lresp.ID, tc.leaseID)
+ }
+ putr := &pb.PutRequest{Key: tc.k, Value: tc.v, Lease: tc.leaseID}
+ _, err = integration.ToGRPC(cc).KV.Put(ctx, putr)
+ if err != nil {
+ t.Errorf("couldn't put key (%v)", err)
+ }
+
+ // wait for backend Commit
+ time.Sleep(100 * time.Millisecond)
+ // restore lessor from db file
+ clus.Members[2].Stop(t)
+ if err = clus.Members[2].Restart(t); err != nil {
+ t.Fatal(err)
+ }
+
+ // revoke lease should remove key
+ integration.WaitClientV3(t, clus.Members[2].Client)
+ _, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: tc.leaseID})
+ if err != nil {
+ t.Errorf("could not revoke lease %d (%v)", tc.leaseID, err)
+ }
+ var revision int64
+ for _, m := range clus.Members {
+ getr := &pb.RangeRequest{Key: tc.k}
+ getresp, err := integration.ToGRPC(m.Client).KV.Range(ctx, getr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if revision == 0 {
+ revision = getresp.Header.Revision
+ }
+ if revision != getresp.Header.Revision {
+ t.Errorf("expect revision %d, but got %d", revision, getresp.Header.Revision)
+ }
+ if len(getresp.Kvs) != 0 {
+ t.Errorf("lease removed but key remains")
+ }
+ }
+ })
+ }
+}
+
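
A tiny stand-alone illustration of the signed/unsigned round-trip the negative-ID cases exercise; only the standard library is used.

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	for _, id := range []int64{-1, math.MaxInt64, math.MinInt64} {
		fmt.Printf("int64 %20d = uint64 %20d = hex %016x\n", id, uint64(id), uint64(id))
	}
	// int64 -1 is 18446744073709551615 (2^64-1) as uint64, so any code path that
	// round-trips lease IDs through an unsigned or hex form must preserve all 64 bits.
}
```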
// TestV3LeaseExpire ensures a key is deleted once a key expires.
func TestV3LeaseExpire(t *testing.T) {
- BeforeTest(t)
- testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
+ integration.BeforeTest(t)
+ testLeaseRemoveLeasedKey(t, func(clus *integration.Cluster, leaseID int64) error {
// let lease lapse; wait for deleted key
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- wStream, err := toGRPC(clus.RandClient()).Watch.Watch(ctx)
+ wStream, err := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
if err != nil {
return err
}
wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
CreateRequest: &pb.WatchCreateRequest{
- Key: []byte("foo"), StartRevision: 1}}}
+ Key: []byte("foo"), StartRevision: 1,
+ },
+ }}
if err := wStream.Send(wreq); err != nil {
return err
}
@@ -195,9 +290,9 @@ func TestV3LeaseExpire(t *testing.T) {
// TestV3LeaseKeepAlive ensures keepalive keeps the lease alive.
func TestV3LeaseKeepAlive(t *testing.T) {
- BeforeTest(t)
- testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
- lc := toGRPC(clus.RandClient()).Lease
+ integration.BeforeTest(t)
+ testLeaseRemoveLeasedKey(t, func(clus *integration.Cluster, leaseID int64) error {
+ lc := integration.ToGRPC(clus.RandClient()).Lease
lreq := &pb.LeaseKeepAliveRequest{ID: leaseID}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -229,69 +324,122 @@ func TestV3LeaseKeepAlive(t *testing.T) {
// TestV3LeaseCheckpoint ensures a lease checkpoint results in a remaining TTL being persisted
// across leader elections.
func TestV3LeaseCheckpoint(t *testing.T) {
- BeforeTest(t)
-
- var ttl int64 = 300
- leaseInterval := 2 * time.Second
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{
- Size: 3,
- EnableLeaseCheckpoint: true,
- LeaseCheckpointInterval: leaseInterval,
- })
- defer clus.Terminate(t)
-
- // create lease
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- c := toGRPC(clus.RandClient())
- lresp, err := c.Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: ttl})
- if err != nil {
- t.Fatal(err)
- }
+ tcs := []struct {
+ name string
+ checkpointingEnabled bool
+ ttl time.Duration
+ checkpointingInterval time.Duration
+ leaderChanges int
+ clusterSize int
+ expectTTLIsGT time.Duration
+ expectTTLIsLT time.Duration
+ }{
+ {
+ name: "Checkpointing disabled, lease TTL is reset",
+ ttl: 300 * time.Second,
+ leaderChanges: 1,
+ clusterSize: 3,
+ expectTTLIsGT: 298 * time.Second,
+ },
+ {
+ name: "Checkpointing enabled 10s, lease TTL is preserved after leader change",
+ ttl: 300 * time.Second,
+ checkpointingEnabled: true,
+ checkpointingInterval: 10 * time.Second,
+ leaderChanges: 1,
+ clusterSize: 3,
+ expectTTLIsLT: 290 * time.Second,
+ },
+ {
+ name: "Checkpointing enabled 10s, lease TTL is preserved after cluster restart",
+ ttl: 300 * time.Second,
+ checkpointingEnabled: true,
+ checkpointingInterval: 10 * time.Second,
+ leaderChanges: 1,
+ clusterSize: 1,
+ expectTTLIsLT: 290 * time.Second,
+ },
+ {
+ // Checking if checkpointing continues after the first leader change.
+ name: "Checkpointing enabled 10s, lease TTL is preserved after 2 leader changes",
+ ttl: 300 * time.Second,
+ checkpointingEnabled: true,
+ checkpointingInterval: 10 * time.Second,
+ leaderChanges: 2,
+ clusterSize: 3,
+ expectTTLIsLT: 280 * time.Second,
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ integration.BeforeTest(t)
+ config := &integration.ClusterConfig{
+ Size: tc.clusterSize,
+ EnableLeaseCheckpoint: tc.checkpointingEnabled,
+ LeaseCheckpointInterval: tc.checkpointingInterval,
+ }
+ clus := integration.NewCluster(t, config)
+ defer clus.Terminate(t)
+
+ // create lease
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ c := integration.ToGRPC(clus.RandClient())
+ lresp, err := c.Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: int64(tc.ttl.Seconds())})
+ if err != nil {
+ t.Fatal(err)
+ }
- // wait for a checkpoint to occur
- time.Sleep(leaseInterval + 1*time.Second)
+ for i := 0; i < tc.leaderChanges; i++ {
+ // wait for a checkpoint to occur
+ time.Sleep(tc.checkpointingInterval + 1*time.Second)
- // Force a leader election
- leaderId := clus.WaitLeader(t)
- leader := clus.Members[leaderId]
- leader.Stop(t)
- time.Sleep(time.Duration(3*electionTicks) * tickDuration)
- leader.Restart(t)
- newLeaderId := clus.WaitLeader(t)
- c2 := toGRPC(clus.Client(newLeaderId))
+ // Force a leader election
+ leaderID := clus.WaitLeader(t)
+ leader := clus.Members[leaderID]
+ leader.Stop(t)
+ time.Sleep(time.Duration(3*integration.ElectionTicks) * framecfg.TickDuration)
+ leader.Restart(t)
+ }
- time.Sleep(250 * time.Millisecond)
+ newLeaderID := clus.WaitLeader(t)
+ c2 := integration.ToGRPC(clus.Client(newLeaderID))
+
+ time.Sleep(250 * time.Millisecond)
+
+ // Check the TTL of the new leader
+ var ttlresp *pb.LeaseTimeToLiveResponse
+ for i := 0; i < 10; i++ {
+ if ttlresp, err = c2.Lease.LeaseTimeToLive(ctx, &pb.LeaseTimeToLiveRequest{ID: lresp.ID}); err != nil {
+ if status, ok := status.FromError(err); ok && status.Code() == codes.Unavailable {
+ time.Sleep(time.Millisecond * 250)
+ } else {
+ t.Fatal(err)
+ }
+ }
+ }
- // Check the TTL of the new leader
- var ttlresp *pb.LeaseTimeToLiveResponse
- for i := 0; i < 10; i++ {
- if ttlresp, err = c2.Lease.LeaseTimeToLive(ctx, &pb.LeaseTimeToLiveRequest{ID: lresp.ID}); err != nil {
- if status, ok := status.FromError(err); ok && status.Code() == codes.Unavailable {
- time.Sleep(time.Millisecond * 250)
- } else {
- t.Fatal(err)
+ if tc.expectTTLIsGT != 0 && time.Duration(ttlresp.TTL)*time.Second < tc.expectTTLIsGT {
+ t.Errorf("Expected lease ttl (%v) to be >= than (%v)", time.Duration(ttlresp.TTL)*time.Second, tc.expectTTLIsGT)
}
- }
- }
- expectedTTL := ttl - int64(leaseInterval.Seconds())
- if ttlresp.TTL < expectedTTL-1 || ttlresp.TTL > expectedTTL {
- t.Fatalf("expected lease to be checkpointed after restart such that %d < TTL <%d, but got TTL=%d", expectedTTL-1, expectedTTL, ttlresp.TTL)
+ if tc.expectTTLIsLT != 0 && time.Duration(ttlresp.TTL)*time.Second > tc.expectTTLIsLT {
+ t.Errorf("Expected lease ttl (%v) to be lower than (%v)", time.Duration(ttlresp.TTL)*time.Second, tc.expectTTLIsLT)
+ }
+ })
}
}
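
A hedged client-side sketch of what the checkpoint table asserts, assuming a cluster reachable at localhost:2379 that was started with lease checkpointing enabled (the exact server configuration is outside this excerpt).

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	lease, err := cli.Grant(ctx, 300) // 300s TTL, mirroring the test table
	if err != nil {
		log.Fatal(err)
	}

	// Re-query the remaining TTL later (e.g. after a leader change). With
	// checkpointing the value keeps counting down; without it, a new leader
	// resets it back toward the full 300s.
	time.Sleep(15 * time.Second)
	ttl, err := cli.TimeToLive(ctx, lease.ID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("lease %x remaining TTL: %ds\n", lease.ID, ttl.TTL)
}
```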
// TestV3LeaseExists creates a lease on a random client and confirms it exists in the cluster.
func TestV3LeaseExists(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
// create lease
ctx0, cancel0 := context.WithCancel(context.Background())
defer cancel0()
- lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant(
+ lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(
ctx0,
&pb.LeaseGrantRequest{TTL: 30})
if err != nil {
@@ -308,17 +456,17 @@ func TestV3LeaseExists(t *testing.T) {
// TestV3LeaseLeases creates leases and confirms list RPC fetches created ones.
func TestV3LeaseLeases(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
ctx0, cancel0 := context.WithCancel(context.Background())
defer cancel0()
// create leases
- ids := []int64{}
+ var ids []int64
for i := 0; i < 5; i++ {
- lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant(
+ lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(
ctx0,
&pb.LeaseGrantRequest{TTL: 30})
if err != nil {
@@ -330,7 +478,7 @@ func TestV3LeaseLeases(t *testing.T) {
ids = append(ids, lresp.ID)
}
- lresp, err := toGRPC(clus.RandClient()).Lease.LeaseLeases(
+ lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseLeases(
context.Background(),
&pb.LeaseLeasesRequest{})
if err != nil {
@@ -347,35 +495,58 @@ func TestV3LeaseLeases(t *testing.T) {
// it was observed that an immediate lease renewal after granting a lease from a follower resulted in "lease not found".
// related issue https://github.com/etcd-io/etcd/issues/6978
func TestV3LeaseRenewStress(t *testing.T) {
- testLeaseStress(t, stressLeaseRenew)
+ testLeaseStress(t, stressLeaseRenew, false)
+}
+
+// TestV3LeaseRenewStressWithClusterClient is similar to TestV3LeaseRenewStress,
+// but it uses a cluster client instead of a specific member's client.
+// The related issue is https://github.com/etcd-io/etcd/issues/13675.
+func TestV3LeaseRenewStressWithClusterClient(t *testing.T) {
+ testLeaseStress(t, stressLeaseRenew, true)
}
// TestV3LeaseTimeToLiveStress keeps creating lease and retrieving it immediately to ensure the lease can be retrieved.
// it was observed that an immediate lease retrieval after granting a lease from a follower resulted in "lease not found".
// related issue https://github.com/etcd-io/etcd/issues/6978
func TestV3LeaseTimeToLiveStress(t *testing.T) {
- testLeaseStress(t, stressLeaseTimeToLive)
+ testLeaseStress(t, stressLeaseTimeToLive, false)
}
-func testLeaseStress(t *testing.T, stresser func(context.Context, pb.LeaseClient) error) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+// TestV3LeaseTimeToLiveStressWithClusterClient is similar to TestV3LeaseTimeToLiveStress,
+// but it uses a cluster client instead of a specific member's client.
+// The related issue is https://github.com/etcd-io/etcd/issues/13675.
+func TestV3LeaseTimeToLiveStressWithClusterClient(t *testing.T) {
+ testLeaseStress(t, stressLeaseTimeToLive, true)
+}
+
+func testLeaseStress(t *testing.T, stresser func(context.Context, pb.LeaseClient) error, useClusterClient bool) {
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
errc := make(chan error)
- for i := 0; i < 30; i++ {
- for j := 0; j < 3; j++ {
- go func(i int) { errc <- stresser(ctx, toGRPC(clus.Client(i)).Lease) }(j)
+ if useClusterClient {
+ clusterClient, err := clus.ClusterClient(t)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i := 0; i < 300; i++ {
+ go func() { errc <- stresser(ctx, integration.ToGRPC(clusterClient).Lease) }()
+ }
+ } else {
+ for i := 0; i < 100; i++ {
+ for j := 0; j < 3; j++ {
+ go func(i int) { errc <- stresser(ctx, integration.ToGRPC(clus.Client(i)).Lease) }(j)
+ }
}
}
- for i := 0; i < 90; i++ {
- if err := <-errc; err != nil {
- t.Fatal(err)
- }
+ for i := 0; i < 300; i++ {
+ err := <-errc
+ require.NoError(t, err)
}
}
@@ -403,7 +574,7 @@ func stressLeaseRenew(tctx context.Context, lc pb.LeaseClient) (reterr error) {
continue
}
if rresp.TTL == 0 {
- return fmt.Errorf("TTL shouldn't be 0 so soon")
+ return errors.New("TTL shouldn't be 0 so soon")
}
}
return nil
@@ -421,7 +592,7 @@ func stressLeaseTimeToLive(tctx context.Context, lc pb.LeaseClient) (reterr erro
continue
}
_, kerr := lc.LeaseTimeToLive(tctx, &pb.LeaseTimeToLiveRequest{ID: resp.ID})
- if rpctypes.Error(kerr) == rpctypes.ErrLeaseNotFound {
+ if errors.Is(rpctypes.Error(kerr), rpctypes.ErrLeaseNotFound) {
return kerr
}
}
@@ -429,8 +600,8 @@ func stressLeaseTimeToLive(tctx context.Context, lc pb.LeaseClient) (reterr erro
}
func TestV3PutOnNonExistLease(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
ctx, cancel := context.WithCancel(context.Background())
@@ -438,7 +609,7 @@ func TestV3PutOnNonExistLease(t *testing.T) {
badLeaseID := int64(0x12345678)
putr := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: badLeaseID}
- _, err := toGRPC(clus.RandClient()).KV.Put(ctx, putr)
+ _, err := integration.ToGRPC(clus.RandClient()).KV.Put(ctx, putr)
if !eqErrGRPC(err, rpctypes.ErrGRPCLeaseNotFound) {
t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCLeaseNotFound)
}
@@ -447,13 +618,13 @@ func TestV3PutOnNonExistLease(t *testing.T) {
// TestV3GetNonExistLease ensures client retrieving nonexistent lease on a follower doesn't result node panic
// related issue https://github.com/etcd-io/etcd/issues/6537
func TestV3GetNonExistLease(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- lc := toGRPC(clus.RandClient()).Lease
+ lc := integration.ToGRPC(clus.RandClient()).Lease
lresp, err := lc.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 10})
if err != nil {
t.Errorf("failed to create lease %v", err)
@@ -468,12 +639,11 @@ func TestV3GetNonExistLease(t *testing.T) {
Keys: true,
}
- for _, client := range clus.clients {
+ for _, m := range clus.Members {
// quorum-read to ensure revoke completes before TimeToLive
- if _, err := toGRPC(client).KV.Range(ctx, &pb.RangeRequest{Key: []byte("_")}); err != nil {
- t.Fatal(err)
- }
- resp, err := toGRPC(client).Lease.LeaseTimeToLive(ctx, leaseTTLr)
+ _, err := integration.ToGRPC(m.Client).KV.Range(ctx, &pb.RangeRequest{Key: []byte("_")})
+ require.NoError(t, err)
+ resp, err := integration.ToGRPC(m.Client).Lease.LeaseTimeToLive(ctx, leaseTTLr)
if err != nil {
t.Fatalf("expected non nil error, but go %v", err)
}
@@ -485,8 +655,8 @@ func TestV3GetNonExistLease(t *testing.T) {
// TestV3LeaseSwitch tests a key can be switched from one lease to another.
func TestV3LeaseSwitch(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
key := "foo"
@@ -494,34 +664,34 @@ func TestV3LeaseSwitch(t *testing.T) {
// create lease
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- lresp1, err1 := toGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30})
+ lresp1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30})
if err1 != nil {
t.Fatal(err1)
}
- lresp2, err2 := toGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30})
+ lresp2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30})
if err2 != nil {
t.Fatal(err2)
}
// attach key on lease1 then switch it to lease2
put1 := &pb.PutRequest{Key: []byte(key), Lease: lresp1.ID}
- _, err := toGRPC(clus.RandClient()).KV.Put(ctx, put1)
+ _, err := integration.ToGRPC(clus.RandClient()).KV.Put(ctx, put1)
if err != nil {
t.Fatal(err)
}
put2 := &pb.PutRequest{Key: []byte(key), Lease: lresp2.ID}
- _, err = toGRPC(clus.RandClient()).KV.Put(ctx, put2)
+ _, err = integration.ToGRPC(clus.RandClient()).KV.Put(ctx, put2)
if err != nil {
t.Fatal(err)
}
// revoke lease1 should not remove key
- _, err = toGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp1.ID})
+ _, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp1.ID})
if err != nil {
t.Fatal(err)
}
rreq := &pb.RangeRequest{Key: []byte("foo")}
- rresp, err := toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
+ rresp, err := integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
if err != nil {
t.Fatal(err)
}
@@ -530,11 +700,11 @@ func TestV3LeaseSwitch(t *testing.T) {
}
// revoke lease2 should remove key
- _, err = toGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp2.ID})
+ _, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp2.ID})
if err != nil {
t.Fatal(err)
}
- rresp, err = toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
+ rresp, err = integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
if err != nil {
t.Fatal(err)
}
@@ -547,14 +717,14 @@ func TestV3LeaseSwitch(t *testing.T) {
// election timeout after it loses its quorum. And the new leader extends the TTL of
// the lease to at least TTL + election timeout.
func TestV3LeaseFailover(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
- toIsolate := clus.waitLeader(t, clus.Members)
+ toIsolate := clus.WaitMembersForLeader(t, clus.Members)
- lc := toGRPC(clus.Client(toIsolate)).Lease
+ lc := integration.ToGRPC(clus.Client(toIsolate)).Lease
// create lease
lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 5})
@@ -581,7 +751,7 @@ func TestV3LeaseFailover(t *testing.T) {
// send keep alive to old leader until the old leader starts
// to drop lease request.
- var expectedExp time.Time
+ expectedExp := time.Now().Add(5 * time.Second)
for {
if err = lac.Send(lreq); err != nil {
break
@@ -595,7 +765,7 @@ func TestV3LeaseFailover(t *testing.T) {
}
clus.Members[toIsolate].Resume()
- clus.waitLeader(t, clus.Members)
+ clus.WaitMembersForLeader(t, clus.Members)
// lease should not expire at the last received expire deadline.
time.Sleep(time.Until(expectedExp) - 500*time.Millisecond)
@@ -608,12 +778,12 @@ func TestV3LeaseFailover(t *testing.T) {
// TestV3LeaseRequireLeader ensures that a Recv will get a leader
// loss error if there is no leader.
func TestV3LeaseRequireLeader(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
- lc := toGRPC(clus.Client(0)).Lease
+ lc := integration.ToGRPC(clus.Client(0)).Lease
clus.Members[1].Stop(t)
clus.Members[2].Stop(t)
@@ -648,13 +818,13 @@ const fiveMinTTL int64 = 300
// TestV3LeaseRecoverAndRevoke ensures that revoking a lease after restart deletes the attached key.
func TestV3LeaseRecoverAndRevoke(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
- kvc := toGRPC(clus.Client(0)).KV
- lsc := toGRPC(clus.Client(0)).Lease
+ kvc := integration.ToGRPC(clus.Client(0)).KV
+ lsc := integration.ToGRPC(clus.Client(0)).Lease
lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL})
if err != nil {
@@ -671,16 +841,16 @@ func TestV3LeaseRecoverAndRevoke(t *testing.T) {
// restart server and ensure lease still exists
clus.Members[0].Stop(t)
clus.Members[0].Restart(t)
- clus.waitLeader(t, clus.Members)
+ clus.WaitMembersForLeader(t, clus.Members)
// overwrite old client with newly dialed connection
// otherwise, error with "grpc: RPC failed fast due to transport failure"
- nc, err := NewClientV3(clus.Members[0])
+ nc, err := integration.NewClientV3(clus.Members[0])
if err != nil {
t.Fatal(err)
}
- kvc = toGRPC(nc).KV
- lsc = toGRPC(nc).Lease
+ kvc = integration.ToGRPC(nc).KV
+ lsc = integration.ToGRPC(nc).Lease
defer nc.Close()
// revoke should delete the key
@@ -699,13 +869,13 @@ func TestV3LeaseRecoverAndRevoke(t *testing.T) {
// TestV3LeaseRevokeAndRecover ensures that revoked key stays deleted after restart.
func TestV3LeaseRevokeAndRecover(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
- kvc := toGRPC(clus.Client(0)).KV
- lsc := toGRPC(clus.Client(0)).Lease
+ kvc := integration.ToGRPC(clus.Client(0)).KV
+ lsc := integration.ToGRPC(clus.Client(0)).Lease
lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL})
if err != nil {
@@ -728,15 +898,15 @@ func TestV3LeaseRevokeAndRecover(t *testing.T) {
// restart server and ensure revoked key doesn't exist
clus.Members[0].Stop(t)
clus.Members[0].Restart(t)
- clus.waitLeader(t, clus.Members)
+ clus.WaitMembersForLeader(t, clus.Members)
// overwrite old client with newly dialed connection
// otherwise, error with "grpc: RPC failed fast due to transport failure"
- nc, err := NewClientV3(clus.Members[0])
+ nc, err := integration.NewClientV3(clus.Members[0])
if err != nil {
t.Fatal(err)
}
- kvc = toGRPC(nc).KV
+ kvc = integration.ToGRPC(nc).KV
defer nc.Close()
rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")})
@@ -751,13 +921,13 @@ func TestV3LeaseRevokeAndRecover(t *testing.T) {
// TestV3LeaseRecoverKeyWithDetachedLease ensures that revoking a detached lease after restart
// does not delete the key.
func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
- kvc := toGRPC(clus.Client(0)).KV
- lsc := toGRPC(clus.Client(0)).Lease
+ kvc := integration.ToGRPC(clus.Client(0)).KV
+ lsc := integration.ToGRPC(clus.Client(0)).Lease
lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL})
if err != nil {
@@ -780,16 +950,16 @@ func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) {
// restart server and ensure lease still exists
clus.Members[0].Stop(t)
clus.Members[0].Restart(t)
- clus.waitLeader(t, clus.Members)
+ clus.WaitMembersForLeader(t, clus.Members)
// overwrite old client with newly dialed connection
// otherwise, error with "grpc: RPC failed fast due to transport failure"
- nc, err := NewClientV3(clus.Members[0])
+ nc, err := integration.NewClientV3(clus.Members[0])
if err != nil {
t.Fatal(err)
}
- kvc = toGRPC(nc).KV
- lsc = toGRPC(nc).Lease
+ kvc = integration.ToGRPC(nc).KV
+ lsc = integration.ToGRPC(nc).Lease
defer nc.Close()
// revoke the detached lease
@@ -807,13 +977,13 @@ func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) {
}
func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
- kvc := toGRPC(clus.Client(0)).KV
- lsc := toGRPC(clus.Client(0)).Lease
+ kvc := integration.ToGRPC(clus.Client(0)).KV
+ lsc := integration.ToGRPC(clus.Client(0)).Lease
var leaseIDs []int64
for i := 0; i < 2; i++ {
@@ -835,7 +1005,7 @@ func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) {
// restart server and ensure lease still exists
clus.Members[0].Stop(t)
clus.Members[0].Restart(t)
- clus.waitLeader(t, clus.Members)
+ clus.WaitMembersForLeader(t, clus.Members)
for i, leaseID := range leaseIDs {
if !leaseExist(t, clus, leaseID) {
t.Errorf("#%d: unexpected lease not exists", i)
@@ -844,12 +1014,12 @@ func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) {
// overwrite old client with newly dialed connection
// otherwise, error with "grpc: RPC failed fast due to transport failure"
- nc, err := NewClientV3(clus.Members[0])
+ nc, err := integration.NewClientV3(clus.Members[0])
if err != nil {
t.Fatal(err)
}
- kvc = toGRPC(nc).KV
- lsc = toGRPC(nc).Lease
+ kvc = integration.ToGRPC(nc).KV
+ lsc = integration.ToGRPC(nc).Lease
defer nc.Close()
// revoke the old lease
@@ -880,10 +1050,82 @@ func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) {
}
}
+func TestV3LeaseTimeToLiveWithLeaderChanged(t *testing.T) {
+ t.Run("normal", func(subT *testing.T) {
+ testV3LeaseTimeToLiveWithLeaderChanged(subT, "beforeLookupWhenLeaseTimeToLive")
+ })
+
+ t.Run("forward", func(subT *testing.T) {
+ testV3LeaseTimeToLiveWithLeaderChanged(subT, "beforeLookupWhenForwardLeaseTimeToLive")
+ })
+}
+
+func testV3LeaseTimeToLiveWithLeaderChanged(t *testing.T, fpName string) {
+ if len(gofail.List()) == 0 {
+ t.Skip("please run 'make gofail-enable' before running the test")
+ }
+
+ integration.BeforeTest(t)
+
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
+ defer clus.Terminate(t)
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ oldLeadIdx := clus.WaitLeader(t)
+ followerIdx := (oldLeadIdx + 1) % 3
+
+ followerMemberID := clus.Members[followerIdx].ID()
+
+ oldLeadC := clus.Client(oldLeadIdx)
+
+ leaseResp, err := oldLeadC.Grant(ctx, 100)
+ require.NoError(t, err)
+
+ require.NoError(t, gofail.Enable(fpName, `sleep("3s")`))
+ t.Cleanup(func() {
+ terr := gofail.Disable(fpName)
+ if terr != nil && !errors.Is(terr, gofail.ErrDisabled) {
+ t.Fatalf("failed to disable %s: %v", fpName, terr)
+ }
+ })
+
+ readyCh := make(chan struct{})
+ errCh := make(chan error, 1)
+
+ var targetC *clientv3.Client
+ switch fpName {
+ case "beforeLookupWhenLeaseTimeToLive":
+ targetC = oldLeadC
+ case "beforeLookupWhenForwardLeaseTimeToLive":
+ targetC = clus.Client((oldLeadIdx + 2) % 3)
+ default:
+ t.Fatalf("unsupported %s failpoint", fpName)
+ }
+
+ go func() {
+ <-readyCh
+ time.Sleep(1 * time.Second)
+
+ _, merr := oldLeadC.MoveLeader(ctx, uint64(followerMemberID))
+ assert.NoError(t, gofail.Disable(fpName))
+ errCh <- merr
+ }()
+
+ close(readyCh)
+
+ ttlResp, err := targetC.TimeToLive(ctx, leaseResp.ID)
+ require.NoError(t, err)
+ require.GreaterOrEqual(t, int64(100), ttlResp.TTL)
+
+ require.NoError(t, <-errCh)
+}
+
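
The failpoint guard and Enable/Disable calls above come from the gofail runtime; a minimal sketch of the same pattern outside a test, reusing the failpoint name from the test above.

```go
package main

import (
	"fmt"

	gofail "go.etcd.io/gofail/runtime"
)

func main() {
	// Failpoints only exist when the binary was built with `make gofail-enable`.
	if len(gofail.List()) == 0 {
		fmt.Println("no failpoints compiled in; run 'make gofail-enable' first")
		return
	}
	// Activate a failpoint by name; the term syntax (here a 3s sleep) is
	// interpreted by the gofail runtime at the injection site.
	if err := gofail.Enable("beforeLookupWhenLeaseTimeToLive", `sleep("3s")`); err != nil {
		fmt.Println("enable failed:", err)
		return
	}
	defer gofail.Disable("beforeLookupWhenLeaseTimeToLive")
	fmt.Println("failpoint armed:", gofail.List())
}
```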
// acquireLeaseAndKey creates a new lease and creates an attached key.
-func acquireLeaseAndKey(clus *ClusterV3, key string) (int64, error) {
+func acquireLeaseAndKey(clus *integration.Cluster, key string) (int64, error) {
// create lease
- lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant(
+ lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(
context.TODO(),
&pb.LeaseGrantRequest{TTL: 1})
if err != nil {
@@ -894,7 +1136,7 @@ func acquireLeaseAndKey(clus *ClusterV3, key string) (int64, error) {
}
// attach to key
put := &pb.PutRequest{Key: []byte(key), Lease: lresp.ID}
- if _, err := toGRPC(clus.RandClient()).KV.Put(context.TODO(), put); err != nil {
+ if _, err := integration.ToGRPC(clus.RandClient()).KV.Put(context.TODO(), put); err != nil {
return 0, err
}
return lresp.ID, nil
@@ -902,8 +1144,8 @@ func acquireLeaseAndKey(clus *ClusterV3, key string) (int64, error) {
// testLeaseRemoveLeasedKey performs some action while holding a lease with an
// attached key "foo", then confirms the key is gone.
-func testLeaseRemoveLeasedKey(t *testing.T, act func(*ClusterV3, int64) error) {
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+func testLeaseRemoveLeasedKey(t *testing.T, act func(*integration.Cluster, int64) error) {
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
leaseID, err := acquireLeaseAndKey(clus, "foo")
@@ -917,7 +1159,7 @@ func testLeaseRemoveLeasedKey(t *testing.T, act func(*ClusterV3, int64) error) {
// confirm no key
rreq := &pb.RangeRequest{Key: []byte("foo")}
- rresp, err := toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
+ rresp, err := integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
if err != nil {
t.Fatal(err)
}
@@ -926,8 +1168,8 @@ func testLeaseRemoveLeasedKey(t *testing.T, act func(*ClusterV3, int64) error) {
}
}
-func leaseExist(t *testing.T, clus *ClusterV3, leaseID int64) bool {
- l := toGRPC(clus.RandClient()).Lease
+func leaseExist(t *testing.T, clus *integration.Cluster, leaseID int64) bool {
+ l := integration.ToGRPC(clus.RandClient()).Lease
_, err := l.LeaseGrant(context.Background(), &pb.LeaseGrantRequest{ID: leaseID, TTL: 5})
if err == nil {
diff --git a/tests/integration/v3_stm_test.go b/tests/integration/v3_stm_test.go
index 89c666b0257..5880009d58b 100644
--- a/tests/integration/v3_stm_test.go
+++ b/tests/integration/v3_stm_test.go
@@ -21,16 +21,18 @@ import (
"strconv"
"testing"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
+ "github.com/stretchr/testify/require"
+
v3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
)
// TestSTMConflict tests that conflicts are retried.
func TestSTMConflict(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
etcdc := clus.RandClient()
@@ -46,7 +48,7 @@ func TestSTMConflict(t *testing.T) {
for i := range keys {
curEtcdc := clus.RandClient()
srcKey := keys[i]
- applyf := func(stm concurrency.STM) error {
+ applyf := func(stm concurrency.STM) {
src := stm.Get(srcKey)
// must be different key to avoid double-adding
dstKey := srcKey
@@ -58,16 +60,21 @@ func TestSTMConflict(t *testing.T) {
dstV, _ := strconv.ParseInt(dst, 10, 64)
if srcV == 0 {
// can't rand.Intn on 0, so skip this transaction
- return nil
+ return
}
xfer := int64(rand.Intn(int(srcV)) / 2)
stm.Put(srcKey, fmt.Sprintf("%d", srcV-xfer))
stm.Put(dstKey, fmt.Sprintf("%d", dstV+xfer))
- return nil
}
go func() {
iso := concurrency.WithIsolation(concurrency.RepeatableReads)
- _, err := concurrency.NewSTM(curEtcdc, applyf, iso)
+ _, err := concurrency.NewSTM(curEtcdc,
+ func(stm concurrency.STM) error {
+ applyf(stm)
+ return nil
+ },
+ iso,
+ )
errc <- err
}()
}
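
A minimal, stand-alone use of the STM API exercised above, assuming keys "acct-a"/"acct-b" on localhost:2379 hold integer balances; the key names and transfer amount are illustrative.

```go
package main

import (
	"fmt"
	"log"
	"strconv"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	transfer := func(stm concurrency.STM) error {
		a, _ := strconv.Atoi(stm.Get("acct-a"))
		b, _ := strconv.Atoi(stm.Get("acct-b"))
		if a < 10 {
			return nil // nothing to move; commit as a no-op
		}
		stm.Put("acct-a", strconv.Itoa(a-10))
		stm.Put("acct-b", strconv.Itoa(b+10))
		return nil
	}

	// NewSTM retries transfer automatically whenever a conflicting write
	// invalidates the snapshot it read from.
	resp, err := concurrency.NewSTM(cli, transfer, concurrency.WithIsolation(concurrency.RepeatableReads))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("committed at revision", resp.Header.Revision)
}
```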
@@ -96,9 +103,9 @@ func TestSTMConflict(t *testing.T) {
// TestSTMPutNewKey confirms a STM put on a new key is visible after commit.
func TestSTMPutNewKey(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
etcdc := clus.RandClient()
@@ -123,9 +130,9 @@ func TestSTMPutNewKey(t *testing.T) {
// TestSTMAbort tests that an aborted txn does not modify any keys.
func TestSTMAbort(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
etcdc := clus.RandClient()
@@ -154,9 +161,9 @@ func TestSTMAbort(t *testing.T) {
// TestSTMSerialize tests that serialization is honored when serializable.
func TestSTMSerialize(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
etcdc := clus.RandClient()
@@ -173,7 +180,7 @@ func TestSTMSerialize(t *testing.T) {
defer close(updatec)
for i := 0; i < 5; i++ {
s := fmt.Sprintf("%d", i)
- ops := []v3.Op{}
+ var ops []v3.Op
for _, k := range keys {
ops = append(ops, v3.OpPut(k, s))
}
@@ -189,7 +196,7 @@ func TestSTMSerialize(t *testing.T) {
for range updatec {
curEtcdc := clus.RandClient()
applyf := func(stm concurrency.STM) error {
- vs := []string{}
+ var vs []string
for i := range keys {
vs = append(vs, stm.Get(keys[i]))
}
@@ -217,9 +224,9 @@ func TestSTMSerialize(t *testing.T) {
// TestSTMApplyOnConcurrentDeletion ensures that concurrent key deletion
// fails the first GET revision comparison within STM; trigger retry.
func TestSTMApplyOnConcurrentDeletion(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
etcdc := clus.RandClient()
@@ -266,15 +273,15 @@ func TestSTMApplyOnConcurrentDeletion(t *testing.T) {
}
func TestSTMSerializableSnapshotPut(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
// key with lower create/mod revision than keys being updated
_, err := cli.Put(context.TODO(), "a", "0")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
tries := 0
applyf := func(stm concurrency.STM) error {
@@ -289,12 +296,12 @@ func TestSTMSerializableSnapshotPut(t *testing.T) {
iso := concurrency.WithIsolation(concurrency.SerializableSnapshot)
_, err = concurrency.NewSTM(cli, applyf, iso)
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
_, err = concurrency.NewSTM(cli, applyf, iso)
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
resp, err := cli.Get(context.TODO(), "b")
- testutil.AssertNil(t, err)
+ require.NoError(t, err)
if resp.Kvs[0].Version != 2 {
t.Fatalf("bad version. got %+v, expected version 2", resp)
}
diff --git a/tests/integration/v3_tls_test.go b/tests/integration/v3_tls_test.go
index 4d77bee1387..81601d18a5f 100644
--- a/tests/integration/v3_tls_test.go
+++ b/tests/integration/v3_tls_test.go
@@ -17,11 +17,16 @@ package integration
import (
"context"
"crypto/tls"
+ "errors"
"testing"
"time"
- "go.etcd.io/etcd/client/v3"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"google.golang.org/grpc"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
)
func TestTLSClientCipherSuitesValid(t *testing.T) { testTLSCipherSuites(t, true) }
@@ -30,7 +35,7 @@ func TestTLSClientCipherSuitesMismatch(t *testing.T) { testTLSCipherSuites(t, fa
// testTLSCipherSuites ensures mismatching client-side cipher suite
// fail TLS handshake with the server.
func testTLSCipherSuites(t *testing.T, valid bool) {
- BeforeTest(t)
+ integration.BeforeTest(t)
cipherSuites := []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
@@ -40,22 +45,28 @@ func testTLSCipherSuites(t *testing.T, valid bool) {
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
}
- srvTLS, cliTLS := testTLSInfo, testTLSInfo
+ srvTLS, cliTLS := integration.TestTLSInfo, integration.TestTLSInfo
if valid {
srvTLS.CipherSuites, cliTLS.CipherSuites = cipherSuites, cipherSuites
} else {
srvTLS.CipherSuites, cliTLS.CipherSuites = cipherSuites[:2], cipherSuites[2:]
}
- clus := NewClusterV3(t, &ClusterConfig{Size: 1, ClientTLS: &srvTLS})
+ // go1.13 enables TLS 1.3 by default,
+ // and in TLS 1.3 cipher suites are not configurable,
+ // so set the max TLS version to TLS 1.2 to test the cipher-suite config.
+ srvTLS.MaxVersion = tls.VersionTLS12
+ cliTLS.MaxVersion = tls.VersionTLS12
+
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, ClientTLS: &srvTLS})
defer clus.Terminate(t)
cc, err := cliTLS.ClientConfig()
if err != nil {
t.Fatal(err)
}
- cli, cerr := NewClient(t, clientv3.Config{
- Endpoints: []string{clus.Members[0].GRPCAddr()},
+ cli, cerr := integration.NewClient(t, clientv3.Config{
+ Endpoints: []string{clus.Members[0].GRPCURL},
DialTimeout: time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
TLS: cc,
@@ -63,10 +74,73 @@ func testTLSCipherSuites(t *testing.T, valid bool) {
if cli != nil {
cli.Close()
}
- if !valid && cerr != context.DeadlineExceeded {
+ if !valid && !errors.Is(cerr, context.DeadlineExceeded) {
t.Fatalf("expected %v with TLS handshake failure, got %v", context.DeadlineExceeded, cerr)
}
if valid && cerr != nil {
t.Fatalf("expected TLS handshake success, got %v", cerr)
}
}
+
+func TestTLSMinMaxVersion(t *testing.T) {
+ integration.BeforeTest(t)
+
+ tests := []struct {
+ name string
+ minVersion uint16
+ maxVersion uint16
+ expectError bool
+ }{
+ {
+ name: "Connect with default TLS version should succeed",
+ minVersion: 0,
+ maxVersion: 0,
+ },
+ {
+ name: "Connect with TLS 1.2 only should fail",
+ minVersion: tls.VersionTLS12,
+ maxVersion: tls.VersionTLS12,
+ expectError: true,
+ },
+ {
+ name: "Connect with TLS 1.2 and 1.3 should succeed",
+ minVersion: tls.VersionTLS12,
+ maxVersion: tls.VersionTLS13,
+ },
+ {
+ name: "Connect with TLS 1.3 only should succeed",
+ minVersion: tls.VersionTLS13,
+ maxVersion: tls.VersionTLS13,
+ },
+ }
+
+ // Configure server to support TLS 1.3 only.
+ srvTLS := integration.TestTLSInfo
+ srvTLS.MinVersion = tls.VersionTLS13
+ srvTLS.MaxVersion = tls.VersionTLS13
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, ClientTLS: &srvTLS})
+ defer clus.Terminate(t)
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cc, err := integration.TestTLSInfo.ClientConfig()
+ require.NoError(t, err)
+
+ cc.MinVersion = tt.minVersion
+ cc.MaxVersion = tt.maxVersion
+ cli, cerr := integration.NewClient(t, clientv3.Config{
+ Endpoints: []string{clus.Members[0].GRPCURL},
+ DialTimeout: time.Second,
+ DialOptions: []grpc.DialOption{grpc.WithBlock()},
+ TLS: cc,
+ })
+ if cerr != nil {
+ assert.Truef(t, tt.expectError, "got TLS handshake error while expecting success: %v", cerr)
+ assert.Equal(t, context.DeadlineExceeded, cerr)
+ return
+ }
+
+ cli.Close()
+ })
+ }
+}
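
A small crypto/tls sketch of the version/cipher interplay both TLS tests rely on; nothing here is etcd-specific.

```go
package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// CipherSuites only applies to TLS 1.2 and below, so pinning MaxVersion is
	// what makes a cipher-suite mismatch observable in a handshake.
	cfg := &tls.Config{
		MinVersion:   tls.VersionTLS12,
		MaxVersion:   tls.VersionTLS12, // force TLS 1.2 so CipherSuites is honoured
		CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
	}
	fmt.Printf("min=%x max=%x suites=%v\n", cfg.MinVersion, cfg.MaxVersion, cfg.CipherSuites)

	// Under TLS 1.3 the suite list is ignored by the standard library, which is
	// why the cipher-suite test above pins MaxVersion to TLS 1.2 first.
	cfg13 := &tls.Config{MinVersion: tls.VersionTLS13}
	fmt.Printf("TLS 1.3 config ignores CipherSuites (len=%d)\n", len(cfg13.CipherSuites))
}
```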
diff --git a/tests/integration/v3_watch_restore_test.go b/tests/integration/v3_watch_restore_test.go
index a07dd138c16..b3fc8236f78 100644
--- a/tests/integration/v3_watch_restore_test.go
+++ b/tests/integration/v3_watch_restore_test.go
@@ -21,13 +21,15 @@ import (
"time"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/tests/v3/framework/config"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
)
// MustFetchNotEmptyMetric attempts to fetch given 'metric' from 'member',
// waiting for not-empty value or 'timeout'.
-func MustFetchNotEmptyMetric(tb testing.TB, member *member, metric string, timeout <-chan time.Time) string {
+func MustFetchNotEmptyMetric(tb testing.TB, member *integration.Member, metric string, timeout <-chan time.Time) string {
metricValue := ""
- tick := time.Tick(tickDuration)
+ tick := time.Tick(config.TickDuration)
for metricValue == "" {
tb.Logf("Waiting for metric: %v", metric)
select {
@@ -50,9 +52,9 @@ func MustFetchNotEmptyMetric(tb testing.TB, member *member, metric string, timeo
// that were created in synced watcher group in the first place.
// TODO: fix panic with gRPC proxy "panic: watcher current revision should not exceed current revision"
func TestV3WatchRestoreSnapshotUnsync(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{
+ clus := integration.NewCluster(t, &integration.ClusterConfig{
Size: 3,
SnapshotCount: 10,
SnapshotCatchUpEntries: 5,
@@ -62,12 +64,13 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) {
// spawn a watcher before shutdown, and put it in synced watcher
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
- wStream, errW := toGRPC(clus.Client(0)).Watch.Watch(ctx)
+ wStream, errW := integration.ToGRPC(clus.Client(0)).Watch.Watch(ctx)
if errW != nil {
t.Fatal(errW)
}
if err := wStream.Send(&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
- CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 5}}}); err != nil {
+ CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 5},
+ }}); err != nil {
t.Fatalf("wStream.Send error: %v", err)
}
wresp, errR := wStream.Recv()
@@ -79,13 +82,13 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) {
}
clus.Members[0].InjectPartition(t, clus.Members[1:]...)
- initialLead := clus.waitLeader(t, clus.Members[1:])
- t.Logf("elected lead: %v", clus.Members[initialLead].s.ID())
+ initialLead := clus.WaitMembersForLeader(t, clus.Members[1:]) + 1
+ t.Logf("elected lead: %v", clus.Members[initialLead].Server.MemberID())
t.Logf("sleeping for 2 seconds")
time.Sleep(2 * time.Second)
t.Logf("sleeping for 2 seconds DONE")
- kvc := toGRPC(clus.Client(1)).KV
+ kvc := integration.ToGRPC(clus.Client(1)).KV
// to trigger snapshot from the leader to the stopped follower
for i := 0; i < 15; i++ {
@@ -95,10 +98,28 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) {
}
}
- // trigger snapshot send from leader to this slow follower
- // which then calls watchable store Restore
+ // NOTE: When starting a new cluster with 3 members, each member will
+ // apply 3 ConfChange directly at the beginning before a leader is
+ // elected. Leader will apply 3 MemberAttrSet and 1 ClusterVersionSet
+ // changes. So member 0 has index 8 in raft log before network
+ // partition. We need to trigger EtcdServer.snapshot() at least twice.
+ //
+ // SnapshotCount: 10, SnapshotCatchUpEntries: 5
+ //
+ // T1: L(snapshot-index: 11, compacted-index: 6), F_m0(index:8)
+ // T2: L(snapshot-index: 22, compacted-index: 17), F_m0(index:8, out of date)
+ //
+ // Since there is no way to confirm server has compacted the log, we
+ // use log monitor to watch and expect "compacted Raft logs" content.
+ // In v3.6 we no longer generate the "compacted Raft logs" message, as raft compaction happens independently of snapshots.
+ // For now, use the snapshot log line, which should be equivalent to compaction.
+ expectMemberLog(t, clus.Members[initialLead], 5*time.Second, "saved snapshot to disk", 2)
+
+ // After RecoverPartition, leader L will send snapshot to slow F_m0
+ // follower, because F_m0(index:8) is 'out of date' compared to
+ // L(compacted-index:17).
clus.Members[0].RecoverPartition(t, clus.Members[1:]...)
- // We don't expect leadership change here, just recompute the leader's index
+ // We don't expect leadership change here, just recompute the leader's index
// within clus.Members list.
lead := clus.WaitLeader(t)
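
A back-of-the-envelope check of the index arithmetic in the NOTE above, using the test's SnapshotCount=10 and SnapshotCatchUpEntries=5; the concrete indexes 11 and 22 are taken from the comment, not computed from etcd internals.

```go
package main

import "fmt"

func main() {
	const (
		catchUpEntries = 5 // SnapshotCatchUpEntries from the test config
		followerIndex  = 8 // member 0's last applied index before the partition
	)

	// Each snapshot taken at index s lets the leader compact the raft log up to
	// s - catchUpEntries; once the follower's index falls below that, only a
	// full snapshot can catch it up.
	for _, snapIndex := range []int{11, 22} {
		compacted := snapIndex - catchUpEntries
		fmt.Printf("snapshot@%d -> compacted@%d, follower@%d needs full snapshot: %v\n",
			snapIndex, compacted, followerIndex, followerIndex < compacted)
	}
}
```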
@@ -121,6 +142,8 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) {
t.Fatalf("inflight snapshot receives expected 0 or 1, got %q", receives)
}
+ expectMemberLog(t, clus.Members[0], 5*time.Second, "received and saved database snapshot", 1)
+
t.Logf("sleeping for 2 seconds")
time.Sleep(2 * time.Second)
t.Logf("sleeping for 2 seconds DONE")
@@ -129,7 +152,7 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) {
// should be able to notify on old-revision watchers in unsynced
// make sure restore watch operation correctly moves watchers
// between synced and unsynced watchers
- errc := make(chan error)
+ errc := make(chan error, 1)
go func() {
cresp, cerr := wStream.Recv()
if cerr != nil {
@@ -152,3 +175,16 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) {
}
}
}
+
+func expectMemberLog(t *testing.T, m *integration.Member, timeout time.Duration, s string, count int) {
+ ctx, cancel := context.WithTimeout(context.TODO(), timeout)
+ defer cancel()
+
+ lines, err := m.LogObserver.Expect(ctx, s, count)
+ if err != nil {
+ t.Fatalf("failed to expect (log:%s, count:%v): %v", s, count, err)
+ }
+ for _, line := range lines {
+ t.Logf("[expected line]: %v", line)
+ }
+}
diff --git a/tests/integration/v3_watch_test.go b/tests/integration/v3_watch_test.go
index 240af36f4bf..d9bc3e70eab 100644
--- a/tests/integration/v3_watch_test.go
+++ b/tests/integration/v3_watch_test.go
@@ -17,6 +17,7 @@ package integration
import (
"bytes"
"context"
+ "errors"
"fmt"
"reflect"
"sort"
@@ -24,26 +25,38 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
+ gofail "go.etcd.io/gofail/runtime"
)
// TestV3WatchFromCurrentRevision tests Watch APIs from current revision.
func TestV3WatchFromCurrentRevision(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
tests := []struct {
+ name string
+
putKeys []string
watchRequest *pb.WatchRequest
wresps []*pb.WatchResponse
}{
- // watch the key, matching
{
+ "watch the key, matching",
[]string{"foo"},
&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
CreateRequest: &pb.WatchCreateRequest{
- Key: []byte("foo")}}},
+ Key: []byte("foo"),
+ },
+ }},
[]*pb.WatchResponse{
{
@@ -58,22 +71,26 @@ func TestV3WatchFromCurrentRevision(t *testing.T) {
},
},
},
- // watch the key, non-matching
{
+ "watch the key, non-matching",
[]string{"foo"},
&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
CreateRequest: &pb.WatchCreateRequest{
- Key: []byte("helloworld")}}},
+ Key: []byte("helloworld"),
+ },
+ }},
[]*pb.WatchResponse{},
},
- // watch the prefix, matching
{
+ "watch the prefix, matching",
[]string{"fooLong"},
&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
CreateRequest: &pb.WatchCreateRequest{
Key: []byte("foo"),
- RangeEnd: []byte("fop")}}},
+ RangeEnd: []byte("fop"),
+ },
+ }},
[]*pb.WatchResponse{
{
@@ -88,23 +105,27 @@ func TestV3WatchFromCurrentRevision(t *testing.T) {
},
},
},
- // watch the prefix, non-matching
{
+ "watch the prefix, non-matching",
[]string{"foo"},
&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
CreateRequest: &pb.WatchCreateRequest{
Key: []byte("helloworld"),
- RangeEnd: []byte("helloworle")}}},
+ RangeEnd: []byte("helloworle"),
+ },
+ }},
[]*pb.WatchResponse{},
},
- // watch full range, matching
{
+ "watch full range, matching",
[]string{"fooLong"},
&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
CreateRequest: &pb.WatchCreateRequest{
Key: []byte(""),
- RangeEnd: []byte("\x00")}}},
+ RangeEnd: []byte("\x00"),
+ },
+ }},
[]*pb.WatchResponse{
{
@@ -119,12 +140,14 @@ func TestV3WatchFromCurrentRevision(t *testing.T) {
},
},
},
- // multiple puts, one watcher with matching key
{
+ "multiple puts, one watcher with matching key",
[]string{"foo", "foo", "foo"},
&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
CreateRequest: &pb.WatchCreateRequest{
- Key: []byte("foo")}}},
+ Key: []byte("foo"),
+ },
+ }},
[]*pb.WatchResponse{
{
@@ -159,13 +182,15 @@ func TestV3WatchFromCurrentRevision(t *testing.T) {
},
},
},
- // multiple puts, one watcher with matching prefix
{
+ "multiple puts, one watcher with matching prefix",
[]string{"foo", "foo", "foo"},
&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
CreateRequest: &pb.WatchCreateRequest{
Key: []byte("foo"),
- RangeEnd: []byte("fop")}}},
+ RangeEnd: []byte("fop"),
+ },
+ }},
[]*pb.WatchResponse{
{
@@ -203,106 +228,98 @@ func TestV3WatchFromCurrentRevision(t *testing.T) {
}
for i, tt := range tests {
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
-
- wAPI := toGRPC(clus.RandClient()).Watch
- ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
- defer cancel()
- wStream, err := wAPI.Watch(ctx)
- if err != nil {
- t.Fatalf("#%d: wAPI.Watch error: %v", i, err)
- }
-
- err = wStream.Send(tt.watchRequest)
- if err != nil {
- t.Fatalf("#%d: wStream.Send error: %v", i, err)
- }
-
- // ensure watcher request created a new watcher
- cresp, err := wStream.Recv()
- if err != nil {
- t.Errorf("#%d: wStream.Recv error: %v", i, err)
- clus.Terminate(t)
- continue
- }
- if !cresp.Created {
- t.Errorf("#%d: did not create watchid, got %+v", i, cresp)
- clus.Terminate(t)
- continue
- }
- if cresp.Canceled {
- t.Errorf("#%d: canceled watcher on create %+v", i, cresp)
- clus.Terminate(t)
- continue
- }
-
- createdWatchId := cresp.WatchId
- if cresp.Header == nil || cresp.Header.Revision != 1 {
- t.Errorf("#%d: header revision got +%v, wanted revison 1", i, cresp)
- clus.Terminate(t)
- continue
- }
-
- // asynchronously create keys
- ch := make(chan struct{}, 1)
- go func() {
- for _, k := range tt.putKeys {
- kvc := toGRPC(clus.RandClient()).KV
- req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
- if _, err := kvc.Put(context.TODO(), req); err != nil {
- t.Errorf("#%d: couldn't put key (%v)", i, err)
- }
+ t.Run(tt.name, func(t *testing.T) {
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
+ defer clus.Terminate(t)
+
+ wAPI := integration.ToGRPC(clus.RandClient()).Watch
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ wStream, err := wAPI.Watch(ctx)
+ if err != nil {
+ t.Fatalf("#%d: wAPI.Watch error: %v", i, err)
}
- ch <- struct{}{}
- }()
- // check stream results
- for j, wresp := range tt.wresps {
- resp, err := wStream.Recv()
+ err = wStream.Send(tt.watchRequest)
if err != nil {
- t.Errorf("#%d.%d: wStream.Recv error: %v", i, j, err)
+ t.Fatalf("#%d: wStream.Send error: %v", i, err)
}
- if resp.Header == nil {
- t.Fatalf("#%d.%d: unexpected nil resp.Header", i, j)
- }
- if resp.Header.Revision != wresp.Header.Revision {
- t.Errorf("#%d.%d: resp.Header.Revision got = %d, want = %d", i, j, resp.Header.Revision, wresp.Header.Revision)
+ // ensure watcher request created a new watcher
+ cresp, err := wStream.Recv()
+ if err != nil {
+ t.Fatalf("#%d: wStream.Recv error: %v", i, err)
}
-
- if wresp.Created != resp.Created {
- t.Errorf("#%d.%d: resp.Created got = %v, want = %v", i, j, resp.Created, wresp.Created)
+ if !cresp.Created {
+ t.Fatalf("#%d: did not create watchid, got %+v", i, cresp)
}
- if resp.WatchId != createdWatchId {
- t.Errorf("#%d.%d: resp.WatchId got = %d, want = %d", i, j, resp.WatchId, createdWatchId)
+ if cresp.Canceled {
+ t.Fatalf("#%d: canceled watcher on create %+v", i, cresp)
}
- if !reflect.DeepEqual(resp.Events, wresp.Events) {
- t.Errorf("#%d.%d: resp.Events got = %+v, want = %+v", i, j, resp.Events, wresp.Events)
+ createdWatchID := cresp.WatchId
+ if cresp.Header == nil || cresp.Header.Revision != 1 {
+ t.Fatalf("#%d: header revision got +%v, wanted revison 1", i, cresp)
}
- }
- rok, nr := waitResponse(wStream, 1*time.Second)
- if !rok {
- t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
- }
+ // asynchronously create keys
+ ch := make(chan struct{}, 1)
+ go func() {
+ for _, k := range tt.putKeys {
+ kvc := integration.ToGRPC(clus.RandClient()).KV
+ req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
+ if _, err := kvc.Put(context.TODO(), req); err != nil {
+ t.Errorf("#%d: couldn't put key (%v)", i, err)
+ }
+ }
+ ch <- struct{}{}
+ }()
- // wait for the client to finish sending the keys before terminating the cluster
- <-ch
+ // check stream results
+ for j, wresp := range tt.wresps {
+ resp, err := wStream.Recv()
+ if err != nil {
+ t.Errorf("#%d.%d: wStream.Recv error: %v", i, j, err)
+ }
+
+ if resp.Header == nil {
+ t.Fatalf("#%d.%d: unexpected nil resp.Header", i, j)
+ }
+ if resp.Header.Revision != wresp.Header.Revision {
+ t.Errorf("#%d.%d: resp.Header.Revision got = %d, want = %d", i, j, resp.Header.Revision, wresp.Header.Revision)
+ }
+
+ if wresp.Created != resp.Created {
+ t.Errorf("#%d.%d: resp.Created got = %v, want = %v", i, j, resp.Created, wresp.Created)
+ }
+ if resp.WatchId != createdWatchID {
+ t.Errorf("#%d.%d: resp.WatchId got = %d, want = %d", i, j, resp.WatchId, createdWatchID)
+ }
+
+ if !reflect.DeepEqual(resp.Events, wresp.Events) {
+ t.Errorf("#%d.%d: resp.Events got = %+v, want = %+v", i, j, resp.Events, wresp.Events)
+ }
+ }
+
+ rok, nr := waitResponse(wStream, 1*time.Second)
+ if !rok {
+ t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
+ }
- // can't defer because tcp ports will be in use
- clus.Terminate(t)
+ // wait for the client to finish sending the keys before terminating the cluster
+ <-ch
+ })
}
}
// TestV3WatchFutureRevision tests Watch APIs from a future revision.
func TestV3WatchFutureRevision(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
- wAPI := toGRPC(clus.RandClient()).Watch
+ wAPI := integration.ToGRPC(clus.RandClient()).Watch
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
wStream, err := wAPI.Watch(ctx)
@@ -313,7 +330,8 @@ func TestV3WatchFutureRevision(t *testing.T) {
wkey := []byte("foo")
wrev := int64(10)
req := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
- CreateRequest: &pb.WatchCreateRequest{Key: wkey, StartRevision: wrev}}}
+ CreateRequest: &pb.WatchCreateRequest{Key: wkey, StartRevision: wrev},
+ }}
err = wStream.Send(req)
if err != nil {
t.Fatalf("wStream.Send error: %v", err)
@@ -328,7 +346,7 @@ func TestV3WatchFutureRevision(t *testing.T) {
t.Fatalf("create %v, want %v", cresp.Created, true)
}
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
for {
req := &pb.PutRequest{Key: wkey, Value: []byte("bar")}
resp, rerr := kvc.Put(context.TODO(), req)
@@ -358,12 +376,12 @@ func TestV3WatchFutureRevision(t *testing.T) {
// TestV3WatchWrongRange tests wrong range does not create watchers.
func TestV3WatchWrongRange(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
- wAPI := toGRPC(clus.RandClient()).Watch
+ wAPI := integration.ToGRPC(clus.RandClient()).Watch
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
wStream, err := wAPI.Watch(ctx)
@@ -382,7 +400,8 @@ func TestV3WatchWrongRange(t *testing.T) {
}
for i, tt := range tests {
if err := wStream.Send(&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
- CreateRequest: &pb.WatchCreateRequest{Key: tt.key, RangeEnd: tt.end, StartRevision: 1}}}); err != nil {
+ CreateRequest: &pb.WatchCreateRequest{Key: tt.key, RangeEnd: tt.end, StartRevision: 1},
+ }}); err != nil {
t.Fatalf("#%d: wStream.Send error: %v", i, err)
}
cresp, err := wStream.Recv()
@@ -395,38 +414,40 @@ func TestV3WatchWrongRange(t *testing.T) {
if cresp.Canceled != tt.canceled {
t.Fatalf("#%d: canceled %v, want %v", i, tt.canceled, cresp.Canceled)
}
- if tt.canceled && cresp.WatchId != -1 {
- t.Fatalf("#%d: canceled watch ID %d, want -1", i, cresp.WatchId)
+ if tt.canceled && cresp.WatchId != clientv3.InvalidWatchID {
+ t.Fatalf("#%d: canceled watch ID %d, want %d", i, cresp.WatchId, clientv3.InvalidWatchID)
}
}
}
// TestV3WatchCancelSynced tests Watch APIs cancellation from synced map.
func TestV3WatchCancelSynced(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
testV3WatchCancel(t, 0)
}
// TestV3WatchCancelUnsynced tests Watch APIs cancellation from unsynced map.
func TestV3WatchCancelUnsynced(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
testV3WatchCancel(t, 1)
}
func testV3WatchCancel(t *testing.T, startRev int64) {
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
- wStream, errW := toGRPC(clus.RandClient()).Watch.Watch(ctx)
+ wStream, errW := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
if errW != nil {
t.Fatalf("wAPI.Watch error: %v", errW)
}
wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
CreateRequest: &pb.WatchCreateRequest{
- Key: []byte("foo"), StartRevision: startRev}}}
+ Key: []byte("foo"), StartRevision: startRev,
+ },
+ }}
if err := wStream.Send(wreq); err != nil {
t.Fatalf("wStream.Send error: %v", err)
}
@@ -441,7 +462,9 @@ func testV3WatchCancel(t *testing.T, startRev int64) {
creq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CancelRequest{
CancelRequest: &pb.WatchCancelRequest{
- WatchId: wresp.WatchId}}}
+ WatchId: wresp.WatchId,
+ },
+ }}
if err := wStream.Send(creq); err != nil {
t.Fatalf("wStream.Send error: %v", err)
}
@@ -454,7 +477,7 @@ func testV3WatchCancel(t *testing.T, startRev int64) {
t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
}
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
t.Errorf("couldn't put key (%v)", err)
}
@@ -469,13 +492,13 @@ func testV3WatchCancel(t *testing.T, startRev int64) {
// TestV3WatchCurrentPutOverlap ensures current watchers receive all events with
// overlapping puts.
func TestV3WatchCurrentPutOverlap(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
- wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
+ wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
if wErr != nil {
t.Fatalf("wAPI.Watch error: %v", wErr)
}
@@ -488,7 +511,7 @@ func TestV3WatchCurrentPutOverlap(t *testing.T) {
wg.Add(1)
go func() {
defer wg.Done()
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
req := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
if _, err := kvc.Put(context.TODO(), req); err != nil {
t.Errorf("couldn't put key (%v)", err)
@@ -500,7 +523,8 @@ func TestV3WatchCurrentPutOverlap(t *testing.T) {
progress := make(map[int64]int64)
wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
- CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), RangeEnd: []byte("fop")}}}
+ CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), RangeEnd: []byte("fop")},
+ }}
if err := wStream.Send(wreq); err != nil {
t.Fatalf("first watch request failed (%v)", err)
}
@@ -553,21 +577,23 @@ func TestV3WatchCurrentPutOverlap(t *testing.T) {
// TestV3WatchEmptyKey ensures synced watchers see empty key PUTs as PUT events
func TestV3WatchEmptyKey(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
- ws, werr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
+ ws, werr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
if werr != nil {
t.Fatal(werr)
}
req := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
CreateRequest: &pb.WatchCreateRequest{
- Key: []byte("foo")}}}
+ Key: []byte("foo"),
+ },
+ }}
if err := ws.Send(req); err != nil {
t.Fatal(err)
}
@@ -576,7 +602,7 @@ func TestV3WatchEmptyKey(t *testing.T) {
}
// put a key with empty value
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
preq := &pb.PutRequest{Key: []byte("foo")}
if _, err := kvc.Put(context.TODO(), preq); err != nil {
t.Fatal(err)
@@ -599,12 +625,12 @@ func TestV3WatchEmptyKey(t *testing.T) {
}
func TestV3WatchMultipleWatchersSynced(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
testV3WatchMultipleWatchers(t, 0)
}
func TestV3WatchMultipleWatchersUnsynced(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
testV3WatchMultipleWatchers(t, 1)
}
@@ -613,14 +639,14 @@ func TestV3WatchMultipleWatchersUnsynced(t *testing.T) {
// that matches all watchers, and another key that matches only
// one watcher to test if it receives expected events.
func testV3WatchMultipleWatchers(t *testing.T, startRev int64) {
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
- wStream, errW := toGRPC(clus.RandClient()).Watch.Watch(ctx)
+ wStream, errW := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
if errW != nil {
t.Fatalf("wAPI.Watch error: %v", errW)
}
@@ -631,11 +657,15 @@ func testV3WatchMultipleWatchers(t *testing.T, startRev int64) {
if i < watchKeyN {
wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
CreateRequest: &pb.WatchCreateRequest{
- Key: []byte("foo"), StartRevision: startRev}}}
+ Key: []byte("foo"), StartRevision: startRev,
+ },
+ }}
} else {
wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
CreateRequest: &pb.WatchCreateRequest{
- Key: []byte("fo"), RangeEnd: []byte("fp"), StartRevision: startRev}}}
+ Key: []byte("fo"), RangeEnd: []byte("fp"), StartRevision: startRev,
+ },
+ }}
}
if err := wStream.Send(wreq); err != nil {
t.Fatalf("wStream.Send error: %v", err)
@@ -704,30 +734,32 @@ func testV3WatchMultipleWatchers(t *testing.T, startRev int64) {
}
func TestV3WatchMultipleEventsTxnSynced(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
testV3WatchMultipleEventsTxn(t, 0)
}
func TestV3WatchMultipleEventsTxnUnsynced(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
testV3WatchMultipleEventsTxn(t, 1)
}
// testV3WatchMultipleEventsTxn tests Watch APIs when it receives multiple events.
func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) {
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
- wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
+ wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
if wErr != nil {
t.Fatalf("wAPI.Watch error: %v", wErr)
}
wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
CreateRequest: &pb.WatchCreateRequest{
- Key: []byte("foo"), RangeEnd: []byte("fop"), StartRevision: startRev}}}
+ Key: []byte("foo"), RangeEnd: []byte("fop"), StartRevision: startRev,
+ },
+ }}
if err := wStream.Send(wreq); err != nil {
t.Fatalf("wStream.Send error: %v", err)
}
@@ -735,13 +767,15 @@ func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) {
t.Fatalf("create response failed: resp=%v, err=%v", resp, err)
}
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
txn := pb.TxnRequest{}
for i := 0; i < 3; i++ {
ru := &pb.RequestOp{}
ru.Request = &pb.RequestOp_RequestPut{
RequestPut: &pb.PutRequest{
- Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte("bar")}}
+ Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte("bar"),
+ },
+ }
txn.Success = append(txn.Success, ru)
}
@@ -753,7 +787,7 @@ func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) {
t.Fatalf("kvc.Txn failed: %+v", tresp)
}
- events := []*mvccpb.Event{}
+ var events []*mvccpb.Event
for len(events) < 3 {
resp, err := wStream.Recv()
if err != nil {
@@ -797,11 +831,11 @@ func (evs eventsSortByKey) Less(i, j int) bool {
}
func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
t.Fatalf("couldn't put key (%v)", err)
@@ -812,14 +846,16 @@ func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
- wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
+ wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
if wErr != nil {
t.Fatalf("wAPI.Watch error: %v", wErr)
}
wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
CreateRequest: &pb.WatchCreateRequest{
- Key: []byte("foo"), RangeEnd: []byte("fop"), StartRevision: 1}}}
+ Key: []byte("foo"), RangeEnd: []byte("fop"), StartRevision: 1,
+ },
+ }}
if err := wStream.Send(wreq); err != nil {
t.Fatalf("wStream.Send error: %v", err)
}
@@ -850,7 +886,7 @@ func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
},
}
- events := []*mvccpb.Event{}
+ var events []*mvccpb.Event
for len(events) < 4 {
resp, err := wStream.Recv()
if err != nil {
@@ -875,23 +911,129 @@ func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
}
}
+// TestV3WatchProgressOnMemberRestart verifies the client side doesn't
+// receive duplicated events.
+// Refer to https://github.com/etcd-io/etcd/pull/15248#issuecomment-1423225742.
+func TestV3WatchProgressOnMemberRestart(t *testing.T) {
+ integration.BeforeTest(t)
+
+ clus := integration.NewCluster(t, &integration.ClusterConfig{
+ Size: 1,
+ WatchProgressNotifyInterval: time.Second,
+ })
+ defer clus.Terminate(t)
+
+ client := clus.RandClient()
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ errC := make(chan error, 1)
+ watchReady := make(chan struct{}, 1)
+ doneC := make(chan struct{}, 1)
+ progressNotifyC := make(chan struct{}, 1)
+ go func() {
+ defer close(doneC)
+
+ var (
+ lastWatchedModRevision int64
+ gotProgressNotification bool
+ )
+
+ wch := client.Watch(ctx, "foo", clientv3.WithProgressNotify())
+ watchReady <- struct{}{}
+ for wr := range wch {
+ if wr.Err() != nil {
+ errC <- fmt.Errorf("watch error: %w", wr.Err())
+ return
+ }
+
+ if len(wr.Events) == 0 {
+ // We need to make sure at least one progress notification
+ // is received after receiving the normal watch response
+ // and before restarting the member.
+ if lastWatchedModRevision > 0 {
+ gotProgressNotification = true
+ progressNotifyC <- struct{}{}
+ }
+ continue
+ }
+
+ for _, event := range wr.Events {
+ if event.Kv.ModRevision <= lastWatchedModRevision {
+ errC <- fmt.Errorf("got an unexpected revision: %d, lastWatchedModRevision: %d",
+ event.Kv.ModRevision,
+ lastWatchedModRevision)
+ return
+ }
+ lastWatchedModRevision = event.Kv.ModRevision
+ }
+
+ if gotProgressNotification {
+ return
+ }
+ }
+ }()
+
+ // waiting for the watcher ready
+ t.Log("Waiting for the watcher to be ready.")
+ <-watchReady
+ time.Sleep(time.Second)
+
+ // write a K/V first
+ t.Log("Writing key 'foo' first")
+ _, err := client.Put(ctx, "foo", "bar1")
+ require.NoError(t, err)
+
+ // make sure at least one progress notification is received
+ // before restarting the member
+ t.Log("Waiting for the progress notification")
+ select {
+ case <-progressNotifyC:
+ case <-time.After(5 * time.Second):
+ t.Log("Do not receive the progress notification in 5 seconds, move forward anyway.")
+ }
+
+ // restart the member
+ t.Log("Restarting the member")
+ clus.Members[0].Stop(t)
+ clus.Members[0].Restart(t)
+ clus.Members[0].WaitOK(t)
+
+ // write the same key again after the member restarted
+ t.Log("Writing the same key 'foo' again after restarting the member")
+ _, err = client.Put(ctx, "foo", "bar2")
+ require.NoError(t, err)
+
+ t.Log("Waiting for result")
+ select {
+ case <-progressNotifyC:
+ t.Log("Progress notification received")
+ case err := <-errC:
+ t.Fatal(err)
+ case <-doneC:
+ t.Log("Done")
+ case <-time.After(15 * time.Second):
+ t.Fatal("Timed out waiting for the response")
+ }
+}
+
func TestV3WatchMultipleStreamsSynced(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
testV3WatchMultipleStreams(t, 0)
}
func TestV3WatchMultipleStreamsUnsynced(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
testV3WatchMultipleStreams(t, 1)
}
// testV3WatchMultipleStreams tests multiple watchers on the same key on multiple streams.
func testV3WatchMultipleStreams(t *testing.T, startRev int64) {
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
- wAPI := toGRPC(clus.RandClient()).Watch
- kvc := toGRPC(clus.RandClient()).KV
+ wAPI := integration.ToGRPC(clus.RandClient()).Watch
+ kvc := integration.ToGRPC(clus.RandClient()).KV
streams := make([]pb.Watch_WatchClient, 5)
for i := range streams {
@@ -903,7 +1045,9 @@ func testV3WatchMultipleStreams(t *testing.T, startRev int64) {
}
wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
CreateRequest: &pb.WatchCreateRequest{
- Key: []byte("foo"), StartRevision: startRev}}}
+ Key: []byte("foo"), StartRevision: startRev,
+ },
+ }}
if err := wStream.Send(wreq); err != nil {
t.Fatalf("wStream.Send error: %v", err)
}
@@ -989,25 +1133,27 @@ func TestWatchWithProgressNotify(t *testing.T) {
testInterval := 3 * time.Second
defer func() { v3rpc.SetProgressReportInterval(oldpi) }()
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
- wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
+ wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
if wErr != nil {
t.Fatalf("wAPI.Watch error: %v", wErr)
}
// create two watchers, one with progressNotify set.
wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
- CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 1, ProgressNotify: true}}}
+ CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 1, ProgressNotify: true},
+ }}
if err := wStream.Send(wreq); err != nil {
t.Fatalf("watch request failed (%v)", err)
}
wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
- CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 1}}}
+ CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 1},
+ }}
if err := wStream.Send(wreq); err != nil {
t.Fatalf("watch request failed (%v)", err)
}
@@ -1037,14 +1183,14 @@ func TestWatchWithProgressNotify(t *testing.T) {
}
}
-// TestV3WatcMultiOpenhClose opens many watchers concurrently on multiple streams.
+// TestV3WatchClose opens many watchers concurrently on multiple streams.
func TestV3WatchClose(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
c := clus.Client(0)
- wapi := toGRPC(c).Watch
+ wapi := integration.ToGRPC(c).Watch
var wg sync.WaitGroup
wg.Add(100)
@@ -1062,27 +1208,29 @@ func TestV3WatchClose(t *testing.T) {
cr := &pb.WatchCreateRequest{Key: []byte("a")}
req := &pb.WatchRequest{
RequestUnion: &pb.WatchRequest_CreateRequest{
- CreateRequest: cr}}
+ CreateRequest: cr,
+ },
+ }
ws.Send(req)
ws.Recv()
}()
}
- clus.Members[0].DropConnections()
+ clus.Members[0].Bridge().DropConnections()
wg.Wait()
}
// TestV3WatchWithFilter ensures watcher filters out the events correctly.
func TestV3WatchWithFilter(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
- ws, werr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
+ ws, werr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx)
if werr != nil {
t.Fatal(werr)
}
@@ -1090,7 +1238,8 @@ func TestV3WatchWithFilter(t *testing.T) {
CreateRequest: &pb.WatchCreateRequest{
Key: []byte("foo"),
Filters: []pb.WatchCreateRequest_FilterType{pb.WatchCreateRequest_NOPUT},
- }}}
+ },
+ }}
if err := ws.Send(req); err != nil {
t.Fatal(err)
}
@@ -1109,7 +1258,7 @@ func TestV3WatchWithFilter(t *testing.T) {
}()
// put a key with empty value
- kvc := toGRPC(clus.RandClient()).KV
+ kvc := integration.ToGRPC(clus.RandClient()).KV
preq := &pb.PutRequest{Key: []byte("foo")}
if _, err := kvc.Put(context.TODO(), preq); err != nil {
t.Fatal(err)
@@ -1143,8 +1292,8 @@ func TestV3WatchWithFilter(t *testing.T) {
}
func TestV3WatchWithPrevKV(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
wctx, wcancel := context.WithCancel(context.Background())
@@ -1164,12 +1313,11 @@ func TestV3WatchWithPrevKV(t *testing.T) {
vals: []string{"first", "second"},
}}
for i, tt := range tests {
- kvc := toGRPC(clus.RandClient()).KV
- if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte(tt.key), Value: []byte(tt.vals[0])}); err != nil {
- t.Fatal(err)
- }
+ kvc := integration.ToGRPC(clus.RandClient()).KV
+ _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte(tt.key), Value: []byte(tt.vals[0])})
+ require.NoError(t, err)
- ws, werr := toGRPC(clus.RandClient()).Watch.Watch(wctx)
+ ws, werr := integration.ToGRPC(clus.RandClient()).Watch.Watch(wctx)
if werr != nil {
t.Fatal(werr)
}
@@ -1179,17 +1327,15 @@ func TestV3WatchWithPrevKV(t *testing.T) {
Key: []byte(tt.key),
RangeEnd: []byte(tt.end),
PrevKv: true,
- }}}
- if err := ws.Send(req); err != nil {
- t.Fatal(err)
- }
- if _, err := ws.Recv(); err != nil {
- t.Fatal(err)
- }
+ },
+ }}
+ err = ws.Send(req)
+ require.NoError(t, err)
+ _, err = ws.Recv()
+ require.NoError(t, err)
- if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte(tt.key), Value: []byte(tt.vals[1])}); err != nil {
- t.Fatal(err)
- }
+ _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte(tt.key), Value: []byte(tt.vals[1])})
+ require.NoError(t, err)
recv := make(chan *pb.WatchResponse, 1)
go func() {
@@ -1217,9 +1363,9 @@ func TestV3WatchWithPrevKV(t *testing.T) {
// TestV3WatchCancellation ensures that watch cancellation frees up server resources.
func TestV3WatchCancellation(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
@@ -1231,9 +1377,9 @@ func TestV3WatchCancellation(t *testing.T) {
cli.Watch(ctx, "/foo")
for i := 0; i < 1000; i++ {
- ctx, cancel := context.WithCancel(ctx)
- cli.Watch(ctx, "/foo")
- cancel()
+ wctx, wcancel := context.WithCancel(ctx)
+ cli.Watch(wctx, "/foo")
+ wcancel()
}
// Wait a little for cancellations to take hold
@@ -1245,7 +1391,7 @@ func TestV3WatchCancellation(t *testing.T) {
}
var expected string
- if ThroughProxy {
+ if integration.ThroughProxy {
// grpc proxy has additional 2 watches open
expected = "3"
} else {
@@ -1259,9 +1405,9 @@ func TestV3WatchCancellation(t *testing.T) {
// TestV3WatchCloseCancelRace ensures that watch close doesn't decrement the watcher total too far.
func TestV3WatchCloseCancelRace(t *testing.T) {
- BeforeTest(t)
+ integration.BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
@@ -1270,9 +1416,9 @@ func TestV3WatchCloseCancelRace(t *testing.T) {
cli := clus.RandClient()
for i := 0; i < 1000; i++ {
- ctx, cancel := context.WithCancel(ctx)
- cli.Watch(ctx, "/foo")
- cancel()
+ wctx, wcancel := context.WithCancel(ctx)
+ cli.Watch(wctx, "/foo")
+ wcancel()
}
// Wait a little for cancellations to take hold
@@ -1284,7 +1430,7 @@ func TestV3WatchCloseCancelRace(t *testing.T) {
}
var expected string
- if ThroughProxy {
+ if integration.ThroughProxy {
// grpc proxy has additional 2 watches open
expected = "2"
} else {
@@ -1295,3 +1441,168 @@ func TestV3WatchCloseCancelRace(t *testing.T) {
t.Fatalf("expected %s watch, got %s", expected, minWatches)
}
}
+
+// TestV3WatchProgressWaitsForSync checks that progress notifications
+// don't get sent until the watcher is synchronised
+func TestV3WatchProgressWaitsForSync(t *testing.T) {
+ // Disable for gRPC proxy, as it does not support requesting
+ // progress notifications
+ if integration.ThroughProxy {
+ t.Skip("grpc proxy currently does not support requesting progress notifications")
+ }
+
+ integration.BeforeTest(t)
+
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
+ defer clus.Terminate(t)
+
+ client := clus.RandClient()
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ // Write a couple values into key to make sure there's a
+ // non-trivial amount of history.
+ count := 1001
+ t.Logf("Writing key 'foo' %d times", count)
+ for i := 0; i < count; i++ {
+ _, err := client.Put(ctx, "foo", fmt.Sprintf("bar%d", i))
+ require.NoError(t, err)
+ }
+
+ // Create watch channel starting at revision 1 (i.e. it starts
+ // unsynced because of the update above)
+ wch := client.Watch(ctx, "foo", clientv3.WithRev(1))
+
+ // Immediately request a progress notification. As the client
+ // is unsynchronised, the server will not sent any notification,
+ // as client can infer progress from events.
+ err := client.RequestProgress(ctx)
+ require.NoError(t, err)
+
+ // Verify that we get the watch responses first. Note that
+ // events might be spread across multiple packets.
+ eventCount := 0
+ for eventCount < count {
+ wr := <-wch
+ if wr.Err() != nil {
+ t.Fatal(fmt.Errorf("watch error: %w", wr.Err()))
+ }
+ if wr.IsProgressNotify() {
+ t.Fatal("Progress notification from unsynced client!")
+ }
+ if wr.Header.Revision != int64(count+1) {
+ t.Fatal("Incomplete watch response!")
+ }
+ eventCount += len(wr.Events)
+ }
+ // client needs to request progress notification again
+ err = client.RequestProgress(ctx)
+ require.NoError(t, err)
+ wr2 := <-wch
+ if wr2.Err() != nil {
+ t.Fatal(fmt.Errorf("watch error: %w", wr2.Err()))
+ }
+ if !wr2.IsProgressNotify() {
+ t.Fatal("Did not receive progress notification!")
+ }
+ if wr2.Header.Revision != int64(count+1) {
+ t.Fatal("Wrong revision in progress notification!")
+ }
+}
+
+func TestV3WatchProgressWaitsForSyncNoEvents(t *testing.T) {
+ if integration.ThroughProxy {
+ t.Skip("grpc proxy currently does not support requesting progress notifications")
+ }
+ integration.BeforeTest(t)
+
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
+ defer clus.Terminate(t)
+
+ client := clus.RandClient()
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ resp, err := client.Put(ctx, "bar", "1")
+ require.NoError(t, err)
+
+ wch := client.Watch(ctx, "foo", clientv3.WithRev(resp.Header.Revision))
+ // Request the progress notification on newly created watch that was not yet synced.
+ err = client.RequestProgress(ctx)
+ ticker := time.NewTicker(100 * time.Millisecond)
+ defer ticker.Stop()
+
+ require.NoError(t, err)
+ gotProgressNotification := false
+ for {
+ select {
+ case <-ticker.C:
+ err := client.RequestProgress(ctx)
+ require.NoError(t, err)
+ case resp := <-wch:
+ if resp.Err() != nil {
+ t.Fatal(fmt.Errorf("watch error: %w", resp.Err()))
+ }
+ if resp.IsProgressNotify() {
+ gotProgressNotification = true
+ }
+ }
+ if gotProgressNotification {
+ break
+ }
+ }
+ require.Truef(t, gotProgressNotification, "Expected to get progress notification")
+}
+
+// TestV3NoEventsLostOnCompact verifies that a slow watcher exits with a compacted watch response
+// if the next revision of its events has been compacted, and that no events are silently lost to the client.
+func TestV3NoEventsLostOnCompact(t *testing.T) {
+ if integration.ThroughProxy {
+ t.Skip("grpc proxy currently does not support requesting progress notifications")
+ }
+ integration.BeforeTest(t)
+ if len(gofail.List()) == 0 {
+ t.Skip("please run 'make gofail-enable' before running the test")
+ }
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
+ defer clus.Terminate(t)
+
+ client := clus.RandClient()
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ // sendLoop throughput is rate-limited to 1 event per second
+ require.NoError(t, gofail.Enable("beforeSendWatchResponse", `sleep("1s")`))
+ wch := client.Watch(ctx, "foo")
+
+ var rev int64
+ writeCount := mvcc.ChanBufLen() * 11 / 10
+ for i := 0; i < writeCount; i++ {
+ resp, err := client.Put(ctx, "foo", "bar")
+ require.NoError(t, err)
+ rev = resp.Header.Revision
+ }
+ _, err := client.Compact(ctx, rev)
+ require.NoError(t, err)
+
+ time.Sleep(time.Second)
+ require.NoError(t, gofail.Disable("beforeSendWatchResponse"))
+
+ eventCount := 0
+ compacted := false
+ for resp := range wch {
+ err = resp.Err()
+ if err != nil {
+ if !errors.Is(err, rpctypes.ErrCompacted) {
+ t.Fatalf("want watch response err %v but got %v", rpctypes.ErrCompacted, err)
+ }
+ compacted = true
+ break
+ }
+ eventCount += len(resp.Events)
+ if eventCount == writeCount {
+ break
+ }
+ }
+ assert.Truef(t, compacted, "Expected stream to get compacted, instead we got %d events out of %d events", eventCount, writeCount)
+}
diff --git a/tests/integration/v3election_grpc_test.go b/tests/integration/v3election_grpc_test.go
index d5f67dd9fc8..d0ca72b4255 100644
--- a/tests/integration/v3election_grpc_test.go
+++ b/tests/integration/v3election_grpc_test.go
@@ -20,27 +20,30 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/require"
+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
)
// TestV3ElectionCampaign checks that Campaign will not give
// simultaneous leadership to multiple campaigners.
func TestV3ElectionCampaign(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
- lease1, err1 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
+ lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
if err1 != nil {
t.Fatal(err1)
}
- lease2, err2 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
+ lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
if err2 != nil {
t.Fatal(err2)
}
- lc := toGRPC(clus.Client(0)).Election
+ lc := integration.ToGRPC(clus.Client(0)).Election
req1 := &epb.CampaignRequest{Name: []byte("foo"), Lease: lease1.ID, Value: []byte("abc")}
l1, lerr1 := lc.Campaign(context.TODO(), req1)
if lerr1 != nil {
@@ -89,11 +92,11 @@ func TestV3ElectionCampaign(t *testing.T) {
// TestV3ElectionObserve checks that an Observe stream receives
// proclamations from different leaders uninterrupted.
func TestV3ElectionObserve(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
- lc := toGRPC(clus.Client(0)).Election
+ lc := integration.ToGRPC(clus.Client(0)).Election
// observe leadership events
observec := make(chan struct{}, 1)
@@ -125,7 +128,7 @@ func TestV3ElectionObserve(t *testing.T) {
t.Fatalf("observe stream took too long to start")
}
- lease1, err1 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
+ lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
if err1 != nil {
t.Fatal(err1)
}
@@ -139,7 +142,7 @@ func TestV3ElectionObserve(t *testing.T) {
go func() {
defer close(leader2c)
- lease2, err2 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
+ lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
if err2 != nil {
t.Error(err2)
}
@@ -159,9 +162,8 @@ func TestV3ElectionObserve(t *testing.T) {
for i := 1; i < 5; i++ {
v := []byte(fmt.Sprintf("%d", i))
req := &epb.ProclaimRequest{Leader: c1.Leader, Value: v}
- if _, err := lc.Proclaim(context.TODO(), req); err != nil {
- t.Fatal(err)
- }
+ _, err := lc.Proclaim(context.TODO(), req)
+ require.NoError(t, err)
}
// start second leader
lc.Resign(context.TODO(), &epb.ResignRequest{Leader: c1.Leader})
diff --git a/tests/integration/v3lock_grpc_test.go b/tests/integration/v3lock_grpc_test.go
index 38a36369f20..f293bc1a556 100644
--- a/tests/integration/v3lock_grpc_test.go
+++ b/tests/integration/v3lock_grpc_test.go
@@ -21,25 +21,26 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
lockpb "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
+ "go.etcd.io/etcd/tests/v3/framework/integration"
)
// TestV3LockLockWaiter tests that a client will wait for a lock, then acquire it
// once it is unlocked.
func TestV3LockLockWaiter(t *testing.T) {
- BeforeTest(t)
- clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+ integration.BeforeTest(t)
+ clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
- lease1, err1 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
+ lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
if err1 != nil {
t.Fatal(err1)
}
- lease2, err2 := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
+ lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
if err2 != nil {
t.Fatal(err2)
}
- lc := toGRPC(clus.Client(0)).Lock
+ lc := integration.ToGRPC(clus.Client(0)).Lock
l1, lerr1 := lc.Lock(context.TODO(), &lockpb.LockRequest{Name: []byte("foo"), Lease: lease1.ID})
if lerr1 != nil {
t.Fatal(lerr1)
diff --git a/tests/revive.toml b/tests/revive.toml
deleted file mode 100644
index 53a58b51a77..00000000000
--- a/tests/revive.toml
+++ /dev/null
@@ -1,38 +0,0 @@
-ignoreGeneratedHeader = false
-severity = "warning"
-confidence = 0.8
-errorCode = 0
-warningCode = 0
-
-[rule.blank-imports]
-[rule.context-as-argument]
-[rule.dot-imports]
-[rule.error-return]
-[rule.error-naming]
-[rule.if-return]
-[rule.increment-decrement]
-[rule.var-declaration]
-[rule.package-comments]
-[rule.range]
-[rule.receiver-naming]
-[rule.time-naming]
-[rule.indent-error-flow]
-[rule.errorf]
-
-
-# TODO: enable following
-
-# grpcproxy context.WithValue(ctx, rpctypes.TokenFieldNameGRPC, token)
-# [rule.context-keys-type]
-
-# punctuation in error value
-# [rule.error-strings]
-
-# underscore variables
-# [rule.var-naming]
-
-# godoc
-# [rule.exported]
-
-# return unexported type
-# [rule.unexported-return]
diff --git a/tests/robustness/OWNERS b/tests/robustness/OWNERS
new file mode 100644
index 00000000000..97e39374aa2
--- /dev/null
+++ b/tests/robustness/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+ - area/robustness-testing
diff --git a/tests/robustness/README.md b/tests/robustness/README.md
new file mode 100644
index 00000000000..f8915a85d0c
--- /dev/null
+++ b/tests/robustness/README.md
@@ -0,0 +1,254 @@
+# etcd Robustness Testing
+
+This document describes the robustness testing framework for etcd, a distributed key-value store.
+The purpose of these tests is to rigorously validate that etcd maintains its [KV API guarantees] and [watch API guarantees] under a wide range of conditions and failures.
+
+[KV API guarantees]: https://etcd.io/docs/v3.6/learning/api_guarantees/#kv-apis
+[watch API guarantees]: https://etcd.io/docs/v3.6/learning/api_guarantees/#watch-apis
+
+## Robustness track record
+
+| Correctness / Consistency issue | Report | Introduced in | Discovered by | Reproducible by robustness test | Command |
+|-----------------------------------------------------------------|----------|-----------------|---------------|-------------------------------------------------|-----------------------------------|
+| Inconsistent revision caused by crash during high load [#13766] | Mar 2022 | v3.5 | User | Yes, report preceded robustness tests | `make test-robustness-issue13766` |
+| Single node cluster can lose a write on crash [#14370] | Aug 2022 | v3.4 or earlier | User | Yes, report preceded robustness tests | `make test-robustness-issue14370` |
+| Enabling auth can lead to inconsistency [#14571] | Oct 2022 | v3.4 or earlier | User | No, authorization is not covered. | |
+| Inconsistent revision caused by crash during defrag [#14685] | Nov 2022 | v3.5 | Robustness | Yes, after covering defragmentation. | `make test-robustness-issue14685` |
+| Watch progress notification not synced with stream [#15220] | Jan 2023 | v3.4 or earlier | User | Yes, after covering watch progress notification | |
+| Watch traveling back in time after network partition [#15271] | Feb 2023 | v3.4 or earlier | Robustness | Yes, after covering network partitions | `make test-robustness-issue15271` |
+| Duplicated watch event due to bug in TXN caching [#17247] | Jan 2024 | main branch | Robustness | Yes, prevented regression in v3.6 | |
+| Watch events lost during stream starvation [#17529] | Mar 2024 | v3.4 or earlier | User | Yes, after covering slow watch | `make test-robustness-issue17529` |
+| Revision decreasing caused by crash during compaction [#17780] | Apr 2024 | v3.4 or earlier | Robustness | Yes, after covering compaction | |
+| Watch dropping an event when compacting on delete [#18089] | May 2024 | v3.4 or earlier | Robustness | Yes, after covering compaction | |
+| Inconsistency when reading compacted revision in TXN [#18667] | Oct 2024 | v3.4 or earlier | User | | |
+
+[#13766]: https://github.com/etcd-io/etcd/issues/13766
+[#14370]: https://github.com/etcd-io/etcd/issues/14370
+[#14571]: https://github.com/etcd-io/etcd/issues/14571
+[#14685]: https://github.com/etcd-io/etcd/pull/14685
+[#15220]: https://github.com/etcd-io/etcd/issues/15220
+[#15271]: https://github.com/etcd-io/etcd/issues/15271
+[#17247]: https://github.com/etcd-io/etcd/issues/17247
+[#17529]: https://github.com/etcd-io/etcd/issues/17529
+[#17780]: https://github.com/etcd-io/etcd/issues/17780
+[#18089]: https://github.com/etcd-io/etcd/issues/18089
+[#18667]: https://github.com/etcd-io/etcd/issues/18667
+
+## How Robustness Tests Work
+
+Robustness tests compare etcd cluster behavior against a simplified model of its expected behavior.
+These tests cover various scenarios, including:
+
+* **Different etcd cluster setups:** Cluster sizes, configurations, and deployment topologies.
+* **Client traffic types:** Variety of key-value operations (puts, ranges, transactions) and watch patterns.
+* **Failures:** Network partitions, node crashes, disk failures, and other disruptions.
+
+**Test Procedure:**
+
+1. **Cluster Creation:** A new etcd cluster is created with the specified configuration.
+2. **Traffic and Failures:** Client traffic is generated and sent to the cluster while failures are injected.
+3. **History Collection:** All client operations and their results are recorded.
+4. **Validation:** The collected history is validated against the etcd model and a set of validators to ensure consistency and correctness.
+5. **Report Generation:** If a failure is detected, a detailed report is generated to help diagnose the issue.
+ This report includes information about the client operations and the etcd data directories.
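+
+The sketch below illustrates the core idea behind step 4: replaying a recorded operation history against a model of etcd's expected behaviour.
+It is a minimal, self-contained illustration only; the `Operation` shape and the revision-only check are simplifications invented for this example, not the actual framework code, which validates much more than monotonic revisions.
+
+```go
+package main
+
+import "fmt"
+
+// Operation is a simplified record of a client request and the response it
+// observed (illustrative shape only, not the framework's real type).
+type Operation struct {
+	Key      string
+	Value    string
+	Revision int64 // revision reported in the response header
+}
+
+// validate replays the history against a trivial model: etcd guarantees that
+// revisions never decrease, so any decrease indicates an inconsistency.
+func validate(history []Operation) error {
+	var lastRev int64
+	for i, op := range history {
+		if op.Revision < lastRev {
+			return fmt.Errorf("operation %d on %q: revision decreased from %d to %d",
+				i, op.Key, lastRev, op.Revision)
+		}
+		lastRev = op.Revision
+	}
+	return nil
+}
+
+func main() {
+	history := []Operation{
+		{Key: "foo", Value: "bar1", Revision: 167},
+		{Key: "foo", Value: "bar2", Revision: 168},
+		{Key: "foo", Value: "bar3", Revision: 167}, // e.g. a write lost after a crash
+	}
+	if err := validate(history); err != nil {
+		fmt.Println("validation failed:", err)
+	}
+}
+```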
+
+## Key Concepts
+
+### Distributed System Terminology
+
+* **Consensus:** A process where nodes in a distributed system agree on a single data value. Etcd uses the Raft algorithm to achieve consensus.
+* **Strict vs Eventual consistency:**
+ * **Strict Consistency:** All components see the same data at the same time after an update.
+ * **Eventual Consistency:** Components may temporarily see different data after an update but converge to the same view eventually.
+* **Consistency Models (https://jepsen.io/consistency)**
+ * **Single-Object Consistency Models:**
+ * **Sequential Consistency:** A strong single-object model. Operations appear to take place in some total order, consistent with the order of operations on each individual process.
+ * **Linearizable Consistency:** The strongest single-object model. Operations appear to happen instantly and in order, consistent with real-time ordering.
+ * **Transactional Consistency Models**
+ * **Serializable Consistency:** A transactional model guaranteeing that transactions appear to occur in some total order. Operations within a transaction are atomic and do not interleave with other transactions. It's a multi-object property, applying to the entire system, not just individual objects.
+ * **Strict Serializable Consistency:** The strongest transactional model. Combines the total order of serializability with the real-time ordering constraints of linearizability.
+
+Etcd provides strict serializability for KV operations and eventual consistency for Watch.
+
+**Etcd Guarantees**
+
+* **Key-value API operations** https://etcd.io/docs/latest/learning/api_guarantees/#kv-apis
+* **Watch API guarantees** https://etcd.io/docs/latest/learning/api_guarantees/#watch-apis
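+
+As a hedged illustration of the KV guarantees above, the sketch below uses the Go client to perform a default (linearizable) read and an explicitly serializable read. The endpoint address is an assumption for the example; this is plain client usage, not part of the robustness framework.
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	clientv3 "go.etcd.io/etcd/client/v3"
+)
+
+func main() {
+	// The endpoint is an assumption for this example; adjust it to your cluster.
+	cli, err := clientv3.New(clientv3.Config{
+		Endpoints:   []string{"localhost:2379"},
+		DialTimeout: 5 * time.Second,
+	})
+	if err != nil {
+		panic(err)
+	}
+	defer cli.Close()
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	// Default reads are linearizable: they go through consensus and reflect
+	// all writes that completed before the read started.
+	linResp, err := cli.Get(ctx, "foo")
+	if err != nil {
+		panic(err)
+	}
+
+	// Serializable reads are served from the local member's store and may be
+	// stale, trading consistency for lower latency.
+	serResp, err := cli.Get(ctx, "foo", clientv3.WithSerializable())
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Println("linearizable read revision:", linResp.Header.Revision)
+	fmt.Println("serializable read revision:", serResp.Header.Revision)
+}
+```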
+
+### Kubernetes Integration
+
+* **[Implicit Kubernetes-ETCD Contract]:** Defines how Kubernetes uses etcd to store cluster state.
+* **ResourceVersion:** A string used by Kubernetes to track resource versions, corresponding to etcd revisions.
+* **Sharding resource types:** Kubernetes treats each resource type as a totally independent entity.
+ It allows sharding each resource type into a separate etcd cluster.
+
+[Implicit Kubernetes-ETCD Contract]: https://docs.google.com/document/d/1NUZDiJeiIH5vo_FMaTWf0JtrQKCx0kpEaIIuPoj9P6A/edit?usp=sharing
+
+## Running locally
+
+1. Build etcd with failpoints
+ ```bash
+ make gofail-enable
+ make build
+ make gofail-disable
+ ```
+2. Run the tests
+
+ ```bash
+ make test-robustness
+ ```
+
+ Optionally you can pass environment variables:
+ * `GO_TEST_FLAGS` - to pass additional arguments to `go test`.
+ It is recommended to run the tests multiple times with failfast enabled. This can be done by setting `GO_TEST_FLAGS='--count=100 --failfast'`.
+ * `EXPECT_DEBUG=true` - to get logs from the cluster.
+ * `RESULTS_DIR` - to change the location where the results report will be saved.
+ * `PERSIST_RESULTS` - to persist the results report of the test. By default this will not be persisted in the case of a successful run.
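+
+ For example, these variables can be combined in a single run: `GO_TEST_FLAGS='--count=100 --failfast' EXPECT_DEBUG=true make test-robustness` repeats the suite with failfast and cluster logs enabled.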
+
+## Re-evaluate existing report
+
+Robustness test validation is constantly changing and improving.
+Errors in the etcd model can cause false positives, so it is important to be able to re-evaluate reports after an issue has been fixed.
+
+> Note: Robustness test report format is not stable, and it's expected that not all old reports can be re-evaluated using the newest version.
+
+1. Identify location of the robustness test report.
+
+ > Note: By default a robustness test report is only generated for a failed test.
+
+ * **For local runs:** find the `Saving robustness test report` log line; in the following example the report path is `/tmp/TestRobustnessExploratory_Etcd_HighTraffic_ClusterOfSize1`:
+ ```
+ logger.go:146: 2024-04-08T09:45:27.734+0200 INFO Saving robustness test report {"path": "/tmp/TestRobustnessExploratory_Etcd_HighTraffic_ClusterOfSize1"}
+ ```
+
+ * **For remote runs on CI:** you need to go to the [Prow Dashboard](https://prow.k8s.io/job-history/gs/kubernetes-jenkins/logs/ci-etcd-robustness-amd64), go to a build, download one of the Artifacts (`artifacts/results.zip`), and extract it locally.
+
+ ![Prow job run page](readme-images/prow_job.png)
+
+ ![Prow job artifacts run page](readme-images/prow_job_artifacts_page.png)
+
+ ![Prow job artifacts run page artifacts dir](readme-images/prow_job_artifacts_dir_page.png)
+
+ Each directory is prefixed with `TestRobustness` and contains a robustness test report.
+
+ ![artifact archive](readme-images/artifact_archive.png)
+
+ Pick one of the directories within the archive corresponding to the failed test scenario.
+ The largest directory by size usually corresponds to the failed scenario.
+ If you are not sure, you may check which scenario failed in the test logs.
+
+2. Copy the robustness report directory into the `testdata` directory.
+
+ The `testdata` directory can contain multiple robustness test reports.
+ The name of the report directory doesn't matter, as long as it's unique and doesn't clash with reports already present in the `testdata` directory.
+ For example, the path of the `history.html` file could look like `$REPO_ROOT/tests/robustness/testdata/v3.5_failure_24_April/history.html`.
+
+3. Run `make test-robustness-reports` to validate all reports in the `testdata` directory.
+
+## Analysing failure
+
+If a robustness test fails, we want to analyse the report to confirm whether the issue is on the etcd side. The location of the report directory
+is mentioned in the `Saving robustness test report` log line. Logs from report generation should look like:
+```
+ logger.go:146: 2024-05-08T10:42:54.429+0200 INFO Saving robustness test report {"path": "/tmp/TestRobustnessRegression_Issue14370/1715157774429416550"}
+ logger.go:146: 2024-05-08T10:42:54.429+0200 INFO Saving member data dir {"member": "TestRobustnessRegressionIssue14370-test-0", "path": "/tmp/TestRobustnessRegression_Issue14370/1715157774429416550/server-TestRobustnessRegressionIssue14370-test-0"}
+ logger.go:146: 2024-05-08T10:42:54.430+0200 INFO no watch operations for client, skip persisting {"client-id": 1}
+ logger.go:146: 2024-05-08T10:42:54.430+0200 INFO Saving operation history {"path": "/tmp/TestRobustnessRegression_Issue14370/1715157774429416550/client-1/operations.json"}
+ logger.go:146: 2024-05-08T10:42:54.430+0200 INFO Saving watch operations {"path": "/tmp/TestRobustnessRegression_Issue14370/1715157774429416550/client-2/watch.json"}
+ logger.go:146: 2024-05-08T10:42:54.431+0200 INFO no KV operations for client, skip persisting {"client-id": 2}
+ logger.go:146: 2024-05-08T10:42:54.431+0200 INFO no watch operations for client, skip persisting {"client-id": 3}
+ logger.go:146: 2024-05-08T10:42:54.431+0200 INFO Saving operation history {"path": "/tmp/TestRobustnessRegression_Issue14370/1715157774429416550/client-3/operations.json"}
+ logger.go:146: 2024-05-08T10:42:54.433+0200 INFO no watch operations for client, skip persisting {"client-id": 4}
+ logger.go:146: 2024-05-08T10:42:54.433+0200 INFO Saving operation history {"path": "/tmp/TestRobustnessRegression_Issue14370/1715157774429416550/client-4/operations.json"}
+ logger.go:146: 2024-05-08T10:42:54.434+0200 INFO no watch operations for client, skip persisting {"client-id": 5}
+ logger.go:146: 2024-05-08T10:42:54.434+0200 INFO Saving operation history {"path": "/tmp/TestRobustnessRegression_Issue14370/1715157774429416550/client-5/operations.json"}
+ logger.go:146: 2024-05-08T10:42:54.435+0200 INFO no watch operations for client, skip persisting {"client-id": 6}
+ logger.go:146: 2024-05-08T10:42:54.435+0200 INFO Saving operation history {"path": "/tmp/TestRobustnessRegression_Issue14370/1715157774429416550/client-6/operations.json"}
+ logger.go:146: 2024-05-08T10:42:54.437+0200 INFO no watch operations for client, skip persisting {"client-id": 7}
+ logger.go:146: 2024-05-08T10:42:54.437+0200 INFO Saving operation history {"path": "/tmp/TestRobustnessRegression_Issue14370/1715157774429416550/client-7/operations.json"}
+ logger.go:146: 2024-05-08T10:42:54.438+0200 INFO no watch operations for client, skip persisting {"client-id": 8}
+ logger.go:146: 2024-05-08T10:42:54.438+0200 INFO Saving operation history {"path": "/tmp/TestRobustnessRegression_Issue14370/1715157774429416550/client-8/operations.json"}
+ logger.go:146: 2024-05-08T10:42:54.439+0200 INFO no watch operations for client, skip persisting {"client-id": 9}
+ logger.go:146: 2024-05-08T10:42:54.439+0200 INFO Saving operation history {"path": "/tmp/TestRobustnessRegression_Issue14370/1715157774429416550/client-9/operations.json"}
+ logger.go:146: 2024-05-08T10:42:54.440+0200 INFO no watch operations for client, skip persisting {"client-id": 10}
+ logger.go:146: 2024-05-08T10:42:54.440+0200 INFO Saving operation history {"path": "/tmp/TestRobustnessRegression_Issue14370/1715157774429416550/client-10/operations.json"}
+ logger.go:146: 2024-05-08T10:42:54.441+0200 INFO Saving visualization {"path": "/tmp/TestRobustnessRegression_Issue14370/1715157774429416550/history.html"}
+```
+
+The report follows this hierarchy:
+* `server-*` - etcd server data directories, can be used to verify disk/memory corruption.
+ * `member`
+ * `wal` - Write Ahead Log (WAL) directory, which can be analysed using the `etcd-dump-logs` command line tool available in the `tools` directory.
+ * `snap` - Snapshot directory, including the bbolt database file `db`, which can be analysed using the `etcd-dump-db` command line tool available in the `tools` directory.
+* `client-*` - Client request and response dumps in json format.
+ * `watch.json` - Watch requests and responses, can be used to validate [watch API guarantees].
+ * `operations.json` - KV operation history
+* `history.html` - Visualization of KV operation history, can be used to validate [KV API guarantees].
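+
+To get a quick overview of what a particular report contains, you can list its files with a small helper like the hypothetical sketch below (not part of the test suite); pass it the report directory printed in the `Saving robustness test report` log line:
+
+```go
+package main
+
+import (
+    "fmt"
+    "io/fs"
+    "log"
+    "os"
+    "path/filepath"
+)
+
+func main() {
+    // Report directory, e.g. /tmp/TestRobustnessRegression_Issue14370/<timestamp>.
+    root := os.Args[1]
+    err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
+        if err != nil {
+            return err
+        }
+        if !d.IsDir() {
+            // Prints server-*/member/{wal,snap}/..., client-*/{operations,watch}.json and history.html.
+            rel, _ := filepath.Rel(root, path)
+            fmt.Println(rel)
+        }
+        return nil
+    })
+    if err != nil {
+        log.Fatal(err)
+    }
+}
+```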
+
+### Example analysis of linearization issue
+
+Let's reproduce and analyse the robustness test report for issue [#14370].
+To reproduce the issue yourself, run `make test-robustness-issue14370`.
+After a couple of tries the robustness tests should fail with the log `Linearization failed` and save a report locally.
+
+Example:
+```
+ logger.go:146: 2024-05-08T10:42:53.379+0200 INFO Validating linearizable operations {"timeout": "5m0s"}
+ logger.go:146: 2024-05-08T10:42:54.429+0200 ERROR Linearization failed {"duration": "1.050105973s"}
+ validate.go:39: Failed linearization, skipping further validation
+ logger.go:146: 2024-05-08T10:42:54.429+0200 INFO Saving robustness test report {"path": "/tmp/TestRobustnessRegression_Issue14370/1715157774429416550"}
+ ...
+ logger.go:146: 2024-05-08T10:42:54.441+0200 INFO Saving visualization {"path": "/tmp/TestRobustnessRegression_Issue14370/1715157774429416550/history.html"}
+```
+
+Linearization issues are easiest to analyse via the history visualization.
+Open the `/tmp/TestRobustnessRegression_Issue14370/1715157774429416550/history.html` file in your browser.
+Jump to the error in linearization by clicking `[ jump to first error ]` at the top of the page.
+
+You should see a graph similar to the one in the image below.
+![issue14370](readme-images/issue14370.png)
+
+The last correct request (connected with a grey line) is a `Put` request that succeeded and got revision `168`.
+All following requests are invalid (connected with a red line) as they have revision `167`.
+Etcd guarantees that the revision is non-decreasing, so this shows a bug in etcd, as there is no valid way for the revision to decrease.
+This is consistent with the root cause of [#14370], which was an issue with a process crash causing the last write to be lost.
+
+[#14370]: https://github.com/etcd-io/etcd/issues/14370
+
+### Example analysis of watch issue
+
+Let's reproduce and analyse the robustness test report for issue [#15271].
+To reproduce the issue yourself, run `make test-robustness-issue15271`.
+After a couple of tries the robustness tests should fail with the log `Broke watch guarantee` and save a report locally.
+
+Example:
+```
+ logger.go:146: 2024-05-08T10:50:11.301+0200 INFO Validating linearizable operations {"timeout": "5m0s"}
+ logger.go:146: 2024-05-08T10:50:15.754+0200 INFO Linearization success {"duration": "4.453346487s"}
+ logger.go:146: 2024-05-08T10:50:15.754+0200 INFO Validating watch
+ logger.go:146: 2024-05-08T10:50:15.849+0200 ERROR Broke watch guarantee {"guarantee": "ordered", "client": 4, "revision": 3}
+ validate.go:45: Failed validating watch history, err: broke Ordered - events are ordered by revision; an event will never appear on a watch if it precedes an event in time that has already been posted
+ logger.go:146: 2024-05-08T10:50:15.849+0200 INFO Validating serializable operations
+ logger.go:146: 2024-05-08T10:50:15.866+0200 INFO Saving robustness test report {"path": "/tmp/TestRobustnessRegression_Issue15271/1715158215866033806"}
+```
+
+Watch issues are easiest to analyse by reading the recorded watch history.
+The watch history is recorded for each client in a separate subdirectory under `/tmp/TestRobustnessRegression_Issue15271/1715158215866033806`.
+Open `watch.json` for the client mentioned in the `Broke watch guarantee` log.
+For client `4`, which broke the watch guarantee, open `/tmp/TestRobustnessRegression_Issue15271/1715158215866033806/client-4/watch.json`.
+
+Each line consists of a JSON blob corresponding to a single watch request sent by the client.
+Look for events with `Revision` equal to the revision mentioned in the first `Broke watch guarantee` log, in this case look for `"Revision":3,`.
+You should see watch responses where the `Revision` decreases, like the ones below:
+```
+{"Events":[{"Type":"put-operation","Key":"key5","Value":{"Value":"793","Hash":0},"Revision":799,"IsCreate":false,"PrevValue":null}],"IsProgressNotify":false,"Revision":799,"Time":3202907249,"Error":""}
+{"Events":[{"Type":"put-operation","Key":"key4","Value":{"Value":"1","Hash":0},"Revision":3,"IsCreate":true,"PrevValue":null}, ...
+```
+
+Up to the first response above, the `Revision` of events only increased, reaching a value of `799`.
+However, the following line includes an event with `Revision` equal to `3`.
+If you follow the revisions throughout the file you should notice that the watch replayed revisions a second time.
+This is incorrect and breaks the `Ordered` property of the [watch API guarantees].
+This is consistent with the root cause of [#15271], where a member reconnecting to the cluster resends revisions.
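+
+If the file is large, scanning it programmatically can be faster than reading it by hand.
+The sketch below is hypothetical and assumes the one-JSON-object-per-line layout of the responses shown above; it decodes only the `Revision` fields and reports the first event whose revision goes backwards:
+
+```go
+package main
+
+import (
+    "bufio"
+    "encoding/json"
+    "fmt"
+    "log"
+    "os"
+)
+
+// watchResponse mirrors just the fields of the dump that are needed here.
+type watchResponse struct {
+    Revision int64
+    Events   []struct{ Revision int64 }
+}
+
+func main() {
+    f, err := os.Open(os.Args[1]) // e.g. client-4/watch.json
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer f.Close()
+
+    var last int64
+    scanner := bufio.NewScanner(f)
+    scanner.Buffer(make([]byte, 0, 1024*1024), 64*1024*1024) // individual lines can be very long
+    for line := 1; scanner.Scan(); line++ {
+        var resp watchResponse
+        if err := json.Unmarshal(scanner.Bytes(), &resp); err != nil {
+            log.Fatalf("line %d: %v", line, err)
+        }
+        for _, ev := range resp.Events {
+            if ev.Revision < last {
+                fmt.Printf("line %d: event revision %d is lower than previously observed revision %d\n", line, ev.Revision, last)
+                return
+            }
+            last = ev.Revision
+        }
+    }
+    if err := scanner.Err(); err != nil {
+        log.Fatal(err)
+    }
+}
+```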
+
+[#15271]: https://github.com/etcd-io/etcd/issues/15271
diff --git a/tests/robustness/client/client.go b/tests/robustness/client/client.go
new file mode 100644
index 00000000000..2a1e76ce030
--- /dev/null
+++ b/tests/robustness/client/client.go
@@ -0,0 +1,347 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/robustness/identity"
+ "go.etcd.io/etcd/tests/v3/robustness/model"
+ "go.etcd.io/etcd/tests/v3/robustness/report"
+)
+
+// RecordingClient provides an etcd-like client (with a different interface than
+// clientv3.Client) that records all requests and responses made. It doesn't
+// allow concurrent requests, to conform to model.AppendableHistory requirements.
+type RecordingClient struct {
+ ID int
+ client clientv3.Client
+ // baseTime is used with time-measuring operations (time.Since) to get monotonic clock readings,
+ // see https://github.com/golang/go/blob/master/src/time/time.go#L17
+ baseTime time.Time
+
+ watchMux sync.Mutex
+ watchOperations []model.WatchOperation
+ // kvMux ensures the order of request appending.
+ kvMux sync.Mutex
+ kvOperations *model.AppendableHistory
+}
+
+type TimedWatchEvent struct {
+ model.WatchEvent
+ Time time.Duration
+}
+
+func NewRecordingClient(endpoints []string, ids identity.Provider, baseTime time.Time) (*RecordingClient, error) {
+ cc, err := clientv3.New(clientv3.Config{
+ Endpoints: endpoints,
+ Logger: zap.NewNop(),
+ DialKeepAliveTime: 10 * time.Second,
+ DialKeepAliveTimeout: 100 * time.Millisecond,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &RecordingClient{
+ ID: ids.NewClientID(),
+ client: *cc,
+ kvOperations: model.NewAppendableHistory(ids),
+ baseTime: baseTime,
+ }, nil
+}
+
+func (c *RecordingClient) Close() error {
+ return c.client.Close()
+}
+
+func (c *RecordingClient) Report() report.ClientReport {
+ return report.ClientReport{
+ ClientID: c.ID,
+ KeyValue: c.kvOperations.History.Operations(),
+ Watch: c.watchOperations,
+ }
+}
+
+func (c *RecordingClient) Get(ctx context.Context, key string, revision int64) (kv *mvccpb.KeyValue, rev int64, err error) {
+ resp, err := c.Range(ctx, key, "", revision, 0)
+ if err != nil {
+ return nil, 0, err
+ }
+ if len(resp.Kvs) == 1 {
+ kv = resp.Kvs[0]
+ }
+ return kv, resp.Header.Revision, nil
+}
+
+func (c *RecordingClient) Range(ctx context.Context, start, end string, revision, limit int64) (*clientv3.GetResponse, error) {
+ ops := []clientv3.OpOption{}
+ if end != "" {
+ ops = append(ops, clientv3.WithRange(end))
+ }
+ if revision != 0 {
+ ops = append(ops, clientv3.WithRev(revision))
+ }
+ if limit != 0 {
+ ops = append(ops, clientv3.WithLimit(limit))
+ }
+ c.kvMux.Lock()
+ defer c.kvMux.Unlock()
+ callTime := time.Since(c.baseTime)
+ resp, err := c.client.Get(ctx, start, ops...)
+ returnTime := time.Since(c.baseTime)
+ c.kvOperations.AppendRange(start, end, revision, limit, callTime, returnTime, resp, err)
+ return resp, err
+}
+
+func (c *RecordingClient) Put(ctx context.Context, key, value string) (*clientv3.PutResponse, error) {
+ c.kvMux.Lock()
+ defer c.kvMux.Unlock()
+ callTime := time.Since(c.baseTime)
+ resp, err := c.client.Put(ctx, key, value)
+ returnTime := time.Since(c.baseTime)
+ c.kvOperations.AppendPut(key, value, callTime, returnTime, resp, err)
+ return resp, err
+}
+
+func (c *RecordingClient) Delete(ctx context.Context, key string) (*clientv3.DeleteResponse, error) {
+ c.kvMux.Lock()
+ defer c.kvMux.Unlock()
+ callTime := time.Since(c.baseTime)
+ resp, err := c.client.Delete(ctx, key)
+ returnTime := time.Since(c.baseTime)
+ c.kvOperations.AppendDelete(key, callTime, returnTime, resp, err)
+ return resp, err
+}
+
+func (c *RecordingClient) Txn(ctx context.Context, conditions []clientv3.Cmp, onSuccess []clientv3.Op, onFailure []clientv3.Op) (*clientv3.TxnResponse, error) {
+ txn := c.client.Txn(ctx).If(
+ conditions...,
+ ).Then(
+ onSuccess...,
+ ).Else(
+ onFailure...,
+ )
+ c.kvMux.Lock()
+ defer c.kvMux.Unlock()
+ callTime := time.Since(c.baseTime)
+ resp, err := txn.Commit()
+ returnTime := time.Since(c.baseTime)
+ c.kvOperations.AppendTxn(conditions, onSuccess, onFailure, callTime, returnTime, resp, err)
+ return resp, err
+}
+
+func (c *RecordingClient) LeaseGrant(ctx context.Context, ttl int64) (*clientv3.LeaseGrantResponse, error) {
+ c.kvMux.Lock()
+ defer c.kvMux.Unlock()
+ callTime := time.Since(c.baseTime)
+ resp, err := c.client.Lease.Grant(ctx, ttl)
+ returnTime := time.Since(c.baseTime)
+ c.kvOperations.AppendLeaseGrant(callTime, returnTime, resp, err)
+ return resp, err
+}
+
+func (c *RecordingClient) LeaseRevoke(ctx context.Context, leaseID int64) (*clientv3.LeaseRevokeResponse, error) {
+ c.kvMux.Lock()
+ defer c.kvMux.Unlock()
+ callTime := time.Since(c.baseTime)
+ resp, err := c.client.Lease.Revoke(ctx, clientv3.LeaseID(leaseID))
+ returnTime := time.Since(c.baseTime)
+ c.kvOperations.AppendLeaseRevoke(leaseID, callTime, returnTime, resp, err)
+ return resp, err
+}
+
+func (c *RecordingClient) PutWithLease(ctx context.Context, key string, value string, leaseID int64) (*clientv3.PutResponse, error) {
+ opts := clientv3.WithLease(clientv3.LeaseID(leaseID))
+ c.kvMux.Lock()
+ defer c.kvMux.Unlock()
+ callTime := time.Since(c.baseTime)
+ resp, err := c.client.Put(ctx, key, value, opts)
+ returnTime := time.Since(c.baseTime)
+ c.kvOperations.AppendPutWithLease(key, value, leaseID, callTime, returnTime, resp, err)
+ return resp, err
+}
+
+func (c *RecordingClient) Defragment(ctx context.Context) (*clientv3.DefragmentResponse, error) {
+ c.kvMux.Lock()
+ defer c.kvMux.Unlock()
+ callTime := time.Since(c.baseTime)
+ resp, err := c.client.Defragment(ctx, c.client.Endpoints()[0])
+ returnTime := time.Since(c.baseTime)
+ c.kvOperations.AppendDefragment(callTime, returnTime, resp, err)
+ return resp, err
+}
+
+func (c *RecordingClient) Compact(ctx context.Context, rev int64) (*clientv3.CompactResponse, error) {
+ c.kvMux.Lock()
+ defer c.kvMux.Unlock()
+ callTime := time.Since(c.baseTime)
+ resp, err := c.client.Compact(ctx, rev)
+ returnTime := time.Since(c.baseTime)
+ c.kvOperations.AppendCompact(rev, callTime, returnTime, resp, err)
+ return resp, err
+}
+
+func (c *RecordingClient) MemberList(ctx context.Context, opts ...clientv3.OpOption) (*clientv3.MemberListResponse, error) {
+ c.kvMux.Lock()
+ defer c.kvMux.Unlock()
+ resp, err := c.client.MemberList(ctx, opts...)
+ return resp, err
+}
+
+func (c *RecordingClient) MemberAdd(ctx context.Context, peerAddrs []string) (*clientv3.MemberAddResponse, error) {
+ c.kvMux.Lock()
+ defer c.kvMux.Unlock()
+ resp, err := c.client.MemberAdd(ctx, peerAddrs)
+ return resp, err
+}
+
+func (c *RecordingClient) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*clientv3.MemberAddResponse, error) {
+ c.kvMux.Lock()
+ defer c.kvMux.Unlock()
+ resp, err := c.client.MemberAddAsLearner(ctx, peerAddrs)
+ return resp, err
+}
+
+func (c *RecordingClient) MemberRemove(ctx context.Context, id uint64) (*clientv3.MemberRemoveResponse, error) {
+ c.kvMux.Lock()
+ defer c.kvMux.Unlock()
+ resp, err := c.client.MemberRemove(ctx, id)
+ return resp, err
+}
+
+func (c *RecordingClient) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*clientv3.MemberUpdateResponse, error) {
+ c.kvMux.Lock()
+ defer c.kvMux.Unlock()
+ resp, err := c.client.MemberUpdate(ctx, id, peerAddrs)
+ return resp, err
+}
+
+func (c *RecordingClient) MemberPromote(ctx context.Context, id uint64) (*clientv3.MemberPromoteResponse, error) {
+ c.kvMux.Lock()
+ defer c.kvMux.Unlock()
+ resp, err := c.client.MemberPromote(ctx, id)
+ return resp, err
+}
+
+func (c *RecordingClient) Status(ctx context.Context, endpoint string) (*clientv3.StatusResponse, error) {
+ c.kvMux.Lock()
+ defer c.kvMux.Unlock()
+ resp, err := c.client.Status(ctx, endpoint)
+ return resp, err
+}
+
+func (c *RecordingClient) Endpoints() []string {
+ return c.client.Endpoints()
+}
+
+func (c *RecordingClient) Watch(ctx context.Context, key string, rev int64, withPrefix bool, withProgressNotify bool, withPrevKV bool) clientv3.WatchChan {
+ request := model.WatchRequest{
+ Key: key,
+ Revision: rev,
+ WithPrefix: withPrefix,
+ WithProgressNotify: withProgressNotify,
+ WithPrevKV: withPrevKV,
+ }
+ return c.watch(ctx, request)
+}
+
+func (c *RecordingClient) watch(ctx context.Context, request model.WatchRequest) clientv3.WatchChan {
+ ops := []clientv3.OpOption{}
+ if request.WithPrefix {
+ ops = append(ops, clientv3.WithPrefix())
+ }
+ if request.Revision != 0 {
+ ops = append(ops, clientv3.WithRev(request.Revision))
+ }
+ if request.WithProgressNotify {
+ ops = append(ops, clientv3.WithProgressNotify())
+ }
+ if request.WithPrevKV {
+ ops = append(ops, clientv3.WithPrevKV())
+ }
+ respCh := make(chan clientv3.WatchResponse)
+
+ c.watchMux.Lock()
+ c.watchOperations = append(c.watchOperations, model.WatchOperation{
+ Request: request,
+ Responses: []model.WatchResponse{},
+ })
+ index := len(c.watchOperations) - 1
+ c.watchMux.Unlock()
+
+ go func() {
+ defer close(respCh)
+ for r := range c.client.Watch(ctx, request.Key, ops...) {
+ c.watchOperations[index].Responses = append(c.watchOperations[index].Responses, ToWatchResponse(r, c.baseTime))
+ select {
+ case respCh <- r:
+ case <-ctx.Done():
+ return
+ }
+ }
+ }()
+ return respCh
+}
+
+func (c *RecordingClient) RequestProgress(ctx context.Context) error {
+ return c.client.RequestProgress(ctx)
+}
+
+func ToWatchResponse(r clientv3.WatchResponse, baseTime time.Time) model.WatchResponse {
+ // using time.Since time-measuring operation to get monotonic clock reading
+ // see https://github.com/golang/go/blob/master/src/time/time.go#L17
+ resp := model.WatchResponse{Time: time.Since(baseTime)}
+ for _, event := range r.Events {
+ resp.Events = append(resp.Events, toWatchEvent(*event))
+ }
+ resp.IsProgressNotify = r.IsProgressNotify()
+ resp.Revision = r.Header.Revision
+ err := r.Err()
+ if err != nil {
+ resp.Error = r.Err().Error()
+ }
+ return resp
+}
+
+func toWatchEvent(event clientv3.Event) (watch model.WatchEvent) {
+ watch.Revision = event.Kv.ModRevision
+ watch.Key = string(event.Kv.Key)
+ watch.Value = model.ToValueOrHash(string(event.Kv.Value))
+
+ if event.PrevKv != nil {
+ watch.PrevValue = &model.ValueRevision{
+ Value: model.ToValueOrHash(string(event.PrevKv.Value)),
+ ModRevision: event.PrevKv.ModRevision,
+ }
+ }
+ watch.IsCreate = event.IsCreate()
+
+ switch event.Type {
+ case mvccpb.PUT:
+ watch.Type = model.PutOperation
+ case mvccpb.DELETE:
+ watch.Type = model.DeleteOperation
+ default:
+ panic(fmt.Sprintf("Unexpected event type: %s", event.Type))
+ }
+ return watch
+}
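+
+// exampleRecordingClientUsage is a hypothetical usage sketch, not referenced by
+// the tests. It illustrates that every KV call made through a RecordingClient
+// is appended to its history and later returned by Report() for validation.
+func exampleRecordingClientUsage(ctx context.Context, endpoints []string, ids identity.Provider) (report.ClientReport, error) {
+    c, err := NewRecordingClient(endpoints, ids, time.Now())
+    if err != nil {
+        return report.ClientReport{}, err
+    }
+    defer c.Close()
+    if _, err := c.Put(ctx, "example-key", "example-value"); err != nil {
+        return report.ClientReport{}, err
+    }
+    // The report contains the recorded KV operation history and watch responses.
+    return c.Report(), nil
+}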
diff --git a/tests/robustness/client/watch.go b/tests/robustness/client/watch.go
new file mode 100644
index 00000000000..c980fdfba05
--- /dev/null
+++ b/tests/robustness/client/watch.go
@@ -0,0 +1,142 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "context"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/robustness/identity"
+ "go.etcd.io/etcd/tests/v3/robustness/report"
+)
+
+func CollectClusterWatchEvents(ctx context.Context, t *testing.T, clus *e2e.EtcdProcessCluster, maxRevisionChan <-chan int64, cfg WatchConfig, baseTime time.Time, ids identity.Provider) []report.ClientReport {
+ mux := sync.Mutex{}
+ var wg sync.WaitGroup
+ reports := make([]report.ClientReport, len(clus.Procs))
+ memberMaxRevisionChans := make([]chan int64, len(clus.Procs))
+ for i, member := range clus.Procs {
+ c, err := NewRecordingClient(member.EndpointsGRPC(), ids, baseTime)
+ require.NoError(t, err)
+ memberMaxRevisionChan := make(chan int64, 1)
+ memberMaxRevisionChans[i] = memberMaxRevisionChan
+ wg.Add(1)
+ go func(i int, c *RecordingClient) {
+ defer wg.Done()
+ defer c.Close()
+ watchUntilRevision(ctx, t, c, memberMaxRevisionChan, cfg)
+ mux.Lock()
+ reports[i] = c.Report()
+ mux.Unlock()
+ }(i, c)
+ }
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ maxRevision := <-maxRevisionChan
+ for _, memberChan := range memberMaxRevisionChans {
+ memberChan <- maxRevision
+ }
+ }()
+ wg.Wait()
+ return reports
+}
+
+type WatchConfig struct {
+ RequestProgress bool
+}
+
+// watchUntilRevision watches all changes until the context is cancelled, until it has observed the revision provided via maxRevisionChan, or until maxRevisionChan is closed.
+func watchUntilRevision(ctx context.Context, t *testing.T, c *RecordingClient, maxRevisionChan <-chan int64, cfg WatchConfig) {
+ var maxRevision int64
+ var lastRevision int64 = 1
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+resetWatch:
+ for {
+ watch := c.Watch(ctx, "", lastRevision+1, true, true, false)
+ for {
+ select {
+ case <-ctx.Done():
+ if maxRevision == 0 {
+ t.Errorf("Client didn't collect all events, max revision not set")
+ }
+ if lastRevision < maxRevision {
+ t.Errorf("Client didn't collect all events, revision got %d, expected: %d", lastRevision, maxRevision)
+ }
+ return
+ case revision, ok := <-maxRevisionChan:
+ if ok {
+ maxRevision = revision
+ if lastRevision >= maxRevision {
+ cancel()
+ }
+ } else {
+ // Only cancel if maxRevision was never set.
+ if maxRevision == 0 {
+ cancel()
+ }
+ }
+ case resp, ok := <-watch:
+ if !ok {
+ t.Logf("Watch channel closed")
+ continue resetWatch
+ }
+ if cfg.RequestProgress {
+ c.RequestProgress(ctx)
+ }
+
+ if resp.Err() != nil {
+ if resp.Canceled {
+ if resp.CompactRevision > lastRevision {
+ lastRevision = resp.CompactRevision
+ }
+ continue resetWatch
+ }
+ t.Errorf("Watch stream received error, err %v", resp.Err())
+ }
+ if len(resp.Events) > 0 {
+ lastRevision = resp.Events[len(resp.Events)-1].Kv.ModRevision
+ }
+ if maxRevision != 0 && lastRevision >= maxRevision {
+ cancel()
+ }
+ }
+ }
+ }
+}
+
+func ValidateGotAtLeastOneProgressNotify(t *testing.T, reports []report.ClientReport, expectProgressNotify bool) {
+ gotProgressNotify := false
+external:
+ for _, r := range reports {
+ for _, op := range r.Watch {
+ for _, resp := range op.Responses {
+ if resp.IsProgressNotify {
+ gotProgressNotify = true
+ break external
+ }
+ }
+ }
+ }
+ if gotProgressNotify != expectProgressNotify {
+ t.Errorf("Progress notify does not match, expect: %v, got: %v", expectProgressNotify, gotProgressNotify)
+ }
+}
diff --git a/tests/robustness/failpoint/cluster.go b/tests/robustness/failpoint/cluster.go
new file mode 100644
index 00000000000..95b36260991
--- /dev/null
+++ b/tests/robustness/failpoint/cluster.go
@@ -0,0 +1,275 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package failpoint
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/expect"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/robustness/identity"
+ "go.etcd.io/etcd/tests/v3/robustness/report"
+ "go.etcd.io/etcd/tests/v3/robustness/traffic"
+)
+
+var (
+ MemberReplace Failpoint = memberReplace{}
+ MemberDowngrade Failpoint = memberDowngrade{}
+)
+
+type memberReplace struct{}
+
+func (f memberReplace) Inject(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, baseTime time.Time, ids identity.Provider) ([]report.ClientReport, error) {
+ memberID := uint64(rand.Int() % len(clus.Procs))
+ member := clus.Procs[memberID]
+ endpoints := []string{clus.Procs[(int(memberID)+1)%len(clus.Procs)].EndpointsGRPC()[0]}
+ cc, err := clientv3.New(clientv3.Config{
+ Endpoints: endpoints,
+ Logger: zap.NewNop(),
+ DialKeepAliveTime: 10 * time.Second,
+ DialKeepAliveTimeout: 100 * time.Millisecond,
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer cc.Close()
+ memberID, found, err := getID(ctx, cc, member.Config().Name)
+ if err != nil {
+ return nil, err
+ }
+ require.Truef(t, found, "Member not found")
+
+ // Need to wait health interval for cluster to accept member changes
+ time.Sleep(etcdserver.HealthInterval)
+ lg.Info("Removing member", zap.String("member", member.Config().Name))
+ _, err = cc.MemberRemove(ctx, memberID)
+ if err != nil {
+ return nil, err
+ }
+ _, found, err = getID(ctx, cc, member.Config().Name)
+ if err != nil {
+ return nil, err
+ }
+ require.Falsef(t, found, "Expected member to be removed")
+
+ for member.IsRunning() {
+ err = member.Kill()
+ if err != nil {
+ lg.Info("Sending kill signal failed", zap.Error(err))
+ }
+ err = member.Wait(ctx)
+ if err != nil && !strings.Contains(err.Error(), "unexpected exit code") {
+ lg.Info("Failed to kill the process", zap.Error(err))
+ return nil, fmt.Errorf("failed to kill the process within %s, err: %w", triggerTimeout, err)
+ }
+ }
+ lg.Info("Removing member data", zap.String("member", member.Config().Name))
+ err = os.RemoveAll(member.Config().DataDirPath)
+ if err != nil {
+ return nil, err
+ }
+
+ lg.Info("Adding member back", zap.String("member", member.Config().Name))
+ removedMemberPeerURL := member.Config().PeerURL.String()
+ for {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+ reqCtx, cancel := context.WithTimeout(ctx, time.Second)
+ _, err = cc.MemberAdd(reqCtx, []string{removedMemberPeerURL})
+ cancel()
+ if err == nil {
+ break
+ }
+ }
+ err = patchArgs(member.Config().Args, "initial-cluster-state", "existing")
+ if err != nil {
+ return nil, err
+ }
+ lg.Info("Starting member", zap.String("member", member.Config().Name))
+ err = member.Start(ctx)
+ if err != nil {
+ return nil, err
+ }
+ for {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+ _, found, err := getID(ctx, cc, member.Config().Name)
+ if err != nil {
+ continue
+ }
+ if found {
+ break
+ }
+ }
+ return nil, nil
+}
+
+func (f memberReplace) Name() string {
+ return "MemberReplace"
+}
+
+func (f memberReplace) Available(config e2e.EtcdProcessClusterConfig, member e2e.EtcdProcess, profile traffic.Profile) bool {
+ // a lower etcd version may not be able to join a cluster with higher cluster version.
+ return config.ClusterSize > 1 && (config.Version == e2e.QuorumLastVersion || member.Config().ExecPath == e2e.BinPath.Etcd)
+}
+
+type memberDowngrade struct{}
+
+func (f memberDowngrade) Inject(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, baseTime time.Time, ids identity.Provider) ([]report.ClientReport, error) {
+ v, err := e2e.GetVersionFromBinary(e2e.BinPath.Etcd)
+ if err != nil {
+ return nil, err
+ }
+ targetVersion := semver.Version{Major: v.Major, Minor: v.Minor - 1}
+ numberOfMembersToDowngrade := rand.Int()%len(clus.Procs) + 1
+ membersToDowngrade := rand.Perm(len(clus.Procs))[:numberOfMembersToDowngrade]
+ lg.Info("Test downgrading members", zap.Any("members", membersToDowngrade))
+
+ member := clus.Procs[0]
+ endpoints := []string{member.EndpointsGRPC()[0]}
+ cc, err := clientv3.New(clientv3.Config{
+ Endpoints: endpoints,
+ Logger: zap.NewNop(),
+ DialKeepAliveTime: 10 * time.Second,
+ DialKeepAliveTimeout: 100 * time.Millisecond,
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer cc.Close()
+
+ // Need to wait health interval for cluster to accept changes
+ time.Sleep(etcdserver.HealthInterval)
+ lg.Info("Enable downgrade")
+ err = enableDowngrade(ctx, cc, &targetVersion)
+ if err != nil {
+ return nil, err
+ }
+ // Need to wait health interval for cluster to prepare for downgrade
+ time.Sleep(etcdserver.HealthInterval)
+
+ for _, memberID := range membersToDowngrade {
+ member = clus.Procs[memberID]
+ lg.Info("Downgrading member", zap.String("member", member.Config().Name))
+ if err = member.Stop(); err != nil {
+ return nil, err
+ }
+ member.Config().ExecPath = e2e.BinPath.EtcdLastRelease
+ lg.Info("Restarting member", zap.String("member", member.Config().Name))
+ err = member.Start(ctx)
+ if err != nil {
+ return nil, err
+ }
+ err = verifyVersion(t, clus, member, targetVersion)
+ }
+ time.Sleep(etcdserver.HealthInterval)
+ return nil, err
+}
+
+func (f memberDowngrade) Name() string {
+ return "MemberDowngrade"
+}
+
+func (f memberDowngrade) Available(config e2e.EtcdProcessClusterConfig, member e2e.EtcdProcess, profile traffic.Profile) bool {
+ if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
+ return false
+ }
+ v, err := e2e.GetVersionFromBinary(e2e.BinPath.Etcd)
+ if err != nil {
+ panic("Failed checking etcd version binary")
+ }
+ v3_6 := semver.Version{Major: 3, Minor: 6}
+ // only current version cluster can be downgraded.
+ return v.Compare(v3_6) >= 0 && (config.Version == e2e.CurrentVersion && member.Config().ExecPath == e2e.BinPath.Etcd)
+}
+
+func getID(ctx context.Context, cc *clientv3.Client, name string) (id uint64, found bool, err error) {
+ // Ensure linearized MemberList by first making a linearized Get request from the same member.
+ // This is required for v3.4 support as it doesn't support linearized MemberList https://github.com/etcd-io/etcd/issues/18929
+ // TODO: Remove preceding Get when v3.4 is no longer supported.
+ getResp, err := cc.Get(ctx, "linearized-list-before-member-list")
+ if err != nil {
+ return 0, false, err
+ }
+ resp, err := cc.MemberList(ctx)
+ if err != nil {
+ return 0, false, err
+ }
+ if getResp.Header.MemberId != resp.Header.MemberId {
+ return 0, false, fmt.Errorf("expected Get and MemberList to be sent to the same member, got: %d and %d", getResp.Header.MemberId, resp.Header.MemberId)
+ }
+ for _, member := range resp.Members {
+ if name == member.Name {
+ return member.ID, true, nil
+ }
+ }
+ return 0, false, nil
+}
+
+func patchArgs(args []string, flag, newValue string) error {
+ for i, arg := range args {
+ if strings.Contains(arg, flag) {
+ args[i] = fmt.Sprintf("--%s=%s", flag, newValue)
+ return nil
+ }
+ }
+ return fmt.Errorf("--%s flag not found", flag)
+}
+
+func enableDowngrade(ctx context.Context, cc *clientv3.Client, targetVersion *semver.Version) error {
+ _, err := cc.Maintenance.Downgrade(ctx, clientv3.DowngradeAction(pb.DowngradeRequest_VALIDATE), targetVersion.String())
+ if err != nil {
+ return err
+ }
+ _, err = cc.Maintenance.Downgrade(ctx, clientv3.DowngradeAction(pb.DowngradeRequest_ENABLE), targetVersion.String())
+ return err
+}
+
+func verifyVersion(t *testing.T, clus *e2e.EtcdProcessCluster, member e2e.EtcdProcess, expectedVersion semver.Version) error {
+ var err error
+ expected := fmt.Sprintf(`"etcdserver":"%d.%d\..*"etcdcluster":"%d\.%d\.`, expectedVersion.Major, expectedVersion.Minor, expectedVersion.Major, expectedVersion.Minor)
+ for i := 0; i < 35; i++ {
+ if err = e2e.CURLGetFromMember(clus, member, e2e.CURLReq{Endpoint: "/version", Expected: expect.ExpectedResponse{Value: expected, IsRegularExpr: true}}); err != nil {
+ t.Logf("#%d: v3 is not ready yet (%v)", i, err)
+ time.Sleep(200 * time.Millisecond)
+ continue
+ }
+ break
+ }
+ if err != nil {
+ return fmt.Errorf("failed to verify version, expected %v got (%w)", expected, err)
+ }
+ return nil
+}
diff --git a/tests/robustness/failpoint/failpoint.go b/tests/robustness/failpoint/failpoint.go
new file mode 100644
index 00000000000..17c0d11b8e7
--- /dev/null
+++ b/tests/robustness/failpoint/failpoint.go
@@ -0,0 +1,148 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package failpoint
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "testing"
+ "time"
+
+ "go.uber.org/zap"
+ healthpb "google.golang.org/grpc/health/grpc_health_v1"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/robustness/identity"
+ "go.etcd.io/etcd/tests/v3/robustness/report"
+ "go.etcd.io/etcd/tests/v3/robustness/traffic"
+)
+
+const (
+ triggerTimeout = time.Minute
+)
+
+var allFailpoints = []Failpoint{
+ KillFailpoint, BeforeCommitPanic, AfterCommitPanic, RaftBeforeSavePanic, RaftAfterSavePanic,
+ DefragBeforeCopyPanic, DefragBeforeRenamePanic, BackendBeforePreCommitHookPanic, BackendAfterPreCommitHookPanic,
+ BackendBeforeStartDBTxnPanic, BackendAfterStartDBTxnPanic, BackendBeforeWritebackBufPanic,
+ BackendAfterWritebackBufPanic, CompactBeforeCommitScheduledCompactPanic, CompactAfterCommitScheduledCompactPanic,
+ CompactBeforeSetFinishedCompactPanic, CompactAfterSetFinishedCompactPanic, CompactBeforeCommitBatchPanic,
+ CompactAfterCommitBatchPanic, RaftBeforeLeaderSendPanic, BlackholePeerNetwork, DelayPeerNetwork,
+ RaftBeforeFollowerSendPanic, RaftBeforeApplySnapPanic, RaftAfterApplySnapPanic, RaftAfterWALReleasePanic,
+ RaftBeforeSaveSnapPanic, RaftAfterSaveSnapPanic, BlackholeUntilSnapshot,
+ BeforeApplyOneConfChangeSleep,
+ MemberReplace,
+ MemberDowngrade,
+ DropPeerNetwork,
+ RaftBeforeSaveSleep,
+ RaftAfterSaveSleep,
+ ApplyBeforeOpenSnapshot,
+ SleepBeforeSendWatchResponse,
+}
+
+func PickRandom(clus *e2e.EtcdProcessCluster, profile traffic.Profile) (Failpoint, error) {
+ availableFailpoints := make([]Failpoint, 0, len(allFailpoints))
+ for _, failpoint := range allFailpoints {
+ err := Validate(clus, failpoint, profile)
+ if err != nil {
+ continue
+ }
+ availableFailpoints = append(availableFailpoints, failpoint)
+ }
+ if len(availableFailpoints) == 0 {
+ return nil, fmt.Errorf("no available failpoints")
+ }
+ return availableFailpoints[rand.Int()%len(availableFailpoints)], nil
+}
+
+func Validate(clus *e2e.EtcdProcessCluster, failpoint Failpoint, profile traffic.Profile) error {
+ for _, proc := range clus.Procs {
+ if !failpoint.Available(*clus.Cfg, proc, profile) {
+ return fmt.Errorf("failpoint %q not available on %s", failpoint.Name(), proc.Config().Name)
+ }
+ }
+ return nil
+}
+
+func Inject(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, failpoint Failpoint, baseTime time.Time, ids identity.Provider) (*report.FailpointReport, error) {
+ ctx, cancel := context.WithTimeout(ctx, triggerTimeout)
+ defer cancel()
+ var err error
+
+ if err = verifyClusterHealth(ctx, t, clus); err != nil {
+ return nil, fmt.Errorf("failed to verify cluster health before failpoint injection, err: %w", err)
+ }
+ lg.Info("Triggering failpoint", zap.String("failpoint", failpoint.Name()))
+ start := time.Since(baseTime)
+ clientReport, err := failpoint.Inject(ctx, t, lg, clus, baseTime, ids)
+ if err != nil {
+ lg.Error("Failed to trigger failpoint", zap.String("failpoint", failpoint.Name()), zap.Error(err))
+ return nil, fmt.Errorf("failed triggering failpoint, err: %w", err)
+ }
+ if err = verifyClusterHealth(ctx, t, clus); err != nil {
+ return nil, fmt.Errorf("failed to verify cluster health after failpoint injection, err: %w", err)
+ }
+ lg.Info("Finished triggering failpoint", zap.String("failpoint", failpoint.Name()))
+ end := time.Since(baseTime)
+
+ return &report.FailpointReport{
+ FailpointInjection: report.FailpointInjection{
+ Start: start,
+ End: end,
+ Name: failpoint.Name(),
+ },
+ Client: clientReport,
+ }, nil
+}
+
+func verifyClusterHealth(ctx context.Context, _ *testing.T, clus *e2e.EtcdProcessCluster) error {
+ for i := 0; i < len(clus.Procs); i++ {
+ clusterClient, err := clientv3.New(clientv3.Config{
+ Endpoints: clus.Procs[i].EndpointsGRPC(),
+ Logger: zap.NewNop(),
+ DialKeepAliveTime: 10 * time.Second,
+ DialKeepAliveTimeout: 100 * time.Millisecond,
+ })
+ if err != nil {
+ return fmt.Errorf("Error creating client for cluster %s: %w", clus.Procs[i].Config().Name, err)
+ }
+ defer clusterClient.Close()
+
+ cli := healthpb.NewHealthClient(clusterClient.ActiveConnection())
+ resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{})
+ if err != nil {
+ return fmt.Errorf("Error checking member %s health: %w", clus.Procs[i].Config().Name, err)
+ }
+ if resp.Status != healthpb.HealthCheckResponse_SERVING {
+ return fmt.Errorf("Member %s health status expected %s, got %s",
+ clus.Procs[i].Config().Name,
+ healthpb.HealthCheckResponse_SERVING,
+ resp.Status)
+ }
+ }
+ return nil
+}
+
+type Failpoint interface {
+ Inject(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, baseTime time.Time, ids identity.Provider) ([]report.ClientReport, error)
+ Name() string
+ AvailabilityChecker
+}
+
+type AvailabilityChecker interface {
+ Available(e2e.EtcdProcessClusterConfig, e2e.EtcdProcess, traffic.Profile) bool
+}
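+
+// noopFailpoint is a hypothetical, minimal example of the Failpoint interface
+// above; it is not registered in allFailpoints. Real implementations live in
+// kill.go, gofail.go, network.go and cluster.go.
+type noopFailpoint struct{}
+
+func (f noopFailpoint) Inject(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, baseTime time.Time, ids identity.Provider) ([]report.ClientReport, error) {
+    lg.Info("Injecting nothing", zap.String("failpoint", f.Name()))
+    return nil, nil
+}
+
+func (f noopFailpoint) Name() string { return "Noop" }
+
+func (f noopFailpoint) Available(e2e.EtcdProcessClusterConfig, e2e.EtcdProcess, traffic.Profile) bool {
+    return true
+}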
diff --git a/tests/robustness/failpoint/gofail.go b/tests/robustness/failpoint/gofail.go
new file mode 100644
index 00000000000..3ce2ff39a53
--- /dev/null
+++ b/tests/robustness/failpoint/gofail.go
@@ -0,0 +1,249 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package failpoint
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "strings"
+ "testing"
+ "time"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/robustness/identity"
+ "go.etcd.io/etcd/tests/v3/robustness/report"
+ "go.etcd.io/etcd/tests/v3/robustness/traffic"
+)
+
+var (
+ DefragBeforeCopyPanic Failpoint = goPanicFailpoint{"defragBeforeCopy", triggerDefrag{}, AnyMember}
+ DefragBeforeRenamePanic Failpoint = goPanicFailpoint{"defragBeforeRename", triggerDefrag{}, AnyMember}
+ BeforeCommitPanic Failpoint = goPanicFailpoint{"beforeCommit", nil, AnyMember}
+ AfterCommitPanic Failpoint = goPanicFailpoint{"afterCommit", nil, AnyMember}
+ RaftBeforeSavePanic Failpoint = goPanicFailpoint{"raftBeforeSave", nil, AnyMember}
+ RaftAfterSavePanic Failpoint = goPanicFailpoint{"raftAfterSave", nil, AnyMember}
+ BackendBeforePreCommitHookPanic Failpoint = goPanicFailpoint{"commitBeforePreCommitHook", nil, AnyMember}
+ BackendAfterPreCommitHookPanic Failpoint = goPanicFailpoint{"commitAfterPreCommitHook", nil, AnyMember}
+ BackendBeforeStartDBTxnPanic Failpoint = goPanicFailpoint{"beforeStartDBTxn", nil, AnyMember}
+ BackendAfterStartDBTxnPanic Failpoint = goPanicFailpoint{"afterStartDBTxn", nil, AnyMember}
+ BackendBeforeWritebackBufPanic Failpoint = goPanicFailpoint{"beforeWritebackBuf", nil, AnyMember}
+ BackendAfterWritebackBufPanic Failpoint = goPanicFailpoint{"afterWritebackBuf", nil, AnyMember}
+ CompactBeforeCommitScheduledCompactPanic Failpoint = goPanicFailpoint{"compactBeforeCommitScheduledCompact", triggerCompact{}, AnyMember}
+ CompactAfterCommitScheduledCompactPanic Failpoint = goPanicFailpoint{"compactAfterCommitScheduledCompact", triggerCompact{}, AnyMember}
+ CompactBeforeSetFinishedCompactPanic Failpoint = goPanicFailpoint{"compactBeforeSetFinishedCompact", triggerCompact{}, AnyMember}
+ BatchCompactBeforeSetFinishedCompactPanic Failpoint = goPanicFailpoint{"compactBeforeSetFinishedCompact", triggerCompact{multiBatchCompaction: true}, AnyMember}
+ CompactAfterSetFinishedCompactPanic Failpoint = goPanicFailpoint{"compactAfterSetFinishedCompact", triggerCompact{}, AnyMember}
+ CompactBeforeCommitBatchPanic Failpoint = goPanicFailpoint{"compactBeforeCommitBatch", triggerCompact{multiBatchCompaction: true}, AnyMember}
+ CompactAfterCommitBatchPanic Failpoint = goPanicFailpoint{"compactAfterCommitBatch", triggerCompact{multiBatchCompaction: true}, AnyMember}
+ RaftBeforeLeaderSendPanic Failpoint = goPanicFailpoint{"raftBeforeLeaderSend", nil, Leader}
+ RaftBeforeFollowerSendPanic Failpoint = goPanicFailpoint{"raftBeforeFollowerSend", nil, Follower}
+ RaftBeforeApplySnapPanic Failpoint = goPanicFailpoint{"raftBeforeApplySnap", triggerBlackhole{waitTillSnapshot: true}, Follower}
+ RaftAfterApplySnapPanic Failpoint = goPanicFailpoint{"raftAfterApplySnap", triggerBlackhole{waitTillSnapshot: true}, Follower}
+ RaftAfterWALReleasePanic Failpoint = goPanicFailpoint{"raftAfterWALRelease", triggerBlackhole{waitTillSnapshot: true}, Follower}
+ RaftBeforeSaveSnapPanic Failpoint = goPanicFailpoint{"raftBeforeSaveSnap", triggerBlackhole{waitTillSnapshot: true}, Follower}
+ RaftAfterSaveSnapPanic Failpoint = goPanicFailpoint{"raftAfterSaveSnap", triggerBlackhole{waitTillSnapshot: true}, Follower}
+ ApplyBeforeOpenSnapshot Failpoint = goPanicFailpoint{"applyBeforeOpenSnapshot", triggerBlackhole{waitTillSnapshot: true}, Follower}
+ BeforeApplyOneConfChangeSleep Failpoint = killAndGofailSleep{"beforeApplyOneConfChange", time.Second}
+ RaftBeforeSaveSleep Failpoint = gofailSleepAndDeactivate{"raftBeforeSave", time.Second}
+ RaftAfterSaveSleep Failpoint = gofailSleepAndDeactivate{"raftAfterSave", time.Second}
+ SleepBeforeSendWatchResponse Failpoint = gofailSleepAndDeactivate{"beforeSendWatchResponse", time.Second}
+)
+
+type goPanicFailpoint struct {
+ failpoint string
+ trigger trigger
+ target failpointTarget
+}
+
+type failpointTarget string
+
+const (
+ AnyMember failpointTarget = "AnyMember"
+ Leader failpointTarget = "Leader"
+ Follower failpointTarget = "Follower"
+)
+
+func (f goPanicFailpoint) Inject(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, baseTime time.Time, ids identity.Provider) (reports []report.ClientReport, err error) {
+ member := f.pickMember(t, clus)
+
+ for member.IsRunning() {
+ select {
+ case <-ctx.Done():
+ return reports, ctx.Err()
+ default:
+ }
+ lg.Info("Setting up gofailpoint", zap.String("failpoint", f.Name()))
+ err = member.Failpoints().SetupHTTP(ctx, f.failpoint, "panic")
+ if err != nil {
+ lg.Info("goFailpoint setup failed", zap.String("failpoint", f.Name()), zap.Error(err))
+ continue
+ }
+ break
+ }
+
+ if f.trigger != nil {
+ for member.IsRunning() {
+ select {
+ case <-ctx.Done():
+ return reports, ctx.Err()
+ default:
+ }
+ var r []report.ClientReport
+ lg.Info("Triggering gofailpoint", zap.String("failpoint", f.Name()))
+ r, err = f.trigger.Trigger(ctx, t, member, clus, baseTime, ids)
+ if err != nil {
+ lg.Info("gofailpoint trigger failed", zap.String("failpoint", f.Name()), zap.Error(err))
+ continue
+ }
+ if r != nil {
+ reports = append(reports, r...)
+ }
+ break
+ }
+ }
+
+ lg.Info("Waiting for member to exit", zap.String("member", member.Config().Name))
+ err = member.Wait(ctx)
+ if err != nil && !strings.Contains(err.Error(), "unexpected exit code") {
+ lg.Info("Member didn't exit as expected", zap.String("member", member.Config().Name), zap.Error(err))
+ return reports, fmt.Errorf("member didn't exit as expected: %w", err)
+ }
+ lg.Info("Member exited as expected", zap.String("member", member.Config().Name))
+
+ if lazyfs := member.LazyFS(); lazyfs != nil {
+ lg.Info("Removing data that was not fsynced")
+ err := lazyfs.ClearCache(ctx)
+ if err != nil {
+ return reports, err
+ }
+ }
+
+ return reports, member.Start(ctx)
+}
+
+func (f goPanicFailpoint) pickMember(t *testing.T, clus *e2e.EtcdProcessCluster) e2e.EtcdProcess {
+ switch f.target {
+ case AnyMember:
+ return clus.Procs[rand.Int()%len(clus.Procs)]
+ case Leader:
+ return clus.Procs[clus.WaitLeader(t)]
+ case Follower:
+ return clus.Procs[(clus.WaitLeader(t)+1)%len(clus.Procs)]
+ default:
+ panic("unknown target")
+ }
+}
+
+func (f goPanicFailpoint) Available(config e2e.EtcdProcessClusterConfig, member e2e.EtcdProcess, profile traffic.Profile) bool {
+ if f.target == Follower && config.ClusterSize == 1 {
+ return false
+ }
+ if f.trigger != nil && !f.trigger.Available(config, member, profile) {
+ return false
+ }
+ memberFailpoints := member.Failpoints()
+ if memberFailpoints == nil {
+ return false
+ }
+ return memberFailpoints.Available(f.failpoint)
+}
+
+func (f goPanicFailpoint) Name() string {
+ return fmt.Sprintf("%s=panic", f.failpoint)
+}
+
+type killAndGofailSleep struct {
+ failpoint string
+ time time.Duration
+}
+
+func (f killAndGofailSleep) Inject(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, baseTime time.Time, ids identity.Provider) ([]report.ClientReport, error) {
+ member := clus.Procs[rand.Int()%len(clus.Procs)]
+ for member.IsRunning() {
+ err := member.Kill()
+ if err != nil {
+ lg.Info("Sending kill signal failed", zap.Error(err))
+ }
+ err = member.Wait(ctx)
+ if err != nil && !strings.Contains(err.Error(), "unexpected exit code") {
+ lg.Info("Failed to kill the process", zap.Error(err))
+ return nil, fmt.Errorf("failed to kill the process within %s, err: %w", triggerTimeout, err)
+ }
+ }
+ lg.Info("Setting up goFailpoint", zap.String("failpoint", f.Name()))
+ err := member.Failpoints().SetupEnv(f.failpoint, fmt.Sprintf(`sleep(%q)`, f.time))
+ if err != nil {
+ return nil, err
+ }
+ err = member.Start(ctx)
+ if err != nil {
+ return nil, err
+ }
+ // TODO: Check gofail status (https://github.com/etcd-io/gofail/pull/47) and wait for the sleep to be executed at least once.
+ return nil, nil
+}
+
+func (f killAndGofailSleep) Name() string {
+ return fmt.Sprintf("%s=sleep", f.failpoint)
+}
+
+func (f killAndGofailSleep) Available(config e2e.EtcdProcessClusterConfig, member e2e.EtcdProcess, profile traffic.Profile) bool {
+ if config.ClusterSize == 1 {
+ return false
+ }
+ memberFailpoints := member.Failpoints()
+ if memberFailpoints == nil {
+ return false
+ }
+ return memberFailpoints.Available(f.failpoint)
+}
+
+type gofailSleepAndDeactivate struct {
+ failpoint string
+ time time.Duration
+}
+
+func (f gofailSleepAndDeactivate) Inject(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, baseTime time.Time, ids identity.Provider) ([]report.ClientReport, error) {
+ member := clus.Procs[rand.Int()%len(clus.Procs)]
+ lg.Info("Setting up gofailpoint", zap.String("failpoint", f.Name()))
+ err := member.Failpoints().SetupHTTP(ctx, f.failpoint, fmt.Sprintf(`sleep(%q)`, f.time))
+ if err != nil {
+ lg.Info("goFailpoint setup failed", zap.String("failpoint", f.Name()), zap.Error(err))
+ return nil, fmt.Errorf("goFailpoint %s setup failed, err:%w", f.Name(), err)
+ }
+ time.Sleep(f.time)
+ lg.Info("Deactivating gofailpoint", zap.String("failpoint", f.Name()))
+ err = member.Failpoints().DeactivateHTTP(ctx, f.failpoint)
+ if err != nil {
+ lg.Info("goFailpoint deactivate failed", zap.String("failpoint", f.Name()), zap.Error(err))
+ return nil, fmt.Errorf("goFailpoint %s deactivate failed, err: %w", f.Name(), err)
+ }
+ return nil, nil
+}
+
+func (f gofailSleepAndDeactivate) Name() string {
+ return fmt.Sprintf("%s=sleep", f.failpoint)
+}
+
+func (f gofailSleepAndDeactivate) Available(config e2e.EtcdProcessClusterConfig, member e2e.EtcdProcess, profile traffic.Profile) bool {
+ memberFailpoints := member.Failpoints()
+ if memberFailpoints == nil {
+ return false
+ }
+ return memberFailpoints.Available(f.failpoint)
+}
diff --git a/tests/robustness/failpoint/kill.go b/tests/robustness/failpoint/kill.go
new file mode 100644
index 00000000000..c905019cbf7
--- /dev/null
+++ b/tests/robustness/failpoint/kill.go
@@ -0,0 +1,71 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package failpoint
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "strings"
+ "testing"
+ "time"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/robustness/identity"
+ "go.etcd.io/etcd/tests/v3/robustness/report"
+ "go.etcd.io/etcd/tests/v3/robustness/traffic"
+)
+
+var KillFailpoint Failpoint = killFailpoint{}
+
+type killFailpoint struct{}
+
+func (f killFailpoint) Inject(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, baseTime time.Time, ids identity.Provider) ([]report.ClientReport, error) {
+ member := clus.Procs[rand.Int()%len(clus.Procs)]
+
+ for member.IsRunning() {
+ err := member.Kill()
+ if err != nil {
+ lg.Info("Sending kill signal failed", zap.Error(err))
+ }
+ err = member.Wait(ctx)
+ if err != nil && !strings.Contains(err.Error(), "unexpected exit code") {
+ lg.Info("Failed to kill the process", zap.Error(err))
+ return nil, fmt.Errorf("failed to kill the process within %s, err: %w", triggerTimeout, err)
+ }
+ }
+ if lazyfs := member.LazyFS(); lazyfs != nil {
+ lg.Info("Removing data that was not fsynced")
+ err := lazyfs.ClearCache(ctx)
+ if err != nil {
+ return nil, err
+ }
+ }
+ err := member.Start(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+func (f killFailpoint) Name() string {
+ return "Kill"
+}
+
+func (f killFailpoint) Available(e2e.EtcdProcessClusterConfig, e2e.EtcdProcess, traffic.Profile) bool {
+ return true
+}
diff --git a/tests/robustness/failpoint/network.go b/tests/robustness/failpoint/network.go
new file mode 100644
index 00000000000..27504c396b9
--- /dev/null
+++ b/tests/robustness/failpoint/network.go
@@ -0,0 +1,219 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package failpoint
+
+import (
+ "context"
+ "math/rand"
+ "testing"
+ "time"
+
+ "go.uber.org/zap"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/robustness/identity"
+ "go.etcd.io/etcd/tests/v3/robustness/report"
+ "go.etcd.io/etcd/tests/v3/robustness/traffic"
+)
+
+var (
+ BlackholePeerNetwork Failpoint = blackholePeerNetworkFailpoint{triggerBlackhole{waitTillSnapshot: false}}
+ BlackholeUntilSnapshot Failpoint = blackholePeerNetworkFailpoint{triggerBlackhole{waitTillSnapshot: true}}
+ DelayPeerNetwork Failpoint = delayPeerNetworkFailpoint{duration: time.Second, baseLatency: 75 * time.Millisecond, randomizedLatency: 50 * time.Millisecond}
+ DropPeerNetwork Failpoint = dropPeerNetworkFailpoint{duration: time.Second, dropProbabilityPercent: 50}
+)
+
+type blackholePeerNetworkFailpoint struct {
+ triggerBlackhole
+}
+
+func (f blackholePeerNetworkFailpoint) Inject(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, baseTime time.Time, ids identity.Provider) ([]report.ClientReport, error) {
+ member := clus.Procs[rand.Int()%len(clus.Procs)]
+ return f.Trigger(ctx, t, member, clus, baseTime, ids)
+}
+
+func (f blackholePeerNetworkFailpoint) Name() string {
+ return "blackholePeerNetwork"
+}
+
+type triggerBlackhole struct {
+ waitTillSnapshot bool
+}
+
+func (tb triggerBlackhole) Trigger(ctx context.Context, t *testing.T, member e2e.EtcdProcess, clus *e2e.EtcdProcessCluster, baseTime time.Time, ids identity.Provider) ([]report.ClientReport, error) {
+ return nil, Blackhole(ctx, t, member, clus, tb.waitTillSnapshot)
+}
+
+func (tb triggerBlackhole) Available(config e2e.EtcdProcessClusterConfig, process e2e.EtcdProcess, profile traffic.Profile) bool {
+ // Avoid triggering failpoint if waiting for failpoint would take too long to fit into timeout.
+ // Number of required entries for snapshot depends on etcd configuration.
+ if tb.waitTillSnapshot && (entriesToGuaranteeSnapshot(config) > 200 || !e2e.CouldSetSnapshotCatchupEntries(process.Config().ExecPath)) {
+ return false
+ }
+ return config.ClusterSize > 1 && process.PeerProxy() != nil
+}
+
+func Blackhole(ctx context.Context, t *testing.T, member e2e.EtcdProcess, clus *e2e.EtcdProcessCluster, shouldWaitTillSnapshot bool) error {
+ proxy := member.PeerProxy()
+
+ // Blackholing will cause peers to not be able to use streamWriters registered with member
+ // but peer traffic is still possible because member has 'pipeline' with peers
+ // TODO: find a way to stop all traffic
+ t.Logf("Blackholing traffic from and to member %q", member.Config().Name)
+ proxy.BlackholeTx()
+ proxy.BlackholeRx()
+ defer func() {
+ t.Logf("Traffic restored from and to member %q", member.Config().Name)
+ proxy.UnblackholeTx()
+ proxy.UnblackholeRx()
+ }()
+ if shouldWaitTillSnapshot {
+ return waitTillSnapshot(ctx, t, clus, member)
+ }
+ time.Sleep(time.Second)
+ return nil
+}
+
+func waitTillSnapshot(ctx context.Context, t *testing.T, clus *e2e.EtcdProcessCluster, blackholedMember e2e.EtcdProcess) error {
+ var endpoints []string
+ for _, ep := range clus.EndpointsGRPC() {
+ if ep == blackholedMember.Config().ClientURL {
+ continue
+ }
+ endpoints = append(endpoints, ep)
+ }
+ clusterClient, err := clientv3.New(clientv3.Config{
+ Endpoints: endpoints,
+ Logger: zap.NewNop(),
+ DialKeepAliveTime: 10 * time.Second,
+ DialKeepAliveTimeout: 100 * time.Millisecond,
+ })
+ if err != nil {
+ return err
+ }
+ defer clusterClient.Close()
+
+ blackholedMemberClient, err := clientv3.New(clientv3.Config{
+ Endpoints: []string{blackholedMember.Config().ClientURL},
+ Logger: zap.NewNop(),
+ DialKeepAliveTime: 10 * time.Second,
+ DialKeepAliveTimeout: 100 * time.Millisecond,
+ })
+ if err != nil {
+ return err
+ }
+ defer blackholedMemberClient.Close()
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ // Have to refresh blackholedMemberRevision. It can still increase as blackholedMember processes changes that are received but not yet applied.
+ blackholedMemberRevision, err := latestRevisionForEndpoint(ctx, blackholedMemberClient)
+ if err != nil {
+ return err
+ }
+ clusterRevision, err := latestRevisionForEndpoint(ctx, clusterClient)
+ if err != nil {
+ return err
+ }
+ t.Logf("clusterRevision: %d, blackholedMemberRevision: %d", clusterRevision, blackholedMemberRevision)
+ // Blackholed member has to be sufficiently behind to trigger snapshot transfer.
+ if clusterRevision-blackholedMemberRevision > int64(entriesToGuaranteeSnapshot(*clus.Cfg)) {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ return nil
+}
+
+func entriesToGuaranteeSnapshot(config e2e.EtcdProcessClusterConfig) uint64 {
+ // Need to make sure the leader compacted the latest revision of the blackholed member inside EtcdServer.snapshot.
+ // That's why we wait for clus.Cfg.SnapshotCount (to trigger the snapshot) + clus.Cfg.SnapshotCatchUpEntries (the EtcdServer.snapshot compaction offset).
+ return config.ServerConfig.SnapshotCount + config.ServerConfig.SnapshotCatchUpEntries
+}
+
+// latestRevisionForEndpoint gets latest revision of the first endpoint in Client.Endpoints list
+func latestRevisionForEndpoint(ctx context.Context, c *clientv3.Client) (int64, error) {
+ resp, err := c.Status(ctx, c.Endpoints()[0])
+ if err != nil {
+ return 0, err
+ }
+ return resp.Header.Revision, err
+}
+
+type delayPeerNetworkFailpoint struct {
+ duration time.Duration
+ baseLatency time.Duration
+ randomizedLatency time.Duration
+}
+
+func (f delayPeerNetworkFailpoint) Inject(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, baseTime time.Time, ids identity.Provider) ([]report.ClientReport, error) {
+ member := clus.Procs[rand.Int()%len(clus.Procs)]
+ proxy := member.PeerProxy()
+
+ proxy.DelayRx(f.baseLatency, f.randomizedLatency)
+ proxy.DelayTx(f.baseLatency, f.randomizedLatency)
+ lg.Info("Delaying traffic from and to member", zap.String("member", member.Config().Name), zap.Duration("baseLatency", f.baseLatency), zap.Duration("randomizedLatency", f.randomizedLatency))
+ time.Sleep(f.duration)
+ lg.Info("Traffic delay removed", zap.String("member", member.Config().Name))
+ proxy.UndelayRx()
+ proxy.UndelayTx()
+ return nil, nil
+}
+
+func (f delayPeerNetworkFailpoint) Name() string {
+ return "delayPeerNetwork"
+}
+
+func (f delayPeerNetworkFailpoint) Available(config e2e.EtcdProcessClusterConfig, clus e2e.EtcdProcess, profile traffic.Profile) bool {
+ return config.ClusterSize > 1 && clus.PeerProxy() != nil
+}
+
+type dropPeerNetworkFailpoint struct {
+ duration time.Duration
+ dropProbabilityPercent int
+}
+
+func (f dropPeerNetworkFailpoint) Inject(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, baseTime time.Time, ids identity.Provider) ([]report.ClientReport, error) {
+ member := clus.Procs[rand.Int()%len(clus.Procs)]
+ proxy := member.PeerProxy()
+
+ proxy.ModifyRx(f.modifyPacket)
+ proxy.ModifyTx(f.modifyPacket)
+ lg.Info("Dropping traffic from and to member", zap.String("member", member.Config().Name), zap.Int("probability", f.dropProbabilityPercent))
+ time.Sleep(f.duration)
+ lg.Info("Traffic drop removed", zap.String("member", member.Config().Name))
+ proxy.UnmodifyRx()
+ proxy.UnmodifyTx()
+ return nil, nil
+}
+
+func (f dropPeerNetworkFailpoint) modifyPacket(data []byte) []byte {
+ if rand.Intn(100) < f.dropProbabilityPercent {
+ return nil
+ }
+ return data
+}
+
+func (f dropPeerNetworkFailpoint) Name() string {
+ return "dropPeerNetwork"
+}
+
+func (f dropPeerNetworkFailpoint) Available(config e2e.EtcdProcessClusterConfig, clus e2e.EtcdProcess, profile traffic.Profile) bool {
+ return config.ClusterSize > 1 && clus.PeerProxy() != nil
+}
diff --git a/tests/robustness/failpoint/trigger.go b/tests/robustness/failpoint/trigger.go
new file mode 100644
index 00000000000..55ef0614ea6
--- /dev/null
+++ b/tests/robustness/failpoint/trigger.go
@@ -0,0 +1,106 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package failpoint
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/robustness/client"
+ "go.etcd.io/etcd/tests/v3/robustness/identity"
+ "go.etcd.io/etcd/tests/v3/robustness/report"
+ "go.etcd.io/etcd/tests/v3/robustness/traffic"
+)
+
+type trigger interface {
+ Trigger(ctx context.Context, t *testing.T, member e2e.EtcdProcess, clus *e2e.EtcdProcessCluster, baseTime time.Time, ids identity.Provider) ([]report.ClientReport, error)
+ AvailabilityChecker
+}
+
+type triggerDefrag struct{}
+
+func (t triggerDefrag) Trigger(ctx context.Context, _ *testing.T, member e2e.EtcdProcess, clus *e2e.EtcdProcessCluster, baseTime time.Time, ids identity.Provider) ([]report.ClientReport, error) {
+ cc, err := client.NewRecordingClient(member.EndpointsGRPC(), ids, baseTime)
+ if err != nil {
+ return nil, fmt.Errorf("failed creating client: %w", err)
+ }
+ defer cc.Close()
+ _, err = cc.Defragment(ctx)
+ if err != nil && !connectionError(err) {
+ return nil, err
+ }
+ return nil, nil
+}
+
+func (t triggerDefrag) Available(e2e.EtcdProcessClusterConfig, e2e.EtcdProcess, traffic.Profile) bool {
+ return true
+}
+
+type triggerCompact struct {
+ multiBatchCompaction bool
+}
+
+func (t triggerCompact) Trigger(ctx context.Context, _ *testing.T, member e2e.EtcdProcess, clus *e2e.EtcdProcessCluster, baseTime time.Time, ids identity.Provider) ([]report.ClientReport, error) {
+ ctx, cancel := context.WithTimeout(ctx, time.Second)
+ defer cancel()
+ cc, err := client.NewRecordingClient(member.EndpointsGRPC(), ids, baseTime)
+ if err != nil {
+ return nil, fmt.Errorf("failed creating client: %w", err)
+ }
+ defer cc.Close()
+
+ var rev int64
+ for {
+ _, rev, err = cc.Get(ctx, "/", 0)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get revision: %w", err)
+ }
+
+ if !t.multiBatchCompaction || rev > int64(clus.Cfg.ServerConfig.ExperimentalCompactionBatchLimit) {
+ break
+ }
+ time.Sleep(50 * time.Millisecond)
+ }
+ _, err = cc.Compact(ctx, rev)
+ if err != nil && !connectionError(err) {
+ return nil, fmt.Errorf("failed to compact: %w", err)
+ }
+ return []report.ClientReport{cc.Report()}, nil
+}
+
+func (t triggerCompact) Available(config e2e.EtcdProcessClusterConfig, _ e2e.EtcdProcess, profile traffic.Profile) bool {
+ if profile.ForbidCompaction {
+ return false
+ }
+ // Since the introduction of compaction into traffic, injecting compaction failpoints has interfered with the peer proxy.
+ // TODO: Re-enable the peer proxy for compact failpoints when we confirm the root cause.
+ if config.PeerProxy {
+ return false
+ }
+ // For multiBatchCompaction we need to guarantee that there are enough revisions between two compaction requests.
+ // With the addition of compaction requests to traffic, this can be hard to guarantee if experimental-compaction-batch-limit is too high.
+ if t.multiBatchCompaction {
+ return config.ServerConfig.ExperimentalCompactionBatchLimit <= 10
+ }
+ return true
+}
+
+func connectionError(err error) bool {
+ return strings.Contains(err.Error(), "error reading from server: EOF") || strings.HasSuffix(err.Error(), "read: connection reset by peer")
+}
diff --git a/tests/robustness/identity/id.go b/tests/robustness/identity/id.go
new file mode 100644
index 00000000000..14954485391
--- /dev/null
+++ b/tests/robustness/identity/id.go
@@ -0,0 +1,48 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package identity
+
+import "sync/atomic"
+
+type Provider interface {
+ // NewStreamID returns an integer starting from zero so it renders nicely in the porcupine visualization.
+ NewStreamID() int
+ // NewRequestID returns a unique identifier used to make write requests unique.
+ NewRequestID() int
+ // NewClientID returns a unique identifier for a client and its reports.
+ NewClientID() int
+}
+
+func NewIDProvider() Provider {
+ return &atomicProvider{}
+}
+
+type atomicProvider struct {
+ streamID atomic.Int64
+ requestID atomic.Int64
+ clientID atomic.Int64
+}
+
+func (id *atomicProvider) NewStreamID() int {
+ return int(id.streamID.Add(1) - 1)
+}
+
+func (id *atomicProvider) NewRequestID() int {
+ return int(id.requestID.Add(1))
+}
+
+func (id *atomicProvider) NewClientID() int {
+ return int(id.clientID.Add(1))
+}
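A minimal usage sketch of the provider, assuming it is importable as go.etcd.io/etcd/tests/v3/robustness/identity; the goroutine count and printed labels are illustrative only, and the snippet is not part of the patch above:

```go
package main

import (
	"fmt"
	"sync"

	"go.etcd.io/etcd/tests/v3/robustness/identity"
)

func main() {
	ids := identity.NewIDProvider()

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Safe to call concurrently; every caller gets distinct IDs,
			// and stream IDs start at zero.
			fmt.Println("client", ids.NewClientID(), "stream", ids.NewStreamID(), "request", ids.NewRequestID())
		}()
	}
	wg.Wait()
}
```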
diff --git a/tests/robustness/identity/lease_ids.go b/tests/robustness/identity/lease_ids.go
new file mode 100644
index 00000000000..abce05b0a09
--- /dev/null
+++ b/tests/robustness/identity/lease_ids.go
@@ -0,0 +1,53 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package identity
+
+import (
+ "sync"
+)
+
+type LeaseIDStorage interface {
+ LeaseID(int) int64
+ AddLeaseID(int, int64)
+ RemoveLeaseID(int)
+}
+
+func NewLeaseIDStorage() LeaseIDStorage {
+ return &atomicClientID2LeaseIDMapper{m: map[int]int64{}}
+}
+
+type atomicClientID2LeaseIDMapper struct {
+ sync.RWMutex
+ // m stores the clientID to leaseID mapping.
+ m map[int]int64
+}
+
+func (lm *atomicClientID2LeaseIDMapper) LeaseID(clientID int) int64 {
+ lm.RLock()
+ defer lm.RUnlock()
+ return lm.m[clientID]
+}
+
+func (lm *atomicClientID2LeaseIDMapper) AddLeaseID(clientID int, leaseID int64) {
+ lm.Lock()
+ defer lm.Unlock()
+ lm.m[clientID] = leaseID
+}
+
+func (lm *atomicClientID2LeaseIDMapper) RemoveLeaseID(clientID int) {
+ lm.Lock()
+ defer lm.Unlock()
+ delete(lm.m, clientID)
+}
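A short usage sketch of the lease ID storage under the same import-path assumption; the client and lease IDs below are arbitrary example values:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/tests/v3/robustness/identity"
)

func main() {
	leases := identity.NewLeaseIDStorage()

	// Remember which lease a given client created.
	clientID, leaseID := 1, int64(1234567890)
	leases.AddLeaseID(clientID, leaseID)

	// Later, look the lease up to attach it to puts or to revoke it.
	fmt.Println(leases.LeaseID(clientID)) // 1234567890

	// After revoking, drop the mapping; lookups then return the zero value.
	leases.RemoveLeaseID(clientID)
	fmt.Println(leases.LeaseID(clientID)) // 0
}
```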
diff --git a/tests/robustness/main_test.go b/tests/robustness/main_test.go
new file mode 100644
index 00000000000..32e03283820
--- /dev/null
+++ b/tests/robustness/main_test.go
@@ -0,0 +1,179 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package robustness
+
+import (
+ "context"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
+ "golang.org/x/sync/errgroup"
+
+ "go.etcd.io/etcd/tests/v3/framework"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/robustness/client"
+ "go.etcd.io/etcd/tests/v3/robustness/failpoint"
+ "go.etcd.io/etcd/tests/v3/robustness/identity"
+ "go.etcd.io/etcd/tests/v3/robustness/model"
+ "go.etcd.io/etcd/tests/v3/robustness/report"
+ "go.etcd.io/etcd/tests/v3/robustness/scenarios"
+ "go.etcd.io/etcd/tests/v3/robustness/traffic"
+ "go.etcd.io/etcd/tests/v3/robustness/validate"
+)
+
+var testRunner = framework.E2eTestRunner
+
+var (
+ WaitBeforeFailpoint = time.Second
+ WaitJitter = traffic.CompactionPeriod
+ WaitAfterFailpoint = time.Second
+)
+
+func TestMain(m *testing.M) {
+ rand.Seed(time.Now().UnixNano())
+ testRunner.TestMain(m)
+}
+
+func TestRobustnessExploratory(t *testing.T) {
+ testRunner.BeforeTest(t)
+ for _, s := range scenarios.Exploratory(t) {
+ t.Run(s.Name, func(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ s.Cluster.Logger = lg
+ ctx := context.Background()
+ c, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithConfig(&s.Cluster))
+ require.NoError(t, err)
+ defer forcestopCluster(c)
+ s.Failpoint, err = failpoint.PickRandom(c, s.Profile)
+ require.NoError(t, err)
+ t.Run(s.Failpoint.Name(), func(t *testing.T) {
+ testRobustness(ctx, t, lg, s, c)
+ })
+ })
+ }
+}
+
+func TestRobustnessRegression(t *testing.T) {
+ testRunner.BeforeTest(t)
+ for _, s := range scenarios.Regression(t) {
+ t.Run(s.Name, func(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ s.Cluster.Logger = lg
+ ctx := context.Background()
+ c, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithConfig(&s.Cluster))
+ require.NoError(t, err)
+ defer forcestopCluster(c)
+ testRobustness(ctx, t, lg, s, c)
+ })
+ }
+}
+
+func testRobustness(ctx context.Context, t *testing.T, lg *zap.Logger, s scenarios.TestScenario, c *e2e.EtcdProcessCluster) {
+ r := report.TestReport{Logger: lg, Cluster: c}
+ // t.Failed() returns false during panicking. We need to forcibly
+ // save data on panicking.
+ // Refer to: https://github.com/golang/go/issues/49929
+ panicked := true
+ defer func() {
+ r.Report(t, panicked)
+ }()
+ r.Client = runScenario(ctx, t, s, lg, c)
+ persistedRequests, err := report.PersistedRequestsCluster(lg, c)
+ require.NoError(t, err)
+
+ failpointImpactingWatch := s.Failpoint == failpoint.SleepBeforeSendWatchResponse
+ if !failpointImpactingWatch {
+ watchProgressNotifyEnabled := c.Cfg.ServerConfig.ExperimentalWatchProgressNotifyInterval != 0
+ client.ValidateGotAtLeastOneProgressNotify(t, r.Client, s.Watch.RequestProgress || watchProgressNotifyEnabled)
+ }
+ validateConfig := validate.Config{ExpectRevisionUnique: s.Traffic.ExpectUniqueRevision()}
+ r.Visualize = validate.ValidateAndReturnVisualize(t, lg, validateConfig, r.Client, persistedRequests, 5*time.Minute)
+
+ panicked = false
+}
+
+func runScenario(ctx context.Context, t *testing.T, s scenarios.TestScenario, lg *zap.Logger, clus *e2e.EtcdProcessCluster) (reports []report.ClientReport) {
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ g := errgroup.Group{}
+ var operationReport, watchReport, failpointClientReport []report.ClientReport
+ failpointInjected := make(chan report.FailpointInjection, 1)
+
+ // Use baseTime for time-measuring operations to get monotonic clock readings,
+ // see https://github.com/golang/go/blob/master/src/time/time.go#L17
+ baseTime := time.Now()
+ ids := identity.NewIDProvider()
+ g.Go(func() error {
+ defer close(failpointInjected)
+ // Give some time for traffic to reach qps target before injecting failpoint.
+ time.Sleep(randomizeTime(WaitBeforeFailpoint, WaitJitter))
+ fr, err := failpoint.Inject(ctx, t, lg, clus, s.Failpoint, baseTime, ids)
+ if err != nil {
+ t.Error(err)
+ cancel()
+ }
+ // Give some time for traffic to reach qps target after injecting failpoint.
+ time.Sleep(randomizeTime(WaitAfterFailpoint, WaitJitter))
+ if fr != nil {
+ failpointInjected <- fr.FailpointInjection
+ failpointClientReport = fr.Client
+ }
+ return nil
+ })
+ maxRevisionChan := make(chan int64, 1)
+ g.Go(func() error {
+ defer close(maxRevisionChan)
+ operationReport = traffic.SimulateTraffic(ctx, t, lg, clus, s.Profile, s.Traffic, failpointInjected, baseTime, ids)
+ maxRevision := operationsMaxRevision(operationReport)
+ maxRevisionChan <- maxRevision
+ lg.Info("Finished simulating Traffic", zap.Int64("max-revision", maxRevision))
+ return nil
+ })
+ g.Go(func() error {
+ watchReport = client.CollectClusterWatchEvents(ctx, t, clus, maxRevisionChan, s.Watch, baseTime, ids)
+ return nil
+ })
+ g.Wait()
+ return append(operationReport, append(failpointClientReport, watchReport...)...)
+}
+
+func randomizeTime(base time.Duration, jitter time.Duration) time.Duration {
+ return base - jitter + time.Duration(rand.Int63n(int64(jitter)*2))
+}
+
+func operationsMaxRevision(reports []report.ClientReport) int64 {
+ var maxRevision int64
+ for _, r := range reports {
+ for _, op := range r.KeyValue {
+ resp := op.Output.(model.MaybeEtcdResponse)
+ if resp.Revision > maxRevision {
+ maxRevision = resp.Revision
+ }
+ }
+ }
+ return maxRevision
+}
+
+// forcestopCluster stops the etcd member with signal kill.
+func forcestopCluster(clus *e2e.EtcdProcessCluster) error {
+ for _, member := range clus.Procs {
+ member.Kill()
+ }
+ return clus.ConcurrentStop()
+}
diff --git a/tests/robustness/makefile.mk b/tests/robustness/makefile.mk
new file mode 100644
index 00000000000..56108976c7e
--- /dev/null
+++ b/tests/robustness/makefile.mk
@@ -0,0 +1,167 @@
+.PHONY: test-robustness-reports
+test-robustness-reports: export GOTOOLCHAIN := go$(shell cat .go-version)
+test-robustness-reports:
+ cd ./tests && go test ./robustness/validate -v --count 1 --run TestDataReports
+
+# Test main and previous release branches
+
+.PHONY: test-robustness-main
+test-robustness-main: /tmp/etcd-main-failpoints/bin /tmp/etcd-release-3.5-failpoints/bin
+ GO_TEST_FLAGS="$${GO_TEST_FLAGS} --bin-dir=/tmp/etcd-main-failpoints/bin --bin-last-release=/tmp/etcd-release-3.5-failpoints/bin/etcd" $(MAKE) test-robustness
+
+.PHONY: test-robustness-release-3.5
+test-robustness-release-3.5: /tmp/etcd-release-3.5-failpoints/bin /tmp/etcd-release-3.4-failpoints/bin
+ GO_TEST_FLAGS="$${GO_TEST_FLAGS} --bin-dir=/tmp/etcd-release-3.5-failpoints/bin --bin-last-release=/tmp/etcd-release-3.4-failpoints/bin/etcd" $(MAKE) test-robustness
+
+.PHONY: test-robustness-release-3.4
+test-robustness-release-3.4: /tmp/etcd-release-3.4-failpoints/bin
+ GO_TEST_FLAGS="$${GO_TEST_FLAGS} --bin-dir=/tmp/etcd-release-3.4-failpoints/bin" $(MAKE) test-robustness
+
+# Reproduce historical issues
+
+.PHONY: test-robustness-issue14370
+test-robustness-issue14370: /tmp/etcd-v3.5.4-failpoints/bin
+ GO_TEST_FLAGS='-v --run=TestRobustnessRegression/Issue14370 --count 100 --failfast --bin-dir=/tmp/etcd-v3.5.4-failpoints/bin' $(MAKE) test-robustness && \
+ echo "Failed to reproduce" || echo "Successful reproduction"
+
+.PHONY: test-robustness-issue13766
+test-robustness-issue13766: /tmp/etcd-v3.5.2-failpoints/bin
+ GO_TEST_FLAGS='-v --run=TestRobustnessRegression/Issue13766 --count 100 --failfast --bin-dir=/tmp/etcd-v3.5.2-failpoints/bin' $(MAKE) test-robustness && \
+ echo "Failed to reproduce" || echo "Successful reproduction"
+
+.PHONY: test-robustness-issue14685
+test-robustness-issue14685: /tmp/etcd-v3.5.5-failpoints/bin
+ GO_TEST_FLAGS='-v --run=TestRobustnessRegression/Issue14685 --count 100 --failfast --bin-dir=/tmp/etcd-v3.5.5-failpoints/bin' $(MAKE) test-robustness && \
+ echo "Failed to reproduce" || echo "Successful reproduction"
+
+.PHONY: test-robustness-issue15271
+test-robustness-issue15271: /tmp/etcd-v3.5.7-failpoints/bin
+ GO_TEST_FLAGS='-v --run=TestRobustnessRegression/Issue15271 --count 100 --failfast --bin-dir=/tmp/etcd-v3.5.7-failpoints/bin' $(MAKE) test-robustness && \
+ echo "Failed to reproduce" || echo "Successful reproduction"
+
+.PHONY: test-robustness-issue17529
+test-robustness-issue17529: /tmp/etcd-v3.5.12-beforeSendWatchResponse/bin
+ GO_TEST_FLAGS='-v --run=TestRobustnessRegression/Issue17529 --count 100 --failfast --bin-dir=/tmp/etcd-v3.5.12-beforeSendWatchResponse/bin' $(MAKE) test-robustness && \
+ echo "Failed to reproduce" || echo "Successful reproduction"
+
+.PHONY: test-robustness-issue17780
+test-robustness-issue17780: /tmp/etcd-v3.5.13-compactBeforeSetFinishedCompact/bin
+ GO_TEST_FLAGS='-v --run=TestRobustnessRegression/Issue17780 --count 200 --failfast --bin-dir=/tmp/etcd-v3.5.13-compactBeforeSetFinishedCompact/bin' $(MAKE) test-robustness && \
+ echo "Failed to reproduce" || echo "Successful reproduction"
+
+# Failpoints
+
+GOPATH = $(shell go env GOPATH)
+GOFAIL_VERSION = $(shell cd tools/mod && go list -m -f {{.Version}} go.etcd.io/gofail)
+
+.PHONY: install-gofail
+install-gofail: $(GOPATH)/bin/gofail
+
+.PHONY: gofail-enable
+gofail-enable: $(GOPATH)/bin/gofail
+ $(GOPATH)/bin/gofail enable server/etcdserver/ server/lease/leasehttp server/storage/backend/ server/storage/mvcc/ server/storage/wal/ server/etcdserver/api/v3rpc/ server/etcdserver/api/membership/
+ cd ./server && go get go.etcd.io/gofail@${GOFAIL_VERSION}
+ cd ./etcdutl && go get go.etcd.io/gofail@${GOFAIL_VERSION}
+ cd ./etcdctl && go get go.etcd.io/gofail@${GOFAIL_VERSION}
+ cd ./tests && go get go.etcd.io/gofail@${GOFAIL_VERSION}
+
+.PHONY: gofail-disable
+gofail-disable: $(GOPATH)/bin/gofail
+ $(GOPATH)/bin/gofail disable server/etcdserver/ server/lease/leasehttp server/storage/backend/ server/storage/mvcc/ server/storage/wal/ server/etcdserver/api/v3rpc/ server/etcdserver/api/membership/
+ cd ./server && go mod tidy
+ cd ./etcdutl && go mod tidy
+ cd ./etcdctl && go mod tidy
+ cd ./tests && go mod tidy
+
+$(GOPATH)/bin/gofail: tools/mod/go.mod tools/mod/go.sum
+ go install go.etcd.io/gofail@${GOFAIL_VERSION}
+
+# Build main and previous releases for robustness tests
+
+/tmp/etcd-main-failpoints/bin: $(GOPATH)/bin/gofail
+ rm -rf /tmp/etcd-main-failpoints/
+ mkdir -p /tmp/etcd-main-failpoints/
+ cd /tmp/etcd-main-failpoints/; \
+ git clone --depth 1 --branch main https://github.com/etcd-io/etcd.git .; \
+ $(MAKE) gofail-enable; \
+ $(MAKE) build;
+
+/tmp/etcd-v3.6.0-failpoints/bin: $(GOPATH)/bin/gofail
+ rm -rf /tmp/etcd-v3.6.0-failpoints/
+ mkdir -p /tmp/etcd-v3.6.0-failpoints/
+ cd /tmp/etcd-v3.6.0-failpoints/; \
+ git clone --depth 1 --branch main https://github.com/etcd-io/etcd.git .; \
+ $(MAKE) gofail-enable; \
+ $(MAKE) build;
+
+/tmp/etcd-v3.5.2-failpoints/bin:
+/tmp/etcd-v3.5.4-failpoints/bin:
+/tmp/etcd-v3.5.5-failpoints/bin:
+/tmp/etcd-v3.5.%-failpoints/bin: $(GOPATH)/bin/gofail
+ rm -rf /tmp/etcd-v3.5.$*-failpoints/
+ mkdir -p /tmp/etcd-v3.5.$*-failpoints/
+ cd /tmp/etcd-v3.5.$*-failpoints/; \
+ git clone --depth 1 --branch v3.5.$* https://github.com/etcd-io/etcd.git .; \
+ go get go.etcd.io/gofail@${GOFAIL_VERSION}; \
+ (cd server; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \
+ (cd etcdctl; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \
+ (cd etcdutl; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \
+ (cd tools/mod; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \
+ FAILPOINTS=true ./build;
+
+/tmp/etcd-v3.5.12-beforeSendWatchResponse/bin: $(GOPATH)/bin/gofail
+ rm -rf /tmp/etcd-v3.5.12-beforeSendWatchResponse/
+ mkdir -p /tmp/etcd-v3.5.12-beforeSendWatchResponse/
+ git clone --depth 1 --branch v3.5.12 https://github.com/etcd-io/etcd.git /tmp/etcd-v3.5.12-beforeSendWatchResponse/
+ cp -r ./tests/robustness/patches/beforeSendWatchResponse /tmp/etcd-v3.5.12-beforeSendWatchResponse/
+ cd /tmp/etcd-v3.5.12-beforeSendWatchResponse/; \
+ patch -l server/etcdserver/api/v3rpc/watch.go ./beforeSendWatchResponse/watch.patch; \
+ patch -l build.sh ./beforeSendWatchResponse/build.patch; \
+ go get go.etcd.io/gofail@${GOFAIL_VERSION}; \
+ (cd server; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \
+ (cd etcdctl; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \
+ (cd etcdutl; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \
+ (cd tools/mod; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \
+ FAILPOINTS=true ./build;
+
+/tmp/etcd-v3.5.13-compactBeforeSetFinishedCompact/bin: $(GOPATH)/bin/gofail
+ rm -rf /tmp/etcd-v3.5.13-compactBeforeSetFinishedCompact/
+ mkdir -p /tmp/etcd-v3.5.13-compactBeforeSetFinishedCompact/
+ git clone --depth 1 --branch v3.5.13 https://github.com/etcd-io/etcd.git /tmp/etcd-v3.5.13-compactBeforeSetFinishedCompact/
+ cp -r ./tests/robustness/patches/compactBeforeSetFinishedCompact /tmp/etcd-v3.5.13-compactBeforeSetFinishedCompact/
+ cd /tmp/etcd-v3.5.13-compactBeforeSetFinishedCompact/; \
+ patch -l server/mvcc/kvstore_compaction.go ./compactBeforeSetFinishedCompact/kvstore_compaction.patch; \
+ go get go.etcd.io/gofail@${GOFAIL_VERSION}; \
+ (cd server; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \
+ (cd etcdctl; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \
+ (cd etcdutl; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \
+ (cd tools/mod; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \
+ FAILPOINTS=true ./build;
+
+/tmp/etcd-release-3.5-failpoints/bin: $(GOPATH)/bin/gofail
+ rm -rf /tmp/etcd-release-3.5-failpoints/
+ mkdir -p /tmp/etcd-release-3.5-failpoints/
+ cd /tmp/etcd-release-3.5-failpoints/; \
+ git clone --depth 1 --branch release-3.5 https://github.com/etcd-io/etcd.git .; \
+ go get go.etcd.io/gofail@${GOFAIL_VERSION}; \
+ (cd tools/mod; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \
+ FAILPOINTS=true ./build;
+
+/tmp/etcd-v3.4.23-failpoints/bin:
+/tmp/etcd-v3.4.%-failpoints/bin: $(GOPATH)/bin/gofail
+ rm -rf /tmp/etcd-v3.4.$*-failpoints/
+ mkdir -p /tmp/etcd-v3.4.$*-failpoints/
+ cd /tmp/etcd-v3.4.$*-failpoints/; \
+ git clone --depth 1 --branch v3.4.$* https://github.com/etcd-io/etcd.git .; \
+ go get go.etcd.io/gofail@${GOFAIL_VERSION}; \
+ (cd tools/mod; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \
+ FAILPOINTS=true ./build;
+
+/tmp/etcd-release-3.4-failpoints/bin: $(GOPATH)/bin/gofail
+ rm -rf /tmp/etcd-release-3.4-failpoints/
+ mkdir -p /tmp/etcd-release-3.4-failpoints/
+ cd /tmp/etcd-release-3.4-failpoints/; \
+ git clone --depth 1 --branch release-3.4 https://github.com/etcd-io/etcd.git .; \
+ go get go.etcd.io/gofail@${GOFAIL_VERSION}; \
+ (cd tools/mod; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \
+ FAILPOINTS=true ./build;
diff --git a/tests/robustness/model/describe.go b/tests/robustness/model/describe.go
new file mode 100644
index 00000000000..71f2b15f26b
--- /dev/null
+++ b/tests/robustness/model/describe.go
@@ -0,0 +1,216 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "strings"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+)
+
+func describeEtcdResponse(request EtcdRequest, response MaybeEtcdResponse) string {
+ if response.Error != "" {
+ return fmt.Sprintf("err: %q", response.Error)
+ }
+ if response.ClientError != "" {
+ return fmt.Sprintf("err: %q", response.ClientError)
+ }
+ if response.Persisted {
+ if response.PersistedRevision != 0 {
+ return fmt.Sprintf("unknown, rev: %d", response.PersistedRevision)
+ }
+ return "unknown"
+ }
+ switch request.Type {
+ case Range:
+ return fmt.Sprintf("%s, rev: %d", describeRangeResponse(request.Range.RangeOptions, *response.Range), response.Revision)
+ case Txn:
+ return fmt.Sprintf("%s, rev: %d", describeTxnResponse(request.Txn, response.Txn), response.Revision)
+ case LeaseGrant, LeaseRevoke, Defragment:
+ if response.Revision == 0 {
+ return "ok"
+ }
+ return fmt.Sprintf("ok, rev: %d", response.Revision)
+ case Compact:
+ return "ok"
+ default:
+ return fmt.Sprintf("unknown request type %q", request.Type)
+ }
+}
+
+func describeEtcdRequest(request EtcdRequest) string {
+ switch request.Type {
+ case Range:
+ return describeRangeRequest(request.Range.RangeOptions, request.Range.Revision)
+ case Txn:
+ guaranteedTxnDescription := describeGuaranteedTxn(request.Txn)
+ if guaranteedTxnDescription != "" {
+ return guaranteedTxnDescription
+ }
+ onSuccess := describeEtcdOperations(request.Txn.OperationsOnSuccess)
+ if len(request.Txn.Conditions) != 0 {
+ if len(request.Txn.OperationsOnFailure) == 0 {
+ return fmt.Sprintf("if(%s).then(%s)", describeEtcdConditions(request.Txn.Conditions), onSuccess)
+ }
+ onFailure := describeEtcdOperations(request.Txn.OperationsOnFailure)
+ return fmt.Sprintf("if(%s).then(%s).else(%s)", describeEtcdConditions(request.Txn.Conditions), onSuccess, onFailure)
+ }
+ return onSuccess
+ case LeaseGrant:
+ return fmt.Sprintf("leaseGrant(%d)", request.LeaseGrant.LeaseID)
+ case LeaseRevoke:
+ return fmt.Sprintf("leaseRevoke(%d)", request.LeaseRevoke.LeaseID)
+ case Defragment:
+ return "defragment()"
+ case Compact:
+ return fmt.Sprintf("compact(%d)", request.Compact.Revision)
+ default:
+ return fmt.Sprintf("unknown request type %q", request.Type)
+ }
+}
+
+func describeGuaranteedTxn(txn *TxnRequest) string {
+ if len(txn.Conditions) != 1 || len(txn.OperationsOnSuccess) != 1 || len(txn.OperationsOnFailure) > 1 {
+ return ""
+ }
+ switch txn.OperationsOnSuccess[0].Type {
+ case PutOperation:
+ if txn.Conditions[0].Key != txn.OperationsOnSuccess[0].Put.Key || (len(txn.OperationsOnFailure) == 1 && txn.Conditions[0].Key != txn.OperationsOnFailure[0].Range.Start) {
+ return ""
+ }
+ if txn.Conditions[0].ExpectedRevision == 0 {
+ return fmt.Sprintf("guaranteedCreate(%q, %s)", txn.Conditions[0].Key, describeValueOrHash(txn.OperationsOnSuccess[0].Put.Value))
+ }
+ return fmt.Sprintf("guaranteedUpdate(%q, %s, mod_rev=%d)", txn.Conditions[0].Key, describeValueOrHash(txn.OperationsOnSuccess[0].Put.Value), txn.Conditions[0].ExpectedRevision)
+ case DeleteOperation:
+ if txn.Conditions[0].Key != txn.OperationsOnSuccess[0].Delete.Key || (len(txn.OperationsOnFailure) == 1 && txn.Conditions[0].Key != txn.OperationsOnFailure[0].Range.Start) {
+ return ""
+ }
+ return fmt.Sprintf("guaranteedDelete(%q, mod_rev=%d)", txn.Conditions[0].Key, txn.Conditions[0].ExpectedRevision)
+ }
+ return ""
+}
+
+func describeEtcdConditions(conds []EtcdCondition) string {
+ opsDescription := make([]string, len(conds))
+ for i := range conds {
+ opsDescription[i] = fmt.Sprintf("mod_rev(%s)==%d", conds[i].Key, conds[i].ExpectedRevision)
+ }
+ return strings.Join(opsDescription, " && ")
+}
+
+func describeEtcdOperations(ops []EtcdOperation) string {
+ opsDescription := make([]string, len(ops))
+ for i := range ops {
+ opsDescription[i] = describeEtcdOperation(ops[i])
+ }
+ return strings.Join(opsDescription, ", ")
+}
+
+func describeTxnResponse(request *TxnRequest, response *TxnResponse) string {
+ respDescription := make([]string, len(response.Results))
+ for i, result := range response.Results {
+ if response.Failure {
+ respDescription[i] = describeEtcdOperationResponse(request.OperationsOnFailure[i], result)
+ } else {
+ respDescription[i] = describeEtcdOperationResponse(request.OperationsOnSuccess[i], result)
+ }
+ }
+ description := strings.Join(respDescription, ", ")
+ if len(request.Conditions) == 0 {
+ return description
+ }
+ if response.Failure {
+ return fmt.Sprintf("failure(%s)", description)
+ }
+ return fmt.Sprintf("success(%s)", description)
+}
+
+func describeEtcdOperation(op EtcdOperation) string {
+ switch op.Type {
+ case RangeOperation:
+ return describeRangeRequest(op.Range, 0)
+ case PutOperation:
+ if op.Put.LeaseID != 0 {
+ return fmt.Sprintf("put(%q, %s, %d)", op.Put.Key, describeValueOrHash(op.Put.Value), op.Put.LeaseID)
+ }
+ return fmt.Sprintf("put(%q, %s)", op.Put.Key, describeValueOrHash(op.Put.Value))
+ case DeleteOperation:
+ return fmt.Sprintf("delete(%q)", op.Delete.Key)
+ default:
+ return fmt.Sprintf("unknown operation type %q", op.Type)
+ }
+}
+
+func describeRangeRequest(opts RangeOptions, revision int64) string {
+ kwargs := []string{}
+ if revision != 0 {
+ kwargs = append(kwargs, fmt.Sprintf("rev=%d", revision))
+ }
+ if opts.Limit != 0 {
+ kwargs = append(kwargs, fmt.Sprintf("limit=%d", opts.Limit))
+ }
+ kwargsString := strings.Join(kwargs, ", ")
+ if kwargsString != "" {
+ kwargsString = ", " + kwargsString
+ }
+ switch {
+ case opts.End == "":
+ return fmt.Sprintf("get(%q%s)", opts.Start, kwargsString)
+ case opts.End == clientv3.GetPrefixRangeEnd(opts.Start):
+ return fmt.Sprintf("list(%q%s)", opts.Start, kwargsString)
+ default:
+ return fmt.Sprintf("range(%q..%q%s)", opts.Start, opts.End, kwargsString)
+ }
+}
+
+func describeEtcdOperationResponse(op EtcdOperation, resp EtcdOperationResult) string {
+ switch op.Type {
+ case RangeOperation:
+ return describeRangeResponse(op.Range, resp.RangeResponse)
+ case PutOperation:
+ return "ok"
+ case DeleteOperation:
+ return fmt.Sprintf("deleted: %d", resp.Deleted)
+ default:
+ return fmt.Sprintf("unknown operation type %q", op.Type)
+ }
+}
+
+func describeRangeResponse(request RangeOptions, response RangeResponse) string {
+ if request.End != "" {
+ kvs := make([]string, len(response.KVs))
+ for i, kv := range response.KVs {
+ kvs[i] = describeValueOrHash(kv.Value)
+ }
+ return fmt.Sprintf("[%s], count: %d", strings.Join(kvs, ","), response.Count)
+ }
+
+ if len(response.KVs) == 0 {
+ return "nil"
+ }
+ return describeValueOrHash(response.KVs[0].Value)
+}
+
+func describeValueOrHash(value ValueOrHash) string {
+ if value.Hash != 0 {
+ return fmt.Sprintf("hash: %d", value.Hash)
+ }
+ if value.Value == "" {
+ return "nil"
+ }
+ return fmt.Sprintf("%q", value.Value)
+}
diff --git a/tests/robustness/model/describe_test.go b/tests/robustness/model/describe_test.go
new file mode 100644
index 00000000000..8ef2439ad8d
--- /dev/null
+++ b/tests/robustness/model/describe_test.go
@@ -0,0 +1,176 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "errors"
+ "testing"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestModelDescribe(t *testing.T) {
+ tcs := []struct {
+ req EtcdRequest
+ resp MaybeEtcdResponse
+ expectDescribe string
+ }{
+ {
+ req: getRequest("key1"),
+ resp: emptyGetResponse(1),
+ expectDescribe: `get("key1") -> nil, rev: 1`,
+ },
+ {
+ req: getRequest("key2"),
+ resp: getResponse("key", "2", 2, 2),
+ expectDescribe: `get("key2") -> "2", rev: 2`,
+ },
+ {
+ req: getRequest("key2b"),
+ resp: getResponse("key2b", "01234567890123456789", 2, 2),
+ expectDescribe: `get("key2b") -> hash: 2945867837, rev: 2`,
+ },
+ {
+ req: putRequest("key3", "3"),
+ resp: putResponse(3),
+ expectDescribe: `put("key3", "3") -> ok, rev: 3`,
+ },
+ {
+ req: putWithLeaseRequest("key3b", "3b", 3),
+ resp: putResponse(3),
+ expectDescribe: `put("key3b", "3b", 3) -> ok, rev: 3`,
+ },
+ {
+ req: putRequest("key3c", "01234567890123456789"),
+ resp: putResponse(3),
+ expectDescribe: `put("key3c", hash: 2945867837) -> ok, rev: 3`,
+ },
+ {
+ req: putRequest("key4", "4"),
+ resp: failedResponse(errors.New("failed")),
+ expectDescribe: `put("key4", "4") -> err: "failed"`,
+ },
+ {
+ req: putRequest("key4b", "4b"),
+ resp: partialResponse(42),
+ expectDescribe: `put("key4b", "4b") -> unknown, rev: 42`,
+ },
+ {
+ req: deleteRequest("key5"),
+ resp: deleteResponse(1, 5),
+ expectDescribe: `delete("key5") -> deleted: 1, rev: 5`,
+ },
+ {
+ req: deleteRequest("key6"),
+ resp: failedResponse(errors.New("failed")),
+ expectDescribe: `delete("key6") -> err: "failed"`,
+ },
+ {
+ req: compareRevisionAndPutRequest("key7", 7, "77"),
+ resp: txnEmptyResponse(false, 7),
+ expectDescribe: `guaranteedUpdate("key7", "77", mod_rev=7) -> failure(), rev: 7`,
+ },
+ {
+ req: compareRevisionAndPutRequest("key8", 8, "88"),
+ resp: txnPutResponse(true, 8),
+ expectDescribe: `guaranteedUpdate("key8", "88", mod_rev=8) -> success(ok), rev: 8`,
+ },
+ {
+ req: compareRevisionAndPutRequest("key8", 0, "89"),
+ resp: txnPutResponse(true, 8),
+ expectDescribe: `guaranteedCreate("key8", "89") -> success(ok), rev: 8`,
+ },
+ {
+ req: compareRevisionAndPutRequest("key9", 9, "99"),
+ resp: failedResponse(errors.New("failed")),
+ expectDescribe: `guaranteedUpdate("key9", "99", mod_rev=9) -> err: "failed"`,
+ },
+ {
+ req: txnRequest([]EtcdCondition{{Key: "key9b", ExpectedRevision: 9}}, []EtcdOperation{{Type: PutOperation, Put: PutOptions{Key: "key9b", Value: ValueOrHash{Value: "991"}}}}, []EtcdOperation{{Type: RangeOperation, Range: RangeOptions{Start: "key9b"}}}),
+ resp: txnResponse([]EtcdOperationResult{{}}, true, 10),
+ expectDescribe: `guaranteedUpdate("key9b", "991", mod_rev=9) -> success(ok), rev: 10`,
+ },
+ {
+ req: txnRequest([]EtcdCondition{{Key: "key9c", ExpectedRevision: 9}}, []EtcdOperation{{Type: PutOperation, Put: PutOptions{Key: "key9c", Value: ValueOrHash{Value: "992"}}}}, []EtcdOperation{{Type: RangeOperation, Range: RangeOptions{Start: "key9c"}}}),
+ resp: txnResponse([]EtcdOperationResult{{RangeResponse: RangeResponse{KVs: []KeyValue{{Key: "key9c", ValueRevision: ValueRevision{Value: ValueOrHash{Value: "993"}, ModRevision: 10}}}}}}, false, 10),
+ expectDescribe: `guaranteedUpdate("key9c", "992", mod_rev=9) -> failure("993"), rev: 10`,
+ },
+ {
+ req: txnRequest(nil, []EtcdOperation{{Type: RangeOperation, Range: RangeOptions{Start: "10"}}, {Type: PutOperation, Put: PutOptions{Key: "11", Value: ValueOrHash{Value: "111"}}}, {Type: DeleteOperation, Delete: DeleteOptions{Key: "12"}}}, nil),
+ resp: txnResponse([]EtcdOperationResult{{RangeResponse: RangeResponse{KVs: []KeyValue{{ValueRevision: ValueRevision{Value: ValueOrHash{Value: "110"}}}}}}, {}, {Deleted: 1}}, true, 10),
+ expectDescribe: `get("10"), put("11", "111"), delete("12") -> "110", ok, deleted: 1, rev: 10`,
+ },
+ {
+ req: txnRequest([]EtcdCondition{{Key: "key11", ExpectedRevision: 11}}, []EtcdOperation{{Type: PutOperation, Put: PutOptions{Key: "key11", Value: ValueOrHash{Value: "11"}}}}, []EtcdOperation{{Type: RangeOperation, Range: RangeOptions{Start: "key12"}}}),
+ resp: txnResponse([]EtcdOperationResult{{}}, true, 11),
+ expectDescribe: `if(mod_rev(key11)==11).then(put("key11", "11")).else(get("key12")) -> success(ok), rev: 11`,
+ },
+ {
+ req: txnRequest([]EtcdCondition{{Key: "key11", ExpectedRevision: 11}}, []EtcdOperation{{Type: PutOperation, Put: PutOptions{Key: "key12", Value: ValueOrHash{Value: "11"}}}}, nil),
+ resp: txnResponse([]EtcdOperationResult{{}}, true, 11),
+ expectDescribe: `if(mod_rev(key11)==11).then(put("key12", "11")) -> success(ok), rev: 11`,
+ },
+ {
+ req: defragmentRequest(),
+ resp: defragmentResponse(10),
+ expectDescribe: `defragment() -> ok, rev: 10`,
+ },
+ {
+ req: listRequest("key11", 0),
+ resp: rangeResponse(nil, 0, 11),
+ expectDescribe: `list("key11") -> [], count: 0, rev: 11`,
+ },
+ {
+ req: listRequest("key12", 0),
+ resp: rangeResponse([]*mvccpb.KeyValue{{Value: []byte("12")}}, 2, 12),
+ expectDescribe: `list("key12") -> ["12"], count: 2, rev: 12`,
+ },
+ {
+ req: listRequest("key13", 0),
+ resp: rangeResponse([]*mvccpb.KeyValue{{Value: []byte("01234567890123456789")}}, 1, 13),
+ expectDescribe: `list("key13") -> [hash: 2945867837], count: 1, rev: 13`,
+ },
+ {
+ req: listRequest("key14", 14),
+ resp: rangeResponse(nil, 0, 14),
+ expectDescribe: `list("key14", limit=14) -> [], count: 0, rev: 14`,
+ },
+ {
+ req: staleListRequest("key15", 0, 15),
+ resp: rangeResponse(nil, 0, 15),
+ expectDescribe: `list("key15", rev=15) -> [], count: 0, rev: 15`,
+ },
+ {
+ req: staleListRequest("key15", 2, 15),
+ resp: rangeResponse(nil, 0, 15),
+ expectDescribe: `list("key15", rev=15, limit=2) -> [], count: 0, rev: 15`,
+ },
+ {
+ req: rangeRequest("key16", "key16b", 0),
+ resp: rangeResponse(nil, 0, 16),
+ expectDescribe: `range("key16".."key16b") -> [], count: 0, rev: 16`,
+ },
+ {
+ req: rangeRequest("key16", "key16b", 2),
+ resp: rangeResponse(nil, 0, 16),
+ expectDescribe: `range("key16".."key16b", limit=2) -> [], count: 0, rev: 16`,
+ },
+ }
+ for _, tc := range tcs {
+ assert.Equal(t, tc.expectDescribe, NonDeterministicModel.DescribeOperation(tc.req, tc.resp))
+ }
+}
diff --git a/tests/robustness/model/deterministic.go b/tests/robustness/model/deterministic.go
new file mode 100644
index 00000000000..6f570ce0825
--- /dev/null
+++ b/tests/robustness/model/deterministic.go
@@ -0,0 +1,460 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "hash/fnv"
+ "maps"
+ "reflect"
+ "sort"
+
+ "github.com/anishathalye/porcupine"
+
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+)
+
+// DeterministicModel assumes a deterministic execution of etcd requests: every
+// request the client issued was executed and persisted by etcd. This
+// assumption is good for simulating etcd behavior (aka writing a fake), but not
+// for validating correctness, as requests might be lost or interrupted. It
+// requires perfect knowledge of what happened to each request, which is not
+// possible in real systems.
+//
+// The model can still respond with an error or a partial response:
+// - Error for known etcd errors, like a future revision or a compacted revision.
+// - Incomplete response when the request is correct, but the model doesn't have
+// enough information to provide a full response. For example stale reads, as the
+// model doesn't store the whole change history as real etcd does.
+var DeterministicModel = porcupine.Model{
+ Init: func() any {
+ data, err := json.Marshal(freshEtcdState())
+ if err != nil {
+ panic(err)
+ }
+ return string(data)
+ },
+ Step: func(st any, in any, out any) (bool, any) {
+ var s EtcdState
+ err := json.Unmarshal([]byte(st.(string)), &s)
+ if err != nil {
+ panic(err)
+ }
+ ok, s := s.apply(in.(EtcdRequest), out.(EtcdResponse))
+ data, err := json.Marshal(s)
+ if err != nil {
+ panic(err)
+ }
+ return ok, string(data)
+ },
+ DescribeOperation: func(in, out any) string {
+ return fmt.Sprintf("%s -> %s", describeEtcdRequest(in.(EtcdRequest)), describeEtcdResponse(in.(EtcdRequest), MaybeEtcdResponse{EtcdResponse: out.(EtcdResponse)}))
+ },
+}
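A minimal sketch of stepping the model directly, assuming the package is importable as go.etcd.io/etcd/tests/v3/robustness/model; the request and response literals are hand-built stand-ins for the package's internal test helpers and are not part of the patch:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/tests/v3/robustness/model"
)

func main() {
	// Start from the model's initial state (a JSON-serialized EtcdState).
	state := model.DeterministicModel.Init()

	// A put is expressed as a Txn with a single PutOperation and no conditions.
	req := model.EtcdRequest{
		Type: model.Txn,
		Txn: &model.TxnRequest{
			OperationsOnSuccess: []model.EtcdOperation{
				{Type: model.PutOperation, Put: model.PutOptions{Key: "key", Value: model.ToValueOrHash("1")}},
			},
		},
	}
	// On a fresh store the put must move the revision from 1 to 2.
	resp := model.EtcdResponse{
		Txn:      &model.TxnResponse{Results: []model.EtcdOperationResult{{}}},
		Revision: 2,
	}

	ok, nextState := model.DeterministicModel.Step(state, req, resp)
	fmt.Println(ok) // true: the observed response matches what the model predicts
	_ = nextState
}
```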
+
+type EtcdState struct {
+ Revision int64
+ CompactRevision int64
+ KeyValues map[string]ValueRevision
+ KeyLeases map[string]int64
+ Leases map[int64]EtcdLease
+}
+
+func (s EtcdState) apply(request EtcdRequest, response EtcdResponse) (bool, EtcdState) {
+ newState, modelResponse := s.Step(request)
+ return Match(MaybeEtcdResponse{EtcdResponse: response}, modelResponse), newState
+}
+
+func (s EtcdState) DeepCopy() EtcdState {
+ newState := EtcdState{
+ Revision: s.Revision,
+ CompactRevision: s.CompactRevision,
+ }
+
+ newState.KeyValues = maps.Clone(s.KeyValues)
+ newState.KeyLeases = maps.Clone(s.KeyLeases)
+
+ newLeases := map[int64]EtcdLease{}
+ for key, val := range s.Leases {
+ newLeases[key] = val.DeepCopy()
+ }
+ newState.Leases = newLeases
+ return newState
+}
+
+func freshEtcdState() EtcdState {
+ return EtcdState{
+ Revision: 1,
+ // Start from CompactRevision equal to -1, as etcd allows clients to compact revision 0 for some reason.
+ CompactRevision: -1,
+ KeyValues: map[string]ValueRevision{},
+ KeyLeases: map[string]int64{},
+ Leases: map[int64]EtcdLease{},
+ }
+}
+
+// Step handles a successful request, returning the updated state and the response it would generate.
+func (s EtcdState) Step(request EtcdRequest) (EtcdState, MaybeEtcdResponse) {
+ newState := s.DeepCopy()
+
+ switch request.Type {
+ case Range:
+ if request.Range.Revision == 0 || request.Range.Revision == newState.Revision {
+ resp := newState.getRange(request.Range.RangeOptions)
+ return newState, MaybeEtcdResponse{EtcdResponse: EtcdResponse{Range: &resp, Revision: newState.Revision}}
+ }
+ if request.Range.Revision > newState.Revision {
+ return newState, MaybeEtcdResponse{Error: ErrEtcdFutureRev.Error()}
+ }
+ if request.Range.Revision < newState.CompactRevision {
+ return newState, MaybeEtcdResponse{EtcdResponse: EtcdResponse{ClientError: mvcc.ErrCompacted.Error()}}
+ }
+ return newState, MaybeEtcdResponse{Persisted: true, PersistedRevision: newState.Revision}
+ case Txn:
+ failure := false
+ for _, cond := range request.Txn.Conditions {
+ if val := newState.KeyValues[cond.Key]; val.ModRevision != cond.ExpectedRevision {
+ failure = true
+ break
+ }
+ }
+ operations := request.Txn.OperationsOnSuccess
+ if failure {
+ operations = request.Txn.OperationsOnFailure
+ }
+ opResp := make([]EtcdOperationResult, len(operations))
+ increaseRevision := false
+ for i, op := range operations {
+ switch op.Type {
+ case RangeOperation:
+ opResp[i] = EtcdOperationResult{
+ RangeResponse: newState.getRange(op.Range),
+ }
+ case PutOperation:
+ _, leaseExists := newState.Leases[op.Put.LeaseID]
+ if op.Put.LeaseID != 0 && !leaseExists {
+ break
+ }
+ newState.KeyValues[op.Put.Key] = ValueRevision{
+ Value: op.Put.Value,
+ ModRevision: newState.Revision + 1,
+ }
+ increaseRevision = true
+ newState = detachFromOldLease(newState, op.Put.Key)
+ if leaseExists {
+ newState = attachToNewLease(newState, op.Put.LeaseID, op.Put.Key)
+ }
+ case DeleteOperation:
+ if _, ok := newState.KeyValues[op.Delete.Key]; ok {
+ delete(newState.KeyValues, op.Delete.Key)
+ increaseRevision = true
+ newState = detachFromOldLease(newState, op.Delete.Key)
+ opResp[i].Deleted = 1
+ }
+ default:
+ panic("unsupported operation")
+ }
+ }
+ if increaseRevision {
+ newState.Revision++
+ }
+ return newState, MaybeEtcdResponse{EtcdResponse: EtcdResponse{Txn: &TxnResponse{Failure: failure, Results: opResp}, Revision: newState.Revision}}
+ case LeaseGrant:
+ lease := EtcdLease{
+ LeaseID: request.LeaseGrant.LeaseID,
+ Keys: map[string]struct{}{},
+ }
+ newState.Leases[request.LeaseGrant.LeaseID] = lease
+ return newState, MaybeEtcdResponse{EtcdResponse: EtcdResponse{Revision: newState.Revision, LeaseGrant: &LeaseGrantReponse{}}}
+ case LeaseRevoke:
+ // Delete the keys attached to the lease
+ keyDeleted := false
+ for key := range newState.Leases[request.LeaseRevoke.LeaseID].Keys {
+ // same as delete.
+ if _, ok := newState.KeyValues[key]; ok {
+ if !keyDeleted {
+ keyDeleted = true
+ }
+ delete(newState.KeyValues, key)
+ delete(newState.KeyLeases, key)
+ }
+ }
+ // delete the lease
+ delete(newState.Leases, request.LeaseRevoke.LeaseID)
+ if keyDeleted {
+ newState.Revision++
+ }
+ return newState, MaybeEtcdResponse{EtcdResponse: EtcdResponse{Revision: newState.Revision, LeaseRevoke: &LeaseRevokeResponse{}}}
+ case Defragment:
+ return newState, MaybeEtcdResponse{EtcdResponse: EtcdResponse{Defragment: &DefragmentResponse{}, Revision: newState.Revision}}
+ case Compact:
+ if request.Compact.Revision <= newState.CompactRevision {
+ return newState, MaybeEtcdResponse{EtcdResponse: EtcdResponse{ClientError: mvcc.ErrCompacted.Error()}}
+ }
+ newState.CompactRevision = request.Compact.Revision
+ // Set a fake revision, as compaction returns a non-linearizable revision.
+ // TODO: Model non-linearizable response revision in model.
+ return newState, MaybeEtcdResponse{EtcdResponse: EtcdResponse{Compact: &CompactResponse{}, Revision: -1}}
+ default:
+ panic(fmt.Sprintf("Unknown request type: %v", request.Type))
+ }
+}
+
+func (s EtcdState) getRange(options RangeOptions) RangeResponse {
+ response := RangeResponse{
+ KVs: []KeyValue{},
+ }
+ if options.End != "" {
+ var count int64
+ for k, v := range s.KeyValues {
+ if k >= options.Start && k < options.End {
+ response.KVs = append(response.KVs, KeyValue{Key: k, ValueRevision: v})
+ count++
+ }
+ }
+ sort.Slice(response.KVs, func(j, k int) bool {
+ return response.KVs[j].Key < response.KVs[k].Key
+ })
+ if options.Limit != 0 && count > options.Limit {
+ response.KVs = response.KVs[:options.Limit]
+ }
+ response.Count = count
+ } else {
+ value, ok := s.KeyValues[options.Start]
+ if ok {
+ response.KVs = append(response.KVs, KeyValue{
+ Key: options.Start,
+ ValueRevision: value,
+ })
+ response.Count = 1
+ }
+ }
+ return response
+}
+
+func detachFromOldLease(s EtcdState, key string) EtcdState {
+ if oldLeaseID, ok := s.KeyLeases[key]; ok {
+ delete(s.Leases[oldLeaseID].Keys, key)
+ delete(s.KeyLeases, key)
+ }
+ return s
+}
+
+func attachToNewLease(s EtcdState, leaseID int64, key string) EtcdState {
+ s.KeyLeases[key] = leaseID
+ s.Leases[leaseID].Keys[key] = leased
+ return s
+}
+
+type RequestType string
+
+const (
+ Range RequestType = "range"
+ Txn RequestType = "txn"
+ LeaseGrant RequestType = "leaseGrant"
+ LeaseRevoke RequestType = "leaseRevoke"
+ Defragment RequestType = "defragment"
+ Compact RequestType = "compact"
+)
+
+type EtcdRequest struct {
+ Type RequestType
+ LeaseGrant *LeaseGrantRequest
+ LeaseRevoke *LeaseRevokeRequest
+ Range *RangeRequest
+ Txn *TxnRequest
+ Defragment *DefragmentRequest
+ Compact *CompactRequest
+}
+
+func (r *EtcdRequest) IsRead() bool {
+ if r.Type == Range {
+ return true
+ }
+ if r.Type != Txn {
+ return false
+ }
+ for _, op := range append(r.Txn.OperationsOnSuccess, r.Txn.OperationsOnFailure...) {
+ if op.Type != RangeOperation {
+ return false
+ }
+ }
+ return true
+}
+
+type RangeRequest struct {
+ RangeOptions
+ Revision int64
+}
+
+type RangeOptions struct {
+ Start string
+ End string
+ Limit int64
+}
+
+type PutOptions struct {
+ Key string
+ Value ValueOrHash
+ LeaseID int64
+}
+
+type DeleteOptions struct {
+ Key string
+}
+
+type TxnRequest struct {
+ Conditions []EtcdCondition
+ OperationsOnSuccess []EtcdOperation
+ OperationsOnFailure []EtcdOperation
+}
+
+type EtcdCondition struct {
+ Key string
+ ExpectedRevision int64
+}
+
+type EtcdOperation struct {
+ Type OperationType
+ Range RangeOptions
+ Put PutOptions
+ Delete DeleteOptions
+}
+
+type OperationType string
+
+const (
+ RangeOperation OperationType = "range-operation"
+ PutOperation OperationType = "put-operation"
+ DeleteOperation OperationType = "delete-operation"
+)
+
+type LeaseGrantRequest struct {
+ LeaseID int64
+}
+type LeaseRevokeRequest struct {
+ LeaseID int64
+}
+type DefragmentRequest struct{}
+
+// MaybeEtcdResponse extends EtcdResponse to include partial information about responses to a request.
+// Possible response state information:
+// * Normal response. Client observed the response. Only EtcdResponse is set.
+// * Persisted. Client didn't observe the response, but we know it was persisted by etcd. Only Persisted is set.
+// * Persisted with Revision. Client didn't observe the response, but we know that it was persisted and its revision. Both Persisted and PersistedRevision are set.
+// * Error response. Client observed an error, but we don't know if it was persisted. Only Error is set.
+type MaybeEtcdResponse struct {
+ EtcdResponse
+ Persisted bool
+ PersistedRevision int64
+ Error string
+}
+
+var ErrEtcdFutureRev = errors.New("future rev")
+
+type EtcdResponse struct {
+ Txn *TxnResponse
+ Range *RangeResponse
+ LeaseGrant *LeaseGrantReponse
+ LeaseRevoke *LeaseRevokeResponse
+ Defragment *DefragmentResponse
+ Compact *CompactResponse
+ ClientError string
+ Revision int64
+}
+
+func Match(r1, r2 MaybeEtcdResponse) bool {
+ r1Revision := r1.Revision
+ if r1.Persisted {
+ r1Revision = r1.PersistedRevision
+ }
+ r2Revision := r2.Revision
+ if r2.Persisted {
+ r2Revision = r2.PersistedRevision
+ }
+ return (r1.Persisted && r1.PersistedRevision == 0) || (r2.Persisted && r2.PersistedRevision == 0) || ((r1.Persisted || r2.Persisted) && (r1.Error != "" || r2.Error != "" || r1Revision == r2Revision)) || reflect.DeepEqual(r1, r2)
+}
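A small sketch of the matching rules under the same import-path assumption; the revisions are arbitrary examples:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/tests/v3/robustness/model"
)

func main() {
	observed := model.MaybeEtcdResponse{EtcdResponse: model.EtcdResponse{Revision: 5}}

	// A response only known to be persisted, with no known revision, matches anything.
	persistedUnknown := model.MaybeEtcdResponse{Persisted: true}
	fmt.Println(model.Match(observed, persistedUnknown)) // true

	// A persisted response with a known revision must agree on the revision.
	persistedAt5 := model.MaybeEtcdResponse{Persisted: true, PersistedRevision: 5}
	persistedAt6 := model.MaybeEtcdResponse{Persisted: true, PersistedRevision: 6}
	fmt.Println(model.Match(observed, persistedAt5)) // true
	fmt.Println(model.Match(observed, persistedAt6)) // false
}
```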
+
+type TxnResponse struct {
+ Failure bool
+ Results []EtcdOperationResult
+}
+
+type RangeResponse struct {
+ KVs []KeyValue
+ Count int64
+}
+
+type LeaseGrantReponse struct {
+ LeaseID int64
+}
+type (
+ LeaseRevokeResponse struct{}
+ DefragmentResponse struct{}
+)
+
+type EtcdOperationResult struct {
+ RangeResponse
+ Deleted int64
+}
+
+type KeyValue struct {
+ Key string
+ ValueRevision
+}
+
+var leased = struct{}{}
+
+type EtcdLease struct {
+ LeaseID int64
+ Keys map[string]struct{}
+}
+
+func (el EtcdLease) DeepCopy() EtcdLease {
+ return EtcdLease{
+ LeaseID: el.LeaseID,
+ Keys: maps.Clone(el.Keys),
+ }
+}
+
+type ValueRevision struct {
+ Value ValueOrHash
+ ModRevision int64
+}
+
+type ValueOrHash struct {
+ Value string
+ Hash uint32
+}
+
+func ToValueOrHash(value string) ValueOrHash {
+ v := ValueOrHash{}
+ if len(value) < 20 {
+ v.Value = value
+ } else {
+ h := fnv.New32a()
+ h.Write([]byte(value))
+ v.Hash = h.Sum32()
+ }
+ return v
+}
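A quick sketch of the value-or-hash behaviour: values of 20 characters or more are replaced by their 32-bit FNV-1a hash (the expected hash in the comment matches the value used in describe_test.go above); the snippet is illustrative only:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/tests/v3/robustness/model"
)

func main() {
	short := model.ToValueOrHash("short")                // kept verbatim (< 20 characters)
	long := model.ToValueOrHash("01234567890123456789") // 20 characters, so only the hash is kept

	fmt.Printf("%q %d\n", short.Value, short.Hash) // "short" 0
	fmt.Printf("%q %d\n", long.Value, long.Hash)   // "" 2945867837
}
```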
+
+type CompactResponse struct{}
+
+type CompactRequest struct {
+ Revision int64
+}
diff --git a/tests/robustness/model/deterministic_test.go b/tests/robustness/model/deterministic_test.go
new file mode 100644
index 00000000000..5e7bc0f0585
--- /dev/null
+++ b/tests/robustness/model/deterministic_test.go
@@ -0,0 +1,407 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+)
+
+func TestModelDeterministic(t *testing.T) {
+ for _, tc := range commonTestScenarios {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ state := DeterministicModel.Init()
+ for _, op := range tc.operations {
+ ok, newState := DeterministicModel.Step(state, op.req, op.resp.EtcdResponse)
+ if op.expectFailure == ok {
+ t.Logf("state: %v", state)
+ t.Errorf("Unexpected operation result, expect: %v, got: %v, operation: %s", !op.expectFailure, ok, DeterministicModel.DescribeOperation(op.req, op.resp.EtcdResponse))
+ var loadedState EtcdState
+ err := json.Unmarshal([]byte(state.(string)), &loadedState)
+ require.NoErrorf(t, err, "Failed to load state")
+ _, resp := loadedState.Step(op.req)
+ t.Errorf("Response diff: %s", cmp.Diff(op.resp, resp))
+ break
+ }
+ if ok {
+ state = newState
+ t.Logf("state: %v", state)
+ }
+ }
+ })
+ }
+}
+
+type modelTestCase struct {
+ name string
+ operations []testOperation
+}
+
+type testOperation struct {
+ req EtcdRequest
+ resp MaybeEtcdResponse
+ expectFailure bool
+}
+
+var commonTestScenarios = []modelTestCase{
+ {
+ name: "Get response data should match put",
+ operations: []testOperation{
+ {req: putRequest("key1", "11"), resp: putResponse(2)},
+ {req: putRequest("key2", "12"), resp: putResponse(3)},
+ {req: getRequest("key1"), resp: getResponse("key1", "11", 2, 2), expectFailure: true},
+ {req: getRequest("key1"), resp: getResponse("key1", "12", 2, 2), expectFailure: true},
+ {req: getRequest("key1"), resp: getResponse("key1", "12", 3, 3), expectFailure: true},
+ {req: getRequest("key1"), resp: getResponse("key1", "11", 2, 3)},
+ {req: getRequest("key2"), resp: getResponse("key2", "11", 3, 3), expectFailure: true},
+ {req: getRequest("key2"), resp: getResponse("key2", "12", 2, 2), expectFailure: true},
+ {req: getRequest("key2"), resp: getResponse("key2", "11", 2, 2), expectFailure: true},
+ {req: getRequest("key2"), resp: getResponse("key2", "12", 3, 3)},
+ },
+ },
+ {
+ name: "Range response data should match put",
+ operations: []testOperation{
+ {req: putRequest("key1", "1"), resp: putResponse(2)},
+ {req: putRequest("key2", "2"), resp: putResponse(3)},
+ {req: listRequest("key", 0), resp: rangeResponse([]*mvccpb.KeyValue{{Key: []byte("key1"), Value: []byte("1"), ModRevision: 2}, {Key: []byte("key2"), Value: []byte("2"), ModRevision: 3}}, 2, 3)},
+ {req: listRequest("key", 0), resp: rangeResponse([]*mvccpb.KeyValue{{Key: []byte("key1"), Value: []byte("1"), ModRevision: 2}, {Key: []byte("key2"), Value: []byte("2"), ModRevision: 3}}, 2, 3)},
+ },
+ },
+ {
+ name: "Range limit should reduce number of kvs, but maintain count",
+ operations: []testOperation{
+ {req: putRequest("key1", "1"), resp: putResponse(2)},
+ {req: putRequest("key2", "2"), resp: putResponse(3)},
+ {req: putRequest("key3", "3"), resp: putResponse(4)},
+ {req: listRequest("key", 0), resp: rangeResponse([]*mvccpb.KeyValue{
+ {Key: []byte("key1"), Value: []byte("1"), ModRevision: 2},
+ {Key: []byte("key2"), Value: []byte("2"), ModRevision: 3},
+ {Key: []byte("key3"), Value: []byte("3"), ModRevision: 4},
+ }, 3, 4)},
+ {req: listRequest("key", 4), resp: rangeResponse([]*mvccpb.KeyValue{
+ {Key: []byte("key1"), Value: []byte("1"), ModRevision: 2},
+ {Key: []byte("key2"), Value: []byte("2"), ModRevision: 3},
+ {Key: []byte("key3"), Value: []byte("3"), ModRevision: 4},
+ }, 3, 4)},
+ {req: listRequest("key", 3), resp: rangeResponse([]*mvccpb.KeyValue{
+ {Key: []byte("key1"), Value: []byte("1"), ModRevision: 2},
+ {Key: []byte("key2"), Value: []byte("2"), ModRevision: 3},
+ {Key: []byte("key3"), Value: []byte("3"), ModRevision: 4},
+ }, 3, 4)},
+ {req: listRequest("key", 2), resp: rangeResponse([]*mvccpb.KeyValue{
+ {Key: []byte("key1"), Value: []byte("1"), ModRevision: 2},
+ {Key: []byte("key2"), Value: []byte("2"), ModRevision: 3},
+ }, 3, 4)},
+ {req: listRequest("key", 1), resp: rangeResponse([]*mvccpb.KeyValue{
+ {Key: []byte("key1"), Value: []byte("1"), ModRevision: 2},
+ }, 3, 4)},
+ },
+ },
+ {
+ name: "Range response should be ordered by key",
+ operations: []testOperation{
+ {req: putRequest("key3", "3"), resp: putResponse(2)},
+ {req: putRequest("key2", "1"), resp: putResponse(3)},
+ {req: putRequest("key1", "2"), resp: putResponse(4)},
+ {req: listRequest("key", 0), resp: rangeResponse([]*mvccpb.KeyValue{
+ {Key: []byte("key1"), Value: []byte("2"), ModRevision: 4},
+ {Key: []byte("key2"), Value: []byte("1"), ModRevision: 3},
+ {Key: []byte("key3"), Value: []byte("3"), ModRevision: 2},
+ }, 3, 4)},
+ {req: listRequest("key", 0), resp: rangeResponse([]*mvccpb.KeyValue{
+ {Key: []byte("key2"), Value: []byte("1"), ModRevision: 3},
+ {Key: []byte("key1"), Value: []byte("2"), ModRevision: 4},
+ {Key: []byte("key3"), Value: []byte("3"), ModRevision: 2},
+ }, 3, 4), expectFailure: true},
+ {req: listRequest("key", 0), resp: rangeResponse([]*mvccpb.KeyValue{
+ {Key: []byte("key3"), Value: []byte("3"), ModRevision: 2},
+ {Key: []byte("key2"), Value: []byte("1"), ModRevision: 3},
+ {Key: []byte("key1"), Value: []byte("2"), ModRevision: 4},
+ }, 3, 4), expectFailure: true},
+ },
+ },
+ {
+ name: "Range response data should match large put",
+ operations: []testOperation{
+ {req: putRequest("key", "012345678901234567890"), resp: putResponse(2)},
+ {req: getRequest("key"), resp: getResponse("key", "123456789012345678901", 2, 2), expectFailure: true},
+ {req: getRequest("key"), resp: getResponse("key", "012345678901234567890", 2, 2)},
+ {req: putRequest("key", "123456789012345678901"), resp: putResponse(3)},
+ {req: getRequest("key"), resp: getResponse("key", "123456789012345678901", 3, 3)},
+ {req: getRequest("key"), resp: getResponse("key", "012345678901234567890", 3, 3), expectFailure: true},
+ },
+ },
+ {
+ name: "Stale Get doesn't need to match put if asking about old revision",
+ operations: []testOperation{
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: staleGetRequest("key", 1), resp: getResponse("key", "2", 2, 2)},
+ {req: staleGetRequest("key", 1), resp: getResponse("key", "1", 2, 2)},
+ },
+ },
+ {
+ name: "Stale Get need to match put if asking about matching revision",
+ operations: []testOperation{
+ {req: putRequest("key1", "1"), resp: putResponse(2)},
+ {req: staleGetRequest("key1", 2), resp: getResponse("key1", "1", 3, 2), expectFailure: true},
+ {req: staleGetRequest("key1", 2), resp: getResponse("key1", "1", 2, 3), expectFailure: true},
+ {req: staleGetRequest("key1", 2), resp: getResponse("key1", "2", 2, 2), expectFailure: true},
+ {req: staleGetRequest("key1", 2), resp: getResponse("key1", "1", 2, 2)},
+ },
+ },
+ {
+ name: "Stale Get need to have a proper response revision",
+ operations: []testOperation{
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: staleGetRequest("key", 2), resp: getResponse("key", "1", 2, 3), expectFailure: true},
+ {req: staleGetRequest("key", 2), resp: getResponse("key", "1", 2, 2)},
+ {req: putRequest("key", "2"), resp: putResponse(3)},
+ {req: staleGetRequest("key", 2), resp: getResponse("key", "1", 2, 3)},
+ },
+ },
+ {
+ name: "Put must increase revision by 1",
+ operations: []testOperation{
+ {req: getRequest("key"), resp: emptyGetResponse(1)},
+ {req: putRequest("key", "1"), resp: putResponse(1), expectFailure: true},
+ {req: putRequest("key", "1"), resp: putResponse(3), expectFailure: true},
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ },
+ },
+ {
+ name: "Delete only increases revision on success",
+ operations: []testOperation{
+ {req: putRequest("key1", "11"), resp: putResponse(2)},
+ {req: putRequest("key2", "12"), resp: putResponse(3)},
+ {req: deleteRequest("key1"), resp: deleteResponse(1, 3), expectFailure: true},
+ {req: deleteRequest("key1"), resp: deleteResponse(1, 4)},
+ {req: deleteRequest("key1"), resp: deleteResponse(0, 5), expectFailure: true},
+ {req: deleteRequest("key1"), resp: deleteResponse(0, 4)},
+ },
+ },
+ {
+ name: "Delete not existing key",
+ operations: []testOperation{
+ {req: getRequest("key"), resp: emptyGetResponse(1)},
+ {req: deleteRequest("key"), resp: deleteResponse(1, 2), expectFailure: true},
+ {req: deleteRequest("key"), resp: deleteResponse(0, 1)},
+ },
+ },
+ {
+ name: "Delete clears value",
+ operations: []testOperation{
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: deleteRequest("key"), resp: deleteResponse(1, 3)},
+ {req: getRequest("key"), resp: getResponse("key", "1", 2, 2), expectFailure: true},
+ {req: getRequest("key"), resp: getResponse("key", "1", 3, 3), expectFailure: true},
+ {req: getRequest("key"), resp: getResponse("key", "1", 2, 3), expectFailure: true},
+ {req: getRequest("key"), resp: emptyGetResponse(3)},
+ },
+ },
+ {
+ name: "Txn executes onSuccess if revision matches expected",
+ operations: []testOperation{
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: compareRevisionAndPutRequest("key", 2, "2"), resp: compareRevisionAndPutResponse(true, 2), expectFailure: true},
+ {req: compareRevisionAndPutRequest("key", 2, "2"), resp: compareRevisionAndPutResponse(false, 3), expectFailure: true},
+ {req: compareRevisionAndPutRequest("key", 2, "2"), resp: compareRevisionAndPutResponse(false, 2), expectFailure: true},
+ {req: compareRevisionAndPutRequest("key", 2, "2"), resp: compareRevisionAndPutResponse(true, 3)},
+ {req: getRequest("key"), resp: getResponse("key", "1", 2, 2), expectFailure: true},
+ {req: getRequest("key"), resp: getResponse("key", "1", 2, 3), expectFailure: true},
+ {req: getRequest("key"), resp: getResponse("key", "1", 3, 3), expectFailure: true},
+ {req: getRequest("key"), resp: getResponse("key", "2", 2, 2), expectFailure: true},
+ {req: getRequest("key"), resp: getResponse("key", "2", 3, 3)},
+ },
+ },
+ {
+ name: "Txn can expect on key not existing",
+ operations: []testOperation{
+ {req: getRequest("key1"), resp: emptyGetResponse(1)},
+ {req: compareRevisionAndPutRequest("key1", 0, "2"), resp: compareRevisionAndPutResponse(true, 2)},
+ {req: compareRevisionAndPutRequest("key1", 0, "3"), resp: compareRevisionAndPutResponse(true, 3), expectFailure: true},
+ {req: txnRequestSingleOperation(compareRevision("key1", 0), putOperation("key1", "4"), putOperation("key1", "5")), resp: txnPutResponse(false, 3)},
+ {req: getRequest("key1"), resp: getResponse("key1", "5", 3, 3)},
+ {req: compareRevisionAndPutRequest("key2", 0, "6"), resp: compareRevisionAndPutResponse(true, 4)},
+ },
+ },
+ {
+ name: "Txn executes onFailure if revision doesn't match expected",
+ operations: []testOperation{
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: txnRequestSingleOperation(compareRevision("key", 2), nil, putOperation("key", "2")), resp: txnPutResponse(false, 3), expectFailure: true},
+ {req: txnRequestSingleOperation(compareRevision("key", 2), nil, putOperation("key", "2")), resp: txnEmptyResponse(false, 3), expectFailure: true},
+ {req: txnRequestSingleOperation(compareRevision("key", 2), nil, putOperation("key", "2")), resp: txnEmptyResponse(true, 3), expectFailure: true},
+ {req: txnRequestSingleOperation(compareRevision("key", 2), nil, putOperation("key", "2")), resp: txnPutResponse(true, 2), expectFailure: true},
+ {req: txnRequestSingleOperation(compareRevision("key", 2), nil, putOperation("key", "2")), resp: txnEmptyResponse(true, 2)},
+ {req: txnRequestSingleOperation(compareRevision("key", 3), nil, putOperation("key", "2")), resp: txnPutResponse(false, 3)},
+ },
+ },
+ {
+ name: "Put with valid lease id should succeed. Put with invalid lease id should fail",
+ operations: []testOperation{
+ {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)},
+ {req: putWithLeaseRequest("key", "2", 1), resp: putResponse(2)},
+ {req: putWithLeaseRequest("key", "3", 2), resp: putResponse(3), expectFailure: true},
+ {req: getRequest("key"), resp: getResponse("key", "2", 2, 2)},
+ },
+ },
+ {
+ name: "Put with valid lease id should succeed. Put with expired lease id should fail",
+ operations: []testOperation{
+ {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)},
+ {req: putWithLeaseRequest("key", "2", 1), resp: putResponse(2)},
+ {req: getRequest("key"), resp: getResponse("key", "2", 2, 2)},
+ {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(3)},
+ {req: putWithLeaseRequest("key", "4", 1), resp: putResponse(4), expectFailure: true},
+ {req: getRequest("key"), resp: emptyGetResponse(3)},
+ },
+ },
+ {
+ name: "Revoke should increment the revision",
+ operations: []testOperation{
+ {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)},
+ {req: putWithLeaseRequest("key", "2", 1), resp: putResponse(2)},
+ {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(3)},
+ {req: getRequest("key"), resp: emptyGetResponse(3)},
+ },
+ },
+ {
+ name: "Put following a PutWithLease will detach the key from the lease",
+ operations: []testOperation{
+ {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)},
+ {req: putWithLeaseRequest("key", "2", 1), resp: putResponse(2)},
+ {req: putRequest("key", "3"), resp: putResponse(3)},
+ {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(3)},
+ {req: getRequest("key"), resp: getResponse("key", "3", 3, 3)},
+ },
+ },
+ {
+ name: "Change lease. Revoking older lease should not increment revision",
+ operations: []testOperation{
+ {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)},
+ {req: leaseGrantRequest(2), resp: leaseGrantResponse(1)},
+ {req: putWithLeaseRequest("key", "2", 1), resp: putResponse(2)},
+ {req: putWithLeaseRequest("key", "3", 2), resp: putResponse(3)},
+ {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(3)},
+ {req: getRequest("key"), resp: getResponse("key", "3", 3, 3)},
+ {req: leaseRevokeRequest(2), resp: leaseRevokeResponse(4)},
+ {req: getRequest("key"), resp: emptyGetResponse(4)},
+ },
+ },
+ {
+ name: "Update key with same lease",
+ operations: []testOperation{
+ {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)},
+ {req: putWithLeaseRequest("key", "2", 1), resp: putResponse(2)},
+ {req: putWithLeaseRequest("key", "3", 1), resp: putResponse(3)},
+ {req: getRequest("key"), resp: getResponse("key", "3", 3, 3)},
+ },
+ },
+ {
+ name: "Deleting a leased key - revoke should not increment revision",
+ operations: []testOperation{
+ {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)},
+ {req: putWithLeaseRequest("key", "2", 1), resp: putResponse(2)},
+ {req: deleteRequest("key"), resp: deleteResponse(1, 3)},
+ {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(4), expectFailure: true},
+ {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(3)},
+ },
+ },
+ {
+ name: "Lease a few keys - revoke should increment revision only once",
+ operations: []testOperation{
+ {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)},
+ {req: putWithLeaseRequest("key1", "1", 1), resp: putResponse(2)},
+ {req: putWithLeaseRequest("key2", "2", 1), resp: putResponse(3)},
+ {req: putWithLeaseRequest("key3", "3", 1), resp: putResponse(4)},
+ {req: putWithLeaseRequest("key4", "4", 1), resp: putResponse(5)},
+ {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(6)},
+ },
+ },
+ {
+ name: "Lease some keys then delete some of them. Revoke should increment revision since some keys were still leased",
+ operations: []testOperation{
+ {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)},
+ {req: putWithLeaseRequest("key1", "1", 1), resp: putResponse(2)},
+ {req: putWithLeaseRequest("key2", "2", 1), resp: putResponse(3)},
+ {req: putWithLeaseRequest("key3", "3", 1), resp: putResponse(4)},
+ {req: putWithLeaseRequest("key4", "4", 1), resp: putResponse(5)},
+ {req: deleteRequest("key1"), resp: deleteResponse(1, 6)},
+ {req: deleteRequest("key3"), resp: deleteResponse(1, 7)},
+ {req: deleteRequest("key4"), resp: deleteResponse(1, 8)},
+ {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(9)},
+ {req: deleteRequest("key2"), resp: deleteResponse(0, 9)},
+ {req: getRequest("key1"), resp: emptyGetResponse(9)},
+ {req: getRequest("key2"), resp: emptyGetResponse(9)},
+ {req: getRequest("key3"), resp: emptyGetResponse(9)},
+ {req: getRequest("key4"), resp: emptyGetResponse(9)},
+ },
+ },
+ {
+ name: "Lease some keys then delete all of them. Revoke should not increment",
+ operations: []testOperation{
+ {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)},
+ {req: putWithLeaseRequest("key1", "1", 1), resp: putResponse(2)},
+ {req: putWithLeaseRequest("key2", "2", 1), resp: putResponse(3)},
+ {req: putWithLeaseRequest("key3", "3", 1), resp: putResponse(4)},
+ {req: putWithLeaseRequest("key4", "4", 1), resp: putResponse(5)},
+ {req: deleteRequest("key1"), resp: deleteResponse(1, 6)},
+ {req: deleteRequest("key2"), resp: deleteResponse(1, 7)},
+ {req: deleteRequest("key3"), resp: deleteResponse(1, 8)},
+ {req: deleteRequest("key4"), resp: deleteResponse(1, 9)},
+ {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(9)},
+ },
+ },
+ {
+ name: "All request types",
+ operations: []testOperation{
+ {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)},
+ {req: putWithLeaseRequest("key", "1", 1), resp: putResponse(2)},
+ {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(3)},
+ {req: putRequest("key", "4"), resp: putResponse(4)},
+ {req: getRequest("key"), resp: getResponse("key", "4", 4, 4)},
+ {req: compareRevisionAndPutRequest("key", 4, "5"), resp: compareRevisionAndPutResponse(true, 5)},
+ {req: deleteRequest("key"), resp: deleteResponse(1, 6)},
+ {req: defragmentRequest(), resp: defragmentResponse(6)},
+ },
+ },
+ {
+ name: "Defragment success between all other request types",
+ operations: []testOperation{
+ {req: defragmentRequest(), resp: defragmentResponse(1)},
+ {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)},
+ {req: defragmentRequest(), resp: defragmentResponse(1)},
+ {req: putWithLeaseRequest("key", "1", 1), resp: putResponse(2)},
+ {req: defragmentRequest(), resp: defragmentResponse(2)},
+ {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(3)},
+ {req: defragmentRequest(), resp: defragmentResponse(3)},
+ {req: putRequest("key", "4"), resp: putResponse(4)},
+ {req: defragmentRequest(), resp: defragmentResponse(4)},
+ {req: getRequest("key"), resp: getResponse("key", "4", 4, 4)},
+ {req: defragmentRequest(), resp: defragmentResponse(4)},
+ {req: compareRevisionAndPutRequest("key", 4, "5"), resp: compareRevisionAndPutResponse(true, 5)},
+ {req: defragmentRequest(), resp: defragmentResponse(5)},
+ {req: deleteRequest("key"), resp: deleteResponse(1, 6)},
+ {req: defragmentRequest(), resp: defragmentResponse(6)},
+ },
+ },
+}
diff --git a/tests/robustness/model/history.go b/tests/robustness/model/history.go
new file mode 100644
index 00000000000..20ebd1c7cff
--- /dev/null
+++ b/tests/robustness/model/history.go
@@ -0,0 +1,523 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/anishathalye/porcupine"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+ "go.etcd.io/etcd/tests/v3/robustness/identity"
+)
+
+// AppendableHistory allows collecting a history of sequential operations.
+//
+// It ensures that the operation history is compatible with the porcupine library by preventing concurrent requests
+// from sharing the same stream id. For failed requests, we don't know their return time, so a new stream id is generated.
+//
+// Appending needs to be done in order of operation execution time (start, end time).
+// Operation times should be calculated as time.Since a common base time to ensure that Go monotonic time is used.
+// More in https://github.com/golang/go/blob/96add980ad27faed627f26ef1ab09e8fe45d6bd1/src/time/time.go#L10.
+type AppendableHistory struct {
+ // streamID for the next operation. Used for porcupine.Operation.ClientId, as porcupine assumes a single client does not issue concurrent requests.
+ streamID int
+ // If needed, a new streamID is requested from idProvider.
+ idProvider identity.Provider
+
+ History
+}
+
+func NewAppendableHistory(ids identity.Provider) *AppendableHistory {
+ return &AppendableHistory{
+ streamID: ids.NewStreamID(),
+ idProvider: ids,
+ History: History{
+ operations: []porcupine.Operation{},
+ },
+ }
+}
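+
+// Illustrative usage (a sketch only; client, ctx and baseTime are hypothetical and
+// not part of this package):
+//
+//	h := NewAppendableHistory(ids) // ids is an identity.Provider
+//	start := time.Since(baseTime)
+//	resp, err := client.Put(ctx, "key", "v")
+//	end := time.Since(baseTime)
+//	h.AppendPut("key", "v", start, end, resp, err)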
+
+func (h *AppendableHistory) AppendRange(startKey, endKey string, revision, limit int64, start, end time.Duration, resp *clientv3.GetResponse, err error) {
+ request := staleRangeRequest(startKey, endKey, limit, revision)
+ if err != nil {
+ h.appendFailed(request, start, end, err)
+ return
+ }
+ var respRevision int64
+ if resp != nil && resp.Header != nil {
+ respRevision = resp.Header.Revision
+ }
+ h.appendSuccessful(request, start, end, rangeResponse(resp.Kvs, resp.Count, respRevision))
+}
+
+func (h *AppendableHistory) AppendPut(key, value string, start, end time.Duration, resp *clientv3.PutResponse, err error) {
+ request := putRequest(key, value)
+ if err != nil {
+ h.appendFailed(request, start, end, err)
+ return
+ }
+ var revision int64
+ if resp != nil && resp.Header != nil {
+ revision = resp.Header.Revision
+ }
+ h.appendSuccessful(request, start, end, putResponse(revision))
+}
+
+func (h *AppendableHistory) AppendPutWithLease(key, value string, leaseID int64, start, end time.Duration, resp *clientv3.PutResponse, err error) {
+ request := putWithLeaseRequest(key, value, leaseID)
+ if err != nil {
+ h.appendFailed(request, start, end, err)
+ return
+ }
+ var revision int64
+ if resp != nil && resp.Header != nil {
+ revision = resp.Header.Revision
+ }
+ h.appendSuccessful(request, start, end, putResponse(revision))
+}
+
+func (h *AppendableHistory) AppendLeaseGrant(start, end time.Duration, resp *clientv3.LeaseGrantResponse, err error) {
+ var leaseID int64
+ if resp != nil {
+ leaseID = int64(resp.ID)
+ }
+ request := leaseGrantRequest(leaseID)
+ if err != nil {
+ h.appendFailed(request, start, end, err)
+ return
+ }
+ var revision int64
+ if resp != nil && resp.ResponseHeader != nil {
+ revision = resp.ResponseHeader.Revision
+ }
+ h.appendSuccessful(request, start, end, leaseGrantResponse(revision))
+}
+
+func (h *AppendableHistory) AppendLeaseRevoke(id int64, start, end time.Duration, resp *clientv3.LeaseRevokeResponse, err error) {
+ request := leaseRevokeRequest(id)
+ if err != nil {
+ h.appendFailed(request, start, end, err)
+ return
+ }
+ var revision int64
+ if resp != nil && resp.Header != nil {
+ revision = resp.Header.Revision
+ }
+ h.appendSuccessful(request, start, end, leaseRevokeResponse(revision))
+}
+
+func (h *AppendableHistory) AppendDelete(key string, start, end time.Duration, resp *clientv3.DeleteResponse, err error) {
+ request := deleteRequest(key)
+ if err != nil {
+ h.appendFailed(request, start, end, err)
+ return
+ }
+ var revision int64
+ var deleted int64
+ if resp != nil && resp.Header != nil {
+ revision = resp.Header.Revision
+ deleted = resp.Deleted
+ }
+ h.appendSuccessful(request, start, end, deleteResponse(deleted, revision))
+}
+
+func (h *AppendableHistory) AppendTxn(cmp []clientv3.Cmp, clientOnSuccessOps, clientOnFailure []clientv3.Op, start, end time.Duration, resp *clientv3.TxnResponse, err error) {
+ conds := []EtcdCondition{}
+ for _, cmp := range cmp {
+ conds = append(conds, toEtcdCondition(cmp))
+ }
+ modelOnSuccess := []EtcdOperation{}
+ for _, op := range clientOnSuccessOps {
+ modelOnSuccess = append(modelOnSuccess, toEtcdOperation(op))
+ }
+ modelOnFailure := []EtcdOperation{}
+ for _, op := range clientOnFailure {
+ modelOnFailure = append(modelOnFailure, toEtcdOperation(op))
+ }
+ request := txnRequest(conds, modelOnSuccess, modelOnFailure)
+ if err != nil {
+ h.appendFailed(request, start, end, err)
+ return
+ }
+ var revision int64
+ if resp != nil && resp.Header != nil {
+ revision = resp.Header.Revision
+ }
+ results := []EtcdOperationResult{}
+ for _, resp := range resp.Responses {
+ results = append(results, toEtcdOperationResult(resp))
+ }
+ h.appendSuccessful(request, start, end, txnResponse(results, resp.Succeeded, revision))
+}
+
+func (h *AppendableHistory) appendSuccessful(request EtcdRequest, start, end time.Duration, response MaybeEtcdResponse) {
+ op := porcupine.Operation{
+ ClientId: h.streamID,
+ Input: request,
+ Call: start.Nanoseconds(),
+ Output: response,
+ Return: end.Nanoseconds(),
+ }
+ h.append(op)
+}
+
+func toEtcdCondition(cmp clientv3.Cmp) (cond EtcdCondition) {
+ switch {
+ case cmp.Result == etcdserverpb.Compare_EQUAL && cmp.Target == etcdserverpb.Compare_MOD:
+ cond.Key = string(cmp.KeyBytes())
+ case cmp.Result == etcdserverpb.Compare_EQUAL && cmp.Target == etcdserverpb.Compare_CREATE:
+ cond.Key = string(cmp.KeyBytes())
+ default:
+ panic(fmt.Sprintf("Compare not supported, target: %q, result: %q", cmp.Target, cmp.Result))
+ }
+ cond.ExpectedRevision = cmp.TargetUnion.(*etcdserverpb.Compare_ModRevision).ModRevision
+ return cond
+}
+
+func toEtcdOperation(option clientv3.Op) (op EtcdOperation) {
+ switch {
+ case option.IsGet():
+ op.Type = RangeOperation
+ op.Range = RangeOptions{
+ Start: string(option.KeyBytes()),
+ End: string(option.RangeBytes()),
+ }
+ case option.IsPut():
+ op.Type = PutOperation
+ op.Put = PutOptions{
+ Key: string(option.KeyBytes()),
+ Value: ValueOrHash{Value: string(option.ValueBytes())},
+ }
+ case option.IsDelete():
+ op.Type = DeleteOperation
+ op.Delete = DeleteOptions{
+ Key: string(option.KeyBytes()),
+ }
+ default:
+ panic("Unsupported operation")
+ }
+ return op
+}
+
+func toEtcdOperationResult(resp *etcdserverpb.ResponseOp) EtcdOperationResult {
+ switch {
+ case resp.GetResponseRange() != nil:
+ getResp := resp.GetResponseRange()
+ kvs := make([]KeyValue, len(getResp.Kvs))
+ for i, kv := range getResp.Kvs {
+ kvs[i] = KeyValue{
+ Key: string(kv.Key),
+ ValueRevision: ValueRevision{
+ Value: ToValueOrHash(string(kv.Value)),
+ ModRevision: kv.ModRevision,
+ },
+ }
+ }
+ return EtcdOperationResult{
+ RangeResponse: RangeResponse{
+ KVs: kvs,
+ Count: getResp.Count,
+ },
+ }
+ case resp.GetResponsePut() != nil:
+ return EtcdOperationResult{}
+ case resp.GetResponseDeleteRange() != nil:
+ return EtcdOperationResult{
+ Deleted: resp.GetResponseDeleteRange().Deleted,
+ }
+ default:
+ panic("Unsupported operation")
+ }
+}
+
+func (h *AppendableHistory) AppendDefragment(start, end time.Duration, resp *clientv3.DefragmentResponse, err error) {
+ request := defragmentRequest()
+ if err != nil {
+ h.appendFailed(request, start, end, err)
+ return
+ }
+ var revision int64
+ if resp != nil && resp.Header != nil {
+ revision = resp.Header.Revision
+ }
+ h.appendSuccessful(request, start, end, defragmentResponse(revision))
+}
+
+func (h *AppendableHistory) AppendCompact(rev int64, start, end time.Duration, resp *clientv3.CompactResponse, err error) {
+ request := compactRequest(rev)
+ if err != nil {
+ if strings.Contains(err.Error(), mvcc.ErrCompacted.Error()) {
+ h.appendSuccessful(request, start, end, MaybeEtcdResponse{
+ EtcdResponse: EtcdResponse{ClientError: mvcc.ErrCompacted.Error()},
+ })
+ return
+ }
+ h.appendFailed(request, start, end, err)
+ return
+ }
+ // Set a fake revision, as compaction returns a non-linearizable revision.
+ // TODO: Model non-linearizable response revision in model.
+ h.appendSuccessful(request, start, end, compactResponse(-1))
+}
+
+func (h *AppendableHistory) appendFailed(request EtcdRequest, start, end time.Duration, err error) {
+ op := porcupine.Operation{
+ ClientId: h.streamID,
+ Input: request,
+ Call: start.Nanoseconds(),
+ Output: failedResponse(err),
+ Return: end.Nanoseconds(),
+ }
+ isRead := request.IsRead()
+ if !isRead {
+ // Failed writes can still be persisted; set Return to -1 for now, as we don't know when the request took effect.
+ op.Return = -1
+ // Operations of a single client need to be sequential.
+ // As we don't know the return time of failed operations, all new writes need to be done with a new stream id.
+ h.streamID = h.idProvider.NewStreamID()
+ }
+ h.append(op)
+}
+
+func (h *AppendableHistory) append(op porcupine.Operation) {
+ if op.Return != -1 && op.Call >= op.Return {
+ panic(fmt.Sprintf("Invalid operation, call(%d) >= return(%d)", op.Call, op.Return))
+ }
+ if len(h.operations) > 0 {
+ prev := h.operations[len(h.operations)-1]
+ if op.Call <= prev.Call {
+ panic(fmt.Sprintf("Out of order append, new.call(%d) <= prev.call(%d)", op.Call, prev.Call))
+ }
+ if op.Call <= prev.Return {
+ panic(fmt.Sprintf("Overlapping operations, new.call(%d) <= prev.return(%d)", op.Call, prev.Return))
+ }
+ }
+ h.operations = append(h.operations, op)
+}
+
+func getRequest(key string) EtcdRequest {
+ return rangeRequest(key, "", 0)
+}
+
+func staleGetRequest(key string, revision int64) EtcdRequest {
+ return staleRangeRequest(key, "", 0, revision)
+}
+
+func rangeRequest(start, end string, limit int64) EtcdRequest {
+ return staleRangeRequest(start, end, limit, 0)
+}
+
+func listRequest(key string, limit int64) EtcdRequest {
+ return staleListRequest(key, limit, 0)
+}
+
+func staleListRequest(key string, limit, revision int64) EtcdRequest {
+ return staleRangeRequest(key, clientv3.GetPrefixRangeEnd(key), limit, revision)
+}
+
+func staleRangeRequest(start, end string, limit, revision int64) EtcdRequest {
+ return EtcdRequest{Type: Range, Range: &RangeRequest{RangeOptions: RangeOptions{Start: start, End: end, Limit: limit}, Revision: revision}}
+}
+
+func emptyGetResponse(revision int64) MaybeEtcdResponse {
+ return rangeResponse([]*mvccpb.KeyValue{}, 0, revision)
+}
+
+func getResponse(key, value string, modRevision, revision int64) MaybeEtcdResponse {
+ return rangeResponse([]*mvccpb.KeyValue{{Key: []byte(key), Value: []byte(value), ModRevision: modRevision}}, 1, revision)
+}
+
+func rangeResponse(kvs []*mvccpb.KeyValue, count int64, revision int64) MaybeEtcdResponse {
+ result := RangeResponse{KVs: make([]KeyValue, len(kvs)), Count: count}
+
+ for i, kv := range kvs {
+ result.KVs[i] = KeyValue{
+ Key: string(kv.Key),
+ ValueRevision: ValueRevision{
+ Value: ToValueOrHash(string(kv.Value)),
+ ModRevision: kv.ModRevision,
+ },
+ }
+ }
+ return MaybeEtcdResponse{EtcdResponse: EtcdResponse{Range: &result, Revision: revision}}
+}
+
+func failedResponse(err error) MaybeEtcdResponse {
+ return MaybeEtcdResponse{Error: err.Error()}
+}
+
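+// partialResponse models a response for a request that is known to have been persisted,
+// with PersistedRevision carrying the revision it took effect at (0 when unknown).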
+func partialResponse(revision int64) MaybeEtcdResponse {
+ return MaybeEtcdResponse{Persisted: true, PersistedRevision: revision}
+}
+
+func putRequest(key, value string) EtcdRequest {
+ return EtcdRequest{Type: Txn, Txn: &TxnRequest{OperationsOnSuccess: []EtcdOperation{{Type: PutOperation, Put: PutOptions{Key: key, Value: ToValueOrHash(value)}}}}}
+}
+
+func putResponse(revision int64) MaybeEtcdResponse {
+ return MaybeEtcdResponse{EtcdResponse: EtcdResponse{Txn: &TxnResponse{Results: []EtcdOperationResult{{}}}, Revision: revision}}
+}
+
+func deleteRequest(key string) EtcdRequest {
+ return EtcdRequest{Type: Txn, Txn: &TxnRequest{OperationsOnSuccess: []EtcdOperation{{Type: DeleteOperation, Delete: DeleteOptions{Key: key}}}}}
+}
+
+func deleteResponse(deleted int64, revision int64) MaybeEtcdResponse {
+ return MaybeEtcdResponse{EtcdResponse: EtcdResponse{Txn: &TxnResponse{Results: []EtcdOperationResult{{Deleted: deleted}}}, Revision: revision}}
+}
+
+func compareRevisionAndPutRequest(key string, expectedRevision int64, value string) EtcdRequest {
+ return txnRequestSingleOperation(compareRevision(key, expectedRevision), putOperation(key, value), nil)
+}
+
+func compareRevisionAndPutResponse(succeeded bool, revision int64) MaybeEtcdResponse {
+ if succeeded {
+ return txnPutResponse(succeeded, revision)
+ }
+ return txnEmptyResponse(succeeded, revision)
+}
+
+func compareRevision(key string, expectedRevision int64) *EtcdCondition {
+ return &EtcdCondition{Key: key, ExpectedRevision: expectedRevision}
+}
+
+func putOperation(key, value string) *EtcdOperation {
+ return &EtcdOperation{Type: PutOperation, Put: PutOptions{Key: key, Value: ToValueOrHash(value)}}
+}
+
+func txnRequestSingleOperation(cond *EtcdCondition, onSuccess, onFailure *EtcdOperation) EtcdRequest {
+ var conds []EtcdCondition
+ if cond != nil {
+ conds = []EtcdCondition{*cond}
+ }
+ var onSuccess2 []EtcdOperation
+ if onSuccess != nil {
+ onSuccess2 = []EtcdOperation{*onSuccess}
+ }
+ var onFailure2 []EtcdOperation
+ if onFailure != nil {
+ onFailure2 = []EtcdOperation{*onFailure}
+ }
+ return txnRequest(conds, onSuccess2, onFailure2)
+}
+
+func txnRequest(conds []EtcdCondition, onSuccess, onFailure []EtcdOperation) EtcdRequest {
+ return EtcdRequest{Type: Txn, Txn: &TxnRequest{Conditions: conds, OperationsOnSuccess: onSuccess, OperationsOnFailure: onFailure}}
+}
+
+func txnPutResponse(succeeded bool, revision int64) MaybeEtcdResponse {
+ return txnResponse([]EtcdOperationResult{{}}, succeeded, revision)
+}
+
+func txnEmptyResponse(succeeded bool, revision int64) MaybeEtcdResponse {
+ return txnResponse([]EtcdOperationResult{}, succeeded, revision)
+}
+
+func txnResponse(result []EtcdOperationResult, succeeded bool, revision int64) MaybeEtcdResponse {
+ return MaybeEtcdResponse{EtcdResponse: EtcdResponse{Txn: &TxnResponse{Results: result, Failure: !succeeded}, Revision: revision}}
+}
+
+func putWithLeaseRequest(key, value string, leaseID int64) EtcdRequest {
+ return EtcdRequest{Type: Txn, Txn: &TxnRequest{OperationsOnSuccess: []EtcdOperation{{Type: PutOperation, Put: PutOptions{Key: key, Value: ToValueOrHash(value), LeaseID: leaseID}}}}}
+}
+
+func leaseGrantRequest(leaseID int64) EtcdRequest {
+ return EtcdRequest{Type: LeaseGrant, LeaseGrant: &LeaseGrantRequest{LeaseID: leaseID}}
+}
+
+func leaseGrantResponse(revision int64) MaybeEtcdResponse {
+ return MaybeEtcdResponse{EtcdResponse: EtcdResponse{LeaseGrant: &LeaseGrantReponse{}, Revision: revision}}
+}
+
+func leaseRevokeRequest(leaseID int64) EtcdRequest {
+ return EtcdRequest{Type: LeaseRevoke, LeaseRevoke: &LeaseRevokeRequest{LeaseID: leaseID}}
+}
+
+func leaseRevokeResponse(revision int64) MaybeEtcdResponse {
+ return MaybeEtcdResponse{EtcdResponse: EtcdResponse{LeaseRevoke: &LeaseRevokeResponse{}, Revision: revision}}
+}
+
+func defragmentRequest() EtcdRequest {
+ return EtcdRequest{Type: Defragment, Defragment: &DefragmentRequest{}}
+}
+
+func defragmentResponse(revision int64) MaybeEtcdResponse {
+ return MaybeEtcdResponse{EtcdResponse: EtcdResponse{Defragment: &DefragmentResponse{}, Revision: revision}}
+}
+
+func compactRequest(rev int64) EtcdRequest {
+ return EtcdRequest{Type: Compact, Compact: &CompactRequest{Revision: rev}}
+}
+
+func compactResponse(revision int64) MaybeEtcdResponse {
+ return MaybeEtcdResponse{EtcdResponse: EtcdResponse{Compact: &CompactResponse{}, Revision: revision}}
+}
+
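+// History is an ordered list of porcupine operations used as input for linearizability checking.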
+type History struct {
+ operations []porcupine.Operation
+}
+
+func (h History) Len() int {
+ return len(h.operations)
+}
+
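+// Operations returns the collected porcupine operations, assigning failed
+// requests (Return == -1) a return time one second after the last observed time.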
+func (h History) Operations() []porcupine.Operation {
+ operations := make([]porcupine.Operation, 0, len(h.operations))
+ maxTime := h.lastObservedTime()
+ for _, op := range h.operations {
+ // Failed requests don't have a known return time.
+ if op.Return == -1 {
+ // Simulate infinity by using the last observed time plus one second.
+ op.Return = maxTime + time.Second.Nanoseconds()
+ }
+ operations = append(operations, op)
+ }
+ return operations
+}
+
+func (h History) lastObservedTime() int64 {
+ var maxTime int64
+ for _, op := range h.operations {
+ if op.Return == -1 {
+ // Collect call time from failed operations
+ if op.Call > maxTime {
+ maxTime = op.Call
+ }
+ } else {
+ // Collect return time from successful operations
+ if op.Return > maxTime {
+ maxTime = op.Return
+ }
+ }
+ }
+ return maxTime
+}
+
+func (h History) MaxRevision() int64 {
+ var maxRevision int64
+ for _, op := range h.operations {
+ revision := op.Output.(MaybeEtcdResponse).Revision
+ if revision > maxRevision {
+ maxRevision = revision
+ }
+ }
+ return maxRevision
+}
diff --git a/tests/robustness/model/non_deterministic.go b/tests/robustness/model/non_deterministic.go
new file mode 100644
index 00000000000..3167e340fd1
--- /dev/null
+++ b/tests/robustness/model/non_deterministic.go
@@ -0,0 +1,117 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+
+ "github.com/anishathalye/porcupine"
+)
+
+// NonDeterministicModel extends DeterministicModel to allow for clients with imperfect knowledge of a request's fate.
+// An unknown or error response doesn't tell us whether the request was persisted or not, so the model
+// considers both cases. This is represented as multiple equally possible deterministic states.
+// Failed requests fork the possible states, while successful requests merge and filter them.
+var NonDeterministicModel = porcupine.Model{
+ Init: func() any {
+ data, err := json.Marshal(nonDeterministicState{freshEtcdState()})
+ if err != nil {
+ panic(err)
+ }
+ return string(data)
+ },
+ Step: func(st any, in any, out any) (bool, any) {
+ var states nonDeterministicState
+ err := json.Unmarshal([]byte(st.(string)), &states)
+ if err != nil {
+ panic(err)
+ }
+ ok, states := states.apply(in.(EtcdRequest), out.(MaybeEtcdResponse))
+ data, err := json.Marshal(states)
+ if err != nil {
+ panic(err)
+ }
+ return ok, string(data)
+ },
+ DescribeOperation: func(in, out any) string {
+ return fmt.Sprintf("%s -> %s", describeEtcdRequest(in.(EtcdRequest)), describeEtcdResponse(in.(EtcdRequest), out.(MaybeEtcdResponse)))
+ },
+}
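+
+// Example of the forking behaviour described above: a Put that returns an error
+// forks each state into "Put persisted" and "Put lost"; a later successful request
+// then discards whichever branch does not match its observed response.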
+
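+// nonDeterministicState is the set of equally possible deterministic etcd states
+// that remain consistent with the responses observed so far.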
+type nonDeterministicState []EtcdState
+
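+// apply steps every candidate state with the request and keeps the states that are
+// consistent with the observed response. It reports whether at least one state remains.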
+func (states nonDeterministicState) apply(request EtcdRequest, response MaybeEtcdResponse) (bool, nonDeterministicState) {
+ var newStates nonDeterministicState
+ switch {
+ case response.Error != "":
+ newStates = states.applyFailedRequest(request)
+ case response.Persisted && response.PersistedRevision == 0:
+ newStates = states.applyPersistedRequest(request)
+ case response.Persisted && response.PersistedRevision != 0:
+ newStates = states.applyPersistedRequestWithRevision(request, response.PersistedRevision)
+ default:
+ newStates = states.applyRequestWithResponse(request, response.EtcdResponse)
+ }
+ return len(newStates) > 0, newStates
+}
+
+// applyFailedRequest returns both the original states and the states with the request applied. It considers both cases: the request was persisted, and the request was lost.
+func (states nonDeterministicState) applyFailedRequest(request EtcdRequest) nonDeterministicState {
+ newStates := make(nonDeterministicState, 0, len(states)*2)
+ for _, s := range states {
+ newStates = append(newStates, s)
+ newState, _ := s.Step(request)
+ if !reflect.DeepEqual(newState, s) {
+ newStates = append(newStates, newState)
+ }
+ }
+ return newStates
+}
+
+// applyPersistedRequest applies the request to all possible states.
+func (states nonDeterministicState) applyPersistedRequest(request EtcdRequest) nonDeterministicState {
+ newStates := make(nonDeterministicState, 0, len(states))
+ for _, s := range states {
+ newState, _ := s.Step(request)
+ newStates = append(newStates, newState)
+ }
+ return newStates
+}
+
+// applyPersistedRequestWithRevision applies the request to all possible states, but keeps only the states that would return the expected revision.
+func (states nonDeterministicState) applyPersistedRequestWithRevision(request EtcdRequest, responseRevision int64) nonDeterministicState {
+ newStates := make(nonDeterministicState, 0, len(states))
+ for _, s := range states {
+ newState, modelResponse := s.Step(request)
+ if modelResponse.Revision == responseRevision {
+ newStates = append(newStates, newState)
+ }
+ }
+ return newStates
+}
+
+// applyRequestWithResponse applies the request to all possible states, but keeps only the states that would return the expected response.
+func (states nonDeterministicState) applyRequestWithResponse(request EtcdRequest, response EtcdResponse) nonDeterministicState {
+ newStates := make(nonDeterministicState, 0, len(states))
+ for _, s := range states {
+ newState, modelResponse := s.Step(request)
+ if Match(modelResponse, MaybeEtcdResponse{EtcdResponse: response}) {
+ newStates = append(newStates, newState)
+ }
+ }
+ return newStates
+}
diff --git a/tests/robustness/model/non_deterministic_test.go b/tests/robustness/model/non_deterministic_test.go
new file mode 100644
index 00000000000..d5c981973e5
--- /dev/null
+++ b/tests/robustness/model/non_deterministic_test.go
@@ -0,0 +1,575 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "errors"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+)
+
+func TestModelNonDeterministic(t *testing.T) {
+ nonDeterministicTestScenarios := append(commonTestScenarios, []modelTestCase{
+ {
+ name: "First Put request fails, but is persisted",
+ operations: []testOperation{
+ {req: putRequest("key1", "1"), resp: failedResponse(errors.New("failed"))},
+ {req: putRequest("key2", "2"), resp: putResponse(3)},
+ {req: listRequest("key", 0), resp: rangeResponse([]*mvccpb.KeyValue{{Key: []byte("key1"), Value: []byte("1"), ModRevision: 2}, {Key: []byte("key2"), Value: []byte("2"), ModRevision: 3}}, 2, 3)},
+ },
+ },
+ {
+ name: "First Put request fails, and is lost",
+ operations: []testOperation{
+ {req: putRequest("key1", "1"), resp: failedResponse(errors.New("failed"))},
+ {req: putRequest("key2", "2"), resp: putResponse(2)},
+ {req: listRequest("key", 0), resp: rangeResponse([]*mvccpb.KeyValue{{Key: []byte("key2"), Value: []byte("2"), ModRevision: 2}}, 1, 2)},
+ },
+ },
+ {
+ name: "Put can fail and be lost before get",
+ operations: []testOperation{
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: putRequest("key", "1"), resp: failedResponse(errors.New("failed"))},
+ {req: getRequest("key"), resp: getResponse("key", "1", 2, 2)},
+ {req: getRequest("key"), resp: getResponse("key", "2", 2, 2), expectFailure: true},
+ {req: getRequest("key"), resp: getResponse("key", "1", 2, 3), expectFailure: true},
+ {req: getRequest("key"), resp: getResponse("key", "2", 2, 3), expectFailure: true},
+ },
+ },
+ {
+ name: "Put can fail and be lost before put",
+ operations: []testOperation{
+ {req: getRequest("key"), resp: emptyGetResponse(1)},
+ {req: putRequest("key", "1"), resp: failedResponse(errors.New("failed"))},
+ {req: putRequest("key", "3"), resp: putResponse(2)},
+ },
+ },
+ {
+ name: "Put can fail and be lost before delete",
+ operations: []testOperation{
+ {req: deleteRequest("key"), resp: deleteResponse(0, 1)},
+ {req: putRequest("key", "1"), resp: failedResponse(errors.New("failed"))},
+ {req: deleteRequest("key"), resp: deleteResponse(0, 1)},
+ },
+ },
+ {
+ name: "Put can fail and be lost before txn",
+ operations: []testOperation{
+ // Txn failure
+ {req: getRequest("key"), resp: emptyGetResponse(1)},
+ {req: putRequest("key", "1"), resp: failedResponse(errors.New("failed"))},
+ {req: compareRevisionAndPutRequest("key", 2, "3"), resp: compareRevisionAndPutResponse(false, 1)},
+ // Txn success
+ {req: putRequest("key", "2"), resp: putResponse(2)},
+ {req: putRequest("key", "4"), resp: failedResponse(errors.New("failed"))},
+ {req: compareRevisionAndPutRequest("key", 2, "5"), resp: compareRevisionAndPutResponse(true, 3)},
+ },
+ },
+ {
+ name: "Put can fail but be persisted and increase revision before get",
+ operations: []testOperation{
+ // One failed request, one persisted.
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: putRequest("key", "2"), resp: failedResponse(errors.New("failed"))},
+ {req: getRequest("key"), resp: getResponse("key", "3", 3, 3), expectFailure: true},
+ {req: getRequest("key"), resp: getResponse("key", "3", 2, 3), expectFailure: true},
+ {req: getRequest("key"), resp: getResponse("key", "2", 2, 2), expectFailure: true},
+ {req: getRequest("key"), resp: getResponse("key", "2", 3, 3)},
+ // Two failed requests, two persisted.
+ {req: putRequest("key", "3"), resp: failedResponse(errors.New("failed"))},
+ {req: putRequest("key", "4"), resp: failedResponse(errors.New("failed"))},
+ {req: getRequest("key"), resp: getResponse("key", "4", 5, 5)},
+ },
+ },
+ {
+ name: "Put can fail but be persisted and increase revision before delete",
+ operations: []testOperation{
+ // One failed request, one persisted.
+ {req: deleteRequest("key"), resp: deleteResponse(0, 1)},
+ {req: putRequest("key", "1"), resp: failedResponse(errors.New("failed"))},
+ {req: deleteRequest("key"), resp: deleteResponse(1, 1), expectFailure: true},
+ {req: deleteRequest("key"), resp: deleteResponse(1, 2), expectFailure: true},
+ {req: deleteRequest("key"), resp: deleteResponse(1, 3)},
+ // Two failed requests, two persisted.
+ {req: putRequest("key", "4"), resp: putResponse(4)},
+ {req: putRequest("key", "5"), resp: failedResponse(errors.New("failed"))},
+ {req: putRequest("key", "6"), resp: failedResponse(errors.New("failed"))},
+ {req: deleteRequest("key"), resp: deleteResponse(1, 7)},
+ // Two failed requests, one persisted.
+ {req: putRequest("key", "8"), resp: putResponse(8)},
+ {req: putRequest("key", "9"), resp: failedResponse(errors.New("failed"))},
+ {req: putRequest("key", "10"), resp: failedResponse(errors.New("failed"))},
+ {req: deleteRequest("key"), resp: deleteResponse(1, 10)},
+ },
+ },
+ {
+ name: "Put can fail but be persisted before txn",
+ operations: []testOperation{
+ // Txn success
+ {req: getRequest("key"), resp: emptyGetResponse(1)},
+ {req: putRequest("key", "2"), resp: failedResponse(errors.New("failed"))},
+ {req: compareRevisionAndPutRequest("key", 2, ""), resp: compareRevisionAndPutResponse(true, 2), expectFailure: true},
+ {req: compareRevisionAndPutRequest("key", 2, ""), resp: compareRevisionAndPutResponse(true, 3)},
+ // Txn failure
+ {req: putRequest("key", "4"), resp: putResponse(4)},
+ {req: compareRevisionAndPutRequest("key", 5, ""), resp: compareRevisionAndPutResponse(false, 4)},
+ {req: putRequest("key", "5"), resp: failedResponse(errors.New("failed"))},
+ {req: getRequest("key"), resp: getResponse("key", "5", 5, 5)},
+ },
+ },
+ {
+ name: "Delete can fail and be lost before get",
+ operations: []testOperation{
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))},
+ {req: getRequest("key"), resp: getResponse("key", "1", 2, 2)},
+ {req: getRequest("key"), resp: emptyGetResponse(3), expectFailure: true},
+ {req: getRequest("key"), resp: emptyGetResponse(3), expectFailure: true},
+ {req: getRequest("key"), resp: emptyGetResponse(2), expectFailure: true},
+ },
+ },
+ {
+ name: "Delete can fail and be lost before delete",
+ operations: []testOperation{
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))},
+ {req: deleteRequest("key"), resp: deleteResponse(1, 2), expectFailure: true},
+ {req: deleteRequest("key"), resp: deleteResponse(1, 3)},
+ },
+ },
+ {
+ name: "Delete can fail and be lost before put",
+ operations: []testOperation{
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))},
+ {req: putRequest("key", "1"), resp: putResponse(3)},
+ },
+ },
+ {
+ name: "Delete can fail but be persisted before get",
+ operations: []testOperation{
+ // One failed request, one persisted.
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))},
+ {req: getRequest("key"), resp: emptyGetResponse(3)},
+ // Two failed requests, one persisted.
+ {req: putRequest("key", "3"), resp: putResponse(4)},
+ {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))},
+ {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))},
+ {req: getRequest("key"), resp: emptyGetResponse(5)},
+ },
+ },
+ {
+ name: "Delete can fail but be persisted before put",
+ operations: []testOperation{
+ // One failed request, one persisted.
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))},
+ {req: putRequest("key", "3"), resp: putResponse(4)},
+ // Two failed requests, one persisted.
+ {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))},
+ {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))},
+ {req: putRequest("key", "5"), resp: putResponse(6)},
+ },
+ },
+ {
+ name: "Delete can fail but be persisted before delete",
+ operations: []testOperation{
+ // One failed request, one persisted.
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))},
+ {req: deleteRequest("key"), resp: deleteResponse(0, 3)},
+ {req: putRequest("key", "3"), resp: putResponse(4)},
+ // Two failed requests, one persisted.
+ {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))},
+ {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))},
+ {req: deleteRequest("key"), resp: deleteResponse(0, 5)},
+ },
+ },
+ {
+ name: "Delete can fail but be persisted before txn",
+ operations: []testOperation{
+ // Txn success
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))},
+ {req: compareRevisionAndPutRequest("key", 0, "3"), resp: compareRevisionAndPutResponse(true, 4)},
+ // Txn failure
+ {req: putRequest("key", "4"), resp: putResponse(5)},
+ {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))},
+ {req: compareRevisionAndPutRequest("key", 5, "5"), resp: compareRevisionAndPutResponse(false, 6)},
+ },
+ },
+ {
+ name: "Txn can fail and be lost before get",
+ operations: []testOperation{
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: compareRevisionAndPutRequest("key", 2, "2"), resp: failedResponse(errors.New("failed"))},
+ {req: getRequest("key"), resp: getResponse("key", "1", 2, 2)},
+ {req: getRequest("key"), resp: getResponse("key", "2", 3, 3), expectFailure: true},
+ },
+ },
+ {
+ name: "Txn can fail and be lost before delete",
+ operations: []testOperation{
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: compareRevisionAndPutRequest("key", 2, "2"), resp: failedResponse(errors.New("failed"))},
+ {req: deleteRequest("key"), resp: deleteResponse(1, 3)},
+ },
+ },
+ {
+ name: "Txn can fail and be lost before put",
+ operations: []testOperation{
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: compareRevisionAndPutRequest("key", 2, "2"), resp: failedResponse(errors.New("failed"))},
+ {req: putRequest("key", "3"), resp: putResponse(3)},
+ },
+ },
+ {
+ name: "Txn can fail but be persisted before get",
+ operations: []testOperation{
+ // One failed request, one persisted.
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: compareRevisionAndPutRequest("key", 2, "2"), resp: failedResponse(errors.New("failed"))},
+ {req: getRequest("key"), resp: getResponse("key", "2", 2, 2), expectFailure: true},
+ {req: getRequest("key"), resp: getResponse("key", "2", 3, 3)},
+ // Two failed requests, two persisted.
+ {req: putRequest("key", "3"), resp: putResponse(4)},
+ {req: compareRevisionAndPutRequest("key", 4, "4"), resp: failedResponse(errors.New("failed"))},
+ {req: compareRevisionAndPutRequest("key", 5, "5"), resp: failedResponse(errors.New("failed"))},
+ {req: getRequest("key"), resp: getResponse("key", "5", 6, 6)},
+ },
+ },
+ {
+ name: "Txn can fail but be persisted before put",
+ operations: []testOperation{
+ // One failed request, one persisted.
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: compareRevisionAndPutRequest("key", 2, "2"), resp: failedResponse(errors.New("failed"))},
+ {req: putRequest("key", "3"), resp: putResponse(4)},
+ // Two failed requests, two persisted.
+ {req: putRequest("key", "4"), resp: putResponse(5)},
+ {req: compareRevisionAndPutRequest("key", 5, "5"), resp: failedResponse(errors.New("failed"))},
+ {req: compareRevisionAndPutRequest("key", 6, "6"), resp: failedResponse(errors.New("failed"))},
+ {req: putRequest("key", "7"), resp: putResponse(8)},
+ },
+ },
+ {
+ name: "Txn can fail but be persisted before delete",
+ operations: []testOperation{
+ // One failed request, one persisted.
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: compareRevisionAndPutRequest("key", 2, "2"), resp: failedResponse(errors.New("failed"))},
+ {req: deleteRequest("key"), resp: deleteResponse(1, 4)},
+ // Two failed requests, two persisted.
+ {req: putRequest("key", "4"), resp: putResponse(5)},
+ {req: compareRevisionAndPutRequest("key", 5, "5"), resp: failedResponse(errors.New("failed"))},
+ {req: compareRevisionAndPutRequest("key", 6, "6"), resp: failedResponse(errors.New("failed"))},
+ {req: deleteRequest("key"), resp: deleteResponse(1, 8)},
+ },
+ },
+ {
+ name: "Txn can fail but be persisted before txn",
+ operations: []testOperation{
+ // One failed request, one persisted with success.
+ {req: putRequest("key", "1"), resp: putResponse(2)},
+ {req: compareRevisionAndPutRequest("key", 2, "2"), resp: failedResponse(errors.New("failed"))},
+ {req: compareRevisionAndPutRequest("key", 3, "3"), resp: compareRevisionAndPutResponse(true, 4)},
+ // Two failed requests, two persisted with success.
+ {req: putRequest("key", "4"), resp: putResponse(5)},
+ {req: compareRevisionAndPutRequest("key", 5, "5"), resp: failedResponse(errors.New("failed"))},
+ {req: compareRevisionAndPutRequest("key", 6, "6"), resp: failedResponse(errors.New("failed"))},
+ {req: compareRevisionAndPutRequest("key", 7, "7"), resp: compareRevisionAndPutResponse(true, 8)},
+ // One failed request, one persisted with failure.
+ {req: putRequest("key", "8"), resp: putResponse(9)},
+ {req: compareRevisionAndPutRequest("key", 9, "9"), resp: failedResponse(errors.New("failed"))},
+ {req: compareRevisionAndPutRequest("key", 9, "10"), resp: compareRevisionAndPutResponse(false, 10)},
+ },
+ },
+ {
+ name: "Defragment failures between all other request types",
+ operations: []testOperation{
+ {req: defragmentRequest(), resp: failedResponse(errors.New("failed"))},
+ {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)},
+ {req: defragmentRequest(), resp: failedResponse(errors.New("failed"))},
+ {req: putWithLeaseRequest("key", "1", 1), resp: putResponse(2)},
+ {req: defragmentRequest(), resp: failedResponse(errors.New("failed"))},
+ {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(3)},
+ {req: defragmentRequest(), resp: failedResponse(errors.New("failed"))},
+ {req: putRequest("key", "4"), resp: putResponse(4)},
+ {req: defragmentRequest(), resp: failedResponse(errors.New("failed"))},
+ {req: getRequest("key"), resp: getResponse("key", "4", 4, 4)},
+ {req: defragmentRequest(), resp: failedResponse(errors.New("failed"))},
+ {req: compareRevisionAndPutRequest("key", 4, "5"), resp: compareRevisionAndPutResponse(true, 5)},
+ {req: defragmentRequest(), resp: failedResponse(errors.New("failed"))},
+ {req: deleteRequest("key"), resp: deleteResponse(1, 6)},
+ {req: defragmentRequest(), resp: failedResponse(errors.New("failed"))},
+ },
+ },
+ }...)
+ for _, tc := range nonDeterministicTestScenarios {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ state := NonDeterministicModel.Init()
+ for _, op := range tc.operations {
+ ok, newState := NonDeterministicModel.Step(state, op.req, op.resp)
+ if ok != !op.expectFailure {
+ t.Logf("state: %v", state)
+ t.Errorf("Unexpected operation result, expect: %v, got: %v, operation: %s", !op.expectFailure, ok, NonDeterministicModel.DescribeOperation(op.req, op.resp))
+ var loadedState nonDeterministicState
+ err := json.Unmarshal([]byte(state.(string)), &loadedState)
+ require.NoErrorf(t, err, "Failed to load state")
+ for i, s := range loadedState {
+ _, resp := s.Step(op.req)
+ t.Errorf("For state %d, response diff: %s", i, cmp.Diff(op.resp, resp))
+ }
+ break
+ }
+ if ok {
+ state = newState
+ t.Logf("state: %v", state)
+ }
+ }
+ })
+ }
+}
+
+func TestModelResponseMatch(t *testing.T) {
+ tcs := []struct {
+ resp1 MaybeEtcdResponse
+ resp2 MaybeEtcdResponse
+ expectMatch bool
+ }{
+ {
+ resp1: getResponse("key", "a", 1, 1),
+ resp2: getResponse("key", "a", 1, 1),
+ expectMatch: true,
+ },
+ {
+ resp1: getResponse("key", "a", 1, 1),
+ resp2: getResponse("key", "b", 1, 1),
+ expectMatch: false,
+ },
+ {
+ resp1: getResponse("key", "a", 1, 1),
+ resp2: getResponse("key", "a", 2, 1),
+ expectMatch: false,
+ },
+ {
+ resp1: getResponse("key", "a", 1, 1),
+ resp2: getResponse("key", "a", 1, 2),
+ expectMatch: false,
+ },
+ {
+ resp1: getResponse("key", "a", 1, 1),
+ resp2: failedResponse(errors.New("failed request")),
+ expectMatch: false,
+ },
+ {
+ resp1: getResponse("key", "a", 1, 1),
+ resp2: partialResponse(1),
+ expectMatch: true,
+ },
+ {
+ resp1: getResponse("key", "a", 1, 1),
+ resp2: partialResponse(2),
+ expectMatch: false,
+ },
+ {
+ resp1: putResponse(3),
+ resp2: putResponse(3),
+ expectMatch: true,
+ },
+ {
+ resp1: putResponse(3),
+ resp2: putResponse(4),
+ expectMatch: false,
+ },
+ {
+ resp1: putResponse(3),
+ resp2: failedResponse(errors.New("failed request")),
+ expectMatch: false,
+ },
+ {
+ resp1: putResponse(3),
+ resp2: partialResponse(3),
+ expectMatch: true,
+ },
+ {
+ resp1: putResponse(3),
+ resp2: partialResponse(1),
+ expectMatch: false,
+ },
+ {
+ resp1: putResponse(3),
+ resp2: partialResponse(0),
+ expectMatch: true,
+ },
+ {
+ resp1: deleteResponse(1, 5),
+ resp2: deleteResponse(1, 5),
+ expectMatch: true,
+ },
+ {
+ resp1: deleteResponse(1, 5),
+ resp2: deleteResponse(0, 5),
+ expectMatch: false,
+ },
+ {
+ resp1: deleteResponse(1, 5),
+ resp2: deleteResponse(1, 6),
+ expectMatch: false,
+ },
+ {
+ resp1: deleteResponse(1, 5),
+ resp2: failedResponse(errors.New("failed request")),
+ expectMatch: false,
+ },
+ {
+ resp1: deleteResponse(1, 5),
+ resp2: partialResponse(5),
+ expectMatch: true,
+ },
+ {
+ resp1: deleteResponse(0, 5),
+ resp2: partialResponse(4),
+ expectMatch: false,
+ },
+ {
+ resp1: deleteResponse(0, 5),
+ resp2: partialResponse(0),
+ expectMatch: true,
+ },
+ {
+ resp1: deleteResponse(1, 5),
+ resp2: partialResponse(0),
+ expectMatch: true,
+ },
+ {
+ resp1: deleteResponse(0, 5),
+ resp2: partialResponse(2),
+ expectMatch: false,
+ },
+ {
+ resp1: compareRevisionAndPutResponse(false, 7),
+ resp2: compareRevisionAndPutResponse(false, 7),
+ expectMatch: true,
+ },
+ {
+ resp1: compareRevisionAndPutResponse(true, 7),
+ resp2: compareRevisionAndPutResponse(false, 7),
+ expectMatch: false,
+ },
+ {
+ resp1: compareRevisionAndPutResponse(false, 7),
+ resp2: compareRevisionAndPutResponse(false, 8),
+ expectMatch: false,
+ },
+ {
+ resp1: compareRevisionAndPutResponse(false, 7),
+ resp2: failedResponse(errors.New("failed request")),
+ expectMatch: false,
+ },
+ {
+ resp1: compareRevisionAndPutResponse(true, 7),
+ resp2: partialResponse(7),
+ expectMatch: true,
+ },
+ {
+ resp1: compareRevisionAndPutResponse(false, 7),
+ resp2: partialResponse(7),
+ expectMatch: true,
+ },
+ {
+ resp1: compareRevisionAndPutResponse(true, 7),
+ resp2: partialResponse(4),
+ expectMatch: false,
+ },
+ {
+ resp1: compareRevisionAndPutResponse(false, 7),
+ resp2: partialResponse(3),
+ expectMatch: false,
+ },
+ {
+ resp1: compareRevisionAndPutResponse(false, 7),
+ resp2: partialResponse(0),
+ expectMatch: true,
+ },
+ {
+ resp1: MaybeEtcdResponse{EtcdResponse: EtcdResponse{Revision: 1, Txn: &TxnResponse{Failure: false, Results: []EtcdOperationResult{{Deleted: 1}}}}},
+ resp2: failedResponse(errors.New("failed request")),
+ expectMatch: false,
+ },
+ {
+ resp1: failedResponse(errors.New("failed request 1")),
+ resp2: failedResponse(errors.New("failed request 2")),
+ expectMatch: false,
+ },
+ {
+ resp1: failedResponse(errors.New("failed request")),
+ resp2: failedResponse(errors.New("failed request")),
+ expectMatch: true,
+ },
+ {
+ resp1: putResponse(2),
+ resp2: MaybeEtcdResponse{Persisted: true},
+ expectMatch: true,
+ },
+ {
+ resp1: putResponse(2),
+ resp2: MaybeEtcdResponse{Persisted: true, PersistedRevision: 2},
+ expectMatch: true,
+ },
+ {
+ resp1: putResponse(2),
+ resp2: MaybeEtcdResponse{Persisted: true, PersistedRevision: 3},
+ expectMatch: false,
+ },
+ {
+ resp1: failedResponse(errors.New("failed request")),
+ resp2: MaybeEtcdResponse{Persisted: true},
+ expectMatch: true,
+ },
+ {
+ resp1: failedResponse(errors.New("failed request")),
+ resp2: MaybeEtcdResponse{Persisted: true, PersistedRevision: 2},
+ expectMatch: true,
+ },
+ {
+ resp1: MaybeEtcdResponse{Persisted: true},
+ resp2: MaybeEtcdResponse{Persisted: true, PersistedRevision: 2},
+ expectMatch: true,
+ },
+ {
+ resp1: MaybeEtcdResponse{Persisted: true, PersistedRevision: 2},
+ resp2: MaybeEtcdResponse{Persisted: true, PersistedRevision: 2},
+ expectMatch: true,
+ },
+ {
+ resp1: MaybeEtcdResponse{Persisted: true, PersistedRevision: 1},
+ resp2: MaybeEtcdResponse{Persisted: true, PersistedRevision: 2},
+ expectMatch: false,
+ },
+ }
+ for i, tc := range tcs {
+ assert.Equalf(t, tc.expectMatch, Match(tc.resp1, tc.resp2), "%d %+v %+v", i, tc.resp1, tc.resp2)
+ }
+}
diff --git a/tests/robustness/model/replay.go b/tests/robustness/model/replay.go
new file mode 100644
index 00000000000..7f2be26e8ed
--- /dev/null
+++ b/tests/robustness/model/replay.go
@@ -0,0 +1,166 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
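+// NewReplay replays the persisted requests through the etcd model, recording
+// the state after every revision change and the watch events each request produces.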
+func NewReplay(persistedRequests []EtcdRequest) *EtcdReplay {
+ state := freshEtcdState()
+ // Pad indexes 0 and 1 so that the slice index matches the revision.
+ revisionToEtcdState := []EtcdState{state, state}
+ var events []PersistedEvent
+ for _, request := range persistedRequests {
+ newState, response := state.Step(request)
+ if state.Revision != newState.Revision {
+ revisionToEtcdState = append(revisionToEtcdState, newState)
+ }
+ events = append(events, toWatchEvents(&state, request, response)...)
+ state = newState
+ }
+ return &EtcdReplay{
+ revisionToEtcdState: revisionToEtcdState,
+ Events: events,
+ }
+}
+
+type EtcdReplay struct {
+ revisionToEtcdState []EtcdState
+ Events []PersistedEvent
+}
+
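+// StateForRevision returns the model state observed at the given revision,
+// or an error if the revision is higher than any revision observed in the replay.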
+func (r *EtcdReplay) StateForRevision(revision int64) (EtcdState, error) {
+ if int(revision) >= len(r.revisionToEtcdState) {
+ return EtcdState{}, fmt.Errorf("requested revision %d, higher than observed in replay %d", revision, len(r.revisionToEtcdState)-1)
+ }
+ return r.revisionToEtcdState[revision], nil
+}
+
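+// EventsForWatch returns all persisted events that match the watch request,
+// starting from the watch revision.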
+func (r *EtcdReplay) EventsForWatch(watch WatchRequest) (events []PersistedEvent) {
+ for _, e := range r.Events {
+ if e.Revision < watch.Revision || !e.Match(watch) {
+ continue
+ }
+ events = append(events, e)
+ }
+ return events
+}
+
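+// toWatchEvents converts a successfully applied request into the watch events
+// it generates, based on the state before the request was applied.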
+func toWatchEvents(prevState *EtcdState, request EtcdRequest, response MaybeEtcdResponse) (events []PersistedEvent) {
+ if response.Error != "" {
+ return events
+ }
+
+ switch request.Type {
+ case Txn:
+ var ops []EtcdOperation
+ if response.Txn.Failure {
+ ops = request.Txn.OperationsOnFailure
+ } else {
+ ops = request.Txn.OperationsOnSuccess
+ }
+ for _, op := range ops {
+ switch op.Type {
+ case RangeOperation:
+ case DeleteOperation:
+ e := PersistedEvent{
+ Event: Event{
+ Type: op.Type,
+ Key: op.Delete.Key,
+ },
+ Revision: response.Revision,
+ }
+ if _, ok := prevState.KeyValues[op.Delete.Key]; ok {
+ events = append(events, e)
+ }
+ case PutOperation:
+ _, leaseExists := prevState.Leases[op.Put.LeaseID]
+ if op.Put.LeaseID != 0 && !leaseExists {
+ break
+ }
+
+ e := PersistedEvent{
+ Event: Event{
+ Type: op.Type,
+ Key: op.Put.Key,
+ Value: op.Put.Value,
+ },
+ Revision: response.Revision,
+ }
+ if _, ok := prevState.KeyValues[op.Put.Key]; !ok {
+ e.IsCreate = true
+ }
+ events = append(events, e)
+ default:
+ panic(fmt.Sprintf("unsupported operation type: %v", op))
+ }
+ }
+ case LeaseRevoke:
+ deletedKeys := []string{}
+ for key := range prevState.Leases[request.LeaseRevoke.LeaseID].Keys {
+ if _, ok := prevState.KeyValues[key]; ok {
+ deletedKeys = append(deletedKeys, key)
+ }
+ }
+
+ sort.Strings(deletedKeys)
+ for _, key := range deletedKeys {
+ e := PersistedEvent{
+ Event: Event{
+ Type: DeleteOperation,
+ Key: key,
+ },
+ Revision: response.Revision,
+ }
+ events = append(events, e)
+ }
+ }
+ return events
+}
+
+type WatchEvent struct {
+ PersistedEvent
+ PrevValue *ValueRevision
+}
+
+type PersistedEvent struct {
+ Event
+ Revision int64
+ IsCreate bool
+}
+
+type Event struct {
+ Type OperationType
+ Key string
+ Value ValueOrHash
+}
+
+func (e Event) Match(request WatchRequest) bool {
+ if request.WithPrefix {
+ return strings.HasPrefix(e.Key, request.Key)
+ }
+ return e.Key == request.Key
+}
+
+type WatchRequest struct {
+ Key string
+ Revision int64
+ WithPrefix bool
+ WithProgressNotify bool
+ WithPrevKV bool
+}
diff --git a/tests/robustness/model/watch.go b/tests/robustness/model/watch.go
new file mode 100644
index 00000000000..fc880e30ede
--- /dev/null
+++ b/tests/robustness/model/watch.go
@@ -0,0 +1,30 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import "time"
+
+type WatchOperation struct {
+ Request WatchRequest
+ Responses []WatchResponse
+}
+
+type WatchResponse struct {
+ Events []WatchEvent
+ IsProgressNotify bool
+ Revision int64
+ Time time.Duration
+ Error string
+}
diff --git a/tests/robustness/options/cluster_options.go b/tests/robustness/options/cluster_options.go
new file mode 100644
index 00000000000..01031e21846
--- /dev/null
+++ b/tests/robustness/options/cluster_options.go
@@ -0,0 +1,50 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+import (
+ "math/rand"
+ "time"
+
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+var internalRand = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+type ClusterOptions []e2e.EPClusterOption
+
+// WithClusterOptionGroups takes a list of ClusterOptions groups and randomly applies exactly one group when constructing the config.
+// It is mainly used to keep strongly coupled config options together, so that different groups of options can be tested dynamically.
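+// For example (illustrative),
+// WithClusterOptionGroups(ClusterOptions{WithTickMs(100), WithElectionMs(1000)}, ClusterOptions{WithTickMs(500), WithElectionMs(5000)})
+// applies exactly one of the two groups, never a mix of options from both.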
+func WithClusterOptionGroups(input ...ClusterOptions) e2e.EPClusterOption {
+ return func(c *e2e.EtcdProcessClusterConfig) {
+ optsPicked := input[internalRand.Intn(len(input))]
+ for _, opt := range optsPicked {
+ opt(c)
+ }
+ }
+}
+
+// WithSubsetOptions randomly selects a subset of the input options and applies it to the cluster config.
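+// For example (illustrative), WithSubsetOptions(WithTickMs(100), WithSnapshotCount(50))
+// may apply neither option, either one of them, or both.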
+func WithSubsetOptions(input ...e2e.EPClusterOption) e2e.EPClusterOption {
+ return func(c *e2e.EtcdProcessClusterConfig) {
+ // Pick a random subset size (0 to len(input)) and apply that many randomly chosen options.
+ subsetLen := internalRand.Intn(len(input) + 1)
+ perm := internalRand.Perm(len(input))
+ for i := 0; i < subsetLen; i++ {
+ opt := input[perm[i]]
+ opt(c)
+ }
+ }
+}
diff --git a/tests/robustness/options/cluster_options_test.go b/tests/robustness/options/cluster_options_test.go
new file mode 100644
index 00000000000..290ffd6e3bf
--- /dev/null
+++ b/tests/robustness/options/cluster_options_test.go
@@ -0,0 +1,110 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+import (
+ "math/rand"
+ "testing"
+
+ "go.etcd.io/etcd/server/v3/embed"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
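+// mockRand swaps the package-level random source for a deterministic one and
+// returns a function that restores the original source.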
+func mockRand(source rand.Source) func() {
+ tmp := internalRand
+ internalRand = rand.New(source)
+ return func() {
+ internalRand = tmp
+ }
+}
+
+func TestWithClusterOptionGroups(t *testing.T) {
+ restore := mockRand(rand.NewSource(1))
+ defer restore()
+ tickOptions1 := ClusterOptions{WithTickMs(101), WithElectionMs(1001)}
+ tickOptions2 := ClusterOptions{WithTickMs(202), WithElectionMs(2002)}
+ tickOptions3 := ClusterOptions{WithTickMs(303), WithElectionMs(3003)}
+ opts := ClusterOptions{
+ WithSnapshotCount(100, 150, 200),
+ WithClusterOptionGroups(tickOptions1, tickOptions2, tickOptions3),
+ WithSnapshotCatchUpEntries(100),
+ }
+
+ expectedServerConfigs := []embed.Config{
+ {SnapshotCount: 200, SnapshotCatchUpEntries: 100, TickMs: 101, ElectionMs: 1001},
+ {SnapshotCount: 100, SnapshotCatchUpEntries: 100, TickMs: 202, ElectionMs: 2002},
+ {SnapshotCount: 200, SnapshotCatchUpEntries: 100, TickMs: 202, ElectionMs: 2002},
+ {SnapshotCount: 200, SnapshotCatchUpEntries: 100, TickMs: 101, ElectionMs: 1001},
+ {SnapshotCount: 200, SnapshotCatchUpEntries: 100, TickMs: 101, ElectionMs: 1001},
+ {SnapshotCount: 150, SnapshotCatchUpEntries: 100, TickMs: 202, ElectionMs: 2002},
+ }
+ for i, tt := range expectedServerConfigs {
+ cluster := *e2e.NewConfig(opts...)
+ if cluster.ServerConfig.SnapshotCount != tt.SnapshotCount {
+ t.Errorf("Test case %d: SnapshotCount = %v, want %v\n", i, cluster.ServerConfig.SnapshotCount, tt.SnapshotCount)
+ }
+ if cluster.ServerConfig.SnapshotCatchUpEntries != tt.SnapshotCatchUpEntries {
+ t.Errorf("Test case %d: SnapshotCatchUpEntries = %v, want %v\n", i, cluster.ServerConfig.SnapshotCatchUpEntries, tt.SnapshotCatchUpEntries)
+ }
+ if cluster.ServerConfig.TickMs != tt.TickMs {
+ t.Errorf("Test case %d: TickMs = %v, want %v\n", i, cluster.ServerConfig.TickMs, tt.TickMs)
+ }
+ if cluster.ServerConfig.ElectionMs != tt.ElectionMs {
+ t.Errorf("Test case %d: ElectionMs = %v, want %v\n", i, cluster.ServerConfig.ElectionMs, tt.ElectionMs)
+ }
+ }
+}
+
+func TestWithOptionsSubset(t *testing.T) {
+ restore := mockRand(rand.NewSource(1))
+ defer restore()
+ tickOptions := ClusterOptions{WithTickMs(50), WithElectionMs(500)}
+ opts := ClusterOptions{
+ WithSnapshotCatchUpEntries(100),
+ WithSubsetOptions(WithSnapshotCount(100, 150, 200), WithClusterOptionGroups(tickOptions)),
+ }
+
+ expectedServerConfigs := []embed.Config{
+ {SnapshotCount: 10000, SnapshotCatchUpEntries: 100, TickMs: 100, ElectionMs: 1000},
+ {SnapshotCount: 10000, SnapshotCatchUpEntries: 100, TickMs: 100, ElectionMs: 1000},
+ {SnapshotCount: 10000, SnapshotCatchUpEntries: 100, TickMs: 100, ElectionMs: 1000},
+ // both SnapshotCount and TickMs&ElectionMs are not default values.
+ {SnapshotCount: 200, SnapshotCatchUpEntries: 100, TickMs: 50, ElectionMs: 500},
+ {SnapshotCount: 10000, SnapshotCatchUpEntries: 100, TickMs: 100, ElectionMs: 1000},
+ // only TickMs&ElectionMs are not default values.
+ {SnapshotCount: 10000, SnapshotCatchUpEntries: 100, TickMs: 50, ElectionMs: 500},
+ // both SnapshotCount and TickMs&ElectionMs are not default values.
+ {SnapshotCount: 200, SnapshotCatchUpEntries: 100, TickMs: 50, ElectionMs: 500},
+ // only TickMs&ElectionMs are not default values.
+ {SnapshotCount: 10000, SnapshotCatchUpEntries: 100, TickMs: 50, ElectionMs: 500},
+ // only SnapshotCount is not the default value.
+ {SnapshotCount: 100, SnapshotCatchUpEntries: 100, TickMs: 100, ElectionMs: 1000},
+ }
+ for i, tt := range expectedServerConfigs {
+ cluster := *e2e.NewConfig(opts...)
+ if cluster.ServerConfig.SnapshotCount != tt.SnapshotCount {
+ t.Errorf("Test case %d: SnapshotCount = %v, want %v\n", i, cluster.ServerConfig.SnapshotCount, tt.SnapshotCount)
+ }
+ if cluster.ServerConfig.SnapshotCatchUpEntries != tt.SnapshotCatchUpEntries {
+ t.Errorf("Test case %d: SnapshotCatchUpEntries = %v, want %v\n", i, cluster.ServerConfig.SnapshotCatchUpEntries, tt.SnapshotCatchUpEntries)
+ }
+ if cluster.ServerConfig.TickMs != tt.TickMs {
+ t.Errorf("Test case %d: TickMs = %v, want %v\n", i, cluster.ServerConfig.TickMs, tt.TickMs)
+ }
+ if cluster.ServerConfig.ElectionMs != tt.ElectionMs {
+ t.Errorf("Test case %d: ElectionMs = %v, want %v\n", i, cluster.ServerConfig.ElectionMs, tt.ElectionMs)
+ }
+ }
+}
diff --git a/tests/robustness/options/server_config_options.go b/tests/robustness/options/server_config_options.go
new file mode 100644
index 00000000000..a0502a1f7dc
--- /dev/null
+++ b/tests/robustness/options/server_config_options.go
@@ -0,0 +1,65 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+import (
+ "time"
+
+ e2e "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
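+// WithSnapshotCount sets ServerConfig.SnapshotCount to one of the provided values,
+// picked at random each time the option is applied. The other With* helpers in this
+// file follow the same pattern.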
+func WithSnapshotCount(input ...uint64) e2e.EPClusterOption {
+ return func(c *e2e.EtcdProcessClusterConfig) {
+ c.ServerConfig.SnapshotCount = input[internalRand.Intn(len(input))]
+ }
+}
+
+func WithCompactionBatchLimit(input ...int) e2e.EPClusterOption {
+ return func(c *e2e.EtcdProcessClusterConfig) {
+ c.ServerConfig.ExperimentalCompactionBatchLimit = input[internalRand.Intn(len(input))]
+ }
+}
+
+func WithSnapshotCatchUpEntries(input ...uint64) e2e.EPClusterOption {
+ return func(c *e2e.EtcdProcessClusterConfig) {
+ c.ServerConfig.SnapshotCatchUpEntries = input[internalRand.Intn(len(input))]
+ }
+}
+
+func WithTickMs(input ...uint) e2e.EPClusterOption {
+ return func(c *e2e.EtcdProcessClusterConfig) {
+ c.ServerConfig.TickMs = input[internalRand.Intn(len(input))]
+ }
+}
+
+func WithElectionMs(input ...uint) e2e.EPClusterOption {
+ return func(c *e2e.EtcdProcessClusterConfig) {
+ c.ServerConfig.ElectionMs = input[internalRand.Intn(len(input))]
+ }
+}
+
+func WithExperimentalWatchProgressNotifyInterval(input ...time.Duration) e2e.EPClusterOption {
+ return func(c *e2e.EtcdProcessClusterConfig) {
+ c.ServerConfig.ExperimentalWatchProgressNotifyInterval = input[internalRand.Intn(len(input))]
+ }
+}
+
+func WithVersion(input ...e2e.ClusterVersion) e2e.EPClusterOption {
+ return func(c *e2e.EtcdProcessClusterConfig) { c.Version = input[internalRand.Intn(len(input))] }
+}
+
+func WithInitialLeaderIndex(input ...int) e2e.EPClusterOption {
+ return func(c *e2e.EtcdProcessClusterConfig) { c.InitialLeaderIndex = input[internalRand.Intn(len(input))] }
+}
diff --git a/tests/robustness/patches/beforeSendWatchResponse/build.patch b/tests/robustness/patches/beforeSendWatchResponse/build.patch
new file mode 100644
index 00000000000..af93a7e5a90
--- /dev/null
+++ b/tests/robustness/patches/beforeSendWatchResponse/build.patch
@@ -0,0 +1,9 @@
+@@ -25,7 +26,7 @@ GOFAIL_VERSION=$(cd tools/mod && go list -m -f {{.Version}} go.etcd.io/gofail)
+ toggle_failpoints() {
+ mode="$1"
+ if command -v gofail >/dev/null 2>&1; then
+- run gofail "$mode" server/etcdserver/ server/mvcc/ server/wal/ server/mvcc/backend/
++ run gofail "$mode" server/etcdserver/ server/mvcc/ server/wal/ server/mvcc/backend/ server/etcdserver/api/v3rpc/
+ if [[ "$mode" == "enable" ]]; then
+ go get go.etcd.io/gofail@${GOFAIL_VERSION}
+ cd ./server && go get go.etcd.io/gofail@${GOFAIL_VERSION}
diff --git a/tests/robustness/patches/beforeSendWatchResponse/watch.patch b/tests/robustness/patches/beforeSendWatchResponse/watch.patch
new file mode 100644
index 00000000000..4ae45f06b40
--- /dev/null
+++ b/tests/robustness/patches/beforeSendWatchResponse/watch.patch
@@ -0,0 +1,12 @@
+diff --git a/server/etcdserver/api/v3rpc/watch.go b/server/etcdserver/api/v3rpc/watch.go
+index cd834aa..e6aaf2b 100644
+--- a/server/etcdserver/api/v3rpc/watch.go
++++ b/server/etcdserver/api/v3rpc/watch.go
+@@ -460,6 +460,7 @@ func (sws *serverWatchStream) sendLoop() {
+ sws.mu.RUnlock()
+
+ var serr error
++ // gofail: var beforeSendWatchResponse struct{}
+ if !fragmented && !ok {
+ serr = sws.gRPCStream.Send(wr)
+ } else {
diff --git a/tests/robustness/patches/compactBeforeSetFinishedCompact/kvstore_compaction.patch b/tests/robustness/patches/compactBeforeSetFinishedCompact/kvstore_compaction.patch
new file mode 100644
index 00000000000..b04978b7c3e
--- /dev/null
+++ b/tests/robustness/patches/compactBeforeSetFinishedCompact/kvstore_compaction.patch
@@ -0,0 +1,26 @@
+From 6b034466aa0ac2b46fe01fb5bd2233946f46d453 Mon Sep 17 00:00:00 2001
+From: Wei Fu
+Date: Wed, 24 Apr 2024 12:14:27 +0800
+Subject: [PATCH] server/mvcc: introduce compactBeforeSetFinishedCompact
+ failpoint
+
+Signed-off-by: Wei Fu
+---
+ server/mvcc/kvstore_compaction.go | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/server/mvcc/kvstore_compaction.go b/server/mvcc/kvstore_compaction.go
+index c7d343d5c..89defbd9e 100644
+--- a/server/mvcc/kvstore_compaction.go
++++ b/server/mvcc/kvstore_compaction.go
+@@ -59,6 +59,7 @@ func (s *store) scheduleCompaction(compactMainRev, prevCompactRev int64) (KeyVal
+ }
+
+ if len(keys) < s.cfg.CompactionBatchLimit {
++ // gofail: var compactBeforeSetFinishedCompact struct{}
+ rbytes := make([]byte, 8+1+8)
+ revToBytes(revision{main: compactMainRev}, rbytes)
+ tx.UnsafePut(buckets.Meta, finishedCompactKeyName, rbytes)
+--
+2.34.1
+
diff --git a/tests/robustness/random/random.go b/tests/robustness/random/random.go
new file mode 100644
index 00000000000..7fa5f03ddfa
--- /dev/null
+++ b/tests/robustness/random/random.go
@@ -0,0 +1,53 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package random
+
+import (
+ "math/rand"
+ "strings"
+)
+
+func RandString(size int) string {
+ data := strings.Builder{}
+ data.Grow(size)
+ for i := 0; i < size; i++ {
+ data.WriteByte(byte(int('a') + rand.Intn(26)))
+ }
+ return data.String()
+}
+
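+// RandRange returns a pseudo-random int64 in the half-open interval [start, end).
+// It panics if end <= start.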
+func RandRange(start, end int64) int64 {
+ return rand.Int63n(end-start) + start
+}
+
+type ChoiceWeight[T any] struct {
+ Choice T
+ Weight int
+}
+
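+// PickRandom returns one of the choices with probability proportional to its Weight.
+// It panics if the total weight is zero.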
+func PickRandom[T any](choices []ChoiceWeight[T]) T {
+ sum := 0
+ for _, op := range choices {
+ sum += op.Weight
+ }
+ roll := rand.Int() % sum
+ for _, op := range choices {
+ if roll < op.Weight {
+ return op.Choice
+ }
+ roll -= op.Weight
+ }
+ panic("unexpected")
+}
diff --git a/tests/robustness/readme-images/artifact_archive.png b/tests/robustness/readme-images/artifact_archive.png
new file mode 100644
index 00000000000..1947d373443
Binary files /dev/null and b/tests/robustness/readme-images/artifact_archive.png differ
diff --git a/tests/robustness/readme-images/issue14370.png b/tests/robustness/readme-images/issue14370.png
new file mode 100644
index 00000000000..2b418c486af
Binary files /dev/null and b/tests/robustness/readme-images/issue14370.png differ
diff --git a/tests/robustness/readme-images/prow_job.png b/tests/robustness/readme-images/prow_job.png
new file mode 100644
index 00000000000..367c0bb13a9
Binary files /dev/null and b/tests/robustness/readme-images/prow_job.png differ
diff --git a/tests/robustness/readme-images/prow_job_artifacts_dir_page.png b/tests/robustness/readme-images/prow_job_artifacts_dir_page.png
new file mode 100644
index 00000000000..4488e476bf9
Binary files /dev/null and b/tests/robustness/readme-images/prow_job_artifacts_dir_page.png differ
diff --git a/tests/robustness/readme-images/prow_job_artifacts_page.png b/tests/robustness/readme-images/prow_job_artifacts_page.png
new file mode 100644
index 00000000000..3987e66ec5e
Binary files /dev/null and b/tests/robustness/readme-images/prow_job_artifacts_page.png differ
diff --git a/tests/robustness/report/client.go b/tests/robustness/report/client.go
new file mode 100644
index 00000000000..48d29b8ae81
--- /dev/null
+++ b/tests/robustness/report/client.go
@@ -0,0 +1,204 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package report
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/anishathalye/porcupine"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/tests/v3/robustness/model"
+)
+
+type ClientReport struct {
+ ClientID int
+ KeyValue []porcupine.Operation
+ Watch []model.WatchOperation
+}
+
+func (r ClientReport) WatchEventCount() int {
+ count := 0
+ for _, op := range r.Watch {
+ for _, resp := range op.Responses {
+ count += len(resp.Events)
+ }
+ }
+ return count
+}
+
+func persistClientReports(t *testing.T, lg *zap.Logger, path string, reports []ClientReport) {
+ sort.Slice(reports, func(i, j int) bool {
+ return reports[i].ClientID < reports[j].ClientID
+ })
+ for _, r := range reports {
+ clientDir := filepath.Join(path, fmt.Sprintf("client-%d", r.ClientID))
+ err := os.MkdirAll(clientDir, 0o700)
+ require.NoError(t, err)
+ if len(r.Watch) != 0 {
+ persistWatchOperations(t, lg, filepath.Join(clientDir, "watch.json"), r.Watch)
+ } else {
+ lg.Info("no watch operations for client, skip persisting", zap.Int("client-id", r.ClientID))
+ }
+ if len(r.KeyValue) != 0 {
+ persistKeyValueOperations(t, lg, filepath.Join(clientDir, "operations.json"), r.KeyValue)
+ } else {
+ lg.Info("no KV operations for client, skip persisting", zap.Int("client-id", r.ClientID))
+ }
+ }
+}
+
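+// LoadClientReports loads client reports from all client-<id> subdirectories of path,
+// ordered by client ID.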
+func LoadClientReports(path string) ([]ClientReport, error) {
+ files, err := os.ReadDir(path)
+ if err != nil {
+ return nil, err
+ }
+ reports := []ClientReport{}
+ for _, file := range files {
+ if file.IsDir() && strings.HasPrefix(file.Name(), "client-") {
+ idString := strings.Replace(file.Name(), "client-", "", 1)
+ id, err := strconv.Atoi(idString)
+ if err != nil {
+ return nil, fmt.Errorf("failed to extract clientID from directory: %q", file.Name())
+ }
+ r, err := loadClientReport(filepath.Join(path, file.Name()))
+ if err != nil {
+ return nil, err
+ }
+ r.ClientID = id
+ reports = append(reports, r)
+ }
+ }
+ sort.Slice(reports, func(i, j int) bool {
+ return reports[i].ClientID < reports[j].ClientID
+ })
+ return reports, nil
+}
+
+func loadClientReport(path string) (report ClientReport, err error) {
+ report.Watch, err = loadWatchOperations(filepath.Join(path, "watch.json"))
+ if err != nil {
+ return report, err
+ }
+ report.KeyValue, err = loadKeyValueOperations(filepath.Join(path, "operations.json"))
+ if err != nil {
+ return report, err
+ }
+ return report, nil
+}
+
+func loadWatchOperations(path string) (operations []model.WatchOperation, err error) {
+ _, err = os.Stat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, nil
+ }
+ return nil, fmt.Errorf("failed to open watch operation file: %q, err: %w", path, err)
+ }
+ file, err := os.OpenFile(path, os.O_RDONLY, 0o755)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open watch operation file: %q, err: %w", path, err)
+ }
+ defer file.Close()
+ decoder := json.NewDecoder(file)
+ for decoder.More() {
+ var watch model.WatchOperation
+ err = decoder.Decode(&watch)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode watch operation, err: %w", err)
+ }
+ operations = append(operations, watch)
+ }
+ return operations, nil
+}
+
+func loadKeyValueOperations(path string) (operations []porcupine.Operation, err error) {
+ _, err = os.Stat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, nil
+ }
+ return nil, fmt.Errorf("failed to open watch operation file: %q, err: %w", path, err)
+ }
+ file, err := os.OpenFile(path, os.O_RDONLY, 0o755)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open watch operation file: %q, err: %w", path, err)
+ }
+ defer file.Close()
+ decoder := json.NewDecoder(file)
+ for decoder.More() {
+ var operation struct {
+ ClientID int
+ Input model.EtcdRequest
+ Call int64
+ Output model.MaybeEtcdResponse
+ Return int64
+ }
+ err = decoder.Decode(&operation)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode watch operation, err: %w", err)
+ }
+ operations = append(operations, porcupine.Operation{
+ ClientId: operation.ClientID,
+ Input: operation.Input,
+ Call: operation.Call,
+ Output: operation.Output,
+ Return: operation.Return,
+ })
+ }
+ return operations, nil
+}
+
+func persistWatchOperations(t *testing.T, lg *zap.Logger, path string, responses []model.WatchOperation) {
+ lg.Info("Saving watch operations", zap.String("path", path))
+ file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o755)
+ if err != nil {
+ t.Errorf("Failed to save watch operations: %v", err)
+ return
+ }
+ defer file.Close()
+ encoder := json.NewEncoder(file)
+ for _, resp := range responses {
+ err := encoder.Encode(resp)
+ if err != nil {
+ t.Errorf("Failed to encode operation: %v", err)
+ }
+ }
+}
+
+func persistKeyValueOperations(t *testing.T, lg *zap.Logger, path string, operations []porcupine.Operation) {
+ lg.Info("Saving operation history", zap.String("path", path))
+ file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o755)
+ if err != nil {
+ t.Errorf("Failed to save operation history: %v", err)
+ return
+ }
+ defer file.Close()
+ encoder := json.NewEncoder(file)
+ for _, op := range operations {
+ err := encoder.Encode(op)
+ if err != nil {
+ t.Errorf("Failed to encode operation: %v", err)
+ }
+ }
+}
diff --git a/tests/robustness/report/client_test.go b/tests/robustness/report/client_test.go
new file mode 100644
index 00000000000..07da049c74e
--- /dev/null
+++ b/tests/robustness/report/client_test.go
@@ -0,0 +1,142 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package report
+
+import (
+ "errors"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/robustness/identity"
+ "go.etcd.io/etcd/tests/v3/robustness/model"
+)
+
+func TestPersistLoadClientReports(t *testing.T) {
+ h := model.NewAppendableHistory(identity.NewIDProvider())
+ baseTime := time.Now()
+
+ start := time.Since(baseTime)
+ time.Sleep(time.Nanosecond)
+ stop := time.Since(baseTime)
+ h.AppendRange("key", "", 0, 0, start, stop, &clientv3.GetResponse{Header: &etcdserverpb.ResponseHeader{Revision: 2}, Count: 2, Kvs: []*mvccpb.KeyValue{{
+ Key: []byte("key"),
+ ModRevision: 2,
+ Value: []byte("value"),
+ }}}, nil)
+
+ start = time.Since(baseTime)
+ time.Sleep(time.Nanosecond)
+ stop = time.Since(baseTime)
+ h.AppendPut("key1", "1", start, stop, &clientv3.PutResponse{Header: &etcdserverpb.ResponseHeader{Revision: 2}}, nil)
+
+ start = time.Since(baseTime)
+ time.Sleep(time.Nanosecond)
+ stop = time.Since(baseTime)
+ h.AppendPut("key", "value", start, stop, nil, errors.New("failed"))
+
+ start = time.Since(baseTime)
+ time.Sleep(time.Nanosecond)
+ stop = time.Since(baseTime)
+ h.AppendPutWithLease("key1", "1", 1, start, stop, &clientv3.PutResponse{Header: &etcdserverpb.ResponseHeader{Revision: 2}}, nil)
+
+ start = time.Since(baseTime)
+ time.Sleep(time.Nanosecond)
+ stop = time.Since(baseTime)
+ h.AppendLeaseGrant(start, stop, &clientv3.LeaseGrantResponse{ID: 1, ResponseHeader: &etcdserverpb.ResponseHeader{Revision: 2}}, nil)
+
+ start = time.Since(baseTime)
+ time.Sleep(time.Nanosecond)
+ stop = time.Since(baseTime)
+ h.AppendLeaseRevoke(1, start, stop, &clientv3.LeaseRevokeResponse{Header: &etcdserverpb.ResponseHeader{Revision: 2}}, nil)
+
+ start = time.Since(baseTime)
+ time.Sleep(time.Nanosecond)
+ stop = time.Since(baseTime)
+ h.AppendDelete("key", start, stop, &clientv3.DeleteResponse{Deleted: 1, Header: &etcdserverpb.ResponseHeader{Revision: 3}}, nil)
+
+ start = time.Since(baseTime)
+ time.Sleep(time.Nanosecond)
+ stop = time.Since(baseTime)
+ h.AppendTxn([]clientv3.Cmp{clientv3.Compare(clientv3.ModRevision("key"), "=", 2)}, []clientv3.Op{clientv3.OpPut("key", "value")}, []clientv3.Op{clientv3.OpDelete("key")}, start, stop, &clientv3.TxnResponse{Header: &etcdserverpb.ResponseHeader{Revision: 2}}, nil)
+
+ start = time.Since(baseTime)
+ time.Sleep(time.Nanosecond)
+ stop = time.Since(baseTime)
+ h.AppendDefragment(start, stop, &clientv3.DefragmentResponse{Header: &etcdserverpb.ResponseHeader{Revision: 2}}, nil)
+
+ watch := model.WatchOperation{
+ Request: model.WatchRequest{
+ Key: "key",
+ Revision: 0,
+ WithPrefix: true,
+ WithProgressNotify: false,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ {
+ PersistedEvent: model.PersistedEvent{
+ Event: model.Event{
+ Type: model.PutOperation,
+ Key: "key1",
+ Value: model.ToValueOrHash("1"),
+ },
+ Revision: 2,
+ },
+ },
+ {
+ PersistedEvent: model.PersistedEvent{
+ Event: model.Event{
+ Type: model.DeleteOperation,
+ Key: "key2",
+ },
+ Revision: 3,
+ },
+ },
+ },
+ IsProgressNotify: false,
+ Revision: 3,
+ Time: 100,
+ },
+ },
+ }
+ reports := []ClientReport{
+ {
+ ClientID: 1,
+ KeyValue: h.Operations(),
+ Watch: []model.WatchOperation{watch},
+ },
+ {
+ ClientID: 2,
+ KeyValue: nil,
+ Watch: []model.WatchOperation{watch},
+ },
+ }
+ path := t.TempDir()
+ persistClientReports(t, zaptest.NewLogger(t), path, reports)
+ got, err := LoadClientReports(path)
+ require.NoError(t, err)
+ if diff := cmp.Diff(reports, got, cmpopts.EquateEmpty()); diff != "" {
+ t.Errorf("Reports don't match after persist and load, %s", diff)
+ }
+}
diff --git a/tests/robustness/report/failpoint.go b/tests/robustness/report/failpoint.go
new file mode 100644
index 00000000000..17b604e3890
--- /dev/null
+++ b/tests/robustness/report/failpoint.go
@@ -0,0 +1,29 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package report
+
+import (
+ "time"
+)
+
+type FailpointReport struct {
+ FailpointInjection
+ Client []ClientReport
+}
+
+type FailpointInjection struct {
+ Start, End time.Duration
+ Name string
+}
diff --git a/tests/robustness/report/report.go b/tests/robustness/report/report.go
new file mode 100644
index 00000000000..b3d3d5a5e32
--- /dev/null
+++ b/tests/robustness/report/report.go
@@ -0,0 +1,91 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package report
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+)
+
+type TestReport struct {
+ Logger *zap.Logger
+ Cluster *e2e.EtcdProcessCluster
+ Client []ClientReport
+ Visualize func(path string) error
+}
+
+func testResultsDirectory(t *testing.T) string {
+ resultsDirectory, ok := os.LookupEnv("RESULTS_DIR")
+ if !ok {
+ resultsDirectory = "/tmp/"
+ }
+ resultsDirectory, err := filepath.Abs(resultsDirectory)
+ if err != nil {
+ panic(err)
+ }
+ path, err := filepath.Abs(filepath.Join(
+ resultsDirectory, strings.ReplaceAll(t.Name(), "/", "_"), fmt.Sprintf("%v", time.Now().UnixNano())))
+ require.NoError(t, err)
+ err = os.RemoveAll(path)
+ require.NoError(t, err)
+ err = os.MkdirAll(path, 0o700)
+ require.NoError(t, err)
+ return path
+}
+
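+// Report persists member data directories, client reports and an optional history
+// visualization, but only when the test failed, force is set, or the PERSIST_RESULTS
+// environment variable is set.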
+func (r *TestReport) Report(t *testing.T, force bool) {
+ _, persistResultsEnvSet := os.LookupEnv("PERSIST_RESULTS")
+ if !t.Failed() && !force && !persistResultsEnvSet {
+ return
+ }
+ path := testResultsDirectory(t)
+ r.Logger.Info("Saving robustness test report", zap.String("path", path))
+ for _, member := range r.Cluster.Procs {
+ memberDataDir := filepath.Join(path, fmt.Sprintf("server-%s", member.Config().Name))
+ persistMemberDataDir(t, r.Logger, member, memberDataDir)
+ }
+ if r.Client != nil {
+ persistClientReports(t, r.Logger, path, r.Client)
+ }
+ if r.Visualize != nil {
+ err := r.Visualize(filepath.Join(path, "history.html"))
+ if err != nil {
+ t.Error(err)
+ }
+ }
+}
+
+func persistMemberDataDir(t *testing.T, lg *zap.Logger, member e2e.EtcdProcess, path string) {
+ lg.Info("Saving member data dir", zap.String("member", member.Config().Name), zap.String("path", path))
+ err := os.Rename(memberDataDir(member), path)
+ require.NoError(t, err)
+}
+
+func memberDataDir(member e2e.EtcdProcess) string {
+ lazyFS := member.LazyFS()
+ if lazyFS != nil {
+ return filepath.Join(lazyFS.LazyFSDir, "data")
+ }
+ return member.Config().DataDirPath
+}
diff --git a/tests/robustness/report/wal.go b/tests/robustness/report/wal.go
new file mode 100644
index 00000000000..5efa8b9b23b
--- /dev/null
+++ b/tests/robustness/report/wal.go
@@ -0,0 +1,255 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package report
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/google/go-cmp/cmp"
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ "go.etcd.io/etcd/server/v3/storage/datadir"
+ "go.etcd.io/etcd/server/v3/storage/wal"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/robustness/model"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
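+// LoadClusterPersistedRequests reads requests persisted in the WALs found in all
+// server-* subdirectories of a saved report path.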
+func LoadClusterPersistedRequests(lg *zap.Logger, path string) ([]model.EtcdRequest, error) {
+ files, err := os.ReadDir(path)
+ if err != nil {
+ return nil, err
+ }
+ dataDirs := []string{}
+ for _, file := range files {
+ if file.IsDir() && strings.HasPrefix(file.Name(), "server-") {
+ dataDirs = append(dataDirs, filepath.Join(path, file.Name()))
+ }
+ }
+ return PersistedRequestsDirs(lg, dataDirs)
+}
+
+func PersistedRequestsCluster(lg *zap.Logger, cluster *e2e.EtcdProcessCluster) ([]model.EtcdRequest, error) {
+ dataDirs := []string{}
+ for _, proc := range cluster.Procs {
+ dataDirs = append(dataDirs, memberDataDir(proc))
+ }
+ return PersistedRequestsDirs(lg, dataDirs)
+}
+
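+// PersistedRequestsDirs reads the requests persisted in the WAL of every data directory,
+// verifies that members agree on a common prefix, and returns the longest sequence.
+// Read failures are tolerated in a minority of members.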
+func PersistedRequestsDirs(lg *zap.Logger, dataDirs []string) ([]model.EtcdRequest, error) {
+ persistedRequests := []model.EtcdRequest{}
+ // Allow WAL read failures in a minority of the cluster:
+ // 0 failures in a 1-node cluster, 1 failure in a 3-node cluster.
+ allowedFailures := len(dataDirs) / 2
+ for _, dir := range dataDirs {
+ memberRequests, err := requestsPersistedInWAL(lg, dir)
+ if err != nil {
+ if allowedFailures < 1 {
+ return nil, err
+ }
+ allowedFailures--
+ continue
+ }
+ minLength := min(len(persistedRequests), len(memberRequests))
+ if diff := cmp.Diff(memberRequests[:minLength], persistedRequests[:minLength]); diff != "" {
+ return nil, fmt.Errorf("unexpected differences between wal entries, diff:\n%s", diff)
+ }
+ if len(memberRequests) > len(persistedRequests) {
+ persistedRequests = memberRequests
+ }
+ }
+ return persistedRequests, nil
+}
+
+func requestsPersistedInWAL(lg *zap.Logger, dataDir string) ([]model.EtcdRequest, error) {
+ _, ents, err := ReadWAL(lg, dataDir)
+ if err != nil {
+ return nil, err
+ }
+ requests := make([]model.EtcdRequest, 0, len(ents))
+ for _, ent := range ents {
+ if ent.Type != raftpb.EntryNormal || len(ent.Data) == 0 {
+ continue
+ }
+ request, err := parseEntryNormal(ent)
+ if err != nil {
+ return nil, err
+ }
+ if request != nil {
+ requests = append(requests, *request)
+ }
+ }
+ return requests, nil
+}
+
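+// ReadWAL reads the hard state and entries from a member data directory,
+// repairing the WAL at most once if it ends with an unexpected EOF.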
+func ReadWAL(lg *zap.Logger, dataDir string) (state raftpb.HardState, ents []raftpb.Entry, err error) {
+ walDir := datadir.ToWALDir(dataDir)
+ repaired := false
+ for {
+ w, err := wal.OpenForRead(lg, walDir, walpb.Snapshot{Index: 0})
+ if err != nil {
+ return state, nil, fmt.Errorf("failed to open WAL, err: %w", err)
+ }
+ _, state, ents, err = w.ReadAll()
+ w.Close()
+ if err != nil {
+ if errors.Is(err, wal.ErrSnapshotNotFound) {
+ return state, ents, nil
+ }
+ // Only io.ErrUnexpectedEOF can be repaired, and we never repair twice.
+ if repaired || !errors.Is(err, io.ErrUnexpectedEOF) {
+ return state, nil, fmt.Errorf("failed to read WAL, cannot be repaired, err: %w", err)
+ }
+ if !wal.Repair(lg, walDir) {
+ return state, nil, fmt.Errorf("failed to repair WAL, err: %w", err)
+ }
+ lg.Info("repaired WAL", zap.Error(err))
+ repaired = true
+ continue
+ }
+ return state, ents, nil
+ }
+}
+
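+// parseEntryNormal translates a normal raft entry into a model.EtcdRequest.
+// It returns a nil request for entries the model ignores, such as v2 requests
+// and cluster metadata updates.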
+func parseEntryNormal(ent raftpb.Entry) (*model.EtcdRequest, error) {
+ var raftReq pb.InternalRaftRequest
+ if err := raftReq.Unmarshal(ent.Data); err != nil {
+ var r pb.Request
+ isV2Entry := pbutil.MaybeUnmarshal(&r, ent.Data)
+ if !isV2Entry {
+ return nil, err
+ }
+ return nil, nil
+ }
+ switch {
+ case raftReq.Put != nil:
+ op := model.PutOptions{
+ Key: string(raftReq.Put.Key),
+ Value: model.ToValueOrHash(string(raftReq.Put.Value)),
+ LeaseID: raftReq.Put.Lease,
+ }
+ request := model.EtcdRequest{
+ Type: model.Txn,
+ Txn: &model.TxnRequest{
+ OperationsOnSuccess: []model.EtcdOperation{
+ {Type: model.PutOperation, Put: op},
+ },
+ },
+ }
+ return &request, nil
+ case raftReq.DeleteRange != nil:
+ op := model.DeleteOptions{Key: string(raftReq.DeleteRange.Key)}
+ request := model.EtcdRequest{
+ Type: model.Txn,
+ Txn: &model.TxnRequest{
+ OperationsOnSuccess: []model.EtcdOperation{
+ {Type: model.DeleteOperation, Delete: op},
+ },
+ },
+ }
+ return &request, nil
+ case raftReq.LeaseRevoke != nil:
+ return &model.EtcdRequest{
+ Type: model.LeaseRevoke,
+ LeaseRevoke: &model.LeaseRevokeRequest{LeaseID: raftReq.LeaseRevoke.ID},
+ }, nil
+ case raftReq.LeaseGrant != nil:
+ return &model.EtcdRequest{
+ Type: model.LeaseGrant,
+ LeaseGrant: &model.LeaseGrantRequest{LeaseID: raftReq.LeaseGrant.ID},
+ }, nil
+ case raftReq.ClusterMemberAttrSet != nil:
+ return nil, nil
+ case raftReq.ClusterVersionSet != nil:
+ return nil, nil
+ case raftReq.DowngradeInfoSet != nil:
+ return nil, nil
+ case raftReq.Compaction != nil:
+ request := model.EtcdRequest{
+ Type: model.Compact,
+ Compact: &model.CompactRequest{Revision: raftReq.Compaction.Revision},
+ }
+ return &request, nil
+ case raftReq.Txn != nil:
+ txn := model.TxnRequest{
+ Conditions: []model.EtcdCondition{},
+ OperationsOnSuccess: []model.EtcdOperation{},
+ OperationsOnFailure: []model.EtcdOperation{},
+ }
+ for _, cmp := range raftReq.Txn.Compare {
+ txn.Conditions = append(txn.Conditions, model.EtcdCondition{
+ Key: string(cmp.Key),
+ ExpectedRevision: cmp.GetModRevision(),
+ })
+ }
+ for _, op := range raftReq.Txn.Success {
+ txn.OperationsOnSuccess = append(txn.OperationsOnSuccess, toEtcdOperation(op))
+ }
+ for _, op := range raftReq.Txn.Failure {
+ txn.OperationsOnFailure = append(txn.OperationsOnFailure, toEtcdOperation(op))
+ }
+ request := model.EtcdRequest{
+ Type: model.Txn,
+ Txn: &txn,
+ }
+ return &request, nil
+ default:
+ panic(fmt.Sprintf("Unhandled raft request: %+v", raftReq))
+ }
+}
+
+func toEtcdOperation(op *pb.RequestOp) (operation model.EtcdOperation) {
+ switch {
+ case op.GetRequestRange() != nil:
+ rangeOp := op.GetRequestRange()
+ operation = model.EtcdOperation{
+ Type: model.RangeOperation,
+ Range: model.RangeOptions{
+ Start: string(rangeOp.Key),
+ End: string(rangeOp.RangeEnd),
+ Limit: rangeOp.Limit,
+ },
+ }
+ case op.GetRequestPut() != nil:
+ putOp := op.GetRequestPut()
+ operation = model.EtcdOperation{
+ Type: model.PutOperation,
+ Put: model.PutOptions{
+ Key: string(putOp.Key),
+ Value: model.ToValueOrHash(string(putOp.Value)),
+ },
+ }
+ case op.GetRequestDeleteRange() != nil:
+ deleteOp := op.GetRequestDeleteRange()
+ operation = model.EtcdOperation{
+ Type: model.DeleteOperation,
+ Delete: model.DeleteOptions{
+ Key: string(deleteOp.Key),
+ },
+ }
+ default:
+ panic(fmt.Sprintf("Unknown op type %v", op))
+ }
+ return operation
+}
diff --git a/tests/robustness/scenarios/scenarios.go b/tests/robustness/scenarios/scenarios.go
new file mode 100644
index 00000000000..afad5879fee
--- /dev/null
+++ b/tests/robustness/scenarios/scenarios.go
@@ -0,0 +1,244 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scenarios
+
+import (
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/robustness/client"
+ "go.etcd.io/etcd/tests/v3/robustness/failpoint"
+ "go.etcd.io/etcd/tests/v3/robustness/options"
+ "go.etcd.io/etcd/tests/v3/robustness/random"
+ "go.etcd.io/etcd/tests/v3/robustness/traffic"
+)
+
+type TrafficProfile struct {
+ Name string
+ Traffic traffic.Traffic
+ Profile traffic.Profile
+}
+
+var trafficProfiles = []TrafficProfile{
+ {
+ Name: "EtcdHighTraffic",
+ Traffic: traffic.EtcdPut,
+ Profile: traffic.HighTrafficProfile,
+ },
+ {
+ Name: "EtcdTrafficDeleteLeases",
+ Traffic: traffic.EtcdPutDeleteLease,
+ Profile: traffic.LowTraffic,
+ },
+ {
+ Name: "KubernetesHighTraffic",
+ Traffic: traffic.Kubernetes,
+ Profile: traffic.HighTrafficProfile,
+ },
+ {
+ Name: "KubernetesLowTraffic",
+ Traffic: traffic.Kubernetes,
+ Profile: traffic.LowTraffic,
+ },
+}
+
+type TestScenario struct {
+ Name string
+ Failpoint failpoint.Failpoint
+ Cluster e2e.EtcdProcessClusterConfig
+ Traffic traffic.Traffic
+ Profile traffic.Profile
+ Watch client.WatchConfig
+}
+
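+// Exploratory returns scenarios that combine every traffic profile with randomized
+// cluster configurations of size 1 and 3, optionally repeated on top of LazyFS.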
+func Exploratory(_ *testing.T) []TestScenario {
+ randomizableOptions := []e2e.EPClusterOption{
+ options.WithClusterOptionGroups(
+ options.ClusterOptions{options.WithTickMs(29), options.WithElectionMs(271)},
+ options.ClusterOptions{options.WithTickMs(101), options.WithElectionMs(521)},
+ options.ClusterOptions{options.WithTickMs(100), options.WithElectionMs(2000)}),
+ }
+
+ mixedVersionOptionChoices := []random.ChoiceWeight[options.ClusterOptions]{
+ // 60% with all members of current version
+ {Choice: options.ClusterOptions{options.WithVersion(e2e.CurrentVersion)}, Weight: 60},
+ // 10% with 2 members of current version, 1 member last version, leader is current version
+ {Choice: options.ClusterOptions{options.WithVersion(e2e.MinorityLastVersion), options.WithInitialLeaderIndex(0)}, Weight: 10},
+ // 10% with 2 members of current version, 1 member last version, leader is last version
+ {Choice: options.ClusterOptions{options.WithVersion(e2e.MinorityLastVersion), options.WithInitialLeaderIndex(2)}, Weight: 10},
+ // 10% with 2 members of last version, 1 member current version, leader is last version
+ {Choice: options.ClusterOptions{options.WithVersion(e2e.QuorumLastVersion), options.WithInitialLeaderIndex(0)}, Weight: 10},
+ // 10% with 2 members of last version, 1 member current version, leader is current version
+ {Choice: options.ClusterOptions{options.WithVersion(e2e.QuorumLastVersion), options.WithInitialLeaderIndex(2)}, Weight: 10},
+ }
+ mixedVersionOption := options.WithClusterOptionGroups(random.PickRandom[options.ClusterOptions](mixedVersionOptionChoices))
+
+ baseOptions := []e2e.EPClusterOption{
+ options.WithSnapshotCount(50, 100, 1000),
+ options.WithSubsetOptions(randomizableOptions...),
+ e2e.WithGoFailEnabled(true),
+ // Set a low compaction batch limit to allow triggering multi-batch compaction failpoints.
+ options.WithCompactionBatchLimit(10, 100, 1000),
+ e2e.WithWatchProcessNotifyInterval(100 * time.Millisecond),
+ }
+
+ if e2e.CouldSetSnapshotCatchupEntries(e2e.BinPath.Etcd) {
+ baseOptions = append(baseOptions, e2e.WithSnapshotCatchUpEntries(100))
+ }
+ scenarios := []TestScenario{}
+ for _, tp := range trafficProfiles {
+ name := filepath.Join(tp.Name, "ClusterOfSize1")
+ clusterOfSize1Options := baseOptions
+ clusterOfSize1Options = append(clusterOfSize1Options, e2e.WithClusterSize(1))
+ scenarios = append(scenarios, TestScenario{
+ Name: name,
+ Traffic: tp.Traffic,
+ Profile: tp.Profile,
+ Cluster: *e2e.NewConfig(clusterOfSize1Options...),
+ })
+ }
+
+ for _, tp := range trafficProfiles {
+ name := filepath.Join(tp.Name, "ClusterOfSize3")
+ clusterOfSize3Options := baseOptions
+ clusterOfSize3Options = append(clusterOfSize3Options, e2e.WithIsPeerTLS(true))
+ clusterOfSize3Options = append(clusterOfSize3Options, e2e.WithPeerProxy(true))
+ if fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
+ clusterOfSize3Options = append(clusterOfSize3Options, mixedVersionOption)
+ }
+ scenarios = append(scenarios, TestScenario{
+ Name: name,
+ Traffic: tp.Traffic,
+ Profile: tp.Profile,
+ Cluster: *e2e.NewConfig(clusterOfSize3Options...),
+ })
+ }
+ if e2e.BinPath.LazyFSAvailable() {
+ newScenarios := scenarios
+ for _, s := range scenarios {
+ // LazyFS increases CPU load, so run it only with the more lightweight cases.
+ if s.Profile.MinimalQPS <= 100 && s.Cluster.ClusterSize == 1 {
+ lazyfsCluster := s.Cluster
+ lazyfsCluster.LazyFSEnabled = true
+ newScenarios = append(newScenarios, TestScenario{
+ Name: filepath.Join(s.Name, "LazyFS"),
+ Failpoint: s.Failpoint,
+ Cluster: lazyfsCluster,
+ Traffic: s.Traffic,
+ Profile: s.Profile.WithoutCompaction(),
+ Watch: s.Watch,
+ })
+ }
+ }
+ scenarios = newScenarios
+ }
+ return scenarios
+}
+
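+// Regression returns scenarios reproducing previously reported issues, each named
+// after the issue it covers and using the failpoint and cluster configuration
+// associated with it.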
+func Regression(t *testing.T) []TestScenario {
+ v, err := e2e.GetVersionFromBinary(e2e.BinPath.Etcd)
+ require.NoErrorf(t, err, "Failed checking etcd version binary, binary: %q", e2e.BinPath.Etcd)
+
+ scenarios := []TestScenario{}
+ scenarios = append(scenarios, TestScenario{
+ Name: "Issue14370",
+ Failpoint: failpoint.RaftBeforeSavePanic,
+ Profile: traffic.LowTraffic,
+ Traffic: traffic.EtcdPutDeleteLease,
+ Cluster: *e2e.NewConfig(
+ e2e.WithClusterSize(1),
+ e2e.WithGoFailEnabled(true),
+ ),
+ })
+ scenarios = append(scenarios, TestScenario{
+ Name: "Issue14685",
+ Failpoint: failpoint.DefragBeforeCopyPanic,
+ Profile: traffic.LowTraffic,
+ Traffic: traffic.EtcdPutDeleteLease,
+ Cluster: *e2e.NewConfig(
+ e2e.WithClusterSize(1),
+ e2e.WithGoFailEnabled(true),
+ ),
+ })
+ scenarios = append(scenarios, TestScenario{
+ Name: "Issue13766",
+ Failpoint: failpoint.KillFailpoint,
+ Profile: traffic.HighTrafficProfile,
+ Traffic: traffic.EtcdPut,
+ Cluster: *e2e.NewConfig(
+ e2e.WithSnapshotCount(100),
+ ),
+ })
+ scenarios = append(scenarios, TestScenario{
+ Name: "Issue15220",
+ Watch: client.WatchConfig{
+ RequestProgress: true,
+ },
+ Profile: traffic.LowTraffic,
+ Traffic: traffic.EtcdPutDeleteLease,
+ Failpoint: failpoint.KillFailpoint,
+ Cluster: *e2e.NewConfig(
+ e2e.WithClusterSize(1),
+ ),
+ })
+ scenarios = append(scenarios, TestScenario{
+ Name: "Issue17529",
+ Profile: traffic.HighTrafficProfile,
+ Traffic: traffic.Kubernetes,
+ Failpoint: failpoint.SleepBeforeSendWatchResponse,
+ Cluster: *e2e.NewConfig(
+ e2e.WithClusterSize(1),
+ e2e.WithGoFailEnabled(true),
+ options.WithSnapshotCount(100),
+ ),
+ })
+
+ scenarios = append(scenarios, TestScenario{
+ Name: "Issue17780",
+ Profile: traffic.LowTraffic.WithoutCompaction(),
+ Failpoint: failpoint.BatchCompactBeforeSetFinishedCompactPanic,
+ Traffic: traffic.Kubernetes,
+ Cluster: *e2e.NewConfig(
+ e2e.WithClusterSize(1),
+ e2e.WithCompactionBatchLimit(300),
+ e2e.WithSnapshotCount(1000),
+ e2e.WithGoFailEnabled(true),
+ ),
+ })
+ if v.Compare(version.V3_5) >= 0 {
+ opts := []e2e.EPClusterOption{
+ e2e.WithSnapshotCount(100),
+ e2e.WithPeerProxy(true),
+ e2e.WithIsPeerTLS(true),
+ }
+ if e2e.CouldSetSnapshotCatchupEntries(e2e.BinPath.Etcd) {
+ opts = append(opts, e2e.WithSnapshotCatchUpEntries(100))
+ }
+ scenarios = append(scenarios, TestScenario{
+ Name: "Issue15271",
+ Failpoint: failpoint.BlackholeUntilSnapshot,
+ Profile: traffic.HighTrafficProfile,
+ Traffic: traffic.EtcdPut,
+ Cluster: *e2e.NewConfig(opts...),
+ })
+ }
+ return scenarios
+}
diff --git a/tests/robustness/testdata/.gitignore b/tests/robustness/testdata/.gitignore
new file mode 100644
index 00000000000..c96a04f008e
--- /dev/null
+++ b/tests/robustness/testdata/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore
\ No newline at end of file
diff --git a/tests/robustness/traffic/etcd.go b/tests/robustness/traffic/etcd.go
new file mode 100644
index 00000000000..6210ee93d59
--- /dev/null
+++ b/tests/robustness/traffic/etcd.go
@@ -0,0 +1,328 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traffic
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+
+ "golang.org/x/time/rate"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/robustness/client"
+ "go.etcd.io/etcd/tests/v3/robustness/identity"
+ "go.etcd.io/etcd/tests/v3/robustness/model"
+ "go.etcd.io/etcd/tests/v3/robustness/random"
+)
+
+var (
+ EtcdPutDeleteLease Traffic = etcdTraffic{
+ keyCount: 10,
+ leaseTTL: DefaultLeaseTTL,
+ largePutSize: 32769,
+ // Please keep the sum of weights equal to 100.
+ requests: []random.ChoiceWeight[etcdRequestType]{
+ {Choice: Get, Weight: 15},
+ {Choice: List, Weight: 15},
+ {Choice: StaleGet, Weight: 10},
+ {Choice: StaleList, Weight: 10},
+ {Choice: Delete, Weight: 5},
+ {Choice: MultiOpTxn, Weight: 5},
+ {Choice: PutWithLease, Weight: 5},
+ {Choice: LeaseRevoke, Weight: 5},
+ {Choice: CompareAndSet, Weight: 5},
+ {Choice: Put, Weight: 20},
+ {Choice: LargePut, Weight: 5},
+ },
+ }
+ EtcdPut Traffic = etcdTraffic{
+ keyCount: 10,
+ largePutSize: 32769,
+ leaseTTL: DefaultLeaseTTL,
+ // Please keep the sum of weights equal to 100.
+ requests: []random.ChoiceWeight[etcdRequestType]{
+ {Choice: Get, Weight: 15},
+ {Choice: List, Weight: 15},
+ {Choice: StaleGet, Weight: 10},
+ {Choice: StaleList, Weight: 10},
+ {Choice: MultiOpTxn, Weight: 5},
+ {Choice: LargePut, Weight: 5},
+ {Choice: Put, Weight: 40},
+ },
+ }
+)
+
+type etcdTraffic struct {
+ keyCount int
+ requests []random.ChoiceWeight[etcdRequestType]
+ leaseTTL int64
+ largePutSize int
+}
+
+func (t etcdTraffic) ExpectUniqueRevision() bool {
+ return false
+}
+
+type etcdRequestType string
+
+const (
+ Get etcdRequestType = "get"
+ StaleGet etcdRequestType = "staleGet"
+ List etcdRequestType = "list"
+ StaleList etcdRequestType = "staleList"
+ Put etcdRequestType = "put"
+ LargePut etcdRequestType = "largePut"
+ Delete etcdRequestType = "delete"
+ MultiOpTxn etcdRequestType = "multiOpTxn"
+ PutWithLease etcdRequestType = "putWithLease"
+ LeaseRevoke etcdRequestType = "leaseRevoke"
+ CompareAndSet etcdRequestType = "compareAndSet"
+ Defragment etcdRequestType = "defragment"
+)
+
+func (t etcdTraffic) Name() string {
+ return "Etcd"
+}
+
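+// Run generates traffic until the context is cancelled or the finish channel is closed.
+// It picks a weighted-random request type, and issues a plain Get after any failed
+// request to avoid repeated failed writes.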
+func (t etcdTraffic) Run(ctx context.Context, c *client.RecordingClient, limiter *rate.Limiter, ids identity.Provider, lm identity.LeaseIDStorage, nonUniqueWriteLimiter ConcurrencyLimiter, finish <-chan struct{}) {
+ lastOperationSucceeded := true
+ var lastRev int64
+ var requestType etcdRequestType
+ client := etcdTrafficClient{
+ etcdTraffic: t,
+ keyPrefix: "key",
+ client: c,
+ limiter: limiter,
+ idProvider: ids,
+ leaseStorage: lm,
+ }
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-finish:
+ return
+ default:
+ }
+ shouldReturn := false
+
+ // Avoid multiple failed writes in a row
+ if lastOperationSucceeded {
+ choices := t.requests
+ if shouldReturn = nonUniqueWriteLimiter.Take(); !shouldReturn {
+ choices = filterOutNonUniqueEtcdWrites(choices)
+ }
+ requestType = random.PickRandom(choices)
+ } else {
+ requestType = Get
+ }
+ rev, err := client.Request(ctx, requestType, lastRev)
+ if shouldReturn {
+ nonUniqueWriteLimiter.Return()
+ }
+ lastOperationSucceeded = err == nil
+ if err != nil {
+ continue
+ }
+ if rev != 0 {
+ lastRev = rev
+ }
+ limiter.Wait(ctx)
+ }
+}
+
+func filterOutNonUniqueEtcdWrites(choices []random.ChoiceWeight[etcdRequestType]) (resp []random.ChoiceWeight[etcdRequestType]) {
+ for _, choice := range choices {
+ if choice.Choice != Delete && choice.Choice != LeaseRevoke {
+ resp = append(resp, choice)
+ }
+ }
+ return resp
+}
+
+type etcdTrafficClient struct {
+ etcdTraffic
+ keyPrefix string
+ client *client.RecordingClient
+ limiter *rate.Limiter
+ idProvider identity.Provider
+ leaseStorage identity.LeaseIDStorage
+}
+
+func (c etcdTrafficClient) Request(ctx context.Context, request etcdRequestType, lastRev int64) (rev int64, err error) {
+ opCtx, cancel := context.WithTimeout(ctx, RequestTimeout)
+ defer cancel()
+
+ var limit int64
+ switch request {
+ case StaleGet:
+ _, rev, err = c.client.Get(opCtx, c.randomKey(), lastRev)
+ case Get:
+ _, rev, err = c.client.Get(opCtx, c.randomKey(), 0)
+ case List:
+ var resp *clientv3.GetResponse
+ resp, err = c.client.Range(ctx, c.keyPrefix, clientv3.GetPrefixRangeEnd(c.keyPrefix), 0, limit)
+ if resp != nil {
+ rev = resp.Header.Revision
+ }
+ case StaleList:
+ var resp *clientv3.GetResponse
+ resp, err = c.client.Range(ctx, c.keyPrefix, clientv3.GetPrefixRangeEnd(c.keyPrefix), lastRev, limit)
+ if resp != nil {
+ rev = resp.Header.Revision
+ }
+ case Put:
+ var resp *clientv3.PutResponse
+ resp, err = c.client.Put(opCtx, c.randomKey(), fmt.Sprintf("%d", c.idProvider.NewRequestID()))
+ if resp != nil {
+ rev = resp.Header.Revision
+ }
+ case LargePut:
+ var resp *clientv3.PutResponse
+ resp, err = c.client.Put(opCtx, c.randomKey(), random.RandString(c.largePutSize))
+ if resp != nil {
+ rev = resp.Header.Revision
+ }
+ case Delete:
+ var resp *clientv3.DeleteResponse
+ resp, err = c.client.Delete(opCtx, c.randomKey())
+ if resp != nil {
+ rev = resp.Header.Revision
+ }
+ case MultiOpTxn:
+ var resp *clientv3.TxnResponse
+ resp, err = c.client.Txn(opCtx, nil, c.pickMultiTxnOps(), nil)
+ if resp != nil {
+ rev = resp.Header.Revision
+ }
+ case CompareAndSet:
+ var kv *mvccpb.KeyValue
+ key := c.randomKey()
+ kv, rev, err = c.client.Get(opCtx, key, 0)
+ if err == nil {
+ c.limiter.Wait(ctx)
+ var expectedRevision int64
+ if kv != nil {
+ expectedRevision = kv.ModRevision
+ }
+ txnCtx, txnCancel := context.WithTimeout(ctx, RequestTimeout)
+ var resp *clientv3.TxnResponse
+ resp, err = c.client.Txn(txnCtx, []clientv3.Cmp{clientv3.Compare(clientv3.ModRevision(key), "=", expectedRevision)}, []clientv3.Op{clientv3.OpPut(key, fmt.Sprintf("%d", c.idProvider.NewRequestID()))}, nil)
+ txnCancel()
+ if resp != nil {
+ rev = resp.Header.Revision
+ }
+ }
+ case PutWithLease:
+ leaseID := c.leaseStorage.LeaseID(c.client.ID)
+ if leaseID == 0 {
+ var resp *clientv3.LeaseGrantResponse
+ resp, err = c.client.LeaseGrant(opCtx, c.leaseTTL)
+ if resp != nil {
+ leaseID = int64(resp.ID)
+ rev = resp.ResponseHeader.Revision
+ }
+ if err == nil {
+ c.leaseStorage.AddLeaseID(c.client.ID, leaseID)
+ c.limiter.Wait(ctx)
+ }
+ }
+ if leaseID != 0 {
+ putCtx, putCancel := context.WithTimeout(ctx, RequestTimeout)
+ var resp *clientv3.PutResponse
+ resp, err = c.client.PutWithLease(putCtx, c.randomKey(), fmt.Sprintf("%d", c.idProvider.NewRequestID()), leaseID)
+ putCancel()
+ if resp != nil {
+ rev = resp.Header.Revision
+ }
+ }
+ case LeaseRevoke:
+ leaseID := c.leaseStorage.LeaseID(c.client.ID)
+ if leaseID != 0 {
+ var resp *clientv3.LeaseRevokeResponse
+ resp, err = c.client.LeaseRevoke(opCtx, leaseID)
+ // If LeaseRevoke failed, do not remove the lease mapping.
+ if err == nil {
+ c.leaseStorage.RemoveLeaseID(c.client.ID)
+ }
+ if resp != nil {
+ rev = resp.Header.Revision
+ }
+ }
+ case Defragment:
+ var resp *clientv3.DefragmentResponse
+ resp, err = c.client.Defragment(opCtx)
+ if resp != nil {
+ rev = resp.Header.Revision
+ }
+ default:
+ panic("invalid choice")
+ }
+ return rev, err
+}
+
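+// pickMultiTxnOps selects MultiOpTxnOpCount operations over distinct keys, ensuring at least one put so the transaction stays unique.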
+func (c etcdTrafficClient) pickMultiTxnOps() (ops []clientv3.Op) {
+ keys := rand.Perm(c.keyCount)
+ opTypes := make([]model.OperationType, MultiOpTxnOpCount)
+
+ atLeastOnePut := false
+ for i := 0; i < MultiOpTxnOpCount; i++ {
+ opTypes[i] = c.pickOperationType()
+ if opTypes[i] == model.PutOperation {
+ atLeastOnePut = true
+ }
+ }
+ // Ensure at least one put to make the operation unique.
+ if !atLeastOnePut {
+ opTypes[0] = model.PutOperation
+ }
+
+ for i, opType := range opTypes {
+ key := c.key(keys[i])
+ switch opType {
+ case model.RangeOperation:
+ ops = append(ops, clientv3.OpGet(key))
+ case model.PutOperation:
+ value := fmt.Sprintf("%d", c.idProvider.NewRequestID())
+ ops = append(ops, clientv3.OpPut(key, value))
+ case model.DeleteOperation:
+ ops = append(ops, clientv3.OpDelete(key))
+ default:
+ panic("unsuported choice type")
+ }
+ }
+ return ops
+}
+
+func (c etcdTrafficClient) randomKey() string {
+ return c.key(rand.Int())
+}
+
+func (c etcdTrafficClient) key(i int) string {
+ return fmt.Sprintf("%s%d", c.keyPrefix, i%c.keyCount)
+}
+
+func (t etcdTraffic) pickOperationType() model.OperationType {
+ roll := rand.Int() % 100
+ if roll < 10 {
+ return model.DeleteOperation
+ }
+ if roll < 50 {
+ return model.RangeOperation
+ }
+ return model.PutOperation
+}
diff --git a/tests/robustness/traffic/kubernetes.go b/tests/robustness/traffic/kubernetes.go
new file mode 100644
index 00000000000..52dfe5cc8f5
--- /dev/null
+++ b/tests/robustness/traffic/kubernetes.go
@@ -0,0 +1,332 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traffic
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/rand"
+ "sync"
+
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/time/rate"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/stringutil"
+ "go.etcd.io/etcd/tests/v3/robustness/client"
+ "go.etcd.io/etcd/tests/v3/robustness/identity"
+ "go.etcd.io/etcd/tests/v3/robustness/random"
+)
+
+var Kubernetes Traffic = kubernetesTraffic{
+ averageKeyCount: 10,
+ resource: "pods",
+ namespace: "default",
+ // Please keep the sum of weights equal to 100.
+ writeChoices: []random.ChoiceWeight[KubernetesRequestType]{
+ {Choice: KubernetesUpdate, Weight: 90},
+ {Choice: KubernetesDelete, Weight: 5},
+ {Choice: KubernetesCreate, Weight: 5},
+ },
+}
+
+type kubernetesTraffic struct {
+ averageKeyCount int
+ resource string
+ namespace string
+ writeChoices []random.ChoiceWeight[KubernetesRequestType]
+}
+
+func (t kubernetesTraffic) ExpectUniqueRevision() bool {
+ return true
+}
+
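+// Run starts two loops: one that lists and then watches the resource prefix to keep local storage in sync,
+// and one that issues writes based on the locally observed state.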
+func (t kubernetesTraffic) Run(ctx context.Context, c *client.RecordingClient, limiter *rate.Limiter, ids identity.Provider, lm identity.LeaseIDStorage, nonUniqueWriteLimiter ConcurrencyLimiter, finish <-chan struct{}) {
+ kc := &kubernetesClient{client: c}
+ s := newStorage()
+ keyPrefix := "/registry/" + t.resource + "/"
+ g := errgroup.Group{}
+ readLimit := t.averageKeyCount
+
+ g.Go(func() error {
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-finish:
+ return nil
+ default:
+ }
+ rev, err := t.Read(ctx, kc, s, limiter, keyPrefix, readLimit)
+ if err != nil {
+ continue
+ }
+ t.Watch(ctx, kc, s, limiter, keyPrefix, rev+1)
+ }
+ })
+ g.Go(func() error {
+ lastWriteFailed := false
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-finish:
+ return nil
+ default:
+ }
+ // Avoid multiple failed writes in a row
+ if lastWriteFailed {
+ _, err := t.Read(ctx, kc, s, limiter, keyPrefix, 0)
+ if err != nil {
+ continue
+ }
+ }
+ err := t.Write(ctx, kc, ids, s, limiter, nonUniqueWriteLimiter)
+ lastWriteFailed = err != nil
+ if err != nil {
+ continue
+ }
+ }
+ })
+ g.Wait()
+}
+
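+// Read pages through all keys under keyPrefix, at most limit keys per request, and resets local storage to the listed key revisions.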
+func (t kubernetesTraffic) Read(ctx context.Context, kc *kubernetesClient, s *storage, limiter *rate.Limiter, keyPrefix string, limit int) (rev int64, err error) {
+ rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
+
+ hasMore := true
+ rangeStart := keyPrefix
+ var kvs []*mvccpb.KeyValue
+ var revision int64
+
+ for hasMore {
+ readCtx, cancel := context.WithTimeout(ctx, RequestTimeout)
+ resp, err := kc.Range(readCtx, rangeStart, rangeEnd, revision, int64(limit))
+ cancel()
+ if err != nil {
+ return 0, err
+ }
+ limiter.Wait(ctx)
+
+ hasMore = resp.More
+ if len(resp.Kvs) > 0 && hasMore {
+ rangeStart = string(resp.Kvs[len(resp.Kvs)-1].Key) + "\x00"
+ }
+ kvs = append(kvs, resp.Kvs...)
+ if revision == 0 {
+ revision = resp.Header.Revision
+ }
+ }
+ s.Reset(revision, kvs)
+ return revision, nil
+}
+
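+// Write creates a key when the store holds fewer than half the average key count, prefers deletes when it holds
+// more than one and a half times that count, and otherwise issues a weighted random create, update or delete.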
+func (t kubernetesTraffic) Write(ctx context.Context, kc *kubernetesClient, ids identity.Provider, s *storage, limiter *rate.Limiter, nonUniqueWriteLimiter ConcurrencyLimiter) (err error) {
+ writeCtx, cancel := context.WithTimeout(ctx, RequestTimeout)
+ defer cancel()
+ count := s.Count()
+ if count < t.averageKeyCount/2 {
+ err = kc.OptimisticCreate(writeCtx, t.generateKey(), fmt.Sprintf("%d", ids.NewRequestID()))
+ } else {
+ key, rev := s.PickRandom()
+ if rev == 0 {
+ return errors.New("storage empty")
+ }
+ if count > t.averageKeyCount*3/2 && nonUniqueWriteLimiter.Take() {
+ _, err = kc.OptimisticDelete(writeCtx, key, rev)
+ nonUniqueWriteLimiter.Return()
+ } else {
+ shouldReturn := false
+
+ choices := t.writeChoices
+ if shouldReturn = nonUniqueWriteLimiter.Take(); !shouldReturn {
+ choices = filterOutNonUniqueKubernetesWrites(t.writeChoices)
+ }
+ op := random.PickRandom(choices)
+ switch op {
+ case KubernetesDelete:
+ _, err = kc.OptimisticDelete(writeCtx, key, rev)
+ case KubernetesUpdate:
+ _, err = kc.OptimisticUpdate(writeCtx, key, fmt.Sprintf("%d", ids.NewRequestID()), rev)
+ case KubernetesCreate:
+ err = kc.OptimisticCreate(writeCtx, t.generateKey(), fmt.Sprintf("%d", ids.NewRequestID()))
+ default:
+ panic(fmt.Sprintf("invalid choice: %q", op))
+ }
+ if shouldReturn {
+ nonUniqueWriteLimiter.Return()
+ }
+ }
+ }
+ if err != nil {
+ return err
+ }
+ limiter.Wait(ctx)
+ return nil
+}
+
+func filterOutNonUniqueKubernetesWrites(choices []random.ChoiceWeight[KubernetesRequestType]) (resp []random.ChoiceWeight[KubernetesRequestType]) {
+ for _, choice := range choices {
+ if choice.Choice != KubernetesDelete {
+ resp = append(resp, choice)
+ }
+ }
+ return resp
+}
+
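+// Watch streams events starting at the given revision and applies them to local storage until the watch context times out.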
+func (t kubernetesTraffic) Watch(ctx context.Context, kc *kubernetesClient, s *storage, limiter *rate.Limiter, keyPrefix string, revision int64) {
+ watchCtx, cancel := context.WithTimeout(ctx, WatchTimeout)
+ defer cancel()
+
+ // Kubernetes issues Watch requests that require a leader to be
+ // present in the cluster:
+ // https://github.com/kubernetes/kubernetes/blob/2016fab3085562b4132e6d3774b6ded5ba9939fd/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go#L872
+ watchCtx = clientv3.WithRequireLeader(watchCtx)
+ for e := range kc.client.Watch(watchCtx, keyPrefix, revision, true, true, true) {
+ s.Update(e)
+ }
+ limiter.Wait(ctx)
+}
+
+func (t kubernetesTraffic) generateKey() string {
+ return fmt.Sprintf("/registry/%s/%s/%s", t.resource, t.namespace, stringutil.RandString(5))
+}
+
+type KubernetesRequestType string
+
+const (
+ KubernetesDelete KubernetesRequestType = "delete"
+ KubernetesUpdate KubernetesRequestType = "update"
+ KubernetesCreate KubernetesRequestType = "create"
+)
+
+type kubernetesClient struct {
+ client *client.RecordingClient
+}
+
+func (k kubernetesClient) List(ctx context.Context, prefix string, revision, limit int64) (*clientv3.GetResponse, error) {
+ resp, err := k.client.Range(ctx, prefix, clientv3.GetPrefixRangeEnd(prefix), revision, limit)
+ if err != nil {
+ return nil, err
+ }
+ return resp, err
+}
+
+func (k kubernetesClient) Range(ctx context.Context, start, end string, revision, limit int64) (*clientv3.GetResponse, error) {
+ return k.client.Range(ctx, start, end, revision, limit)
+}
+
+func (k kubernetesClient) OptimisticDelete(ctx context.Context, key string, expectedRevision int64) (*mvccpb.KeyValue, error) {
+ return k.optimisticOperationOrGet(ctx, key, clientv3.OpDelete(key), expectedRevision)
+}
+
+func (k kubernetesClient) OptimisticUpdate(ctx context.Context, key, value string, expectedRevision int64) (*mvccpb.KeyValue, error) {
+ return k.optimisticOperationOrGet(ctx, key, clientv3.OpPut(key, value), expectedRevision)
+}
+
+func (k kubernetesClient) OptimisticCreate(ctx context.Context, key, value string) error {
+ _, err := k.client.Txn(ctx, []clientv3.Cmp{clientv3.Compare(clientv3.ModRevision(key), "=", 0)}, []clientv3.Op{clientv3.OpPut(key, value)}, nil)
+ return err
+}
+
+func (k kubernetesClient) RequestProgress(ctx context.Context) error {
+ // Kubernetes makes RequestProgress calls that require a leader to be
+ // present in the cluster:
+ // https://github.com/kubernetes/kubernetes/blob/2016fab3085562b4132e6d3774b6ded5ba9939fd/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go#L87
+ return k.client.RequestProgress(clientv3.WithRequireLeader(ctx))
+}
+
+// Kubernetes optimistically assumes that the key didn't change since it was last observed, so it executes operations within a transaction conditioned on the key not changing.
+// However, if the key's value did change, it wants to read the new value immediately, hence the Get operation on transaction failure.
+func (k kubernetesClient) optimisticOperationOrGet(ctx context.Context, key string, operation clientv3.Op, expectedRevision int64) (*mvccpb.KeyValue, error) {
+ resp, err := k.client.Txn(ctx, []clientv3.Cmp{clientv3.Compare(clientv3.ModRevision(key), "=", expectedRevision)}, []clientv3.Op{operation}, []clientv3.Op{clientv3.OpGet(key)})
+ if err != nil {
+ return nil, err
+ }
+ if !resp.Succeeded {
+ getResp := (*clientv3.GetResponse)(resp.Responses[0].GetResponseRange())
+ if err != nil || len(getResp.Kvs) == 0 {
+ return nil, err
+ }
+ if len(getResp.Kvs) == 1 {
+ return getResp.Kvs[0], err
+ }
+ }
+ return nil, err
+}
+
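+// storage is a thread-safe mirror of key mod revisions observed via list and watch responses.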
+type storage struct {
+ mux sync.RWMutex
+ keyRevision map[string]int64
+ revision int64
+}
+
+func newStorage() *storage {
+ return &storage{
+ keyRevision: map[string]int64{},
+ }
+}
+
+func (s *storage) Update(resp clientv3.WatchResponse) {
+ s.mux.Lock()
+ defer s.mux.Unlock()
+ for _, e := range resp.Events {
+ if e.Kv.ModRevision < s.revision {
+ continue
+ }
+ s.revision = e.Kv.ModRevision
+ switch e.Type {
+ case mvccpb.PUT:
+ s.keyRevision[string(e.Kv.Key)] = e.Kv.ModRevision
+ case mvccpb.DELETE:
+ delete(s.keyRevision, string(e.Kv.Key))
+ }
+ }
+}
+
+func (s *storage) Reset(revision int64, kvs []*mvccpb.KeyValue) {
+ s.mux.Lock()
+ defer s.mux.Unlock()
+ if revision <= s.revision {
+ return
+ }
+ s.keyRevision = make(map[string]int64, len(kvs))
+ for _, kv := range kvs {
+ s.keyRevision[string(kv.Key)] = kv.ModRevision
+ }
+ s.revision = revision
+}
+
+func (s *storage) Count() int {
+ s.mux.RLock()
+ defer s.mux.RUnlock()
+ return len(s.keyRevision)
+}
+
+func (s *storage) PickRandom() (key string, rev int64) {
+ s.mux.RLock()
+ defer s.mux.RUnlock()
+ // Guard against an empty map; rand.Intn panics for n == 0.
+ if len(s.keyRevision) == 0 {
+ return "", 0
+ }
+ n := rand.Intn(len(s.keyRevision))
+ i := 0
+ for k, v := range s.keyRevision {
+ if i == n {
+ return k, v
+ }
+ i++
+ }
+ return "", 0
+}
diff --git a/tests/robustness/traffic/limiter.go b/tests/robustness/traffic/limiter.go
new file mode 100644
index 00000000000..f2561f4168c
--- /dev/null
+++ b/tests/robustness/traffic/limiter.go
@@ -0,0 +1,47 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traffic
+
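+// NewConcurrencyLimiter returns a ConcurrencyLimiter that admits at most size concurrent holders.
+// Take never blocks, and Return panics when called without a matching successful Take.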
+func NewConcurrencyLimiter(size int) ConcurrencyLimiter {
+ return &concurrencyLimiter{
+ ch: make(chan struct{}, size),
+ }
+}
+
+type ConcurrencyLimiter interface {
+ Take() bool
+ Return()
+}
+
+type concurrencyLimiter struct {
+ ch chan struct{}
+}
+
+func (c *concurrencyLimiter) Take() bool {
+ select {
+ case c.ch <- struct{}{}:
+ return true
+ default:
+ return false
+ }
+}
+
+func (c *concurrencyLimiter) Return() {
+ select {
+ case <-c.ch:
+ default:
+ panic("Call to Return() without a successful Take")
+ }
+}
diff --git a/tests/robustness/traffic/limiter_test.go b/tests/robustness/traffic/limiter_test.go
new file mode 100644
index 00000000000..ef3ead7444d
--- /dev/null
+++ b/tests/robustness/traffic/limiter_test.go
@@ -0,0 +1,61 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traffic
+
+import (
+ "sync/atomic"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "golang.org/x/sync/errgroup"
+)
+
+func TestLimiter(t *testing.T) {
+ limiter := NewConcurrencyLimiter(3)
+ counter := &atomic.Int64{}
+ g := errgroup.Group{}
+ for i := 0; i < 10; i++ {
+ g.Go(func() error {
+ if limiter.Take() {
+ counter.Add(1)
+ }
+ return nil
+ })
+ }
+ g.Wait()
+ assert.Equal(t, 3, int(counter.Load()))
+ assert.False(t, limiter.Take())
+
+ limiter.Return()
+ counter.Store(0)
+ for i := 0; i < 10; i++ {
+ g.Go(func() error {
+ if limiter.Take() {
+ counter.Add(1)
+ }
+ return nil
+ })
+ }
+ g.Wait()
+ assert.Equal(t, 1, int(counter.Load()))
+ assert.False(t, limiter.Take())
+
+ limiter.Return()
+ limiter.Return()
+ limiter.Return()
+ assert.Panics(t, func() {
+ limiter.Return()
+ })
+}
diff --git a/tests/robustness/traffic/traffic.go b/tests/robustness/traffic/traffic.go
new file mode 100644
index 00000000000..c2de307218b
--- /dev/null
+++ b/tests/robustness/traffic/traffic.go
@@ -0,0 +1,218 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traffic
+
+import (
+ "context"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+ "golang.org/x/time/rate"
+
+ "go.etcd.io/etcd/tests/v3/framework/e2e"
+ "go.etcd.io/etcd/tests/v3/robustness/client"
+ "go.etcd.io/etcd/tests/v3/robustness/identity"
+ "go.etcd.io/etcd/tests/v3/robustness/model"
+ "go.etcd.io/etcd/tests/v3/robustness/random"
+ "go.etcd.io/etcd/tests/v3/robustness/report"
+)
+
+var (
+ DefaultLeaseTTL int64 = 7200
+ RequestTimeout = 200 * time.Millisecond
+ WatchTimeout = time.Second
+ MultiOpTxnOpCount = 4
+ CompactionPeriod = 200 * time.Millisecond
+
+ LowTraffic = Profile{
+ MinimalQPS: 100,
+ MaximalQPS: 200,
+ ClientCount: 8,
+ MaxNonUniqueRequestConcurrency: 3,
+ }
+ HighTrafficProfile = Profile{
+ MinimalQPS: 100,
+ MaximalQPS: 1000,
+ ClientCount: 8,
+ MaxNonUniqueRequestConcurrency: 3,
+ }
+)
+
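+// SimulateTraffic runs the traffic profile against the cluster until failpoint injection is reported,
+// then stops the clients and returns their reports. The first and last puts must succeed so the
+// recorded history has a well-defined start and end.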
+func SimulateTraffic(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, profile Profile, traffic Traffic, failpointInjected <-chan report.FailpointInjection, baseTime time.Time, ids identity.Provider) []report.ClientReport {
+ mux := sync.Mutex{}
+ endpoints := clus.EndpointsGRPC()
+
+ lm := identity.NewLeaseIDStorage()
+ reports := []report.ClientReport{}
+ limiter := rate.NewLimiter(rate.Limit(profile.MaximalQPS), 200)
+
+ cc, err := client.NewRecordingClient(endpoints, ids, baseTime)
+ require.NoError(t, err)
+ defer cc.Close()
+ // Ensure that the first operation succeeds
+ _, err = cc.Put(ctx, "start", "true")
+ require.NoErrorf(t, err, "First operation failed, validation requires first operation to succeed")
+ wg := sync.WaitGroup{}
+ nonUniqueWriteLimiter := NewConcurrencyLimiter(profile.MaxNonUniqueRequestConcurrency)
+ finish := make(chan struct{})
+ lg.Info("Start traffic")
+ startTime := time.Since(baseTime)
+ for i := 0; i < profile.ClientCount; i++ {
+ wg.Add(1)
+ c, nerr := client.NewRecordingClient([]string{endpoints[i%len(endpoints)]}, ids, baseTime)
+ require.NoError(t, nerr)
+ go func(c *client.RecordingClient) {
+ defer wg.Done()
+ defer c.Close()
+
+ traffic.Run(ctx, c, limiter, ids, lm, nonUniqueWriteLimiter, finish)
+ mux.Lock()
+ reports = append(reports, c.Report())
+ mux.Unlock()
+ }(c)
+ }
+ if !profile.ForbidCompaction {
+ wg.Add(1)
+ c, nerr := client.NewRecordingClient(endpoints, ids, baseTime)
+ if nerr != nil {
+ t.Fatal(nerr)
+ }
+ go func(c *client.RecordingClient) {
+ defer wg.Done()
+ defer c.Close()
+
+ RunCompactLoop(ctx, c, CompactionPeriod, finish)
+ mux.Lock()
+ reports = append(reports, c.Report())
+ mux.Unlock()
+ }(c)
+ }
+ var fr *report.FailpointInjection
+ select {
+ case frp, ok := <-failpointInjected:
+ require.Truef(t, ok, "Failed to collect failpoint report")
+ fr = &frp
+ case <-ctx.Done():
+ t.Fatalf("Traffic finished before failure was injected: %s", ctx.Err())
+ }
+ close(finish)
+ wg.Wait()
+ lg.Info("Finished traffic")
+ endTime := time.Since(baseTime)
+
+ time.Sleep(time.Second)
+ // Ensure that the last operation succeeds
+ _, err = cc.Put(ctx, "tombstone", "true")
+ require.NoErrorf(t, err, "Last operation failed, validation requires last operation to succeed")
+ reports = append(reports, cc.Report())
+
+ totalStats := calculateStats(reports, startTime, endTime)
+ beforeFailpointStats := calculateStats(reports, startTime, fr.Start)
+ duringFailpointStats := calculateStats(reports, fr.Start, fr.End)
+ afterFailpointStats := calculateStats(reports, fr.End, endTime)
+
+ lg.Info("Reporting complete traffic", zap.Int("successes", totalStats.successes), zap.Int("failures", totalStats.failures), zap.Float64("successRate", totalStats.successRate()), zap.Duration("period", totalStats.period), zap.Float64("qps", totalStats.QPS()))
+ lg.Info("Reporting traffic before failure injection", zap.Int("successes", beforeFailpointStats.successes), zap.Int("failures", beforeFailpointStats.failures), zap.Float64("successRate", beforeFailpointStats.successRate()), zap.Duration("period", beforeFailpointStats.period), zap.Float64("qps", beforeFailpointStats.QPS()))
+ lg.Info("Reporting traffic during failure injection", zap.Int("successes", duringFailpointStats.successes), zap.Int("failures", duringFailpointStats.failures), zap.Float64("successRate", duringFailpointStats.successRate()), zap.Duration("period", duringFailpointStats.period), zap.Float64("qps", duringFailpointStats.QPS()))
+ lg.Info("Reporting traffic after failure injection", zap.Int("successes", afterFailpointStats.successes), zap.Int("failures", afterFailpointStats.failures), zap.Float64("successRate", afterFailpointStats.successRate()), zap.Duration("period", afterFailpointStats.period), zap.Float64("qps", afterFailpointStats.QPS()))
+
+ if beforeFailpointStats.QPS() < profile.MinimalQPS {
+ t.Errorf("Requiring minimal %f qps before failpoint injection for test results to be reliable, got %f qps", profile.MinimalQPS, beforeFailpointStats.QPS())
+ }
+ // TODO: Validate QPS after failpoint injection to ensure that we sufficiently cover the period when the cluster recovers.
+ return reports
+}
+
+func calculateStats(reports []report.ClientReport, start, end time.Duration) (ts trafficStats) {
+ ts.period = end - start
+
+ for _, r := range reports {
+ for _, op := range r.KeyValue {
+ if op.Call < start.Nanoseconds() || op.Call > end.Nanoseconds() {
+ continue
+ }
+ resp := op.Output.(model.MaybeEtcdResponse)
+ if resp.Error == "" {
+ ts.successes++
+ } else {
+ ts.failures++
+ }
+ }
+ }
+ return ts
+}
+
+type trafficStats struct {
+ successes, failures int
+ period time.Duration
+}
+
+func (ts *trafficStats) successRate() float64 {
+ return float64(ts.successes) / float64(ts.successes+ts.failures)
+}
+
+func (ts *trafficStats) QPS() float64 {
+ return float64(ts.successes) / ts.period.Seconds()
+}
+
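+// Profile describes the traffic load: the QPS bounds used for validation and rate limiting, the client count,
+// the cap on concurrent non-unique writes, and whether the compaction loop is disabled.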
+type Profile struct {
+ MinimalQPS float64
+ MaximalQPS float64
+ MaxNonUniqueRequestConcurrency int
+ ClientCount int
+ ForbidCompaction bool
+}
+
+func (p Profile) WithoutCompaction() Profile {
+ p.ForbidCompaction = true
+ return p
+}
+
+type Traffic interface {
+ Run(ctx context.Context, c *client.RecordingClient, qpsLimiter *rate.Limiter, ids identity.Provider, lm identity.LeaseIDStorage, nonUniqueWriteLimiter ConcurrencyLimiter, finish <-chan struct{})
+ ExpectUniqueRevision() bool
+}
+
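+// RunCompactLoop periodically compacts at a revision picked at random between the last compaction and slightly
+// past the current head revision, exercising both compacted-revision and future-revision error paths.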
+func RunCompactLoop(ctx context.Context, c *client.RecordingClient, period time.Duration, finish <-chan struct{}) {
+ var lastRev int64 = 2
+ timer := time.NewTimer(period)
+ for {
+ timer.Reset(period)
+ select {
+ case <-ctx.Done():
+ return
+ case <-finish:
+ return
+ case <-timer.C:
+ }
+ statusCtx, cancel := context.WithTimeout(ctx, RequestTimeout)
+ resp, err := c.Status(statusCtx, c.Endpoints()[0])
+ cancel()
+ if err != nil {
+ continue
+ }
+
+ // The revision range allows for both "revision has been compacted" and "future revision" errors.
+ compactRev := random.RandRange(lastRev, resp.Header.Revision+5)
+ _, err = c.Compact(ctx, compactRev)
+ if err != nil {
+ continue
+ }
+ lastRev = compactRev
+ }
+}
diff --git a/tests/robustness/validate/operations.go b/tests/robustness/validate/operations.go
new file mode 100644
index 00000000000..9f39407ad81
--- /dev/null
+++ b/tests/robustness/validate/operations.go
@@ -0,0 +1,105 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/anishathalye/porcupine"
+ "github.com/google/go-cmp/cmp"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/tests/v3/robustness/model"
+ "go.etcd.io/etcd/tests/v3/robustness/report"
+)
+
+var (
+ errRespNotMatched = errors.New("response didn't match expected")
+ errFutureRevRespRequested = errors.New("request about a future rev with response")
+)
+
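+// validateLinearizableOperationsAndVisualize checks the operation history against the non-deterministic etcd model
+// using porcupine and returns the result together with a function that writes the linearization visualization.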
+func validateLinearizableOperationsAndVisualize(lg *zap.Logger, operations []porcupine.Operation, timeout time.Duration) (result porcupine.CheckResult, visualize func(basepath string) error) {
+ lg.Info("Validating linearizable operations", zap.Duration("timeout", timeout))
+ start := time.Now()
+ result, info := porcupine.CheckOperationsVerbose(model.NonDeterministicModel, operations, timeout)
+ switch result {
+ case porcupine.Illegal:
+ lg.Error("Linearization failed", zap.Duration("duration", time.Since(start)))
+ case porcupine.Unknown:
+ lg.Error("Linearization has timed out", zap.Duration("duration", time.Since(start)))
+ case porcupine.Ok:
+ lg.Info("Linearization success", zap.Duration("duration", time.Since(start)))
+ default:
+ panic(fmt.Sprintf("Unknown Linearization result %s", result))
+ }
+ return result, func(path string) error {
+ lg.Info("Saving visualization", zap.String("path", path))
+ err := porcupine.VisualizePath(model.NonDeterministicModel, info, path)
+ if err != nil {
+ return fmt.Errorf("failed to visualize, err: %w", err)
+ }
+ return nil
+ }
+}
+
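+// validateSerializableOperations checks each serializable read against the replayed etcd state, returning the last validation error encountered.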
+func validateSerializableOperations(lg *zap.Logger, operations []porcupine.Operation, replay *model.EtcdReplay) (lastErr error) {
+ lg.Info("Validating serializable operations")
+ for _, read := range operations {
+ request := read.Input.(model.EtcdRequest)
+ response := read.Output.(model.MaybeEtcdResponse)
+ err := validateSerializableRead(lg, replay, request, response)
+ if err != nil {
+ lastErr = err
+ }
+ }
+ return lastErr
+}
+
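+// filterSerializableOperations keeps only Range requests with an explicit revision, since those are the serializable reads checked against the replay.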
+func filterSerializableOperations(clients []report.ClientReport) []porcupine.Operation {
+ resp := []porcupine.Operation{}
+ for _, client := range clients {
+ for _, op := range client.KeyValue {
+ request := op.Input.(model.EtcdRequest)
+ if request.Type == model.Range && request.Range.Revision != 0 {
+ resp = append(resp, op)
+ }
+ }
+ }
+ return resp
+}
+
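+// validateSerializableRead replays the state at the requested revision and compares the recorded response with the response expected by the model.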
+func validateSerializableRead(lg *zap.Logger, replay *model.EtcdReplay, request model.EtcdRequest, response model.MaybeEtcdResponse) error {
+ if response.Persisted || response.Error != "" {
+ return nil
+ }
+ state, err := replay.StateForRevision(request.Range.Revision)
+ if err != nil {
+ if response.Error == model.ErrEtcdFutureRev.Error() {
+ return nil
+ }
+ lg.Error("Failed validating serializable operation", zap.Any("request", request), zap.Any("response", response))
+ return errFutureRevRespRequested
+ }
+
+ _, expectResp := state.Step(request)
+
+ if diff := cmp.Diff(response.EtcdResponse.Range, expectResp.Range); diff != "" {
+ lg.Error("Failed validating serializable operation", zap.Any("request", request), zap.String("diff", diff))
+ return errRespNotMatched
+ }
+ return nil
+}
diff --git a/tests/robustness/validate/operations_test.go b/tests/robustness/validate/operations_test.go
new file mode 100644
index 00000000000..c6b7afab91f
--- /dev/null
+++ b/tests/robustness/validate/operations_test.go
@@ -0,0 +1,295 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//nolint:unparam
+package validate
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/anishathalye/porcupine"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/tests/v3/robustness/model"
+)
+
+func TestValidateSerializableOperations(t *testing.T) {
+ tcs := []struct {
+ name string
+ persistedRequests []model.EtcdRequest
+ operations []porcupine.Operation
+ expectError string
+ }{
+ {
+ name: "Success",
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ operations: []porcupine.Operation{
+ {
+ Input: rangeRequest("a", "z", 1, 0),
+ Output: rangeResponse(0),
+ },
+ {
+ Input: rangeRequest("a", "z", 2, 0),
+ Output: rangeResponse(1, keyValueRevision("a", "1", 2)),
+ },
+ {
+ Input: rangeRequest("a", "z", 3, 0),
+ Output: rangeResponse(2,
+ keyValueRevision("a", "1", 2),
+ keyValueRevision("b", "2", 3),
+ ),
+ },
+ {
+ Input: rangeRequest("a", "z", 4, 0),
+ Output: rangeResponse(3,
+ keyValueRevision("a", "1", 2),
+ keyValueRevision("b", "2", 3),
+ keyValueRevision("c", "3", 4),
+ ),
+ },
+ {
+ Input: rangeRequest("a", "z", 4, 3),
+ Output: rangeResponse(3,
+ keyValueRevision("a", "1", 2),
+ keyValueRevision("b", "2", 3),
+ keyValueRevision("c", "3", 4),
+ ),
+ },
+ {
+ Input: rangeRequest("a", "z", 4, 4),
+ Output: rangeResponse(3,
+ keyValueRevision("a", "1", 2),
+ keyValueRevision("b", "2", 3),
+ keyValueRevision("c", "3", 4),
+ ),
+ },
+ {
+ Input: rangeRequest("a", "z", 4, 2),
+ Output: rangeResponse(3,
+ keyValueRevision("a", "1", 2),
+ keyValueRevision("b", "2", 3),
+ ),
+ },
+ {
+ Input: rangeRequest("b\x00", "z", 4, 2),
+ Output: rangeResponse(1,
+ keyValueRevision("c", "3", 4),
+ ),
+ },
+ {
+ Input: rangeRequest("b", "", 4, 0),
+ Output: rangeResponse(1,
+ keyValueRevision("b", "2", 3),
+ ),
+ },
+ {
+ Input: rangeRequest("b", "", 2, 0),
+ Output: rangeResponse(0),
+ },
+ },
+ },
+ {
+ name: "Invalid order",
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ operations: []porcupine.Operation{
+ {
+ Input: rangeRequest("a", "z", 4, 0),
+ Output: rangeResponse(3,
+ keyValueRevision("c", "3", 4),
+ keyValueRevision("b", "2", 3),
+ keyValueRevision("a", "1", 2),
+ ),
+ },
+ },
+ expectError: errRespNotMatched.Error(),
+ },
+ {
+ name: "Invalid count",
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ operations: []porcupine.Operation{
+ {
+ Input: rangeRequest("a", "z", 1, 0),
+ Output: rangeResponse(1),
+ },
+ },
+ expectError: errRespNotMatched.Error(),
+ },
+ {
+ name: "Invalid keys",
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ operations: []porcupine.Operation{
+ {
+ Input: rangeRequest("a", "z", 2, 0),
+ Output: rangeResponse(3,
+ keyValueRevision("b", "2", 3),
+ ),
+ },
+ },
+ expectError: errRespNotMatched.Error(),
+ },
+ {
+ name: "Invalid revision",
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ operations: []porcupine.Operation{
+ {
+ Input: rangeRequest("a", "z", 2, 0),
+ Output: rangeResponse(3,
+ keyValueRevision("a", "1", 2),
+ keyValueRevision("b", "2", 3),
+ ),
+ },
+ },
+ expectError: errRespNotMatched.Error(),
+ },
+ {
+ name: "Error",
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ operations: []porcupine.Operation{
+ {
+ Input: rangeRequest("a", "z", 2, 0),
+ Output: errorResponse(model.ErrEtcdFutureRev),
+ },
+ {
+ Input: rangeRequest("a", "z", 2, 0),
+ Output: errorResponse(fmt.Errorf("timeout")),
+ },
+ },
+ },
+ {
+ name: "Future rev returned",
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ operations: []porcupine.Operation{
+ {
+ Input: rangeRequest("a", "z", 6, 0),
+ Output: errorResponse(model.ErrEtcdFutureRev),
+ },
+ },
+ },
+ {
+ name: "Future rev success",
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ operations: []porcupine.Operation{
+ {
+ Input: rangeRequest("a", "z", 6, 0),
+ Output: rangeResponse(0),
+ },
+ },
+ expectError: errFutureRevRespRequested.Error(),
+ },
+ {
+ name: "Future rev failure",
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ operations: []porcupine.Operation{
+ {
+ Input: rangeRequest("a", "z", 6, 0),
+ Output: errorResponse(fmt.Errorf("timeout")),
+ },
+ },
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ replay := model.NewReplay(tc.persistedRequests)
+ err := validateSerializableOperations(zaptest.NewLogger(t), tc.operations, replay)
+ var errStr string
+ if err != nil {
+ errStr = err.Error()
+ }
+ if errStr != tc.expectError {
+ t.Errorf("validateSerializableOperations(...), got: %q, want: %q", err, tc.expectError)
+ }
+ })
+ }
+}
+
+func rangeRequest(start, end string, rev, limit int64) model.EtcdRequest {
+ return model.EtcdRequest{
+ Type: model.Range,
+ Range: &model.RangeRequest{
+ RangeOptions: model.RangeOptions{
+ Start: start,
+ End: end,
+ Limit: limit,
+ },
+ Revision: rev,
+ },
+ }
+}
+
+func rangeResponse(count int64, kvs ...model.KeyValue) model.MaybeEtcdResponse {
+ if kvs == nil {
+ kvs = []model.KeyValue{}
+ }
+ return model.MaybeEtcdResponse{
+ EtcdResponse: model.EtcdResponse{
+ Range: &model.RangeResponse{
+ KVs: kvs,
+ Count: count,
+ },
+ },
+ }
+}
+
+func errorResponse(err error) model.MaybeEtcdResponse {
+ return model.MaybeEtcdResponse{
+ Error: err.Error(),
+ }
+}
+
+func keyValueRevision(key, value string, rev int64) model.KeyValue {
+ return model.KeyValue{
+ Key: key,
+ ValueRevision: model.ValueRevision{
+ Value: model.ToValueOrHash(value),
+ ModRevision: rev,
+ },
+ }
+}
diff --git a/tests/robustness/validate/patch_history.go b/tests/robustness/validate/patch_history.go
new file mode 100644
index 00000000000..4c6d4794ff4
--- /dev/null
+++ b/tests/robustness/validate/patch_history.go
@@ -0,0 +1,280 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "fmt"
+
+ "github.com/anishathalye/porcupine"
+
+ "go.etcd.io/etcd/tests/v3/robustness/model"
+ "go.etcd.io/etcd/tests/v3/robustness/report"
+)
+
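+// patchLinearizableOperations amends the client histories with information recovered from watch events and
+// persisted requests so that failed but persisted writes can still be linearized.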
+func patchLinearizableOperations(reports []report.ClientReport, persistedRequests []model.EtcdRequest) []porcupine.Operation {
+ allOperations := relevantOperations(reports)
+ putRevision := putRevision(reports)
+ putReturnTime := putReturnTime(allOperations, reports, persistedRequests)
+ clientPutCount := countClientPuts(reports)
+ persistedPutCount := countPersistedPuts(persistedRequests)
+ return patchOperations(allOperations, putRevision, putReturnTime, clientPutCount, persistedPutCount)
+}
+
+func relevantOperations(reports []report.ClientReport) []porcupine.Operation {
+ var ops []porcupine.Operation
+ for _, r := range reports {
+ for _, op := range r.KeyValue {
+ request := op.Input.(model.EtcdRequest)
+ resp := op.Output.(model.MaybeEtcdResponse)
+ // Remove failed read requests as they are not relevant for linearization.
+ if resp.Error == "" || !request.IsRead() {
+ ops = append(ops, op)
+ }
+ }
+ }
+ return ops
+}
+
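+// putRevision maps each put key/value pair to the revision reported for it by watch events.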
+func putRevision(reports []report.ClientReport) map[keyValue]int64 {
+ requestRevision := map[keyValue]int64{}
+ for _, client := range reports {
+ for _, watch := range client.Watch {
+ for _, resp := range watch.Responses {
+ for _, event := range resp.Events {
+ switch event.Type {
+ case model.RangeOperation:
+ case model.PutOperation:
+ kv := keyValue{Key: event.Key, Value: event.Value}
+ requestRevision[kv] = event.Revision
+ case model.DeleteOperation:
+ default:
+ panic(fmt.Sprintf("unknown event type %q", event.Type))
+ }
+ }
+ }
+ }
+ }
+ return requestRevision
+}
+
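+// patchOperations rewrites failed txn operations: unique writes that never persisted are dropped, persisted ones
+// are marked Persisted (with the revision learned from watches when available), and return times are tightened
+// using putReturnTime.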
+func patchOperations(operations []porcupine.Operation, watchRevision, putReturnTime, clientPutCount, persistedPutCount map[keyValue]int64) []porcupine.Operation {
+ newOperations := make([]porcupine.Operation, 0, len(operations))
+
+ for _, op := range operations {
+ request := op.Input.(model.EtcdRequest)
+ resp := op.Output.(model.MaybeEtcdResponse)
+ if resp.Error == "" || request.Type != model.Txn {
+ // Cannot patch those requests.
+ newOperations = append(newOperations, op)
+ continue
+ }
+ var txnRevision int64
+ var persisted bool
+ for _, etcdOp := range append(request.Txn.OperationsOnSuccess, request.Txn.OperationsOnFailure...) {
+ switch etcdOp.Type {
+ case model.PutOperation:
+ kv := keyValue{Key: etcdOp.Put.Key, Value: etcdOp.Put.Value}
+ if _, ok := persistedPutCount[kv]; ok {
+ persisted = true
+ }
+ if count := clientPutCount[kv]; count != 1 {
+ continue
+ }
+ if revision, ok := watchRevision[kv]; ok {
+ txnRevision = revision
+ }
+ if returnTime, ok := putReturnTime[kv]; ok {
+ op.Return = min(op.Return, returnTime)
+ }
+ case model.DeleteOperation:
+ case model.RangeOperation:
+ default:
+ panic(fmt.Sprintf("unknown operation type %q", etcdOp.Type))
+ }
+ }
+ if isUniqueTxn(request.Txn, clientPutCount) {
+ if !persisted {
+ // Remove non-persisted operations
+ continue
+ } else {
+ if txnRevision != 0 {
+ op.Output = model.MaybeEtcdResponse{Persisted: true, PersistedRevision: txnRevision}
+ } else {
+ op.Output = model.MaybeEtcdResponse{Persisted: true}
+ }
+ }
+ }
+ // Leave the operation as is, since we cannot discard it.
+ newOperations = append(newOperations, op)
+ }
+ return newOperations
+}
+
+func isUniqueTxn(request *model.TxnRequest, clientRequestCount map[keyValue]int64) bool {
+ return isUniqueOps(request.OperationsOnSuccess, clientRequestCount) && isUniqueOps(request.OperationsOnFailure, clientRequestCount)
+}
+
+func isUniqueOps(ops []model.EtcdOperation, clientRequestCount map[keyValue]int64) bool {
+ return hasUniqueWriteOperation(ops, clientRequestCount) || !hasWriteOperation(ops)
+}
+
+func hasWriteOperation(ops []model.EtcdOperation) bool {
+ for _, etcdOp := range ops {
+ if etcdOp.Type == model.PutOperation || etcdOp.Type == model.DeleteOperation {
+ return true
+ }
+ }
+ return false
+}
+
+func hasUniqueWriteOperation(ops []model.EtcdOperation, clientRequestCount map[keyValue]int64) bool {
+ for _, operation := range ops {
+ switch operation.Type {
+ case model.PutOperation:
+ kv := keyValue{Key: operation.Put.Key, Value: operation.Put.Value}
+ if count := clientRequestCount[kv]; count == 1 {
+ return true
+ }
+ case model.DeleteOperation:
+ case model.RangeOperation:
+ default:
+ panic(fmt.Sprintf("unknown operation type %q", operation.Type))
+ }
+ }
+ return false
+}
+
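+// putReturnTime computes the earliest time each put is known to have returned, combining client operation
+// return times, watch response times and the order of persisted requests.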
+func putReturnTime(allOperations []porcupine.Operation, reports []report.ClientReport, persistedRequests []model.EtcdRequest) map[keyValue]int64 {
+ earliestReturnTime := map[keyValue]int64{}
+ var lastReturnTime int64
+ for _, op := range allOperations {
+ request := op.Input.(model.EtcdRequest)
+ switch request.Type {
+ case model.Txn:
+ for _, etcdOp := range append(request.Txn.OperationsOnSuccess, request.Txn.OperationsOnFailure...) {
+ if etcdOp.Type != model.PutOperation {
+ continue
+ }
+ kv := keyValue{Key: etcdOp.Put.Key, Value: etcdOp.Put.Value}
+ if returnTime, ok := earliestReturnTime[kv]; !ok || returnTime > op.Return {
+ earliestReturnTime[kv] = op.Return
+ }
+ }
+ case model.Range:
+ case model.LeaseGrant:
+ case model.LeaseRevoke:
+ case model.Compact:
+ default:
+ panic(fmt.Sprintf("Unknown request type: %q", request.Type))
+ }
+ if op.Return > lastReturnTime {
+ lastReturnTime = op.Return
+ }
+ }
+
+ for _, client := range reports {
+ for _, watch := range client.Watch {
+ for _, resp := range watch.Responses {
+ for _, event := range resp.Events {
+ switch event.Type {
+ case model.RangeOperation:
+ case model.PutOperation:
+ kv := keyValue{Key: event.Key, Value: event.Value}
+ if t, ok := earliestReturnTime[kv]; !ok || t > resp.Time.Nanoseconds() {
+ earliestReturnTime[kv] = resp.Time.Nanoseconds()
+ }
+ case model.DeleteOperation:
+ default:
+ panic(fmt.Sprintf("unknown event type %q", event.Type))
+ }
+ }
+ }
+ }
+ }
+
+ for i := len(persistedRequests) - 1; i >= 0; i-- {
+ request := persistedRequests[i]
+ switch request.Type {
+ case model.Txn:
+ lastReturnTime--
+ for _, op := range request.Txn.OperationsOnSuccess {
+ if op.Type != model.PutOperation {
+ continue
+ }
+ kv := keyValue{Key: op.Put.Key, Value: op.Put.Value}
+ returnTime, ok := earliestReturnTime[kv]
+ if ok {
+ lastReturnTime = min(returnTime, lastReturnTime)
+ earliestReturnTime[kv] = lastReturnTime
+ }
+ }
+ case model.LeaseGrant:
+ case model.LeaseRevoke:
+ case model.Compact:
+ default:
+ panic(fmt.Sprintf("Unknown request type: %q", request.Type))
+ }
+ }
+ return earliestReturnTime
+}
+
+func countClientPuts(reports []report.ClientReport) map[keyValue]int64 {
+ counter := map[keyValue]int64{}
+ for _, client := range reports {
+ for _, op := range client.KeyValue {
+ request := op.Input.(model.EtcdRequest)
+ countPuts(counter, request)
+ }
+ }
+ return counter
+}
+
+func countPersistedPuts(requests []model.EtcdRequest) map[keyValue]int64 {
+ counter := map[keyValue]int64{}
+ for _, request := range requests {
+ countPuts(counter, request)
+ }
+ return counter
+}
+
+func countPuts(counter map[keyValue]int64, request model.EtcdRequest) {
+ switch request.Type {
+ case model.Txn:
+ for _, operation := range append(request.Txn.OperationsOnSuccess, request.Txn.OperationsOnFailure...) {
+ switch operation.Type {
+ case model.PutOperation:
+ kv := keyValue{Key: operation.Put.Key, Value: operation.Put.Value}
+ counter[kv]++
+ case model.DeleteOperation:
+ case model.RangeOperation:
+ default:
+ panic(fmt.Sprintf("unknown operation type %q", operation.Type))
+ }
+ }
+ case model.LeaseGrant:
+ case model.LeaseRevoke:
+ case model.Compact:
+ case model.Defragment:
+ case model.Range:
+ default:
+ panic(fmt.Sprintf("unknown request type %q", request.Type))
+ }
+}
+
+type keyValue struct {
+ Key string
+ Value model.ValueOrHash
+}
diff --git a/tests/robustness/validate/patch_history_test.go b/tests/robustness/validate/patch_history_test.go
new file mode 100644
index 00000000000..bb104b0125f
--- /dev/null
+++ b/tests/robustness/validate/patch_history_test.go
@@ -0,0 +1,412 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//nolint:unparam
+package validate
+
+import (
+ "errors"
+ "testing"
+ "time"
+
+ "github.com/anishathalye/porcupine"
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/robustness/identity"
+ "go.etcd.io/etcd/tests/v3/robustness/model"
+ "go.etcd.io/etcd/tests/v3/robustness/report"
+)
+
+const infinite = 1000000000
+
+func TestPatchHistory(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ historyFunc func(h *model.AppendableHistory)
+ persistedRequest []model.EtcdRequest
+ watchOperations []model.WatchOperation
+ expectedRemainingOperations []porcupine.Operation
+ }{
+ {
+ name: "successful range remains",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendRange("key", "", 0, 0, 100, 200, &clientv3.GetResponse{}, nil)
+ },
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: 200, Output: rangeResponse(0)},
+ },
+ },
+ {
+ name: "successful put remains",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendPut("key", "value", 100, 200, &clientv3.PutResponse{}, nil)
+ },
+ persistedRequest: []model.EtcdRequest{
+ putRequest("key", "value"),
+ },
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: 200, Output: putResponse(model.EtcdOperationResult{})},
+ },
+ },
+ {
+ name: "failed put remains if there is a matching event, return time untouched",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendPut("key", "value", 100, infinite, nil, errors.New("failed"))
+ },
+ persistedRequest: []model.EtcdRequest{
+ putRequest("key", "value"),
+ },
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: infinite + 99, Output: model.MaybeEtcdResponse{Persisted: true}},
+ },
+ },
+ {
+ name: "failed put remains if there is a matching event, uniqueness allows for return time to be based on next persisted request",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendPut("key1", "value", 100, infinite, nil, errors.New("failed"))
+ h.AppendPut("key2", "value", 300, 400, &clientv3.PutResponse{}, nil)
+ },
+ persistedRequest: []model.EtcdRequest{
+ putRequest("key1", "value"),
+ putRequest("key2", "value"),
+ },
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: 399, Output: model.MaybeEtcdResponse{Persisted: true}},
+ {Return: 400, Output: putResponse(model.EtcdOperationResult{})},
+ },
+ },
+ {
+ name: "failed put remains if there is a matching persisted request, uniqueness allows for revision and return time to be based on watch",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendPut("key", "value", 100, infinite, nil, errors.New("failed"))
+ },
+ persistedRequest: []model.EtcdRequest{
+ putRequest("key", "value"),
+ },
+ watchOperations: watchResponse(300, putEvent("key", "value", 2)),
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: 300, Output: model.MaybeEtcdResponse{Persisted: true, PersistedRevision: 2}},
+ },
+ },
+ {
+ name: "failed put remains if there is a matching persisted request, lack of uniqueness causes time to be untouched regardless of persisted event and watch",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendPut("key", "value", 1, 2, nil, errors.New("failed"))
+ h.AppendPut("key", "value", 3, 4, &clientv3.PutResponse{}, nil)
+ },
+ persistedRequest: []model.EtcdRequest{
+ putRequest("key", "value"),
+ putRequest("key", "value"),
+ },
+ watchOperations: watchResponse(3, putEvent("key", "value", 2), putEvent("key", "value", 3)),
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: 1000000004, Output: model.MaybeEtcdResponse{Error: "failed"}},
+ {Return: 4, Output: putResponse(model.EtcdOperationResult{})},
+ },
+ },
+ {
+ name: "failed put is dropped if event has different key",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendPut("key2", "value", 100, 200, &clientv3.PutResponse{}, nil)
+ h.AppendPut("key1", "value", 300, infinite, nil, errors.New("failed"))
+ },
+ persistedRequest: []model.EtcdRequest{
+ putRequest("key2", "value"),
+ },
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: 200, Output: putResponse(model.EtcdOperationResult{})},
+ },
+ },
+ {
+ name: "failed put is dropped if event has different value",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendPut("key", "value2", 100, 200, &clientv3.PutResponse{}, nil)
+ h.AppendPut("key", "value1", 300, infinite, nil, errors.New("failed"))
+ },
+ persistedRequest: []model.EtcdRequest{
+ putRequest("key", "value2"),
+ },
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: 200, Output: putResponse(model.EtcdOperationResult{})},
+ },
+ },
+ {
+ name: "failed put with lease remains if there is a matching event, return time untouched",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendPutWithLease("key", "value", 123, 100, infinite, nil, errors.New("failed"))
+ },
+ persistedRequest: []model.EtcdRequest{
+ putRequestWithLease("key", "value", 123),
+ },
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: infinite + 99, Output: model.MaybeEtcdResponse{Persisted: true}},
+ },
+ },
+ {
+ name: "failed put with lease remains if there is a matching event, uniqueness allows return time to be based on next persisted request",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendPutWithLease("key1", "value", 123, 100, infinite, nil, errors.New("failed"))
+ h.AppendPutWithLease("key2", "value", 234, 300, 400, &clientv3.PutResponse{}, nil)
+ },
+ persistedRequest: []model.EtcdRequest{
+ putRequestWithLease("key1", "value", 123),
+ putRequestWithLease("key2", "value", 234),
+ },
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: 399, Output: model.MaybeEtcdResponse{Persisted: true}},
+ {Return: 400, Output: putResponse(model.EtcdOperationResult{})},
+ },
+ },
+ {
+ name: "failed put with lease remains if there is a matching event, uniqueness allows for revision and return time to be based on watch",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendPutWithLease("key", "value", 123, 1, 2, nil, errors.New("failed"))
+ },
+ persistedRequest: []model.EtcdRequest{
+ putRequestWithLease("key", "value", 123),
+ },
+ watchOperations: watchResponse(3, putEvent("key", "value", 2)),
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: 3, Output: model.MaybeEtcdResponse{Persisted: true, PersistedRevision: 2}},
+ },
+ },
+ {
+ name: "failed put with lease remains if there is a matching persisted request, lack of uniqueness causes time to be untouched regardless of persisted event and watch",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendPutWithLease("key", "value", 123, 1, 2, nil, errors.New("failed"))
+ h.AppendPutWithLease("key", "value", 321, 3, 4, &clientv3.PutResponse{}, nil)
+ },
+ persistedRequest: []model.EtcdRequest{
+ putRequestWithLease("key", "value", 123),
+ putRequestWithLease("key", "value", 321),
+ },
+ watchOperations: watchResponse(3, putEvent("key", "value", 2), putEvent("key", "value", 3)),
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: 1000000004, Output: model.MaybeEtcdResponse{Error: "failed"}},
+ {Return: 4, Output: putResponse(model.EtcdOperationResult{})},
+ },
+ },
+ {
+ name: "failed put is dropped",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendPut("key", "value", 100, infinite, nil, errors.New("failed"))
+ },
+ expectedRemainingOperations: []porcupine.Operation{},
+ },
+ {
+ name: "failed put with lease is dropped",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendPutWithLease("key", "value", 123, 100, infinite, nil, errors.New("failed"))
+ },
+ expectedRemainingOperations: []porcupine.Operation{},
+ },
+ {
+ name: "successful delete remains",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendDelete("key", 100, 200, &clientv3.DeleteResponse{}, nil)
+ },
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: 200, Output: putResponse(model.EtcdOperationResult{})},
+ },
+ },
+ {
+ name: "failed delete remains, time untouched regardless of persisted event and watch",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendDelete("key", 100, infinite, nil, errors.New("failed"))
+ h.AppendPut("key", "value", 300, 400, &clientv3.PutResponse{}, nil)
+ },
+ persistedRequest: []model.EtcdRequest{
+ putRequest("key", "value"),
+ },
+ watchOperations: watchResponse(3, deleteEvent("key", 2)),
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: infinite + 400, Output: model.MaybeEtcdResponse{Error: "failed"}},
+ {Return: 400, Output: putResponse(model.EtcdOperationResult{})},
+ },
+ },
+ {
+ name: "failed empty txn is dropped",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendTxn(nil, []clientv3.Op{}, []clientv3.Op{}, 100, infinite, nil, errors.New("failed"))
+ },
+ expectedRemainingOperations: []porcupine.Operation{},
+ },
+ {
+ name: "failed txn put is dropped",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendTxn(nil, []clientv3.Op{clientv3.OpPut("key", "value")}, []clientv3.Op{}, 100, infinite, nil, errors.New("failed"))
+ },
+ expectedRemainingOperations: []porcupine.Operation{},
+ },
+ {
+ name: "failed txn put remains if there is a matching event",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendTxn(nil, []clientv3.Op{clientv3.OpPut("key", "value")}, []clientv3.Op{}, 100, infinite, nil, errors.New("failed"))
+ },
+ persistedRequest: []model.EtcdRequest{
+ putRequest("key", "value"),
+ },
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: infinite + 99, Output: model.MaybeEtcdResponse{Persisted: true}},
+ },
+ },
+ {
+ name: "failed txn delete remains",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendTxn(nil, []clientv3.Op{clientv3.OpDelete("key")}, []clientv3.Op{}, 100, infinite, nil, errors.New("failed"))
+ },
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: infinite + 100, Output: model.MaybeEtcdResponse{Error: "failed"}},
+ },
+ },
+ {
+ name: "successful txn put/delete remains",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendTxn(nil, []clientv3.Op{clientv3.OpPut("key", "value")}, []clientv3.Op{clientv3.OpDelete("key")}, 100, 200, &clientv3.TxnResponse{Succeeded: true}, nil)
+ },
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: 200, Output: putResponse()},
+ },
+ },
+ {
+ name: "failed txn put/delete remains",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendTxn(nil, []clientv3.Op{clientv3.OpPut("key", "value")}, []clientv3.Op{clientv3.OpDelete("key")}, 100, infinite, nil, errors.New("failed"))
+ },
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: infinite + 100, Output: model.MaybeEtcdResponse{Error: "failed"}},
+ },
+ },
+ {
+ name: "failed txn delete/put remains",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendTxn(nil, []clientv3.Op{clientv3.OpDelete("key")}, []clientv3.Op{clientv3.OpPut("key", "value")}, 100, infinite, nil, errors.New("failed"))
+ },
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: infinite + 100, Output: model.MaybeEtcdResponse{Error: "failed"}},
+ },
+ },
+ {
+ name: "failed txn empty/put is dropped",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendTxn(nil, []clientv3.Op{}, []clientv3.Op{clientv3.OpPut("key", "value")}, 100, infinite, nil, errors.New("failed"))
+ },
+ expectedRemainingOperations: []porcupine.Operation{},
+ },
+ {
+ name: "failed txn empty/put remains if there is a matching event",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendTxn(nil, []clientv3.Op{clientv3.OpPut("key", "value")}, []clientv3.Op{}, 100, infinite, nil, errors.New("failed"))
+ },
+ persistedRequest: []model.EtcdRequest{
+ putRequest("key", "value"),
+ },
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: infinite + 99, Output: model.MaybeEtcdResponse{Persisted: true}},
+ },
+ },
+ {
+ name: "failed txn empty/delete remains",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendTxn(nil, []clientv3.Op{}, []clientv3.Op{clientv3.OpDelete("key")}, 100, infinite, nil, errors.New("failed"))
+ },
+ expectedRemainingOperations: []porcupine.Operation{
+ {Return: infinite + 100, Output: model.MaybeEtcdResponse{Error: "failed"}},
+ },
+ },
+ {
+ name: "failed txn put&delete is dropped",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendTxn(nil, []clientv3.Op{clientv3.OpPut("key", "value1"), clientv3.OpDelete("key")}, []clientv3.Op{}, 100, infinite, nil, errors.New("failed"))
+ },
+ expectedRemainingOperations: []porcupine.Operation{},
+ },
+ {
+ name: "failed txn empty/put&delete is dropped",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendTxn(nil, []clientv3.Op{}, []clientv3.Op{clientv3.OpPut("key", "value1"), clientv3.OpDelete("key")}, 100, infinite, nil, errors.New("failed"))
+ },
+ expectedRemainingOperations: []porcupine.Operation{},
+ },
+ {
+ name: "failed txn put&delete/put&delete is dropped",
+ historyFunc: func(h *model.AppendableHistory) {
+ h.AppendTxn(nil, []clientv3.Op{clientv3.OpPut("key", "value1"), clientv3.OpDelete("key")}, []clientv3.Op{clientv3.OpPut("key", "value2"), clientv3.OpDelete("key")}, 100, infinite, nil, errors.New("failed"))
+ },
+ expectedRemainingOperations: []porcupine.Operation{},
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ history := model.NewAppendableHistory(identity.NewIDProvider())
+ tc.historyFunc(history)
+ operations := patchLinearizableOperations([]report.ClientReport{
+ {
+ ClientID: 0,
+ KeyValue: history.History.Operations(),
+ Watch: tc.watchOperations,
+ },
+ }, tc.persistedRequest)
+ if diff := cmp.Diff(tc.expectedRemainingOperations, operations,
+ cmpopts.EquateEmpty(),
+ cmpopts.IgnoreFields(porcupine.Operation{}, "Input", "Call", "ClientId"),
+ ); diff != "" {
+ t.Errorf("Response didn't match expected, diff:\n%s", diff)
+ }
+ })
+ }
+}
+
+func putResponse(result ...model.EtcdOperationResult) model.MaybeEtcdResponse {
+ return model.MaybeEtcdResponse{EtcdResponse: model.EtcdResponse{Txn: &model.TxnResponse{Results: result}}}
+}
+
+func watchResponse(responseTime int64, events ...model.WatchEvent) []model.WatchOperation {
+ return []model.WatchOperation{
+ {
+ Responses: []model.WatchResponse{
+ {
+ Time: time.Duration(responseTime),
+ Events: events,
+ },
+ },
+ },
+ }
+}
+
+func putEvent(key, value string, revision int64) model.WatchEvent {
+ return model.WatchEvent{
+ PersistedEvent: model.PersistedEvent{
+ Event: model.Event{
+ Type: model.PutOperation,
+ Key: key,
+ Value: model.ToValueOrHash(value),
+ },
+ Revision: revision,
+ },
+ }
+}
+
+func deleteEvent(key string, revision int64) model.WatchEvent {
+ return model.WatchEvent{
+ PersistedEvent: model.PersistedEvent{
+ Event: model.Event{
+ Type: model.DeleteOperation,
+ Key: key,
+ },
+ Revision: revision,
+ },
+ }
+}
diff --git a/tests/robustness/validate/validate.go b/tests/robustness/validate/validate.go
new file mode 100644
index 00000000000..5918ec0df83
--- /dev/null
+++ b/tests/robustness/validate/validate.go
@@ -0,0 +1,174 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "encoding/json"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/anishathalye/porcupine"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/tests/v3/robustness/model"
+ "go.etcd.io/etcd/tests/v3/robustness/report"
+)
+
+// ValidateAndReturnVisualize returns the visualize function, as porcupine.linearizationInfo used to generate the visualization is private.
+func ValidateAndReturnVisualize(t *testing.T, lg *zap.Logger, cfg Config, reports []report.ClientReport, persistedRequests []model.EtcdRequest, timeout time.Duration) (visualize func(basepath string) error) {
+ err := checkValidationAssumptions(reports, persistedRequests)
+ require.NoErrorf(t, err, "Broken validation assumptions")
+ linearizableOperations := patchLinearizableOperations(reports, persistedRequests)
+ serializableOperations := filterSerializableOperations(reports)
+
+ linearizable, visualize := validateLinearizableOperationsAndVisualize(lg, linearizableOperations, timeout)
+ if linearizable != porcupine.Ok {
+ t.Error("Failed linearization, skipping further validation")
+ return visualize
+ }
+ // TODO: Use requests from linearization for replay.
+ replay := model.NewReplay(persistedRequests)
+
+ err = validateWatch(lg, cfg, reports, replay)
+ if err != nil {
+ t.Errorf("Failed validating watch history, err: %s", err)
+ }
+ err = validateSerializableOperations(lg, serializableOperations, replay)
+ if err != nil {
+ t.Errorf("Failed validating serializable operations, err: %s", err)
+ }
+ return visualize
+}
+
+type Config struct {
+ ExpectRevisionUnique bool
+}
+
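+// checkValidationAssumptions verifies the preconditions the validation depends on: an empty database
+// at the start of the test, persisted requests matching client requests, and no concurrent requests
+// from a single client.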
+func checkValidationAssumptions(reports []report.ClientReport, persistedRequests []model.EtcdRequest) error {
+ err := validateEmptyDatabaseAtStart(reports)
+ if err != nil {
+ return err
+ }
+
+ err = validatePersistedRequestMatchClientRequests(reports, persistedRequests)
+ if err != nil {
+ return err
+ }
+ err = validateNonConcurrentClientRequests(reports)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
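+// validateEmptyDatabaseAtStart checks that some client observed a successful write at revision 2,
+// implying the database was empty when the test started.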
+func validateEmptyDatabaseAtStart(reports []report.ClientReport) error {
+ for _, r := range reports {
+ for _, op := range r.KeyValue {
+ request := op.Input.(model.EtcdRequest)
+ response := op.Output.(model.MaybeEtcdResponse)
+ if response.Revision == 2 && !request.IsRead() {
+ return nil
+ }
+ }
+ }
+ return fmt.Errorf("non empty database at start or first write didn't succeed, required by model implementation")
+}
+
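+// validatePersistedRequestMatchClientRequests checks that every persisted write (except lease grants)
+// was sent by some client, and that the first and last successful client writes were persisted.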
+func validatePersistedRequestMatchClientRequests(reports []report.ClientReport, persistedRequests []model.EtcdRequest) error {
+ persistedRequestSet := map[string]model.EtcdRequest{}
+ for _, request := range persistedRequests {
+ data, err := json.Marshal(request)
+ if err != nil {
+ return err
+ }
+ persistedRequestSet[string(data)] = request
+ }
+ clientRequests := map[string]porcupine.Operation{}
+ for _, r := range reports {
+ for _, op := range r.KeyValue {
+ request := op.Input.(model.EtcdRequest)
+ data, err := json.Marshal(request)
+ if err != nil {
+ return err
+ }
+ clientRequests[string(data)] = op
+ }
+ }
+
+ for requestDump, request := range persistedRequestSet {
+ _, found := clientRequests[requestDump]
+ // We cannot validate whether a persisted leaseGrant was sent by a client, as a failed leaseGrant does not return a LeaseID to the client.
+ if request.Type == model.LeaseGrant {
+ continue
+ }
+
+ if !found {
+ return fmt.Errorf("request %+v was not sent by client, required to validate", requestDump)
+ }
+ }
+
+ var firstOp, lastOp porcupine.Operation
+ for _, r := range reports {
+ for _, op := range r.KeyValue {
+ request := op.Input.(model.EtcdRequest)
+ response := op.Output.(model.MaybeEtcdResponse)
+ if response.Error != "" || request.IsRead() {
+ continue
+ }
+ if firstOp.Call == 0 || op.Call < firstOp.Call {
+ firstOp = op
+ }
+ if lastOp.Call == 0 || op.Call > lastOp.Call {
+ lastOp = op
+ }
+ }
+ }
+ firstOpData, err := json.Marshal(firstOp.Input.(model.EtcdRequest))
+ if err != nil {
+ return err
+ }
+ _, found := persistedRequestSet[string(firstOpData)]
+ if !found {
+ return fmt.Errorf("first succesful client write %s was not persisted, required to validate", firstOpData)
+ }
+ lastOpData, err := json.Marshal(lastOp.Input.(model.EtcdRequest))
+ if err != nil {
+ return err
+ }
+ _, found = persistedRequestSet[string(lastOpData)]
+ if !found {
+ return fmt.Errorf("last succesful client write %s was not persisted, required to validate", lastOpData)
+ }
+ return nil
+}
+
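+// validateNonConcurrentClientRequests checks that each client issued its requests sequentially,
+// as operation linearization assumes requests from the same client never overlap in time.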
+func validateNonConcurrentClientRequests(reports []report.ClientReport) error {
+ lastClientRequestReturn := map[int]int64{}
+ for _, r := range reports {
+ for _, op := range r.KeyValue {
+ lastRequest := lastClientRequestReturn[op.ClientId]
+ if op.Call <= lastRequest {
+ return fmt.Errorf("client %d has concurrent request, required for operation linearization", op.ClientId)
+ }
+ if op.Return <= op.Call {
+ return fmt.Errorf("operation %v ends before it starts, required for operation linearization", op)
+ }
+ lastClientRequestReturn[op.ClientId] = op.Return
+ }
+ }
+ return nil
+}
diff --git a/tests/robustness/validate/validate_test.go b/tests/robustness/validate/validate_test.go
new file mode 100644
index 00000000000..14a3a3210ff
--- /dev/null
+++ b/tests/robustness/validate/validate_test.go
@@ -0,0 +1,1957 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//nolint:unparam
+package validate
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
+
+ "go.etcd.io/etcd/tests/v3/framework/testutils"
+ "go.etcd.io/etcd/tests/v3/robustness/model"
+ "go.etcd.io/etcd/tests/v3/robustness/report"
+)
+
+func TestDataReports(t *testing.T) {
+ testdataPath := testutils.MustAbsPath("../testdata/")
+ files, err := os.ReadDir(testdataPath)
+ require.NoError(t, err)
+ for _, file := range files {
+ if file.Name() == ".gitignore" {
+ continue
+ }
+ t.Run(file.Name(), func(t *testing.T) {
+ lg := zaptest.NewLogger(t)
+ path := filepath.Join(testdataPath, file.Name())
+ reports, err := report.LoadClientReports(path)
+ require.NoError(t, err)
+
+ persistedRequests, err := report.LoadClusterPersistedRequests(lg, path)
+ require.NoError(t, err)
+ visualize := ValidateAndReturnVisualize(t, zaptest.NewLogger(t), Config{}, reports, persistedRequests, 5*time.Minute)
+
+ err = visualize(filepath.Join(path, "history.html"))
+ require.NoError(t, err)
+ })
+ }
+}
+
+func TestValidateWatch(t *testing.T) {
+ tcs := []struct {
+ name string
+ config Config
+ reports []report.ClientReport
+ persistedRequests []model.EtcdRequest
+ expectError string
+ }{
+ {
+ name: "Ordered, Unique - ordered unique events in one response - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("b", "2", 3, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ },
+ },
+ {
+ name: "Ordered, Unique - unique ordered events in separate response - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ },
+ },
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("b", "2", 3, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ },
+ },
+ {
+ name: "Ordered - unordered events in one response - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("b", "2", 3, true),
+ putWatchEvent("a", "1", 2, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ },
+ expectError: errBrokeOrdered.Error(),
+ },
+ {
+ name: "Ordered - unordered events in separate response - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("b", "2", 3, true),
+ },
+ },
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ },
+ expectError: errBrokeOrdered.Error(),
+ },
+ {
+ name: "Ordered - unordered events in separate watch - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "b",
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("b", "2", 3, true),
+ },
+ },
+ },
+ },
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ },
+ },
+ {
+ name: "Unique - duplicated events in one response - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("a", "2", 2, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ },
+ expectError: errBrokeUnique.Error(),
+ },
+ {
+ name: "Unique - duplicated events in separate responses - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ },
+ },
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "2", 2, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ expectError: errBrokeUnique.Error(),
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ },
+ },
+ {
+ name: "Unique - duplicated events in watch requests - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ },
+ },
+ },
+ },
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ },
+ },
+ {
+ name: "Unique, Atomic - duplicated revision in one response - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("b", "2", 2, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ {
+ Type: model.Txn,
+ LeaseGrant: nil,
+ LeaseRevoke: nil,
+ Range: nil,
+ Txn: &model.TxnRequest{
+ Conditions: nil,
+ OperationsOnSuccess: []model.EtcdOperation{
+ {
+ Type: model.PutOperation,
+ Put: model.PutOptions{
+ Key: "a",
+ Value: model.ToValueOrHash("1"),
+ },
+ },
+ {
+ Type: model.PutOperation,
+ Put: model.PutOptions{
+ Key: "b",
+ Value: model.ToValueOrHash("2"),
+ },
+ },
+ },
+ OperationsOnFailure: nil,
+ },
+ Defragment: nil,
+ },
+ },
+ },
+ {
+ name: "Unique - duplicated revision in separate watch request - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ },
+ },
+ },
+ },
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ },
+ },
+ {
+ name: "Unique revision - duplicated revision in one response - fail",
+ config: Config{ExpectRevisionUnique: true},
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("b", "2", 2, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ },
+ expectError: errBrokeUnique.Error(),
+ },
+ {
+ name: "Atomic - duplicated revision in one response - fail",
+ config: Config{ExpectRevisionUnique: true},
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("b", "2", 2, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ },
+ expectError: errBrokeUnique.Error(),
+ },
+ {
+ name: "Atomic - revision in separate responses - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ },
+ },
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("b", "2", 2, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ },
+ expectError: errBrokeAtomic.Error(),
+ },
+ {
+ name: "Resumable, Reliable, Bookmarkable - all events with watch revision and bookmark - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Revision: 2,
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("b", "2", 3, true),
+ putWatchEvent("c", "3", 4, true),
+ },
+ },
+ {
+ Revision: 4,
+ IsProgressNotify: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ },
+ {
+ name: "Resumable, Reliable, Bookmarkable - all events with only bookmarks - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Revision: 1,
+ IsProgressNotify: true,
+ },
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("b", "2", 3, true),
+ putWatchEvent("c", "3", 4, true),
+ },
+ },
+ {
+ Revision: 4,
+ IsProgressNotify: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ },
+ {
+ name: "Resumable, Reliable, Bookmarkable - empty events without revision - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Revision: 4,
+ IsProgressNotify: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ },
+ {
+ name: "Resumable, Reliable, Bookmarkable - empty events with watch revision - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Revision: 2,
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Revision: 4,
+ IsProgressNotify: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ expectError: errBrokeReliable.Error(),
+ },
+ {
+ name: "Resumable, Reliable, Bookmarkable - unmatched events with watch revision - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "d",
+ Revision: 2,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Revision: 2,
+ IsProgressNotify: true,
+ },
+ {
+ Revision: 3,
+ IsProgressNotify: true,
+ },
+ {
+ Revision: 3,
+ IsProgressNotify: true,
+ },
+ {
+ Revision: 4,
+ IsProgressNotify: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ },
+ {
+ name: "Resumable, Reliable, Bookmarkable - empty events between progress notifies - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Revision: 1,
+ IsProgressNotify: true,
+ },
+ {
+ Revision: 4,
+ IsProgressNotify: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ expectError: errBrokeReliable.Error(),
+ },
+ {
+ name: "Resumable, Reliable, Bookmarkable - unmatched events between progress notifies - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "d",
+ },
+ Responses: []model.WatchResponse{
+ {
+ Revision: 2,
+ IsProgressNotify: true,
+ },
+ {
+ Revision: 3,
+ IsProgressNotify: true,
+ },
+ {
+ Revision: 3,
+ IsProgressNotify: true,
+ },
+ {
+ Revision: 4,
+ IsProgressNotify: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ },
+ {
+ name: "Bookmarkable - revision non decreasing - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Revision: 1,
+ IsProgressNotify: true,
+ },
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ },
+ },
+ {
+ Revision: 2,
+ IsProgressNotify: true,
+ },
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("b", "2", 3, true),
+ },
+ },
+ {
+ Revision: 3,
+ IsProgressNotify: true,
+ },
+ {
+ Revision: 3,
+ IsProgressNotify: true,
+ },
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("c", "3", 4, true),
+ },
+ },
+ {
+ Revision: 4,
+ IsProgressNotify: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ },
+ {
+ name: "Bookmarkable - event precedes progress - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("b", "2", 3, true),
+ putWatchEvent("c", "3", 4, true),
+ },
+ },
+ {
+ Revision: 3,
+ IsProgressNotify: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ expectError: errBrokeBookmarkable.Error(),
+ },
+ {
+ name: "Bookmarkable - progress precedes event - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("b", "2", 3, true),
+ },
+ },
+ {
+ Revision: 4,
+ IsProgressNotify: true,
+ },
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("c", "3", 4, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ expectError: errBrokeBookmarkable.Error(),
+ },
+ {
+ name: "Bookmarkable - progress precedes other progress - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ IsProgressNotify: true,
+ Revision: 2,
+ },
+ {
+ IsProgressNotify: true,
+ Revision: 1,
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{},
+ expectError: errBrokeBookmarkable.Error(),
+ },
+ {
+ name: "Bookmarkable - progress notification lower than watch request - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Revision: 3,
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ IsProgressNotify: true,
+ Revision: 2,
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ },
+ },
+ {
+ name: "Bookmarkable - empty event history - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ IsProgressNotify: true,
+ Revision: 1,
+ },
+ {
+ IsProgressNotify: true,
+ Revision: 1,
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{},
+ },
+ {
+ name: "Reliable - missing event before bookmark - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("b", "2", 3, true),
+ },
+ },
+ {
+ Revision: 4,
+ IsProgressNotify: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ expectError: errBrokeReliable.Error(),
+ },
+ {
+ name: "Reliable - missing event matching watch before bookmark - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ },
+ },
+ {
+ Revision: 4,
+ IsProgressNotify: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("a", "2"),
+ putRequest("c", "3"),
+ },
+ expectError: errBrokeReliable.Error(),
+ },
+ {
+ name: "Reliable - missing event matching watch with prefix before bookmark - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("aa", "1", 2, true),
+ },
+ },
+ {
+ Revision: 4,
+ IsProgressNotify: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("aa", "1"),
+ putRequest("ab", "2"),
+ putRequest("cc", "3"),
+ },
+ expectError: errBrokeReliable.Error(),
+ },
+ {
+ name: "Reliable - all events history - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("b", "2", 3, true),
+ putWatchEvent("c", "3", 4, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ expectError: "",
+ },
+ {
+ name: "Reliable - single revision - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ },
+ expectError: "",
+ },
+ {
+ name: "Reliable - single revision with watch revision - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ Revision: 2,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ },
+ expectError: "",
+ },
+ {
+ name: "Reliable - missing single revision with watch revision - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ Revision: 2,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{},
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ },
+ expectError: "",
+ },
+ {
+ name: "Reliable - single revision with progress notify - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ },
+ },
+ {
+ IsProgressNotify: true,
+ Revision: 2,
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ },
+ expectError: "",
+ },
+ {
+ name: "Reliable - single revision missing with progress notify - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ Revision: 2,
+ },
+ Responses: []model.WatchResponse{
+ {
+ IsProgressNotify: true,
+ Revision: 2,
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ },
+ expectError: errBrokeReliable.Error(),
+ },
+ {
+ name: "Reliable - missing middle event - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("c", "3", 4, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ expectError: errBrokeReliable.Error(),
+ },
+ {
+ name: "Reliable - middle event doesn't match request - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("a", "3", 4, false),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("ab", "2"),
+ putRequest("a", "3"),
+ },
+ },
+ {
+ name: "Reliable - middle event doesn't match request with prefix - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("aa", "1", 2, true),
+ putWatchEvent("ac", "3", 4, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("aa", "1"),
+ putRequest("bb", "2"),
+ putRequest("ac", "3"),
+ },
+ },
+ {
+ name: "Reliable, Resumable - missing first event - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("b", "2", 3, true),
+ putWatchEvent("c", "3", 4, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ },
+ {
+ name: "Reliable - missing last event - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("b", "2", 3, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ },
+ {
+ name: "Reliable - ignore empty last error response - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("b", "2", 3, true),
+ putWatchEvent("c", "3", 4, true),
+ },
+ },
+ {
+ Revision: 5,
+ Error: "etcdserver: mvcc: required revision has been compacted",
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ },
+ {
+ name: "Resumable - watch revision from middle event - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ Revision: 3,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("b", "2", 3, true),
+ putWatchEvent("c", "3", 4, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ },
+ {
+ name: "Resumable - watch key from middle event - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "b",
+ Revision: 2,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("b", "2", 3, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ },
+ {
+ name: "Resumable - watch key with prefix from middle event - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "b",
+ Revision: 2,
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("bb", "2", 3, true),
+ putWatchEvent("bc", "3", 4, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("bb", "2"),
+ putRequest("bc", "3"),
+ },
+ },
+ {
+ name: "Resumable - missing first matching event - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ Revision: 3,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("c", "3", 4, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("c", "3"),
+ },
+ expectError: errBrokeResumable.Error(),
+ },
+ {
+ name: "Resumable - missing first matching event with prefix - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "b",
+ Revision: 2,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("b", "3", 4, false),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ putRequest("b", "3"),
+ },
+ expectError: errBrokeResumable.Error(),
+ },
+ {
+ name: "Resumable - missing first matching event with prefix - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "b",
+ WithPrefix: true,
+ Revision: 2,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("bc", "3", 4, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("bb", "2"),
+ putRequest("bc", "3"),
+ },
+ expectError: errBrokeResumable.Error(),
+ },
+ {
+ name: "IsCreate - correct IsCreate values - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("a", "2", 3, false),
+ deleteWatchEvent("a", 4),
+ putWatchEvent("a", "4", 5, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("a", "2"),
+ deleteRequest("a"),
+ putRequest("a", "4"),
+ },
+ },
+ {
+ name: "IsCreate - second put marked as created - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("a", "2", 3, true),
+ deleteWatchEvent("a", 4),
+ putWatchEvent("a", "4", 5, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("a", "2"),
+ deleteRequest("a"),
+ putRequest("a", "4"),
+ },
+ expectError: errBrokeIsCreate.Error(),
+ },
+ {
+ name: "IsCreate - put after delete marked as not created - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("a", "2", 3, false),
+ deleteWatchEvent("a", 4),
+ putWatchEvent("a", "4", 5, false),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("a", "2"),
+ deleteRequest("a"),
+ putRequest("a", "4"),
+ },
+ expectError: errBrokeIsCreate.Error(),
+ },
+ {
+ name: "PrevKV - no previous values - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ WithPrevKV: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("a", "2", 3, false),
+ deleteWatchEvent("a", 4),
+ putWatchEvent("a", "4", 5, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("a", "2"),
+ deleteRequest("a"),
+ putRequest("a", "4"),
+ },
+ },
+ {
+ name: "PrevKV - all previous values - pass",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ WithPrevKV: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEventWithPrevKV("a", "2", 3, false, "1", 2),
+ deleteWatchEventWithPrevKV("a", 4, "2", 3),
+ putWatchEvent("a", "4", 5, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("a", "2"),
+ deleteRequest("a"),
+ putRequest("a", "4"),
+ },
+ },
+ {
+ name: "PrevKV - mismatch value on put - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ WithPrevKV: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEventWithPrevKV("a", "2", 3, false, "2", 2),
+ deleteWatchEventWithPrevKV("a", 4, "2", 3),
+ putWatchEvent("a", "4", 5, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("a", "2"),
+ deleteRequest("a"),
+ putRequest("a", "4"),
+ },
+ expectError: errBrokePrevKV.Error(),
+ },
+ {
+ name: "PrevKV - mismatch revision on put - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ WithPrevKV: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEventWithPrevKV("a", "2", 3, false, "1", 3),
+ deleteWatchEventWithPrevKV("a", 4, "2", 3),
+ putWatchEvent("a", "4", 5, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("a", "2"),
+ deleteRequest("a"),
+ putRequest("a", "4"),
+ },
+ expectError: errBrokePrevKV.Error(),
+ },
+ {
+ name: "PrevKV - mismatch value on delete - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ WithPrevKV: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEventWithPrevKV("a", "2", 3, false, "1", 2),
+ deleteWatchEventWithPrevKV("a", 4, "1", 3),
+ putWatchEvent("a", "4", 5, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("a", "2"),
+ deleteRequest("a"),
+ putRequest("a", "4"),
+ },
+ expectError: errBrokePrevKV.Error(),
+ },
+ {
+ name: "PrevKV - mismatch revision on delete - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ WithPrefix: true,
+ WithPrevKV: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEventWithPrevKV("a", "2", 3, false, "1", 2),
+ deleteWatchEventWithPrevKV("a", 4, "2", 2),
+ putWatchEvent("a", "4", 5, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("a", "2"),
+ deleteRequest("a"),
+ putRequest("a", "4"),
+ },
+ expectError: errBrokePrevKV.Error(),
+ },
+ {
+ name: "Filter - event not matching the watch - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("a", "1", 2, true),
+ putWatchEvent("b", "2", 3, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("a", "1"),
+ putRequest("b", "2"),
+ },
+ expectError: errBrokeFilter.Error(),
+ },
+ {
+ name: "Filter - event not matching the watch with prefix - fail",
+ reports: []report.ClientReport{
+ {
+ Watch: []model.WatchOperation{
+ {
+ Request: model.WatchRequest{
+ Key: "a",
+ WithPrefix: true,
+ },
+ Responses: []model.WatchResponse{
+ {
+ Events: []model.WatchEvent{
+ putWatchEvent("aa", "1", 2, true),
+ putWatchEvent("bb", "2", 3, true),
+ putWatchEvent("ac", "3", 4, true),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ persistedRequests: []model.EtcdRequest{
+ putRequest("aa", "1"),
+ putRequest("bb", "2"),
+ putRequest("ac", "3"),
+ },
+ expectError: errBrokeFilter.Error(),
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ replay := model.NewReplay(tc.persistedRequests)
+ err := validateWatch(zaptest.NewLogger(t), tc.config, tc.reports, replay)
+ var errStr string
+ if err != nil {
+ errStr = err.Error()
+ }
+ if errStr != tc.expectError {
+ t.Errorf("validateWatch(...), got: %q, want: %q", err, tc.expectError)
+ }
+ })
+ }
+}
+
+func putWatchEvent(key, value string, rev int64, isCreate bool) model.WatchEvent {
+ return model.WatchEvent{
+ PersistedEvent: putPersistedEvent(key, value, rev, isCreate),
+ }
+}
+
+func deleteWatchEvent(key string, rev int64) model.WatchEvent {
+ return model.WatchEvent{
+ PersistedEvent: deletePersistedEvent(key, rev),
+ }
+}
+
+func putWatchEventWithPrevKV(key, value string, rev int64, isCreate bool, prevValue string, modRev int64) model.WatchEvent {
+ return model.WatchEvent{
+ PersistedEvent: putPersistedEvent(key, value, rev, isCreate),
+ PrevValue: &model.ValueRevision{
+ Value: model.ToValueOrHash(prevValue),
+ ModRevision: modRev,
+ },
+ }
+}
+
+func deleteWatchEventWithPrevKV(key string, rev int64, prevValue string, modRev int64) model.WatchEvent {
+ return model.WatchEvent{
+ PersistedEvent: deletePersistedEvent(key, rev),
+ PrevValue: &model.ValueRevision{
+ Value: model.ToValueOrHash(prevValue),
+ ModRevision: modRev,
+ },
+ }
+}
+
+func putPersistedEvent(key, value string, rev int64, isCreate bool) model.PersistedEvent {
+ return model.PersistedEvent{
+ Event: model.Event{
+ Type: model.PutOperation,
+ Key: key,
+ Value: model.ToValueOrHash(value),
+ },
+ Revision: rev,
+ IsCreate: isCreate,
+ }
+}
+
+func deletePersistedEvent(key string, rev int64) model.PersistedEvent {
+ return model.PersistedEvent{
+ Event: model.Event{
+ Type: model.DeleteOperation,
+ Key: key,
+ },
+ Revision: rev,
+ }
+}
+
+func putRequest(key, value string) model.EtcdRequest {
+ return model.EtcdRequest{
+ Type: model.Txn,
+ LeaseGrant: nil,
+ LeaseRevoke: nil,
+ Range: nil,
+ Txn: &model.TxnRequest{
+ Conditions: nil,
+ OperationsOnSuccess: []model.EtcdOperation{
+ {
+ Type: model.PutOperation,
+ Put: model.PutOptions{
+ Key: key,
+ Value: model.ToValueOrHash(value),
+ },
+ },
+ },
+ OperationsOnFailure: nil,
+ },
+ Defragment: nil,
+ }
+}
+
+func putRequestWithLease(key, value string, leaseID int64) model.EtcdRequest {
+ req := putRequest(key, value)
+ req.Txn.OperationsOnSuccess[0].Put.LeaseID = leaseID
+ return req
+}
+
+func deleteRequest(key string) model.EtcdRequest {
+ return model.EtcdRequest{
+ Type: model.Txn,
+ LeaseGrant: nil,
+ LeaseRevoke: nil,
+ Range: nil,
+ Txn: &model.TxnRequest{
+ Conditions: nil,
+ OperationsOnSuccess: []model.EtcdOperation{
+ {
+ Type: model.DeleteOperation,
+ Delete: model.DeleteOptions{
+ Key: key,
+ },
+ },
+ },
+ OperationsOnFailure: nil,
+ },
+ Defragment: nil,
+ }
+}
diff --git a/tests/robustness/validate/watch.go b/tests/robustness/validate/watch.go
new file mode 100644
index 00000000000..506cbeca431
--- /dev/null
+++ b/tests/robustness/validate/watch.go
@@ -0,0 +1,334 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "errors"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/tests/v3/robustness/model"
+ "go.etcd.io/etcd/tests/v3/robustness/report"
+)
+
+var (
+ errBrokeBookmarkable = errors.New("broke Bookmarkable - Progress notification events guarantee that all events up to a revision have been already delivered")
+ errBrokeOrdered = errors.New("broke Ordered - events are ordered by revision; an event will never appear on a watch if it precedes an event in time that has already been posted")
+ errBrokeUnique = errors.New("broke Unique - an event will never appear on a watch twice")
+ errBrokeAtomic = errors.New("broke Atomic - a list of events is guaranteed to encompass complete revisions; updates in the same revision over multiple keys will not be split over several lists of events")
+ errBrokeReliable = errors.New("broke Reliable - a sequence of events will never drop any subsequence of events; if there are events ordered in time as a < b < c, then if the watch receives events a and c, it is guaranteed to receive b")
+ errBrokeResumable = errors.New("broke Resumable - A broken watch can be resumed by establishing a new watch starting after the last revision received in a watch event before the break, so long as the revision is in the history window")
+ errBrokePrevKV = errors.New("incorrect event prevValue")
+ errBrokeIsCreate = errors.New("incorrect event IsCreate")
+ errBrokeFilter = errors.New("event not matching watch filter")
+)
+
+func validateWatch(lg *zap.Logger, cfg Config, reports []report.ClientReport, replay *model.EtcdReplay) error {
+ lg.Info("Validating watch")
+ // Validate etcd watch properties defined in https://etcd.io/docs/v3.6/learning/api_guarantees/#watch-apis
+ for _, r := range reports {
+ err := validateFilter(lg, r)
+ if err != nil {
+ return err
+ }
+ err = validateOrdered(lg, r)
+ if err != nil {
+ return err
+ }
+ err = validateUnique(lg, cfg.ExpectRevisionUnique, r)
+ if err != nil {
+ return err
+ }
+ err = validateAtomic(lg, r)
+ if err != nil {
+ return err
+ }
+ err = validateBookmarkable(lg, r)
+ if err != nil {
+ return err
+ }
+ err = validateResumable(lg, replay, r)
+ if err != nil {
+ return err
+ }
+ err = validateReliable(lg, replay, r)
+ if err != nil {
+ return err
+ }
+ err = validatePrevKV(lg, replay, r)
+ if err != nil {
+ return err
+ }
+ err = validateIsCreate(lg, replay, r)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
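+// validateFilter checks that every event delivered on a watch matches the key or prefix
+// requested by that watch.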
+func validateFilter(lg *zap.Logger, report report.ClientReport) (err error) {
+ for _, watch := range report.Watch {
+ for _, resp := range watch.Responses {
+ for _, event := range resp.Events {
+ if !event.Match(watch.Request) {
+ lg.Error("event not matching event filter", zap.Int("client", report.ClientID), zap.Any("request", watch.Request), zap.Any("event", event))
+ err = errBrokeFilter
+ }
+ }
+ }
+ }
+ return err
+}
+
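+// validateBookmarkable checks that no event arrives with a revision at or below the latest
+// progress notification, and that progress notifications never report a lower revision than
+// previously delivered notifications or events.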
+func validateBookmarkable(lg *zap.Logger, report report.ClientReport) (err error) {
+ for _, op := range report.Watch {
+ var lastProgressNotifyRevision int64
+ var lastEventRevision int64
+ for _, resp := range op.Responses {
+ for _, event := range resp.Events {
+ if event.Revision <= lastProgressNotifyRevision {
+ lg.Error("Broke watch guarantee", zap.String("guarantee", "bookmarkable"), zap.Int("client", report.ClientID), zap.Int64("revision", event.Revision))
+ err = errBrokeBookmarkable
+ }
+ lastEventRevision = event.Revision
+ }
+ if resp.IsProgressNotify {
+ if resp.Revision < lastProgressNotifyRevision {
+ lg.Error("Broke watch guarantee", zap.String("guarantee", "bookmarkable"), zap.Int("client", report.ClientID), zap.Int64("revision", resp.Revision))
+ err = errBrokeBookmarkable
+ }
+ if resp.Revision < lastEventRevision {
+ lg.Error("Broke watch guarantee", zap.String("guarantee", "bookmarkable"), zap.Int("client", report.ClientID), zap.Int64("revision", resp.Revision))
+ err = errBrokeBookmarkable
+ }
+ lastProgressNotifyRevision = resp.Revision
+ }
+ }
+ }
+ return err
+}
+
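+// validateOrdered checks that events within a single watch arrive in non-decreasing revision order.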
+func validateOrdered(lg *zap.Logger, report report.ClientReport) (err error) {
+ for _, op := range report.Watch {
+ var lastEventRevision int64 = 1
+ for _, resp := range op.Responses {
+ for _, event := range resp.Events {
+ if event.Revision < lastEventRevision {
+ lg.Error("Broke watch guarantee", zap.String("guarantee", "ordered"), zap.Int("client", report.ClientID), zap.Int64("revision", event.Revision))
+ err = errBrokeOrdered
+ }
+ lastEventRevision = event.Revision
+ }
+ }
+ }
+ return err
+}
+
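+// validateUnique checks that no event is delivered twice on the same watch. With
+// expectUniqueRevision it additionally requires every event revision to be unique.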
+func validateUnique(lg *zap.Logger, expectUniqueRevision bool, report report.ClientReport) (err error) {
+ for _, op := range report.Watch {
+ uniqueOperations := map[any]struct{}{}
+ for _, resp := range op.Responses {
+ for _, event := range resp.Events {
+ var key any
+ if expectUniqueRevision {
+ key = event.Revision
+ } else {
+ key = struct {
+ revision int64
+ key string
+ }{event.Revision, event.Key}
+ }
+ if _, found := uniqueOperations[key]; found {
+ lg.Error("Broke watch guarantee", zap.String("guarantee", "unique"), zap.Int("client", report.ClientID), zap.String("key", event.Key), zap.Int64("revision", event.Revision))
+ err = errBrokeUnique
+ }
+ uniqueOperations[key] = struct{}{}
+ }
+ }
+ }
+ return err
+}
+
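+// validateAtomic checks that events belonging to a single revision are never split across
+// multiple watch responses.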
+func validateAtomic(lg *zap.Logger, report report.ClientReport) (err error) {
+ for _, op := range report.Watch {
+ var lastEventRevision int64 = 1
+ for _, resp := range op.Responses {
+ if len(resp.Events) > 0 {
+ if resp.Events[0].Revision == lastEventRevision {
+ lg.Error("Broke watch guarantee", zap.String("guarantee", "atomic"), zap.Int("client", report.ClientID), zap.Int64("revision", resp.Events[0].Revision))
+ err = errBrokeAtomic
+ }
+ lastEventRevision = resp.Events[len(resp.Events)-1].Revision
+ }
+ }
+ }
+ return err
+}
+
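+// validateReliable replays the persisted requests and checks that the watch delivered every
+// matching event between the first and last revision it observed, without gaps.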
+func validateReliable(lg *zap.Logger, replay *model.EtcdReplay, report report.ClientReport) (err error) {
+ for _, watch := range report.Watch {
+ firstRev := firstExpectedRevision(watch)
+ lastRev := lastRevision(watch)
+ events := replay.EventsForWatch(watch.Request)
+ wantEvents := []model.PersistedEvent{}
+ if firstRev != 0 {
+ for _, e := range events {
+ if e.Revision < firstRev {
+ continue
+ }
+ if e.Revision > lastRev {
+ break
+ }
+ if e.Match(watch.Request) {
+ wantEvents = append(wantEvents, e)
+ }
+ }
+ }
+ gotEvents := make([]model.PersistedEvent, 0)
+ for _, resp := range watch.Responses {
+ for _, event := range resp.Events {
+ gotEvents = append(gotEvents, event.PersistedEvent)
+ }
+ }
+ if diff := cmp.Diff(wantEvents, gotEvents, cmpopts.IgnoreFields(model.PersistedEvent{}, "IsCreate")); diff != "" {
+ lg.Error("Broke watch guarantee", zap.String("guarantee", "reliable"), zap.Int("client", report.ClientID), zap.String("diff", diff))
+ err = errBrokeReliable
+ }
+ }
+ return err
+}
+
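+// validateResumable checks that a watch opened at a specific revision receives, as its first
+// event, the first matching event at or after that revision.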
+func validateResumable(lg *zap.Logger, replay *model.EtcdReplay, report report.ClientReport) (err error) {
+ for _, watch := range report.Watch {
+ if watch.Request.Revision == 0 {
+ continue
+ }
+ events := replay.EventsForWatch(watch.Request)
+ index := 0
+ for index < len(events) && (events[index].Revision < watch.Request.Revision || !events[index].Match(watch.Request)) {
+ index++
+ }
+ if index == len(events) {
+ continue
+ }
+ firstEvent := firstWatchEvent(watch)
+ // If watch is resumable, the first event it gets should be the first matching event that happened at or after the requested revision.
+ if firstEvent != nil && events[index] != firstEvent.PersistedEvent {
+ lg.Error("Broke watch guarantee", zap.String("guarantee", "resumable"), zap.Int("client", report.ClientID), zap.Any("request", watch.Request), zap.Any("got-event", *firstEvent), zap.Any("want-event", events[index]))
+ err = errBrokeResumable
+ }
+ }
+ return err
+}
+
+// validatePrevKV ensures that events on a watch opened with WithPrevKV() carry the correct
+// previous key-value pair.
+func validatePrevKV(lg *zap.Logger, replay *model.EtcdReplay, report report.ClientReport) (err error) {
+ for _, op := range report.Watch {
+ if !op.Request.WithPrevKV {
+ continue
+ }
+ for _, resp := range op.Responses {
+ for _, event := range resp.Events {
+ // Get the state just before the current event.
+ state, err2 := replay.StateForRevision(event.Revision - 1)
+ if err2 != nil {
+ panic(err2)
+ }
+ // TODO(MadhavJivrajani): check if compaction has been run as part
+ // of failpoint injection. If compaction has run, prevKV can be nil
+ // even if it is not a create event.
+ //
+ // Considering that Kubernetes opens watches to etcd using WithPrevKV()
+ // option, ideally we would want to explicitly check the condition that
+ // Kubernetes does while parsing events received from etcd:
+ // https://github.com/kubernetes/kubernetes/blob/a9e4f5b7862e84c4152eabe2e960f3f6fb9a4867/staging/src/k8s.io/apiserver/pkg/storage/etcd3/event.go#L59
+ // i.e. prevKV is nil iff the event is a create event, we cannot reliably
+ // check that without knowing if compaction has run.
+
+ // We allow PrevValue to be nil since in the face of compaction, etcd does not
+ // guarantee its presence.
+ if event.PrevValue != nil && *event.PrevValue != state.KeyValues[event.Key] {
+ lg.Error("Incorrect event prevValue field", zap.Int("client", report.ClientID), zap.Any("event", event), zap.Any("previousValue", state.KeyValues[event.Key]))
+ err = errBrokePrevKV
+ }
+ }
+ }
+ }
+ return err
+}
+
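+// validateIsCreate replays history to determine whether each key existed just before the event
+// and checks that the event's IsCreate flag matches.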
+func validateIsCreate(lg *zap.Logger, replay *model.EtcdReplay, report report.ClientReport) (err error) {
+ for _, op := range report.Watch {
+ for _, resp := range op.Responses {
+ for _, event := range resp.Events {
+ // Get the state just before the current event.
+ state, err2 := replay.StateForRevision(event.Revision - 1)
+ if err2 != nil {
+ panic(err2)
+ }
+ // A create event will not have an entry in our history and a non-create
+ // event *should* have an entry in our history.
+ if _, prevKeyExists := state.KeyValues[event.Key]; event.IsCreate == prevKeyExists {
+ lg.Error("Incorrect event IsCreate field", zap.Int("client", report.ClientID), zap.Any("event", event))
+ err = errBrokeIsCreate
+ }
+ }
+ }
+ }
+ return err
+}
+
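+// firstExpectedRevision returns the revision from which the watch is expected to have delivered
+// events, taken from the watch request or, if unset, from the first response.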
+func firstExpectedRevision(op model.WatchOperation) int64 {
+ if op.Request.Revision != 0 {
+ return op.Request.Revision
+ }
+ if len(op.Responses) > 0 {
+ firstResp := op.Responses[0]
+ if firstResp.IsProgressNotify {
+ return firstResp.Revision + 1
+ }
+ if len(firstResp.Events) > 0 {
+ return firstResp.Events[0].Revision
+ }
+ }
+ return 0
+}
+
+func lastRevision(op model.WatchOperation) int64 {
+ for i := len(op.Responses) - 1; i >= 0; i-- {
+ resp := op.Responses[i]
+ if resp.IsProgressNotify {
+ return resp.Revision
+ }
+ if len(resp.Events) > 0 {
+ lastEvent := resp.Events[len(resp.Events)-1]
+ return lastEvent.Revision
+ }
+ }
+ return 0
+}
+
+func firstWatchEvent(op model.WatchOperation) *model.WatchEvent {
+ for _, resp := range op.Responses {
+ for _, event := range resp.Events {
+ return &event
+ }
+ }
+ return nil
+}
diff --git a/tools/.golangci.yaml b/tools/.golangci.yaml
new file mode 100644
index 00000000000..3ea8ab82302
--- /dev/null
+++ b/tools/.golangci.yaml
@@ -0,0 +1,131 @@
+---
+run:
+ timeout: 30m
+issues:
+ max-same-issues: 0
+ # Excluding configuration per-path, per-linter, per-text and per-source
+ exclude-rules:
+ # exclude the ineffassign linter for generated conversion files
+ - path: conversion\.go
+ linters: [ineffassign]
+ - text: "S1000" # TODO: Fix me
+ linters:
+ - gosimple
+ exclude-files:
+ - ^zz_generated.*
+linters:
+ disable-all: true
+ enable: # please keep this alphabetized
+ # Don't use the soon-to-be-deprecated[1] linters below; they lead to false positives
+ # https://github.com/golangci/golangci-lint/issues/1841
+ # - deadcode
+ # - structcheck
+ # - varcheck
+ - errorlint
+ - gofumpt
+ - goimports
+ - gosimple
+ - ineffassign
+ - nakedret
+ - revive
+ - staticcheck
+ - stylecheck
+ - tenv
+ - testifylint
+ - unconvert # Remove unnecessary type conversions
+ - unparam
+ - unused
+ - usestdlibvars
+ - whitespace
+linters-settings: # please keep this alphabetized
+ goimports:
+ local-prefixes: go.etcd.io # Put imports beginning with this prefix after third-party packages.
+ nakedret:
+ # Align with https://github.com/alexkohler/nakedret/blob/v1.0.2/cmd/nakedret/main.go#L10
+ max-func-lines: 5
+ revive:
+ ignore-generated-header: false
+ severity: error
+ confidence: 0.8
+ enable-all-rules: false
+ rules:
+ - name: blank-imports
+ severity: error
+ disabled: false
+ - name: context-as-argument
+ severity: error
+ disabled: false
+ - name: dot-imports
+ severity: error
+ disabled: false
+ - name: error-return
+ severity: error
+ disabled: false
+ - name: error-naming
+ severity: error
+ disabled: false
+ - name: if-return
+ severity: error
+ disabled: false
+ - name: increment-decrement
+ severity: error
+ disabled: false
+ - name: var-declaration
+ severity: error
+ disabled: false
+ - name: package-comments
+ severity: error
+ disabled: false
+ - name: range
+ severity: error
+ disabled: false
+ - name: receiver-naming
+ severity: error
+ disabled: false
+ - name: time-naming
+ severity: error
+ disabled: false
+ - name: indent-error-flow
+ severity: error
+ disabled: false
+ - name: errorf
+ severity: error
+ disabled: false
+ - name: context-keys-type
+ severity: error
+ disabled: false
+ - name: error-strings
+ severity: error
+ disabled: false
+ - name: var-naming
+ disabled: false
+ arguments:
+ # The following is the configuration for the var-naming rule: the first element is the allow list and the second element is the deny list.
+ - [] # AllowList: leave it empty to use the default (also empty). This means we're not relaxing the rule in any way; e.g. elementId will raise a violation because it should be elementID. Refer to the next line for the list of denied initialisms.
+ - ["GRPC", "WAL"] # DenyList: add GRPC and WAL to tighten the rule, disallowing instances like Wal or Grpc. The default values are located at commonInitialisms, refer to: https://github.com/mgechev/revive/blob/v1.3.7/lint/utils.go#L93-L133.
+ # TODO: enable the following rules
+ - name: exported
+ disabled: true
+ - name: unexported-return
+ disabled: true
+ staticcheck:
+ checks:
+ - all
+ - -SA1019 # TODO(fix) Using a deprecated function, variable, constant or field
+ - -SA2002 # TODO(fix) Called testing.T.FailNow or SkipNow in a goroutine, which isn't allowed
+ stylecheck:
+ checks:
+ - ST1019 # Importing the same package multiple times.
+ testifylint:
+ enable-all: true
+ formatter:
+ # Require f-assertions (e.g. assert.Equalf) if a message is passed to the assertion, even if
+ # there are no variable-length arguments, i.e. require require.NoErrorf for both cases below:
+ # require.NoErrorf(t, err, "whatever message")
+ # require.NoErrorf(t, err, "whatever message: %v", v)
+ #
+ # Note that from a Go programming perspective, we still prefer non-f functions (i.e. fmt.Print)
+ # to f-functions (i.e. fmt.Printf) when there are no variable-length arguments. It's accepted
+ # to always require f-functions for stretchr/testify, but not for the Go standard library.
+ # Also refer to https://github.com/etcd-io/etcd/pull/18741#issuecomment-2422395914
+ require-f-funcs: true
diff --git a/tools/.yamlfmt b/tools/.yamlfmt
new file mode 100644
index 00000000000..2e48d5dee26
--- /dev/null
+++ b/tools/.yamlfmt
@@ -0,0 +1,4 @@
+formatter:
+ type: basic
+ include_document_start: true
+ retain_line_breaks: true
diff --git a/tools/.yamllint b/tools/.yamllint
new file mode 100644
index 00000000000..cc4dcc4bb1e
--- /dev/null
+++ b/tools/.yamllint
@@ -0,0 +1,7 @@
+---
+
+extends: default
+rules:
+ line-length: disable
+ truthy: disable
+ comments: disable
diff --git a/tools/OWNERS b/tools/OWNERS
new file mode 100644
index 00000000000..a7d60912364
--- /dev/null
+++ b/tools/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+ - area/tooling
diff --git a/tools/benchmark/.gitignore b/tools/benchmark/.gitignore
deleted file mode 100644
index d83a1b2ff5c..00000000000
--- a/tools/benchmark/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-benchmark
diff --git a/tools/benchmark/OWNERS b/tools/benchmark/OWNERS
new file mode 100644
index 00000000000..aaec68c4139
--- /dev/null
+++ b/tools/benchmark/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+ - area/performance
diff --git a/tools/benchmark/README.md b/tools/benchmark/README.md
index 4af449d3796..c3dbe919ee8 100644
--- a/tools/benchmark/README.md
+++ b/tools/benchmark/README.md
@@ -2,11 +2,30 @@
`etcd/tools/benchmark` is the official benchmarking tool for etcd clusters.
-## Download and install
-To get `benchmark` from the `main` branch via `go get`:
-```sh
-$ go get go.etcd.io/etcd/tools/benchmark
-# GOPATH should be set
-$ ls $GOPATH/bin
-benchmark
+## Installation
+
+Install the tool by running the following command from the etcd source directory.
+
+```
+ $ go install -v ./tools/benchmark
+```
+
+The installation will place the executable in $GOPATH/bin. If the $GOPATH environment variable is not set, the tool will be installed into $HOME/go/bin. You can also find the installed location by running the following command from the etcd source directory. Make sure that $PATH is set accordingly in your environment.
+
+```
+ $ go list -f "{{.Target}}" ./tools/benchmark
+```
+
+Alternatively, instead of installing the tool, you can use it by simply running the following command from the etcd source directory.
+
+```
+ $ go run ./tools/benchmark
+```
+
+## Usage
+
+The following command prints the usage for the current development version.
+
+```
+ $ benchmark --help
```
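+
+As a minimal sketch (assuming an etcd cluster is reachable at http://127.0.0.1:2379 and using the root `--endpoints` flag; adjust endpoints and totals to your environment), the range and mixed-txn benchmarks can be exercised as follows:
+
+```
+ $ benchmark --endpoints=http://127.0.0.1:2379 range foo --consistency=s --count-only --total=10000
+ $ benchmark --endpoints=http://127.0.0.1:2379 txn-mixed foo --rw-ratio=1 --limit=100 --total=10000
+```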
diff --git a/tools/benchmark/cmd/lease.go b/tools/benchmark/cmd/lease.go
index a5de62b4536..5ed3dbe4b3e 100644
--- a/tools/benchmark/cmd/lease.go
+++ b/tools/benchmark/cmd/lease.go
@@ -19,11 +19,11 @@ import (
"fmt"
"time"
+ "github.com/cheggaaa/pb/v3"
+ "github.com/spf13/cobra"
+
v3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/report"
-
- "github.com/spf13/cobra"
- "gopkg.in/cheggaaa/pb.v1"
)
var leaseKeepaliveCmd = &cobra.Command{
@@ -33,21 +33,18 @@ var leaseKeepaliveCmd = &cobra.Command{
Run: leaseKeepaliveFunc,
}
-var (
- leaseKeepaliveTotal int
-)
+var leaseKeepaliveTotal int
func init() {
RootCmd.AddCommand(leaseKeepaliveCmd)
leaseKeepaliveCmd.Flags().IntVar(&leaseKeepaliveTotal, "total", 10000, "Total number of lease keepalive requests")
}
-func leaseKeepaliveFunc(cmd *cobra.Command, args []string) {
+func leaseKeepaliveFunc(_ *cobra.Command, _ []string) {
requests := make(chan struct{})
clients := mustCreateClients(totalClients, totalConns)
bar = pb.New(leaseKeepaliveTotal)
- bar.Format("Bom !")
bar.Start()
r := newReport()
diff --git a/tools/benchmark/cmd/mvcc-put.go b/tools/benchmark/cmd/mvcc-put.go
index 11ca074f070..25cdb61d541 100644
--- a/tools/benchmark/cmd/mvcc-put.go
+++ b/tools/benchmark/cmd/mvcc-put.go
@@ -56,7 +56,6 @@ func init() {
// TODO: after the PR https://github.com/spf13/cobra/pull/220 is merged, the below pprof related flags should be moved to RootCmd
mvccPutCmd.Flags().StringVar(&cpuProfPath, "cpuprofile", "", "the path of file for storing cpu profile result")
mvccPutCmd.Flags().StringVar(&memProfPath, "memprofile", "", "the path of file for storing heap profile result")
-
}
func createBytesSlice(bytesN, sliceN int) [][]byte {
@@ -70,14 +69,14 @@ func createBytesSlice(bytesN, sliceN int) [][]byte {
return rs
}
-func mvccPutFunc(cmd *cobra.Command, args []string) {
+func mvccPutFunc(_ *cobra.Command, _ []string) {
if cpuProfPath != "" {
f, err := os.Create(cpuProfPath)
if err != nil {
fmt.Fprintln(os.Stderr, "Failed to create a file for storing cpu profile result: ", err)
os.Exit(1)
}
-
+ defer f.Close()
err = pprof.StartCPUProfile(f)
if err != nil {
fmt.Fprintln(os.Stderr, "Failed to start cpu profile: ", err)
@@ -92,7 +91,7 @@ func mvccPutFunc(cmd *cobra.Command, args []string) {
fmt.Fprintln(os.Stderr, "Failed to create a file for storing heap profile result: ", err)
os.Exit(1)
}
-
+ defer f.Close()
defer func() {
err := pprof.WriteHeapProfile(f)
if err != nil {
diff --git a/tools/benchmark/cmd/mvcc.go b/tools/benchmark/cmd/mvcc.go
index fd7d6aee010..baf2bf1a1bb 100644
--- a/tools/benchmark/cmd/mvcc.go
+++ b/tools/benchmark/cmd/mvcc.go
@@ -21,8 +21,8 @@ import (
"go.uber.org/zap"
"go.etcd.io/etcd/server/v3/lease"
- "go.etcd.io/etcd/server/v3/mvcc"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
"github.com/spf13/cobra"
)
@@ -35,7 +35,7 @@ var (
)
func initMVCC() {
- bcfg := backend.DefaultBackendConfig()
+ bcfg := backend.DefaultBackendConfig(zap.NewNop())
bcfg.Path, bcfg.BatchInterval, bcfg.BatchLimit = "mvcc-bench", time.Duration(batchInterval)*time.Millisecond, batchLimit
be := backend.New(bcfg)
s = mvcc.NewStore(zap.NewExample(), be, &lease.FakeLessor{}, mvcc.StoreConfig{})
@@ -59,6 +59,6 @@ func init() {
mvccCmd.PersistentFlags().IntVar(&batchLimit, "batch-limit", 10000, "A limit of batched transaction")
}
-func mvccPreRun(cmd *cobra.Command, args []string) {
+func mvccPreRun(_ *cobra.Command, _ []string) {
initMVCC()
}
diff --git a/tools/benchmark/cmd/put.go b/tools/benchmark/cmd/put.go
index a83d570812f..62b9a3dc83c 100644
--- a/tools/benchmark/cmd/put.go
+++ b/tools/benchmark/cmd/put.go
@@ -24,13 +24,13 @@ import (
"strings"
"time"
- v3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/report"
-
+ "github.com/cheggaaa/pb/v3"
"github.com/dustin/go-humanize"
"github.com/spf13/cobra"
"golang.org/x/time/rate"
- "gopkg.in/cheggaaa/pb.v1"
+
+ v3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/report"
)
// putCmd represents the put command
@@ -71,7 +71,7 @@ func init() {
putCmd.Flags().BoolVar(&checkHashkv, "check-hashkv", false, "'true' to check hashkv")
}
-func putFunc(cmd *cobra.Command, args []string) {
+func putFunc(cmd *cobra.Command, _ []string) {
if keySpaceSize <= 0 {
fmt.Fprintf(os.Stderr, "expected positive --key-space-size, got (%v)", keySpaceSize)
os.Exit(1)
@@ -86,7 +86,6 @@ func putFunc(cmd *cobra.Command, args []string) {
k, v := make([]byte, keySize), string(mustRandBytes(valSize))
bar = pb.New(putTotal)
- bar.Format("Bom !")
bar.Start()
r := newReport()
@@ -153,13 +152,6 @@ func compactKV(clients []*v3.Client) {
}
}
-func max(n1, n2 int64) int64 {
- if n1 > n2 {
- return n1
- }
- return n2
-}
-
func hashKV(cmd *cobra.Command, clients []*v3.Client) {
eps, err := cmd.Flags().GetStringSlice("endpoints")
if err != nil {
@@ -171,15 +163,14 @@ func hashKV(cmd *cobra.Command, clients []*v3.Client) {
host := eps[0]
st := time.Now()
- clients[0].HashKV(context.Background(), eps[0], 0)
- rh, eh := clients[0].HashKV(context.Background(), host, 0)
- if eh != nil {
- fmt.Fprintf(os.Stderr, "Failed to get the hashkv of endpoint %s (%v)\n", host, eh)
+ rh, err := clients[0].HashKV(context.Background(), host, 0)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to get the hashkv of endpoint %s (%v)\n", host, err)
panic(err)
}
- rt, es := clients[0].Status(context.Background(), host)
- if es != nil {
- fmt.Fprintf(os.Stderr, "Failed to get the status of endpoint %s (%v)\n", host, es)
+ rt, err := clients[0].Status(context.Background(), host)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to get the status of endpoint %s (%v)\n", host, err)
panic(err)
}
diff --git a/tools/benchmark/cmd/range.go b/tools/benchmark/cmd/range.go
index fc503b71bf8..111d69bd055 100644
--- a/tools/benchmark/cmd/range.go
+++ b/tools/benchmark/cmd/range.go
@@ -21,12 +21,12 @@ import (
"os"
"time"
- v3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/report"
-
+ "github.com/cheggaaa/pb/v3"
"github.com/spf13/cobra"
"golang.org/x/time/rate"
- "gopkg.in/cheggaaa/pb.v1"
+
+ v3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/report"
)
// rangeCmd represents the range command
@@ -41,6 +41,8 @@ var (
rangeRate int
rangeTotal int
rangeConsistency string
+ rangeLimit int64
+ rangeCountOnly bool
)
func init() {
@@ -48,6 +50,8 @@ func init() {
rangeCmd.Flags().IntVar(&rangeRate, "rate", 0, "Maximum range requests per second (0 is no limit)")
rangeCmd.Flags().IntVar(&rangeTotal, "total", 10000, "Total number of range requests")
rangeCmd.Flags().StringVar(&rangeConsistency, "consistency", "l", "Linearizable(l) or Serializable(s)")
+ rangeCmd.Flags().Int64Var(&rangeLimit, "limit", 0, "Maximum number of results to return from range request (0 is no limit)")
+ rangeCmd.Flags().BoolVar(&rangeCountOnly, "count-only", false, "Only returns the count of keys")
}
func rangeFunc(cmd *cobra.Command, args []string) {
@@ -80,7 +84,6 @@ func rangeFunc(cmd *cobra.Command, args []string) {
clients := mustCreateClients(totalClients, totalConns)
bar = pb.New(rangeTotal)
- bar.Format("Bom !")
bar.Start()
r := newReport()
@@ -101,7 +104,10 @@ func rangeFunc(cmd *cobra.Command, args []string) {
go func() {
for i := 0; i < rangeTotal; i++ {
- opts := []v3.OpOption{v3.WithRange(end)}
+ opts := []v3.OpOption{v3.WithRange(end), v3.WithLimit(rangeLimit)}
+ if rangeCountOnly {
+ opts = append(opts, v3.WithCountOnly())
+ }
if rangeConsistency == "s" {
opts = append(opts, v3.WithSerializable())
}
diff --git a/tools/benchmark/cmd/root.go b/tools/benchmark/cmd/root.go
index f8eb6e141c3..c85beb9dbc6 100644
--- a/tools/benchmark/cmd/root.go
+++ b/tools/benchmark/cmd/root.go
@@ -18,10 +18,10 @@ import (
"sync"
"time"
- "go.etcd.io/etcd/client/pkg/v3/transport"
-
+ "github.com/cheggaaa/pb/v3"
"github.com/spf13/cobra"
- "gopkg.in/cheggaaa/pb.v1"
+
+ "go.etcd.io/etcd/client/pkg/v3/transport"
)
// This represents the base command when called without any subcommands
@@ -53,7 +53,8 @@ var (
dialTimeout time.Duration
- targetLeader bool
+ targetLeader bool
+ autoSyncInterval time.Duration
)
func init() {
@@ -66,9 +67,11 @@ func init() {
RootCmd.PersistentFlags().StringVar(&tls.CertFile, "cert", "", "identify HTTPS client using this SSL certificate file")
RootCmd.PersistentFlags().StringVar(&tls.KeyFile, "key", "", "identify HTTPS client using this SSL key file")
RootCmd.PersistentFlags().StringVar(&tls.TrustedCAFile, "cacert", "", "verify certificates of HTTPS-enabled servers using this CA bundle")
+ RootCmd.PersistentFlags().BoolVar(&tls.InsecureSkipVerify, "insecure-skip-tls-verify", false, "skip server certificate verification")
RootCmd.PersistentFlags().StringVar(&user, "user", "", "provide username[:password] and prompt if password is not supplied.")
RootCmd.PersistentFlags().DurationVar(&dialTimeout, "dial-timeout", 0, "dial timeout for client connections")
RootCmd.PersistentFlags().BoolVar(&targetLeader, "target-leader", false, "connect only to the leader node")
+ RootCmd.PersistentFlags().DurationVar(&autoSyncInterval, "auto-sync-interval", time.Duration(0), "AutoSyncInterval is the interval to update endpoints with its latest members")
}
diff --git a/tools/benchmark/cmd/stm.go b/tools/benchmark/cmd/stm.go
index b950b28a0e2..d6dfba0d397 100644
--- a/tools/benchmark/cmd/stm.go
+++ b/tools/benchmark/cmd/stm.go
@@ -23,14 +23,14 @@ import (
"os"
"time"
+ "github.com/cheggaaa/pb/v3"
+ "github.com/spf13/cobra"
+ "golang.org/x/time/rate"
+
v3 "go.etcd.io/etcd/client/v3"
v3sync "go.etcd.io/etcd/client/v3/concurrency"
"go.etcd.io/etcd/pkg/v3/report"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
-
- "github.com/spf13/cobra"
- "golang.org/x/time/rate"
- "gopkg.in/cheggaaa/pb.v1"
)
// stmCmd represents the STM benchmark command
@@ -69,7 +69,7 @@ func init() {
stmCmd.Flags().IntVar(&stmRate, "rate", 0, "Maximum STM transactions per second (0 is no limit)")
}
-func stmFunc(cmd *cobra.Command, args []string) {
+func stmFunc(cmd *cobra.Command, _ []string) {
if stmKeyCount <= 0 {
fmt.Fprintf(os.Stderr, "expected positive --keys, got (%v)", stmKeyCount)
os.Exit(1)
@@ -108,7 +108,6 @@ func stmFunc(cmd *cobra.Command, args []string) {
clients := mustCreateClients(totalClients, totalConns)
bar = pb.New(stmTotal)
- bar.Format("Bom !")
bar.Start()
r := newReport()
diff --git a/tools/benchmark/cmd/txn_mixed.go b/tools/benchmark/cmd/txn_mixed.go
new file mode 100644
index 00000000000..ffc004ecfe2
--- /dev/null
+++ b/tools/benchmark/cmd/txn_mixed.go
@@ -0,0 +1,151 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "context"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "math/rand"
+ "os"
+ "time"
+
+ "github.com/cheggaaa/pb/v3"
+ "github.com/spf13/cobra"
+ "golang.org/x/time/rate"
+
+ v3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/report"
+)
+
+// mixedTxnCmd represents the txn-mixed command
+var mixedTxnCmd = &cobra.Command{
+ Use: "txn-mixed key [end-range]",
+ Short: "Benchmark a mixed load of txn-put & txn-range.",
+
+ Run: mixedTxnFunc,
+}
+
+var (
+ mixedTxnTotal int
+ mixedTxnRate int
+ mixedTxnReadWriteRatio float64
+ mixedTxnRangeLimit int64
+ mixedTxnEndKey string
+
+ writeOpsTotal uint64
+ readOpsTotal uint64
+)
+
+func init() {
+ RootCmd.AddCommand(mixedTxnCmd)
+ mixedTxnCmd.Flags().IntVar(&keySize, "key-size", 8, "Key size of mixed txn")
+ mixedTxnCmd.Flags().IntVar(&valSize, "val-size", 8, "Value size of mixed txn")
+ mixedTxnCmd.Flags().IntVar(&mixedTxnRate, "rate", 0, "Maximum txns per second (0 is no limit)")
+ mixedTxnCmd.Flags().IntVar(&mixedTxnTotal, "total", 10000, "Total number of txn requests")
+ mixedTxnCmd.Flags().StringVar(&mixedTxnEndKey, "end-key", "",
+ "Read operation range end key. By default, we do full range query with the default limit of 1000.")
+ mixedTxnCmd.Flags().Int64Var(&mixedTxnRangeLimit, "limit", 1000, "Read operation range result limit")
+ mixedTxnCmd.Flags().IntVar(&keySpaceSize, "key-space-size", 1, "Maximum possible keys")
+ mixedTxnCmd.Flags().StringVar(&rangeConsistency, "consistency", "l", "Linearizable(l) or Serializable(s)")
+ mixedTxnCmd.Flags().Float64Var(&mixedTxnReadWriteRatio, "rw-ratio", 1, "Read/write ops ratio")
+}
+
+type request struct {
+ isWrite bool
+ op v3.Op
+}
+
+func mixedTxnFunc(cmd *cobra.Command, _ []string) {
+ if keySpaceSize <= 0 {
+ fmt.Fprintf(os.Stderr, "expected positive --key-space-size, got (%v)", keySpaceSize)
+ os.Exit(1)
+ }
+
+ if rangeConsistency == "l" {
+ fmt.Println("bench with linearizable range")
+ } else if rangeConsistency == "s" {
+ fmt.Println("bench with serializable range")
+ } else {
+ fmt.Fprintln(os.Stderr, cmd.Usage())
+ os.Exit(1)
+ }
+
+ requests := make(chan request, totalClients)
+ if mixedTxnRate == 0 {
+ mixedTxnRate = math.MaxInt32
+ }
+ limit := rate.NewLimiter(rate.Limit(mixedTxnRate), 1)
+ clients := mustCreateClients(totalClients, totalConns)
+ k, v := make([]byte, keySize), string(mustRandBytes(valSize))
+
+ bar = pb.New(mixedTxnTotal)
+ bar.Start()
+
+ reportRead := newReport()
+ reportWrite := newReport()
+ for i := range clients {
+ wg.Add(1)
+ go func(c *v3.Client) {
+ defer wg.Done()
+ for req := range requests {
+ limit.Wait(context.Background())
+ st := time.Now()
+ _, err := c.Txn(context.TODO()).Then(req.op).Commit()
+ if req.isWrite {
+ reportWrite.Results() <- report.Result{Err: err, Start: st, End: time.Now()}
+ } else {
+ reportRead.Results() <- report.Result{Err: err, Start: st, End: time.Now()}
+ }
+ bar.Increment()
+ }
+ }(clients[i])
+ }
+
+ go func() {
+ for i := 0; i < mixedTxnTotal; i++ {
+ var req request
+ if rand.Float64() < mixedTxnReadWriteRatio/(1+mixedTxnReadWriteRatio) {
+ opts := []v3.OpOption{v3.WithRange(mixedTxnEndKey)}
+ if rangeConsistency == "s" {
+ opts = append(opts, v3.WithSerializable())
+ }
+ opts = append(opts, v3.WithPrefix(), v3.WithLimit(mixedTxnRangeLimit))
+ req.op = v3.OpGet("", opts...)
+ req.isWrite = false
+ readOpsTotal++
+ } else {
+ binary.PutVarint(k, int64(i%keySpaceSize))
+ req.op = v3.OpPut(string(k), v)
+ req.isWrite = true
+ writeOpsTotal++
+ }
+ requests <- req
+ }
+ close(requests)
+ }()
+
+ rcRead := reportRead.Run()
+ rcWrite := reportWrite.Run()
+ wg.Wait()
+ close(reportRead.Results())
+ close(reportWrite.Results())
+ bar.Finish()
+ fmt.Printf("Total Read Ops: %d\nDetails:", readOpsTotal)
+ fmt.Println(<-rcRead)
+ fmt.Printf("Total Write Ops: %d\nDetails:", writeOpsTotal)
+ fmt.Println(<-rcWrite)
+}
diff --git a/tools/benchmark/cmd/txn_put.go b/tools/benchmark/cmd/txn_put.go
index 7558054c5fc..d69293b838a 100644
--- a/tools/benchmark/cmd/txn_put.go
+++ b/tools/benchmark/cmd/txn_put.go
@@ -22,12 +22,12 @@ import (
"os"
"time"
- v3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/report"
-
+ "github.com/cheggaaa/pb/v3"
"github.com/spf13/cobra"
"golang.org/x/time/rate"
- "gopkg.in/cheggaaa/pb.v1"
+
+ v3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/report"
)
// txnPutCmd represents the txnPut command
@@ -55,7 +55,7 @@ func init() {
txnPutCmd.Flags().IntVar(&keySpaceSize, "key-space-size", 1, "Maximum possible keys")
}
-func txnPutFunc(cmd *cobra.Command, args []string) {
+func txnPutFunc(_ *cobra.Command, _ []string) {
if keySpaceSize <= 0 {
fmt.Fprintf(os.Stderr, "expected positive --key-space-size, got (%v)", keySpaceSize)
os.Exit(1)
@@ -76,7 +76,6 @@ func txnPutFunc(cmd *cobra.Command, args []string) {
k, v := make([]byte, keySize), string(mustRandBytes(valSize))
bar = pb.New(txnPutTotal)
- bar.Format("Bom !")
bar.Start()
r := newReport()
diff --git a/tools/benchmark/cmd/util.go b/tools/benchmark/cmd/util.go
index 8f231310fa9..a6b99939067 100644
--- a/tools/benchmark/cmd/util.go
+++ b/tools/benchmark/cmd/util.go
@@ -22,9 +22,10 @@ import (
"strings"
"github.com/bgentry/speakeasy"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/report"
"google.golang.org/grpc/grpclog"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/report"
)
var (
@@ -47,22 +48,22 @@ func mustFindLeaderEndpoints(c *clientv3.Client) {
os.Exit(1)
}
- leaderId := uint64(0)
+ leaderID := uint64(0)
for _, ep := range c.Endpoints() {
if sresp, serr := c.Status(context.TODO(), ep); serr == nil {
- leaderId = sresp.Leader
+ leaderID = sresp.Leader
break
}
}
for _, m := range resp.Members {
- if m.ID == leaderId {
+ if m.ID == leaderID {
leaderEps = m.ClientURLs
return
}
}
- fmt.Fprintf(os.Stderr, "failed to find a leader endpoint\n")
+ fmt.Fprint(os.Stderr, "failed to find a leader endpoint\n")
os.Exit(1)
}
@@ -93,8 +94,9 @@ func mustCreateConn() *clientv3.Client {
dialTotal++
}
cfg := clientv3.Config{
- Endpoints: connEndpoints,
- DialTimeout: dialTimeout,
+ AutoSyncInterval: autoSyncInterval,
+ Endpoints: connEndpoints,
+ DialTimeout: dialTimeout,
}
if !tls.Empty() || tls.TrustedCAFile != "" {
cfgtls, err := tls.ClientConfig()
@@ -113,7 +115,6 @@ func mustCreateConn() *clientv3.Client {
}
cfg.Username = username
cfg.Password = password
-
}
client, err := clientv3.New(cfg)
diff --git a/tools/benchmark/cmd/watch.go b/tools/benchmark/cmd/watch.go
index e25ee41ec99..a52a34360f2 100644
--- a/tools/benchmark/cmd/watch.go
+++ b/tools/benchmark/cmd/watch.go
@@ -23,12 +23,12 @@ import (
"sync/atomic"
"time"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/report"
-
+ "github.com/cheggaaa/pb/v3"
"github.com/spf13/cobra"
"golang.org/x/time/rate"
- "gopkg.in/cheggaaa/pb.v1"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/report"
)
// watchCmd represents the watch command
@@ -88,7 +88,7 @@ func init() {
watchCmd.Flags().BoolVar(&watchSeqKeys, "sequential-keys", false, "Use sequential keys")
}
-func watchFunc(cmd *cobra.Command, args []string) {
+func watchFunc(_ *cobra.Command, _ []string) {
if watchKeySpaceSize <= 0 {
fmt.Fprintf(os.Stderr, "expected positive --key-space-size, got (%v)", watchKeySpaceSize)
os.Exit(1)
@@ -115,7 +115,6 @@ func benchMakeWatches(clients []*clientv3.Client, wk *watchedKeys) {
keyc := make(chan string, watchStreams)
bar = pb.New(watchStreams * watchWatchesPerStream)
- bar.Format("Bom !")
bar.Start()
r := newReport()
@@ -188,7 +187,6 @@ func benchPutWatches(clients []*clientv3.Client, wk *watchedKeys) {
}
bar = pb.New(eventsTotal)
- bar.Format("Bom !")
bar.Start()
r := newReport()
@@ -211,7 +209,12 @@ func benchPutWatches(clients []*clientv3.Client, wk *watchedKeys) {
}
}()
- limit := rate.NewLimiter(rate.Limit(watchPutRate), 1)
+ watchPutLimit := rate.Inf
+ if watchPutRate > 0 {
+ watchPutLimit = rate.Limit(watchPutRate)
+ }
+
+ limit := rate.NewLimiter(watchPutLimit, 1)
for _, cc := range clients {
go func(c *clientv3.Client) {
for op := range putreqc {
@@ -230,7 +233,6 @@ func benchPutWatches(clients []*clientv3.Client, wk *watchedKeys) {
bar.Finish()
close(r.Results())
fmt.Printf("Watch events received summary:\n%s", <-rc)
-
}
func recvWatchChan(wch clientv3.WatchChan, results chan<- report.Result, nrRxed *int32) {
diff --git a/tools/benchmark/cmd/watch_get.go b/tools/benchmark/cmd/watch_get.go
index 31ada8a7c39..b9a8bc3363d 100644
--- a/tools/benchmark/cmd/watch_get.go
+++ b/tools/benchmark/cmd/watch_get.go
@@ -20,11 +20,11 @@ import (
"sync"
"time"
+ "github.com/cheggaaa/pb/v3"
+ "github.com/spf13/cobra"
+
v3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/report"
-
- "github.com/spf13/cobra"
- "gopkg.in/cheggaaa/pb.v1"
)
// watchGetCmd represents the watch command
@@ -49,7 +49,7 @@ func init() {
watchGetCmd.Flags().IntVar(&watchEvents, "events", 8, "Number of events per watcher")
}
-func watchGetFunc(cmd *cobra.Command, args []string) {
+func watchGetFunc(_ *cobra.Command, _ []string) {
clients := mustCreateClients(totalClients, totalConns)
getClient := mustCreateClients(1, 1)
@@ -72,7 +72,6 @@ func watchGetFunc(cmd *cobra.Command, args []string) {
}
bar = pb.New(watchGetTotalWatchers * watchEvents)
- bar.Format("Bom !")
bar.Start()
// report from trying to do serialized gets with concurrent watchers
diff --git a/tools/benchmark/cmd/watch_latency.go b/tools/benchmark/cmd/watch_latency.go
index ed46c065dfe..2e60860b460 100644
--- a/tools/benchmark/cmd/watch_latency.go
+++ b/tools/benchmark/cmd/watch_latency.go
@@ -18,15 +18,14 @@ import (
"context"
"fmt"
"os"
- "sync"
"time"
- "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/report"
-
+ "github.com/cheggaaa/pb/v3"
"github.com/spf13/cobra"
"golang.org/x/time/rate"
- "gopkg.in/cheggaaa/pb.v1"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/report"
)
// watchLatencyCmd represents the watch latency command
@@ -40,72 +39,114 @@ var watchLatencyCmd = &cobra.Command{
}
var (
- watchLTotal int
- watchLPutRate int
- watchLKeySize int
- watchLValueSize int
+ watchLPutTotal int
+ watchLPutRate int
+ watchLKeySize int
+ watchLValueSize int
+ watchLStreams int
+ watchLWatchersPerStream int
+ watchLPrevKV bool
)
func init() {
RootCmd.AddCommand(watchLatencyCmd)
- watchLatencyCmd.Flags().IntVar(&watchLTotal, "total", 10000, "Total number of put requests")
+ watchLatencyCmd.Flags().IntVar(&watchLStreams, "streams", 10, "Total watch streams")
+ watchLatencyCmd.Flags().IntVar(&watchLWatchersPerStream, "watchers-per-stream", 10, "Total watchers per stream")
+ watchLatencyCmd.Flags().BoolVar(&watchLPrevKV, "prevkv", false, "PrevKV enabled on watch requests")
+
+ watchLatencyCmd.Flags().IntVar(&watchLPutTotal, "put-total", 1000, "Total number of put requests")
watchLatencyCmd.Flags().IntVar(&watchLPutRate, "put-rate", 100, "Number of keys to put per second")
watchLatencyCmd.Flags().IntVar(&watchLKeySize, "key-size", 32, "Key size of watch response")
watchLatencyCmd.Flags().IntVar(&watchLValueSize, "val-size", 32, "Value size of watch response")
}
-func watchLatencyFunc(cmd *cobra.Command, args []string) {
+func watchLatencyFunc(_ *cobra.Command, _ []string) {
key := string(mustRandBytes(watchLKeySize))
value := string(mustRandBytes(watchLValueSize))
-
- clients := mustCreateClients(totalClients, totalConns)
+ wchs := setupWatchChannels(key)
putClient := mustCreateConn()
- wchs := make([]clientv3.WatchChan, len(clients))
- for i := range wchs {
- wchs[i] = clients[i].Watch(context.TODO(), key)
- }
-
- bar = pb.New(watchLTotal)
- bar.Format("Bom !")
+ bar = pb.New(watchLPutTotal * len(wchs))
bar.Start()
limiter := rate.NewLimiter(rate.Limit(watchLPutRate), watchLPutRate)
- r := newReport()
- rc := r.Run()
- for i := 0; i < watchLTotal; i++ {
+ putTimes := make([]time.Time, watchLPutTotal)
+ eventTimes := make([][]time.Time, len(wchs))
+
+ for i, wch := range wchs {
+ wch := wch
+ i := i
+ eventTimes[i] = make([]time.Time, watchLPutTotal)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ eventCount := 0
+ for eventCount < watchLPutTotal {
+ resp := <-wch
+ for range resp.Events {
+ eventTimes[i][eventCount] = time.Now()
+ eventCount++
+ bar.Increment()
+ }
+ }
+ }()
+ }
+
+ putReport := newReport()
+ putReportResults := putReport.Run()
+ watchReport := newReport()
+ watchReportResults := watchReport.Run()
+ for i := 0; i < watchLPutTotal; i++ {
// limit key put as per reqRate
if err := limiter.Wait(context.TODO()); err != nil {
break
}
-
- var st time.Time
- var wg sync.WaitGroup
- wg.Add(len(clients))
- barrierc := make(chan struct{})
- for _, wch := range wchs {
- ch := wch
- go func() {
- <-barrierc
- <-ch
- r.Results() <- report.Result{Start: st, End: time.Now()}
- wg.Done()
- }()
- }
-
+ start := time.Now()
if _, err := putClient.Put(context.TODO(), key, value); err != nil {
fmt.Fprintf(os.Stderr, "Failed to Put for watch latency benchmark: %v\n", err)
os.Exit(1)
}
-
- st = time.Now()
- close(barrierc)
- wg.Wait()
- bar.Increment()
+ end := time.Now()
+ putReport.Results() <- report.Result{Start: start, End: end}
+ putTimes[i] = end
}
-
- close(r.Results())
+ wg.Wait()
+ close(putReport.Results())
bar.Finish()
- fmt.Printf("%s", <-rc)
+ fmt.Printf("\nPut summary:\n%s", <-putReportResults)
+
+ for i := 0; i < len(wchs); i++ {
+ for j := 0; j < watchLPutTotal; j++ {
+ start := putTimes[j]
+ end := eventTimes[i][j]
+ if end.Before(start) {
+ start = end
+ }
+ watchReport.Results() <- report.Result{Start: start, End: end}
+ }
+ }
+
+ close(watchReport.Results())
+ fmt.Printf("\nWatch events summary:\n%s", <-watchReportResults)
+}
+
+func setupWatchChannels(key string) []clientv3.WatchChan {
+ clients := mustCreateClients(totalClients, totalConns)
+
+ streams := make([]clientv3.Watcher, watchLStreams)
+ for i := range streams {
+ streams[i] = clientv3.NewWatcher(clients[i%len(clients)])
+ }
+ opts := []clientv3.OpOption{}
+ if watchLPrevKV {
+ opts = append(opts, clientv3.WithPrevKV())
+ }
+ wchs := make([]clientv3.WatchChan, len(streams)*watchLWatchersPerStream)
+ for i := 0; i < len(streams); i++ {
+ for j := 0; j < watchLWatchersPerStream; j++ {
+ wchs[i*watchLWatchersPerStream+j] = streams[i].Watch(context.TODO(), key, opts...)
+ }
+ }
+ return wchs
}
diff --git a/tools/etcd-dump-db/OWNERS b/tools/etcd-dump-db/OWNERS
new file mode 100644
index 00000000000..3e371da7b22
--- /dev/null
+++ b/tools/etcd-dump-db/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+ - area/debugging
diff --git a/tools/etcd-dump-db/README.md b/tools/etcd-dump-db/README.md
index 252acc885ee..b176096f0a6 100644
--- a/tools/etcd-dump-db/README.md
+++ b/tools/etcd-dump-db/README.md
@@ -1,6 +1,36 @@
-### etcd-dump-db
+# etcd-dump-db
-etcd-dump-db inspects etcd db files.
+`etcd-dump-db` inspects etcd db files.
+
+## Installation
+
+Install the tool by running the following command from the etcd source directory.
+
+```
+ $ go install -v ./tools/etcd-dump-db
+```
+
+The installation will place the executable in $GOPATH/bin. If the $GOPATH environment variable is not set, the tool will be installed into $HOME/go/bin. You can also find the installed location by running the following command from the etcd source directory. Make sure that $PATH is set accordingly in your environment.
+
+```
+ $ go list -f "{{.Target}}" ./tools/etcd-dump-db
+```
+
+Alternatively, instead of installing the tool, you can use it by simply running the following command from the etcd source directory.
+
+```
+ $ go run ./tools/etcd-dump-db
+```
+
+## Usage
+
+The following command prints the usage for the current development version.
+
+```
+ $ etcd-dump-db --help
+```
+
+An example of the usage details is provided below.
```
Usage:
@@ -72,3 +102,13 @@ key="\x00\x00\x00\x00\x005@x_\x00\x00\x00\x00\x00\x00\x00\tt", value="\n\x153640
key="\x00\x00\x00\x00\x005@x_\x00\x00\x00\x00\x00\x00\x00\bt", value="\n\x153640412599896088633_8"
key="\x00\x00\x00\x00\x005@x_\x00\x00\x00\x00\x00\x00\x00\at", value="\n\x153640412599896088633_7"
```
+
+#### scan-keys [data dir or db file path]
+
+Scans all the key-value pairs starting from a specific revision in the key space. It works even if the db is corrupted.
+
+```
+$ ./etcd-dump-db scan-keys ~/tmp/etcd/778/db.db 16589739 2>/dev/null | grep "/registry/configmaps/istio-system/istio-namespace-controller-election"
+pageID=1306, index=5/5, rev={Revision:{Main:16589739 Sub:0} tombstone:false}, value=[key "/registry/configmaps/istio-system/istio-namespace-controller-election" | val "k8s\x00\n\x0f\n\x02v1\x12\tConfigMap\x12\xeb\x03\n\xe8\x03\n#istio-namespace-controller-election\x12\x00\x1a\fistio-system\"\x00*$bb696087-260d-4167-bf06-17d3361f9b5f2\x008\x00B\b\b\x9e\xbe\xed\xb5\x06\x10\x00b\xe6\x01\n(control-plane.alpha.kubernetes.io/leader\x12\xb9\x01{\"holderIdentity\":\"istiod-d56968787-txq2d\",\"holderKey\":\"default\",\"leaseDurationSeconds\":30,\"acquireTime\":\"2024-08-13T13:26:54Z\",\"renewTime\":\"2024-08-27T06:16:13Z\",\"leaderTransitions\":0}\x8a\x01\x90\x01\n\x0fpilot-discovery\x12\x06Update\x1a\x02v1\"\b\b\xad\u07b5\xb6\x06\x10\x002\bFieldsV1:[\nY{\"f:metadata\":{\"f:annotations\":{\".\":{},\"f:control-plane.alpha.kubernetes.io/leader\":{}}}}B\x00\x1a\x00\"\x00" | created 9612546 | mod 16589739 | ver 157604]
+pageID=4737, index=4/4, rev={Revision:{Main:16589786 Sub:0} tombstone:false}, value=[key "/registry/configmaps/istio-system/istio-namespace-controller-election" | val "k8s\x00\n\x0f\n\x02v1\x12\tConfigMap\x12\xeb\x03\n\xe8\x03\n#istio-namespace-controller-election\x12\x00\x1a\fistio-system\"\x00*$bb696087-260d-4167-bf06-17d3361f9b5f2\x008\x00B\b\b\x9e\xbe\xed\xb5\x06\x10\x00b\xe6\x01\n(control-plane.alpha.kubernetes.io/leader\x12\xb9\x01{\"holderIdentity\":\"istiod-d56968787-txq2d\",\"holderKey\":\"default\",\"leaseDurationSeconds\":30,\"acquireTime\":\"2024-08-13T13:26:54Z\",\"renewTime\":\"2024-08-27T06:16:21Z\",\"leaderTransitions\":0}\x8a\x01\x90\x01\n\x0fpilot-discovery\x12\x06Update\x1a\x02v1\"\b\b\xb5\u07b5\xb6\x06\x10\x002\bFieldsV1:[\nY{\"f:metadata\":{\"f:annotations\":{\".\":{},\"f:control-plane.alpha.kubernetes.io/leader\":{}}}}B\x00\x1a\x00\"\x00" | created 9612546 | mod 16589786 | ver 157605]
+```
\ No newline at end of file
diff --git a/tools/etcd-dump-db/backend.go b/tools/etcd-dump-db/backend.go
index 1cc7706d6da..875997ad247 100644
--- a/tools/etcd-dump-db/backend.go
+++ b/tools/etcd-dump-db/backend.go
@@ -19,14 +19,15 @@ import (
"fmt"
"path/filepath"
- "go.etcd.io/etcd/api/v3/authpb"
+ "go.uber.org/zap"
+ bolt "go.etcd.io/bbolt"
+ "go.etcd.io/etcd/api/v3/authpb"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/server/v3/lease/leasepb"
- "go.etcd.io/etcd/server/v3/mvcc"
- "go.etcd.io/etcd/server/v3/mvcc/backend"
-
- bolt "go.etcd.io/bbolt"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+ "go.etcd.io/etcd/server/v3/storage/schema"
)
func snapDir(dataDir string) string {
@@ -34,9 +35,9 @@ func snapDir(dataDir string) string {
}
func getBuckets(dbPath string) (buckets []string, err error) {
- db, derr := bolt.Open(dbPath, 0600, &bolt.Options{Timeout: flockTimeout})
+ db, derr := bolt.Open(dbPath, 0o600, &bolt.Options{Timeout: flockTimeout})
if derr != nil {
- return nil, fmt.Errorf("failed to open bolt DB %v", derr)
+ return nil, fmt.Errorf("failed to open bolt DB %w", derr)
}
defer db.Close()
@@ -53,28 +54,22 @@ func getBuckets(dbPath string) (buckets []string, err error) {
type decoder func(k, v []byte)
+// key is the bucket name, and value is the function to decode K/V in the bucket.
var decoders = map[string]decoder{
"key": keyDecoder,
"lease": leaseDecoder,
"auth": authDecoder,
"authRoles": authRolesDecoder,
"authUsers": authUsersDecoder,
+ "meta": metaDecoder,
}
-type revision struct {
- main int64
- sub int64
-}
-
-func bytesToRev(bytes []byte) revision {
- return revision{
- main: int64(binary.BigEndian.Uint64(bytes[0:8])),
- sub: int64(binary.BigEndian.Uint64(bytes[9:])),
- }
+func defaultDecoder(k, v []byte) {
+ fmt.Printf("key=%q, value=%q\n", k, v)
}
func keyDecoder(k, v []byte) {
- rev := bytesToRev(k)
+ rev := mvcc.BytesToBucketKey(k)
var kv mvccpb.KeyValue
if err := kv.Unmarshal(v); err != nil {
panic(err)
@@ -95,7 +90,7 @@ func leaseDecoder(k, v []byte) {
if err := lpb.Unmarshal(v); err != nil {
panic(err)
}
- fmt.Printf("lease ID=%016x, TTL=%ds\n", leaseID, lpb.TTL)
+ fmt.Printf("lease ID=%016x, TTL=%ds, remaining TTL=%ds\n", leaseID, lpb.TTL, lpb.RemainingTTL)
}
func authDecoder(k, v []byte) {
@@ -107,7 +102,7 @@ func authDecoder(k, v []byte) {
}
}
-func authRolesDecoder(k, v []byte) {
+func authRolesDecoder(_, v []byte) {
role := &authpb.Role{}
err := role.Unmarshal(v)
if err != nil {
@@ -116,19 +111,30 @@ func authRolesDecoder(k, v []byte) {
fmt.Printf("role=%q, keyPermission=%v\n", string(role.Name), role.KeyPermission)
}
-func authUsersDecoder(k, v []byte) {
+func authUsersDecoder(_, v []byte) {
user := &authpb.User{}
err := user.Unmarshal(v)
if err != nil {
panic(err)
}
- fmt.Printf("user=%q, roles=%q, password=%q, option=%v\n", user.Name, user.Roles, string(user.Password), user.Options)
+ fmt.Printf("user=%q, roles=%q, option=%v\n", user.Name, user.Roles, user.Options)
+}
+
+func metaDecoder(k, v []byte) {
+ if string(k) == string(schema.MetaConsistentIndexKeyName) || string(k) == string(schema.MetaTermKeyName) {
+ fmt.Printf("key=%q, value=%v\n", k, binary.BigEndian.Uint64(v))
+ } else if string(k) == string(schema.ScheduledCompactKeyName) || string(k) == string(schema.FinishedCompactKeyName) {
+ rev := mvcc.BytesToRev(v)
+ fmt.Printf("key=%q, value=%v\n", k, rev)
+ } else {
+ defaultDecoder(k, v)
+ }
}
func iterateBucket(dbPath, bucket string, limit uint64, decode bool) (err error) {
- db, err := bolt.Open(dbPath, 0600, &bolt.Options{Timeout: flockTimeout})
+ db, err := bolt.Open(dbPath, 0o600, &bolt.Options{Timeout: flockTimeout})
if err != nil {
- return fmt.Errorf("failed to open bolt DB %v", err)
+ return fmt.Errorf("failed to open bolt DB %w", err)
}
defer db.Close()
@@ -147,7 +153,7 @@ func iterateBucket(dbPath, bucket string, limit uint64, decode bool) (err error)
if dec, ok := decoders[bucket]; decode && ok {
dec(k, v)
} else {
- fmt.Printf("key=%q, value=%q\n", k, v)
+ defaultDecoder(k, v)
}
limit--
@@ -162,8 +168,8 @@ func iterateBucket(dbPath, bucket string, limit uint64, decode bool) (err error)
}
func getHash(dbPath string) (hash uint32, err error) {
- b := backend.NewDefaultBackend(dbPath)
- return b.Hash(mvcc.DefaultIgnores)
+ b := backend.NewDefaultBackend(zap.NewNop(), dbPath)
+ return b.Hash(schema.DefaultIgnores)
}
// TODO: revert by revision and find specified hash value
diff --git a/tools/etcd-dump-db/main.go b/tools/etcd-dump-db/main.go
index f82d91f7689..6be6d83ac73 100644
--- a/tools/etcd-dump-db/main.go
+++ b/tools/etcd-dump-db/main.go
@@ -19,6 +19,7 @@ import (
"log"
"os"
"path/filepath"
+ "strconv"
"strings"
"time"
@@ -40,6 +41,11 @@ var (
Short: "iterate-bucket lists key-value pairs in reverse order.",
Run: iterateBucketCommandFunc,
}
+ scanKeySpaceCommand = &cobra.Command{
+ Use: "scan-keys [data dir or db file path] [start revision]",
+ Short: "scan-keys scans all the key-value pairs starting from a specific revision in the key space.",
+ Run: scanKeysCommandFunc,
+ }
getHashCommand = &cobra.Command{
Use: "hash [data dir or db file path]",
Short: "hash computes the hash of db file.",
@@ -47,9 +53,11 @@ var (
}
)
-var flockTimeout time.Duration
-var iterateBucketLimit uint64
-var iterateBucketDecode bool
+var (
+ flockTimeout time.Duration
+ iterateBucketLimit uint64
+ iterateBucketDecode bool
+)
func init() {
rootCommand.PersistentFlags().DurationVar(&flockTimeout, "timeout", 10*time.Second, "time to wait to obtain a file lock on db file, 0 to block indefinitely")
@@ -58,6 +66,7 @@ func init() {
rootCommand.AddCommand(listBucketCommand)
rootCommand.AddCommand(iterateBucketCommand)
+ rootCommand.AddCommand(scanKeySpaceCommand)
rootCommand.AddCommand(getHashCommand)
}
@@ -68,7 +77,7 @@ func main() {
}
}
-func listBucketCommandFunc(cmd *cobra.Command, args []string) {
+func listBucketCommandFunc(_ *cobra.Command, args []string) {
if len(args) < 1 {
log.Fatalf("Must provide at least 1 argument (got %v)", args)
}
@@ -89,7 +98,7 @@ func listBucketCommandFunc(cmd *cobra.Command, args []string) {
}
}
-func iterateBucketCommandFunc(cmd *cobra.Command, args []string) {
+func iterateBucketCommandFunc(_ *cobra.Command, args []string) {
if len(args) != 2 {
log.Fatalf("Must provide 2 arguments (got %v)", args)
}
@@ -107,7 +116,28 @@ func iterateBucketCommandFunc(cmd *cobra.Command, args []string) {
}
}
-func getHashCommandFunc(cmd *cobra.Command, args []string) {
+func scanKeysCommandFunc(_ *cobra.Command, args []string) {
+ if len(args) != 2 {
+ log.Fatalf("Must provide 2 arguments (got %v)", args)
+ }
+ dp := args[0]
+ if !strings.HasSuffix(dp, "db") {
+ dp = filepath.Join(snapDir(dp), "db")
+ }
+ if !existFileOrDir(dp) {
+ log.Fatalf("%q does not exist", dp)
+ }
+ startRev, err := strconv.ParseInt(args[1], 10, 64)
+ if err != nil {
+ log.Fatal(err)
+ }
+ err = scanKeys(dp, startRev)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func getHashCommandFunc(_ *cobra.Command, args []string) {
if len(args) < 1 {
log.Fatalf("Must provide at least 1 argument (got %v)", args)
}
diff --git a/tools/etcd-dump-db/meta.go b/tools/etcd-dump-db/meta.go
new file mode 100644
index 00000000000..30860c8af8b
--- /dev/null
+++ b/tools/etcd-dump-db/meta.go
@@ -0,0 +1,40 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import "unsafe"
+
+const magic uint32 = 0xED0CDAED
+
+type inBucket struct {
+ root uint64 // page id of the bucket's root-level page
+ sequence uint64 // monotonically incrementing, used by NextSequence()
+}
+
+type meta struct {
+ magic uint32
+ version uint32
+ pageSize uint32
+ flags uint32
+ root inBucket
+ freelist uint64
+ pgid uint64
+ txid uint64
+ checksum uint64
+}
+
+func loadPageMeta(buf []byte) *meta {
+ return (*meta)(unsafe.Pointer(&buf[pageHeaderSize]))
+}
diff --git a/tools/etcd-dump-db/page.go b/tools/etcd-dump-db/page.go
new file mode 100644
index 00000000000..298bdacadbb
--- /dev/null
+++ b/tools/etcd-dump-db/page.go
@@ -0,0 +1,70 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import "unsafe"
+
+const (
+ pageHeaderSize = unsafe.Sizeof(page{})
+ leafPageElementSize = unsafe.Sizeof(leafPageElement{})
+ pageMaxAllocSize = 0xFFFFFFF
+)
+
+const (
+ leafPageFlag = 0x02
+)
+
+type page struct {
+ id uint64
+ flags uint16
+ count uint16
+ overflow uint32
+}
+
+func (p *page) isLeafPage() bool {
+ return p.flags == leafPageFlag
+}
+
+func loadPage(buf []byte) *page {
+ return (*page)(unsafe.Pointer(&buf[0]))
+}
+
+// leafPageElement retrieves the leaf node by index
+func (p *page) leafPageElement(index uint16) *leafPageElement {
+ return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
+ leafPageElementSize, int(index)))
+}
+
+// leafPageElement represents a node on a leaf page.
+type leafPageElement struct {
+ flags uint32
+ pos uint32
+ ksize uint32
+ vsize uint32
+}
+
+// Key returns a byte slice of the node key.
+func (n *leafPageElement) key() []byte {
+ i := int(n.pos)
+ j := i + int(n.ksize)
+ return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
+}
+
+// Value returns a byte slice of the node value.
+func (n *leafPageElement) value() []byte {
+ i := int(n.pos) + int(n.ksize)
+ j := i + int(n.vsize)
+ return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
+}
diff --git a/tools/etcd-dump-db/scan.go b/tools/etcd-dump-db/scan.go
new file mode 100644
index 00000000000..3898459d260
--- /dev/null
+++ b/tools/etcd-dump-db/scan.go
@@ -0,0 +1,150 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+)
+
+func scanKeys(dbPath string, startRev int64) error {
+ pgSize, hwm, err := readPageAndHWMSize(dbPath)
+ if err != nil {
+ return fmt.Errorf("failed to read page and HWM size: %w", err)
+ }
+
+ for pageID := uint64(2); pageID < hwm; {
+ p, _, err := readPage(dbPath, pgSize, pageID)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Reading page %d failed: %v. Continuting...\n", pageID, err)
+ pageID++
+ continue
+ }
+
+ if !p.isLeafPage() {
+ pageID++
+ continue
+ }
+
+ for i := uint16(0); i < p.count; i++ {
+ e := p.leafPageElement(i)
+
+ rev, err := bytesToBucketKey(e.key())
+ if err != nil {
+ if exceptionCheck(e.key()) {
+ break
+ }
+ fmt.Fprintf(os.Stderr, "Decoding revision failed, pageID: %d, index: %d, key: %x, error: %v\n", pageID, i, string(e.key()), err)
+ continue
+ }
+
+ if startRev != 0 && rev.Main < startRev {
+ continue
+ }
+
+ fmt.Printf("pageID=%d, index=%d/%d, ", pageID, i, p.count-1)
+ keyDecoder(e.key(), e.value())
+ }
+
+ pageID += uint64(p.overflow) + 1
+ }
+ return nil
+}
+
+func bytesToBucketKey(key []byte) (rev mvcc.BucketKey, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("BytesToBucketKey failed: %v", r)
+ }
+ }()
+ rev = mvcc.BytesToBucketKey(key)
+ return rev, err
+}
+
+func readPageAndHWMSize(dbPath string) (uint64, uint64, error) {
+ f, err := os.Open(dbPath)
+ if err != nil {
+ return 0, 0, err
+ }
+ defer f.Close()
+
+ // read 4KB chunk
+ buf := make([]byte, 4096)
+ if _, err := io.ReadFull(f, buf); err != nil {
+ return 0, 0, err
+ }
+
+ m := loadPageMeta(buf)
+ if m.magic != magic {
+ return 0, 0, fmt.Errorf("the Meta Page has wrong (unexpected) magic")
+ }
+
+ return uint64(m.pageSize), m.pgid, nil
+}
+
+func readPage(dbPath string, pageSize uint64, pageID uint64) (*page, []byte, error) {
+ f, err := os.Open(dbPath)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer f.Close()
+
+ buf := make([]byte, pageSize)
+ if _, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil {
+ return nil, nil, err
+ }
+
+ p := loadPage(buf)
+ if p.id != pageID {
+ return nil, nil, fmt.Errorf("unexpected page id: %d, wanted: %d", p.id, pageID)
+ }
+
+ if p.overflow == 0 {
+ return p, buf, nil
+ }
+
+ buf = make([]byte, (uint64(p.overflow)+1)*pageSize)
+ if _, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil {
+ return nil, nil, err
+ }
+
+ p = loadPage(buf)
+ if p.id != pageID {
+ return nil, nil, fmt.Errorf("unexpected page id: %d, wanted: %d", p.id, pageID)
+ }
+
+ return p, buf, nil
+}
+
+func exceptionCheck(key []byte) bool {
+ whiteKeyList := map[string]struct{}{
+ "alarm": {},
+ "auth": {},
+ "authRoles": {},
+ "authUsers": {},
+ "cluster": {},
+ "key": {},
+ "lease": {},
+ "members": {},
+ "members_removed": {},
+ "meta": {},
+ }
+
+ _, ok := whiteKeyList[string(key)]
+ return ok
+}
diff --git a/tools/etcd-dump-db/utils.go b/tools/etcd-dump-db/utils.go
index 3af585a84d6..184cb5181c4 100644
--- a/tools/etcd-dump-db/utils.go
+++ b/tools/etcd-dump-db/utils.go
@@ -14,9 +14,34 @@
package main
-import "os"
+import (
+ "os"
+ "unsafe"
+)
func existFileOrDir(name string) bool {
_, err := os.Stat(name)
return err == nil
}
+
+func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(base) + offset)
+}
+
+func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz)
+}
+
+func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
+ // See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices
+ //
+ // This memory is not allocated from C, but it is unmanaged by Go's
+ // garbage collector and should behave similarly, and the compiler
+ // should produce similar code. Note that this conversion allows a
+ // subslice to begin after the base address, with an optional offset,
+ // while the URL above does not cover this case and only slices from
+ // index 0. However, the wiki never says that the address must be to
+ // the beginning of a C allocation (or even that malloc was used at
+ // all), so this is believed to be correct.
+ return (*[pageMaxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j]
+}
diff --git a/tools/etcd-dump-logs/OWNERS b/tools/etcd-dump-logs/OWNERS
new file mode 100644
index 00000000000..3e371da7b22
--- /dev/null
+++ b/tools/etcd-dump-logs/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+ - area/debugging
diff --git a/tools/etcd-dump-logs/README.md b/tools/etcd-dump-logs/README.md
index b6edc302134..0922f2878fb 100644
--- a/tools/etcd-dump-logs/README.md
+++ b/tools/etcd-dump-logs/README.md
@@ -1,6 +1,36 @@
-### etcd-dump-logs
+# etcd-dump-logs
-etcd-dump-logs dumps the log from data directory.
+`etcd-dump-logs` dumps the log from data directory.
+
+## Installation
+
+Install the tool by running the following command from the etcd source directory.
+
+```
+ $ go install -v ./tools/etcd-dump-logs
+```
+
+The installation will place the executable in $GOPATH/bin. If the $GOPATH environment variable is not set, the tool will be installed into $HOME/go/bin. You can also find the installed location by running the following command from the etcd source directory. Make sure that $PATH is set accordingly in your environment.
+
+```
+ $ go list -f "{{.Target}}" ./tools/etcd-dump-logs
+```
+
+Alternatively, instead of installing the tool, you can use it by simply running the following command from the etcd source directory.
+
+```
+ $ go run ./tools/etcd-dump-logs
+```
+
+## Usage
+
+The following command prints the usage for the current development version.
+
+```
+ $ etcd-dump-logs --help
+```
+
+An example of the usage details is provided below.
```
Usage:
@@ -13,7 +43,9 @@ Usage:
- data_dir/member/wal/0000000000000000-0000000000000000.wal
Flags:
-
+ -wal-dir string
+ If set, dumps WAL from the informed path, rather than following the
+ standard 'data_dir/member/wal/' location
-entry-type string
If set, filters output by entry type. Must be one or more than one of:
ConfigChange, Normal, Request, InternalRaftRequest,
@@ -65,7 +97,7 @@ Entry types (ConfigChange,IRRCompaction) count is : 5
```
#### etcd-dump-logs -stream-decoder [data dir]
-Decode each entry based on logic in the passed decoder. Decoder status and decoded data are listed in separated tab/columns in the ouput. For parsing purpose, the output from decoder are expected to be in format of "|". Please refer to [decoder_correctoutputformat.sh] as an example.
+Decode each entry based on logic in the passed decoder. Decoder status and decoded data are listed in separate tab-separated columns in the output. For parsing purposes, the output from the decoder is expected to be in a "|"-separated format. Please refer to [decoder_correctoutputformat.sh] as an example.
However, if the decoder output format is not as expected, "decoder_status" will be "decoder output format is not right, print output anyway", and all output from decoder will be considered as "decoded_data"
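+
+As a hedged illustration (the exact input the tool feeds to the decoder is an assumption here; only the "|"-separated output contract described above is taken from this document), a minimal decoder script could look like:
+
+```
+#!/bin/bash
+# Minimal sketch of a stream decoder: echo each input line back with an
+# "OK" status, separated from the decoded data by the expected "|".
+while read -r line; do
+    echo "OK|decoded:${line}"
+done
+```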
diff --git a/tools/etcd-dump-logs/etcd-dump-log_test.go b/tools/etcd-dump-logs/etcd-dump-log_test.go
index 372f7597bfc..bb08ec11403 100644
--- a/tools/etcd-dump-logs/etcd-dump-log_test.go
+++ b/tools/etcd-dump-logs/etcd-dump-log_test.go
@@ -15,8 +15,6 @@
package main
import (
- "bytes"
- "io/ioutil"
"os"
"os/exec"
"path"
@@ -24,68 +22,35 @@ import (
"strings"
"testing"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
+
"go.etcd.io/etcd/api/v3/authpb"
"go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/pkg/v3/pbutil"
- "go.etcd.io/etcd/raft/v3/raftpb"
- "go.etcd.io/etcd/server/v3/wal"
- "go.uber.org/zap"
+ "go.etcd.io/etcd/server/v3/storage/wal"
+ "go.etcd.io/raft/v3/raftpb"
)
func TestEtcdDumpLogEntryType(t *testing.T) {
// directory where the command is
binDir, err := os.Getwd()
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
+ // TODO(ptabor): The test does not run by default from ./scripts/test.sh.
dumpLogsBinary := path.Join(binDir + "/etcd-dump-logs")
if !fileutil.Exist(dumpLogsBinary) {
t.Skipf("%q does not exist", dumpLogsBinary)
}
- decoder_correctoutputformat := filepath.Join(binDir, "/testdecoder/decoder_correctoutputformat.sh")
- decoder_wrongoutputformat := filepath.Join(binDir, "/testdecoder/decoder_wrongoutputformat.sh")
-
- p, err := ioutil.TempDir(os.TempDir(), "etcddumplogstest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(p)
-
- memberdir := filepath.Join(p, "member")
- err = os.Mkdir(memberdir, 0744)
- if err != nil {
- t.Fatal(err)
- }
- waldir := walDir(p)
- snapdir := snapDir(p)
-
- w, err := wal.Create(zap.NewExample(), waldir, nil)
- if err != nil {
- t.Fatal(err)
- }
-
- err = os.Mkdir(snapdir, 0744)
- if err != nil {
- t.Fatal(err)
- }
+ decoderCorrectOutputFormat := filepath.Join(binDir, "/testdecoder/decoder_correctoutputformat.sh")
+ decoderWrongOutputFormat := filepath.Join(binDir, "/testdecoder/decoder_wrongoutputformat.sh")
- ents := make([]raftpb.Entry, 0)
+ p := t.TempDir()
- // append entries into wal log
- appendConfigChangeEnts(&ents)
- appendNormalRequestEnts(&ents)
- appendNormalIRREnts(&ents)
- appendUnknownNormalEnts(&ents)
-
- // force commit newly appended entries
- err = w.Save(raftpb.HardState{}, ents)
- if err != nil {
- t.Fatal(err)
- }
- w.Close()
+ mustCreateWALLog(t, p)
argtests := []struct {
name string
@@ -105,34 +70,51 @@ func TestEtcdDumpLogEntryType(t *testing.T) {
{"lease grant entry-type", []string{"-entry-type", "IRRLeaseGrant", p}, "expectedoutput/listIRRLeaseGrant.output"},
{"lease revoke entry-type", []string{"-entry-type", "IRRLeaseRevoke", p}, "expectedoutput/listIRRLeaseRevoke.output"},
{"confchange and txn entry-type", []string{"-entry-type", "ConfigChange,IRRCompaction", p}, "expectedoutput/listConfigChangeIRRCompaction.output"},
- {"decoder_correctoutputformat", []string{"-stream-decoder", decoder_correctoutputformat, p}, "expectedoutput/decoder_correctoutputformat.output"},
- {"decoder_wrongoutputformat", []string{"-stream-decoder", decoder_wrongoutputformat, p}, "expectedoutput/decoder_wrongoutputformat.output"},
+ {"decoder_correctoutputformat", []string{"-stream-decoder", decoderCorrectOutputFormat, p}, "expectedoutput/decoder_correctoutputformat.output"},
+ {"decoder_wrongoutputformat", []string{"-stream-decoder", decoderWrongOutputFormat, p}, "expectedoutput/decoder_wrongoutputformat.output"},
}
for _, argtest := range argtests {
t.Run(argtest.name, func(t *testing.T) {
cmd := exec.Command(dumpLogsBinary, argtest.args...)
actual, err := cmd.CombinedOutput()
- if err != nil {
- t.Fatal(err)
- }
- expected, err := ioutil.ReadFile(path.Join(binDir, argtest.fileExpected))
- if err != nil {
- t.Fatal(err)
- }
- if !bytes.Equal(actual, expected) {
- t.Errorf(`Got input of length %d, wanted input of length %d
-==== BEGIN RECEIVED FILE ====
-%s
-==== END RECEIVED FILE ====
-==== BEGIN EXPECTED FILE ====
-%s
-==== END EXPECTED FILE ====
-`, len(actual), len(expected), actual, expected)
- }
+ require.NoError(t, err)
+ expected, err := os.ReadFile(path.Join(binDir, argtest.fileExpected))
+ require.NoError(t, err)
+
+ assert.EqualValues(t, string(expected), string(actual))
+ // The output files contain a lot of trailing whitespace, which is difficult to diagnose without printing it explicitly.
+ // TODO(ptabor): Get rid of the whitespaces both in code and the test-files.
+ assert.EqualValues(t, strings.ReplaceAll(string(expected), " ", "_"), strings.ReplaceAll(string(actual), " ", "_"))
})
}
+}
+
+func mustCreateWALLog(t *testing.T, path string) {
+ memberdir := filepath.Join(path, "member")
+ err := os.Mkdir(memberdir, 0o744)
+ require.NoError(t, err)
+ waldir := walDir(path)
+ snapdir := snapDir(path)
+
+ w, err := wal.Create(zaptest.NewLogger(t), waldir, nil)
+ require.NoError(t, err)
+
+ err = os.Mkdir(snapdir, 0o744)
+ require.NoError(t, err)
+
+ ents := make([]raftpb.Entry, 0)
+ // append entries into wal log
+ appendConfigChangeEnts(&ents)
+ appendNormalRequestEnts(&ents)
+ appendNormalIRREnts(&ents)
+ appendUnknownNormalEnts(&ents)
+
+ // force commit newly appended entries
+ err = w.Save(raftpb.HardState{}, ents)
+ require.NoError(t, err)
+ w.Close()
}
func appendConfigChangeEnts(ents *[]raftpb.Entry) {
@@ -157,10 +139,10 @@ func appendNormalRequestEnts(ents *[]raftpb.Entry) {
requests := []etcdserverpb.Request{
{ID: 0, Method: "", Path: "/path0", Val: "{\"hey\":\"ho\",\"hi\":[\"yo\"]}", Dir: true, PrevValue: "", PrevIndex: 0, PrevExist: &b, Expiration: 9, Wait: false, Since: 1, Recursive: false, Sorted: false, Quorum: false, Time: 1, Stream: false, Refresh: &b},
- {ID: 1, Method: "QGET", Path: "/path1", Val: "{\"0\":\"1\",\"2\":[\"3\"]}", Dir: false, PrevValue: "", PrevIndex: 0, PrevExist: &b, Expiration: 9, Wait: false, Since: 1, Recursive: false, Sorted: false, Quorum: false, Time: 1, Stream: false, Refresh: &b},
- {ID: 2, Method: "SYNC", Path: "/path2", Val: "{\"0\":\"1\",\"2\":[\"3\"]}", Dir: false, PrevValue: "", PrevIndex: 0, PrevExist: &b, Expiration: 2, Wait: false, Since: 1, Recursive: false, Sorted: false, Quorum: false, Time: 1, Stream: false, Refresh: &b},
- {ID: 3, Method: "DELETE", Path: "/path3", Val: "{\"hey\":\"ho\",\"hi\":[\"yo\"]}", Dir: false, PrevValue: "", PrevIndex: 0, PrevExist: &a, Expiration: 2, Wait: false, Since: 1, Recursive: false, Sorted: false, Quorum: false, Time: 1, Stream: false, Refresh: &b},
- {ID: 4, Method: "RANDOM", Path: "/path4/superlong" + strings.Repeat("/path", 30), Val: "{\"hey\":\"ho\",\"hi\":[\"yo\"]}", Dir: false, PrevValue: "", PrevIndex: 0, PrevExist: &b, Expiration: 2, Wait: false, Since: 1, Recursive: false, Sorted: false, Quorum: false, Time: 1, Stream: false, Refresh: &b},
+ {ID: 1, Method: methodQGet, Path: "/path1", Val: "{\"0\":\"1\",\"2\":[\"3\"]}", Dir: false, PrevValue: "", PrevIndex: 0, PrevExist: &b, Expiration: 9, Wait: false, Since: 1, Recursive: false, Sorted: false, Quorum: false, Time: 1, Stream: false, Refresh: &b},
+ {ID: 2, Method: methodSync, Path: "/path2", Val: "{\"0\":\"1\",\"2\":[\"3\"]}", Dir: false, PrevValue: "", PrevIndex: 0, PrevExist: &b, Expiration: 2, Wait: false, Since: 1, Recursive: false, Sorted: false, Quorum: false, Time: 1, Stream: false, Refresh: &b},
+ {ID: 3, Method: methodDelete, Path: "/path3", Val: "{\"hey\":\"ho\",\"hi\":[\"yo\"]}", Dir: false, PrevValue: "", PrevIndex: 0, PrevExist: &a, Expiration: 2, Wait: false, Since: 1, Recursive: false, Sorted: false, Quorum: false, Time: 1, Stream: false, Refresh: &b},
+ {ID: 4, Method: methodRandom, Path: "/path4/superlong" + strings.Repeat("/path", 30), Val: "{\"hey\":\"ho\",\"hi\":[\"yo\"]}", Dir: false, PrevValue: "", PrevIndex: 0, PrevExist: &b, Expiration: 2, Wait: false, Since: 1, Recursive: false, Sorted: false, Quorum: false, Time: 1, Stream: false, Refresh: &b},
}
for i, request := range requests {
@@ -180,11 +162,12 @@ func appendNormalIRREnts(ents *[]raftpb.Entry) {
irrdeleterange := &etcdserverpb.DeleteRangeRequest{Key: []byte("0"), RangeEnd: []byte("9"), PrevKv: true}
- delInRangeReq := &etcdserverpb.RequestOp{Request: &etcdserverpb.RequestOp_RequestDeleteRange{
- RequestDeleteRange: &etcdserverpb.DeleteRangeRequest{
- Key: []byte("a"), RangeEnd: []byte("b"),
+ delInRangeReq := &etcdserverpb.RequestOp{
+ Request: &etcdserverpb.RequestOp_RequestDeleteRange{
+ RequestDeleteRange: &etcdserverpb.DeleteRangeRequest{
+ Key: []byte("a"), RangeEnd: []byte("b"),
+ },
},
- },
}
irrtxn := &etcdserverpb.TxnRequest{Success: []*etcdserverpb.RequestOp{delInRangeReq}, Failure: []*etcdserverpb.RequestOp{delInRangeReq}}
diff --git a/tools/etcd-dump-logs/expectedoutput/decoder_correctoutputformat.output b/tools/etcd-dump-logs/expectedoutput/decoder_correctoutputformat.output
index 1937054cad0..a9fcd1a3ac6 100644
--- a/tools/etcd-dump-logs/expectedoutput/decoder_correctoutputformat.output
+++ b/tools/etcd-dump-logs/expectedoutput/decoder_correctoutputformat.output
@@ -3,7 +3,7 @@ empty
Start dumping log entries from snapshot.
WAL metadata:
nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0
-WAL entries:
+WAL entries: 34
lastIndex=34
term index type data decoder_status decoded_data
1 1 conf method=ConfChangeAddNode id=2 ERROR jhjaajjjahjbbbjj
@@ -29,7 +29,7 @@ term index type data decoder_status decoded_data
15 21 norm ID:16 auth_user_add: > OK jhajebddajjajefefafdfecaabjegjfagcgccaaajj
16 22 norm ID:17 auth_user_delete: OK jhaaeaddjgjajefefafdfeca
17 23 norm ID:18 auth_user_get: OK jhabfbddjgjajefefafdfeca
- 18 24 norm ID:19 auth_user_change_password: OK jhacfaddjejajefefafdfecaabjegjfagcgccb
+ 18 24 norm ID:19 auth_user_change_password:" > OK jhacfaddjejajefefafdfecaabjegjfagcgccb
19 25 norm ID:20 auth_user_grant_role: OK jhadhbdejejajegegcfegbcaabjegbfffcfeca
20 26 norm ID:21 auth_user_revoke_role: OK jhaehadejejajegegcfegbcbabjegbfffcfecb
21 27 norm ID:22 auth_user_list:<> ERROR jhafibdejj
diff --git a/tools/etcd-dump-logs/expectedoutput/decoder_wrongoutputformat.output b/tools/etcd-dump-logs/expectedoutput/decoder_wrongoutputformat.output
index 90341eb62c1..65e7dd3a4f9 100644
--- a/tools/etcd-dump-logs/expectedoutput/decoder_wrongoutputformat.output
+++ b/tools/etcd-dump-logs/expectedoutput/decoder_wrongoutputformat.output
@@ -3,7 +3,7 @@ empty
Start dumping log entries from snapshot.
WAL metadata:
nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0
-WAL entries:
+WAL entries: 34
lastIndex=34
term index type data decoder_status decoded_data
1 1 conf method=ConfChangeAddNode id=2 decoder output format is not right, print output anyway jhjaajjjahjbbbjj
@@ -29,7 +29,7 @@ term index type data decoder_status decoded_data
15 21 norm ID:16 auth_user_add: > decoder output format is not right, print output anyway jhajebddajjajefefafdfecaabjegjfagcgccaaajj
16 22 norm ID:17 auth_user_delete: decoder output format is not right, print output anyway jhaaeaddjgjajefefafdfeca
17 23 norm ID:18 auth_user_get: decoder output format is not right, print output anyway jhabfbddjgjajefefafdfeca
- 18 24 norm ID:19 auth_user_change_password: decoder output format is not right, print output anyway jhacfaddjejajefefafdfecaabjegjfagcgccb
+ 18 24 norm ID:19 auth_user_change_password:" > decoder output format is not right, print output anyway jhacfaddjejajefefafdfecaabjegjfagcgccb
19 25 norm ID:20 auth_user_grant_role: decoder output format is not right, print output anyway jhadhbdejejajegegcfegbcaabjegbfffcfeca
20 26 norm ID:21 auth_user_revoke_role: decoder output format is not right, print output anyway jhaehadejejajegegcfegbcbabjegbfffcfecb
21 27 norm ID:22 auth_user_list:<> decoder output format is not right, print output anyway jhafibdejj
diff --git a/tools/etcd-dump-logs/expectedoutput/listAll.output b/tools/etcd-dump-logs/expectedoutput/listAll.output
index 3a6f41bb9c9..a1168ec20c2 100644
--- a/tools/etcd-dump-logs/expectedoutput/listAll.output
+++ b/tools/etcd-dump-logs/expectedoutput/listAll.output
@@ -3,7 +3,7 @@ empty
Start dumping log entries from snapshot.
WAL metadata:
nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0
-WAL entries:
+WAL entries: 34
lastIndex=34
term index type data
1 1 conf method=ConfChangeAddNode id=2
@@ -29,7 +29,7 @@ term index type data
15 21 norm ID:16 auth_user_add: >
16 22 norm ID:17 auth_user_delete:
17 23 norm ID:18 auth_user_get:
- 18 24 norm ID:19 auth_user_change_password:
+ 18 24 norm ID:19 auth_user_change_password:" >
19 25 norm ID:20 auth_user_grant_role:
20 26 norm ID:21 auth_user_revoke_role:
21 27 norm ID:22 auth_user_list:<>
diff --git a/tools/etcd-dump-logs/expectedoutput/listConfigChange.output b/tools/etcd-dump-logs/expectedoutput/listConfigChange.output
index 1b2dc9ec90f..acfb23c1535 100644
--- a/tools/etcd-dump-logs/expectedoutput/listConfigChange.output
+++ b/tools/etcd-dump-logs/expectedoutput/listConfigChange.output
@@ -3,7 +3,7 @@ empty
Start dumping log entries from snapshot.
WAL metadata:
nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0
-WAL entries:
+WAL entries: 34
lastIndex=34
term index type data
1 1 conf method=ConfChangeAddNode id=2
diff --git a/tools/etcd-dump-logs/expectedoutput/listConfigChangeIRRCompaction.output b/tools/etcd-dump-logs/expectedoutput/listConfigChangeIRRCompaction.output
index c262c2e8aac..15b34dccd9b 100644
--- a/tools/etcd-dump-logs/expectedoutput/listConfigChangeIRRCompaction.output
+++ b/tools/etcd-dump-logs/expectedoutput/listConfigChangeIRRCompaction.output
@@ -3,7 +3,7 @@ empty
Start dumping log entries from snapshot.
WAL metadata:
nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0
-WAL entries:
+WAL entries: 34
lastIndex=34
term index type data
1 1 conf method=ConfChangeAddNode id=2
diff --git a/tools/etcd-dump-logs/expectedoutput/listIRRCompaction.output b/tools/etcd-dump-logs/expectedoutput/listIRRCompaction.output
index 493545884a9..cc764466dbb 100644
--- a/tools/etcd-dump-logs/expectedoutput/listIRRCompaction.output
+++ b/tools/etcd-dump-logs/expectedoutput/listIRRCompaction.output
@@ -3,7 +3,7 @@ empty
Start dumping log entries from snapshot.
WAL metadata:
nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0
-WAL entries:
+WAL entries: 34
lastIndex=34
term index type data
8 14 norm ID:9 compaction:
diff --git a/tools/etcd-dump-logs/expectedoutput/listIRRDeleteRange.output b/tools/etcd-dump-logs/expectedoutput/listIRRDeleteRange.output
index 85e71d39602..67716775300 100644
--- a/tools/etcd-dump-logs/expectedoutput/listIRRDeleteRange.output
+++ b/tools/etcd-dump-logs/expectedoutput/listIRRDeleteRange.output
@@ -3,7 +3,7 @@ empty
Start dumping log entries from snapshot.
WAL metadata:
nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0
-WAL entries:
+WAL entries: 34
lastIndex=34
term index type data
6 12 norm ID:7 delete_range:
diff --git a/tools/etcd-dump-logs/expectedoutput/listIRRLeaseGrant.output b/tools/etcd-dump-logs/expectedoutput/listIRRLeaseGrant.output
index 5cba3a497a7..6f9f8ecb9a3 100644
--- a/tools/etcd-dump-logs/expectedoutput/listIRRLeaseGrant.output
+++ b/tools/etcd-dump-logs/expectedoutput/listIRRLeaseGrant.output
@@ -3,7 +3,7 @@ empty
Start dumping log entries from snapshot.
WAL metadata:
nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0
-WAL entries:
+WAL entries: 34
lastIndex=34
term index type data
9 15 norm ID:10 lease_grant:
diff --git a/tools/etcd-dump-logs/expectedoutput/listIRRLeaseRevoke.output b/tools/etcd-dump-logs/expectedoutput/listIRRLeaseRevoke.output
index d67d05d0232..33fafaf3417 100644
--- a/tools/etcd-dump-logs/expectedoutput/listIRRLeaseRevoke.output
+++ b/tools/etcd-dump-logs/expectedoutput/listIRRLeaseRevoke.output
@@ -3,7 +3,7 @@ empty
Start dumping log entries from snapshot.
WAL metadata:
nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0
-WAL entries:
+WAL entries: 34
lastIndex=34
term index type data
10 16 norm ID:11 lease_revoke:
diff --git a/tools/etcd-dump-logs/expectedoutput/listIRRPut.output b/tools/etcd-dump-logs/expectedoutput/listIRRPut.output
index 397ae60c7b6..672dae54c3c 100644
--- a/tools/etcd-dump-logs/expectedoutput/listIRRPut.output
+++ b/tools/etcd-dump-logs/expectedoutput/listIRRPut.output
@@ -3,7 +3,7 @@ empty
Start dumping log entries from snapshot.
WAL metadata:
nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0
-WAL entries:
+WAL entries: 34
lastIndex=34
term index type data
5 11 norm ID:6 put:
diff --git a/tools/etcd-dump-logs/expectedoutput/listIRRRange.output b/tools/etcd-dump-logs/expectedoutput/listIRRRange.output
index 422e0374930..832587c91b1 100644
--- a/tools/etcd-dump-logs/expectedoutput/listIRRRange.output
+++ b/tools/etcd-dump-logs/expectedoutput/listIRRRange.output
@@ -3,7 +3,7 @@ empty
Start dumping log entries from snapshot.
WAL metadata:
nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0
-WAL entries:
+WAL entries: 34
lastIndex=34
term index type data
4 10 norm ID:5 range:
diff --git a/tools/etcd-dump-logs/expectedoutput/listIRRTxn.output b/tools/etcd-dump-logs/expectedoutput/listIRRTxn.output
index 92a7d0a26e7..9c5c1183319 100644
--- a/tools/etcd-dump-logs/expectedoutput/listIRRTxn.output
+++ b/tools/etcd-dump-logs/expectedoutput/listIRRTxn.output
@@ -3,7 +3,7 @@ empty
Start dumping log entries from snapshot.
WAL metadata:
nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0
-WAL entries:
+WAL entries: 34
lastIndex=34
term index type data
7 13 norm ID:8 txn: > failure: > >
diff --git a/tools/etcd-dump-logs/expectedoutput/listInternalRaftRequest.output b/tools/etcd-dump-logs/expectedoutput/listInternalRaftRequest.output
index da1ccb9a6e3..de2a0b41574 100644
--- a/tools/etcd-dump-logs/expectedoutput/listInternalRaftRequest.output
+++ b/tools/etcd-dump-logs/expectedoutput/listInternalRaftRequest.output
@@ -3,7 +3,7 @@ empty
Start dumping log entries from snapshot.
WAL metadata:
nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0
-WAL entries:
+WAL entries: 34
lastIndex=34
term index type data
4 10 norm ID:5 range:
@@ -20,7 +20,7 @@ term index type data
15 21 norm ID:16 auth_user_add: >
16 22 norm ID:17 auth_user_delete:
17 23 norm ID:18 auth_user_get:
- 18 24 norm ID:19 auth_user_change_password:
+ 18 24 norm ID:19 auth_user_change_password:" >
19 25 norm ID:20 auth_user_grant_role:
20 26 norm ID:21 auth_user_revoke_role:
21 27 norm ID:22 auth_user_list:<>
diff --git a/tools/etcd-dump-logs/expectedoutput/listNormal.output b/tools/etcd-dump-logs/expectedoutput/listNormal.output
index 37a1bb80cd6..5a584c31209 100644
--- a/tools/etcd-dump-logs/expectedoutput/listNormal.output
+++ b/tools/etcd-dump-logs/expectedoutput/listNormal.output
@@ -3,7 +3,7 @@ empty
Start dumping log entries from snapshot.
WAL metadata:
nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0
-WAL entries:
+WAL entries: 34
lastIndex=34
term index type data
3 5 norm noop
@@ -25,7 +25,7 @@ term index type data
15 21 norm ID:16 auth_user_add: >
16 22 norm ID:17 auth_user_delete:
17 23 norm ID:18 auth_user_get:
- 18 24 norm ID:19 auth_user_change_password:
+ 18 24 norm ID:19 auth_user_change_password:" >
19 25 norm ID:20 auth_user_grant_role:
20 26 norm ID:21 auth_user_revoke_role:
21 27 norm ID:22 auth_user_list:<>
diff --git a/tools/etcd-dump-logs/expectedoutput/listRequest.output b/tools/etcd-dump-logs/expectedoutput/listRequest.output
index 5201ff9d366..04defbdc0eb 100644
--- a/tools/etcd-dump-logs/expectedoutput/listRequest.output
+++ b/tools/etcd-dump-logs/expectedoutput/listRequest.output
@@ -3,7 +3,7 @@ empty
Start dumping log entries from snapshot.
WAL metadata:
nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0
-WAL entries:
+WAL entries: 34
lastIndex=34
term index type data
3 5 norm noop
diff --git a/tools/etcd-dump-logs/main.go b/tools/etcd-dump-logs/main.go
index b5635634430..f0bba446762 100644
--- a/tools/etcd-dump-logs/main.go
+++ b/tools/etcd-dump-logs/main.go
@@ -16,9 +16,9 @@ package main
import (
"bufio"
- "bytes"
"encoding/hex"
"encoding/json"
+ "errors"
"flag"
"fmt"
"io"
@@ -29,22 +29,28 @@ import (
"strings"
"time"
+ "go.uber.org/zap"
+
"go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/pkg/v3/pbutil"
- "go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
- "go.etcd.io/etcd/server/v3/wal"
- "go.etcd.io/etcd/server/v3/wal/walpb"
- "go.uber.org/zap"
+ "go.etcd.io/etcd/server/v3/storage/wal"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/raft/v3/raftpb"
)
const (
defaultEntryTypes string = "Normal,ConfigChange"
+ methodSync string = "SYNC"
+ methodQGet string = "QGET"
+ methodDelete string = "DELETE"
+ methodRandom string = "RANDOM"
)
func main() {
snapfile := flag.String("start-snap", "", "The base name of snapshot file to start dumping")
+ waldir := flag.String("wal-dir", "", "If set, dumps WAL from the given path, rather than following the standard 'data_dir/member/wal/' location")
index := flag.Uint64("start-index", 0, "The index to start dumping")
// Default entry types are Normal and ConfigChange
entrytype := flag.String("entry-type", defaultEntryTypes, `If set, filters output by entry type. Must be one or more than one of:
@@ -54,8 +60,10 @@ IRRCompaction, IRRLeaseGrant, IRRLeaseRevoke, IRRLeaseCheckpoint`)
streamdecoder := flag.String("stream-decoder", "", `The name of an executable decoding tool, the executable must process
hex encoded lines of binary input (from etcd-dump-logs)
and output a hex encoded line of binary for each input line`)
+ raw := flag.Bool("raw", false, "Read the logs in the low-level form")
flag.Parse()
+ lg := zap.NewExample()
if len(flag.Args()) != 1 {
log.Fatalf("Must provide data-dir argument (got %+v)", flag.Args())
@@ -66,67 +74,99 @@ and output a hex encoded line of binary for each input line`)
log.Fatal("start-snap and start-index flags cannot be used together.")
}
+ startFromIndex := false
+ flag.Visit(func(f *flag.Flag) {
+ if f.Name == "start-index" {
+ startFromIndex = true
+ }
+ })
+
+ if !*raw {
+ ents := readUsingReadAll(lg, startFromIndex, index, snapfile, dataDir, waldir)
+
+ fmt.Printf("WAL entries: %d\n", len(ents))
+ if len(ents) > 0 {
+ fmt.Printf("lastIndex=%d\n", ents[len(ents)-1].Index)
+ }
+
+ fmt.Printf("%4s\t%10s\ttype\tdata", "term", "index")
+ if *streamdecoder != "" {
+ fmt.Print("\tdecoder_status\tdecoded_data")
+ }
+ fmt.Println()
+
+ listEntriesType(*entrytype, *streamdecoder, ents)
+ } else {
+ if *snapfile != "" ||
+ *entrytype != defaultEntryTypes ||
+ *streamdecoder != "" {
+ log.Fatalf("Flags --start-snap, --entry-type and --stream-decoder are not supported in raw mode.")
+ }
+
+ wd := *waldir
+ if wd == "" {
+ wd = walDir(dataDir)
+ }
+ readRaw(index, wd, os.Stdout)
+ }
+}
+
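+// readUsingReadAll loads the starting snapshot (or honors -start-index), opens the WAL
+// for reading from that point, prints the snapshot and WAL metadata, and returns the
+// entries so the caller can filter and print them.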
+func readUsingReadAll(lg *zap.Logger, startFromIndex bool, index *uint64, snapfile *string, dataDir string, waldir *string) []raftpb.Entry {
var (
walsnap walpb.Snapshot
snapshot *raftpb.Snapshot
err error
)
- isIndex := *index != 0
-
- if isIndex {
+ if startFromIndex {
fmt.Printf("Start dumping log entries from index %d.\n", *index)
walsnap.Index = *index
} else {
if *snapfile == "" {
- ss := snap.New(zap.NewExample(), snapDir(dataDir))
+ ss := snap.New(lg, snapDir(dataDir))
snapshot, err = ss.Load()
} else {
- snapshot, err = snap.Read(zap.NewExample(), filepath.Join(snapDir(dataDir), *snapfile))
+ snapshot, err = snap.Read(lg, filepath.Join(snapDir(dataDir), *snapfile))
}
- switch err {
- case nil:
+ switch {
+ case err == nil:
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
nodes := genIDSlice(snapshot.Metadata.ConfState.Voters)
- confstateJson, err := json.Marshal(snapshot.Metadata.ConfState)
- if err != nil {
- confstateJson = []byte(fmt.Sprintf("confstate err: %v", err))
+
+ confStateJSON, merr := json.Marshal(snapshot.Metadata.ConfState)
+ if merr != nil {
+ confStateJSON = []byte(fmt.Sprintf("confstate err: %v", merr))
}
fmt.Printf("Snapshot:\nterm=%d index=%d nodes=%s confstate=%s\n",
- walsnap.Term, walsnap.Index, nodes, confstateJson)
- case snap.ErrNoSnapshot:
- fmt.Printf("Snapshot:\nempty\n")
+ walsnap.Term, walsnap.Index, nodes, confStateJSON)
+ case errors.Is(err, snap.ErrNoSnapshot):
+ fmt.Print("Snapshot:\nempty\n")
default:
log.Fatalf("Failed loading snapshot: %v", err)
}
fmt.Println("Start dumping log entries from snapshot.")
}
- w, err := wal.OpenForRead(zap.NewExample(), walDir(dataDir), walsnap)
+ wd := *waldir
+ if wd == "" {
+ wd = walDir(dataDir)
+ }
+
+ w, err := wal.OpenForRead(zap.NewExample(), wd, walsnap)
if err != nil {
log.Fatalf("Failed opening WAL: %v", err)
}
wmetadata, state, ents, err := w.ReadAll()
w.Close()
- if err != nil && (!isIndex || err != wal.ErrSnapshotNotFound) {
+ if err != nil && (!startFromIndex || !errors.Is(err, wal.ErrSnapshotNotFound)) {
log.Fatalf("Failed reading WAL: %v", err)
}
id, cid := parseWALMetadata(wmetadata)
vid := types.ID(state.Vote)
fmt.Printf("WAL metadata:\nnodeID=%s clusterID=%s term=%d commitIndex=%d vote=%s\n",
id, cid, state.Term, state.Commit, vid)
-
- fmt.Printf("WAL entries:\n")
- fmt.Printf("lastIndex=%d\n", ents[len(ents)-1].Index)
-
- fmt.Printf("%4s\t%10s\ttype\tdata", "term", "index")
- if *streamdecoder != "" {
- fmt.Printf("\tdecoder_status\tdecoded_data")
- }
- fmt.Println()
-
- listEntriesType(*entrytype, *streamdecoder, ents)
+ return ents
}
func walDir(dataDir string) string { return filepath.Join(dataDir, "member", "wal") }
@@ -232,6 +272,10 @@ type EntryPrinter func(e raftpb.Entry)
func printInternalRaftRequest(entry raftpb.Entry) {
var rr etcdserverpb.InternalRaftRequest
if err := rr.Unmarshal(entry.Data); err == nil {
+ // Ensure we don't log user password
+ if rr.AuthUserChangePassword != nil && rr.AuthUserChangePassword.Password != "" {
+ rr.AuthUserChangePassword.Password = ""
+ }
fmt.Printf("%4d\t%10d\tnorm\t%s", entry.Term, entry.Index, rr.String())
}
}
@@ -242,10 +286,10 @@ func printUnknownNormal(entry raftpb.Entry) {
func printConfChange(entry raftpb.Entry) {
fmt.Printf("%4d\t%10d", entry.Term, entry.Index)
- fmt.Printf("\tconf")
+ fmt.Print("\tconf")
var r raftpb.ConfChange
if err := r.Unmarshal(entry.Data); err != nil {
- fmt.Printf("\t???")
+ fmt.Print("\t???")
} else {
fmt.Printf("\tmethod=%s id=%s", r.Type, types.ID(r.NodeID))
}
@@ -257,10 +301,10 @@ func printRequest(entry raftpb.Entry) {
fmt.Printf("%4d\t%10d\tnorm", entry.Term, entry.Index)
switch r.Method {
case "":
- fmt.Printf("\tnoop")
- case "SYNC":
+ fmt.Print("\tnoop")
+ case methodSync:
fmt.Printf("\tmethod=SYNC time=%q", time.Unix(0, r.Time).UTC())
- case "QGET", "DELETE":
+ case methodQGet, methodDelete:
fmt.Printf("\tmethod=%s path=%s", r.Method, excerpt(r.Path, 64, 64))
default:
fmt.Printf("\tmethod=%s path=%s val=%s", r.Method, excerpt(r.Path, 64, 64), excerpt(r.Val, 128, 0))
@@ -275,7 +319,8 @@ func evaluateEntrytypeFlag(entrytype string) []EntryFilter {
entrytypelist = strings.Split(entrytype, ",")
}
- validRequest := map[string][]EntryFilter{"ConfigChange": {passConfChange},
+ validRequest := map[string][]EntryFilter{
+ "ConfigChange": {passConfChange},
"Normal": {passInternalRaftRequest, passRequest, passUnknownNormal},
"Request": {passRequest},
"InternalRaftRequest": {passInternalRaftRequest},
@@ -304,14 +349,16 @@ IRRCompaction, IRRLeaseGrant, IRRLeaseRevoke, IRRLeaseCheckpoint`, et)
return filters
}
-// listEntriesType filters and prints entries based on the entry-type flag,
+// listEntriesType filters and prints entries based on the entry-type flag.
func listEntriesType(entrytype string, streamdecoder string, ents []raftpb.Entry) {
entryFilters := evaluateEntrytypeFlag(entrytype)
- printerMap := map[string]EntryPrinter{"InternalRaftRequest": printInternalRaftRequest,
- "Request": printRequest,
- "ConfigChange": printConfChange,
- "UnknownNormal": printUnknownNormal}
- var stderr bytes.Buffer
+ printerMap := map[string]EntryPrinter{
+ "InternalRaftRequest": printInternalRaftRequest,
+ "Request": printRequest,
+ "ConfigChange": printConfChange,
+ "UnknownNormal": printUnknownNormal,
+ }
+ var stderr strings.Builder
args := strings.Split(streamdecoder, " ")
cmd := exec.Command(args[0], args[1:]...)
stdin, err := cmd.StdinPipe()
@@ -360,9 +407,9 @@ func listEntriesType(entrytype string, streamdecoder string, ents []raftpb.Entry
return
}
- decoder_status, decoded_data := parseDecoderOutput(decoderoutput)
+ decoderStatus, decodedData := parseDecoderOutput(decoderoutput)
- fmt.Printf("\t%s\t%s", decoder_status, decoded_data)
+ fmt.Printf("\t%s\t%s", decoderStatus, decodedData)
}
}
@@ -381,19 +428,19 @@ func listEntriesType(entrytype string, streamdecoder string, ents []raftpb.Entry
}
func parseDecoderOutput(decoderoutput string) (string, string) {
- var decoder_status string
- var decoded_data string
+ var decoderStatus string
+ var decodedData string
output := strings.Split(decoderoutput, "|")
switch len(output) {
case 1:
- decoder_status = "decoder output format is not right, print output anyway"
- decoded_data = decoderoutput
+ decoderStatus = "decoder output format is not right, print output anyway"
+ decodedData = decoderoutput
case 2:
- decoder_status = output[0]
- decoded_data = output[1]
+ decoderStatus = output[0]
+ decodedData = output[1]
default:
- decoder_status = output[0] + "(*WARNING: data might contain deliminator used by etcd-dump-logs)"
- decoded_data = strings.Join(output[1:], "")
+ decoderStatus = output[0] + "(*WARNING: data might contain deliminator used by etcd-dump-logs)"
+ decodedData = strings.Join(output[1:], "")
}
- return decoder_status, decoded_data
+ return decoderStatus, decodedData
}
diff --git a/tools/etcd-dump-logs/raw.go b/tools/etcd-dump-logs/raw.go
new file mode 100644
index 00000000000..2c1bed7696a
--- /dev/null
+++ b/tools/etcd-dump-logs/raw.go
@@ -0,0 +1,113 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ "go.etcd.io/etcd/server/v3/storage/wal"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
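+// readRaw scans every *.wal file in waldir and prints each record it can decode
+// (metadata, CRC, entries, snapshots, hard state) to out, continuing past CRC
+// mismatches so that partially corrupted logs can still be inspected.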
+func readRaw(fromIndex *uint64, waldir string, out io.Writer) {
+ var walReaders []fileutil.FileReader
+ dirEntry, err := os.ReadDir(waldir)
+ if err != nil {
+ log.Fatalf("Error: Failed to read directory '%s' error:%v", waldir, err)
+ }
+ for _, e := range dirEntry {
+ finfo, err := e.Info()
+ if err != nil {
+ log.Fatalf("Error: failed to get fileInfo of file: %s, error: %v", e.Name(), err)
+ }
+ if filepath.Ext(finfo.Name()) != ".wal" {
+ log.Printf("Warning: Ignoring non-.wal file: %s", finfo.Name())
+ continue
+ }
+ f, err := os.Open(filepath.Join(waldir, finfo.Name()))
+ if err != nil {
+ log.Printf("Error: Failed to read file: %s . error:%v", finfo.Name(), err)
+ }
+ walReaders = append(walReaders, fileutil.NewFileReader(f))
+ }
+ decoder := wal.NewDecoderAdvanced(true, walReaders...)
+ // crcDesync prevents flooding the log with repeated consecutive CRC errors.
+ crcDesync := false
+ for {
+ rec := walpb.Record{}
+ err := decoder.Decode(&rec)
+ if err == nil || errors.Is(err, walpb.ErrCRCMismatch) {
+ if err != nil && !crcDesync {
+ log.Printf("Error: Reading entry failed with CRC error: %v", err)
+ crcDesync = true
+ }
+ printRec(&rec, fromIndex, out)
+ if rec.Type == wal.CrcType {
+ decoder.UpdateCRC(rec.Crc)
+ crcDesync = false
+ }
+ continue
+ }
+ if errors.Is(err, io.EOF) {
+ fmt.Fprintf(out, "EOF: All entries were processed.\n")
+ break
+ } else if errors.Is(err, io.ErrUnexpectedEOF) {
+ fmt.Fprintf(out, "ErrUnexpectedEOF: The last record might be corrupted, error: %v.\n", err)
+ break
+ } else {
+ log.Printf("Error: Reading failed: %v", err)
+ break
+ }
+ }
+}
+
+func printRec(rec *walpb.Record, fromIndex *uint64, out io.Writer) {
+ switch rec.Type {
+ case wal.MetadataType:
+ var metadata etcdserverpb.Metadata
+ pbutil.MustUnmarshal(&metadata, rec.Data)
+ fmt.Fprintf(out, "Metadata: %s\n", metadata.String())
+ case wal.CrcType:
+ fmt.Fprintf(out, "CRC: %d\n", rec.Crc)
+ case wal.EntryType:
+ e := wal.MustUnmarshalEntry(rec.Data)
+ if fromIndex == nil || e.Index >= *fromIndex {
+ fmt.Fprintf(out, "Entry: %s\n", e.String())
+ }
+ case wal.SnapshotType:
+ var snap walpb.Snapshot
+ pbutil.MustUnmarshal(&snap, rec.Data)
+ if fromIndex == nil || snap.Index >= *fromIndex {
+ fmt.Fprintf(out, "Snapshot: %s\n", snap.String())
+ }
+ case wal.StateType:
+ var state raftpb.HardState
+ pbutil.MustUnmarshal(&state, rec.Data)
+ if fromIndex == nil || state.Commit >= *fromIndex {
+ fmt.Fprintf(out, "HardState: %s\n", state.String())
+ }
+ default:
+ log.Printf("Unexpected WAL log type: %d", rec.Type)
+ }
+}
diff --git a/tools/etcd-dump-logs/raw_test.go b/tools/etcd-dump-logs/raw_test.go
new file mode 100644
index 00000000000..3deb4bc390f
--- /dev/null
+++ b/tools/etcd-dump-logs/raw_test.go
@@ -0,0 +1,68 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package main
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_readRaw(t *testing.T) {
+ path := t.TempDir()
+ mustCreateWALLog(t, path)
+ var out bytes.Buffer
+ readRaw(nil, walDir(path), &out)
+ assert.Equal(t,
+ `CRC: 0
+Metadata:
+Snapshot:
+Entry: Term:1 Index:1 Type:EntryConfChange Data:"\010\001\020\000\030\002\"\000"
+Entry: Term:2 Index:2 Type:EntryConfChange Data:"\010\002\020\001\030\002\"\000"
+Entry: Term:2 Index:3 Type:EntryConfChange Data:"\010\003\020\002\030\002\"\000"
+Entry: Term:2 Index:4 Type:EntryConfChange Data:"\010\004\020\003\030\003\"\000"
+Entry: Term:3 Index:5 Data:"\010\000\022\000\032\006/path0\"\030{\"hey\":\"ho\",\"hi\":[\"yo\"]}(\0012\0008\000@\000H\tP\000X\001`+"`"+`\000h\000p\000x\001\200\001\000\210\001\000"
+Entry: Term:3 Index:6 Data:"\010\001\022\004QGET\032\006/path1\"\023{\"0\":\"1\",\"2\":[\"3\"]}(\0002\0008\000@\000H\tP\000X\001`+"`"+`\000h\000p\000x\001\200\001\000\210\001\000"
+Entry: Term:3 Index:7 Data:"\010\002\022\004SYNC\032\006/path2\"\023{\"0\":\"1\",\"2\":[\"3\"]}(\0002\0008\000@\000H\002P\000X\001`+"`"+`\000h\000p\000x\001\200\001\000\210\001\000"
+Entry: Term:3 Index:8 Data:"\010\003\022\006DELETE\032\006/path3\"\030{\"hey\":\"ho\",\"hi\":[\"yo\"]}(\0002\0008\000@\001H\002P\000X\001`+"`"+`\000h\000p\000x\001\200\001\000\210\001\000"
+Entry: Term:3 Index:9 Data:"\010\004\022\006RANDOM\032\246\001/path4/superlong/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path\"\030{\"hey\":\"ho\",\"hi\":[\"yo\"]}(\0002\0008\000@\000H\002P\000X\001`+"`"+`\000h\000p\000x\001\200\001\000\210\001\000"
+Entry: Term:4 Index:10 Data:"\010\005\032\025\n\0011\022\002hi\030\006 \001(\001X\240\234\001h\240\234\001"
+Entry: Term:5 Index:11 Data:"\010\006\"\020\n\004foo1\022\004bar1\030\0010\001"
+Entry: Term:6 Index:12 Data:"\010\007*\010\n\0010\022\0019\030\001"
+Entry: Term:7 Index:13 Data:"\010\0102\024\022\010\032\006\n\001a\022\001b\032\010\032\006\n\001a\022\001b"
+Entry: Term:8 Index:14 Data:"\010\t:\002\020\001"
+Entry: Term:9 Index:15 Data:"\010\nB\004\010\001\020\001"
+Entry: Term:10 Index:16 Data:"\010\013J\002\010\002"
+Entry: Term:11 Index:17 Data:"\010\014R\006\010\003\020\004\030\005"
+Entry: Term:12 Index:18 Data:"\010\r\302>\000"
+Entry: Term:13 Index:19 Data:"\010\016\232?\000"
+Entry: Term:14 Index:20 Data:"\010\017\242?\031\n\006myname\022\010password\032\005token"
+Entry: Term:15 Index:21 Data:"\010\020\342D\020\n\005name1\022\005pass1\032\000"
+Entry: Term:16 Index:22 Data:"\010\021\352D\007\n\005name1"
+Entry: Term:17 Index:23 Data:"\010\022\362D\007\n\005name1"
+Entry: Term:18 Index:24 Data:"\010\023\372D\016\n\005name1\022\005pass2"
+Entry: Term:19 Index:25 Data:"\010\024\202E\016\n\005user1\022\005role1"
+Entry: Term:20 Index:26 Data:"\010\025\212E\016\n\005user2\022\005role2"
+Entry: Term:21 Index:27 Data:"\010\026\222E\000"
+Entry: Term:22 Index:28 Data:"\010\027\232E\000"
+Entry: Term:23 Index:29 Data:"\010\030\202K\007\n\005role2"
+Entry: Term:24 Index:30 Data:"\010\031\212K\007\n\005role1"
+Entry: Term:25 Index:31 Data:"\010\032\222K\007\n\005role3"
+Entry: Term:26 Index:32 Data:"\010\033\232K\033\n\005role3\022\022\010\001\022\004Keys\032\010RangeEnd"
+Entry: Term:27 Index:33 Data:"\010\034\242K\026\n\005role3\022\003key\032\010rangeend"
+Entry: Term:27 Index:34 Data:"?"
+EOF: All entries were processed.
+`, out.String())
+}
diff --git a/tools/etcd-dump-metrics/OWNERS b/tools/etcd-dump-metrics/OWNERS
new file mode 100644
index 00000000000..c299e9b517c
--- /dev/null
+++ b/tools/etcd-dump-metrics/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+ - area/observability
diff --git a/tools/etcd-dump-metrics/README b/tools/etcd-dump-metrics/README
deleted file mode 100644
index dba52c8741a..00000000000
--- a/tools/etcd-dump-metrics/README
+++ /dev/null
@@ -1,13 +0,0 @@
-
-go install -v ./tools/etcd-dump-metrics
-
-# for latest main branch
-etcd-dump-metrics > Documentation/metrics/latest
-
-# Or download etcd v3.3.9 to ./bin
-goreman start
-etcd-dump-metrics --addr http://localhost:2379/metrics > Documentation/metrics/v3.3.9
-
-# Or download etcd v3.3.9 to temporary directory to fetch metrics
-etcd-dump-metrics --debug --download-ver v3.3.9
-etcd-dump-metrics --download-ver v3.3.9 > Documentation/metrics/v3.3.9
diff --git a/tools/etcd-dump-metrics/README.md b/tools/etcd-dump-metrics/README.md
new file mode 100644
index 00000000000..26fd61ef9bc
--- /dev/null
+++ b/tools/etcd-dump-metrics/README.md
@@ -0,0 +1,53 @@
+# etcd-dump-metrics
+
+`etcd-dump-metrics` provides the metrics for the latest main branch, a given endpoint, or a specific version.
+
+## Installation
+
+Install the tool by running the following command from the etcd source directory.
+
+```
+ $ go install -v ./tools/etcd-dump-metrics
+```
+
+The installation places the executable in $GOPATH/bin. If the $GOPATH environment variable is not set, the tool is
+installed into $HOME/go/bin. You can also find the install location by running the following command from the
+etcd source directory. Make sure that $PATH includes the installation directory.
+
+```
+ $ go list -f "{{.Target}}" ./tools/etcd-dump-metrics
+```
+
+Alternatively, instead of installing the tool, you can run it directly from the etcd source directory with the
+following command.
+
+```
+ $ go run ./tools/etcd-dump-metrics
+```
+
+## Usage
+
+The following command prints the usage for the latest development version.
+
+```
+ $ etcd-dump-metrics --help
+```
+
+Examples of common invocations are provided below.
+
+### For the latest main branch
+```
+ $ etcd-dump-metrics
+```
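+
+The output can be redirected to a file, for example to refresh `Documentation/metrics/latest`:
+
+```
+ $ etcd-dump-metrics > Documentation/metrics/latest
+```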
+
+### For the provided endpoint
+```
+ $ goreman start
+ $ etcd-dump-metrics --addr http://localhost:2379/metrics
+```
+
+### Download a specific version to a temporary directory to fetch metrics
+```
+ $ etcd-dump-metrics --debug --download-ver v3.5.3
+ $ etcd-dump-metrics --download-ver v3.5.3
+```
diff --git a/tools/etcd-dump-metrics/etcd.go b/tools/etcd-dump-metrics/etcd.go
index 7997e283237..6d95b3f8e7c 100644
--- a/tools/etcd-dump-metrics/etcd.go
+++ b/tools/etcd-dump-metrics/etcd.go
@@ -17,16 +17,15 @@ package main
import (
"context"
"fmt"
- "io/ioutil"
"net/url"
"os"
"strings"
"time"
+ "go.uber.org/zap"
+
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/embed"
-
- "go.uber.org/zap"
)
func newEmbedURLs(n int) (urls []url.URL) {
@@ -44,15 +43,15 @@ func setupEmbedCfg(cfg *embed.Config, curls, purls, ics []url.URL) {
// []string{"stderr"} to enable server logging
var err error
- cfg.Dir, err = ioutil.TempDir(os.TempDir(), fmt.Sprintf("%016X", time.Now().UnixNano()))
+ cfg.Dir, err = os.MkdirTemp(os.TempDir(), fmt.Sprintf("%016X", time.Now().UnixNano()))
if err != nil {
panic(err)
}
os.RemoveAll(cfg.Dir)
cfg.ClusterState = "new"
- cfg.LCUrls, cfg.ACUrls = curls, curls
- cfg.LPUrls, cfg.APUrls = purls, purls
+ cfg.ListenClientUrls, cfg.AdvertiseClientUrls = curls, curls
+ cfg.ListenPeerUrls, cfg.AdvertisePeerUrls = purls, purls
cfg.InitialCluster = ""
for i := range ics {
diff --git a/tools/etcd-dump-metrics/install_darwin.go b/tools/etcd-dump-metrics/install_darwin.go
index 84461a03948..8f30fb65205 100644
--- a/tools/etcd-dump-metrics/install_darwin.go
+++ b/tools/etcd-dump-metrics/install_darwin.go
@@ -13,13 +13,12 @@
// limitations under the License.
//go:build darwin
-// +build darwin
package main
import (
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"os"
"os/exec"
@@ -39,13 +38,13 @@ func install(ver, dir string) (string, error) {
}
defer resp.Body.Close()
- d, err := ioutil.ReadAll(resp.Body)
+ d, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
zipPath := filepath.Join(dir, "etcd.zip")
- if err = ioutil.WriteFile(zipPath, d, fileutil.PrivateFileMode); err != nil {
+ if err = os.WriteFile(zipPath, d, fileutil.PrivateFileMode); err != nil {
return "", err
}
diff --git a/tools/etcd-dump-metrics/install_linux.go b/tools/etcd-dump-metrics/install_linux.go
index 1a01ba6b17b..c2a2bfa035a 100644
--- a/tools/etcd-dump-metrics/install_linux.go
+++ b/tools/etcd-dump-metrics/install_linux.go
@@ -13,14 +13,14 @@
// limitations under the License.
//go:build linux
-// +build linux
package main
import (
"fmt"
- "io/ioutil"
+ "io"
"net/http"
+ "os"
"os/exec"
"path/filepath"
@@ -38,13 +38,13 @@ func install(ver, dir string) (string, error) {
}
defer resp.Body.Close()
- d, err := ioutil.ReadAll(resp.Body)
+ d, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
tarPath := filepath.Join(dir, "etcd.tar.gz")
- if err = ioutil.WriteFile(tarPath, d, fileutil.PrivateFileMode); err != nil {
+ if err = os.WriteFile(tarPath, d, fileutil.PrivateFileMode); err != nil {
return "", err
}
diff --git a/tools/etcd-dump-metrics/install_windows.go b/tools/etcd-dump-metrics/install_windows.go
index 07eb15b3209..71873a80b88 100644
--- a/tools/etcd-dump-metrics/install_windows.go
+++ b/tools/etcd-dump-metrics/install_windows.go
@@ -13,7 +13,6 @@
// limitations under the License.
//go:build windows
-// +build windows
package main
diff --git a/tools/etcd-dump-metrics/main.go b/tools/etcd-dump-metrics/main.go
index f074f5fe704..18d517e450e 100644
--- a/tools/etcd-dump-metrics/main.go
+++ b/tools/etcd-dump-metrics/main.go
@@ -18,23 +18,23 @@ package main
import (
"flag"
"fmt"
- "io/ioutil"
"net/url"
"os"
"os/exec"
"path/filepath"
"time"
- "go.etcd.io/etcd/server/v3/embed"
-
"go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
+ "go.etcd.io/etcd/server/v3/embed"
)
var lg *zap.Logger
func init() {
var err error
- lg, err = zap.NewProduction()
+ lg, err = logutil.CreateDefaultZapLogger(zap.InfoLevel)
if err != nil {
panic(err)
}
@@ -50,7 +50,11 @@ func main() {
panic("specify either 'addr' or 'download-ver'")
}
if *debug {
- lg = zap.NewExample()
+ var err error
+ lg, err = logutil.CreateDefaultZapLogger(zap.DebugLevel)
+ if err != nil {
+ panic(err)
+ }
}
ep := *addr
@@ -59,7 +63,7 @@ func main() {
ver := *downloadVer
// download release binary to temporary directory
- d, err := ioutil.TempDir(os.TempDir(), ver)
+ d, err := os.MkdirTemp(os.TempDir(), ver)
if err != nil {
panic(err)
}
diff --git a/tools/etcd-dump-metrics/metrics.go b/tools/etcd-dump-metrics/metrics.go
index e7acea29a7b..a1a40b18581 100644
--- a/tools/etcd-dump-metrics/metrics.go
+++ b/tools/etcd-dump-metrics/metrics.go
@@ -16,7 +16,7 @@ package main
import (
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"sort"
"strings"
@@ -38,7 +38,7 @@ func fetchMetrics(ep string) (lines []string, err error) {
return nil, err
}
defer resp.Body.Close()
- b, rerr := ioutil.ReadAll(resp.Body)
+ b, rerr := io.ReadAll(resp.Body)
if rerr != nil {
return nil, rerr
}
@@ -207,7 +207,6 @@ func parse(lines []string) (mss []metric) {
mss[i].grpcCodes = sortMap(codes)
mss[i].metrics = sortMap(metrics)
}
-
}
return mss
}
diff --git a/tools/local-tester/OWNERS b/tools/local-tester/OWNERS
new file mode 100644
index 00000000000..365ae7b38a9
--- /dev/null
+++ b/tools/local-tester/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+ - area/testing
diff --git a/tools/local-tester/README.md b/tools/local-tester/README.md
index 75e3e0aa950..cbd39e6f31e 100644
--- a/tools/local-tester/README.md
+++ b/tools/local-tester/README.md
@@ -11,7 +11,7 @@ local-tester depends on `goreman` to manage its processes and `bash` to run faul
local-tester needs `etcd`, `benchmark`, and `bridge` binaries. To build these binaries, run the following from the etcd repository root:
```sh
-./build.sh
+./scripts/build.sh
pushd tools/benchmark/ && go build && popd
pushd tools/local-tester/bridge && go build && popd
```
diff --git a/tools/local-tester/bridge/bridge.go b/tools/local-tester/bridge/bridge.go
index 77dd0e2861c..25ca7e01568 100644
--- a/tools/local-tester/bridge/bridge.go
+++ b/tools/local-tester/bridge/bridge.go
@@ -19,7 +19,6 @@ import (
"flag"
"fmt"
"io"
- "io/ioutil"
"log"
"math/rand"
"net"
@@ -74,7 +73,7 @@ func timeBridge(b *bridgeConn) {
func blackhole(b *bridgeConn) {
log.Println("blackholing connection", b.String())
- io.Copy(ioutil.Discard, b.in)
+ io.Copy(io.Discard, b.in)
b.Close()
}
@@ -187,8 +186,10 @@ type config struct {
rxDelay string
}
-type acceptFaultFunc func()
-type connFaultFunc func(*bridgeConn)
+type (
+ acceptFaultFunc func()
+ connFaultFunc func(*bridgeConn)
+)
func main() {
var cfg config
@@ -239,7 +240,6 @@ func main() {
log.Fatal(err)
}
l = newListener
-
}
acceptFaults = append(acceptFaults, f)
}
diff --git a/tools/local-tester/bridge/dispatch.go b/tools/local-tester/bridge/dispatch.go
index b385cefe07a..2aae79db664 100644
--- a/tools/local-tester/bridge/dispatch.go
+++ b/tools/local-tester/bridge/dispatch.go
@@ -70,7 +70,7 @@ func (d *dispatcherPool) flush() {
// sort by sockets; preserve the packet ordering within a socket
pktmap := make(map[io.Writer][]dispatchPacket)
- outs := []io.Writer{}
+ var outs []io.Writer
for _, pkt := range pkts {
opkts, ok := pktmap[pkt.out]
if !ok {
@@ -103,7 +103,7 @@ func (d *dispatcherPool) Copy(w io.Writer, f fetchFunc) error {
return err
}
- pkts := []dispatchPacket{}
+ var pkts []dispatchPacket
for len(b) > 0 {
pkt := b
if len(b) > dispatchPacketBytes {
diff --git a/tools/mod/doc.go b/tools/mod/doc.go
new file mode 100644
index 00000000000..e9cd697f803
--- /dev/null
+++ b/tools/mod/doc.go
@@ -0,0 +1,22 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// As this directory implements the pattern for tracking tool dependencies documented at
+// https://go.dev/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module, it does not
+// directly contain any valid Go source code. That would break the scripts for unit testing,
+// golangci-lint, and coverage calculation.
+//
+// Thus, to ensure those tools run normally, this empty file is added.
+
+package mod
diff --git a/tools/mod/go.mod b/tools/mod/go.mod
index 1173203fbae..e4d7c6a1d36 100644
--- a/tools/mod/go.mod
+++ b/tools/mod/go.mod
@@ -1,26 +1,234 @@
module go.etcd.io/etcd/tools/v3
-go 1.16
+go 1.23
+
+toolchain go1.23.4
require (
- github.com/akhenakh/hunspellgo v0.0.0-20160221122622-9db38fa26e19 // indirect
- github.com/alexkohler/nakedret v1.0.0
+ github.com/alexfalkowski/gocovmerge v1.3.17
+ github.com/appscodelabs/license-bill-of-materials v0.0.0-20220707232035-6018e0c5287c
github.com/chzchzchz/goword v0.0.0-20170907005317-a9744cb52b03
- github.com/coreos/license-bill-of-materials v0.0.0-20190913234955-13baff47494e
- github.com/go-openapi/loads v0.19.5 // indirect
- github.com/go-openapi/spec v0.19.9 // indirect
+ github.com/cloudflare/cfssl v1.6.5
github.com/gogo/protobuf v1.3.2
- github.com/gordonklaus/ineffassign v0.0.0-20200809085317-e36bfde3bb78
- github.com/grpc-ecosystem/grpc-gateway v1.14.6
- github.com/gyuho/gocovmerge v0.0.0-20171205171859-50c7e6afd535
- github.com/hexfusion/schwag v0.0.0-20170606222847-b7d0fc9aadaa
- github.com/mdempsky/unconvert v0.0.0-20200228143138-95ecdbfc0b5f
- github.com/mgechev/revive v1.0.2
- github.com/mikefarah/yq/v3 v3.0.0-20201125113350-f42728eef735
- github.com/trustmaster/go-aspell v0.0.0-20200701131845-c2b1f55bec8f // indirect
+ github.com/golangci/golangci-lint v1.62.2
+ github.com/google/addlicense v1.1.1
+ github.com/google/yamlfmt v0.14.0
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1
+ go.etcd.io/gofail v0.2.0
go.etcd.io/protodoc v0.0.0-20180829002748-484ab544e116
- google.golang.org/genproto v0.0.0-20201008135153-289734e2e40c // indirect
- gopkg.in/yaml.v2 v2.3.0 // indirect
- honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc
- mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7
+ go.etcd.io/raft/v3 v3.6.0-beta.0
+ gotest.tools/gotestsum v1.12.0
+ gotest.tools/v3 v3.5.1
+ honnef.co/go/tools v0.5.1
+)
+
+require (
+ 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect
+ 4d63.com/gochecknoglobals v0.2.1 // indirect
+ github.com/4meepo/tagalign v1.3.4 // indirect
+ github.com/Abirdcfly/dupword v0.1.3 // indirect
+ github.com/Antonboom/errname v1.0.0 // indirect
+ github.com/Antonboom/nilnil v1.0.0 // indirect
+ github.com/Antonboom/testifylint v1.5.2 // indirect
+ github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
+ github.com/Crocmagnon/fatcontext v0.5.3 // indirect
+ github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
+ github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 // indirect
+ github.com/Masterminds/semver/v3 v3.3.0 // indirect
+ github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect
+ github.com/akhenakh/hunspellgo v0.0.0-20160221122622-9db38fa26e19 // indirect
+ github.com/alecthomas/assert/v2 v2.3.0 // indirect
+ github.com/alecthomas/go-check-sumtype v0.2.0 // indirect
+ github.com/alecthomas/repr v0.4.0 // indirect
+ github.com/alexkohler/nakedret/v2 v2.0.5 // indirect
+ github.com/alexkohler/prealloc v1.0.0 // indirect
+ github.com/alingse/asasalint v0.0.11 // indirect
+ github.com/ashanbrown/forbidigo v1.6.0 // indirect
+ github.com/ashanbrown/makezero v1.1.1 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/bitfield/gotestdox v0.2.2 // indirect
+ github.com/bkielbasa/cyclop v1.2.3 // indirect
+ github.com/blizzy78/varnamelen v0.8.0 // indirect
+ github.com/bmatcuk/doublestar/v4 v4.6.0 // indirect
+ github.com/bombsimon/wsl/v4 v4.4.1 // indirect
+ github.com/braydonk/yaml v0.7.0 // indirect
+ github.com/breml/bidichk v0.3.2 // indirect
+ github.com/breml/errchkjson v0.4.0 // indirect
+ github.com/butuzov/ireturn v0.3.0 // indirect
+ github.com/butuzov/mirror v1.2.0 // indirect
+ github.com/catenacyber/perfsprint v0.7.1 // indirect
+ github.com/ccojocar/zxcvbn-go v1.0.2 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/charithe/durationcheck v0.0.10 // indirect
+ github.com/chavacava/garif v0.1.0 // indirect
+ github.com/ckaznocha/intrange v0.2.1 // indirect
+ github.com/curioswitch/go-reassign v0.2.0 // indirect
+ github.com/daixiang0/gci v0.13.5 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/denis-tingaikin/go-header v0.5.0 // indirect
+ github.com/dnephin/pflag v1.0.7 // indirect
+ github.com/ettle/strcase v0.2.0 // indirect
+ github.com/fatih/color v1.18.0 // indirect
+ github.com/fatih/structtag v1.2.0 // indirect
+ github.com/firefart/nonamedreturns v1.0.5 // indirect
+ github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/fzipp/gocyclo v0.6.0 // indirect
+ github.com/ghostiam/protogetter v0.3.8 // indirect
+ github.com/go-critic/go-critic v0.11.5 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-sql-driver/mysql v1.7.1 // indirect
+ github.com/go-toolsmith/astcast v1.1.0 // indirect
+ github.com/go-toolsmith/astcopy v1.1.0 // indirect
+ github.com/go-toolsmith/astequal v1.2.0 // indirect
+ github.com/go-toolsmith/astfmt v1.1.0 // indirect
+ github.com/go-toolsmith/astp v1.1.0 // indirect
+ github.com/go-toolsmith/strparse v1.1.0 // indirect
+ github.com/go-toolsmith/typep v1.1.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
+ github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect
+ github.com/gobwas/glob v0.2.3 // indirect
+ github.com/gofrs/flock v0.12.1 // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
+ github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
+ github.com/golangci/go-printf-func-name v0.1.0 // indirect
+ github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 // indirect
+ github.com/golangci/misspell v0.6.0 // indirect
+ github.com/golangci/modinfo v0.3.4 // indirect
+ github.com/golangci/plugin-module-register v0.1.1 // indirect
+ github.com/golangci/revgrep v0.5.3 // indirect
+ github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect
+ github.com/google/certificate-transparency-go v1.1.7 // indirect
+ github.com/google/go-cmp v0.6.0 // indirect
+ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
+ github.com/gordonklaus/ineffassign v0.1.0 // indirect
+ github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
+ github.com/gostaticanalysis/comment v1.4.2 // indirect
+ github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect
+ github.com/gostaticanalysis/nilerr v0.1.1 // indirect
+ github.com/hashicorp/go-version v1.7.0 // indirect
+ github.com/hashicorp/hcl v1.0.0 // indirect
+ github.com/hexops/gotextdiff v1.0.3 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/jgautheron/goconst v1.7.1 // indirect
+ github.com/jingyugao/rowserrcheck v1.1.1 // indirect
+ github.com/jjti/go-spancheck v0.6.2 // indirect
+ github.com/jmhodges/clock v1.2.0 // indirect
+ github.com/jmoiron/sqlx v1.3.5 // indirect
+ github.com/julz/importas v0.1.0 // indirect
+ github.com/karamaru-alpha/copyloopvar v1.1.0 // indirect
+ github.com/kisielk/errcheck v1.8.0 // indirect
+ github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46 // indirect
+ github.com/kkHAIKE/contextcheck v1.1.5 // indirect
+ github.com/kulti/thelper v0.6.3 // indirect
+ github.com/kunwardeep/paralleltest v1.0.10 // indirect
+ github.com/kyoh86/exportloopref v0.1.11 // indirect
+ github.com/lasiar/canonicalheader v1.1.2 // indirect
+ github.com/ldez/gomoddirectives v0.2.4 // indirect
+ github.com/ldez/tagliatelle v0.5.0 // indirect
+ github.com/leonklingele/grouper v1.1.2 // indirect
+ github.com/lib/pq v1.10.9 // indirect
+ github.com/macabu/inamedparam v0.1.3 // indirect
+ github.com/magiconair/properties v1.8.7 // indirect
+ github.com/maratori/testableexamples v1.0.0 // indirect
+ github.com/maratori/testpackage v1.1.1 // indirect
+ github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect
+ github.com/mattn/go-colorable v0.1.13 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mattn/go-runewidth v0.0.16 // indirect
+ github.com/mattn/go-sqlite3 v1.14.22 // indirect
+ github.com/mgechev/revive v1.5.1 // indirect
+ github.com/mitchellh/go-homedir v1.1.0 // indirect
+ github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/moricho/tparallel v0.3.2 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/nakabonne/nestif v0.3.1 // indirect
+ github.com/nishanths/exhaustive v0.12.0 // indirect
+ github.com/nishanths/predeclared v0.2.2 // indirect
+ github.com/nunnatsa/ginkgolinter v0.18.3 // indirect
+ github.com/olekukonko/tablewriter v0.0.5 // indirect
+ github.com/pelletier/go-toml v1.9.5 // indirect
+ github.com/pelletier/go-toml/v2 v2.2.3 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/polyfloyd/go-errorlint v1.7.0 // indirect
+ github.com/prometheus/client_golang v1.20.5 // indirect
+ github.com/prometheus/client_model v0.6.1 // indirect
+ github.com/prometheus/common v0.61.0 // indirect
+ github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 // indirect
+ github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect
+ github.com/quasilyte/gogrep v0.5.0 // indirect
+ github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect
+ github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
+ github.com/raeperd/recvcheck v0.1.2 // indirect
+ github.com/rivo/uniseg v0.4.7 // indirect
+ github.com/rogpeppe/go-internal v1.13.1 // indirect
+ github.com/ryancurrah/gomodguard v1.3.5 // indirect
+ github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect
+ github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect
+ github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect
+ github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect
+ github.com/sashamelentyev/interfacebloat v1.1.0 // indirect
+ github.com/sashamelentyev/usestdlibvars v1.27.0 // indirect
+ github.com/securego/gosec/v2 v2.21.4 // indirect
+ github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
+ github.com/sirupsen/logrus v1.9.3 // indirect
+ github.com/sivchari/containedctx v1.0.3 // indirect
+ github.com/sivchari/tenv v1.12.1 // indirect
+ github.com/sonatard/noctx v0.1.0 // indirect
+ github.com/sourcegraph/go-diff v0.7.0 // indirect
+ github.com/spf13/afero v1.11.0 // indirect
+ github.com/spf13/cast v1.5.0 // indirect
+ github.com/spf13/cobra v1.8.1 // indirect
+ github.com/spf13/jwalterweatherman v1.1.0 // indirect
+ github.com/spf13/pflag v1.0.5 // indirect
+ github.com/spf13/viper v1.12.0 // indirect
+ github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect
+ github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect
+ github.com/stretchr/objx v0.5.2 // indirect
+ github.com/stretchr/testify v1.10.0 // indirect
+ github.com/subosito/gotenv v1.4.1 // indirect
+ github.com/tdakkota/asciicheck v0.2.0 // indirect
+ github.com/tetafro/godot v1.4.18 // indirect
+ github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect
+ github.com/timonwong/loggercheck v0.10.1 // indirect
+ github.com/tomarrell/wrapcheck/v2 v2.9.0 // indirect
+ github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect
+ github.com/trustmaster/go-aspell v0.0.0-20200701131845-c2b1f55bec8f // indirect
+ github.com/ultraware/funlen v0.1.0 // indirect
+ github.com/ultraware/whitespace v0.1.1 // indirect
+ github.com/uudashr/gocognit v1.1.3 // indirect
+ github.com/uudashr/iface v1.2.1 // indirect
+ github.com/weppos/publicsuffix-go v0.30.0 // indirect
+ github.com/xen0n/gosmopolitan v1.2.2 // indirect
+ github.com/yagipy/maintidx v1.0.0 // indirect
+ github.com/yeya24/promlinter v0.3.0 // indirect
+ github.com/ykadowak/zerologlint v0.1.5 // indirect
+ github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300 // indirect
+ github.com/zmap/zlint/v3 v3.5.0 // indirect
+ gitlab.com/bosi/decorder v0.4.2 // indirect
+ go-simpler.org/musttag v0.13.0 // indirect
+ go-simpler.org/sloglint v0.7.2 // indirect
+ go.uber.org/automaxprocs v1.6.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ go.uber.org/zap v1.27.0 // indirect
+ golang.org/x/crypto v0.31.0 // indirect
+ golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect
+ golang.org/x/exp/typeparams v0.0.0-20241108190413-2d47ceb2692f // indirect
+ golang.org/x/mod v0.22.0 // indirect
+ golang.org/x/net v0.33.0 // indirect
+ golang.org/x/sync v0.10.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/term v0.27.0 // indirect
+ golang.org/x/text v0.21.0 // indirect
+ golang.org/x/tools v0.28.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb // indirect
+ google.golang.org/grpc v1.69.2 // indirect
+ google.golang.org/protobuf v1.36.1 // indirect
+ gopkg.in/ini.v1 v1.67.0 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+ k8s.io/klog/v2 v2.100.1 // indirect
+ mvdan.cc/gofumpt v0.7.0 // indirect
+ mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f // indirect
)
diff --git a/tools/mod/go.sum b/tools/mod/go.sum
index e5fcb2430a6..ff2d7c7393f 100644
--- a/tools/mod/go.sum
+++ b/tools/mod/go.sum
@@ -1,435 +1,722 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+4d63.com/gocheckcompilerdirectives v1.2.1 h1:AHcMYuw56NPjq/2y615IGg2kYkBdTvOaojYCBcRE7MA=
+4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs=
+4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc=
+4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU=
+github.com/4meepo/tagalign v1.3.4 h1:P51VcvBnf04YkHzjfclN6BbsopfJR5rxs1n+5zHt+w8=
+github.com/4meepo/tagalign v1.3.4/go.mod h1:M+pnkHH2vG8+qhE5bVc/zeP7HS/j910Fwa9TUSyZVI0=
+github.com/Abirdcfly/dupword v0.1.3 h1:9Pa1NuAsZvpFPi9Pqkd93I7LIYRURj+A//dFd5tgBeE=
+github.com/Abirdcfly/dupword v0.1.3/go.mod h1:8VbB2t7e10KRNdwTVoxdBaxla6avbhGzb8sCTygUMhw=
+github.com/Antonboom/errname v1.0.0 h1:oJOOWR07vS1kRusl6YRSlat7HFnb3mSfMl6sDMRoTBA=
+github.com/Antonboom/errname v1.0.0/go.mod h1:gMOBFzK/vrTiXN9Oh+HFs+e6Ndl0eTFbtsRTSRdXyGI=
+github.com/Antonboom/nilnil v1.0.0 h1:n+v+B12dsE5tbAqRODXmEKfZv9j2KcTBrp+LkoM4HZk=
+github.com/Antonboom/nilnil v1.0.0/go.mod h1:fDJ1FSFoLN6yoG65ANb1WihItf6qt9PJVTn/s2IrcII=
+github.com/Antonboom/testifylint v1.5.2 h1:4s3Xhuv5AvdIgbd8wOOEeo0uZG7PbDKQyKY5lGoQazk=
+github.com/Antonboom/testifylint v1.5.2/go.mod h1:vxy8VJ0bc6NavlYqjZfmp6EfqXMtBgQ4+mhCojwC1P8=
+github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
+github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/Crocmagnon/fatcontext v0.5.3 h1:zCh/wjc9oyeF+Gmp+V60wetm8ph2tlsxocgg/J0hOps=
+github.com/Crocmagnon/fatcontext v0.5.3/go.mod h1:XoCQYY1J+XTfyv74qLXvNw4xFunr3L1wkopIIKG7wGM=
+github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=
+github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
+github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 h1:/fTUt5vmbkAcMBt4YQiuC23cV0kEsN1MVMNqeOW43cU=
+github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0/go.mod h1:ONJg5sxcbsdQQ4pOW8TGdTidT2TMAUy/2Xhr8mrYaao=
+github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
+github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
+github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA=
+github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ=
github.com/akhenakh/hunspellgo v0.0.0-20160221122622-9db38fa26e19 h1:bYOD6QJnBJY79MJQR1i9cyQePG5oNDZXDKL2bhN/uvE=
github.com/akhenakh/hunspellgo v0.0.0-20160221122622-9db38fa26e19/go.mod h1:HcqyLXmWoESd/vPSbCPqvgw5l5cMM5PtoqFOnXLjSeM=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alexkohler/nakedret v1.0.0 h1:S/bzOFhZHYUJp6qPmdXdFHS5nlWGFmLmoc8QOydvotE=
-github.com/alexkohler/nakedret v1.0.0/go.mod h1:tfDQbtPt67HhBK/6P0yNktIX7peCxfOp0jO9007DrLE=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 h1:zV3ejI06GQ59hwDQAvmK1qxOQGB3WuVTRoY0okPTAv0=
-github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0=
+github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ=
+github.com/alecthomas/go-check-sumtype v0.2.0 h1:Bo+e4DFf3rs7ME9w/0SU/g6nmzJaphduP8Cjiz0gbwY=
+github.com/alecthomas/go-check-sumtype v0.2.0/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ=
+github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
+github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
+github.com/alexfalkowski/gocovmerge v1.3.17 h1:9ovtNsC1Kkv5If24T4d0FJWgdZ0aHVSx8KMahBi/5Qw=
+github.com/alexfalkowski/gocovmerge v1.3.17/go.mod h1:yEWbBk7DnFvHVl9AMJwTZEOwLdnYveEHc1S9SDphQyc=
+github.com/alexkohler/nakedret/v2 v2.0.5 h1:fP5qLgtwbx9EJE8dGEERT02YwS8En4r9nnZ71RK+EVU=
+github.com/alexkohler/nakedret/v2 v2.0.5/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU=
+github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw=
+github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
+github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw=
+github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I=
+github.com/appscodelabs/license-bill-of-materials v0.0.0-20220707232035-6018e0c5287c h1:xv0ICJ4AO52aNZ+vI2KFUYZBMh7dHvROixZ1vzMMfu8=
+github.com/appscodelabs/license-bill-of-materials v0.0.0-20220707232035-6018e0c5287c/go.mod h1:Y5/1I+0gnnhHKyX4z65mgaGTJ08tnz9WUgkoymA/cws=
+github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY=
+github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU=
+github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s=
+github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bitfield/gotestdox v0.2.2 h1:x6RcPAbBbErKLnapz1QeAlf3ospg8efBsedU93CDsnE=
+github.com/bitfield/gotestdox v0.2.2/go.mod h1:D+gwtS0urjBrzguAkTM2wodsTQYFHdpx8eqRJ3N+9pY=
+github.com/bkielbasa/cyclop v1.2.3 h1:faIVMIGDIANuGPWH031CZJTi2ymOQBULs9H21HSMa5w=
+github.com/bkielbasa/cyclop v1.2.3/go.mod h1:kHTwA9Q0uZqOADdupvcFJQtp/ksSnytRMe8ztxG8Fuo=
+github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M=
+github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k=
+github.com/bmatcuk/doublestar/v4 v4.0.2/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
+github.com/bmatcuk/doublestar/v4 v4.6.0 h1:HTuxyug8GyFbRkrffIpzNCSK4luc0TY3wzXvzIZhEXc=
+github.com/bmatcuk/doublestar/v4 v4.6.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
+github.com/bombsimon/wsl/v4 v4.4.1 h1:jfUaCkN+aUpobrMO24zwyAMwMAV5eSziCkOKEauOLdw=
+github.com/bombsimon/wsl/v4 v4.4.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo=
+github.com/braydonk/yaml v0.7.0 h1:ySkqO7r0MGoCNhiRJqE0Xe9yhINMyvOAB3nFjgyJn2k=
+github.com/braydonk/yaml v0.7.0/go.mod h1:hcm3h581tudlirk8XEUPDBAimBPbmnL0Y45hCRl47N4=
+github.com/breml/bidichk v0.3.2 h1:xV4flJ9V5xWTqxL+/PMFF6dtJPvZLPsyixAoPe8BGJs=
+github.com/breml/bidichk v0.3.2/go.mod h1:VzFLBxuYtT23z5+iVkamXO386OB+/sVwZOpIj6zXGos=
+github.com/breml/errchkjson v0.4.0 h1:gftf6uWZMtIa/Is3XJgibewBm2ksAQSY/kABDNFTAdk=
+github.com/breml/errchkjson v0.4.0/go.mod h1:AuBOSTHyLSaaAFlWsRSuRBIroCh3eh7ZHh5YeelDIk8=
+github.com/butuzov/ireturn v0.3.0 h1:hTjMqWw3y5JC3kpnC5vXmFJAWI/m31jaCYQqzkS6PL0=
+github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA=
+github.com/butuzov/mirror v1.2.0 h1:9YVK1qIjNspaqWutSv8gsge2e/Xpq1eqEkslEUHy5cs=
+github.com/butuzov/mirror v1.2.0/go.mod h1:DqZZDtzm42wIAIyHXeN8W/qb1EPlb9Qn/if9icBOpdQ=
+github.com/catenacyber/perfsprint v0.7.1 h1:PGW5G/Kxn+YrN04cRAZKC+ZuvlVwolYMrIyyTJ/rMmc=
+github.com/catenacyber/perfsprint v0.7.1/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50=
+github.com/ccojocar/zxcvbn-go v1.0.2 h1:na/czXU8RrhXO4EZme6eQJLR4PzcGsahsBOAwU6I3Vg=
+github.com/ccojocar/zxcvbn-go v1.0.2/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4=
+github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ=
+github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc=
+github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww=
github.com/chzchzchz/goword v0.0.0-20170907005317-a9744cb52b03 h1:0wUHjDfbCAROEAZ96zAJGwcNMkPIheFaIjtQyv3QqfM=
github.com/chzchzchz/goword v0.0.0-20170907005317-a9744cb52b03/go.mod h1:uFE9hX+zXEwvyUThZ4gDb9vkAwc5DoHUnRSEpH0VrOs=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/license-bill-of-materials v0.0.0-20190913234955-13baff47494e h1:vHRufSa2k8tfkcDdia1vJFa+oiBvvPxW94mg76PPAoA=
-github.com/coreos/license-bill-of-materials v0.0.0-20190913234955-13baff47494e/go.mod h1:4xMOusJ7xxc84WclVxKT8+lNfGYDwojOUC2OQNCwcj4=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/ckaznocha/intrange v0.2.1 h1:M07spnNEQoALOJhwrImSrJLaxwuiQK+hA2DeajBlwYk=
+github.com/ckaznocha/intrange v0.2.1/go.mod h1:7NEhVyf8fzZO5Ds7CRaqPEm52Ut83hsTiL5zbER/HYk=
+github.com/cloudflare/cfssl v1.6.5 h1:46zpNkm6dlNkMZH/wMW22ejih6gIaJbzL2du6vD7ZeI=
+github.com/cloudflare/cfssl v1.6.5/go.mod h1:Bk1si7sq8h2+yVEDrFJiz3d7Aw+pfjjJSZVaD+Taky4=
+github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
+github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo=
+github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc=
+github.com/daixiang0/gci v0.13.5 h1:kThgmH1yBmZSBCh1EJVxQ7JsHpm5Oms0AMed/0LaH4c=
+github.com/daixiang0/gci v0.13.5/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
-github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8=
+github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY=
+github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk=
+github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE=
+github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q=
+github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A=
+github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
-github.com/go-openapi/analysis v0.19.10 h1:5BHISBAXOc/aJK25irLZnx2D3s6WyYaY9D4gmuz9fdE=
-github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ=
-github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
-github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
-github.com/go-openapi/errors v0.19.3 h1:7MGZI1ibQDLasvAz8HuhvYk9eNJbJkCOXWsSjjMS+Zc=
-github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
-github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/jsonreference v0.19.4 h1:3Vw+rh13uq2JFNxgnMTGE1rnoieU9FmyE1gvnyylsYg=
-github.com/go-openapi/jsonreference v0.19.4/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
-github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
-github.com/go-openapi/loads v0.19.5 h1:jZVYWawIQiA1NBnHla28ktg6hrcfTHsCE+3QLVRBIls=
-github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY=
-github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
-github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
-github.com/go-openapi/spec v0.19.9 h1:9z9cbFuZJ7AcvOHKIY+f6Aevb4vObNDkTEyoMfO7rAc=
-github.com/go-openapi/spec v0.19.9/go.mod h1:vqK/dIdLGCosfvYsQV3WfC7N3TiZSnGY2RZKoFK7X28=
-github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
-github.com/go-openapi/strfmt v0.19.4 h1:eRvaqAhpL0IL6Trh5fDsGnGhiXndzHFuA05w6sXH6/g=
-github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
-github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.7 h1:VRuXN2EnMSsZdauzdss6JBC29YotDqG59BZ+tdlIL1s=
-github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
-github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
-github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
-github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
-github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
-github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
-github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
-github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
-github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
-github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
-github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
-github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
-github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
-github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
-github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
-github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
-github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
-github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
-github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
-github.com/goccy/go-yaml v1.8.1 h1:JuZRFlqLM5cWF6A+waL8AKVuCcqvKOuhJtUQI+L3ez0=
-github.com/goccy/go-yaml v1.8.1/go.mod h1:wS4gNoLalDSJxo/SpngzPQ2BN4uuZVLCmbM4S3vd4+Y=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/firefart/nonamedreturns v1.0.5 h1:tM+Me2ZaXs8tfdDw3X6DOX++wMCOqzYUho6tUTYIdRA=
+github.com/firefart/nonamedreturns v1.0.5/go.mod h1:gHJjDqhGM4WyPt639SOZs+G89Ko7QKH5R5BhnO6xJhw=
+github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
+github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
+github.com/ghostiam/protogetter v0.3.8 h1:LYcXbYvybUyTIxN2Mj9h6rHrDZBDwZloPoKctWrFyJY=
+github.com/ghostiam/protogetter v0.3.8/go.mod h1:WZ0nw9pfzsgxuRsPOFQomgDVSWtDLJRfQJEhsGbmQMA=
+github.com/go-critic/go-critic v0.11.5 h1:TkDTOn5v7EEngMxu8KbuFqFR43USaaH8XRJLz1jhVYA=
+github.com/go-critic/go-critic v0.11.5/go.mod h1:wu6U7ny9PiaHaZHcvMDmdysMqvDem162Rh3zWTrqk8M=
+github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
+github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8=
+github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU=
+github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s=
+github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw=
+github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4=
+github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ=
+github.com/go-toolsmith/astequal v1.2.0 h1:3Fs3CYZ1k9Vo4FzFhwwewC3CHISHDnVUPC4x0bI2+Cw=
+github.com/go-toolsmith/astequal v1.2.0/go.mod h1:c8NZ3+kSFtFY/8lPso4v8LuJjdJiUFVnSuU3s0qrrDY=
+github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco=
+github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4=
+github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA=
+github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA=
+github.com/go-toolsmith/pkgload v1.2.2 h1:0CtmHq/02QhxcF7E9N5LIFcYFsMR5rdovfqTtRKkgIk=
+github.com/go-toolsmith/pkgload v1.2.2/go.mod h1:R2hxLNRKuAsiXCo2i5J6ZQPhnPMOVtU+f0arbFPWCus=
+github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
+github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw=
+github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ=
+github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus=
+github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U=
+github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
+github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gordonklaus/ineffassign v0.0.0-20200809085317-e36bfde3bb78 h1:U/zHjaVG/sECz5xhnh7kPH+Fv/maPbhZPcaTquo5sPg=
-github.com/gordonklaus/ineffassign v0.0.0-20200809085317-e36bfde3bb78/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.14.6 h1:8ERzHx8aj1Sc47mu9n/AksaKCSWrMchFtkdrS4BIj5o=
-github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw=
-github.com/gyuho/gocovmerge v0.0.0-20171205171859-50c7e6afd535 h1:BGeD3v3lyKZy+ocGtprXiDXjIiXvZDfuyII7Lym7GbQ=
-github.com/gyuho/gocovmerge v0.0.0-20171205171859-50c7e6afd535/go.mod h1:xV7b0Cn2irnP1jU+mMYvqPAPuFPNjtgB+rvKu/dLIz4=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=
+github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
+github.com/golangci/go-printf-func-name v0.1.0 h1:dVokQP+NMTO7jwO4bwsRwLWeudOVUPPyAKJuzv8pEJU=
+github.com/golangci/go-printf-func-name v0.1.0/go.mod h1:wqhWFH5mUdJQhweRnldEywnR5021wTdZSNgwYceV14s=
+github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 h1:/1322Qns6BtQxUZDTAT4SdcoxknUki7IAoK4SAXr8ME=
+github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9/go.mod h1:Oesb/0uFAyWoaw1U1qS5zyjCg5NP9C9iwjnI4tIsXEE=
+github.com/golangci/golangci-lint v1.62.2 h1:b8K5K9PN+rZN1+mKLtsZHz2XXS9aYKzQ9i25x3Qnxxw=
+github.com/golangci/golangci-lint v1.62.2/go.mod h1:ILWWyeFUrctpHVGMa1dg2xZPKoMUTc5OIMgW7HZr34g=
+github.com/golangci/misspell v0.6.0 h1:JCle2HUTNWirNlDIAUO44hUsKhOFqGPoC4LZxlaSXDs=
+github.com/golangci/misspell v0.6.0/go.mod h1:keMNyY6R9isGaSAu+4Q8NMBwMPkh15Gtc8UCVoDtAWo=
+github.com/golangci/modinfo v0.3.4 h1:oU5huX3fbxqQXdfspamej74DFX0kyGLkw1ppvXoJ8GA=
+github.com/golangci/modinfo v0.3.4/go.mod h1:wytF1M5xl9u0ij8YSvhkEVPP3M5Mc7XLl1pxH3B2aUM=
+github.com/golangci/plugin-module-register v0.1.1 h1:TCmesur25LnyJkpsVrupv1Cdzo+2f7zX0H6Jkw1Ol6c=
+github.com/golangci/plugin-module-register v0.1.1/go.mod h1:TTpqoB6KkwOJMV8u7+NyXMrkwwESJLOkfl9TxR1DGFc=
+github.com/golangci/revgrep v0.5.3 h1:3tL7c1XBMtWHHqVpS5ChmiAAoe4PF/d5+ULzV9sLAzs=
+github.com/golangci/revgrep v0.5.3/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k=
+github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed h1:IURFTjxeTfNFP0hTEi1YKjB/ub8zkpaOqFFMApi2EAs=
+github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed/go.mod h1:XLXN8bNw4CGRPaqgl3bv/lhz7bsGPh4/xSaMTbo2vkQ=
+github.com/google/addlicense v1.1.1 h1:jpVf9qPbU8rz5MxKo7d+RMcNHkqxi4YJi/laauX4aAE=
+github.com/google/addlicense v1.1.1/go.mod h1:Sm/DHu7Jk+T5miFHHehdIjbi4M5+dJDRS3Cq0rncIxA=
+github.com/google/certificate-transparency-go v1.1.7 h1:IASD+NtgSTJLPdzkthwvAG1ZVbF2WtFg4IvoA68XGSw=
+github.com/google/certificate-transparency-go v1.1.7/go.mod h1:FSSBo8fyMVgqptbfF6j5p/XNdgQftAhSmXcIxV9iphE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA=
+github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
+github.com/google/yamlfmt v0.14.0 h1:30Hm8+VfNqMhWfbkjqkHMyo1zzbxMFM6+2oz7Cey1BQ=
+github.com/google/yamlfmt v0.14.0/go.mod h1:KnrVZqRVSE3HUpaI9FfoaxYA71izVleMWPYX8s1S0KM=
+github.com/gordonklaus/ineffassign v0.1.0 h1:y2Gd/9I7MdY1oEIt+n+rowjBNDcLQq3RsH5hwJd0f9s=
+github.com/gordonklaus/ineffassign v0.1.0/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
+github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk=
+github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc=
+github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado=
+github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q=
+github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM=
+github.com/gostaticanalysis/forcetypeassert v0.1.0 h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70=
+github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak=
+github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk=
+github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A=
+github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M=
+github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY=
+github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ=
+github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
+github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hexfusion/schwag v0.0.0-20170606222847-b7d0fc9aadaa h1:oDcxzjIf33MTX7b8Eu7eO3a/z8mlTT+blyEoVxBmUUg=
-github.com/hexfusion/schwag v0.0.0-20170606222847-b7d0fc9aadaa/go.mod h1:wSgrm+n3LvHOVxUJo2ha5ffLqRmt6+oGoD6J/suB66c=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
-github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
+github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jgautheron/goconst v1.7.1 h1:VpdAG7Ca7yvvJk5n8dMwQhfEZJh95kl/Hl9S1OI5Jkk=
+github.com/jgautheron/goconst v1.7.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
+github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs=
+github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
+github.com/jjti/go-spancheck v0.6.2 h1:iYtoxqPMzHUPp7St+5yA8+cONdyXD3ug6KK15n7Pklk=
+github.com/jjti/go-spancheck v0.6.2/go.mod h1:+X7lvIrR5ZdUTkxFYqzJ0abr8Sb5LOo80uOhWNqIrYA=
+github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
+github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
+github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
+github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
+github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY=
+github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
+github.com/karamaru-alpha/copyloopvar v1.1.0 h1:x7gNyKcC2vRBO1H2Mks5u1VxQtYvFiym7fCjIP8RPos=
+github.com/karamaru-alpha/copyloopvar v1.1.0/go.mod h1:u7CIfztblY0jZLOQZgH3oYsJzpC2A7S6u/lfgSXHy0k=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/errcheck v1.8.0 h1:ZX/URYa7ilESY19ik/vBmCn6zdGQLxACwjAcWbHlYlg=
+github.com/kisielk/errcheck v1.8.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46 h1:veS9QfglfvqAw2e+eeNT/SbGySq8ajECXJ9e4fPoLhY=
+github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
+github.com/kkHAIKE/contextcheck v1.1.5 h1:CdnJh63tcDe53vG+RebdpdXJTc9atMgGqdx8LXxiilg=
+github.com/kkHAIKE/contextcheck v1.1.5/go.mod h1:O930cpht4xb1YQpK+1+AgoM3mFsvxr7uyFptcnWTYUA=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
-github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8=
-github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
-github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
-github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw=
-github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
-github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54=
-github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mdempsky/unconvert v0.0.0-20200228143138-95ecdbfc0b5f h1:Kc3s6QFyh9DLgInXpWKuG+8I7R7lXbnP7mcoOVIt6KY=
-github.com/mdempsky/unconvert v0.0.0-20200228143138-95ecdbfc0b5f/go.mod h1:AmCV4WB3cDMZqgPk+OUQKumliiQS4ZYsBt3AXekyuAU=
-github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 h1:QASJXOGm2RZ5Ardbc86qNFvby9AqkLDibfChMtAg5QM=
-github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg=
-github.com/mgechev/revive v1.0.2 h1:v0NxxQ7fSFz/u1NQydPo6EGdq7va0J1BtsZmae6kzUg=
-github.com/mgechev/revive v1.0.2/go.mod h1:rb0dQy1LVAxW9SWy5R3LPUjevzUbUS316U5MFySA2lo=
-github.com/mikefarah/yq/v3 v3.0.0-20201125113350-f42728eef735 h1:Qn41fatPrqv5qVpDFx+4ABF14LNj9jiNLm/BsrDb01U=
-github.com/mikefarah/yq/v3 v3.0.0-20201125113350-f42728eef735/go.mod h1:dYWq+UWoFCDY1TndvFUQuhBbIYmZpjreC8adEAx93zE=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs=
+github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I=
+github.com/kunwardeep/paralleltest v1.0.10 h1:wrodoaKYzS2mdNVnc4/w31YaXFtsc21PCTdvWJ/lDDs=
+github.com/kunwardeep/paralleltest v1.0.10/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY=
+github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ=
+github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA=
+github.com/lasiar/canonicalheader v1.1.2 h1:vZ5uqwvDbyJCnMhmFYimgMZnJMjwljN5VGY0VKbMXb4=
+github.com/lasiar/canonicalheader v1.1.2/go.mod h1:qJCeLFS0G/QlLQ506T+Fk/fWMa2VmBUiEI2cuMK4djI=
+github.com/ldez/gomoddirectives v0.2.4 h1:j3YjBIjEBbqZ0NKtBNzr8rtMHTOrLPeiwTkfUJZ3alg=
+github.com/ldez/gomoddirectives v0.2.4/go.mod h1:oWu9i62VcQDYp9EQ0ONTfqLNh+mDLWWDO+SO0qSQw5g=
+github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo=
+github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4=
+github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY=
+github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/macabu/inamedparam v0.1.3 h1:2tk/phHkMlEL/1GNe/Yf6kkR/hkcUdAEY3L0hjYV1Mk=
+github.com/macabu/inamedparam v0.1.3/go.mod h1:93FLICAIk/quk7eaPPQvbzihUdn/QkGDwIZEoLtpH6I=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI=
+github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE=
+github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1rNTU04=
+github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc=
+github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 h1:gWg6ZQ4JhDfJPqlo2srm/LN17lpybq15AryXIRcWYLE=
+github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
+github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE=
+github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
+github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/mgechev/revive v1.5.1 h1:hE+QPeq0/wIzJwOphdVyUJ82njdd8Khp4fUIHGZHW3M=
+github.com/mgechev/revive v1.5.1/go.mod h1:lC9AhkJIBs5zwx8wkudyHrU+IJkrEKmpCmGMnIJPk4o=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8=
-github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
-github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/moricho/tparallel v0.3.2 h1:odr8aZVFA3NZrNybggMkYO3rgPRcqjeQUlBBFVxKHTI=
+github.com/moricho/tparallel v0.3.2/go.mod h1:OQ+K3b4Ln3l2TZveGCywybl68glfLEwFGqvnjok8b+U=
+github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8=
+github.com/mreiferson/go-httpclient v0.0.0-20201222173833-5e475fde3a4d/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U=
+github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE=
+github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhKRf3Swg=
+github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs=
+github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk=
+github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c=
+github.com/nunnatsa/ginkgolinter v0.18.3 h1:WgS7X3zzmni3vwHSBhvSgqrRgUecN6PQUcfB0j1noDw=
+github.com/nunnatsa/ginkgolinter v0.18.3/go.mod h1:BE1xyB/PNtXXG1azrvrqJW5eFH0hSRylNzFy8QHPwzs=
+github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
+github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4=
+github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag=
+github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8=
+github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
+github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
+github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
+github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
+github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
+github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
+github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
+github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
+github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
+github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.5.2 h1:qLvObTrvO/XRCqmkKxUlOBc48bI3efyDuAZe25QiF0w=
-github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/polyfloyd/go-errorlint v1.7.0 h1:Zp6lzCK4hpBDj8y8a237YK4EPrMXQWvOe3nGoH4pFrU=
+github.com/polyfloyd/go-errorlint v1.7.0/go.mod h1:dGWKu85mGHnegQ2SWpEybFityCg3j7ZbwsVUxAOk9gY=
+github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
+github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
+github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 h1:+Wl/0aFp0hpuHM3H//KMft64WQ1yX9LdJY64Qm/gFCo=
+github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI=
+github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE=
+github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
+github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo=
+github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng=
+github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU=
+github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
+github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs=
+github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ=
+github.com/raeperd/recvcheck v0.1.2 h1:SjdquRsRXJc26eSonWIo8b7IMtKD3OAT2Lb5G3ZX1+4=
+github.com/raeperd/recvcheck v0.1.2/go.mod h1:n04eYkwIR0JbgD73wT8wL4JjPC3wm0nFtzBnWNocnYU=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryancurrah/gomodguard v1.3.5 h1:cShyguSwUEeC0jS7ylOiG/idnd1TpJ1LfHGpV3oJmPU=
+github.com/ryancurrah/gomodguard v1.3.5/go.mod h1:MXlEPQRxgfPQa62O8wzK3Ozbkv9Rkqr+wKjSxTdsNJE=
+github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU=
+github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ=
+github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI=
+github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs=
+github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc=
+github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
+github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4=
+github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
+github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw=
+github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ=
+github.com/sashamelentyev/usestdlibvars v1.27.0 h1:t/3jZpSXtRPRf2xr0m63i32ZrusyurIGT9E5wAvXQnI=
+github.com/sashamelentyev/usestdlibvars v1.27.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8=
+github.com/securego/gosec/v2 v2.21.4 h1:Le8MSj0PDmOnHJgUATjD96PaXRvCpKC+DGJvwyy0Mlk=
+github.com/securego/gosec/v2 v2.21.4/go.mod h1:Jtb/MwRQfRxCXyCm1rfM1BEiiiTfUOdyzzAhlr6lUTA=
+github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=
+github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
+github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
+github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
+github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE=
+github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4=
+github.com/sivchari/tenv v1.12.1 h1:+E0QzjktdnExv/wwsnnyk4oqZBUfuh89YMQT1cyuvSY=
+github.com/sivchari/tenv v1.12.1/go.mod h1:1LjSOUCc25snIr5n3DtGGrENhX3LuWefcplwVGC24mw=
+github.com/sonatard/noctx v0.1.0 h1:JjqOc2WN16ISWAjAk8M5ej0RfExEXtkEyExl2hLW+OM=
+github.com/sonatard/noctx v0.1.0/go.mod h1:0RvBxqY8D4j9cTTTWE8ylt2vqj2EPI8fHmrxHdsaZ2c=
+github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0=
+github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
+github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
+github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ=
+github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI=
+github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0=
+github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
+github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc=
+github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
-github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
+github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM=
+github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg=
+github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA=
+github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
+github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag=
+github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
+github.com/tetafro/godot v1.4.18 h1:ouX3XGiziKDypbpXqShBfnNLTSjR8r3/HVzrtJ+bHlI=
+github.com/tetafro/godot v1.4.18/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio=
+github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M=
+github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ=
+github.com/timonwong/loggercheck v0.10.1 h1:uVZYClxQFpw55eh+PIoqM7uAOHMrhVcDoWDery9R8Lg=
+github.com/timonwong/loggercheck v0.10.1/go.mod h1:HEAWU8djynujaAVX7QI65Myb8qgfcZ1uKbdpg3ZzKl8=
+github.com/tomarrell/wrapcheck/v2 v2.9.0 h1:801U2YCAjLhdN8zhZ/7tdjB3EnAoRlJHt/s+9hijLQ4=
+github.com/tomarrell/wrapcheck/v2 v2.9.0/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo=
+github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw=
+github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
github.com/trustmaster/go-aspell v0.0.0-20200701131845-c2b1f55bec8f h1:92ZQJRegaqnKjz9HY9an696Sw5EmAqRv0eie/U2IE6k=
github.com/trustmaster/go-aspell v0.0.0-20200701131845-c2b1f55bec8f/go.mod h1:wxUiQ1klFJmwnM41kQI7IT2g8jjOKbtuL54LdjkxAI0=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
-github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI=
+github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4=
+github.com/ultraware/whitespace v0.1.1 h1:bTPOGejYFulW3PkcrqkeQwOd6NKOOXvmGD9bo/Gk8VQ=
+github.com/ultraware/whitespace v0.1.1/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8=
+github.com/uudashr/gocognit v1.1.3 h1:l+a111VcDbKfynh+airAy/DJQKaXh2m9vkoysMPSZyM=
+github.com/uudashr/gocognit v1.1.3/go.mod h1:aKH8/e8xbTRBwjbCkwZ8qt4l2EpKXl31KMHgSS+lZ2U=
+github.com/uudashr/iface v1.2.1 h1:vHHyzAUmWZ64Olq6NZT3vg/z1Ws56kyPdBOd5kTXDF8=
+github.com/uudashr/iface v1.2.1/go.mod h1:4QvspiRd3JLPAEXBQ9AiZpLbJlrWWgRChOKDJEuQTdg=
+github.com/weppos/publicsuffix-go v0.12.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k=
+github.com/weppos/publicsuffix-go v0.13.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k=
+github.com/weppos/publicsuffix-go v0.30.0 h1:QHPZ2GRu/YE7cvejH9iyavPOkVCB4dNxp2ZvtT+vQLY=
+github.com/weppos/publicsuffix-go v0.30.0/go.mod h1:kBi8zwYnR0zrbm8RcuN1o9Fzgpnnn+btVN8uWPMyXAY=
+github.com/weppos/publicsuffix-go/publicsuffix/generator v0.0.0-20220927085643-dc0d00c92642/go.mod h1:GHfoeIdZLdZmLjMlzBftbTDntahTttUMWjxZwQJhULE=
+github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU=
+github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg=
+github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM=
+github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk=
+github.com/yeya24/promlinter v0.3.0 h1:JVDbMp08lVCP7Y6NP3qHroGAO6z2yGKQtS5JsjqtoFs=
+github.com/yeya24/promlinter v0.3.0/go.mod h1:cDfJQQYv9uYciW60QT0eeHlFodotkYZlL+YcPQN+mW4=
+github.com/ykadowak/zerologlint v0.1.5 h1:Gy/fMz1dFQN9JZTPjv1hxEk+sRWm05row04Yoolgdiw=
+github.com/ykadowak/zerologlint v0.1.5/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE=
+github.com/zmap/rc2 v0.0.0-20190804163417-abaa70531248/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE=
+github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is=
+github.com/zmap/zcertificate v0.0.1/go.mod h1:q0dlN54Jm4NVSSuzisusQY0hqDWvu92C+TWveAxiVWk=
+github.com/zmap/zcrypto v0.0.0-20201128221613-3719af1573cf/go.mod h1:aPM7r+JOkfL+9qSB4KbYjtoEzJqUK50EXkkJabeNJDQ=
+github.com/zmap/zcrypto v0.0.0-20201211161100-e54a5822fb7e/go.mod h1:aPM7r+JOkfL+9qSB4KbYjtoEzJqUK50EXkkJabeNJDQ=
+github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300 h1:DZH5n7L3L8RxKdSyJHZt7WePgwdhHnPhQFdQSJaHF+o=
+github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300/go.mod h1:mOd4yUMgn2fe2nV9KXsa9AyQBFZGzygVPovsZR+Rl5w=
+github.com/zmap/zlint/v3 v3.0.0/go.mod h1:paGwFySdHIBEMJ61YjoqT4h7Ge+fdYG4sUQhnTb1lJ8=
+github.com/zmap/zlint/v3 v3.5.0 h1:Eh2B5t6VKgVH0DFmTwOqE50POvyDhUaU9T2mJOe1vfQ=
+github.com/zmap/zlint/v3 v3.5.0/go.mod h1:JkNSrsDJ8F4VRtBZcYUQSvnWFL7utcjDIn+FE64mlBI=
+gitlab.com/bosi/decorder v0.4.2 h1:qbQaV3zgwnBZ4zPMhGLW4KZe7A7NwxEhJx39R3shffo=
+gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8=
+go-simpler.org/assert v0.9.0 h1:PfpmcSvL7yAnWyChSjOz6Sp6m9j5lyK8Ok9pEL31YkQ=
+go-simpler.org/assert v0.9.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28=
+go-simpler.org/musttag v0.13.0 h1:Q/YAW0AHvaoaIbsPj3bvEI5/QFP7w696IMUpnKXQfCE=
+go-simpler.org/musttag v0.13.0/go.mod h1:FTzIGeK6OkKlUDVpj0iQUXZLUO1Js9+mvykDQy9C5yM=
+go-simpler.org/sloglint v0.7.2 h1:Wc9Em/Zeuu7JYpl+oKoYOsQSy2X560aVueCW/m6IijY=
+go-simpler.org/sloglint v0.7.2/go.mod h1:US+9C80ppl7VsThQclkM7BkCHQAzuz8kHLsW3ppuluo=
+go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA=
+go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o=
go.etcd.io/protodoc v0.0.0-20180829002748-484ab544e116 h1:QQiUXlqz+d96jyNG71NE+IGTgOK6Xlhdx+PzvfbLHlQ=
go.etcd.io/protodoc v0.0.0-20180829002748-484ab544e116/go.mod h1:F9kog+iVAuvPJucb1dkYcDcbV0g4uyGEHllTP5NrXiw=
-go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
-go.mongodb.org/mongo-driver v1.3.0 h1:ew6uUIeJOo+qdUUv7LxFCUhtWmVv7ZV/Xuy4FAUsw2E=
-go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.etcd.io/raft/v3 v3.6.0-beta.0 h1:MZFQVjCQxPJj5K9oS69Y+atNvYnGNyOQBnroTdw56jQ=
+go.etcd.io/raft/v3 v3.6.0-beta.0/go.mod h1:C2JoekRXfvImSrk5GnqD0aZ3a+cGVRnyem9qqn2DCEw=
+go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
+go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
+golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk=
+golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY=
+golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
+golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
+golang.org/x/exp/typeparams v0.0.0-20241108190413-2d47ceb2692f h1:WTyX8eCCyfdqiPYkRGm0MqElSfYFH3yR1+rl/mct9sA=
+golang.org/x/exp/typeparams v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
+golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200225230052-807dcd883420/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ=
+golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
+golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
+golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
+golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
+golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
+golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20201008135153-289734e2e40c h1:dvSnJCVti7yKNAZrunMpEE4QLfy5b/UdEKt+UH39BW4=
-google.golang.org/genproto v0.0.0-20201008135153-289734e2e40c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb h1:B7GIB7sr443wZ/EAEl7VZjmh1V6qzkt5V+RYcUYtS1U=
+google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:E5//3O5ZIG2l71Xnt+P/CYUY8Bxs8E7WMoZ9tlcMbAY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb h1:3oy2tynMOP1QbTC0MsNNAV+Se8M2Bd0A5+x1QHyw+pI=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
+google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
+google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
+google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
-gopkg.in/go-playground/validator.v9 v9.30.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
-gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE=
-gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
-mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7 h1:kAREL6MPwpsk1/PQPFD3Eg7WAQR5mPTWZJaBiG5LDbY=
-mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7/go.mod h1:HGC5lll35J70Y5v7vCGb9oLhHoScFwkHDJm/05RdSTc=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools/gotestsum v1.12.0 h1:CmwtaGDkHxrZm4Ib0Vob89MTfpc3GrEFMJKovliPwGk=
+gotest.tools/gotestsum v1.12.0/go.mod h1:fAvqkSptospfSbQw26CTYzNwnsE/ztqLeyhP0h67ARY=
+gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
+gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
+honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I=
+honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs=
+k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
+k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+mvdan.cc/gofumpt v0.7.0 h1:bg91ttqXmi9y2xawvkuMXyvAA/1ZGJqYAEGjXuP0JXU=
+mvdan.cc/gofumpt v0.7.0/go.mod h1:txVFJy/Sc/mvaycET54pV8SW8gWxTlUuGHVEcncmNUo=
+mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f h1:lMpcwN6GxNbWtbpI1+xzFLSW8XzX0u72NttUGVFjO3U=
+mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f/go.mod h1:RSLa7mKKCNeTTMHBw5Hsy2rfJmd6O2ivt9Dw9ZqCQpQ=
diff --git a/tools/mod/install_all.sh b/tools/mod/install_all.sh
index 7de5fb01473..c32d6cb6f3b 100755
--- a/tools/mod/install_all.sh
+++ b/tools/mod/install_all.sh
@@ -1,4 +1,6 @@
#!/usr/bin/env bash
+set -euo pipefail
+
cd ./tools/mod || exit 2
-go list --tags tools -f '{{ join .Imports "\n" }}' | xargs gobin -p
+go list --tags tools -f '{{ join .Imports "\n" }}' | xargs go install
diff --git a/tools/mod/libs.go b/tools/mod/libs.go
index 0d05c21853a..fd392d55a9f 100644
--- a/tools/mod/libs.go
+++ b/tools/mod/libs.go
@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build libs
+//go:build libs
// This file implements that pattern:
-// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module
+// https://go.dev/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module
// for etcd. Thanks to this file 'go mod tidy' does not removes dependencies.
package libs
diff --git a/tools/mod/tools.go b/tools/mod/tools.go
index 9de5e6db3d3..ce9af775178 100644
--- a/tools/mod/tools.go
+++ b/tools/mod/tools.go
@@ -12,27 +12,29 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build tools
+//go:build tools
// This file implements that pattern:
-// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module
+// https://go.dev/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module
// for etcd. Thanks to this file 'go mod tidy' does not removes dependencies.
package tools
import (
- _ "github.com/alexkohler/nakedret"
+ _ "github.com/alexfalkowski/gocovmerge"
+ _ "github.com/appscodelabs/license-bill-of-materials"
_ "github.com/chzchzchz/goword"
- _ "github.com/coreos/license-bill-of-materials"
- _ "github.com/gordonklaus/ineffassign"
- _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway"
- _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger"
- _ "github.com/gyuho/gocovmerge"
- _ "github.com/hexfusion/schwag"
- _ "github.com/mdempsky/unconvert"
- _ "github.com/mgechev/revive"
- _ "github.com/mikefarah/yq/v3"
+ _ "github.com/cloudflare/cfssl/cmd/cfssl"
+ _ "github.com/cloudflare/cfssl/cmd/cfssljson"
+ _ "github.com/golangci/golangci-lint/cmd/golangci-lint"
+ _ "github.com/google/addlicense"
+ _ "github.com/google/yamlfmt/cmd/yamlfmt"
+ _ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway"
+ _ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2"
+ _ "go.etcd.io/gofail"
_ "go.etcd.io/protodoc"
+ _ "go.etcd.io/raft/v3"
+ _ "gotest.tools/gotestsum"
+ _ "gotest.tools/v3"
_ "honnef.co/go/tools/cmd/staticcheck"
- _ "mvdan.cc/unparam"
)
diff --git a/tools/proto-annotations/cmd/etcd_version.go b/tools/proto-annotations/cmd/etcd_version.go
new file mode 100644
index 00000000000..86766466adb
--- /dev/null
+++ b/tools/proto-annotations/cmd/etcd_version.go
@@ -0,0 +1,130 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+
+ "github.com/coreos/go-semver/semver"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+
+ "go.etcd.io/etcd/server/v3/storage/wal"
+)
+
+// externalPackages lists proto packages that are not expected to have an etcd_version annotation.
+var externalPackages = []string{
+ "io.prometheus.client",
+ "grpc.binarylog.v1",
+ "google.protobuf",
+ "google.rpc",
+ "google.api",
+ "raftpb",
+ "grpc.gateway.protoc_gen_swagger.options",
+ "grpc.gateway.protoc_gen_openapiv2.options",
+}
+
+// printEtcdVersion writes the etcd_version proto annotations to stdout and returns any errors encountered while reading them.
+func printEtcdVersion() []error {
+ var errs []error
+ annotations, err := allEtcdVersionAnnotations()
+ if err != nil {
+ errs = append(errs, err)
+ return errs
+ }
+ sort.Slice(annotations, func(i, j int) bool {
+ return annotations[i].fullName < annotations[j].fullName
+ })
+ output := &strings.Builder{}
+ for _, a := range annotations {
+ newErrs := a.Validate()
+ if len(newErrs) == 0 {
+ err := a.PrintLine(output)
+ if err != nil {
+ errs = append(errs, err)
+ return errs
+ }
+ }
+ errs = append(errs, newErrs...)
+ }
+ if len(errs) == 0 {
+ fmt.Print(output)
+ }
+ return errs
+}
+
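+// allEtcdVersionAnnotations collects etcd_version annotations from every protobuf
+// file registered in the global protobuf registry, skipping externalPackages.
+// Iteration stops at the first file that fails to be read.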
+func allEtcdVersionAnnotations() (annotations []etcdVersionAnnotation, err error) {
+ var fileAnnotations []etcdVersionAnnotation
+ protoregistry.GlobalFiles.RangeFiles(func(file protoreflect.FileDescriptor) bool {
+ pkg := string(file.Package())
+ for _, externalPkg := range externalPackages {
+ if pkg == externalPkg {
+ return true
+ }
+ }
+ fileAnnotations, err = fileEtcdVersionAnnotations(file)
+ if err != nil {
+ return false
+ }
+ annotations = append(annotations, fileAnnotations...)
+ return true
+ })
+ return annotations, err
+}
+
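+// fileEtcdVersionAnnotations visits every entity in a single file descriptor and
+// records the etcd_version annotation found for it (nil when the annotation is unset).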
+func fileEtcdVersionAnnotations(file protoreflect.FileDescriptor) (annotations []etcdVersionAnnotation, err error) {
+ err = wal.VisitFileDescriptor(file, func(path protoreflect.FullName, ver *semver.Version) error {
+ a := etcdVersionAnnotation{fullName: path, version: ver}
+ annotations = append(annotations, a)
+ return nil
+ })
+ return annotations, err
+}
+
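+// etcdVersionAnnotation pairs the full proto name of an entity with the etcd
+// version declared in its etcd_version annotation (nil when not annotated).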
+type etcdVersionAnnotation struct {
+ fullName protoreflect.FullName
+ version *semver.Version
+}
+
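+// Validate checks that the annotated version is well formed: a non-zero major
+// version, a zero patch version, and no prerelease or metadata suffix.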
+func (a etcdVersionAnnotation) Validate() (errs []error) {
+ if a.version == nil {
+ return nil
+ }
+ if a.version.Major == 0 {
+ errs = append(errs, fmt.Errorf("%s: etcd_version major version should not be zero", a.fullName))
+ }
+ if a.version.Patch != 0 {
+ errs = append(errs, fmt.Errorf("%s: etcd_version patch version should be zero", a.fullName))
+ }
+ if a.version.PreRelease != "" {
+ errs = append(errs, fmt.Errorf("%s: etcd_version should not be prerelease", a.fullName))
+ }
+ if a.version.Metadata != "" {
+ errs = append(errs, fmt.Errorf("%s: etcd_version should not have metadata", a.fullName))
+ }
+ return errs
+}
+
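+// PrintLine writes a single `fullName: "major.minor"` line to out, using an
+// empty string for entities without an etcd_version annotation.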
+func (a etcdVersionAnnotation) PrintLine(out io.Writer) error {
+ if a.version == nil {
+ _, err := fmt.Fprintf(out, "%s: \"\"\n", a.fullName)
+ return err
+ }
+ _, err := fmt.Fprintf(out, "%s: \"%d.%d\"\n", a.fullName, a.version.Major, a.version.Minor)
+ return err
+}
diff --git a/tools/proto-annotations/cmd/root.go b/tools/proto-annotations/cmd/root.go
new file mode 100644
index 00000000000..2b4ff9530a0
--- /dev/null
+++ b/tools/proto-annotations/cmd/root.go
@@ -0,0 +1,76 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/spf13/cobra"
+)
+
+const (
+ EtcdVersionAnnotation = "etcd_version"
+)
+
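+// RootCmd builds the cobra command for the proto-annotations tool. The
+// --annotation flag is required and selects which annotation to dump.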
+func RootCmd() *cobra.Command {
+ var annotation string
+ cmd := &cobra.Command{
+ Use: "proto-annotation",
+ Short: "Proto-annotations prints a dump of annotations used by all protobuf definitions used by Etcd.",
+ Long: `Tool used to extract values of a specific proto annotation used by protobuf definitions used by Etcd.
+Created to ensure that all newly introduced proto definitions have a etcd_version_* annotation, by analysing diffs between generated by this tool.
+
+Proto annotations is printed to stdout in format:
+: ""
+
+
+For example:
+'''
+etcdserverpb.Member: "3.0"
+etcdserverpb.Member.ID: ""
+etcdserverpb.Member.clientURLs: ""
+etcdserverpb.Member.isLearner: "3.4"
+etcdserverpb.Member.name: ""
+etcdserverpb.Member.peerURLs: ""
+'''
+
+Any errors in proto will be printed to stderr.
+`,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runProtoAnnotation(annotation)
+ },
+ }
+ cmd.Flags().StringVar(&annotation, "annotation", "", "Specify what proto annotation to read. Options: etcd_version")
+ cmd.MarkFlagRequired("annotation")
+ return cmd
+}
+
+func runProtoAnnotation(annotation string) error {
+ var errs []error
+ switch annotation {
+ case EtcdVersionAnnotation:
+ errs = printEtcdVersion()
+ default:
+ return fmt.Errorf("unknown annotation %q. Options: %q", annotation, EtcdVersionAnnotation)
+ }
+ if len(errs) != 0 {
+ for _, err := range errs {
+ fmt.Fprintln(os.Stderr, err)
+ }
+ return fmt.Errorf("failed reading anotation")
+ }
+ return nil
+}
diff --git a/tools/proto-annotations/main.go b/tools/proto-annotations/main.go
new file mode 100644
index 00000000000..a350ef82b92
--- /dev/null
+++ b/tools/proto-annotations/main.go
@@ -0,0 +1,29 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "go.etcd.io/etcd/v3/tools/proto-annotations/cmd"
+)
+
+func main() {
+ if err := cmd.RootCmd().Execute(); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+}
diff --git a/tools/rw-heatmaps/.gitignore b/tools/rw-heatmaps/.gitignore
new file mode 100644
index 00000000000..e92d21d11be
--- /dev/null
+++ b/tools/rw-heatmaps/.gitignore
@@ -0,0 +1,6 @@
+results-*.csv
+rw-heatmaps
+*.jpg
+*.jpeg
+*.png
+*.tiff
diff --git a/tools/rw-heatmaps/OWNERS b/tools/rw-heatmaps/OWNERS
new file mode 100644
index 00000000000..405c01e7893
--- /dev/null
+++ b/tools/rw-heatmaps/OWNERS
@@ -0,0 +1,7 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+ - ivanvc # Ivan Valdes
+
+labels:
+ - area/performance
diff --git a/tools/rw-heatmaps/README.md b/tools/rw-heatmaps/README.md
new file mode 100644
index 00000000000..a4dd7026dab
--- /dev/null
+++ b/tools/rw-heatmaps/README.md
@@ -0,0 +1,70 @@
+# etcd/tools/rw-heatmaps
+
+`etcd/tools/rw-heatmaps` is the mixed read/write performance evaluation tool for etcd clusters.
+
+## Installation
+
+Install the tool by running the following command from the etcd source directory.
+
+```sh
+ $ go install -v ./tools/rw-heatmaps
+```
+
+The installation places the executable in $GOPATH/bin. If the $GOPATH environment variable is not set, the tool is installed into $HOME/go/bin. You can find the installed location by running the following command from the etcd source directory. Make sure that $PATH is set accordingly in your environment.
+
+```sh
+ $ go list -f "{{.Target}}" ./tools/rw-heatmaps
+```
+
+Alternatively, you can run the tool directly from the etcd source directory without installing it:
+
+```sh
+ $ go run ./tools/rw-heatmaps
+```
+
+## Execute
+
+### Benchmark
+
+To get a mixed read/write performance evaluation result:
+```sh
+ # run with default configurations and specify the working directory
+ $ ./rw-benchmark.sh -w ${WORKING_DIR}
+```
+`rw-benchmark.sh` automatically uses the etcd binary compiled under the `etcd/bin/` directory.
+
+Note: the result CSV file is saved to the directory the script is run from, not to the working directory passed with `-w`. The working directory is where the etcd database is stored; it exists for scenarios where a different mounted disk is preferred for the database.
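+
+For example (the mount point is illustrative), to keep the etcd database on a separately mounted disk while collecting the result CSV in the current directory:
+
+```sh
+ $ ./rw-benchmark.sh -w /mnt/nvme0/rw-bench
+ # result-<timestamp>.csv is written to the current directory,
+ # while the etcd database lives under /mnt/nvme0/rw-bench
+```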
+
+### Plot Graphs
+
+To generate two images (read and write) based on the benchmark result CSV file:
+
+```sh
+ # to generate a pair of read & write images from one data csv file
+ $ rw-heatmaps ${CSV_FILE} -t ${IMAGE_TITLE} -o ${OUTPUT_IMAGE_NAME}
+```
+
+To generate two images (read and write) showing the performance difference from two result CSV files:
+
+```sh
+ # to generate a pair of read & write images from one data csv file
+ $ rw-heatmaps ${CSV_FILE1} ${CSV_FILE2} -t ${IMAGE_TITLE} -o ${OUTPUT_IMAGE_NAME}
+```
+
+To see the available options, use the `--help` flag.
+
+```sh
+ $ rw-heatmaps --help
+
+rw-heatmaps is a tool to generate read/write heatmap images for etcd3.
+
+Usage:
+ rw-heatmaps [input file(s) in csv format] [flags]
+
+Flags:
+ -h, --help help for rw-heatmaps
+ -f, --output-format string output image file format (default "jpg")
+ -o, --output-image-file string output image filename (required)
+ -t, --title string plot graph title (required)
+ --zero-centered plot the improvement graph with the white color representing 0.0 (default true)
+```
diff --git a/tools/rw-heatmaps/cmd/root.go b/tools/rw-heatmaps/cmd/root.go
new file mode 100644
index 00000000000..49b98a9fca2
--- /dev/null
+++ b/tools/rw-heatmaps/cmd/root.go
@@ -0,0 +1,105 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+
+ "go.etcd.io/etcd/tools/rw-heatmaps/v3/pkg/chart"
+ "go.etcd.io/etcd/tools/rw-heatmaps/v3/pkg/dataset"
+)
+
+var (
+ // ErrMissingTitleArg is returned when the title argument is missing.
+ ErrMissingTitleArg = fmt.Errorf("missing title argument")
+ // ErrMissingOutputImageFileArg is returned when the output image file argument is missing.
+ ErrMissingOutputImageFileArg = fmt.Errorf("missing output image file argument")
+ // ErrMissingInputFileArg is returned when the input file argument is missing.
+ ErrMissingInputFileArg = fmt.Errorf("missing input file argument")
+ // ErrInvalidOutputFormat is returned when the output format is invalid.
+ ErrInvalidOutputFormat = fmt.Errorf("invalid output format, must be one of png, jpg, jpeg, tiff")
+)
+
+// NewRootCommand returns the root command for the rw-heatmaps tool.
+func NewRootCommand() *cobra.Command {
+ o := newOptions()
+ rootCmd := &cobra.Command{
+ Use: "rw-heatmaps [input file(s) in csv format]",
+ Short: "A tool to generate read/write heatmaps for etcd3",
+ Long: "rw-heatmaps is a tool to generate read/write heatmaps images for etcd3.",
+ Args: cobra.RangeArgs(1, 2),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if err := o.Validate(); err != nil {
+ return err
+ }
+
+ datasets := make([]*dataset.DataSet, len(args))
+ for i, arg := range args {
+ var err error
+ if datasets[i], err = dataset.LoadCSVData(arg); err != nil {
+ return err
+ }
+ }
+
+ return chart.PlotHeatMaps(datasets, o.title, o.outputImageFile, o.outputFormat, o.zeroCentered)
+ },
+ }
+
+ o.AddFlags(rootCmd.Flags())
+ return rootCmd
+}
+
+// options holds the options for the command.
+type options struct {
+ title string
+ outputImageFile string
+ outputFormat string
+ zeroCentered bool
+}
+
+// newOptions returns a new options for the command with the default values applied.
+func newOptions() options {
+ return options{
+ outputFormat: "jpg",
+ zeroCentered: true,
+ }
+}
+
+// AddFlags sets the flags for the command.
+func (o *options) AddFlags(fs *pflag.FlagSet) {
+ fs.StringVarP(&o.title, "title", "t", o.title, "plot graph title (required)")
+ fs.StringVarP(&o.outputImageFile, "output-image-file", "o", o.outputImageFile, "output image filename (required)")
+ fs.StringVarP(&o.outputFormat, "output-format", "f", o.outputFormat, "output image file format")
+ fs.BoolVar(&o.zeroCentered, "zero-centered", o.zeroCentered, "plot the improvement graph with white color represents 0.0")
+}
+
+// Validate returns an error if the options are invalid.
+func (o *options) Validate() error {
+ if o.title == "" {
+ return ErrMissingTitleArg
+ }
+ if o.outputImageFile == "" {
+ return ErrMissingOutputImageFileArg
+ }
+ switch o.outputFormat {
+ case "png", "jpg", "jpeg", "tiff":
+ default:
+ return ErrInvalidOutputFormat
+ }
+ return nil
+}
diff --git a/tools/rw-heatmaps/go.mod b/tools/rw-heatmaps/go.mod
new file mode 100644
index 00000000000..48e40a982a9
--- /dev/null
+++ b/tools/rw-heatmaps/go.mod
@@ -0,0 +1,26 @@
+module go.etcd.io/etcd/tools/rw-heatmaps/v3
+
+go 1.23
+
+toolchain go1.23.4
+
+require (
+ github.com/spf13/cobra v1.8.1
+ github.com/spf13/pflag v1.0.5
+ gonum.org/v1/plot v0.14.0
+)
+
+require (
+ git.sr.ht/~sbinet/gg v0.5.0 // indirect
+ github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b // indirect
+ github.com/campoy/embedmd v1.0.0 // indirect
+ github.com/go-fonts/liberation v0.3.1 // indirect
+ github.com/go-latex/latex v0.0.0-20230307184459-12ec69307ad9 // indirect
+ github.com/go-pdf/fpdf v0.8.0 // indirect
+ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect
+ golang.org/x/image v0.18.0 // indirect
+ golang.org/x/text v0.21.0 // indirect
+)
diff --git a/tools/rw-heatmaps/go.sum b/tools/rw-heatmaps/go.sum
new file mode 100644
index 00000000000..379c4549f7e
--- /dev/null
+++ b/tools/rw-heatmaps/go.sum
@@ -0,0 +1,71 @@
+git.sr.ht/~sbinet/cmpimg v0.1.0 h1:E0zPRk2muWuCqSKSVZIWsgtU9pjsw3eKHi8VmQeScxo=
+git.sr.ht/~sbinet/cmpimg v0.1.0/go.mod h1:FU12psLbF4TfNXkKH2ZZQ29crIqoiqTZmeQ7dkp/pxE=
+git.sr.ht/~sbinet/gg v0.5.0 h1:6V43j30HM623V329xA9Ntq+WJrMjDxRjuAB1LFWF5m8=
+git.sr.ht/~sbinet/gg v0.5.0/go.mod h1:G2C0eRESqlKhS7ErsNey6HHrqU1PwsnCQlekFi9Q2Oo=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
+github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
+github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b h1:slYM766cy2nI3BwyRiyQj/Ud48djTMtMebDqepE95rw=
+github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
+github.com/campoy/embedmd v1.0.0 h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY=
+github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/go-fonts/dejavu v0.1.0 h1:JSajPXURYqpr+Cu8U9bt8K+XcACIHWqWrvWCKyeFmVQ=
+github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/latin-modern v0.3.1 h1:/cT8A7uavYKvglYXvrdDw4oS5ZLkcOU22fa2HJ1/JVM=
+github.com/go-fonts/latin-modern v0.3.1/go.mod h1:ysEQXnuT/sCDOAONxC7ImeEDVINbltClhasMAqEtRK0=
+github.com/go-fonts/liberation v0.3.1 h1:9RPT2NhUpxQ7ukUvz3jeUckmN42T9D9TpjtQcqK/ceM=
+github.com/go-fonts/liberation v0.3.1/go.mod h1:jdJ+cqF+F4SUL2V+qxBth8fvBpBDS7yloUL5Fi8GTGY=
+github.com/go-latex/latex v0.0.0-20230307184459-12ec69307ad9 h1:NxXI5pTAtpEaU49bpLpQoDsu1zrteW/vxzTz8Cd2UAs=
+github.com/go-latex/latex v0.0.0-20230307184459-12ec69307ad9/go.mod h1:gWuR/CrFDDeVRFQwHPvsv9soJVB/iqymhuZQuJ3a9OM=
+github.com/go-pdf/fpdf v0.8.0 h1:IJKpdaagnWUeSkUFUjTcSzTppFxmv8ucGQyNPQWxYOQ=
+github.com/go-pdf/fpdf v0.8.0/go.mod h1:gfqhcNwXrsd3XYKte9a7vM3smvU/jB4ZRDrmWSxpfdc=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk=
+golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY=
+golang.org/x/image v0.18.0 h1:jGzIakQa/ZXI1I0Fxvaa9W7yP25TqT6cHIHn+6CqvSQ=
+golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0=
+gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU=
+gonum.org/v1/plot v0.14.0 h1:+LBDVFYwFe4LHhdP8coW6296MBEY4nQ+Y4vuUpJopcE=
+gonum.org/v1/plot v0.14.0/go.mod h1:MLdR9424SJed+5VqC6MsouEpig9pZX2VZ57H9ko2bXU=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
+rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
diff --git a/tools/rw-heatmaps/main.go b/tools/rw-heatmaps/main.go
new file mode 100644
index 00000000000..cfd956a9ad0
--- /dev/null
+++ b/tools/rw-heatmaps/main.go
@@ -0,0 +1,29 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "go.etcd.io/etcd/tools/rw-heatmaps/v3/cmd"
+)
+
+func main() {
+ if err := cmd.NewRootCommand().Execute(); err != nil {
+ fmt.Fprintf(os.Stderr, "error: %v\n", err.Error())
+ os.Exit(1)
+ }
+}
diff --git a/tools/rw-heatmaps/pkg/chart/heatmap_grid.go b/tools/rw-heatmaps/pkg/chart/heatmap_grid.go
new file mode 100644
index 00000000000..2802a707036
--- /dev/null
+++ b/tools/rw-heatmaps/pkg/chart/heatmap_grid.go
@@ -0,0 +1,130 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chart
+
+import (
+ "sort"
+
+ "go.etcd.io/etcd/tools/rw-heatmaps/v3/pkg/dataset"
+)
+
+// heatMapGrid holds X, Y, Z values for a heatmap.
+type heatMapGrid struct {
+ x, y []float64
+ z [][]float64 // The Z values should be arranged in a 2D slice.
+}
+
+// newHeatMapGrid returns a new heatMapGrid.
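+// It assumes the records are ordered with value size as the slow index and
+// connection count as the fast index (the order produced by rw-benchmark.sh),
+// so record i*len(x)+j maps to row i (value size) and column j (connections).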
+func newHeatMapGrid(plotType string, records []dataset.DataRecord) *heatMapGrid {
+ x, y := populateGridAxes(records)
+
+ // Create a 2D slice to hold the Z values.
+ z := make([][]float64, len(y))
+ for i := range z {
+ z[i] = make([]float64, len(x))
+ for j := range z[i] {
+ recordIndex := i*len(x) + j
+ // If the recordIndex is out of range (incomplete data), break the loop.
+ if recordIndex >= len(records) {
+ break
+ }
+ record := records[recordIndex]
+ if plotType == "read" {
+ z[i][j] = record.AvgRead
+ } else {
+ z[i][j] = record.AvgWrite
+ }
+ }
+ }
+
+ return &heatMapGrid{x, y, z}
+}
+
+// newDeltaHeatMapGrid returns a new heatMapGrid for the delta heatmap.
+func newDeltaHeatMapGrid(plotType string, records [][]dataset.DataRecord) *heatMapGrid {
+ delta := make([]dataset.DataRecord, len(records[0]))
+ for i := range records[0] {
+ delta[i] = dataset.DataRecord{
+ ConnSize: records[0][i].ConnSize,
+ ValueSize: records[0][i].ValueSize,
+ AvgRead: ((records[1][i].AvgRead - records[0][i].AvgRead) / records[0][i].AvgRead) * 100,
+ AvgWrite: ((records[1][i].AvgWrite - records[0][i].AvgWrite) / records[0][i].AvgWrite) * 100,
+ }
+ }
+
+ return newHeatMapGrid(plotType, delta)
+}
+
+// Dims returns the dimensions of the grid as (columns, rows).
+// It implements the plotter.GridXYZ interface.
+func (h *heatMapGrid) Dims() (int, int) {
+ return len(h.x), len(h.y)
+}
+
+// Z returns the value of a grid cell at (c, r).
+// It implements the plotter.GridXYZ interface.
+func (h *heatMapGrid) Z(c, r int) float64 {
+ return h.z[r][c]
+}
+
+// X returns the coordinate for the column at index c.
+// It implements the plotter.GridXYZ interface.
+func (h *heatMapGrid) X(c int) float64 {
+ if c >= len(h.x) {
+ panic("index out of range")
+ }
+ return h.x[c]
+}
+
+// Y returns the coordinate for the row at index r.
+// It implements the plotter.GridXYZ interface.
+func (h *heatMapGrid) Y(r int) float64 {
+ if r >= len(h.y) {
+ panic("index out of range")
+ }
+ return h.y[r]
+}
+
+// populateGridAxes populates the X and Y axes for the heatmap grid.
+func populateGridAxes(records []dataset.DataRecord) ([]float64, []float64) {
+ var xslice, yslice []float64
+
+ for _, record := range records {
+ xslice = append(xslice, float64(record.ConnSize))
+ yslice = append(yslice, float64(record.ValueSize))
+ }
+
+ // Sort and deduplicate the slices
+ xUnique := uniqueSortedFloats(xslice)
+ yUnique := uniqueSortedFloats(yslice)
+
+ return xUnique, yUnique
+}
+
+// uniqueSortedFloats returns a sorted slice of unique float64 values.
+func uniqueSortedFloats(input []float64) []float64 {
+ unique := make([]float64, 0)
+ seen := make(map[float64]bool)
+
+ for _, value := range input {
+ if !seen[value] {
+ seen[value] = true
+ unique = append(unique, value)
+ }
+ }
+
+ sort.Float64s(unique)
+ return unique
+}
diff --git a/tools/rw-heatmaps/pkg/chart/heatmaps.go b/tools/rw-heatmaps/pkg/chart/heatmaps.go
new file mode 100644
index 00000000000..3feb118737b
--- /dev/null
+++ b/tools/rw-heatmaps/pkg/chart/heatmaps.go
@@ -0,0 +1,346 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chart
+
+import (
+ "fmt"
+ "image/color"
+ "io"
+ "math"
+ "os"
+ "strings"
+
+ "gonum.org/v1/plot"
+ "gonum.org/v1/plot/font"
+ "gonum.org/v1/plot/palette"
+ "gonum.org/v1/plot/palette/brewer"
+ "gonum.org/v1/plot/plotter"
+ "gonum.org/v1/plot/vg"
+ "gonum.org/v1/plot/vg/draw"
+ "gonum.org/v1/plot/vg/vgimg"
+
+ "go.etcd.io/etcd/tools/rw-heatmaps/v3/pkg/dataset"
+)
+
+// pow2Ticks is a type that implements the plot.Ticker interface for log2 scale.
+type pow2Ticks struct{}
+
+// Ticks returns the ticks for the log2 scale.
+// It implements the plot.Ticker interface.
+func (pow2Ticks) Ticks(min, max float64) []plot.Tick {
+ var t []plot.Tick
+ for i := math.Log2(min); math.Pow(2, i) <= max; i++ {
+ t = append(t, plot.Tick{
+ Value: math.Pow(2, i),
+ Label: fmt.Sprintf("2^%d", int(i)),
+ })
+ }
+ return t
+}
+
+// invertedPalette takes an existing palette and inverts it.
+type invertedPalette struct {
+ base palette.Palette
+}
+
+// Colors returns the sequence of colors in reverse order from the base palette.
+// It implements the palette.Palette interface.
+func (p invertedPalette) Colors() []color.Color {
+ baseColors := p.base.Colors()
+ invertedColors := make([]color.Color, len(baseColors))
+ for i, c := range baseColors {
+ invertedColors[len(baseColors)-i-1] = c
+ }
+ return invertedColors
+}
+
+// PlotHeatMaps plots and saves the read and write heatmaps for the given datasets.
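+// An illustrative call (the file name and titles below are examples):
+//
+//	ds, err := dataset.LoadCSVData("result-main.csv")
+//	if err == nil {
+//		err = PlotHeatMaps([]*dataset.DataSet{ds}, "main branch", "heatmap", "png", true)
+//	}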
+func PlotHeatMaps(datasets []*dataset.DataSet, title, outputImageFile, outputFormat string, zeroCentered bool) error {
+ plot.DefaultFont = font.Font{
+ Typeface: "Liberation",
+ Variant: "Sans",
+ }
+
+ for _, plotType := range []string{"read", "write"} {
+ var canvas *vgimg.Canvas
+ if len(datasets) == 1 {
+ canvas = plotHeatMapGrid(datasets[0], title, plotType)
+ } else {
+ canvas = plotComparisonHeatMapGrid(datasets, title, plotType, zeroCentered)
+ }
+ if err := saveCanvas(canvas, plotType, outputImageFile, outputFormat); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// plotHeatMapGrid plots a grid of heatmaps for the given dataset.
+func plotHeatMapGrid(dataset *dataset.DataSet, title, plotType string) *vgimg.Canvas {
+ // Make a 4x2 grid of heatmaps.
+ const rows, cols = 4, 2
+
+ // Set the width and height of the canvas.
+ const width, height = 30 * vg.Centimeter, 40 * vg.Centimeter
+
+ canvas := vgimg.New(width, height)
+ dc := draw.New(canvas)
+
+ // Create a tiled layout for the plots.
+ t := draw.Tiles{
+ Rows: rows,
+ Cols: cols,
+ PadX: vg.Millimeter * 4,
+ PadY: vg.Millimeter * 4,
+ PadTop: vg.Millimeter * 10,
+ PadBottom: vg.Millimeter * 2,
+ PadLeft: vg.Millimeter * 2,
+ PadRight: vg.Millimeter * 2,
+ }
+
+ // Store the plots and legends (scale label) in a grid.
+ plots := make([][]*plot.Plot, rows)
+ legends := make([][]plot.Legend, rows)
+ for i := range plots {
+ plots[i] = make([]*plot.Plot, cols)
+ legends[i] = make([]plot.Legend, cols)
+ }
+
+ // Load records into the grid.
+ ratios := dataset.GetSortedRatios()
+ row, col := 0, 0
+ for _, ratio := range ratios {
+ records := dataset.Records[ratio]
+ p, l := plotIndividualHeatMap(fmt.Sprintf("R/W Ratio %0.04f", ratio), plotType, records)
+ plots[row][col] = p
+ legends[row][col] = l
+
+ if col++; col == cols {
+ col = 0
+ row++
+ }
+ }
+
+ // Fill the canvas with the plots and legends.
+ canvases := plot.Align(plots, t, dc)
+ for i := 0; i < rows; i++ {
+ for j := 0; j < cols; j++ {
+ // Continue if there is no plot in the current cell (incomplete data).
+ if plots[i][j] == nil {
+ continue
+ }
+
+ l := legends[i][j]
+ r := l.Rectangle(canvases[i][j])
+ legendWidth := r.Max.X - r.Min.X
+ // Adjust the legend down a little.
+ l.YOffs = -plots[i][j].Title.TextStyle.FontExtents().Height
+ l.Draw(canvases[i][j])
+
+ // Crop the plot to make space for the legend.
+ c := draw.Crop(canvases[i][j], 0, -legendWidth-vg.Millimeter, 0, 0)
+ plots[i][j].Draw(c)
+ }
+ }
+
+ // Add the title and parameter legend.
+ l := plot.NewLegend()
+ l.Add(fmt.Sprintf("%s [%s]", title, strings.ToUpper(plotType)))
+ l.Add(dataset.Param)
+ l.Top = true
+ l.Left = true
+ l.Draw(dc)
+
+ return canvas
+}
+
+// plotComparisonHeatMapGrid plots a grid of heatmaps for the given datasets.
+func plotComparisonHeatMapGrid(datasets []*dataset.DataSet, title, plotType string, zeroCentered bool) *vgimg.Canvas {
+ // Make a 8x3 grid of heatmaps.
+ const rows, cols = 8, 3
+ // Set the width and height of the canvas.
+ const width, height = 40 * vg.Centimeter, 66 * vg.Centimeter
+
+ canvas := vgimg.New(width, height)
+ dc := draw.New(canvas)
+
+ // Create a tiled layout for the plots.
+ t := draw.Tiles{
+ Rows: rows,
+ Cols: cols,
+ PadX: vg.Millimeter * 4,
+ PadY: vg.Millimeter * 4,
+ PadTop: vg.Millimeter * 15,
+ PadBottom: vg.Millimeter * 2,
+ PadLeft: vg.Millimeter * 2,
+ PadRight: vg.Millimeter * 2,
+ }
+
+ // Store the plots and legends (scale label) in a grid.
+ plots := make([][]*plot.Plot, rows)
+ legends := make([][]plot.Legend, rows)
+ for i := range plots {
+ plots[i] = make([]*plot.Plot, cols)
+ legends[i] = make([]plot.Legend, cols)
+ }
+
+ // Load records into the grid.
+ ratios := datasets[0].GetSortedRatios()
+ for row, ratio := range ratios {
+ records := make([][]dataset.DataRecord, len(datasets))
+ for col, dataset := range datasets {
+ r := dataset.Records[ratio]
+ p, l := plotIndividualHeatMap(fmt.Sprintf("R/W Ratio %0.04f", ratio), plotType, r)
+ // Add the title to the first row.
+ if row == 0 {
+ p.Title.Text = fmt.Sprintf("%s\n%s", dataset.FileName, p.Title.Text)
+ }
+
+ plots[row][col] = p
+ legends[row][col] = l
+ records[col] = r
+ }
+ plots[row][2], legends[row][2] = plotDeltaHeatMap(fmt.Sprintf("R/W Ratio %0.04f", ratio), plotType, records, zeroCentered)
+ }
+
+ // Fill the canvas with the plots and legends.
+ canvases := plot.Align(plots, t, dc)
+ for i := 0; i < rows; i++ {
+ for j := 0; j < cols; j++ {
+ // Continue if there is no plot in the current cell (incomplete data).
+ if plots[i][j] == nil {
+ continue
+ }
+
+ l := legends[i][j]
+ r := l.Rectangle(canvases[i][j])
+ legendWidth := r.Max.X - r.Min.X
+ // Adjust the legend down a little.
+ l.YOffs = -plots[i][j].Title.TextStyle.FontExtents().Height
+ l.Draw(canvases[i][j])
+
+ // Crop the plot to make space for the legend.
+ c := draw.Crop(canvases[i][j], 0, -legendWidth-vg.Millimeter, 0, 0)
+ plots[i][j].Draw(c)
+ }
+ }
+
+ // Add the title and parameter legend.
+ l := plot.NewLegend()
+ l.Add(fmt.Sprintf("%s [%s]", title, strings.ToUpper(plotType)))
+ for _, dataset := range datasets {
+ l.Add(fmt.Sprintf("%s: %s", dataset.FileName, dataset.Param))
+ }
+ l.Top = true
+ l.Left = true
+ l.Draw(dc)
+
+ return canvas
+}
+
+// saveCanvas saves the canvas to a file.
+func saveCanvas(canvas *vgimg.Canvas, plotType, outputImageFile, outputFormat string) error {
+ f, err := os.Create(fmt.Sprintf("%s_%s.%s", outputImageFile, plotType, outputFormat))
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ var w io.WriterTo
+ switch outputFormat {
+ case "png":
+ w = vgimg.PngCanvas{Canvas: canvas}
+ case "jpeg", "jpg":
+ w = vgimg.JpegCanvas{Canvas: canvas}
+ case "tiff":
+ w = vgimg.TiffCanvas{Canvas: canvas}
+ }
+
+ _, err = w.WriteTo(f)
+ return err
+}
+
+// plotIndividualHeatMap plots a heatmap for a given set of records.
+func plotIndividualHeatMap(title, plotType string, records []dataset.DataRecord) (*plot.Plot, plot.Legend) {
+ p := plot.New()
+ p.X.Scale = plot.LogScale{}
+ p.X.Tick.Marker = pow2Ticks{}
+ p.X.Label.Text = "Connections Amount"
+ p.Y.Scale = plot.LogScale{}
+ p.Y.Tick.Marker = pow2Ticks{}
+ p.Y.Label.Text = "Value Size"
+
+ gridData := newHeatMapGrid(plotType, records)
+
+ // Use the YlGnBu color palette from ColorBrewer to match the original implementation.
+ colors, _ := brewer.GetPalette(brewer.TypeAny, "YlGnBu", 9)
+ pal := invertedPalette{colors}
+ h := plotter.NewHeatMap(gridData, pal)
+
+ p.Title.Text = fmt.Sprintf("%s [%.2f, %.2f]", title, h.Min, h.Max)
+ p.Add(h)
+
+ // Create a legend with the scale.
+ legend := generateScaleLegend(h.Min, h.Max, pal)
+
+ return p, legend
+}
+
+// plotDeltaHeatMap plots a heatmap for the delta between two sets of records.
+func plotDeltaHeatMap(title, plotType string, records [][]dataset.DataRecord, zeroCentered bool) (*plot.Plot, plot.Legend) {
+ p := plot.New()
+ p.X.Scale = plot.LogScale{}
+ p.X.Tick.Marker = pow2Ticks{}
+ p.X.Label.Text = "Connections Amount"
+ p.Y.Scale = plot.LogScale{}
+ p.Y.Tick.Marker = pow2Ticks{}
+ p.Y.Label.Text = "Value Size"
+
+ gridData := newDeltaHeatMapGrid(plotType, records)
+
+ // Use the RdBu color palette from ColorBrewer to match the original implementation.
+ colors, _ := brewer.GetPalette(brewer.TypeAny, "RdBu", 11)
+ pal := invertedPalette{colors}
+ h := plotter.NewHeatMap(gridData, pal)
+ p.Title.Text = fmt.Sprintf("%s [%.2f%%, %.2f%%]", title, h.Min, h.Max)
+
+ if zeroCentered {
+ if h.Min < 0 && math.Abs(h.Min) > h.Max {
+ h.Max = math.Abs(h.Min)
+ } else {
+ h.Min = h.Max * -1
+ }
+ }
+
+ p.Add(h)
+
+ // Create a legend with the scale.
+ legend := generateScaleLegend(h.Min, h.Max, pal)
+
+ return p, legend
+}
+
+// generateScaleLegend generates legends for the heatmap.
+func generateScaleLegend(min, max float64, pal palette.Palette) plot.Legend {
+ legend := plot.NewLegend()
+ thumbs := plotter.PaletteThumbnailers(pal)
+ step := (max - min) / float64(len(thumbs)-1)
+ for i := len(thumbs) - 1; i >= 0; i-- {
+ legend.Add(fmt.Sprintf("%.0f", min+step*float64(i)), thumbs[i])
+ }
+ legend.Top = true
+
+ return legend
+}
diff --git a/tools/rw-heatmaps/pkg/dataset/dataset.go b/tools/rw-heatmaps/pkg/dataset/dataset.go
new file mode 100644
index 00000000000..6328c3833f7
--- /dev/null
+++ b/tools/rw-heatmaps/pkg/dataset/dataset.go
@@ -0,0 +1,122 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dataset
+
+import (
+ "encoding/csv"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+const (
+ _ = iota
+ // fieldIndexRatio is the index of the ratio field in the CSV file.
+ fieldIndexRatio
+ // fieldIndexConnSize is the index of the connection size (connSize) field in the CSV file.
+ fieldIndexConnSize
+ // fieldIndexValueSize is the index of the value size (valueSize) field in the CSV file.
+ fieldIndexValueSize
+ // fieldIndexIterOffset is the index of the first iteration field in the CSV file.
+ fieldIndexIterOffset
+)
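+
+// The loader expects a CSV shaped roughly as below (column names are
+// illustrative; only the field positions and the "iter" prefix matter):
+//
+//	type,ratio,conn_size,value_size,iter-1,iter-2,comment
+//	INFO,,,,,,key_size=256 backend_size=21474836480
+//	DATA,4.0000,32,256,55000.1:13750.2,54800.9:13700.3,
+//
+// Each iter column holds "readQPS:writeQPS" for one run, and the INFO line
+// carries the run parameters in the comment column.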
+
+// DataSet holds the data for the heatmaps, including the parameter used for the run.
+type DataSet struct {
+ // FileName is the name of the file from which the data was loaded.
+ FileName string
+ // Records is a map from the ratio of read to write operations to the data for that ratio.
+ Records map[float64][]DataRecord
+ // Param is the parameter used for the run.
+ Param string
+}
+
+// DataRecord holds the data for a single heatmap chart.
+type DataRecord struct {
+ ConnSize int64
+ ValueSize int64
+ AvgRead float64
+ AvgWrite float64
+}
+
+// GetSortedRatios returns the sorted ratios of read to write operations in the dataset.
+func (d *DataSet) GetSortedRatios() []float64 {
+ ratios := make([]float64, 0)
+ for ratio := range d.Records {
+ ratios = append(ratios, ratio)
+ }
+ sort.Float64s(ratios)
+ return ratios
+}
+
+// LoadCSVData loads the data from a CSV file into a DataSet.
+func LoadCSVData(inputFile string) (*DataSet, error) {
+ file, err := os.Open(inputFile)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ reader := csv.NewReader(file)
+ lines, err := reader.ReadAll()
+ if err != nil {
+ return nil, err
+ }
+
+ records := make(map[float64][]DataRecord)
+
+ // Count the number of iterations.
+ iters := 0
+ for _, header := range lines[0][fieldIndexIterOffset:] {
+ if strings.HasPrefix(header, "iter") {
+ iters++
+ }
+ }
+
+ // Running parameters are stored in the first line after the header, after the iteration fields.
+ param := lines[1][fieldIndexIterOffset+iters]
+
+ for _, line := range lines[2:] { // Skip the header and parameter lines.
+ ratio, _ := strconv.ParseFloat(line[fieldIndexRatio], 64)
+ if _, ok := records[ratio]; !ok {
+ records[ratio] = make([]DataRecord, 0)
+ }
+ connSize, _ := strconv.ParseInt(line[fieldIndexConnSize], 10, 64)
+ valueSize, _ := strconv.ParseInt(line[fieldIndexValueSize], 10, 64)
+
+ // Calculate the average read and write values for the iterations.
+ var readSum, writeSum float64
+ for _, v := range line[fieldIndexIterOffset : fieldIndexIterOffset+iters] {
+ splitted := strings.Split(v, ":")
+
+ readValue, _ := strconv.ParseFloat(splitted[0], 64)
+ readSum += readValue
+
+ writeValue, _ := strconv.ParseFloat(splitted[1], 64)
+ writeSum += writeValue
+ }
+
+ records[ratio] = append(records[ratio], DataRecord{
+ ConnSize: connSize,
+ ValueSize: valueSize,
+ AvgRead: readSum / float64(iters),
+ AvgWrite: writeSum / float64(iters),
+ })
+ }
+
+ return &DataSet{FileName: filepath.Base(inputFile), Records: records, Param: param}, nil
+}
diff --git a/tools/rw-heatmaps/rw-benchmark.sh b/tools/rw-heatmaps/rw-benchmark.sh
new file mode 100755
index 00000000000..209414aa436
--- /dev/null
+++ b/tools/rw-heatmaps/rw-benchmark.sh
@@ -0,0 +1,214 @@
+#!/bin/bash
+
+#set -x
+
+RATIO_LIST="${RATIO_LIST:-1/128 1/8 1/4 1/2 2/1 4/1 8/1 128/1}"
+VALUE_SIZE_POWER_RANGE="${VALUE_SIZE_POWER_RANGE:-8 14}"
+CONN_CLI_COUNT_POWER_RANGE="${CONN_CLI_COUNT_POWER_RANGE:-5 11}"
+REPEAT_COUNT="${REPEAT_COUNT:-5}"
+RUN_COUNT="${RUN_COUNT:-200000}"
+
+KEY_SIZE="${KEY_SIZE:-256}"
+KEY_SPACE_SIZE="${KEY_SPACE_SIZE:-$((1024 * 64))}"
+BACKEND_SIZE="${BACKEND_SIZE:-$((20 * 1024 * 1024 * 1024))}"
+RANGE_RESULT_LIMIT="${RANGE_RESULT_LIMIT:-100}"
+CLIENT_PORT="${CLIENT_PORT:-23790}"
+
+COMMIT=
+
+ETCD_ROOT_DIR="$(cd $(dirname $0) && pwd)/../.."
+ETCD_BIN_DIR="${ETCD_ROOT_DIR}/bin"
+ETCD_BIN="${ETCD_BIN_DIR}/etcd"
+ETCD_BM_BIN="${ETCD_BIN_DIR}/tools/benchmark"
+
+WORKING_DIR="$(mktemp -d)"
+CURRENT_DIR="$(pwd -P)"
+OUTPUT_FILE="${CURRENT_DIR}/result-$(date '+%Y%m%d%H%M').csv"
+
+trap ctrl_c INT
+
+CURRENT_ETCD_PID=
+
+function ctrl_c() {
+ # capture ctrl-c and kill server
+ echo "terminating..."
+ kill_etcd_server ${CURRENT_ETCD_PID}
+ exit 0
+}
+
+function quit() {
+ if [ ! -z ${CURRENT_ETCD_PID} ]; then
+ kill_etcd_server ${CURRENT_ETCD_PID}
+ fi
+ exit $1
+}
+
+function check_prerequisite() {
+ # check initial parameters
+ if [ -f "${OUTPUT_FILE}" ]; then
+ echo "file ${OUTPUT_FILE} already exists."
+ exit 1
+ fi
+ pushd ${ETCD_ROOT_DIR} > /dev/null
+ COMMIT=$(git log --pretty=format:'%h' -n 1)
+ if [ $? -ne 0 ]; then
+ COMMIT=N/A
+ fi
+ popd > /dev/null
+ cat >"${OUTPUT_FILE}" </dev/null &
+ return $!
+}
+
+function init_etcd_db() {
+ #initialize etcd database
+ if [ ! -x ${ETCD_BM_BIN} ]; then
+ echo "no etcd benchmark binary found at: ${ETCD_BM_BIN}"
+ quit -1
+ fi
+ echo "initialize etcd database..."
+ ${ETCD_BM_BIN} put --sequential-keys \
+ --key-space-size=${KEY_SPACE_SIZE} \
+ --val-size=${VALUE_SIZE} --key-size=${KEY_SIZE} \
+ --endpoints http://127.0.0.1:${CLIENT_PORT} \
+ --total=${KEY_SPACE_SIZE} \
+ &>/dev/null
+}
+
+function kill_etcd_server() {
+ # kill etcd server
+ ETCD_PID=$1
+ if [ -z "$(ps aux | grep etcd | awk "{print \$2}")" ]; then
+ echo "failed to find the etcd instance to kill: ${ETCD_PID}"
+ return
+ fi
+ echo "kill etcd server instance"
+ kill -9 ${ETCD_PID}
+ wait ${ETCD_PID} 2>/dev/null
+ sleep 5
+}
+
+
+while getopts ":w:c:p:l:vh" OPTION; do
+ case $OPTION in
+ h)
+ echo "usage: $(basename $0) [-h] [-w WORKING_DIR] [-c RUN_COUNT] [-p PORT] [-l RANGE_QUERY_LIMIT] [-v]" >&2
+ exit 1
+ ;;
+ w)
+ WORKING_DIR="${OPTARG}"
+ ;;
+ c)
+ RUN_COUNT="${OPTARG}"
+ ;;
+ p)
+ CLIENT_PORT="${OPTARG}"
+ ;;
+ v)
+ set -x
+ ;;
+ l)
+ RANGE_RESULT_LIMIT="${OPTARG}"
+ ;;
+ \?)
+ echo "usage: $(basename $0) [-h] [-w WORKING_DIR] [-c RUN_COUNT] [-p PORT] [-l RANGE_QUERY_LIMIT] [-v]" >&2
+ exit 1
+ ;;
+ esac
+done
+shift "$((${OPTIND} - 1))"
+
+check_prerequisite
+
+pushd "${WORKING_DIR}" > /dev/null
+
+# progress stats management
+ITER_TOTAL=$(($(echo ${RATIO_LIST} | wc | awk "{print \$2}") * \
+ $(seq ${VALUE_SIZE_POWER_RANGE} | wc | awk "{print \$2}") * \
+ $(seq ${CONN_CLI_COUNT_POWER_RANGE} | wc | awk "{print \$2}")))
+ITER_CURRENT=0
+PERCENTAGE_LAST_PRINT=0
+PERCENTAGE_PRINT_THRESHOLD=5
+
+for RATIO_STR in ${RATIO_LIST}; do
+ RATIO=$(echo "scale=4; ${RATIO_STR}" | bc -l)
+ for VALUE_SIZE_POWER in $(seq ${VALUE_SIZE_POWER_RANGE}); do
+ VALUE_SIZE=$((2 ** ${VALUE_SIZE_POWER}))
+ for CONN_CLI_COUNT_POWER in $(seq ${CONN_CLI_COUNT_POWER_RANGE}); do
+
+ # progress stats management
+ ITER_CURRENT=$((${ITER_CURRENT} + 1))
+ PERCENTAGE_CURRENT=$(echo "scale=3; ${ITER_CURRENT}/${ITER_TOTAL}*100" | bc -l)
+ if [ "$(echo "${PERCENTAGE_CURRENT} - ${PERCENTAGE_LAST_PRINT} > ${PERCENTAGE_PRINT_THRESHOLD}" |
+ bc -l)" -eq 1 ]; then
+ PERCENTAGE_LAST_PRINT=${PERCENTAGE_CURRENT}
+ echo "${PERCENTAGE_CURRENT}% completed"
+ fi
+
+ CONN_CLI_COUNT=$((2 ** ${CONN_CLI_COUNT_POWER}))
+
+ run_etcd_server
+ CURRENT_ETCD_PID=$!
+ sleep 5
+
+ init_etcd_db
+
+ START=$(date +%s)
+ LINE="DATA,${RATIO},${CONN_CLI_COUNT},${VALUE_SIZE}"
+ echo -n "run with setting [${LINE}]"
+ for i in $(seq ${REPEAT_COUNT}); do
+ echo -n "."
+ QPS=$(${ETCD_BM_BIN} txn-mixed "" \
+ --conns=${CONN_CLI_COUNT} --clients=${CONN_CLI_COUNT} \
+ --total=${RUN_COUNT} \
+ --endpoints "http://127.0.0.1:${CLIENT_PORT}" \
+ --rw-ratio ${RATIO} --limit ${RANGE_RESULT_LIMIT} \
+ --val-size ${VALUE_SIZE} \
+ 2>/dev/null | grep "Requests/sec" | awk "{print \$2}")
+ if [ $? -ne 0 ]; then
+ echo "benchmark command failed: $?"
+ quit -1
+ fi
+ RD_QPS=$(echo -e "${QPS}" | sed -n '1 p')
+ WR_QPS=$(echo -e "${QPS}" | sed -n '2 p')
+ if [ -z "${RD_QPS}" ]; then
+ RD_QPS=0
+ fi
+ if [ -z "${WR_QPS}" ]; then
+ WR_QPS=0
+ fi
+ LINE="${LINE},${RD_QPS}:${WR_QPS}"
+ done
+ LINE="${LINE},"
+ END=$(date +%s)
+ DIFF=$((${END} - ${START}))
+ echo "took ${DIFF} seconds"
+
+ cat >>"${OUTPUT_FILE}" < /dev/null
diff --git a/tools/testgrid-analysis/OWNERS b/tools/testgrid-analysis/OWNERS
new file mode 100644
index 00000000000..365ae7b38a9
--- /dev/null
+++ b/tools/testgrid-analysis/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+ - area/testing
diff --git a/tools/testgrid-analysis/cmd/data.go b/tools/testgrid-analysis/cmd/data.go
new file mode 100644
index 00000000000..1b18ca59361
--- /dev/null
+++ b/tools/testgrid-analysis/cmd/data.go
@@ -0,0 +1,155 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ apipb "github.com/GoogleCloudPlatform/testgrid/pb/api/v1"
+ statuspb "github.com/GoogleCloudPlatform/testgrid/pb/test_status"
+ "google.golang.org/protobuf/encoding/protojson"
+)
+
+var (
+ validTestStatuses = []statuspb.TestStatus{statuspb.TestStatus_PASS, statuspb.TestStatus_FAIL, statuspb.TestStatus_FLAKY}
+ failureTestStatuses = []statuspb.TestStatus{statuspb.TestStatus_FAIL, statuspb.TestStatus_FLAKY}
+ validTestStatusesInt = intStatusSet(validTestStatuses)
+ failureTestStatusesInt = intStatusSet(failureTestStatuses)
+
+ skippedTestStatuses = make(map[int32]struct{})
+)
+
+type TestResultSummary struct {
+ Name string
+ FullName string
+ TotalRuns, FailedRuns int
+ FailureRate float32
+ FailureLogs []string
+ IssueBody string
+}
+
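+// fetchTestResultSummaries pulls the rows and headers of a testgrid dashboard
+// tab from the testgrid-data.k8s.io API and summarizes the failure rate of
+// each test.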
+func fetchTestResultSummaries(dashboard, tab string) []*TestResultSummary {
+ // Fetch test data
+ rowsURL := fmt.Sprintf("http://testgrid-data.k8s.io/api/v1/dashboards/%s/tabs/%s/rows", dashboard, tab)
+ headersURL := fmt.Sprintf("http://testgrid-data.k8s.io/api/v1/dashboards/%s/tabs/%s/headers", dashboard, tab)
+
+ var testData apipb.ListRowsResponse
+ var headerData apipb.ListHeadersResponse
+ protojson.Unmarshal(fetchJSON(rowsURL), &testData)
+ protojson.Unmarshal(fetchJSON(headersURL), &headerData)
+
+ var allTests []string
+ for _, row := range testData.Rows {
+ allTests = append(allTests, row.Name)
+ }
+
+ summaries := []*TestResultSummary{}
+ // Process rows
+ for _, row := range testData.Rows {
+ t := processRow(dashboard, tab, row, allTests, headerData.Headers)
+ summaries = append(summaries, t)
+ }
+ return summaries
+}
+
+func processRow(dashboard, tab string, row *apipb.ListRowsResponse_Row, allTests []string, headers []*apipb.ListHeadersResponse_Header) *TestResultSummary {
+ t := TestResultSummary{Name: shortenTestName(row.Name), FullName: row.Name}
+ // we do not want to create issues for a parent test.
+ if isParentTest(row.Name, allTests) {
+ return &t
+ }
+ if !strings.HasPrefix(row.Name, "go.etcd.io") {
+ return &t
+ }
+ earliestTimeToConsider := time.Now().AddDate(0, 0, -1*maxDays)
+ total := 0
+ failed := 0
+ logs := []string{}
+ for i, cell := range row.Cells {
+ // ignore tests with status not in the validTestStatuses
+ // cell result codes are listed in https://github.com/GoogleCloudPlatform/testgrid/blob/main/pb/test_status/test_status.proto
+ if _, ok := validTestStatusesInt[cell.Result]; !ok {
+ if cell.Result != 0 {
+ skippedTestStatuses[cell.Result] = struct{}{}
+ }
+ continue
+ }
+ header := headers[i]
+ if maxDays > 0 && header.Started.AsTime().Before(earliestTimeToConsider) {
+ continue
+ }
+ total++
+ if _, ok := failureTestStatusesInt[cell.Result]; ok {
+ failed++
+ // markdown table format of | commit | log |
+ logs = append(logs, fmt.Sprintf("| %s | %s | https://prow.k8s.io/view/gs/kubernetes-jenkins/logs/%s/%s |", strings.Join(header.Extra, ","), header.Started.AsTime().String(), tab, header.Build))
+ }
+ if maxRuns > 0 && total >= maxRuns {
+ break
+ }
+ }
+ t.FailedRuns = failed
+ t.TotalRuns = total
+ t.FailureLogs = logs
+ t.FailureRate = float32(failed) / float32(total)
+ if t.FailedRuns > 0 {
+ dashboardURL := fmt.Sprintf("[%s](https://testgrid.k8s.io/%s#%s)", tab, dashboard, tab)
+ t.IssueBody = fmt.Sprintf("## %s Test: %s \nTest failed %.1f%% (%d/%d) of the time\n\nfailure logs are:\n| commit | started | log |\n| --- | --- | --- |\n%s\n",
+ dashboardURL, t.FullName, t.FailureRate*100, t.FailedRuns, t.TotalRuns, strings.Join(t.FailureLogs, "\n"))
+ t.IssueBody += "\nPlease follow the [instructions in the contributing guide](https://github.com/etcd-io/etcd/blob/main/CONTRIBUTING.md#check-for-flaky-tests) to reproduce the issue.\n"
+ fmt.Printf("%s failed %.1f%% (%d/%d) of the time\n", t.FullName, t.FailureRate*100, t.FailedRuns, t.TotalRuns)
+ }
+ return &t
+}
+
+// isParentTest checks if a test is a rollup of some child tests.
+func isParentTest(test string, allTests []string) bool {
+ for _, t := range allTests {
+ if t != test && strings.HasPrefix(t, test+"/") {
+ return true
+ }
+ }
+ return false
+}
+
+func fetchJSON(url string) []byte {
+ resp, err := http.Get(url)
+ if err != nil {
+ fmt.Println("Error fetching test data:", err)
+ os.Exit(1)
+ }
+ defer resp.Body.Close()
+ testBody, _ := io.ReadAll(resp.Body)
+ return testBody
+}
+
+// intStatusSet converts a list of statuspb.TestStatus into a set of int.
+func intStatusSet(statuses []statuspb.TestStatus) map[int32]struct{} {
+ s := make(map[int32]struct{})
+ for _, status := range statuses {
+ s[int32(status)] = struct{}{}
+ }
+ return s
+}
+
+func shortenTestName(fullname string) string {
+ parts := strings.Split(fullname, ".")
+ return parts[len(parts)-1]
+}
diff --git a/tools/testgrid-analysis/cmd/flaky.go b/tools/testgrid-analysis/cmd/flaky.go
new file mode 100644
index 00000000000..2752b81a873
--- /dev/null
+++ b/tools/testgrid-analysis/cmd/flaky.go
@@ -0,0 +1,79 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+// flakyCmd represents the flaky command
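+// An illustrative invocation (flag values are examples):
+//
+//	testgrid-analysis flaky --dashboard=sig-etcd-periodics --tab=ci-etcd-e2e-amd64 --max-days=30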
+var flakyCmd = &cobra.Command{
+ Use: "flaky",
+ Short: "detect flaky tests",
+ Long: `detect flaky tests within the dashboard#tab, and create GitHub issues if desired.`,
+ Run: flakyFunc,
+}
+
+var (
+ flakyThreshold float32
+ minRuns int
+ maxRuns int
+ maxDays int
+ createGithubIssue bool
+ githubOwner string
+ githubRepo string
+
+ lineSep = "-------------------------------------------------------------"
+)
+
+func init() {
+ rootCmd.AddCommand(flakyCmd)
+
+ flakyCmd.Flags().BoolVar(&createGithubIssue, "create-issue", false, "create Github issue for each flaky test")
+ flakyCmd.Flags().Float32Var(&flakyThreshold, "flaky-threshold", 0.1, "fraction threshold of test failures for a test to be considered flaky")
+ flakyCmd.Flags().IntVar(&minRuns, "min-runs", 20, "minimum test runs for a test to be included in flaky analysis")
+ flakyCmd.Flags().IntVar(&maxRuns, "max-runs", 0, "maximum test runs for a test to be included in flaky analysis, 0 to include all")
+ flakyCmd.Flags().IntVar(&maxDays, "max-days", 0, "maximum days of results before today to be included in flaky analysis, 0 to include all")
+ flakyCmd.Flags().StringVar(&githubOwner, "github-owner", "etcd-io", "the github organization to create the issue for")
+ flakyCmd.Flags().StringVar(&githubRepo, "github-repo", "etcd", "the github repo to create the issue for")
+}
+
+func flakyFunc(cmd *cobra.Command, args []string) {
+ fmt.Printf("flaky called, for %s#%s, createGithubIssue=%v, githubRepo=%s/%s, flakyThreshold=%f, minRuns=%d\n", dashboard, tab, createGithubIssue, githubOwner, githubRepo, flakyThreshold, minRuns)
+
+ allTests := fetchTestResultSummaries(dashboard, tab)
+ flakyTests := []*TestResultSummary{}
+ for _, t := range allTests {
+ if t.TotalRuns >= minRuns && t.FailureRate >= flakyThreshold {
+ flakyTests = append(flakyTests, t)
+ }
+ }
+ fmt.Println(lineSep)
+ fmt.Printf("Detected total %d flaky tests above the %.0f%% threshold for %s#%s\n", len(flakyTests), flakyThreshold*100, dashboard, tab)
+ fmt.Println(lineSep)
+ if len(flakyTests) == 0 {
+ return
+ }
+ for _, t := range flakyTests {
+ fmt.Println(lineSep)
+ fmt.Println(t.IssueBody)
+ fmt.Println(lineSep)
+ }
+ if createGithubIssue {
+ createIssues(flakyTests, []string{"type/flake"})
+ }
+}
diff --git a/tools/testgrid-analysis/cmd/github.go b/tools/testgrid-analysis/cmd/github.go
new file mode 100644
index 00000000000..47445da4a8e
--- /dev/null
+++ b/tools/testgrid-analysis/cmd/github.go
@@ -0,0 +1,78 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/google/go-github/v60/github"
+)
+
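+// createIssues opens a GitHub issue for every summary that does not already
+// have a matching open issue, authenticating via the GITHUB_TOKEN environment
+// variable.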
+func createIssues(tests []*TestResultSummary, labels []string) {
+ openIssues := getOpenIssues(labels)
+ for _, t := range tests {
+ createIssueIfNonExist(t, openIssues, append(labels, "help wanted"))
+ }
+}
+
+func getOpenIssues(labels []string) []*github.Issue {
+ client := github.NewClient(nil).WithAuthToken(os.Getenv("GITHUB_TOKEN"))
+ ctx := context.Background()
+ // list open issues with label type/flake
+ issueOpt := &github.IssueListByRepoOptions{
+ Labels: labels,
+ ListOptions: github.ListOptions{PerPage: 100},
+ }
+ allIssues := []*github.Issue{}
+ for {
+ issues, resp, err := client.Issues.ListByRepo(ctx, githubOwner, githubRepo, issueOpt)
+ if err != nil {
+ panic(err)
+ }
+ allIssues = append(allIssues, issues...)
+ if resp.NextPage == 0 {
+ break
+ }
+ issueOpt.Page = resp.NextPage
+ }
+ fmt.Printf("There are %d issues open with label %v\n", len(allIssues), labels)
+ return allIssues
+}
+
+func createIssueIfNonExist(t *TestResultSummary, issues []*github.Issue, labels []string) {
+ // check if there is already an open issue regarding this test
+ for _, issue := range issues {
+ if strings.Contains(*issue.Title, t.Name) {
+ fmt.Printf("%s is already open for test %s\n\n", issue.GetHTMLURL(), t.Name)
+ return
+ }
+ }
+ fmt.Printf("Opening new issue for %s\n", t.Name)
+ client := github.NewClient(nil).WithAuthToken(os.Getenv("GITHUB_TOKEN"))
+ ctx := context.Background()
+ req := &github.IssueRequest{
+ Title: github.String(fmt.Sprintf("Flaky test %s", t.Name)),
+ Body: &t.IssueBody,
+ Labels: &labels,
+ }
+ issue, _, err := client.Issues.Create(ctx, githubOwner, githubRepo, req)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Printf("New issue %s created for %s\n\n", issue.GetHTMLURL(), t.Name)
+}
diff --git a/tools/testgrid-analysis/cmd/root.go b/tools/testgrid-analysis/cmd/root.go
new file mode 100644
index 00000000000..046bdec4fad
--- /dev/null
+++ b/tools/testgrid-analysis/cmd/root.go
@@ -0,0 +1,44 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "os"
+
+ "github.com/spf13/cobra"
+)
+
+var (
+ dashboard string
+ tab string
+)
+
+var rootCmd = &cobra.Command{
+ Use: "testgrid-analysis",
+ Short: "testgrid-analysis",
+ Long: `testgrid-analysis analyzes the testgrid test results of sig-etcd.`,
+}
+
+func Execute() {
+ err := rootCmd.Execute()
+ if err != nil {
+ os.Exit(1)
+ }
+}
+
+func init() {
+ rootCmd.PersistentFlags().StringVar(&dashboard, "dashboard", "sig-etcd-periodics", "testgrid dashboard to retrieve data from")
+ rootCmd.PersistentFlags().StringVar(&tab, "tab", "ci-etcd-e2e-amd64", "testgrid tab within the dashboard to retrieve data from")
+}
diff --git a/tools/testgrid-analysis/go.mod b/tools/testgrid-analysis/go.mod
new file mode 100644
index 00000000000..a681757299a
--- /dev/null
+++ b/tools/testgrid-analysis/go.mod
@@ -0,0 +1,25 @@
+module go.etcd.io/etcd/tools/testgrid-analysis/v3
+
+go 1.23
+
+toolchain go1.23.4
+
+require (
+ github.com/GoogleCloudPlatform/testgrid v0.0.173
+ github.com/google/go-github/v60 v60.0.0
+ github.com/spf13/cobra v1.8.1
+ google.golang.org/protobuf v1.36.1
+)
+
+require (
+ github.com/google/go-querystring v1.1.0 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/spf13/pflag v1.0.5 // indirect
+ go.opentelemetry.io/otel v1.33.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.33.0 // indirect
+ golang.org/x/net v0.33.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/text v0.21.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb // indirect
+ google.golang.org/grpc v1.69.2 // indirect
+)
diff --git a/tools/testgrid-analysis/go.sum b/tools/testgrid-analysis/go.sum
new file mode 100644
index 00000000000..3ea96a45603
--- /dev/null
+++ b/tools/testgrid-analysis/go.sum
@@ -0,0 +1,2012 @@
+bitbucket.org/creachadair/stringset v0.0.11/go.mod h1:wh0BHewFe+j0HrzWz7KcGbSNpFzWwnpmgPRlB57U5jU=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
+cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
+cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
+cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM=
+cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I=
+cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
+cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw=
+cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
+cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
+cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
+cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4=
+cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw=
+cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E=
+cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68=
+cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o=
+cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE=
+cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM=
+cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ=
+cloud.google.com/go/accesscontextmanager v1.8.0/go.mod h1:uI+AI/r1oyWK99NN8cQ3UK76AMelMzgZCvJfsi2c+ps=
+cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo=
+cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw=
+cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY=
+cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg=
+cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ=
+cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k=
+cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw=
+cloud.google.com/go/aiplatform v1.45.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA=
+cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA=
+cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI=
+cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4=
+cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M=
+cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE=
+cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE=
+cloud.google.com/go/analytics v0.21.2/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo=
+cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo=
+cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk=
+cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc=
+cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8=
+cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA=
+cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc=
+cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04=
+cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8=
+cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs=
+cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY=
+cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM=
+cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc=
+cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw=
+cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU=
+cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI=
+cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8=
+cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno=
+cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak=
+cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84=
+cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A=
+cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E=
+cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY=
+cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4=
+cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0=
+cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY=
+cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k=
+cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg=
+cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ=
+cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk=
+cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0=
+cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc=
+cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI=
+cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ=
+cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI=
+cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08=
+cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E=
+cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o=
+cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s=
+cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0=
+cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ=
+cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY=
+cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo=
+cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg=
+cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw=
+cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ=
+cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY=
+cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw=
+cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI=
+cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo=
+cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
+cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E=
+cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0=
+cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=
+cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8=
+cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8=
+cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM=
+cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU=
+cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE=
+cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc=
+cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI=
+cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss=
+cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA=
+cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE=
+cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE=
+cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g=
+cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A=
+cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4=
+cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8=
+cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM=
+cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU=
+cloud.google.com/go/beyondcorp v0.6.1/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4=
+cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA=
+cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw=
+cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc=
+cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E=
+cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac=
+cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q=
+cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU=
+cloud.google.com/go/bigquery v1.52.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4=
+cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4=
+cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY=
+cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s=
+cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI=
+cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y=
+cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss=
+cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc=
+cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA=
+cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM=
+cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI=
+cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0=
+cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk=
+cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q=
+cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U=
+cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg=
+cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590=
+cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8=
+cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI=
+cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk=
+cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk=
+cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE=
+cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU=
+cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc=
+cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U=
+cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA=
+cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M=
+cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg=
+cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s=
+cloud.google.com/go/cloudbuild v1.10.1/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU=
+cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU=
+cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM=
+cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk=
+cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA=
+cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI=
+cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY=
+cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI=
+cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4=
+cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI=
+cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y=
+cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs=
+cloud.google.com/go/cloudtasks v1.11.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM=
+cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
+cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
+cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
+cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
+cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=
+cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE=
+cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
+cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
+cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs=
+cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
+cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
+cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI=
+cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
+cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
+cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU=
+cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY=
+cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck=
+cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w=
+cloud.google.com/go/contactcenterinsights v1.9.1/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM=
+cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM=
+cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg=
+cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo=
+cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4=
+cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM=
+cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA=
+cloud.google.com/go/container v1.22.1/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4=
+cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4=
+cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I=
+cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4=
+cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI=
+cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s=
+cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0=
+cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0=
+cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs=
+cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc=
+cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE=
+cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM=
+cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M=
+cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0=
+cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8=
+cloud.google.com/go/datacatalog v1.14.0/go.mod h1:h0PrGtlihoutNMp/uvwhawLQ9+c63Kz65UFqh49Yo+E=
+cloud.google.com/go/datacatalog v1.14.1/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4=
+cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4=
+cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM=
+cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ=
+cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE=
+cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw=
+cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo=
+cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE=
+cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0=
+cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA=
+cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE=
+cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M=
+cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38=
+cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w=
+cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8=
+cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI=
+cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I=
+cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ=
+cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM=
+cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY=
+cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA=
+cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A=
+cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ=
+cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs=
+cloud.google.com/go/dataplex v1.8.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE=
+cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE=
+cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s=
+cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI=
+cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4=
+cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4=
+cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo=
+cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA=
+cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c=
+cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM=
+cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c=
+cloud.google.com/go/datastore v1.12.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70=
+cloud.google.com/go/datastore v1.12.1/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70=
+cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70=
+cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo=
+cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ=
+cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g=
+cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4=
+cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs=
+cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww=
+cloud.google.com/go/datastream v1.9.1/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q=
+cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q=
+cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c=
+cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s=
+cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI=
+cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ=
+cloud.google.com/go/deploy v1.11.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g=
+cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g=
+cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4=
+cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0=
+cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8=
+cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek=
+cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0=
+cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM=
+cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4=
+cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE=
+cloud.google.com/go/dialogflow v1.38.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4=
+cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4=
+cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM=
+cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q=
+cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4=
+cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI=
+cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU=
+cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU=
+cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k=
+cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4=
+cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM=
+cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs=
+cloud.google.com/go/documentai v1.20.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E=
+cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E=
+cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y=
+cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg=
+cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE=
+cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE=
+cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk=
+cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w=
+cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc=
+cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY=
+cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk=
+cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU=
+cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI=
+cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8=
+cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M=
+cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4=
+cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc=
+cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw=
+cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw=
+cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY=
+cloud.google.com/go/eventarc v1.12.1/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI=
+cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI=
+cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w=
+cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI=
+cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs=
+cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg=
+cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4=
+cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE=
+cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4=
+cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk=
+cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg=
+cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY=
+cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08=
+cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw=
+cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA=
+cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c=
+cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE=
+cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM=
+cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA=
+cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w=
+cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM=
+cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0=
+cloud.google.com/go/gaming v1.10.1/go.mod h1:XQQvtfP8Rb9Rxnxm5wFVpAp9zCQkJi2bLIb7iHGwB3s=
+cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60=
+cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo=
+cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg=
+cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU=
+cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o=
+cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A=
+cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw=
+cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw=
+cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0=
+cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0=
+cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E=
+cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw=
+cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY=
+cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA=
+cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI=
+cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y=
+cloud.google.com/go/gkemulticloud v0.6.1/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw=
+cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw=
+cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc=
+cloud.google.com/go/grafeas v0.3.0/go.mod h1:P7hgN24EyONOTMyeJH6DxG4zD7fwiYa5Q6GUgyFSOU8=
+cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM=
+cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o=
+cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo=
+cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY=
+cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c=
+cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
+cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc=
+cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc=
+cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg=
+cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
+cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY=
+cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY=
+cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
+cloud.google.com/go/iam v1.0.1/go.mod h1:yR3tmSL8BcZB4bxByRv2jkSIahVmCtfKZwLYGBalRE8=
+cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk=
+cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
+cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc=
+cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A=
+cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk=
+cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo=
+cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74=
+cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ=
+cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM=
+cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY=
+cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4=
+cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw=
+cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs=
+cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g=
+cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o=
+cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE=
+cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk=
+cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=
+cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg=
+cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0=
+cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg=
+cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w=
+cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24=
+cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI=
+cloud.google.com/go/kms v1.11.0/go.mod h1:hwdiYC0xjnWsKQQCQQmIQnS9asjYVSK6jtXm+zFqXLM=
+cloud.google.com/go/kms v1.12.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM=
+cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM=
+cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
+cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI=
+cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE=
+cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8=
+cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY=
+cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0=
+cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8=
+cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08=
+cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo=
+cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc=
+cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw=
+cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M=
+cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE=
+cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc=
+cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo=
+cloud.google.com/go/longrunning v0.4.2/go.mod h1:OHrnaYyLUV6oqwh0xiS7e5sLQhP1m0QU9R+WhGDMgIQ=
+cloud.google.com/go/longrunning v0.5.0/go.mod h1:0JNuqRShmscVAhIACGtskSAWtqtOoPkwP0YF1oVEchc=
+cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc=
+cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE=
+cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM=
+cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA=
+cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak=
+cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI=
+cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw=
+cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY=
+cloud.google.com/go/maps v1.3.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s=
+cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s=
+cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4=
+cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w=
+cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I=
+cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig=
+cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE=
+cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM=
+cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA=
+cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY=
+cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM=
+cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA=
+cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY=
+cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s=
+cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8=
+cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI=
+cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo=
+cloud.google.com/go/metastore v1.11.1/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA=
+cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA=
+cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk=
+cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4=
+cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w=
+cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw=
+cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM=
+cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA=
+cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o=
+cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM=
+cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8=
+cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E=
+cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM=
+cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E=
+cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8=
+cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4=
+cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY=
+cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0=
+cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ=
+cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU=
+cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k=
+cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU=
+cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ=
+cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY=
+cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34=
+cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA=
+cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0=
+cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE=
+cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ=
+cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8=
+cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4=
+cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs=
+cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI=
+cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk=
+cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA=
+cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk=
+cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ=
+cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8=
+cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE=
+cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc=
+cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc=
+cloud.google.com/go/orgpolicy v1.11.0/go.mod h1:2RK748+FtVvnfuynxBzdnyu7sygtoZa1za/0ZfpOs1M=
+cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE=
+cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs=
+cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg=
+cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo=
+cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw=
+cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw=
+cloud.google.com/go/osconfig v1.12.0/go.mod h1:8f/PaYzoS3JMVfdfTubkowZYGmAhUCjjwnjqWI7NVBc=
+cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE=
+cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E=
+cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU=
+cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70=
+cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo=
+cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs=
+cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs=
+cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0=
+cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA=
+cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk=
+cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I=
+cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg=
+cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE=
+cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw=
+cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc=
+cloud.google.com/go/policytroubleshooter v1.7.1/go.mod h1:0NaT5v3Ag1M7U5r0GfDCpUFkWd9YqpubBWsQlhanRv0=
+cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU=
+cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0=
+cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI=
+cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg=
+cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs=
+cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI=
+cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0=
+cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8=
+cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4=
+cloud.google.com/go/pubsub v1.32.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc=
+cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc=
+cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg=
+cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k=
+cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM=
+cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0=
+cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
+cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o=
+cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk=
+cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo=
+cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE=
+cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U=
+cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA=
+cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c=
+cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU=
+cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg=
+cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4=
+cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac=
+cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE=
+cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg=
+cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c=
+cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs=
+cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70=
+cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ=
+cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA=
+cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y=
+cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A=
+cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA=
+cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM=
+cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ=
+cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg=
+cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA=
+cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0=
+cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots=
+cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo=
+cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI=
+cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8=
+cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU=
+cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg=
+cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA=
+cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw=
+cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4=
+cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY=
+cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc=
+cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y=
+cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14=
+cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE=
+cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do=
+cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo=
+cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM=
+cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg=
+cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo=
+cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s=
+cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI=
+cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk=
+cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44=
+cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc=
+cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc=
+cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo=
+cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=
+cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4=
+cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
+cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU=
+cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw=
+cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4=
+cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0=
+cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU=
+cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q=
+cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
+cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8=
+cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0=
+cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA=
+cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=
+cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc=
+cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk=
+cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
+cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0=
+cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag=
+cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ=
+cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU=
+cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
+cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA=
+cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc=
+cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk=
+cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=
+cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg=
+cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4=
+cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
+cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY=
+cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s=
+cloud.google.com/go/servicedirectory v1.10.1/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ=
+cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ=
+cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco=
+cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
+cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc=
+cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4=
+cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E=
+cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
+cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec=
+cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA=
+cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4=
+cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
+cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A=
+cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g=
+cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
+cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk=
+cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M=
+cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI=
+cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
+cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
+cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
+cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
+cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0=
+cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI=
+cloud.google.com/go/speech v1.17.1/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo=
+cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
+cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
+cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
+cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
+cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4=
+cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E=
+cloud.google.com/go/storage v1.31.0/go.mod h1:81ams1PrhW16L4kF7qg+4mTq7SRs5HsbDTM0bWvrwJ0=
+cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
+cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4=
+cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw=
+cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA=
+cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
+cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
+cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM=
+cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA=
+cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c=
+cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24=
+cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8=
+cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4=
+cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc=
+cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk=
+cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ=
+cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg=
+cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM=
+cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E=
+cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28=
+cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
+cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA=
+cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk=
+cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk=
+cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs=
+cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
+cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0=
+cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/translate v1.8.1/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs=
+cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs=
+cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk=
+cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw=
+cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg=
+cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk=
+cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/video v1.17.1/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU=
+cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU=
+cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
+cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4=
+cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M=
+cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU=
+cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU=
+cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo=
+cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
+cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
+cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo=
+cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY=
+cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
+cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY=
+cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0=
+cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU=
+cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE=
+cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
+cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc=
+cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY=
+cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro=
+cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208=
+cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8=
+cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY=
+cloud.google.com/go/vmwareengine v0.4.1/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0=
+cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0=
+cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w=
+cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
+cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes=
+cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs=
+cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE=
+cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg=
+cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc=
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A=
+cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg=
+cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc=
+cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo=
+cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ=
+cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng=
+cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg=
+cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=
+cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M=
+cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M=
+cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
+cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw=
+cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
+git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/GoogleCloudPlatform/testgrid v0.0.173 h1:NyHqWe5gJW9G22VjwTBYpQniMfgQFKNm5OpLaRop97s=
+github.com/GoogleCloudPlatform/testgrid v0.0.173/go.mod h1:lOKP2QzzzIDB4D0nJs1BcNMzJErjrlTNqG3vsCddx8c=
+github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
+github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0=
+github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI=
+github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg=
+github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
+github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI=
+github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
+github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
+github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
+github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
+github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-chi/chi v1.5.4/go.mod h1:uaf8YgoFazUOkPBG7fxPftUylNumIev9awIWOENIuEg=
+github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
+github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
+github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
+github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
+github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-github/v60 v60.0.0 h1:oLG98PsLauFvvu4D/YPxq374jhSxFYdzQGNCyONLfn8=
+github.com/google/go-github/v60 v60.0.0/go.mod h1:ByhX2dP9XT9o/ll2yXAu2VD8l5eNVg8hD4Cr0S/LmQk=
+github.com/google/go-pkcs11 v0.2.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM=
+github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
+github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
+github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
+github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
+github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
+github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
+github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
+github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
+github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw=
+github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
+github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
+github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
+github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
+github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0=
+github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
+github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
+github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
+github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
+github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk=
+github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
+github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
+github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
+github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
+github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
+github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
+github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
+github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw=
+github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
+github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
+github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
+github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
+github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
+go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
+go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
+go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
+go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
+go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
+go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
+golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
+golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
+golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
+golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
+golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I=
+golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
+golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
+golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
+golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
+golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
+golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
+golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
+gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
+gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
+gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY=
+gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
+google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
+google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
+google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
+google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
+google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
+google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
+google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
+google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
+google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
+google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08=
+google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70=
+google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo=
+google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
+google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI=
+google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0=
+google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg=
+google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjYK+5E=
+google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms=
+google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4=
+google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
+google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
+google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750=
+google.golang.org/api v0.134.0/go.mod h1:sjRL3UnjTx5UqNQS9EWr9N8p7xbHpy1k0XGRLCf3Spk=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE=
+google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
+google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw=
+google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
+google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
+google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U=
+google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
+google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
+google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo=
+google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE=
+google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA=
+google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw=
+google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw=
+google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA=
+google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
+google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
+google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
+google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY=
+google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk=
+google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64=
+google.golang.org/genproto v0.0.0-20230629202037-9506855d4529/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64=
+google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y=
+google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108=
+google.golang.org/genproto v0.0.0-20230731193218-e0aa005b6bdf/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
+google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8=
+google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/api v0.0.0-20230629202037-9506855d4529/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20230731193218-e0aa005b6bdf/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20230720185612-659f7aaaa771/go.mod h1:3QoBVwTHkXbY1oRGzlhwhOykfcATQN43LJ6iT8Wy8kE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:8mL13HKkDa+IuJ8yruA3ci0q+0vsUz4m//+ottjwS5o=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230731190214-cbb8c96f2d6d/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230731193218-e0aa005b6bdf/go.mod h1:zBEcrKX2ZOcEkHWxBPAIvYUWOKKMIhYcmNiUIu2ji3I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb h1:3oy2tynMOP1QbTC0MsNNAV+Se8M2Bd0A5+x1QHyw+pI=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
+google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
+google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
+google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
+google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
+google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
+google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
+google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
+google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
+google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
+k8s.io/api v0.27.4/go.mod h1:O3smaaX15NfxjzILfiln1D8Z3+gEYpjEpiNA/1EVK1Y=
+k8s.io/apimachinery v0.27.4/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E=
+k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg=
+k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
+modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
+modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
+modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20=
+modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0=
+modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc=
+modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw=
+modernc.org/ccgo/v3 v3.0.0-20220904174949-82d86e1b6d56/go.mod h1:YSXjPL62P2AMSxBphRHPn7IkzhVHqkvOnRKAKh+W6ZI=
+modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
+modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
+modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws=
+modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo=
+modernc.org/ccgo/v3 v3.16.13-0.20221017192402-261537637ce8/go.mod h1:fUB3Vn0nVPReA+7IG7yZDfjv1TMWjhQP8gCxrFAtL5g=
+modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY=
+modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
+modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
+modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
+modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A=
+modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU=
+modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU=
+modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
+modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0=
+modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s=
+modernc.org/libc v1.17.4/go.mod h1:WNg2ZH56rDEwdropAJeZPQkXmDwh+JCA1s/htl6r2fA=
+modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0=
+modernc.org/libc v1.20.3/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0=
+modernc.org/libc v1.21.4/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI=
+modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug=
+modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
+modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
+modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/memory v1.3.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4=
+modernc.org/sqlite v1.18.2/go.mod h1:kvrTLEWgxUcHa2GfHBQtanR1H9ht3hTJNtKpzH9k1u0=
+modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
+modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
+modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw=
+modernc.org/tcl v1.13.2/go.mod h1:7CLiGIPo1M8Rv1Mitpv5akc2+8fxUd2y2UzC/MfMzy0=
+modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
+sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/tools/testgrid-analysis/main.go b/tools/testgrid-analysis/main.go
new file mode 100644
index 00000000000..f51c16e41d3
--- /dev/null
+++ b/tools/testgrid-analysis/main.go
@@ -0,0 +1,21 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import "go.etcd.io/etcd/tools/testgrid-analysis/v3/cmd"
+
+func main() {
+ cmd.Execute()
+}
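
The new main.go above only delegates to cmd.Execute() in the tool's cmd package, which is not shown in this diff. As a rough sketch of the assumed wiring, the cmd package likely sets up a Cobra-style root command, a pattern common in etcd tooling; the "analyze" subcommand, its behavior, and the runAnalyze placeholder below are hypothetical illustrations, not the actual etcd implementation.

```go
// Hypothetical sketch of the cmd package assumed by main.go; not the real
// etcd tools/testgrid-analysis code. It shows the typical Cobra wiring that
// an Execute() entry point implies.
package cmd

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// rootCmd is the top-level command that main() ultimately runs.
var rootCmd = &cobra.Command{
	Use:   "testgrid-analysis",
	Short: "Analyze TestGrid results for etcd CI (sketch)",
}

func init() {
	// A hypothetical subcommand; the real tool's subcommands and flags may differ.
	analyzeCmd := &cobra.Command{
		Use:   "analyze",
		Short: "Summarize test results from TestGrid (hypothetical)",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("analysis would run here")
		},
	}
	rootCmd.AddCommand(analyzeCmd)
}

// Execute runs the root command tree; main() delegates here.
func Execute() {
	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```

Under this assumption, `go run ./tools/testgrid-analysis analyze` would dispatch through Execute() to the subcommand; keeping main.go to a single cmd.Execute() call keeps all flag and subcommand logic testable inside the cmd package.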